Dataset schema (one field per column, one record per row):

  CVE ID          string, length 13-43 characters
  CVE Page        string, length 45-48 characters
  CWE ID          string, 90 distinct classes
  codeLink        string, length 46-139 characters
  commit_id       string, length 6-81 characters
  commit_message  string, length 3-13.3k characters
  func_after      string, length 14-241k characters
  func_before     string, length 14-241k characters
  lang            string, 3 distinct classes
  project         string, 309 distinct classes
  vul             int8, values 0 or 1
CVE-2017-5019
https://www.cvedetails.com/cve/CVE-2017-5019/
CWE-416
https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
Convert FrameHostMsg_DidAddMessageToConsole to Mojo. Note: Since this required changing the test RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually re-introduced https://crbug.com/666714 locally (the bug the test was added for), and reran the test to confirm that it still covers the bug. Bug: 786836 Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270 Commit-Queue: Lowell Manners <lowell@chromium.org> Reviewed-by: Daniel Cheng <dcheng@chromium.org> Reviewed-by: Camille Lamy <clamy@chromium.org> Cr-Commit-Position: refs/heads/master@{#653137}
void RenderFrameHostImpl::SetNetworkFactoryForTesting( const CreateNetworkFactoryCallback& create_network_factory_callback) { DCHECK(!BrowserThread::IsThreadInitialized(BrowserThread::UI) || BrowserThread::CurrentlyOn(BrowserThread::UI)); DCHECK(create_network_factory_callback.is_null() || GetCreateNetworkFactoryCallbackForRenderFrame().is_null()) << "It is not expected that this is called with non-null callback when " << "another overriding callback is already set."; GetCreateNetworkFactoryCallbackForRenderFrame() = create_network_factory_callback; }
void RenderFrameHostImpl::SetNetworkFactoryForTesting( const CreateNetworkFactoryCallback& create_network_factory_callback) { DCHECK(!BrowserThread::IsThreadInitialized(BrowserThread::UI) || BrowserThread::CurrentlyOn(BrowserThread::UI)); DCHECK(create_network_factory_callback.is_null() || GetCreateNetworkFactoryCallbackForRenderFrame().is_null()) << "It is not expected that this is called with non-null callback when " << "another overriding callback is already set."; GetCreateNetworkFactoryCallbackForRenderFrame() = create_network_factory_callback; }
C
Chrome
0
CVE-2017-0818
https://www.cvedetails.com/cve/CVE-2017-0818/
CWE-772
https://android.googlesource.com/platform/frameworks/av/+/d07f5c14e811951ff9b411ceb84e7288e0d04aaf
d07f5c14e811951ff9b411ceb84e7288e0d04aaf
Fix memory leak in OggExtractor Test: added a temporal log and run poc Bug: 63581671 Change-Id: I436a08e54d5e831f9fbdb33c26d15397ce1fbeba (cherry picked from commit 63079e7c8e12cda4eb124fbe565213d30b9ea34c)
int64_t MyOpusExtractor::getTimeUsOfGranule(uint64_t granulePos) const { uint64_t pcmSamplePosition = 0; if (granulePos > mCodecDelay) { pcmSamplePosition = granulePos - mCodecDelay; } if (pcmSamplePosition > INT64_MAX / 1000000ll) { return INT64_MAX; } return pcmSamplePosition * 1000000ll / kOpusSampleRate; }
int64_t MyOpusExtractor::getTimeUsOfGranule(uint64_t granulePos) const { uint64_t pcmSamplePosition = 0; if (granulePos > mCodecDelay) { pcmSamplePosition = granulePos - mCodecDelay; } if (pcmSamplePosition > INT64_MAX / 1000000ll) { return INT64_MAX; } return pcmSamplePosition * 1000000ll / kOpusSampleRate; }
C
Android
0
CVE-2013-7281
https://www.cvedetails.com/cve/CVE-2013-7281/
CWE-200
https://github.com/torvalds/linux/commit/bceaa90240b6019ed73b49965eac7d167610be69
bceaa90240b6019ed73b49965eac7d167610be69
inet: prevent leakage of uninitialized memory to user in recv syscalls Only update *addr_len when we actually fill in sockaddr, otherwise we can return uninitialized memory from the stack to the caller in the recvfrom, recvmmsg and recvmsg syscalls. Drop the the (addr_len == NULL) checks because we only get called with a valid addr_len pointer either from sock_common_recvmsg or inet_recvmsg. If a blocking read waits on a socket which is concurrently shut down we now return zero and set msg_msgnamelen to 0. Reported-by: mpb <mpb.mail@gmail.com> Suggested-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
static int udp_v6_push_pending_frames(struct sock *sk) { struct sk_buff *skb; struct udphdr *uh; struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct flowi6 *fl6; int err = 0; int is_udplite = IS_UDPLITE(sk); __wsum csum = 0; if (up->pending == AF_INET) return udp_push_pending_frames(sk); fl6 = &inet->cork.fl.u.ip6; /* Grab the skbuff where UDP header space exists. */ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; /* * Create a UDP header */ uh = udp_hdr(skb); uh->source = fl6->fl6_sport; uh->dest = fl6->fl6_dport; uh->len = htons(up->len); uh->check = 0; if (is_udplite) csum = udplite_csum_outgoing(sk, skb); else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, up->len); goto send; } else csum = udp_csum_outgoing(sk, skb); /* add protocol-dependent pseudo-header */ uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, up->len, fl6->flowi6_proto, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; send: err = ip6_push_pending_frames(sk); if (err) { if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_OUTDATAGRAMS, is_udplite); out: up->len = 0; up->pending = 0; return err; }
static int udp_v6_push_pending_frames(struct sock *sk) { struct sk_buff *skb; struct udphdr *uh; struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct flowi6 *fl6; int err = 0; int is_udplite = IS_UDPLITE(sk); __wsum csum = 0; if (up->pending == AF_INET) return udp_push_pending_frames(sk); fl6 = &inet->cork.fl.u.ip6; /* Grab the skbuff where UDP header space exists. */ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; /* * Create a UDP header */ uh = udp_hdr(skb); uh->source = fl6->fl6_sport; uh->dest = fl6->fl6_dport; uh->len = htons(up->len); uh->check = 0; if (is_udplite) csum = udplite_csum_outgoing(sk, skb); else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, up->len); goto send; } else csum = udp_csum_outgoing(sk, skb); /* add protocol-dependent pseudo-header */ uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, up->len, fl6->flowi6_proto, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; send: err = ip6_push_pending_frames(sk); if (err) { if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_OUTDATAGRAMS, is_udplite); out: up->len = 0; up->pending = 0; return err; }
C
linux
0
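The commit message in the record above (CVE-2013-7281) describes the pattern behind the fix: fill in the sockaddr first and only then update *addr_len, so uninitialized stack memory is never reported back to user space. The following is a minimal, self-contained C sketch of that pattern only; the names (fill_sender, struct fake_peer) are hypothetical and do not come from the kernel source.

/* Hypothetical illustration of the pattern named in the commit message:
 * write the address first, then set *addr_len; if no address is available,
 * leave *addr_len untouched so stale stack bytes are never exposed. */
#include <string.h>
#include <sys/socket.h>

struct fake_peer {
    sa_family_t family;
    unsigned char addr[14];
};

static void fill_sender(struct sockaddr_storage *out, int *addr_len,
                        const struct fake_peer *peer)
{
    if (!peer)
        return;                      /* nothing received: *addr_len stays as-is */

    memset(out, 0, sizeof(*out));    /* no uninitialized padding bytes */
    out->ss_family = peer->family;
    memcpy((unsigned char *)out + sizeof(out->ss_family),
           peer->addr, sizeof(peer->addr));
    *addr_len = sizeof(struct sockaddr_storage);   /* set only after filling */
}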
CVE-2013-0311
https://www.cvedetails.com/cve/CVE-2013-0311/
null
https://github.com/torvalds/linux/commit/bd97120fc3d1a11f3124c7c9ba1d91f51829eb85
bd97120fc3d1a11f3124c7c9ba1d91f51829eb85
vhost: fix length for cross region descriptor If a single descriptor crosses a region, the second chunk length should be decremented by size translated so far, instead it includes the full descriptor length. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static int vhost_update_used_flags(struct vhost_virtqueue *vq) { void __user *used; if (__put_user(vq->used_flags, &vq->used->flags) < 0) return -EFAULT; if (unlikely(vq->log_used)) { /* Make sure the flag is seen before log. */ smp_wmb(); /* Log used flag write. */ used = &vq->used->flags; log_write(vq->log_base, vq->log_addr + (used - (void __user *)vq->used), sizeof vq->used->flags); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } return 0; }
static int vhost_update_used_flags(struct vhost_virtqueue *vq) { void __user *used; if (__put_user(vq->used_flags, &vq->used->flags) < 0) return -EFAULT; if (unlikely(vq->log_used)) { /* Make sure the flag is seen before log. */ smp_wmb(); /* Log used flag write. */ used = &vq->used->flags; log_write(vq->log_base, vq->log_addr + (used - (void __user *)vq->used), sizeof vq->used->flags); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } return 0; }
C
linux
0
CVE-2015-6782
https://www.cvedetails.com/cve/CVE-2015-6782/
CWE-20
https://github.com/chromium/chromium/commit/e1e0c4301aaa8228e362f2409dbde2d4d1896866
e1e0c4301aaa8228e362f2409dbde2d4d1896866
Don't change Document load progress in any page dismissal events. This can confuse the logic for blocking modal dialogs. BUG=536652 Review URL: https://codereview.chromium.org/1373113002 Cr-Commit-Position: refs/heads/master@{#351419}
void Document::didChangeVisibilityState() { dispatchEvent(Event::create(EventTypeNames::visibilitychange)); dispatchEvent(Event::create(EventTypeNames::webkitvisibilitychange)); PageVisibilityState state = pageVisibilityState(); for (DocumentVisibilityObserver* observer : m_visibilityObservers) observer->didChangeVisibilityState(state); if (state == PageVisibilityStateVisible) timeline().setAllCompositorPending(); if (hidden() && m_canvasFontCache) m_canvasFontCache->pruneAll(); }
void Document::didChangeVisibilityState() { dispatchEvent(Event::create(EventTypeNames::visibilitychange)); dispatchEvent(Event::create(EventTypeNames::webkitvisibilitychange)); PageVisibilityState state = pageVisibilityState(); for (DocumentVisibilityObserver* observer : m_visibilityObservers) observer->didChangeVisibilityState(state); if (state == PageVisibilityStateVisible) timeline().setAllCompositorPending(); if (hidden() && m_canvasFontCache) m_canvasFontCache->pruneAll(); }
C
Chrome
0
CVE-2018-1065
https://www.cvedetails.com/cve/CVE-2018-1065/
CWE-476
https://github.com/torvalds/linux/commit/57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
netfilter: add back stackpointer size checks The rationale for removing the check is only correct for rulesets generated by ip(6)tables. In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on number of user-defined chains we cannot exceed stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, its possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and crash occurs because no jumpstack was allocated. Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: syzbot+e783f671527912cd9403@syzkaller.appspotmail.com Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
cleanup_entry(struct ipt_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) cleanup_match(ematch, net); t = ipt_get_target(e); par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV4; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(&e->counters); }
cleanup_entry(struct ipt_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) cleanup_match(ematch, net); t = ipt_get_target(e); par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV4; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(&e->counters); }
C
linux
0
CVE-2018-18352
https://www.cvedetails.com/cve/CVE-2018-18352/
CWE-732
https://github.com/chromium/chromium/commit/a9cbaa7a40e2b2723cfc2f266c42f4980038a949
a9cbaa7a40e2b2723cfc2f266c42f4980038a949
Simplify "WouldTaintOrigin" concept in media/blink Currently WebMediaPlayer has three predicates: - DidGetOpaqueResponseFromServiceWorker - HasSingleSecurityOrigin - DidPassCORSAccessCheck . These are used to determine whether the response body is available for scripts. They are known to be confusing, and actually MediaElementAudioSourceHandler::WouldTaintOrigin misuses them. This CL merges the three predicates to one, WouldTaintOrigin, to remove the confusion. Now the "response type" concept is available and we don't need a custom CORS check, so this CL removes BaseAudioContext::WouldTaintOrigin. This CL also renames URLData::has_opaque_data_ and its (direct and indirect) data accessors to match the spec. Bug: 849942, 875153 Change-Id: I6acf50169d7445c4ff614e80ac606f79ee577d2a Reviewed-on: https://chromium-review.googlesource.com/c/1238098 Reviewed-by: Fredrik Hubinette <hubbe@chromium.org> Reviewed-by: Kinuko Yasuda <kinuko@chromium.org> Reviewed-by: Raymond Toy <rtoy@chromium.org> Commit-Queue: Yutaka Hirano <yhirano@chromium.org> Cr-Commit-Position: refs/heads/master@{#598258}
void HTMLMediaElement::ParserDidSetAttributes() { HTMLElement::ParserDidSetAttributes(); if (FastHasAttribute(mutedAttr)) muted_ = true; }
void HTMLMediaElement::ParserDidSetAttributes() { HTMLElement::ParserDidSetAttributes(); if (FastHasAttribute(mutedAttr)) muted_ = true; }
C
Chrome
0
CVE-2018-14361
https://www.cvedetails.com/cve/CVE-2018-14361/
CWE-20
https://github.com/neomutt/neomutt/commit/9e927affe3a021175f354af5fa01d22657c20585
9e927affe3a021175f354af5fa01d22657c20585
Add alloc fail check in nntp_fetch_headers
int nntp_post(const char *msg) { struct NntpData *nntp_data, nntp_tmp; char buf[LONG_STRING]; if (Context && Context->magic == MUTT_NNTP) nntp_data = Context->data; else { CurrentNewsSrv = nntp_select_server(NewsServer, false); if (!CurrentNewsSrv) return -1; nntp_data = &nntp_tmp; nntp_data->nserv = CurrentNewsSrv; nntp_data->group = NULL; } FILE *fp = mutt_file_fopen(msg, "r"); if (!fp) { mutt_perror(msg); return -1; } mutt_str_strfcpy(buf, "POST\r\n", sizeof(buf)); if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) { mutt_file_fclose(&fp); return -1; } if (buf[0] != '3') { mutt_error(_("Can't post article: %s"), buf); mutt_file_fclose(&fp); return -1; } buf[0] = '.'; buf[1] = '\0'; while (fgets(buf + 1, sizeof(buf) - 2, fp)) { size_t len = strlen(buf); if (buf[len - 1] == '\n') { buf[len - 1] = '\r'; buf[len] = '\n'; len++; buf[len] = '\0'; } if (mutt_socket_send_d(nntp_data->nserv->conn, buf[1] == '.' ? buf : buf + 1, MUTT_SOCK_LOG_HDR) < 0) { mutt_file_fclose(&fp); return nntp_connect_error(nntp_data->nserv); } } mutt_file_fclose(&fp); if ((buf[strlen(buf) - 1] != '\n' && mutt_socket_send_d(nntp_data->nserv->conn, "\r\n", MUTT_SOCK_LOG_HDR) < 0) || mutt_socket_send_d(nntp_data->nserv->conn, ".\r\n", MUTT_SOCK_LOG_HDR) < 0 || mutt_socket_readln(buf, sizeof(buf), nntp_data->nserv->conn) < 0) { return nntp_connect_error(nntp_data->nserv); } if (buf[0] != '2') { mutt_error(_("Can't post article: %s"), buf); return -1; } return 0; }
int nntp_post(const char *msg) { struct NntpData *nntp_data, nntp_tmp; char buf[LONG_STRING]; if (Context && Context->magic == MUTT_NNTP) nntp_data = Context->data; else { CurrentNewsSrv = nntp_select_server(NewsServer, false); if (!CurrentNewsSrv) return -1; nntp_data = &nntp_tmp; nntp_data->nserv = CurrentNewsSrv; nntp_data->group = NULL; } FILE *fp = mutt_file_fopen(msg, "r"); if (!fp) { mutt_perror(msg); return -1; } mutt_str_strfcpy(buf, "POST\r\n", sizeof(buf)); if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) { mutt_file_fclose(&fp); return -1; } if (buf[0] != '3') { mutt_error(_("Can't post article: %s"), buf); mutt_file_fclose(&fp); return -1; } buf[0] = '.'; buf[1] = '\0'; while (fgets(buf + 1, sizeof(buf) - 2, fp)) { size_t len = strlen(buf); if (buf[len - 1] == '\n') { buf[len - 1] = '\r'; buf[len] = '\n'; len++; buf[len] = '\0'; } if (mutt_socket_send_d(nntp_data->nserv->conn, buf[1] == '.' ? buf : buf + 1, MUTT_SOCK_LOG_HDR) < 0) { mutt_file_fclose(&fp); return nntp_connect_error(nntp_data->nserv); } } mutt_file_fclose(&fp); if ((buf[strlen(buf) - 1] != '\n' && mutt_socket_send_d(nntp_data->nserv->conn, "\r\n", MUTT_SOCK_LOG_HDR) < 0) || mutt_socket_send_d(nntp_data->nserv->conn, ".\r\n", MUTT_SOCK_LOG_HDR) < 0 || mutt_socket_readln(buf, sizeof(buf), nntp_data->nserv->conn) < 0) { return nntp_connect_error(nntp_data->nserv); } if (buf[0] != '2') { mutt_error(_("Can't post article: %s"), buf); return -1; } return 0; }
C
neomutt
0
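The one-line commit message in the record above (CVE-2018-14361) only names the fix: adding an allocation-failure check in nntp_fetch_headers. As a purely illustrative, hypothetical C sketch of that kind of check (the function and field names below are invented and are not NeoMutt's):

#include <stdlib.h>

/* Hypothetical: report failure when the header array cannot be allocated,
 * instead of dereferencing a NULL pointer later in the fetch loop. */
struct header_list {
    char **lines;
    size_t count;
};

static int fetch_headers(struct header_list *hl, size_t wanted)
{
    hl->lines = calloc(wanted, sizeof(*hl->lines));
    if (!hl->lines)          /* allocation failed: propagate the error */
        return -1;
    hl->count = wanted;
    return 0;
}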
CVE-2011-2802
https://www.cvedetails.com/cve/CVE-2011-2802/
CWE-399
https://github.com/chromium/chromium/commit/4ab22cfc619ee8ff17a8c50e289ec3b30731ceba
4ab22cfc619ee8ff17a8c50e289ec3b30731ceba
In chromedriver, add /log url to get the contents of the chromedriver log remotely. Also add a 'chrome.verbose' boolean startup option. Remove usage of VLOG(1) in chromedriver. We do not need as complicated logging as in Chrome. BUG=85241 TEST=none Review URL: http://codereview.chromium.org/7104085 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88591 0039d316-1c4b-4281-b951-d872f2087c98
KeyMap::~KeyMap() {}
KeyMap::~KeyMap() {}
C
Chrome
0
CVE-2015-6763
https://www.cvedetails.com/cve/CVE-2015-6763/
null
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
MacViews: Enable secure text input for password Textfields. In Cocoa the NSTextInputContext automatically enables secure text input when activated and it's in the secure text entry mode. RenderWidgetHostViewMac did the similar thing for ages following the WebKit example. views::Textfield needs to do the same thing in a fashion that's sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions are possible when the Textfield gets focus, activates the secure text input mode and the RWHVM loses focus immediately afterwards and disables the secure text input instead of leaving it in the enabled state. BUG=818133,677220 Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b Reviewed-on: https://chromium-review.googlesource.com/943064 Commit-Queue: Michail Pishchagin <mblsha@yandex-team.ru> Reviewed-by: Pavel Feldman <pfeldman@chromium.org> Reviewed-by: Avi Drissman <avi@chromium.org> Reviewed-by: Peter Kasting <pkasting@chromium.org> Cr-Commit-Position: refs/heads/master@{#542517}
void HTMLInputElement::DefaultEventHandler(Event* evt) { if (evt->IsMouseEvent() && evt->type() == EventTypeNames::click && ToMouseEvent(evt)->button() == static_cast<short>(WebPointerProperties::Button::kLeft)) { input_type_view_->HandleClickEvent(ToMouseEvent(evt)); if (evt->DefaultHandled()) return; } if (evt->IsKeyboardEvent() && evt->type() == EventTypeNames::keydown) { input_type_view_->HandleKeydownEvent(ToKeyboardEvent(evt)); if (evt->DefaultHandled()) return; } bool call_base_class_early = IsTextField() && (evt->type() == EventTypeNames::keydown || evt->type() == EventTypeNames::keypress); if (call_base_class_early) { TextControlElement::DefaultEventHandler(evt); if (evt->DefaultHandled()) return; } if (evt->type() == EventTypeNames::DOMActivate) { input_type_view_->HandleDOMActivateEvent(evt); if (evt->DefaultHandled()) return; } if (evt->IsKeyboardEvent() && evt->type() == EventTypeNames::keypress) { input_type_view_->HandleKeypressEvent(ToKeyboardEvent(evt)); if (evt->DefaultHandled()) return; } if (evt->IsKeyboardEvent() && evt->type() == EventTypeNames::keyup) { input_type_view_->HandleKeyupEvent(ToKeyboardEvent(evt)); if (evt->DefaultHandled()) return; } if (input_type_view_->ShouldSubmitImplicitly(evt)) { if (type() == InputTypeNames::search) { GetDocument() .GetTaskRunner(TaskType::kUserInteraction) ->PostTask(FROM_HERE, WTF::Bind(&HTMLInputElement::OnSearch, WrapPersistent(this))); } DispatchFormControlChangeEvent(); HTMLFormElement* form_for_submission = input_type_view_->FormForSubmission(); if (form_for_submission) { form_for_submission->SubmitImplicitly(evt, CanTriggerImplicitSubmission()); } evt->SetDefaultHandled(); return; } if (evt->IsBeforeTextInsertedEvent()) { input_type_view_->HandleBeforeTextInsertedEvent( static_cast<BeforeTextInsertedEvent*>(evt)); } if (evt->IsMouseEvent() && evt->type() == EventTypeNames::mousedown) { input_type_view_->HandleMouseDownEvent(ToMouseEvent(evt)); if (evt->DefaultHandled()) return; } input_type_view_->ForwardEvent(evt); if (!call_base_class_early && !evt->DefaultHandled()) TextControlElement::DefaultEventHandler(evt); }
void HTMLInputElement::DefaultEventHandler(Event* evt) { if (evt->IsMouseEvent() && evt->type() == EventTypeNames::click && ToMouseEvent(evt)->button() == static_cast<short>(WebPointerProperties::Button::kLeft)) { input_type_view_->HandleClickEvent(ToMouseEvent(evt)); if (evt->DefaultHandled()) return; } if (evt->IsKeyboardEvent() && evt->type() == EventTypeNames::keydown) { input_type_view_->HandleKeydownEvent(ToKeyboardEvent(evt)); if (evt->DefaultHandled()) return; } bool call_base_class_early = IsTextField() && (evt->type() == EventTypeNames::keydown || evt->type() == EventTypeNames::keypress); if (call_base_class_early) { TextControlElement::DefaultEventHandler(evt); if (evt->DefaultHandled()) return; } if (evt->type() == EventTypeNames::DOMActivate) { input_type_view_->HandleDOMActivateEvent(evt); if (evt->DefaultHandled()) return; } if (evt->IsKeyboardEvent() && evt->type() == EventTypeNames::keypress) { input_type_view_->HandleKeypressEvent(ToKeyboardEvent(evt)); if (evt->DefaultHandled()) return; } if (evt->IsKeyboardEvent() && evt->type() == EventTypeNames::keyup) { input_type_view_->HandleKeyupEvent(ToKeyboardEvent(evt)); if (evt->DefaultHandled()) return; } if (input_type_view_->ShouldSubmitImplicitly(evt)) { if (type() == InputTypeNames::search) { GetDocument() .GetTaskRunner(TaskType::kUserInteraction) ->PostTask(FROM_HERE, WTF::Bind(&HTMLInputElement::OnSearch, WrapPersistent(this))); } DispatchFormControlChangeEvent(); HTMLFormElement* form_for_submission = input_type_view_->FormForSubmission(); if (form_for_submission) { form_for_submission->SubmitImplicitly(evt, CanTriggerImplicitSubmission()); } evt->SetDefaultHandled(); return; } if (evt->IsBeforeTextInsertedEvent()) { input_type_view_->HandleBeforeTextInsertedEvent( static_cast<BeforeTextInsertedEvent*>(evt)); } if (evt->IsMouseEvent() && evt->type() == EventTypeNames::mousedown) { input_type_view_->HandleMouseDownEvent(ToMouseEvent(evt)); if (evt->DefaultHandled()) return; } input_type_view_->ForwardEvent(evt); if (!call_base_class_early && !evt->DefaultHandled()) TextControlElement::DefaultEventHandler(evt); }
C
Chrome
0
CVE-2019-5760
https://www.cvedetails.com/cve/CVE-2019-5760/
CWE-416
https://github.com/chromium/chromium/commit/3514a77e7fa2e5b8bfe5d98af22964bbd69d680f
3514a77e7fa2e5b8bfe5d98af22964bbd69d680f
Check weak pointers in RTCPeerConnectionHandler::WebRtcSetDescriptionObserverImpl Bug: 912074 Change-Id: I8ba86751f5d5bf12db51520f985ef0d3dae63ed8 Reviewed-on: https://chromium-review.googlesource.com/c/1411916 Commit-Queue: Guido Urdaneta <guidou@chromium.org> Reviewed-by: Henrik Boström <hbos@chromium.org> Cr-Commit-Position: refs/heads/master@{#622945}
void RTCPeerConnectionHandler::OnConnectionChange( webrtc::PeerConnectionInterface::PeerConnectionState new_state) { DCHECK(task_runner_->RunsTasksInCurrentSequence()); if (!is_closed_) client_->DidChangePeerConnectionState(new_state); }
void RTCPeerConnectionHandler::OnConnectionChange( webrtc::PeerConnectionInterface::PeerConnectionState new_state) { DCHECK(task_runner_->RunsTasksInCurrentSequence()); if (!is_closed_) client_->DidChangePeerConnectionState(new_state); }
C
Chrome
0
CVE-2018-6111
https://www.cvedetails.com/cve/CVE-2018-6111/
CWE-20
https://github.com/chromium/chromium/commit/3c8e4852477d5b1e2da877808c998dc57db9460f
3c8e4852477d5b1e2da877808c998dc57db9460f
DevTools: speculative fix for crash in NetworkHandler::Disable This keeps BrowserContext* and StoragePartition* instead of RenderProcessHost* in an attemp to resolve UAF of RenderProcessHost upon closure of DevTools front-end. Bug: 801117, 783067, 780694 Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b Reviewed-on: https://chromium-review.googlesource.com/876657 Commit-Queue: Andrey Kosyakov <caseq@chromium.org> Reviewed-by: Dmitry Gozman <dgozman@chromium.org> Cr-Commit-Position: refs/heads/master@{#531157}
void NetworkHandler::NavigationPreloadResponseReceived( const std::string& request_id, const GURL& url, const network::ResourceResponseHead& head) { if (!enabled_) return; std::unique_ptr<DictionaryValue> headers_dict(DictionaryValue::create()); size_t iterator = 0; std::string name; std::string value; while (head.headers->EnumerateHeaderLines(&iterator, &name, &value)) headers_dict->setString(name, value); std::unique_ptr<Network::Response> response( Network::Response::Create() .SetUrl(url.spec()) .SetStatus(head.headers->response_code()) .SetStatusText(head.headers->GetStatusText()) .SetHeaders(Object::fromValue(headers_dict.get(), nullptr)) .SetMimeType(head.mime_type) .SetConnectionReused(head.load_timing.socket_reused) .SetConnectionId(head.load_timing.socket_log_id) .SetSecurityState(securityState(url, head.cert_status)) .SetEncodedDataLength(head.encoded_data_length) .SetTiming(getTiming(head.load_timing)) .SetFromDiskCache(!head.load_timing.request_start_time.is_null() && head.response_time < head.load_timing.request_start_time) .Build()); if (head.raw_request_response_info) { if (head.raw_request_response_info->http_status_code) { response->SetStatus(head.raw_request_response_info->http_status_code); response->SetStatusText(head.raw_request_response_info->http_status_text); } if (head.raw_request_response_info->request_headers.size()) { response->SetRequestHeaders( getHeaders(head.raw_request_response_info->request_headers)); } if (!head.raw_request_response_info->request_headers_text.empty()) { response->SetRequestHeadersText( head.raw_request_response_info->request_headers_text); } if (head.raw_request_response_info->response_headers.size()) response->SetHeaders( getHeaders(head.raw_request_response_info->response_headers)); if (!head.raw_request_response_info->response_headers_text.empty()) response->SetHeadersText( head.raw_request_response_info->response_headers_text); } response->SetProtocol(getProtocol(url, head)); response->SetRemoteIPAddress(head.socket_address.HostForURL()); response->SetRemotePort(head.socket_address.port()); frontend_->ResponseReceived( request_id, "" /* loader_id */, base::TimeTicks::Now().ToInternalValue() / static_cast<double>(base::Time::kMicrosecondsPerSecond), Page::ResourceTypeEnum::Other, std::move(response)); }
void NetworkHandler::NavigationPreloadResponseReceived( const std::string& request_id, const GURL& url, const network::ResourceResponseHead& head) { if (!enabled_) return; std::unique_ptr<DictionaryValue> headers_dict(DictionaryValue::create()); size_t iterator = 0; std::string name; std::string value; while (head.headers->EnumerateHeaderLines(&iterator, &name, &value)) headers_dict->setString(name, value); std::unique_ptr<Network::Response> response( Network::Response::Create() .SetUrl(url.spec()) .SetStatus(head.headers->response_code()) .SetStatusText(head.headers->GetStatusText()) .SetHeaders(Object::fromValue(headers_dict.get(), nullptr)) .SetMimeType(head.mime_type) .SetConnectionReused(head.load_timing.socket_reused) .SetConnectionId(head.load_timing.socket_log_id) .SetSecurityState(securityState(url, head.cert_status)) .SetEncodedDataLength(head.encoded_data_length) .SetTiming(getTiming(head.load_timing)) .SetFromDiskCache(!head.load_timing.request_start_time.is_null() && head.response_time < head.load_timing.request_start_time) .Build()); if (head.raw_request_response_info) { if (head.raw_request_response_info->http_status_code) { response->SetStatus(head.raw_request_response_info->http_status_code); response->SetStatusText(head.raw_request_response_info->http_status_text); } if (head.raw_request_response_info->request_headers.size()) { response->SetRequestHeaders( getHeaders(head.raw_request_response_info->request_headers)); } if (!head.raw_request_response_info->request_headers_text.empty()) { response->SetRequestHeadersText( head.raw_request_response_info->request_headers_text); } if (head.raw_request_response_info->response_headers.size()) response->SetHeaders( getHeaders(head.raw_request_response_info->response_headers)); if (!head.raw_request_response_info->response_headers_text.empty()) response->SetHeadersText( head.raw_request_response_info->response_headers_text); } response->SetProtocol(getProtocol(url, head)); response->SetRemoteIPAddress(head.socket_address.HostForURL()); response->SetRemotePort(head.socket_address.port()); frontend_->ResponseReceived( request_id, "" /* loader_id */, base::TimeTicks::Now().ToInternalValue() / static_cast<double>(base::Time::kMicrosecondsPerSecond), Page::ResourceTypeEnum::Other, std::move(response)); }
C
Chrome
0
CVE-2013-7271
https://www.cvedetails.com/cve/CVE-2013-7271/
CWE-20
https://github.com/torvalds/linux/commit/f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <davem@davemloft.net> Suggested-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
static int x25_wait_for_data(struct sock *sk, long timeout) { DECLARE_WAITQUEUE(wait, current); int rc = 0; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; rc = 0; if (skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; }
static int x25_wait_for_data(struct sock *sk, long timeout) { DECLARE_WAITQUEUE(wait, current); int rc = 0; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; rc = 0; if (skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; }
C
linux
0
CVE-2019-5754
https://www.cvedetails.com/cve/CVE-2019-5754/
CWE-310
https://github.com/chromium/chromium/commit/fd2335678e96c34d14f4b20f0d9613dfbd1ccdb4
fd2335678e96c34d14f4b20f0d9613dfbd1ccdb4
Fix a bug in network_session_configurator.cc in which support for HTTPS URLS in QUIC proxies was always set to false. BUG=914497 Change-Id: I56ad16088168302598bb448553ba32795eee3756 Reviewed-on: https://chromium-review.googlesource.com/c/1417356 Auto-Submit: Ryan Hamilton <rch@chromium.org> Commit-Queue: Zhongyi Shi <zhongyi@chromium.org> Reviewed-by: Zhongyi Shi <zhongyi@chromium.org> Cr-Commit-Position: refs/heads/master@{#623763}
int GetQuicMaxNumMigrationsToNonDefaultNetworkOnPathDegrading( const VariationParameters& quic_trial_params) { int value; if (base::StringToInt( GetVariationParam( quic_trial_params, "max_migrations_to_non_default_network_on_path_degrading"), &value)) { return value; } return 0; }
int GetQuicMaxNumMigrationsToNonDefaultNetworkOnPathDegrading( const VariationParameters& quic_trial_params) { int value; if (base::StringToInt( GetVariationParam( quic_trial_params, "max_migrations_to_non_default_network_on_path_degrading"), &value)) { return value; } return 0; }
C
Chrome
0
CVE-2018-18338
https://www.cvedetails.com/cve/CVE-2018-18338/
CWE-119
https://github.com/chromium/chromium/commit/78d89fe556cb5dabbc47b4967cdf55e607e29580
78d89fe556cb5dabbc47b4967cdf55e607e29580
Fix *StaticBitmapImage ThreadChecker and unaccelerated SkImage destroy - AcceleratedStaticBitmapImage was misusing ThreadChecker by having its own detach logic. Using proper DetachThread is simpler, cleaner and correct. - UnacceleratedStaticBitmapImage didn't destroy the SkImage in the proper thread, leading to GrContext/SkSp problems. Bug: 890576 Change-Id: Ic71e7f7322b0b851774628247aa5256664bc0723 Reviewed-on: https://chromium-review.googlesource.com/c/1307775 Reviewed-by: Gabriel Charette <gab@chromium.org> Reviewed-by: Jeremy Roman <jbroman@chromium.org> Commit-Queue: Fernando Serboncini <fserb@chromium.org> Cr-Commit-Position: refs/heads/master@{#604427}
WebGraphicsContext3DProvider* AcceleratedStaticBitmapImage::ContextProvider() const { if (!IsValid()) return nullptr; return texture_holder_->ContextProvider(); }
WebGraphicsContext3DProvider* AcceleratedStaticBitmapImage::ContextProvider() const { if (!IsValid()) return nullptr; return texture_holder_->ContextProvider(); }
C
Chrome
0
CVE-2016-5218
https://www.cvedetails.com/cve/CVE-2016-5218/
CWE-20
https://github.com/chromium/chromium/commit/45d901b56f578a74b19ba0d10fa5c4c467f19303
45d901b56f578a74b19ba0d10fa5c4c467f19303
Paint tab groups with the group color. * The background of TabGroupHeader now uses the group color. * The backgrounds of tabs in the group are tinted with the group color. This treatment, along with the colors chosen, are intended to be a placeholder. Bug: 905491 Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504 Commit-Queue: Bret Sepulveda <bsep@chromium.org> Reviewed-by: Taylor Bergquist <tbergquist@chromium.org> Cr-Commit-Position: refs/heads/master@{#660498}
void TabStrip::ChangeTabGroup(int model_index, base::Optional<int> old_group, base::Optional<int> new_group) { tab_at(model_index)->SetGroup(new_group); if (new_group.has_value() && !group_headers_[new_group.value()]) { auto header = std::make_unique<TabGroupHeader>(this, new_group.value()); header->set_owned_by_client(); AddChildView(header.get()); group_headers_[new_group.value()] = std::move(header); } if (old_group.has_value() && controller_->ListTabsInGroup(old_group.value()).size() == 0) { group_headers_.erase(old_group.value()); } UpdateIdealBounds(); AnimateToIdealBounds(); }
void TabStrip::ChangeTabGroup(int model_index, base::Optional<int> old_group, base::Optional<int> new_group) { if (new_group.has_value() && !group_headers_[new_group.value()]) { const TabGroupData* group_data = controller_->GetDataForGroup(new_group.value()); auto header = std::make_unique<TabGroupHeader>(group_data->title()); header->set_owned_by_client(); AddChildView(header.get()); group_headers_[new_group.value()] = std::move(header); } if (old_group.has_value() && controller_->ListTabsInGroup(old_group.value()).size() == 0) { group_headers_.erase(old_group.value()); } UpdateIdealBounds(); AnimateToIdealBounds(); }
C
Chrome
1
CVE-2015-1300
https://www.cvedetails.com/cve/CVE-2015-1300/
CWE-254
https://github.com/chromium/chromium/commit/9c391ac04f9ac478c8b0e43b359c2b43a6c892ab
9c391ac04f9ac478c8b0e43b359c2b43a6c892ab
Use pdf compositor service for printing when OOPIF is enabled When OOPIF is enabled (by site-per-process flag or top-document-isolation feature), use the pdf compositor service for converting PaintRecord to PDF on renderers. In the future, this will make compositing PDF from multiple renderers possible. TBR=jzfeng@chromium.org BUG=455764 Change-Id: I3c28f03f4358e4228239fe1a33384f85e7716e8f Reviewed-on: https://chromium-review.googlesource.com/699765 Commit-Queue: Wei Li <weili@chromium.org> Reviewed-by: Daniel Cheng <dcheng@chromium.org> Reviewed-by: Lei Zhang <thestig@chromium.org> Cr-Commit-Position: refs/heads/master@{#511616}
bool PrintViewManagerBase::RenderAllMissingPagesNow() { if (!print_job_.get() || !print_job_->is_job_pending()) return false; if (!web_contents() || !web_contents()->GetRenderViewHost() || !web_contents()->GetRenderViewHost()->IsRenderViewLive()) { return false; } if (print_job_->document() && print_job_->document()->IsComplete()) { printing_succeeded_ = true; return true; } if (!RunInnerMessageLoop()) { return false; } return true; }
bool PrintViewManagerBase::RenderAllMissingPagesNow() { if (!print_job_.get() || !print_job_->is_job_pending()) return false; if (!web_contents() || !web_contents()->GetRenderViewHost() || !web_contents()->GetRenderViewHost()->IsRenderViewLive()) { return false; } if (print_job_->document() && print_job_->document()->IsComplete()) { printing_succeeded_ = true; return true; } if (!RunInnerMessageLoop()) { return false; } return true; }
C
Chrome
0
CVE-2018-18354
https://www.cvedetails.com/cve/CVE-2018-18354/
CWE-20
https://github.com/chromium/chromium/commit/d19a75fc26fd0ab1ce79ef3d1c1c9b3cc1fbd098
d19a75fc26fd0ab1ce79ef3d1c1c9b3cc1fbd098
Validate external protocols before launching on Windows Bug: 889459 Change-Id: Id33ca6444bff1e6dd71b6000823cf6fec09746ef Reviewed-on: https://chromium-review.googlesource.com/c/1256208 Reviewed-by: Greg Thompson <grt@chromium.org> Commit-Queue: Mustafa Emre Acer <meacer@chromium.org> Cr-Commit-Position: refs/heads/master@{#597611}
base::string16 GetApplicationNameForProtocol(const GURL& url) { if (base::win::GetVersion() >= base::win::VERSION_WIN8) { base::string16 application_name = GetAppForProtocolUsingAssocQuery(url); if (!application_name.empty()) return application_name; } return GetAppForProtocolUsingRegistry(url); }
base::string16 GetApplicationNameForProtocol(const GURL& url) { base::string16 application_name; if (base::win::GetVersion() >= base::win::VERSION_WIN8) { application_name = GetAppForProtocolUsingAssocQuery(url); if (!application_name.empty()) return application_name; } return GetAppForProtocolUsingRegistry(url); }
C
Chrome
1
CVE-2015-8746
https://www.cvedetails.com/cve/CVE-2015-8746/
null
https://github.com/torvalds/linux/commit/18e3b739fdc826481c6a1335ce0c5b19b3d415da
18e3b739fdc826481c6a1335ce0c5b19b3d415da
NFS: Fix a NULL pointer dereference of migration recovery ops for v4.2 client ---Steps to Reproduce-- <nfs-server> # cat /etc/exports /nfs/referal *(rw,insecure,no_subtree_check,no_root_squash,crossmnt) /nfs/old *(ro,insecure,subtree_check,root_squash,crossmnt) <nfs-client> # mount -t nfs nfs-server:/nfs/ /mnt/ # ll /mnt/*/ <nfs-server> # cat /etc/exports /nfs/referal *(rw,insecure,no_subtree_check,no_root_squash,crossmnt,refer=/nfs/old/@nfs-server) /nfs/old *(ro,insecure,subtree_check,root_squash,crossmnt) # service nfs restart <nfs-client> # ll /mnt/*/ --->>>>> oops here [ 5123.102925] BUG: unable to handle kernel NULL pointer dereference at (null) [ 5123.103363] IP: [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4] [ 5123.103752] PGD 587b9067 PUD 3cbf5067 PMD 0 [ 5123.104131] Oops: 0000 [#1] [ 5123.104529] Modules linked in: nfsv4(OE) nfs(OE) fscache(E) nfsd(OE) xfs libcrc32c iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi coretemp crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel ppdev vmw_balloon parport_pc parport i2c_piix4 shpchp auth_rpcgss nfs_acl vmw_vmci lockd grace sunrpc vmwgfx drm_kms_helper ttm drm mptspi serio_raw scsi_transport_spi e1000 mptscsih mptbase ata_generic pata_acpi [last unloaded: nfsd] [ 5123.105887] CPU: 0 PID: 15853 Comm: ::1-manager Tainted: G OE 4.2.0-rc6+ #214 [ 5123.106358] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 05/20/2014 [ 5123.106860] task: ffff88007620f300 ti: ffff88005877c000 task.ti: ffff88005877c000 [ 5123.107363] RIP: 0010:[<ffffffffa03ed38b>] [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4] [ 5123.107909] RSP: 0018:ffff88005877fdb8 EFLAGS: 00010246 [ 5123.108435] RAX: ffff880053f3bc00 RBX: ffff88006ce6c908 RCX: ffff880053a0d240 [ 5123.108968] RDX: ffffea0000e6d940 RSI: ffff8800399a0000 RDI: ffff88006ce6c908 [ 5123.109503] RBP: ffff88005877fe28 R08: ffffffff81c708a0 R09: 0000000000000000 [ 5123.110045] R10: 00000000000001a2 R11: ffff88003ba7f5c8 R12: ffff880054c55800 [ 5123.110618] R13: 0000000000000000 R14: ffff880053a0d240 R15: ffff880053a0d240 [ 5123.111169] FS: 0000000000000000(0000) GS:ffffffff81c27000(0000) knlGS:0000000000000000 [ 5123.111726] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 5123.112286] CR2: 0000000000000000 CR3: 0000000054cac000 CR4: 00000000001406f0 [ 5123.112888] Stack: [ 5123.113458] ffffea0000e6d940 ffff8800399a0000 00000000000167d0 0000000000000000 [ 5123.114049] 0000000000000000 0000000000000000 0000000000000000 00000000a7ec82c6 [ 5123.114662] ffff88005877fe18 ffffea0000e6d940 ffff8800399a0000 ffff880054c55800 [ 5123.115264] Call Trace: [ 5123.115868] [<ffffffffa03fb44b>] nfs4_try_migration+0xbb/0x220 [nfsv4] [ 5123.116487] [<ffffffffa03fcb3b>] nfs4_run_state_manager+0x4ab/0x7b0 [nfsv4] [ 5123.117104] [<ffffffffa03fc690>] ? nfs4_do_reclaim+0x510/0x510 [nfsv4] [ 5123.117813] [<ffffffff810a4527>] kthread+0xd7/0xf0 [ 5123.118456] [<ffffffff810a4450>] ? kthread_worker_fn+0x160/0x160 [ 5123.119108] [<ffffffff816d9cdf>] ret_from_fork+0x3f/0x70 [ 5123.119723] [<ffffffff810a4450>] ? 
kthread_worker_fn+0x160/0x160 [ 5123.120329] Code: 4c 8b 6a 58 74 17 eb 52 48 8d 55 a8 89 c6 4c 89 e7 e8 4a b5 ff ff 8b 45 b0 85 c0 74 1c 4c 89 f9 48 8b 55 90 48 8b 75 98 48 89 df <41> ff 55 00 3d e8 d8 ff ff 41 89 c6 74 cf 48 8b 4d c8 65 48 33 [ 5123.121643] RIP [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4] [ 5123.122308] RSP <ffff88005877fdb8> [ 5123.122942] CR2: 0000000000000000 Fixes: ec011fe847 ("NFS: Introduce a vector of migration recovery ops") Cc: stable@vger.kernel.org # v3.13+ Signed-off-by: Kinglong Mee <kinglongmee@gmail.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
nfs4_label_init_security(struct inode *dir, struct dentry *dentry, struct iattr *sattr, struct nfs4_label *l) { return NULL; }
nfs4_label_init_security(struct inode *dir, struct dentry *dentry, struct iattr *sattr, struct nfs4_label *l) { return NULL; }
C
linux
0
CVE-2017-5546
https://www.cvedetails.com/cve/CVE-2017-5546/
null
https://github.com/torvalds/linux/commit/c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
mm/slab.c: fix SLAB freelist randomization duplicate entries This patch fixes a bug in the freelist randomization code. When a high random number is used, the freelist will contain duplicate entries. It will result in different allocations sharing the same chunk. It will result in odd behaviours and crashes. It should be uncommon but it depends on the machines. We saw it happening more often on some machines (every few hours of running tests). Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization") Link: http://lkml.kernel.org/r/20170103181908.143178-1-thgarnie@google.com Signed-off-by: John Sperbeck <jsperbeck@google.com> Signed-off-by: Thomas Garnier <thgarnie@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
size_t ksize(const void *objp) { size_t size; BUG_ON(!objp); if (unlikely(objp == ZERO_SIZE_PTR)) return 0; size = virt_to_cache(objp)->object_size; /* We assume that ksize callers could use the whole allocated area, * so we need to unpoison this area. */ kasan_unpoison_shadow(objp, size); return size; }
size_t ksize(const void *objp) { size_t size; BUG_ON(!objp); if (unlikely(objp == ZERO_SIZE_PTR)) return 0; size = virt_to_cache(objp)->object_size; /* We assume that ksize callers could use the whole allocated area, * so we need to unpoison this area. */ kasan_unpoison_shadow(objp, size); return size; }
C
linux
0
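The commit message in the record above (CVE-2017-5546) explains that the broken SLAB freelist randomization could place duplicate entries in the freelist, so two allocations could end up sharing the same chunk; a correct randomization must produce a permutation. Below is only a generic sketch of that property using a standard Fisher-Yates shuffle; rand_r() stands in for the kernel's RNG as an assumption, and none of the names come from mm/slab.c.

#include <stddef.h>
#include <stdlib.h>

/* Build the identity permutation 0..count-1, then shuffle it in place.
 * Every index appears exactly once, so no two allocations share a chunk. */
static void shuffle_freelist(unsigned int *list, size_t count, unsigned int *seed)
{
    if (count == 0)
        return;

    for (size_t i = 0; i < count; i++)
        list[i] = (unsigned int)i;

    for (size_t i = count - 1; i > 0; i--) {
        size_t j = (size_t)rand_r(seed) % (i + 1);  /* uniform in [0, i] */
        unsigned int tmp = list[i];
        list[i] = list[j];
        list[j] = tmp;
    }
}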
CVE-2014-1713
https://www.cvedetails.com/cve/CVE-2014-1713/
CWE-399
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
f85a87ec670ad0fce9d98d90c9a705b72a288154
document.location bindings fix BUG=352374 R=jochen@chromium.org Review URL: https://codereview.chromium.org/196343011 git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void locationReplaceableAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info) { TestObjectPython* proxyImp = V8TestObjectPython::toNative(info.Holder()); RefPtr<TestNode> imp = WTF::getPtr(proxyImp->locationReplaceable()); if (!imp) return; V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, cppValue, jsValue); imp->setHref(cppValue); }
static void locationReplaceableAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info) { TestObjectPython* proxyImp = V8TestObjectPython::toNative(info.Holder()); TestNode* imp = WTF::getPtr(proxyImp->locationReplaceable()); if (!imp) return; V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, cppValue, jsValue); imp->setHref(cppValue); }
C
Chrome
1
CVE-2015-8374
https://www.cvedetails.com/cve/CVE-2015-8374/
CWE-200
https://github.com/torvalds/linux/commit/0305cd5f7fca85dae392b9ba85b116896eb7c1c7
0305cd5f7fca85dae392b9ba85b116896eb7c1c7
Btrfs: fix truncation of compressed and inlined extents When truncating a file to a smaller size which consists of an inline extent that is compressed, we did not discard (or made unusable) the data between the new file size and the old file size, wasting metadata space and allowing for the truncated data to be leaked and the data corruption/loss mentioned below. We were also not correctly decrementing the number of bytes used by the inode, we were setting it to zero, giving a wrong report for callers of the stat(2) syscall. The fsck tool also reported an error about a mismatch between the nbytes of the file versus the real space used by the file. Now because we weren't discarding the truncated region of the file, it was possible for a caller of the clone ioctl to actually read the data that was truncated, allowing for a security breach without requiring root access to the system, using only standard filesystem operations. The scenario is the following: 1) User A creates a file which consists of an inline and compressed extent with a size of 2000 bytes - the file is not accessible to any other users (no read, write or execution permission for anyone else); 2) The user truncates the file to a size of 1000 bytes; 3) User A makes the file world readable; 4) User B creates a file consisting of an inline extent of 2000 bytes; 5) User B issues a clone operation from user A's file into its own file (using a length argument of 0, clone the whole range); 6) User B now gets to see the 1000 bytes that user A truncated from its file before it made its file world readbale. User B also lost the bytes in the range [1000, 2000[ bytes from its own file, but that might be ok if his/her intention was reading stale data from user A that was never supposed to be public. Note that this contrasts with the case where we truncate a file from 2000 bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In this case reading any byte from the range [1000, 2000[ will return a value of 0x00, instead of the original data. This problem exists since the clone ioctl was added and happens both with and without my recent data loss and file corruption fixes for the clone ioctl (patch "Btrfs: fix file corruption and data loss after cloning inline extents"). So fix this by truncating the compressed inline extents as we do for the non-compressed case, which involves decompressing, if the data isn't already in the page cache, compressing the truncated version of the extent, writing the compressed content into the inline extent and then truncate it. The following test case for fstests reproduces the problem. In order for the test to pass both this fix and my previous fix for the clone ioctl that forbids cloning a smaller inline extent into a larger one, which is titled "Btrfs: fix file corruption and data loss after cloning inline extents", are needed. Without that other fix the test fails in a different way that does not leak the truncated data, instead part of destination file gets replaced with zeroes (because the destination file has a larger inline extent than the source). seq=`basename $0` seqres=$RESULT_DIR/$seq echo "QA output created by $seq" tmp=/tmp/$$ status=1 # failure is the default! trap "_cleanup; exit \$status" 0 1 2 3 15 _cleanup() { rm -f $tmp.* } # get standard environment, filters and checks . ./common/rc . 
./common/filter # real QA test starts here _need_to_be_root _supported_fs btrfs _supported_os Linux _require_scratch _require_cloner rm -f $seqres.full _scratch_mkfs >>$seqres.full 2>&1 _scratch_mount "-o compress" # Create our test files. File foo is going to be the source of a clone operation # and consists of a single inline extent with an uncompressed size of 512 bytes, # while file bar consists of a single inline extent with an uncompressed size of # 256 bytes. For our test's purpose, it's important that file bar has an inline # extent with a size smaller than foo's inline extent. $XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \ -c "pwrite -S 0x2a 128 384" \ $SCRATCH_MNT/foo | _filter_xfs_io $XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io # Now durably persist all metadata and data. We do this to make sure that we get # on disk an inline extent with a size of 512 bytes for file foo. sync # Now truncate our file foo to a smaller size. Because it consists of a # compressed and inline extent, btrfs did not shrink the inline extent to the # new size (if the extent was not compressed, btrfs would shrink it to 128 # bytes), it only updates the inode's i_size to 128 bytes. $XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo # Now clone foo's inline extent into bar. # This clone operation should fail with errno EOPNOTSUPP because the source # file consists only of an inline extent and the file's size is smaller than # the inline extent of the destination (128 bytes < 256 bytes). However the # clone ioctl was not prepared to deal with a file that has a size smaller # than the size of its inline extent (something that happens only for compressed # inline extents), resulting in copying the full inline extent from the source # file into the destination file. # # Note that btrfs' clone operation for inline extents consists of removing the # inline extent from the destination inode and copy the inline extent from the # source inode into the destination inode, meaning that if the destination # inode's inline extent is larger (N bytes) than the source inode's inline # extent (M bytes), some bytes (N - M bytes) will be lost from the destination # file. Btrfs could copy the source inline extent's data into the destination's # inline extent so that we would not lose any data, but that's currently not # done due to the complexity that would be needed to deal with such cases # (specially when one or both extents are compressed), returning EOPNOTSUPP, as # it's normally not a very common case to clone very small files (only case # where we get inline extents) and copying inline extents does not save any # space (unlike for normal, non-inlined extents). $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar # Now because the above clone operation used to succeed, and due to foo's inline # extent not being shinked by the truncate operation, our file bar got the whole # inline extent copied from foo, making us lose the last 128 bytes from bar # which got replaced by the bytes in range [128, 256[ from foo before foo was # truncated - in other words, data loss from bar and being able to read old and # stale data from foo that should not be possible to read anymore through normal # filesystem operations. Contrast with the case where we truncate a file from a # size N to a smaller size M, truncate it back to size N and then read the range # [M, N[, we should always get the value 0x00 for all the bytes in that range. 
# We expected the clone operation to fail with errno EOPNOTSUPP and therefore # not modify our file's bar data/metadata. So its content should be 256 bytes # long with all bytes having the value 0xbb. # # Without the btrfs bug fix, the clone operation succeeded and resulted in # leaking truncated data from foo, the bytes that belonged to its range # [128, 256[, and losing data from bar in that same range. So reading the # file gave us the following content: # # 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 # * # 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a # * # 0000400 echo "File bar's content after the clone operation:" od -t x1 $SCRATCH_MNT/bar # Also because the foo's inline extent was not shrunk by the truncate # operation, btrfs' fsck, which is run by the fstests framework everytime a # test completes, failed reporting the following error: # # root 5 inode 257 errors 400, nbytes wrong status=0 exit Cc: stable@vger.kernel.org Signed-off-by: Filipe Manana <fdmanana@suse.com>
static int btrfs_set_inode_index_count(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key key, found_key; struct btrfs_path *path; struct extent_buffer *leaf; int ret; key.objectid = btrfs_ino(inode); key.type = BTRFS_DIR_INDEX_KEY; key.offset = (u64)-1; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; /* FIXME: we should be able to handle this */ if (ret == 0) goto out; ret = 0; /* * MAGIC NUMBER EXPLANATION: * since we search a directory based on f_pos we have to start at 2 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody * else has to start at 2 */ if (path->slots[0] == 0) { BTRFS_I(inode)->index_cnt = 2; goto out; } path->slots[0]--; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != btrfs_ino(inode) || found_key.type != BTRFS_DIR_INDEX_KEY) { BTRFS_I(inode)->index_cnt = 2; goto out; } BTRFS_I(inode)->index_cnt = found_key.offset + 1; out: btrfs_free_path(path); return ret; }
static int btrfs_set_inode_index_count(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key key, found_key; struct btrfs_path *path; struct extent_buffer *leaf; int ret; key.objectid = btrfs_ino(inode); key.type = BTRFS_DIR_INDEX_KEY; key.offset = (u64)-1; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; /* FIXME: we should be able to handle this */ if (ret == 0) goto out; ret = 0; /* * MAGIC NUMBER EXPLANATION: * since we search a directory based on f_pos we have to start at 2 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody * else has to start at 2 */ if (path->slots[0] == 0) { BTRFS_I(inode)->index_cnt = 2; goto out; } path->slots[0]--; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != btrfs_ino(inode) || found_key.type != BTRFS_DIR_INDEX_KEY) { BTRFS_I(inode)->index_cnt = 2; goto out; } BTRFS_I(inode)->index_cnt = found_key.offset + 1; out: btrfs_free_path(path); return ret; }
C
linux
0
CVE-2016-1621
https://www.cvedetails.com/cve/CVE-2016-1621/
CWE-119
https://android.googlesource.com/platform/external/libvpx/+/5a9753fca56f0eeb9f61e342b2fccffc364f9426
5a9753fca56f0eeb9f61e342b2fccffc364f9426
Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
const char *arg_next(struct arg *arg) { if (arg->argv[0]) arg->argv += arg->argv_step; return *arg->argv; }
const char *arg_next(struct arg *arg) { if (arg->argv[0]) arg->argv += arg->argv_step; return *arg->argv; }
C
Android
0
CVE-2016-7916
https://www.cvedetails.com/cve/CVE-2016-7916/
CWE-362
https://github.com/torvalds/linux/commit/8148a73c9901a8794a50f950083c00ccf97d43b3
8148a73c9901a8794a50f950083c00ccf97d43b3
proc: prevent accessing /proc/<PID>/environ until it's ready If /proc/<PID>/environ gets read before the envp[] array is fully set up in create_{aout,elf,elf_fdpic,flat}_tables(), we might end up trying to read more bytes than are actually written, as env_start will already be set but env_end will still be zero, making the range calculation underflow, allowing to read beyond the end of what has been written. Fix this as it is done for /proc/<PID>/cmdline by testing env_end for zero. It is, apparently, intentionally set last in create_*_tables(). This bug was found by the PaX size_overflow plugin that detected the arithmetic underflow of 'this_len = env_end - (env_start + src)' when env_end is still zero. The expected consequence is that userland trying to access /proc/<PID>/environ of a not yet fully set up process may get inconsistent data as we're in the middle of copying in the environment variables. Fixes: https://forums.grsecurity.net/viewtopic.php?f=3&t=4363 Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=116461 Signed-off-by: Mathias Krause <minipli@googlemail.com> Cc: Emese Revfy <re.emese@gmail.com> Cc: Pax Team <pageexec@freemail.hu> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Mateusz Guzik <mguzik@redhat.com> Cc: Alexey Dobriyan <adobriyan@gmail.com> Cc: Cyrill Gorcunov <gorcunov@openvz.org> Cc: Jarod Wilson <jarod@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static int show_timer(struct seq_file *m, void *v) { struct k_itimer *timer; struct timers_private *tp = m->private; int notify; static const char * const nstr[] = { [SIGEV_SIGNAL] = "signal", [SIGEV_NONE] = "none", [SIGEV_THREAD] = "thread", }; timer = list_entry((struct list_head *)v, struct k_itimer, list); notify = timer->it_sigev_notify; seq_printf(m, "ID: %d\n", timer->it_id); seq_printf(m, "signal: %d/%p\n", timer->sigq->info.si_signo, timer->sigq->info.si_value.sival_ptr); seq_printf(m, "notify: %s/%s.%d\n", nstr[notify & ~SIGEV_THREAD_ID], (notify & SIGEV_THREAD_ID) ? "tid" : "pid", pid_nr_ns(timer->it_pid, tp->ns)); seq_printf(m, "ClockID: %d\n", timer->it_clock); return 0; }
static int show_timer(struct seq_file *m, void *v) { struct k_itimer *timer; struct timers_private *tp = m->private; int notify; static const char * const nstr[] = { [SIGEV_SIGNAL] = "signal", [SIGEV_NONE] = "none", [SIGEV_THREAD] = "thread", }; timer = list_entry((struct list_head *)v, struct k_itimer, list); notify = timer->it_sigev_notify; seq_printf(m, "ID: %d\n", timer->it_id); seq_printf(m, "signal: %d/%p\n", timer->sigq->info.si_signo, timer->sigq->info.si_value.sival_ptr); seq_printf(m, "notify: %s/%s.%d\n", nstr[notify & ~SIGEV_THREAD_ID], (notify & SIGEV_THREAD_ID) ? "tid" : "pid", pid_nr_ns(timer->it_pid, tp->ns)); seq_printf(m, "ClockID: %d\n", timer->it_clock); return 0; }
C
linux
0
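The /proc/<PID>/environ record above fixes an underflow by refusing to compute a read length until env_end is non-zero. Below is a minimal, self-contained sketch of that guard; the struct and function names are simplified stand-ins made up for this illustration, not the real fs/proc or mm_struct code.
#include <stdio.h>

/* Simplified stand-in for the two mm_struct fields involved. env_end is
 * written last while the new process image is built, so a concurrent
 * reader may still observe it as zero. */
struct env_window {
    unsigned long env_start;
    unsigned long env_end;
};

/* How many bytes may safely be read at offset src. Returns 0 while the
 * environment is not fully set up (env_end == 0) or when src is out of
 * range, instead of underflowing env_end - (env_start + src). */
static unsigned long environ_readable(const struct env_window *w, unsigned long src)
{
    if (w->env_end == 0)
        return 0;
    if (src >= w->env_end - w->env_start)
        return 0;
    return w->env_end - w->env_start - src;
}

int main(void)
{
    struct env_window not_ready = { 0x1000, 0 };
    struct env_window ready = { 0x1000, 0x1200 };
    printf("%lu %lu\n", environ_readable(&not_ready, 16), environ_readable(&ready, 16));
    return 0;
}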
CVE-2011-3110
https://www.cvedetails.com/cve/CVE-2011-3110/
CWE-119
https://github.com/chromium/chromium/commit/23a52bd208885df236cde3ad2cd162b094c0bbe4
23a52bd208885df236cde3ad2cd162b094c0bbe4
Do not require DevTools extension resources to be white-listed in manifest. Currently, resources used by DevTools extensions need to be white-listed as web_accessible_resources in manifest. This is quite inconvenitent and appears to be an overkill, given the fact that DevTools front-end is (a) trusted and (b) picky on the frames it loads. This change adds resources that belong to DevTools extensions and are being loaded into a DevTools front-end page to the list of exceptions from web_accessible_resources check. BUG=none TEST=DevToolsExtensionTest.* Review URL: https://chromiumcodereview.appspot.com/9663076 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@126378 0039d316-1c4b-4281-b951-d872f2087c98
void ChromeContentRendererClient::DidCreateScriptContext( WebFrame* frame, v8::Handle<v8::Context> context, int extension_group, int world_id) { extension_dispatcher_->DidCreateScriptContext( frame, context, extension_group, world_id); }
void ChromeContentRendererClient::DidCreateScriptContext( WebFrame* frame, v8::Handle<v8::Context> context, int extension_group, int world_id) { extension_dispatcher_->DidCreateScriptContext( frame, context, extension_group, world_id); }
C
Chrome
0
CVE-2018-11596
https://www.cvedetails.com/cve/CVE-2018-11596/
CWE-119
https://github.com/espruino/Espruino/commit/ce1924193862d58cb43d3d4d9dada710a8361b89
ce1924193862d58cb43d3d4d9dada710a8361b89
fix jsvGetString regression
bool jsvIsFlatString(const JsVar *v) { return v && (v->flags&JSV_VARTYPEMASK)==JSV_FLAT_STRING; }
bool jsvIsFlatString(const JsVar *v) { return v && (v->flags&JSV_VARTYPEMASK)==JSV_FLAT_STRING; }
C
Espruino
0
CVE-2017-14604
https://www.cvedetails.com/cve/CVE-2017-14604/
CWE-20
https://github.com/GNOME/nautilus/commit/1630f53481f445ada0a455e9979236d31a8d3bb0
1630f53481f445ada0a455e9979236d31a8d3bb0
mime-actions: use file metadata for trusting desktop files Currently we only trust desktop files that have the executable bit set, and don't replace the displayed icon or the displayed name until it's trusted, which prevents running random programs via a malicious desktop file. However, the executable permission is preserved if the desktop file comes from a compressed file. To prevent this, add a metadata::trusted metadata to the file once the user acknowledges the file as trusted. This adds metadata to the file, which cannot be added unless it has access to the computer. Also remove the SHEBANG "trusted" content we were putting inside the desktop file, since that doesn't add more security because it can come with the file itself. https://bugzilla.gnome.org/show_bug.cgi?id=777991
nautilus_mime_get_default_application_for_file (NautilusFile *file) { GAppInfo *app; char *mime_type; char *uri_scheme; if (!nautilus_mime_actions_check_if_required_attributes_ready (file)) { return NULL; } mime_type = nautilus_file_get_mime_type (file); app = g_app_info_get_default_for_type (mime_type, !nautilus_file_is_local_or_fuse (file)); g_free (mime_type); if (app == NULL) { uri_scheme = nautilus_file_get_uri_scheme (file); if (uri_scheme != NULL) { app = g_app_info_get_default_for_uri_scheme (uri_scheme); g_free (uri_scheme); } } return app; }
nautilus_mime_get_default_application_for_file (NautilusFile *file) { GAppInfo *app; char *mime_type; char *uri_scheme; if (!nautilus_mime_actions_check_if_required_attributes_ready (file)) { return NULL; } mime_type = nautilus_file_get_mime_type (file); app = g_app_info_get_default_for_type (mime_type, !nautilus_file_is_local_or_fuse (file)); g_free (mime_type); if (app == NULL) { uri_scheme = nautilus_file_get_uri_scheme (file); if (uri_scheme != NULL) { app = g_app_info_get_default_for_uri_scheme (uri_scheme); g_free (uri_scheme); } } return app; }
C
nautilus
0
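The Nautilus record above moves trust for .desktop files into file metadata (metadata::trusted), which cannot travel inside a downloaded archive. A rough GIO-level sketch of that idea follows; the attribute name comes from the commit message, while the helper names, the "true" value, and the exact calls Nautilus wraps around GIO are assumptions made for this illustration.
#include <gio/gio.h>

/* Mark a desktop file as user-trusted by attaching GIO metadata to it. */
static gboolean
mark_desktop_file_trusted (GFile *file, GError **error)
{
    return g_file_set_attribute_string (file, "metadata::trusted", "true",
                                        G_FILE_QUERY_INFO_NONE,
                                        NULL /* cancellable */, error);
}

/* Check the metadata before treating the file's Exec line as trusted. */
static gboolean
desktop_file_is_trusted (GFile *file)
{
    GFileInfo *info;
    gboolean trusted;

    info = g_file_query_info (file, "metadata::trusted",
                              G_FILE_QUERY_INFO_NONE, NULL, NULL);
    if (info == NULL)
        return FALSE;

    trusted = g_strcmp0 (g_file_info_get_attribute_string (info, "metadata::trusted"),
                         "true") == 0;
    g_object_unref (info);
    return trusted;
}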
CVE-2015-3418
https://www.cvedetails.com/cve/CVE-2015-3418/
CWE-369
https://cgit.freedesktop.org/xorg/xserver/commit/?id=dc777c346d5d452a53b13b917c45f6a1bad2f20b
dc777c346d5d452a53b13b917c45f6a1bad2f20b
null
ServerOrder(void) { int whichbyte = 1; if (*((char *) &whichbyte)) return LSBFirst; return MSBFirst; }
ServerOrder(void) { int whichbyte = 1; if (*((char *) &whichbyte)) return LSBFirst; return MSBFirst; }
C
xserver
0
CVE-2013-1944
https://www.cvedetails.com/cve/CVE-2013-1944/
CWE-200
https://github.com/bagder/curl/commit/2eb8dcf26cb37f09cffe26909a646e702dbcab66
2eb8dcf26cb37f09cffe26909a646e702dbcab66
cookie: fix tailmatching to prevent cross-domain leakage Cookies set for 'example.com' could accidentally also be sent by libcurl to 'bexample.com' (i.e. with a prefix to the first domain name). This is a security vulnerability, CVE-2013-1944. Bug: http://curl.haxx.se/docs/adv_20130412.html
static int cookie_output(struct CookieInfo *c, const char *dumphere) { struct Cookie *co; FILE *out; bool use_stdout=FALSE; if((NULL == c) || (0 == c->numcookies)) /* If there are no known cookies, we don't write or even create any destination file */ return 0; if(strequal("-", dumphere)) { /* use stdout */ out = stdout; use_stdout=TRUE; } else { out = fopen(dumphere, "w"); if(!out) return 1; /* failure */ } if(c) { char *format_ptr; fputs("# Netscape HTTP Cookie File\n" "# http://curl.haxx.se/docs/http-cookies.html\n" "# This file was generated by libcurl! Edit at your own risk.\n\n", out); co = c->cookies; while(co) { format_ptr = get_netscape_format(co); if(format_ptr == NULL) { fprintf(out, "#\n# Fatal libcurl error\n"); if(!use_stdout) fclose(out); return 1; } fprintf(out, "%s\n", format_ptr); free(format_ptr); co=co->next; } } if(!use_stdout) fclose(out); return 0; }
static int cookie_output(struct CookieInfo *c, const char *dumphere) { struct Cookie *co; FILE *out; bool use_stdout=FALSE; if((NULL == c) || (0 == c->numcookies)) /* If there are no known cookies, we don't write or even create any destination file */ return 0; if(strequal("-", dumphere)) { /* use stdout */ out = stdout; use_stdout=TRUE; } else { out = fopen(dumphere, "w"); if(!out) return 1; /* failure */ } if(c) { char *format_ptr; fputs("# Netscape HTTP Cookie File\n" "# http://curl.haxx.se/docs/http-cookies.html\n" "# This file was generated by libcurl! Edit at your own risk.\n\n", out); co = c->cookies; while(co) { format_ptr = get_netscape_format(co); if(format_ptr == NULL) { fprintf(out, "#\n# Fatal libcurl error\n"); if(!use_stdout) fclose(out); return 1; } fprintf(out, "%s\n", format_ptr); free(format_ptr); co=co->next; } } if(!use_stdout) fclose(out); return 0; }
C
curl
0
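The curl record above is the classic tail-matching pitfall: matching only the domain suffix lets cookies for 'example.com' leak to 'bexample.com'. The sketch below shows the safe check in a standalone function; it illustrates the rule (exact match, or a '.' immediately before the matched suffix) and is not the function curl actually patched.
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Accept hostname only if it equals the cookie domain or ends with
 * ".<domain>", so "www.example.com" matches but "bexample.com" does not. */
static bool tailmatch_domain(const char *cookie_domain, const char *hostname)
{
    size_t clen = strlen(cookie_domain);
    size_t hlen = strlen(hostname);

    if (clen > hlen)
        return false;
    if (strcasecmp(cookie_domain, hostname + (hlen - clen)) != 0)
        return false;
    return hlen == clen || hostname[hlen - clen - 1] == '.';
}

int main(void)
{
    printf("%d\n", tailmatch_domain("example.com", "www.example.com")); /* 1 */
    printf("%d\n", tailmatch_domain("example.com", "bexample.com"));    /* 0 */
    return 0;
}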
CVE-2012-2875
https://www.cvedetails.com/cve/CVE-2012-2875/
null
https://github.com/chromium/chromium/commit/40ed2b7ae4f6f5adb1b0ce9acf9c4dece339c2a6
40ed2b7ae4f6f5adb1b0ce9acf9c4dece339c2a6
gdata: Define the resource ID for the root directory Per the spec, the resource ID for the root directory is defined as "folder:root". Add the resource ID to the root directory in our file system representation so we can look up the root directory by the resource ID. BUG=127697 TEST=add unit tests Review URL: https://chromiumcodereview.appspot.com/10332253 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137928 0039d316-1c4b-4281-b951-d872f2087c98
bool ReadDirectoryAndCompare(const FilePath& file_path, GDataDirectory* directory) { file_system_->ReadDirectoryByPathAsync( file_path, base::Bind(&CallbackHelper::ReadDirectoryCallback, callback_helper_.get())); message_loop_.RunAllPending(); if (directory == NULL) { return (callback_helper_->last_error_ == base::PLATFORM_FILE_ERROR_NOT_FOUND && callback_helper_->directory_proto_ == NULL); } if (callback_helper_->last_error_ != base::PLATFORM_FILE_OK) return false; scoped_ptr<GDataDirectoryProto> directory_proto = callback_helper_->directory_proto_.Pass(); return (directory->resource_id() == directory_proto->gdata_entry().resource_id()); }
bool ReadDirectoryAndCompare(const FilePath& file_path, GDataDirectory* directory) { file_system_->ReadDirectoryByPathAsync( file_path, base::Bind(&CallbackHelper::ReadDirectoryCallback, callback_helper_.get())); message_loop_.RunAllPending(); if (directory == NULL) { return (callback_helper_->last_error_ == base::PLATFORM_FILE_ERROR_NOT_FOUND && callback_helper_->directory_proto_ == NULL); } if (callback_helper_->last_error_ != base::PLATFORM_FILE_OK) return false; scoped_ptr<GDataDirectoryProto> directory_proto = callback_helper_->directory_proto_.Pass(); return (directory->resource_id() == directory_proto->gdata_entry().resource_id()); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e
ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e
Clean up calls like "gfx::Rect(0, 0, size().width(), size().height()". The caller can use the much shorter "gfx::Rect(size())", since gfx::Rect has a constructor that just takes a Size. BUG=none TEST=none Review URL: http://codereview.chromium.org/2204001 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@48283 0039d316-1c4b-4281-b951-d872f2087c98
bool GetSelectedTabFunction::RunImpl() { Browser* browser; int window_id = -1; if (HasOptionalArgument(0)) { EXTENSION_FUNCTION_VALIDATE(args_->GetInteger(0, &window_id)); browser = GetBrowserInProfileWithId(profile(), window_id, include_incognito(), &error_); } else { browser = GetCurrentBrowser(); if (!browser) error_ = keys::kNoCurrentWindowError; } if (!browser) return false; TabStripModel* tab_strip = browser->tabstrip_model(); TabContents* contents = tab_strip->GetSelectedTabContents(); if (!contents) { error_ = keys::kNoSelectedTabError; return false; } result_.reset(ExtensionTabUtil::CreateTabValue(contents, tab_strip, tab_strip->selected_index())); return true; }
bool GetSelectedTabFunction::RunImpl() { Browser* browser; int window_id = -1; if (HasOptionalArgument(0)) { EXTENSION_FUNCTION_VALIDATE(args_->GetInteger(0, &window_id)); browser = GetBrowserInProfileWithId(profile(), window_id, include_incognito(), &error_); } else { browser = GetCurrentBrowser(); if (!browser) error_ = keys::kNoCurrentWindowError; } if (!browser) return false; TabStripModel* tab_strip = browser->tabstrip_model(); TabContents* contents = tab_strip->GetSelectedTabContents(); if (!contents) { error_ = keys::kNoSelectedTabError; return false; } result_.reset(ExtensionTabUtil::CreateTabValue(contents, tab_strip, tab_strip->selected_index())); return true; }
C
Chrome
0
CVE-2013-6626
https://www.cvedetails.com/cve/CVE-2013-6626/
null
https://github.com/chromium/chromium/commit/90fb08ed0146c9beacfd4dde98a20fc45419fff3
90fb08ed0146c9beacfd4dde98a20fc45419fff3
Cancel JavaScript dialogs when an interstitial appears. BUG=295695 TEST=See bug for repro steps. Review URL: https://chromiumcodereview.appspot.com/24360011 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98
void WebContentsImpl::NotifySwapped(RenderViewHost* old_render_view_host) { notify_disconnection_ = true; FOR_EACH_OBSERVER(WebContentsObserver, observers_, RenderViewHostSwapped(old_render_view_host)); NotificationService::current()->Notify( NOTIFICATION_WEB_CONTENTS_SWAPPED, Source<WebContents>(this), Details<RenderViewHost>(old_render_view_host)); RemoveBrowserPluginEmbedder(); }
void WebContentsImpl::NotifySwapped(RenderViewHost* old_render_view_host) { notify_disconnection_ = true; FOR_EACH_OBSERVER(WebContentsObserver, observers_, RenderViewHostSwapped(old_render_view_host)); NotificationService::current()->Notify( NOTIFICATION_WEB_CONTENTS_SWAPPED, Source<WebContents>(this), Details<RenderViewHost>(old_render_view_host)); RemoveBrowserPluginEmbedder(); }
C
Chrome
0
CVE-2018-6111
https://www.cvedetails.com/cve/CVE-2018-6111/
CWE-20
https://github.com/chromium/chromium/commit/3c8e4852477d5b1e2da877808c998dc57db9460f
3c8e4852477d5b1e2da877808c998dc57db9460f
DevTools: speculative fix for crash in NetworkHandler::Disable This keeps BrowserContext* and StoragePartition* instead of RenderProcessHost* in an attempt to resolve UAF of RenderProcessHost upon closure of DevTools front-end. Bug: 801117, 783067, 780694 Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b Reviewed-on: https://chromium-review.googlesource.com/876657 Commit-Queue: Andrey Kosyakov <caseq@chromium.org> Reviewed-by: Dmitry Gozman <dgozman@chromium.org> Cr-Commit-Position: refs/heads/master@{#531157}
EmulationHandler::EmulationHandler() : DevToolsDomainHandler(Emulation::Metainfo::domainName), touch_emulation_enabled_(false), device_emulation_enabled_(false), host_(nullptr) { }
EmulationHandler::EmulationHandler() : DevToolsDomainHandler(Emulation::Metainfo::domainName), touch_emulation_enabled_(false), device_emulation_enabled_(false), host_(nullptr) { }
C
Chrome
0
CVE-2017-12168
https://www.cvedetails.com/cve/CVE-2017-12168/
CWE-617
https://github.com/torvalds/linux/commit/9e3f7a29694049edd728e2400ab57ad7553e5aa9
9e3f7a29694049edd728e2400ab57ad7553e5aa9
arm64: KVM: pmu: Fix AArch32 cycle counter access We're missing the handling code for the cycle counter accessed from a 32bit guest, leading to unexpected results. Cc: stable@vger.kernel.org # 4.6+ Signed-off-by: Wei Huang <wei@redhat.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
static void reset_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; }
static void reset_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; }
C
linux
0
CVE-2013-0840
https://www.cvedetails.com/cve/CVE-2013-0840/
null
https://github.com/chromium/chromium/commit/7f48b71cb22bb2fc9fcec2013e9eaff55381a43d
7f48b71cb22bb2fc9fcec2013e9eaff55381a43d
Filter more incoming URLs in the CreateWindow path. BUG=170532 Review URL: https://chromiumcodereview.appspot.com/12036002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@178728 0039d316-1c4b-4281-b951-d872f2087c98
void RenderViewHostImpl::SetSwappedOut(bool is_swapped_out) { is_swapped_out_ = is_swapped_out; is_waiting_for_beforeunload_ack_ = false; is_waiting_for_unload_ack_ = false; has_timed_out_on_unload_ = false; }
void RenderViewHostImpl::SetSwappedOut(bool is_swapped_out) { is_swapped_out_ = is_swapped_out; is_waiting_for_beforeunload_ack_ = false; is_waiting_for_unload_ack_ = false; has_timed_out_on_unload_ = false; }
C
Chrome
0
CVE-2016-0842
https://www.cvedetails.com/cve/CVE-2016-0842/
CWE-119
https://android.googlesource.com/platform/external/libavc/+/943323f1d9d3dd5c2634deb26cbe72343ca6b3db
943323f1d9d3dd5c2634deb26cbe72343ca6b3db
Return error when there are more mmco params than allocated size Bug: 25818142 Change-Id: I5c1b23985eeca5192b42703c627ca3d060e4e13d
WORD32 ih264d_update_default_index_list(dpb_manager_t *ps_dpb_mgr) { WORD32 i; struct dpb_info_t *ps_next_dpb = ps_dpb_mgr->ps_dpb_st_head; for(i = 0; i < ps_dpb_mgr->u1_num_st_ref_bufs; i++) { ps_dpb_mgr->ps_def_dpb[i] = ps_next_dpb->ps_pic_buf; ps_next_dpb = ps_next_dpb->ps_prev_short; } ps_next_dpb = ps_dpb_mgr->ps_dpb_ht_head; for(;i< ps_dpb_mgr->u1_num_st_ref_bufs + ps_dpb_mgr->u1_num_lt_ref_bufs; i++) { ps_dpb_mgr->ps_def_dpb[i] = ps_next_dpb->ps_pic_buf; ps_next_dpb = ps_next_dpb->ps_prev_long; } return 0; }
WORD32 ih264d_update_default_index_list(dpb_manager_t *ps_dpb_mgr) { WORD32 i; struct dpb_info_t *ps_next_dpb = ps_dpb_mgr->ps_dpb_st_head; for(i = 0; i < ps_dpb_mgr->u1_num_st_ref_bufs; i++) { ps_dpb_mgr->ps_def_dpb[i] = ps_next_dpb->ps_pic_buf; ps_next_dpb = ps_next_dpb->ps_prev_short; } ps_next_dpb = ps_dpb_mgr->ps_dpb_ht_head; for(;i< ps_dpb_mgr->u1_num_st_ref_bufs + ps_dpb_mgr->u1_num_lt_ref_bufs; i++) { ps_dpb_mgr->ps_def_dpb[i] = ps_next_dpb->ps_pic_buf; ps_next_dpb = ps_next_dpb->ps_prev_long; } return 0; }
C
Android
0
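The libavc record above adds a plain bounds check: return an error once more mmco (memory management control operation) parameters arrive than space was allocated for. A simplified, hypothetical sketch of that pattern follows; the type, the capacity constant, and the helper are stand-ins, not the decoder's real structures.
#include <stddef.h>

#define MAX_MMCO_CMDS 32   /* stand-in for the allocated size */

typedef struct {
    int cmds[MAX_MMCO_CMDS];
    size_t num_cmds;
} mmco_list_t;

/* Append one mmco command, refusing (instead of writing past the end)
 * when the bitstream supplies more parameters than were allocated. */
static int mmco_append(mmco_list_t *list, int cmd)
{
    if (list->num_cmds >= MAX_MMCO_CMDS)
        return -1;
    list->cmds[list->num_cmds++] = cmd;
    return 0;
}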
CVE-2016-10133
https://www.cvedetails.com/cve/CVE-2016-10133/
CWE-119
http://git.ghostscript.com/?p=mujs.git;a=commit;h=77ab465f1c394bb77f00966cd950650f3f53cb24
77ab465f1c394bb77f00966cd950650f3f53cb24
null
void js_call(js_State *J, int n) { js_Object *obj; int savebot; if (!js_iscallable(J, -n-2)) js_typeerror(J, "called object is not a function"); obj = js_toobject(J, -n-2); savebot = BOT; BOT = TOP - n - 1; if (obj->type == JS_CFUNCTION) { jsR_pushtrace(J, obj->u.f.function->name, obj->u.f.function->filename, obj->u.f.function->line); if (obj->u.f.function->lightweight) jsR_calllwfunction(J, n, obj->u.f.function, obj->u.f.scope); else jsR_callfunction(J, n, obj->u.f.function, obj->u.f.scope); --J->tracetop; } else if (obj->type == JS_CSCRIPT) { jsR_pushtrace(J, obj->u.f.function->name, obj->u.f.function->filename, obj->u.f.function->line); jsR_callscript(J, n, obj->u.f.function, obj->u.f.scope); --J->tracetop; } else if (obj->type == JS_CCFUNCTION) { jsR_pushtrace(J, obj->u.c.name, "native", 0); jsR_callcfunction(J, n, obj->u.c.length, obj->u.c.function); --J->tracetop; } BOT = savebot; }
void js_call(js_State *J, int n) { js_Object *obj; int savebot; if (!js_iscallable(J, -n-2)) js_typeerror(J, "called object is not a function"); obj = js_toobject(J, -n-2); savebot = BOT; BOT = TOP - n - 1; if (obj->type == JS_CFUNCTION) { jsR_pushtrace(J, obj->u.f.function->name, obj->u.f.function->filename, obj->u.f.function->line); if (obj->u.f.function->lightweight) jsR_calllwfunction(J, n, obj->u.f.function, obj->u.f.scope); else jsR_callfunction(J, n, obj->u.f.function, obj->u.f.scope); --J->tracetop; } else if (obj->type == JS_CSCRIPT) { jsR_pushtrace(J, obj->u.f.function->name, obj->u.f.function->filename, obj->u.f.function->line); jsR_callscript(J, n, obj->u.f.function, obj->u.f.scope); --J->tracetop; } else if (obj->type == JS_CCFUNCTION) { jsR_pushtrace(J, obj->u.c.name, "native", 0); jsR_callcfunction(J, n, obj->u.c.length, obj->u.c.function); --J->tracetop; } BOT = savebot; }
C
ghostscript
0
CVE-2017-16359
https://www.cvedetails.com/cve/CVE-2017-16359/
CWE-476
https://github.com/radare/radare2/commit/62e39f34b2705131a2d08aff0c2e542c6a52cf0e
62e39f34b2705131a2d08aff0c2e542c6a52cf0e
Fix #8764 - huge vd_aux caused pointer wraparound
int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; }
int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; }
C
radare2
0
CVE-2011-3053
https://www.cvedetails.com/cve/CVE-2011-3053/
CWE-399
https://github.com/chromium/chromium/commit/c442b3eda2f1fdd4d1d4864c34c43cbaf223acae
c442b3eda2f1fdd4d1d4864c34c43cbaf223acae
chromeos: Move audio, power, and UI files into subdirs. This moves more files from chrome/browser/chromeos/ into subdirectories. BUG=chromium-os:22896 TEST=did chrome os builds both with and without aura TBR=sky Review URL: http://codereview.chromium.org/9125006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@116746 0039d316-1c4b-4281-b951-d872f2087c98
BrowserInit::LaunchWithProfile::LaunchWithProfile( const FilePath& cur_dir, const CommandLine& command_line, IsFirstRun is_first_run) : cur_dir_(cur_dir), command_line_(command_line), profile_(NULL), browser_init_(NULL), is_first_run_(is_first_run == IS_FIRST_RUN) { }
BrowserInit::LaunchWithProfile::LaunchWithProfile( const FilePath& cur_dir, const CommandLine& command_line, IsFirstRun is_first_run) : cur_dir_(cur_dir), command_line_(command_line), profile_(NULL), browser_init_(NULL), is_first_run_(is_first_run == IS_FIRST_RUN) { }
C
Chrome
0
CVE-2011-2861
https://www.cvedetails.com/cve/CVE-2011-2861/
CWE-20
https://github.com/chromium/chromium/commit/8262245d384be025f13e2a5b3a03b7e5c98374ce
8262245d384be025f13e2a5b3a03b7e5c98374ce
DevTools: move DevToolsAgent/Client into content. BUG=84078 TEST= Review URL: http://codereview.chromium.org/7461019 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93596 0039d316-1c4b-4281-b951-d872f2087c98
int ChromeContentBrowserClient::GetCrashSignalFD( const std::string& process_type) { if (process_type == switches::kRendererProcess) return RendererCrashHandlerHostLinux::GetInstance()->GetDeathSignalSocket(); if (process_type == switches::kExtensionProcess) { ExtensionCrashHandlerHostLinux* crash_handler = ExtensionCrashHandlerHostLinux::GetInstance(); return crash_handler->GetDeathSignalSocket(); } if (process_type == switches::kPluginProcess) return PluginCrashHandlerHostLinux::GetInstance()->GetDeathSignalSocket(); if (process_type == switches::kPpapiPluginProcess) return PpapiCrashHandlerHostLinux::GetInstance()->GetDeathSignalSocket(); if (process_type == switches::kGpuProcess) return GpuCrashHandlerHostLinux::GetInstance()->GetDeathSignalSocket(); return -1; }
int ChromeContentBrowserClient::GetCrashSignalFD( const std::string& process_type) { if (process_type == switches::kRendererProcess) return RendererCrashHandlerHostLinux::GetInstance()->GetDeathSignalSocket(); if (process_type == switches::kExtensionProcess) { ExtensionCrashHandlerHostLinux* crash_handler = ExtensionCrashHandlerHostLinux::GetInstance(); return crash_handler->GetDeathSignalSocket(); } if (process_type == switches::kPluginProcess) return PluginCrashHandlerHostLinux::GetInstance()->GetDeathSignalSocket(); if (process_type == switches::kPpapiPluginProcess) return PpapiCrashHandlerHostLinux::GetInstance()->GetDeathSignalSocket(); if (process_type == switches::kGpuProcess) return GpuCrashHandlerHostLinux::GetInstance()->GetDeathSignalSocket(); return -1; }
C
Chrome
0
CVE-2016-4809
https://www.cvedetails.com/cve/CVE-2016-4809/
CWE-20
https://github.com/libarchive/libarchive/commit/fd7e0c02
fd7e0c02
Reject cpio symlinks that exceed 1MB
record_hardlink(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry) { struct links_entry *le; dev_t dev; int64_t ino; if (archive_entry_nlink(entry) <= 1) return (ARCHIVE_OK); dev = archive_entry_dev(entry); ino = archive_entry_ino64(entry); /* * First look in the list of multiply-linked files. If we've * already dumped it, convert this entry to a hard link entry. */ for (le = cpio->links_head; le; le = le->next) { if (le->dev == dev && le->ino == ino) { archive_entry_copy_hardlink(entry, le->name); if (--le->links <= 0) { if (le->previous != NULL) le->previous->next = le->next; if (le->next != NULL) le->next->previous = le->previous; if (cpio->links_head == le) cpio->links_head = le->next; free(le->name); free(le); } return (ARCHIVE_OK); } } le = (struct links_entry *)malloc(sizeof(struct links_entry)); if (le == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory adding file to list"); return (ARCHIVE_FATAL); } if (cpio->links_head != NULL) cpio->links_head->previous = le; le->next = cpio->links_head; le->previous = NULL; cpio->links_head = le; le->dev = dev; le->ino = ino; le->links = archive_entry_nlink(entry) - 1; le->name = strdup(archive_entry_pathname(entry)); if (le->name == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory adding file to list"); return (ARCHIVE_FATAL); } return (ARCHIVE_OK); }
record_hardlink(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry) { struct links_entry *le; dev_t dev; int64_t ino; if (archive_entry_nlink(entry) <= 1) return (ARCHIVE_OK); dev = archive_entry_dev(entry); ino = archive_entry_ino64(entry); /* * First look in the list of multiply-linked files. If we've * already dumped it, convert this entry to a hard link entry. */ for (le = cpio->links_head; le; le = le->next) { if (le->dev == dev && le->ino == ino) { archive_entry_copy_hardlink(entry, le->name); if (--le->links <= 0) { if (le->previous != NULL) le->previous->next = le->next; if (le->next != NULL) le->next->previous = le->previous; if (cpio->links_head == le) cpio->links_head = le->next; free(le->name); free(le); } return (ARCHIVE_OK); } } le = (struct links_entry *)malloc(sizeof(struct links_entry)); if (le == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory adding file to list"); return (ARCHIVE_FATAL); } if (cpio->links_head != NULL) cpio->links_head->previous = le; le->next = cpio->links_head; le->previous = NULL; cpio->links_head = le; le->dev = dev; le->ino = ino; le->links = archive_entry_nlink(entry) - 1; le->name = strdup(archive_entry_pathname(entry)); if (le->name == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory adding file to list"); return (ARCHIVE_FATAL); } return (ARCHIVE_OK); }
C
libarchive
0
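The libarchive record above caps cpio symlink targets at 1MB. The essential move is validating the declared size before allocating and copying; the sketch below shows that shape with made-up names and is not libarchive's actual reader code.
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SYMLINK_LIMIT (1024 * 1024)   /* cap taken from the commit message */

/* Copy a symlink target out of the archive body only if its declared
 * size is sane; reject zero-length and oversized targets up front. */
static char *read_symlink_target(const unsigned char *body, uint64_t declared_size)
{
    char *target;

    if (declared_size == 0 || declared_size > SYMLINK_LIMIT)
        return NULL;

    target = malloc((size_t)declared_size + 1);
    if (target == NULL)
        return NULL;
    memcpy(target, body, (size_t)declared_size);
    target[declared_size] = '\0';
    return target;
}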
CVE-2016-6309
https://www.cvedetails.com/cve/CVE-2016-6309/
CWE-416
https://git.openssl.org/?p=openssl.git;a=commit;h=acacbfa7565c78d2273c0b2a2e5e803f44afefeb
acacbfa7565c78d2273c0b2a2e5e803f44afefeb
null
int SSL_in_init(SSL *s) { return s->statem.in_init; }
int SSL_in_init(SSL *s) { return s->statem.in_init; }
C
openssl
0
CVE-2011-3055
https://www.cvedetails.com/cve/CVE-2011-3055/
null
https://github.com/chromium/chromium/commit/e9372a1bfd3588a80fcf49aa07321f0971dd6091
e9372a1bfd3588a80fcf49aa07321f0971dd6091
[V8] Pass Isolate to throwNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=86983 Reviewed by Adam Barth. The objective is to pass Isolate around in V8 bindings. This patch passes Isolate to throwNotEnoughArgumentsError(). No tests. No change in behavior. * bindings/scripts/CodeGeneratorV8.pm: (GenerateArgumentsCountCheck): (GenerateEventConstructorCallback): * bindings/scripts/test/V8/V8Float64Array.cpp: (WebCore::Float64ArrayV8Internal::fooCallback): * bindings/scripts/test/V8/V8TestActiveDOMObject.cpp: (WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback): (WebCore::TestActiveDOMObjectV8Internal::postMessageCallback): * bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp: (WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback): * bindings/scripts/test/V8/V8TestEventConstructor.cpp: (WebCore::V8TestEventConstructor::constructorCallback): * bindings/scripts/test/V8/V8TestEventTarget.cpp: (WebCore::TestEventTargetV8Internal::itemCallback): (WebCore::TestEventTargetV8Internal::dispatchEventCallback): * bindings/scripts/test/V8/V8TestInterface.cpp: (WebCore::TestInterfaceV8Internal::supplementalMethod2Callback): (WebCore::V8TestInterface::constructorCallback): * bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp: (WebCore::TestMediaQueryListListenerV8Internal::methodCallback): * bindings/scripts/test/V8/V8TestNamedConstructor.cpp: (WebCore::V8TestNamedConstructorConstructorCallback): * bindings/scripts/test/V8/V8TestObj.cpp: (WebCore::TestObjV8Internal::voidMethodWithArgsCallback): (WebCore::TestObjV8Internal::intMethodWithArgsCallback): (WebCore::TestObjV8Internal::objMethodWithArgsCallback): (WebCore::TestObjV8Internal::methodWithSequenceArgCallback): (WebCore::TestObjV8Internal::methodReturningSequenceCallback): (WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback): (WebCore::TestObjV8Internal::serializedValueCallback): (WebCore::TestObjV8Internal::idbKeyCallback): (WebCore::TestObjV8Internal::optionsObjectCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback): (WebCore::TestObjV8Internal::methodWithCallbackArgCallback): (WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback): (WebCore::TestObjV8Internal::overloadedMethod1Callback): (WebCore::TestObjV8Internal::overloadedMethod2Callback): (WebCore::TestObjV8Internal::overloadedMethod3Callback): (WebCore::TestObjV8Internal::overloadedMethod4Callback): (WebCore::TestObjV8Internal::overloadedMethod5Callback): (WebCore::TestObjV8Internal::overloadedMethod6Callback): (WebCore::TestObjV8Internal::overloadedMethod7Callback): (WebCore::TestObjV8Internal::overloadedMethod11Callback): (WebCore::TestObjV8Internal::overloadedMethod12Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback): (WebCore::TestObjV8Internal::convert1Callback): (WebCore::TestObjV8Internal::convert2Callback): (WebCore::TestObjV8Internal::convert3Callback): (WebCore::TestObjV8Internal::convert4Callback): (WebCore::TestObjV8Internal::convert5Callback): (WebCore::TestObjV8Internal::strictFunctionCallback): (WebCore::V8TestObj::constructorCallback): * bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp: (WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback): (WebCore::V8TestSerializedScriptValueInterface::constructorCallback): * 
bindings/v8/ScriptController.cpp: (WebCore::setValueAndClosePopupCallback): * bindings/v8/V8Proxy.cpp: (WebCore::V8Proxy::throwNotEnoughArgumentsError): * bindings/v8/V8Proxy.h: (V8Proxy): * bindings/v8/custom/V8AudioContextCustom.cpp: (WebCore::V8AudioContext::constructorCallback): * bindings/v8/custom/V8DataViewCustom.cpp: (WebCore::V8DataView::getInt8Callback): (WebCore::V8DataView::getUint8Callback): (WebCore::V8DataView::setInt8Callback): (WebCore::V8DataView::setUint8Callback): * bindings/v8/custom/V8DirectoryEntryCustom.cpp: (WebCore::V8DirectoryEntry::getDirectoryCallback): (WebCore::V8DirectoryEntry::getFileCallback): * bindings/v8/custom/V8IntentConstructor.cpp: (WebCore::V8Intent::constructorCallback): * bindings/v8/custom/V8SVGLengthCustom.cpp: (WebCore::V8SVGLength::convertToSpecifiedUnitsCallback): * bindings/v8/custom/V8WebGLRenderingContextCustom.cpp: (WebCore::getObjectParameter): (WebCore::V8WebGLRenderingContext::getAttachedShadersCallback): (WebCore::V8WebGLRenderingContext::getExtensionCallback): (WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback): (WebCore::V8WebGLRenderingContext::getParameterCallback): (WebCore::V8WebGLRenderingContext::getProgramParameterCallback): (WebCore::V8WebGLRenderingContext::getShaderParameterCallback): (WebCore::V8WebGLRenderingContext::getUniformCallback): (WebCore::vertexAttribAndUniformHelperf): (WebCore::uniformHelperi): (WebCore::uniformMatrixHelper): * bindings/v8/custom/V8WebKitMutationObserverCustom.cpp: (WebCore::V8WebKitMutationObserver::constructorCallback): (WebCore::V8WebKitMutationObserver::observeCallback): * bindings/v8/custom/V8WebSocketCustom.cpp: (WebCore::V8WebSocket::constructorCallback): (WebCore::V8WebSocket::sendCallback): * bindings/v8/custom/V8XMLHttpRequestCustom.cpp: (WebCore::V8XMLHttpRequest::openCallback): git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static v8::Handle<v8::Value> XMLObjAttrAttrGetter(v8::Local<v8::String> name, const v8::AccessorInfo& info) { INC_STATS("DOM.TestObj.XMLObjAttr._get"); TestObj* imp = V8TestObj::toNative(info.Holder()); return toV8(imp->xmlObjAttr(), info.GetIsolate()); }
static v8::Handle<v8::Value> XMLObjAttrAttrGetter(v8::Local<v8::String> name, const v8::AccessorInfo& info) { INC_STATS("DOM.TestObj.XMLObjAttr._get"); TestObj* imp = V8TestObj::toNative(info.Holder()); return toV8(imp->xmlObjAttr(), info.GetIsolate()); }
C
Chrome
0
CVE-2016-3138
https://www.cvedetails.com/cve/CVE-2016-3138/
null
https://github.com/torvalds/linux/commit/8835ba4a39cf53f705417b3b3a94eb067673f2c9
8835ba4a39cf53f705417b3b3a94eb067673f2c9
USB: cdc-acm: more sanity checking An attack has become available which pretends to be a quirky device circumventing normal sanity checks and crashes the kernel by an insufficient number of interfaces. This patch adds a check to the code path for quirky devices. Signed-off-by: Oliver Neukum <ONeukum@suse.com> CC: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) { struct acm *acm = container_of(port, struct acm, port); int retval = -ENODEV; int i; dev_dbg(&acm->control->dev, "%s\n", __func__); mutex_lock(&acm->mutex); if (acm->disconnected) goto disconnected; retval = usb_autopm_get_interface(acm->control); if (retval) goto error_get_interface; /* * FIXME: Why do we need this? Allocating 64K of physically contiguous * memory is really nasty... */ set_bit(TTY_NO_WRITE_SPLIT, &tty->flags); acm->control->needs_remote_wakeup = 1; acm->ctrlurb->dev = acm->dev; retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL); if (retval) { dev_err(&acm->control->dev, "%s - usb_submit_urb(ctrl irq) failed\n", __func__); goto error_submit_urb; } acm_tty_set_termios(tty, NULL); /* * Unthrottle device in case the TTY was closed while throttled. */ spin_lock_irq(&acm->read_lock); acm->throttled = 0; acm->throttle_req = 0; spin_unlock_irq(&acm->read_lock); retval = acm_submit_read_urbs(acm, GFP_KERNEL); if (retval) goto error_submit_read_urbs; usb_autopm_put_interface(acm->control); mutex_unlock(&acm->mutex); return 0; error_submit_read_urbs: for (i = 0; i < acm->rx_buflimit; i++) usb_kill_urb(acm->read_urbs[i]); usb_kill_urb(acm->ctrlurb); error_submit_urb: usb_autopm_put_interface(acm->control); error_get_interface: disconnected: mutex_unlock(&acm->mutex); return usb_translate_errors(retval); }
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) { struct acm *acm = container_of(port, struct acm, port); int retval = -ENODEV; int i; dev_dbg(&acm->control->dev, "%s\n", __func__); mutex_lock(&acm->mutex); if (acm->disconnected) goto disconnected; retval = usb_autopm_get_interface(acm->control); if (retval) goto error_get_interface; /* * FIXME: Why do we need this? Allocating 64K of physically contiguous * memory is really nasty... */ set_bit(TTY_NO_WRITE_SPLIT, &tty->flags); acm->control->needs_remote_wakeup = 1; acm->ctrlurb->dev = acm->dev; retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL); if (retval) { dev_err(&acm->control->dev, "%s - usb_submit_urb(ctrl irq) failed\n", __func__); goto error_submit_urb; } acm_tty_set_termios(tty, NULL); /* * Unthrottle device in case the TTY was closed while throttled. */ spin_lock_irq(&acm->read_lock); acm->throttled = 0; acm->throttle_req = 0; spin_unlock_irq(&acm->read_lock); retval = acm_submit_read_urbs(acm, GFP_KERNEL); if (retval) goto error_submit_read_urbs; usb_autopm_put_interface(acm->control); mutex_unlock(&acm->mutex); return 0; error_submit_read_urbs: for (i = 0; i < acm->rx_buflimit; i++) usb_kill_urb(acm->read_urbs[i]); usb_kill_urb(acm->ctrlurb); error_submit_urb: usb_autopm_put_interface(acm->control); error_get_interface: disconnected: mutex_unlock(&acm->mutex); return usb_translate_errors(retval); }
C
linux
0
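The cdc-acm record above adds a sanity check so a device that pretends to be quirky cannot crash the probe path by exposing too few interfaces. Stripped of the USB specifics, it is defensive indexing; the structures below are simplified stand-ins, not the USB core types or the real acm_probe() logic.
#include <stddef.h>

struct fake_interface { int number; };

struct fake_config {
    int num_interfaces;
    struct fake_interface *interfaces;
};

/* Look up the data interface only after confirming the configuration
 * really contains that many interfaces; otherwise bail out cleanly. */
static struct fake_interface *
get_data_interface(struct fake_config *config, int data_index)
{
    if (data_index < 0 || data_index >= config->num_interfaces)
        return NULL;
    return &config->interfaces[data_index];
}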
CVE-2016-1586
https://www.cvedetails.com/cve/CVE-2016-1586/
CWE-20
https://git.launchpad.net/oxide/commit/?id=29014da83e5fc358d6bff0f574e9ed45e61a35ac
29014da83e5fc358d6bff0f574e9ed45e61a35ac
null
void OxideQQuickWebView::stop() { Q_D(OxideQQuickWebView); if (!d->proxy_) { return; } d->proxy_->stop(); }
void OxideQQuickWebView::stop() { Q_D(OxideQQuickWebView); if (!d->proxy_) { return; } d->proxy_->stop(); }
CPP
launchpad
0
CVE-2014-1713
https://www.cvedetails.com/cve/CVE-2014-1713/
CWE-399
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
f85a87ec670ad0fce9d98d90c9a705b72a288154
document.location bindings fix BUG=352374 R=jochen@chromium.org Review URL: https://codereview.chromium.org/196343011 git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void stringArrayAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info) { TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder()); v8SetReturnValue(info, v8Array(imp->stringArrayAttribute(), info.GetIsolate())); }
static void stringArrayAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info) { TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder()); v8SetReturnValue(info, v8Array(imp->stringArrayAttribute(), info.GetIsolate())); }
C
Chrome
0
CVE-2019-5827
https://www.cvedetails.com/cve/CVE-2019-5827/
CWE-190
https://github.com/chromium/chromium/commit/517ac71c9ee27f856f9becde8abea7d1604af9d4
517ac71c9ee27f856f9becde8abea7d1604af9d4
sqlite: backport bugfixes for dbfuzz2 Bug: 952406 Change-Id: Icbec429742048d6674828726c96d8e265c41b595 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1568152 Reviewed-by: Chris Mumford <cmumford@google.com> Commit-Queue: Darwin Huang <huangdarwin@chromium.org> Cr-Commit-Position: refs/heads/master@{#651030}
static void codeDeferredSeek( WhereInfo *pWInfo, /* Where clause context */ Index *pIdx, /* Index scan is using */ int iCur, /* Cursor for IPK b-tree */ int iIdxCur /* Index cursor */ ){ Parse *pParse = pWInfo->pParse; /* Parse context */ Vdbe *v = pParse->pVdbe; /* Vdbe to generate code within */ assert( iIdxCur>0 ); assert( pIdx->aiColumn[pIdx->nColumn-1]==-1 ); sqlite3VdbeAddOp3(v, OP_DeferredSeek, iIdxCur, 0, iCur); if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE) && DbMaskAllZero(sqlite3ParseToplevel(pParse)->writeMask) ){ int i; Table *pTab = pIdx->pTable; int *ai = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*(pTab->nCol+1)); if( ai ){ ai[0] = pTab->nCol; for(i=0; i<pIdx->nColumn-1; i++){ assert( pIdx->aiColumn[i]<pTab->nCol ); if( pIdx->aiColumn[i]>=0 ) ai[pIdx->aiColumn[i]+1] = i+1; } sqlite3VdbeChangeP4(v, -1, (char*)ai, P4_INTARRAY); } } }
static void codeDeferredSeek( WhereInfo *pWInfo, /* Where clause context */ Index *pIdx, /* Index scan is using */ int iCur, /* Cursor for IPK b-tree */ int iIdxCur /* Index cursor */ ){ Parse *pParse = pWInfo->pParse; /* Parse context */ Vdbe *v = pParse->pVdbe; /* Vdbe to generate code within */ assert( iIdxCur>0 ); assert( pIdx->aiColumn[pIdx->nColumn-1]==-1 ); sqlite3VdbeAddOp3(v, OP_DeferredSeek, iIdxCur, 0, iCur); if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE) && DbMaskAllZero(sqlite3ParseToplevel(pParse)->writeMask) ){ int i; Table *pTab = pIdx->pTable; int *ai = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*(pTab->nCol+1)); if( ai ){ ai[0] = pTab->nCol; for(i=0; i<pIdx->nColumn-1; i++){ assert( pIdx->aiColumn[i]<pTab->nCol ); if( pIdx->aiColumn[i]>=0 ) ai[pIdx->aiColumn[i]+1] = i+1; } sqlite3VdbeChangeP4(v, -1, (char*)ai, P4_INTARRAY); } } }
C
Chrome
0
CVE-2018-20456
https://www.cvedetails.com/cve/CVE-2018-20456/
CWE-125
https://github.com/radare/radare2/commit/9b46d38dd3c4de6048a488b655c7319f845af185
9b46d38dd3c4de6048a488b655c7319f845af185
Fix #12372 and #12373 - Crash in x86 assembler (#12380) 0 ,0,[bP-bL-bP-bL-bL-r-bL-bP-bL-bL- mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx-- leA ,0,[bP-bL-bL-bP-bL-bP-bL-60@bL- leA ,0,[bP-bL-r-bP-bL-bP-bL-60@bL- mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
static int oplea(RAsm *a, ut8 *data, const Opcode *op){ int l = 0; int mod = 0; st32 offset = 0; int reg = 0; int rm = 0; if (op->operands[0].type & OT_REGALL && op->operands[1].type & (OT_MEMORY | OT_CONSTANT)) { if (a->bits == 64) { data[l++] = 0x48; } data[l++] = 0x8d; if (op->operands[1].regs[0] == X86R_UNDEFINED) { int high = 0xff00 & op->operands[1].offset; data[l++] = op->operands[0].reg << 3 | 5; data[l++] = op->operands[1].offset; data[l++] = high >> 8; data[l++] = op->operands[1].offset >> 16; data[l++] = op->operands[1].offset >> 24; return l; } else { reg = op->operands[0].reg; rm = op->operands[1].regs[0]; offset = op->operands[1].offset * op->operands[1].offset_sign; if (offset != 0 || op->operands[1].regs[0] == X86R_EBP) { mod = 1; if (offset >= 128 || offset < -128) { mod = 2; } data[l++] = mod << 6 | reg << 3 | rm; if (op->operands[1].regs[0] == X86R_ESP) { data[l++] = 0x24; } data[l++] = offset; if (mod == 2) { data[l++] = offset >> 8; data[l++] = offset >> 16; data[l++] = offset >> 24; } } else { data[l++] = op->operands[0].reg << 3 | op->operands[1].regs[0]; if (op->operands[1].regs[0] == X86R_ESP) { data[l++] = 0x24; } } } } return l; }
static int oplea(RAsm *a, ut8 *data, const Opcode *op){ int l = 0; int mod = 0; st32 offset = 0; int reg = 0; int rm = 0; if (op->operands[0].type & OT_REGALL && op->operands[1].type & (OT_MEMORY | OT_CONSTANT)) { if (a->bits == 64) { data[l++] = 0x48; } data[l++] = 0x8d; if (op->operands[1].regs[0] == X86R_UNDEFINED) { int high = 0xff00 & op->operands[1].offset; data[l++] = op->operands[0].reg << 3 | 5; data[l++] = op->operands[1].offset; data[l++] = high >> 8; data[l++] = op->operands[1].offset >> 16; data[l++] = op->operands[1].offset >> 24; return l; } else { reg = op->operands[0].reg; rm = op->operands[1].regs[0]; offset = op->operands[1].offset * op->operands[1].offset_sign; if (offset != 0 || op->operands[1].regs[0] == X86R_EBP) { mod = 1; if (offset >= 128 || offset < -128) { mod = 2; } data[l++] = mod << 6 | reg << 3 | rm; if (op->operands[1].regs[0] == X86R_ESP) { data[l++] = 0x24; } data[l++] = offset; if (mod == 2) { data[l++] = offset >> 8; data[l++] = offset >> 16; data[l++] = offset >> 24; } } else { data[l++] = op->operands[0].reg << 3 | op->operands[1].regs[0]; if (op->operands[1].regs[0] == X86R_ESP) { data[l++] = 0x24; } } } } return l; }
C
radare2
0
CVE-2011-1292
https://www.cvedetails.com/cve/CVE-2011-1292/
CWE-399
https://github.com/chromium/chromium/commit/5f372f899b8709dac700710b5f0f90959dcf9ecb
5f372f899b8709dac700710b5f0f90959dcf9ecb
Add support for autofill server experiments BUG=none TEST=unit_tests --gtest_filter=AutoFillMetricsTest.QualityMetricsWithExperimentId:AutoFillQueryXmlParserTest.ParseExperimentId Review URL: http://codereview.chromium.org/6260027 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@73216 0039d316-1c4b-4281-b951-d872f2087c98
virtual ~AutoFillMetricsTest() { autofill_manager_.reset(NULL); test_personal_data_ = NULL; }
virtual ~AutoFillMetricsTest() { autofill_manager_.reset(NULL); test_personal_data_ = NULL; }
C
Chrome
0
CVE-2016-10012
https://www.cvedetails.com/cve/CVE-2016-10012/
CWE-119
https://github.com/openbsd/src/commit/3095060f479b86288e31c79ecbc5131a66bcd2f9
3095060f479b86288e31c79ecbc5131a66bcd2f9
Remove support for pre-authentication compression. Doing compression early in the protocol probably seemed reasonable in the 1990s, but today it's clearly a bad idea in terms of both cryptography (cf. multiple compression oracle attacks in TLS) and attack surface. Moreover, to support it across privilege-separation zlib needed the assistance of a complex shared-memory manager that made the required attack surface considerably larger. Prompted by Guido Vranken pointing out a compiler-elided security check in the shared memory manager found by Stack (http://css.csail.mit.edu/stack/); ok deraadt@ markus@ NB. pre-auth compression has been disabled by default in sshd for >10 years.
int server_match_spec_complete(struct connection_info *ci) { if (ci->user && ci->host && ci->address) return 1; /* complete */ if (!ci->user && !ci->host && !ci->address) return -1; /* empty */ return 0; /* partial */ }
int server_match_spec_complete(struct connection_info *ci) { if (ci->user && ci->host && ci->address) return 1; /* complete */ if (!ci->user && !ci->host && !ci->address) return -1; /* empty */ return 0; /* partial */ }
C
src
0
CVE-2017-15923
https://www.cvedetails.com/cve/CVE-2017-15923/
null
https://cgit.kde.org/konversation.git/commit/?h=1.7&id=6a7f59ee1b9dbc6e5cf9e5f3b306504d02b73ef0
6a7f59ee1b9dbc6e5cf9e5f3b306504d02b73ef0
null
bool IRCView::search(const QString& pattern, bool caseSensitive, bool wholeWords, bool forward, bool fromCursor) { if (pattern.isEmpty()) return true; m_pattern = pattern; m_forward = forward; m_searchFlags = 0; if (caseSensitive) m_searchFlags |= QTextDocument::FindCaseSensitively; if (wholeWords) m_searchFlags |= QTextDocument::FindWholeWords; if (!fromCursor) m_forward ? moveCursor(QTextCursor::Start) : moveCursor(QTextCursor::End); return searchNext(); }
bool IRCView::search(const QString& pattern, bool caseSensitive, bool wholeWords, bool forward, bool fromCursor) { if (pattern.isEmpty()) return true; m_pattern = pattern; m_forward = forward; m_searchFlags = 0; if (caseSensitive) m_searchFlags |= QTextDocument::FindCaseSensitively; if (wholeWords) m_searchFlags |= QTextDocument::FindWholeWords; if (!fromCursor) m_forward ? moveCursor(QTextCursor::Start) : moveCursor(QTextCursor::End); return searchNext(); }
CPP
kde
0
CVE-2013-7421
https://www.cvedetails.com/cve/CVE-2013-7421/
CWE-264
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
crypto: prefix module autoloading with "crypto-" This prefixes all crypto module loading with "crypto-" so we never run the risk of exposing module auto-loading to userspace via a crypto API, as demonstrated by Mathias Krause: https://lkml.org/lkml/2013/3/4/70 Signed-off-by: Kees Cook <keescook@chromium.org> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); u32 *flags = &cipher->base.crt_flags; const u32 *K = (const u32 *)key; u32 tmp[DES3_EDE_EXPKEY_WORDS]; int i, ret; pr_debug(DEV_DBG_NAME " [%s]", __func__); if (keylen != DES3_EDE_KEY_SIZE) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", __func__); return -EINVAL; } /* Checking key interdependency for weak key detection. */ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", __func__); return -EINVAL; } for (i = 0; i < 3; i++) { ret = des_ekey(tmp, key + i*DES_KEY_SIZE); if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; pr_debug(DEV_DBG_NAME " [%s]: " "CRYPTO_TFM_REQ_WEAK_KEY", __func__); return -EINVAL; } } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; ctx->updated = 0; return 0; }
static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); u32 *flags = &cipher->base.crt_flags; const u32 *K = (const u32 *)key; u32 tmp[DES3_EDE_EXPKEY_WORDS]; int i, ret; pr_debug(DEV_DBG_NAME " [%s]", __func__); if (keylen != DES3_EDE_KEY_SIZE) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", __func__); return -EINVAL; } /* Checking key interdependency for weak key detection. */ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", __func__); return -EINVAL; } for (i = 0; i < 3; i++) { ret = des_ekey(tmp, key + i*DES_KEY_SIZE); if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; pr_debug(DEV_DBG_NAME " [%s]: " "CRYPTO_TFM_REQ_WEAK_KEY", __func__); return -EINVAL; } } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; ctx->updated = 0; return 0; }
C
linux
0
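The crypto record above namespaces module auto-loading with a "crypto-" prefix, so a user-supplied algorithm name can only resolve to modules that deliberately declared a crypto alias. The sketch below mimics only the string handling of that idea in ordinary userspace C; it is not the kernel's request_module() path, and the rejection rule is an assumption added for the example.
#include <stdio.h>
#include <string.h>

/* Build the namespaced request string: "des3_ede" -> "crypto-des3_ede". */
static int build_crypto_module_request(const char *alg, char *buf, size_t len)
{
    int n;

    if (strpbrk(alg, "/ \t\n") != NULL)   /* refuse obviously bad names */
        return -1;
    n = snprintf(buf, len, "crypto-%s", alg);
    return (n < 0 || (size_t)n >= len) ? -1 : 0;
}

int main(void)
{
    char req[64];

    if (build_crypto_module_request("des3_ede", req, sizeof(req)) == 0)
        printf("module alias to request: %s\n", req);
    return 0;
}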
CVE-2018-20784
https://www.cvedetails.com/cve/CVE-2018-20784/
CWE-400
https://github.com/torvalds/linux/commit/c40f7d74c741a907cfaeb73a7697081881c497d0
c40f7d74c741a907cfaeb73a7697081881c497d0
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the scheduler under high loads, starting at around the v4.18 time frame, and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list manipulation. Do a (manual) revert of: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") It turns out that the list_del_leaf_cfs_rq() introduced by this commit is a surprising property that was not considered in followup commits such as: 9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list") As Vincent Guittot explains: "I think that there is a bigger problem with commit a9e7f6544b9c and cfs_rq throttling: Let take the example of the following topology TG2 --> TG1 --> root: 1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1 cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in one path because it has never been used and can't be throttled so tmp_alone_branch will point to leaf_cfs_rq_list at the end. 2) Then TG1 is throttled 3) and we add TG3 as a new child of TG1. 4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list. With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list. So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1 cfs_rq is removed from the list. Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list but tmp_alone_branch still points to TG3 cfs_rq because its throttled parent can't be enqueued when the lock is released. tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should. So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch points on another TG cfs_rq, the next TG cfs_rq that will be added, will be linked outside rq->leaf_cfs_rq_list - which is bad. In addition, we can break the ordering of the cfs_rq in rq->leaf_cfs_rq_list but this ordering is used to update and propagate the update from leaf down to root." Instead of trying to work through all these cases and trying to reproduce the very high loads that produced the lockup to begin with, simplify the code temporarily by reverting a9e7f6544b9c - which change was clearly not thought through completely. This (hopefully) gives us a kernel that doesn't lock up so people can continue to enjoy their holidays without worrying about regressions. ;-) [ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ] Analyzed-by: Xie XiuQi <xiexiuqi@huawei.com> Analyzed-by: Vincent Guittot <vincent.guittot@linaro.org> Reported-by: Zhipeng Xie <xiezhipeng1@huawei.com> Reported-by: Sargun Dhillon <sargun@sargun.me> Reported-by: Xie XiuQi <xiexiuqi@huawei.com> Tested-by: Zhipeng Xie <xiezhipeng1@huawei.com> Tested-by: Sargun Dhillon <sargun@sargun.me> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Acked-by: Vincent Guittot <vincent.guittot@linaro.org> Cc: <stable@vger.kernel.org> # v4.13+ Cc: Bin Li <huawei.libin@huawei.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Tejun Heo <tj@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") Link: http://lkml.kernel.org/r/1545879866-27809-1-git-send-email-xiexiuqi@huawei.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
static unsigned long capacity_spare_without(int cpu, struct task_struct *p) { return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0); }
static unsigned long capacity_spare_without(int cpu, struct task_struct *p) { return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0); }
C
linux
0
CVE-2012-2862
https://www.cvedetails.com/cve/CVE-2012-2862/
CWE-399
https://github.com/chromium/chromium/commit/c4f40933f2cd7f975af63e56ea4cdcdc6c636f73
c4f40933f2cd7f975af63e56ea4cdcdc6c636f73
accelerators: Remove deprecated Accelerator ctor that takes booleans. BUG=128242 R=ben@chromium.org Review URL: https://chromiumcodereview.appspot.com/10399085 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137957 0039d316-1c4b-4281-b951-d872f2087c98
virtual ~DummyCapsLockDelegate() {}
virtual ~DummyCapsLockDelegate() {}
C
Chrome
0
CVE-2012-1179
https://www.cvedetails.com/cve/CVE-2012-1179/
CWE-264
https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850
4a1d704194a441bf83c636004a479e01360ec850
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream. In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 
143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. 
====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) { pgd_t *pgd = pgd_offset(vma->vm_mm, addr); pud_t *pud = pud_offset(pgd, addr); pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; return; } if (nr_unshown) { printk(KERN_ALERT "BUG: Bad page map: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; index = linear_page_index(vma, addr); printk(KERN_ALERT "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); if (page) dump_page(page); printk(KERN_ALERT "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); /* * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y */ if (vma->vm_ops) print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n", (unsigned long)vma->vm_ops->fault); if (vma->vm_file && vma->vm_file->f_op) print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n", (unsigned long)vma->vm_file->f_op->mmap); dump_stack(); add_taint(TAINT_BAD_PAGE); }
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) { pgd_t *pgd = pgd_offset(vma->vm_mm, addr); pud_t *pud = pud_offset(pgd, addr); pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; return; } if (nr_unshown) { printk(KERN_ALERT "BUG: Bad page map: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; index = linear_page_index(vma, addr); printk(KERN_ALERT "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); if (page) dump_page(page); printk(KERN_ALERT "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); /* * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y */ if (vma->vm_ops) print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n", (unsigned long)vma->vm_ops->fault); if (vma->vm_file && vma->vm_file->f_op) print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n", (unsigned long)vma->vm_file->f_op->mmap); dump_stack(); add_taint(TAINT_BAD_PAGE); }
C
linux
0
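The fix described in the commit message for this record amounts to reading *pmd once onto the local stack (with a compiler barrier under THP) so a concurrent huge-page fault can no longer change the value between the "is it huge?" and "is it bad?" checks. The kernel-style sketch below illustrates that idea; it is modeled on the helper the upstream patch introduces rather than copied verbatim from this record, and it is not a standalone program.

/*
 * Sketch of the "snapshot the pmd on the stack" fix described above.
 * Callers holding mmap_sem only for reading use this instead of
 * pmd_none_or_clear_bad(), so a pmd that turns transparent-huge under
 * them is simply skipped instead of tripping pmd_bad()/pmd_clear_bad().
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;	/* single read; only this copy is tested below */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();		/* stop the compiler from re-reading *pmd */
#endif
	if (pmd_none(pmdval))
		return 1;	/* nothing mapped: caller skips this range */
	if (unlikely(pmd_bad(pmdval))) {
		if (!pmd_trans_huge(pmdval))
			pmd_clear_bad(pmd);	/* genuinely corrupt entry */
		return 1;	/* huge or bad: caller skips it either way */
	}
	return 0;		/* regular page table: safe to walk */
}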
CVE-2014-3122
https://www.cvedetails.com/cve/CVE-2014-3122/
CWE-264
https://github.com/torvalds/linux/commit/57e68e9cd65b4b8eb4045a1e0d0746458502554c
57e68e9cd65b4b8eb4045a1e0d0746458502554c
mm: try_to_unmap_cluster() should lock_page() before mlocking A BUG_ON(!PageLocked) was triggered in mlock_vma_page() by Sasha Levin fuzzing with trinity. The call site try_to_unmap_cluster() does not lock the pages other than its check_page parameter (which is already locked). The BUG_ON in mlock_vma_page() is not documented and its purpose is somewhat unclear, but apparently it serializes against page migration, which could otherwise fail to transfer the PG_mlocked flag. This would not be fatal, as the page would be eventually encountered again, but NR_MLOCK accounting would become distorted nevertheless. This patch adds a comment to the BUG_ON in mlock_vma_page() and munlock_vma_page() to that effect. The call site try_to_unmap_cluster() is fixed so that for page != check_page, trylock_page() is attempted (to avoid possible deadlocks as we already have check_page locked) and mlock_vma_page() is performed only upon success. If the page lock cannot be obtained, the page is left without PG_mlocked, which is again not a problem in the whole unevictable memory design. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Bob Liu <bob.liu@oracle.com> Reported-by: Sasha Levin <sasha.levin@oracle.com> Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com> Cc: Michel Lespinasse <walken@google.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static void __page_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); if (PageAnon(page)) return; /* * If the page isn't exclusively mapped into this vma, * we must use the _oldest_ possible anon_vma for the * page mapping! */ if (!exclusive) anon_vma = anon_vma->root; anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; page->index = linear_page_index(vma, address); }
static void __page_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); if (PageAnon(page)) return; /* * If the page isn't exclusively mapped into this vma, * we must use the _oldest_ possible anon_vma for the * page mapping! */ if (!exclusive) anon_vma = anon_vma->root; anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; page->index = linear_page_index(vma, address); }
C
linux
0
CVE-2013-2861
https://www.cvedetails.com/cve/CVE-2013-2861/
CWE-399
https://github.com/chromium/chromium/commit/508b89a64ab700aa09f21fc666a5588b47360eab
508b89a64ab700aa09f21fc666a5588b47360eab
Upgrade old app host to new app launcher on startup This patch is a continuation of https://codereview.chromium.org/16805002/. BUG=248825 Review URL: https://chromiumcodereview.appspot.com/17022015 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@209604 0039d316-1c4b-4281-b951-d872f2087c98
bool IsAppLauncherPresent() { return GetAppLauncherInstallationState() != NOT_INSTALLED; }
bool IsAppLauncherPresent() { return GetAppLauncherInstallationState() != NOT_INSTALLED; }
C
Chrome
0
CVE-2013-1956
https://www.cvedetails.com/cve/CVE-2013-1956/
CWE-264
https://github.com/torvalds/linux/commit/3151527ee007b73a0ebd296010f1c0454a919c7d
3151527ee007b73a0ebd296010f1c0454a919c7d
userns: Don't allow creation if the user is chrooted Guarantee that the policy of which files may be access that is established by setting the root directory will not be violated by user namespaces by verifying that the root directory points to the root of the mount namespace at the time of user namespace creation. Changing the root is a privileged operation, and as a matter of policy it serves to limit unprivileged processes to files below the current root directory. For reasons of simplicity and comprehensibility the privilege to change the root directory is gated solely on the CAP_SYS_CHROOT capability in the user namespace. Therefore when creating a user namespace we must ensure that the policy of which files may be access can not be violated by changing the root directory. Anyone who runs a processes in a chroot and would like to use user namespace can setup the same view of filesystems with a mount namespace instead. With this result that this is not a practical limitation for using user namespaces. Cc: stable@vger.kernel.org Acked-by: Serge Hallyn <serge.hallyn@canonical.com> Reported-by: Andy Lutomirski <luto@amacapital.net> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
static ssize_t map_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos, int cap_setid, struct uid_gid_map *map, struct uid_gid_map *parent_map) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct uid_gid_map new_map; unsigned idx; struct uid_gid_extent *extent = NULL; unsigned long page = 0; char *kbuf, *pos, *next_line; ssize_t ret = -EINVAL; /* * The id_map_mutex serializes all writes to any given map. * * Any map is only ever written once. * * An id map fits within 1 cache line on most architectures. * * On read nothing needs to be done unless you are on an * architecture with a crazy cache coherency model like alpha. * * There is a one time data dependency between reading the * count of the extents and the values of the extents. The * desired behavior is to see the values of the extents that * were written before the count of the extents. * * To achieve this smp_wmb() is used on guarantee the write * order and smp_read_barrier_depends() is guaranteed that we * don't have crazy architectures returning stale data. * */ mutex_lock(&id_map_mutex); ret = -EPERM; /* Only allow one successful write to the map */ if (map->nr_extents != 0) goto out; /* Require the appropriate privilege CAP_SETUID or CAP_SETGID * over the user namespace in order to set the id mapping. */ if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) goto out; /* Get a buffer */ ret = -ENOMEM; page = __get_free_page(GFP_TEMPORARY); kbuf = (char *) page; if (!page) goto out; /* Only allow <= page size writes at the beginning of the file */ ret = -EINVAL; if ((*ppos != 0) || (count >= PAGE_SIZE)) goto out; /* Slurp in the user data */ ret = -EFAULT; if (copy_from_user(kbuf, buf, count)) goto out; kbuf[count] = '\0'; /* Parse the user data */ ret = -EINVAL; pos = kbuf; new_map.nr_extents = 0; for (;pos; pos = next_line) { extent = &new_map.extent[new_map.nr_extents]; /* Find the end of line and ensure I don't look past it */ next_line = strchr(pos, '\n'); if (next_line) { *next_line = '\0'; next_line++; if (*next_line == '\0') next_line = NULL; } pos = skip_spaces(pos); extent->first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->lower_first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->count = simple_strtoul(pos, &pos, 10); if (*pos && !isspace(*pos)) goto out; /* Verify there is not trailing junk on the line */ pos = skip_spaces(pos); if (*pos != '\0') goto out; /* Verify we have been given valid starting values */ if ((extent->first == (u32) -1) || (extent->lower_first == (u32) -1 )) goto out; /* Verify count is not zero and does not cause the extent to wrap */ if ((extent->first + extent->count) <= extent->first) goto out; if ((extent->lower_first + extent->count) <= extent->lower_first) goto out; /* Do the ranges in extent overlap any previous extents? */ if (mappings_overlap(&new_map, extent)) goto out; new_map.nr_extents++; /* Fail if the file contains too many extents */ if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) && (next_line != NULL)) goto out; } /* Be very certaint the new map actually exists */ if (new_map.nr_extents == 0) goto out; ret = -EPERM; /* Validate the user is allowed to use user id's mapped to. */ if (!new_idmap_permitted(ns, cap_setid, &new_map)) goto out; /* Map the lower ids from the parent user namespace to the * kernel global id space. 
*/ for (idx = 0; idx < new_map.nr_extents; idx++) { u32 lower_first; extent = &new_map.extent[idx]; lower_first = map_id_range_down(parent_map, extent->lower_first, extent->count); /* Fail if we can not map the specified extent to * the kernel global id space. */ if (lower_first == (u32) -1) goto out; extent->lower_first = lower_first; } /* Install the map */ memcpy(map->extent, new_map.extent, new_map.nr_extents*sizeof(new_map.extent[0])); smp_wmb(); map->nr_extents = new_map.nr_extents; *ppos = count; ret = count; out: mutex_unlock(&id_map_mutex); if (page) free_page(page); return ret; }
static ssize_t map_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos, int cap_setid, struct uid_gid_map *map, struct uid_gid_map *parent_map) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct uid_gid_map new_map; unsigned idx; struct uid_gid_extent *extent = NULL; unsigned long page = 0; char *kbuf, *pos, *next_line; ssize_t ret = -EINVAL; /* * The id_map_mutex serializes all writes to any given map. * * Any map is only ever written once. * * An id map fits within 1 cache line on most architectures. * * On read nothing needs to be done unless you are on an * architecture with a crazy cache coherency model like alpha. * * There is a one time data dependency between reading the * count of the extents and the values of the extents. The * desired behavior is to see the values of the extents that * were written before the count of the extents. * * To achieve this smp_wmb() is used on guarantee the write * order and smp_read_barrier_depends() is guaranteed that we * don't have crazy architectures returning stale data. * */ mutex_lock(&id_map_mutex); ret = -EPERM; /* Only allow one successful write to the map */ if (map->nr_extents != 0) goto out; /* Require the appropriate privilege CAP_SETUID or CAP_SETGID * over the user namespace in order to set the id mapping. */ if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) goto out; /* Get a buffer */ ret = -ENOMEM; page = __get_free_page(GFP_TEMPORARY); kbuf = (char *) page; if (!page) goto out; /* Only allow <= page size writes at the beginning of the file */ ret = -EINVAL; if ((*ppos != 0) || (count >= PAGE_SIZE)) goto out; /* Slurp in the user data */ ret = -EFAULT; if (copy_from_user(kbuf, buf, count)) goto out; kbuf[count] = '\0'; /* Parse the user data */ ret = -EINVAL; pos = kbuf; new_map.nr_extents = 0; for (;pos; pos = next_line) { extent = &new_map.extent[new_map.nr_extents]; /* Find the end of line and ensure I don't look past it */ next_line = strchr(pos, '\n'); if (next_line) { *next_line = '\0'; next_line++; if (*next_line == '\0') next_line = NULL; } pos = skip_spaces(pos); extent->first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->lower_first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->count = simple_strtoul(pos, &pos, 10); if (*pos && !isspace(*pos)) goto out; /* Verify there is not trailing junk on the line */ pos = skip_spaces(pos); if (*pos != '\0') goto out; /* Verify we have been given valid starting values */ if ((extent->first == (u32) -1) || (extent->lower_first == (u32) -1 )) goto out; /* Verify count is not zero and does not cause the extent to wrap */ if ((extent->first + extent->count) <= extent->first) goto out; if ((extent->lower_first + extent->count) <= extent->lower_first) goto out; /* Do the ranges in extent overlap any previous extents? */ if (mappings_overlap(&new_map, extent)) goto out; new_map.nr_extents++; /* Fail if the file contains too many extents */ if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) && (next_line != NULL)) goto out; } /* Be very certaint the new map actually exists */ if (new_map.nr_extents == 0) goto out; ret = -EPERM; /* Validate the user is allowed to use user id's mapped to. */ if (!new_idmap_permitted(ns, cap_setid, &new_map)) goto out; /* Map the lower ids from the parent user namespace to the * kernel global id space. 
*/ for (idx = 0; idx < new_map.nr_extents; idx++) { u32 lower_first; extent = &new_map.extent[idx]; lower_first = map_id_range_down(parent_map, extent->lower_first, extent->count); /* Fail if we can not map the specified extent to * the kernel global id space. */ if (lower_first == (u32) -1) goto out; extent->lower_first = lower_first; } /* Install the map */ memcpy(map->extent, new_map.extent, new_map.nr_extents*sizeof(new_map.extent[0])); smp_wmb(); map->nr_extents = new_map.nr_extents; *ppos = count; ret = count; out: mutex_unlock(&id_map_mutex); if (page) free_page(page); return ret; }
C
linux
0
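The map_write() function shown for this record is surrounding context; the fix the commit message describes is a single policy check at user-namespace creation time: refuse to create the namespace if the caller's root directory is not the root of its mount namespace. A hedged sketch of that guard follows; current_chrooted() is the helper the description implies, and the rest of the function body is abbreviated.

/* Sketch of the guard at the top of create_user_ns(): a chrooted process
 * must not gain a user namespace, because CAP_SYS_CHROOT inside it could
 * otherwise be used to escape the chroot policy set up by its parent. */
int create_user_ns(struct cred *new)
{
	/* The root directory must be the root of the mount namespace,
	 * otherwise creating the namespace could violate the file-access
	 * policy established by chroot. */
	if (current_chrooted())
		return -EPERM;

	/* ... original namespace allocation and id-map setup ... */
	return 0;
}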
CVE-2013-6381
https://www.cvedetails.com/cve/CVE-2013-6381/
CWE-119
https://github.com/torvalds/linux/commit/6fb392b1a63ae36c31f62bc3fc8630b49d602b62
6fb392b1a63ae36c31f62bc3fc8630b49d602b62
qeth: avoid buffer overflow in snmp ioctl Check user-defined length in snmp ioctl request and allow request only if it fits into a qeth command buffer. Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com> Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com> Reviewed-by: Heiko Carstens <heicars2@linux.vnet.ibm.com> Reported-by: Nico Golde <nico@ngolde.de> Reported-by: Fabian Yamaguchi <fabs@goesec.de> Cc: <stable@vger.kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
void qeth_clear_cmd_buffers(struct qeth_channel *channel) { int cnt; for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) qeth_release_buffer(channel, &channel->iob[cnt]); channel->buf_no = 0; channel->io_buf_no = 0; }
void qeth_clear_cmd_buffers(struct qeth_channel *channel) { int cnt; for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) qeth_release_buffer(channel, &channel->iob[cnt]); channel->buf_no = 0; channel->io_buf_no = 0; }
C
linux
0
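The function captured for this record is only nearby context; the overflow itself was in the SNMP ioctl path, where a user-controlled length was trusted when copying the request into a fixed-size command buffer. Below is a generic, hedged sketch of the bound check the commit message describes; the macro and parameter names are placeholders, not the real qeth identifiers.

#include <linux/uaccess.h>	/* copy_from_user */
#include <linux/errno.h>

#define CMD_BUFSIZE	4096	/* placeholder: size of the fixed command buffer */
#define CMD_HDR_SIZE	64	/* placeholder: headers that precede the payload */

/* Reject any SNMP request whose user-supplied length cannot fit in the
 * command buffer *before* copying it, which is the check the fix adds. */
static int snmp_copy_request(char *cmd_buf, const void __user *udata,
			     unsigned int req_len)
{
	if (req_len > CMD_BUFSIZE - CMD_HDR_SIZE)
		return -EINVAL;
	if (copy_from_user(cmd_buf + CMD_HDR_SIZE, udata, req_len))
		return -EFAULT;
	return 0;
}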
CVE-2018-19045
https://www.cvedetails.com/cve/CVE-2018-19045/
CWE-200
https://github.com/acassen/keepalived/commit/c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
Add command line and configuration option to set umask Issue #1048 identified that files created by keepalived are created with mode 0666. This commit changes the default to 0644, and also allows the umask to be specified in the configuration or as a command line option. Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
get_netlink_rcv_bufs_size(vector_t *strvec, const char *type) { unsigned val; if (!strvec) return 0; if (vector_size(strvec) < 2) { report_config_error(CONFIG_GENERAL_ERROR, "%s_rcv_bufs size missing", type); return 0; } if (!read_unsigned_strvec(strvec, 1, &val, 0, UINT_MAX, false)) { report_config_error(CONFIG_GENERAL_ERROR, "%s_rcv_bufs size (%s) invalid", type, FMT_STR_VSLOT(strvec, 1)); return 0; } return val; }
get_netlink_rcv_bufs_size(vector_t *strvec, const char *type) { unsigned val; if (!strvec) return 0; if (vector_size(strvec) < 2) { report_config_error(CONFIG_GENERAL_ERROR, "%s_rcv_bufs size missing", type); return 0; } if (!read_unsigned_strvec(strvec, 1, &val, 0, UINT_MAX, false)) { report_config_error(CONFIG_GENERAL_ERROR, "%s_rcv_bufs size (%s) invalid", type, FMT_STR_VSLOT(strvec, 1)); return 0; } return val; }
C
keepalived
0
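The parser helper stored for this record is unrelated context; the fix is about process-wide file-creation permissions. keepalived previously let files be created 0666, and the change masks group/other write bits by default (0644), optionally overridden from the configuration or command line. A small hedged sketch of that default using standard umask(2) follows; the idea of a configurable override is illustrative.

#include <sys/stat.h>

/* Default umask: mask group/other write so created files come out 0644
 * rather than 0666.  A value parsed from the configuration or a command
 * line option would simply replace create_umask before this call. */
static mode_t create_umask = S_IWGRP | S_IWOTH;	/* i.e. 022 */

static void set_process_umask(void)
{
	umask(create_umask);
}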
CVE-2015-3215
https://www.cvedetails.com/cve/CVE-2015-3215/
CWE-20
https://github.com/YanVugenfirer/kvm-guest-drivers-windows/commit/723416fa4210b7464b28eab89cc76252e6193ac1
723416fa4210b7464b28eab89cc76252e6193ac1
NetKVM: BZ#1169718: Checking the length only on read Signed-off-by: Joseph Hindin <yhindin@rehat.com>
bool CParaNdisTX::RestartQueue(bool DoKick) { TSpinLocker LockedContext(m_Lock); auto res = ParaNdis_SynchronizeWithInterrupt(m_Context, m_messageIndex, CParaNdisTX::RestartQueueSynchronously, this) ? true : false; if(DoKick) { Kick(); } return res; }
bool CParaNdisTX::RestartQueue(bool DoKick) { TSpinLocker LockedContext(m_Lock); auto res = ParaNdis_SynchronizeWithInterrupt(m_Context, m_messageIndex, CParaNdisTX::RestartQueueSynchronously, this) ? true : false; if(DoKick) { Kick(); } return res; }
C
kvm-guest-drivers-windows
0
CVE-2013-0842
https://www.cvedetails.com/cve/CVE-2013-0842/
null
https://github.com/chromium/chromium/commit/10cbaf017570ba6454174c55b844647aa6a9b3b4
10cbaf017570ba6454174c55b844647aa6a9b3b4
Validate that paths don't contain embedded NULLs at deserialization. BUG=166867 Review URL: https://chromiumcodereview.appspot.com/11743009 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@174935 0039d316-1c4b-4281-b951-d872f2087c98
void ParamTraits<base::PlatformFileInfo>::Log(const param_type& p, std::string* l) { l->append("("); LogParam(p.size, l); l->append(","); LogParam(p.is_directory, l); l->append(","); LogParam(p.last_modified.ToDoubleT(), l); l->append(","); LogParam(p.last_accessed.ToDoubleT(), l); l->append(","); LogParam(p.creation_time.ToDoubleT(), l); l->append(")"); }
void ParamTraits<base::PlatformFileInfo>::Log(const param_type& p, std::string* l) { l->append("("); LogParam(p.size, l); l->append(","); LogParam(p.is_directory, l); l->append(","); LogParam(p.last_modified.ToDoubleT(), l); l->append(","); LogParam(p.last_accessed.ToDoubleT(), l); l->append(","); LogParam(p.creation_time.ToDoubleT(), l); l->append(")"); }
C
Chrome
0
CVE-2016-6307
https://www.cvedetails.com/cve/CVE-2016-6307/
CWE-400
https://git.openssl.org/?p=openssl.git;a=commit;h=4b390b6c3f8df925dc92a3dd6b022baa9a2f4650
4b390b6c3f8df925dc92a3dd6b022baa9a2f4650
null
int tls_construct_change_cipher_spec(SSL *s) { unsigned char *p; p = (unsigned char *)s->init_buf->data; *p = SSL3_MT_CCS; s->init_num = 1; s->init_off = 0; return 1; }
int tls_construct_change_cipher_spec(SSL *s) { unsigned char *p; p = (unsigned char *)s->init_buf->data; *p = SSL3_MT_CCS; s->init_num = 1; s->init_off = 0; return 1; }
C
openssl
0
CVE-2016-1907
https://www.cvedetails.com/cve/CVE-2016-1907/
CWE-119
https://anongit.mindrot.org/openssh.git/commit/?id=2fecfd486bdba9f51b3a789277bb0733ca36e1c0
2fecfd486bdba9f51b3a789277bb0733ca36e1c0
null
sshpkt_put_string(struct ssh *ssh, const void *v, size_t len) { return sshbuf_put_string(ssh->state->outgoing_packet, v, len); }
sshpkt_put_string(struct ssh *ssh, const void *v, size_t len) { return sshbuf_put_string(ssh->state->outgoing_packet, v, len); }
C
mindrot
0
CVE-2019-12439
https://www.cvedetails.com/cve/CVE-2019-12439/
CWE-20
https://github.com/projectatomic/bubblewrap/commit/efc89e3b939b4bde42c10f065f6b7b02958ed50e
efc89e3b939b4bde42c10f065f6b7b02958ed50e
Don't create our own temporary mount point for pivot_root An attacker could pre-create /tmp/.bubblewrap-$UID and make it a non-directory, non-symlink (in which case mounting our tmpfs would fail, causing denial of service), or make it a symlink under their control (potentially allowing bad things if the protected_symlinks sysctl is not enabled). Instead, temporarily mount the tmpfs on a directory that we are sure exists and is not attacker-controlled. /tmp (the directory itself, not a subdirectory) will do. Fixes: #304 Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=923557 Signed-off-by: Simon McVittie <smcv@debian.org> Closes: #305 Approved by: cgwalters
close_ops_fd (void) { SetupOp *op; for (op = ops; op != NULL; op = op->next) { if (op->fd != -1) { (void) close (op->fd); op->fd = -1; } } }
close_ops_fd (void) { SetupOp *op; for (op = ops; op != NULL; op = op->next) { if (op->fd != -1) { (void) close (op->fd); op->fd = -1; } } }
C
bubblewrap
0
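The cleanup routine in this record is just context; the vulnerability was in where bubblewrap mounted its scratch tmpfs before pivot_root. Per the commit message, the essence of the fix is to stop using a predictable, attacker-precreatable /tmp/.bubblewrap-$UID and instead mount the tmpfs over a directory that is guaranteed to exist and is not attacker-controlled (/tmp itself). A hedged sketch of that pattern, not the exact upstream code:

#include <sys/mount.h>
#include <stdio.h>

/* Mount the private scratch tmpfs directly over /tmp instead of over a
 * world-writable, precreatable subdirectory.  Returns 0 on success. */
static int mount_scratch_tmpfs(void)
{
	if (mount("tmpfs", "/tmp", "tmpfs",
		  MS_NODEV | MS_NOSUID, "mode=0700") != 0) {
		perror("mount tmpfs");
		return -1;
	}
	return 0;
}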
CVE-2012-5148
https://www.cvedetails.com/cve/CVE-2012-5148/
CWE-20
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
e89cfcb9090e8c98129ae9160c513f504db74599
Remove TabContents from TabStripModelObserver::TabDetachedAt. BUG=107201 TEST=no visible change Review URL: https://chromiumcodereview.appspot.com/11293205 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
void BrowserWindowGtk::TabDetachedAt(WebContents* contents, int index) { if (index == browser_->active_index()) { infobar_container_->ChangeTabContents(NULL); UpdateDevToolsForContents(NULL); } contents_container_->DetachTab(contents); }
void BrowserWindowGtk::TabDetachedAt(TabContents* contents, int index) { if (index == browser_->active_index()) { infobar_container_->ChangeTabContents(NULL); UpdateDevToolsForContents(NULL); } contents_container_->DetachTab(contents); }
C
Chrome
1
CVE-2019-13307
https://www.cvedetails.com/cve/CVE-2019-13307/
CWE-119
https://github.com/ImageMagick/ImageMagick/commit/025e77fcb2f45b21689931ba3bf74eac153afa48
025e77fcb2f45b21689931ba3bf74eac153afa48
https://github.com/ImageMagick/ImageMagick/issues/1615
static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; channels++; } return((size_t) (channels == 0 ? 1 : channels)); }
static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; channels++; } return((size_t) (channels == 0 ? 1 : channels)); }
C
ImageMagick6
0
CVE-2016-7513
https://www.cvedetails.com/cve/CVE-2016-7513/
CWE-189
https://github.com/ImageMagick/ImageMagick/commit/a54fe0e8600eaf3dc6fe717d3c0398001507f723
a54fe0e8600eaf3dc6fe717d3c0398001507f723
null
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image, const ssize_t x,const ssize_t y,MagickPixelPacket *pixel, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); GetMagickPixelPacket(image,pixel); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]); SetMagickPixelPacket(image,pixels,indexes,pixel); return(MagickTrue); }
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image, const ssize_t x,const ssize_t y,MagickPixelPacket *pixel, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); GetMagickPixelPacket(image,pixel); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]); SetMagickPixelPacket(image,pixels,indexes,pixel); return(MagickTrue); }
C
ImageMagick
0
CVE-2011-4621
https://www.cvedetails.com/cve/CVE-2011-4621/
null
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
Sched: fix skip_clock_update optimization idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set. Need resched confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the waking a sleeper before it sucessfully deschedules case by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear unconditionally in schedule() proper instead of conditionally in put_prev_task(). Signed-off-by: Mike Galbraith <efault@gmx.de> Reported-by: Bjoern B. Brandenburg <bbb.lst@gmail.com> Tested-by: Yong Zhang <yong.zhang0@gmail.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: stable@kernel.org LKML-Reference: <1291802742.1417.9.camel@marge.simson.net> Signed-off-by: Ingo Molnar <mingo@elte.hu>
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, u64 reset) { struct cpuacct *ca = cgroup_ca(cgrp); int err = 0; int i; if (reset) { err = -EINVAL; goto out; } for_each_present_cpu(i) cpuacct_cpuusage_write(ca, i, 0); out: return err; }
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, u64 reset) { struct cpuacct *ca = cgroup_ca(cgrp); int err = 0; int i; if (reset) { err = -EINVAL; goto out; } for_each_present_cpu(i) cpuacct_cpuusage_write(ca, i, 0); out: return err; }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/a03d4448faf2c40f4ef444a88cb9aace5b98e8c4
a03d4448faf2c40f4ef444a88cb9aace5b98e8c4
Introduce background.scripts feature for extension manifests. This optimizes for the common use case where background pages just include a reference to one or more script files and no additional HTML. BUG=107791 Review URL: http://codereview.chromium.org/9150008 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@117110 0039d316-1c4b-4281-b951-d872f2087c98
void TestingAutomationProvider::FindTabbedBrowserWindow(int* handle) { *handle = 0; Browser* browser = BrowserList::FindTabbedBrowser(profile_, false); if (browser) *handle = browser_tracker_->Add(browser); }
void TestingAutomationProvider::FindTabbedBrowserWindow(int* handle) { *handle = 0; Browser* browser = BrowserList::FindTabbedBrowser(profile_, false); if (browser) *handle = browser_tracker_->Add(browser); }
C
Chrome
0
CVE-2017-9059
https://www.cvedetails.com/cve/CVE-2017-9059/
CWE-404
https://github.com/torvalds/linux/commit/c70422f760c120480fee4de6c38804c72aa26bc1
c70422f760c120480fee4de6c38804c72aa26bc1
Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux Pull nfsd updates from Bruce Fields: "Another RDMA update from Chuck Lever, and a bunch of miscellaneous bugfixes" * tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits) nfsd: Fix up the "supattr_exclcreat" attributes nfsd: encoders mustn't use unitialized values in error cases nfsd: fix undefined behavior in nfsd4_layout_verify lockd: fix lockd shutdown race NFSv4: Fix callback server shutdown SUNRPC: Refactor svc_set_num_threads() NFSv4.x/callback: Create the callback service through svc_create_pooled lockd: remove redundant check on block svcrdma: Clean out old XDR encoders svcrdma: Remove the req_map cache svcrdma: Remove unused RDMA Write completion handler svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt svcrdma: Clean up RPC-over-RDMA backchannel reply processing svcrdma: Report Write/Reply chunk overruns svcrdma: Clean up RDMA_ERROR path svcrdma: Use rdma_rw API in RPC reply path svcrdma: Introduce local rdma_rw API helpers svcrdma: Clean up svc_rdma_get_inv_rkey() svcrdma: Add helper to save pages under I/O svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT ...
static inline u32 nfsd4_layoutcommit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size + 1 /* locr_newsize */ + 2 /* ns_size */) * sizeof(__be32); }
static inline u32 nfsd4_layoutcommit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size + 1 /* locr_newsize */ + 2 /* ns_size */) * sizeof(__be32); }
C
linux
0
CVE-2013-4113
https://www.cvedetails.com/cve/CVE-2013-4113/
CWE-119
https://git.php.net/?p=php-src.git;a=commit;h=7d163e8a0880ae8af2dd869071393e5dc07ef271
7d163e8a0880ae8af2dd869071393e5dc07ef271
null
PHP_MINFO_FUNCTION(xml) { php_info_print_table_start(); php_info_print_table_row(2, "XML Support", "active"); php_info_print_table_row(2, "XML Namespace Support", "active"); #if defined(LIBXML_DOTTED_VERSION) && defined(LIBXML_EXPAT_COMPAT) php_info_print_table_row(2, "libxml2 Version", LIBXML_DOTTED_VERSION); #else php_info_print_table_row(2, "EXPAT Version", XML_ExpatVersion()); #endif php_info_print_table_end(); }
PHP_MINFO_FUNCTION(xml) { php_info_print_table_start(); php_info_print_table_row(2, "XML Support", "active"); php_info_print_table_row(2, "XML Namespace Support", "active"); #if defined(LIBXML_DOTTED_VERSION) && defined(LIBXML_EXPAT_COMPAT) php_info_print_table_row(2, "libxml2 Version", LIBXML_DOTTED_VERSION); #else php_info_print_table_row(2, "EXPAT Version", XML_ExpatVersion()); #endif php_info_print_table_end(); }
C
php
0
CVE-2011-3896
https://www.cvedetails.com/cve/CVE-2011-3896/
CWE-119
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
5925dff83699508b5e2735afb0297dfb310e159d
Implement a bubble that appears at the top of the screen when a tab enters fullscreen mode via webkitRequestFullScreen(), telling the user how to exit fullscreen. This is implemented as an NSView rather than an NSWindow because the floating chrome that appears in presentation mode should overlap the bubble. Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac: the mode in which the UI is hidden, accessible by moving the cursor to the top of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode. On Lion, however, fullscreen mode does not imply presentation mode: in non-presentation fullscreen mode, the chrome is permanently shown. It is possible to switch between presentation mode and fullscreen mode using the presentation mode UI control. When a tab initiates fullscreen mode on Lion, we enter presentation mode if not in presentation mode already. When the user exits fullscreen mode using Chrome UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we return the user to the mode they were in before the tab entered fullscreen. BUG=14471 TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen. Need to test the Lion logic somehow, with no Lion trybots. BUG=96883 Original review http://codereview.chromium.org/7890056/ TBR=thakis Review URL: http://codereview.chromium.org/7920024 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
TabContents* Browser::AddRestoredTab( const std::vector<TabNavigation>& navigations, int tab_index, int selected_navigation, const std::string& extension_app_id, bool select, bool pin, bool from_last_session, SessionStorageNamespace* session_storage_namespace) { GURL restore_url = navigations.at(selected_navigation).virtual_url(); TabContentsWrapper* wrapper = TabContentsFactory( profile(), tab_util::GetSiteInstanceForNewTab(NULL, profile_, restore_url), MSG_ROUTING_NONE, GetSelectedTabContents(), session_storage_namespace); TabContents* new_tab = wrapper->tab_contents(); wrapper->extension_tab_helper()->SetExtensionAppById(extension_app_id); std::vector<NavigationEntry*> entries; TabNavigation::CreateNavigationEntriesFromTabNavigations( profile_, navigations, &entries); new_tab->controller().Restore( selected_navigation, from_last_session, &entries); DCHECK_EQ(0u, entries.size()); int add_types = select ? TabStripModel::ADD_ACTIVE : TabStripModel::ADD_NONE; if (pin) { tab_index = std::min(tab_index, tabstrip_model()->IndexOfFirstNonMiniTab()); add_types |= TabStripModel::ADD_PINNED; } tab_handler_->GetTabStripModel()->InsertTabContentsAt(tab_index, wrapper, add_types); if (select) { window_->Activate(); } else { new_tab->view()->SizeContents(window_->GetRestoredBounds().size()); new_tab->HideContents(); } SessionService* session_service = SessionServiceFactory::GetForProfileIfExisting(profile_); if (session_service) session_service->TabRestored(wrapper, pin); return new_tab; }
TabContents* Browser::AddRestoredTab( const std::vector<TabNavigation>& navigations, int tab_index, int selected_navigation, const std::string& extension_app_id, bool select, bool pin, bool from_last_session, SessionStorageNamespace* session_storage_namespace) { GURL restore_url = navigations.at(selected_navigation).virtual_url(); TabContentsWrapper* wrapper = TabContentsFactory( profile(), tab_util::GetSiteInstanceForNewTab(NULL, profile_, restore_url), MSG_ROUTING_NONE, GetSelectedTabContents(), session_storage_namespace); TabContents* new_tab = wrapper->tab_contents(); wrapper->extension_tab_helper()->SetExtensionAppById(extension_app_id); std::vector<NavigationEntry*> entries; TabNavigation::CreateNavigationEntriesFromTabNavigations( profile_, navigations, &entries); new_tab->controller().Restore( selected_navigation, from_last_session, &entries); DCHECK_EQ(0u, entries.size()); int add_types = select ? TabStripModel::ADD_ACTIVE : TabStripModel::ADD_NONE; if (pin) { tab_index = std::min(tab_index, tabstrip_model()->IndexOfFirstNonMiniTab()); add_types |= TabStripModel::ADD_PINNED; } tab_handler_->GetTabStripModel()->InsertTabContentsAt(tab_index, wrapper, add_types); if (select) { window_->Activate(); } else { new_tab->view()->SizeContents(window_->GetRestoredBounds().size()); new_tab->HideContents(); } SessionService* session_service = SessionServiceFactory::GetForProfileIfExisting(profile_); if (session_service) session_service->TabRestored(wrapper, pin); return new_tab; }
C
Chrome
0
CVE-2016-1641
https://www.cvedetails.com/cve/CVE-2016-1641/
null
https://github.com/chromium/chromium/commit/75ca8ffd7bd7c58ace1144df05e1307d8d707662
75ca8ffd7bd7c58ace1144df05e1307d8d707662
Don't call WebContents::DownloadImage() callback if the WebContents were deleted BUG=583718 Review URL: https://codereview.chromium.org/1685343004 Cr-Commit-Position: refs/heads/master@{#375700}
void WebContentsImpl::ReloadLoFiImages() { SendToAllFrames(new FrameMsg_ReloadLoFiImages(MSG_ROUTING_NONE)); }
void WebContentsImpl::ReloadLoFiImages() { SendToAllFrames(new FrameMsg_ReloadLoFiImages(MSG_ROUTING_NONE)); }
C
Chrome
0
CVE-2016-1639
https://www.cvedetails.com/cve/CVE-2016-1639/
null
https://github.com/chromium/chromium/commit/c66b1fc49870c514b1c1e8b53498153176d7ec2b
c66b1fc49870c514b1c1e8b53498153176d7ec2b
cros: Check initial auth type when showing views login. Bug: 859611 Change-Id: I0298db9bbf4aed6bd40600aef2e1c5794e8cd058 Reviewed-on: https://chromium-review.googlesource.com/1123056 Reviewed-by: Xiaoyin Hu <xiaoyinh@chromium.org> Commit-Queue: Jacob Dufault <jdufault@chromium.org> Cr-Commit-Position: refs/heads/master@{#572224}
void UserSelectionScreen::OnUserImageChanged(const user_manager::User& user) { if (!handler_) return; handler_->OnUserImageChanged(user); }
void UserSelectionScreen::OnUserImageChanged(const user_manager::User& user) { if (!handler_) return; handler_->OnUserImageChanged(user); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/de485eb849be99305925de2257da3b85325df2fd
de485eb849be99305925de2257da3b85325df2fd
Disable ash configuring display when running mash. When running ash inside of mash_shell on a device, DisplayConfigurator will request a NativeDisplayDelegate from Ozone. Ozone is initialized only in the mus process, not in the ash process, so ash crashes at this point. Add accessor for configure_display_ to avoid crashing. The default display size can be set via command line with --ash-host-window-bounds flag until a better solution exists. BUG=590096 Review URL: https://codereview.chromium.org/1782093003 Cr-Commit-Position: refs/heads/master@{#380663}
void Shell::SetDisplayWorkAreaInsets(Window* contains, const gfx::Insets& insets) { if (!window_tree_host_manager_->UpdateWorkAreaOfDisplayNearestWindow( contains, insets)) { return; } FOR_EACH_OBSERVER(ShellObserver, observers_, OnDisplayWorkAreaInsetsChanged()); }
void Shell::SetDisplayWorkAreaInsets(Window* contains, const gfx::Insets& insets) { if (!window_tree_host_manager_->UpdateWorkAreaOfDisplayNearestWindow( contains, insets)) { return; } FOR_EACH_OBSERVER(ShellObserver, observers_, OnDisplayWorkAreaInsetsChanged()); }
C
Chrome
0
CVE-2017-18236
https://www.cvedetails.com/cve/CVE-2017-18236/
CWE-835
https://cgit.freedesktop.org/exempi/commit/?id=fe59605d3520bf2ca4e0a963d194f10e9fee5806
fe59605d3520bf2ca4e0a963d194f10e9fee5806
null
unsigned int ASF_LegacyManager::GetFieldMaxSize ( fieldType field ) { unsigned int maxSize = 0; switch ( field ) { case fieldCreationDate : maxSize = 8; break; case fieldTitle : case fieldAuthor : case fieldCopyright : case fieldDescription : maxSize = 0xFFFF; break; case fieldCopyrightURL : #if ! Exclude_LicenseURL_Recon case fieldLicenseURL : #endif maxSize = 0xFFFFFFFF; break; default: break; } return maxSize; }
unsigned int ASF_LegacyManager::GetFieldMaxSize ( fieldType field ) { unsigned int maxSize = 0; switch ( field ) { case fieldCreationDate : maxSize = 8; break; case fieldTitle : case fieldAuthor : case fieldCopyright : case fieldDescription : maxSize = 0xFFFF; break; case fieldCopyrightURL : #if ! Exclude_LicenseURL_Recon case fieldLicenseURL : #endif maxSize = 0xFFFFFFFF; break; default: break; } return maxSize; }
CPP
exempi
0
CVE-2017-17858
https://www.cvedetails.com/cve/CVE-2017-17858/
CWE-119
http://git.ghostscript.com/?p=mupdf.git;a=commit;h=55c3f68d638ac1263a386e0aaa004bb6e8bde731
55c3f68d638ac1263a386e0aaa004bb6e8bde731
null
pdf_prime_xref_index(fz_context *ctx, pdf_document *doc) { int i, j; int *idx = doc->xref_index; for (i = doc->num_xref_sections-1; i >= 0; i--) { pdf_xref *xref = &doc->xref_sections[i]; pdf_xref_subsec *subsec = xref->subsec; while (subsec != NULL) { int start = subsec->start; int end = subsec->start + subsec->len; for (j = start; j < end; j++) { char t = subsec->table[j-start].type; if (t != 0 && t != 'f') idx[j] = i; } subsec = subsec->next; } } }
pdf_prime_xref_index(fz_context *ctx, pdf_document *doc) { int i, j; int *idx = doc->xref_index; for (i = doc->num_xref_sections-1; i >= 0; i--) { pdf_xref *xref = &doc->xref_sections[i]; pdf_xref_subsec *subsec = xref->subsec; while (subsec != NULL) { int start = subsec->start; int end = subsec->start + subsec->len; for (j = start; j < end; j++) { char t = subsec->table[j-start].type; if (t != 0 && t != 'f') idx[j] = i; } subsec = subsec->next; } } }
C
ghostscript
0
CVE-2016-3156
https://www.cvedetails.com/cve/CVE-2016-3156/
CWE-399
https://github.com/torvalds/linux/commit/fbd40ea0180a2d328c5adc61414dc8bab9335ce2
fbd40ea0180a2d328c5adc61414dc8bab9335ce2
ipv4: Don't do expensive useless work during inetdev destroy. When an inetdev is destroyed, every address assigned to the interface is removed. And in this scenerio we do two pointless things which can be very expensive if the number of assigned interfaces is large: 1) Address promotion. We are deleting all addresses, so there is no point in doing this. 2) A full nf conntrack table purge for every address. We only need to do this once, as is already caught by the existing masq_dev_notifier so masq_inet_event() can skip this. Reported-by: Solar Designer <solar@openwall.com> Signed-off-by: David S. Miller <davem@davemloft.net> Tested-by: Cyrill Gorcunov <gorcunov@openvz.org>
static int __net_init fib_net_init(struct net *net) { int error; #ifdef CONFIG_IP_ROUTE_CLASSID net->ipv4.fib_num_tclassid_users = 0; #endif error = ip_fib_net_init(net); if (error < 0) goto out; error = nl_fib_lookup_init(net); if (error < 0) goto out_nlfl; error = fib_proc_init(net); if (error < 0) goto out_proc; out: return error; out_proc: nl_fib_lookup_exit(net); out_nlfl: ip_fib_net_exit(net); goto out; }
static int __net_init fib_net_init(struct net *net) { int error; #ifdef CONFIG_IP_ROUTE_CLASSID net->ipv4.fib_num_tclassid_users = 0; #endif error = ip_fib_net_init(net); if (error < 0) goto out; error = nl_fib_lookup_init(net); if (error < 0) goto out_nlfl; error = fib_proc_init(net); if (error < 0) goto out_proc; out: return error; out_proc: nl_fib_lookup_exit(net); out_nlfl: ip_fib_net_exit(net); goto out; }
C
linux
0
CVE-2017-5112
https://www.cvedetails.com/cve/CVE-2017-5112/
CWE-119
https://github.com/chromium/chromium/commit/f6ac1dba5e36f338a490752a2cbef3339096d9fe
f6ac1dba5e36f338a490752a2cbef3339096d9fe
Reset ES3 pixel pack parameters and PIXEL_PACK_BUFFER binding in DrawingBuffer before ReadPixels() and recover them later. BUG=740603 TEST=new conformance test R=kbr@chromium.org,piman@chromium.org Change-Id: I3ea54c6cc34f34e249f7c8b9f792d93c5e1958f4 Reviewed-on: https://chromium-review.googlesource.com/570840 Reviewed-by: Antoine Labour <piman@chromium.org> Reviewed-by: Kenneth Russell <kbr@chromium.org> Commit-Queue: Zhenyao Mo <zmo@chromium.org> Cr-Commit-Position: refs/heads/master@{#486518}
void WebGL2RenderingContextBase::uniform1uiv( const WebGLUniformLocation* location, Vector<GLuint>& value, GLuint src_offset, GLuint src_length) { if (isContextLost() || !ValidateUniformParameters("uniform1uiv", location, value.data(), value.size(), 1, src_offset, src_length)) return; ContextGL()->Uniform1uiv( location->Location(), src_length ? src_length : (value.size() - src_offset), value.data() + src_offset); }
void WebGL2RenderingContextBase::uniform1uiv( const WebGLUniformLocation* location, Vector<GLuint>& value, GLuint src_offset, GLuint src_length) { if (isContextLost() || !ValidateUniformParameters("uniform1uiv", location, value.data(), value.size(), 1, src_offset, src_length)) return; ContextGL()->Uniform1uiv( location->Location(), src_length ? src_length : (value.size() - src_offset), value.data() + src_offset); }
C
Chrome
0
CVE-2016-10250
https://www.cvedetails.com/cve/CVE-2016-10250/
CWE-476
https://github.com/mdadams/jasper/commit/bdfe95a6e81ffb4b2fad31a76b57943695beed20
bdfe95a6e81ffb4b2fad31a76b57943695beed20
Fixed another problem with incorrect cleanup of JP2 box data upon error.
static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_cmap_t *cmap = &box->data.cmap; jp2_cmapent_t *ent; unsigned int i; cmap->numchans = (box->datalen) / 4; if (!(cmap->ents = jas_alloc2(cmap->numchans, sizeof(jp2_cmapent_t)))) { return -1; } for (i = 0; i < cmap->numchans; ++i) { ent = &cmap->ents[i]; if (jp2_getuint16(in, &ent->cmptno) || jp2_getuint8(in, &ent->map) || jp2_getuint8(in, &ent->pcol)) { return -1; } } return 0; }
static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_cmap_t *cmap = &box->data.cmap; jp2_cmapent_t *ent; unsigned int i; cmap->numchans = (box->datalen) / 4; if (!(cmap->ents = jas_alloc2(cmap->numchans, sizeof(jp2_cmapent_t)))) { return -1; } for (i = 0; i < cmap->numchans; ++i) { ent = &cmap->ents[i]; if (jp2_getuint16(in, &ent->cmptno) || jp2_getuint8(in, &ent->map) || jp2_getuint8(in, &ent->pcol)) { return -1; } } return 0; }
C
jasper
0
CVE-2018-6053
https://www.cvedetails.com/cve/CVE-2018-6053/
CWE-200
https://github.com/chromium/chromium/commit/6c6888565ff1fde9ef21ef17c27ad4c8304643d2
6c6888565ff1fde9ef21ef17c27ad4c8304643d2
TopSites: Clear thumbnails from the cache when their URLs get removed We already cleared the thumbnails from persistent storage, but they remained in the in-memory cache, so they remained accessible (until the next Chrome restart) even after all browsing data was cleared. Bug: 758169 Change-Id: Id916d22358430a82e6d5043ac04fa463a32f824f Reviewed-on: https://chromium-review.googlesource.com/758640 Commit-Queue: Marc Treib <treib@chromium.org> Reviewed-by: Sylvain Defresne <sdefresne@chromium.org> Cr-Commit-Position: refs/heads/master@{#514861}
void TopSitesImpl::AddTemporaryThumbnail(const GURL& url, base::RefCountedMemory* thumbnail, const ThumbnailScore& score) { if (temp_images_.size() == kMaxTempTopImages) temp_images_.pop_front(); TempImage image; image.first = url; image.second.thumbnail = thumbnail; image.second.thumbnail_score = score; temp_images_.push_back(image); }
void TopSitesImpl::AddTemporaryThumbnail(const GURL& url, base::RefCountedMemory* thumbnail, const ThumbnailScore& score) { if (temp_images_.size() == kMaxTempTopImages) temp_images_.pop_front(); TempImage image; image.first = url; image.second.thumbnail = thumbnail; image.second.thumbnail_score = score; temp_images_.push_back(image); }
C
Chrome
0
CVE-2011-2858
https://www.cvedetails.com/cve/CVE-2011-2858/
CWE-119
https://github.com/chromium/chromium/commit/c13e1da62b5f5f0e6fe8c1f769a5a28415415244
c13e1da62b5f5f0e6fe8c1f769a5a28415415244
Revert "Revert 100494 - Fix bug in SimulateAttrib0.""" TEST=none BUG=95625 TBR=apatrick@chromium.org Review URL: http://codereview.chromium.org/7796016 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@100507 0039d316-1c4b-4281-b951-d872f2087c98
bool CompareRowSegment( GLint x, GLint y, GLsizei width, const void* data) const { DCHECK(x + width <= width_ || width == 0); return memcmp(data, GetPixelAddress(expected_pixels_, x, y), width * bytes_per_pixel_) == 0; }
bool CompareRowSegment( GLint x, GLint y, GLsizei width, const void* data) const { DCHECK(x + width <= width_ || width == 0); return memcmp(data, GetPixelAddress(expected_pixels_, x, y), width * bytes_per_pixel_) == 0; }
C
Chrome
0
CVE-2016-10197
https://www.cvedetails.com/cve/CVE-2016-10197/
CWE-125
https://github.com/libevent/libevent/commit/ec65c42052d95d2c23d1d837136d1cf1d9ecef9e
ec65c42052d95d2c23d1d837136d1cf1d9ecef9e
evdns: fix searching empty hostnames From #332: Here follows a bug report by **Guido Vranken** via the _Tor bug bounty program_. Please credit Guido accordingly. ## Bug report The DNS code of Libevent contains this rather obvious OOB read: ```c static char * search_make_new(const struct search_state *const state, int n, const char *const base_name) { const size_t base_len = strlen(base_name); const char need_to_append_dot = base_name[base_len - 1] == '.' ? 0 : 1; ``` If the length of ```base_name``` is 0, then line 3125 reads 1 byte before the buffer. This will trigger a crash on ASAN-protected builds. To reproduce: Build libevent with ASAN: ``` $ CFLAGS='-fomit-frame-pointer -fsanitize=address' ./configure && make -j4 ``` Put the attached ```resolv.conf``` and ```poc.c``` in the source directory and then do: ``` $ gcc -fsanitize=address -fomit-frame-pointer poc.c .libs/libevent.a $ ./a.out ================================================================= ==22201== ERROR: AddressSanitizer: heap-buffer-overflow on address 0x60060000efdf at pc 0x4429da bp 0x7ffe1ed47300 sp 0x7ffe1ed472f8 READ of size 1 at 0x60060000efdf thread T0 ``` P.S. we can add a check earlier, but since this is very uncommon, I didn't add it. Fixes: #332
reply_handle(struct request *const req, u16 flags, u32 ttl, struct reply *reply) { int error; char addrbuf[128]; static const int error_codes[] = { DNS_ERR_FORMAT, DNS_ERR_SERVERFAILED, DNS_ERR_NOTEXIST, DNS_ERR_NOTIMPL, DNS_ERR_REFUSED }; ASSERT_LOCKED(req->base); ASSERT_VALID_REQUEST(req); if (flags & 0x020f || !reply || !reply->have_answer) { /* there was an error */ if (flags & 0x0200) { error = DNS_ERR_TRUNCATED; } else if (flags & 0x000f) { u16 error_code = (flags & 0x000f) - 1; if (error_code > 4) { error = DNS_ERR_UNKNOWN; } else { error = error_codes[error_code]; } } else if (reply && !reply->have_answer) { error = DNS_ERR_NODATA; } else { error = DNS_ERR_UNKNOWN; } switch (error) { case DNS_ERR_NOTIMPL: case DNS_ERR_REFUSED: /* we regard these errors as marking a bad nameserver */ if (req->reissue_count < req->base->global_max_reissues) { char msg[64]; evutil_snprintf(msg, sizeof(msg), "Bad response %d (%s)", error, evdns_err_to_string(error)); nameserver_failed(req->ns, msg); if (!request_reissue(req)) return; } break; case DNS_ERR_SERVERFAILED: /* rcode 2 (servfailed) sometimes means "we * are broken" and sometimes (with some binds) * means "that request was very confusing." * Treat this as a timeout, not a failure. */ log(EVDNS_LOG_DEBUG, "Got a SERVERFAILED from nameserver" "at %s; will allow the request to time out.", evutil_format_sockaddr_port_( (struct sockaddr *)&req->ns->address, addrbuf, sizeof(addrbuf))); /* Call the timeout function */ evdns_request_timeout_callback(0, 0, req); return; default: /* we got a good reply from the nameserver: it is up. */ if (req->handle == req->ns->probe_request) { /* Avoid double-free */ req->ns->probe_request = NULL; } nameserver_up(req->ns); } if (req->handle->search_state && req->request_type != TYPE_PTR) { /* if we have a list of domains to search in, * try the next one */ if (!search_try_next(req->handle)) { /* a new request was issued so this * request is finished and */ /* the user callback will be made when * that request (or a */ /* child of it) finishes. */ return; } } /* all else failed. Pass the failure up */ reply_schedule_callback(req, ttl, error, NULL); request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1); } else { /* all ok, tell the user */ reply_schedule_callback(req, ttl, 0, reply); if (req->handle == req->ns->probe_request) req->ns->probe_request = NULL; /* Avoid double-free */ nameserver_up(req->ns); request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1); } }
reply_handle(struct request *const req, u16 flags, u32 ttl, struct reply *reply) { int error; char addrbuf[128]; static const int error_codes[] = { DNS_ERR_FORMAT, DNS_ERR_SERVERFAILED, DNS_ERR_NOTEXIST, DNS_ERR_NOTIMPL, DNS_ERR_REFUSED }; ASSERT_LOCKED(req->base); ASSERT_VALID_REQUEST(req); if (flags & 0x020f || !reply || !reply->have_answer) { /* there was an error */ if (flags & 0x0200) { error = DNS_ERR_TRUNCATED; } else if (flags & 0x000f) { u16 error_code = (flags & 0x000f) - 1; if (error_code > 4) { error = DNS_ERR_UNKNOWN; } else { error = error_codes[error_code]; } } else if (reply && !reply->have_answer) { error = DNS_ERR_NODATA; } else { error = DNS_ERR_UNKNOWN; } switch (error) { case DNS_ERR_NOTIMPL: case DNS_ERR_REFUSED: /* we regard these errors as marking a bad nameserver */ if (req->reissue_count < req->base->global_max_reissues) { char msg[64]; evutil_snprintf(msg, sizeof(msg), "Bad response %d (%s)", error, evdns_err_to_string(error)); nameserver_failed(req->ns, msg); if (!request_reissue(req)) return; } break; case DNS_ERR_SERVERFAILED: /* rcode 2 (servfailed) sometimes means "we * are broken" and sometimes (with some binds) * means "that request was very confusing." * Treat this as a timeout, not a failure. */ log(EVDNS_LOG_DEBUG, "Got a SERVERFAILED from nameserver" "at %s; will allow the request to time out.", evutil_format_sockaddr_port_( (struct sockaddr *)&req->ns->address, addrbuf, sizeof(addrbuf))); /* Call the timeout function */ evdns_request_timeout_callback(0, 0, req); return; default: /* we got a good reply from the nameserver: it is up. */ if (req->handle == req->ns->probe_request) { /* Avoid double-free */ req->ns->probe_request = NULL; } nameserver_up(req->ns); } if (req->handle->search_state && req->request_type != TYPE_PTR) { /* if we have a list of domains to search in, * try the next one */ if (!search_try_next(req->handle)) { /* a new request was issued so this * request is finished and */ /* the user callback will be made when * that request (or a */ /* child of it) finishes. */ return; } } /* all else failed. Pass the failure up */ reply_schedule_callback(req, ttl, error, NULL); request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1); } else { /* all ok, tell the user */ reply_schedule_callback(req, ttl, 0, reply); if (req->handle == req->ns->probe_request) req->ns->probe_request = NULL; /* Avoid double-free */ nameserver_up(req->ns); request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1); } }
C
libevent
0
CVE-2013-6432
https://www.cvedetails.com/cve/CVE-2013-6432/
null
https://github.com/torvalds/linux/commit/cf970c002d270c36202bd5b9c2804d3097a52da0
cf970c002d270c36202bd5b9c2804d3097a52da0
ping: prevent NULL pointer dereference on write to msg_name A plain read() on a socket does set msg->msg_name to NULL. So check for NULL pointer first. Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
int ping_init_sock(struct sock *sk) { struct net *net = sock_net(sk); kgid_t group = current_egid(); struct group_info *group_info = get_current_groups(); int i, j, count = group_info->ngroups; kgid_t low, high; inet_get_ping_group_range_net(net, &low, &high); if (gid_lte(low, group) && gid_lte(group, high)) return 0; for (i = 0; i < group_info->nblocks; i++) { int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); for (j = 0; j < cp_count; j++) { kgid_t gid = group_info->blocks[i][j]; if (gid_lte(low, gid) && gid_lte(gid, high)) return 0; } count -= cp_count; } return -EACCES; }
int ping_init_sock(struct sock *sk) { struct net *net = sock_net(sk); kgid_t group = current_egid(); struct group_info *group_info = get_current_groups(); int i, j, count = group_info->ngroups; kgid_t low, high; inet_get_ping_group_range_net(net, &low, &high); if (gid_lte(low, group) && gid_lte(group, high)) return 0; for (i = 0; i < group_info->nblocks; i++) { int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); for (j = 0; j < cp_count; j++) { kgid_t gid = group_info->blocks[i][j]; if (gid_lte(low, gid) && gid_lte(gid, high)) return 0; } count -= cp_count; } return -EACCES; }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/27c68f543e5eba779902447445dfb05ec3f5bf75
27c68f543e5eba779902447445dfb05ec3f5bf75
Revert of Add accelerated VP9 decode infrastructure and an implementation for VA-API. (patchset #7 id:260001 of https://codereview.chromium.org/1318863003/ ) Reason for revert: I think this patch broke compile step for Chromium Linux ChromeOS MSan Builder. First failing build: http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder/builds/8310 All recent builds: http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder?numbuilds=200 Sorry for the revert. I'll re-revert if I'm wrong. Cheers, Tommy Original issue's description: > Add accelerated VP9 decode infrastructure and an implementation for VA-API. > > - Add a hardware/platform-independent VP9Decoder class and related > infrastructure, implementing AcceleratedVideoDecoder interface. VP9Decoder > performs the initial stages of the decode process, which are to be done > on host/in software, such as stream parsing and reference frame management. > > - Add a VP9Accelerator interface, used by the VP9Decoder to offload the > remaining stages of the decode process to hardware. VP9Accelerator > implementations are platform-specific. > > - Add the first implementation of VP9Accelerator - VaapiVP9Accelerator - and > integrate it with VaapiVideoDecodeAccelerator, for devices which provide > hardware VP9 acceleration through VA-API. Hook it up to the new > infrastructure and VP9Decoder. > > - Extend Vp9Parser to provide functionality required by VP9Decoder and > VP9Accelerator, including superframe parsing, handling of loop filter > and segmentation initialization, state persistence across frames and > resetting when needed. Also add code calculating segmentation dequants > and loop filter levels. > > - Update vp9_parser_unittest to the new Vp9Parser interface and flow. > > TEST=vp9_parser_unittest,vda_unittest,Chrome VP9 playback > BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331 > TBR=dpranke@chromium.org > > Committed: https://crrev.com/e3cc0a661b8abfdc74f569940949bc1f336ece40 > Cr-Commit-Position: refs/heads/master@{#349312} TBR=wuchengli@chromium.org,kcwu@chromium.org,sandersd@chromium.org,jorgelo@chromium.org,posciak@chromium.org NOPRESUBMIT=true NOTREECHECKS=true NOTRY=true BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331 Review URL: https://codereview.chromium.org/1357513002 Cr-Commit-Position: refs/heads/master@{#349443}
VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::CreateVP8Picture() { scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface(); if (!va_surface) return nullptr; return new VaapiVP8Picture(va_surface); }
VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::CreateVP8Picture() { scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface(); if (!va_surface) return nullptr; return new VaapiVP8Picture(va_surface); }
C
Chrome
0
CVE-2013-2861
https://www.cvedetails.com/cve/CVE-2013-2861/
CWE-399
https://github.com/chromium/chromium/commit/508b89a64ab700aa09f21fc666a5588b47360eab
508b89a64ab700aa09f21fc666a5588b47360eab
Upgrade old app host to new app launcher on startup This patch is a continuation of https://codereview.chromium.org/16805002/. BUG=248825 Review URL: https://chromiumcodereview.appspot.com/17022015 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@209604 0039d316-1c4b-4281-b951-d872f2087c98
string16 GetAppModelId() { base::FilePath initial_profile_path; CommandLine* command_line = CommandLine::ForCurrentProcess(); if (command_line->HasSwitch(switches::kUserDataDir)) { initial_profile_path = command_line->GetSwitchValuePath(switches::kUserDataDir).AppendASCII( chrome::kInitialProfile); } return ShellIntegration::GetAppListAppModelIdForProfile(initial_profile_path); }
string16 GetAppModelId() { base::FilePath initial_profile_path; CommandLine* command_line = CommandLine::ForCurrentProcess(); if (command_line->HasSwitch(switches::kUserDataDir)) { initial_profile_path = command_line->GetSwitchValuePath(switches::kUserDataDir).AppendASCII( chrome::kInitialProfile); } return ShellIntegration::GetAppListAppModelIdForProfile(initial_profile_path); }
C
Chrome
0
CVE-2018-6942
https://www.cvedetails.com/cve/CVE-2018-6942/
CWE-476
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=29c759284e305ec428703c9a5831d0b1fc3497ef
29c759284e305ec428703c9a5831d0b1fc3497ef
null
Ins_IUP( TT_ExecContext exc ) { IUP_WorkerRec V; FT_Byte mask; FT_UInt first_point; /* first point of contour */ FT_UInt end_point; /* end point (last+1) of contour */ FT_UInt first_touched; /* first touched point in contour */ FT_UInt cur_touched; /* current touched point in contour */ FT_UInt point; /* current point */ FT_Short contour; /* current contour */ #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL /* See `ttinterp.h' for details on backward compatibility mode. */ /* Allow IUP until it has been called on both axes. Immediately */ /* return on subsequent ones. */ if ( SUBPIXEL_HINTING_MINIMAL && exc->backward_compatibility ) { if ( exc->iupx_called && exc->iupy_called ) return; if ( exc->opcode & 1 ) exc->iupx_called = TRUE; else exc->iupy_called = TRUE; } #endif /* ignore empty outlines */ if ( exc->pts.n_contours == 0 ) return; if ( exc->opcode & 1 ) { mask = FT_CURVE_TAG_TOUCH_X; V.orgs = exc->pts.org; V.curs = exc->pts.cur; V.orus = exc->pts.orus; } else { mask = FT_CURVE_TAG_TOUCH_Y; V.orgs = (FT_Vector*)( (FT_Pos*)exc->pts.org + 1 ); V.curs = (FT_Vector*)( (FT_Pos*)exc->pts.cur + 1 ); V.orus = (FT_Vector*)( (FT_Pos*)exc->pts.orus + 1 ); } V.max_points = exc->pts.n_points; contour = 0; point = 0; #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY if ( SUBPIXEL_HINTING_INFINALITY && exc->ignore_x_mode ) { exc->iup_called = TRUE; if ( exc->sph_tweak_flags & SPH_TWEAK_SKIP_IUP ) return; } #endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */ do { end_point = exc->pts.contours[contour] - exc->pts.first_point; first_point = point; if ( BOUNDS( end_point, exc->pts.n_points ) ) end_point = exc->pts.n_points - 1; while ( point <= end_point && ( exc->pts.tags[point] & mask ) == 0 ) point++; if ( point <= end_point ) { first_touched = point; cur_touched = point; point++; while ( point <= end_point ) { if ( ( exc->pts.tags[point] & mask ) != 0 ) { _iup_worker_interpolate( &V, cur_touched + 1, point - 1, cur_touched, point ); cur_touched = point; } point++; } if ( cur_touched == first_touched ) _iup_worker_shift( &V, first_point, end_point, cur_touched ); else { _iup_worker_interpolate( &V, (FT_UShort)( cur_touched + 1 ), end_point, cur_touched, first_touched ); if ( first_touched > 0 ) _iup_worker_interpolate( &V, first_point, first_touched - 1, cur_touched, first_touched ); } } contour++; } while ( contour < exc->pts.n_contours ); }
Ins_IUP( TT_ExecContext exc ) { IUP_WorkerRec V; FT_Byte mask; FT_UInt first_point; /* first point of contour */ FT_UInt end_point; /* end point (last+1) of contour */ FT_UInt first_touched; /* first touched point in contour */ FT_UInt cur_touched; /* current touched point in contour */ FT_UInt point; /* current point */ FT_Short contour; /* current contour */ #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL /* See `ttinterp.h' for details on backward compatibility mode. */ /* Allow IUP until it has been called on both axes. Immediately */ /* return on subsequent ones. */ if ( SUBPIXEL_HINTING_MINIMAL && exc->backward_compatibility ) { if ( exc->iupx_called && exc->iupy_called ) return; if ( exc->opcode & 1 ) exc->iupx_called = TRUE; else exc->iupy_called = TRUE; } #endif /* ignore empty outlines */ if ( exc->pts.n_contours == 0 ) return; if ( exc->opcode & 1 ) { mask = FT_CURVE_TAG_TOUCH_X; V.orgs = exc->pts.org; V.curs = exc->pts.cur; V.orus = exc->pts.orus; } else { mask = FT_CURVE_TAG_TOUCH_Y; V.orgs = (FT_Vector*)( (FT_Pos*)exc->pts.org + 1 ); V.curs = (FT_Vector*)( (FT_Pos*)exc->pts.cur + 1 ); V.orus = (FT_Vector*)( (FT_Pos*)exc->pts.orus + 1 ); } V.max_points = exc->pts.n_points; contour = 0; point = 0; #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY if ( SUBPIXEL_HINTING_INFINALITY && exc->ignore_x_mode ) { exc->iup_called = TRUE; if ( exc->sph_tweak_flags & SPH_TWEAK_SKIP_IUP ) return; } #endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */ do { end_point = exc->pts.contours[contour] - exc->pts.first_point; first_point = point; if ( BOUNDS( end_point, exc->pts.n_points ) ) end_point = exc->pts.n_points - 1; while ( point <= end_point && ( exc->pts.tags[point] & mask ) == 0 ) point++; if ( point <= end_point ) { first_touched = point; cur_touched = point; point++; while ( point <= end_point ) { if ( ( exc->pts.tags[point] & mask ) != 0 ) { _iup_worker_interpolate( &V, cur_touched + 1, point - 1, cur_touched, point ); cur_touched = point; } point++; } if ( cur_touched == first_touched ) _iup_worker_shift( &V, first_point, end_point, cur_touched ); else { _iup_worker_interpolate( &V, (FT_UShort)( cur_touched + 1 ), end_point, cur_touched, first_touched ); if ( first_touched > 0 ) _iup_worker_interpolate( &V, first_point, first_touched - 1, cur_touched, first_touched ); } } contour++; } while ( contour < exc->pts.n_contours ); }
C
savannah
0
CVE-2017-0592
https://www.cvedetails.com/cve/CVE-2017-0592/
CWE-119
https://android.googlesource.com/platform/frameworks/av/+/acc192347665943ca674acf117e4f74a88436922
acc192347665943ca674acf117e4f74a88436922
FLACExtractor: copy protect mWriteBuffer Bug: 30895578 Change-Id: I4cba36bbe3502678210e5925181683df9726b431
MediaBuffer *readBuffer() { return readBuffer(false, 0LL); }
MediaBuffer *readBuffer() { return readBuffer(false, 0LL); }
C
Android
0
null
null
null
https://github.com/chromium/chromium/commit/df831400bcb63db4259b5858281b1727ba972a2a
df831400bcb63db4259b5858281b1727ba972a2a
WebKit2: Support window bounce when panning. https://bugs.webkit.org/show_bug.cgi?id=58065 <rdar://problem/9244367> Reviewed by Adam Roben. Make gestureDidScroll synchronous, as once we scroll, we need to know whether or not we are at the beginning or end of the scrollable document. If we are at either end of the scrollable document, we call the Windows 7 API to bounce the window to give an indication that you are past an end of the document. * UIProcess/WebPageProxy.cpp: (WebKit::WebPageProxy::gestureDidScroll): Pass a boolean for the reply, and return it. * UIProcess/WebPageProxy.h: * UIProcess/win/WebView.cpp: (WebKit::WebView::WebView): Inititalize a new variable. (WebKit::WebView::onGesture): Once we send the message to scroll, check if have gone to an end of the document, and if we have, bounce the window. * UIProcess/win/WebView.h: * WebProcess/WebPage/WebPage.h: * WebProcess/WebPage/WebPage.messages.in: GestureDidScroll is now sync. * WebProcess/WebPage/win/WebPageWin.cpp: (WebKit::WebPage::gestureDidScroll): When we are done scrolling, check if we have a vertical scrollbar and if we are at the beginning or the end of the scrollable document. git-svn-id: svn://svn.chromium.org/blink/trunk@83197 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void WebPage::gestureWillBegin(const WebCore::IntPoint& point, bool& canBeginPanning) { bool hitScrollbar = false; HitTestRequest request(HitTestRequest::ReadOnly); for (Frame* childFrame = m_page->mainFrame(); childFrame; childFrame = EventHandler::subframeForTargetNode(m_gestureTargetNode.get())) { ScrollView* scollView = childFrame->view(); if (!scollView) break; RenderView* renderView = childFrame->document()->renderView(); if (!renderView) break; RenderLayer* layer = renderView->layer(); if (!layer) break; HitTestResult result = scollView->windowToContents(point); layer->hitTest(request, result); m_gestureTargetNode = result.innerNode(); if (!hitScrollbar) hitScrollbar = result.scrollbar(); } if (hitScrollbar) { canBeginPanning = false; return; } if (!m_gestureTargetNode) { canBeginPanning = false; return; } for (RenderObject* renderer = m_gestureTargetNode->renderer(); renderer; renderer = renderer->parent()) { if (renderer->isBox() && toRenderBox(renderer)->canBeScrolledAndHasScrollableArea()) { canBeginPanning = true; return; } } canBeginPanning = false; }
void WebPage::gestureWillBegin(const WebCore::IntPoint& point, bool& canBeginPanning) { bool hitScrollbar = false; HitTestRequest request(HitTestRequest::ReadOnly); for (Frame* childFrame = m_page->mainFrame(); childFrame; childFrame = EventHandler::subframeForTargetNode(m_gestureTargetNode.get())) { ScrollView* scollView = childFrame->view(); if (!scollView) break; RenderView* renderView = childFrame->document()->renderView(); if (!renderView) break; RenderLayer* layer = renderView->layer(); if (!layer) break; HitTestResult result = scollView->windowToContents(point); layer->hitTest(request, result); m_gestureTargetNode = result.innerNode(); if (!hitScrollbar) hitScrollbar = result.scrollbar(); } if (hitScrollbar) { canBeginPanning = false; return; } if (!m_gestureTargetNode) { canBeginPanning = false; return; } for (RenderObject* renderer = m_gestureTargetNode->renderer(); renderer; renderer = renderer->parent()) { if (renderer->isBox() && toRenderBox(renderer)->canBeScrolledAndHasScrollableArea()) { canBeginPanning = true; return; } } canBeginPanning = false; }
C
Chrome
0
CVE-2017-5068
https://www.cvedetails.com/cve/CVE-2017-5068/
CWE-362
https://github.com/chromium/chromium/commit/13e1002277287ed0090b2ca76c2d01545e677935
13e1002277287ed0090b2ca76c2d01545e677935
Fix the crash after clamshell -> tablet transition in overview mode. This CL just reverted some changes that were made in https://chromium-review.googlesource.com/c/chromium/src/+/1658955. In that CL, we changed the clamshell <-> tablet transition when clamshell split view mode is enabled, however, we should keep the old behavior unchanged if the feature is not enabled, i.e., overview should be ended if it's active before the transition. Otherwise, it will cause a nullptr dereference crash since |split_view_drag_indicators_| is not created in clamshell overview and will be used in tablet overview. Bug: 982507 Change-Id: I238fe9472648a446cff4ab992150658c228714dd Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1705474 Commit-Queue: Xiaoqian Dai <xdai@chromium.org> Reviewed-by: Mitsuru Oshima (Slow - on/off site) <oshima@chromium.org> Cr-Commit-Position: refs/heads/master@{#679306}
void TabletModeWindowManager::OnSplitViewModeEnded() { switch (Shell::Get()->split_view_controller()->end_reason()) { case SplitViewController::EndReason::kNormal: case SplitViewController::EndReason::kUnsnappableWindowActivated: case SplitViewController::EndReason::kPipExpanded: break; case SplitViewController::EndReason::kHomeLauncherPressed: case SplitViewController::EndReason::kActiveUserChanged: case SplitViewController::EndReason::kWindowDragStarted: case SplitViewController::EndReason::kExitTabletMode: return; } MruWindowTracker::WindowList windows = Shell::Get()->mru_window_tracker()->BuildWindowListIgnoreModal(kAllDesks); for (auto* window : windows) MaximizeIfSnapped(window); }
void TabletModeWindowManager::OnSplitViewModeEnded() { switch (Shell::Get()->split_view_controller()->end_reason()) { case SplitViewController::EndReason::kNormal: case SplitViewController::EndReason::kUnsnappableWindowActivated: case SplitViewController::EndReason::kPipExpanded: break; case SplitViewController::EndReason::kHomeLauncherPressed: case SplitViewController::EndReason::kActiveUserChanged: case SplitViewController::EndReason::kWindowDragStarted: case SplitViewController::EndReason::kExitTabletMode: return; } MruWindowTracker::WindowList windows = Shell::Get()->mru_window_tracker()->BuildWindowListIgnoreModal(kAllDesks); for (auto* window : windows) MaximizeIfSnapped(window); }
C
Chrome
0
CVE-2012-5148
https://www.cvedetails.com/cve/CVE-2012-5148/
CWE-20
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
e89cfcb9090e8c98129ae9160c513f504db74599
Remove TabContents from TabStripModelObserver::TabDetachedAt. BUG=107201 TEST=no visible change Review URL: https://chromiumcodereview.appspot.com/11293205 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
void Browser::OpenFile() { content::RecordAction(UserMetricsAction("OpenFile")); select_file_dialog_ = ui::SelectFileDialog::Create( this, new ChromeSelectFilePolicy( chrome::GetActiveWebContents(this))); const FilePath directory = profile_->last_selected_directory(); gfx::NativeWindow parent_window = window_->GetNativeWindow(); ui::SelectFileDialog::FileTypeInfo file_types; file_types.support_gdata = true; select_file_dialog_->SelectFile(ui::SelectFileDialog::SELECT_OPEN_FILE, string16(), directory, &file_types, 0, FILE_PATH_LITERAL(""), parent_window, NULL); }
void Browser::OpenFile() { content::RecordAction(UserMetricsAction("OpenFile")); select_file_dialog_ = ui::SelectFileDialog::Create( this, new ChromeSelectFilePolicy( chrome::GetActiveWebContents(this))); const FilePath directory = profile_->last_selected_directory(); gfx::NativeWindow parent_window = window_->GetNativeWindow(); ui::SelectFileDialog::FileTypeInfo file_types; file_types.support_gdata = true; select_file_dialog_->SelectFile(ui::SelectFileDialog::SELECT_OPEN_FILE, string16(), directory, &file_types, 0, FILE_PATH_LITERAL(""), parent_window, NULL); }
C
Chrome
0
CVE-2015-6766
https://www.cvedetails.com/cve/CVE-2015-6766/
null
https://github.com/chromium/chromium/commit/2cb006bc9d3ad16353ed49c2b75faea618156d0f
2cb006bc9d3ad16353ed49c2b75faea618156d0f
Fix possible map::end() dereference in AppCacheUpdateJob triggered by a compromised renderer. BUG=551044 Review URL: https://codereview.chromium.org/1418783005 Cr-Commit-Position: refs/heads/master@{#358815}
AppCacheRequestHandler* AppCacheHost::CreateRequestHandler( net::URLRequest* request, ResourceType resource_type, bool should_reset_appcache) { if (is_for_dedicated_worker()) { AppCacheHost* parent_host = GetParentAppCacheHost(); if (parent_host) return parent_host->CreateRequestHandler( request, resource_type, should_reset_appcache); return NULL; } if (AppCacheRequestHandler::IsMainResourceType(resource_type)) { first_party_url_ = request->first_party_for_cookies(); return new AppCacheRequestHandler( this, resource_type, should_reset_appcache); } if ((associated_cache() && associated_cache()->is_complete()) || is_selection_pending()) { return new AppCacheRequestHandler( this, resource_type, should_reset_appcache); } return NULL; }
AppCacheRequestHandler* AppCacheHost::CreateRequestHandler( net::URLRequest* request, ResourceType resource_type, bool should_reset_appcache) { if (is_for_dedicated_worker()) { AppCacheHost* parent_host = GetParentAppCacheHost(); if (parent_host) return parent_host->CreateRequestHandler( request, resource_type, should_reset_appcache); return NULL; } if (AppCacheRequestHandler::IsMainResourceType(resource_type)) { first_party_url_ = request->first_party_for_cookies(); return new AppCacheRequestHandler( this, resource_type, should_reset_appcache); } if ((associated_cache() && associated_cache()->is_complete()) || is_selection_pending()) { return new AppCacheRequestHandler( this, resource_type, should_reset_appcache); } return NULL; }
C
Chrome
0
CVE-2018-16075
https://www.cvedetails.com/cve/CVE-2018-16075/
CWE-254
https://github.com/chromium/chromium/commit/d913f72b4875cf0814fc3f03ad7c00642097c4a4
d913f72b4875cf0814fc3f03ad7c00642097c4a4
Remove RequireCSSExtensionForFile runtime enabled flag. The feature has long since been stable (since M64) and doesn't seem to be a need for this flag. BUG=788936 Change-Id: I666390b869289c328acb4a2daa5bf4154e1702c0 Reviewed-on: https://chromium-review.googlesource.com/c/1324143 Reviewed-by: Mike West <mkwst@chromium.org> Reviewed-by: Camille Lamy <clamy@chromium.org> Commit-Queue: Dave Tapuska <dtapuska@chromium.org> Cr-Commit-Position: refs/heads/master@{#607329}
void WebRuntimeFeatures::EnableCSSHexAlphaColor(bool enable) { RuntimeEnabledFeatures::SetCSSHexAlphaColorEnabled(enable); }
void WebRuntimeFeatures::EnableCSSHexAlphaColor(bool enable) { RuntimeEnabledFeatures::SetCSSHexAlphaColorEnabled(enable); }
C
Chrome
0
CVE-2012-3552
https://www.cvedetails.com/cve/CVE-2012-3552/
CWE-362
https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259
f6d8bd051c391c1c0458a30b2a7abcd939329259
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
static int inet_gso_send_check(struct sk_buff *skb) { const struct iphdr *iph; const struct net_protocol *ops; int proto; int ihl; int err = -EINVAL; if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) goto out; iph = ip_hdr(skb); ihl = iph->ihl * 4; if (ihl < sizeof(*iph)) goto out; if (unlikely(!pskb_may_pull(skb, ihl))) goto out; __skb_pull(skb, ihl); skb_reset_transport_header(skb); iph = ip_hdr(skb); proto = iph->protocol & (MAX_INET_PROTOS - 1); err = -EPROTONOSUPPORT; rcu_read_lock(); ops = rcu_dereference(inet_protos[proto]); if (likely(ops && ops->gso_send_check)) err = ops->gso_send_check(skb); rcu_read_unlock(); out: return err; }
static int inet_gso_send_check(struct sk_buff *skb) { const struct iphdr *iph; const struct net_protocol *ops; int proto; int ihl; int err = -EINVAL; if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) goto out; iph = ip_hdr(skb); ihl = iph->ihl * 4; if (ihl < sizeof(*iph)) goto out; if (unlikely(!pskb_may_pull(skb, ihl))) goto out; __skb_pull(skb, ihl); skb_reset_transport_header(skb); iph = ip_hdr(skb); proto = iph->protocol & (MAX_INET_PROTOS - 1); err = -EPROTONOSUPPORT; rcu_read_lock(); ops = rcu_dereference(inet_protos[proto]); if (likely(ops && ops->gso_send_check)) err = ops->gso_send_check(skb); rcu_read_unlock(); out: return err; }
C
linux
0
CVE-2012-5136
https://www.cvedetails.com/cve/CVE-2012-5136/
CWE-20
https://github.com/chromium/chromium/commit/401d30ef93030afbf7e81e53a11b68fc36194502
401d30ef93030afbf7e81e53a11b68fc36194502
Refactoring: Move m_mayDisplaySeamlesslyWithParent down to Document The member is used only in Document, thus no reason to stay in SecurityContext. TEST=none BUG=none R=haraken@chromium.org, abarth, haraken, hayato Review URL: https://codereview.chromium.org/27615003 git-svn-id: svn://svn.chromium.org/blink/trunk@159829 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void Document::addConsoleMessageWithRequestIdentifier(MessageSource source, MessageLevel level, const String& message, unsigned long requestIdentifier) { if (!isContextThread()) { postTask(AddConsoleMessageTask::create(source, level, message)); return; } if (Page* page = this->page()) page->console().addMessage(source, level, message, String(), 0, 0, 0, 0, requestIdentifier); }
void Document::addConsoleMessageWithRequestIdentifier(MessageSource source, MessageLevel level, const String& message, unsigned long requestIdentifier) { if (!isContextThread()) { postTask(AddConsoleMessageTask::create(source, level, message)); return; } if (Page* page = this->page()) page->console().addMessage(source, level, message, String(), 0, 0, 0, 0, requestIdentifier); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/df831400bcb63db4259b5858281b1727ba972a2a
df831400bcb63db4259b5858281b1727ba972a2a
WebKit2: Support window bounce when panning. https://bugs.webkit.org/show_bug.cgi?id=58065 <rdar://problem/9244367> Reviewed by Adam Roben. Make gestureDidScroll synchronous, as once we scroll, we need to know whether or not we are at the beginning or end of the scrollable document. If we are at either end of the scrollable document, we call the Windows 7 API to bounce the window to give an indication that you are past an end of the document. * UIProcess/WebPageProxy.cpp: (WebKit::WebPageProxy::gestureDidScroll): Pass a boolean for the reply, and return it. * UIProcess/WebPageProxy.h: * UIProcess/win/WebView.cpp: (WebKit::WebView::WebView): Inititalize a new variable. (WebKit::WebView::onGesture): Once we send the message to scroll, check if have gone to an end of the document, and if we have, bounce the window. * UIProcess/win/WebView.h: * WebProcess/WebPage/WebPage.h: * WebProcess/WebPage/WebPage.messages.in: GestureDidScroll is now sync. * WebProcess/WebPage/win/WebPageWin.cpp: (WebKit::WebPage::gestureDidScroll): When we are done scrolling, check if we have a vertical scrollbar and if we are at the beginning or the end of the scrollable document. git-svn-id: svn://svn.chromium.org/blink/trunk@83197 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void WebPageProxy::setCursor(const WebCore::Cursor& cursor) { m_pageClient->setCursor(cursor); }
void WebPageProxy::setCursor(const WebCore::Cursor& cursor) { m_pageClient->setCursor(cursor); }
C
Chrome
0
CVE-2018-6794
https://www.cvedetails.com/cve/CVE-2018-6794/
CWE-693
https://github.com/OISF/suricata/pull/3202/commits/e1ef57c848bbe4e567d5d4b66d346a742e3f77a1
e1ef57c848bbe4e567d5d4b66d346a742e3f77a1
stream: still inspect packets dropped by stream The detect engine would bypass packets that are set as dropped. This seems sane, as these packets are going to be dropped anyway. However, it lead to the following corner case: stream events that triggered the drop could not be matched on the rules. The packet with the event wouldn't make it to the detect engine due to the bypass. This patch changes the logic to not bypass DROP packets anymore. Packets that are dropped by the stream engine will set the no payload inspection flag, so avoid needless cost.
static int StreamTcpPacketStateNone(ThreadVars *tv, Packet *p, StreamTcpThread *stt, TcpSession *ssn, PacketQueue *pq) { if (p->tcph->th_flags & TH_RST) { StreamTcpSetEvent(p, STREAM_RST_BUT_NO_SESSION); SCLogDebug("RST packet received, no session setup"); return -1; } else if (p->tcph->th_flags & TH_FIN) { StreamTcpSetEvent(p, STREAM_FIN_BUT_NO_SESSION); SCLogDebug("FIN packet received, no session setup"); return -1; /* SYN/ACK */ } else if ((p->tcph->th_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { if (stream_config.midstream == FALSE && stream_config.async_oneside == FALSE) return 0; if (ssn == NULL) { ssn = StreamTcpNewSession(p, stt->ssn_pool_id); if (ssn == NULL) { StatsIncr(tv, stt->counter_tcp_ssn_memcap); return -1; } StatsIncr(tv, stt->counter_tcp_sessions); } /* set the state */ StreamTcpPacketSetState(p, ssn, TCP_SYN_RECV); SCLogDebug("ssn %p: =~ midstream picked ssn state is now " "TCP_SYN_RECV", ssn); ssn->flags |= STREAMTCP_FLAG_MIDSTREAM; /* Flag used to change the direct in the later stage in the session */ ssn->flags |= STREAMTCP_FLAG_MIDSTREAM_SYNACK; if (stream_config.async_oneside) { SCLogDebug("ssn %p: =~ ASYNC", ssn); ssn->flags |= STREAMTCP_FLAG_ASYNC; } /* sequence number & window */ ssn->server.isn = TCP_GET_SEQ(p); STREAMTCP_SET_RA_BASE_SEQ(&ssn->server, ssn->server.isn); ssn->server.next_seq = ssn->server.isn + 1; ssn->server.window = TCP_GET_WINDOW(p); SCLogDebug("ssn %p: server window %u", ssn, ssn->server.window); ssn->client.isn = TCP_GET_ACK(p) - 1; STREAMTCP_SET_RA_BASE_SEQ(&ssn->client, ssn->client.isn); ssn->client.next_seq = ssn->client.isn + 1; ssn->client.last_ack = TCP_GET_ACK(p); ssn->server.last_ack = TCP_GET_SEQ(p); ssn->server.next_win = ssn->server.last_ack + ssn->server.window; /** If the client has a wscale option the server had it too, * so set the wscale for the server to max. Otherwise none * will have the wscale opt just like it should. */ if (TCP_HAS_WSCALE(p)) { ssn->client.wscale = TCP_GET_WSCALE(p); ssn->server.wscale = TCP_WSCALE_MAX; SCLogDebug("ssn %p: wscale enabled. client %u server %u", ssn, ssn->client.wscale, ssn->server.wscale); } SCLogDebug("ssn %p: ssn->client.isn %"PRIu32", ssn->client.next_seq" " %"PRIu32", ssn->client.last_ack %"PRIu32"", ssn, ssn->client.isn, ssn->client.next_seq, ssn->client.last_ack); SCLogDebug("ssn %p: ssn->server.isn %"PRIu32", ssn->server.next_seq" " %"PRIu32", ssn->server.last_ack %"PRIu32"", ssn, ssn->server.isn, ssn->server.next_seq, ssn->server.last_ack); /* Set the timestamp value for both streams, if packet has timestamp * option enabled.*/ if (TCP_HAS_TS(p)) { ssn->server.last_ts = TCP_GET_TSVAL(p); ssn->client.last_ts = TCP_GET_TSECR(p); SCLogDebug("ssn %p: ssn->server.last_ts %" PRIu32" " "ssn->client.last_ts %" PRIu32"", ssn, ssn->server.last_ts, ssn->client.last_ts); ssn->flags |= STREAMTCP_FLAG_TIMESTAMP; ssn->server.last_pkt_ts = p->ts.tv_sec; if (ssn->server.last_ts == 0) ssn->server.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; if (ssn->client.last_ts == 0) ssn->client.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; } else { ssn->server.last_ts = 0; ssn->client.last_ts = 0; } if (TCP_GET_SACKOK(p) == 1) { ssn->flags |= STREAMTCP_FLAG_SACKOK; SCLogDebug("ssn %p: SYN/ACK with SACK permitted, assuming " "SACK permitted for both sides", ssn); } /* packet thinks it is in the wrong direction, flip it */ StreamTcpPacketSwitchDir(ssn, p); } else if (p->tcph->th_flags & TH_SYN) { if (ssn == NULL) { ssn = StreamTcpNewSession(p, stt->ssn_pool_id); if (ssn == NULL) { StatsIncr(tv, stt->counter_tcp_ssn_memcap); return -1; } StatsIncr(tv, stt->counter_tcp_sessions); } /* set the state */ StreamTcpPacketSetState(p, ssn, TCP_SYN_SENT); SCLogDebug("ssn %p: =~ ssn state is now TCP_SYN_SENT", ssn); if (stream_config.async_oneside) { SCLogDebug("ssn %p: =~ ASYNC", ssn); ssn->flags |= STREAMTCP_FLAG_ASYNC; } /* set the sequence numbers and window */ ssn->client.isn = TCP_GET_SEQ(p); STREAMTCP_SET_RA_BASE_SEQ(&ssn->client, ssn->client.isn); ssn->client.next_seq = ssn->client.isn + 1; /* Set the stream timestamp value, if packet has timestamp option * enabled. */ if (TCP_HAS_TS(p)) { ssn->client.last_ts = TCP_GET_TSVAL(p); SCLogDebug("ssn %p: %02x", ssn, ssn->client.last_ts); if (ssn->client.last_ts == 0) ssn->client.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; ssn->client.last_pkt_ts = p->ts.tv_sec; ssn->client.flags |= STREAMTCP_STREAM_FLAG_TIMESTAMP; } ssn->server.window = TCP_GET_WINDOW(p); if (TCP_HAS_WSCALE(p)) { ssn->flags |= STREAMTCP_FLAG_SERVER_WSCALE; ssn->server.wscale = TCP_GET_WSCALE(p); } if (TCP_GET_SACKOK(p) == 1) { ssn->flags |= STREAMTCP_FLAG_CLIENT_SACKOK; SCLogDebug("ssn %p: SACK permited on SYN packet", ssn); } SCLogDebug("ssn %p: ssn->client.isn %" PRIu32 ", " "ssn->client.next_seq %" PRIu32 ", ssn->client.last_ack " "%"PRIu32"", ssn, ssn->client.isn, ssn->client.next_seq, ssn->client.last_ack); } else if (p->tcph->th_flags & TH_ACK) { if (stream_config.midstream == FALSE) return 0; if (ssn == NULL) { ssn = StreamTcpNewSession(p, stt->ssn_pool_id); if (ssn == NULL) { StatsIncr(tv, stt->counter_tcp_ssn_memcap); return -1; } StatsIncr(tv, stt->counter_tcp_sessions); } /* set the state */ StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED); SCLogDebug("ssn %p: =~ midstream picked ssn state is now " "TCP_ESTABLISHED", ssn); ssn->flags = STREAMTCP_FLAG_MIDSTREAM; ssn->flags |= STREAMTCP_FLAG_MIDSTREAM_ESTABLISHED; if (stream_config.async_oneside) { SCLogDebug("ssn %p: =~ ASYNC", ssn); ssn->flags |= STREAMTCP_FLAG_ASYNC; } /** window scaling for midstream pickups, we can't do much other * than assume that it's set to the max value: 14 */ ssn->client.wscale = TCP_WSCALE_MAX; ssn->server.wscale = TCP_WSCALE_MAX; /* set the sequence numbers and window */ ssn->client.isn = TCP_GET_SEQ(p) - 1; STREAMTCP_SET_RA_BASE_SEQ(&ssn->client, ssn->client.isn); ssn->client.next_seq = TCP_GET_SEQ(p) + p->payload_len; ssn->client.window = TCP_GET_WINDOW(p) << ssn->client.wscale; ssn->client.last_ack = TCP_GET_SEQ(p); ssn->client.next_win = ssn->client.last_ack + ssn->client.window; SCLogDebug("ssn %p: ssn->client.isn %u, ssn->client.next_seq %u", ssn, ssn->client.isn, ssn->client.next_seq); ssn->server.isn = TCP_GET_ACK(p) - 1; STREAMTCP_SET_RA_BASE_SEQ(&ssn->server, ssn->server.isn); ssn->server.next_seq = ssn->server.isn + 1; ssn->server.last_ack = TCP_GET_ACK(p); ssn->server.next_win = ssn->server.last_ack; SCLogDebug("ssn %p: ssn->client.next_win %"PRIu32", " "ssn->server.next_win %"PRIu32"", ssn, ssn->client.next_win, ssn->server.next_win); SCLogDebug("ssn %p: ssn->client.last_ack %"PRIu32", " "ssn->server.last_ack %"PRIu32"", ssn, ssn->client.last_ack, ssn->server.last_ack); /* Set the timestamp value for both streams, if packet has timestamp * option enabled.*/ if (TCP_HAS_TS(p)) { ssn->client.last_ts = TCP_GET_TSVAL(p); ssn->server.last_ts = TCP_GET_TSECR(p); SCLogDebug("ssn %p: ssn->server.last_ts %" PRIu32" " "ssn->client.last_ts %" PRIu32"", ssn, ssn->server.last_ts, ssn->client.last_ts); ssn->flags |= STREAMTCP_FLAG_TIMESTAMP; ssn->client.last_pkt_ts = p->ts.tv_sec; if (ssn->server.last_ts == 0) ssn->server.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; if (ssn->client.last_ts == 0) ssn->client.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; } else { ssn->server.last_ts = 0; ssn->client.last_ts = 0; } StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn, &ssn->client, p, pq); ssn->flags |= STREAMTCP_FLAG_SACKOK; SCLogDebug("ssn %p: assuming SACK permitted for both sides", ssn); } else { SCLogDebug("default case"); } return 0; }
static int StreamTcpPacketStateNone(ThreadVars *tv, Packet *p, StreamTcpThread *stt, TcpSession *ssn, PacketQueue *pq) { if (p->tcph->th_flags & TH_RST) { StreamTcpSetEvent(p, STREAM_RST_BUT_NO_SESSION); SCLogDebug("RST packet received, no session setup"); return -1; } else if (p->tcph->th_flags & TH_FIN) { StreamTcpSetEvent(p, STREAM_FIN_BUT_NO_SESSION); SCLogDebug("FIN packet received, no session setup"); return -1; /* SYN/ACK */ } else if ((p->tcph->th_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { if (stream_config.midstream == FALSE && stream_config.async_oneside == FALSE) return 0; if (ssn == NULL) { ssn = StreamTcpNewSession(p, stt->ssn_pool_id); if (ssn == NULL) { StatsIncr(tv, stt->counter_tcp_ssn_memcap); return -1; } StatsIncr(tv, stt->counter_tcp_sessions); } /* set the state */ StreamTcpPacketSetState(p, ssn, TCP_SYN_RECV); SCLogDebug("ssn %p: =~ midstream picked ssn state is now " "TCP_SYN_RECV", ssn); ssn->flags |= STREAMTCP_FLAG_MIDSTREAM; /* Flag used to change the direct in the later stage in the session */ ssn->flags |= STREAMTCP_FLAG_MIDSTREAM_SYNACK; if (stream_config.async_oneside) { SCLogDebug("ssn %p: =~ ASYNC", ssn); ssn->flags |= STREAMTCP_FLAG_ASYNC; } /* sequence number & window */ ssn->server.isn = TCP_GET_SEQ(p); STREAMTCP_SET_RA_BASE_SEQ(&ssn->server, ssn->server.isn); ssn->server.next_seq = ssn->server.isn + 1; ssn->server.window = TCP_GET_WINDOW(p); SCLogDebug("ssn %p: server window %u", ssn, ssn->server.window); ssn->client.isn = TCP_GET_ACK(p) - 1; STREAMTCP_SET_RA_BASE_SEQ(&ssn->client, ssn->client.isn); ssn->client.next_seq = ssn->client.isn + 1; ssn->client.last_ack = TCP_GET_ACK(p); ssn->server.last_ack = TCP_GET_SEQ(p); ssn->server.next_win = ssn->server.last_ack + ssn->server.window; /** If the client has a wscale option the server had it too, * so set the wscale for the server to max. Otherwise none * will have the wscale opt just like it should. */ if (TCP_HAS_WSCALE(p)) { ssn->client.wscale = TCP_GET_WSCALE(p); ssn->server.wscale = TCP_WSCALE_MAX; SCLogDebug("ssn %p: wscale enabled. client %u server %u", ssn, ssn->client.wscale, ssn->server.wscale); } SCLogDebug("ssn %p: ssn->client.isn %"PRIu32", ssn->client.next_seq" " %"PRIu32", ssn->client.last_ack %"PRIu32"", ssn, ssn->client.isn, ssn->client.next_seq, ssn->client.last_ack); SCLogDebug("ssn %p: ssn->server.isn %"PRIu32", ssn->server.next_seq" " %"PRIu32", ssn->server.last_ack %"PRIu32"", ssn, ssn->server.isn, ssn->server.next_seq, ssn->server.last_ack); /* Set the timestamp value for both streams, if packet has timestamp * option enabled.*/ if (TCP_HAS_TS(p)) { ssn->server.last_ts = TCP_GET_TSVAL(p); ssn->client.last_ts = TCP_GET_TSECR(p); SCLogDebug("ssn %p: ssn->server.last_ts %" PRIu32" " "ssn->client.last_ts %" PRIu32"", ssn, ssn->server.last_ts, ssn->client.last_ts); ssn->flags |= STREAMTCP_FLAG_TIMESTAMP; ssn->server.last_pkt_ts = p->ts.tv_sec; if (ssn->server.last_ts == 0) ssn->server.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; if (ssn->client.last_ts == 0) ssn->client.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; } else { ssn->server.last_ts = 0; ssn->client.last_ts = 0; } if (TCP_GET_SACKOK(p) == 1) { ssn->flags |= STREAMTCP_FLAG_SACKOK; SCLogDebug("ssn %p: SYN/ACK with SACK permitted, assuming " "SACK permitted for both sides", ssn); } /* packet thinks it is in the wrong direction, flip it */ StreamTcpPacketSwitchDir(ssn, p); } else if (p->tcph->th_flags & TH_SYN) { if (ssn == NULL) { ssn = StreamTcpNewSession(p, stt->ssn_pool_id); if (ssn == NULL) { StatsIncr(tv, stt->counter_tcp_ssn_memcap); return -1; } StatsIncr(tv, stt->counter_tcp_sessions); } /* set the state */ StreamTcpPacketSetState(p, ssn, TCP_SYN_SENT); SCLogDebug("ssn %p: =~ ssn state is now TCP_SYN_SENT", ssn); if (stream_config.async_oneside) { SCLogDebug("ssn %p: =~ ASYNC", ssn); ssn->flags |= STREAMTCP_FLAG_ASYNC; } /* set the sequence numbers and window */ ssn->client.isn = TCP_GET_SEQ(p); STREAMTCP_SET_RA_BASE_SEQ(&ssn->client, ssn->client.isn); ssn->client.next_seq = ssn->client.isn + 1; /* Set the stream timestamp value, if packet has timestamp option * enabled. */ if (TCP_HAS_TS(p)) { ssn->client.last_ts = TCP_GET_TSVAL(p); SCLogDebug("ssn %p: %02x", ssn, ssn->client.last_ts); if (ssn->client.last_ts == 0) ssn->client.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; ssn->client.last_pkt_ts = p->ts.tv_sec; ssn->client.flags |= STREAMTCP_STREAM_FLAG_TIMESTAMP; } ssn->server.window = TCP_GET_WINDOW(p); if (TCP_HAS_WSCALE(p)) { ssn->flags |= STREAMTCP_FLAG_SERVER_WSCALE; ssn->server.wscale = TCP_GET_WSCALE(p); } if (TCP_GET_SACKOK(p) == 1) { ssn->flags |= STREAMTCP_FLAG_CLIENT_SACKOK; SCLogDebug("ssn %p: SACK permited on SYN packet", ssn); } SCLogDebug("ssn %p: ssn->client.isn %" PRIu32 ", " "ssn->client.next_seq %" PRIu32 ", ssn->client.last_ack " "%"PRIu32"", ssn, ssn->client.isn, ssn->client.next_seq, ssn->client.last_ack); } else if (p->tcph->th_flags & TH_ACK) { if (stream_config.midstream == FALSE) return 0; if (ssn == NULL) { ssn = StreamTcpNewSession(p, stt->ssn_pool_id); if (ssn == NULL) { StatsIncr(tv, stt->counter_tcp_ssn_memcap); return -1; } StatsIncr(tv, stt->counter_tcp_sessions); } /* set the state */ StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED); SCLogDebug("ssn %p: =~ midstream picked ssn state is now " "TCP_ESTABLISHED", ssn); ssn->flags = STREAMTCP_FLAG_MIDSTREAM; ssn->flags |= STREAMTCP_FLAG_MIDSTREAM_ESTABLISHED; if (stream_config.async_oneside) { SCLogDebug("ssn %p: =~ ASYNC", ssn); ssn->flags |= STREAMTCP_FLAG_ASYNC; } /** window scaling for midstream pickups, we can't do much other * than assume that it's set to the max value: 14 */ ssn->client.wscale = TCP_WSCALE_MAX; ssn->server.wscale = TCP_WSCALE_MAX; /* set the sequence numbers and window */ ssn->client.isn = TCP_GET_SEQ(p) - 1; STREAMTCP_SET_RA_BASE_SEQ(&ssn->client, ssn->client.isn); ssn->client.next_seq = TCP_GET_SEQ(p) + p->payload_len; ssn->client.window = TCP_GET_WINDOW(p) << ssn->client.wscale; ssn->client.last_ack = TCP_GET_SEQ(p); ssn->client.next_win = ssn->client.last_ack + ssn->client.window; SCLogDebug("ssn %p: ssn->client.isn %u, ssn->client.next_seq %u", ssn, ssn->client.isn, ssn->client.next_seq); ssn->server.isn = TCP_GET_ACK(p) - 1; STREAMTCP_SET_RA_BASE_SEQ(&ssn->server, ssn->server.isn); ssn->server.next_seq = ssn->server.isn + 1; ssn->server.last_ack = TCP_GET_ACK(p); ssn->server.next_win = ssn->server.last_ack; SCLogDebug("ssn %p: ssn->client.next_win %"PRIu32", " "ssn->server.next_win %"PRIu32"", ssn, ssn->client.next_win, ssn->server.next_win); SCLogDebug("ssn %p: ssn->client.last_ack %"PRIu32", " "ssn->server.last_ack %"PRIu32"", ssn, ssn->client.last_ack, ssn->server.last_ack); /* Set the timestamp value for both streams, if packet has timestamp * option enabled.*/ if (TCP_HAS_TS(p)) { ssn->client.last_ts = TCP_GET_TSVAL(p); ssn->server.last_ts = TCP_GET_TSECR(p); SCLogDebug("ssn %p: ssn->server.last_ts %" PRIu32" " "ssn->client.last_ts %" PRIu32"", ssn, ssn->server.last_ts, ssn->client.last_ts); ssn->flags |= STREAMTCP_FLAG_TIMESTAMP; ssn->client.last_pkt_ts = p->ts.tv_sec; if (ssn->server.last_ts == 0) ssn->server.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; if (ssn->client.last_ts == 0) ssn->client.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; } else { ssn->server.last_ts = 0; ssn->client.last_ts = 0; } StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn, &ssn->client, p, pq); ssn->flags |= STREAMTCP_FLAG_SACKOK; SCLogDebug("ssn %p: assuming SACK permitted for both sides", ssn); } else { SCLogDebug("default case"); } return 0; }
C
suricata
0
CVE-2018-17570
https://www.cvedetails.com/cve/CVE-2018-17570/
CWE-190
https://github.com/viabtc/viabtc_exchange_server/commit/4a7c27bfe98f409623d4d857894d017ff0672cc9#diff-515c81af848352583bff286d6224875f
4a7c27bfe98f409623d4d857894d017ff0672cc9#diff-515c81af848352583bff286d6224875f
Merge pull request #131 from benjaminchodroff/master fix memory corruption and other 32bit overflows
nw_buf_list *nw_buf_list_create(nw_buf_pool *pool, uint32_t limit) { nw_buf_list *list = malloc(sizeof(nw_buf_list)); if (list == NULL) return NULL; list->pool = pool; list->count = 0; list->limit = limit; list->head = NULL; list->tail = NULL; return list; }
nw_buf_list *nw_buf_list_create(nw_buf_pool *pool, uint32_t limit) { nw_buf_list *list = malloc(sizeof(nw_buf_list)); if (list == NULL) return NULL; list->pool = pool; list->count = 0; list->limit = limit; list->head = NULL; list->tail = NULL; return list; }
C
viabtc_exchange_server
0
CVE-2018-6033
https://www.cvedetails.com/cve/CVE-2018-6033/
CWE-20
https://github.com/chromium/chromium/commit/a8d6ae61d266d8bc44c3dd2d08bda32db701e359
a8d6ae61d266d8bc44c3dd2d08bda32db701e359
Downloads : Fixed an issue of opening incorrect download file When one download overwrites another completed download, calling download.open in the old download causes the new download to open, which could be dangerous and undesirable. In this CL, we are trying to avoid this by blocking the opening of the old download. Bug: 793620 Change-Id: Ic948175756700ad7c08489c3cc347330daedb6f8 Reviewed-on: https://chromium-review.googlesource.com/826477 Reviewed-by: David Trainor <dtrainor@chromium.org> Reviewed-by: Xing Liu <xingliu@chromium.org> Reviewed-by: John Abd-El-Malek <jam@chromium.org> Commit-Queue: Shakti Sahu <shaktisahu@chromium.org> Cr-Commit-Position: refs/heads/master@{#525810}
DownloadItemImpl::GetReceivedSlices() const { return received_slices_; }
DownloadItemImpl::GetReceivedSlices() const { return received_slices_; }
C
Chrome
0