func: string
target: string
cwe: list
project: string
commit_id: string
hash: string
size: int64
message: string
vul: int64
void elv_unregister(struct elevator_type *e)
{
    /* unregister */
    spin_lock(&elv_list_lock);
    list_del_init(&e->list);
    spin_unlock(&elv_list_lock);

    /*
     * Destroy icq_cache if it exists. icq's are RCU managed. Make
     * sure all RCU operations are complete before proceeding.
     */
    if (e->icq_cache) {
        rcu_barrier();
        kmem_cache_destroy(e->icq_cache);
        e->icq_cache = NULL;
    }
}
Safe
[ "CWE-416" ]
linux
c3e2219216c92919a6bd1711f340f5faa98695e6
1.6081057570157788e+38
17
block: free sched's request pool in blk_cleanup_queue

In theory, the IO scheduler belongs to the request queue, and the request pool of sched tags belongs to the request queue too. However, the current tags allocation interfaces are re-used for both driver tags and sched tags, and driver tags are definitely host wide and don't belong to any request queue, same with their request pool. So we need the tagset instance for freeing requests of sched tags.

Meanwhile, blk_mq_free_tag_set() often follows blk_cleanup_queue() in the non-BLK_MQ_F_TAG_SHARED case, which requires that the request pool of sched tags be freed before calling blk_mq_free_tag_set().

Commit 47cdee29ef9d94e ("block: move blk_exit_queue into __blk_release_queue") moves blk_exit_queue into __blk_release_queue for simplifying the fast path in generic_make_request(), then causes an oops during freeing of sched-tag requests in __blk_release_queue().

Fix the above issue by moving the freeing of the sched-tags request pool into blk_cleanup_queue(); this is safe because the queue has been frozen and there are no in-queue requests at that time. Freeing sched tags has to be kept in the queue's release handler because there might be un-completed dispatch activity which might refer to sched tags.

Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Christoph Hellwig <hch@lst.de>
Fixes: 47cdee29ef9d94e485eb08f962c74943023a5271 ("block: move blk_exit_queue into __blk_release_queue")
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Reported-by: kernel test robot <rong.a.chen@intel.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
static int __init ghash_mod_init(void)
{
    return crypto_register_shash(&ghash_alg);
}
Safe
[ "CWE-476" ]
linux
7ed47b7d142ec99ad6880bbbec51e9f12b3af74c
2.4001615762601235e+38
4
crypto: ghash - Avoid null pointer dereference if no key is set

The ghash_update function passes a pointer to gf128mul_4k_lle which will be NULL if ghash_setkey is not called or if the most recent call to ghash_setkey failed to allocate memory. This causes an oops. Fix this up by returning an error code in the null case.

This is trivially triggered from unprivileged userspace through the AF_ALG interface by simply writing to the socket without setting a key.

The ghash_final function has a similar issue, but triggering it requires a memory allocation failure in ghash_setkey _after_ at least one successful call to ghash_update.

  BUG: unable to handle kernel NULL pointer dereference at 00000670
  IP: [<d88c92d4>] gf128mul_4k_lle+0x23/0x60 [gf128mul]
  *pde = 00000000
  Oops: 0000 [#1] PREEMPT SMP
  Modules linked in: ghash_generic gf128mul algif_hash af_alg nfs lockd nfs_acl sunrpc bridge ipv6 stp llc
  Pid: 1502, comm: hashatron Tainted: G W 3.1.0-rc9-00085-ge9308cf #32 Bochs Bochs
  EIP: 0060:[<d88c92d4>] EFLAGS: 00000202 CPU: 0
  EIP is at gf128mul_4k_lle+0x23/0x60 [gf128mul]
  EAX: d69db1f0 EBX: d6b8ddac ECX: 00000004 EDX: 00000000
  ESI: 00000670 EDI: d6b8ddac EBP: d6b8ddc8 ESP: d6b8dda4
  DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
  Process hashatron (pid: 1502, ti=d6b8c000 task=d6810000 task.ti=d6b8c000)
  Stack:
   00000000 d69db1f0 00000163 00000000 d6b8ddc8 c101a520 d69db1f0 d52aa000
   00000ff0 d6b8dde8 d88d310f d6b8a3f8 d52aa000 00001000 d88d502c d6b8ddfc
   00001000 d6b8ddf4 c11676ed d69db1e8 d6b8de24 c11679ad d52aa000 00000000
  Call Trace:
   [<c101a520>] ? kmap_atomic_prot+0x37/0xa6
   [<d88d310f>] ghash_update+0x85/0xbe [ghash_generic]
   [<c11676ed>] crypto_shash_update+0x18/0x1b
   [<c11679ad>] shash_ahash_update+0x22/0x36
   [<c11679cc>] shash_async_update+0xb/0xd
   [<d88ce0ba>] hash_sendpage+0xba/0xf2 [algif_hash]
   [<c121b24c>] kernel_sendpage+0x39/0x4e
   [<d88ce000>] ? 0xd88cdfff
   [<c121b298>] sock_sendpage+0x37/0x3e
   [<c121b261>] ? kernel_sendpage+0x4e/0x4e
   [<c10b4dbc>] pipe_to_sendpage+0x56/0x61
   [<c10b4e1f>] splice_from_pipe_feed+0x58/0xcd
   [<c10b4d66>] ? splice_from_pipe_begin+0x10/0x10
   [<c10b51f5>] __splice_from_pipe+0x36/0x55
   [<c10b4d66>] ? splice_from_pipe_begin+0x10/0x10
   [<c10b6383>] splice_from_pipe+0x51/0x64
   [<c10b63c2>] ? default_file_splice_write+0x2c/0x2c
   [<c10b63d5>] generic_splice_sendpage+0x13/0x15
   [<c10b4d66>] ? splice_from_pipe_begin+0x10/0x10
   [<c10b527f>] do_splice_from+0x5d/0x67
   [<c10b6865>] sys_splice+0x2bf/0x363
   [<c129373b>] ? sysenter_exit+0xf/0x16
   [<c104dc1e>] ? trace_hardirqs_on_caller+0x10e/0x13f
   [<c129370c>] sysenter_do_call+0x12/0x32
  Code: 83 c4 0c 5b 5e 5f c9 c3 55 b9 04 00 00 00 89 e5 57 8d 7d e4 56 53 8d 5d e4 83 ec 18 89 45 e0 89 55 dc 0f b6 70 0f c1 e6 04 01 d6 <f3> a5 be 0f 00 00 00 4e 89 d8 e8 48 ff ff ff 8b 45 e0 89 da 0f
  EIP: [<d88c92d4>] gf128mul_4k_lle+0x23/0x60 [gf128mul] SS:ESP 0068:d6b8dda4
  CR2: 0000000000000670
  ---[ end trace 4eaa2a86a8e2da24 ]---
  note: hashatron[1502] exited with preempt_count 1
  BUG: scheduling while atomic: hashatron/1502/0x10000002
  INFO: lockdep is turned off.
  [...]

Signed-off-by: Nick Bowler <nbowler@elliptictech.com>
Cc: stable@kernel.org [2.6.37+]
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
0
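The null-key guard the message describes can be sketched in a few lines. This is a minimal illustration, not the verbatim upstream patch; the ghash_ctx field name and the -ENOKEY choice are assumptions taken from the commit text.

static int ghash_update(struct shash_desc *desc,
                        const u8 *src, unsigned int srclen)
{
    struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);

    /* ghash_setkey() was never called, or its allocation failed:
     * bail out instead of handing a NULL table to gf128mul_4k_lle() */
    if (!ctx->gf128)
        return -ENOKEY;

    /* ... normal update path using gf128mul_4k_lle(x, ctx->gf128) ... */
    return 0;
}

The same guard goes into ghash_final, which shares the NULL-table hazard.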
mboxlist_delayed_deletemailbox(const char *name, int isadmin,
                               const char *userid,
                               const struct auth_state *auth_state,
                               struct mboxevent *mboxevent,
                               int checkacl, int localonly, int force)
{
    mbentry_t *mbentry = NULL;
    strarray_t existing = STRARRAY_INITIALIZER;
    int i;
    char newname[MAX_MAILBOX_BUFFER];
    int r = 0;
    long myrights;

    if (!isadmin && force) return IMAP_PERMISSION_DENIED;

    /* delete of a user.X folder */
    mbname_t *mbname = mbname_from_intname(name);
    if (mbname_userid(mbname) && !strarray_size(mbname_boxes(mbname))) {
        /* Can't DELETE INBOX (your own inbox) */
        if (!strcmpsafe(mbname_userid(mbname), userid)) {
            r = IMAP_MAILBOX_NOTSUPPORTED;
            goto done;
        }
        /* Only admins may delete user */
        if (!isadmin) {
            r = IMAP_PERMISSION_DENIED;
            goto done;
        }
    }

    if (!isadmin && mbname_userid(mbname)) {
        struct buf attrib = BUF_INITIALIZER;
        annotatemore_lookup(mbname_intname(mbname), "/specialuse",
                            mbname_userid(mbname), &attrib);
        if (attrib.len)
            r = IMAP_MAILBOX_SPECIALUSE;
        buf_free(&attrib);
        if (r) goto done;
    }

    r = mboxlist_lookup(name, &mbentry, NULL);
    if (r) goto done;

    /* check if user has Delete right (we've already excluded non-admins
     * from deleting a user mailbox) */
    if (checkacl) {
        myrights = cyrus_acl_myrights(auth_state, mbentry->acl);
        if (!(myrights & ACL_DELETEMBOX)) {
            /* User has admin rights over their own mailbox namespace */
            if (mboxname_userownsmailbox(userid, name) &&
                (config_implicitrights & ACL_ADMIN)) {
                isadmin = 1;
            }

            /* Lie about error if privacy demands */
            r = (isadmin || (myrights & ACL_LOOKUP)) ?
                IMAP_PERMISSION_DENIED : IMAP_MAILBOX_NONEXISTENT;
            goto done;
        }
    }

    /* check if there are already too many! */
    mboxname_todeleted(name, newname, 0);
    r = mboxlist_mboxtree(newname, addmbox_to_list, &existing,
                          MBOXTREE_SKIP_ROOT);
    if (r) goto done;

    /* keep the last 19, so the new one is the 20th */
    for (i = 0; i < (int)existing.count - 19; i++) {
        const char *subname = strarray_nth(&existing, i);
        syslog(LOG_NOTICE,
               "too many subfolders for %s, deleting %s (%d / %d)",
               newname, subname, i+1, (int)existing.count);
        r = mboxlist_deletemailbox(subname, 1, userid, auth_state,
                                   NULL, 0, 1, 1);
        if (r) goto done;
    }

    /* get the deleted name */
    mboxname_todeleted(name, newname, 1);

    /* Get mboxlist_renamemailbox to do the hard work. No ACL checks needed */
    r = mboxlist_renamemailbox((char *)name, newname, mbentry->partition,
                               0 /* uidvalidity */,
                               1 /* isadmin */, userid,
                               auth_state,
                               mboxevent,
                               localonly /* local_only */,
                               force, 1);

done:
    strarray_fini(&existing);
    mboxlist_entry_free(&mbentry);
    mbname_free(&mbname);
    return r;
}
Safe
[ "CWE-20" ]
cyrus-imapd
6bd33275368edfa71ae117de895488584678ac79
2.6461358556334156e+38
96
mboxlist: fix uninitialised memory use where pattern is "Other Users"
0
int kvm_mmu_vendor_module_init(void)
{
    int ret = -ENOMEM;

    /*
     * MMU roles use union aliasing which is, generally speaking, an
     * undefined behavior. However, we supposedly know how compilers behave
     * and the current status quo is unlikely to change. Guardians below are
     * supposed to let us know if the assumption becomes false.
     */
    BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
    BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
    BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));

    kvm_mmu_reset_all_pte_masks();

    pte_list_desc_cache = kmem_cache_create("pte_list_desc",
                                            sizeof(struct pte_list_desc),
                                            0, SLAB_ACCOUNT, NULL);
    if (!pte_list_desc_cache)
        goto out;

    mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                              sizeof(struct kvm_mmu_page),
                                              0, SLAB_ACCOUNT, NULL);
    if (!mmu_page_header_cache)
        goto out;

    if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
        goto out;

    ret = register_shrinker(&mmu_shrinker);
    if (ret)
        goto out;

    return 0;

out:
    mmu_destroy_caches();
    return ret;
}
Safe
[ "CWE-476" ]
linux
9f46c187e2e680ecd9de7983e4d081c3391acc76
1.2133139609718877e+37
41
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID

With shadow paging enabled, the INVPCID instruction results in a call to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the invlpg callback is not set and the result is a NULL pointer dereference. Fix it trivially by checking for mmu->invlpg before every call.

There are other possibilities:

- check for CR0.PG, because KVM (like all Intel processors after P5) flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a nop with paging disabled

- check for EFER.LMA, because KVM syncs and flushes when switching MMU contexts outside of 64-bit mode

All of these are tricky, go for the simple solution.

This is CVE-2022-1789.

Reported-by: Yongkang Jia <kangel@zju.edu.cn>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
tcp_seq_analysis_packet( void *ptr, packet_info *pinfo, epan_dissect_t *edt _U_, const void *tcp_info)
{
    seq_analysis_info_t *sainfo = (seq_analysis_info_t *) ptr;
    const struct tcpheader *tcph = (const struct tcpheader *)tcp_info;
    const char* flags;
    seq_analysis_item_t *sai = sequence_analysis_create_sai_with_addresses(pinfo, sainfo);

    if (!sai)
        return TAP_PACKET_DONT_REDRAW;

    sai->frame_number = pinfo->num;

    sai->port_src=pinfo->srcport;
    sai->port_dst=pinfo->destport;

    flags = tcp_flags_to_str(NULL, tcph);

    if ((tcph->th_have_seglen)&&(tcph->th_seglen!=0)){
        sai->frame_label = g_strdup_printf("%s - Len: %u",flags, tcph->th_seglen);
    }
    else{
        sai->frame_label = g_strdup(flags);
    }
    wmem_free(NULL, (void*)flags);

    if (tcph->th_flags & TH_ACK)
        sai->comment = g_strdup_printf("Seq = %u Ack = %u",tcph->th_seq, tcph->th_ack);
    else
        sai->comment = g_strdup_printf("Seq = %u",tcph->th_seq);

    sai->line_style = 1;
    sai->conv_num = (guint16) tcph->th_stream;
    sai->display = TRUE;

    g_queue_push_tail(sainfo->items, sai);

    return TAP_PACKET_REDRAW;
}
Safe
[ "CWE-354" ]
wireshark
7f3fe6164a68b76d9988c4253b24d43f498f1753
1.27369138032096e+38
39
TCP: do not use an unknown status when the checksum is 0xffff

Otherwise it triggers an assert when adding the column, as the field is defined as BASE_NONE and not BASE_DEC or BASE_HEX. Thus an unknown value (not in the proto_checksum_vals[] array) cannot be represented. Mark the checksum as bad even if we process the packet.

Closes #16816

Conflicts:
	epan/dissectors/packet-tcp.c
0
static int psi_memory_show(struct seq_file *m, void *v)
{
    return psi_show(m, &psi_system, PSI_MEM);
}
Safe
[ "CWE-787" ]
linux
6fcca0fa48118e6d63733eb4644c6cd880c15b8f
2.576515491746391e+37
4
sched/psi: Fix OOB write when writing 0 bytes to PSI files Issuing write() with count parameter set to 0 on any file under /proc/pressure/ will cause an OOB write because of the access to buf[buf_size-1] when NUL-termination is performed. Fix this by checking for buf_size to be non-zero. Signed-off-by: Suren Baghdasaryan <surenb@google.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Link: https://lkml.kernel.org/r/20200203212216.7076-1-surenb@google.com
0
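The guard described in the message is tiny. Below is a condensed sketch of a psi_write()-style handler with the fix in place; the surrounding signature and parsing are simplified, not copied from the kernel.

static ssize_t psi_write(struct file *file, const char __user *user_buf,
                         size_t nbytes, enum psi_res res)
{
    char buf[32];
    size_t buf_size;

    /* the fix: a zero-byte write would otherwise index buf[-1] below */
    if (!nbytes)
        return -EINVAL;

    buf_size = min(nbytes, sizeof(buf));
    if (copy_from_user(buf, user_buf, buf_size))
        return -EFAULT;

    buf[buf_size - 1] = '\0';   /* safe: buf_size >= 1 here */
    /* ... parse the trigger description and register it ... */
    return nbytes;
}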
static void pt_guest_enter(struct vcpu_vmx *vmx)
{
    if (vmx_pt_mode_is_system())
        return;

    /*
     * GUEST_IA32_RTIT_CTL is already set in the VMCS.
     * Save host state before VM entry.
     */
    rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
    if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
        wrmsrl(MSR_IA32_RTIT_CTL, 0);
        pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
        pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
    }
}
Safe
[ "CWE-703" ]
linux
6cd88243c7e03845a450795e134b488fc2afb736
2.2930264875876404e+38
16
KVM: x86: do not report a vCPU as preempted outside instruction boundaries

If a vCPU is outside guest mode and is scheduled out, it might be in the process of making a memory access. A problem occurs if another vCPU uses the PV TLB flush feature during the period when the vCPU is scheduled out, and a virtual address has already been translated but has not yet been accessed, because this is equivalent to using a stale TLB entry.

To avoid this, only report a vCPU as preempted if sure that the guest is at an instruction boundary. A rescheduling request will be delivered to the host physical CPU as an external interrupt, so for simplicity consider any vmexit that is *not* an external interrupt as not being on an instruction boundary.

It would in principle be okay to report the vCPU as preempted also if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the vmentry/vmexit overhead unnecessarily, and optimistic spinning is also unlikely to succeed. However, leave it for later because right now kvm_vcpu_check_block() is doing memory accesses. Even though the TLB flush issue only applies to virtual memory addresses, it's very much preferable to be conservative.

Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
static void spl_heap_it_rewind(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
    /* do nothing, the iterator always points to the top element */
}
Safe
[]
php-src
1cbd25ca15383394ffa9ee8601c5de4c0f2f90e1
7.198428574097181e+36
4
Fix bug #69737 - Segfault when SplMinHeap::compare produces fatal error
0
static int ehci_get_pid(EHCIqtd *qtd)
{
    switch (get_field(qtd->token, QTD_TOKEN_PID)) {
    case 0:
        return USB_TOKEN_OUT;
    case 1:
        return USB_TOKEN_IN;
    case 2:
        return USB_TOKEN_SETUP;
    default:
        fprintf(stderr, "bad token\n");
        return 0;
    }
}
Safe
[]
qemu
791f97758e223de3290592d169f8e6339c281714
8.82496447686563e+37
14
usb: ehci: fix memory leak in ehci_init_transfer

In the ehci_init_transfer function, if 'cpage' is bigger than 4, the previously allocated 'p->sgl' is not freed, leading to a memory leak. This patch avoids this.

Signed-off-by: Li Qiang <liqiang6-s@360.cn>
Message-id: 5821c0f4.091c6b0a.e0c92.e811@mx.google.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
0
SplashUnivariatePattern::SplashUnivariatePattern(SplashColorMode colorModeA, GfxState *stateA, GfxUnivariateShading *shadingA)
{
    Matrix ctm;
    double xMin, yMin, xMax, yMax;

    shading = shadingA;
    state = stateA;
    colorMode = colorModeA;

    state->getCTM(&ctm);
    ctm.invertTo(&ictm);

    // get the function domain
    t0 = shading->getDomain0();
    t1 = shading->getDomain1();
    dt = t1 - t0;

    stateA->getUserClipBBox(&xMin, &yMin, &xMax, &yMax);
    shadingA->setupCache(&ctm, xMin, yMin, xMax, yMax);
    gfxMode = shadingA->getColorSpace()->getMode();
}
Safe
[ "CWE-369" ]
poppler
b224e2f5739fe61de9fa69955d016725b2a4b78d
1.9569572251992297e+38
20
SplashOutputDev::tilingPatternFill: Fix crash on broken file Issue #802
0
__releases(q->queue_lock)
{
    lockdep_assert_held(q->queue_lock);

    trace_block_unplug(q, depth, !from_schedule);

    if (from_schedule)
        blk_run_queue_async(q);
    else
        __blk_run_queue(q);
    spin_unlock_irq(q->queue_lock);
}
Safe
[ "CWE-416", "CWE-703" ]
linux
54648cf1ec2d7f4b6a71767799c45676a138ca24
7.784823597979054e+37
12
block: blk_init_allocated_queue() set q->fq as NULL in the fail case

We found a memory use-after-free issue in __blk_drain_queue() on kernel 4.14. After reading the latest kernel 4.18-rc6, we think it has the same problem.

Memory is allocated for q->fq in blk_init_allocated_queue(). If the elevator init function returns an error, the code runs into the fail case and frees q->fq. Then __blk_drain_queue() uses the same memory after the free of q->fq, which leads to unpredictable behavior.

The patch sets q->fq to NULL in the fail case of blk_init_allocated_queue().

Fixes: commit 7c94e1c157a2 ("block: introduce blk_flush_queue to drive flush machinery")
Cc: <stable@vger.kernel.org>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: xiao jin <jin.xiao@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
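A condensed sketch of the fail path after this fix; the _sketch suffix marks it as illustrative, since the real blk_init_allocated_queue() does considerably more setup.

struct request_queue *blk_init_allocated_queue_sketch(struct request_queue *q)
{
    q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
    if (!q->fq)
        return NULL;

    if (elevator_init(q, NULL))
        goto out_free_flush_queue;

    return q;

out_free_flush_queue:
    blk_free_flush_queue(q->fq);
    q->fq = NULL;   /* the fix: __blk_drain_queue() must not see freed memory */
    return NULL;
}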
static JSValue js_sys_basename(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv)
{
    return js_sys_file_opt(ctx, this_val, argc, argv, OPT_FILEBASENAME);
}
Safe
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
6.920495359763307e+37
4
fixed #2138
0
static void decode_finish_row(H264Context *h)
{
    int top            = 16 * (h->mb_y >> FIELD_PICTURE(h));
    int pic_height     = 16 * h->mb_height >> FIELD_PICTURE(h);
    int height         = 16 << FRAME_MBAFF(h);
    int deblock_border = (16 + 4) << FRAME_MBAFF(h);

    if (h->deblocking_filter) {
        if ((top + height) >= pic_height)
            height += deblock_border;
        top -= deblock_border;
    }

    if (top >= pic_height || (top + height) < 0)
        return;

    height = FFMIN(height, pic_height - top);
    if (top < 0) {
        height = top + height;
        top    = 0;
    }

    ff_h264_draw_horiz_band(h, top, height);

    if (h->droppable || h->er.error_occurred)
        return;

    ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
                              h->picture_structure == PICT_BOTTOM_FIELD);
}
Safe
[ "CWE-703" ]
FFmpeg
29ffeef5e73b8f41ff3a3f2242d356759c66f91f
1.881612396003551e+38
30
avcodec/h264: do not trust last_pic_droppable when marking pictures as done This simplifies the code and fixes a deadlock Fixes Ticket2927 Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
0
bool asn1_write_enumerated(struct asn1_data *data, uint8_t v)
{
    if (!asn1_push_tag(data, ASN1_ENUMERATED)) return false;
    if (!asn1_write_uint8(data, v)) return false;
    return asn1_pop_tag(data);
}
Safe
[ "CWE-399" ]
samba
9d989c9dd7a5b92d0c5d65287935471b83b6e884
2.425306354675049e+38
6
CVE-2015-7540: lib: util: Check *every* asn1 return call and early return. BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187 Signed-off-by: Jeremy Allison <jra@samba.org> Reviewed-by: Volker Lendecke <Volker.Lendecke@SerNet.DE> Autobuild-User(master): Jeremy Allison <jra@samba.org> Autobuild-Date(master): Fri Sep 19 01:29:00 CEST 2014 on sn-devel-104 (cherry picked from commit b9d3fd4cc551df78a7b066ee8ce43bbaa3ff994a)
0
static bool pathmatch(const char* cookie_path, const char* request_uri)
{
    size_t cookie_path_len;
    size_t uri_path_len;
    char* uri_path = NULL;
    char* pos;
    bool ret = FALSE;

    /* cookie_path must not have last '/' separator. ex: /sample */
    cookie_path_len = strlen(cookie_path);
    if(1 == cookie_path_len) {
        /* cookie_path must be '/' */
        return TRUE;
    }

    uri_path = strdup(request_uri);
    if(!uri_path)
        return FALSE;
    pos = strchr(uri_path, '?');
    if(pos)
        *pos = 0x0;
    /* #-fragments are already cut off! */
    if(0 == strlen(uri_path) || uri_path[0] != '/') {
        free(uri_path);
        uri_path = strdup("/");
        if(!uri_path)
            return FALSE;
    }

    /* here, RFC6265 5.1.4 says
       4. Output the characters of the uri-path from the first character up
          to, but not including, the right-most %x2F ("/").
       but URL path /hoge?fuga=xxx means /hoge/index.cgi?fuga=xxx in some site
       without redirect.
       Ignore this algorithm because /hoge is uri path for this case
       (uri path is not /). */

    uri_path_len = strlen(uri_path);

    if(uri_path_len < cookie_path_len) {
        ret = FALSE;
        goto pathmatched;
    }

    /* not using checkprefix() because matching should be case-sensitive */
    if(strncmp(cookie_path, uri_path, cookie_path_len)) {
        ret = FALSE;
        goto pathmatched;
    }

    /* The cookie-path and the uri-path are identical. */
    if(cookie_path_len == uri_path_len) {
        ret = TRUE;
        goto pathmatched;
    }

    /* here, cookie_path_len < url_path_len */
    if(uri_path[cookie_path_len] == '/') {
        ret = TRUE;
        goto pathmatched;
    }

    ret = FALSE;

pathmatched:
    free(uri_path);
    return ret;
}
Safe
[ "CWE-119" ]
curl
b5f947b8ac0e282c61c75b69cd5b9d37dafc6959
1.0043596869428143e+38
70
cookie: cookie parser out of boundary memory access The internal libcurl function called sanitize_cookie_path() that cleans up the path element as given to it from a remote site or when read from a file, did not properly validate the input. If given a path that consisted of a single double-quote, libcurl would index a newly allocated memory area with index -1 and assign a zero to it, thus destroying heap memory it wasn't supposed to. CVE-2015-3145 Bug: http://curl.haxx.se/docs/adv_20150422C.html Reported-by: Hanno Böck
0
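The advisory describes an unchecked index into a freshly stripped path. A minimal sketch of the guarded sanitizer, reconstructed from the description rather than copied from the upstream diff:

#include <stdlib.h>
#include <string.h>

static char *sanitize_cookie_path(const char *cookie_path)
{
    size_t len;
    char *new_path = strdup(cookie_path);
    if(!new_path)
        return NULL;

    /* strip a leading double-quote */
    if(new_path[0] == '\"')
        memmove(new_path, new_path + 1, strlen(new_path));

    /* the guard: for an input of a lone double-quote, len is now 0 and
       the unchecked version would have written to new_path[-1] */
    len = strlen(new_path);
    if(len && new_path[len - 1] == '\"')
        new_path[len - 1] = 0;

    return new_path;
}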
void SegmentSumFunctor<T, Index>::operator()(
    OpKernelContext* ctx, const GPUDevice& d, const Index output_rows,
    const TensorShape& segment_ids_shape,
    typename TTypes<Index>::ConstFlat segment_ids, const Index data_size,
    const T* data, typename TTypes<T, 2>::Tensor output) {
  if (output.size() == 0) {
    return;
  }
  // Set 'output' to zeros.
  GpuLaunchConfig config = GetGpuLaunchConfig(output.size(), d);
  TF_CHECK_OK(GpuLaunchKernel(SetZero<T>, config.block_count,
                              config.thread_per_block, 0, d.stream(),
                              output.size(), output.data()));
  if (data_size == 0 || segment_ids_shape.num_elements() == 0) {
    return;
  }

  // Launch kernel to compute sorted segment sum.
  // Notes:
  // *) 'input_total_size' is the total number of elements to process.
  // *) 'segment_ids.shape' is a prefix of data's shape.
  // *) 'input_outer_dim_size' is the total number of segments to process.
  const Index input_total_size = data_size;
  const Index input_outer_dim_size = segment_ids.dimension(0);
  const Index input_inner_dim_size = input_total_size / input_outer_dim_size;

  const int OuterDimTileSize = 8;

  const Index input_outer_dim_num_stripe =
      Eigen::divup(input_outer_dim_size, Index(OuterDimTileSize));

  const Index total_stripe_count =
      input_inner_dim_size * input_outer_dim_num_stripe;

  config = GetGpuLaunchConfig(total_stripe_count, d);
  TF_CHECK_OK(GpuLaunchKernel(
      SortedSegmentSumCustomKernel<T, Index, OuterDimTileSize>,
      config.block_count, config.thread_per_block, 0, d.stream(),
      input_outer_dim_size, input_inner_dim_size, output_rows,
      segment_ids.data(), data, output.data(), total_stripe_count));
}
Safe
[ "CWE-703", "CWE-681", "CWE-787" ]
tensorflow
db4f9717c41bccc3ce10099ab61996b246099892
2.9210941236017435e+38
41
Fix heap buffer overflow in UnsortedSegmentSum.

When Index=int32, data_size and num_segments were truncated from int64 to int32. This truncation can produce negative numbers, which causes UnsortedSegmentFunctor to access out of bounds memory.

Also:
- Switches some indexing calculations to int64 to avoid signed integer overflow when either the input or output tensors have more than 2**31 - 1 elements.
- Fixes a range check error in the GPU kernel. The segment ID was checked against an upper bound measured in elements, not segments.

PiperOrigin-RevId: 256451663
0
static bool anal_bb_edge (RCore *core, const char *input) {
    // "afbe" switch-bb-addr case-bb-addr
    char *arg = strdup (r_str_trim_ro(input));
    char *sp = strchr (arg, ' ');
    if (sp) {
        *sp++ = 0;
        ut64 sw_at = r_num_math (core->num, arg);
        ut64 cs_at = r_num_math (core->num, sp);
        RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, sw_at, 0);
        if (fcn) {
            RAnalBlock *bb;
            RListIter *iter;
            r_list_foreach (fcn->bbs, iter, bb) {
                if (sw_at >= bb->addr && sw_at < (bb->addr + bb->size)) {
                    if (!bb->switch_op) {
                        bb->switch_op = r_anal_switch_op_new (sw_at, 0, 0);
                    }
                    r_anal_switch_op_add_case (bb->switch_op, cs_at, 0, cs_at);
                }
            }
            free (arg);
            return true;
        }
    }
    free (arg);
    return false;
}
Safe
[ "CWE-125", "CWE-787" ]
radare2
a1bc65c3db593530775823d6d7506a457ed95267
2.9809913635639176e+38
28
Fix #12375 - Crash in bd+ao (#12382)
0
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb)
{
    struct rtattr *rta = tb[0];
    struct crypto_attr_type *algt;

    if (!rta)
        return ERR_PTR(-ENOENT);
    if (RTA_PAYLOAD(rta) < sizeof(*algt))
        return ERR_PTR(-EINVAL);
    if (rta->rta_type != CRYPTOA_TYPE)
        return ERR_PTR(-EINVAL);

    algt = RTA_DATA(rta);

    return algt;
}
Safe
[ "CWE-284", "CWE-264", "CWE-269" ]
linux
4943ba16bbc2db05115707b3ff7b4874e9e3c560
1.3840437803722165e+38
16
crypto: include crypto- module prefix in template

This adds the module loading prefix "crypto-" to the template lookup as well.

For example, attempting to load 'vfat(blowfish)' via AF_ALG now correctly includes the "crypto-" prefix at every level, correctly rejecting "vfat":

	net-pf-38
	algif-hash
	crypto-vfat(blowfish)
	crypto-vfat(blowfish)-all
	crypto-vfat

Reported-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
0
static int ntop_http_get(lua_State* vm) {
    char *url, *username = NULL, *pwd = NULL;
    int timeout = 30;
    bool return_content = true;

    ntop->getTrace()->traceEvent(TRACE_INFO, "%s() called", __FUNCTION__);

    if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TSTRING)) return(CONST_LUA_PARAM_ERROR);
    if((url = (char*)lua_tostring(vm, 1)) == NULL) return(CONST_LUA_PARAM_ERROR);

    if(lua_type(vm, 2) == LUA_TSTRING) {
        username = (char*)lua_tostring(vm, 2);

        if(lua_type(vm, 3) == LUA_TSTRING) {
            pwd = (char*)lua_tostring(vm, 3);

            if(lua_type(vm, 4) == LUA_TNUMBER) {
                timeout = lua_tointeger(vm, 4);
                if(timeout < 1) timeout = 1;

                /*
                   This optional parameter specifies if the result of HTTP GET has to be returned
                   to LUA or not. Usually the content has to be returned, but in some causes
                   it just matters to time (for instance when use for testing HTTP services)
                */
                if(lua_type(vm, 4) == LUA_TBOOLEAN) {
                    return_content = lua_toboolean(vm, 5) ? true : false;
                }
            }
        }
    }

    if(Utils::httpGet(vm, url, username, pwd, timeout, return_content))
        return(CONST_LUA_OK);
    else
        return(CONST_LUA_ERROR);
}
Safe
[ "CWE-254" ]
ntopng
2e0620be3410f5e22c9aa47e261bc5a12be692c6
2.0905912599567624e+38
37
Added security fix to avoid escalating privileges to non-privileged users Many thanks to Dolev Farhi for reporting it
0
relpTcpWaitWriteable(relpTcp_t *const pThis, struct timespec *const tTimeout)
{
    int r;
    struct timespec tCurr; /* current time */
    struct pollfd pfd;

    clock_gettime(CLOCK_REALTIME, &tCurr);
    const int timeout = (tTimeout->tv_sec - tCurr.tv_sec) * 1000
                        + (tTimeout->tv_nsec - tCurr.tv_nsec) / 1000000000;
    if(timeout < 0) {
        r = 0;
        goto done;
    }

    pThis->pEngine->dbgprint("librelp: telpTcpWaitWritable doing poll() "
                             "on fd %d, timoeut %d\n", pThis->sock, timeout);

    pfd.fd = pThis->sock;
    pfd.events = POLLOUT;
    r = poll(&pfd, 1, timeout);

done:
    return r;
}
Safe
[ "CWE-787" ]
librelp
2cfe657672636aa5d7d2a14cfcb0a6ab9d1f00cf
3.386664389605486e+38
21
unify error message generation
0
RZ_API void rz_analysis_var_delete(RzAnalysisVar *var) {
    rz_return_if_fail(var);
    RzAnalysisFunction *fcn = var->fcn;
    size_t i;
    for (i = 0; i < rz_pvector_len(&fcn->vars); i++) {
        RzAnalysisVar *v = rz_pvector_at(&fcn->vars, i);
        if (v == var) {
            rz_pvector_remove_at(&fcn->vars, i);
            var_free(v);
            return;
        }
    }
}
Safe
[ "CWE-703" ]
rizin
6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939
9.664096213891642e+37
13
Initialize retctx,ctx before freeing the inner elements

In rz_core_analysis_type_match the retctx structure was initialized on the stack only after a "goto out_function", where a field of that structure was freed. When the goto path is taken, the field is not properly initialized, which can cause a crash of Rizin or have other effects.

Fixes: CVE-2021-4022
0
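The pattern behind this fix generalizes: anything a cleanup label releases must be initialized before the first goto that can reach that label. A self-contained sketch (all names are illustrative, not Rizin code):

#include <stdlib.h>

static int precondition_fails(void) { return 1; } /* stand-in for real checks */

int analyze(void) {
    struct { char *buf; } retctx = { NULL }; /* init before any goto */
    int ret = -1;

    if (precondition_fails()) {
        goto out; /* safe: retctx.buf is NULL and free(NULL) is a no-op */
    }

    retctx.buf = malloc(64);
    if (!retctx.buf) {
        goto out;
    }
    ret = 0;

out:
    free(retctx.buf); /* would read stack garbage if retctx were uninitialized */
    return ret;
}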
static ssize_t callback_static_file_uncompressed_stream(void * cls, uint64_t pos, char * buf, size_t max) {
    (void)(pos);
    if (cls != NULL) {
        return fread (buf, sizeof(char), max, (FILE *)cls);
    } else {
        return U_STREAM_END;
    }
}
Safe
[ "CWE-269", "CWE-22" ]
glewlwyd
e3f7245c33897bf9b3a75acfcdb8b7b93974bf11
1.2462736093658907e+37
8
Fix file access check for directory traversal, and fix call for callback_static_file_uncompressed if header not set
0
static void ipip6_tunnel_setup(struct net_device *dev)
{
    dev->netdev_ops = &ipip6_netdev_ops;
    dev->destructor = free_netdev;

    dev->type = ARPHRD_SIT;
    dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
    dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr);
    dev->flags = IFF_NOARP;
    dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
    dev->iflink = 0;
    dev->addr_len = 4;
    dev->features |= NETIF_F_NETNS_LOCAL;
}
Safe
[]
linux-2.6
d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
1.7865526668038552e+38
14
tunnels: fix netns vs proto registration ordering Same stuff as in ip_gre patch: receive hook can be called before netns setup is done, oopsing in net_generic(). Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
GF_Err adaf_box_read(GF_Box *s, GF_BitStream *bs)
{
    GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox*)s;

    ISOM_DECREASE_SIZE(ptr, 3);
    ptr->selective_enc = gf_bs_read_u8(bs);
    gf_bs_read_u8(bs); //resersed
    ptr->IV_length = gf_bs_read_u8(bs);
    return GF_OK;
}
Safe
[ "CWE-703" ]
gpac
f19668964bf422cf5a63e4dbe1d3c6c75edadcbb
3.103789118850428e+38
10
fixed #1879
0
static const char *set_http_method(cmd_parms *cmd, void *conf, const char *arg)
{
    const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (err != NULL)
        return err;
    ap_method_register(cmd->pool, arg);
    return NULL;
}
Safe
[ "CWE-416", "CWE-284" ]
httpd
4cc27823899e070268b906ca677ee838d07cf67a
2.6773981697818644e+38
8
core: Disallow Methods' registration at run time (.htaccess), they may be used only if registered at init time (httpd.conf). Calling ap_method_register() in children processes is not the right scope since it won't be shared for all requests. git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68
0
content::FontAccessDelegate* ElectronBrowserClient::GetFontAccessDelegate() {
  if (!font_access_delegate_)
    font_access_delegate_ = std::make_unique<ElectronFontAccessDelegate>();
  return font_access_delegate_.get();
}
Safe
[]
electron
ea1f402417022c59c0794e97c87e6be2553989e7
3.202238976095013e+38
5
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33323) (#33350)

* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames

Notes: no-notes

* refactor: extract electron API IPC to its own mojo interface

* fix: just check main frame not primary main frame
0
void gdImageSetPixel (gdImagePtr im, int x, int y, int color)
{
    int p;
    switch (color) {
        case gdStyled:
            if (!im->style) {
                /* Refuse to draw if no style is set. */
                return;
            } else {
                p = im->style[im->stylePos++];
            }
            if (p != gdTransparent) {
                gdImageSetPixel(im, x, y, p);
            }
            im->stylePos = im->stylePos % im->styleLength;
            break;
        case gdStyledBrushed:
            if (!im->style) {
                /* Refuse to draw if no style is set. */
                return;
            }
            p = im->style[im->stylePos++];
            if (p != gdTransparent && p != 0) {
                gdImageSetPixel(im, x, y, gdBrushed);
            }
            im->stylePos = im->stylePos % im->styleLength;
            break;
        case gdBrushed:
            gdImageBrushApply(im, x, y);
            break;
        case gdTiled:
            gdImageTileApply(im, x, y);
            break;
        case gdAntiAliased:
            gdImageAntiAliasedApply(im, x, y);
            break;
        default:
            if (gdImageBoundsSafe(im, x, y)) {
                if (im->trueColor) {
                    switch (im->alphaBlendingFlag) {
                        default:
                        case gdEffectReplace:
                            im->tpixels[y][x] = color;
                            break;
                        case gdEffectAlphaBlend:
                            im->tpixels[y][x] = gdAlphaBlend(im->tpixels[y][x], color);
                            break;
                        case gdEffectNormal:
                            im->tpixels[y][x] = gdAlphaBlend(im->tpixels[y][x], color);
                            break;
                        case gdEffectOverlay:
                            im->tpixels[y][x] = gdLayerOverlay(im->tpixels[y][x], color);
                            break;
                    }
                } else {
                    im->pixels[y][x] = color;
                }
            }
            break;
    }
}
Safe
[ "CWE-119" ]
php-src
e7f2356665c2569191a946b6fc35b437f0ae1384
1.8462918094478854e+38
61
Fix #66387: Stack overflow with imagefilltoborder

The stack overflow is caused by the recursive algorithm in combination with a very large negative coordinate passed to gdImageFillToBorder(). As there is already clipping of large positive coordinates to the width and height of the image, it is consistent to also clip negative coordinates to zero.
0
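A sketch of the clipping the message describes, mirroring the existing positive-side clamp (the helper shape is illustrative, not the upstream diff):

/* clamp the fill origin into the image before the recursive fill runs */
static void clip_fill_origin(gdImagePtr im, int *x, int *y)
{
    if (*x < 0) *x = 0;
    else if (*x >= gdImageSX(im)) *x = gdImageSX(im) - 1;
    if (*y < 0) *y = 0;
    else if (*y >= gdImageSY(im)) *y = gdImageSY(im) - 1;
}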
tsize_t t2p_write_pdf_xobject_icccs_dict(T2P* t2p, TIFF* output){
    tsize_t written=0;
    char buffer[32];
    int buflen=0;

    written += t2pWriteFile(output, (tdata_t) "/N ", 3);
    buflen=snprintf(buffer, sizeof(buffer), "%u \n", t2p->tiff_samplesperpixel);
    check_snprintf_ret(t2p, buflen, buffer);
    written += t2pWriteFile(output, (tdata_t) buffer, buflen);
    written += t2pWriteFile(output, (tdata_t) "/Alternate ", 11);
    t2p->pdf_colorspace ^= T2P_CS_ICCBASED;
    written += t2p_write_pdf_xobject_cs(t2p, output);
    t2p->pdf_colorspace |= T2P_CS_ICCBASED;
    written += t2p_write_pdf_stream_dict(t2p->tiff_iccprofilelength, 0, output);

    return(written);
}
Safe
[ "CWE-787" ]
libtiff
7be2e452ddcf6d7abca88f41d3761e6edab72b22
1.3117775247684563e+38
18
tiff2pdf.c: properly calculate datasize when saving to JPEG YCbCr fixes #220
0
static RCmdDesc *argv_new(RCmd *cmd, RCmdDesc *parent, const char *name, RCmdArgvCb cb, const RCmdDescHelp *help, bool ht_insert) {
    RCmdDesc *res = create_cmd_desc (cmd, parent, R_CMD_DESC_TYPE_ARGV, name, help, ht_insert);
    if (!res) {
        return NULL;
    }
    res->d.argv_data.cb = cb;
    return res;
}
Safe
[ "CWE-125", "CWE-787" ]
radare2
0052500c1ed5bf8263b26b9fd7773dbdc6f170c4
3.302824782323459e+38
9
Fix heap OOB read in macho.iterate_chained_fixups ##crash

* Reported by peacock-doris via huntr.dev
* Reproducer 'tests_65305'

mrmacete:
* Return early if segs_count is 0
* Initialize segs_count also for reconstructed fixups

Co-authored-by: pancake <pancake@nopcode.org>
Co-authored-by: Francesco Tamagni <mrmacete@protonmail.ch>
0
e_ews_connection_download_oal_file_sync (EEwsConnection *cnc,
                                         const gchar *cache_filename,
                                         EwsProgressFn progress_fn,
                                         gpointer progress_data,
                                         GCancellable *cancellable,
                                         GError **error)
{
    EAsyncClosure *closure;
    GAsyncResult *result;
    gboolean success;

    g_return_val_if_fail (E_IS_EWS_CONNECTION (cnc), FALSE);

    closure = e_async_closure_new ();

    e_ews_connection_download_oal_file (
        cnc, cache_filename, progress_fn, progress_data,
        cancellable, e_async_closure_callback, closure);

    result = e_async_closure_wait (closure);

    success = e_ews_connection_download_oal_file_finish (cnc, result, error);

    e_async_closure_free (closure);

    return success;
}
Safe
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
2.1263771238780704e+38
29
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
0
void ha_partition::column_bitmaps_signal()
{
    handler::column_bitmaps_signal();
    /* Must read all partition fields to make position() call possible */
    bitmap_union(table->read_set, &m_part_info->full_part_field_set);
}
Safe
[]
mysql-server
be901b60ae59c93848c829d1b0b2cb523ab8692e
1.2472350848806367e+38
6
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT.

Analysis
========
CREATE TABLE of an InnoDB table with a partition name which exceeds the path limit can cause the server to exit. During the preparation of the partition name, there was no check to identify whether the complete path name for the partition exceeds the max supported path length, causing the server to exit during subsequent processing.

Fix
===
During the preparation of the partition name, check and report an error if the partition path name exceeds the maximum path name limit.

This is a 5.5 patch.
0
static unsigned int poll(struct file *file, struct socket *sock,
                         poll_table *wait)
{
    struct sock *sk = sock->sk;
    u32 mask = 0;

    sock_poll_wait(file, sk_sleep(sk), wait);

    switch ((int)sock->state) {
    case SS_UNCONNECTED:
        if (!tipc_sk_port(sk)->congested)
            mask |= POLLOUT;
        break;
    case SS_READY:
    case SS_CONNECTED:
        if (!tipc_sk_port(sk)->congested)
            mask |= POLLOUT;
        /* fall thru' */
    case SS_CONNECTING:
    case SS_LISTENING:
        if (!skb_queue_empty(&sk->sk_receive_queue))
            mask |= (POLLIN | POLLRDNORM);
        break;
    case SS_DISCONNECTING:
        mask = (POLLIN | POLLRDNORM | POLLHUP);
        break;
    }

    return mask;
}
Safe
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
1.2885592542772956e+38
30
net: rework recvmsg handler msg_name and msg_namelen logic

This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user.

This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory.

Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL.

Also document these changes in include/linux/net.h as suggested by David Miller.

Changes since RFC:

Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec.

With this change in place I could remove "if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL".

This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL.

Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style.

Cc: David Miller <davem@davemloft.net>
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
0
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
    id->dev_class = timer->tmr_class;
    id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
    id->card = timer->card ? timer->card->number : -1;
    id->device = timer->tmr_device;
    id->subdevice = timer->tmr_subdevice;
}
Safe
[ "CWE-200", "CWE-362" ]
linux
ee8413b01045c74340aa13ad5bdf905de32be736
3.318266437687332e+37
8
ALSA: timer: Fix double unlink of active_list ALSA timer instance object has a couple of linked lists and they are unlinked unconditionally at snd_timer_stop(). Meanwhile snd_timer_interrupt() unlinks it, but it calls list_del() which leaves the element list itself unchanged. This ends up with unlinking twice, and it was caught by syzkaller fuzzer. The fix is to use list_del_init() variant properly there, too. Reported-by: Dmitry Vyukov <dvyukov@google.com> Tested-by: Dmitry Vyukov <dvyukov@google.com> Cc: <stable@vger.kernel.org> Signed-off-by: Takashi Iwai <tiwai@suse.de>
0
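The essence of this fix fits in one contrast. A sketch (the struct and field names are assumed from the message, not copied from the patch):

/* list_del() leaves the removed node's next/prev pointers dangling, so a
 * second unlink from another path (here, the timer interrupt) corrupts
 * the list. list_del_init() re-initializes the node to point at itself,
 * making repeated unlinks harmless. */
static void unlink_timer_instance(struct snd_timer_instance *ti)
{
    list_del_init(&ti->active_list); /* safe even if already unlinked */
}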
static void raw_cmd_done(int flag)
{
    int i;

    if (!flag) {
        raw_cmd->flags |= FD_RAW_FAILURE;
        raw_cmd->flags |= FD_RAW_HARDFAILURE;
    } else {
        raw_cmd->reply_count = inr;
        if (raw_cmd->reply_count > MAX_REPLIES)
            raw_cmd->reply_count = 0;
        for (i = 0; i < raw_cmd->reply_count; i++)
            raw_cmd->reply[i] = reply_buffer[i];

        if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
            unsigned long flags;
            flags = claim_dma_lock();
            raw_cmd->length = fd_get_dma_residue();
            release_dma_lock(flags);
        }

        if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
            (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
            raw_cmd->flags |= FD_RAW_FAILURE;

        if (disk_change(current_drive))
            raw_cmd->flags |= FD_RAW_DISK_CHANGE;
        else
            raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
        if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
            motor_off_callback(&motor_off_timer[current_drive]);

        if (raw_cmd->next &&
            (!(raw_cmd->flags & FD_RAW_FAILURE) ||
             !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
            ((raw_cmd->flags & FD_RAW_FAILURE) ||
             !(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) {
            raw_cmd = raw_cmd->next;
            return;
        }
    }
    generic_done(flag);
}
Safe
[ "CWE-190", "CWE-125" ]
linux
da99466ac243f15fbba65bd261bfc75ffa1532b6
9.364553806337444e+37
43
floppy: fix out-of-bounds read in copy_buffer This fixes a global out-of-bounds read access in the copy_buffer function of the floppy driver. The FDDEFPRM ioctl allows one to set the geometry of a disk. The sect and head fields (unsigned int) of the floppy_drive structure are used to compute the max_sector (int) in the make_raw_rw_request function. It is possible to overflow the max_sector. Next, max_sector is passed to the copy_buffer function and used in one of the memcpy calls. An unprivileged user could trigger the bug if the device is accessible, but requires a floppy disk to be inserted. The patch adds the check for the .sect * .head multiplication for not overflowing in the set_geometry function. The bug was found by syzkaller. Signed-off-by: Denis Efremov <efremov@ispras.ru> Tested-by: Willy Tarreau <w@1wt.eu> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
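A sketch of the set_geometry() guard the message describes (the floppy_struct field names are real; the helper shape and exact condition are illustrative):

static int check_floppy_geometry(const struct floppy_struct *g)
{
    if (g->sect <= 0 || g->head <= 0)
        return -EINVAL;
    /* g->sect and g->head are unsigned int; their product can wrap and
     * later turn the int max_sector negative in make_raw_rw_request() */
    if ((int)(g->sect * g->head) <= 0)
        return -EINVAL;
    return 0;
}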
static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
                             struct survey_info *survey)
{
    struct b43_wl *wl = hw_to_b43_wl(hw);
    struct b43_wldev *dev = wl->current_dev;
    struct ieee80211_conf *conf = &hw->conf;

    if (idx != 0)
        return -ENOENT;

    survey->channel = conf->chandef.chan;
    survey->filled = SURVEY_INFO_NOISE_DBM;
    survey->noise = dev->stats.link_noise;

    return 0;
}
Safe
[ "CWE-134" ]
wireless
9538cbaab6e8b8046039b4b2eb6c9d614dc782bd
1.9505746607650597e+38
16
b43: stop format string leaking into error msgs The module parameter "fwpostfix" is userspace controllable, unfiltered, and is used to define the firmware filename. b43_do_request_fw() populates ctx->errors[] on error, containing the firmware filename. b43err() parses its arguments as a format string. For systems with b43 hardware, this could lead to a uid-0 to ring-0 escalation. CVE-2013-2852 Signed-off-by: Kees Cook <keescook@chromium.org> Cc: stable@vger.kernel.org Signed-off-by: John W. Linville <linville@tuxdriver.com>
0
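This bug class and its fix fit in a two-line contrast; the buffer expression is illustrative, b43err() is the driver's error printer named in the message:

/* bad: fwpostfix-derived text is parsed for '%' conversions */
b43err(wl, ctx->errors[i]);

/* good: the untrusted text is passed as data, never as a format */
b43err(wl, "%s", ctx->errors[i]);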
static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                                      int emulation_type)
{
    return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
Safe
[]
kvm
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
2.3063186757096263e+38
5
KVM: x86: Check non-canonical addresses upon WRMSR

Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is written to certain MSRs. The behavior is "almost" identical for AMD and Intel (ignoring MSRs that are not implemented in either architecture since they would anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if a non-canonical address is written on Intel but not on AMD (which ignores the top 32-bits).

Accordingly, this patch injects a #GP on the MSRs which behave identically on Intel and AMD. To eliminate the differences between the architectures, the value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned into a canonical value before writing instead of injecting a #GP.

Some references from Intel and AMD manuals:

According to the Intel SDM description of the WRMSR instruction, #GP is expected on WRMSR "If the source register contains a non-canonical address and ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."

According to the AMD instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must be in canonical form."

This patch fixes CVE-2014-3610.

Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
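A sketch of the two-pronged handling the message describes, as a fragment of a hypothetical MSR-write switch; is_noncanonical_address() and get_canonical() are helper names suggested by the description, not verified signatures:

switch (msr_index) {
case MSR_LSTAR:
case MSR_CSTAR:
case MSR_KERNEL_GS_BASE:
    /* identical on Intel and AMD: non-canonical => inject #GP */
    if (is_noncanonical_address(data))
        return 1; /* caller injects #GP */
    break;
case MSR_IA32_SYSENTER_EIP:
case MSR_IA32_SYSENTER_ESP:
    /* Intel #GPs here but AMD ignores the top bits, so canonicalize
     * the value instead of injecting, making both behave the same */
    data = get_canonical(data);
    break;
}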
int kvm_mmu_max_mapping_level(struct kvm *kvm,
                              const struct kvm_memory_slot *slot, gfn_t gfn,
                              kvm_pfn_t pfn, int max_level)
{
    struct kvm_lpage_info *linfo;
    int host_level;

    max_level = min(max_level, max_huge_page_level);
    for ( ; max_level > PG_LEVEL_4K; max_level--) {
        linfo = lpage_info_slot(gfn, slot, max_level);
        if (!linfo->disallow_lpage)
            break;
    }

    if (max_level == PG_LEVEL_4K)
        return PG_LEVEL_4K;

    host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
    return min(host_level, max_level);
}
Safe
[ "CWE-476" ]
linux
9f46c187e2e680ecd9de7983e4d081c3391acc76
8.829728485717466e+37
20
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID

With shadow paging enabled, the INVPCID instruction results in a call to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the invlpg callback is not set and the result is a NULL pointer dereference. Fix it trivially by checking for mmu->invlpg before every call.

There are other possibilities:

- check for CR0.PG, because KVM (like all Intel processors after P5) flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a nop with paging disabled

- check for EFER.LMA, because KVM syncs and flushes when switching MMU contexts outside of 64-bit mode

All of these are tricky, go for the simple solution.

This is CVE-2022-1789.

Reported-by: Yongkang Jia <kangel@zju.edu.cn>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
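The trivial check named in the message, sketched as a wrapper (the wrapper itself is illustrative; the real patch adds the check at each call site in kvm_mmu_invpcid_gva()):

static void mmu_invlpg_checked(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               gva_t gva, hpa_t root_hpa)
{
    /* with CR0.PG=0 no shadow walker is installed and mmu->invlpg is
     * NULL; calling through it unguarded is the CVE-2022-1789 oops */
    if (mmu->invlpg)
        mmu->invlpg(vcpu, gva, root_hpa);
}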
check_file_permissions_reduced(i_ctx_t *i_ctx_p, const char *fname, int len,
                               gx_io_device *iodev, const char *permitgroup)
{
    long i;
    ref *permitlist = NULL;
    /* an empty string (first character == 0) if '\' character is */
    /* recognized as a file name separator as on DOS & Windows    */
    const char *win_sep2 = "\\";
    bool use_windows_pathsep = (gs_file_name_check_separator(win_sep2, 1, win_sep2) == 1);
    uint plen = gp_file_name_parents(fname, len);

    /* we're protecting arbitrary file system accesses, not Postscript device accesses.
     * Although, note that %pipe% is explicitly checked for and disallowed elsewhere
     */
    if (iodev != iodev_default(imemory)) {
        return 0;
    }

    /* Assuming a reduced file name. */

    if (dict_find_string(&(i_ctx_p->userparams), permitgroup, &permitlist) <= 0)
        return 0;       /* if Permissions not found, just allow access */

    for (i=0; i<r_size(permitlist); i++) {
        ref permitstring;
        const string_match_params win_filename_params = {
            '*', '?', '\\', true, true      /* ignore case & '/' == '\\' */
        };
        const byte *permstr;
        uint permlen;
        int cwd_len = 0;

        if (array_get(imemory, permitlist, i, &permitstring) < 0 ||
            r_type(&permitstring) != t_string
           )
            break;      /* any problem, just fail */

        permstr = permitstring.value.bytes;
        permlen = r_size(&permitstring);
        /*
         * Check if any file name is permitted with "*".
         */
        if (permlen == 1 && permstr[0] == '*')
            return 0;           /* success */

        /*
         * If the filename starts with parent references,
         * the permission element must start with same number of parent references.
         */
        if (plen != 0 && plen != gp_file_name_parents((const char *)permstr, permlen))
            continue;
        cwd_len = gp_file_name_cwds((const char *)permstr, permlen);

        /*
         * If the permission starts with "./", absolute paths
         * are not permitted.
         */
        if (cwd_len > 0 && gp_file_name_is_absolute(fname, len))
            continue;

        /*
         * If the permission starts with "./", relative paths
         * with no "./" are allowed as well as with "./".
         * 'fname' has no "./" because it is reduced.
         */
        if (string_match( (const unsigned char*) fname, len,
                          permstr + cwd_len, permlen - cwd_len,
                          use_windows_pathsep ? &win_filename_params : NULL))
            return 0;           /* success */
    }
    /* not found */
    return gs_error_invalidfileaccess;
}
Vulnerable
[]
ghostpdl
0d3901189f245232f0161addf215d7268c4d05a3
4.268067420768732e+37
68
Bug 699657: properly apply file permissions to .tempfile
1
R_API void r_str_stripLine(char *str, const char *key) {
    size_t i, j, klen, slen, off;
    const char *ptr;

    if (!str || !key) {
        return;
    }
    klen = strlen (key);
    slen = strlen (str);

    for (i = 0; i < slen; ) {
        ptr = (char*) r_mem_mem ((ut8*) str + i, slen - i, (ut8*) "\n", 1);
        if (!ptr) {
            ptr = (char*) r_mem_mem ((ut8*) str + i, slen - i, (ut8*) key, klen);
            if (ptr) {
                str[i] = '\0';
                break;
            }
            break;
        }

        off = (size_t) (ptr - (str + i)) + 1;

        ptr = (char*) r_mem_mem ((ut8*) str + i, off, (ut8*) key, klen);
        if (ptr) {
            for (j = i; j < slen - off + 1; j++) {
                str[j] = str[j + off];
            }
            slen -= off;
        } else {
            i += off;
        }
    }
}
Safe
[ "CWE-78" ]
radare2
04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9
1.3486192891337198e+38
34
Fix command injection on PDB download (#16966)

* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments

Co-authored-by: pancake <pancake@nopcode.org>
0
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
                         struct hsr_port *port)
{
    struct hsr_node *node_dst;

    if (!skb_mac_header_was_set(skb)) {
        WARN_ONCE(1, "%s: Mac header not set\n", __func__);
        return;
    }

    if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
        return;

    node_dst = find_node_by_AddrA(&port->hsr->node_db, eth_hdr(skb)->h_dest);
    if (!node_dst) {
        WARN_ONCE(1, "%s: Unknown node\n", __func__);
        return;
    }
    if (port->type != node_dst->AddrB_port)
        return;

    ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->MacAddressB);
}
Safe
[ "CWE-772", "CWE-269", "CWE-401" ]
linux
6caabe7f197d3466d238f70915d65301f1716626
2.546487323329324e+38
23
net: hsr: fix memory leak in hsr_dev_finalize()

If hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER) failed to add a port, it directly returns res and forgets to free the node that was allocated in hsr_create_self_node(), and forgets to delete the node->mac_list linked into hsr->self_node_db.

BUG: memory leak
unreferenced object 0xffff8881cfa0c780 (size 64):
  comm "syz-executor.0", pid 2077, jiffies 4294717969 (age 2415.377s)
  hex dump (first 32 bytes):
    e0 c7 a0 cf 81 88 ff ff 00 02 00 00 00 00 ad de  ................
    00 e6 49 cd 81 88 ff ff c0 9b 87 d0 81 88 ff ff  ..I.............
  backtrace:
    [<00000000e2ff5070>] hsr_dev_finalize+0x736/0x960 [hsr]
    [<000000003ed2e597>] hsr_newlink+0x2b2/0x3e0 [hsr]
    [<000000003fa8c6b6>] __rtnl_newlink+0xf1f/0x1600 net/core/rtnetlink.c:3182
    [<000000001247a7ad>] rtnl_newlink+0x66/0x90 net/core/rtnetlink.c:3240
    [<00000000e7d1b61d>] rtnetlink_rcv_msg+0x54e/0xb90 net/core/rtnetlink.c:5130
    [<000000005556bd3a>] netlink_rcv_skb+0x129/0x340 net/netlink/af_netlink.c:2477
    [<00000000741d5ee6>] netlink_unicast_kernel net/netlink/af_netlink.c:1310 [inline]
    [<00000000741d5ee6>] netlink_unicast+0x49a/0x650 net/netlink/af_netlink.c:1336
    [<000000009d56f9b7>] netlink_sendmsg+0x88b/0xdf0 net/netlink/af_netlink.c:1917
    [<0000000046b35c59>] sock_sendmsg_nosec net/socket.c:621 [inline]
    [<0000000046b35c59>] sock_sendmsg+0xc3/0x100 net/socket.c:631
    [<00000000d208adc9>] __sys_sendto+0x33e/0x560 net/socket.c:1786
    [<00000000b582837a>] __do_sys_sendto net/socket.c:1798 [inline]
    [<00000000b582837a>] __se_sys_sendto net/socket.c:1794 [inline]
    [<00000000b582837a>] __x64_sys_sendto+0xdd/0x1b0 net/socket.c:1794
    [<00000000c866801d>] do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
    [<00000000fea382d9>] entry_SYSCALL_64_after_hwframe+0x49/0xbe
    [<00000000e01dacb3>] 0xffffffffffffffff

Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: Mao Wenan <maowenan@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
0
static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
    int len, char ev_type, struct mon_bus *mbus)
{
    void *src;

    if (len <= 0)
        return 'L';
    if (len >= DATA_MAX)
        len = DATA_MAX;

    if (ep->is_in) {
        if (ev_type != 'C')
            return '<';
    } else {
        if (ev_type != 'S')
            return '>';
    }

    if (urb->num_sgs == 0) {
        src = urb->transfer_buffer;
        if (src == NULL)
            return 'Z';     /* '0' would be not as pretty. */
    } else {
        struct scatterlist *sg = urb->sg;

        if (PageHighMem(sg_page(sg)))
            return 'D';

        /* For the text interface we copy only the first sg buffer */
        len = min_t(int, sg->length, len);
        src = sg_virt(sg);
    }

    memcpy(ep->data, src, len);
    return 0;
}
Safe
[ "CWE-787" ]
linux
a5f596830e27e15f7a0ecd6be55e433d776986d8
5.029561263060605e+37
36
usb: usbmon: Read text within supplied buffer size This change fixes buffer overflows and silent data corruption with the usbmon device driver text file read operations. Signed-off-by: Fredrik Noring <noring@nocrew.org> Signed-off-by: Pete Zaitcev <zaitcev@redhat.com> Cc: stable <stable@vger.kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
0
TEST(EqOp, MatchesArrayValue) {
    BSONObj operand = BSON("a" << 5);
    EqualityMatchExpression eq("a", operand["a"]);
    ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL));
    ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
}
Safe
[]
mongo
64095239f41e9f3841d8be9088347db56d35c891
2.0865700927126616e+38
6
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
0
guint16 rtps_util_add_vendor_id(proto_tree *tree, tvbuff_t *tvb, gint offset) {
    guint8 major, minor;
    guint16 vendor_id;

    major = tvb_get_guint8(tvb, offset);
    minor = tvb_get_guint8(tvb, offset+1);
    vendor_id = tvb_get_ntohs(tvb, offset);

    proto_tree_add_uint_format_value(tree, hf_rtps_vendor_id, tvb, offset, 2,
                                     vendor_id, "%02d.%02d (%s)", major, minor,
                                     val_to_str_const(vendor_id, vendor_vals, "Unknown"));

    return vendor_id;
}
Safe
[ "CWE-401" ]
wireshark
33e63d19e5496c151bad69f65cdbc7cba2b4c211
1.3471592572755135e+38
16
RTPS: Fixup our coherent set map. coherent_set_tracking.coherent_set_registry_map uses a struct as a key, but the hash and comparison routines treat keys as a sequence of bytes. Make sure every key byte is initialized. Fixes #16994. Call wmem_strong_hash on our key in coherent_set_key_hash_by_key instead of creating and leaking a GBytes struct.
0
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
    void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

    trace_kmem_cache_alloc_node(_RET_IP_, ret,
                                s->object_size, s->size, gfpflags, node);

    return ret;
}
Safe
[]
linux
fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8
9.210361560877217e+37
9
mm: slub: add missing TID bump in kmem_cache_alloc_bulk() When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu freelist of length M, and N > M > 0, it will first remove the M elements from the percpu freelist, then call ___slab_alloc() to allocate the next element and repopulate the percpu freelist. ___slab_alloc() can re-enable IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc() to properly commit the freelist head change. Fix it by unconditionally bumping c->tid when entering the slowpath. Cc: stable@vger.kernel.org Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy") Signed-off-by: Jann Horn <jannh@google.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
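A condensed sketch of the bump inside kmem_cache_alloc_bulk()'s slowpath, per the message; the surrounding loop is simplified, and only the c->tid line is the fix:

/* inside the bulk-allocation loop, when the percpu freelist runs dry */
if (unlikely(!object)) {
    /* the fix: ___slab_alloc() may re-enable IRQs, so bump the
     * transaction id first to commit the earlier freelist take */
    c->tid = next_tid(c->tid);

    object = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_, c);
    if (unlikely(!object))
        goto error;
}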
int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
{
    uint i;
    uint j= 0;
    bool found= FALSE;
    uchar *part_rec_buf_ptr= m_ordered_rec_buffer;
    int saved_error= HA_ERR_END_OF_FILE;
    DBUG_ENTER("ha_partition::handle_ordered_index_scan");

    if (m_key_not_found)
    {
        m_key_not_found= false;
        bitmap_clear_all(&m_key_not_found_partitions);
    }
    m_top_entry= NO_CURRENT_PART_ID;
    queue_remove_all(&m_queue);

    /*
      Position part_rec_buf_ptr to point to the first used partition >=
      start_part. There may be partitions marked by used_partitions,
      but is before start_part. These partitions has allocated record buffers
      but is dynamically pruned, so those buffers must be skipped.
    */
    uint first_used_part= bitmap_get_first_set(&m_part_info->used_partitions);
    for (; first_used_part < m_part_spec.start_part; first_used_part++)
    {
        if (bitmap_is_set(&(m_part_info->used_partitions), first_used_part))
            part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
    }
    DBUG_PRINT("info", ("m_part_spec.start_part %u first_used_part %u",
                        m_part_spec.start_part, first_used_part));
    for (i= first_used_part; i <= m_part_spec.end_part; i++)
    {
        if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
            continue;
        DBUG_PRINT("info", ("reading from part %u (scan_type: %u)",
                            i, m_index_scan_type));
        DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr));
        uchar *rec_buf_ptr= part_rec_buf_ptr + PARTITION_BYTES_IN_POS;
        int error;
        handler *file= m_file[i];

        switch (m_index_scan_type) {
        case partition_index_read:
            error= file->index_read_map(rec_buf_ptr,
                                        m_start_key.key,
                                        m_start_key.keypart_map,
                                        m_start_key.flag);
            break;
        case partition_index_first:
            error= file->index_first(rec_buf_ptr);
            reverse_order= FALSE;
            break;
        case partition_index_last:
            error= file->index_last(rec_buf_ptr);
            reverse_order= TRUE;
            break;
        case partition_index_read_last:
            error= file->index_read_last_map(rec_buf_ptr,
                                             m_start_key.key,
                                             m_start_key.keypart_map);
            reverse_order= TRUE;
            break;
        case partition_read_range:
        {
            /*
              This can only read record to table->record[0], as it was set when
              the table was being opened. We have to memcpy data ourselves.
            */
            error= file->read_range_first(m_start_key.key? &m_start_key: NULL,
                                          end_range, eq_range, TRUE);
            memcpy(rec_buf_ptr, table->record[0], m_rec_length);
            reverse_order= FALSE;
            break;
        }
        default:
            DBUG_ASSERT(FALSE);
            DBUG_RETURN(HA_ERR_END_OF_FILE);
        }
        if (!error)
        {
            found= TRUE;
            /*
              Initialize queue without order first, simply insert
            */
            queue_element(&m_queue, j++)= part_rec_buf_ptr;
        }
        else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
        {
            DBUG_RETURN(error);
        }
        else if (error == HA_ERR_KEY_NOT_FOUND)
        {
            DBUG_PRINT("info", ("HA_ERR_KEY_NOT_FOUND from partition %u", i));
            bitmap_set_bit(&m_key_not_found_partitions, i);
            m_key_not_found= true;
            saved_error= error;
        }
        part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
    }
    if (found)
    {
        /*
          We found at least one partition with data, now sort all entries and
          after that read the first entry and copy it to the buffer to return in.
        */
        queue_set_max_at_top(&m_queue, reverse_order);
        queue_set_cmp_arg(&m_queue, (void*)m_curr_key_info);
        m_queue.elements= j;
        queue_fix(&m_queue);
        return_top_record(buf);
        table->status= 0;
        DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
        DBUG_RETURN(0);
    }
    DBUG_RETURN(saved_error);
}
Safe
[]
mysql-server
be901b60ae59c93848c829d1b0b2cb523ab8692e
4.805283609918976e+37
117
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT. Analysis ======== CREATE TABLE of InnoDB table with a partition name which exceeds the path limit can cause the server to exit. During the preparation of the partition name, there was no check to identify whether the complete path name for partition exceeds the max supported path length, causing the server to exit during subsequent processing. Fix === During the preparation of partition name, check and report an error if the partition path name exceeds the maximum path name limit. This is a 5.5 patch.
0
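The fix above reduces to validating the assembled partition path length before it is used. A minimal sketch of the pattern, in plain C; the limit and the MySQL-style "#P#" separator are illustrative assumptions, not taken from the patch itself:

    #include <stdio.h>

    #define MAX_PATH_LEN 512  /* illustrative stand-in for the server's real path limit */

    /* Build the partition file name with a bounded snprintf and report
     * failure when the result would not fit, instead of letting later
     * code operate on a truncated or overflowing path. */
    static int make_partition_path(char *buf, size_t bufsize,
                                   const char *table_path, const char *part_name)
    {
        int n = snprintf(buf, bufsize, "%s#P#%s", table_path, part_name);
        if (n < 0 || (size_t)n >= bufsize)
            return -1;  /* the real fix raises an explicit "path too long" error here */
        return 0;
    }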
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) { struct rt6_info *rt; rt = (struct rt6_info *) dst; /* All IPV6 dsts are created with ->obsolete set to the value * DST_OBSOLETE_FORCE_CHK which forces validation calls down * into this function always. */ if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie)) return NULL; if (rt6_check_expired(rt)) return NULL; return dst; }
Safe
[ "CWE-17" ]
linux-stable
9d289715eb5c252ae15bd547cb252ca547a3c4f2
2.0761749244519497e+38
18
ipv6: stop sending PTB packets for MTU < 1280 Reduce the attack vector and stop generating IPv6 Fragment Header for paths with an MTU smaller than the minimum required IPv6 MTU size (1280 byte) - called atomic fragments. See IETF I-D "Deprecating the Generation of IPv6 Atomic Fragments" [1] for more information and how this "feature" can be misused. [1] https://tools.ietf.org/html/draft-ietf-6man-deprecate-atomfrag-generation-00 Signed-off-by: Fernando Gont <fgont@si6networks.com> Signed-off-by: Hagen Paul Pfeifer <hagen@jauu.net> Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
0
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); struct page **pages; void *addr = NULL; *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); if (gfp & GFP_ATOMIC) return __iommu_alloc_atomic(dev, size, handle); /* * Following is a work-around (a.k.a. hack) to prevent pages * with __GFP_COMP being passed to split_page() which cannot * handle them. The real problem is that this flag probably * should be 0 on ARM as it is not supported on this * platform; see CONFIG_HUGETLBFS. */ gfp &= ~(__GFP_COMP); pages = __iommu_alloc_buffer(dev, size, gfp, attrs); if (!pages) return NULL; *handle = __iommu_create_mapping(dev, pages, size); if (*handle == DMA_ERROR_CODE) goto err_buffer; if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) return pages; addr = __iommu_alloc_remap(pages, size, gfp, prot, __builtin_return_address(0)); if (!addr) goto err_mapping; return addr; err_mapping: __iommu_remove_mapping(dev, *handle, size); err_buffer: __iommu_free_buffer(dev, pages, size, attrs); return NULL; }
Safe
[ "CWE-284", "CWE-264" ]
linux
0ea1ec713f04bdfac343c9702b21cd3a7c711826
2.713929699056341e+38
46
ARM: dma-mapping: don't allow DMA mappings to be marked executable DMA mapping permissions were being derived from pgprot_kernel directly without using PAGE_KERNEL. This causes them to be marked with executable permission, which is not what we want. Fix this. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
0
onig_region_init(OnigRegion* region) { region->num_regs = 0; region->allocated = 0; region->beg = (int* )0; region->end = (int* )0; region->history_root = (OnigCaptureTreeNode* )0; }
Safe
[ "CWE-125" ]
oniguruma
690313a061f7a4fa614ec5cc8368b4f2284e059b
2.6235571141006536e+38
8
fix #57 : DATA_ENSURE() check must be before data access
0
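The one-line commit message above encodes a general rule: the remaining-bytes check must precede the dereference. A minimal sketch of that ordering, where ensure() is a hypothetical stand-in for oniguruma's DATA_ENSURE macro:

    #include <stddef.h>

    /* Returns nonzero when at least n more bytes are readable. */
    static int ensure(const unsigned char *p, const unsigned char *end, size_t n)
    {
        return (size_t)(end - p) >= n;
    }

    /* wrong: v = p[1]; if (!ensure(p, end, 2)) goto fail;   -- reads first, checks later
     * right: if (!ensure(p, end, 2)) goto fail; v = p[1];   -- checks first, then reads */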
uchar *in_datetime::get_value(Item *item) { tmp.val= item->val_datetime_packed(current_thd); if (item->null_value) return 0; tmp.unsigned_flag= 1L; return (uchar*) &tmp; }
Safe
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
3.054227938686407e+38
8
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
0
int nfs41_walk_client_list(struct nfs_client *new, struct nfs_client **result, const struct cred *cred) { struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); struct nfs_client *pos, *prev = NULL; int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { if (pos == new) goto found; status = nfs4_match_client(pos, new, &prev, nn); if (status < 0) goto out; if (status != 0) continue; /* * Note that session trunking is just a special subcase of * client id trunking. In either case, we want to fall back * to using the existing nfs_client. */ if (!nfs4_check_serverowner_major_id(pos->cl_serverowner, new->cl_serverowner)) continue; found: refcount_inc(&pos->cl_count); *result = pos; status = 0; break; } out: spin_unlock(&nn->nfs_client_lock); nfs_put_client(prev); return status; }
Safe
[ "CWE-703" ]
linux
dd99e9f98fbf423ff6d365b37a98e8879170f17c
1.8951320811439154e+38
41
NFSv4: Initialise connection to the server in nfs4_alloc_client() Set up the connection to the NFSv4 server in nfs4_alloc_client(), before we've added the struct nfs_client to the net-namespace's nfs_client_list so that a downed server won't cause other mounts to hang in the trunking detection code. Reported-by: Michael Wakabayashi <mwakabayashi@vmware.com> Fixes: 5c6e5b60aae4 ("NFS: Fix an Oops in the pNFS files and flexfiles connection setup to the DS") Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
0
static double mp_list_whds(_cimg_math_parser& mp) { const unsigned int ind = (unsigned int)cimg::mod((int)_mp_arg(2),mp.imglist.width()); return (double)mp.imglist[ind]._width*mp.imglist[ind]._height*mp.imglist[ind]._depth*mp.imglist[ind]._spectrum; }
Safe
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
9.978722745577659e+37
4
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
0
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) { }
Safe
[]
linux
7bced397510ab569d31de4c70b39e13355046387
2.335414537162043e+38
3
net_dma: simple removal Per commit "77873803363c net_dma: mark broken" net_dma is no longer used and there is no plan to fix it. This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards. Reverting the remainder of the net_dma induced changes is deferred to subsequent patches. Marked for stable due to Roman's report of a memory leak in dma_pin_iovec_pages(): https://lkml.org/lkml/2014/9/3/177 Cc: Dave Jiang <dave.jiang@intel.com> Cc: Vinod Koul <vinod.koul@intel.com> Cc: David Whipple <whipple@securedatainnovations.ch> Cc: Alexander Duyck <alexander.h.duyck@intel.com> Cc: <stable@vger.kernel.org> Reported-by: Roman Gushchin <klamm@yandex-team.ru> Acked-by: David S. Miller <davem@davemloft.net> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
0
E_P24 (uschar *p21, uschar *c8, uschar *p24) { smbhash (p24, c8, p21, 1); smbhash (p24 + 8, c8, p21 + 7, 1); smbhash (p24 + 16, c8, p21 + 14, 1); }
Safe
[ "CWE-125" ]
exim
57aa14b216432be381b6295c312065b2fd034f86
2.6996763937941602e+38
6
Fix SPA authenticator, checking client-supplied data before using it. Bug 2571
0
bool operator <(const SilenceEntry& other) const { if (flags & SF_EXEMPT && other.flags & ~SF_EXEMPT) return true; if (other.flags & SF_EXEMPT && flags & ~SF_EXEMPT) return false; if (flags < other.flags) return true; if (other.flags < flags) return false; return mask < other.mask; }
Safe
[ "CWE-416" ]
inspircd
7b47de3c194f239c5fea09a0e49696c9af017d51
2.050593096305283e+38
12
Copy the silence flags when sending update notifications. This fixes a crash bug in the silence module on some versions of GCC.
0
side_get_n_unix_fds (ProxySide *side, int n_fds) { GList *res = NULL; while (side->control_messages != NULL) { GSocketControlMessage *control_message = side->control_messages->data; if (G_IS_UNIX_FD_MESSAGE (control_message)) { GUnixFDMessage *fd_message = G_UNIX_FD_MESSAGE (control_message); GUnixFDList *fd_list = g_unix_fd_message_get_fd_list (fd_message); int len = g_unix_fd_list_get_length (fd_list); /* I believe that socket control messages are never merged, and the sender side sends only one unix-fd-list per message, so at this point there should always be one full fd list per requested number of fds */ if (len != n_fds) { g_warning ("Not right nr of fds in socket message"); return NULL; } side->control_messages = g_list_delete_link (side->control_messages, side->control_messages); return g_list_append (NULL, control_message); } g_object_unref (control_message); side->control_messages = g_list_delete_link (side->control_messages, side->control_messages); } return res; }
Safe
[ "CWE-284", "CWE-436" ]
flatpak
52346bf187b5a7f1c0fe9075b328b7ad6abe78f6
6.302008389847447e+37
35
Fix vulnerability in dbus proxy During the authentication all client data is directly forwarded to the dbus daemon as is, until we detect the BEGIN command after which we start filtering the binary dbus protocol. Unfortunately the detection of the BEGIN command in the proxy did not exactly match the detection in the dbus daemon. A BEGIN followed by a space or tab was considered ok in the daemon but not by the proxy. This could be exploited to send arbitrary dbus messages to the host, which can be used to break out of the sandbox. This was noticed by Gabriel Campana of The Google Security Team. This fix makes the detection of the authentication phase end match the dbus code. In addition we duplicate the authentication line validation from dbus, which includes ensuring all data is ASCII, and limiting the size of a line to 16k. In fact, we add some extra stringent checks, disallowing ASCII control chars and requiring that auth lines start with a capital letter.
0
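The hardened authentication-line validation described above can be sketched as follows. The 16k limit and the leading-capital rule come from the commit message; the exact character policy here is an assumption for illustration only:

    #include <stddef.h>

    #define AUTH_LINE_MAX (16 * 1024)

    /* Reject anything overlong, anything that does not start with a
     * capital letter (auth commands such as AUTH, DATA and BEGIN all
     * do), and any byte outside printable ASCII. */
    static int auth_line_is_valid(const char *line, size_t len)
    {
        size_t i;
        if (len == 0 || len > AUTH_LINE_MAX)
            return 0;
        if (line[0] < 'A' || line[0] > 'Z')
            return 0;
        for (i = 0; i < len; i++) {
            unsigned char c = (unsigned char)line[i];
            if (c < 0x20 || c > 0x7e)   /* control chars and non-ASCII */
                return 0;
        }
        return 1;
    }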
void select_connection(struct st_command *command) { DBUG_ENTER("select_connection"); static DYNAMIC_STRING ds_connection; const struct command_arg connection_args[] = { { "connection_name", ARG_STRING, TRUE, &ds_connection, "Name of the connection that we switch to." } }; check_command_args(command, command->first_argument, connection_args, sizeof(connection_args)/sizeof(struct command_arg), ' '); DBUG_PRINT("info", ("changing connection: %s", ds_connection.str)); select_connection_name(ds_connection.str); dynstr_free(&ds_connection); DBUG_VOID_RETURN; }
Safe
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
2.7537359977962716e+38
16
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
0
static void deliver_merge_vpoll_reply(icalcomponent *ical, icalcomponent *reply) { icalproperty *mastervoterp; const char *voter; icalcomponent *comp; mastervoterp = icalcomponent_get_first_property(reply, ICAL_VOTER_PROPERTY); voter = icalproperty_get_voter(mastervoterp); /* Process each existing VPOLL subcomponent */ for (comp = icalcomponent_get_first_component(ical, ICAL_ANY_COMPONENT); comp; comp = icalcomponent_get_next_component(ical, ICAL_ANY_COMPONENT)) { icalproperty *itemid, *voterp; int id; itemid = icalcomponent_get_first_property(comp, ICAL_POLLITEMID_PROPERTY); if (!itemid) continue; id = icalproperty_get_pollitemid(itemid); /* Remove any existing voter property from the subcomponent */ for (voterp = icalcomponent_get_first_property(comp, ICAL_VOTER_PROPERTY); voterp && strcmp(voter, icalproperty_get_voter(voterp)); voterp = icalcomponent_get_next_property(comp, ICAL_VOTER_PROPERTY)); if (voterp) { icalcomponent_remove_property(comp, voterp); icalproperty_free(voterp); } /* Find matching poll-item-id in the reply */ for (itemid = icalcomponent_get_first_property(reply, ICAL_POLLITEMID_PROPERTY); itemid && (id != icalproperty_get_pollitemid(itemid)); itemid = icalcomponent_get_next_property(reply, ICAL_POLLITEMID_PROPERTY)); if (itemid) { icalparameter *param; /* Add a VOTER property with params from the reply */ voterp = icalproperty_new_clone(mastervoterp); for (param = icalproperty_get_first_parameter(itemid, ICAL_ANY_PARAMETER); param; param = icalproperty_get_next_parameter(itemid, ICAL_ANY_PARAMETER)) { switch (icalparameter_isa(param)) { case ICAL_PUBLICCOMMENT_PARAMETER: case ICAL_RESPONSE_PARAMETER: icalproperty_add_parameter(voterp, icalparameter_new_clone(param)); break; default: break; } } icalcomponent_add_property(comp, voterp); } } }
Safe
[ "CWE-787" ]
cyrus-imapd
a5779db8163b99463e25e7c476f9cbba438b65f3
2.339914022978912e+38
71
HTTP: don't overrun buffer when parsing strings with sscanf()
0
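The overrun fixed above is the classic unbounded %s conversion; the whole fix is giving every string conversion an explicit field width one less than the destination size. A minimal sketch with a hypothetical HTTP-style token:

    #include <stdio.h>

    static int parse_method(const char *line)
    {
        char method[16];
        /* "%15s" leaves room for the terminating NUL, so an oversized
         * token is truncated instead of overrunning method[16]. */
        if (sscanf(line, "%15s", method) != 1)
            return -1;
        printf("method: %s\n", method);
        return 0;
    }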
void SetRemoteIP(const CString& s) { m_sRemoteIP = s; }
Safe
[ "CWE-399" ]
znc
11508aa72efab4fad0dbd8292b9614d9371b20a9
2.8812170920925104e+38
1
Fix crash in bouncedcc module. It happens when DCC RESUME is received. Affected ZNC versions: 0.200, 0.202. Thanks to howeyc for reporting this and providing the patch.
0
helpFile(char *base) { return expandPath(Strnew_m_charp(w3m_help_dir(), "/", base, NULL)->ptr); }
Safe
[ "CWE-59", "CWE-241" ]
w3m
18dcbadf2771cdb0c18509b14e4e73505b242753
2.1848450120300056e+38
4
Make temporary directory safely when ~/.w3m is unwritable
0
static int __net_init arp_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_ARP); }
Safe
[ "CWE-200" ]
linux-2.6
42eab94fff18cb1091d3501cd284d6bd6cc9c143
3.0462502371422447e+38
4
netfilter: arp_tables: fix infoleak to userspace Structures ipt_replace, compat_ipt_replace, and xt_get_revision are copied from userspace. Fields of these structs that are zero-terminated strings are not checked. When they are used as argument to a format string containing "%s" in request_module(), some sensitive information is leaked to userspace via argument of spawned modprobe process. The first bug was introduced before the git epoch; the second is introduced by 6b7d31fc (v2.6.15-rc1); the third is introduced by 6b7d31fc (v2.6.15-rc1). To trigger the bug one should have CAP_NET_ADMIN. Signed-off-by: Vasiliy Kulikov <segoon@openwall.com> Signed-off-by: Patrick McHardy <kaber@trash.net>
0
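The leak described above comes from trusting that a fixed-size name copied from userspace is NUL-terminated before it reaches a "%s" format. The shape of the fix, sketched outside the kernel with a hypothetical struct:

    #include <string.h>

    #define TABLE_NAME_LEN 32

    struct user_request {
        char name[TABLE_NAME_LEN];  /* copied verbatim from an untrusted source */
    };

    /* Force termination before the buffer is ever used with "%s";
     * without this, printing walks past the field into adjacent
     * memory and leaks its contents to whoever observes the output. */
    static void sanitize_request(struct user_request *req)
    {
        req->name[TABLE_NAME_LEN - 1] = '\0';
    }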
_wrap_umac128_set_key(void *ctx, size_t len, const uint8_t * key) { if (unlikely(len != 16)) abort(); umac128_set_key(ctx, key); }
Safe
[ "CWE-476" ]
gnutls
3db352734472d851318944db13be73da61300568
5.801477702062253e+36
6
wrap_nettle_hash_fast: avoid calling _update with zero-length input As Nettle's hash update functions internally call memcpy, providing zero-length input may cause undefined behavior. Signed-off-by: Daiki Ueno <ueno@gnu.org>
0
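The guard added by the commit above is simply "do not call update with length zero". A sketch of the wrapper shape, with a generic function pointer standing in for Nettle's hash update routines:

    #include <stddef.h>

    typedef void (*update_fn)(void *ctx, size_t len, const unsigned char *data);

    /* Zero-length input is a no-op by definition, and skipping the call
     * avoids handing a possibly-NULL pointer to an internal memcpy(),
     * which is undefined behaviour even for zero bytes. */
    static void safe_update(update_fn update, void *ctx,
                            size_t len, const unsigned char *data)
    {
        if (len > 0)
            update(ctx, len, data);
    }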
close_std_fd(void) { close(STDIN_FILENO); close(STDOUT_FILENO); close(STDERR_FILENO); }
Safe
[ "CWE-59", "CWE-61" ]
keepalived
04f2d32871bb3b11d7dc024039952f2fe2750306
2.355921493861498e+36
6
When opening files for write, ensure they aren't symbolic links Issue #1048 identified that if, for example, a non privileged user created a symbolic link from /etc/keepalvied.data to /etc/passwd, writing to /etc/keepalived.data (which could be invoked via DBus) would cause /etc/passwd to be overwritten. This commit stops keepalived writing to pathnames where the ultimate component is a symbolic link, by setting O_NOFOLLOW whenever opening a file for writing. This might break some setups, where, for example, /etc/keepalived.data was a symbolic link to /home/fred/keepalived.data. If this was the case, instead create a symbolic link from /home/fred/keepalived.data to /tmp/keepalived.data, so that the file is still accessible via /home/fred/keepalived.data. There doesn't appear to be a way around this backward incompatibility, since even checking if the pathname is a symbolic link prior to opening for writing would create a race condition. Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
0
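The keepalived fix above reduces to one open(2) flag. A minimal sketch of opening a file for writing while refusing to follow a symlink in the final path component:

    #include <fcntl.h>
    #include <stdio.h>

    /* With O_NOFOLLOW, open() fails with ELOOP when the last path
     * component is a symbolic link, so a planted link such as
     * /etc/keepalived.data -> /etc/passwd can no longer redirect the
     * write. Note that earlier components may still be symlinks. */
    static FILE *fopen_create_nofollow(const char *path)
    {
        int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW, 0644);
        return fd < 0 ? NULL : fdopen(fd, "w");
    }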
int ___pskb_trim(struct sk_buff *skb, unsigned int len) { struct sk_buff **fragp; struct sk_buff *frag; int offset = skb_headlen(skb); int nfrags = skb_shinfo(skb)->nr_frags; int i; int err; if (skb_cloned(skb) && unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) return err; i = 0; if (offset >= len) goto drop_pages; for (; i < nfrags; i++) { int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); if (end < len) { offset = end; continue; } skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); drop_pages: skb_shinfo(skb)->nr_frags = i; for (; i < nfrags; i++) skb_frag_unref(skb, i); if (skb_has_frag_list(skb)) skb_drop_fraglist(skb); goto done; } for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); fragp = &frag->next) { int end = offset + frag->len; if (skb_shared(frag)) { struct sk_buff *nfrag; nfrag = skb_clone(frag, GFP_ATOMIC); if (unlikely(!nfrag)) return -ENOMEM; nfrag->next = frag->next; consume_skb(frag); frag = nfrag; *fragp = frag; } if (end < len) { offset = end; continue; } if (end > len && unlikely((err = pskb_trim(frag, len - offset)))) return err; if (frag->next) skb_drop_list(&frag->next); break; } done: if (len > skb_headlen(skb)) { skb->data_len -= skb->len - len; skb->len = len; } else { skb->len = len; skb->data_len = 0; skb_set_tail_pointer(skb, len); } if (!skb->sk || skb->destructor == sock_edemux) skb_condense(skb); return 0; }
Safe
[ "CWE-20" ]
linux
2b16f048729bf35e6c28a40cbfad07239f9dcd90
3.2522497751761486e+38
83
net: create skb_gso_validate_mac_len() If you take a GSO skb, and split it into packets, will the MAC length (L2 + L3 + L4 headers + payload) of those packets be small enough to fit within a given length? Move skb_gso_mac_seglen() to skbuff.h with other related functions like skb_gso_network_seglen() so we can use it, and then create skb_gso_validate_mac_len to do the full calculation. Signed-off-by: Daniel Axtens <dja@axtens.net> Signed-off-by: David S. Miller <davem@davemloft.net>
0
static void change_theme(const char *name, int verbose) { THEME_REC *rec; rec = theme_load(name); if (rec != NULL) { current_theme = rec; signal_emit("theme changed", 1, rec); if (verbose) { printformat(NULL, NULL, MSGLEVEL_CLIENTNOTICE, TXT_THEME_CHANGED, rec->name, rec->path); } } else if (verbose) { printformat(NULL, NULL, MSGLEVEL_CLIENTERROR, TXT_THEME_NOT_FOUND, name); } }
Safe
[ "CWE-416" ]
irssi
43e44d553d44e313003cee87e6ea5e24d68b84a1
2.6204013392628543e+38
19
Merge branch 'security' into 'master' Security Closes GL#12, GL#13, GL#14, GL#15, GL#16 See merge request irssi/irssi!23
0
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask) { int i; for (i=0; mask; i++, mask>>=1) { if ((mask & 0x1) == 0) continue; ia64_set_pmc(i, pmcs[i]); } ia64_srlz_d(); }
Safe
[]
linux-2.6
41d5e5d73ecef4ef56b7b4cde962929a712689b4
1.5298669198258794e+37
10
[IA64] perfmon use-after-free fix Perfmon associates vmalloc()ed memory with a file descriptor, and installs a vma mapping that memory. Unfortunately, the vm_file field is not filled in, so processes with mappings to that memory do not prevent the file from being closed and the memory freed. This results in use-after-free bugs and multiple freeing of pages, etc. I saw this bug on an Altix on SLES9. Haven't reproduced upstream but it looks like the same issue is there. Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Stephane Eranian <eranian@hpl.hp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Tony Luck <tony.luck@intel.com>
0
nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirargs *args) { int len; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->cookie); args->verf = p; p += 2; args->dircount = ntohl(*p++); args->count = ntohl(*p++); len = args->count = min(args->count, max_blocksize); while (len > 0) { struct page *p = *(rqstp->rq_next_page++); if (!args->buffer) args->buffer = page_address(p); len -= PAGE_SIZE; } return xdr_argsize_check(rqstp, p); }
Safe
[ "CWE-119", "CWE-703" ]
linux
13bf9fbff0e5e099e2b6f003a0ab8ae145436309
1.6080120296774678e+38
24
nfsd: stricter decoding of write-like NFSv2/v3 ops The NFSv2/v3 code does not systematically check whether we decode past the end of the buffer. This generally appears to be harmless, but there are a few places where we do arithmetic on the pointers involved and don't account for the possibility that a length could be negative. Add checks to catch these. Reported-by: Tuomas Haanpää <thaan@synopsys.com> Reported-by: Ari Kauppi <ari@synopsys.com> Reviewed-by: NeilBrown <neilb@suse.com> Cc: stable@vger.kernel.org Signed-off-by: J. Bruce Fields <bfields@redhat.com>
0
static int java_linear_sweep(RAnal *anal, RAnalState *state, ut64 addr) { RAnalBlock *bb = state->current_bb; if (state->current_bb_head && state->current_bb->type & R_ANAL_BB_TYPE_TAIL) { //r_anal_ex_update_bb_cfg_head_tail (state->current_bb_head, state->current_bb_head, state->current_bb); } // basic filter for handling the different type of operations // depending on flags some may be called more than once // if (bb->type2 & R_ANAL_EX_ILL_OP) handle_bb_ill_op (anal, state); // if (bb->type2 & R_ANAL_EX_COND_OP) handle_bb_cond_op (anal, state); // if (bb->type2 & R_ANAL_EX_UNK_OP) handle_bb_unknown_op (anal, state); // if (bb->type2 & R_ANAL_EX_NULL_OP) handle_bb_null_op (anal, state); // if (bb->type2 & R_ANAL_EX_NOP_OP) handle_bb_nop_op (anal, state); // if (bb->type2 & R_ANAL_EX_REP_OP) handle_bb_rep_op (anal, state); // if (bb->type2 & R_ANAL_EX_STORE_OP) handle_bb_store_op (anal, state); // if (bb->type2 & R_ANAL_EX_LOAD_OP) handle_bb_load_op (anal, state // if (bb->type2 & R_ANAL_EX_REG_OP) handle_bb_reg_op (anal, state); // if (bb->type2 & R_ANAL_EX_OBJ_OP) handle_bb_obj_op (anal, state); // if (bb->type2 & R_ANAL_EX_STACK_OP) handle_bb_stack_op (anal, state); // if (bb->type2 & R_ANAL_EX_BIN_OP) handle_bb_bin_op (anal, state); if (bb->type2 & R_ANAL_EX_CODE_OP) handle_bb_cf_linear_sweep (anal, state); // if (bb->type2 & R_ANAL_EX_DATA_OP) handle_bb_data_op (anal, state); return 0; }
Safe
[ "CWE-125" ]
radare2
224e6bc13fa353dd3b7f7a2334588f1c4229e58d
1.4315786385356386e+38
24
Fix #10296 - Heap out of bounds read in java_switch_op()
0
static int spi_gpio_probe(struct platform_device *pdev) { int status; struct spi_master *master; struct spi_gpio *spi_gpio; struct device *dev = &pdev->dev; struct spi_bitbang *bb; const struct of_device_id *of_id; of_id = of_match_device(spi_gpio_dt_ids, &pdev->dev); master = spi_alloc_master(dev, sizeof(*spi_gpio)); if (!master) return -ENOMEM; status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master); if (status) { spi_master_put(master); return status; } if (of_id) status = spi_gpio_probe_dt(pdev, master); else status = spi_gpio_probe_pdata(pdev, master); if (status) return status; spi_gpio = spi_master_get_devdata(master); status = spi_gpio_request(dev, spi_gpio); if (status) return status; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL | SPI_CS_HIGH; if (!spi_gpio->mosi) { /* HW configuration without MOSI pin * * No setting SPI_MASTER_NO_RX here - if there is only * a MOSI pin connected the host can still do RX by * changing the direction of the line. */ master->flags = SPI_MASTER_NO_TX; } master->bus_num = pdev->id; master->setup = spi_gpio_setup; master->cleanup = spi_gpio_cleanup; bb = &spi_gpio->bitbang; bb->master = master; /* * There is some additional business, apart from driving the CS GPIO * line, that we need to do on selection. This makes the local * callback for chipselect always get called. */ master->flags |= SPI_MASTER_GPIO_SS; bb->chipselect = spi_gpio_chipselect; bb->set_line_direction = spi_gpio_set_direction; if (master->flags & SPI_MASTER_NO_TX) { bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0; bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1; bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2; bb->txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; } else { bb->txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; bb->txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; bb->txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; bb->txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; } bb->setup_transfer = spi_bitbang_setup_transfer; status = spi_bitbang_init(&spi_gpio->bitbang); if (status) return status; return devm_spi_register_master(&pdev->dev, spi_master_get(master)); }
Safe
[ "CWE-400", "CWE-401" ]
linux
d3b0ffa1d75d5305ebe34735598993afbb8a869d
4.692453446178001e+37
82
spi: gpio: prevent memory leak in spi_gpio_probe In spi_gpio_probe an SPI master is allocated via spi_alloc_master, but this controller should be released if devm_add_action_or_reset fails; otherwise the memory is leaked. In order to avoid the leak, spi_controller_put must be called in case of failure for devm_add_action_or_reset. Fixes: 8b797490b4db ("spi: gpio: Make sure spi_master_put() is called in every error path") Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com> Link: https://lore.kernel.org/r/20190930205241.5483-1-navid.emamdoost@gmail.com Signed-off-by: Mark Brown <broonie@kernel.org>
0
TEST_F( QuicUnencryptedServerTransportTest, ReceiveZeroRttPacketFromChangedPeerAddress) { server->getNonConstConn().transportSettings.disableMigration = false; fakeHandshake->allowZeroRttKeys(); recvClientHello(); auto data = IOBuf::copyBuffer("bad data"); StreamId streamId = 2; auto packetData = packetToBuf(createStreamPacket( *clientConnectionId, *server->getConn().serverConnectionId, clientNextAppDataPacketNum++, streamId, *data, 0 /* cipherOverhead */, 0 /* largestAcked */, std::make_pair( LongHeader::Types::ZeroRtt, server->getConn().supportedVersions[0]), false)); folly::SocketAddress newPeer("100.101.102.103", 23456); try { deliverData(std::move(packetData), true, &newPeer); } catch (const std::runtime_error& ex) { EXPECT_EQ(std::string(ex.what()), "Invalid migration"); } EXPECT_TRUE(server->getConn().localConnectionError); EXPECT_EQ( server->getConn().localConnectionError->second, "Migration not allowed during handshake"); }
Safe
[ "CWE-617", "CWE-703" ]
mvfst
a67083ff4b8dcbb7ee2839da6338032030d712b0
8.921643734198786e+37
32
Close connection if we derive an extra 1-rtt write cipher Summary: Fixes CVE-2021-24029 Reviewed By: mjoras, lnicco Differential Revision: D26613890 fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945
0
bool swiotlb_free(struct device *dev, struct page *page, size_t size) { phys_addr_t tlb_addr = page_to_phys(page); if (!is_swiotlb_buffer(dev, tlb_addr)) return false; swiotlb_release_slots(dev, tlb_addr); return true; }
Safe
[ "CWE-200", "CWE-401" ]
linux
aa6f8dcbab473f3a3c7454b74caa46d36cdc5d13
1.1424983598278662e+38
11
swiotlb: rework "fix info leak with DMA_FROM_DEVICE" Unfortunately, we ended up merging an old version of the patch "fix info leak with DMA_FROM_DEVICE" instead of merging the latest one. Christoph (the swiotlb maintainer) asked me to create an incremental fix (after I had pointed out the mix-up and asked him for guidance). So here we go. The main differences between what we got and what was agreed are: * swiotlb_sync_single_for_device is also required to do an extra bounce * We decided not to introduce DMA_ATTR_OVERWRITE until we have exploiters * The implementation of DMA_ATTR_OVERWRITE is flawed: DMA_ATTR_OVERWRITE must take precedence over DMA_ATTR_SKIP_CPU_SYNC Thus this patch removes DMA_ATTR_OVERWRITE, and makes swiotlb_sync_single_for_device() bounce unconditionally (that is, also when dir == DMA_TO_DEVICE) in order to avoid synchronising back stale data from the swiotlb buffer. Let me note that if the size used with the dma_sync_* API is less than the size used with dma_[un]map_*, under certain circumstances we may still end up with swiotlb not being transparent. In that sense, this is no perfect fix either. To make this bulletproof, we would have to bounce the entire mapping/bounce buffer. For that we would have to figure out the starting address, and the size of the mapping in swiotlb_sync_single_for_device(). While this does seem possible, there seems to be no firm consensus on how things are supposed to work. Signed-off-by: Halil Pasic <pasic@linux.ibm.com> Fixes: ddbd89deb7d3 ("swiotlb: fix info leak with DMA_FROM_DEVICE") Cc: stable@vger.kernel.org Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
GetSpecificPortMappingEntry(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewInternalPort>%u</NewInternalPort>" "<NewInternalClient>%s</NewInternalClient>" "<NewEnabled>1</NewEnabled>" "<NewPortMappingDescription>%s</NewPortMappingDescription>" "<NewLeaseDuration>%u</NewLeaseDuration>" "</u:%sResponse>"; char body[1024]; int bodylen; struct NameValueParserData data; const char * r_host, * ext_port, * protocol; unsigned short eport, iport; char int_ip[32]; char desc[64]; unsigned int leaseduration = 0; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); #ifdef UPNP_STRICT if(!ext_port || !protocol || !r_host) #else if(!ext_port || !protocol) #endif { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif #endif eport = (unsigned short)atoi(ext_port); if(eport == 0) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } /* TODO : add r_host as an input parameter ... * We prevent several Port Mapping with same external port * but different remoteHost to be set up, so that is not * a priority. */ r = upnp_get_redirection_infos(eport, protocol, &iport, int_ip, sizeof(int_ip), desc, sizeof(desc), NULL, 0, &leaseduration); if(r < 0) { SoapError(h, 714, "NoSuchEntryInArray"); } else { syslog(LOG_INFO, "%s: rhost='%s' %s %s found => %s:%u desc='%s'", action, r_host ? r_host : "NULL", ext_port, protocol, int_ip, (unsigned int)iport, desc); bodylen = snprintf(body, sizeof(body), resp, action, ns/*SERVICE_TYPE_WANIPC*/, (unsigned int)iport, int_ip, desc, leaseduration, action); BuildSendAndCloseSoapResp(h, body, bodylen); } ClearNameValueList(&data); }
Safe
[ "CWE-476" ]
miniupnp
13585f15c7f7dc28bbbba1661efb280d530d114c
3.219110129475555e+38
86
GetOutboundPinholeTimeout: check args
0
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second, unsigned long, third, void __user *, ptr, long, fifth) { long err; /* No need for backward compatibility. We can start fresh... */ if (call <= SEMCTL) { switch (call) { case SEMOP: err = sys_semtimedop(first, ptr, (unsigned)second, NULL); goto out; case SEMTIMEDOP: err = sys_semtimedop(first, ptr, (unsigned)second, (const struct timespec __user *) (unsigned long) fifth); goto out; case SEMGET: err = sys_semget(first, (int)second, (int)third); goto out; case SEMCTL: { err = sys_semctl(first, second, (int)third | IPC_64, (union semun) ptr); goto out; } default: err = -ENOSYS; goto out; }; } if (call <= MSGCTL) { switch (call) { case MSGSND: err = sys_msgsnd(first, ptr, (size_t)second, (int)third); goto out; case MSGRCV: err = sys_msgrcv(first, ptr, (size_t)second, fifth, (int)third); goto out; case MSGGET: err = sys_msgget((key_t)first, (int)second); goto out; case MSGCTL: err = sys_msgctl(first, (int)second | IPC_64, ptr); goto out; default: err = -ENOSYS; goto out; }; } if (call <= SHMCTL) { switch (call) { case SHMAT: { ulong raddr; err = do_shmat(first, ptr, (int)second, &raddr); if (!err) { if (put_user(raddr, (ulong __user *) third)) err = -EFAULT; } goto out; } case SHMDT: err = sys_shmdt(ptr); goto out; case SHMGET: err = sys_shmget(first, (size_t)second, (int)third); goto out; case SHMCTL: err = sys_shmctl(first, (int)second | IPC_64, ptr); goto out; default: err = -ENOSYS; goto out; }; } else { err = -ENOSYS; } out: return err; }
Safe
[]
linux
5a0efea09f42f7c92bd98a38d66b4dff9589266b
2.907132090657381e+38
83
sparc64: Sharpen address space randomization calculations. A recent patch to the x86 randomization code caused me to take a quick look at what we do on sparc64, and in doing so I noticed that we sometimes calculate a non-page-aligned randomization value and stick it into mmap_base. I also noticed that since I copied the logic over from PowerPC, the powerpc code has tweaked the randomization ranges in ways that would benefit us as well. For one thing, we should allow up to at least 8MB of randomization otherwise huge-page regions when HPAGE_SIZE is 4MB never randomize at all. And on the 64-bit side we were using up to 4GB. Tone it down to 1GB as 4GB can result in a lot of address space wastage. Finally, make sure all computations are unsigned. Signed-off-by: David S. Miller <davem@davemloft.net>
0
dir_s_glob(int argc, VALUE *argv, VALUE obj) { VALUE str, rflags, ary, opts, base; int flags; argc = rb_scan_args(argc, argv, "11:", &str, &rflags, &opts); if (argc == 2) flags = NUM2INT(rflags); else flags = 0; dir_glob_options(opts, &base, &flags); ary = rb_check_array_type(str); if (NIL_P(ary)) { ary = rb_push_glob(str, base, flags); } else { VALUE v = ary; ary = dir_globs(RARRAY_LEN(v), RARRAY_CONST_PTR(v), base, flags); RB_GC_GUARD(v); } if (rb_block_given_p()) { rb_ary_each(ary); return Qnil; } return ary; }
Safe
[ "CWE-22" ]
ruby
bd5661a3cbb38a8c3a3ea10cd76c88bbef7871b8
2.595712463944822e+38
28
dir.c: check NUL bytes * dir.c (GlobPathValue): should be used in rb_push_glob only. other methods should use FilePathValue. https://hackerone.com/reports/302338 * dir.c (rb_push_glob): expand GlobPathValue git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62989 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
0
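The check referenced above (using FilePathValue rather than GlobPathValue) exists to reject counted strings containing an embedded NUL, since C-level path syscalls would silently truncate the path there. The core test, sketched for a counted (length-carrying) string:

    #include <string.h>

    /* bytes/len describe a counted string (len excludes any terminator).
     * A payload like "a.txt\0/etc/passwd" would otherwise be opened as
     * just "a.txt" by the underlying C APIs. */
    static int path_has_embedded_nul(const char *bytes, size_t len)
    {
        return memchr(bytes, '\0', len) != NULL;
    }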
static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_comp_state *cs) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); if (req->flags & REQ_F_CUR_POS) req->file->f_pos = kiocb->ki_pos; if (ret >= 0 && kiocb->ki_complete == io_complete_rw) __io_complete_rw(req, ret, 0, cs); else io_rw_done(kiocb, ret); }
Safe
[ "CWE-416" ]
linux
6d816e088c359866f9867057e04f244c608c42fe
3.2716518315461883e+38
12
io_uring: hold 'ctx' reference around task_work queue + execute We're holding the request reference, but we need to go one higher to ensure that the ctx remains valid after the request has finished. If the ring is closed with pending task_work inflight, and the given io_kiocb finishes sync during issue, then we need a reference to the ring itself around the task_work execution cycle. Cc: stable@vger.kernel.org # v5.7+ Reported-by: syzbot+9b260fc33297966f5a8e@syzkaller.appspotmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
static inline bool io_check_restriction(struct io_ring_ctx *ctx, struct io_kiocb *req, unsigned int sqe_flags) { if (likely(!ctx->restricted)) return true; if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) return false; if ((sqe_flags & ctx->restrictions.sqe_flags_required) != ctx->restrictions.sqe_flags_required) return false; if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | ctx->restrictions.sqe_flags_required)) return false; return true; }
Safe
[ "CWE-125" ]
linux
89c2b3b74918200e46699338d7bcc19b1ea12110
2.2194805289332815e+38
20
io_uring: reexpand under-reexpanded iters [ 74.211232] BUG: KASAN: stack-out-of-bounds in iov_iter_revert+0x809/0x900 [ 74.212778] Read of size 8 at addr ffff888025dc78b8 by task syz-executor.0/828 [ 74.214756] CPU: 0 PID: 828 Comm: syz-executor.0 Not tainted 5.14.0-rc3-next-20210730 #1 [ 74.216525] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014 [ 74.219033] Call Trace: [ 74.219683] dump_stack_lvl+0x8b/0xb3 [ 74.220706] print_address_description.constprop.0+0x1f/0x140 [ 74.224226] kasan_report.cold+0x7f/0x11b [ 74.226085] iov_iter_revert+0x809/0x900 [ 74.227960] io_write+0x57d/0xe40 [ 74.232647] io_issue_sqe+0x4da/0x6a80 [ 74.242578] __io_queue_sqe+0x1ac/0xe60 [ 74.245358] io_submit_sqes+0x3f6e/0x76a0 [ 74.248207] __do_sys_io_uring_enter+0x90c/0x1a20 [ 74.257167] do_syscall_64+0x3b/0x90 [ 74.257984] entry_SYSCALL_64_after_hwframe+0x44/0xae old_size = iov_iter_count(); ... iov_iter_revert(old_size - iov_iter_count()); If iov_iter_revert() is done based on the initial size as above, and the iter is truncated and not reexpanded in the middle, it miscalculates borders causing problems. This trace is due to no one reexpanding after generic_write_checks(). Now iters store how many bytes have been truncated, so reexpand them to the initial state right before reverting. Cc: stable@vger.kernel.org Reported-by: Palash Oswal <oswalpalash@gmail.com> Reported-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com> Reported-and-tested-by: syzbot+9671693590ef5aad8953@syzkaller.appspotmail.com Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
0
relpTcpSend(relpTcp_t *pThis, relpOctet_t *pBuf, ssize_t *pLenBuf) { ssize_t written; ENTER_RELPFUNC; RELPOBJ_assert(pThis, Tcp); #ifdef ENABLE_TLS if(pThis->bEnableTLS) { written = gnutls_record_send(pThis->session, pBuf, *pLenBuf); pThis->pEngine->dbgprint("librelp: TLS send returned %d\n", (int) written); if(written == GNUTLS_E_AGAIN || written == GNUTLS_E_INTERRUPTED) { pThis->rtryOp = relpTCP_RETRY_send; written = 0; } else { pThis->rtryOp = relpTCP_RETRY_none; if(written < 1) { chkGnutlsCode(pThis, "TLS record write failed", RELP_RET_IO_ERR, written); ABORT_FINALIZE(RELP_RET_IO_ERR); } } } else { #endif /* #ifdef ENABLE_TLS */ written = send(pThis->sock, pBuf, *pLenBuf, 0); const int errno_save = errno; pThis->pEngine->dbgprint("relpTcpSend: sock %d, lenbuf %zd, send returned %d [errno %d]\n", (int)pThis->sock, *pLenBuf, (int) written, errno_save); if(written == -1) { switch(errno_save) { case EAGAIN: case EINTR: /* this is fine, just retry... */ written = 0; break; default: ABORT_FINALIZE(RELP_RET_IO_ERR); break; } } #ifdef ENABLE_TLS } #endif /* #ifdef ENABLE_TLS */ *pLenBuf = written; finalize_it: LEAVE_RELPFUNC; }
Safe
[ "CWE-787" ]
librelp
2cfe657672636aa5d7d2a14cfcb0a6ab9d1f00cf
2.151009419531668e+38
46
unify error message generation
0
static int fill_buffer(multipart_buffer *self TSRMLS_DC) { int bytes_to_read, total_read = 0, actual_read = 0; /* shift the existing data if necessary */ if (self->bytes_in_buffer > 0 && self->buf_begin != self->buffer) { memmove(self->buffer, self->buf_begin, self->bytes_in_buffer); } self->buf_begin = self->buffer; /* calculate the free space in the buffer */ bytes_to_read = self->bufsize - self->bytes_in_buffer; /* read the required number of bytes */ while (bytes_to_read > 0) { char *buf = self->buffer + self->bytes_in_buffer; actual_read = sapi_module.read_post(buf, bytes_to_read TSRMLS_CC); /* update the buffer length */ if (actual_read > 0) { self->bytes_in_buffer += actual_read; SG(read_post_bytes) += actual_read; total_read += actual_read; bytes_to_read -= actual_read; } else { break; } } return total_read; }
Safe
[ "CWE-399" ]
php-src
4605d536d23b00813d11cc906bb48d39bdcf5f25
4.41680618711396e+37
34
Fixed bug #69364 - use smart_str to assemble strings
0
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { unsigned long min_count, ret; if (hstate_is_gigantic(h) && !gigantic_page_supported()) return h->max_huge_pages; /* * Increase the pool size * First take pages out of surplus state. Then make up the * remaining difference by allocating fresh huge pages. * * We might race with __alloc_buddy_huge_page() here and be unable * to convert a surplus huge page to a normal huge page. That is * not critical, though, it just means the overall size of the * pool might be one hugepage larger than it needs to be, but * within all the constraints specified by the sysctls. */ spin_lock(&hugetlb_lock); while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, -1)) break; } while (count > persistent_huge_pages(h)) { /* * If this allocation races such that we no longer need the * page, free_huge_page will handle it by freeing the page * and reducing the surplus. */ spin_unlock(&hugetlb_lock); /* yield cpu to avoid soft lockup */ cond_resched(); if (hstate_is_gigantic(h)) ret = alloc_fresh_gigantic_page(h, nodes_allowed); else ret = alloc_fresh_huge_page(h, nodes_allowed); spin_lock(&hugetlb_lock); if (!ret) goto out; /* Bail for signals. Probably ctrl-c from user */ if (signal_pending(current)) goto out; } /* * Decrease the pool size * First return free pages to the buddy allocator (being careful * to keep enough around to satisfy reservations). Then place * pages into surplus state as needed so the pool will shrink * to the desired size as pages become free. * * By placing pages into the surplus state independent of the * overcommit value, we are allowing the surplus pool size to * exceed overcommit. There are few sane options here. Since * __alloc_buddy_huge_page() is checking the global counter, * though, we'll note that we're not allowed to exceed surplus * and won't grow the pool anywhere else. Not until one of the * sysctls are changed, or the surplus pages go out of use. */ min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; min_count = max(count, min_count); try_to_free_low(h, min_count, nodes_allowed); while (min_count < persistent_huge_pages(h)) { if (!free_pool_huge_page(h, nodes_allowed, 0)) break; cond_resched_lock(&hugetlb_lock); } while (count < persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, 1)) break; } out: ret = persistent_huge_pages(h); spin_unlock(&hugetlb_lock); return ret; }
Safe
[ "CWE-703" ]
linux
5af10dfd0afc559bb4b0f7e3e8227a1578333995
3.3564386664975706e+38
81
userfaultfd: hugetlbfs: remove superfluous page unlock in VM_SHARED case huge_add_to_page_cache->add_to_page_cache implicitly unlocks the page before returning in case of errors. The error returned was -EEXIST by running UFFDIO_COPY on a non-hole offset of a VM_SHARED hugetlbfs mapping. It was an userland bug that triggered it and the kernel must cope with it returning -EEXIST from ioctl(UFFDIO_COPY) as expected. page dumped because: VM_BUG_ON_PAGE(!PageLocked(page)) kernel BUG at mm/filemap.c:964! invalid opcode: 0000 [#1] SMP CPU: 1 PID: 22582 Comm: qemu-system-x86 Not tainted 4.11.11-300.fc26.x86_64 #1 RIP: unlock_page+0x4a/0x50 Call Trace: hugetlb_mcopy_atomic_pte+0xc0/0x320 mcopy_atomic+0x96f/0xbe0 userfaultfd_ioctl+0x218/0xe90 do_vfs_ioctl+0xa5/0x600 SyS_ioctl+0x79/0x90 entry_SYSCALL_64_fastpath+0x1a/0xa9 Link: http://lkml.kernel.org/r/20170802165145.22628-2-aarcange@redhat.com Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Tested-by: Maxime Coquelin <maxime.coquelin@redhat.com> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com> Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com> Cc: Mike Rapoport <rppt@linux.vnet.ibm.com> Cc: Alexey Perevalov <a.perevalov@samsung.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) { void *objp; struct array_cache *ac; check_irq_off(); ac = cpu_cache_get(cachep); if (likely(ac->avail)) { ac->touched = 1; objp = ac->entry[--ac->avail]; STATS_INC_ALLOCHIT(cachep); goto out; } STATS_INC_ALLOCMISS(cachep); objp = cache_alloc_refill(cachep, flags); /* * the 'ac' may be updated by cache_alloc_refill(), * and kmemleak_erase() requires its correct value. */ ac = cpu_cache_get(cachep); out: /* * To avoid a false negative, if an object that is in one of the * per-CPU caches is leaked, we need to make sure kmemleak doesn't * treat the array pointers as a reference to the object. */ if (objp) kmemleak_erase(&ac->entry[ac->avail]); return objp; }
Safe
[ "CWE-703" ]
linux
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
2.6695324364667936e+38
34
mm/slab.c: fix SLAB freelist randomization duplicate entries This patch fixes a bug in the freelist randomization code. When a high random number is used, the freelist will contain duplicate entries. It will result in different allocations sharing the same chunk. It will result in odd behaviours and crashes. It should be uncommon but it depends on the machines. We saw it happening more often on some machines (every few hours of running tests). Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization") Link: http://lkml.kernel.org/r/20170103181908.143178-1-thgarnie@google.com Signed-off-by: John Sperbeck <jsperbeck@google.com> Signed-off-by: Thomas Garnier <thgarnie@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
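The duplicate-entry bug above is exactly what a textbook Fisher-Yates shuffle avoids: start from the identity permutation, then swap in place, so every slot index appears exactly once no matter what the RNG returns. A generic sketch of that construction, with rand() standing in for the kernel's seeded RNG:

    #include <stdlib.h>

    static void randomize_freelist(unsigned int *list, unsigned int count)
    {
        unsigned int i;
        if (count == 0)
            return;                            /* avoid count - 1 wrapping below */
        for (i = 0; i < count; i++)
            list[i] = i;                       /* identity: duplicate-free by construction */
        for (i = count - 1; i > 0; i--) {
            unsigned int j = (unsigned int)rand() % (i + 1);
            unsigned int tmp = list[i];        /* swapping keeps the list a permutation */
            list[i] = list[j];
            list[j] = tmp;
        }
    }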
Diagnostics_area *get_stmt_da() { return m_stmt_da; }
Safe
[ "CWE-416" ]
server
4681b6f2d8c82b4ec5cf115e83698251963d80d5
2.950283640433123e+38
2
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob the bug was that in_vector array in Item_func_in was allocated in the statement arena, not in the table->expr_arena. revert part of the 5acd391e8b2d. Instead, change the arena correctly in fix_all_session_vcol_exprs(). Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force item tree changes to be rolled back (because they were allocated in the wrong arena and didn't persist. now they do)
0
QPDFWriter::setR4EncryptionParameters( char const* user_password, char const* owner_password, bool allow_accessibility, bool allow_extract, qpdf_r3_print_e print, qpdf_r3_modify_e modify, bool encrypt_metadata, bool use_aes) { std::set<int> clear; interpretR3EncryptionParameters( clear, user_password, owner_password, allow_accessibility, allow_extract, print, modify); this->m->encrypt_use_aes = use_aes; this->m->encrypt_metadata = encrypt_metadata; setEncryptionParameters(user_password, owner_password, 4, 4, 16, clear); }
Safe
[ "CWE-125" ]
qpdf
1868a10f8b06631362618bfc85ca8646da4b4b71
1.697616183127703e+38
14
Replace all atoi calls with QUtil::string_to_int The latter catches underflow/overflow.
0
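The commit above swaps atoi for a checked conversion. A C sketch of what such a replacement typically looks like (the real project uses a C++ helper, QUtil::string_to_int; this strtol version is an illustration of the same idea):

    #include <errno.h>
    #include <limits.h>
    #include <stdlib.h>

    /* Unlike atoi, this rejects empty input, trailing garbage, and
     * values outside the int range instead of returning garbage. */
    static int string_to_int_checked(const char *s, int *out)
    {
        char *end;
        long v;
        errno = 0;
        v = strtol(s, &end, 10);
        if (errno == ERANGE || v < INT_MIN || v > INT_MAX)
            return -1;                 /* underflow/overflow */
        if (end == s || *end != '\0')
            return -1;                 /* nothing parsed, or junk after the digits */
        *out = (int)v;
        return 0;
    }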
void CClient::SnapSetStaticsize(int ItemType, int Size) { m_SnapshotDelta.SetStaticsize(ItemType, Size); }
Safe
[ "CWE-119", "CWE-787" ]
teeworlds
ff254722a2683867fcb3e67569ffd36226c4bc62
2.6943051956190853e+38
4
added some checks to snap handling
0
static int ZEND_FASTCALL ZEND_ECHO_SPEC_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zval z_copy; zval *z = &opline->op1.u.constant; if (IS_CONST != IS_CONST && Z_TYPE_P(z) == IS_OBJECT && Z_OBJ_HT_P(z)->get_method != NULL && zend_std_cast_object_tostring(z, &z_copy, IS_STRING TSRMLS_CC) == SUCCESS) { zend_print_variable(&z_copy); zval_dtor(&z_copy); } else { zend_print_variable(z); } ZEND_VM_NEXT_OPCODE(); }
Safe
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
2.766449872485573e+38
18
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
0
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self, MirrorBlockJob *s, uint64_t offset, uint64_t bytes) { uint64_t self_start_chunk = offset / s->granularity; uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); uint64_t self_nb_chunks = self_end_chunk - self_start_chunk; while (find_next_bit(s->in_flight_bitmap, self_end_chunk, self_start_chunk) < self_end_chunk && s->ret >= 0) { MirrorOp *op; QTAILQ_FOREACH(op, &s->ops_in_flight, next) { uint64_t op_start_chunk = op->offset / s->granularity; uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes, s->granularity) - op_start_chunk; if (op == self) { continue; } if (ranges_overlap(self_start_chunk, self_nb_chunks, op_start_chunk, op_nb_chunks)) { if (self) { /* * If the operation is already (indirectly) waiting for us, * or will wait for us as soon as it wakes up, then just go * on (instead of producing a deadlock in the former case). */ if (op->waiting_for_op) { continue; } self->waiting_for_op = op; } qemu_co_queue_wait(&op->waiting_requests, NULL); if (self) { self->waiting_for_op = NULL; } break; } } } }
Safe
[ "CWE-476" ]
qemu
66fed30c9cd11854fc878a4eceb507e915d7c9cd
3.0401149743487918e+38
52
block/mirror: fix NULL pointer dereference in mirror_wait_on_conflicts() In mirror_iteration() we call mirror_wait_on_conflicts() with `self` parameter set to NULL. Starting from commit d44dae1a7c we dereference `self` pointer in mirror_wait_on_conflicts() without checks if it is not NULL. Backtrace: Program terminated with signal SIGSEGV, Segmentation fault. #0 mirror_wait_on_conflicts (self=0x0, s=<optimized out>, offset=<optimized out>, bytes=<optimized out>) at ../block/mirror.c:172 172 self->waiting_for_op = op; [Current thread is 1 (Thread 0x7f0908931ec0 (LWP 380249))] (gdb) bt #0 mirror_wait_on_conflicts (self=0x0, s=<optimized out>, offset=<optimized out>, bytes=<optimized out>) at ../block/mirror.c:172 #1 0x00005610c5d9d631 in mirror_run (job=0x5610c76a2c00, errp=<optimized out>) at ../block/mirror.c:491 #2 0x00005610c5d58726 in job_co_entry (opaque=0x5610c76a2c00) at ../job.c:917 #3 0x00005610c5f046c6 in coroutine_trampoline (i0=<optimized out>, i1=<optimized out>) at ../util/coroutine-ucontext.c:173 #4 0x00007f0909975820 in ?? () at ../sysdeps/unix/sysv/linux/x86_64/__start_context.S:91 from /usr/lib64/libc.so.6 Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=2001404 Fixes: d44dae1a7c ("block/mirror: fix active mirror dead-lock in mirror_wait_on_conflicts") Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> Message-Id: <20210910124533.288318-1-sgarzare@redhat.com> Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Signed-off-by: Hanna Reitz <hreitz@redhat.com>
0
int get_ctrls( Operation *op, SlapReply *rs, int sendres ) { int nctrls = 0; ber_tag_t tag; ber_len_t len; char *opaque; BerElement *ber = op->o_ber; struct berval bv; #ifdef SLAP_CONTROL_X_WHATFAILED /* NOTE: right now, slapd checks the validity of each control * while parsing. As a consequence, it can only detect one * cause of failure at a time. This results in returning * exactly one OID with the whatFailed control, or no control * at all. */ char *failed_oid = NULL; #endif len = ber_pvt_ber_remaining(ber); if( len == 0) { /* no controls */ rs->sr_err = LDAP_SUCCESS; return rs->sr_err; } if(( tag = ber_peek_tag( ber, &len )) != LDAP_TAG_CONTROLS ) { if( tag == LBER_ERROR ) { rs->sr_err = SLAPD_DISCONNECT; rs->sr_text = "unexpected data in PDU"; } goto return_results; } Debug( LDAP_DEBUG_TRACE, "=> get_ctrls\n", 0, 0, 0 ); if( op->o_protocol < LDAP_VERSION3 ) { rs->sr_err = SLAPD_DISCONNECT; rs->sr_text = "controls require LDAPv3"; goto return_results; } /* one for first control, one for termination */ op->o_ctrls = op->o_tmpalloc( 2 * sizeof(LDAPControl *), op->o_tmpmemctx ); #if 0 if( op->ctrls == NULL ) { rs->sr_err = LDAP_NO_MEMORY; rs->sr_text = "no memory"; goto return_results; } #endif op->o_ctrls[nctrls] = NULL; /* step through each element */ for( tag = ber_first_element( ber, &len, &opaque ); tag != LBER_ERROR; tag = ber_next_element( ber, &len, opaque ) ) { LDAPControl *c; LDAPControl **tctrls; c = op->o_tmpalloc( sizeof(LDAPControl), op->o_tmpmemctx ); memset(c, 0, sizeof(LDAPControl)); /* allocate pointer space for current controls (nctrls) * + this control + extra NULL */ tctrls = op->o_tmprealloc( op->o_ctrls, (nctrls+2) * sizeof(LDAPControl *), op->o_tmpmemctx ); #if 0 if( tctrls == NULL ) { ch_free( c ); ldap_controls_free(op->o_ctrls); op->o_ctrls = NULL; rs->sr_err = LDAP_NO_MEMORY; rs->sr_text = "no memory"; goto return_results; } #endif op->o_ctrls = tctrls; op->o_ctrls[nctrls++] = c; op->o_ctrls[nctrls] = NULL; tag = ber_scanf( ber, "{m" /*}*/, &bv ); c->ldctl_oid = bv.bv_val; if( tag == LBER_ERROR ) { Debug( LDAP_DEBUG_TRACE, "=> get_ctrls: get oid failed.\n", 0, 0, 0 ); slap_free_ctrls( op, op->o_ctrls ); op->o_ctrls = NULL; rs->sr_err = SLAPD_DISCONNECT; rs->sr_text = "decoding controls error"; goto return_results; } else if( c->ldctl_oid == NULL ) { Debug( LDAP_DEBUG_TRACE, "get_ctrls: conn %lu got empty OID.\n", op->o_connid, 0, 0 ); slap_free_ctrls( op, op->o_ctrls ); op->o_ctrls = NULL; rs->sr_err = LDAP_PROTOCOL_ERROR; rs->sr_text = "OID field is empty"; goto return_results; } tag = ber_peek_tag( ber, &len ); if( tag == LBER_BOOLEAN ) { ber_int_t crit; tag = ber_scanf( ber, "b", &crit ); if( tag == LBER_ERROR ) { Debug( LDAP_DEBUG_TRACE, "=> get_ctrls: get crit failed.\n", 0, 0, 0 ); slap_free_ctrls( op, op->o_ctrls ); op->o_ctrls = NULL; rs->sr_err = SLAPD_DISCONNECT; rs->sr_text = "decoding controls error"; goto return_results; } c->ldctl_iscritical = (crit != 0); tag = ber_peek_tag( ber, &len ); } if( tag == LBER_OCTETSTRING ) { tag = ber_scanf( ber, "m", &c->ldctl_value ); if( tag == LBER_ERROR ) { Debug( LDAP_DEBUG_TRACE, "=> get_ctrls: conn %lu: " "%s (%scritical): get value failed.\n", op->o_connid, c->ldctl_oid, c->ldctl_iscritical ? "" : "non" ); slap_free_ctrls( op, op->o_ctrls ); op->o_ctrls = NULL; rs->sr_err = SLAPD_DISCONNECT; rs->sr_text = "decoding controls error"; goto return_results; } } Debug( LDAP_DEBUG_TRACE, "=> get_ctrls: oid=\"%s\" (%scritical)\n", c->ldctl_oid, c->ldctl_iscritical ? "" : "non", 0 ); rs->sr_err = slap_parse_ctrl( op, rs, c, &rs->sr_text ); if ( rs->sr_err != LDAP_SUCCESS ) { #ifdef SLAP_CONTROL_X_WHATFAILED failed_oid = c->ldctl_oid; #endif goto return_results; } } return_results: Debug( LDAP_DEBUG_TRACE, "<= get_ctrls: n=%d rc=%d err=\"%s\"\n", nctrls, rs->sr_err, rs->sr_text ? rs->sr_text : ""); if( sendres && rs->sr_err != LDAP_SUCCESS ) { if( rs->sr_err == SLAPD_DISCONNECT ) { rs->sr_err = LDAP_PROTOCOL_ERROR; send_ldap_disconnect( op, rs ); rs->sr_err = SLAPD_DISCONNECT; } else { #ifdef SLAP_CONTROL_X_WHATFAILED /* might have not been parsed yet? */ if ( failed_oid != NULL ) { if ( !get_whatFailed( op ) ) { /* look it up */ /* step through each remaining element */ for ( ; tag != LBER_ERROR; tag = ber_next_element( ber, &len, opaque ) ) { LDAPControl c = { 0 }; tag = ber_scanf( ber, "{m" /*}*/, &bv ); c.ldctl_oid = bv.bv_val; if ( tag == LBER_ERROR ) { slap_free_ctrls( op, op->o_ctrls ); op->o_ctrls = NULL; break; } else if ( c.ldctl_oid == NULL ) { slap_free_ctrls( op, op->o_ctrls ); op->o_ctrls = NULL; break; } tag = ber_peek_tag( ber, &len ); if ( tag == LBER_BOOLEAN ) { ber_int_t crit; tag = ber_scanf( ber, "b", &crit ); if( tag == LBER_ERROR ) { slap_free_ctrls( op, op->o_ctrls ); op->o_ctrls = NULL; break; } tag = ber_peek_tag( ber, &len ); } if ( tag == LBER_OCTETSTRING ) { tag = ber_scanf( ber, "m", &c.ldctl_value ); if( tag == LBER_ERROR ) { slap_free_ctrls( op, op->o_ctrls ); op->o_ctrls = NULL; break; } } if ( strcmp( c.ldctl_oid, LDAP_CONTROL_X_WHATFAILED ) == 0 ) { const char *text; slap_parse_ctrl( op, rs, &c, &text ); break; } } } if ( get_whatFailed( op ) ) { char *oids[ 2 ]; oids[ 0 ] = failed_oid; oids[ 1 ] = NULL; slap_ctrl_whatFailed_add( op, rs, oids ); } } #endif send_ldap_result( op, rs ); } } return rs->sr_err; }
Safe
[ "CWE-125" ]
openldap
21981053a1195ae1555e23df4d9ac68d34ede9dd
3.0979829938030488e+38
249
ITS#9408 fix vrfilter double-free
0
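A side note on the allocation pattern get_ctrls relies on above: the o_ctrls pointer array is kept NULL-terminated after every growth step, so any failure path can hand the list to slap_free_ctrls without tracking a separate count. A minimal standalone sketch of that invariant, using plain realloc in place of the slab tmpalloc routines (the helper name is illustrative, not OpenLDAP's):

#include <stdlib.h>

/* Append one entry to a NULL-terminated pointer array, keeping the
 * terminator valid at every step so the list is always freeable. */
static int append_ptr(void ***list, int *n, void *item)
{
    /* current entries + the new entry + the terminating NULL */
    void **tmp = realloc(*list, (*n + 2) * sizeof(void *));
    if (tmp == NULL)
        return -1;          /* old list is still terminated and freeable */
    *list = tmp;
    (*list)[(*n)++] = item;
    (*list)[*n] = NULL;     /* re-establish the terminator */
    return 0;
}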
void TearDown() override {
  cleanUpXdsConnection();
  client_ssl_ctx_.reset();
  cleanupUpstreamAndDownstream();
  codec_client_.reset();
}
Safe
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
6.440196849797628e+37
7
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <mklein@lyft.com>
0
vhost_user_get_vring_base(struct virtio_net **pdev,
            struct vhu_msg_context *ctx,
            int main_fd __rte_unused)
{
    struct virtio_net *dev = *pdev;
    struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
    uint64_t val;

    if (validate_msg_fds(dev, ctx, 0) != 0)
        return RTE_VHOST_MSG_RESULT_ERR;

    /* We have to stop the queue (virtio) if it is running. */
    vhost_destroy_device_notify(dev);

    dev->flags &= ~VIRTIO_DEV_READY;
    dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;

    /* Here we are safe to get the indexes */
    if (vq_is_packed(dev)) {
        /*
         * Bit[0:14]: avail index
         * Bit[15]: avail wrap counter
         */
        val = vq->last_avail_idx & 0x7fff;
        val |= vq->avail_wrap_counter << 15;
        ctx->msg.payload.state.num = val;
    } else {
        ctx->msg.payload.state.num = vq->last_avail_idx;
    }

    VHOST_LOG_CONFIG(INFO, "(%s) vring base idx:%d file:%d\n",
        dev->ifname, ctx->msg.payload.state.index,
        ctx->msg.payload.state.num);

    /*
     * Based on current qemu vhost-user implementation, this message is
     * sent and only sent in vhost_vring_stop.
     * TODO: cleanup the vring, it isn't usable since here.
     */
    if (vq->kickfd >= 0)
        close(vq->kickfd);

    vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

    if (vq->callfd >= 0)
        close(vq->callfd);

    vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

    vq->signalled_used_valid = false;

    if (vq_is_packed(dev)) {
        rte_free(vq->shadow_used_packed);
        vq->shadow_used_packed = NULL;
    } else {
        rte_free(vq->shadow_used_split);
        vq->shadow_used_split = NULL;
    }

    rte_free(vq->batch_copy_elems);
    vq->batch_copy_elems = NULL;

    rte_free(vq->log_cache);
    vq->log_cache = NULL;

    ctx->msg.size = sizeof(ctx->msg.payload.state);
    ctx->fd_num = 0;

    vhost_user_iotlb_flush_all(vq);

    vring_invalidate(dev, vq);

    return RTE_VHOST_MSG_RESULT_REPLY;
}
Safe
[ "CWE-125", "CWE-787" ]
dpdk
6442c329b9d2ded0f44b27d2016aaba8ba5844c5
1.966696967179099e+38
73
vhost: fix queue number check when setting inflight FD In function vhost_user_set_inflight_fd, queue number in inflight message is used to access virtqueue. However, queue number could be larger than VHOST_MAX_VRING and cause write OOB as this number will be used to write inflight info in virtqueue structure. This patch checks the queue number to avoid the issue and also make sure virtqueues are allocated before setting inflight information. Fixes: ad0a4ae491fe ("vhost: checkout resubmit inflight information") Cc: stable@dpdk.org Reported-by: Wenxiang Qian <leonwxqian@gmail.com> Signed-off-by: Chenbo Xia <chenbo.xia@intel.com> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
0
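The fix described in the message above is, at its core, an index validation performed before the virtqueue array is dereferenced; note that vhost_user_get_vring_base above indexes dev->virtqueue[] straight from message payload. A hedged sketch of the kind of check the patch adds (layout simplified; 256 is an illustrative stand-in for the real VHOST_MAX_VRING constant in vhost.h):

#include <stdint.h>
#include <stddef.h>

#define VHOST_MAX_VRING 256   /* assumption for illustration */

/* Reject queue numbers that would index past the virtqueue array,
 * and refuse to proceed if that virtqueue was never allocated. */
static int inflight_queue_ok(void *const *virtqueue, uint16_t num_queues,
                             uint16_t queue_idx)
{
    if (queue_idx >= VHOST_MAX_VRING || queue_idx >= num_queues)
        return 0;                       /* would write out of bounds */
    if (virtqueue[queue_idx] == NULL)
        return 0;                       /* not allocated yet */
    return 1;
}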
void release_thread(struct task_struct *dead_task)
{
}
Safe
[ "CWE-284", "CWE-264" ]
linux
a4780adeefd042482f624f5e0d577bf9cdcbb760
1.3375397658475048e+38
3
ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork Since commit 6a1c53124aa1 the user writeable TLS register was zeroed to prevent it from being used as a covert channel between two tasks. There are more and more applications coming to Windows RT, Wine could support them, but mostly they expect to have the thread environment block (TEB) in TPIDRURW. This patch preserves that register per thread instead of clearing it. Unlike the TPIDRURO, which is already switched, the TPIDRURW can be updated from userspace so needs careful treatment in the case that we modify TPIDRURW and call fork(). To avoid this we must always read TPIDRURW in copy_thread. Signed-off-by: André Hentschel <nerv@dawncrow.de> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Jonathan Austin <jonathan.austin@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
0
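For context on the register named in the message: TPIDRURW is the user read/write thread ID register, accessed through CP15 as c13, c0, 2. A hedged sketch of the accessors a context switch would need, using the standard mrc/mcr encodings (the helper names are made up for illustration, not the kernel's):

/* Read and write TPIDRURW (CP15 c13, c0, 2). Because userspace can
 * change this register at any time, the switch-out path must read it
 * fresh rather than trust a cached copy. */
static inline unsigned long read_tpidrurw(void)
{
    unsigned long v;
    __asm__ volatile("mrc p15, 0, %0, c13, c0, 2" : "=r" (v));
    return v;
}

static inline void write_tpidrurw(unsigned long v)
{
    __asm__ volatile("mcr p15, 0, %0, c13, c0, 2" : : "r" (v));
}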
run_update_thread (CamelEwsStore *ews_store,
                   gboolean folder_list,
                   GCancellable *cancellable)
{
    GThread *thread;
    struct ScheduleUpdateData *sud;

    g_return_if_fail (ews_store != NULL);
    g_return_if_fail (cancellable != NULL);

    sud = g_new0 (struct ScheduleUpdateData, 1);
    sud->ews_store = g_object_ref (ews_store);
    sud->cancellable = g_object_ref (cancellable);

    thread = g_thread_new (
        NULL,
        folder_list ? camel_ews_folder_list_update_thread : camel_ews_folder_update_thread,
        sud);
    g_thread_unref (thread);
}
Safe
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
3.0346944762352487e+38
20
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
0
bool finalize() { return copy_up_to(src_len); }
Safe
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
1.8672900468261633e+38
2
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <sergey.petrunya@mariadb.com>
0
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
                            struct btrfs_path *p,
                            int write_lock_level)
{
    struct btrfs_fs_info *fs_info = root->fs_info;
    struct extent_buffer *b;
    int root_lock;
    int level = 0;

    /* We try very hard to do read locks on the root */
    root_lock = BTRFS_READ_LOCK;

    if (p->search_commit_root) {
        /*
         * The commit roots are read only so we always do read locks,
         * and we always must hold the commit_root_sem when doing
         * searches on them, the only exception is send where we don't
         * want to block transaction commits for a long time, so
         * we need to clone the commit root in order to avoid races
         * with transaction commits that create a snapshot of one of
         * the roots used by a send operation.
         */
        if (p->need_commit_sem) {
            down_read(&fs_info->commit_root_sem);
            b = btrfs_clone_extent_buffer(root->commit_root);
            up_read(&fs_info->commit_root_sem);
            if (!b)
                return ERR_PTR(-ENOMEM);

        } else {
            b = root->commit_root;
            atomic_inc(&b->refs);
        }
        level = btrfs_header_level(b);
        /*
         * Ensure that all callers have set skip_locking when
         * p->search_commit_root = 1.
         */
        ASSERT(p->skip_locking == 1);

        goto out;
    }

    if (p->skip_locking) {
        b = btrfs_root_node(root);
        level = btrfs_header_level(b);
        goto out;
    }

    /*
     * If the level is set to maximum, we can skip trying to get the read
     * lock.
     */
    if (write_lock_level < BTRFS_MAX_LEVEL) {
        /*
         * We don't know the level of the root node until we actually
         * have it read locked
         */
        b = btrfs_read_lock_root_node(root);
        level = btrfs_header_level(b);
        if (level > write_lock_level)
            goto out;

        /* Whoops, must trade for write lock */
        btrfs_tree_read_unlock(b);
        free_extent_buffer(b);
    }

    b = btrfs_lock_root_node(root);
    root_lock = BTRFS_WRITE_LOCK;

    /* The level might have changed, check again */
    level = btrfs_header_level(b);

out:
    p->nodes[level] = b;
    if (!p->skip_locking)
        p->locks[level] = root_lock;
    /*
     * Callers are responsible for dropping b's references.
     */
    return b;
}
Safe
[ "CWE-362" ]
linux
dbcc7d57bffc0c8cac9dac11bec548597d59a6a5
1.4246210066233425e+38
83
btrfs: fix race when cloning extent buffer during rewind of an old root While resolving backreferences, as part of a logical ino ioctl call or fiemap, we can end up hitting a BUG_ON() when replaying tree mod log operations of a root, triggering a stack trace like the following: ------------[ cut here ]------------ kernel BUG at fs/btrfs/ctree.c:1210! invalid opcode: 0000 [#1] SMP KASAN PTI CPU: 1 PID: 19054 Comm: crawl_335 Tainted: G W 5.11.0-2d11c0084b02-misc-next+ #89 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 RIP: 0010:__tree_mod_log_rewind+0x3b1/0x3c0 Code: 05 48 8d 74 10 (...) RSP: 0018:ffffc90001eb70b8 EFLAGS: 00010297 RAX: 0000000000000000 RBX: ffff88812344e400 RCX: ffffffffb28933b6 RDX: 0000000000000007 RSI: dffffc0000000000 RDI: ffff88812344e42c RBP: ffffc90001eb7108 R08: 1ffff11020b60a20 R09: ffffed1020b60a20 R10: ffff888105b050f9 R11: ffffed1020b60a1f R12: 00000000000000ee R13: ffff8880195520c0 R14: ffff8881bc958500 R15: ffff88812344e42c FS: 00007fd1955e8700(0000) GS:ffff8881f5600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007efdb7928718 CR3: 000000010103a006 CR4: 0000000000170ee0 Call Trace: btrfs_search_old_slot+0x265/0x10d0 ? lock_acquired+0xbb/0x600 ? btrfs_search_slot+0x1090/0x1090 ? free_extent_buffer.part.61+0xd7/0x140 ? free_extent_buffer+0x13/0x20 resolve_indirect_refs+0x3e9/0xfc0 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? add_prelim_ref.part.11+0x150/0x150 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? lock_acquired+0xbb/0x600 ? __kasan_check_write+0x14/0x20 ? do_raw_spin_unlock+0xa8/0x140 ? rb_insert_color+0x30/0x360 ? prelim_ref_insert+0x12d/0x430 find_parent_nodes+0x5c3/0x1830 ? resolve_indirect_refs+0xfc0/0xfc0 ? lock_release+0xc8/0x620 ? fs_reclaim_acquire+0x67/0xf0 ? lock_acquire+0xc7/0x510 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x160/0x210 ? lock_release+0xc8/0x620 ? fs_reclaim_acquire+0x67/0xf0 ? lock_acquire+0xc7/0x510 ? poison_range+0x38/0x40 ? unpoison_range+0x14/0x40 ? trace_hardirqs_on+0x55/0x120 btrfs_find_all_roots_safe+0x142/0x1e0 ? find_parent_nodes+0x1830/0x1830 ? btrfs_inode_flags_to_xflags+0x50/0x50 iterate_extent_inodes+0x20e/0x580 ? tree_backref_for_extent+0x230/0x230 ? lock_downgrade+0x3d0/0x3d0 ? read_extent_buffer+0xdd/0x110 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? lock_acquired+0xbb/0x600 ? __kasan_check_write+0x14/0x20 ? _raw_spin_unlock+0x22/0x30 ? __kasan_check_write+0x14/0x20 iterate_inodes_from_logical+0x129/0x170 ? iterate_inodes_from_logical+0x129/0x170 ? btrfs_inode_flags_to_xflags+0x50/0x50 ? iterate_extent_inodes+0x580/0x580 ? __vmalloc_node+0x92/0xb0 ? init_data_container+0x34/0xb0 ? init_data_container+0x34/0xb0 ? kvmalloc_node+0x60/0x80 btrfs_ioctl_logical_to_ino+0x158/0x230 btrfs_ioctl+0x205e/0x4040 ? __might_sleep+0x71/0xe0 ? btrfs_ioctl_get_supported_features+0x30/0x30 ? getrusage+0x4b6/0x9c0 ? __kasan_check_read+0x11/0x20 ? lock_release+0xc8/0x620 ? __might_fault+0x64/0xd0 ? lock_acquire+0xc7/0x510 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? __kasan_check_read+0x11/0x20 ? do_vfs_ioctl+0xfc/0x9d0 ? ioctl_file_clone+0xe0/0xe0 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? __kasan_check_read+0x11/0x20 ? lock_release+0xc8/0x620 ? __task_pid_nr_ns+0xd3/0x250 ? lock_acquire+0xc7/0x510 ? __fget_files+0x160/0x230 ? 
__fget_light+0xf2/0x110 __x64_sys_ioctl+0xc3/0x100 do_syscall_64+0x37/0x80 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fd1976e2427 Code: 00 00 90 48 8b 05 (...) RSP: 002b:00007fd1955e5cf8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 00007fd1955e5f40 RCX: 00007fd1976e2427 RDX: 00007fd1955e5f48 RSI: 00000000c038943b RDI: 0000000000000004 RBP: 0000000001000000 R08: 0000000000000000 R09: 00007fd1955e6120 R10: 0000557835366b00 R11: 0000000000000246 R12: 0000000000000004 R13: 00007fd1955e5f48 R14: 00007fd1955e5f40 R15: 00007fd1955e5ef8 Modules linked in: ---[ end trace ec8931a1c36e57be ]--- (gdb) l *(__tree_mod_log_rewind+0x3b1) 0xffffffff81893521 is in __tree_mod_log_rewind (fs/btrfs/ctree.c:1210). 1205 * the modification. as we're going backwards, we do the 1206 * opposite of each operation here. 1207 */ 1208 switch (tm->op) { 1209 case MOD_LOG_KEY_REMOVE_WHILE_FREEING: 1210 BUG_ON(tm->slot < n); 1211 fallthrough; 1212 case MOD_LOG_KEY_REMOVE_WHILE_MOVING: 1213 case MOD_LOG_KEY_REMOVE: 1214 btrfs_set_node_key(eb, &tm->key, tm->slot); Here's what happens to hit that BUG_ON(): 1) We have one tree mod log user (through fiemap or the logical ino ioctl), with a sequence number of 1, so we have fs_info->tree_mod_seq == 1; 2) Another task is at ctree.c:balance_level() and we have eb X currently as the root of the tree, and we promote its single child, eb Y, as the new root. Then, at ctree.c:balance_level(), we call: tree_mod_log_insert_root(eb X, eb Y, 1); 3) At tree_mod_log_insert_root() we create tree mod log elements for each slot of eb X, of operation type MOD_LOG_KEY_REMOVE_WHILE_FREEING each with a ->logical pointing to ebX->start. These are placed in an array named tm_list. Lets assume there are N elements (N pointers in eb X); 4) Then, still at tree_mod_log_insert_root(), we create a tree mod log element of operation type MOD_LOG_ROOT_REPLACE, ->logical set to ebY->start, ->old_root.logical set to ebX->start, ->old_root.level set to the level of eb X and ->generation set to the generation of eb X; 5) Then tree_mod_log_insert_root() calls tree_mod_log_free_eb() with tm_list as argument. After that, tree_mod_log_free_eb() calls __tree_mod_log_insert() for each member of tm_list in reverse order, from highest slot in eb X, slot N - 1, to slot 0 of eb X; 6) __tree_mod_log_insert() sets the sequence number of each given tree mod log operation - it increments fs_info->tree_mod_seq and sets fs_info->tree_mod_seq as the sequence number of the given tree mod log operation. This means that for the tm_list created at tree_mod_log_insert_root(), the element corresponding to slot 0 of eb X has the highest sequence number (1 + N), and the element corresponding to the last slot has the lowest sequence number (2); 7) Then, after inserting tm_list's elements into the tree mod log rbtree, the MOD_LOG_ROOT_REPLACE element is inserted, which gets the highest sequence number, which is N + 2; 8) Back to ctree.c:balance_level(), we free eb X by calling btrfs_free_tree_block() on it. 
Because eb X was created in the current transaction, has no other references and writeback did not happen for it, we add it back to the free space cache/tree; 9) Later some other task T allocates the metadata extent from eb X, since it is marked as free space in the space cache/tree, and uses it as a node for some other btree; 10) The tree mod log user task calls btrfs_search_old_slot(), which calls get_old_root(), and finally that calls __tree_mod_log_oldest_root() with time_seq == 1 and eb_root == eb Y; 11) First iteration of the while loop finds the tree mod log element with sequence number N + 2, for the logical address of eb Y and of type MOD_LOG_ROOT_REPLACE; 12) Because the operation type is MOD_LOG_ROOT_REPLACE, we don't break out of the loop, and set root_logical to point to tm->old_root.logical which corresponds to the logical address of eb X; 13) On the next iteration of the while loop, the call to tree_mod_log_search_oldest() returns the smallest tree mod log element for the logical address of eb X, which has a sequence number of 2, an operation type of MOD_LOG_KEY_REMOVE_WHILE_FREEING and corresponds to the old slot N - 1 of eb X (eb X had N items in it before being freed); 14) We then break out of the while loop and return the tree mod log operation of type MOD_LOG_ROOT_REPLACE (eb Y), and not the one for slot N - 1 of eb X, to get_old_root(); 15) At get_old_root(), we process the MOD_LOG_ROOT_REPLACE operation and set "logical" to the logical address of eb X, which was the old root. We then call tree_mod_log_search() passing it the logical address of eb X and time_seq == 1; 16) Then before calling tree_mod_log_search(), task T adds a key to eb X, which results in adding a tree mod log operation of type MOD_LOG_KEY_ADD to the tree mod log - this is done at ctree.c:insert_ptr() - but after adding the tree mod log operation and before updating the number of items in eb X from 0 to 1... 17) The task at get_old_root() calls tree_mod_log_search() and gets the tree mod log operation of type MOD_LOG_KEY_ADD just added by task T. Then it enters the following if branch: if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) { (...) } (...) Calls read_tree_block() for eb X, which gets a reference on eb X but does not lock it - task T has it locked. Then it clones eb X while it has nritems set to 0 in its header, before task T sets nritems to 1 in eb X's header. From hereupon we use the clone of eb X which no other task has access to; 18) Then we call __tree_mod_log_rewind(), passing it the MOD_LOG_KEY_ADD mod log operation we just got from tree_mod_log_search() in the previous step and the cloned version of eb X; 19) At __tree_mod_log_rewind(), we set the local variable "n" to the number of items set in eb X's clone, which is 0. Then we enter the while loop, and in its first iteration we process the MOD_LOG_KEY_ADD operation, which just decrements "n" from 0 to (u32)-1, since "n" is declared with a type of u32. At the end of this iteration we call rb_next() to find the next tree mod log operation for eb X, that gives us the mod log operation of type MOD_LOG_KEY_REMOVE_WHILE_FREEING, for slot 0, with a sequence number of N + 1 (steps 3 to 6); 20) Then we go back to the top of the while loop and trigger the following BUG_ON(): (...) switch (tm->op) { case MOD_LOG_KEY_REMOVE_WHILE_FREEING: BUG_ON(tm->slot < n); fallthrough; (...) Because "n" has a value of (u32)-1 (4294967295) and tm->slot is 0. 
Fix this by taking a read lock on the extent buffer before cloning it at ctree.c:get_old_root(). This should be done regardless of the extent buffer having been freed and reused, as a concurrent task might be modifying it (while holding a write lock on it). Reported-by: Zygo Blaxell <ce3g8jdj@umail.furryterror.org> Link: https://lore.kernel.org/linux-btrfs/20210227155037.GN28049@hungrycats.org/ Fixes: 834328a8493079 ("Btrfs: tree mod log's old roots could still be part of the tree") CC: stable@vger.kernel.org # 4.4+ Signed-off-by: Filipe Manana <fdmanana@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
0
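The long walkthrough above reduces to one ordering rule: never clone an extent buffer that a concurrent writer may be mutating without first taking its read lock. A sketch of the corrected sequence in get_old_root as the message describes it (the btrfs function names are the ones cited above; the read_tree_block arguments are approximated and error handling is trimmed):

/* Lock the old root before cloning so a concurrent insert_ptr()
 * cannot be observed half-done (tree mod log entry already added,
 * nritems in the header not yet updated). */
eb = read_tree_block(fs_info, logical, 0, level, NULL);
btrfs_tree_read_lock(eb);
eb_clone = btrfs_clone_extent_buffer(eb);  /* private, consistent copy */
btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);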
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
    struct tcp_md5sig_pool * __percpu *p;

    spin_lock_bh(&tcp_md5sig_pool_lock);
    p = tcp_md5sig_pool;
    if (p)
        tcp_md5sig_users++;
    spin_unlock_bh(&tcp_md5sig_pool_lock);
    return (p ? *per_cpu_ptr(p, cpu) : NULL);
}
Safe
[ "CWE-400", "CWE-119", "CWE-703" ]
linux
baff42ab1494528907bf4d5870359e31711746ae
2.5605610612439002e+38
10
net: Fix oops from tcp_collapse() when using splice() tcp_read_sock() can have a eat skbs without immediately advancing copied_seq. This can cause a panic in tcp_collapse() if it is called as a result of the recv_actor dropping the socket lock. A userspace program that splices data from a socket to either another socket or to a file can trigger this bug. Signed-off-by: Steven J. Magnani <steve@digidescorp.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
decompileGETMEMBER(int n, SWF_ACTION *actions, int maxn)
{
    struct SWF_ACTIONPUSHPARAM *obj, *mem, *var;
    char *vname, *varname, *memname;
    int len;

    mem = pop();
    var = pop();
    varname = getName(var);
    memname = getName(mem);
#ifdef DEBUG
    printf("*getMember* varName %s (type=%d) memName=%s (type=%d)\n",
           varname, var->Type, memname, mem->Type);
#endif
    len = strlen(varname) + strlen(memname);

    if (mem->Type == PUSH_INT || mem->Type == PUSH_DOUBLE ||
        mem->Type == PUSH_VARIABLE || mem->Type == PUSH_REGISTER ||
        mem->Type == 12 )
    {
        vname = malloc(len+3);
        strcpy(vname, varname);
        strcat(vname, "[");
        strcat(vname, memname);
        strcat(vname, "]");
    } else {
        vname = malloc(len+2);
        strcpy(vname, varname);
        strcat(vname, ".");
        strcat(vname, memname);
    }

    obj = newVar(vname);
    pushvar(obj);

    return 0;
}
Safe
[ "CWE-119", "CWE-125" ]
libming
da9d86eab55cbf608d5c916b8b690f5b76bca462
1.1523877955746656e+38
36
decompileAction: Prevent heap buffer overflow and underflow with using OpCode
0
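On the sizing in decompileGETMEMBER above: len+3 covers '[' plus ']' plus the NUL terminator, and len+2 covers '.' plus NUL, so the patched arithmetic is exact. A snprintf-based formulation makes that accounting explicit and is harder to get wrong when the format changes; a sketch, not libming's code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "var[mem]"; the size is derived from the same strings the
 * format consumes, so it cannot drift out of sync with the layout. */
static char *join_subscript(const char *varname, const char *memname)
{
    size_t need = strlen(varname) + strlen(memname) + 3; /* '[' ']' NUL */
    char *vname = malloc(need);
    if (vname != NULL)
        snprintf(vname, need, "%s[%s]", varname, memname);
    return vname;
}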
long SSL_SESSION_set_timeout(SSL_SESSION* sess, long t)
{
    if (!sess)
        return SSL_ERROR_NONE;

    sess->SetTimeOut(t);
    return SSL_SUCCESS;
}
Safe
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
8.621040856014756e+37
8
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
0
cmsPipeline* CMSEXPORT cmsPipelineDup(const cmsPipeline* lut)
{
    cmsPipeline* NewLUT;
    cmsStage *NewMPE, *Anterior = NULL, *mpe;
    cmsBool First = TRUE;

    if (lut == NULL) return NULL;

    NewLUT = cmsPipelineAlloc(lut ->ContextID, lut ->InputChannels, lut ->OutputChannels);

    for (mpe = lut ->Elements;
         mpe != NULL;
         mpe = mpe ->Next) {

        NewMPE = cmsStageDup(mpe);

        if (NewMPE == NULL) {
            cmsPipelineFree(NewLUT);
            return NULL;
        }

        if (First) {
            NewLUT ->Elements = NewMPE;
            First = FALSE;
        }
        else {
            Anterior ->Next = NewMPE;
        }

        Anterior = NewMPE;
    }

    NewLUT ->Eval16Fn    = lut ->Eval16Fn;
    NewLUT ->EvalFloatFn = lut ->EvalFloatFn;
    NewLUT ->DupDataFn   = lut ->DupDataFn;
    NewLUT ->FreeDataFn  = lut ->FreeDataFn;

    if (NewLUT ->DupDataFn != NULL)
        NewLUT ->Data = NewLUT ->DupDataFn(lut ->ContextID, lut->Data);

    NewLUT ->SaveAs8Bits = lut ->SaveAs8Bits;

    BlessLUT(NewLUT);
    return NewLUT;
}
Vulnerable
[]
Little-CMS
886e2f524268efe8a1c3aa838c28e446fda24486
3.205326016663137e+38
45
Fixes from coverity check
1
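The vul=1 label on cmsPipelineDup above is consistent with an unchecked allocation: cmsPipelineAlloc can return NULL, after which the copy loop and BlessLUT dereference it. Presumably the coverity-driven fix adds a guard immediately after the allocation, along these lines (a sketch, not the verbatim patch):

NewLUT = cmsPipelineAlloc(lut->ContextID, lut->InputChannels, lut->OutputChannels);
if (NewLUT == NULL) return NULL;   /* the check the dup path is missing */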
static void serialize_plist(node_t* node, void* data)
{
    uint64_t *index_val = NULL;
    struct serialize_s *ser = (struct serialize_s *) data;
    uint64_t current_index = ser->objects->len;

    //first check that node is not yet in objects
    void* val = hash_table_lookup(ser->ref_table, node);
    if (val)
    {
        //data is already in table
        return;
    }
    //insert new ref
    index_val = (uint64_t *) malloc(sizeof(uint64_t));
    assert(index_val != NULL);
    *index_val = current_index;
    hash_table_insert(ser->ref_table, node, index_val);

    //now append current node to object array
    ptr_array_add(ser->objects, node);

    //now recurse on children
    node_iterator_t *ni = node_iterator_create(node->children);
    node_t *ch;
    while ((ch = node_iterator_next(ni))) {
        serialize_plist(ch, data);
    }
    node_iterator_destroy(ni);
    return;
}
Safe
[ "CWE-20", "CWE-119", "CWE-415", "CWE-787" ]
libplist
32ee5213fe64f1e10ec76c1ee861ee6f233120dd
1.7342195267897916e+38
32
bplist: Fix data range check for string/data/dict/array nodes Passing a size of 0xFFFFFFFFFFFFFFFF to parse_string_node() might result in a memcpy with a size of -1, leading to undefined behavior. This commit makes sure that the actual node data (which depends on the size) is in the range start_of_object..start_of_object+size. Credit to OSS-Fuzz
0
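The range check described in the message has to be phrased so the arithmetic itself cannot wrap: with size taken from untrusted input, start_of_object + size overflows exactly in the 0xFFFFFFFFFFFFFFFF case the message cites. Comparing against the remaining bytes instead avoids that. A minimal sketch (names illustrative, not libplist's):

#include <stdint.h>

/* Nonzero iff [obj, obj+size) lies inside [start, end).
 * Written as a subtraction so size == UINT64_MAX cannot wrap. */
static int node_data_in_range(const char *start, const char *end,
                              const char *obj, uint64_t size)
{
    if (obj < start || obj >= end)
        return 0;
    return size <= (uint64_t)(end - obj);
}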
const char *what() const { return s_.empty() ? nullptr : s_.c_str(); }
Safe
[ "CWE-125" ]
cpp-peglib
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
7.705215758082548e+37
1
Fix #122
0
SPL_METHOD(SplFileInfo, getLinkTarget)
{
    spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
    int ret;
    char buff[MAXPATHLEN];
    zend_error_handling error_handling;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

#if defined(PHP_WIN32) || HAVE_SYMLINK
    if (intern->file_name == NULL) {
        php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty filename");
        RETURN_FALSE;
    } else if (!IS_ABSOLUTE_PATH(intern->file_name, intern->file_name_len)) {
        char expanded_path[MAXPATHLEN];
        if (!expand_filepath_with_mode(intern->file_name, expanded_path, NULL, 0, CWD_EXPAND TSRMLS_CC)) {
            php_error_docref(NULL TSRMLS_CC, E_WARNING, "No such file or directory");
            RETURN_FALSE;
        }
        ret = php_sys_readlink(expanded_path, buff, MAXPATHLEN - 1);
    } else {
        ret = php_sys_readlink(intern->file_name, buff, MAXPATHLEN-1);
    }
#else
    ret = -1; /* always fail if not implemented */
#endif

    if (ret == -1) {
        zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Unable to read link %s, error: %s", intern->file_name, strerror(errno));
        RETVAL_FALSE;
    } else {
        /* Append NULL to the end of the string */
        buff[ret] = '\0';

        RETVAL_STRINGL(buff, ret, 1);
    }

    zend_restore_error_handling(&error_handling TSRMLS_CC);
}
Vulnerable
[ "CWE-190" ]
php-src
7245bff300d3fa8bacbef7897ff080a6f1c23eba
3.218184919037776e+37
43
Fix bug #72262 - do not overflow int
1
int wc_ecc_make_key_ex(WC_RNG* rng, int keysize, ecc_key* key, int curve_id)
{
    int err;
#ifndef WOLFSSL_ATECC508A
    DECLARE_CURVE_SPECS(ECC_CURVE_FIELD_COUNT)
#endif

    if (key == NULL || rng == NULL) {
        return BAD_FUNC_ARG;
    }

    /* make sure required variables are reset */
    wc_ecc_reset(key);

    err = wc_ecc_set_curve(key, keysize, curve_id);
    if (err != 0) {
        return err;
    }

#if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_ECC)
    if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_ECC) {
    #ifdef HAVE_CAVIUM
        /* TODO: Not implemented */
    #elif defined(HAVE_INTEL_QA)
        /* TODO: Not implemented */
    #else
        if (wc_AsyncTestInit(&key->asyncDev, ASYNC_TEST_ECC_MAKE)) {
            WC_ASYNC_TEST* testDev = &key->asyncDev.test;
            testDev->eccMake.rng = rng;
            testDev->eccMake.key = key;
            testDev->eccMake.size = keysize;
            testDev->eccMake.curve_id = curve_id;
            return WC_PENDING_E;
        }
    #endif
    }
#endif /* WOLFSSL_ASYNC_CRYPT */

#ifdef WOLFSSL_ATECC508A
    key->type = ECC_PRIVATEKEY;

    err = atcatls_create_key(key->slot, key->pubkey_raw);
    if (err != ATCA_SUCCESS) {
        err = BAD_COND_E;
    }

    /* populate key->pubkey */
    err = mp_read_unsigned_bin(key->pubkey.x, key->pubkey_raw,
                               ECC_MAX_CRYPTO_HW_SIZE);
    if (err == MP_OKAY)
        err = mp_read_unsigned_bin(key->pubkey.y,
                                   key->pubkey_raw + ECC_MAX_CRYPTO_HW_SIZE,
                                   ECC_MAX_CRYPTO_HW_SIZE);
#else

#ifdef WOLFSSL_HAVE_SP_ECC
#ifndef WOLFSSL_SP_NO_256
    if (key->idx != ECC_CUSTOM_IDX && ecc_sets[key->idx].id == ECC_SECP256R1) {
        err = sp_ecc_make_key_256(rng, &key->k, &key->pubkey, key->heap);
        if (err == MP_OKAY)
            key->type = ECC_PRIVATEKEY;
    }
    else
#endif
#endif
#ifdef WOLFSSL_SP_MATH
        err = WC_KEY_SIZE_E;
#else
    {
        /* setup the key variables */
        err = mp_init(&key->k);

        /* load curve info */
        if (err == MP_OKAY)
            err = wc_ecc_curve_load(key->dp, &curve, ECC_CURVE_FIELD_ALL);

        /* generate k */
        if (err == MP_OKAY)
            err = wc_ecc_gen_k(rng, key->dp->size, &key->k, curve->order);

        /* generate public key from k */
        if (err == MP_OKAY)
            err = wc_ecc_make_pub_ex(key, curve, NULL);

        if (err == MP_OKAY)
            key->type = ECC_PRIVATEKEY;

        /* cleanup these on failure case only */
        if (err != MP_OKAY) {
            /* clean up */
            mp_forcezero(&key->k);
        }

        /* cleanup allocations */
        wc_ecc_curve_free(curve);
    }
#endif
#endif /* WOLFSSL_ATECC508A */

    return err;
}
Safe
[ "CWE-200" ]
wolfssl
9b9568d500f31f964af26ba8d01e542e1f27e5ca
3.345361998198992e+38
101
Change ECDSA signing to use blinding.
0
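On the blinding named in the message: ECDSA computes s = k⁻¹(e + r·d) mod n, and inverting the secret nonce k directly can leak k through timing. The standard countermeasure, which is the idea this commit applies (wolfSSL's mp_int internals are not reproduced here), inverts k·b for a fresh random b and strips b afterwards, since k⁻¹ = b·(k·b)⁻¹ mod n. A toy, self-contained illustration with 64-bit arithmetic and a prime modulus:

#include <stdint.h>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t n)
{
    return (uint64_t)((unsigned __int128)a * b % n);   /* GCC/Clang extension */
}

static uint64_t powmod(uint64_t a, uint64_t e, uint64_t n)
{
    uint64_t r = 1;
    for (a %= n; e; e >>= 1) {
        if (e & 1) r = mulmod(r, a, n);
        a = mulmod(a, a, n);
    }
    return r;
}

/* Blinded inversion: the value actually inverted, k*b mod n, is
 * uncorrelated with the secret k; Fermat inversion requires n prime
 * and b random in [1, n-1]. */
static uint64_t blinded_inv(uint64_t k, uint64_t b, uint64_t n)
{
    uint64_t kb_inv = powmod(mulmod(k, b, n), n - 2, n);
    return mulmod(b, kb_inv, n);
}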
static inline GLenum to_gl_swizzle(int swizzle)
{
    switch (swizzle) {
    case PIPE_SWIZZLE_RED: return GL_RED;
    case PIPE_SWIZZLE_GREEN: return GL_GREEN;
    case PIPE_SWIZZLE_BLUE: return GL_BLUE;
    case PIPE_SWIZZLE_ALPHA: return GL_ALPHA;
    case PIPE_SWIZZLE_ZERO: return GL_ZERO;
    case PIPE_SWIZZLE_ONE: return GL_ONE;
    default:
        assert(0);
        return 0;
    }
}
Safe
[ "CWE-787" ]
virglrenderer
cbc8d8b75be360236cada63784046688aeb6d921
6.014334291561518e+37
14
vrend: check transfer bounds for negative values too and report error Closes #138 Signed-off-by: Gert Wollny <gert.wollny@collabora.com> Reviewed-by: Emil Velikov <emil.velikov@collabora.com>
0
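The fix referenced in the message needs an explicit negative check because signed box coordinates fed into offset math can pass a naive x + width <= size test after wrapping or sign-extension. A hedged sketch of a one-dimensional version of such a bounds check (parameter names are illustrative, not virglrenderer's):

/* Validate a transfer span against a resource extent. Rejecting
 * negatives first makes the overflow-safe comparison below sound. */
static int transfer_span_ok(int x, int width, int size)
{
    if (x < 0 || width < 0 || size < 0)
        return 0;                  /* negative values are errors, not wraps */
    if (width > size || x > size - width)
        return 0;                  /* span escapes the resource */
    return 1;
}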