func: string
target: string
cwe: list
project: string
commit_id: string
hash: string
size: int64
message: string
vul: int64
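The fields above describe one record per sample: the source function (func), a Safe/Vulnerable label (target), the associated CWE identifiers (cwe), the project, fixing commit id, a hash, the record size, the commit message, and a numeric vulnerability flag (vul). As a minimal sketch of how such records could be consumed, the snippet below iterates over them and tallies labels; it assumes the records are stored as JSON Lines in a file named "records.jsonl" with these field names. The filename and the JSON Lines storage format are assumptions for illustration, not part of this dump.

# Minimal sketch: iterate records with the schema above and count labels.
# Assumptions (not from the dump): JSON Lines storage, file "records.jsonl",
# one object per line using the field names func, target, cwe, project,
# commit_id, hash, size, message, vul.
import json

def iter_records(path="records.jsonl"):
    # Yield one record dict per non-empty line.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    counts = {"vulnerable": 0, "safe": 0}
    for rec in iter_records():
        # rec["cwe"] is a (possibly empty) list such as ["CWE-362", "CWE-119"].
        counts["vulnerable" if rec["vul"] == 1 else "safe"] += 1
    print(counts)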
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct genl_info *info) { enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; u32 freq; int result; if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; if (!nl80211_can_set_dev_channel(wdev)) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { channel_type = nla_get_u32(info->attrs[ NL80211_ATTR_WIPHY_CHANNEL_TYPE]); if (channel_type != NL80211_CHAN_NO_HT && channel_type != NL80211_CHAN_HT20 && channel_type != NL80211_CHAN_HT40PLUS && channel_type != NL80211_CHAN_HT40MINUS) return -EINVAL; } freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); mutex_lock(&rdev->devlist_mtx); if (wdev) { wdev_lock(wdev); result = cfg80211_set_freq(rdev, wdev, freq, channel_type); wdev_unlock(wdev); } else { result = cfg80211_set_freq(rdev, NULL, freq, channel_type); } mutex_unlock(&rdev->devlist_mtx); return result; }
Safe
[ "CWE-362", "CWE-119" ]
linux
208c72f4fe44fe09577e7975ba0e7fa0278f3d03
2.1430857287576674e+38
38
nl80211: fix check for valid SSID size in scan operations In both trigger_scan and sched_scan operations, we were checking for the SSID length before assigning the value correctly. Since the memory was just kzalloc'ed, the check was always failing and SSID with over 32 characters were allowed to go through. This was causing a buffer overflow when copying the actual SSID to the proper place. This bug has been there since 2.6.29-rc4. Cc: stable@kernel.org Signed-off-by: Luciano Coelho <coelho@ti.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
0
generate_cookie (void) { guint32 cookie; cookie = (guint32)g_random_int_range (1, G_MAXINT32); return cookie; }
Safe
[]
gnome-screensaver
284c9924969a49dbf2d5fae1d680d3310c4df4a3
3.3019899642787902e+37
8
Remove session inhibitors if the originator falls of the bus This fixes a problem where totem leaves inhibitors behind, see bug 600488.
0
static int kvm_request_guest_time_update(struct kvm_vcpu *v) { struct kvm_vcpu_arch *vcpu = &v->arch; if (!vcpu->time_page) return 0; set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests); return 1; }
Safe
[ "CWE-476" ]
linux-2.6
59839dfff5eabca01cc4e20b45797a60a80af8cb
1.4916939629626748e+37
9
KVM: x86: check for cr3 validity in ioctl_set_sregs Matt T. Yourst notes that kvm_arch_vcpu_ioctl_set_sregs lacks validity checking for the new cr3 value: "Userspace callers of KVM_SET_SREGS can pass a bogus value of cr3 to the kernel. This will trigger a NULL pointer access in gfn_to_rmap() when userspace next tries to call KVM_RUN on the affected VCPU and kvm attempts to activate the new non-existent page table root. This happens since kvm only validates that cr3 points to a valid guest physical memory page when code *inside* the guest sets cr3. However, kvm currently trusts the userspace caller (e.g. QEMU) on the host machine to always supply a valid page table root, rather than properly validating it along with the rest of the reloaded guest state." http://sourceforge.net/tracker/?func=detail&atid=893831&aid=2687641&group_id=180599 Check for a valid cr3 address in kvm_arch_vcpu_ioctl_set_sregs, triple fault in case of failure. Cc: stable@kernel.org Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
0
int ssl3_send_certificate_request(SSL *s) { unsigned char *p,*d; int i,j,nl,off,n; STACK_OF(X509_NAME) *sk=NULL; X509_NAME *name; BUF_MEM *buf; if (s->state == SSL3_ST_SW_CERT_REQ_A) { buf=s->init_buf; d=p=(unsigned char *)&(buf->data[4]); /* get the list of acceptable cert types */ p++; n=ssl3_get_req_cert_type(s,p); d[0]=n; p+=n; n++; off=n; p+=2; n+=2; sk=SSL_get_client_CA_list(s); nl=0; if (sk != NULL) { for (i=0; i<sk_X509_NAME_num(sk); i++) { name=sk_X509_NAME_value(sk,i); j=i2d_X509_NAME(name,NULL); if (!BUF_MEM_grow_clean(buf,4+n+j+2)) { SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB); goto err; } p=(unsigned char *)&(buf->data[4+n]); if (!(s->options & SSL_OP_NETSCAPE_CA_DN_BUG)) { s2n(j,p); i2d_X509_NAME(name,&p); n+=2+j; nl+=2+j; } else { d=p; i2d_X509_NAME(name,&p); j-=2; s2n(j,d); j+=2; n+=j; nl+=j; } } } /* else no CA names */ p=(unsigned char *)&(buf->data[4+off]); s2n(nl,p); d=(unsigned char *)buf->data; *(d++)=SSL3_MT_CERTIFICATE_REQUEST; l2n3(n,d); /* we should now have things packed up, so lets send * it off */ s->init_num=n+4; s->init_off=0; #ifdef NETSCAPE_HANG_BUG p=(unsigned char *)s->init_buf->data + s->init_num; /* do the header */ *(p++)=SSL3_MT_SERVER_DONE; *(p++)=0; *(p++)=0; *(p++)=0; s->init_num += 4; #endif s->state = SSL3_ST_SW_CERT_REQ_B; } /* SSL3_ST_SW_CERT_REQ_B */ return(ssl3_do_write(s,SSL3_RT_HANDSHAKE)); err: return(-1); }
Safe
[]
openssl
ee2ffc279417f15fef3b1073c7dc81a908991516
3.38045415981186e+37
88
Add Next Protocol Negotiation.
0
iasecc_pin_get_policy (struct sc_card *card, struct sc_pin_cmd_data *data, struct iasecc_pin_policy *pin) { struct sc_context *ctx = card->ctx; struct sc_file *save_current_df = NULL, *save_current_ef = NULL; struct iasecc_sdo sdo; struct sc_path path; int rv; LOG_FUNC_CALLED(ctx); sc_log(ctx, "iasecc_pin_get_policy(card:%p)", card); if (data->pin_type != SC_AC_CHV) { sc_log(ctx, "PIN policy only available for CHV type"); LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS); } if (card->cache.valid && card->cache.current_df) { sc_file_dup(&save_current_df, card->cache.current_df); if (save_current_df == NULL) { rv = SC_ERROR_OUT_OF_MEMORY; sc_log(ctx, "Cannot duplicate current DF file"); goto err; } } if (card->cache.valid && card->cache.current_ef) { sc_file_dup(&save_current_ef, card->cache.current_ef); if (save_current_ef == NULL) { rv = SC_ERROR_OUT_OF_MEMORY; sc_log(ctx, "Cannot duplicate current EF file"); goto err; } } if (!(data->pin_reference & IASECC_OBJECT_REF_LOCAL) && card->cache.valid && card->cache.current_df) { sc_format_path("3F00", &path); path.type = SC_PATH_TYPE_FILE_ID; rv = iasecc_select_file(card, &path, NULL); LOG_TEST_GOTO_ERR(ctx, rv, "Unable to select MF"); } memset(&sdo, 0, sizeof(sdo)); sdo.sdo_class = IASECC_SDO_CLASS_CHV; sdo.sdo_ref = data->pin_reference & ~IASECC_OBJECT_REF_LOCAL; sc_log(ctx, "iasecc_pin_get_policy() reference %i", sdo.sdo_ref); rv = iasecc_sdo_get_data(card, &sdo); LOG_TEST_GOTO_ERR(ctx, rv, "Cannot get SDO PIN data"); if (sdo.docp.acls_contact.size == 0) { rv = SC_ERROR_INVALID_DATA; sc_log(ctx, "Extremely strange ... there is no ACLs"); goto err; } sc_log(ctx, "iasecc_pin_get_policy() sdo.docp.size.size %"SC_FORMAT_LEN_SIZE_T"u", sdo.docp.size.size); memcpy(pin->scbs, sdo.docp.scbs, sizeof(pin->scbs)); pin->min_length = (sdo.data.chv.size_min.value ? *sdo.data.chv.size_min.value : -1); pin->max_length = (sdo.data.chv.size_max.value ? *sdo.data.chv.size_max.value : -1); pin->tries_maximum = (sdo.docp.tries_maximum.value ? *sdo.docp.tries_maximum.value : -1); pin->tries_remaining = (sdo.docp.tries_remaining.value ? *sdo.docp.tries_remaining.value : -1); if (sdo.docp.size.value && sdo.docp.size.size <= sizeof(int)) { unsigned int n = 0; unsigned int i; for (i=0; i<sdo.docp.size.size; i++) n = (n << 8) + *(sdo.docp.size.value + i); pin->stored_length = n; } else { pin->stored_length = -1; } sc_log(ctx, "PIN policy: size max/min %i/%i, tries max/left %i/%i", pin->max_length, pin->min_length, pin->tries_maximum, pin->tries_remaining); iasecc_sdo_free_fields(card, &sdo); if (save_current_df) { sc_log(ctx, "iasecc_pin_get_policy() restore current DF"); rv = iasecc_select_file(card, &save_current_df->path, NULL); LOG_TEST_GOTO_ERR(ctx, rv, "Cannot return to saved DF"); } if (save_current_ef) { sc_log(ctx, "iasecc_pin_get_policy() restore current EF"); rv = iasecc_select_file(card, &save_current_ef->path, NULL); LOG_TEST_GOTO_ERR(ctx, rv, "Cannot return to saved EF"); } err: sc_file_free(save_current_df); sc_file_free(save_current_ef); LOG_FUNC_RETURN(ctx, rv); }
Safe
[]
OpenSC
ae1cf0be90396fb6c0be95829bf0d3eecbd2fd1c
7.118990436849107e+37
99
iasecc: Prevent stack buffer overflow when empty ACL is returned Thanks oss-fuzz https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=30800
0
static int selinux_syslog(int type) { int rc; switch (type) { case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ rc = task_has_system(current, SYSTEM__SYSLOG_READ); break; case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */ case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */ /* Set level of messages printed to console */ case SYSLOG_ACTION_CONSOLE_LEVEL: rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE); break; case SYSLOG_ACTION_CLOSE: /* Close log */ case SYSLOG_ACTION_OPEN: /* Open log */ case SYSLOG_ACTION_READ: /* Read from log */ case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */ case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ default: rc = task_has_system(current, SYSTEM__SYSLOG_MOD); break; } return rc; }
Safe
[ "CWE-264" ]
linux
259e5e6c75a910f3b5e656151dc602f53f9d7548
3.2580100364419917e+38
26
Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs With this change, calling prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) disables privilege granting operations at execve-time. For example, a process will not be able to execute a setuid binary to change their uid or gid if this bit is set. The same is true for file capabilities. Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that LSMs respect the requested behavior. To determine if the NO_NEW_PRIVS bit is set, a task may call prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); It returns 1 if set and 0 if it is not set. If any of the arguments are non-zero, it will return -1 and set errno to -EINVAL. (PR_SET_NO_NEW_PRIVS behaves similarly.) This functionality is desired for the proposed seccomp filter patch series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the system call behavior for itself and its child tasks without being able to impact the behavior of a more privileged task. Another potential use is making certain privileged operations unprivileged. For example, chroot may be considered "safe" if it cannot affect privileged tasks. Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is set and AppArmor is in use. It is fixed in a subsequent patch. Signed-off-by: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Will Drewry <wad@chromium.org> Acked-by: Eric Paris <eparis@redhat.com> Acked-by: Kees Cook <keescook@chromium.org> v18: updated change desc v17: using new define values as per 3.4 Signed-off-by: James Morris <james.l.morris@oracle.com>
0
static int schannel_init(void) { return (Curl_sspi_global_init() == CURLE_OK ? 1 : 0); }
Safe
[ "CWE-290" ]
curl
b09c8ee15771c614c4bf3ddac893cdb12187c844
3.2815035542312194e+38
4
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid() To make sure we set and extract the correct session. Reported-by: Mingtao Yang Bug: https://curl.se/docs/CVE-2021-22890.html CVE-2021-22890
0
static int may_init_module(void) { if (!capable(CAP_SYS_MODULE) || modules_disabled) return -EPERM; return 0; }
Safe
[ "CWE-362", "CWE-347" ]
linux
0c18f29aae7ce3dadd26d8ee3505d07cc982df75
2.3462196959481285e+38
7
module: limit enabling module.sig_enforce Irrespective as to whether CONFIG_MODULE_SIG is configured, specifying "module.sig_enforce=1" on the boot command line sets "sig_enforce". Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured. This patch makes the presence of /sys/module/module/parameters/sig_enforce dependent on CONFIG_MODULE_SIG=y. Fixes: fda784e50aac ("module: export module signature enforcement status") Reported-by: Nayna Jain <nayna@linux.ibm.com> Tested-by: Mimi Zohar <zohar@linux.ibm.com> Tested-by: Jessica Yu <jeyu@kernel.org> Signed-off-by: Mimi Zohar <zohar@linux.ibm.com> Signed-off-by: Jessica Yu <jeyu@kernel.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
static size_t vhost_get_avail_size(struct vhost_virtqueue *vq, unsigned int num) { size_t event __maybe_unused = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; return sizeof(*vq->avail) + sizeof(*vq->avail->ring) * num + event; }
Safe
[ "CWE-120" ]
linux
060423bfdee3f8bc6e2c1bac97de24d5415e2bc4
4.518549567894835e+37
9
vhost: make sure log_num < in_num The code assumes log_num < in_num everywhere, and that is true as long as in_num is incremented by descriptor iov count, and log_num by 1. However this breaks if there's a zero sized descriptor. As a result, if a malicious guest creates a vring desc with desc.len = 0, it may cause the host kernel to crash by overflowing the log array. This bug can be triggered during the VM migration. There's no need to log when desc.len = 0, so just don't increment log_num in this case. Fixes: 3a4d5c94e959 ("vhost_net: a kernel-level virtio server") Cc: stable@vger.kernel.org Reviewed-by: Lidong Chen <lidongchen@tencent.com> Signed-off-by: ruippan <ruippan@tencent.com> Signed-off-by: yongduan <yongduan@tencent.com> Acked-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Tyler Hicks <tyhicks@canonical.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
0
void Save() { if (!on_heap_ && size_ > 0) { char* s = new char[size_]; memcpy(s, str_, size_); str_ = s; on_heap_ = true; } }
Safe
[]
node
7b3fb22290c3b6acb497ca85cf2f1648d75c8154
1.3188983381382265e+38
8
typo in node_http_parser
0
void CLASS setPentaxBodyFeatures (unsigned id) { imgdata.lens.makernotes.CamID = id; switch (id) { case 0x12994: case 0x12aa2: case 0x12b1a: case 0x12b60: case 0x12b62: case 0x12b7e: case 0x12b80: case 0x12b9c: case 0x12b9d: case 0x12ba2: case 0x12c1e: case 0x12c20: case 0x12cd2: case 0x12cd4: case 0x12cfa: case 0x12d72: case 0x12d73: case 0x12db8: case 0x12dfe: case 0x12e6c: case 0x12e76: case 0x12ef8: case 0x12f52: case 0x12f70: case 0x12f71: case 0x12fb6: case 0x12fc0: case 0x12fca: case 0x1301a: case 0x13024: case 0x1309c: case 0x13222: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K; imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC; break; case 0x13092: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K; imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF; break; case 0x12e08: case 0x13010: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_645; imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_MF; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_645; imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_MF; break; case 0x12ee4: case 0x12f66: case 0x12f7a: case 0x1302e: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_Q; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_Q; break; default: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; } return; }
Safe
[ "CWE-119", "CWE-125", "CWE-787" ]
LibRaw
d13e8f6d1e987b7491182040a188c16a395f1d21
7.179935977375932e+37
66
CVE-2017-1438 credits; fix for Kodak 65000 out of bounds access
0
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) { if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; if (!page_count(page)) return -EINVAL; vma->vm_flags |= VM_INSERTPAGE; return insert_page(vma, addr, page, vma->vm_page_prot); }
Safe
[ "CWE-20" ]
linux-2.6
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
1.971176825671826e+38
10
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit 557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed the ZERO_PAGE from the VM mappings, any users of get_user_pages() will generally now populate the VM with real empty pages needlessly. We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but since fault handling no longer uses ZERO_PAGE for new anonymous pages, we now need to handle that special case in follow_page() instead. In particular, the removal of ZERO_PAGE effectively removed the core file writing optimization where we would skip writing pages that had not been populated at all, and increased memory pressure a lot by allocating all those useless newly zeroed pages. This reinstates the optimization by making the unmapped PTE case the same as for a non-existent page table, which already did this correctly. While at it, this also fixes the XIP case for follow_page(), where the caller could not differentiate between the case of a page that simply could not be used (because it had no "struct page" associated with it) and a page that just wasn't mapped. We do that by simply returning an error pointer for pages that could not be turned into a "struct page *". The error is arbitrarily picked to be EFAULT, since that was what get_user_pages() already used for the equivalent IO-mapped page case. [ Also removed an impossible test for pte_offset_map_lock() failing: that's not how that function works ] Acked-by: Oleg Nesterov <oleg@tv-sign.ru> Acked-by: Nick Piggin <npiggin@suse.de> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Hugh Dickins <hugh@veritas.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Roland McGrath <roland@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
is_code_ctype(OnigCodePoint code, unsigned int ctype) { if (code < 256) return ENC_IS_ISO_8859_1_CTYPE(code, ctype); else return FALSE; }
Safe
[ "CWE-125" ]
oniguruma
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
2.1330267743087905e+38
7
onig-5.9.2
0
} inline float abs(const float a) { return (float)std::fabs((double)a);
Safe
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
2.9291117286867852e+38
3
Fix other issues in 'CImg<T>::load_bmp()'.
0
void fmessage(char* fmt, ...) { // TODO: this function is duplicated in src/fnet/interface.c if (arg_quiet) return; va_list args; va_start(args,fmt); vfprintf(stderr, fmt, args); va_end(args); fflush(0); }
Safe
[ "CWE-269", "CWE-94" ]
firejail
27cde3d7d1e4e16d4190932347c7151dc2a84c50
3.260609458898298e+38
10
fixing CVE-2022-31214
0
GF_Err tfxd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MSSTimeExtBox *ptr = (GF_MSSTimeExtBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); if (ptr->version == 0x01) { ISOM_DECREASE_SIZE(ptr, 16); ptr->absolute_time_in_track_timescale = gf_bs_read_u64(bs); ptr->fragment_duration_in_track_timescale = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8); ptr->absolute_time_in_track_timescale = gf_bs_read_u32(bs); ptr->fragment_duration_in_track_timescale = gf_bs_read_u32(bs); } return GF_OK; }
Safe
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
3.313312147606184e+38
19
fixed #1587
0
static void process_bin_update(conn *c) { char *key; int nkey; int vlen; item *it; protocol_binary_request_set* req = binary_get_request(c); assert(c != NULL); key = binary_get_key(c); nkey = c->binary_header.request.keylen; /* fix byteorder in the request */ req->message.body.flags = ntohl(req->message.body.flags); req->message.body.expiration = ntohl(req->message.body.expiration); vlen = c->binary_header.request.bodylen - (nkey + c->binary_header.request.extlen); if (settings.verbose > 1) { int ii; if (c->cmd == PROTOCOL_BINARY_CMD_ADD) { fprintf(stderr, "<%d ADD ", c->sfd); } else if (c->cmd == PROTOCOL_BINARY_CMD_SET) { fprintf(stderr, "<%d SET ", c->sfd); } else { fprintf(stderr, "<%d REPLACE ", c->sfd); } for (ii = 0; ii < nkey; ++ii) { fprintf(stderr, "%c", key[ii]); } fprintf(stderr, " Value len is %d", vlen); fprintf(stderr, "\n"); } if (settings.detail_enabled) { stats_prefix_record_set(key, nkey); } it = item_alloc(key, nkey, req->message.body.flags, realtime(req->message.body.expiration), vlen+2); if (it == 0) { enum store_item_type status; if (! item_size_ok(nkey, req->message.body.flags, vlen + 2)) { write_bin_error(c, PROTOCOL_BINARY_RESPONSE_E2BIG, NULL, vlen); status = TOO_LARGE; } else { out_of_memory(c, "SERVER_ERROR Out of memory allocating item"); /* This error generating method eats the swallow value. Add here. */ c->sbytes = vlen; status = NO_MEMORY; } /* FIXME: losing c->cmd since it's translated below. refactor? */ LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE, NULL, status, 0, key, nkey); /* Avoid stale data persisting in cache because we failed alloc. * Unacceptable for SET. Anywhere else too? */ if (c->cmd == PROTOCOL_BINARY_CMD_SET) { it = item_get(key, nkey, c); if (it) { item_unlink(it); item_remove(it); } } /* swallow the data line */ c->write_and_go = conn_swallow; return; } ITEM_set_cas(it, c->binary_header.request.cas); switch (c->cmd) { case PROTOCOL_BINARY_CMD_ADD: c->cmd = NREAD_ADD; break; case PROTOCOL_BINARY_CMD_SET: c->cmd = NREAD_SET; break; case PROTOCOL_BINARY_CMD_REPLACE: c->cmd = NREAD_REPLACE; break; default: assert(0); } if (ITEM_get_cas(it) != 0) { c->cmd = NREAD_CAS; } c->item = it; c->ritem = ITEM_data(it); c->rlbytes = vlen; conn_set_state(c, conn_nread); c->substate = bin_read_set_value; }
Safe
[ "CWE-190" ]
memcached
bd578fc34b96abe0f8d99c1409814a09f51ee71c
2.8709543443160527e+38
98
CVE reported by cisco talos
0
bash_servicename_completion_function (text, state) const char *text; int state; { #if defined (__WIN32__) || defined (__OPENNT) || !defined (HAVE_GETSERVENT) return ((char *)NULL); #else static char *sname = (char *)NULL; static struct servent *srvent; static int snamelen, firstc; char *value; char **alist, *aentry; int afound; if (state == 0) { FREE (sname); firstc = *text; sname = savestring (text); snamelen = strlen (sname); setservent (0); } while (srvent = getservent ()) { afound = 0; if (snamelen == 0 || (STREQN (sname, srvent->s_name, snamelen))) break; /* Not primary, check aliases */ for (alist = srvent->s_aliases; *alist; alist++) { aentry = *alist; if (STREQN (sname, aentry, snamelen)) { afound = 1; break; } } if (afound) break; } if (srvent == 0) { endservent (); return ((char *)NULL); } value = afound ? savestring (aentry) : savestring (srvent->s_name); return value; #endif }
Safe
[ "CWE-20" ]
bash
4f747edc625815f449048579f6e65869914dd715
3.0536779405515412e+38
54
Bash-4.4 patch 7
0
static void mptsas_fetch_request(MPTSASState *s) { PCIDevice *pci = (PCIDevice *) s; char req[MPTSAS_MAX_REQUEST_SIZE]; MPIRequestHeader *hdr = (MPIRequestHeader *)req; hwaddr addr; int size; /* Read the message header from the guest first. */ addr = s->host_mfa_high_addr | MPTSAS_FIFO_GET(s, request_post); pci_dma_read(pci, addr, req, sizeof(*hdr)); if (hdr->Function < ARRAY_SIZE(mpi_request_sizes) && mpi_request_sizes[hdr->Function]) { /* Read the rest of the request based on the type. Do not * reread everything, as that could cause a TOC/TOU mismatch * and leak data from the QEMU stack. */ size = mpi_request_sizes[hdr->Function]; assert(size <= MPTSAS_MAX_REQUEST_SIZE); pci_dma_read(pci, addr + sizeof(*hdr), &req[sizeof(*hdr)], size - sizeof(*hdr)); } if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST) { /* SCSI I/O requests are separate from mptsas_process_message * because they cannot be sent through the doorbell yet. */ mptsas_process_scsi_io_request(s, (MPIMsgSCSIIORequest *)req, addr); } else { mptsas_process_message(s, (MPIRequestHeader *)req); } }
Safe
[ "CWE-416" ]
qemu
3791642c8d60029adf9b00bcb4e34d7d8a1aea4d
1.2010627804817058e+37
33
mptsas: Remove unused MPTSASState 'pending' field (CVE-2021-3392) While processing SCSI i/o requests in mptsas_process_scsi_io_request(), the Megaraid emulator appends new MPTSASRequest object 'req' to the 's->pending' queue. In case of an error, this same object gets dequeued in mptsas_free_request() only if SCSIRequest object 'req->sreq' is initialised. This may lead to a use-after-free issue. Since s->pending is actually not used, simply remove it from MPTSASState. Cc: qemu-stable@nongnu.org Signed-off-by: Michael Tokarev <mjt@tls.msk.ru> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com> Reported-by: Cheolwoo Myung <cwmyung@snu.ac.kr> Message-id: 20210419134247.1467982-1-f4bug@amsat.org Message-Id: <20210416102243.1293871-1-mjt@msgid.tls.msk.ru> Suggested-by: Paolo Bonzini <pbonzini@redhat.com> Reported-by: Cheolwoo Myung <cwmyung@snu.ac.kr> BugLink: https://bugs.launchpad.net/qemu/+bug/1914236 (CVE-2021-3392) Fixes: e351b826112 ("hw: Add support for LSI SAS1068 (mptsas) device") [PMD: Reworded description, added more tags] Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
0
int wc_ecc_init(ecc_key* key) { return wc_ecc_init_ex(key, NULL, INVALID_DEVID); }
Safe
[ "CWE-326", "CWE-203" ]
wolfssl
1de07da61f0c8e9926dcbd68119f73230dae283f
3.391243687578309e+38
4
Constant time EC map to affine for private operations For fast math, use a constant time modular inverse when mapping to affine when operation involves a private key - key gen, calc shared secret, sign.
0
async_result_cb (GObject *obj, GAsyncResult *result, gpointer user_data) { GAsyncResult **result_out = user_data; *result_out = g_object_ref (result); }
Safe
[ "CWE-668" ]
flatpak
cd2142888fc4c199723a0dfca1f15ea8788a5483
2.0143881896148725e+38
8
Don't expose /proc when running apply_extra As shown by CVE-2019-5736, it is sometimes possible for the sandbox app to access outside files using /proc/self/exe. This is not typically an issue for flatpak as the sandbox runs as the user which has no permissions to e.g. modify the host files. However, when installing apps using extra-data into the system repo we *do* actually run a sandbox as root. So, in this case we disable mounting /proc in the sandbox, which will neuter attacks like this.
0
void PeerListWidget::showPeerListMenu(const QPoint&) { QMenu menu; bool emptyMenu = true; BitTorrent::TorrentHandle *const torrent = m_properties->getCurrentTorrent(); if (!torrent) return; // Add Peer Action QAction *addPeerAct = 0; if (!torrent->isQueued() && !torrent->isChecking()) { addPeerAct = menu.addAction(GuiIconProvider::instance()->getIcon("user-group-new"), tr("Add a new peer...")); emptyMenu = false; } QAction *banAct = 0; QAction *copyPeerAct = 0; if (!selectionModel()->selectedRows().isEmpty()) { copyPeerAct = menu.addAction(GuiIconProvider::instance()->getIcon("edit-copy"), tr("Copy IP:port")); menu.addSeparator(); banAct = menu.addAction(GuiIconProvider::instance()->getIcon("user-group-delete"), tr("Ban peer permanently")); emptyMenu = false; } if (emptyMenu) return; QAction *act = menu.exec(QCursor::pos()); if (act == 0) return; if (act == addPeerAct) { QList<BitTorrent::PeerAddress> peersList = PeersAdditionDlg::askForPeers(); int peerCount = 0; foreach (const BitTorrent::PeerAddress &addr, peersList) { if (torrent->connectPeer(addr)) { qDebug("Adding peer %s...", qPrintable(addr.ip.toString())); Logger::instance()->addMessage(tr("Manually adding peer '%1'...").arg(addr.ip.toString())); peerCount++; } else { Logger::instance()->addMessage(tr("The peer '%1' could not be added to this torrent.").arg(addr.ip.toString()), Log::WARNING); } } if (peerCount < peersList.length()) QMessageBox::information(0, tr("Peer addition"), tr("Some peers could not be added. Check the Log for details.")); else if (peerCount > 0) QMessageBox::information(0, tr("Peer addition"), tr("The peers were added to this torrent.")); return; } if (act == banAct) { banSelectedPeers(); return; } if (act == copyPeerAct) { copySelectedPeers(); return; } }
Safe
[ "CWE-20", "CWE-79" ]
qBittorrent
6ca3e4f094da0a0017cb2d483ec1db6176bb0b16
3.0381158918919633e+38
52
Add Utils::String::toHtmlEscaped
0
string handleStartupResponse(StartupDetails &details) { TRACE_POINT(); string socketAddress; while (true) { string line; try { line = readMessageLine(details); } catch (const SystemException &e) { throwPreloaderSpawnException("An error occurred while starting up " "the preloader. There was an I/O error while reading its " "startup response: " + e.sys(), SpawnException::PRELOADER_STARTUP_PROTOCOL_ERROR, details); } catch (const TimeoutException &) { throwPreloaderSpawnException("An error occurred while starting up " "the preloader: it did not write a startup response in time.", SpawnException::PRELOADER_STARTUP_TIMEOUT, details); } if (line.empty()) { throwPreloaderSpawnException("An error occurred while starting up " "the preloader. It unexpected closed the connection while " "sending its startup response.", SpawnException::PRELOADER_STARTUP_PROTOCOL_ERROR, details); } else if (line[line.size() - 1] != '\n') { throwPreloaderSpawnException("An error occurred while starting up " "the preloader. It sent a line without a newline character " "in its startup response.", SpawnException::PRELOADER_STARTUP_PROTOCOL_ERROR, details); } else if (line == "\n") { break; } string::size_type pos = line.find(": "); if (pos == string::npos) { throwPreloaderSpawnException("An error occurred while starting up " "the preloader. It sent a startup response line without " "separator.", SpawnException::PRELOADER_STARTUP_PROTOCOL_ERROR, details); } string key = line.substr(0, pos); string value = line.substr(pos + 2, line.size() - pos - 3); if (key == "socket") { socketAddress = fixupSocketAddress(options, value); } else { throwPreloaderSpawnException("An error occurred while starting up " "the preloader. It sent an unknown startup response line " "called '" + key + "'.", SpawnException::PRELOADER_STARTUP_PROTOCOL_ERROR, details); } } if (socketAddress.empty()) { throwPreloaderSpawnException("An error occurred while starting up " "the preloader. It did not report a socket address in its " "startup response.", SpawnException::PRELOADER_STARTUP_PROTOCOL_ERROR, details); } return socketAddress; }
Vulnerable
[]
passenger
8c6693e0818772c345c979840d28312c2edd4ba4
3.745773464646341e+37
70
Security check socket filenames reported by spawned application processes.
1
void sqlite3WindowUnlinkFromSelect(Window *p){ if( p->ppThis ){ *p->ppThis = p->pNextWin; if( p->pNextWin ) p->pNextWin->ppThis = p->ppThis; p->ppThis = 0; } }
Safe
[ "CWE-476" ]
sqlite
75e95e1fcd52d3ec8282edb75ac8cd0814095d54
3.1521811734813538e+38
7
When processing constant integer values in ORDER BY clauses of window definitions (see check-in [7e4809eadfe99ebf]) be sure to fully disable the constant value to avoid an invalid pointer dereference if the expression is ever duplicated. This fixes a crash report from Yongheng and Rui. FossilOrigin-Name: 1ca0bd982ab1183bbafce0d260e4dceda5eb766ed2e7793374a88d1ae0bdd2ca
0
static int LEfloatint (lua_Number f, lua_Integer i) { if (l_intfitsf(i)) return luai_numle(f, cast_num(i)); /* compare them as floats */ else { /* f <= i <=> ceil(f) <= i */ lua_Integer fi; if (luaV_flttointeger(f, &fi, F2Iceil)) /* fi = ceil(f) */ return fi <= i; /* compare them as integers */ else /* 'f' is either greater or less than all integers */ return f < 0; /* less? */ } }
Safe
[ "CWE-416", "CWE-125", "CWE-787" ]
lua
eb41999461b6f428186c55abd95f4ce1a76217d5
1.9458390100174914e+37
11
Fixed bugs of stack reallocation x GC Macro 'checkstackGC' was doing a GC step after resizing the stack; the GC could shrink the stack and undo the resize. Moreover, macro 'checkstackp' also does a GC step, which could remove the preallocated CallInfo when calling a function. (Its name has been changed to 'checkstackGCp' to emphasize that it calls the GC.)
0
void ConnectionManagerImpl::ActiveStream::completeRequest() { filter_manager_.streamInfo().onRequestComplete(); Upstream::HostDescriptionConstSharedPtr upstream_host = connection_manager_.read_callbacks_->upstreamHost(); if (upstream_host != nullptr) { Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = upstream_host->cluster().requestResponseSizeStats(); if (req_resp_stats.has_value()) { req_resp_stats->get().upstream_rq_body_size_.recordValue( filter_manager_.streamInfo().bytesReceived()); req_resp_stats->get().upstream_rs_body_size_.recordValue( filter_manager_.streamInfo().bytesSent()); } } if (connection_manager_.remote_close_) { filter_manager_.streamInfo().setResponseCodeDetails( StreamInfo::ResponseCodeDetails::get().DownstreamRemoteDisconnect); filter_manager_.streamInfo().setResponseFlag( StreamInfo::ResponseFlag::DownstreamConnectionTermination); } // TODO(danzh) bring HTTP/3 to parity here. if (connection_manager_.codec_->protocol() != Protocol::Http3) { ASSERT(filter_manager_.streamInfo().responseCodeDetails().has_value()); } connection_manager_.stats_.named_.downstream_rq_active_.dec(); if (filter_manager_.streamInfo().healthCheck()) { connection_manager_.config_.tracingStats().health_check_.inc(); } if (active_span_) { Tracing::HttpTracerUtility::finalizeDownstreamSpan( *active_span_, request_headers_.get(), response_headers_.get(), response_trailers_.get(), filter_manager_.streamInfo(), *this); } if (state_.successful_upgrade_) { connection_manager_.stats_.named_.downstream_cx_upgrades_active_.dec(); } }
Safe
[ "CWE-22" ]
envoy
5333b928d8bcffa26ab19bf018369a835f697585
3.011735497486896e+38
40
Implement handling of escaped slash characters in URL path Fixes: CVE-2021-29492 Signed-off-by: Yan Avlasov <yavlasov@google.com>
0
is_mouse_topline(win_T *wp) { return orig_topline == wp->w_topline #ifdef FEAT_DIFF && orig_topfill == wp->w_topfill #endif ; }
Safe
[ "CWE-125", "CWE-787" ]
vim
e178af5a586ea023622d460779fdcabbbfac0908
8.516175410293708e+37
8
patch 8.2.5160: accessing invalid memory after changing terminal size Problem: Accessing invalid memory after changing terminal size. Solution: Adjust cmdline_row and msg_row to the value of Rows.
0
static inline struct iw_statistics *get_wireless_stats(struct net_device *dev) { /* New location */ if((dev->wireless_handlers != NULL) && (dev->wireless_handlers->get_wireless_stats != NULL)) return dev->wireless_handlers->get_wireless_stats(dev); /* Old location, will be phased out in next WE */ return (dev->get_wireless_stats ? dev->get_wireless_stats(dev) : (struct iw_statistics *) NULL); }
Safe
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
1.4066026314861024e+38
12
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
0
inline void Virtual_column_info::print(String* str) { expr->print_for_table_def(str); }
Safe
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
3.112066357049682e+38
4
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <sergey.petrunya@mariadb.com>
0
u32 acpi_ns_opens_scope(acpi_object_type type) { ACPI_FUNCTION_ENTRY(); if (type > ACPI_TYPE_LOCAL_MAX) { /* type code out of range */ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type)); return (ACPI_NS_NORMAL); } return (((u32)acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE); }
Safe
[ "CWE-200", "CWE-755" ]
linux
3b2d69114fefa474fca542e51119036dceb4aa6f
1.001296084047112e+38
14
ACPICA: Namespace: fix operand cache leak ACPICA commit a23325b2e583556eae88ed3f764e457786bf4df6 I found some ACPI operand cache leaks in ACPI early abort cases. Boot log of ACPI operand cache leak is as follows: >[ 0.174332] ACPI: Added _OSI(Module Device) >[ 0.175504] ACPI: Added _OSI(Processor Device) >[ 0.176010] ACPI: Added _OSI(3.0 _SCP Extensions) >[ 0.177032] ACPI: Added _OSI(Processor Aggregator Device) >[ 0.178284] ACPI: SCI (IRQ16705) allocation failed >[ 0.179352] ACPI Exception: AE_NOT_ACQUIRED, Unable to install System Control Interrupt handler (20160930/evevent-131) >[ 0.180008] ACPI: Unable to start the ACPI Interpreter >[ 0.181125] ACPI Error: Could not remove SCI handler (20160930/evmisc-281) >[ 0.184068] kmem_cache_destroy Acpi-Operand: Slab cache still has objects >[ 0.185358] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.10.0-rc3 #2 >[ 0.186820] Hardware name: innotek gmb_h virtual_box/virtual_box, BIOS virtual_box 12/01/2006 >[ 0.188000] Call Trace: >[ 0.188000] ? dump_stack+0x5c/0x7d >[ 0.188000] ? kmem_cache_destroy+0x224/0x230 >[ 0.188000] ? acpi_sleep_proc_init+0x22/0x22 >[ 0.188000] ? acpi_os_delete_cache+0xa/0xd >[ 0.188000] ? acpi_ut_delete_caches+0x3f/0x7b >[ 0.188000] ? acpi_terminate+0x5/0xf >[ 0.188000] ? acpi_init+0x288/0x32e >[ 0.188000] ? __class_create+0x4c/0x80 >[ 0.188000] ? video_setup+0x7a/0x7a >[ 0.188000] ? do_one_initcall+0x4e/0x1b0 >[ 0.188000] ? kernel_init_freeable+0x194/0x21a >[ 0.188000] ? rest_init+0x80/0x80 >[ 0.188000] ? kernel_init+0xa/0x100 >[ 0.188000] ? ret_from_fork+0x25/0x30 When early abort is occurred due to invalid ACPI information, Linux kernel terminates ACPI by calling acpi_terminate() function. The function calls acpi_ns_terminate() function to delete namespace data and ACPI operand cache (acpi_gbl_module_code_list). But the deletion code in acpi_ns_terminate() function is wrapped in ACPI_EXEC_APP definition, therefore the code is only executed when the definition exists. If the define doesn't exist, ACPI operand cache (acpi_gbl_module_code_list) is leaked, and stack dump is shown in kernel log. This causes a security threat because the old kernel (<= 4.9) shows memory locations of kernel functions in stack dump, therefore kernel ASLR can be neutralized. To fix ACPI operand leak for enhancing security, I made a patch which removes the ACPI_EXEC_APP define in acpi_ns_terminate() function for executing the deletion code unconditionally. Link: https://github.com/acpica/acpica/commit/a23325b2 Signed-off-by: Seunghun Han <kkamagui@gmail.com> Signed-off-by: Lv Zheng <lv.zheng@intel.com> Signed-off-by: Bob Moore <robert.moore@intel.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
0
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { const struct sys_reg_desc *r; void __user *uaddr = (void __user *)(unsigned long)reg->addr; if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) return demux_c15_get(reg->id, uaddr); if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) return -ENOENT; r = index_to_sys_reg_desc(vcpu, reg->id); if (!r) return get_invariant_sys_reg(reg->id, uaddr); if (r->get_user) return (r->get_user)(vcpu, r, reg, uaddr); return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); }
Safe
[ "CWE-20", "CWE-617" ]
linux
9e3f7a29694049edd728e2400ab57ad7553e5aa9
2.7704669358397988e+38
20
arm64: KVM: pmu: Fix AArch32 cycle counter access We're missing the handling code for the cycle counter accessed from a 32bit guest, leading to unexpected results. Cc: stable@vger.kernel.org # 4.6+ Signed-off-by: Wei Huang <wei@redhat.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
0
static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr) { void __user *uaddr = (void __user*)(unsigned long)attr->addr; if ((u64)(unsigned long)uaddr != attr->addr) return ERR_PTR_USR(-EFAULT); return uaddr; }
Safe
[ "CWE-459" ]
linux
683412ccf61294d727ead4a73d97397396e69a6b
6.0083490590079675e+37
8
KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines, generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines was released by VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remain pinned until the end of VM lifetime. Therefore, KVM does not flush cache at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicous userspace can crash the host kernel: creating a malicious VM and continuously allocates/releases unpinned confidential memory pages when the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM get flushed. In particular, hook mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush cache accordingly. The hook after releasing the mmu lock to avoid contention with other vCPUs. Cc: stable@vger.kernel.org Suggested-by: Sean Christpherson <seanjc@google.com> Reported-by: Mingwei Zhang <mizhang@google.com> Signed-off-by: Mingwei Zhang <mizhang@google.com> Message-Id: <20220421031407.2516575-4-mizhang@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
void LinkResolver::linktime_resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS) { // normal interface method resolution resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, true, CHECK); assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier"); assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier"); }
Safe
[]
jdk8u
f14e35d20e1a4d0f507f05838844152f2242c6d3
3.2372556626998537e+38
8
8281866: Enhance MethodHandle invocations Reviewed-by: andrew Backport-of: d974d9da365f787f67971d88c79371c8b0769f75
0
gdImagePtr gdImageRotateBilinear(gdImagePtr src, const float degrees, const int bgColor) { float _angle = (float)((- degrees / 180.0f) * M_PI); const unsigned int src_w = gdImageSX(src); const unsigned int src_h = gdImageSY(src); unsigned int new_width = abs((int)(src_w*cos(_angle))) + abs((int)(src_h*sin(_angle) + 0.5f)); unsigned int new_height = abs((int)(src_w*sin(_angle))) + abs((int)(src_h*cos(_angle) + 0.5f)); const gdFixed f_0_5 = gd_ftofx(0.5f); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed f_W = gd_itofx(src_w/2); const gdFixed f_cos = gd_ftofx(cos(-_angle)); const gdFixed f_sin = gd_ftofx(sin(-_angle)); const gdFixed f_1 = gd_itofx(1); unsigned int i; unsigned int dst_offset_x; unsigned int dst_offset_y = 0; unsigned int src_offset_x, src_offset_y; gdImagePtr dst; /* impact perf a bit, but not that much. Implementation for palette images can be done at a later point. */ if (src->trueColor == 0) { gdImagePaletteToTrueColor(src); } dst = gdImageCreateTrueColor(new_width, new_height); if (dst == NULL) { return NULL; } dst->saveAlphaFlag = 1; for (i = 0; i < new_height; i++) { unsigned int j; dst_offset_x = 0; for (j=0; j < new_width; j++) { const gdFixed f_i = gd_itofx((int)i - (int)new_height / 2); const gdFixed f_j = gd_itofx((int)j - (int)new_width / 2); const gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_0_5 + f_H; const gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_0_5 + f_W; const unsigned int m = gd_fxtoi(f_m); const unsigned int n = gd_fxtoi(f_n); if ((m > 0) && (m < src_h - 1) && (n > 0) && (n < src_w - 1)) { const gdFixed f_f = f_m - gd_itofx(m); const gdFixed f_g = f_n - gd_itofx(n); const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g); const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g); const gdFixed f_w3 = gd_mulfx(f_f, f_1-f_g); const gdFixed f_w4 = gd_mulfx(f_f, f_g); if (n < src_w - 1) { src_offset_x = n + 1; src_offset_y = m; } if (m < src_h - 1) { src_offset_x = n; src_offset_y = m + 1; } if (!((n >= src_w - 1) || (m >= src_h - 1))) { src_offset_x = n + 1; src_offset_y = m + 1; } { const int pixel1 = src->tpixels[src_offset_y][src_offset_x]; register int pixel2, pixel3, pixel4; if (src_offset_y + 1 >= src_h) { pixel2 = bgColor; pixel3 = bgColor; pixel4 = bgColor; } else if (src_offset_x + 1 >= src_w) { pixel2 = bgColor; pixel3 = bgColor; pixel4 = bgColor; } else { pixel2 = src->tpixels[src_offset_y][src_offset_x + 1]; pixel3 = src->tpixels[src_offset_y + 1][src_offset_x]; pixel4 = src->tpixels[src_offset_y + 1][src_offset_x + 1]; } { const gdFixed f_r1 = gd_itofx(gdTrueColorGetRed(pixel1)); const gdFixed f_r2 = gd_itofx(gdTrueColorGetRed(pixel2)); const gdFixed f_r3 = gd_itofx(gdTrueColorGetRed(pixel3)); const gdFixed f_r4 = gd_itofx(gdTrueColorGetRed(pixel4)); const gdFixed f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1)); const gdFixed f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2)); const gdFixed f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3)); const gdFixed f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4)); const gdFixed f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1)); const gdFixed f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2)); const gdFixed f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3)); const gdFixed f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4)); const gdFixed f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1)); const gdFixed f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2)); const gdFixed f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3)); const gdFixed f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4)); const gdFixed f_red = gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + 
gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4); const gdFixed f_green = gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4); const gdFixed f_blue = gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4); const gdFixed f_alpha = gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4); const unsigned char red = (unsigned char) CLAMP(gd_fxtoi(f_red), 0, 255); const unsigned char green = (unsigned char) CLAMP(gd_fxtoi(f_green), 0, 255); const unsigned char blue = (unsigned char) CLAMP(gd_fxtoi(f_blue), 0, 255); const unsigned char alpha = (unsigned char) CLAMP(gd_fxtoi(f_alpha), 0, 127); dst->tpixels[dst_offset_y][dst_offset_x++] = gdTrueColorAlpha(red, green, blue, alpha); } } } else { dst->tpixels[dst_offset_y][dst_offset_x++] = bgColor; } } dst_offset_y++; } return dst; }
Safe
[ "CWE-125" ]
libgd
4f65a3e4eedaffa1efcf9ee1eb08f0b504fbc31a
5.136914713566533e+37
121
Fixed memory overrun bug in gdImageScaleTwoPass _gdContributionsCalc would compute a window size and then adjust the left and right positions of the window to make a window within that size. However, it was storing the values in the struct *before* it made the adjustment. This change fixes that.
0
void MirrorJob::HandleFile(FileInfo *file) { int res; struct stat st; // TODO: get rid of local hacks. const char *dst_name=file->name; if(FlagSet(TARGET_FLAT)) dst_name=basename_ptr(dst_name); // dir_name returns pointer to static data - need to dup it. const char *source_name=dir_file(source_dir,file->name); source_name=alloca_strdup(source_name); const char *target_name=dir_file(target_dir,dst_name); target_name=alloca_strdup(target_name); const char *source_name_rel=dir_file(source_relative_dir,file->name); source_name_rel=alloca_strdup(source_name_rel); const char *target_name_rel=dir_file(target_relative_dir,dst_name); target_name_rel=alloca_strdup(target_name_rel); FileInfo::type filetype=FileInfo::NORMAL; if(file->Has(file->TYPE)) filetype=file->filetype; else { FileInfo *target=target_set->FindByName(file->name); if(target && target->Has(target->TYPE)) filetype=target->filetype; } switch(filetype) { case(FileInfo::NORMAL): case(FileInfo::REDIRECT): { bool remove_target=false; bool cont_this=false; bool use_pget=(pget_n>1) && target_is_local; if(file->Has(file->SIZE) && file->size<pget_minchunk*2) use_pget=false; if(target_is_local) { if(lstat(target_name,&st)!=-1) { // few safety checks. FileInfo *old=new_files_set->FindByName(file->name); if(old) goto skip; // file has appeared after mirror start old=old_files_set->FindByName(file->name); if(old && ((old->Has(old->SIZE) && old->size!=st.st_size) ||(old->Has(old->DATE) && old->date!=st.st_mtime))) goto skip; // the file has changed after mirror start if(!script_only && access(target_name,W_OK)==-1) { // try to enable write access. chmod(target_name,st.st_mode|0200); } } } FileInfo *old=target_set->FindByName(FileCopy::TempFileName(file->name)); if(old) { if(FlagSet(CONTINUE) && old->Has(file->TYPE) && old->filetype==old->NORMAL && (FlagSet(IGNORE_TIME) || (file->Has(file->DATE) && old->Has(old->DATE) && file->date + file->date.ts_prec < old->date - old->date.ts_prec)) && file->Has(file->SIZE) && old->Has(old->SIZE) && file->size >= old->size) { cont_this=true; stats.mod_files++; } else if(!to_rm_mismatched->FindByName(file->name)) { if(!FlagSet(OVERWRITE)) { remove_target=true; Report(_("Removing old file `%s'"),target_name_rel); } else { Report(_("Overwriting old file `%s'"),target_name_rel); } stats.mod_files++; } else stats.new_files++; } else if(FlagSet(ONLY_EXISTING)) { Report(_("Skipping file `%s' (only-existing)"),source_name_rel); goto skip; } else stats.new_files++; Report(_("Transferring file `%s'"),source_name_rel); if(script) { ArgV args(use_pget?"pget":"get"); if(use_pget) { args.Append("-n"); args.Append(pget_n); } if(cont_this) args.Append("-c"); if(remove_target) args.Append("-e"); if(FlagSet(ASCII)) args.Append("-a"); if(remove_source_files) args.Append("-E"); args.Append("-O"); args.Append(target_is_local?target_dir.get() :target_session->GetConnectURL().get()); args.Append(source_session->GetFileURL(file->name)); xstring_ca cmd(args.CombineQuoted()); fprintf(script,"%s\n",cmd.get()); if(script_only) goto skip; } FileCopyPeer *src_peer=0; if(source_is_local) src_peer=new FileCopyPeerFDStream(new FileStream(source_name,O_RDONLY),FileCopyPeer::GET); else src_peer=new FileCopyPeerFA(source_session->Clone(),file->name,FA::RETRIEVE); FileCopyPeer *dst_peer=0; if(target_is_local) dst_peer=new FileCopyPeerFDStream(new FileStream(target_name,O_WRONLY|O_CREAT|(cont_this?0:O_TRUNC)),FileCopyPeer::PUT); else dst_peer=new FileCopyPeerFA(target_session->Clone(),dst_name,FA::STORE); FileCopy 
*c=FileCopy::New(src_peer,dst_peer,cont_this); if(remove_source_files) c->RemoveSourceLater(); if(remove_target) c->RemoveTargetFirst(); if(FlagSet(ASCII)) c->Ascii(); CopyJob *cp=(use_pget ? new pgetJob(c,file->name,pget_n) : new CopyJob(c,file->name,"mirror")); if(file->Has(file->DATE)) cp->SetDate(file->date); if(file->Has(file->SIZE) && !FlagSet(IGNORE_SIZE)) cp->SetSize(file->size); TransferStarted(cp); cp->cmdline.vset("\\transfer `",source_name_rel,"'",NULL); set_state(WAITING_FOR_TRANSFER); break; } case(FileInfo::DIRECTORY): { if(recursion_mode==RECURSION_NEVER || FlagSet(NO_RECURSION)) goto skip; bool create_target_subdir=true; const FileInfo *old=0; if(FlagSet(TARGET_FLAT)) { create_target_subdir=false; target_name=target_dir; goto do_submirror; } if(target_set) old=target_set->FindByName(file->name); if(!old) { if(FlagSet(ONLY_EXISTING)) { Report(_("Skipping directory `%s' (only-existing)"),target_name_rel); goto skip; } } else if(old->TypeIs(old->DIRECTORY)) { create_target_subdir=false; } if(target_is_local && !script_only) { if((FlagSet(RETR_SYMLINKS)?stat:lstat)(target_name,&st)!=-1) { if(S_ISDIR(st.st_mode)) { // try to enable write access // only if not enabled as chmod can clear sgid flags on directories if(st.st_mode!=(st.st_mode|0700)) chmod(target_name,st.st_mode|0700); create_target_subdir=false; } else { Report(_("Removing old local file `%s'"),target_name_rel); if(remove(target_name)==-1) { eprintf("mirror: remove(%s): %s\n",target_name,strerror(errno)); goto skip; } create_target_subdir=true; } } } do_submirror: // launch sub-mirror MirrorJob *mj=new MirrorJob(this, source_session->Clone(),target_session->Clone(), source_name,target_name); AddWaiting(mj); mj->cmdline.vset("\\mirror `",source_name_rel,"'",NULL); mj->source_relative_dir.set(source_name_rel); mj->target_relative_dir.set(target_name_rel); mj->create_target_dir=create_target_subdir; if(verbose_report>=3) { if(FlagSet(SCAN_ALL_FIRST)) Report(_("Scanning directory `%s'"),mj->target_relative_dir.get()); else Report(_("Mirroring directory `%s'"),mj->target_relative_dir.get()); } break; } case(FileInfo::SYMLINK): { if(FlagSet(NO_SYMLINKS)) goto skip; if(!file->symlink) goto skip; if(!target_is_local) { if(script) { ArgV args("ln"); args.Append("-s"); args.Append(file->symlink); args.Append(target_name); xstring_ca cmd(args.CombineQuoted()); fprintf(script,"%s\n",cmd.get()); if(script_only) goto skip; } bool remove_target=false; FileInfo *old=target_set->FindByName(file->name); if(old && !to_rm_mismatched->FindByName(file->name)) { Report(_("Removing old file `%s'"),target_name_rel); remove_target=true; stats.mod_symlinks++; } else stats.new_symlinks++; Report(_("Making symbolic link `%s' to `%s'"),target_name_rel,file->symlink.get()); mvJob *j=new mvJob(target_session->Clone(),file->symlink,target_name,FA::SYMLINK); if(remove_target) j->RemoveTargetFirst(); JobStarted(j); RemoveSourceLater(file); break; } if(script) { ArgV args("shell"); args.Append("ln"); args.Append("-sf"); args.Append(shell_encode(file->symlink)); args.Append(shell_encode(target_name)); xstring_ca cmd(args.CombineQuoted()); fprintf(script,"%s\n",cmd.get()); if(script_only) goto skip; } struct stat st; if(lstat(target_name,&st)!=-1) { Report(_("Removing old local file `%s'"),target_name_rel); stats.mod_symlinks++; if(remove(target_name)==-1) { eprintf("mirror: remove(%s): %s\n",target_name,strerror(errno)); goto skip; } } else { if(FlagSet(ONLY_EXISTING)) { Report(_("Skipping symlink `%s' (only-existing)"),target_name_rel); goto skip; } 
stats.new_symlinks++; } Report(_("Making symbolic link `%s' to `%s'"),target_name_rel,file->symlink.get()); res=symlink(file->symlink,target_name); if(res==-1) eprintf("mirror: symlink(%s): %s\n",target_name,strerror(errno)); RemoveSourceLater(file); break; } case FileInfo::UNKNOWN: break; } skip: return; }
Safe
[ "CWE-20", "CWE-401" ]
lftp
a27e07d90a4608ceaf928b1babb27d4d803e1992
2.0073047103009737e+38
315
mirror: prepend ./ to rm and chmod arguments to avoid URL recognition (fix #452)
0
boolean jpegqs_start_decompress(j_decompress_ptr cinfo, jpegqs_control_t *opts) { boolean ret; int use_jpeqqs = opts->niter > 0 || opts->flags & JPEGQS_UPSAMPLE_UV; if (use_jpeqqs) cinfo->buffered_image = TRUE; ret = jpeg_start_decompress(cinfo); if (use_jpeqqs) { while (!jpeg_input_complete(cinfo)) { jpeg_start_output(cinfo, cinfo->input_scan_number); jpeg_finish_output(cinfo); } do_quantsmooth(cinfo, jpeg_read_coefficients(cinfo), opts); jpeg_start_output(cinfo, cinfo->input_scan_number); } return ret; }
Safe
[]
jpeg-quantsmooth
3ab3838e610d361b71d937738edf156505c59c58
2.674003847338116e+38
14
avoid divide-by-zero on damaged JPEG files Also fixed misleading indentation warnings from GCC.
0
static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); int ret; if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) return -EINVAL; ret = macvtap_do_read(q, iocb, m->msg_iov, total_len, flags & MSG_DONTWAIT); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; } return ret; }
Safe
[ "CWE-119", "CWE-787" ]
linux
b92946e2919134ebe2a4083e4302236295ea2a73
1.9861488836345087e+38
16
macvtap: zerocopy: validate vectors before building skb There're several reasons that the vectors need to be validated: - Return error when caller provides vectors whose num is greater than UIO_MAXIOV. - Linearize part of skb when userspace provides vectors grater than MAX_SKB_FRAGS. - Return error when userspace provides vectors whose total length may exceed - MAX_SKB_FRAGS * PAGE_SIZE. Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
0
PHP_FUNCTION(fnmatch) { char *pattern, *filename; int pattern_len, filename_len; long flags = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss|l", &pattern, &pattern_len, &filename, &filename_len, &flags) == FAILURE) { return; } if (filename_len >= MAXPATHLEN) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Filename exceeds the maximum allowed length of %d characters", MAXPATHLEN); RETURN_FALSE; } if (pattern_len >= MAXPATHLEN) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Pattern exceeds the maximum allowed length of %d characters", MAXPATHLEN); RETURN_FALSE; } RETURN_BOOL( ! fnmatch( pattern, filename, flags )); }
Vulnerable
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
2.726210860946441e+38
21
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
1
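The fix referenced above rejects string arguments that contain embedded NUL bytes, since C path APIs silently truncate at the first '\0'. A stand-alone sketch of such a check (not the PHP macro that was actually added):

#include <stddef.h>

/* Return 1 if the buffer of length 'len' holds no embedded NUL byte. */
static int path_has_no_embedded_nul(const char *s, size_t len)
{
    size_t i;
    for (i = 0; i < len; i++)
        if (s[i] == '\0')
            return 0;   /* would be truncated by fnmatch()/stat()/open() */
    return 1;
}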
static void vnc_dpy_copy(DisplayChangeListener *dcl, int src_x, int src_y, int dst_x, int dst_y, int w, int h) { VncDisplay *vd = container_of(dcl, VncDisplay, dcl); VncState *vs, *vn; uint8_t *src_row; uint8_t *dst_row; int i, x, y, pitch, inc, w_lim, s; int cmp_bytes; if (!vd->server) { /* no client connected */ return; } vnc_refresh_server_surface(vd); QTAILQ_FOREACH_SAFE(vs, &vd->clients, next, vn) { if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { vs->force_update = 1; vnc_update_client(vs, 1, true); /* vs might be free()ed here */ } } /* do bitblit op on the local surface too */ pitch = vnc_server_fb_stride(vd); src_row = vnc_server_fb_ptr(vd, src_x, src_y); dst_row = vnc_server_fb_ptr(vd, dst_x, dst_y); y = dst_y; inc = 1; if (dst_y > src_y) { /* copy backwards */ src_row += pitch * (h-1); dst_row += pitch * (h-1); pitch = -pitch; y = dst_y + h - 1; inc = -1; } w_lim = w - (VNC_DIRTY_PIXELS_PER_BIT - (dst_x % VNC_DIRTY_PIXELS_PER_BIT)); if (w_lim < 0) { w_lim = w; } else { w_lim = w - (w_lim % VNC_DIRTY_PIXELS_PER_BIT); } for (i = 0; i < h; i++) { for (x = 0; x <= w_lim; x += s, src_row += cmp_bytes, dst_row += cmp_bytes) { if (x == w_lim) { if ((s = w - w_lim) == 0) break; } else if (!x) { s = (VNC_DIRTY_PIXELS_PER_BIT - (dst_x % VNC_DIRTY_PIXELS_PER_BIT)); s = MIN(s, w_lim); } else { s = VNC_DIRTY_PIXELS_PER_BIT; } cmp_bytes = s * VNC_SERVER_FB_BYTES; if (memcmp(src_row, dst_row, cmp_bytes) == 0) continue; memmove(dst_row, src_row, cmp_bytes); QTAILQ_FOREACH(vs, &vd->clients, next) { if (!vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { set_bit(((x + dst_x) / VNC_DIRTY_PIXELS_PER_BIT), vs->dirty[y]); } } } src_row += pitch - w * VNC_SERVER_FB_BYTES; dst_row += pitch - w * VNC_SERVER_FB_BYTES; y += inc; } QTAILQ_FOREACH(vs, &vd->clients, next) { if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { vnc_copy(vs, src_x, src_y, dst_x, dst_y, w, h); } } }
Safe
[]
qemu
4c65fed8bdf96780735dbdb92a8bd0d6b6526cc3
3.2627460264997856e+38
80
ui: vnc: avoid floating point exception While sending 'SetPixelFormat' messages to a VNC server, the client could set the 'red-max', 'green-max' and 'blue-max' values to be zero. This leads to a floating point exception in write_png_palette while doing frame buffer updates. Reported-by: Lian Yihan <lianyihan@360.cn> Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> Reviewed-by: Gerd Hoffmann <kraxel@redhat.com> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
0
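The advisory above concerns a client-supplied pixel format whose per-channel maxima can be zero and later end up as divisors. A minimal sketch of the kind of validation that prevents the division (field names are generic, not QEMU's):

#include <stdint.h>

struct pixel_format {
    uint16_t red_max, green_max, blue_max;
};

/* Reject a SetPixelFormat-style request whose maxima would be used as divisors. */
static int pixel_format_is_sane(const struct pixel_format *pf)
{
    return pf->red_max != 0 && pf->green_max != 0 && pf->blue_max != 0;
}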
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
    u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
    int ret = 0;

    if (interruptibility & GUEST_INTR_STATE_STI)
        ret |= KVM_X86_SHADOW_INT_STI;
    if (interruptibility & GUEST_INTR_STATE_MOV_SS)
        ret |= KVM_X86_SHADOW_INT_MOV_SS;

    return ret;
}
Safe
[]
kvm
a642fc305053cc1c6e47e4f4df327895747ab485
1.0189316169443413e+38
12
kvm: vmx: handle invvpid vm exit gracefully On systems with invvpid instruction support (corresponding bit in IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid causes vm exit, which is currently not handled and results in propagation of unknown exit to userspace. Fix this by installing an invvpid vm exit handler. This is CVE-2014-3646. Cc: stable@vger.kernel.org Signed-off-by: Petr Matousek <pmatouse@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
dissect_kafka_create_topics_request_create_topic_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset, kafka_api_version_t api_version) { proto_item *subti, *subsubti; proto_tree *subtree, *subsubtree; int topic_start, topic_len; subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Create Topic Request"); /* topic */ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, api_version >= 5, &topic_start, &topic_len); /* num_partitions */ proto_tree_add_item(subtree, hf_kafka_num_partitions, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; /* replication_factor */ proto_tree_add_item(subtree, hf_kafka_replication_factor, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; /* [replica_assignment] */ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_replica_assignment, &subsubti, "Replica Assignments"); offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version >= 5, api_version, &dissect_kafka_create_topics_request_replica_assignment, NULL); proto_item_set_end(subsubti, tvb, offset); /* [config] */ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_config, &subsubti, "Configs"); offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version >= 5, api_version, &dissect_kafka_create_topics_request_config, NULL); proto_item_set_end(subsubti, tvb, offset); if (api_version >= 5) { offset = dissect_kafka_tagged_fields(tvb, pinfo, tree, offset, 0); } proto_item_set_end(subti, tvb, offset); proto_item_append_text(subti, " (Topic=%s)", tvb_get_string_enc(wmem_packet_scope(), tvb, topic_start, topic_len, ENC_UTF_8)); return offset; }
Safe
[ "CWE-401" ]
wireshark
f4374967bbf9c12746b8ec3cd54dddada9dd353e
5.51676133871097e+37
49
Kafka: Limit our decompression size. Don't assume that the Internet has our best interests at heart when it gives us the size of our decompression buffer. Assign an arbitrary limit of 50 MB. This fixes #16739 in that it takes care of ** (process:17681): WARNING **: 20:03:07.440: Dissector bug, protocol Kafka, in packet 31: ../epan/proto.c:7043: failed assertion "end >= fi->start" which is different from the original error output. It looks like *that* might have taken care of in one of the other recent Kafka bug fixes. The decompression routines return a success or failure status. Use gbooleans instead of ints for that.
0
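The fix described above caps how much memory a decompressor is allowed to produce instead of trusting a length taken from the packet. A hedged sketch of that cap (the constant mirrors the commit message; the helper is illustrative, not Wireshark API):

#include <stddef.h>

#define MAX_DECOMPRESSED_SIZE (50 * 1024 * 1024UL) /* arbitrary 50 MB ceiling */

/* Clamp an attacker-controlled "uncompressed size" field before allocating. */
static size_t bounded_output_size(size_t claimed_size)
{
    return claimed_size > MAX_DECOMPRESSED_SIZE ? MAX_DECOMPRESSED_SIZE
                                                : claimed_size;
}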
void replace_mount_options(struct super_block *sb, char *options)
{
    char *old = sb->s_options;
    rcu_assign_pointer(sb->s_options, options);
    if (old) {
        synchronize_rcu();
        kfree(old);
    }
}
Safe
[ "CWE-269" ]
user-namespace
a6138db815df5ee542d848318e5dae681590fccd
1.2581936143822086e+37
9
mnt: Only change user settable mount flags in remount Kenton Varda <kenton@sandstorm.io> discovered that by remounting a read-only bind mount read-only in a user namespace the MNT_LOCK_READONLY bit would be cleared, allowing an unprivileged user to the remount a read-only mount read-write. Correct this by replacing the mask of mount flags to preserve with a mask of mount flags that may be changed, and preserve all others. This ensures that any future bugs with this mask and remount will fail in an easy to detect way where new mount flags simply won't change. Cc: stable@vger.kernel.org Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
0
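The commit above replaces a "flags to preserve" mask with a "flags the caller may change" mask, so any flag not explicitly listed is preserved by default. A small sketch of that inversion with made-up flag values:

#include <stdio.h>

#define FLAG_READONLY   0x01u
#define FLAG_NOEXEC     0x02u
#define FLAG_LOCKED     0x80u   /* must never be cleared by remount */

#define USER_CHANGEABLE (FLAG_READONLY | FLAG_NOEXEC)

/* Keep every bit the user may not touch; take only changeable bits from the request. */
static unsigned int apply_remount_flags(unsigned int current_flags, unsigned int requested)
{
    return (current_flags & ~USER_CHANGEABLE) | (requested & USER_CHANGEABLE);
}

int main(void)
{
    unsigned int cur = FLAG_READONLY | FLAG_LOCKED;
    printf("0x%02x\n", apply_remount_flags(cur, 0)); /* FLAG_LOCKED survives: 0x80 */
    return 0;
}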
int udf_expand_file_adinicb(struct inode *inode) { struct page *page; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); int err; WARN_ON_ONCE(!inode_is_locked(inode)); if (!iinfo->i_lenAlloc) { if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; up_write(&iinfo->i_data_sem); mark_inode_dirty(inode); return 0; } /* * Release i_data_sem so that we can lock a page - page lock ranks * above i_data_sem. i_mutex still protects us against file changes. */ up_write(&iinfo->i_data_sem); page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); if (!page) return -ENOMEM; if (!PageUptodate(page)) { kaddr = kmap_atomic(page); memset(kaddr + iinfo->i_lenAlloc, 0x00, PAGE_SIZE - iinfo->i_lenAlloc); memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr, iinfo->i_lenAlloc); flush_dcache_page(page); SetPageUptodate(page); kunmap_atomic(kaddr); } down_write(&iinfo->i_data_sem); memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; set_page_dirty(page); unlock_page(page); up_write(&iinfo->i_data_sem); err = filemap_fdatawrite(inode->i_mapping); if (err) { /* Restore everything back so that we don't lose data... */ lock_page(page); down_write(&iinfo->i_data_sem); kaddr = kmap_atomic(page); memcpy(iinfo->i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); kunmap_atomic(kaddr); unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; inode->i_data.a_ops = &udf_adinicb_aops; up_write(&iinfo->i_data_sem); } put_page(page); mark_inode_dirty(inode); return err; }
Vulnerable
[ "CWE-476" ]
linux
ea8569194b43f0f01f0a84c689388542c7254a1f
2.703151452776371e+38
70
udf: Restore i_lenAlloc when inode expansion fails When we fail to expand inode from inline format to a normal format, we restore inode to contain the original inline formatting but we forgot to set i_lenAlloc back. The mismatch between i_lenAlloc and i_size was then causing further problems such as warnings and lost data down the line. Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com> CC: stable@vger.kernel.org Fixes: 7e49b6f2480c ("udf: Convert UDF to new truncate calling sequence") Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jan Kara <jack@suse.cz>
1
iakerb_gss_set_sec_context_option(OM_uint32 *minor_status,
                                  gss_ctx_id_t *context_handle,
                                  const gss_OID desired_object,
                                  const gss_buffer_t value)
{
    iakerb_ctx_id_t ctx = (iakerb_ctx_id_t)*context_handle;

    if (ctx == NULL || ctx->gssc == GSS_C_NO_CONTEXT)
        return GSS_S_UNAVAILABLE;

    return krb5_gss_set_sec_context_option(minor_status, &ctx->gssc,
                                           desired_object, value);
}
Safe
[ "CWE-18" ]
krb5
e04f0283516e80d2f93366e0d479d13c9b5c8c2a
2.3258265431106426e+38
13
Fix IAKERB context aliasing bugs [CVE-2015-2696] The IAKERB mechanism currently replaces its context handle with the krb5 mechanism handle upon establishment, under the assumption that most GSS functions are only called after context establishment. This assumption is incorrect, and can lead to aliasing violations for some programs. Maintain the IAKERB context structure after context establishment and add new IAKERB entry points to refer to it with that type. Add initiate and established flags to the IAKERB context structure for use in gss_inquire_context() prior to context establishment. CVE-2015-2696: In MIT krb5 1.9 and later, applications which call gss_inquire_context() on a partially-established IAKERB context can cause the GSS-API library to read from a pointer using the wrong type, generally causing a process crash. Java server applications using the native JGSS provider are vulnerable to this bug. A carefully crafted IAKERB packet might allow the gss_inquire_context() call to succeed with attacker-determined results, but applications should not make access control decisions based on gss_inquire_context() results prior to context establishment. CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C [ghudson@mit.edu: several bugfixes, style changes, and edge-case behavior changes; commit message and CVE description] ticket: 8244 target_version: 1.14 tags: pullup
0
void BN_with_flags(BIGNUM *dest, const BIGNUM *b, int flags)
{
    dest->d = b->d;
    dest->top = b->top;
    dest->dmax = b->dmax;
    dest->neg = b->neg;
    dest->flags = ((dest->flags & BN_FLG_MALLOCED)
                   | (b->flags & ~BN_FLG_MALLOCED)
                   | BN_FLG_STATIC_DATA
                   | flags);
}
Safe
[ "CWE-310" ]
openssl
aab7c770353b1dc4ba045938c8fb446dd1c4531e
1.1634317094595527e+37
10
Elliptic curve scalar multiplication with timing attack defenses Co-authored-by: Nicola Tuveri <nic.tuv@gmail.com> Co-authored-by: Cesar Pereida Garcia <cesar.pereidagarcia@tut.fi> Co-authored-by: Sohaib ul Hassan <soh.19.hassan@gmail.com> Reviewed-by: Andy Polyakov <appro@openssl.org> Reviewed-by: Matt Caswell <matt@openssl.org> (Merged from https://github.com/openssl/openssl/pull/6009) (cherry picked from commit 40e48e54582e46c1a01e184ecf5bd31f4f7f8294)
0
othercase(charset,ch)
CHARSET_INFO *charset;
int ch;
{
    /*
      In MySQL some multi-byte character sets have 'ctype' array
      but don't have 'to_lower' and 'to_upper' arrays. In this
      case we handle only basic latin letters a..z and A..Z.

      If 'to_lower' and 'to_upper' arrays are empty in a character set,
      then my_isalpha(cs, ch) should never return TRUE for characters
      other than basic latin letters. Otherwise it should be
      considered as a mistake in character set definition.
    */
    assert(my_isalpha(charset,ch));
    if (my_isupper(charset,ch))
    {
        return(charset->to_lower ? my_tolower(charset,ch) : ch - 'A' + 'a');
    }
    else if (my_islower(charset,ch))
    {
        return(charset->to_upper ? my_toupper(charset,ch) : ch - 'a' + 'A');
    }
    else /* peculiar, but could happen */
        return(ch);
}
Safe
[ "CWE-190" ]
mysql-server
dc45e408250c582eb532417a42cef5b5a8e2fe77
3.326226891717427e+38
29
Bug#20642505: HENRY SPENCER REGULAR EXPRESSIONS (REGEX) LIBRARY The MySQL server uses Henry Spencer's library for regular expressions to support the REGEXP/RLIKE string operator. This changeset adapts a recent fix from the upstream for better 32-bit compatiblity. (Note that we cannot simply use the current upstream version as a drop-in replacement for the version used by the server as the latter has been extended to understand MySQL charsets etc.)
0
acl_fetch_stcode(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                 const struct arg *args, struct sample *smp)
{
    struct http_txn *txn = l7;
    char *ptr;
    int len;

    CHECK_HTTP_MESSAGE_FIRST();

    if (txn->rsp.msg_state < HTTP_MSG_BODY)
        return 0;

    len = txn->rsp.sl.st.c_l;
    ptr = txn->rsp.chn->buf->p + txn->rsp.sl.st.c;

    smp->type = SMP_T_UINT;
    smp->data.uint = __strl2ui(ptr, len);
    smp->flags = SMP_F_VOL_1ST;
    return 1;
}
Safe
[]
haproxy
aae75e3279c6c9bd136413a72dafdcd4986bb89a
2.4194190738698037e+38
20
BUG/CRITICAL: using HTTP information in tcp-request content may crash the process During normal HTTP request processing, request buffers are realigned if there are less than global.maxrewrite bytes available after them, in order to leave enough room for rewriting headers after the request. This is done in http_wait_for_request(). However, if some HTTP inspection happens during a "tcp-request content" rule, this realignment is not performed. In theory this is not a problem because empty buffers are always aligned and TCP inspection happens at the beginning of a connection. But with HTTP keep-alive, it also happens at the beginning of each subsequent request. So if a second request was pipelined by the client before the first one had a chance to be forwarded, the second request will not be realigned. Then, http_wait_for_request() will not perform such a realignment either because the request was already parsed and marked as such. The consequence of this, is that the rewrite of a sufficient number of such pipelined, unaligned requests may leave less room past the request been processed than the configured reserve, which can lead to a buffer overflow if request processing appends some data past the end of the buffer. A number of conditions are required for the bug to be triggered : - HTTP keep-alive must be enabled ; - HTTP inspection in TCP rules must be used ; - some request appending rules are needed (reqadd, x-forwarded-for) - since empty buffers are always realigned, the client must pipeline enough requests so that the buffer always contains something till the point where there is no more room for rewriting. While such a configuration is quite unlikely to be met (which is confirmed by the bug's lifetime), a few people do use these features together for very specific usages. And more importantly, writing such a configuration and the request to attack it is trivial. A quick workaround consists in forcing keep-alive off by adding "option httpclose" or "option forceclose" in the frontend. Alternatively, disabling HTTP-based TCP inspection rules enough if the application supports it. At first glance, this bug does not look like it could lead to remote code execution, as the overflowing part is controlled by the configuration and not by the user. But some deeper analysis should be performed to confirm this. And anyway, corrupting the process' memory and crashing it is quite trivial. Special thanks go to Yves Lafon from the W3C who reported this bug and deployed significant efforts to collect the relevant data needed to understand it in less than one week. CVE-2013-1912 was assigned to this issue. Note that 1.4 is also affected so the fix must be backported.
0
bool asn1_write_implicit_Integer(struct asn1_data *data, int i)
{
    if (i == -1) {
        /* -1 is special as it consists of all-0xff bytes. In
           push_int_bigendian this is the only case that is not
           properly handled, as all 0xff bytes would be handled as
           leading ones to be ignored. */
        return asn1_write_uint8(data, 0xff);
    } else {
        return push_int_bigendian(data, i, i<0);
    }
}
Safe
[ "CWE-399" ]
samba
9d989c9dd7a5b92d0c5d65287935471b83b6e884
5.76746051386027e+35
12
CVE-2015-7540: lib: util: Check *every* asn1 return call and early return. BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187 Signed-off-by: Jeremy Allison <jra@samba.org> Reviewed-by: Volker Lendecke <Volker.Lendecke@SerNet.DE> Autobuild-User(master): Jeremy Allison <jra@samba.org> Autobuild-Date(master): Fri Sep 19 01:29:00 CEST 2014 on sn-devel-104 (cherry picked from commit b9d3fd4cc551df78a7b066ee8ce43bbaa3ff994a)
0
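The commit above is about never ignoring a return value from the ASN.1 writer and bailing out as soon as any call fails. A generic sketch of that discipline with stubbed-in encoder primitives (the names here are invented placeholders, not Samba's API):

#include <stdbool.h>
#include <stdio.h>

/* Stub encoder primitives standing in for a real ASN.1 writer; each reports
 * failure through its return value, which the caller must never ignore. */
static bool enc_push_tag(void *ctx, int tag)  { (void)ctx; (void)tag; return true; }
static bool enc_write_int(void *ctx, int v)   { (void)ctx; (void)v;   return true; }
static bool enc_pop_tag(void *ctx)            { (void)ctx;            return true; }

/* Every call is checked and the first failure aborts the whole encode. */
static bool encode_example(void *ctx, int value)
{
    if (!enc_push_tag(ctx, 0x30)) return false;
    if (!enc_write_int(ctx, value)) return false;
    if (!enc_pop_tag(ctx)) return false;
    return true;
}

int main(void)
{
    printf("%s\n", encode_example(NULL, 42) ? "ok" : "failed");
    return 0;
}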
static int bpf_fill_super(struct super_block *sb, void *data, int silent)
{
    static struct tree_descr bpf_rfiles[] = { { "" } };
    struct inode *inode;
    int ret;

    ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
    if (ret)
        return ret;

    sb->s_op = &bpf_super_ops;

    inode = sb->s_root->d_inode;
    inode->i_op = &bpf_dir_iops;
    inode->i_mode &= ~S_IALLUGO;
    inode->i_mode |= S_ISVTX | S_IRWXUGO;

    return 0;
}
Safe
[ "CWE-703" ]
linux
92117d8443bc5afacc8d5ba82e541946310f106e
1.376585465704047e+38
19
bpf: fix refcnt overflow On a system with >32Gbyte of phyiscal memory and infinite RLIMIT_MEMLOCK, the malicious application may overflow 32-bit bpf program refcnt. It's also possible to overflow map refcnt on 1Tb system. Impose 32k hard limit which means that the same bpf program or map cannot be shared by more than 32k processes. Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs") Reported-by: Jann Horn <jannh@google.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: David S. Miller <davem@davemloft.net>
0
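The fix quoted above bounds a reference counter so repeated sharing cannot wrap a 32-bit count. A user-space sketch of an increment that refuses to pass a hard limit (the limit value mirrors the commit message; the API is illustrative):

#include <stdint.h>
#include <errno.h>

#define REF_HARD_LIMIT 32768

struct object {
    uint32_t refcnt;
};

/* Take a reference, failing instead of overflowing the counter. */
static int object_get(struct object *o)
{
    if (o->refcnt >= REF_HARD_LIMIT)
        return -EBUSY;      /* caller must treat the object as unshareable */
    o->refcnt++;
    return 0;
}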
static my_bool process_set_gtid_purged(MYSQL* mysql_con) { MYSQL_RES *gtid_mode_res; MYSQL_ROW gtid_mode_row; char *gtid_mode_val= 0; char buf[32], query[64]; if (opt_set_gtid_purged_mode == SET_GTID_PURGED_OFF) return FALSE; /* nothing to be done */ /* Check if the server has the knowledge of GTIDs(pre mysql-5.6) or if the gtid_mode is ON or OFF. */ my_snprintf(query, sizeof(query), "SHOW VARIABLES LIKE %s", quote_for_like("gtid_mode", buf)); if (mysql_query_with_error_report(mysql_con, &gtid_mode_res, query)) return TRUE; gtid_mode_row = mysql_fetch_row(gtid_mode_res); /* gtid_mode_row is NULL for pre 5.6 versions. For versions >= 5.6, get the gtid_mode value from the second column. */ gtid_mode_val = gtid_mode_row ? (char*)gtid_mode_row[1] : NULL; if (gtid_mode_val && strcmp(gtid_mode_val, "OFF")) { /* For any gtid_mode !=OFF and irrespective of --set-gtid-purged being AUTO or ON, add GTID_PURGED in the output. */ if (opt_databases || !opt_alldbs || !opt_dump_triggers || !opt_routines || !opt_events) { fprintf(stderr,"Warning: A partial dump from a server that has GTIDs will " "by default include the GTIDs of all transactions, even " "those that changed suppressed parts of the database. If " "you don't want to restore GTIDs, pass " "--set-gtid-purged=OFF. To make a complete dump, pass " "--all-databases --triggers --routines --events. \n"); } set_session_binlog(FALSE); if (add_set_gtid_purged(mysql_con)) return TRUE; } else /* gtid_mode is off */ { if (opt_set_gtid_purged_mode == SET_GTID_PURGED_ON) { fprintf(stderr, "Error: Server has GTIDs disabled.\n"); return TRUE; } } return FALSE; }
Safe
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
1.5686640898499732e+38
60
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to eunecrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
0
static unsigned long cpu_avg_load_per_task(int cpu)
{
    struct rq *rq = cpu_rq(cpu);
    unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
    unsigned long load_avg = cpu_runnable_load(rq);

    if (nr_running)
        return load_avg / nr_running;

    return 0;
}
Safe
[ "CWE-400", "CWE-703" ]
linux
de53fd7aedb100f03e5d2231cfce0e4993282425
2.631505708414591e+38
11
sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices It has been observed, that highly-threaded, non-cpu-bound applications running under cpu.cfs_quota_us constraints can hit a high percentage of periods throttled while simultaneously not consuming the allocated amount of quota. This use case is typical of user-interactive non-cpu bound applications, such as those running in kubernetes or mesos when run on multiple cpu cores. This has been root caused to cpu-local run queue being allocated per cpu bandwidth slices, and then not fully using that slice within the period. At which point the slice and quota expires. This expiration of unused slice results in applications not being able to utilize the quota for which they are allocated. The non-expiration of per-cpu slices was recently fixed by 'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")'. Prior to that it appears that this had been broken since at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That added the following conditional which resulted in slices never being expired. if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { /* extend local deadline, drift is bounded above by 2 ticks */ cfs_rq->runtime_expires += TICK_NSEC; Because this was broken for nearly 5 years, and has recently been fixed and is now being noticed by many users running kubernetes (https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion that the mechanisms around expiring runtime should be removed altogether. This allows quota already allocated to per-cpu run-queues to live longer than the period boundary. This allows threads on runqueues that do not use much CPU to continue to use their remaining slice over a longer period of time than cpu.cfs_period_us. However, this helps prevent the above condition of hitting throttling while also not fully utilizing your cpu quota. This theoretically allows a machine to use slightly more than its allotted quota in some periods. This overflow would be bounded by the remaining quota left on each per-cpu runqueueu. This is typically no more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will change nothing, as they should theoretically fully utilize all of their quota in each period. For user-interactive tasks as described above this provides a much better user/application experience as their cpu utilization will more closely match the amount they requested when they hit throttling. This means that cpu limits no longer strictly apply per period for non-cpu bound applications, but that they are still accurate over longer timeframes. This greatly improves performance of high-thread-count, non-cpu bound applications with low cfs_quota_us allocation on high-core-count machines. In the case of an artificial testcase (10ms/100ms of quota on 80 CPU machine), this commit resulted in almost 30x performance improvement, while still maintaining correct cpu quota restrictions. That testcase is available at https://github.com/indeedeng/fibtest. 
Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition") Signed-off-by: Dave Chiluk <chiluk+linux@indeed.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Phil Auld <pauld@redhat.com> Reviewed-by: Ben Segall <bsegall@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: John Hammond <jhammond@indeed.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Kyle Anderson <kwa@yelp.com> Cc: Gabriel Munos <gmunoz@netflix.com> Cc: Peter Oskolkov <posk@posk.io> Cc: Cong Wang <xiyou.wangcong@gmail.com> Cc: Brendan Gregg <bgregg@netflix.com> Link: https://lkml.kernel.org/r/1563900266-19734-2-git-send-email-chiluk+linux@indeed.com
0
tgs_make_reply(astgs_request_t r, krb5_const_principal tgt_name, const EncTicketPart *tgt, const krb5_keyblock *replykey, int rk_is_subkey, const EncryptionKey *serverkey, const krb5_keyblock *sessionkey, krb5_kvno kvno, AuthorizationData *auth_data, hdb_entry_ex *server, krb5_principal server_principal, hdb_entry_ex *client, krb5_principal client_principal, const char *tgt_realm, hdb_entry_ex *krbtgt, krb5_enctype krbtgt_etype, krb5_principals spp, const krb5_data *rspac, const METHOD_DATA *enc_pa_data) { krb5_context context = r->context; krb5_kdc_configuration *config = r->config; KDC_REQ_BODY *b = &r->req.req_body; const char **e_text = &r->e_text; krb5_data *reply = r->reply; KDC_REP rep; EncKDCRepPart ek; EncTicketPart et; KDCOptions f = b->kdc_options; krb5_error_code ret; int is_weak = 0; memset(&rep, 0, sizeof(rep)); memset(&et, 0, sizeof(et)); memset(&ek, 0, sizeof(ek)); rep.pvno = 5; rep.msg_type = krb_tgs_rep; et.authtime = tgt->authtime; _kdc_fix_time(&b->till); et.endtime = min(tgt->endtime, *b->till); ALLOC(et.starttime); *et.starttime = kdc_time; ret = check_tgs_flags(r, b, tgt_name, tgt, &et); if(ret) goto out; /* We should check the transited encoding if: 1) the request doesn't ask not to be checked 2) globally enforcing a check 3) principal requires checking 4) we allow non-check per-principal, but principal isn't marked as allowing this 5) we don't globally allow this */ #define GLOBAL_FORCE_TRANSITED_CHECK \ (config->trpolicy == TRPOLICY_ALWAYS_CHECK) #define GLOBAL_ALLOW_PER_PRINCIPAL \ (config->trpolicy == TRPOLICY_ALLOW_PER_PRINCIPAL) #define GLOBAL_ALLOW_DISABLE_TRANSITED_CHECK \ (config->trpolicy == TRPOLICY_ALWAYS_HONOUR_REQUEST) /* these will consult the database in future release */ #define PRINCIPAL_FORCE_TRANSITED_CHECK(P) 0 #define PRINCIPAL_ALLOW_DISABLE_TRANSITED_CHECK(P) 0 ret = fix_transited_encoding(context, config, !f.disable_transited_check || GLOBAL_FORCE_TRANSITED_CHECK || PRINCIPAL_FORCE_TRANSITED_CHECK(server) || !((GLOBAL_ALLOW_PER_PRINCIPAL && PRINCIPAL_ALLOW_DISABLE_TRANSITED_CHECK(server)) || GLOBAL_ALLOW_DISABLE_TRANSITED_CHECK), &tgt->transited, &et, krb5_principal_get_realm(context, client_principal), krb5_principal_get_realm(context, server->entry.principal), tgt_realm); if(ret) goto out; ret = copy_Realm(&server_principal->realm, &rep.ticket.realm); if (ret) goto out; _krb5_principal2principalname(&rep.ticket.sname, server_principal); ret = copy_Realm(&tgt_name->realm, &rep.crealm); if (ret) goto out; /* * RFC 8062 states "if the ticket in the TGS request is an anonymous * one, the client and client realm are copied from that ticket". So * whilst the TGT flag check below is superfluous, it is included in * order to follow the specification to its letter. 
*/ if (et.flags.anonymous && !tgt->flags.anonymous) _kdc_make_anonymous_principalname(&rep.cname); else ret = copy_PrincipalName(&tgt_name->name, &rep.cname); if (ret) goto out; rep.ticket.tkt_vno = 5; ek.caddr = et.caddr; { time_t life; life = et.endtime - *et.starttime; if(client && client->entry.max_life) life = min(life, *client->entry.max_life); if(server->entry.max_life) life = min(life, *server->entry.max_life); et.endtime = *et.starttime + life; } if(f.renewable_ok && tgt->flags.renewable && et.renew_till == NULL && et.endtime < *b->till && tgt->renew_till != NULL) { et.flags.renewable = 1; ALLOC(et.renew_till); *et.renew_till = *b->till; } if(et.renew_till){ time_t renew; renew = *et.renew_till - *et.starttime; if(client && client->entry.max_renew) renew = min(renew, *client->entry.max_renew); if(server->entry.max_renew) renew = min(renew, *server->entry.max_renew); *et.renew_till = *et.starttime + renew; } if(et.renew_till){ *et.renew_till = min(*et.renew_till, *tgt->renew_till); *et.starttime = min(*et.starttime, *et.renew_till); et.endtime = min(et.endtime, *et.renew_till); } *et.starttime = min(*et.starttime, et.endtime); if(*et.starttime == et.endtime){ ret = KRB5KDC_ERR_NEVER_VALID; goto out; } if(et.renew_till && et.endtime == *et.renew_till){ free(et.renew_till); et.renew_till = NULL; et.flags.renewable = 0; } et.flags.pre_authent = tgt->flags.pre_authent; et.flags.hw_authent = tgt->flags.hw_authent; et.flags.ok_as_delegate = server->entry.flags.ok_as_delegate; /* See MS-KILE 3.3.5.1 */ if (!server->entry.flags.forwardable) et.flags.forwardable = 0; if (!server->entry.flags.proxiable) et.flags.proxiable = 0; /* * For anonymous tickets, we should filter out positive authorization data * that could reveal the client's identity, and return a policy error for * restrictive authorization data. Policy for unknown authorization types * is implementation dependent. */ if (rspac->length && !et.flags.anonymous) { /* * No not need to filter out the any PAC from the * auth_data since it's signed by the KDC. 
*/ ret = _kdc_tkt_add_if_relevant_ad(context, &et, KRB5_AUTHDATA_WIN2K_PAC, rspac); if (ret) goto out; } if (auth_data) { unsigned int i = 0; /* XXX check authdata */ if (et.authorization_data == NULL) { et.authorization_data = calloc(1, sizeof(*et.authorization_data)); if (et.authorization_data == NULL) { ret = ENOMEM; krb5_set_error_message(context, ret, "malloc: out of memory"); goto out; } } for(i = 0; i < auth_data->len ; i++) { ret = add_AuthorizationData(et.authorization_data, &auth_data->val[i]); if (ret) { krb5_set_error_message(context, ret, "malloc: out of memory"); goto out; } } /* Filter out type KRB5SignedPath */ ret = find_KRB5SignedPath(context, et.authorization_data, NULL); if (ret == 0) { if (et.authorization_data->len == 1) { free_AuthorizationData(et.authorization_data); free(et.authorization_data); et.authorization_data = NULL; } else { AuthorizationData *ad = et.authorization_data; free_AuthorizationDataElement(&ad->val[ad->len - 1]); ad->len--; } } } ret = krb5_copy_keyblock_contents(context, sessionkey, &et.key); if (ret) goto out; et.crealm = rep.crealm; et.cname = rep.cname; ek.key = et.key; /* MIT must have at least one last_req */ ek.last_req.val = calloc(1, sizeof(*ek.last_req.val)); if (ek.last_req.val == NULL) { ret = ENOMEM; goto out; } ek.last_req.len = 1; /* set after alloc to avoid null deref on cleanup */ ek.nonce = b->nonce; ek.flags = et.flags; ek.authtime = et.authtime; ek.starttime = et.starttime; ek.endtime = et.endtime; ek.renew_till = et.renew_till; ek.srealm = rep.ticket.realm; ek.sname = rep.ticket.sname; _kdc_log_timestamp(r, "TGS-REQ", et.authtime, et.starttime, et.endtime, et.renew_till); /* Don't sign cross realm tickets, they can't be checked anyway */ { char *realm = get_krbtgt_realm(&ek.sname); if (realm == NULL || strcmp(realm, ek.srealm) == 0) { ret = _kdc_add_KRB5SignedPath(context, config, krbtgt, krbtgt_etype, client_principal, NULL, spp, &et); if (ret) goto out; } } if (enc_pa_data->len) { rep.padata = calloc(1, sizeof(*rep.padata)); if (rep.padata == NULL) { ret = ENOMEM; goto out; } ret = copy_METHOD_DATA(enc_pa_data, rep.padata); if (ret) goto out; } if (krb5_enctype_valid(context, serverkey->keytype) != 0 && _kdc_is_weak_exception(server->entry.principal, serverkey->keytype)) { krb5_enctype_enable(context, serverkey->keytype); is_weak = 1; } /* It is somewhat unclear where the etype in the following encryption should come from. What we have is a session key in the passed tgt, and a list of preferred etypes *for the new ticket*. Should we pick the best possible etype, given the keytype in the tgt, or should we look at the etype list here as well? What if the tgt session key is DES3 and we want a ticket with a (say) CAST session key. Should the DES3 etype be added to the etype list, even if we don't want a session key with DES3? */ ret = _kdc_encode_reply(context, config, NULL, 0, &rep, &et, &ek, serverkey->keytype, kvno, serverkey, 0, replykey, rk_is_subkey, e_text, reply); if (is_weak) krb5_enctype_disable(context, serverkey->keytype); r->reply_key.keytype = replykey->keytype; _log_astgs_req(r, serverkey->keytype); out: free_TGS_REP(&rep); free_TransitedEncoding(&et.transited); if(et.starttime) free(et.starttime); if(et.renew_till) free(et.renew_till); if(et.authorization_data) { free_AuthorizationData(et.authorization_data); free(et.authorization_data); } free_LastReq(&ek.last_req); memset(et.key.keyvalue.data, 0, et.key.keyvalue.length); free_EncryptionKey(&et.key); return ret; }
Safe
[ "CWE-476" ]
heimdal
04171147948d0a3636bc6374181926f0fb2ec83a
1.7682513330710294e+38
314
kdc: validate sname in TGS-REQ In tgs_build_reply(), validate the server name in the TGS-REQ is present before dereferencing.
0
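The fix above is a presence check on an optional protocol field before it is dereferenced. A small hedged sketch of the same pattern on a made-up request structure (not Heimdal's types; the real KDC returns a protocol error code at this point):

#include <stddef.h>

struct principal_name { const char *name_string; };
struct tgs_req_body   { struct principal_name *sname; /* optional field */ };

/* Return an error instead of dereferencing a missing server name. */
static int check_request(const struct tgs_req_body *b)
{
    if (b->sname == NULL)
        return -1;          /* reject the request before touching sname */
    /* safe to use b->sname->name_string from here on */
    return 0;
}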
mime_header_decoder_delete(struct mime_header_decoder_data *pd)
{
    if (pd) {
        mbfl_convert_filter_delete(pd->conv2_filter);
        mbfl_convert_filter_delete(pd->conv1_filter);
        mbfl_convert_filter_delete(pd->deco_filter);
        mbfl_memory_device_clear(&pd->outdev);
        mbfl_memory_device_clear(&pd->tmpdev);
        mbfl_free((void*)pd);
    }
}
Safe
[ "CWE-119" ]
php-src
64f42c73efc58e88671ad76b6b6bc8e2b62713e1
8.089915486435586e+36
11
Fixed bug #71906: AddressSanitizer: negative-size-param (-1) in mbfl_strcut
0
void Greeter::setTheme(const QString &theme) {
    m_theme = theme;
}
Safe
[ "CWE-284", "CWE-264" ]
sddm
4cfed6b0a625593fb43876f04badc4dd99799d86
2.018164485656025e+38
3
Disable greeters from loading KDE's debug hander Some themes may use KDE components which will automatically load KDE's crash handler. If the greeter were to then somehow crash, that would leave a crash handler allowing other actions, albeit as the locked down SDDM user. Only SDDM users using the breeze theme from plasma-workspace are affected. Safest and simplest fix is to handle this inside SDDM disabling kcrash via an environment variable for all future themes that may use these libraries. CVE-2015-0856
0
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
                                 struct ring_buffer_event *event)
{
    struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
    unsigned int nest;
    u64 ts;

    /* If the event includes an absolute time, then just use that */
    if (event->type_len == RINGBUF_TYPE_TIME_STAMP)
        return rb_event_time_stamp(event);

    nest = local_read(&cpu_buffer->committing);
    verify_event(cpu_buffer, event);
    if (WARN_ON_ONCE(!nest))
        goto fail;

    /* Read the current saved nesting level time stamp */
    if (likely(--nest < MAX_NEST))
        return cpu_buffer->event_stamp[nest];

    /* Shouldn't happen, warn if it does */
    WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
    /* Can only fail on 32 bit */
    if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
        /* Screw it, just read the current time */
        ts = rb_time_stamp(cpu_buffer->buffer);

    return ts;
}
Safe
[ "CWE-835" ]
linux
67f0d6d9883c13174669f88adac4f0ee656cc16a
7.89694175438762e+37
31
tracing: Fix bug in rb_per_cpu_empty() that might cause deadloop. The "rb_per_cpu_empty()" misinterpret the condition (as not-empty) when "head_page" and "commit_page" of "struct ring_buffer_per_cpu" points to the same buffer page, whose "buffer_data_page" is empty and "read" field is non-zero. An error scenario could be constructed as followed (kernel perspective): 1. All pages in the buffer has been accessed by reader(s) so that all of them will have non-zero "read" field. 2. Read and clear all buffer pages so that "rb_num_of_entries()" will return 0 rendering there's no more data to read. It is also required that the "read_page", "commit_page" and "tail_page" points to the same page, while "head_page" is the next page of them. 3. Invoke "ring_buffer_lock_reserve()" with large enough "length" so that it shot pass the end of current tail buffer page. Now the "head_page", "commit_page" and "tail_page" points to the same page. 4. Discard current event with "ring_buffer_discard_commit()", so that "head_page", "commit_page" and "tail_page" points to a page whose buffer data page is now empty. When the error scenario has been constructed, "tracing_read_pipe" will be trapped inside a deadloop: "trace_empty()" returns 0 since "rb_per_cpu_empty()" returns 0 when it hits the CPU containing such constructed ring buffer. Then "trace_find_next_entry_inc()" always return NULL since "rb_num_of_entries()" reports there's no more entry to read. Finally "trace_seq_to_user()" returns "-EBUSY" spanking "tracing_read_pipe" back to the start of the "waitagain" loop. I've also written a proof-of-concept script to construct the scenario and trigger the bug automatically, you can use it to trace and validate my reasoning above: https://github.com/aegistudio/RingBufferDetonator.git Tests has been carried out on linux kernel 5.14-rc2 (2734d6c1b1a089fb593ef6a23d4b70903526fe0c), my fixed version of kernel (for testing whether my update fixes the bug) and some older kernels (for range of affected kernels). Test result is also attached to the proof-of-concept repository. Link: https://lore.kernel.org/linux-trace-devel/YPaNxsIlb2yjSi5Y@aegistudio/ Link: https://lore.kernel.org/linux-trace-devel/YPgrN85WL9VyrZ55@aegistudio Cc: stable@vger.kernel.org Fixes: bf41a158cacba ("ring-buffer: make reentrant") Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org> Signed-off-by: Haoran Luo <www@aegistudio.net> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
0
conntrack_init(struct conntrack *ct) { unsigned i, j; long long now = time_msec(); ct_rwlock_init(&ct->resources_lock); ct_rwlock_wrlock(&ct->resources_lock); hmap_init(&ct->nat_conn_keys); hmap_init(&ct->alg_expectations); ovs_list_init(&ct->alg_exp_list); ct_rwlock_unlock(&ct->resources_lock); for (i = 0; i < CONNTRACK_BUCKETS; i++) { struct conntrack_bucket *ctb = &ct->buckets[i]; ct_lock_init(&ctb->lock); ct_lock_lock(&ctb->lock); hmap_init(&ctb->connections); for (j = 0; j < ARRAY_SIZE(ctb->exp_lists); j++) { ovs_list_init(&ctb->exp_lists[j]); } ct_lock_unlock(&ctb->lock); ovs_mutex_init(&ctb->cleanup_mutex); ovs_mutex_lock(&ctb->cleanup_mutex); ctb->next_cleanup = now + CT_TM_MIN; ovs_mutex_unlock(&ctb->cleanup_mutex); } ct->hash_basis = random_uint32(); atomic_count_init(&ct->n_conn, 0); atomic_init(&ct->n_conn_limit, DEFAULT_N_CONN_LIMIT); latch_init(&ct->clean_thread_exit); ct->clean_thread = ovs_thread_create("ct_clean", clean_thread_main, ct); }
Safe
[ "CWE-400" ]
ovs
35c280072c1c3ed58202745b7d27fbbd0736999b
1.344250730146375e+37
33
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <joakim.hindersson@elastx.se> Acked-by: Ilya Maximets <i.maximets@ovn.org> Signed-off-by: Flavio Leitner <fbl@sysclose.org> Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
0
static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
    b->p_mark = prob_minus(b->p_mark, q->decrement);
}
Safe
[ "CWE-330" ]
linux
55667441c84fa5e0911a0aac44fb059c15ba6da2
1.3135892585602263e+38
4
net/flow_dissector: switch to siphash UDP IPv6 packets auto flowlabels are using a 32bit secret (static u32 hashrnd in net/core/flow_dissector.c) and apply jhash() over fields known by the receivers. Attackers can easily infer the 32bit secret and use this information to identify a device and/or user, since this 32bit secret is only set at boot time. Really, using jhash() to generate cookies sent on the wire is a serious security concern. Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be a dead end. Trying to periodically change the secret (like in sch_sfq.c) could change paths taken in the network for long lived flows. Let's switch to siphash, as we did in commit df453700e8d8 ("inet: switch IP ID generator to siphash") Using a cryptographically strong pseudo random function will solve this privacy issue and more generally remove other weak points in the stack. Packet schedulers using skb_get_hash_perturb() benefit from this change. Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default") Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels") Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel") Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Jonathan Berger <jonathann1@walla.com> Reported-by: Amit Klein <aksecurity@gmail.com> Reported-by: Benny Pinkas <benny@pinkas.net> Cc: Tom Herbert <tom@herbertland.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
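The change described above replaces jhash with a keyed SipHash over the flow key so the boot-time secret cannot be inferred from observed hashes. As a declaration-only sketch of how the call site changes, assuming a generic siphash24() helper with a 128-bit key (not the kernel's siphash API; any SipHash-2-4 implementation with this shape can be linked in):

#include <stdint.h>
#include <stddef.h>

/* Assumed helper: implementation must be provided separately. */
uint64_t siphash24(const void *data, size_t len, const uint8_t key[16]);

struct flow_key { uint32_t saddr, daddr; uint16_t sport, dport; };

/* The key is generated once at boot; hashes no longer leak a weak 32-bit seed. */
static uint64_t hash_flow(const struct flow_key *fk, const uint8_t key[16])
{
    return siphash24(fk, sizeof(*fk), key);
}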
static int bcf_dec_typed_int1_safe(uint8_t *p, uint8_t *end, uint8_t **q,
                                   int32_t *val) {
    uint32_t t;
    if (end - p < 2) return -1;
    t = *p++ & 0xf;
    /* Use if .. else if ... else instead of switch to force order.  Assumption
       is that small integers are more frequent than big ones. */
    if (t == BCF_BT_INT8) {
        *q = p + 1;
        *val = *(int8_t *) p;
    } else if (t == BCF_BT_INT16) {
        if (end - p < 2) return -1;
        *q = p + 2;
        *val = le_to_i16(p);
    } else if (t == BCF_BT_INT32) {
        if (end - p < 4) return -1;
        *q = p + 4;
        *val = le_to_i32(p);
#ifdef VCF_ALLOW_INT64
    } else if (t == BCF_BT_INT64) {
        // This case should never happen because there should be no 64-bit BCFs
        // at all, definitely not coming from htslib
        if (end - p < 8) return -1;
        *q = p + 8;
        *val = le_to_i64(p);
#endif
    } else {
        return -1;
    }
    return 0;
}
Safe
[ "CWE-787" ]
htslib
dcd4b7304941a8832fba2d0fc4c1e716e7a4e72c
2.3473138694179356e+38
31
Fix check for VCF record size The check for excessive record size in vcf_parse_format() only looked at individual fields. It was therefore possible to exceed the limit and overflow fmt_aux_t::offset by having multiple fields with a combined size that went over INT_MAX. Fix by including the amount of memory used so far in the check. Credit to OSS-Fuzz Fixes oss-fuzz 24097
0
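The fix quoted above changes a per-field size check into a cumulative one so that several fields together cannot overflow a 32-bit offset. A generic sketch of the cumulative check:

#include <stddef.h>
#include <limits.h>

/* Accumulate field sizes, rejecting the record once the running total
 * (not just the individual field) would exceed INT_MAX. */
static int add_field_len(size_t *used_so_far, size_t field_len)
{
    if (field_len > (size_t)INT_MAX - *used_so_far)
        return -1;              /* record too large for a 32-bit offset */
    *used_so_far += field_len;
    return 0;
}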
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev)
{
    int i, r;
    struct kvm_io_bus *new_bus, *bus;

    bus = kvm->buses[bus_idx];

    new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
    if (!new_bus)
        return -ENOMEM;

    r = -ENOENT;
    for (i = 0; i < new_bus->dev_count; i++)
        if (new_bus->range[i].dev == dev) {
            r = 0;
            new_bus->dev_count--;
            new_bus->range[i] = new_bus->range[new_bus->dev_count];
            sort(new_bus->range, new_bus->dev_count,
                 sizeof(struct kvm_io_range),
                 kvm_io_bus_sort_cmp, NULL);
            break;
        }

    if (r) {
        kfree(new_bus);
        return r;
    }

    rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
    synchronize_srcu_expedited(&kvm->srcu);
    kfree(bus);
    return r;
}
Safe
[ "CWE-399" ]
kvm
5b40572ed5f0344b9dbee486a17c589ce1abe1a3
2.704122066330232e+37
34
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings If some vcpus are created before KVM_CREATE_IRQCHIP, then irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading to potential NULL pointer dereferences. Fix by: - ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called - ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP This is somewhat long winded because vcpu->arch.apic is created without kvm->lock held. Based on earlier patch by Michael Ellerman. Signed-off-by: Michael Ellerman <michael@ellerman.id.au> Signed-off-by: Avi Kivity <avi@redhat.com>
0
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id) { VirtIONet *n = opaque; VirtIODevice *vdev = VIRTIO_DEVICE(n); int ret, i, link_down; if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) return -EINVAL; ret = virtio_load(vdev, f); if (ret) { return ret; } qemu_get_buffer(f, n->mac, ETH_ALEN); n->vqs[0].tx_waiting = qemu_get_be32(f); virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f)); if (version_id >= 3) n->status = qemu_get_be16(f); if (version_id >= 4) { if (version_id < 8) { n->promisc = qemu_get_be32(f); n->allmulti = qemu_get_be32(f); } else { n->promisc = qemu_get_byte(f); n->allmulti = qemu_get_byte(f); } } if (version_id >= 5) { n->mac_table.in_use = qemu_get_be32(f); /* MAC_TABLE_ENTRIES may be different from the saved image */ if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) { qemu_get_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN); } else { int64_t i; /* Overflow detected - can happen if source has a larger MAC table. * We simply set overflow flag so there's no need to maintain the * table of addresses, discard them all. * Note: 64 bit math to avoid integer overflow. */ for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) { qemu_get_byte(f); } n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1; n->mac_table.in_use = 0; } } if (version_id >= 6) qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3); if (version_id >= 7) { if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) { error_report("virtio-net: saved image requires vnet_hdr=on"); return -1; } } if (version_id >= 9) { n->mac_table.multi_overflow = qemu_get_byte(f); n->mac_table.uni_overflow = qemu_get_byte(f); } if (version_id >= 10) { n->alluni = qemu_get_byte(f); n->nomulti = qemu_get_byte(f); n->nouni = qemu_get_byte(f); n->nobcast = qemu_get_byte(f); } if (version_id >= 11) { if (qemu_get_byte(f) && !peer_has_ufo(n)) { error_report("virtio-net: saved image requires TUN_F_UFO support"); return -1; } } if (n->max_queues > 1) { if (n->max_queues != qemu_get_be16(f)) { error_report("virtio-net: different max_queues "); return -1; } n->curr_queues = qemu_get_be16(f); if (n->curr_queues > n->max_queues) { error_report("virtio-net: curr_queues %x > max_queues %x", n->curr_queues, n->max_queues); return -1; } for (i = 1; i < n->curr_queues; i++) { n->vqs[i].tx_waiting = qemu_get_be32(f); } } if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) { n->curr_guest_offloads = qemu_get_be64(f); } else { n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); } if (peer_has_vnet_hdr(n)) { virtio_net_apply_guest_offloads(n); } virtio_net_set_queues(n); /* Find the first multicast entry in the saved MAC filter */ for (i = 0; i < n->mac_table.in_use; i++) { if (n->mac_table.macs[i * ETH_ALEN] & 1) { break; } } n->mac_table.first_multi = i; /* nc.link_down can't be migrated, so infer link_down according * to link status bit in n->status */ link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; for (i = 0; i < n->max_queues; i++) { qemu_get_subqueue(n->nic, i)->link_down = link_down; } return 0; }
Safe
[ "CWE-119" ]
qemu
98f93ddd84800f207889491e0b5d851386b459cf
2.8656223422149838e+38
129
virtio-net: out-of-bounds buffer write on load CVE-2013-4149 QEMU 1.3.0 out-of-bounds buffer write in virtio_net_load()@hw/net/virtio-net.c > } else if (n->mac_table.in_use) { > uint8_t *buf = g_malloc0(n->mac_table.in_use); We are allocating buffer of size n->mac_table.in_use > qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN); and read to the n->mac_table.in_use size buffer n->mac_table.in_use * ETH_ALEN bytes, corrupting memory. If adversary controls state then memory written there is controlled by adversary. Reviewed-by: Michael Roth <mdroth@linux.vnet.ibm.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Juan Quintela <quintela@redhat.com>
0
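The advisory above boils down to allocating n bytes but then reading n * ETH_ALEN bytes into that buffer from migration state. A compact sketch of the corrected sizing (generic helper names, not QEMU's):

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Read 'in_use' MAC entries from 'src'; the allocation must be sized in
 * entries * ETH_ALEN, matching the amount that is copied in. */
static uint8_t *load_mac_table(const uint8_t *src, size_t in_use)
{
    uint8_t *buf = calloc(in_use, ETH_ALEN);   /* not calloc(in_use, 1) */
    if (!buf)
        return NULL;
    memcpy(buf, src, in_use * ETH_ALEN);
    return buf;
}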
static inline int vmsvga_copy_rect(struct vmsvga_state_s *s, int x0, int y0, int x1, int y1, int w, int h) { DisplaySurface *surface = qemu_console_surface(s->vga.con); uint8_t *vram = s->vga.vram_ptr; int bypl = surface_stride(surface); int bypp = surface_bytes_per_pixel(surface); int width = bypp * w; int line = h; uint8_t *ptr[2]; if (!vmsvga_verify_rect(surface, "vmsvga_copy_rect/src", x0, y0, w, h)) { return -1; } if (!vmsvga_verify_rect(surface, "vmsvga_copy_rect/dst", x1, y1, w, h)) { return -1; } if (y1 > y0) { ptr[0] = vram + bypp * x0 + bypl * (y0 + h - 1); ptr[1] = vram + bypp * x1 + bypl * (y1 + h - 1); for (; line > 0; line --, ptr[0] -= bypl, ptr[1] -= bypl) { memmove(ptr[1], ptr[0], width); } } else { ptr[0] = vram + bypp * x0 + bypl * y0; ptr[1] = vram + bypp * x1 + bypl * y1; for (; line > 0; line --, ptr[0] += bypl, ptr[1] += bypl) { memmove(ptr[1], ptr[0], width); } } vmsvga_update_rect_delayed(s, x1, y1, w, h); return 0; }
Safe
[]
qemu
fa892e9abb728e76afcf27323ab29c57fb0fe7aa
1.1356134223502782e+38
35
ui/cursor: fix integer overflow in cursor_alloc (CVE-2021-4206) Prevent potential integer overflow by limiting 'width' and 'height' to 512x512. Also change 'datasize' type to size_t. Refer to security advisory https://starlabs.sg/advisories/22-4206/ for more information. Fixes: CVE-2021-4206 Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20220407081712.345609-1-mcascell@redhat.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
0
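The fix above bounds guest-supplied cursor dimensions and widens the size arithmetic so width * height * bytes-per-pixel cannot wrap. A hedged sketch of that validation (the structure and 512x512 limit follow the advisory text, not QEMU's code):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define CURSOR_MAX_DIM 512          /* limit named in the advisory */

/* Allocate a cursor bitmap only for sane dimensions, using size_t math. */
static uint32_t *cursor_data_alloc(unsigned width, unsigned height)
{
    size_t datasize;

    if (width == 0 || height == 0 ||
        width > CURSOR_MAX_DIM || height > CURSOR_MAX_DIM)
        return NULL;                        /* reject before any multiplication */
    datasize = (size_t)width * height * sizeof(uint32_t);
    return malloc(datasize);
}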
static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
    return xdp_data_meta_unsupported(xdp) ? 0 :
           xdp->data - xdp->data_meta;
}
Safe
[ "CWE-120" ]
linux
050fad7c4534c13c8eb1d9c2ba66012e014773cb
7.984920371961698e+37
5
bpf: fix truncated jump targets on heavy expansions Recently during testing, I ran into the following panic: [ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP [ 207.901637] Modules linked in: binfmt_misc [...] [ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7 [ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017 [ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO) [ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 207.992603] lr : 0xffff000000bdb754 [ 207.996080] sp : ffff000013703ca0 [ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001 [ 208.004688] x27: 0000000000000001 x26: 0000000000000000 [ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00 [ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000 [ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a [ 208.025903] x19: ffff000009578000 x18: 0000000000000a03 [ 208.031206] x17: 0000000000000000 x16: 0000000000000000 [ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000 [ 208.041813] x13: 0000000000000000 x12: 0000000000000000 [ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18 [ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000 [ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000 [ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6 [ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500 [ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08 [ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974) [ 208.086235] Call trace: [ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 208.093713] 0xffff000000bdb754 [ 208.096845] bpf_test_run+0x78/0xf8 [ 208.100324] bpf_prog_test_run_skb+0x148/0x230 [ 208.104758] sys_bpf+0x314/0x1198 [ 208.108064] el0_svc_naked+0x30/0x34 [ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680) [ 208.117717] ---[ end trace 263cb8a59b5bf29f ]--- The program itself which caused this had a long jump over the whole instruction sequence where all of the inner instructions required heavy expansions into multiple BPF instructions. Additionally, I also had BPF hardening enabled which requires once more rewrites of all constant values in order to blind them. Each time we rewrite insns, bpf_adj_branches() would need to potentially adjust branch targets which cross the patchlet boundary to accommodate for the additional delta. Eventually that lead to the case where the target offset could not fit into insn->off's upper 0x7fff limit anymore where then offset wraps around becoming negative (in s16 universe), or vice versa depending on the jump direction. Therefore it becomes necessary to detect and reject any such occasions in a generic way for native eBPF and cBPF to eBPF migrations. For the latter we can simply check bounds in the bpf_convert_filter()'s BPF_EMIT_JMP helper macro and bail out once we surpass limits. The bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case of subsequent hardening) is a bit more complex in that we need to detect such truncations before hitting the bpf_prog_realloc(). Thus the latter is split into an extra pass to probe problematic offsets on the original program in order to fail early. With that in place and carefully tested I no longer hit the panic and the rewrites are rejected properly. The above example panic I've seen on bpf-next, though the issue itself is generic in that a guard against this issue in bpf seems more appropriate in this case. 
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Martin KaFai Lau <kafai@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
0
static inline void __netif_reschedule(struct Qdisc *q)
{
    struct softnet_data *sd;
    unsigned long flags;

    local_irq_save(flags);
    sd = &__get_cpu_var(softnet_data);
    q->next_sched = NULL;
    *sd->output_queue_tailp = q;
    sd->output_queue_tailp = &q->next_sched;
    raise_softirq_irqoff(NET_TX_SOFTIRQ);
    local_irq_restore(flags);
}
Safe
[ "CWE-264" ]
linux
8909c9ad8ff03611c9c96c9a92656213e4bb495b
9.05769720117912e+37
13
net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are limited to /lib/modules/**. However, CAP_NET_ADMIN capability shouldn't allow anybody load any module not related to networking. This patch restricts an ability of autoloading modules to netdev modules with explicit aliases. This fixes CVE-2011-1019. Arnd Bergmann suggested to leave untouched the old pre-v2.6.32 behavior of loading netdev modules by name (without any prefix) for processes with CAP_SYS_MODULE to maintain the compatibility with network scripts that use autoloading netdev modules by aliases like "eth0", "wlan0". Currently there are only three users of the feature in the upstream kernel: ipip, ip_gre and sit. root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) -- root@albatros:~# grep Cap /proc/$$/status CapInh: 0000000000000000 CapPrm: fffffff800001000 CapEff: fffffff800001000 CapBnd: fffffff800001000 root@albatros:~# modprobe xfs FATAL: Error inserting xfs (/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted root@albatros:~# lsmod | grep xfs root@albatros:~# ifconfig xfs xfs: error fetching interface information: Device not found root@albatros:~# lsmod | grep xfs root@albatros:~# lsmod | grep sit root@albatros:~# ifconfig sit sit: error fetching interface information: Device not found root@albatros:~# lsmod | grep sit root@albatros:~# ifconfig sit0 sit0 Link encap:IPv6-in-IPv4 NOARP MTU:1480 Metric:1 root@albatros:~# lsmod | grep sit sit 10457 0 tunnel4 2957 1 sit For CAP_SYS_MODULE module loading is still relaxed: root@albatros:~# grep Cap /proc/$$/status CapInh: 0000000000000000 CapPrm: ffffffffffffffff CapEff: ffffffffffffffff CapBnd: ffffffffffffffff root@albatros:~# ifconfig xfs xfs: error fetching interface information: Device not found root@albatros:~# lsmod | grep xfs xfs 745319 0 Reference: https://lkml.org/lkml/2011/2/24/203 Signed-off-by: Vasiliy Kulikov <segoon@openwall.com> Signed-off-by: Michael Tokarev <mjt@tls.msk.ru> Acked-by: David S. Miller <davem@davemloft.net> Acked-by: Kees Cook <kees.cook@canonical.com> Signed-off-by: James Morris <jmorris@namei.org>
0
int is_dir(const char *path) { struct stat statbuf; int ret = stat(path, &statbuf); if (ret == 0 && S_ISDIR(statbuf.st_mode)) return 1; return 0; }
Safe
[ "CWE-59", "CWE-61" ]
lxc
592fd47a6245508b79fe6ac819fe6d3b2c1289be
2.681684453829299e+38
8
CVE-2015-1335: Protect container mounts against symlinks When a container starts up, lxc sets up the container's inital fstree by doing a bunch of mounting, guided by the container configuration file. The container config is owned by the admin or user on the host, so we do not try to guard against bad entries. However, since the mount target is in the container, it's possible that the container admin could divert the mount with symbolic links. This could bypass proper container startup (i.e. confinement of a root-owned container by the restrictive apparmor policy, by diverting the required write to /proc/self/attr/current), or bypass the (path-based) apparmor policy by diverting, say, /proc to /mnt in the container. To prevent this, 1. do not allow mounts to paths containing symbolic links 2. do not allow bind mounts from relative paths containing symbolic links. Details: Define safe_mount which ensures that the container has not inserted any symbolic links into any mount targets for mounts to be done during container setup. The host's mount path may contain symbolic links. As it is under the control of the administrator, that's ok. So safe_mount begins the check for symbolic links after the rootfs->mount, by opening that directory. It opens each directory along the path using openat() relative to the parent directory using O_NOFOLLOW. When the target is reached, it mounts onto /proc/self/fd/<targetfd>. Use safe_mount() in mount_entry(), when mounting container proc, and when needed. In particular, safe_mount() need not be used in any case where: 1. the mount is done in the container's namespace 2. the mount is for the container's rootfs 3. the mount is relative to a tmpfs or proc/sysfs which we have just safe_mount()ed ourselves Since we were using proc/net as a temporary placeholder for /proc/sys/net during container startup, and proc/net is a symbolic link, use proc/tty instead. Update the lxc.container.conf manpage with details about the new restrictions. Finally, add a testcase to test some symbolic link possibilities. Reported-by: Roman Fiedler Signed-off-by: Serge Hallyn <serge.hallyn@ubuntu.com> Acked-by: Stéphane Graber <stgraber@ubuntu.com>
0
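The commit message in the record above describes the safe_mount approach: walk the mount target component by component, opening each one with O_NOFOLLOW so the container cannot divert the path through a symbolic link, then mount onto /proc/self/fd/<targetfd>. The following is a minimal userspace sketch of the path-walking part only; the function name and buffer size are mine, not lxc's actual implementation.

```c
/* Sketch: open "rootfs/rel_path" one component at a time with O_NOFOLLOW,
 * so no component may be a symbolic link. Returns an fd for the final
 * component, or -1 on error. Illustrative only; not the lxc code. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int open_without_symlinks(const char *rootfs, const char *rel_path)
{
    char buf[4096];
    int fd, next;
    char *saveptr = NULL, *comp;

    fd = open(rootfs, O_RDONLY | O_DIRECTORY | O_CLOEXEC);
    if (fd < 0)
        return -1;

    if (strlen(rel_path) >= sizeof(buf)) {
        close(fd);
        return -1;
    }
    strcpy(buf, rel_path);

    for (comp = strtok_r(buf, "/", &saveptr); comp;
         comp = strtok_r(NULL, "/", &saveptr)) {
        /* O_NOFOLLOW makes openat() fail with ELOOP if comp is a symlink */
        next = openat(fd, comp, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
        close(fd);
        if (next < 0)
            return -1;
        fd = next;
    }
    return fd; /* caller can then mount onto /proc/self/fd/<fd> */
}
```

Because each component is opened individually, every openat() call applies O_NOFOLLOW to that component, which is what blocks a container-controlled symlink anywhere in the target path.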
ztpqr_scale_wb_common(i_ctx_t *i_ctx_p, int idx) { os_ptr op = osp; double a[4], Ps; /* a[0] = ws, a[1] = bs, a[2] = wd, a[3] = bd */ double result; int code; int i; code = real_param(op, &Ps); if (code < 0) return code; for (i = 0; i < 4; i++) { ref tmp; code = array_get(imemory, op - 4 + i, idx, &tmp); if (code >= 0) code = real_param(&tmp, &a[i]); if (code < 0) return code; } if (a[0] == a[1]) return_error(gs_error_undefinedresult); result = a[3] + (a[2] - a[3]) * (Ps - a[1]) / (a[0] - a[1]); make_real(op - 4, result); pop(4); return 0; }
Safe
[ "CWE-704" ]
ghostpdl
548bb434e81dadcc9f71adf891a3ef5bea8e2b4e
3.0200683622293353e+38
27
PS interpreter - add some type checking These were 'probably' safe anyway, since they mostly treat the objects as integers without checking, which at least can't result in a crash. Nevertheless, we ought to check. The return from comparedictkeys could be wrong if one of the keys had a value which was not an array, it could incorrectly decide the two were in fact the same.
0
int64_t ClockMath::floorDivide(int64_t numerator, int64_t denominator) { return (numerator >= 0) ? numerator / denominator : ((numerator + 1) / denominator) - 1; }
Safe
[ "CWE-190" ]
icu
71dd84d4ffd6600a70e5bca56a22b957e6642bd4
2.9011972608547417e+38
4
ICU-12504 in ICU4C Persian cal, use int64_t math for one operation to avoid overflow; add tests in C and J X-SVN-Rev: 40654
0
void Convert::generic_convert(const char * in, int size, CharVector & out) { buf_.clear(); decode_->decode(in, size, buf_); FilterChar * start = buf_.pbegin(); FilterChar * stop = buf_.pend(); if (!filter.empty()) filter.process(start, stop); encode_->encode(start, stop, out); }
Safe
[ "CWE-125" ]
aspell
de29341638833ba7717bd6b5e6850998454b044b
1.788887366894189e+38
10
Don't allow null-terminated UCS-2/4 strings using the original API. Detect if the encoding is UCS-2/4 and the length is -1 in affected API functions and refuse to convert the string. If the string ends up being converted somehow, abort with an error message in DecodeDirect and ConvDirect. To convert a null terminated string in Decode/ConvDirect, a negative number corresponding to the width of the underlying character type for the encoding is expected; for example, if the encoding is "ucs-2" then a the size is expected to be -2. Also fix a 1-3 byte over-read in DecodeDirect when reading UCS-2/4 strings when a size is provided (found by OSS-Fuzz). Also fix a bug in DecodeDirect that caused DocumentChecker to return the wrong offsets when working with UCS-2/4 strings.
0
void Monitor::reset_probe_timeout() { cancel_probe_timeout(); probe_timeout_event = new C_MonContext(this, [this](int r) { probe_timeout(r); }); double t = g_conf->mon_probe_timeout; if (timer.add_event_after(t, probe_timeout_event)) { dout(10) << "reset_probe_timeout " << probe_timeout_event << " after " << t << " seconds" << dendl; } else { probe_timeout_event = nullptr; } }
Safe
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
2.0413917595539077e+38
14
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <sage@redhat.com> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
0
gst_qtdemux_sync_streams (GstQTDemux * demux) { gint i; if (demux->n_streams <= 1) return; for (i = 0; i < demux->n_streams; i++) { QtDemuxStream *stream; GstClockTime end_time; stream = demux->streams[i]; if (!stream->pad) continue; /* TODO advance time on subtitle streams here, if any some day */ /* some clips/trailers may have unbalanced streams at the end, * so send EOS on shorter stream to prevent stalling others */ /* do not mess with EOS if SEGMENT seeking */ if (demux->segment.flags & GST_SEEK_FLAG_SEGMENT) continue; if (demux->pullbased) { /* loop mode is sample time based */ if (!STREAM_IS_EOS (stream)) continue; } else { /* push mode is byte position based */ if (stream->n_samples && stream->samples[stream->n_samples - 1].offset >= demux->offset) continue; } if (stream->sent_eos) continue; /* only act if some gap */ end_time = stream->segments[stream->n_segments - 1].stop_time; GST_LOG_OBJECT (demux, "current position: %" GST_TIME_FORMAT ", stream end: %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->segment.position), GST_TIME_ARGS (end_time)); if (GST_CLOCK_TIME_IS_VALID (end_time) && (end_time + 2 * GST_SECOND < demux->segment.position)) { GstEvent *event; GST_DEBUG_OBJECT (demux, "sending EOS for stream %s", GST_PAD_NAME (stream->pad)); stream->sent_eos = TRUE; event = gst_event_new_eos (); if (demux->segment_seqnum) gst_event_set_seqnum (event, demux->segment_seqnum); gst_pad_push_event (stream->pad, event); } } }
Safe
[ "CWE-125" ]
gst-plugins-good
d0949baf3dadea6021d54abef6802fed5a06af75
3.1289308721200124e+38
58
qtdemux: Fix out of bounds read in tag parsing code We can't simply assume that the length of the tag value as given inside the stream is correct but should also check against the amount of data we have actually available. https://bugzilla.gnome.org/show_bug.cgi?id=775451
0
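The qtdemux fix above is an instance of a general rule: a length declared inside the stream may only be used after clamping it against the data actually available. A hedged, generic sketch of that check (not the GStreamer code; names are illustrative):

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy a length-prefixed tag value out of a buffer, refusing declared
 * lengths that point past the data we really have. Sketch only. */
static int read_tag_value(const uint8_t *data, size_t avail,
                          size_t declared_len, uint8_t *out, size_t out_size)
{
    /* declared_len comes from the stream and cannot be trusted */
    if (declared_len > avail || declared_len > out_size)
        return -1;
    memcpy(out, data, declared_len);
    return 0;
}
```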
cnt_miss(struct worker *wrk, struct req *req) { CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CHECK_OBJ_NOTNULL(req, REQ_MAGIC); AN(req->vcl); CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC); CHECK_OBJ_ORNULL(req->stale_oc, OBJCORE_MAGIC); VCL_miss_method(req->vcl, wrk, req, NULL, NULL); switch (wrk->handling) { case VCL_RET_FETCH: wrk->stats->cache_miss++; VBF_Fetch(wrk, req, req->objcore, req->stale_oc, VBF_NORMAL); if (req->stale_oc != NULL) (void)HSH_DerefObjCore(wrk, &req->stale_oc, 0); req->req_step = R_STP_FETCH; return (REQ_FSM_MORE); case VCL_RET_FAIL: req->req_step = R_STP_VCLFAIL; break; case VCL_RET_SYNTH: req->req_step = R_STP_SYNTH; break; case VCL_RET_RESTART: req->req_step = R_STP_RESTART; break; case VCL_RET_PASS: req->req_step = R_STP_PASS; break; default: WRONG("Illegal return from vcl_miss{}"); } VRY_Clear(req); if (req->stale_oc != NULL) (void)HSH_DerefObjCore(wrk, &req->stale_oc, 0); AZ(HSH_DerefObjCore(wrk, &req->objcore, 1)); return (REQ_FSM_MORE); }
Safe
[ "CWE-212" ]
varnish-cache
bd7b3d6d47ccbb5e1747126f8e2a297f38e56b8c
7.524818117127606e+37
39
Clear err_code and err_reason at start of request handling req->err_code and req->err_reason are set when going to synthetic handling. From there the resp.reason HTTP field is set from req->err_reason if set, or the generic code based on req->err_code is used if it was NULL. This patch clears these members so that a value from the handling of a previous request doesn't linger. Fixes: VSV00004
0
matchUrl(char *base, ObjectPtr object) { int n = strlen(base); if(object->key_size < n) return 0; if(memcmp(base, object->key, n) != 0) return 0; return (object->key_size == n) || (((char*)object->key)[n] == '?'); }
Safe
[ "CWE-617" ]
polipo
0e2b44af619e46e365971ea52b97457bc0778cd3
3.416362805966464e+37
9
Try to read POST requests to local configuration interface correctly.
0
int set_unit_path(const char *p) { /* This is mostly for debug purposes */ if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0) return -errno; return 0; }
Safe
[ "CWE-269" ]
systemd
bf65b7e0c9fc215897b676ab9a7c9d1c688143ba
1.5632596431406343e+38
7
core: imply NNP and SUID/SGID restriction for DynamicUser=yes service Let's be safe, rather than sorry. This way DynamicUser=yes services can neither take benefit of, nor create SUID/SGID binaries. Given that DynamicUser= is a recent addition only we should be able to get away with turning this on, even though this is strictly speaking a binary compatibility breakage.
0
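The "NNP" part of the hardening described above corresponds to the Linux no_new_privs flag, which a process sets for itself with prctl(2); after that, execve() can no longer grant additional privileges such as setuid. A minimal sketch of setting the flag (Linux only; the SUID/SGID-creation restriction mentioned in the message is enforced by other mechanisms and is not shown):

```c
#include <stdio.h>
#include <sys/prctl.h>   /* pulls in PR_SET_NO_NEW_PRIVS on current glibc */

int main(void)
{
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("prctl(PR_SET_NO_NEW_PRIVS)");
        return 1;
    }
    /* From here on, execve() of a setuid binary will not raise privileges. */
    return 0;
}
```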
static inline int num_user_pages(unsigned long addr, unsigned long len) { const unsigned long spage = addr & PAGE_MASK; const unsigned long epage = (addr + len - 1) & PAGE_MASK; return 1 + ((epage - spage) >> PAGE_SHIFT); }
Safe
[ "CWE-416" ]
linux
3d2a9d642512c21a12d19b9250e7a835dcb41a79
9.505268593846367e+37
8
IB/hfi1: Ensure correct mm is used at all times Two earlier bug fixes have created a security problem in the hfi1 driver. One fix aimed to solve an issue where current->mm was not valid when closing the hfi1 cdev. It attempted to do this by saving a cached value of the current->mm pointer at file open time. This is a problem if another process with access to the FD calls in via write() or ioctl() to pin pages via the hfi driver. The other fix tried to solve a use after free by taking a reference on the mm. To fix this correctly we use the existing cached value of the mm in the mmu notifier. Now we can check in the insert, evict, etc. routines that current->mm matched what the notifier was registered for. If not, then don't allow access. The register of the mmu notifier will save the mm pointer. Since in do_exit() the exit_mm() is called before exit_files(), which would call our close routine a reference is needed on the mm. We rely on the mmgrab done by the registration of the notifier, whereas before it was explicit. The mmu notifier deregistration happens when the user context is torn down, the creation of which triggered the registration. Also of note is we do not do any explicit work to protect the interval tree notifier. It doesn't seem that this is going to be needed since we aren't actually doing anything with current->mm. The interval tree notifier stuff still has a FIXME noted from a previous commit that will be addressed in a follow on patch. Cc: <stable@vger.kernel.org> Fixes: e0cf75deab81 ("IB/hfi1: Fix mm_struct use after free") Fixes: 3faa3d9a308e ("IB/hfi1: Make use of mm consistent") Link: https://lore.kernel.org/r/20201125210112.104301.51331.stgit@awfm-01.aw.intel.com Suggested-by: Jann Horn <jannh@google.com> Reported-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Ira Weiny <ira.weiny@intel.com> Reviewed-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com> Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
0
unsigned FAST_FUNC udhcp_option_idx(const char *name, const char *option_strings) { int n = index_in_strings(option_strings, name); if (n >= 0) return n; { char *buf, *d; const char *s; s = option_strings; while (*s) s += strlen(s) + 1; d = buf = xzalloc(s - option_strings); s = option_strings; while (!(*s == '\0' && s[1] == '\0')) { *d++ = (*s == '\0' ? ' ' : *s); s++; } bb_error_msg_and_die("unknown option '%s', known options: %s", name, buf); } }
Safe
[ "CWE-125" ]
busybox
6d3b4bb24da9a07c263f3c1acf8df85382ff562c
1.6242741241879427e+38
23
udhcpc: check that 4-byte options are indeed 4-byte, closes 11506 function old new delta udhcp_get_option32 - 27 +27 udhcp_get_option 231 248 +17 ------------------------------------------------------------------------------ (add/remove: 1/0 grow/shrink: 1/0 up/down: 44/0) Total: 44 bytes Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
0
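The busybox commit above adds a udhcp_get_option32 helper that only accepts an option whose length is exactly 4 bytes. The sketch below shows the idea with a simplified option walker of my own (pad/end option codes and busybox's real udhcp_get_option() plumbing are omitted):

```c
#include <stddef.h>
#include <stdint.h>

/* DHCP options are encoded as: code, len, data[len]. A 4-byte option
 * (e.g. an IPv4 address or a lease time) must be rejected if len != 4,
 * otherwise later code may read or write past the real data. Sketch only. */
static const uint8_t *get_option32(const uint8_t *opts, size_t opts_len, uint8_t code)
{
    size_t i = 0;
    while (i + 2 <= opts_len) {
        uint8_t c = opts[i];
        uint8_t len = opts[i + 1];
        if (i + 2 + len > opts_len)
            return NULL;              /* truncated option list */
        if (c == code) {
            if (len != 4)
                return NULL;          /* the crucial length check from the fix */
            return &opts[i + 2];
        }
        i += 2 + (size_t)len;
    }
    return NULL;
}
```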
explicit RaggedTensorToVariantOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("batched_input", &batched_input_)); }
Safe
[ "CWE-476" ]
tensorflow
b055b9c474cd376259dde8779908f9eeaf097d93
1.7911783451336435e+38
4
Fix `tf.raw_ops.RaggedTensorToVariant` invalid resize. PiperOrigin-RevId: 368299574 Change-Id: I751c186325aa0bab397928845e790e60c2d90918
0
static void tx3g_dump_style_nobox(FILE * trace, GF_StyleRecord *rec, u32 *shift_offset, u32 so_count) { gf_fprintf(trace, "<Style "); if (rec->startCharOffset || rec->endCharOffset) tx3g_print_char_offsets(trace, rec->startCharOffset, rec->endCharOffset, shift_offset, so_count); gf_fprintf(trace, "styles=\""); if (!rec->style_flags) { gf_fprintf(trace, "Normal"); } else { if (rec->style_flags & 1) gf_fprintf(trace, "Bold "); if (rec->style_flags & 2) gf_fprintf(trace, "Italic "); if (rec->style_flags & 4) gf_fprintf(trace, "Underlined "); } gf_fprintf(trace, "\" fontID=\"%d\" fontSize=\"%d\" ", rec->fontID, rec->font_size); tx3g_dump_rgba8(trace, "color", rec->text_color); gf_fprintf(trace, "/>\n"); }
Safe
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
1.3442104065637246e+38
18
fixed #2138
0
void CSoundFile::KeyOff(ModChannel *pChn) const { const bool bKeyOn = !pChn->dwFlags[CHN_KEYOFF]; pChn->dwFlags.set(CHN_KEYOFF); if(pChn->pModInstrument != nullptr && !pChn->VolEnv.flags[ENV_ENABLED]) { pChn->dwFlags.set(CHN_NOTEFADE); } if (!pChn->nLength) return; if (pChn->dwFlags[CHN_SUSTAINLOOP] && pChn->pModSample && bKeyOn) { const ModSample *pSmp = pChn->pModSample; if(pSmp->uFlags[CHN_LOOP]) { if (pSmp->uFlags[CHN_PINGPONGLOOP]) pChn->dwFlags.set(CHN_PINGPONGLOOP); else pChn->dwFlags.reset(CHN_PINGPONGLOOP | CHN_PINGPONGFLAG); pChn->dwFlags.set(CHN_LOOP); pChn->nLength = pSmp->nLength; pChn->nLoopStart = pSmp->nLoopStart; pChn->nLoopEnd = pSmp->nLoopEnd; if (pChn->nLength > pChn->nLoopEnd) pChn->nLength = pChn->nLoopEnd; if(pChn->position.GetUInt() > pChn->nLength) { // Test case: SusAfterLoop.it pChn->position.Set(pChn->position.GetInt() - pChn->nLength + pChn->nLoopStart); } } else { pChn->dwFlags.reset(CHN_LOOP | CHN_PINGPONGLOOP | CHN_PINGPONGFLAG); pChn->nLength = pSmp->nLength; } } if (pChn->pModInstrument) { const ModInstrument *pIns = pChn->pModInstrument; if((pIns->VolEnv.dwFlags[ENV_LOOP] || (GetType() & (MOD_TYPE_XM | MOD_TYPE_MT2 | MOD_TYPE_MDL))) && pIns->nFadeOut != 0) { pChn->dwFlags.set(CHN_NOTEFADE); } if (pIns->VolEnv.nReleaseNode != ENV_RELEASE_NODE_UNSET && pChn->VolEnv.nEnvValueAtReleaseJump == NOT_YET_RELEASED) { pChn->VolEnv.nEnvValueAtReleaseJump = pIns->VolEnv.GetValueFromPosition(pChn->VolEnv.nEnvPosition, 256); pChn->VolEnv.nEnvPosition = pIns->VolEnv[pIns->VolEnv.nReleaseNode].tick; } } }
Safe
[ "CWE-125" ]
openmpt
7ebf02af2e90f03e0dbd0e18b8b3164f372fb97c
6.577528309561028e+37
50
[Fix] Possible out-of-bounds read when computing length of some IT files with pattern loops (OpenMPT: formats that are converted to IT, libopenmpt: IT/ITP/MO3), caught with afl-fuzz. git-svn-id: https://source.openmpt.org/svn/openmpt/trunk/OpenMPT@10027 56274372-70c3-4bfc-bfc3-4c3a0b034d27
0
void RenameCollectionTest::tearDown() { _targetNss = {}; _sourceNss = {}; _opObserver = nullptr; _replCoord = nullptr; _opCtx = {}; auto service = getServiceContext(); repl::DropPendingCollectionReaper::set(service, {}); repl::StorageInterface::set(service, {}); ServiceContextMongoDTest::tearDown(); }
Safe
[ "CWE-20" ]
mongo
35c1b1f588f04926a958ad2fe4d9c59d79f81e8b
6.840203472612164e+37
13
SERVER-35636 renameCollectionForApplyOps checks for complete namespace
0
int Http2Handler::verify_npn_result() { const unsigned char *next_proto = nullptr; unsigned int next_proto_len; // Check the negotiated protocol in NPN or ALPN #ifndef OPENSSL_NO_NEXTPROTONEG SSL_get0_next_proto_negotiated(ssl_, &next_proto, &next_proto_len); #endif // !OPENSSL_NO_NEXTPROTONEG for (int i = 0; i < 2; ++i) { if (next_proto) { auto proto = StringRef{next_proto, next_proto_len}; if (sessions_->get_config()->verbose) { std::cout << "The negotiated protocol: " << proto << std::endl; } if (util::check_h2_is_selected(proto)) { return 0; } break; } else { #if OPENSSL_VERSION_NUMBER >= 0x10002000L SSL_get0_alpn_selected(ssl_, &next_proto, &next_proto_len); #else // OPENSSL_VERSION_NUMBER < 0x10002000L break; #endif // OPENSSL_VERSION_NUMBER < 0x10002000L } } if (sessions_->get_config()->verbose) { std::cerr << "Client did not advertise HTTP/2 protocol." << " (nghttp2 expects " << NGHTTP2_PROTO_VERSION_ID << ")" << std::endl; } return -1; }
Safe
[]
nghttp2
95efb3e19d174354ca50c65d5d7227d92bcd60e1
2.6304668768861717e+38
32
Don't read too greedily
0
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags, save_rflags; rflags = vmcs_readl(GUEST_RFLAGS); if (to_vmx(vcpu)->rmode.vm86_active) { rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; save_rflags = to_vmx(vcpu)->rmode.save_rflags; rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; } return rflags; }
Safe
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
2.876425712283937e+38
12
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <avi@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
0
static int md_mergeable_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *biovec) { struct mddev *mddev = q->queuedata; int ret; rcu_read_lock(); if (mddev->suspended) { /* Must always allow one vec */ if (bvm->bi_size == 0) ret = biovec->bv_len; else ret = 0; } else { struct md_personality *pers = mddev->pers; if (pers && pers->mergeable_bvec) ret = pers->mergeable_bvec(mddev, bvm, biovec); else ret = biovec->bv_len; } rcu_read_unlock(); return ret; }
Safe
[ "CWE-200" ]
linux
b6878d9e03043695dbf3fa1caa6dfc09db225b16
6.816010430084719e+37
23
md: use kzalloc() when bitmap is disabled In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a mdu_bitmap_file_t called "file". 5769 file = kmalloc(sizeof(*file), GFP_NOIO); 5770 if (!file) 5771 return -ENOMEM; This structure is copied to user space at the end of the function. 5786 if (err == 0 && 5787 copy_to_user(arg, file, sizeof(*file))) 5788 err = -EFAULT But if bitmap is disabled only the first byte of "file" is initialized with zero, so it's possible to read some bytes (up to 4095) of kernel space memory from user space. This is an information leak. 5775 /* bitmap disabled, zero the first byte and copy out */ 5776 if (!mddev->bitmap_info.file) 5777 file->pathname[0] = '\0'; Signed-off-by: Benjamin Randazzo <benjamin@randazzo.fr> Signed-off-by: NeilBrown <neilb@suse.com>
0
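The leak described in the md commit comes from allocating a structure without zeroing it, initializing only its first byte, and then copying the whole structure out to user space. A userspace analogue (not the kernel code) makes the fix easy to see: calloc() plays the role of kzalloc().

```c
#include <stdio.h>
#include <stdlib.h>

struct bitmap_file {
    char pathname[4096];
};

/* With malloc(), pathname[1..4095] would still hold old heap contents and
 * writing the whole struct out would disclose them.  calloc() (the userspace
 * analogue of kzalloc()) zeroes everything first.  Sketch only; the path
 * below is a placeholder. */
static struct bitmap_file *make_reply(int bitmap_enabled)
{
    struct bitmap_file *f = calloc(1, sizeof(*f));
    if (!f)
        return NULL;
    if (!bitmap_enabled)
        f->pathname[0] = '\0';   /* harmless now: the rest is already zero */
    else
        snprintf(f->pathname, sizeof(f->pathname), "/some/bitmap/file");
    return f;
}
```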
static encodePtr make_persistent_sdl_encoder(encodePtr enc, HashTable *ptr_map, HashTable *bp_types, HashTable *bp_encoders) { encodePtr penc = NULL; penc = malloc(sizeof(encode)); memset(penc, 0, sizeof(encode)); *penc = *enc; if (penc->details.type_str) { penc->details.type_str = strdup(penc->details.type_str); } if (penc->details.ns) { penc->details.ns = strdup(penc->details.ns); } if (penc->details.sdl_type) { make_persistent_sdl_type_ref(&penc->details.sdl_type, ptr_map, bp_types); } return penc; }
Safe
[ "CWE-476" ]
php-src
3c939e3f69955d087e0bb671868f7267dfb2a502
2.5070956959693248e+38
22
Fix bug #80672 - Null Dereference in SoapClient
0
static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) { return -1; }
Safe
[ "CWE-400", "CWE-703", "CWE-835" ]
linux
c40f7d74c741a907cfaeb73a7697081881c497d0
1.4367433361377165e+38
4
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the scheduler under high loads, starting at around the v4.18 time frame, and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list manipulation. Do a (manual) revert of: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") It turns out that the list_del_leaf_cfs_rq() introduced by this commit is a surprising property that was not considered in followup commits such as: 9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list") As Vincent Guittot explains: "I think that there is a bigger problem with commit a9e7f6544b9c and cfs_rq throttling: Let take the example of the following topology TG2 --> TG1 --> root: 1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1 cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in one path because it has never been used and can't be throttled so tmp_alone_branch will point to leaf_cfs_rq_list at the end. 2) Then TG1 is throttled 3) and we add TG3 as a new child of TG1. 4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list. With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list. So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1 cfs_rq is removed from the list. Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list but tmp_alone_branch still points to TG3 cfs_rq because its throttled parent can't be enqueued when the lock is released. tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should. So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch points on another TG cfs_rq, the next TG cfs_rq that will be added, will be linked outside rq->leaf_cfs_rq_list - which is bad. In addition, we can break the ordering of the cfs_rq in rq->leaf_cfs_rq_list but this ordering is used to update and propagate the update from leaf down to root." Instead of trying to work through all these cases and trying to reproduce the very high loads that produced the lockup to begin with, simplify the code temporarily by reverting a9e7f6544b9c - which change was clearly not thought through completely. This (hopefully) gives us a kernel that doesn't lock up so people can continue to enjoy their holidays without worrying about regressions. ;-) [ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ] Analyzed-by: Xie XiuQi <xiexiuqi@huawei.com> Analyzed-by: Vincent Guittot <vincent.guittot@linaro.org> Reported-by: Zhipeng Xie <xiezhipeng1@huawei.com> Reported-by: Sargun Dhillon <sargun@sargun.me> Reported-by: Xie XiuQi <xiexiuqi@huawei.com> Tested-by: Zhipeng Xie <xiezhipeng1@huawei.com> Tested-by: Sargun Dhillon <sargun@sargun.me> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Acked-by: Vincent Guittot <vincent.guittot@linaro.org> Cc: <stable@vger.kernel.org> # v4.13+ Cc: Bin Li <huawei.libin@huawei.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Tejun Heo <tj@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") Link: http://lkml.kernel.org/r/1545879866-27809-1-git-send-email-xiexiuqi@huawei.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
0
const Track* Tracks::GetTrackByIndex(uint32_t index) const { if (track_entries_ == NULL) return NULL; if (index >= track_entries_size_) return NULL; return track_entries_[index]; }
Safe
[ "CWE-20" ]
libvpx
f00890eecdf8365ea125ac16769a83aa6b68792d
1.7681375690059313e+38
9
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
0
void vfio_pci_intx_mask(struct vfio_pci_device *vdev) { struct pci_dev *pdev = vdev->pdev; unsigned long flags; spin_lock_irqsave(&vdev->irqlock, flags); /* * Masking can come from interrupt, ioctl, or config space * via INTx disable. The latter means this can get called * even when not using intx delivery. In this case, just * try to have the physical bit follow the virtual bit. */ if (unlikely(!is_intx(vdev))) { if (vdev->pci_2_3) pci_intx(pdev, 0); } else if (!vdev->ctx[0].masked) { /* * Can't use check_and_mask here because we always want to * mask, not just when something is pending. */ if (vdev->pci_2_3) pci_intx(pdev, 0); else disable_irq_nosync(pdev->irq); vdev->ctx[0].masked = true; } spin_unlock_irqrestore(&vdev->irqlock, flags); }
Safe
[ "CWE-399", "CWE-190" ]
linux
05692d7005a364add85c6e25a6c4447ce08f913a
1.9931295817949775e+38
31
vfio/pci: Fix integer overflows, bitmask check The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize user-supplied integers, potentially allowing memory corruption. This patch adds appropriate integer overflow checks, checks the range bounds for VFIO_IRQ_SET_DATA_NONE, and also verifies that only single element in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set. VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in vfio_pci_set_irqs_ioctl(). Furthermore, a kzalloc is changed to a kcalloc because the use of a kzalloc with an integer multiplication allowed an integer overflow condition to be reached without this patch. kcalloc checks for overflow and should prevent a similar occurrence. Signed-off-by: Vlad Tsyrklevich <vlad@tsyrklevich.net> Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
0
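Two of the patterns named in the vfio commit message are easy to show generically: rejecting a user-supplied (start, count) range that wraps around, and using a counted allocator instead of an unchecked multiplication. Userspace sketch, not the vfio code:

```c
#include <stdlib.h>

/* Reject ranges that overflow, and allocate per-element contexts with
 * calloc(), which fails cleanly on multiplication overflow instead of
 * silently allocating too little.  Illustrative only. */
static void *alloc_irq_ctx(unsigned int start, unsigned int count,
                           unsigned int max_irqs, size_t ctx_size)
{
    if (count == 0)
        return NULL;
    if (start >= max_irqs || count > max_irqs - start)  /* overflow-safe bound check */
        return NULL;
    return calloc(count, ctx_size);                     /* checked multiplication */
}
```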
void gf_m2ts_demux_dmscc_init(GF_M2TS_Demuxer *ts) { char temp_dir[GF_MAX_PATH]; u32 length; GF_Err e; ts->dsmcc_controler = gf_list_new(); ts->process_dmscc = 1; strcpy(temp_dir, gf_get_default_cache_directory() ); length = (u32) strlen(temp_dir); if(temp_dir[length-1] == GF_PATH_SEPARATOR) { temp_dir[length-1] = 0; } ts->dsmcc_root_dir = (char*)gf_calloc(strlen(temp_dir)+strlen("CarouselData")+2,sizeof(char)); sprintf(ts->dsmcc_root_dir,"%s%cCarouselData",temp_dir,GF_PATH_SEPARATOR); e = gf_mkdir(ts->dsmcc_root_dir); if(e) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[Process DSMCC] Error during the creation of the directory %s \n",ts->dsmcc_root_dir)); } }
Safe
[ "CWE-416", "CWE-125" ]
gpac
1ab4860609f2e7a35634930571e7d0531297e090
2.150139935593907e+38
23
fixed potential crash on PMT IOD parse - cf #1268 #1269
0
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk) { inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr; }
Safe
[ "CWE-119", "CWE-787" ]
linux
8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4
3.2483592077709743e+38
4
sctp: fix race on protocol/netns initialization Consider sctp module is unloaded and is being requested because an user is creating a sctp socket. During initialization, sctp will add the new protocol type and then initialize pernet subsys: status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_net_ops); The problem is that after those calls to sctp_v{4,6}_protosw_init(), it is possible for userspace to create SCTP sockets like if the module is already fully loaded. If that happens, one of the possible effects is that we will have readers for net->sctp.local_addr_list list earlier than expected and sctp_net_init() does not take precautions while dealing with that list, leading to a potential panic but not limited to that, as sctp_sock_init() will copy a bunch of blank/partially initialized values from net->sctp. The race happens like this: CPU 0 | CPU 1 socket() | __sock_create | socket() inet_create | __sock_create list_for_each_entry_rcu( | answer, &inetsw[sock->type], | list) { | inet_create /* no hits */ | if (unlikely(err)) { | ... | request_module() | /* socket creation is blocked | * the module is fully loaded | */ | sctp_init | sctp_v4_protosw_init | inet_register_protosw | list_add_rcu(&p->list, | last_perm); | | list_for_each_entry_rcu( | answer, &inetsw[sock->type], sctp_v6_protosw_init | list) { | /* hit, so assumes protocol | * is already loaded | */ | /* socket creation continues | * before netns is initialized | */ register_pernet_subsys | Simply inverting the initialization order between register_pernet_subsys() and sctp_v4_protosw_init() is not possible because register_pernet_subsys() will create a control sctp socket, so the protocol must be already visible by then. Deferring the socket creation to a work-queue is not good specially because we loose the ability to handle its errors. So, as suggested by Vlad, the fix is to split netns initialization in two moments: defaults and control socket, so that the defaults are already loaded by when we register the protocol, while control socket initialization is kept at the same moment it is today. Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace") Signed-off-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
static int map_search_self_callback(struct ldb_request *req, struct ldb_reply *ares) { struct ldb_context *ldb; struct map_context *ac; int ret; ac = talloc_get_type(req->context, struct map_context); ldb = ldb_module_get_ctx(ac->module); if (!ares) { return ldb_module_done(ac->req, NULL, NULL, LDB_ERR_OPERATIONS_ERROR); } if (ares->error != LDB_SUCCESS) { return ldb_module_done(ac->req, ares->controls, ares->response, ares->error); } /* We are interested only in the single reply */ switch(ares->type) { case LDB_REPLY_ENTRY: /* We have already found a remote DN */ if (ac->local_dn) { ldb_set_errstring(ldb, "Too many results!"); return ldb_module_done(ac->req, NULL, NULL, LDB_ERR_OPERATIONS_ERROR); } /* Store local DN */ ac->local_dn = talloc_steal(ac, ares->message->dn); break; case LDB_REPLY_DONE: switch (ac->req->operation) { case LDB_MODIFY: ret = map_modify_do_local(ac); break; case LDB_DELETE: ret = map_delete_do_local(ac); break; case LDB_RENAME: ret = map_rename_do_local(ac); break; default: /* if we get here we have definitely a problem */ ret = LDB_ERR_OPERATIONS_ERROR; } if (ret != LDB_SUCCESS) { return ldb_module_done(ac->req, NULL, NULL, LDB_ERR_OPERATIONS_ERROR); } break; default: /* ignore referrals */ break; } talloc_free(ares); return LDB_SUCCESS; }
Safe
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
1.621585731962756e+38
63
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
0
PeiUsbHubCtrlSetHubDepth ( IN EFI_PEI_SERVICES **PeiServices, IN PEI_USB_DEVICE *PeiUsbDevice, IN PEI_USB_IO_PPI *UsbIoPpi ) { EFI_USB_DEVICE_REQUEST DevReq; ZeroMem (&DevReq, sizeof (EFI_USB_DEVICE_REQUEST)); // // Fill Device request packet // DevReq.RequestType = USB_RT_HUB; DevReq.Request = USB_HUB_REQ_SET_DEPTH; DevReq.Value = PeiUsbDevice->Tier; DevReq.Length = 0; return UsbIoPpi->UsbControlTransfer ( PeiServices, UsbIoPpi, &DevReq, EfiUsbNoData, PcdGet32 (PcdUsbTransferTimeoutValue), NULL, 0 ); }
Safe
[ "CWE-787" ]
edk2
72750e3bf9174f15c17e78f0f117b5e7311bb49f
9.76473924930577e+37
27
MdeModulePkg UsbBusPei: Fix wrong buffer length used to read hub desc REF: https://bugzilla.tianocore.org/show_bug.cgi?id=973 Bug 973 just mentions UsbBusDxe, but UsbBusPei has similar issue. HUB descriptor has variable length. But the code uses stack (HubDescriptor in PeiDoHubConfig) with fixed length sizeof(EFI_USB_HUB_DESCRIPTOR) to hold HUB descriptor data. It uses hard code length value (12) for SuperSpeed path. And it uses HubDesc->Length for none SuperSpeed path, then there will be stack overflow when HubDesc->Length is greater than sizeof(EFI_USB_HUB_DESCRIPTOR). The patch updates the code to use a big enough buffer to hold the descriptor data. Cc: Jiewen Yao <jiewen.yao@intel.com> Cc: Ruiyu Ni <ruiyu.ni@intel.com> Cc: Bret Barkelew <bret.barkelew@microsoft.com> Contributed-under: TianoCore Contribution Agreement 1.1 Signed-off-by: Star Zeng <star.zeng@intel.com> Reviewed-by: Bret Barkelew <bret.barkelew@microsoft.com>
0
TcpProxyStats Config::SharedConfig::generateStats(Stats::Scope& scope) { return {ALL_TCP_PROXY_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))}; }
Safe
[ "CWE-416" ]
envoy
ce0ae309057a216aba031aff81c445c90c6ef145
1.8915575619430754e+38
3
CVE-2021-43826 Signed-off-by: Yan Avlasov <yavlasov@google.com>
0
crcbuf(int crc, unsigned int len, const char *buf) { const unsigned char *ubuf = (const unsigned char *)buf; while (len--) crc = ((crc << 8) & 0xFF00) ^ crctab[((crc >> 8) & 0xFF) ^ *ubuf++]; return crc; }
Safe
[ "CWE-119", "CWE-787" ]
t1utils
6b9d1aafcb61a3663c883663eb19ccdbfcde8d33
2.1328887021499145e+38
7
Security fixes. - Don't overflow the small cs_start buffer (reported by Niels Thykier via the debian tracker (Jakub Wilk), found with a fuzzer ("American fuzzy lop")). - Cast arguments to <ctype.h> functions to unsigned char.
0
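The second part of the t1utils fix, casting arguments to <ctype.h> functions to unsigned char, addresses the fact that passing a plain char with a negative value (anything other than EOF) to isdigit() and friends is undefined behaviour. A tiny self-contained sketch:

```c
#include <ctype.h>

/* If *s is a plain char holding, say, 0xE9 on a signed-char platform,
 * isdigit(*s) would be undefined behaviour; casting to unsigned char
 * first, as the fix does, keeps the value in the valid range. */
int count_digits(const char *s)
{
    int n = 0;
    while (*s) {
        if (isdigit((unsigned char)*s))
            n++;
        s++;
    }
    return n;
}
```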
current_word( oparg_T *oap, long count, int include, // TRUE: include word and white space int bigword) // FALSE == word, TRUE == WORD { pos_T start_pos; pos_T pos; int inclusive = TRUE; int include_white = FALSE; cls_bigword = bigword; CLEAR_POS(&start_pos); // Correct cursor when 'selection' is exclusive if (VIsual_active && *p_sel == 'e' && LT_POS(VIsual, curwin->w_cursor)) dec_cursor(); /* * When Visual mode is not active, or when the VIsual area is only one * character, select the word and/or white space under the cursor. */ if (!VIsual_active || EQUAL_POS(curwin->w_cursor, VIsual)) { /* * Go to start of current word or white space. */ back_in_line(); start_pos = curwin->w_cursor; /* * If the start is on white space, and white space should be included * (" word"), or start is not on white space, and white space should * not be included ("word"), find end of word. */ if ((cls() == 0) == include) { if (end_word(1L, bigword, TRUE, TRUE) == FAIL) return FAIL; } else { /* * If the start is not on white space, and white space should be * included ("word "), or start is on white space and white * space should not be included (" "), find start of word. * If we end up in the first column of the next line (single char * word) back up to end of the line. */ fwd_word(1L, bigword, TRUE); if (curwin->w_cursor.col == 0) decl(&curwin->w_cursor); else oneleft(); if (include) include_white = TRUE; } if (VIsual_active) { // should do something when inclusive == FALSE ! VIsual = start_pos; redraw_curbuf_later(INVERTED); // update the inversion } else { oap->start = start_pos; oap->motion_type = MCHAR; } --count; } /* * When count is still > 0, extend with more objects. */ while (count > 0) { inclusive = TRUE; if (VIsual_active && LT_POS(curwin->w_cursor, VIsual)) { /* * In Visual mode, with cursor at start: move cursor back. */ if (decl(&curwin->w_cursor) == -1) return FAIL; if (include != (cls() != 0)) { if (bck_word(1L, bigword, TRUE) == FAIL) return FAIL; } else { if (bckend_word(1L, bigword, TRUE) == FAIL) return FAIL; (void)incl(&curwin->w_cursor); } } else { /* * Move cursor forward one word and/or white area. */ if (incl(&curwin->w_cursor) == -1) return FAIL; if (include != (cls() == 0)) { if (fwd_word(1L, bigword, TRUE) == FAIL && count > 1) return FAIL; /* * If end is just past a new-line, we don't want to include * the first character on the line. * Put cursor on last char of white. */ if (oneleft() == FAIL) inclusive = FALSE; } else { if (end_word(1L, bigword, TRUE, TRUE) == FAIL) return FAIL; } } --count; } if (include_white && (cls() != 0 || (curwin->w_cursor.col == 0 && !inclusive))) { /* * If we don't include white space at the end, move the start * to include some white space there. This makes "daw" work * better on the last word in a sentence (and "2daw" on last-but-one * word). Also when "2daw" deletes "word." at the end of the line * (cursor is at start of next line). * But don't delete white space at start of line (indent). */ pos = curwin->w_cursor; // save cursor position curwin->w_cursor = start_pos; if (oneleft() == OK) { back_in_line(); if (cls() == 0 && curwin->w_cursor.col > 0) { if (VIsual_active) VIsual = curwin->w_cursor; else oap->start = curwin->w_cursor; } } curwin->w_cursor = pos; // put cursor back at end } if (VIsual_active) { if (*p_sel == 'e' && inclusive && LTOREQ_POS(VIsual, curwin->w_cursor)) inc_cursor(); if (VIsual_mode == 'V') { VIsual_mode = 'v'; redraw_cmdline = TRUE; // show mode later } } else oap->inclusive = inclusive; return OK; }
Safe
[ "CWE-126", "CWE-787" ]
vim
53a70289c2712808e6d4e88927e03cac01b470dd
2.2705652557121054e+38
168
patch 8.2.4925: trailing backslash may cause reading past end of line Problem: Trailing backslash may cause reading past end of line. Solution: Check for NUL after backslash.
0
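The bug class in the vim patch above ("trailing backslash may cause reading past end of line") arises when a scanner sees '\\' and unconditionally consumes the following character. A generic sketch of the safe pattern, not vim's code:

```c
#include <stddef.h>

/* Walk an escaped string, stopping safely even if it ends with a lone
 * backslash.  Sketch only. */
static size_t scan_escaped(const char *p)
{
    size_t n = 0;
    while (p[n] != '\0') {
        if (p[n] == '\\') {
            if (p[n + 1] == '\0')  /* check for NUL right after the backslash */
                break;             /* do not step past the terminator */
            n++;                   /* consume the escaped character */
        }
        n++;
    }
    return n;
}
```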
TEST(IndexBoundsBuilderTest, ExistsFalse) { auto testIndex = buildSimpleIndexEntry(); BSONObj obj = fromjson("{a: {$exists: false}}"); auto expr = parseMatchExpression(obj); BSONElement elt = obj.firstElement(); OrderedIntervalList oil; IndexBoundsBuilder::BoundsTightness tightness; IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness); ASSERT_EQUALS(oil.name, "a"); ASSERT_EQUALS(oil.intervals.size(), 1U); ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(Interval(fromjson("{'': null, '': null}"), true, true))); ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH); }
Safe
[ "CWE-754" ]
mongo
f8f55e1825ee5c7bdb3208fc7c5b54321d172732
3.0997911452154116e+38
14
SERVER-44377 generate correct plan for indexed inequalities to null
0
static void tight_pack24(VncState *vs, uint8_t *buf, size_t count, size_t *ret) { uint32_t *buf32; uint32_t pix; int rshift, gshift, bshift; buf32 = (uint32_t *)buf; if (1 /* FIXME: (vs->clientds.flags & QEMU_BIG_ENDIAN_FLAG) == (vs->ds->surface->flags & QEMU_BIG_ENDIAN_FLAG) */) { rshift = vs->client_pf.rshift; gshift = vs->client_pf.gshift; bshift = vs->client_pf.bshift; } else { rshift = 24 - vs->client_pf.rshift; gshift = 24 - vs->client_pf.gshift; bshift = 24 - vs->client_pf.bshift; } if (ret) { *ret = count * 3; } while (count--) { pix = *buf32++; *buf++ = (char)(pix >> rshift); *buf++ = (char)(pix >> gshift); *buf++ = (char)(pix >> bshift); } }
Safe
[ "CWE-125" ]
qemu
9f64916da20eea67121d544698676295bbb105a7
1.1362062014808283e+38
30
pixman/vnc: use pixman images in vnc. The vnc code uses *three* DisplaySurfaces: First is the surface of the actual QemuConsole, usually the guest screen, but could also be a text console (monitor/serial reachable via Ctrl-Alt-<nr> keys). This is left as-is. Second is the current server's view of the screen content. The vnc code uses this to figure which parts of the guest screen did _really_ change to reduce the amount of updates sent to the vnc clients. It is also used as data source when sending out the updates to the clients. This surface gets replaced by a pixman image. The format changes too, instead of using the guest screen format we'll use fixed 32bit rgb framebuffer and convert the pixels on the fly when comparing and updating the server framebuffer. Third surface carries the format expected by the vnc client. That isn't used to store image data. This surface is switched to PixelFormat and a boolean for bigendian byte order. Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
0
TEST_P(RBACIntegrationTest, PathWithQueryAndFragment) { config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); const std::vector<std::string> paths{"/allow", "/allow?p1=v1&p2=v2", "/allow?p1=v1#seg"}; for (const auto& path : paths) { auto response = codec_client_->makeRequestWithBody( Http::TestRequestHeaderMapImpl{ {":method", "POST"}, {":path", path}, {":scheme", "http"}, {":authority", "host"}, {"x-forwarded-for", "10.0.0.1"}, }, 1024); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); } }
Safe
[]
envoy
2c60632d41555ec8b3d9ef5246242be637a2db0f
2.326884778329352e+38
26
http: header map security fixes for duplicate headers (#197) Previously header matching did not match on all headers for non-inline headers. This patch changes the default behavior to always logically match on all headers. Multiple individual headers will be logically concatenated with ',' similar to what is done with inline headers. This makes the behavior effectively consistent. This behavior can be temporary reverted by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to "false". Targeted fixes have been additionally performed on the following extensions which make them consider all duplicate headers by default as a comma concatenated list: 1) Any extension using CEL matching on headers. 2) The header to metadata filter. 3) The JWT filter. 4) The Lua filter. Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to false. Finally, the setCopy() header map API previously only set the first header in the case of duplicate non-inline headers. setCopy() now behaves similiarly to the other set*() APIs and replaces all found headers with a single value. This may have had security implications in the extauth filter which uses this API. This behavior can be disabled by setting the runtime value "envoy.reloadable_features.http_set_copy_replace_all_headers" to false. Fixes https://github.com/envoyproxy/envoy-setec/issues/188 Signed-off-by: Matt Klein <mklein@lyft.com>
0
ico_get_byte_from_data (const guint8 *data, gint line_width, gint byte) { gint line; gint width32; gint offset; /* width per line in multiples of 32 bits */ width32 = (line_width % 4 == 0 ? line_width / 4 : line_width / 4 + 1); line = byte / line_width; offset = byte % line_width; return data[line * width32 * 4 + offset]; }
Safe
[]
gimp
323ecb73f7bf36788fb7066eb2d6678830cd5de7
2.4860263052901123e+38
15
Bug 773233 - CVE-2007-3126 - Gimp 2.3.14 allows context-dependent attackers... ...to cause a denial of service (crash) via an ICO file with an InfoHeader containing a Height of zero Add some error handling to ico-load.c and bail out on zero width or height icons. Also some formatting cleanup. (cherry picked from commit 46bcd82800e37b0f5aead76184430ef2fe802748)
0
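The GIMP fix above bails out on icons whose InfoHeader declares a zero width or height before those values feed any size computation or allocation. A hedged, generic sketch (not the plug-in code; the sanity cap is an arbitrary value for the example):

```c
#include <stdint.h>
#include <stdlib.h>

/* Validate header dimensions before computing buffer sizes.  A zero (or
 * absurdly large) dimension must be rejected up front, otherwise later
 * width*height arithmetic produces empty or overflowing allocations. */
static uint8_t *alloc_icon_pixels(uint32_t width, uint32_t height)
{
    if (width == 0 || height == 0)
        return NULL;                      /* the check added by the fix */
    if (width > 16384 || height > 16384)  /* sanity cap for this sketch */
        return NULL;
    return calloc((size_t)width * height, 4);  /* RGBA */
}
```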
static int StreamTcpPacketStateSynSent(ThreadVars *tv, Packet *p, StreamTcpThread *stt, TcpSession *ssn, PacketQueue *pq) { if (ssn == NULL) return -1; SCLogDebug("ssn %p: pkt received: %s", ssn, PKT_IS_TOCLIENT(p) ? "toclient":"toserver"); /* check for bad responses */ if (StateSynSentValidateTimestamp(ssn, p) == false) return -1; /* RST */ if (p->tcph->th_flags & TH_RST) { if (!StreamTcpValidateRst(ssn, p)) return -1; if (PKT_IS_TOSERVER(p)) { if (SEQ_EQ(TCP_GET_SEQ(p), ssn->client.isn) && SEQ_EQ(TCP_GET_WINDOW(p), 0) && SEQ_EQ(TCP_GET_ACK(p), (ssn->client.isn + 1))) { SCLogDebug("ssn->server.flags |= STREAMTCP_STREAM_FLAG_RST_RECV"); ssn->server.flags |= STREAMTCP_STREAM_FLAG_RST_RECV; StreamTcpPacketSetState(p, ssn, TCP_CLOSED); SCLogDebug("ssn %p: Reset received and state changed to " "TCP_CLOSED", ssn); } } else { ssn->client.flags |= STREAMTCP_STREAM_FLAG_RST_RECV; SCLogDebug("ssn->client.flags |= STREAMTCP_STREAM_FLAG_RST_RECV"); StreamTcpPacketSetState(p, ssn, TCP_CLOSED); SCLogDebug("ssn %p: Reset received and state changed to " "TCP_CLOSED", ssn); } /* FIN */ } else if (p->tcph->th_flags & TH_FIN) { /** \todo */ /* SYN/ACK */ } else if ((p->tcph->th_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { if ((ssn->flags & STREAMTCP_FLAG_4WHS) && PKT_IS_TOSERVER(p)) { SCLogDebug("ssn %p: SYN/ACK received on 4WHS session", ssn); /* Check if the SYN/ACK packet ack's the earlier * received SYN packet. */ if (!(SEQ_EQ(TCP_GET_ACK(p), ssn->server.isn + 1))) { StreamTcpSetEvent(p, STREAM_4WHS_SYNACK_WITH_WRONG_ACK); SCLogDebug("ssn %p: 4WHS ACK mismatch, packet ACK %"PRIu32"" " != %" PRIu32 " from stream", ssn, TCP_GET_ACK(p), ssn->server.isn + 1); return -1; } /* Check if the SYN/ACK packet SEQ's the *FIRST* received SYN * packet. */ if (!(SEQ_EQ(TCP_GET_SEQ(p), ssn->client.isn))) { StreamTcpSetEvent(p, STREAM_4WHS_SYNACK_WITH_WRONG_SYN); SCLogDebug("ssn %p: 4WHS SEQ mismatch, packet SEQ %"PRIu32"" " != %" PRIu32 " from *first* SYN pkt", ssn, TCP_GET_SEQ(p), ssn->client.isn); return -1; } /* update state */ StreamTcpPacketSetState(p, ssn, TCP_SYN_RECV); SCLogDebug("ssn %p: =~ 4WHS ssn state is now TCP_SYN_RECV", ssn); /* sequence number & window */ ssn->client.isn = TCP_GET_SEQ(p); STREAMTCP_SET_RA_BASE_SEQ(&ssn->client, ssn->client.isn); ssn->client.next_seq = ssn->client.isn + 1; ssn->server.window = TCP_GET_WINDOW(p); SCLogDebug("ssn %p: 4WHS window %" PRIu32 "", ssn, ssn->client.window); /* Set the timestamp values used to validate the timestamp of * received packets. 
*/ if ((TCP_HAS_TS(p)) && (ssn->server.flags & STREAMTCP_STREAM_FLAG_TIMESTAMP)) { ssn->client.last_ts = TCP_GET_TSVAL(p); SCLogDebug("ssn %p: 4WHS ssn->client.last_ts %" PRIu32" " "ssn->server.last_ts %" PRIu32"", ssn, ssn->client.last_ts, ssn->server.last_ts); ssn->flags |= STREAMTCP_FLAG_TIMESTAMP; ssn->client.last_pkt_ts = p->ts.tv_sec; if (ssn->client.last_ts == 0) ssn->client.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; } else { ssn->server.last_ts = 0; ssn->client.last_ts = 0; ssn->server.flags &= ~STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; } ssn->server.last_ack = TCP_GET_ACK(p); ssn->client.last_ack = ssn->client.isn + 1; /** check for the presense of the ws ptr to determine if we * support wscale at all */ if ((ssn->flags & STREAMTCP_FLAG_SERVER_WSCALE) && (TCP_HAS_WSCALE(p))) { ssn->server.wscale = TCP_GET_WSCALE(p); } else { ssn->server.wscale = 0; } if ((ssn->flags & STREAMTCP_FLAG_CLIENT_SACKOK) && TCP_GET_SACKOK(p) == 1) { ssn->flags |= STREAMTCP_FLAG_SACKOK; SCLogDebug("ssn %p: SACK permitted for 4WHS session", ssn); } ssn->client.next_win = ssn->client.last_ack + ssn->client.window; ssn->server.next_win = ssn->server.last_ack + ssn->server.window; SCLogDebug("ssn %p: 4WHS ssn->client.next_win %" PRIu32 "", ssn, ssn->client.next_win); SCLogDebug("ssn %p: 4WHS ssn->server.next_win %" PRIu32 "", ssn, ssn->server.next_win); SCLogDebug("ssn %p: 4WHS ssn->client.isn %" PRIu32 ", " "ssn->client.next_seq %" PRIu32 ", " "ssn->client.last_ack %" PRIu32 " " "(ssn->server.last_ack %" PRIu32 ")", ssn, ssn->client.isn, ssn->client.next_seq, ssn->client.last_ack, ssn->server.last_ack); /* done here */ return 0; } if (PKT_IS_TOSERVER(p)) { StreamTcpSetEvent(p, STREAM_3WHS_SYNACK_IN_WRONG_DIRECTION); SCLogDebug("ssn %p: SYN/ACK received in the wrong direction", ssn); return -1; } if (!(TCP_HAS_TFO(p) || (ssn->flags & STREAMTCP_FLAG_TCP_FAST_OPEN))) { /* Check if the SYN/ACK packet ack's the earlier * received SYN packet. */ if (!(SEQ_EQ(TCP_GET_ACK(p), ssn->client.isn + 1))) { StreamTcpSetEvent(p, STREAM_3WHS_SYNACK_WITH_WRONG_ACK); SCLogDebug("ssn %p: ACK mismatch, packet ACK %" PRIu32 " != " "%" PRIu32 " from stream", ssn, TCP_GET_ACK(p), ssn->client.isn + 1); return -1; } } else { if (!(SEQ_EQ(TCP_GET_ACK(p), ssn->client.next_seq))) { StreamTcpSetEvent(p, STREAM_3WHS_SYNACK_WITH_WRONG_ACK); SCLogDebug("ssn %p: (TFO) ACK mismatch, packet ACK %" PRIu32 " != " "%" PRIu32 " from stream", ssn, TCP_GET_ACK(p), ssn->client.next_seq); return -1; } SCLogDebug("ssn %p: (TFO) ACK match, packet ACK %" PRIu32 " == " "%" PRIu32 " from stream", ssn, TCP_GET_ACK(p), ssn->client.next_seq); ssn->flags |= STREAMTCP_FLAG_TCP_FAST_OPEN; StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED); } StreamTcp3whsSynAckUpdate(ssn, p, /* no queue override */NULL); } else if (p->tcph->th_flags & TH_SYN) { SCLogDebug("ssn %p: SYN packet on state SYN_SENT... resent", ssn); if (ssn->flags & STREAMTCP_FLAG_4WHS) { SCLogDebug("ssn %p: SYN packet on state SYN_SENT... resent of " "4WHS SYN", ssn); } if (PKT_IS_TOCLIENT(p)) { /** a SYN only packet in the opposite direction could be: * http://www.breakingpointsystems.com/community/blog/tcp- * portals-the-three-way-handshake-is-a-lie * * \todo improve resetting the session */ /* indicate that we're dealing with 4WHS here */ ssn->flags |= STREAMTCP_FLAG_4WHS; SCLogDebug("ssn %p: STREAMTCP_FLAG_4WHS flag set", ssn); /* set the sequence numbers and window for server * We leave the ssn->client.isn in place as we will * check the SYN/ACK pkt with that. 
*/ ssn->server.isn = TCP_GET_SEQ(p); STREAMTCP_SET_RA_BASE_SEQ(&ssn->server, ssn->server.isn); ssn->server.next_seq = ssn->server.isn + 1; /* Set the stream timestamp value, if packet has timestamp * option enabled. */ if (TCP_HAS_TS(p)) { ssn->server.last_ts = TCP_GET_TSVAL(p); SCLogDebug("ssn %p: %02x", ssn, ssn->server.last_ts); if (ssn->server.last_ts == 0) ssn->server.flags |= STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; ssn->server.last_pkt_ts = p->ts.tv_sec; ssn->server.flags |= STREAMTCP_STREAM_FLAG_TIMESTAMP; } ssn->server.window = TCP_GET_WINDOW(p); if (TCP_HAS_WSCALE(p)) { ssn->flags |= STREAMTCP_FLAG_SERVER_WSCALE; ssn->server.wscale = TCP_GET_WSCALE(p); } else { ssn->flags &= ~STREAMTCP_FLAG_SERVER_WSCALE; ssn->server.wscale = 0; } if (TCP_GET_SACKOK(p) == 1) { ssn->flags |= STREAMTCP_FLAG_CLIENT_SACKOK; } else { ssn->flags &= ~STREAMTCP_FLAG_CLIENT_SACKOK; } SCLogDebug("ssn %p: 4WHS ssn->server.isn %" PRIu32 ", " "ssn->server.next_seq %" PRIu32 ", " "ssn->server.last_ack %"PRIu32"", ssn, ssn->server.isn, ssn->server.next_seq, ssn->server.last_ack); SCLogDebug("ssn %p: 4WHS ssn->client.isn %" PRIu32 ", " "ssn->client.next_seq %" PRIu32 ", " "ssn->client.last_ack %"PRIu32"", ssn, ssn->client.isn, ssn->client.next_seq, ssn->client.last_ack); } /** \todo check if it's correct or set event */ } else if (p->tcph->th_flags & TH_ACK) { /* Handle the asynchronous stream, when we receive a SYN packet and now istead of receving a SYN/ACK we receive a ACK from the same host, which sent the SYN, this suggests the ASNYC streams.*/ if (stream_config.async_oneside == FALSE) return 0; /* we are in AYNC (one side) mode now. */ /* one side async means we won't see a SYN/ACK, so we can * only check the SYN. */ if (!(SEQ_EQ(TCP_GET_SEQ(p), ssn->client.next_seq))) { StreamTcpSetEvent(p, STREAM_3WHS_ASYNC_WRONG_SEQ); SCLogDebug("ssn %p: SEQ mismatch, packet SEQ %" PRIu32 " != " "%" PRIu32 " from stream",ssn, TCP_GET_SEQ(p), ssn->client.next_seq); return -1; } ssn->flags |= STREAMTCP_FLAG_ASYNC; StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED); SCLogDebug("ssn %p: =~ ssn state is now TCP_ESTABLISHED", ssn); ssn->client.window = TCP_GET_WINDOW(p); ssn->client.last_ack = TCP_GET_SEQ(p); ssn->client.next_win = ssn->client.last_ack + ssn->client.window; /* Set the server side parameters */ ssn->server.isn = TCP_GET_ACK(p) - 1; STREAMTCP_SET_RA_BASE_SEQ(&ssn->server, ssn->server.isn); ssn->server.next_seq = ssn->server.isn + 1; ssn->server.last_ack = ssn->server.next_seq; ssn->server.next_win = ssn->server.last_ack; SCLogDebug("ssn %p: synsent => Asynchronous stream, packet SEQ" " %" PRIu32 ", payload size %" PRIu32 " (%" PRIu32 "), " "ssn->client.next_seq %" PRIu32 "" ,ssn, TCP_GET_SEQ(p), p->payload_len, TCP_GET_SEQ(p) + p->payload_len, ssn->client.next_seq); /* if SYN had wscale, assume it to be supported. Otherwise * we know it not to be supported. 
*/ if (ssn->flags & STREAMTCP_FLAG_SERVER_WSCALE) { ssn->client.wscale = TCP_WSCALE_MAX; } /* Set the timestamp values used to validate the timestamp of * received packets.*/ if (TCP_HAS_TS(p) && (ssn->client.flags & STREAMTCP_STREAM_FLAG_TIMESTAMP)) { ssn->flags |= STREAMTCP_FLAG_TIMESTAMP; ssn->client.flags &= ~STREAMTCP_STREAM_FLAG_TIMESTAMP; ssn->client.last_pkt_ts = p->ts.tv_sec; } else { ssn->client.last_ts = 0; ssn->client.flags &= ~STREAMTCP_STREAM_FLAG_ZERO_TIMESTAMP; } if (ssn->flags & STREAMTCP_FLAG_CLIENT_SACKOK) { ssn->flags |= STREAMTCP_FLAG_SACKOK; } StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn, &ssn->client, p, pq); } else { SCLogDebug("ssn %p: default case", ssn); } return 0; }
Safe
[ "CWE-436", "CWE-94" ]
suricata
fa692df37a796c3330c81988d15ef1a219afc006
2.594454695784805e+38
311
stream: reject broken ACK packets Fix evasion possibility by rejecting packets with a broken ACK field. These packets have a non-0 ACK field, but do not have an ACK flag set. Bug #3324. Reported-by: Nicolas Adba
0
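The evasion described in the suricata record relies on packets carrying a non-zero acknowledgement number without the ACK flag set; the fix rejects them. A small self-contained sketch of that validation, with the TCP header reduced to the two fields that matter (not suricata's code):

```c
#include <stdbool.h>
#include <stdint.h>

#define TH_ACK 0x10   /* standard TCP ACK flag bit */

struct tcp_hdr_view {
    uint8_t  th_flags;  /* TCP flag bits */
    uint32_t th_ack;    /* acknowledgement number, host order */
};

/* A packet with a non-zero ACK value but no ACK flag is malformed; accepting
 * it lets an attacker desynchronise stream tracking from the real endpoint. */
static bool tcp_ack_field_is_valid(const struct tcp_hdr_view *th)
{
    if (th->th_ack != 0 && !(th->th_flags & TH_ACK))
        return false;
    return true;
}
```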
GF_Err esds_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e=GF_OK; u32 descSize; GF_ESDBox *ptr = (GF_ESDBox *)s; descSize = (u32) (ptr->size); if (descSize) { char *enc_desc = (char*)gf_malloc(sizeof(char) * descSize); if (!enc_desc) return GF_OUT_OF_MEM; //get the payload gf_bs_read_data(bs, enc_desc, descSize); //send it to the OD Codec e = gf_odf_desc_read(enc_desc, descSize, (GF_Descriptor **) &ptr->desc); //OK, free our desc gf_free(enc_desc); if (ptr->desc && (ptr->desc->tag!=GF_ODF_ESD_TAG) ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid descriptor tag 0x%x in esds\n", ptr->desc->tag)); gf_odf_desc_del((GF_Descriptor*)ptr->desc); ptr->desc=NULL; return GF_ISOM_INVALID_FILE; } if (e) { ptr->desc = NULL; } else { /*fix broken files*/ if (ptr->desc && !ptr->desc->URLString) { if (!ptr->desc->slConfig) { ptr->desc->slConfig = (GF_SLConfig *) gf_odf_desc_new(GF_ODF_SLC_TAG); ptr->desc->slConfig->predefined = SLPredef_MP4; } else if (ptr->desc->slConfig->predefined != SLPredef_MP4) { ptr->desc->slConfig->predefined = SLPredef_MP4; gf_odf_slc_set_pref(ptr->desc->slConfig); } } } } return e; }
Safe
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
1.645248466175278e+38
42
fixed #1587
0
do_source_buffer_init(source_cookie_T *sp, exarg_T *eap) { linenr_T curr_lnum; char_u *line = NULL; char_u *fname; CLEAR_FIELD(*sp); if (curbuf == NULL) return NULL; // Use ":source buffer=<num>" as the script name vim_snprintf((char *)IObuff, IOSIZE, ":source buffer=%d", curbuf->b_fnum); fname = vim_strsave(IObuff); if (fname == NULL) return NULL; ga_init2(&sp->buflines, sizeof(char_u *), 100); // Copy the lines from the buffer into a grow array for (curr_lnum = eap->line1; curr_lnum <= eap->line2; curr_lnum++) { line = vim_strsave(ml_get(curr_lnum)); if (line == NULL) goto errret; if (ga_add_string(&sp->buflines, line) == FAIL) goto errret; line = NULL; } sp->buf_lnum = 0; sp->source_from_buf = TRUE; return fname; errret: vim_free(fname); vim_free(line); ga_clear_strings(&sp->buflines); return NULL; }
Safe
[ "CWE-122" ]
vim
2bdad6126778f907c0b98002bfebf0e611a3f5db
7.230483056962917e+37
40
patch 8.2.4647: "source" can read past end of copied line Problem: "source" can read past end of copied line. Solution: Add a terminating NUL.
0
SPL_METHOD(SplFileObject, fgets) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_filesystem_file_read(intern, 0 TSRMLS_CC) == FAILURE) { RETURN_FALSE; } RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } /* }}} */
Vulnerable
[ "CWE-190" ]
php-src
7245bff300d3fa8bacbef7897ff080a6f1c23eba
1.5241983072465042e+38
13
Fix bug #72262 - do not overflow int
1
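"Do not overflow int" fixes of the kind referenced in the last record typically guard an addition or multiplication on untrusted lengths before the result feeds an int-typed API. A generic C sketch of such a guard (not PHP's code; the helper name is mine):

```c
#include <limits.h>
#include <stddef.h>

/* Combine two user-controlled lengths into an int-sized total, refusing the
 * operation instead of wrapping around.  Sketch only. */
static int checked_total_len(size_t a, size_t b, int *out)
{
    if (a > (size_t)INT_MAX || b > (size_t)INT_MAX - a)
        return -1;          /* would overflow int */
    *out = (int)(a + b);
    return 0;
}
```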