Dataset columns (name : dtype):
func : string
target : string
cwe : list
project : string
commit_id : string
hash : string
size : int64
message : string
vul : int64
void md_reload_sb(struct mddev *mddev) { struct md_rdev *rdev, *tmp; rdev_for_each_safe(rdev, tmp, mddev) { rdev->sb_loaded = 0; ClearPageUptodate(rdev->sb_page); } mddev->raid_disks = 0; analyze_sbs(mddev); rdev_for_each_safe(rdev, tmp, mddev) { struct mdp_superblock_1 *sb = page_address(rdev->sb_page); /* since we don't write to faulty devices, we figure out if the * disk is faulty by comparing events */ if (mddev->events > sb->events) set_bit(Faulty, &rdev->flags); } }
Safe
[ "CWE-200" ]
linux
b6878d9e03043695dbf3fa1caa6dfc09db225b16
8.897399363686966e+37
20
md: use kzalloc() when bitmap is disabled In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a mdu_bitmap_file_t called "file". 5769 file = kmalloc(sizeof(*file), GFP_NOIO); 5770 if (!file) 5771 return -ENOMEM; This structure is copied to user space at the end of the function. 5786 if (err == 0 && 5787 copy_to_user(arg, file, sizeof(*file))) 5788 err = -EFAULT But if bitmap is disabled only the first byte of "file" is initialized with zero, so it's possible to read some bytes (up to 4095) of kernel space memory from user space. This is an information leak. 5775 /* bitmap disabled, zero the first byte and copy out */ 5776 if (!mddev->bitmap_info.file) 5777 file->pathname[0] = '\0'; Signed-off-by: Benjamin Randazzo <benjamin@randazzo.fr> Signed-off-by: NeilBrown <neilb@suse.com>
0
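The commit message above walks through the kmalloc()-based information leak: only the first byte of the structure is cleared before the whole thing is copied to user space. Below is a minimal user-space sketch of that pattern and of the kzalloc()-style fix; the struct name is hypothetical and calloc() stands in for kzalloc().

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for mdu_bitmap_file_t */
struct bitmap_file {
    char pathname[4096];
};

int main(void)
{
    /* buggy pattern: malloc() leaves pathname[1..4095] uninitialized, yet
     * the whole structure is later copied out, leaking heap contents. */
    struct bitmap_file *leaky = malloc(sizeof(*leaky));
    if (!leaky)
        return 1;
    leaky->pathname[0] = '\0';          /* only the first byte is cleared */

    /* fixed pattern: zero the whole allocation up front; calloc() is the
     * user-space analogue of switching kmalloc() to kzalloc(). */
    struct bitmap_file *safe = calloc(1, sizeof(*safe));
    if (!safe) {
        free(leaky);
        return 1;
    }

    printf("structure size copied out: %zu bytes\n", sizeof(*safe));
    free(leaky);
    free(safe);
    return 0;
}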
void sdhci_common_unrealize(SDHCIState *s) { /* This function is expected to be called only once for each class: * - SysBus: via DeviceClass->unrealize(), * - PCI: via PCIDeviceClass->exit(). * However to avoid double-free and/or use-after-free we still nullify * this variable (better safe than sorry!). */ g_free(s->fifo_buffer); s->fifo_buffer = NULL; }
Safe
[ "CWE-119" ]
qemu
dfba99f17feb6d4a129da19d38df1bcd8579d1c3
1.2146504456613574e+38
10
hw/sd/sdhci: Fix DMA Transfer Block Size field The 'Transfer Block Size' field is 12-bit wide. See section '2.2.2. Block Size Register (Offset 004h)' in datasheet. Two different bug reproducer available: - https://bugs.launchpad.net/qemu/+bug/1892960 - https://ruhr-uni-bochum.sciebo.de/s/NNWP2GfwzYKeKwE?path=%2Fsdhci_oob_write1 Cc: qemu-stable@nongnu.org Buglink: https://bugs.launchpad.net/qemu/+bug/1892960 Fixes: d7dfca0807a ("hw/sdhci: introduce standard SD host controller") Reported-by: Alexander Bulekov <alxndr@bu.edu> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org> Tested-by: Alexander Bulekov <alxndr@bu.edu> Message-Id: <20200901140411.112150-3-f4bug@amsat.org>
0
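The message above notes that the Transfer Block Size field is only 12 bits wide, so wider guest writes must not reach the DMA path. A small sketch of the general defence, masking a register write to the documented field width, follows; the macro and function names are assumptions, not the actual QEMU code.

#include <stdint.h>
#include <stdio.h>

#define BLKSIZE_FIELD_MASK 0x0fffu      /* 12-bit Transfer Block Size field */

/* Clamp a guest-supplied block-size register write to the documented width
 * so an oversized value can never drive the DMA transfer length. */
static uint16_t write_blksize(uint16_t value)
{
    return (uint16_t)(value & BLKSIZE_FIELD_MASK);
}

int main(void)
{
    printf("0x1234 -> 0x%04x\n", (unsigned)write_blksize(0x1234)); /* 0x0234 */
    printf("0x0fff -> 0x%04x\n", (unsigned)write_blksize(0x0fff)); /* kept */
    return 0;
}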
run_simple_dialog (CommonJob *job, gboolean ignore_close_box, GtkMessageType message_type, char *primary_text, char *secondary_text, const char *details_text, ...) { va_list varargs; int res; va_start (varargs, details_text); res = run_simple_dialog_va (job, ignore_close_box, message_type, primary_text, secondary_text, details_text, varargs); va_end (varargs); return res; }
Safe
[]
nautilus
ca2fd475297946f163c32dcea897f25da892b89d
5.250517340764272e+37
22
Add nautilus_file_mark_desktop_file_trusted(), this now adds a #! line if 2009-02-24 Alexander Larsson <alexl@redhat.com> * libnautilus-private/nautilus-file-operations.c: * libnautilus-private/nautilus-file-operations.h: Add nautilus_file_mark_desktop_file_trusted(), this now adds a #! line if there is none as well as makes the file executable. * libnautilus-private/nautilus-mime-actions.c: Use nautilus_file_mark_desktop_file_trusted() instead of just setting the permissions. svn path=/trunk/; revision=15006
0
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) { if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { kvm_inject_gp(vcpu, 0); return 1; } return kvm_skip_emulated_instruction(vcpu); }
Safe
[ "CWE-476" ]
linux
55749769fe608fa3f4a075e42e89d237c8e37637
1.0498043939792017e+38
10
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty When dirty ring logging is enabled, any dirty logging without an active vCPU context will cause a kernel oops. But we've already declared that the shared_info page doesn't get dirty tracking anyway, since it would be kind of insane to mark it dirty every time we deliver an event channel interrupt. Userspace is supposed to just assume it's always dirty any time a vCPU can run or event channels are routed. So stop using the generic kvm_write_wall_clock() and just write directly through the gfn_to_pfn_cache that we already have set up. We can make kvm_write_wall_clock() static in x86.c again now, but let's not remove the 'sec_hi_ofs' argument even though it's not used yet. At some point we *will* want to use that for KVM guests too. Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region") Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Message-Id: <20211210163625.2886-6-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
static void clear_tables(JOIN *join) { /* must clear only the non-const tables, as const tables are not re-calculated. */ for (uint i= 0 ; i < join->table_count ; i++) { if (!(join->table[i]->map & join->const_table_map)) mark_as_null_row(join->table[i]); // All fields are NULL } }
Safe
[ "CWE-89" ]
server
5ba77222e9fe7af8ff403816b5338b18b342053c
3.3863991763637208e+38
12
MDEV-21028 Server crashes in Query_arena::set_query_arena upon SELECT from view if the view has algorithm=temptable it is not updatable, so DEFAULT() for its fields is meaningless, and thus it's NULL or 0/'' for NOT NULL columns.
0
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false); }
Safe
[ "CWE-476" ]
linux
e97f852fd4561e77721bb9a4e0ea9d98305b1e93
2.2974718721426707e+38
4
KVM: X86: Fix scan ioapic use-before-initialization Reported by syzkaller: BUG: unable to handle kernel NULL pointer dereference at 00000000000001c8 PGD 80000003ec4da067 P4D 80000003ec4da067 PUD 3f7bfa067 PMD 0 Oops: 0000 [#1] PREEMPT SMP PTI CPU: 7 PID: 5059 Comm: debug Tainted: G OE 4.19.0-rc5 #16 RIP: 0010:__lock_acquire+0x1a6/0x1990 Call Trace: lock_acquire+0xdb/0x210 _raw_spin_lock+0x38/0x70 kvm_ioapic_scan_entry+0x3e/0x110 [kvm] vcpu_enter_guest+0x167e/0x1910 [kvm] kvm_arch_vcpu_ioctl_run+0x35c/0x610 [kvm] kvm_vcpu_ioctl+0x3e9/0x6d0 [kvm] do_vfs_ioctl+0xa5/0x690 ksys_ioctl+0x6d/0x80 __x64_sys_ioctl+0x1a/0x20 do_syscall_64+0x83/0x6e0 entry_SYSCALL_64_after_hwframe+0x49/0xbe The reason is that the testcase writes hyperv synic HV_X64_MSR_SINT6 msr and triggers scan ioapic logic to load synic vectors into EOI exit bitmap. However, irqchip is not initialized by this simple testcase, ioapic/apic objects should not be accessed. This can be triggered by the following program: #define _GNU_SOURCE #include <endian.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/syscall.h> #include <sys/types.h> #include <unistd.h> uint64_t r[3] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}; int main(void) { syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0); long res = 0; memcpy((void*)0x20000040, "/dev/kvm", 9); res = syscall(__NR_openat, 0xffffffffffffff9c, 0x20000040, 0, 0); if (res != -1) r[0] = res; res = syscall(__NR_ioctl, r[0], 0xae01, 0); if (res != -1) r[1] = res; res = syscall(__NR_ioctl, r[1], 0xae41, 0); if (res != -1) r[2] = res; memcpy( (void*)0x20000080, "\x01\x00\x00\x00\x00\x5b\x61\xbb\x96\x00\x00\x40\x00\x00\x00\x00\x01\x00" "\x08\x00\x00\x00\x00\x00\x0b\x77\xd1\x78\x4d\xd8\x3a\xed\xb1\x5c\x2e\x43" "\xaa\x43\x39\xd6\xff\xf5\xf0\xa8\x98\xf2\x3e\x37\x29\x89\xde\x88\xc6\x33" "\xfc\x2a\xdb\xb7\xe1\x4c\xac\x28\x61\x7b\x9c\xa9\xbc\x0d\xa0\x63\xfe\xfe" "\xe8\x75\xde\xdd\x19\x38\xdc\x34\xf5\xec\x05\xfd\xeb\x5d\xed\x2e\xaf\x22" "\xfa\xab\xb7\xe4\x42\x67\xd0\xaf\x06\x1c\x6a\x35\x67\x10\x55\xcb", 106); syscall(__NR_ioctl, r[2], 0x4008ae89, 0x20000080); syscall(__NR_ioctl, r[2], 0xae80, 0); return 0; } This patch fixes it by bailing out scan ioapic if ioapic is not initialized in kernel. Reported-by: Wei Wu <ww9210@gmail.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Radim Krčmář <rkrcmar@redhat.com> Cc: Wei Wu <ww9210@gmail.com> Signed-off-by: Wanpeng Li <wanpengli@tencent.com> Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr, u32 *p, u32 cnt) { u32 offset = 0; int err = 0; u32 val; aq_hw_write_reg(self, 0x208, addr); for (; offset < cnt; ++offset) { aq_hw_write_reg(self, 0x20C, p[offset]); aq_hw_write_reg(self, 0x200, 0xC000); err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get, self, val, (val & 0x100) == 0U, 10U, 10000U); if (err < 0) break; } return err; }
Safe
[ "CWE-787" ]
net
b922f622592af76b57cbc566eaeccda0b31a3496
2.3328720743059736e+38
24
atlantic: Fix OOB read and write in hw_atl_utils_fw_rpc_wait This bug report shows up when running our research tools. The reports is SOOB read, but it seems SOOB write is also possible a few lines below. In details, fw.len and sw.len are inputs coming from io. A len over the size of self->rpc triggers SOOB. The patch fixes the bugs by adding sanity checks. The bugs are triggerable with compromised/malfunctioning devices. They are potentially exploitable given they first leak up to 0xffff bytes and able to overwrite the region later. The patch is tested with QEMU emulater. This is NOT tested with a real device. Attached is the log we found by fuzzing. BUG: KASAN: slab-out-of-bounds in hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic] Read of size 4 at addr ffff888016260b08 by task modprobe/213 CPU: 0 PID: 213 Comm: modprobe Not tainted 5.6.0 #1 Call Trace: dump_stack+0x76/0xa0 print_address_description.constprop.0+0x16/0x200 ? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic] ? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic] __kasan_report.cold+0x37/0x7c ? aq_hw_read_reg_bit+0x60/0x70 [atlantic] ? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic] kasan_report+0xe/0x20 hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic] hw_atl_utils_fw_rpc_call+0x95/0x130 [atlantic] hw_atl_utils_fw_rpc_wait+0x176/0x210 [atlantic] hw_atl_utils_mpi_create+0x229/0x2e0 [atlantic] ? hw_atl_utils_fw_rpc_wait+0x210/0x210 [atlantic] ? hw_atl_utils_initfw+0x9f/0x1c8 [atlantic] hw_atl_utils_initfw+0x12a/0x1c8 [atlantic] aq_nic_ndev_register+0x88/0x650 [atlantic] ? aq_nic_ndev_init+0x235/0x3c0 [atlantic] aq_pci_probe+0x731/0x9b0 [atlantic] ? aq_pci_func_init+0xc0/0xc0 [atlantic] local_pci_probe+0xd3/0x160 pci_device_probe+0x23f/0x3e0 Reported-by: Brendan Dolan-Gavitt <brendandg@nyu.edu> Signed-off-by: Zekun Shen <bruceshenzk@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
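The atlantic message says fw.len and sw.len come from the device and that a length larger than self->rpc triggers the out-of-bounds access, so the patch adds sanity checks. Below is a stand-alone sketch of checking an untrusted, device-reported length against the destination size before copying; the names are hypothetical, not the driver's code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reject a device-reported length that exceeds the destination buffer
 * before letting it drive a copy. */
static int copy_rpc(uint8_t *dst, size_t dst_size,
                    const uint8_t *src, size_t reported_len)
{
    if (reported_len > dst_size)
        return -1;                      /* untrusted length: refuse the copy */
    memcpy(dst, src, reported_len);
    return 0;
}

int main(void)
{
    uint8_t rpc[256];
    uint8_t device_data[512] = { 0 };

    if (copy_rpc(rpc, sizeof(rpc), device_data, 0xffff) < 0)
        printf("rejected device-reported length 0xffff\n");
    if (copy_rpc(rpc, sizeof(rpc), device_data, sizeof(device_data)) < 0)
        printf("rejected length 512 for a 256-byte buffer\n");
    if (copy_rpc(rpc, sizeof(rpc), device_data, 128) == 0)
        printf("copied 128 bytes\n");
    return 0;
}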
std::vector<FormWidgetSignature*> PDFDoc::getSignatureWidgets() { int num_pages = getNumPages(); FormPageWidgets *page_widgets = nullptr; std::vector<FormWidgetSignature*> widget_vector; for (int i = 1; i <= num_pages; i++) { Page *p = getCatalog()->getPage(i); if (p) { page_widgets = p->getFormWidgets(); for (int j = 0; page_widgets != nullptr && j < page_widgets->getNumWidgets(); j++) { if (page_widgets->getWidget(j)->getType() == formSignature) { widget_vector.push_back(static_cast<FormWidgetSignature*>(page_widgets->getWidget(j))); } } delete page_widgets; } } return widget_vector; }
Safe
[ "CWE-20" ]
poppler
9fd5ec0e6e5f763b190f2a55ceb5427cfe851d5f
2.6585633979987885e+38
20
PDFDoc::setup: Fix return value At that point xref can have gone wrong since extractPDFSubtype() can have caused a reconstruct that broke stuff so instead of unconditionally returning true, return xref->isOk() Fixes #706
0
static void tcp_ack_probe(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *head = tcp_send_head(sk); const struct tcp_sock *tp = tcp_sk(sk); /* Was it a usable window open? */ if (!head) return; if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { icsk->icsk_backoff = 0; inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); /* Socket must be waked up by subsequent tcp_data_snd_check(). * This function is not for random using! */ } else { unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX); tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX, NULL); } }
Safe
[ "CWE-190" ]
net
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
1.1967289910437722e+38
22
tcp: limit payload size of sacked skbs Jonathan Looney reported that TCP can trigger the following crash in tcp_shifted_skb() : BUG_ON(tcp_skb_pcount(skb) < pcount); This can happen if the remote peer has advertized the smallest MSS that linux TCP accepts : 48 An skb can hold 17 fragments, and each fragment can hold 32KB on x86, or 64KB on PowerPC. This means that the 16bit witdh of TCP_SKB_CB(skb)->tcp_gso_segs can overflow. Note that tcp_sendmsg() builds skbs with less than 64KB of payload, so this problem needs SACK to be enabled. SACK blocks allow TCP to coalesce multiple skbs in the retransmit queue, thus filling the 17 fragments to maximal capacity. CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Jonathan Looney <jtl@netflix.com> Acked-by: Neal Cardwell <ncardwell@google.com> Reviewed-by: Tyler Hicks <tyhicks@canonical.com> Cc: Yuchung Cheng <ycheng@google.com> Cc: Bruce Curtis <brucec@netflix.com> Cc: Jonathan Lemon <jonathan.lemon@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
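The tcp message describes TCP_SKB_CB(skb)->tcp_gso_segs, a 16-bit counter, overflowing when SACK processing coalesces many small segments into one skb. The sketch below shows the underlying u16-overflow hazard and one way to refuse an accumulation that would wrap; the names are hypothetical and this is not the kernel patch itself.

#include <stdint.h>
#include <stdio.h>

/* Accumulate segment counts into a 16-bit field, refusing any addition that
 * would wrap it, in the spirit of limiting how much payload a sacked skb
 * may absorb. */
static int add_gso_segs(uint16_t *gso_segs, unsigned int extra)
{
    if (extra > (unsigned int)UINT16_MAX - *gso_segs)
        return -1;                      /* would overflow the u16 counter */
    *gso_segs = (uint16_t)(*gso_segs + extra);
    return 0;
}

int main(void)
{
    uint16_t segs = 60000;

    if (add_gso_segs(&segs, 10000) < 0)
        printf("coalesce rejected, segs still %u\n", (unsigned)segs);
    if (add_gso_segs(&segs, 5000) == 0)
        printf("coalesce accepted, segs now %u\n", (unsigned)segs);
    return 0;
}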
static void extract_apparmor(pid_t pid) { if (checkcfg(CFG_APPARMOR)) { EUID_USER(); if (aa_is_enabled() == 1) { // get pid of next child process pid_t child; if (find_child(pid, &child) == 1) child = pid; // no child, proceed with current pid // get name of AppArmor profile char *fname; if (asprintf(&fname, "/proc/%d/attr/current", child) == -1) errExit("asprintf"); EUID_ROOT(); int fd = open(fname, O_RDONLY|O_CLOEXEC); EUID_USER(); free(fname); if (fd == -1) goto errexit; char buf[BUFLEN]; ssize_t rv = read(fd, buf, sizeof(buf) - 1); close(fd); if (rv < 0) goto errexit; buf[rv] = '\0'; // process confined by Firejail's AppArmor policy? if (strncmp(buf, "firejail-default", 16) == 0) arg_apparmor = 1; } EUID_ROOT(); } return; errexit: fprintf(stderr, "Error: cannot read /proc file\n"); exit(1); }
Vulnerable
[ "CWE-269", "CWE-94" ]
firejail
27cde3d7d1e4e16d4190932347c7151dc2a84c50
2.9517622702618335e+38
37
fixing CVE-2022-31214
1
Bool gf_filter_send_gf_event(GF_Filter *filter, GF_Event *evt) { return gf_filter_forward_gf_event(filter, evt, GF_FALSE, GF_FALSE); }
Safe
[ "CWE-787" ]
gpac
da37ec8582266983d0ec4b7550ec907401ec441e
2.5247159204135385e+38
4
fixed crashes for very long path - cf #1908
0
void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& diag = context->input(1); // MatrixSetDiag and MatrixSetDiagV2 both use this OpKernel. MatrixSetDiag // only has two inputs, so we have to check the number of inputs before // reading additional parameters in MatrixSetDiagV2. int32_t lower_diag_index = 0; int32_t upper_diag_index = 0; // MatrixSetDiagV2-specific. if (context->num_inputs() > kNumV1Inputs) { auto& diag_index = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(diag_index.shape()) || TensorShapeUtils::IsVector(diag_index.shape()), errors::InvalidArgument( "diag_index must be a scalar or vector, received shape: ", diag_index.shape().DebugString())); lower_diag_index = diag_index.flat<int32>()(0); upper_diag_index = lower_diag_index; if (TensorShapeUtils::IsVector(diag_index.shape())) { auto diag_index_size = diag_index.dim_size(0); OP_REQUIRES( context, 0 < diag_index_size && diag_index_size <= 2, errors::InvalidArgument( "diag_index must have only one or two elements, received ", diag_index_size, " elements.")); if (diag_index_size > 1) { upper_diag_index = diag_index.flat<int32>()(1); } } } const TensorShape& input_shape = input.shape(); const TensorShape& diag_shape = diag.shape(); const int input_rank = input_shape.dims(); // Preliminary validation of sizes. OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input_shape), errors::InvalidArgument( "input must be at least 2-dim, received shape: ", input.shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVectorOrHigher(diag_shape), errors::InvalidArgument( "diagonal must be at least 1-dim, received shape: ", diag_shape.DebugString())); // Make sure lower_diag_index and upper_diag_index is valid. const Eigen::Index num_rows = input_shape.dim_size(input_rank - 2); const Eigen::Index num_cols = input_shape.dim_size(input_rank - 1); OP_REQUIRES( // Checks lower_diag_index == 0 for when matrix shape = 0. context, (-num_rows < lower_diag_index && lower_diag_index < num_cols) || lower_diag_index == 0, errors::InvalidArgument( "lower_diag_index is out of bound: ", lower_diag_index, " It must be between ", -num_rows, " and ", num_cols)); OP_REQUIRES(context, (-num_rows < upper_diag_index && upper_diag_index < num_cols) || upper_diag_index == 0, errors::InvalidArgument( "upper_diag_index is out of bound: ", upper_diag_index, " It must be between ", -num_rows, " and ", num_cols)); OP_REQUIRES( context, lower_diag_index <= upper_diag_index, errors::InvalidArgument( "lower_diag_index must not be larger than upper_diag_index: ", lower_diag_index, " > ", upper_diag_index)); // Check if diag size is consistent with input. 
const Eigen::Index num_diags = upper_diag_index - lower_diag_index + 1; OP_REQUIRES( context, lower_diag_index == upper_diag_index || (diag_shape.dim_size(input_rank - 2) == num_diags), errors::InvalidArgument("The number of diagonals provided in `diag` " "is not consistent with `lower_diag_index` and " "`upper_diag_index`")); TensorShape expected_diag_shape = input_shape; expected_diag_shape.RemoveLastDims(2); if (num_diags > 1) expected_diag_shape.AddDim(num_diags); const int32_t max_diag_len = std::min(num_rows + std::min(upper_diag_index, 0), num_cols - std::max(lower_diag_index, 0)); expected_diag_shape.AddDim(max_diag_len); OP_REQUIRES( context, expected_diag_shape == diag_shape, errors::InvalidArgument( "Either first dimensions of diagonal don't match input.shape[:-2], " "or diagonal.shape[:-1] is not equal to the longests diagonal in " "range [lower_diag_index:upper_diag_index].\nInput shape: ", input_shape.DebugString(), "\nDiagonal shape: ", diag_shape.DebugString(), "\nExpected diagonal shape: ", expected_diag_shape.DebugString())); if (input.NumElements() == 0) { // This is a no-op. context->set_output(0, input); return; } auto input_reshaped = input.flat_inner_dims<T, 3>(); auto diag_reshaped = diag.flat<T>(); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, input_shape, &output)); auto output_reshaped = output->flat_inner_dims<T, 3>(); functor::MatrixSetDiag<Device, T>::Compute( context, context->eigen_device<Device>(), input_reshaped, diag_reshaped, output_reshaped, lower_diag_index, upper_diag_index, max_diag_len, left_align_superdiagonal_, left_align_subdiagonal_); }
Vulnerable
[ "CWE-476", "CWE-824" ]
tensorflow
ff8894044dfae5568ecbf2ed514c1a37dc394f1b
1.3871223104318878e+38
114
Add one missing valdiation to `matrix_set_diag_op.cc` PiperOrigin-RevId: 387923408 Change-Id: If6a97b9098c13879400f56c22f91555cdf0ce5d7
1
piv_cache_internal_data(sc_card_t *card, int enumtag) { piv_private_data_t * priv = PIV_DATA(card); const u8* tag; const u8* body; size_t taglen; size_t bodylen; int compressed = 0; /* if already cached */ if (priv->obj_cache[enumtag].internal_obj_data && priv->obj_cache[enumtag].internal_obj_len) { sc_log(card->ctx, "#%d found internal %p:%"SC_FORMAT_LEN_SIZE_T"u", enumtag, priv->obj_cache[enumtag].internal_obj_data, priv->obj_cache[enumtag].internal_obj_len); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } body = sc_asn1_find_tag(card->ctx, priv->obj_cache[enumtag].obj_data, priv->obj_cache[enumtag].obj_len, 0x53, &bodylen); if (body == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OBJECT_NOT_VALID); /* get the certificate out */ if (piv_objects[enumtag].flags & PIV_OBJECT_TYPE_CERT) { tag = sc_asn1_find_tag(card->ctx, body, bodylen, 0x71, &taglen); /* 800-72-1 not clear if this is 80 or 01 Sent comment to NIST for 800-72-2 */ /* 800-73-3 says it is 01, keep dual test so old cards still work */ if (tag && (((*tag) & 0x80) || ((*tag) & 0x01))) compressed = 1; tag = sc_asn1_find_tag(card->ctx, body, bodylen, 0x70, &taglen); if (tag == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OBJECT_NOT_VALID); if (taglen == 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_FILE_NOT_FOUND); if(compressed) { #ifdef ENABLE_ZLIB size_t len; u8* newBuf = NULL; if(SC_SUCCESS != sc_decompress_alloc(&newBuf, &len, tag, taglen, COMPRESSION_AUTO)) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OBJECT_NOT_VALID); priv->obj_cache[enumtag].internal_obj_data = newBuf; priv->obj_cache[enumtag].internal_obj_len = len; #else sc_log(card->ctx, "PIV compression not supported, no zlib"); LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); #endif } else { if (!(priv->obj_cache[enumtag].internal_obj_data = malloc(taglen))) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); memcpy(priv->obj_cache[enumtag].internal_obj_data, tag, taglen); priv->obj_cache[enumtag].internal_obj_len = taglen; } /* convert pub key to internal */ /* TODO: -DEE need to fix ... would only be used if we cache the pub key, but we don't today */ } else if (piv_objects[enumtag].flags & PIV_OBJECT_TYPE_PUBKEY) { tag = sc_asn1_find_tag(card->ctx, body, bodylen, *body, &taglen); if (tag == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OBJECT_NOT_VALID); if (taglen == 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_FILE_NOT_FOUND); if (!(priv->obj_cache[enumtag].internal_obj_data = malloc(taglen))) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); memcpy(priv->obj_cache[enumtag].internal_obj_data, tag, taglen); priv->obj_cache[enumtag].internal_obj_len = taglen; } else { LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL); } sc_log(card->ctx, "added #%d internal %p:%"SC_FORMAT_LEN_SIZE_T"u", enumtag, priv->obj_cache[enumtag].internal_obj_data, priv->obj_cache[enumtag].internal_obj_len); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); }
Safe
[ "CWE-125" ]
OpenSC
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
2.5895369036626426e+38
94
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
0
xdr_mpol_arg(XDR *xdrs, mpol_arg *objp) { if (!xdr_ui_4(xdrs, &objp->api_version)) { return (FALSE); } if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec, objp->api_version)) { return (FALSE); } if (!xdr_long(xdrs, &objp->mask)) { return (FALSE); } return (TRUE); }
Safe
[ "CWE-703" ]
krb5
a197e92349a4aa2141b5dff12e9dd44c2a2166e3
3.3874016629028338e+38
14
Fix kadm5/gssrpc XDR double free [CVE-2014-9421] [MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free partial deserialization results upon failure to deserialize. This responsibility belongs to the callers, svctcp_getargs() and svcudp_getargs(); doing it in the unwrap function results in freeing the results twice. In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers we are freeing, as other XDR functions such as xdr_bytes() and xdr_string(). ticket: 8056 (new) target_version: 1.13.1 tags: pullup
0
BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) /* * Returns 'ret' such that ret^2 == a (mod p), using the Tonelli/Shanks * algorithm (cf. Henri Cohen, "A Course in Algebraic Computational Number * Theory", algorithm 1.5.1). 'p' must be prime, otherwise an error or * an incorrect "result" will be returned. */ { BIGNUM *ret = in; int err = 1; int r; BIGNUM *A, *b, *q, *t, *x, *y; int e, i, j; if (!BN_is_odd(p) || BN_abs_is_word(p, 1)) { if (BN_abs_is_word(p, 2)) { if (ret == NULL) ret = BN_new(); if (ret == NULL) goto end; if (!BN_set_word(ret, BN_is_bit_set(a, 0))) { if (ret != in) BN_free(ret); return NULL; } bn_check_top(ret); return ret; } BNerr(BN_F_BN_MOD_SQRT, BN_R_P_IS_NOT_PRIME); return NULL; } if (BN_is_zero(a) || BN_is_one(a)) { if (ret == NULL) ret = BN_new(); if (ret == NULL) goto end; if (!BN_set_word(ret, BN_is_one(a))) { if (ret != in) BN_free(ret); return NULL; } bn_check_top(ret); return ret; } BN_CTX_start(ctx); A = BN_CTX_get(ctx); b = BN_CTX_get(ctx); q = BN_CTX_get(ctx); t = BN_CTX_get(ctx); x = BN_CTX_get(ctx); y = BN_CTX_get(ctx); if (y == NULL) goto end; if (ret == NULL) ret = BN_new(); if (ret == NULL) goto end; /* A = a mod p */ if (!BN_nnmod(A, a, p, ctx)) goto end; /* now write |p| - 1 as 2^e*q where q is odd */ e = 1; while (!BN_is_bit_set(p, e)) e++; /* we'll set q later (if needed) */ if (e == 1) { /*- * The easy case: (|p|-1)/2 is odd, so 2 has an inverse * modulo (|p|-1)/2, and square roots can be computed * directly by modular exponentiation. * We have * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2), * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1. */ if (!BN_rshift(q, p, 2)) goto end; q->neg = 0; if (!BN_add_word(q, 1)) goto end; if (!BN_mod_exp(ret, A, q, p, ctx)) goto end; err = 0; goto vrfy; } if (e == 2) { /*- * |p| == 5 (mod 8) * * In this case 2 is always a non-square since * Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime. * So if a really is a square, then 2*a is a non-square. * Thus for * b := (2*a)^((|p|-5)/8), * i := (2*a)*b^2 * we have * i^2 = (2*a)^((1 + (|p|-5)/4)*2) * = (2*a)^((p-1)/2) * = -1; * so if we set * x := a*b*(i-1), * then * x^2 = a^2 * b^2 * (i^2 - 2*i + 1) * = a^2 * b^2 * (-2*i) * = a*(-i)*(2*a*b^2) * = a*(-i)*i * = a. * * (This is due to A.O.L. Atkin, * Subject: Square Roots and Cognate Matters modulo p=8n+5. * URL: https://listserv.nodak.edu/cgi-bin/wa.exe?A2=ind9211&L=NMBRTHRY&P=4026 * November 1992.) */ /* t := 2*a */ if (!BN_mod_lshift1_quick(t, A, p)) goto end; /* b := (2*a)^((|p|-5)/8) */ if (!BN_rshift(q, p, 3)) goto end; q->neg = 0; if (!BN_mod_exp(b, t, q, p, ctx)) goto end; /* y := b^2 */ if (!BN_mod_sqr(y, b, p, ctx)) goto end; /* t := (2*a)*b^2 - 1 */ if (!BN_mod_mul(t, t, y, p, ctx)) goto end; if (!BN_sub_word(t, 1)) goto end; /* x = a*b*t */ if (!BN_mod_mul(x, A, b, p, ctx)) goto end; if (!BN_mod_mul(x, x, t, p, ctx)) goto end; if (!BN_copy(ret, x)) goto end; err = 0; goto vrfy; } /* * e > 2, so we really have to use the Tonelli/Shanks algorithm. First, * find some y that is not a square. */ if (!BN_copy(q, p)) goto end; /* use 'q' as temp */ q->neg = 0; i = 2; do { /* * For efficiency, try small numbers first; if this fails, try random * numbers. */ if (i < 22) { if (!BN_set_word(y, i)) goto end; } else { if (!BN_priv_rand(y, BN_num_bits(p), 0, 0)) goto end; if (BN_ucmp(y, p) >= 0) { if (!(p->neg ? 
BN_add : BN_sub) (y, y, p)) goto end; } /* now 0 <= y < |p| */ if (BN_is_zero(y)) if (!BN_set_word(y, i)) goto end; } r = BN_kronecker(y, q, ctx); /* here 'q' is |p| */ if (r < -1) goto end; if (r == 0) { /* m divides p */ BNerr(BN_F_BN_MOD_SQRT, BN_R_P_IS_NOT_PRIME); goto end; } } while (r == 1 && ++i < 82); if (r != -1) { /* * Many rounds and still no non-square -- this is more likely a bug * than just bad luck. Even if p is not prime, we should have found * some y such that r == -1. */ BNerr(BN_F_BN_MOD_SQRT, BN_R_TOO_MANY_ITERATIONS); goto end; } /* Here's our actual 'q': */ if (!BN_rshift(q, q, e)) goto end; /* * Now that we have some non-square, we can find an element of order 2^e * by computing its q'th power. */ if (!BN_mod_exp(y, y, q, p, ctx)) goto end; if (BN_is_one(y)) { BNerr(BN_F_BN_MOD_SQRT, BN_R_P_IS_NOT_PRIME); goto end; } /*- * Now we know that (if p is indeed prime) there is an integer * k, 0 <= k < 2^e, such that * * a^q * y^k == 1 (mod p). * * As a^q is a square and y is not, k must be even. * q+1 is even, too, so there is an element * * X := a^((q+1)/2) * y^(k/2), * * and it satisfies * * X^2 = a^q * a * y^k * = a, * * so it is the square root that we are looking for. */ /* t := (q-1)/2 (note that q is odd) */ if (!BN_rshift1(t, q)) goto end; /* x := a^((q-1)/2) */ if (BN_is_zero(t)) { /* special case: p = 2^e + 1 */ if (!BN_nnmod(t, A, p, ctx)) goto end; if (BN_is_zero(t)) { /* special case: a == 0 (mod p) */ BN_zero(ret); err = 0; goto end; } else if (!BN_one(x)) goto end; } else { if (!BN_mod_exp(x, A, t, p, ctx)) goto end; if (BN_is_zero(x)) { /* special case: a == 0 (mod p) */ BN_zero(ret); err = 0; goto end; } } /* b := a*x^2 (= a^q) */ if (!BN_mod_sqr(b, x, p, ctx)) goto end; if (!BN_mod_mul(b, b, A, p, ctx)) goto end; /* x := a*x (= a^((q+1)/2)) */ if (!BN_mod_mul(x, x, A, p, ctx)) goto end; while (1) { /*- * Now b is a^q * y^k for some even k (0 <= k < 2^E * where E refers to the original value of e, which we * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2). * * We have a*b = x^2, * y^2^(e-1) = -1, * b^2^(e-1) = 1. */ if (BN_is_one(b)) { if (!BN_copy(ret, x)) goto end; err = 0; goto vrfy; } /* Find the smallest i, 0 < i < e, such that b^(2^i) = 1. */ for (i = 1; i < e; i++) { if (i == 1) { if (!BN_mod_sqr(t, b, p, ctx)) goto end; } else { if (!BN_mod_mul(t, t, t, p, ctx)) goto end; } if (BN_is_one(t)) break; } /* If not found, a is not a square or p is not prime. */ if (i >= e) { BNerr(BN_F_BN_MOD_SQRT, BN_R_NOT_A_SQUARE); goto end; } /* t := y^2^(e - i - 1) */ if (!BN_copy(t, y)) goto end; for (j = e - i - 1; j > 0; j--) { if (!BN_mod_sqr(t, t, p, ctx)) goto end; } if (!BN_mod_mul(y, t, t, p, ctx)) goto end; if (!BN_mod_mul(x, x, t, p, ctx)) goto end; if (!BN_mod_mul(b, b, y, p, ctx)) goto end; e = i; } vrfy: if (!err) { /* * verify the result -- the input might have been not a square (test * added in 0.9.8) */ if (!BN_mod_sqr(x, ret, p, ctx)) err = 1; if (!err && 0 != BN_cmp(x, A)) { BNerr(BN_F_BN_MOD_SQRT, BN_R_NOT_A_SQUARE); err = 1; } } end: if (err) { if (ret != in) BN_clear_free(ret); ret = NULL; } BN_CTX_end(ctx); bn_check_top(ret); return ret; }
Safe
[ "CWE-835" ]
openssl
3118eb64934499d93db3230748a452351d1d9a65
5.221510500926188e+37
353
Fix possible infinite loop in BN_mod_sqrt() The calculation in some cases does not finish for non-prime p. This fixes CVE-2022-0778. Based on patch by David Benjamin <davidben@google.com>. Reviewed-by: Paul Dale <pauli@openssl.org> Reviewed-by: Matt Caswell <matt@openssl.org>
0
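The OpenSSL fix bounds the Tonelli/Shanks search so a non-prime modulus cannot spin forever; the "++i < 82" cap and the BN_R_TOO_MANY_ITERATIONS error path are visible in the function text above. Below is a stripped-down sketch of the same defence, an explicit iteration cap on a search loop, using a made-up predicate that never succeeds.

#include <stdio.h>

/* Search for a witness value, but give up after max_iters candidates instead
 * of spinning forever when the input breaks the algorithm's precondition
 * (here: a modulus that is not actually prime). */
static int find_witness(int (*is_witness)(int), int max_iters, int *out)
{
    for (int i = 2; i < max_iters; i++) {
        if (is_witness(i)) {
            *out = i;
            return 0;
        }
    }
    return -1;                          /* "too many iterations" error path */
}

static int never_matches(int candidate)
{
    (void)candidate;
    return 0;
}

int main(void)
{
    int witness;

    if (find_witness(never_matches, 82, &witness) < 0)
        printf("gave up after the iteration cap; no infinite loop\n");
    return 0;
}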
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) { const struct iphdr *iph; int err; UDP_SKB_CB(skb)->partial_cov = 0; UDP_SKB_CB(skb)->cscov = skb->len; if (proto == IPPROTO_UDPLITE) { err = udplite_checksum_init(skb, uh); if (err) return err; } iph = ip_hdr(skb); if (uh->check == 0) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (!skb_csum_unnecessary(skb)) skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, proto, 0); /* Probably, we should checksum udp header (it should be in cache * in any case) and data in tiny packets (< rx copybreak). */ return 0; }
Safe
[ "CWE-400" ]
linux-2.6
c377411f2494a931ff7facdbb3a6839b1266bcf6
4.33157089080358e+37
32
net: sk_add_backlog() take rmem_alloc into account Current socket backlog limit is not enough to really stop DDOS attacks, because user thread spend many time to process a full backlog each round, and user might crazy spin on socket lock. We should add backlog size and receive_queue size (aka rmem_alloc) to pace writers, and let user run without being slow down too much. Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in stress situations. Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp receiver can now process ~200.000 pps (instead of ~100 pps before the patch) on a 8 core machine. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
ofputil_put_ofp11_match(struct ofpbuf *b, const struct match *match, enum ofputil_protocol protocol) { switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: OVS_NOT_REACHED(); case OFPUTIL_P_OF11_STD: { struct ofp11_match *om; /* Make sure that no padding is needed. */ BUILD_ASSERT_DECL(sizeof *om % 8 == 0); om = ofpbuf_put_uninit(b, sizeof *om); ofputil_match_to_ofp11_match(match, om); return sizeof *om; } case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: return oxm_put_match(b, match, ofputil_protocol_to_ofp_version(protocol)); } OVS_NOT_REACHED(); }
Safe
[ "CWE-772" ]
ovs
77ad4225d125030420d897c873e4734ac708c66b
2.4085648530656115e+38
32
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod(). Found by libFuzzer. Reported-by: Bhargava Shastry <bshastry@sec.t-labs.tu-berlin.de> Signed-off-by: Ben Pfaff <blp@ovn.org> Acked-by: Justin Pettit <jpettit@ovn.org>
0
inline void WireFormatLite::WriteUInt64NoTag(uint64_t value, io::CodedOutputStream* output) { output->WriteVarint64(value); }
Safe
[ "CWE-703" ]
protobuf
d1635e1496f51e0d5653d856211e8821bc47adc4
2.9997869453260353e+38
4
Apply patch
0
TPM_CAP_Unmarshal(TPM_CAP *target, BYTE **buffer, INT32 *size) { TPM_RC rc = TPM_RC_SUCCESS; TPM_CAP orig_target = *target; // libtpms added if (rc == TPM_RC_SUCCESS) { rc = UINT32_Unmarshal(target, buffer, size); } if (rc == TPM_RC_SUCCESS) { switch (*target) { case TPM_CAP_ALGS: case TPM_CAP_HANDLES: case TPM_CAP_COMMANDS: case TPM_CAP_PP_COMMANDS: case TPM_CAP_AUDIT_COMMANDS: case TPM_CAP_PCRS: case TPM_CAP_TPM_PROPERTIES: case TPM_CAP_PCR_PROPERTIES: case TPM_CAP_ECC_CURVES: case TPM_CAP_AUTH_POLICIES: case TPM_CAP_ACT: case TPM_CAP_VENDOR_PROPERTY: break; default: rc = TPM_RC_VALUE; *target = orig_target; // libtpms added } } return rc; }
Safe
[ "CWE-787" ]
libtpms
5cc98a62dc6f204dcf5b87c2ee83ac742a6a319b
9.542235878229807e+37
30
tpm2: Restore original value if unmarshalled value was illegal Restore the original value of the memory location where data from a stream was unmarshalled and the unmarshalled value was found to be illegal. The goal is to not keep illegal values in memory. Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
0
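The libtpms message explains that an illegal unmarshalled value should not be left in memory; the orig_target save/restore in the function above implements that. A small stand-alone sketch of the same rollback pattern follows, with a hypothetical validity check in place of the real TPM_CAP switch.

#include <stdint.h>
#include <stdio.h>

/* Unmarshal a 32-bit capability code from a big-endian buffer; if the value
 * is illegal, restore the caller's previous value instead of leaving the bad
 * one in memory. */
static int unmarshal_cap(uint32_t *target, const uint8_t buf[4])
{
    uint32_t orig = *target;            /* save before overwriting */
    uint32_t value = (uint32_t)buf[0] << 24 | (uint32_t)buf[1] << 16 |
                     (uint32_t)buf[2] << 8  | (uint32_t)buf[3];

    *target = value;
    if (value > 0x0f) {                 /* hypothetical validity check */
        *target = orig;                 /* roll back the illegal value */
        return -1;
    }
    return 0;
}

int main(void)
{
    uint32_t cap = 3;
    const uint8_t bad[4] = { 0xde, 0xad, 0xbe, 0xef };

    if (unmarshal_cap(&cap, bad) < 0)
        printf("rejected illegal value, cap restored to %u\n", (unsigned)cap);
    return 0;
}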
PHPAPI const ps_serializer *_php_find_ps_serializer(char *name TSRMLS_DC) /* {{{ */ { const ps_serializer *ret = NULL; const ps_serializer *mod; for (mod = ps_serializers; mod->name; mod++) { if (!strcasecmp(name, mod->name)) { ret = mod; break; } } return ret; }
Safe
[ "CWE-264" ]
php-src
25e8fcc88fa20dc9d4c47184471003f436927cde
1.9019384486901216e+38
13
Strict session
0
onig_builtin_max(OnigCalloutArgs* args, void* user_data ARG_UNUSED) { int r; int slot; long max_val; OnigCodePoint count_type; OnigType type; OnigValue val; OnigValue aval; (void )onig_check_callout_data_and_clear_old_values(args); slot = 0; r = onig_get_callout_data_by_callout_args_self(args, slot, &type, &val); if (r < ONIG_NORMAL) return r; else if (r > ONIG_NORMAL) { /* type == void: initial state */ type = ONIG_TYPE_LONG; val.l = 0; } r = onig_get_arg_by_callout_args(args, 0, &type, &aval); if (r != ONIG_NORMAL) return r; if (type == ONIG_TYPE_TAG) { r = onig_get_callout_data_by_callout_args(args, aval.tag, 0, &type, &aval); if (r < ONIG_NORMAL) return r; else if (r > ONIG_NORMAL) max_val = 0L; else max_val = aval.l; } else { /* LONG */ max_val = aval.l; } r = onig_get_arg_by_callout_args(args, 1, &type, &aval); if (r != ONIG_NORMAL) return r; count_type = aval.c; if (count_type != '>' && count_type != 'X' && count_type != '<') return ONIGERR_INVALID_CALLOUT_ARG; if (args->in == ONIG_CALLOUT_IN_RETRACTION) { if (count_type == '<') { if (val.l >= max_val) return ONIG_CALLOUT_FAIL; val.l++; } else if (count_type == 'X') val.l--; } else { if (count_type != '<') { if (val.l >= max_val) return ONIG_CALLOUT_FAIL; val.l++; } } r = onig_set_callout_data_by_callout_args_self(args, slot, ONIG_TYPE_LONG, &val); if (r != ONIG_NORMAL) return r; return ONIG_CALLOUT_SUCCESS; }
Safe
[ "CWE-125" ]
oniguruma
d3e402928b6eb3327f8f7d59a9edfa622fec557b
9.479100138722778e+37
63
fix heap-buffer-overflow
0
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p) { Bucket *prev = NULL; if (!(ht->u.flags & HASH_FLAG_PACKED)) { uint32_t nIndex = p->h | ht->nTableMask; uint32_t i = HT_HASH(ht, nIndex); if (i != idx) { prev = HT_HASH_TO_BUCKET(ht, i); while (Z_NEXT(prev->val) != idx) { i = Z_NEXT(prev->val); prev = HT_HASH_TO_BUCKET(ht, i); } } } _zend_hash_del_el_ex(ht, idx, p, prev); }
Safe
[ "CWE-190" ]
php-src
4cc0286f2f3780abc6084bcdae5dce595daa3c12
3.1927221600893634e+38
19
Fix #73832 - leave the table in a safe state if the size is too big.
0
bool val_native(THD *thd, Native *to) { return Time(thd, this).to_native(to, decimals); }
Safe
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
2.82761323918606e+38
4
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
0
static zend_object_value spl_filesystem_object_clone(zval *zobject TSRMLS_DC) { zend_object_value new_obj_val; zend_object *old_object; zend_object *new_object; zend_object_handle handle = Z_OBJ_HANDLE_P(zobject); spl_filesystem_object *intern; spl_filesystem_object *source; int index, skip_dots; old_object = zend_objects_get_address(zobject TSRMLS_CC); source = (spl_filesystem_object*)old_object; new_obj_val = spl_filesystem_object_new_ex(old_object->ce, &intern TSRMLS_CC); new_object = &intern->std; intern->flags = source->flags; switch (source->type) { case SPL_FS_INFO: intern->_path_len = source->_path_len; intern->_path = estrndup(source->_path, source->_path_len); intern->file_name_len = source->file_name_len; intern->file_name = estrndup(source->file_name, intern->file_name_len); break; case SPL_FS_DIR: spl_filesystem_dir_open(intern, source->_path TSRMLS_CC); /* read until we hit the position in which we were before */ skip_dots = SPL_HAS_FLAG(source->flags, SPL_FILE_DIR_SKIPDOTS); for(index = 0; index < source->u.dir.index; ++index) { do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } intern->u.dir.index = index; break; case SPL_FS_FILE: php_error_docref(NULL TSRMLS_CC, E_ERROR, "An object of class %s cannot be cloned", old_object->ce->name); break; } intern->file_class = source->file_class; intern->info_class = source->info_class; intern->oth = source->oth; intern->oth_handler = source->oth_handler; zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC); if (intern->oth_handler && intern->oth_handler->clone) { intern->oth_handler->clone(source, intern TSRMLS_CC); } return new_obj_val; }
Safe
[ "CWE-190" ]
php-src
7245bff300d3fa8bacbef7897ff080a6f1c23eba
1.315404204020456e+38
54
Fix bug #72262 - do not overflow int
0
int ImagingLibTiffEncodeInit(ImagingCodecState state, char *filename, int fp) { // Open the FD or the pointer as a tiff file, for writing. // We may have to do some monkeying around to make this really work. // If we have a fp, then we're good. // If we have a memory string, we're probably going to have to malloc, then // shuffle bytes into the writescanline process. // Going to have to deal with the directory as well. TIFFSTATE *clientstate = (TIFFSTATE *)state->context; int bufsize = 64*1024; char *mode = "w"; TRACE(("initing libtiff\n")); TRACE(("Filename %s, filepointer: %d \n", filename, fp)); TRACE(("State: count %d, state %d, x %d, y %d, ystep %d\n", state->count, state->state, state->x, state->y, state->ystep)); TRACE(("State: xsize %d, ysize %d, xoff %d, yoff %d \n", state->xsize, state->ysize, state->xoff, state->yoff)); TRACE(("State: bits %d, bytes %d \n", state->bits, state->bytes)); TRACE(("State: context %p \n", state->context)); clientstate->loc = 0; clientstate->size = 0; clientstate->eof =0; clientstate->data = 0; clientstate->flrealloc = 0; clientstate->fp = fp; state->state = 0; if (fp) { TRACE(("Opening using fd: %d for writing \n",clientstate->fp)); clientstate->tiff = TIFFFdOpen(clientstate->fp, filename, mode); } else { // malloc a buffer to write the tif, we're going to need to realloc or something if we need bigger. TRACE(("Opening a buffer for writing \n")); /* malloc check ok, small constant allocation */ clientstate->data = malloc(bufsize); clientstate->size = bufsize; clientstate->flrealloc=1; if (!clientstate->data) { TRACE(("Error, couldn't allocate a buffer of size %d\n", bufsize)); return 0; } clientstate->tiff = TIFFClientOpen(filename, mode, (thandle_t) clientstate, _tiffReadProc, _tiffWriteProc, _tiffSeekProc, _tiffCloseProc, _tiffSizeProc, _tiffNullMapProc, _tiffUnmapProc); /*force no mmap*/ } if (!clientstate->tiff) { TRACE(("Error, couldn't open tiff file\n")); return 0; } return 1; }
Safe
[ "CWE-190", "CWE-787" ]
Pillow
4e2def2539ec13e53a82e06c4b3daf00454100c4
2.9066217359107612e+38
62
Overflow checks for realloc for tiff decoding
0
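The Pillow change adds overflow checks around buffer reallocation in the TIFF path. Below is a sketch of a checked grow-by-doubling helper: it rejects a doubling that would overflow size_t and keeps the old buffer valid when realloc() fails. The helper name is an assumption, not Pillow's actual code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Grow a buffer by doubling, but check for size_t overflow before the
 * multiplication and keep the old buffer valid if realloc() fails. */
static int grow_buffer(char **buf, size_t *size)
{
    if (*size > SIZE_MAX / 2)
        return -1;                      /* doubling would overflow size_t */

    size_t newsize = *size * 2;
    char *tmp = realloc(*buf, newsize);
    if (!tmp)
        return -1;

    *buf = tmp;
    *size = newsize;
    return 0;
}

int main(void)
{
    size_t size = 64 * 1024;
    char *buf = malloc(size);

    if (!buf)
        return 1;
    if (grow_buffer(&buf, &size) == 0)
        printf("grew buffer to %zu bytes\n", size);
    free(buf);
    return 0;
}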
bool ExpressionTrim::codePointMatchesAtIndex(const StringData& input, std::size_t indexOfInput, const StringData& testCP) { for (size_t i = 0; i < testCP.size(); ++i) { if (indexOfInput + i >= input.size() || input[indexOfInput + i] != testCP[i]) { return false; } } return true; };
Safe
[]
mongo
1772b9a0393b55e6a280a35e8f0a1f75c014f301
2.682968813209956e+38
10
SERVER-49404 Enforce additional checks in $arrayToObject
0
print_var_list (list) register SHELL_VAR **list; { register int i; register SHELL_VAR *var; for (i = 0; list && (var = list[i]); i++) if (invisible_p (var) == 0) print_assignment (var); }
Safe
[]
bash
863d31ae775d56b785dc5b0105b6d251515d81d5
1.4469723781084327e+38
10
commit bash-20120224 snapshot
0
int in_gate_area(struct mm_struct *mm, unsigned long addr) { return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); }
Safe
[ "CWE-284", "CWE-264" ]
linux
a4780adeefd042482f624f5e0d577bf9cdcbb760
1.2059298967827263e+38
4
ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork Since commit 6a1c53124aa1 the user writeable TLS register was zeroed to prevent it from being used as a covert channel between two tasks. There are more and more applications coming to Windows RT, Wine could support them, but mostly they expect to have the thread environment block (TEB) in TPIDRURW. This patch preserves that register per thread instead of clearing it. Unlike the TPIDRURO, which is already switched, the TPIDRURW can be updated from userspace so needs careful treatment in the case that we modify TPIDRURW and call fork(). To avoid this we must always read TPIDRURW in copy_thread. Signed-off-by: André Hentschel <nerv@dawncrow.de> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Jonathan Austin <jonathan.austin@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
0
static inline void GetPixelInfoPixel(const Image *restrict image, const Quantum *restrict pixel,PixelInfo *restrict pixel_info) { pixel_info->colorspace=image->colorspace; pixel_info->fuzz=image->fuzz; pixel_info->red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]; pixel_info->green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset]; pixel_info->blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset]; pixel_info->black=0.0f; if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel_info->black=(MagickRealType) pixel[image->channel_map[BlackPixelChannel].offset]; pixel_info->alpha=(MagickRealType) OpaqueAlpha; pixel_info->alpha_trait=UndefinedPixelTrait; if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) { pixel_info->alpha=(MagickRealType) pixel[image->channel_map[AlphaPixelChannel].offset]; pixel_info->alpha_trait=BlendPixelTrait; } pixel_info->index=0.0f; if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait) pixel_info->index=(MagickRealType) pixel[image->channel_map[IndexPixelChannel].offset]; }
Safe
[ "CWE-119", "CWE-787" ]
ImageMagick
450bd716ed3b9186dd10f9e60f630a3d9eeea2a4
4.885307841060251e+37
28
0
CImg<T>& RGBtoLab(const bool use_D65=true) { return RGBtoXYZ(use_D65).XYZtoLab(use_D65); }
Safe
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
2.7116999596015686e+38
3
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
0
static inline void native_set_ldt(const void *addr, unsigned int entries) { if (likely(entries == 0)) asm volatile("lldt %w0"::"q" (0)); else { unsigned cpu = smp_processor_id(); ldt_desc ldt; set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT, entries * LDT_ENTRY_SIZE - 1); write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &ldt, DESC_LDT); asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); } }
Safe
[ "CWE-119" ]
linux-2.6
5ac37f87ff18843aabab84cf75b2f8504c2d81fe
4.129961570820901e+37
15
x86: fix ldt limit for 64 bit Fix size of LDT entries. On x86-64, ldt_desc is a double-sized descriptor. Signed-off-by: Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
0
QPDF::pushInheritedAttributesToPage(bool allow_changes, bool warn_skipped_keys) { // Traverse pages tree pushing all inherited resources down to the // page level. // The record of whether we've done this is cleared by // updateAllPagesCache(). If we're warning for skipped keys, // re-traverse unconditionally. if (this->m->pushed_inherited_attributes_to_pages && (! warn_skipped_keys)) { return; } // Calling getAllPages() resolves any duplicated page objects. getAllPages(); // key_ancestors is a mapping of page attribute keys to a stack of // Pages nodes that contain values for them. std::map<std::string, std::vector<QPDFObjectHandle> > key_ancestors; this->m->all_pages.clear(); std::set<QPDFObjGen> visited; pushInheritedAttributesToPageInternal( this->m->trailer.getKey("/Root").getKey("/Pages"), key_ancestors, this->m->all_pages, allow_changes, warn_skipped_keys, visited); if (! key_ancestors.empty()) { throw std::logic_error( "key_ancestors not empty after" " pushing inherited attributes to pages"); } this->m->pushed_inherited_attributes_to_pages = true; }
Safe
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
1.1433219145421964e+38
33
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
0
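The qpdf message describes making every narrowing integer conversion explicit through calls that range-check and raise an exception. The real change is C++ helpers that throw; the sketch below shows the same idea as a plain C function that reports an error for out-of-range values (the function name is hypothetical).

#include <errno.h>
#include <limits.h>
#include <stdio.h>

/* Convert size_t to int only when the value fits; otherwise report a range
 * error instead of silently truncating or wrapping. */
static int size_to_int(size_t value, int *out)
{
    if (value > (size_t)INT_MAX) {
        errno = ERANGE;
        return -1;
    }
    *out = (int)value;
    return 0;
}

int main(void)
{
    int small;

    if (size_to_int((size_t)1234, &small) == 0)
        printf("converted: %d\n", small);
    if (size_to_int((size_t)INT_MAX + 5u, &small) < 0)
        printf("rejected out-of-range value\n");
    return 0;
}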
ushort *CLASS ljpeg_row(int jrow, struct jhead *jh) { int col, c, diff, pred, spred = 0; ushort mark = 0, *row[3]; if (jrow * jh->wide % jh->restart == 0) { FORC(6) jh->vpred[c] = 1 << (jh->bits - 1); if (jrow) { fseek(ifp, -2, SEEK_CUR); do mark = (mark << 8) + (c = fgetc(ifp)); while (c != EOF && mark >> 4 != 0xffd); } getbits(-1); } FORC3 row[c] = jh->row + jh->wide * jh->clrs * ((jrow + c) & 1); for (col = 0; col < jh->wide; col++) FORC(jh->clrs) { diff = ljpeg_diff(jh->huff[c]); if (jh->sraw && c <= jh->sraw && (col | c)) pred = spred; else if (col) pred = row[0][-jh->clrs]; else pred = (jh->vpred[c] += diff) - diff; if (jrow && col) switch (jh->psv) { case 1: break; case 2: pred = row[1][0]; break; case 3: pred = row[1][-jh->clrs]; break; case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break; case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break; case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1); break; case 7: pred = (pred + row[1][0]) >> 1; break; default: pred = 0; } if ((**row = pred + diff) >> jh->bits) derror(); if (c <= jh->sraw) spred = **row; row[0]++; row[1]++; } return row[2]; }
Safe
[ "CWE-476", "CWE-119" ]
LibRaw
d7c3d2cb460be10a3ea7b32e9443a83c243b2251
4.993065035140943e+37
63
Secunia SA75000 advisory: several buffer overruns
0
static void convert_ccc_entry(char *key, char *value, void *user_data) { char *src_addr = user_data; char dst_addr[18]; char type = BDADDR_BREDR; uint16_t handle; int ret, err; char filename[PATH_MAX]; GKeyFile *key_file; struct stat st; char group[6]; char *data; gsize length = 0; ret = sscanf(key, "%17s#%hhu#%04hX", dst_addr, &type, &handle); if (ret < 3) return; if (bachk(dst_addr) != 0) return; /* Check if the device directory has been created as records should * only be converted for known devices */ snprintf(filename, PATH_MAX, STORAGEDIR "/%s/%s", src_addr, dst_addr); err = stat(filename, &st); if (err || !S_ISDIR(st.st_mode)) return; snprintf(filename, PATH_MAX, STORAGEDIR "/%s/%s/ccc", src_addr, dst_addr); key_file = g_key_file_new(); g_key_file_load_from_file(key_file, filename, 0, NULL); sprintf(group, "%hu", handle); g_key_file_set_string(key_file, group, "Value", value); data = g_key_file_to_data(key_file, &length, NULL); if (length > 0) { create_file(filename, 0600); g_file_set_contents(filename, data, length, NULL); } g_free(data); g_key_file_free(key_file); }
Safe
[ "CWE-862", "CWE-863" ]
bluez
b497b5942a8beb8f89ca1c359c54ad67ec843055
4.5726327071300365e+37
46
adapter: Fix storing discoverable setting discoverable setting shall only be store when changed via Discoverable property and not when discovery client set it as that be considered temporary just for the lifetime of the discovery.
0
static void __vmx_load_host_state(struct vcpu_vmx *vmx) { if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); #ifdef CONFIG_X86_64 load_gs_index(vmx->host_state.gs_sel); wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); #else loadsegment(gs, vmx->host_state.gs_sel); #endif } reload_tss(); #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) { rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); } #endif if (current_thread_info()->status & TS_USEDFPU) clts(); load_gdt(&__get_cpu_var(host_gdt)); }
Safe
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
2.9926227833867786e+38
29
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <avi@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
0
GF_Err trgt_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->track_group_id = gf_bs_read_u32(bs); return GF_OK;
Safe
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
9.954800338240874e+37
7
fixed #1587
0
static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize* sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) 
colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); }
Vulnerable
[ "CWE-703", "CWE-772" ]
ImageMagick
0417cea1b6d72f90bd4f1f573f91e42a8ba66a89
3.017540163223647e+38
151
https://github.com/ImageMagick/ImageMagick/issues/574
1
t_go_generator(t_program* program, const std::map<std::string, std::string>& parsed_options, const std::string& option_string) : t_generator(program) { (void)option_string; std::map<std::string, std::string>::const_iterator iter; gen_thrift_import_ = DEFAULT_THRIFT_IMPORT; gen_package_prefix_ = ""; package_flag = ""; read_write_private_ = false; ignore_initialisms_ = false; for( iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) { if( iter->first.compare("package_prefix") == 0) { gen_package_prefix_ = (iter->second); } else if( iter->first.compare("thrift_import") == 0) { gen_thrift_import_ = (iter->second); } else if( iter->first.compare("package") == 0) { package_flag = (iter->second); } else if( iter->first.compare("read_write_private") == 0) { read_write_private_ = true; } else if( iter->first.compare("ignore_initialisms") == 0) { ignore_initialisms_ = true; } else { throw "unknown option go:" + iter->first; } } out_dir_base_ = "gen-go"; }
Safe
[ "CWE-77" ]
thrift
2007783e874d524a46b818598a45078448ecc53e
3.141348492847688e+38
31
THRIFT-3893 Command injection in format_go_output Client: Go Patch: Jens Geyer
0
void Field_blob::sql_type(String &res) const { const char *str; uint length; switch (packlength) { default: str="tiny"; length=4; break; case 2: str=""; length=0; break; case 3: str="medium"; length= 6; break; case 4: str="long"; length=4; break; } res.set_ascii(str,length); if (charset() == &my_charset_bin) res.append(STRING_WITH_LEN("blob")); else { res.append(STRING_WITH_LEN("text")); } }
Safe
[ "CWE-120" ]
server
eca207c46293bc72dd8d0d5622153fab4d3fccf1
3.2632739852194598e+38
18
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size, and Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size. Precision should be kept below DECIMAL_MAX_SCALE for computations. It can be bigger in Item_decimal. I'd fix this too, but it changes the existing behaviour, so it is problematic to fix.
0
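The MDEV-25317 record above says precision should be kept below DECIMAL_MAX_SCALE before any size computation. A small standalone sketch of such a clamp, under the assumption of a made-up limit constant rather than the server's real DECIMAL_* definitions:

#include <algorithm>
#include <cassert>
#include <cstdio>

// Hypothetical stand-in for the server's limit; not the real definition.
constexpr int kDecimalMaxScale = 38;

// Clamp precision/scale before any size computation so the
// 'scale >= 0 && precision > 0 && scale <= precision' invariant holds.
static void clamp_decimal(int &precision, int &scale) {
  precision = std::min(std::max(precision, 1), kDecimalMaxScale);
  scale     = std::min(std::max(scale, 0), precision);
}

int main() {
  int precision = 130, scale = 60;   // values a wrapped Item_decimal might carry
  clamp_decimal(precision, scale);
  assert(scale >= 0 && precision > 0 && scale <= precision);
  std::printf("precision=%d scale=%d\n", precision, scale);
}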
static int cmd_status(const char *s) { s = imap_next_word((char *) s); if (mutt_str_strncasecmp("OK", s, 2) == 0) return IMAP_CMD_OK; if (mutt_str_strncasecmp("NO", s, 2) == 0) return IMAP_CMD_NO; return IMAP_CMD_BAD; }
Safe
[ "CWE-78", "CWE-77" ]
neomutt
e52393740334443ae0206cab2d7caef381646725
2.825345486824208e+38
11
quote imap strings more carefully Co-authored-by: JerikoOne <jeriko.one@gmx.us>
0
httpd_senddone(isc_nmhandle_t *handle, isc_result_t result, void *arg) { isc_httpd_t *httpd = (isc_httpd_t *)arg; REQUIRE(VALID_HTTPD(httpd)); REQUIRE(httpd->state == SEND); REQUIRE(httpd->handle == handle); isc_buffer_free(&httpd->sendbuffer); /* * We will always want to clean up our receive buffer, even if we * got an error on send or we are shutting down. */ if (httpd->freecb != NULL) { isc_buffer_t *b = NULL; if (isc_buffer_length(&httpd->bodybuffer) > 0) { b = &httpd->bodybuffer; httpd->freecb(b, httpd->freecb_arg); } } isc_nmhandle_detach(&httpd->sendhandle); if (result != ISC_R_SUCCESS) { goto cleanup_readhandle; } if ((httpd->flags & HTTPD_CLOSE) != 0) { goto cleanup_readhandle; } httpd->state = RECV; httpd->sendhandle = NULL; if (httpd->recvlen != 0) { /* * Outstanding requests still exist, start processing * them. */ httpd_request(httpd->handle, ISC_R_SUCCESS, NULL, httpd->mgr); } else if (!httpd->truncated) { isc_nm_resumeread(httpd->readhandle); } else { /* Truncated request, don't resume */ goto cleanup_readhandle; } return; cleanup_readhandle: isc_nmhandle_detach(&httpd->readhandle); }
Safe
[]
bind9
d4c5d1c650ae0e97a083b0ce7a705c20fc001f07
2.84044051125735e+38
52
Fix statistics channel multiple request processing with non-empty bodies When the HTTP request has a body part after the HTTP headers, it is not getting processed and is being prepended to the next request's data, which results in an error when trying to parse it. Improve the httpd.c:process_request() function with the following additions: 1. Require that HTTP POST requests must have Content-Length header. 2. When Content-Length header is set, extract its value, and make sure that it is valid and that the whole request's body is received before processing the request. 3. Discard the request's body by consuming Content-Length worth of data in the buffer. (cherry picked from commit c2bbdc8a648c9630b2c9cea5227ad5c309c2ade5)
0
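The bind9 record above lists three additions to process_request(): require Content-Length on POST, wait until the whole body has arrived, and consume the body so it is not prepended to the next request. A compact standalone model of that flow (illustrative names, not the actual httpd.c code):

#include <cstdio>
#include <cstdlib>
#include <string>

// Minimal model of the three additions described above; names are illustrative only.
struct ParsedRequest { size_t header_len = 0; size_t content_length = 0; bool is_post = false; };

// Returns true when the whole request (headers + body) is available in 'buf'.
static bool request_complete(const std::string &buf, ParsedRequest &req) {
  size_t end = buf.find("\r\n\r\n");
  if (end == std::string::npos) return false;            // headers not complete yet
  req.header_len = end + 4;
  req.is_post = buf.compare(0, 5, "POST ") == 0;
  size_t cl = buf.find("Content-Length:");
  if (cl != std::string::npos && cl < end) {
    char *endp = nullptr;
    unsigned long v = std::strtoul(buf.c_str() + cl + 15, &endp, 10);
    if (endp == buf.c_str() + cl + 15) return false;      // malformed value; a real server rejects it
    req.content_length = v;
  } else if (req.is_post) {
    return false;                                         // POST must carry Content-Length
  }
  // Wait until the full body has been received before processing.
  return buf.size() >= req.header_len + req.content_length;
}

int main() {
  std::string buf = "POST /x HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd";
  ParsedRequest req;
  if (request_complete(buf, req)) {
    // Consume the body so it is not prepended to the next request.
    buf.erase(0, req.header_len + req.content_length);
    std::printf("processed one request; %zu bytes left\n", buf.size());
  }
}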
static bool IsLiteralCompareBool(HValue* left, Token::Value op, HValue* right) { return op == Token::EQ_STRICT && ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) || (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean())); }
Safe
[]
node
fd80a31e0697d6317ce8c2d289575399f4e06d21
3.309697408566636e+38
7
deps: backport 5f836c from v8 upstream Original commit message: Fix Hydrogen bounds check elimination When combining bounds checks, they must all be moved before the first load/store that they are guarding. BUG=chromium:344186 LOG=y R=svenpanne@chromium.org Review URL: https://codereview.chromium.org/172093002 git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 fix #8070
0
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv, const struct sk_buff *skb) { u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp)); u8 ret_val; /* * Queue delay is passed as a uint8 in units of 2ms (ms shifted * by 1). Min value (other than 0) is therefore 2ms, max is 510ms. * * Pass max value if queue_delay is beyond the uint8 range */ ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1); mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t" "%d ms sent to FW\n", queue_delay, ret_val); return ret_val; }
Safe
[ "CWE-787" ]
linux
3a9b153c5591548612c3955c9600a98150c81875
4.855574678524925e+37
19
mwifiex: Fix possible buffer overflows in mwifiex_ret_wmm_get_status() mwifiex_ret_wmm_get_status() calls memcpy() without checking the destination size. Since the source comes from a remote AP and may contain illegal WMM elements, this may trigger a heap buffer overflow. Fix it by putting the length check before calling memcpy(). Signed-off-by: Qing Xu <m1s5p6688@gmail.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
0
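The mwifiex record above describes adding a length check before memcpy() so over-the-air WMM elements cannot overflow a fixed driver buffer. A standalone sketch of that bounded-copy pattern with hypothetical types, not the real driver structures:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative only: a fixed-size driver-side status buffer and an
// attacker-controlled element length taken from an over-the-air frame.
struct WmmStatus { uint8_t data[64]; };

static bool copy_wmm_element(WmmStatus &dst, const uint8_t *src, size_t ie_len) {
  if (ie_len > sizeof(dst.data))   // the length check the fix adds before memcpy()
    return false;                  // drop oversized/illegal elements
  std::memcpy(dst.data, src, ie_len);
  return true;
}

int main() {
  WmmStatus st{};
  uint8_t frame[200] = {0};
  std::printf("small: %d, oversized: %d\n",
              copy_wmm_element(st, frame, 32),
              copy_wmm_element(st, frame, 200));  // rejected instead of overflowing
}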
ssize_t __weak cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "Not affected\n"); }
Safe
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
2.1952771818786548e+38
5
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <joe@perches.com> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
0
static int cap_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { return -EOPNOTSUPP; }
Safe
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
1.9142294460848613e+38
5
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <dhowells@redhat.com> Signed-off-by: James Morris <jmorris@namei.org>
0
vmxnet3_can_receive(NetClientState *nc) { VMXNET3State *s = qemu_get_nic_opaque(nc); return s->device_active && VMXNET_FLAG_IS_SET(s->link_status_and_speed, VMXNET3_LINK_STATUS_UP); }
Safe
[ "CWE-20" ]
qemu
a7278b36fcab9af469563bd7b9dadebe2ae25e48
1.7935951756670684e+38
6
net/vmxnet3: Refine l2 header validation Validation of l2 header length assumed minimal packet size as eth_header + 2 * vlan_header regardless of the actual protocol. This caused a crash for valid non-IP packets shorter than 22 bytes, as 'tx_pkt->packet_type' hasn't been assigned for such packets, and 'vmxnet3_on_tx_done_update_stats()' expects it to be properly set. Refine header length validation in 'vmxnet_tx_pkt_parse_headers'. Check its return value during packet processing flow. As a side effect, in case of IPv4 or IPv6 header validation failure, corrupt packets will be dropped. Signed-off-by: Dana Rubin <dana.rubin@ravellosystems.com> Signed-off-by: Shmulik Ladkani <shmulik.ladkani@ravellosystems.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
0
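The vmxnet3 record above refines L2 header validation so the minimal header length follows the actual protocol instead of always assuming eth_header + 2 * vlan_header. A standalone sketch of deriving that minimum from the EtherType chain; constants and names are illustrative, not the QEMU code:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative constants; real values come from the packet headers themselves.
constexpr size_t kEthHdr = 14, kVlanHdr = 4;
constexpr uint16_t kEtherTypeVlan = 0x8100;

// Minimal L2 header length derived from what the packet actually carries,
// instead of unconditionally assuming eth_header + 2 * vlan_header (22 bytes).
static size_t min_l2_len(const uint8_t *pkt, size_t len) {
  size_t need = kEthHdr;
  while (len >= need + kVlanHdr) {
    uint16_t ethertype = static_cast<uint16_t>(pkt[need - 2] << 8 | pkt[need - 1]);
    if (ethertype != kEtherTypeVlan) break;
    need += kVlanHdr;                       // one more tag present, require its bytes
  }
  return need;
}

int main() {
  uint8_t arp_pkt[42] = {0};                // valid non-IP packet, shorter than 22 bytes of assumed headers
  arp_pkt[12] = 0x08; arp_pkt[13] = 0x06;   // EtherType = ARP
  std::printf("required l2 header: %zu bytes\n", min_l2_len(arp_pkt, sizeof(arp_pkt)));
}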
static int i2r_address(BIO *out, const unsigned afi, const unsigned char fill, const ASN1_BIT_STRING *bs) { unsigned char addr[ADDR_RAW_BUF_LEN]; int i, n; if (bs->length < 0) return 0; switch (afi) { case IANA_AFI_IPV4: if (!addr_expand(addr, bs, 4, fill)) return 0; BIO_printf(out, "%d.%d.%d.%d", addr[0], addr[1], addr[2], addr[3]); break; case IANA_AFI_IPV6: if (!addr_expand(addr, bs, 16, fill)) return 0; for (n = 16; n > 1 && addr[n - 1] == 0x00 && addr[n - 2] == 0x00; n -= 2) ; for (i = 0; i < n; i += 2) BIO_printf(out, "%x%s", (addr[i] << 8) | addr[i + 1], (i < 14 ? ":" : "")); if (i < 16) BIO_puts(out, ":"); if (i == 0) BIO_puts(out, ":"); break; default: for (i = 0; i < bs->length; i++) BIO_printf(out, "%s%02x", (i > 0 ? ":" : ""), bs->data[i]); BIO_printf(out, "[%d]", (int)(bs->flags & 7)); break; } return 1; }
Safe
[ "CWE-119", "CWE-787" ]
openssl
068b963bb7afc57f5bdd723de0dd15e7795d5822
2.0814448070855504e+38
36
Avoid out-of-bounds read Fixes CVE 2017-3735 Reviewed-by: Kurt Roeckx <kurt@roeckx.be> (Merged from https://github.com/openssl/openssl/pull/4276) (cherry picked from commit b23171744b01e473ebbfd6edad70c1c3825ffbcd)
0
static void test_bug12744() { MYSQL_STMT *prep_stmt = NULL; MYSQL *lmysql; int rc; myheader("test_bug12744"); lmysql= mysql_client_init(NULL); DIE_UNLESS(lmysql); if (!mysql_real_connect(lmysql, opt_host, opt_user, opt_password, current_db, opt_port, opt_unix_socket, 0)) { fprintf(stderr, "Failed to connect to the database\n"); DIE_UNLESS(0); } prep_stmt= mysql_stmt_init(lmysql); rc= mysql_stmt_prepare(prep_stmt, "SELECT 1", 8); DIE_UNLESS(rc == 0); mysql_close(lmysql); rc= mysql_stmt_execute(prep_stmt); DIE_UNLESS(rc); rc= mysql_stmt_reset(prep_stmt); DIE_UNLESS(rc); rc= mysql_stmt_close(prep_stmt); DIE_UNLESS(rc == 0); }
Safe
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
2.3726986605093377e+38
30
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory, assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
0
QString CoreBasicHandler::channelDecode(const QString &bufferName, const QByteArray &string) { return network()->channelDecode(bufferName, string); }
Safe
[ "CWE-399" ]
quassel
b5e38970ffd55e2dd9f706ce75af9a8d7730b1b8
2.973299727243321e+38
4
Improve the message-splitting algorithm for PRIVMSG and CTCP This introduces a new message splitting algorithm based on QTextBoundaryFinder. It works by first starting with the entire message to be sent, encoding it, and checking to see if it is over the maximum message length. If it is, it uses QTBF to find the word boundary most immediately preceding the maximum length. If no suitable boundary can be found, it falls back to searching for grapheme boundaries. It repeats this process until the entire message has been sent. Unlike what it replaces, the new splitting code is not recursive and cannot cause stack overflows. Additionally, if it is unable to split a string, it will give up gracefully and not crash the core or cause a thread to run away. This patch fixes two bugs. The first is garbage characters caused by accidentally splitting the string in the middle of a multibyte character. Since the new code splits at a character level instead of a byte level, this will no longer be an issue. The second is the core crash caused by sending an overlength CTCP query ("/me") containing only multibyte characters. This bug was caused by the old CTCP splitter using the byte index from lastParamOverrun() as a character index for a QString.
0
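The quassel record above describes the splitting algorithm clearly: encode the whole message, and while it exceeds the limit cut at the last word boundary before the limit, falling back to a character boundary, iterating rather than recursing. A standalone sketch of that loop; the real patch uses QTextBoundaryFinder and proper grapheme handling, while this model simply splits on spaces:

#include <cstdio>
#include <string>
#include <vector>

// Iterative splitter modelling the algorithm in the message: take the whole
// message, and while it exceeds the limit, cut at the last word boundary
// before the limit, falling back to a plain character boundary.
static std::vector<std::string> split_message(const std::string &msg, size_t max_bytes) {
  std::vector<std::string> out;
  std::string rest = msg;
  while (!rest.empty()) {
    if (rest.size() <= max_bytes) { out.push_back(rest); break; }
    size_t cut = rest.rfind(' ', max_bytes);        // word boundary before the limit
    if (cut == std::string::npos || cut == 0)
      cut = max_bytes;                              // fallback: character boundary
    out.push_back(rest.substr(0, cut));
    rest.erase(0, cut);
    while (!rest.empty() && rest.front() == ' ') rest.erase(0, 1);
  }
  return out;
}

int main() {
  for (const auto &part : split_message("one two three four five six seven", 10))
    std::printf("[%s]\n", part.c_str());
}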
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); vcpu->arch.shadow_efer = efer; if (!msr) return; if (efer & EFER_LMA) { vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS) | VM_ENTRY_IA32E_MODE); msr->data = efer; } else { vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS) & ~VM_ENTRY_IA32E_MODE); msr->data = efer & ~EFER_LME; } setup_msrs(vmx); }
Safe
[ "CWE-20" ]
linux-2.6
16175a796d061833aacfbd9672235f2d2725df65
2.23481348165665e+38
22
KVM: VMX: Don't allow uninhibited access to EFER on i386 vmx_set_msr() does not allow i386 guests to touch EFER, but they can still do so through the default: label in the switch. If they set EFER_LME, they can oops the host. Fix by having EFER access through the normal channel (which will check for EFER_LME) even on i386. Reported-and-tested-by: Benjamin Gilbert <bgilbert@cs.cmu.edu> Cc: stable@kernel.org Signed-off-by: Avi Kivity <avi@redhat.com>
0
void quicklistInsertBefore(quicklist *quicklist, quicklistEntry *entry, void *value, const size_t sz) { _quicklistInsert(quicklist, entry, value, sz, 0); }
Safe
[ "CWE-190" ]
redis
f6a40570fa63d5afdd596c78083d754081d80ae3
1.210724618398988e+38
4
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628) - fix possible heap corruption in ziplist and listpack resulting from trying to allocate more than the maximum size of 4GB. - prevent ziplist (hash and zset) from reaching a size above 1GB; it will be converted to HT encoding instead, since that's not a useful size. - prevent listpack (stream) from reaching a size above 1GB. - XADD will start a new listpack if the new record may cause the previous listpack to grow over 1GB. - XADD will respond with an error if a single stream record is over 1GB. - List type (ziplist in quicklist) was truncating strings that were over 4GB; now it'll respond with an error.
0
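The redis record above introduces size caps: reject single records over the limit and start a new node instead of growing an existing one past it. A small sketch of that decision, with an illustrative 1GB constant rather than the real configuration values:

#include <cstdint>
#include <cstdio>

// Illustrative limit only; mirrors the 1GB safety cap described in the message.
constexpr uint64_t kSafetyLimit = 1ULL << 30;

enum class AddResult { Appended, StartNewNode, TooLarge };

// Decide what to do before growing a listpack/ziplist node.
static AddResult check_add(uint64_t current_node_bytes, uint64_t record_bytes) {
  if (record_bytes > kSafetyLimit)
    return AddResult::TooLarge;                     // reply with an error to the client
  if (current_node_bytes + record_bytes > kSafetyLimit)
    return AddResult::StartNewNode;                 // avoid growing past the cap
  return AddResult::Appended;
}

int main() {
  std::printf("%d %d %d\n",
              static_cast<int>(check_add(100, 200)),
              static_cast<int>(check_add(kSafetyLimit - 10, 100)),
              static_cast<int>(check_add(0, kSafetyLimit + 1)));
}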
long qemu_maxrampagesize(void) { return qemu_real_host_page_size(); }
Safe
[ "CWE-908" ]
qemu
418ade7849ce7641c0f7333718caf5091a02fd4c
3.1126969672432176e+38
4
softmmu: Always initialize xlat in address_space_translate_for_iotlb The bug is an uninitialized memory read, along the translate_fail path, which results in garbage being read from iotlb_to_section, which can lead to a crash in io_readx/io_writex. The bug may be fixed by writing any value with zero in ~TARGET_PAGE_MASK, so that the call to iotlb_to_section using the xlat'ed address returns io_mem_unassigned, as desired by the translate_fail path. It is most useful to record the original physical page address, which will eventually be logged by memory_region_access_valid when the access is rejected by unassigned_mem_accepts. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1065 Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Message-Id: <20220621153829.366423-1-richard.henderson@linaro.org>
0
int ssl_cipher_list_to_bytes(SSL *s,STACK_OF(SSL_CIPHER) *sk,unsigned char *p, int (*put_cb)(const SSL_CIPHER *, unsigned char *)) { int i,j=0; SSL_CIPHER *c; unsigned char *q; #ifndef OPENSSL_NO_KRB5 int nokrb5 = !kssl_tgt_is_available(s->kssl_ctx); #endif /* OPENSSL_NO_KRB5 */ if (sk == NULL) return(0); q=p; for (i=0; i<sk_SSL_CIPHER_num(sk); i++) { c=sk_SSL_CIPHER_value(sk,i); /* Skip TLS v1.2 only ciphersuites if lower than v1.2 */ if ((c->algorithm_ssl & SSL_TLSV1_2) && (TLS1_get_client_version(s) < TLS1_2_VERSION)) continue; #ifndef OPENSSL_NO_KRB5 if (((c->algorithm_mkey & SSL_kKRB5) || (c->algorithm_auth & SSL_aKRB5)) && nokrb5) continue; #endif /* OPENSSL_NO_KRB5 */ #ifndef OPENSSL_NO_PSK /* with PSK there must be client callback set */ if (((c->algorithm_mkey & SSL_kPSK) || (c->algorithm_auth & SSL_aPSK)) && s->psk_client_callback == NULL) continue; #endif /* OPENSSL_NO_PSK */ j = put_cb ? put_cb(c,p) : ssl_put_cipher_by_char(s,c,p); p+=j; } /* If p == q, no ciphers and caller indicates an error. Otherwise * add SCSV if not renegotiating. */ if (p != q && !s->renegotiate) { static SSL_CIPHER scsv = { 0, NULL, SSL3_CK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; j = put_cb ? put_cb(&scsv,p) : ssl_put_cipher_by_char(s,&scsv,p); p+=j; #ifdef OPENSSL_RI_DEBUG fprintf(stderr, "SCSV sent by client\n"); #endif } return(p-q); }
Safe
[]
openssl
0ffa49970b9f8ea66b43ce2eb7f8fd523b65bc2c
1.215935160269628e+38
52
Backport support for fixed DH ciphersuites (from HEAD)
0
xfs_da3_fixhashpath( struct xfs_da_state *state, struct xfs_da_state_path *path) { struct xfs_da_state_blk *blk; struct xfs_da_intnode *node; struct xfs_da_node_entry *btree; xfs_dahash_t lasthash=0; int level; int count; struct xfs_inode *dp = state->args->dp; trace_xfs_da_fixhashpath(state->args); level = path->active-1; blk = &path->blk[ level ]; switch (blk->magic) { case XFS_ATTR_LEAF_MAGIC: lasthash = xfs_attr_leaf_lasthash(blk->bp, &count); if (count == 0) return; break; case XFS_DIR2_LEAFN_MAGIC: lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count); if (count == 0) return; break; case XFS_DA_NODE_MAGIC: lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count); if (count == 0) return; break; } for (blk--, level--; level >= 0; blk--, level--) { struct xfs_da3_icnode_hdr nodehdr; node = blk->bp->b_addr; dp->d_ops->node_hdr_from_disk(&nodehdr, node); btree = dp->d_ops->node_tree_p(node); if (be32_to_cpu(btree->hashval) == lasthash) break; blk->hashval = lasthash; btree[blk->index].hashval = cpu_to_be32(lasthash); xfs_trans_log_buf(state->args->trans, blk->bp, XFS_DA_LOGRANGE(node, &btree[blk->index], sizeof(*btree))); lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval); } }
Vulnerable
[ "CWE-399" ]
linux
c88547a8119e3b581318ab65e9b72f27f23e641d
3.2871302391339354e+38
50
xfs: fix directory hash ordering bug Commit f5ea1100 ("xfs: add CRCs to dir2/da node blocks") introduced in 3.10 incorrectly converted the btree hash index array pointer in xfs_da3_fixhashpath(). It resulted in the current hash always being compared against the first entry in the btree rather than the current block index into the btree block's hash entry array. As a result, it was comparing the wrong hashes, and so could misorder the entries in the btree. For most cases, this doesn't cause any problems as it requires hash collisions to expose the ordering problem. However, when there are hash collisions within a directory there is a very good probability that the entries will be ordered incorrectly and that actually matters when duplicate hashes are placed into or removed from the btree block hash entry array. This bug results in an on-disk directory corruption and that results in directory verifier functions throwing corruption warnings into the logs. While no data or directory entries are lost, access to them may be compromised, and attempts to remove entries from a directory that has suffered from this corruption may result in a filesystem shutdown. xfs_repair will fix the directory hash ordering without data loss occurring. [dchinner: wrote a useful commit message] cc: <stable@vger.kernel.org> Reported-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: Mark Tinguely <tinguely@sgi.com> Reviewed-by: Ben Myers <bpm@sgi.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
1
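The xfs record above (stored here as the vulnerable version) explains that the comparison used the first btree hash entry instead of the entry at the current block index. A toy model contrasting the two checks; this is only the indexing idea, not the XFS code:

#include <cstdio>

// Toy model of the pointer-conversion bug: the comparison must use the
// current block index into the hash array, not always the first element.
struct Node { unsigned hashval[4]; };

static bool needs_update_buggy(const Node &n, int /*index*/, unsigned lasthash) {
  return n.hashval[0] != lasthash;          // always compares entry 0
}

static bool needs_update_fixed(const Node &n, int index, unsigned lasthash) {
  return n.hashval[index] != lasthash;      // compares the entry being fixed up
}

int main() {
  Node n{{10, 20, 30, 40}};
  // For index 2 with lasthash 30, the buggy check reports an update is needed
  // (and would then rewrite hashes), while the fixed check correctly does not.
  std::printf("buggy=%d fixed=%d\n",
              needs_update_buggy(n, 2, 30), needs_update_fixed(n, 2, 30));
}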
authtrust( keyid_t id, u_long trust ) { symkey ** bucket; symkey * sk; u_long lifetime; /* * Search bin for key; if it does not exist and is untrusted, * forget it. */ bucket = &key_hash[KEYHASH(id)]; for (sk = *bucket; sk != NULL; sk = sk->hlink) { if (id == sk->keyid) break; } if (!trust && NULL == sk) return; /* * There are two conditions remaining. Either it does not * exist and is to be trusted or it does exist and is or is * not to be trusted. */ if (sk != NULL) { if (cache_keyid == id) { cache_flags = 0; cache_keyid = 0; cache_keyacclist = NULL; } /* * Key exists. If it is to be trusted, say so and * update its lifetime. */ if (trust > 0) { sk->flags |= KEY_TRUSTED; if (trust > 1) sk->lifetime = current_time + trust; else sk->lifetime = 0; return; } /* No longer trusted, return it to the free list. */ freesymkey(sk, bucket); return; } /* * keyid is not present, but the is to be trusted. We allocate * a new key, but do not specify a key type or secret. */ if (trust > 1) { lifetime = current_time + trust; } else { lifetime = 0; } allocsymkey(bucket, id, KEY_TRUSTED, 0, lifetime, 0, NULL, NULL); }
Safe
[ "CWE-287" ]
ntp
71a962710bfe066f76da9679cf4cfdeffe34e95e
3.1618221525886453e+38
62
[Sec 2936] Skeleton Key: Any trusted key system can serve time. HStenn.
0
void OneHotComputeImpl(const OneHotContext& op_context) { // prefix_dim_size == # of elements before the axis // depth == # of elements per axis // suffix_dim_size == # of elements after the axis int prefix_dim_size = 1; for (int i = 0; i < op_context.axis; ++i) { prefix_dim_size *= op_context.indices->dims->data[i]; } if (prefix_dim_size == 0) { // If indices tensor is degenerate, return a degenerate tensor, just like // TensorFlow does. return; } const int suffix_dim_size = NumElements(op_context.indices) / prefix_dim_size; const int depth = *op_context.depth->data.i32; const T on_value = *GetTensorData<T>(op_context.on_value); const T off_value = *GetTensorData<T>(op_context.off_value); // View the indices as a matrix of size: // prefix_dim_size x suffix_dim_size // View the output as a matrix of size: // prefix_dim_size x depth x suffix_dim_size // Then the output is: // output(i, j, k) == (indices(i, k) == j) ? on : off T* output = GetTensorData<T>(op_context.output); const TI* indices = GetTensorData<TI>(op_context.indices); for (int i = 0; i < prefix_dim_size; ++i) { for (int j = 0; j < depth; ++j) { for (int k = 0; k < suffix_dim_size; ++k, ++output) { *output = static_cast<int>(indices[i * suffix_dim_size + k]) == j ? on_value : off_value; } } } }
Safe
[ "CWE-369" ]
tensorflow
3ebedd7e345453d68e279cfc3e4072648e5e12e5
2.5420282645469186e+38
37
Prevent division by 0 in OneHot implementation If input indices is degenerate, the implementation would do a divide by zero. See https://github.com/tensorflow/tensorflow/blob/745d57df6d5e9bc568666a2a48ed8dd629c27241/tensorflow/lite/kernels/one_hot.cc#L68-L72 PiperOrigin-RevId: 370966870 Change-Id: Ie018337811c8016b5a1d3a277d00d5f2e19a2058
0
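The TFLite OneHot record above guards against a division by zero when the indices tensor is degenerate. A standalone sketch of the prefix/suffix dimension computation with that guard, using a plain std::vector in place of TfLiteTensor:

#include <cstdio>
#include <vector>

// Standalone model of the guard added above: if the indices tensor is
// degenerate (any dimension is 0), bail out before dividing by prefix_dim_size.
static bool one_hot_dims(const std::vector<int> &dims, int axis,
                         int &prefix, int &suffix) {
  int num_elements = 1;
  for (int d : dims) num_elements *= d;
  prefix = 1;
  for (int i = 0; i < axis; ++i) prefix *= dims[i];
  if (prefix == 0) return false;          // degenerate input: return a degenerate output
  suffix = num_elements / prefix;         // safe: prefix != 0
  return true;
}

int main() {
  int prefix = 0, suffix = 0;
  bool ok = one_hot_dims({2, 0, 3}, 2, prefix, suffix);
  std::printf("ok=%d prefix=%d suffix=%d\n", ok, prefix, suffix);
}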
void CModule::SetClient(CClient* pClient) { m_pClient = pClient; }
Safe
[ "CWE-20", "CWE-264" ]
znc
8de9e376ce531fe7f3c8b0aa4876d15b479b7311
6.01975593067897e+37
1
Fix remote code execution and privilege escalation vulnerability. To trigger this, need to have a user already. Thanks for Jeriko One <jeriko.one@gmx.us> for finding and reporting this. CVE-2019-12816
0
void endhostent(void) { __UCLIBC_MUTEX_LOCK(mylock); endhostent_unlocked(); __UCLIBC_MUTEX_UNLOCK(mylock); }
Safe
[ "CWE-79" ]
uclibc-ng
0f822af0445e5348ce7b7bd8ce1204244f31d174
1.1475950598020458e+38
6
libc/inet/resolv.c: add __hnbad to check DNS entries for validity, using the same rules glibc does; also call __hnbad in some places to check answers.
0
static int string_contains(const void *a, const void *b) { return !strstr ((const char*) a, (const char*) b); }
Safe
[ "CWE-787" ]
radare2
c84b7232626badd075caf3ae29661b609164bac6
1.7085395921399105e+38
3
Fix heap buffer overflow in dyldcache parser ##crash * Reported by: Lazymio via huntr.dev * Reproducer: dyldovf
0
fs_file_open_file(const gs_memory_t *mem, void *secret, const char *fname, const char *mode, gp_file **file) { FILE *f; *file = gp_file_FILE_alloc(mem); if (*file == NULL) return 0; f = gp_fopen_impl(mem->non_gc_memory, fname, mode); if (gp_file_FILE_set(*file, f, fclose)) { *file = NULL; return gs_error_VMerror; } return 0; }
Safe
[ "CWE-20" ]
ghostpdl
a9bd3dec9fde03327a4a2c69dad1036bf9632e20
2.9920571312247984e+38
18
Bug 704342: Include device specifier strings in access validation for the "%pipe%", "%handle%" and "%printer%" io devices. We previously validated only the part after the "%pipe%" Postscript device specifier, but this proved insufficient. This rebuilds the original file name string, and validates it in full. The slight complication for "%pipe%" is that it can be reached implicitly using "|", so we have to check both prefixes. Addresses CVE-2021-3781
0
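The Ghostscript record above says the full name, device specifier included, must be validated, and that "%pipe%" can also be reached implicitly via "|". A sketch of checking both prefixes on the rebuilt string; the permit-list comparison here is a stand-in, not the actual path-validation logic:

#include <cstdio>
#include <string>

// Sketch of the idea in the message: validate the complete name, and treat
// both the explicit "%pipe%" device prefix and the implicit "|" form alike.
// (Names are illustrative; this is not the Ghostscript validation code.)
static bool is_pipe_target(const std::string &full_name) {
  return full_name.rfind("%pipe%", 0) == 0 || (!full_name.empty() && full_name[0] == '|');
}

static bool access_permitted(const std::string &device, const std::string &fname,
                             const std::string &permitted_command) {
  std::string full = device + fname;            // rebuild the original name string
  if (is_pipe_target(full)) {
    // Compare the complete string against the permit list, not just the
    // part after the device specifier.
    return full == "%pipe%" + permitted_command || full == "|" + permitted_command;
  }
  return true;                                  // non-pipe paths validated elsewhere
}

int main() {
  std::printf("%d %d\n",
              access_permitted("%pipe%", "lpr", "lpr"),
              access_permitted("%pipe%", "rm -rf /", "lpr"));   // rejected
}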
MagickExport double *StringToArrayOfDoubles(const char *string,ssize_t *count, ExceptionInfo *exception) { char *q; const char *p; double *array; register ssize_t i; /* Determine count of values, and check syntax. */ assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); *count=0; if (string == (char *) NULL) return((double *) NULL); /* no value found */ i=0; p=string; while (*p != '\0') { (void) StringToDouble(p,&q); /* get value - ignores leading space */ if (p == q) return((double *) NULL); /* no value found */ p=q; i++; /* increment value count */ while (isspace((int) ((unsigned char) *p)) != 0) p++; /* skip spaces */ if (*p == ',') p++; /* skip comma */ while (isspace((int) ((unsigned char) *p)) != 0) p++; /* and more spaces */ } /* Allocate floating point argument list. */ *count=i; array=(double *) AcquireQuantumMemory((size_t) i,sizeof(*array)); if (array == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((double *) NULL); } /* Fill in the floating point values. */ i=0; p=string; while ((*p != '\0') && (i < *count)) { array[i++]=StringToDouble(p,&q); p=q; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; } return(array); }
Safe
[ "CWE-190" ]
ImageMagick
be90a5395695f0d19479a5d46b06c678be7f7927
2.8695107109940344e+38
64
https://github.com/ImageMagick/ImageMagick/issues/1721
0
static bool pdb_samba_dsdb_get_trusteddom_pw(struct pdb_methods *m, const char *domain, char** pwd, struct dom_sid *sid, time_t *pass_last_set_time) { struct pdb_samba_dsdb_state *state = talloc_get_type_abort( m->private_data, struct pdb_samba_dsdb_state); TALLOC_CTX *tmp_ctx = talloc_stackframe(); const char * const attrs[] = { "securityIdentifier", "flatName", "trustPartner", "trustAuthOutgoing", "whenCreated", "msDS-SupportedEncryptionTypes", "trustAttributes", "trustDirection", "trustType", NULL }; struct ldb_message *msg; const struct ldb_val *password_val; int trust_direction_flags; int trust_type; int i; DATA_BLOB password_utf16; struct trustAuthInOutBlob password_blob; struct AuthenticationInformationArray *auth_array; char *password_talloc; size_t password_len; enum ndr_err_code ndr_err; NTSTATUS status; const char *netbios_domain = NULL; const struct dom_sid *domain_sid = NULL; status = dsdb_trust_search_tdo(state->ldb, domain, NULL, attrs, tmp_ctx, &msg); if (!NT_STATUS_IS_OK(status)) { /* * This can be called to work out of a domain is * trusted, rather than just to get the password */ DEBUG(2, ("Failed to get trusted domain password for %s - %s. " "It may not be a trusted domain.\n", domain, nt_errstr(status))); TALLOC_FREE(tmp_ctx); return false; } netbios_domain = ldb_msg_find_attr_as_string(msg, "flatName", NULL); if (netbios_domain == NULL) { DEBUG(2, ("Trusted domain %s has to flatName defined.\n", domain)); TALLOC_FREE(tmp_ctx); return false; } domain_sid = samdb_result_dom_sid(tmp_ctx, msg, "securityIdentifier"); if (domain_sid == NULL) { DEBUG(2, ("Trusted domain %s has no securityIdentifier defined.\n", domain)); TALLOC_FREE(tmp_ctx); return false; } trust_direction_flags = ldb_msg_find_attr_as_int(msg, "trustDirection", 0); if (!(trust_direction_flags & LSA_TRUST_DIRECTION_OUTBOUND)) { DBG_WARNING("Trusted domain %s is not an outbound trust.\n", domain); TALLOC_FREE(tmp_ctx); return false; } trust_type = ldb_msg_find_attr_as_int(msg, "trustType", 0); if (trust_type == LSA_TRUST_TYPE_MIT) { DBG_WARNING("Trusted domain %s is not an AD trust " "(trustType == LSA_TRUST_TYPE_MIT).\n", domain); TALLOC_FREE(tmp_ctx); return false; } password_val = ldb_msg_find_ldb_val(msg, "trustAuthOutgoing"); if (password_val == NULL) { DEBUG(2, ("Failed to get trusted domain password for %s, " "attribute trustAuthOutgoing not returned.\n", domain)); TALLOC_FREE(tmp_ctx); return false; } ndr_err = ndr_pull_struct_blob(password_val, tmp_ctx, &password_blob, (ndr_pull_flags_fn_t)ndr_pull_trustAuthInOutBlob); if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { DEBUG(0, ("Failed to get trusted domain password for %s, " "attribute trustAuthOutgoing could not be parsed %s.\n", domain, ndr_map_error2string(ndr_err))); TALLOC_FREE(tmp_ctx); return false; } auth_array = &password_blob.current; for (i=0; i < auth_array->count; i++) { if (auth_array->array[i].AuthType == TRUST_AUTH_TYPE_CLEAR) { break; } } if (i == auth_array->count) { DEBUG(0, ("Trusted domain %s does not have a " "clear-text password stored\n", domain)); TALLOC_FREE(tmp_ctx); return false; } password_utf16 = data_blob_const(auth_array->array[i].AuthInfo.clear.password, auth_array->array[i].AuthInfo.clear.size); /* * In the future, make this function return a * cli_credentials that can store a MD4 hash with cli_credential_set_nt_hash() * but for now convert to UTF8 and fail if the string can not be converted. * * We can't safely convert the random strings windows uses into * utf8. 
*/ if (!convert_string_talloc(tmp_ctx, CH_UTF16MUNGED, CH_UTF8, password_utf16.data, password_utf16.length, (void *)&password_talloc, &password_len)) { DEBUG(0, ("FIXME: Could not convert password for trusted domain %s" " to UTF8. This may be a password set from Windows.\n", domain)); TALLOC_FREE(tmp_ctx); return false; } *pwd = SMB_STRNDUP(password_talloc, password_len); if (pass_last_set_time) { *pass_last_set_time = nt_time_to_unix(auth_array->array[i].LastUpdateTime); } if (sid != NULL) { sid_copy(sid, domain_sid); } TALLOC_FREE(tmp_ctx); return true; }
Safe
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
4.292360006289446e+37
150
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
0
xcf_load_add_masks (GimpImage *image) { GList *layers; GList *list; layers = gimp_image_get_layer_list (image); for (list = layers; list; list = g_list_next (list)) { GimpLayer *layer = list->data; GimpLayerMask *mask; mask = g_object_get_data (G_OBJECT (layer), "gimp-layer-mask"); if (mask) { gimp_layer_add_mask (layer, mask, FALSE, NULL); g_object_set_data (G_OBJECT (layer), "gimp-layer-mask", NULL); } } g_list_free (layers); }
Safe
[ "CWE-416" ]
gimp
e82aaa4b4ee0703c879e35ea9321fff6be3e9b6f
4.896059580746893e+37
24
Bug 767873 - (CVE-2016-4994) Multiple Use-After-Free when parsing... ...XCF channel and layer properties The properties PROP_ACTIVE_LAYER, PROP_FLOATING_SELECTION and PROP_ACTIVE_CHANNEL save the current object pointer in the @info structure. Others like PROP_SELECTION (for channel) and PROP_GROUP_ITEM (for layer) will delete the current object and create a new object, leaving the pointers in @info invalid (dangling). Therefore, if a property of the first type comes before one of the second, the result will be a UaF in the last lines of xcf_load_image (when it actually uses the pointers from @info). I wasn't able to exploit this bug because g_object_instance->c_class gets cleared by the last g_object_unref and GIMP_IS_{LAYER,CHANNEL} detects that and returns FALSE. (cherry picked from commit 6d804bf9ae77bc86a0a97f9b944a129844df9395)
0
int dd_delete_item(struct dump_dir *dd, const char *name) { if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ if (!str_is_correct_filename(name)) error_msg_and_die("Cannot delete item. '%s' is not a valid file name", name); char *path = concat_path_file(dd->dd_dirname, name); int res = unlink(path); if (res < 0) { if (errno == ENOENT) errno = res = 0; else perror_msg("Can't delete file '%s'", path); } free(path); return res; }
Vulnerable
[ "CWE-20" ]
libreport
1951e7282043dfe1268d492aea056b554baedb75
1.6489174776020392e+36
22
lib: fix races in dump directory handling code Florian Weimer <fweimer@redhat.com>: dd_opendir() should keep a file handle (opened with O_DIRECTORY) and use openat() and similar functions to access files in it. ... The file system manipulation functions should guard against hard links (check that link count is <= 1, just as in the user coredump code in abrt-hook-ccpp), possibly after opening the file with O_PATH first to avoid side effects on open/close. Related: #1214745 Signed-off-by: Jakub Filak <jfilak@redhat.com>
1
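The libreport record above recommends keeping an O_DIRECTORY handle, resolving entries with openat(), and guarding against hard links (link count <= 1). A small POSIX sketch of that pattern with a hypothetical item name; it is not the dump_dir implementation:

#include <cstdio>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

// POSIX sketch of the hardening described in the message: keep a directory
// fd (O_DIRECTORY), resolve entries with openat(), and refuse regular files
// with more than one hard link. Illustrative only, not the libreport code.
static int dd_open_item(int dir_fd, const char *name) {
  int fd = openat(dir_fd, name, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
  if (fd < 0) return -1;
  struct stat st;
  if (fstat(fd, &st) != 0 || !S_ISREG(st.st_mode) || st.st_nlink > 1) {
    close(fd);                      // reject symlinks, non-regular files, hard links
    return -1;
  }
  return fd;
}

int main() {
  int dir_fd = open(".", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
  if (dir_fd < 0) return 1;
  int fd = dd_open_item(dir_fd, "coredump");   // "coredump" is a made-up item name
  std::printf("item fd: %d\n", fd);
  if (fd >= 0) close(fd);
  close(dir_fd);
}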
int main(int argc, char *argv[]) { SERVER* serve; GArray* servers; if (sizeof( struct nbd_request )!=28) { fprintf(stderr,"Bad size of structure. Alignment problems?\n"); exit(-1) ; } logging(); serve=cmdline(argc, argv); servers=g_array_new(TRUE, FALSE, sizeof(SERVER*)); if (!(serve->port)) { CLIENT *client; #ifndef ISSERVER /* You really should define ISSERVER if you're going to use * inetd mode, but if you don't, closing stdout and stderr * (which inetd had connected to the client socket) will let it * work. */ close(1); close(2); open("/dev/null", O_WRONLY); open("/dev/null", O_WRONLY); #endif client=g_malloc(sizeof(CLIENT)); client->server=serve; client->net=0; client->exportsize=OFFT_MAX; set_peername(0,client); serveconnection(client); return 0; } daemonize(serve); setup_serve(serve); serveloop(serve); return 0 ; }
Safe
[ "CWE-119" ]
nbd
4ed24fe0d64c7cc9963c57b52cad1555ad7c6b60
8.863646778606829e+36
38
r134: CVE-2005-3534
0
static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = kobj_to_dev(kobj); struct bmc_device *bmc = to_bmc_device(dev); umode_t mode = attr->mode; int rv; if (attr == &dev_attr_aux_firmware_revision.attr) { struct ipmi_device_id id; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); return (!rv && id.aux_firmware_revision_set) ? mode : 0; } if (attr == &dev_attr_guid.attr) { bool guid_set; rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); return (!rv && guid_set) ? mode : 0; } return mode; }
Safe
[ "CWE-416", "CWE-284" ]
linux
77f8269606bf95fcb232ee86f6da80886f1dfae8
3.0796843016490352e+38
22
ipmi: fix use-after-free of user->release_barrier.rda When we do the following test, we got oops in ipmi_msghandler driver while((1)) do service ipmievd restart & service ipmievd restart done --------------------------------------------------------------- [ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008 [ 294.230188] Mem abort info: [ 294.230190] ESR = 0x96000004 [ 294.230191] Exception class = DABT (current EL), IL = 32 bits [ 294.230193] SET = 0, FnV = 0 [ 294.230194] EA = 0, S1PTW = 0 [ 294.230195] Data abort info: [ 294.230196] ISV = 0, ISS = 0x00000004 [ 294.230197] CM = 0, WnR = 0 [ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a [ 294.230201] [0000803fea6ea008] pgd=0000000000000000 [ 294.230204] Internal error: Oops: 96000004 [#1] SMP [ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio [ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113 [ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017 [ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO) [ 294.297695] pc : __srcu_read_lock+0x38/0x58 [ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.307853] sp : ffff00001001bc80 [ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000 [ 294.316594] x27: 0000000000000000 x26: dead000000000100 [ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800 [ 294.327366] x23: 0000000000000000 x22: 0000000000000000 [ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018 [ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000 [ 294.343523] x17: 0000000000000000 x16: 0000000000000000 [ 294.348908] x15: 0000000000000000 x14: 0000000000000002 [ 294.354293] x13: 0000000000000000 x12: 0000000000000000 [ 294.359679] x11: 0000000000000000 x10: 0000000000100000 [ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004 [ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678 [ 294.375836] x5 : 000000000000000c x4 : 0000000000000000 [ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000 [ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001 [ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293) [ 294.398791] Call trace: [ 294.401266] __srcu_read_lock+0x38/0x58 [ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler] [ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler] [ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler] [ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler] [ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler] [ 294.451618] tasklet_action_common.isra.5+0x88/0x138 [ 294.460661] tasklet_action+0x2c/0x38 [ 294.468191] __do_softirq+0x120/0x2f8 [ 294.475561] irq_exit+0x134/0x140 [ 294.482445] __handle_domain_irq+0x6c/0xc0 [ 294.489954] gic_handle_irq+0xb8/0x178 [ 294.497037] el1_irq+0xb0/0x140 [ 294.503381] arch_cpu_idle+0x34/0x1a8 [ 294.510096] do_idle+0x1d4/0x290 [ 294.516322] cpu_startup_entry+0x28/0x30 [ 294.523230] secondary_start_kernel+0x184/0x1d0 [ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 
(c85f7c25) [ 294.539746] ---[ end trace 8a7a880dee570b29 ]--- [ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt [ 294.556837] SMP: stopping secondary CPUs [ 294.563996] Kernel Offset: disabled [ 294.570515] CPU features: 0x002,21006008 [ 294.577638] Memory Limit: none [ 294.587178] Starting crashdump kernel... [ 294.594314] Bye! Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda in __srcu_read_lock(), it causes oops. Fix this by calling cleanup_srcu_struct() when the refcount is zero. Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove") Cc: stable@vger.kernel.org # 4.18 Signed-off-by: Yang Yingliang <yangyingliang@huawei.com> Signed-off-by: Corey Minyard <cminyard@mvista.com>
0
bool setup_tables(THD *thd, Name_resolution_context *context, List<TABLE_LIST> *from_clause, TABLE_LIST *tables, List<TABLE_LIST> &leaves, bool select_insert, bool full_table_list) { uint tablenr= 0; List_iterator<TABLE_LIST> ti(leaves); TABLE_LIST *table_list; DBUG_ENTER("setup_tables"); DBUG_ASSERT ((select_insert && !tables->next_name_resolution_table) || !tables || (context->table_list && context->first_name_resolution_table)); /* this is used for INSERT ... SELECT. For select we setup tables except first (and its underlying tables) */ TABLE_LIST *first_select_table= (select_insert ? tables->next_local: 0); SELECT_LEX *select_lex= select_insert ? thd->lex->first_select_lex() : thd->lex->current_select; if (select_lex->first_cond_optimization) { leaves.empty(); if (select_lex->prep_leaf_list_state != SELECT_LEX::SAVED) { make_leaves_list(thd, leaves, tables, full_table_list, first_select_table); select_lex->prep_leaf_list_state= SELECT_LEX::READY; select_lex->leaf_tables_exec.empty(); } else { List_iterator_fast <TABLE_LIST> ti(select_lex->leaf_tables_prep); while ((table_list= ti++)) leaves.push_back(table_list, thd->mem_root); } while ((table_list= ti++)) { TABLE *table= table_list->table; if (table) table->pos_in_table_list= table_list; if (first_select_table && table_list->top_table() == first_select_table) { /* new counting for SELECT of INSERT ... SELECT command */ first_select_table= 0; thd->lex->first_select_lex()->insert_tables= tablenr; tablenr= 0; } if(table_list->jtbm_subselect) { table_list->jtbm_table_no= tablenr; } else if (table) { table->pos_in_table_list= table_list; setup_table_map(table, table_list, tablenr); if (table_list->process_index_hints(table)) DBUG_RETURN(1); } tablenr++; /* We test the max tables here as we setup_table_map() should not be called with tablenr >= 64 */ if (tablenr > MAX_TABLES) { my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES)); DBUG_RETURN(1); } } } else { List_iterator_fast <TABLE_LIST> ti(select_lex->leaf_tables_exec); select_lex->leaf_tables.empty(); while ((table_list= ti++)) { if(table_list->jtbm_subselect) { table_list->jtbm_table_no= table_list->tablenr_exec; } else { table_list->table->tablenr= table_list->tablenr_exec; table_list->table->map= table_list->map_exec; table_list->table->maybe_null= table_list->maybe_null_exec; table_list->table->pos_in_table_list= table_list; if (table_list->process_index_hints(table_list->table)) DBUG_RETURN(1); } select_lex->leaf_tables.push_back(table_list); } } for (table_list= tables; table_list; table_list= table_list->next_local) { if (table_list->merge_underlying_list) { DBUG_ASSERT(table_list->is_merged_derived()); Query_arena *arena, backup; arena= thd->activate_stmt_arena_if_needed(&backup); bool res; res= table_list->setup_underlying(thd); if (arena) thd->restore_active_arena(arena, &backup); if (res) DBUG_RETURN(1); } if (table_list->jtbm_subselect) { Item *item= table_list->jtbm_subselect->optimizer; if (!table_list->jtbm_subselect->optimizer->fixed && table_list->jtbm_subselect->optimizer->fix_fields(thd, &item)) { my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES)); /* psergey-todo: WHY ER_TOO_MANY_TABLES ???*/ DBUG_RETURN(1); } DBUG_ASSERT(item == table_list->jtbm_subselect->optimizer); } } /* Precompute and store the row types of NATURAL/USING joins. */ if (setup_natural_join_row_types(thd, from_clause, context)) DBUG_RETURN(1); DBUG_RETURN(0); }
Safe
[ "CWE-416" ]
server
0beed9b5e933f0ff79b3bb346524f7a451d14e38
8.677348960232889e+37
134
MDEV-28097 use-after-free when WHERE has subquery with an outer reference in HAVING when resolving WHERE and ON clauses, do not look in SELECT list/aliases.
0
f_executable(typval_T *argvars, typval_T *rettv) { char_u *name = tv_get_string(&argvars[0]); /* Check in $PATH and also check directly if there is a directory name. */ rettv->vval.v_number = mch_can_exe(name, NULL, TRUE) || (gettail(name) != name && mch_can_exe(name, NULL, FALSE)); }
Safe
[ "CWE-78" ]
vim
8c62a08faf89663e5633dc5036cd8695c80f1075
8.289749190810287e+37
8
patch 8.1.0881: can execute shell commands in rvim through interfaces Problem: Can execute shell commands in rvim through interfaces. Solution: Disable using interfaces in restricted mode. Allow for writing file with writefile(), histadd() and a few others.
0
void print_session_id(int64_t id) { std::cout << "[id=" << id << "] "; }
Safe
[]
nghttp2
95efb3e19d174354ca50c65d5d7227d92bcd60e1
1.7936383488590593e+38
1
Don't read too greedily
0
static int cirrus_bitblt_common_patterncopy(CirrusVGAState * s, const uint8_t * src) { uint8_t *dst; dst = s->vram_ptr + s->cirrus_blt_dstaddr; (*s->cirrus_rop) (s, dst, src, s->cirrus_blt_dstpitch, 0, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); return 1; }
Vulnerable
[ "CWE-787" ]
qemu
b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef
1.7663022863923554e+38
14
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
1
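The cirrus record above (the vulnerable version) performs the ROP and the dirty-region pass without checking that the blit destination region fits in video memory. A toy bounds check over (addr, pitch, width, height) with an illustrative VRAM size; it only models the missing validation, not the device emulation:

#include <cstdint>
#include <cstdio>

// Toy model of the missing check: the destination region described by
// (addr, pitch, width, height) must lie entirely inside video memory before
// any copy or "mark dirty" pass touches it. Constants are illustrative.
constexpr uint32_t kVramSize = 4u * 1024 * 1024;

static bool blit_region_ok(uint32_t addr, uint32_t pitch,
                           uint32_t width, uint32_t height) {
  if (width == 0 || height == 0) return false;
  if (height > 1 && width > pitch) return false;       // rows would overrun each other
  uint64_t last = static_cast<uint64_t>(addr) +
                  static_cast<uint64_t>(pitch) * (height - 1) + width;
  return last <= kVramSize;                            // no byte beyond VRAM
}

int main() {
  std::printf("ok=%d overflow=%d\n",
              blit_region_ok(0, 1024, 640, 480),
              blit_region_ok(kVramSize - 16, 1024, 640, 480));  // would overflow
}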
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret) { MirrorBlockJob *s = op->s; struct iovec *iov; int64_t chunk_num; int i, nb_chunks; trace_mirror_iteration_done(s, op->offset, op->bytes, ret); s->in_flight--; s->bytes_in_flight -= op->bytes; iov = op->qiov.iov; for (i = 0; i < op->qiov.niov; i++) { MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base; QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next); s->buf_free_count++; } chunk_num = op->offset / s->granularity; nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks); QTAILQ_REMOVE(&s->ops_in_flight, op, next); if (ret >= 0) { if (s->cow_bitmap) { bitmap_set(s->cow_bitmap, chunk_num, nb_chunks); } if (!s->initial_zeroing_ongoing) { job_progress_update(&s->common.job, op->bytes); } } qemu_iovec_destroy(&op->qiov); qemu_co_queue_restart_all(&op->waiting_requests); g_free(op); }
Safe
[ "CWE-476" ]
qemu
66fed30c9cd11854fc878a4eceb507e915d7c9cd
5.705875834282388e+37
36
block/mirror: fix NULL pointer dereference in mirror_wait_on_conflicts() In mirror_iteration() we call mirror_wait_on_conflicts() with `self` parameter set to NULL. Starting from commit d44dae1a7c we dereference `self` pointer in mirror_wait_on_conflicts() without checks if it is not NULL. Backtrace: Program terminated with signal SIGSEGV, Segmentation fault. #0 mirror_wait_on_conflicts (self=0x0, s=<optimized out>, offset=<optimized out>, bytes=<optimized out>) at ../block/mirror.c:172 172 self->waiting_for_op = op; [Current thread is 1 (Thread 0x7f0908931ec0 (LWP 380249))] (gdb) bt #0 mirror_wait_on_conflicts (self=0x0, s=<optimized out>, offset=<optimized out>, bytes=<optimized out>) at ../block/mirror.c:172 #1 0x00005610c5d9d631 in mirror_run (job=0x5610c76a2c00, errp=<optimized out>) at ../block/mirror.c:491 #2 0x00005610c5d58726 in job_co_entry (opaque=0x5610c76a2c00) at ../job.c:917 #3 0x00005610c5f046c6 in coroutine_trampoline (i0=<optimized out>, i1=<optimized out>) at ../util/coroutine-ucontext.c:173 #4 0x00007f0909975820 in ?? () at ../sysdeps/unix/sysv/linux/x86_64/__start_context.S:91 from /usr/lib64/libc.so.6 Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=2001404 Fixes: d44dae1a7c ("block/mirror: fix active mirror dead-lock in mirror_wait_on_conflicts") Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> Message-Id: <20210910124533.288318-1-sgarzare@redhat.com> Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Signed-off-by: Hanna Reitz <hreitz@redhat.com>
0
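The QEMU mirror record above fixes a NULL dereference: mirror_iteration() passes self == NULL, so the waiter bookkeeping must tolerate a missing waiter. A minimal sketch of that guard with stand-in types, not the block-job structures:

#include <cstdio>

// Minimal shape of the fix described in the message: the waiter bookkeeping
// must tolerate a null 'self' (a caller that has no operation of its own).
struct Op { const char *name; };
struct Waiter { Op *waiting_for_op = nullptr; };

static void wait_on_conflicts(Waiter *self, Op *conflicting) {
  if (self) self->waiting_for_op = conflicting;   // only record when there is a waiter
  // ... wait for 'conflicting' to finish ...
  if (self) self->waiting_for_op = nullptr;
}

int main() {
  Op op{"copy"};
  Waiter w;
  wait_on_conflicts(&w, &op);       // normal caller
  wait_on_conflicts(nullptr, &op);  // mirror_iteration-style caller: no crash
  std::printf("done\n");
}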
static void kiocb_free(struct kiocb *req) { if (req->ki_filp) fput(req->ki_filp); if (req->ki_eventfd != NULL) eventfd_ctx_put(req->ki_eventfd); kmem_cache_free(kiocb_cachep, req); }
Safe
[ "CWE-399" ]
linux
d558023207e008a4476a3b7bb8706b2a2bf5d84f
7.08506494897114e+37
8
aio: prevent double free in ioctx_alloc ioctx_alloc() calls aio_setup_ring() to allocate a ring. If aio_setup_ring() fails to do so, it would call aio_free_ring() before returning, but ioctx_alloc() would call aio_free_ring() again, causing a double free of the ring. This is easily reproducible from userspace. Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
0
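The aio record above describes a double free: the callee already released the ring on failure, so the caller must not release it again. A toy model of that ownership rule (an idempotent free plus no duplicate call); it illustrates the idea only and is not the kernel fix verbatim:

#include <cstdio>
#include <cstdlib>

// Toy model of the double free described in the message: on failure the
// callee already released the buffer, so the caller must not release it again.
struct Ring { int *pages = nullptr; };

static void free_ring(Ring &r) { std::free(r.pages); r.pages = nullptr; }

static bool setup_ring(Ring &r, bool fail) {
  r.pages = static_cast<int *>(std::malloc(4096));
  if (fail) { free_ring(r); return false; }    // callee cleans up and reports failure
  return true;
}

static bool alloc_ctx(bool fail) {
  Ring r;
  if (!setup_ring(r, fail))
    return false;                              // don't call free_ring() again here
  // ... use the ring ...
  free_ring(r);
  return true;
}

int main() { std::printf("%d %d\n", alloc_ctx(false), alloc_ctx(true)); }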
void zend_shared_alloc_clear_xlat_table(void) { zend_hash_clean(&xlat_table); }
Safe
[ "CWE-416" ]
php-src
777c39f4042327eac4b63c7ee87dc1c7a09a3115
2.472009571993469e+37
4
Fixed #68677
0
struct file *get_mm_exe_file(struct mm_struct *mm) { struct file *exe_file; rcu_read_lock(); exe_file = rcu_dereference(mm->exe_file); if (exe_file && !get_file_rcu(exe_file)) exe_file = NULL; rcu_read_unlock(); return exe_file; }
Safe
[ "CWE-416", "CWE-703" ]
linux
2b7e8665b4ff51c034c55df3cff76518d1a9ee3a
5.793607271202538e+36
11
fork: fix incorrect fput of ->exe_file causing use-after-free Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable") made it possible to kill a forking task while it is waiting to acquire its ->mmap_sem for write, in dup_mmap(). However, it was overlooked that this introduced an new error path before a reference is taken on the mm_struct's ->exe_file. Since the ->exe_file of the new mm_struct was already set to the old ->exe_file by the memcpy() in dup_mm(), it was possible for the mmput() in the error path of dup_mm() to drop a reference to ->exe_file which was never taken. This caused the struct file to later be freed prematurely. Fix it by updating mm_init() to NULL out the ->exe_file, in the same place it clears other things like the list of mmaps. This bug was found by syzkaller. It can be reproduced using the following C program: #define _GNU_SOURCE #include <pthread.h> #include <stdlib.h> #include <sys/mman.h> #include <sys/syscall.h> #include <sys/wait.h> #include <unistd.h> static void *mmap_thread(void *_arg) { for (;;) { mmap(NULL, 0x1000000, PROT_READ, MAP_POPULATE|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); } } static void *fork_thread(void *_arg) { usleep(rand() % 10000); fork(); } int main(void) { fork(); fork(); fork(); for (;;) { if (fork() == 0) { pthread_t t; pthread_create(&t, NULL, mmap_thread, NULL); pthread_create(&t, NULL, fork_thread, NULL); usleep(rand() % 10000); syscall(__NR_exit_group, 0); } wait(NULL); } } No special kernel config options are needed. It usually causes a NULL pointer dereference in __remove_shared_vm_struct() during exit, or in dup_mmap() (which is usually inlined into copy_process()) during fork. Both are due to a vm_area_struct's ->vm_file being used after it's already been freed. Google Bug Id: 64772007 Link: http://lkml.kernel.org/r/20170823211408.31198-1-ebiggers3@gmail.com Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable") Signed-off-by: Eric Biggers <ebiggers@google.com> Tested-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: <stable@vger.kernel.org> [v4.7+] Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
session_update_local_initial_window_size(nghttp2_session *session, int32_t new_initial_window_size, int32_t old_initial_window_size) { nghttp2_update_window_size_arg arg; arg.session = session; arg.new_window_size = new_initial_window_size; arg.old_window_size = old_initial_window_size; return nghttp2_map_each(&session->streams, update_local_initial_window_size_func, &arg); }
Safe
[]
nghttp2
0a6ce87c22c69438ecbffe52a2859c3a32f1620f
2.0535679509421027e+38
10
Add nghttp2_option_set_max_outbound_ack
0
static int add_iov(conn *c, const void *buf, int len) { struct msghdr *m; int leftover; bool limit_to_mtu; assert(c != NULL); do { m = &c->msglist[c->msgused - 1]; /* * Limit UDP packets, and the first payloads of TCP replies, to * UDP_MAX_PAYLOAD_SIZE bytes. */ limit_to_mtu = IS_UDP(c->transport) || (1 == c->msgused); /* We may need to start a new msghdr if this one is full. */ if (m->msg_iovlen == IOV_MAX || (limit_to_mtu && c->msgbytes >= UDP_MAX_PAYLOAD_SIZE)) { add_msghdr(c); m = &c->msglist[c->msgused - 1]; } if (ensure_iov_space(c) != 0) return -1; /* If the fragment is too big to fit in the datagram, split it up */ if (limit_to_mtu && len + c->msgbytes > UDP_MAX_PAYLOAD_SIZE) { leftover = len + c->msgbytes - UDP_MAX_PAYLOAD_SIZE; len -= leftover; } else { leftover = 0; } m = &c->msglist[c->msgused - 1]; m->msg_iov[m->msg_iovlen].iov_base = (void *)buf; m->msg_iov[m->msg_iovlen].iov_len = len; c->msgbytes += len; c->iovused++; m->msg_iovlen++; buf = ((char *)buf) + len; len = leftover; } while (leftover > 0); return 0; }
Safe
[ "CWE-189" ]
memcached
6695ccbc525c36d693aaa3e8337b36aa0c784424
2.504360930772634e+38
48
Fix segfault on specially crafted packet.
0
explicit SparseSegmentReductionSqrtNOp(OpKernelConstruction* context)
    : SparseSegmentReductionOpBase<Device, T>(
          context, false /*is_mean*/, true /*is_sqrtn*/,
          false /* has_num_segments */, T(0) /* default_value */) {}
Safe
[ "CWE-703", "CWE-681", "CWE-787" ]
tensorflow
db4f9717c41bccc3ce10099ab61996b246099892
3.2912823461052664e+38
4
Fix heap buffer overflow in UnsortedSegmentSum. When Index=int32, data_size and num_segments were truncated from int64 to int32. This truncation can produce negative numbers, which causes UnsortedSegmentFunctor to access out of bounds memory. Also: - Switches some indexing calculations to int64 to avoid signed integer overflow when either the input or output tensors have more than 2**31 - 1 elements. - Fixes a range check error in the GPU kernel. The segment ID was checked against an upper bound measured in elements, not segments. PiperOrigin-RevId: 256451663
0
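The message above attributes the overflow to int64-to-int32 truncation of data_size and num_segments. A minimal standalone sketch of that failure mode, independent of the TensorFlow sources themselves:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t data_size = INT64_C(3) * 1024 * 1024 * 1024; /* > 2^31 - 1 elements */
    int32_t truncated = (int32_t)data_size;              /* implementation-defined, typically negative */
    printf("as int32: %d\n", truncated);
    /* A loop bound or offset computed from the truncated value is skipped or,
     * worse, used in pointer arithmetic that walks outside the buffer.
     * Keeping the index math in int64_t, as the fix does, preserves the real size. */
    printf("as int64: %lld\n", (long long)data_size);
    return 0;
}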
ex_buffer_all(exarg_T *eap) { buf_T *buf; win_T *wp, *wpnext; int split_ret = OK; int p_ea_save; int open_wins = 0; int r; int count; // Maximum number of windows to open. int all; // When TRUE also load inactive buffers. int had_tab = cmdmod.cmod_tab; tabpage_T *tpnext; if (eap->addr_count == 0) // make as many windows as possible count = 9999; else count = eap->line2; // make as many windows as specified if (eap->cmdidx == CMD_unhide || eap->cmdidx == CMD_sunhide) all = FALSE; else all = TRUE; setpcmark(); #ifdef FEAT_GUI need_mouse_correct = TRUE; #endif /* * Close superfluous windows (two windows for the same buffer). * Also close windows that are not full-width. */ if (had_tab > 0) goto_tabpage_tp(first_tabpage, TRUE, TRUE); for (;;) { tpnext = curtab->tp_next; for (wp = firstwin; wp != NULL; wp = wpnext) { wpnext = wp->w_next; if ((wp->w_buffer->b_nwindows > 1 || ((cmdmod.cmod_split & WSP_VERT) ? wp->w_height + wp->w_status_height < Rows - p_ch - tabline_height() : wp->w_width != Columns) || (had_tab > 0 && wp != firstwin)) && !ONE_WINDOW && !(wp->w_closing || wp->w_buffer->b_locked > 0) && !win_unlisted(wp)) { if (win_close(wp, FALSE) == FAIL) break; // Just in case an autocommand does something strange with // windows: start all over... wpnext = firstwin; tpnext = first_tabpage; open_wins = 0; } else ++open_wins; } // Without the ":tab" modifier only do the current tab page. if (had_tab == 0 || tpnext == NULL) break; goto_tabpage_tp(tpnext, TRUE, TRUE); } /* * Go through the buffer list. When a buffer doesn't have a window yet, * open one. Otherwise move the window to the right position. * Watch out for autocommands that delete buffers or windows! */ // Don't execute Win/Buf Enter/Leave autocommands here. ++autocmd_no_enter; win_enter(lastwin, FALSE); ++autocmd_no_leave; for (buf = firstbuf; buf != NULL && open_wins < count; buf = buf->b_next) { // Check if this buffer needs a window if ((!all && buf->b_ml.ml_mfp == NULL) || !buf->b_p_bl) continue; if (had_tab != 0) { // With the ":tab" modifier don't move the window. if (buf->b_nwindows > 0) wp = lastwin; // buffer has a window, skip it else wp = NULL; } else { // Check if this buffer already has a window FOR_ALL_WINDOWS(wp) if (wp->w_buffer == buf) break; // If the buffer already has a window, move it if (wp != NULL) win_move_after(wp, curwin); } if (wp == NULL && split_ret == OK) { bufref_T bufref; set_bufref(&bufref, buf); // Split the window and put the buffer in it p_ea_save = p_ea; p_ea = TRUE; // use space from all windows split_ret = win_split(0, WSP_ROOM | WSP_BELOW); ++open_wins; p_ea = p_ea_save; if (split_ret == FAIL) continue; // Open the buffer in this window. swap_exists_action = SEA_DIALOG; set_curbuf(buf, DOBUF_GOTO); if (!bufref_valid(&bufref)) { // autocommands deleted the buffer!!! swap_exists_action = SEA_NONE; break; } if (swap_exists_action == SEA_QUIT) { #if defined(FEAT_EVAL) cleanup_T cs; // Reset the error/interrupt/exception state here so that // aborting() returns FALSE when closing a window. enter_cleanup(&cs); #endif // User selected Quit at ATTENTION prompt; close this window. win_close(curwin, TRUE); --open_wins; swap_exists_action = SEA_NONE; swap_exists_did_quit = TRUE; #if defined(FEAT_EVAL) // Restore the error/interrupt/exception state if not // discarded by a new aborting error, interrupt, or uncaught // exception. 
leave_cleanup(&cs); #endif } else handle_swap_exists(NULL); } ui_breakcheck(); if (got_int) { (void)vgetc(); // only break the file loading, not the rest break; } #ifdef FEAT_EVAL // Autocommands deleted the buffer or aborted script processing!!! if (aborting()) break; #endif // When ":tab" was used open a new tab for a new window repeatedly. if (had_tab > 0 && tabpage_index(NULL) <= p_tpm) cmdmod.cmod_tab = 9999; } --autocmd_no_enter; win_enter(firstwin, FALSE); // back to first window --autocmd_no_leave; /* * Close superfluous windows. */ for (wp = lastwin; open_wins > count; ) { r = (buf_hide(wp->w_buffer) || !bufIsChanged(wp->w_buffer) || autowrite(wp->w_buffer, FALSE) == OK); if (!win_valid(wp)) { // BufWrite Autocommands made the window invalid, start over wp = lastwin; } else if (r) { win_close(wp, !buf_hide(wp->w_buffer)); --open_wins; wp = lastwin; } else { wp = wp->w_prev; if (wp == NULL) break; } } }
Safe
[ "CWE-476" ]
vim
8e4b76da1d7e987d43ca960dfbc372d1c617466f
1.5583460334617405e+38
198
patch 8.2.4901: NULL pointer access when using invalid pattern Problem: NULL pointer access when using invalid pattern. Solution: Check for failed regexp program.
0
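The one-line problem/solution in the vim record above, never run a regexp program that failed to compile, generalizes to any regex API. A small sketch using POSIX regex as a stand-in; this is not Vim's internal regprog API:

#include <regex.h>
#include <stdio.h>

int main(void)
{
    regex_t prog;
    /* "\\(" is an unterminated group in BRE, so compilation fails. */
    int err = regcomp(&prog, "\\(", 0);
    if (err != 0) {                 /* the guard the patch adds, in spirit */
        char msg[128];
        regerror(err, &prog, msg, sizeof(msg));
        fprintf(stderr, "invalid pattern: %s\n", msg);
        return 1;                   /* never call regexec() on a failed program */
    }
    regexec(&prog, "text", 0, NULL, 0);
    regfree(&prog);
    return 0;
}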
command_subst_completion_function (text, state) const char *text; int state; { static char **matches = (char **)NULL; static const char *orig_start; static char *filename_text = (char *)NULL; static int cmd_index, start_len; char *value; if (state == 0) { if (filename_text) free (filename_text); orig_start = text; if (*text == '`') text++; else if (*text == '$' && text[1] == '(') /* ) */ text += 2; /* If the text was quoted, suppress any quote character that the readline completion code would insert. */ rl_completion_suppress_quote = 1; start_len = text - orig_start; filename_text = savestring (text); if (matches) free (matches); /* * At this point we can entertain the idea of re-parsing * `filename_text' into a (possibly incomplete) command name and * arguments, and doing completion based on that. This is * currently very rudimentary, but it is a small improvement. */ for (value = filename_text + strlen (filename_text) - 1; value > filename_text; value--) if (whitespace (*value) || member (*value, COMMAND_SEPARATORS)) break; if (value <= filename_text) matches = rl_completion_matches (filename_text, command_word_completion_function); else { value++; start_len += value - filename_text; if (whitespace (value[-1])) matches = rl_completion_matches (value, rl_filename_completion_function); else matches = rl_completion_matches (value, command_word_completion_function); } /* If there is more than one match, rl_completion_matches has already put the lcd in matches[0]. Skip over it. */ cmd_index = matches && matches[0] && matches[1]; /* If there's a single match and it's a directory, set the append char to the expected `/'. Otherwise, don't append anything. */ if (matches && matches[0] && matches[1] == 0 && test_for_directory (matches[0])) rl_completion_append_character = '/'; else rl_completion_suppress_append = 1; } if (matches == 0 || matches[cmd_index] == 0) { rl_filename_quoting_desired = 0; /* disable quoting */ return ((char *)NULL); } else { value = (char *)xmalloc (1 + start_len + strlen (matches[cmd_index])); if (start_len == 1) value[0] = *orig_start; else strncpy (value, orig_start, start_len); strcpy (value + start_len, matches[cmd_index]); cmd_index++; return (value); } }
Safe
[ "CWE-20" ]
bash
4f747edc625815f449048579f6e65869914dd715
3.1515914524538402e+38
80
Bash-4.4 patch 7
0
static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
	struct usbdevfs_connectinfo ci = {
		.devnum = ps->dev->devnum,
		.slow = ps->dev->speed == USB_SPEED_LOW
	};

	if (copy_to_user(arg, &ci, sizeof(ci)))
		return -EFAULT;
	return 0;
}
Vulnerable
[ "CWE-200" ]
linux
681fef8380eb818c0b845fca5d2ab1dcbab114ee
1.3077294431724822e+38
11
USB: usbfs: fix potential infoleak in devio The stack object “ci” has a total size of 8 bytes. Its last 3 bytes are padding bytes which are not initialized and leaked to userland via “copy_to_user”. Signed-off-by: Kangjie Lu <kjlu@gatech.edu> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1
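This is the one record above marked Vulnerable: the on-stack usbdevfs_connectinfo has three bytes of padding that copy_to_user() ships to userland. A userspace sketch of why the padding exists and of the zero-the-whole-object remedy; the upstream fix instead copies the members individually, and the struct below only mimics the layout:

#include <stdio.h>
#include <string.h>

struct usbdevfs_connectinfo_like {
    unsigned int devnum;   /* 4 bytes */
    unsigned char slow;    /* 1 byte + 3 bytes of padding on most ABIs */
};

int main(void)
{
    printf("sizeof = %zu\n", sizeof(struct usbdevfs_connectinfo_like));
    /* Designated initializers set named members only; padding keeps whatever
     * was on the stack. Zeroing the whole object first removes the leak. */
    struct usbdevfs_connectinfo_like ci;
    memset(&ci, 0, sizeof(ci));
    ci.devnum = 7;
    ci.slow = 0;
    /* copying sizeof(ci) bytes to user space now cannot expose stack data */
    return 0;
}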
int ssh_buffer_allocate_size(struct ssh_buffer_struct *buffer,
                             uint32_t len)
{
    buffer_verify(buffer);

    if (buffer->allocated < len) {
        if (buffer->pos > 0) {
            buffer_shift(buffer);
        }
        if (realloc_buffer(buffer, len) < 0) {
            return -1;
        }
    }

    buffer_verify(buffer);

    return 0;
}
Safe
[ "CWE-476" ]
libssh-mirror
10b3ebbe61a7031a3dae97f05834442220447181
1.0536449775237654e+38
18
buffer: Reformat ssh_buffer_add_data() Signed-off-by: Andreas Schneider <asn@cryptomilk.org> Reviewed-by: Anderson Toshiyuki Sasaki <ansasaki@redhat.com> Reviewed-by: Jakub Jelen <jjelen@redhat.com>
0
mono_event_get_object (MonoDomain *domain, MonoClass *klass, MonoEvent *event)
{
	MonoReflectionEvent *res;
	MonoReflectionMonoEvent *mono_event;
	static MonoClass *monoevent_klass;

	CHECK_OBJECT (MonoReflectionEvent *, event, klass);
	if (!monoevent_klass)
		monoevent_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoEvent");
	mono_event = (MonoReflectionMonoEvent *)mono_object_new (domain, monoevent_klass);
	mono_event->klass = klass;
	mono_event->event = event;
	res = (MonoReflectionEvent*)mono_event;
	CACHE_OBJECT (MonoReflectionEvent *, event, res, klass);
}
Safe
[ "CWE-20" ]
mono
4905ef1130feb26c3150b28b97e4a96752e0d399
1.7512620015659763e+38
15
Handle invalid instantiation of generic methods. * verify.c: Add new function to internal verifier API to check method instantiations. * reflection.c (mono_reflection_bind_generic_method_parameters): Check the instantiation before returning it. Fixes #655847
0
vips_foreign_load_init( VipsForeignLoad *load )
{
	load->disc = TRUE;
	load->access = VIPS_ACCESS_RANDOM;
}
Safe
[ "CWE-362", "CWE-476" ]
libvips
20d840e6da15c1574b3ed998bc92f91d1e36c2a5
1.964870654324285e+38
5
fix a crash with delayed load If a delayed load failed, it could leave the pipeline only half-set up. Subsequent threads could then segv. Set a load-has-failed flag and test before generate. See https://github.com/jcupitt/libvips/issues/893
0
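The libvips message above describes the fix as a "load has failed" flag tested before generate. A hedged sketch of that guard; the type and field names here are invented and are not the libvips API:

#include <stdio.h>

struct delayed_load {
    int failed;          /* hypothetical "load has failed" flag */
};

static int real_load(struct delayed_load *load)
{
    (void)load;
    return -1;           /* pretend the header parsed but the pixels did not */
}

static int generate(struct delayed_load *load)
{
    if (load->failed)    /* the guard the fix adds: never run on a half-built pipeline */
        return -1;
    /* ... touch pipeline state that only exists after a successful load ... */
    return 0;
}

int main(void)
{
    struct delayed_load load = { 0 };
    if (real_load(&load) != 0)
        load.failed = 1;
    if (generate(&load) != 0)
        fprintf(stderr, "load failed earlier; refusing to generate\n");
    return 0;
}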
int h2_make_htx_response(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len) { struct ist phdr_val[H2_PHDR_NUM_ENTRIES]; uint32_t fields; /* bit mask of H2_PHDR_FND_* */ uint32_t idx; int phdr; int ret; int i; uint32_t used = htx_used_space(htx); struct htx_sl *sl = NULL; unsigned int sl_flags = 0; const char *ctl; fields = 0; for (idx = 0; list[idx].n.len != 0; idx++) { if (!list[idx].n.ptr) { /* this is an indexed pseudo-header */ phdr = list[idx].n.len; } else { /* this can be any type of header */ /* RFC7540#8.1.2: upper case not allowed in header field names. * #10.3: header names must be valid (i.e. match a token). * For pseudo-headers we check from 2nd char and for other ones * from the first char, because HTTP_IS_TOKEN() also excludes * the colon. */ phdr = h2_str_to_phdr(list[idx].n); for (i = !!phdr; i < list[idx].n.len; i++) if ((uint8_t)(list[idx].n.ptr[i] - 'A') < 'Z' - 'A' || !HTTP_IS_TOKEN(list[idx].n.ptr[i])) goto fail; } /* RFC7540#10.3: intermediaries forwarding to HTTP/1 must take care of * rejecting NUL, CR and LF characters. */ ctl = ist_find_ctl(list[idx].v); if (unlikely(ctl) && has_forbidden_char(list[idx].v, ctl)) goto fail; if (phdr > 0 && phdr < H2_PHDR_NUM_ENTRIES) { /* insert a pseudo header by its index (in phdr) and value (in value) */ if (fields & ((1 << phdr) | H2_PHDR_FND_NONE)) { if (fields & H2_PHDR_FND_NONE) { /* pseudo header field after regular headers */ goto fail; } else { /* repeated pseudo header field */ goto fail; } } fields |= 1 << phdr; phdr_val[phdr] = list[idx].v; continue; } else if (phdr != 0) { /* invalid pseudo header -- should never happen here */ goto fail; } /* regular header field in (name,value) */ if (!(fields & H2_PHDR_FND_NONE)) { /* no more pseudo-headers, time to build the status line */ sl = h2_prepare_htx_stsline(fields, phdr_val, htx, msgf); if (!sl) goto fail; fields |= H2_PHDR_FND_NONE; } if (isteq(list[idx].n, ist("content-length"))) { ret = h2_parse_cont_len_header(msgf, &list[idx].v, body_len); if (ret < 0) goto fail; sl_flags |= HTX_SL_F_CLEN; if (ret == 0) continue; // skip this duplicate } /* these ones are forbidden in responses (RFC7540#8.1.2.2) */ if (isteq(list[idx].n, ist("connection")) || isteq(list[idx].n, ist("proxy-connection")) || isteq(list[idx].n, ist("keep-alive")) || isteq(list[idx].n, ist("upgrade")) || isteq(list[idx].n, ist("transfer-encoding"))) goto fail; if (!htx_add_header(htx, list[idx].n, list[idx].v)) goto fail; } /* RFC7540#8.1.2.1 mandates to reject request pseudo-headers */ if (fields & (H2_PHDR_FND_AUTH|H2_PHDR_FND_METH|H2_PHDR_FND_PATH|H2_PHDR_FND_SCHM)) goto fail; /* Let's dump the request now if not yet emitted. */ if (!(fields & H2_PHDR_FND_NONE)) { sl = h2_prepare_htx_stsline(fields, phdr_val, htx, msgf); if (!sl) goto fail; } if (!(*msgf & H2_MSGF_BODY) || ((*msgf & H2_MSGF_BODY_CL) && *body_len == 0)) sl_flags |= HTX_SL_F_BODYLESS; /* update the start line with last detected header info */ sl->flags |= sl_flags; if ((*msgf & (H2_MSGF_BODY|H2_MSGF_BODY_TUNNEL|H2_MSGF_BODY_CL)) == H2_MSGF_BODY) { /* FIXME: Do we need to signal anything when we have a body and * no content-length, to have the equivalent of H1's chunked * encoding? */ } /* now send the end of headers marker */ htx_add_endof(htx, HTX_BLK_EOH); /* Set bytes used in the HTX mesage for the headers now */ sl->hdrs_bytes = htx_used_space(htx) - used; ret = 1; return ret; fail: return -1; }
Safe
[ "CWE-74" ]
haproxy
146f53ae7e97dbfe496d0445c2802dd0a30b0878
2.104474342722706e+38
129
BUG/MAJOR: h2: make header field name filtering stronger Tim Düsterhus found that the amount of sanitization we perform on HTTP header field names received in H2 is insufficient. Currently we reject upper case letters as mandated by RFC7540#8.1.2, but section 10.3 also requires that intermediaries translating streams to HTTP/1 further refine the filtering to also reject invalid names (which means any name that doesn't match a token). There is a small trick here which is that the colon character used to start pseudo-header names doesn't match a token, so pseudo-header names fall into that category, thus we have to swap the pseudo-header name lookup with this check so that we only check from the second character (past the ':') in case of pseudo-header names. Another possibility could have been to perform this check only in the HTX-to-H1 transcoder, but doing so would still expose the configured rules and logs to such header names. This fix must be backported as far as 1.8 since this bug could be exploited and serve as the base for an attack. In 2.0 and earlier, functions h2_make_h1_request() and h2_make_h1_trailers() must also be adapted to sanitize requests coming in legacy mode.
0
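The haproxy commit above hinges on the RFC 7230 token rule, applied from the second character for pseudo-headers. A standalone validator sketch in that spirit; the character set and the outright rejection of upper case follow the message, but this is not the h2 code itself:

#include <stdio.h>
#include <string.h>

/* RFC 7230 tchar: letters, digits, and these punctuation characters */
static int is_tchar(unsigned char c)
{
    return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
           (c >= '0' && c <= '9') || strchr("!#$%&'*+-.^_`|~", c) != NULL;
}

static int valid_h2_name(const char *n, size_t len)
{
    size_t i = (len > 0 && n[0] == ':') ? 1 : 0;   /* skip ':' of pseudo-headers */
    if (i == len)
        return 0;
    for (; i < len; i++) {
        if (n[i] >= 'A' && n[i] <= 'Z')            /* upper case rejected outright */
            return 0;
        if (!is_tchar((unsigned char)n[i]))        /* anything outside tchar rejected */
            return 0;
    }
    return 1;
}

int main(void)
{
    const char *names[] = { ":path", "content-length", "X-Upper", "bad(header)" };
    for (int i = 0; i < 4; i++)
        printf("%-16s -> %s\n", names[i],
               valid_h2_name(names[i], strlen(names[i])) ? "ok" : "reject");
    return 0;
}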
void gf_sg_set_private(GF_SceneGraph *sg, void *ptr)
{
	if (sg) sg->userpriv = ptr;
}
Safe
[ "CWE-416" ]
gpac
9723dd0955894f2cb7be13b94cf7a47f2754b893
1.5999411769540583e+38
4
fixed #2109
0
xmlFARegDebugExec(xmlRegExecCtxtPtr exec) {
    printf("state: %d:%d:idx %d", exec->state->no, exec->transno, exec->index);
    if (exec->inputStack != NULL) {
	int i;
	printf(": ");
	for (i = 0;(i < 3) && (i < exec->inputStackNr);i++)
	    printf("%s ", (const char *)
	           exec->inputStack[exec->inputStackNr - (i + 1)].value);
    } else {
	printf(": %s", &(exec->inputString[exec->index]));
    }
    printf("\n");
}
Safe
[ "CWE-119" ]
libxml2
cbb271655cadeb8dbb258a64701d9a3a0c4835b4
3.3693729385639002e+38
13
Bug 757711: heap-buffer-overflow in xmlFAParsePosCharGroup <https://bugzilla.gnome.org/show_bug.cgi?id=757711> * xmlregexp.c: (xmlFAParseCharRange): Only advance to the next character if there is no error. Advancing to the next character in case of an error while parsing regexp leads to an out of bounds access.
0
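The libxml2 message above boils down to a cursor rule: consume input only when the parse step succeeded. A toy standalone illustration of that rule, not the xmlregexp.c code:

#include <stdio.h>

/* Toy cursor-based parser: the step reports an error instead of consuming
 * input, so the caller never walks the cursor past the end on failure. */
struct cursor { const char *cur; const char *end; };

static int parse_range_like(struct cursor *c)
{
    if (c->cur >= c->end || c->cur[0] != 'a')
        return -1;              /* error: leave the cursor where it is */
    c->cur++;                   /* advance only on success (the libxml2 fix) */
    return 0;
}

int main(void)
{
    const char *s = "b";
    struct cursor c = { s, s + 1 };
    while (parse_range_like(&c) == 0)
        ;
    printf("stopped at offset %td without reading past the end\n", c.cur - s);
    return 0;
}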
static huft_t* huft_build(const unsigned *b, const unsigned n, const unsigned s, const struct cp_ext *cp_ext, unsigned *m) { unsigned a; /* counter for codes of length k */ unsigned c[BMAX + 1]; /* bit length count table */ unsigned eob_len; /* length of end-of-block code (value 256) */ unsigned f; /* i repeats in table every f entries */ int g; /* maximum code length */ int htl; /* table level */ unsigned i; /* counter, current code */ unsigned j; /* counter */ int k; /* number of bits in current code */ const unsigned *p; /* pointer into c[], b[], or v[] */ huft_t *q; /* points to current table */ huft_t r; /* table entry for structure assignment */ huft_t *u[BMAX]; /* table stack */ unsigned v[N_MAX + 1]; /* values in order of bit length. last v[] is never used */ int ws[BMAX + 1]; /* bits decoded stack */ int w; /* bits decoded */ unsigned x[BMAX + 1]; /* bit offsets, then code stack */ unsigned *xp; /* pointer into x */ int y; /* number of dummy codes added */ unsigned z; /* number of entries in current table */ huft_t *result; huft_t **t; /* Length of EOB code, if any */ eob_len = n > 256 ? b[256] : BMAX; /* Generate counts for each bit length */ memset(c, 0, sizeof(c)); p = b; i = n; do { c[*p]++; /* assume all entries <= BMAX */ p++; /* can't combine with above line (Solaris bug) */ } while (--i); if (c[0] == n) { /* null input - all zero length codes */ q = xzalloc(3 * sizeof(*q)); //q[0].v.t = NULL; q[1].e = 99; /* invalid code marker */ q[1].b = 1; q[2].e = 99; /* invalid code marker */ q[2].b = 1; *m = 1; return q + 1; } /* Find minimum and maximum length, bound *m by those */ for (j = 1; (j <= BMAX) && (c[j] == 0); j++) continue; k = j; /* minimum code length */ for (i = BMAX; (c[i] == 0) && i; i--) continue; g = i; /* maximum code length */ *m = (*m < j) ? j : ((*m > i) ? i : *m); /* Adjust last length count to fill out codes, if needed */ for (y = 1 << j; j < i; j++, y <<= 1) { y -= c[j]; if (y < 0) return ERR_RET; /* bad input: more codes than bits */ } y -= c[i]; if (y < 0) return ERR_RET; c[i] += y; /* Generate starting offsets into the value table for each length */ x[1] = j = 0; p = c + 1; xp = x + 2; while (--i) { /* note that i == g from above */ j += *p++; *xp++ = j; } /* Make a table of values in order of bit lengths. * To detect bad input, unused v[i]'s are set to invalid value UINT_MAX. * In particular, last v[i] is never filled and must not be accessed. */ memset(v, 0xff, sizeof(v)); p = b; i = 0; do { j = *p++; if (j != 0) { v[x[j]++] = i; } } while (++i < n); /* Generate the Huffman codes and for each, make the table entries */ result = ERR_RET; t = &result; x[0] = i = 0; /* first Huffman code is zero */ p = v; /* grab values in bit order */ htl = -1; /* no tables yet--level -1 */ w = ws[0] = 0; /* bits decoded */ u[0] = NULL; /* just to keep compilers happy */ q = NULL; /* ditto */ z = 0; /* ditto */ /* go through the bit lengths (k already is bits in shortest code) */ for (; k <= g; k++) { a = c[k]; while (a--) { /* here i is the Huffman code of length k bits for value *p */ /* make tables up to required level */ while (k > ws[htl + 1]) { w = ws[++htl]; /* compute minimum size table less than or equal to *m bits */ z = g - w; z = z > *m ? 
*m : z; /* upper limit on table size */ j = k - w; f = 1 << j; if (f > a + 1) { /* try a k-w bit table */ /* too few codes for k-w bit table */ f -= a + 1; /* deduct codes from patterns left */ xp = c + k; while (++j < z) { /* try smaller tables up to z bits */ f <<= 1; if (f <= *++xp) { break; /* enough codes to use up j bits */ } f -= *xp; /* else deduct codes from patterns */ } } j = (w + j > eob_len && w < eob_len) ? eob_len - w : j; /* make EOB code end at table */ z = 1 << j; /* table entries for j-bit table */ ws[htl+1] = w + j; /* set bits decoded in stack */ /* allocate and link in new table */ q = xzalloc((z + 1) * sizeof(huft_t)); *t = q + 1; /* link to list for huft_free() */ t = &(q->v.t); u[htl] = ++q; /* table starts after link */ /* connect to last table, if there is one */ if (htl) { x[htl] = i; /* save pattern for backing up */ r.b = (unsigned char) (w - ws[htl - 1]); /* bits to dump before this table */ r.e = (unsigned char) (16 + j); /* bits in this table */ r.v.t = q; /* pointer to this table */ j = (i & ((1 << w) - 1)) >> ws[htl - 1]; u[htl - 1][j] = r; /* connect to last table */ } } /* set up table entry in r */ r.b = (unsigned char) (k - w); if (/*p >= v + n || -- redundant, caught by the second check: */ *p == UINT_MAX /* do we access uninited v[i]? (see memset(v))*/ ) { r.e = 99; /* out of values--invalid code */ } else if (*p < s) { r.e = (unsigned char) (*p < 256 ? 16 : 15); /* 256 is EOB code */ r.v.n = (unsigned short) (*p++); /* simple code is just the value */ } else { r.e = (unsigned char) cp_ext->ext[*p - s]; /* non-simple--look up in lists */ r.v.n = cp_ext->cp[*p++ - s]; } /* fill code-like entries with r */ f = 1 << (k - w); for (j = i >> w; j < z; j += f) { q[j] = r; } /* backwards increment the k-bit code i */ for (j = 1 << (k - 1); i & j; j >>= 1) { i ^= j; } i ^= j; /* backup over finished tables */ while ((i & ((1 << w) - 1)) != x[htl]) { w = ws[--htl]; } } } /* return actual size of base table */ *m = ws[1]; if (y != 0 && g != 1) /* we were given an incomplete table */ /* return "result" ORed with 1 */ return (void*)((uintptr_t)result | 1); return result; }
Safe
[ "CWE-755" ]
busybox
f25d254dfd4243698c31a4f3153d4ac72aa9e9bd
2.8765692339533028e+38
192
decompress_gunzip: Fix DoS if gzip is corrupt On certain corrupt gzip files, huft_build will set the error bit on the result pointer. If abort_unzip is called afterwards, huft_free might run into a segmentation fault or pass an invalid pointer to free(p). In order to mitigate this, we check in huft_free if the error bit is set and clear it before the linked list is freed. Signed-off-by: Samuel Sapalski <samuel.sapalski@nokia.com> Signed-off-by: Peter Kaestle <peter.kaestle@nokia.com> Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
0
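The busybox message and the huft_build() code above use the classic pointer-tagging trick: the low bit of the returned table pointer marks "incomplete table", and the free path has to mask it off before treating the value as a real pointer. A standalone sketch of that pattern:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BAD_HUFT_BIT 0x1u   /* low bit is free: malloc'd tables are aligned */

static void *tag_incomplete(void *table)
{
    return (void *)((uintptr_t)table | BAD_HUFT_BIT);
}

static void huft_free_like(void *table)
{
    /* clear the error bit before dereferencing or freeing */
    table = (void *)((uintptr_t)table & ~(uintptr_t)BAD_HUFT_BIT);
    free(table);
}

int main(void)
{
    void *t = malloc(64);
    void *result = tag_incomplete(t);        /* "incomplete code table" case */
    if ((uintptr_t)result & BAD_HUFT_BIT)
        fprintf(stderr, "table is incomplete, cleaning up anyway\n");
    huft_free_like(result);                  /* no crash, no leak */
    return 0;
}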
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Safe
[]
linux-2.6
0083fc2c50e6c5127c2802ad323adf8143ab7856
4.1779246862536127e+37
4
do_sigaltstack: avoid copying 'stack_t' as a structure to user space Ulrich Drepper correctly points out that there is generally padding in the structure on 64-bit hosts, and that copying the structure from kernel to user space can leak information from the kernel stack in those padding bytes. Avoid the whole issue by just copying the three members one by one instead, which also means that the function also can avoid the need for a stack frame. This also happens to match how we copy the new structure from user space, so it all even makes sense. [ The obvious solution of adding a memset() generates horrid code, gcc does really stupid things. ] Reported-by: Ulrich Drepper <drepper@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
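The commit above avoids copying stack_t wholesale because of padding after the int flags member on 64-bit ABIs. A userspace sketch showing the padding and the member-by-member copy; the real kernel fix uses __put_user() on each field, which is only alluded to here:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct stack_like {         /* mirrors stack_t: pointer, int flags, size_t size */
    void  *ss_sp;
    int    ss_flags;        /* on LP64 there are 4 padding bytes after this */
    size_t ss_size;
};

int main(void)
{
    printf("offsetof(ss_size) = %zu, sizeof = %zu\n",
           offsetof(struct stack_like, ss_size), sizeof(struct stack_like));

    struct stack_like src = { (void *)0x1000, 0, 8192 };
    struct stack_like dst;
    memset(&dst, 0, sizeof(dst));   /* or copy each member, as the kernel fix does */
    dst.ss_sp = src.ss_sp;
    dst.ss_flags = src.ss_flags;
    dst.ss_size = src.ss_size;
    /* copying &dst as raw bytes now carries no stale stack contents */
    return 0;
}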
cheader_write (cheader_t *ch, GDataOutputStream *out, GCancellable *cancellable, GError **error) { GOutputStream *stream = g_filter_output_stream_get_base_stream (G_FILTER_OUTPUT_STREAM (out)); if (!W1 ('M') || !W1 ('S') || !W1 ('C') || !W1 ('F') || !W4 (ch->res1) || !W4 (ch->size) || !W4 (ch->res2) || !W4 (ch->offsetfiles) || !W4 (ch->res3) || !W1 (ch->versionMIN = 3) || !W1 (ch->versionMAJ = 1) || !W2 (ch->nfolders) || !W2 (ch->nfiles) || !W2 (ch->flags) || !W2 (ch->setID) || !W2 (ch->cabID)) return FALSE; if (ch->flags & CABINET_HEADER_RESERVE) { if (!W2 (ch->res_header) || !W1 (ch->res_folder) || !W1 (ch->res_data)) return FALSE; if (g_output_stream_write (stream, ch->reserved, ch->res_header, cancellable, error) == -1) return FALSE; } return TRUE; }
Safe
[ "CWE-787" ]
gcab
c512f6ff0c82a1139b36db2b28f93edc01c74b4b
6.188560431511387e+37
32
trivial: Allocate cdata_t on the heap Using a 91kB stack allocation for one object isn't awesome; allocating it on the heap instead also lets us use g_autoptr() to simplify gcab_folder_extract()
0
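The gcab note above is about moving a roughly 91kB scratch object off the stack. A plain-C sketch of the same change; GLib's g_autoptr() cleanup, which the commit also adopts, is only mentioned in a comment, and the struct size below is a made-up approximation:

#include <stdlib.h>

struct cdata_like { unsigned char in[32768]; unsigned char out[65536]; }; /* ~96kB */

static int folder_extract_like(void)
{
    /* heap, not `struct cdata_like block;` on the stack */
    struct cdata_like *block = calloc(1, sizeof(*block));
    if (block == NULL)
        return -1;
    /* ... decompress into block->out ... */
    free(block);  /* g_autoptr()/g_free would do this automatically in GLib code */
    return 0;
}

int main(void)
{
    return folder_extract_like();
}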
static struct dentry *udf_fh_to_dentry(struct super_block *sb,
				       struct fid *fid, int fh_len, int fh_type)
{
	if ((fh_len != 3 && fh_len != 5) ||
	    (fh_type != FILEID_UDF_WITH_PARENT &&
	     fh_type != FILEID_UDF_WITHOUT_PARENT))
		return NULL;

	return udf_nfs_get_inode(sb, fid->udf.block, fid->udf.partref,
				 fid->udf.generation);
}
Safe
[ "CWE-200" ]
linux
0143fc5e9f6f5aad4764801015bc8d4b4a278200
1.112268440994858e+38
11
udf: avoid info leak on export For type 0x51 the udf.parent_partref member in struct fid gets copied uninitialized to userland. Fix this by initializing it to 0. Signed-off-by: Mathias Krause <minipli@googlemail.com> Signed-off-by: Jan Kara <jack@suse.cz>
0
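The udf record above leaks whatever happens to be in fid->udf.parent_partref when the "without parent" handle type is encoded. A small sketch of the zero-initialise-then-fill pattern; the struct below only approximates the fid layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct udf_fid_like {
    uint32_t block;
    uint16_t partref;
    uint16_t parent_partref;   /* only meaningful for "with parent" handles */
    uint32_t generation;
};

static void encode_fh_like(struct udf_fid_like *fid, int with_parent)
{
    memset(fid, 0, sizeof(*fid));     /* the fix: nothing uninitialized escapes */
    fid->block = 123;
    fid->partref = 0;
    fid->generation = 42;
    if (with_parent)
        fid->parent_partref = 1;      /* otherwise it stays 0, not stack garbage */
}

int main(void)
{
    struct udf_fid_like fid;
    encode_fh_like(&fid, 0);
    printf("parent_partref = %u\n", fid.parent_partref);
    return 0;
}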
currentbasecolor_cont(i_ctx_t *i_ctx_p) { ref arr, *parr = &arr; es_ptr ep = esp; int i, code = 0, stage, base, cont=1, stack_depth = 0, CIESubst=0; unsigned int depth; PS_colour_space_t *obj; stack_depth = (int)ep[-4].value.intval; base = (int)ep[-3].value.intval; depth = (unsigned int)ep[-2].value.intval; stage = (int)ep[-1].value.intval; /* This shouldn't be possible, all the procedures which call this should * set the depth to at *least* 1. */ if (depth < 1) return_error(gs_error_unknownerror); /* If we get a continuation from a sub-procedure, we will want to come back * here afterward, to do any remaining stages. We need to set up for that now. * so that our continuation is ahead of the sub-proc's continuation. */ check_estack(1); /* The push_op_estack macro increments esp before use, so we don't need to */ push_op_estack(currentbasecolor_cont); while (code == 0 && cont) { ref_assign(&arr, ep); parr = &arr; /* Run along the nested color spaces until we get to the lowest one * that we haven't yet processed (given by 'depth') */ for (i = 0;i < depth;i++) { code = get_space_object(i_ctx_p, parr, &obj); if (code < 0) return code; if (i < (depth - 1)) { if (!obj->alternateproc) { return_error(gs_error_typecheck); } code = obj->alternateproc(i_ctx_p, parr, &parr, &CIESubst); if (code < 0) return code; } } code = obj->basecolorproc(i_ctx_p, parr, base, &stage, &cont, &stack_depth); make_int(&ep[-4], stack_depth); make_int(&ep[-1], stage); if (code > 0) return code; /* Completed that space, increment the 'depth' */ make_int(&ep[-2], ++depth); } if (code <= 0) { /* Remove our next continuation and our data */ esp -= 7; code = o_pop_estack; } return code; }
Safe
[]
ghostpdl
b326a71659b7837d3acde954b18bda1a6f5e9498
1.5166610551925606e+38
63
Bug 699655: Properly check the return value.... ...when getting a value from a dictionary
0
static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave;
	int i, res;

	bond_for_each_slave(bond, slave, i) {
		struct net_device *slave_dev = slave->dev;
		const struct net_device_ops *slave_ops = slave_dev->netdev_ops;

		if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
		    slave_ops->ndo_vlan_rx_kill_vid) {
			slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
		}
	}

	res = bond_del_vlan(bond, vid);
	if (res) {
		pr_err("%s: Error: Failed to remove vlan id %d\n",
		       bond_dev->name, vid);
	}
}
Safe
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
1.8819677092074939e+37
22
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, we are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs). There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearing the IFF_TX_SKB_SHARING flag in priv_flags. Signed-off-by: Neil Horman <nhorman@tuxdriver.com> CC: Karsten Keil <isdn@linux-pingi.de> CC: "David S. Miller" <davem@davemloft.net> CC: Jay Vosburgh <fubar@us.ibm.com> CC: Andy Gospodarek <andy@greyhouse.net> CC: Patrick McHardy <kaber@trash.net> CC: Krzysztof Halasa <khc@pm.waw.pl> CC: "John W. Linville" <linville@tuxdriver.com> CC: Greg Kroah-Hartman <gregkh@suse.de> CC: Marcel Holtmann <marcel@holtmann.org> CC: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: David S. Miller <davem@davemloft.net>
0
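The commit above marks virtual drivers, such as bonding, as unable to transmit shared skbs by clearing IFF_TX_SKB_SHARING after ether_setup(). A sketch of that flag dance with stand-in types and a stand-in flag value rather than the kernel headers:

#include <stdio.h>

#define IFF_TX_SKB_SHARING_LIKE 0x10000u   /* stand-in for the kernel flag */

struct net_device_like {
    unsigned int priv_flags;
};

static void ether_setup_like(struct net_device_like *dev)
{
    dev->priv_flags |= IFF_TX_SKB_SHARING_LIKE;   /* default set by ether_setup() */
}

static void bond_setup_like(struct net_device_like *dev)
{
    ether_setup_like(dev);
    /* the audit in the commit: this virtual device keeps state in its skbs,
     * so it must opt out of shared-skb transmission */
    dev->priv_flags &= ~IFF_TX_SKB_SHARING_LIKE;
}

int main(void)
{
    struct net_device_like dev = { 0 };
    bond_setup_like(&dev);
    printf("shares tx skbs: %s\n",
           (dev.priv_flags & IFF_TX_SKB_SHARING_LIKE) ? "yes" : "no");
    return 0;
}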
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
Safe
[]
kvm
e28ba7bb020f07193bc000453c8775e9d2c0dda7
3.1683440337051648e+38
14
KVM: x86: fix missing checks in syscall emulation On hosts without this patch, 32bit guests will crash (and 64bit guests may behave in a wrong way) for example by simply executing the following nasm-demo-application: [bits 32] global _start SECTION .text _start: syscall (I tested it with winxp and linux - both always crashed) Disassembly of section .text: 00000000 <_start>: 0: 0f 05 syscall The reason seems to be a missing "invalid opcode"-trap (int6) for the syscall opcode "0f05", which is not available on Intel CPUs within non-longmodes, as well as on some AMD CPUs within legacy-mode (depending on CPU vendor, MSR_EFER and cpuid). Because the previously mentioned OSs may not engage the corresponding syscall target-registers (STAR, LSTAR, CSTAR), they remain NULL and (non trapping) syscalls lead to multiple faults and finally crashes. Depending on the architecture (AMD or Intel) pretended by guests, various checks according to vendor's documentation are implemented to overcome the current issue and behave like the CPUs' physical counterparts. [mtosatti: cleanup/beautify code] Signed-off-by: Stephan Baerwolf <stephan.baerwolf@tu-ilmenau.de> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
0
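The KVM message above says SYSCALL must raise #UD outside the vendor/mode/EFER combinations that support it. A hedged decision-table sketch of that check; the mode names and the Intel/AMD split are simplifications drawn from the message, not the actual em_syscall() code:

#include <stdbool.h>
#include <stdio.h>

enum cpu_mode { MODE_REAL, MODE_PROT32, MODE_LONG64 };
enum vendor { VENDOR_INTEL, VENDOR_AMD };

#define EFER_SCE 0x1ull   /* SYSCALL enable bit in MSR_EFER */

/* Returns true when SYSCALL should raise #UD instead of being emulated. */
static bool syscall_is_undefined(enum cpu_mode mode, enum vendor vendor,
                                 unsigned long long efer)
{
    if (!(efer & EFER_SCE))
        return true;                       /* not enabled at all */
    if (mode == MODE_REAL)
        return true;
    if (mode != MODE_LONG64 && vendor == VENDOR_INTEL)
        return true;                       /* Intel: only in 64-bit (long) mode */
    return false;                          /* AMD also allows legacy protected mode */
}

int main(void)
{
    printf("intel, 32-bit, SCE set -> #UD? %d\n",
           syscall_is_undefined(MODE_PROT32, VENDOR_INTEL, EFER_SCE));
    printf("amd,   32-bit, SCE set -> #UD? %d\n",
           syscall_is_undefined(MODE_PROT32, VENDOR_AMD, EFER_SCE));
    return 0;
}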
njs_object_property_init(njs_lvlhsh_query_t *lhq, const njs_value_t *key,
    uint32_t hash)
{
    lhq->proto = &njs_object_hash_proto;

    njs_object_property_key_set(lhq, key, hash);
}
Safe
[]
njs
6549d49630ce5f5ac823fd3ae0c6c8558b8716ae
2.5370799354573888e+38
7
Fixed redefinition of special props in Object.defineProperty(). Previously, when an NJS_PROPERTY_HANDLER property was updated it might be left in an inconsistent state. Namely, prop->type was left unchanged, but prop->value did not have the expected property handler. As a result, a subsequent reference to the property may result in a segmentation violation. The fix is to update prop->type during redefinition. This closes #504 issue on Github.
0
bool operator!=(const const_iterator &rhs) const {
  return !(*this == rhs);
}
Safe
[ "CWE-416", "CWE-703" ]
hermes
d86e185e485b6330216dee8e854455c694e3a36e
2.1221015786074976e+38
3
Fix a bug in transient object property assignment and getUTF16Ref Summary: The returned `UTF16Ref` from `StringView::getUTF16Ref` can be invalidated by appending more contents to the same allocator. This case was encountered in `transientObjectPutErrorMessage`, resulting in using free'd memory. Reviewed By: tmikov Differential Revision: D23034855 fbshipit-source-id: 4c25a5369934bf3bdfc5582385503f4b87de3792
0
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) { struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); struct brcmf_pub *drvr = bus_if->drvr; struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; struct brcmf_commonring *commonring; u16 i; struct brcmf_flowring_ring *ring; struct brcmf_flowring_hash *hash; commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n", commonring->r_ptr, commonring->w_ptr, commonring->depth); commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT]; seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n", commonring->r_ptr, commonring->w_ptr, commonring->depth); commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE]; seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n", commonring->r_ptr, commonring->w_ptr, commonring->depth); commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE]; seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n", commonring->r_ptr, commonring->w_ptr, commonring->depth); commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]; seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n", commonring->r_ptr, commonring->w_ptr, commonring->depth); seq_printf(seq, "\nh2d_flowrings: depth %u\n", BRCMF_H2D_TXFLOWRING_MAX_ITEM); seq_puts(seq, "Active flowrings:\n"); hash = msgbuf->flow->hash; for (i = 0; i < msgbuf->flow->nrofrings; i++) { if (!msgbuf->flow->rings[i]) continue; ring = msgbuf->flow->rings[i]; if (ring->status != RING_OPEN) continue; commonring = msgbuf->flowrings[i]; hash = &msgbuf->flow->hash[ring->hash_id]; seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n" " ifidx %u, fifo %u, da %pM\n", i, commonring->r_ptr, commonring->w_ptr, skb_queue_len(&ring->skblist), ring->blocked, hash->ifidx, hash->fifo, hash->mac); } return 0; }
Safe
[ "CWE-20" ]
linux
a4176ec356c73a46c07c181c6d04039fafa34a9f
1.357584068443481e+38
47
brcmfmac: add subtype check for event handling in data path For USB there is no separate channel being used to pass events from firmware to the host driver and as such they are passed over the data path. In order to detect mock event messages an additional check is needed on the event subtype. This check is added conditionally using the unlikely() keyword. Reviewed-by: Hante Meuleman <hante.meuleman@broadcom.com> Reviewed-by: Pieter-Paul Giesberts <pieter-paul.giesberts@broadcom.com> Reviewed-by: Franky Lin <franky.lin@broadcom.com> Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
0
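The brcmfmac fix above adds a subtype test, wrapped in unlikely(), before treating a data-path frame as a firmware event. A sketch of that guard; the ethertype/subtype constants and the struct are stand-ins, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)   /* same hint the kernel macro gives */

#define EVENT_ETHERTYPE_LIKE 0x886c              /* stand-in event ethertype */
#define EVENT_SUBTYPE_LIKE   0x8001              /* stand-in expected subtype */

struct event_hdr_like {
    uint16_t ethertype;
    uint16_t subtype;
};

static bool is_event_frame(const struct event_hdr_like *h)
{
    if (h->ethertype != EVENT_ETHERTYPE_LIKE)
        return false;
    if (unlikely(h->subtype != EVENT_SUBTYPE_LIKE))
        return false;               /* the extra check: drop mock/garbled events */
    return true;
}

int main(void)
{
    struct event_hdr_like good = { EVENT_ETHERTYPE_LIKE, EVENT_SUBTYPE_LIKE };
    struct event_hdr_like mock = { EVENT_ETHERTYPE_LIKE, 0 };
    printf("good: %d, mock: %d\n", is_event_frame(&good), is_event_frame(&mock));
    return 0;
}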
NumberFormatTest::TestExponential(void) { UErrorCode status = U_ZERO_ERROR; DecimalFormatSymbols sym(Locale::getUS(), status); if (U_FAILURE(status)) { errcheckln(status, "FAIL: Bad status returned by DecimalFormatSymbols ct - %s", u_errorName(status)); return; } const char* pat[] = { "0.####E0", "00.000E00", "##0.######E000", "0.###E0;[0.###E0]" }; int32_t pat_length = UPRV_LENGTHOF(pat); // The following #if statements allow this test to be built and run on // platforms that do not have standard IEEE numerics. For example, // S/390 doubles have an exponent range of -78 to +75. For the // following #if statements to work, float.h must define // DBL_MAX_10_EXP to be a compile-time constant. // This section may be expanded as needed. #if DBL_MAX_10_EXP > 300 double val[] = { 0.01234, 123456789, 1.23e300, -3.141592653e-271 }; int32_t val_length = UPRV_LENGTHOF(val); const char* valFormat[] = { // 0.####E0 "1.234E-2", "1.2346E8", "1.23E300", "-3.1416E-271", // 00.000E00 "12.340E-03", "12.346E07", "12.300E299", "-31.416E-272", // ##0.######E000 "12.34E-003", "123.4568E006", "1.23E300", "-314.1593E-273", // 0.###E0;[0.###E0] "1.234E-2", "1.235E8", "1.23E300", "[3.142E-271]" }; double valParse[] = { 0.01234, 123460000, 1.23E300, -3.1416E-271, 0.01234, 123460000, 1.23E300, -3.1416E-271, 0.01234, 123456800, 1.23E300, -3.141593E-271, 0.01234, 123500000, 1.23E300, -3.142E-271, }; #elif DBL_MAX_10_EXP > 70 double val[] = { 0.01234, 123456789, 1.23e70, -3.141592653e-71 }; int32_t val_length = UPRV_LENGTHOF(val); char* valFormat[] = { // 0.####E0 "1.234E-2", "1.2346E8", "1.23E70", "-3.1416E-71", // 00.000E00 "12.340E-03", "12.346E07", "12.300E69", "-31.416E-72", // ##0.######E000 "12.34E-003", "123.4568E006", "12.3E069", "-31.41593E-072", // 0.###E0;[0.###E0] "1.234E-2", "1.235E8", "1.23E70", "[3.142E-71]" }; double valParse[] = { 0.01234, 123460000, 1.23E70, -3.1416E-71, 0.01234, 123460000, 1.23E70, -3.1416E-71, 0.01234, 123456800, 1.23E70, -3.141593E-71, 0.01234, 123500000, 1.23E70, -3.142E-71, }; #else // Don't test double conversion double* val = 0; int32_t val_length = 0; char** valFormat = 0; double* valParse = 0; logln("Warning: Skipping double conversion tests"); #endif int32_t lval[] = { 0, -1, 1, 123456789 }; int32_t lval_length = UPRV_LENGTHOF(lval); const char* lvalFormat[] = { // 0.####E0 "0E0", "-1E0", "1E0", "1.2346E8", // 00.000E00 "00.000E00", "-10.000E-01", "10.000E-01", "12.346E07", // ##0.######E000 "0E000", "-1E000", "1E000", "123.4568E006", // 0.###E0;[0.###E0] "0E0", "[1E0]", "1E0", "1.235E8" }; int32_t lvalParse[] = { 0, -1, 1, 123460000, 0, -1, 1, 123460000, 0, -1, 1, 123456800, 0, -1, 1, 123500000, }; int32_t ival = 0, ilval = 0; for (int32_t p=0; p<pat_length; ++p) { DecimalFormat fmt(pat[p], sym, status); if (U_FAILURE(status)) { errln("FAIL: Bad status returned by DecimalFormat ct"); continue; } UnicodeString pattern; logln((UnicodeString)"Pattern \"" + pat[p] + "\" -toPattern-> \"" + fmt.toPattern(pattern) + "\""); int32_t v; for (v=0; v<val_length; ++v) { UnicodeString s; (*(NumberFormat*)&fmt).format(val[v], s); logln((UnicodeString)" " + val[v] + " -format-> " + s); if (s != valFormat[v+ival]) errln((UnicodeString)"FAIL: Expected " + valFormat[v+ival]); ParsePosition pos(0); Formattable af; fmt.parse(s, af, pos); double a; UBool useEpsilon = FALSE; if (af.getType() == Formattable::kLong) a = af.getLong(); else if (af.getType() == Formattable::kDouble) { a = af.getDouble(); #if U_PF_OS390 <= U_PLATFORM && U_PLATFORM <= U_PF_OS400 // S/390 will show a failure like this: 
//| -3.141592652999999e-271 -format-> -3.1416E-271 //| -parse-> -3.1416e-271 //| FAIL: Expected -3.141599999999999e-271 // To compensate, we use an epsilon-based equality // test on S/390 only. We don't want to do this in // general because it's less exacting. useEpsilon = TRUE; #endif } else { errln(UnicodeString("FAIL: Non-numeric Formattable returned: ") + pattern + " " + s); continue; } if (pos.getIndex() == s.length()) { logln((UnicodeString)" -parse-> " + a); // Use epsilon comparison as necessary if ((useEpsilon && (uprv_fabs(a - valParse[v+ival]) / a > (2*DBL_EPSILON))) || (!useEpsilon && a != valParse[v+ival])) { errln((UnicodeString)"FAIL: Expected " + valParse[v+ival] + " but got " + a + " on input " + s); } } else { errln((UnicodeString)"FAIL: Partial parse (" + pos.getIndex() + " chars) -> " + a); errln((UnicodeString)" should be (" + s.length() + " chars) -> " + valParse[v+ival]); } } for (v=0; v<lval_length; ++v) { UnicodeString s; (*(NumberFormat*)&fmt).format(lval[v], s); logln((UnicodeString)" " + lval[v] + "L -format-> " + s); if (s != lvalFormat[v+ilval]) errln((UnicodeString)"ERROR: Expected " + lvalFormat[v+ilval] + " Got: " + s); ParsePosition pos(0); Formattable af; fmt.parse(s, af, pos); if (af.getType() == Formattable::kLong || af.getType() == Formattable::kInt64) { UErrorCode status = U_ZERO_ERROR; int32_t a = af.getLong(status); if (pos.getIndex() == s.length()) { logln((UnicodeString)" -parse-> " + a); if (a != lvalParse[v+ilval]) errln((UnicodeString)"FAIL: Expected " + lvalParse[v+ilval] + " but got " + a); } else errln((UnicodeString)"FAIL: Partial parse (" + pos.getIndex() + " chars) -> " + a); } else errln((UnicodeString)"FAIL: Non-long Formattable returned for " + s + " Double: " + af.getDouble() + ", Long: " + af.getLong()); } ival += val_length; ilval += lval_length; } }
Safe
[ "CWE-190" ]
icu
53d8c8f3d181d87a6aa925b449b51c4a2c922a51
1.9437724728347096e+38
177
ICU-20246 Fixing another integer overflow in number parsing.
0
init_render_context(ASS_Renderer *render_priv, ASS_Event *event)
{
    render_priv->state.event = event;
    render_priv->state.parsed_tags = 0;
    render_priv->state.has_clips = 0;
    render_priv->state.evt_type = EVENT_NORMAL;

    reset_render_context(render_priv, NULL);

    render_priv->state.wrap_style = render_priv->track->WrapStyle;
    render_priv->state.alignment = render_priv->state.style->Alignment;
    render_priv->state.pos_x = 0;
    render_priv->state.pos_y = 0;
    render_priv->state.org_x = 0;
    render_priv->state.org_y = 0;
    render_priv->state.have_origin = 0;
    render_priv->state.clip_x0 = 0;
    render_priv->state.clip_y0 = 0;
    render_priv->state.clip_x1 = render_priv->track->PlayResX;
    render_priv->state.clip_y1 = render_priv->track->PlayResY;
    render_priv->state.clip_mode = 0;
    render_priv->state.detect_collisions = 1;
    render_priv->state.fade = 0;
    render_priv->state.drawing_scale = 0;
    render_priv->state.pbo = 0;
    render_priv->state.effect_type = EF_NONE;
    render_priv->state.effect_timing = 0;
    render_priv->state.effect_skip_timing = 0;

    apply_transition_effects(render_priv, event);
}
Safe
[ "CWE-125" ]
libass
f4f48950788b91c6a30029cc28a240b834713ea7
2.9992661217235624e+38
31
Fix line wrapping mode 0/3 bugs This fixes two separate bugs: a) Don't move a linebreak into the first symbol. This results in an empty line at the front, which does not help to equalize line lengths at all. Instead, merge the line with the second one. b) When moving a linebreak into a symbol that already is a break, the number of lines must be decremented. Otherwise, uninitialized memory is possibly used for later layout operations. Found by fuzzer test case id:000085,sig:11,src:003377+003350,op:splice,rep:8. This might also affect and hopefully fix libass#229. v2: change semantics according to review
0
int RAND_load_file(const char*, long)
{
    // TODO:
    return 0;
}
Safe
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
2.6001072731414063e+38
5
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
0
static noinline int copy_to_sk(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, struct btrfs_ioctl_search_key *sk, char *buf, unsigned long *sk_offset, int *num_found) { u64 found_transid; struct extent_buffer *leaf; struct btrfs_ioctl_search_header sh; unsigned long item_off; unsigned long item_len; int nritems; int i; int slot; int ret = 0; leaf = path->nodes[0]; slot = path->slots[0]; nritems = btrfs_header_nritems(leaf); if (btrfs_header_generation(leaf) > sk->max_transid) { i = nritems; goto advance_key; } found_transid = btrfs_header_generation(leaf); for (i = slot; i < nritems; i++) { item_off = btrfs_item_ptr_offset(leaf, i); item_len = btrfs_item_size_nr(leaf, i); if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE) item_len = 0; if (sizeof(sh) + item_len + *sk_offset > BTRFS_SEARCH_ARGS_BUFSIZE) { ret = 1; goto overflow; } btrfs_item_key_to_cpu(leaf, key, i); if (!key_in_sk(key, sk)) continue; sh.objectid = key->objectid; sh.offset = key->offset; sh.type = key->type; sh.len = item_len; sh.transid = found_transid; /* copy search result header */ memcpy(buf + *sk_offset, &sh, sizeof(sh)); *sk_offset += sizeof(sh); if (item_len) { char *p = buf + *sk_offset; /* copy the item */ read_extent_buffer(leaf, p, item_off, item_len); *sk_offset += item_len; } (*num_found)++; if (*num_found >= sk->nr_items) break; } advance_key: ret = 0; if (key->offset < (u64)-1 && key->offset < sk->max_offset) key->offset++; else if (key->type < (u8)-1 && key->type < sk->max_type) { key->offset = 0; key->type++; } else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) { key->offset = 0; key->type = 0; key->objectid++; } else ret = 1; overflow: return ret; }
Safe
[ "CWE-310" ]
linux-2.6
9c52057c698fb96f8f07e7a4bcf4801a092bda89
6.061087688416008e+37
83
Btrfs: fix hash overflow handling The handling for directory crc hash overflows was fairly obscure, split_leaf returns EOVERFLOW when we try to extend the item and that is supposed to bubble up to userland. For a while it did so, but along the way we added better handling of errors and forced the FS readonly if we hit IO errors during the directory insertion. Along the way, we started testing only for EEXIST and the EOVERFLOW case was dropped. The end result is that we may force the FS readonly if we catch a directory hash bucket overflow. This fixes a few problem spots. First I add tests for EOVERFLOW in the places where we can safely just return the error up the chain. btrfs_rename is harder though, because it tries to insert the new directory item only after it has already unlinked anything the rename was going to overwrite. Rather than adding very complex logic, I added a helper to test for the hash overflow case early while it is still safe to bail out. Snapshot and subvolume creation had a similar problem, so they are using the new helper now too. Signed-off-by: Chris Mason <chris.mason@fusionio.com> Reported-by: Pascal Junod <pascal@junod.info>
0
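The btrfs message above is about surfacing EOVERFLOW from directory name-hash collisions and probing for the collision before anything irreversible (unlinking the rename target) happens. A toy sketch of that ordering; the helper name and the collision test are invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for btrfs' early collision probe: report a crc name-hash
 * collision in the target directory as -EOVERFLOW. */
static int check_dir_collision_like(const char *new_name)
{
    return strcmp(new_name, "colliding-name") == 0 ? -EOVERFLOW : 0;
}

static int rename_like(const char *old_name, const char *new_name)
{
    int ret = check_dir_collision_like(new_name);
    if (ret)                /* bail out while nothing has been unlinked yet */
        return ret;         /* propagate EOVERFLOW instead of forcing read-only */
    /* ... unlink old_name's entry, then insert new_name's dir item ... */
    (void)old_name;
    return 0;
}

int main(void)
{
    printf("rename -> %d\n", rename_like("a", "colliding-name"));
    return 0;
}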