func: string
target: string
cwe: list
project: string
commit_id: string
hash: string
size: int64
message: string
vul: int64
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page, *page_head; int err, ro = 0; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; if (unlikely((address % sizeof(u32)) != 0)) return -EINVAL; address -= key->both.offset; if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) return -EFAULT; /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs * virtual address, we dont even have to find the underlying vma. * Note : We do have to check 'uaddr' is a valid user address, * but access_ok() should be faster than find_vma() */ if (!fshared) { key->private.mm = mm; key->private.address = address; get_futex_key_refs(key); /* implies MB (B) */ return 0; } again: err = get_user_pages_fast(address, 1, 1, &page); /* * If write access is not required (eg. FUTEX_WAIT), try * and get read-only access. */ if (err == -EFAULT && rw == VERIFY_READ) { err = get_user_pages_fast(address, 1, 0, &page); ro = 1; } if (err < 0) return err; else err = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE page_head = page; if (unlikely(PageTail(page))) { put_page(page); /* serialize against __split_huge_page_splitting() */ local_irq_disable(); if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) { page_head = compound_head(page); /* * page_head is valid pointer but we must pin * it before taking the PG_lock and/or * PG_compound_lock. The moment we re-enable * irqs __split_huge_page_splitting() can * return and the head page can be freed from * under us. We can't take the PG_lock and/or * PG_compound_lock on a page that could be * freed from under us. */ if (page != page_head) { get_page(page_head); put_page(page); } local_irq_enable(); } else { local_irq_enable(); goto again; } } #else page_head = compound_head(page); if (page != page_head) { get_page(page_head); put_page(page); } #endif lock_page(page_head); /* * If page_head->mapping is NULL, then it cannot be a PageAnon * page; but it might be the ZERO_PAGE or in the gate area or * in a special mapping (all cases which we are happy to fail); * or it may have been a good file page when get_user_pages_fast * found it, but truncated or holepunched or subjected to * invalidate_complete_page2 before we got the page lock (also * cases which we are happy to fail). And we hold a reference, * so refcount care in invalidate_complete_page's remove_mapping * prevents drop_caches from setting mapping to NULL beneath us. * * The case we do have to guard against is when memory pressure made * shmem_writepage move it from filecache to swapcache beneath us: * an unlikely race, but we do need to retry for page_head->mapping. */ if (!page_head->mapping) { int shmem_swizzled = PageSwapCache(page_head); unlock_page(page_head); put_page(page_head); if (shmem_swizzled) goto again; return -EFAULT; } /* * Private mappings are handled in a simple way. * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. */ if (PageAnon(page_head)) { /* * A RO anonymous page will never change and thus doesn't make * sense for futex operations. 
*/ if (ro) { err = -EFAULT; goto out; } key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; } else { key->both.offset |= FUT_OFF_INODE; /* inode-based key */ key->shared.inode = page_head->mapping->host; key->shared.pgoff = basepage_index(page); } get_futex_key_refs(key); /* implies MB (B) */ out: unlock_page(page_head); put_page(page_head); return err; }
Safe
[ "CWE-264", "CWE-269" ]
linux
e9c243a5a6de0be8e584c604d353412584b592f8
1.2510196307925665e+38
142
futex-prevent-requeue-pi-on-same-futex.patch futex: Forbid uaddr == uaddr2 in futex_requeue(..., requeue_pi=1) If uaddr == uaddr2, then we have broken the rule of only requeueing from a non-pi futex to a pi futex with this call. If we attempt this, then dangling pointers may be left for rt_waiter resulting in an exploitable condition. This change brings futex_requeue() in line with futex_wait_requeue_pi() which performs the same check as per commit 6f7b0a2a5c0f ("futex: Forbid uaddr == uaddr2 in futex_wait_requeue_pi()") [ tglx: Compare the resulting keys as well, as uaddrs might be different depending on the mapping ] Fixes CVE-2014-3153. Reported-by: Pinkie Pie Signed-off-by: Will Drewry <wad@chromium.org> Signed-off-by: Kees Cook <keescook@chromium.org> Cc: stable@vger.kernel.org Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Darren Hart <dvhart@linux.intel.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
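The guard described in the commit message above is small; a minimal sketch of its shape, assuming the kernel's match_futex() helper and that the check sits near the top of futex_requeue() (the upstream patch also re-checks after key resolution):

    /* Sketch: requeue-PI is only valid between two distinct futexes. */
    if (requeue_pi) {
            /* The same user address can never be a valid pair. */
            if (uaddr1 == uaddr2)
                    return -EINVAL;
            /* Different uaddrs may still map to the same futex key. */
            if (match_futex(&key1, &key2))
                    return -EINVAL;
    }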
defragmentTCP(DcmTransportConnection *connection, DUL_BLOCKOPTIONS block, time_t timerStart, int timeout, void *p, unsigned long l, unsigned long *rtnLen) { unsigned char *b; int bytesRead; /* assign buffer to local variable */ b = (unsigned char *) p; /* if reference parameter is a valid pointer, initialize its value */ if (rtnLen != NULL) *rtnLen = 0; /* if there is no network connection, return an error */ if (connection == NULL) return DUL_NULLKEY; int timeToWait = 0; if (block == DUL_NOBLOCK) { /* figure out how long we want to wait: if timerStart equals 0 we want to wait exactly */ /* timeout seconds starting from the call to select(...) within the below called function; */ /* if timerStart does not equal 0 we want to subtract the time which has already passed */ /* after the timer was started from timeout and wait the resulting amount of seconds */ /* starting from the call to select(...) within the below called function. */ if (timerStart == 0) timerStart = time(NULL); } /* start a loop: since we want to receive l bytes of data over the network, */ /* we won't stop waiting for data until we actually did receive l bytes. */ while (l > 0) { /* receive data from the network connection; wait until */ /* we actually did receive data or an error occurred */ do { /* if DUL_NOBLOCK is specified as a blocking option, we only want to wait a certain * time for receiving data over the network. If no data was received during that time, * DUL_READTIMEOUT shall be returned. Note that if DUL_BLOCK is specified the application * will not stop waiting until data is actually received over the network. */ if (block == DUL_NOBLOCK) { /* determine remaining time to wait */ timeToWait = timeout - (int) (time(NULL) - timerStart); /* go ahead and see if within timeout seconds data will be received over the network. */ /* if not, return DUL_READTIMEOUT, if yes, stay in this function. */ if (!connection->networkDataAvailable(timeToWait)) return DUL_READTIMEOUT; } /* data has become available, now call read(). */ bytesRead = connection->read((char*)b, size_t(l)); } while (bytesRead == -1 && OFStandard::getLastNetworkErrorCode().value() == DCMNET_EINTR); /* if we actually received data, move the buffer pointer to its own end, update the variable */ /* that determines the end of the first loop, and update the reference parameter return variable */ if (bytesRead > 0) { b += bytesRead; l -= (unsigned long) bytesRead; if (rtnLen != NULL) *rtnLen += (unsigned long) bytesRead; } else { /* in case we did not receive any data, an error must have occurred; return a corresponding result value */ return DUL_NETWORKCLOSED; } } return EC_Normal; }
Safe
[ "CWE-415", "CWE-703", "CWE-401" ]
dcmtk
a9697dfeb672b0b9412c00c7d36d801e27ec85cb
1.5670206586003499e+38
69
Fixed poss. NULL pointer dereference/double free. Thanks to Jinsheng Ba <bajinsheng@u.nus.edu> for the report and some patches.
0
static void perf_event_task_output(struct perf_event *event, struct perf_task_event *task_event) { struct perf_output_handle handle; struct perf_sample_data sample; struct task_struct *task = task_event->task; int ret, size = task_event->event_id.header.size; perf_event_header__init_id(&task_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, task_event->event_id.header.size); if (ret) goto out; task_event->event_id.pid = perf_event_pid(event, task); task_event->event_id.ppid = perf_event_pid(event, current); task_event->event_id.tid = perf_event_tid(event, task); task_event->event_id.ptid = perf_event_tid(event, current); perf_output_put(&handle, task_event->event_id); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); out: task_event->event_id.header.size = size; }
Safe
[ "CWE-703", "CWE-189" ]
linux
8176cced706b5e5d15887584150764894e94e02f
1.313065900077062e+38
29
perf: Treat attr.config as u64 in perf_swevent_init() Trinity discovered that we fail to check all 64 bits of attr.config passed by user space, resulting in out-of-bounds access of the perf_swevent_enabled array in sw_perf_event_destroy(). Introduced in commit b0a873ebb ("perf: Register PMU implementations"). Signed-off-by: Tommi Rantala <tt.rantala@gmail.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: davej@redhat.com Cc: Paul Mackerras <paulus@samba.org> Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> Link: http://lkml.kernel.org/r/1365882554-30259-1-git-send-email-tt.rantala@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
0
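The width bug the message describes comes down to one declaration and one comparison; a sketch, assuming the PERF_COUNT_SW_MAX bound and perf_swevent_enabled array named in the message:

    u64 event_id = event->attr.config;  /* was: int event_id = ...; truncated to 32 bits */

    if (event_id >= PERF_COUNT_SW_MAX)  /* now compares all 64 bits */
            return -ENOENT;
    /* only past this check may event_id index perf_swevent_enabled[] */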
DEFINE_RUN_ONCE_STATIC(ossl_init_add_all_digests) { /* * OPENSSL_NO_AUTOALGINIT is provided here to prevent at compile time * pulling in all the ciphers during static linking */ #ifndef OPENSSL_NO_AUTOALGINIT # ifdef OPENSSL_INIT_DEBUG fprintf(stderr, "OPENSSL_INIT: ossl_init_add_all_digests: " "openssl_add_all_digests()\n"); # endif openssl_add_all_digests_int(); #endif return 1; }
Safe
[ "CWE-330" ]
openssl
1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be
2.0361323265039055e+38
15
drbg: ensure fork-safety without using a pthread_atfork handler When the new OpenSSL CSPRNG was introduced in version 1.1.1, it was announced in the release notes that it would be fork-safe, which the old CSPRNG hadn't been. The fork-safety was implemented using a fork count, which was incremented by a pthread_atfork handler. Initially, this handler was enabled by default. Unfortunately, the default behaviour had to be changed for other reasons in commit b5319bdbd095, so the new OpenSSL CSPRNG failed to keep its promise. This commit restores the fork-safety using a different approach. It replaces the fork count by a fork id, which coincides with the process id on UNIX-like operating systems and is zero on other operating systems. It is used to detect when an automatic reseed after a fork is necessary. To prevent a future regression, it also adds a test to verify that the child reseeds after fork. CVE-2019-1549 Reviewed-by: Paul Dale <paul.dale@oracle.com> Reviewed-by: Matt Caswell <matt@openssl.org> (Merged from https://github.com/openssl/openssl/pull/9802)
0
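A self-contained sketch of the fork-id idea from the commit message: remember which process the DRBG state belongs to and force a reseed when the id no longer matches (struct and function names are illustrative, not OpenSSL's):

    #include <sys/types.h>
    #include <unistd.h>

    struct drbg_state { pid_t fork_id; /* ... entropy state ... */ };

    static int drbg_needs_reseed(struct drbg_state *d)
    {
            pid_t now = getpid();     /* on UNIX-like systems the fork id is the pid */
            if (d->fork_id != now) {  /* running in a fork()ed child */
                    d->fork_id = now;
                    return 1;         /* caller must reseed before generating */
            }
            return 0;
    }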
imapx_server_info_changed_cb (CamelIMAPXSummary *summary, CamelMessageInfo *info, gpointer user_data) { GHashTable *changed_meanwhile = user_data; g_return_if_fail (info != NULL); g_return_if_fail (changed_meanwhile != NULL); /* The UID can be NULL in case of a newly fetched message, for example when creating the message info in imapx_untagged_fetch() by camel_folder_summary_info_new_from_parser() */ if (camel_message_info_uid (info)) { g_hash_table_insert (changed_meanwhile, (gpointer) camel_pstring_strdup (camel_message_info_uid (info)), GINT_TO_POINTER (1)); } }
Safe
[]
evolution-data-server
f26a6f672096790d0bbd76903db4c9a2e44f116b
5.795985093146874e+37
17
[IMAPx] 'STARTTLS not supported' error ignored When a user has set up the STARTTLS encryption method, but the server doesn't support it, then an error should be shown to the user, instead of using an unsecure connection. There had been two bugs in the existing code which prevented this error from being used and the failure properly reported. This had been filed at: https://bugzilla.redhat.com/show_bug.cgi?id=1334842
0
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) { struct vcpu_svm *svm = to_svm(vcpu); switch (ecx) { case MSR_IA32_TSC: { *data = svm->vmcb->control.tsc_offset + svm_scale_tsc(vcpu, native_read_tsc()); break; } case MSR_STAR: *data = svm->vmcb->save.star; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: *data = svm->vmcb->save.lstar; break; case MSR_CSTAR: *data = svm->vmcb->save.cstar; break; case MSR_KERNEL_GS_BASE: *data = svm->vmcb->save.kernel_gs_base; break; case MSR_SYSCALL_MASK: *data = svm->vmcb->save.sfmask; break; #endif case MSR_IA32_SYSENTER_CS: *data = svm->vmcb->save.sysenter_cs; break; case MSR_IA32_SYSENTER_EIP: *data = svm->sysenter_eip; break; case MSR_IA32_SYSENTER_ESP: *data = svm->sysenter_esp; break; /* * Nobody will change the following 5 values in the VMCB so we can * safely return them on rdmsr. They will always be 0 until LBRV is * implemented. */ case MSR_IA32_DEBUGCTLMSR: *data = svm->vmcb->save.dbgctl; break; case MSR_IA32_LASTBRANCHFROMIP: *data = svm->vmcb->save.br_from; break; case MSR_IA32_LASTBRANCHTOIP: *data = svm->vmcb->save.br_to; break; case MSR_IA32_LASTINTFROMIP: *data = svm->vmcb->save.last_excp_from; break; case MSR_IA32_LASTINTTOIP: *data = svm->vmcb->save.last_excp_to; break; case MSR_VM_HSAVE_PA: *data = svm->nested.hsave_msr; break; case MSR_VM_CR: *data = svm->nested.vm_cr_msr; break; case MSR_IA32_UCODE_REV: *data = 0x01000065; break; default: return kvm_get_msr_common(vcpu, ecx, data); } return 0; }
Safe
[]
kvm
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
1.9054074093971116e+38
71
KVM: x86: Check non-canonical addresses upon WRMSR Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is written to certain MSRs. The behavior is "almost" identical for AMD and Intel (ignoring MSRs that are not implemented in either architecture since they would anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if a non-canonical address is written on Intel but not on AMD (which ignores the top 32-bits). Accordingly, this patch injects a #GP on the MSRs which behave identically on Intel and AMD. To eliminate the differences between the architectures, the value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned into a canonical value before writing instead of injecting a #GP. Some references from Intel and AMD manuals: According to Intel SDM description of WRMSR instruction #GP is expected on WRMSR "If the source register contains a non-canonical address and ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP." According to the AMD instruction manual: LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical form, a general-protection exception (#GP) occurs." IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the base field must be in canonical form or a #GP fault will occur." IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must be in canonical form." This patch fixes CVE-2014-3610. Cc: stable@vger.kernel.org Signed-off-by: Nadav Amit <namit@cs.technion.ac.il> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
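The canonical-address rule the message relies on is mechanical: bits 63..47 must be a sign extension of bit 47 (assuming 48 virtual address bits). A standalone sketch mirroring the idea behind the kernel's is_noncanonical_address():

    #include <stdint.h>

    static int is_noncanonical(uint64_t addr)
    {
            /* canonical iff bits 63..47 all equal bit 47 */
            return ((int64_t)addr >> 47) != ((int64_t)addr >> 63);
    }
    /* On WRMSR to e.g. MSR_LSTAR: if (is_noncanonical(data)) inject #GP. */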
static int ZEND_FASTCALL ZEND_IS_SMALLER_OR_EQUAL_SPEC_VAR_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op1; zval *result = &EX_T(opline->result.u.var).tmp_var; compare_function(result, _get_zval_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC), &opline->op2.u.constant TSRMLS_CC); ZVAL_BOOL(result, (Z_LVAL_P(result) <= 0)); if (free_op1.var) {zval_ptr_dtor(&free_op1.var);}; ZEND_VM_NEXT_OPCODE(); }
Safe
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
3.005639956767341e+38
14
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
0
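The path check the message describes reduces to scanning for an embedded NUL byte; a sketch (the macro name here is hypothetical, not PHP's actual one):

    #include <string.h>

    /* True if the path contains an embedded NUL byte and must be rejected. */
    #define PATH_HAS_NUL(p, len)  (memchr((p), '\0', (len)) != NULL)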
poppler_fonts_iter_new (GooList *items) { PopplerFontsIter *iter; iter = g_slice_new (PopplerFontsIter); iter->items = items; iter->index = 0; return iter; }
Safe
[ "CWE-476" ]
poppler
f162ecdea0dda5dbbdb45503c1d55d9afaa41d44
1.4618047472573059e+37
10
Fix crash on missing embedded file Check whether an embedded file is actually present in the PDF and show warning in that case. https://bugs.freedesktop.org/show_bug.cgi?id=106137 https://gitlab.freedesktop.org/poppler/poppler/issues/236
0
pathWithEscapedSlashesAction() const override { return envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: KEEP_UNCHANGED; }
Safe
[ "CWE-22" ]
envoy
5333b928d8bcffa26ab19bf018369a835f697585
1.8072458678403344e+37
4
Implement handling of escaped slash characters in URL path Fixes: CVE-2021-29492 Signed-off-by: Yan Avlasov <yavlasov@google.com>
0
static int do_ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); int val; int len; if (level != SOL_IP) return -EOPNOTSUPP; if (ip_mroute_opt(optname)) return ip_mroute_getsockopt(sk, optname, optval, optlen); if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; lock_sock(sk); switch (optname) { case IP_OPTIONS: { unsigned char optbuf[sizeof(struct ip_options)+40]; struct ip_options *opt = (struct ip_options *)optbuf; struct ip_options_rcu *inet_opt; inet_opt = rcu_dereference_protected(inet->inet_opt, sock_owned_by_user(sk)); opt->optlen = 0; if (inet_opt) memcpy(optbuf, &inet_opt->opt, sizeof(struct ip_options) + inet_opt->opt.optlen); release_sock(sk); if (opt->optlen == 0) return put_user(0, optlen); ip_options_undo(opt); len = min_t(unsigned int, len, opt->optlen); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, opt->__data, len)) return -EFAULT; return 0; } case IP_PKTINFO: val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0; break; case IP_RECVTTL: val = (inet->cmsg_flags & IP_CMSG_TTL) != 0; break; case IP_RECVTOS: val = (inet->cmsg_flags & IP_CMSG_TOS) != 0; break; case IP_RECVOPTS: val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0; break; case IP_RETOPTS: val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0; break; case IP_PASSSEC: val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0; break; case IP_RECVORIGDSTADDR: val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0; break; case IP_TOS: val = inet->tos; break; case IP_TTL: val = (inet->uc_ttl == -1 ? sysctl_ip_default_ttl : inet->uc_ttl); break; case IP_HDRINCL: val = inet->hdrincl; break; case IP_NODEFRAG: val = inet->nodefrag; break; case IP_MTU_DISCOVER: val = inet->pmtudisc; break; case IP_MTU: { struct dst_entry *dst; val = 0; dst = sk_dst_get(sk); if (dst) { val = dst_mtu(dst); dst_release(dst); } if (!val) { release_sock(sk); return -ENOTCONN; } break; } case IP_RECVERR: val = inet->recverr; break; case IP_MULTICAST_TTL: val = inet->mc_ttl; break; case IP_MULTICAST_LOOP: val = inet->mc_loop; break; case IP_UNICAST_IF: val = (__force int)htonl((__u32) inet->uc_index); break; case IP_MULTICAST_IF: { struct in_addr addr; len = min_t(unsigned int, len, sizeof(struct in_addr)); addr.s_addr = inet->mc_addr; release_sock(sk); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &addr, len)) return -EFAULT; return 0; } case IP_MSFILTER: { struct ip_msfilter msf; int err; if (len < IP_MSFILTER_SIZE(0)) { release_sock(sk); return -EINVAL; } if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) { release_sock(sk); return -EFAULT; } err = ip_mc_msfget(sk, &msf, (struct ip_msfilter __user *)optval, optlen); release_sock(sk); return err; } case MCAST_MSFILTER: { struct group_filter gsf; int err; if (len < GROUP_FILTER_SIZE(0)) { release_sock(sk); return -EINVAL; } if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) { release_sock(sk); return -EFAULT; } err = ip_mc_gsfget(sk, &gsf, (struct group_filter __user *)optval, optlen); release_sock(sk); return err; } case IP_MULTICAST_ALL: val = inet->mc_all; break; case IP_PKTOPTIONS: { struct msghdr msg; release_sock(sk); if (sk->sk_type != SOCK_STREAM) return -ENOPROTOOPT; msg.msg_control = optval; msg.msg_controllen = len; msg.msg_flags = flags; if (inet->cmsg_flags & IP_CMSG_PKTINFO) { struct in_pktinfo info; info.ipi_addr.s_addr = inet->inet_rcv_saddr; info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr; info.ipi_ifindex = inet->mc_index; 
put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); } if (inet->cmsg_flags & IP_CMSG_TTL) { int hlim = inet->mc_ttl; put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim); } if (inet->cmsg_flags & IP_CMSG_TOS) { int tos = inet->rcv_tos; put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos); } len -= msg.msg_controllen; return put_user(len, optlen); } case IP_FREEBIND: val = inet->freebind; break; case IP_TRANSPARENT: val = inet->transparent; break; case IP_MINTTL: val = inet->min_ttl; break; default: release_sock(sk); return -ENOPROTOOPT; } release_sock(sk); if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { unsigned char ucval = (unsigned char)val; len = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &ucval, 1)) return -EFAULT; } else { len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; } return 0; }
Safe
[ "CWE-20" ]
net
85fbaa75037d0b6b786ff18658ddf0b4014ce2a4
3.1270925280233286e+38
229
inet: fix addr_len/msg->msg_namelen assignment in recv_error and rxpmtu functions Commit bceaa90240b6019ed73b49965eac7d167610be69 ("inet: prevent leakage of uninitialized memory to user in recv syscalls") conditionally updated addr_len if the msg_name is written to. The recv_error and rxpmtu functions relied on the recvmsg functions to set up addr_len before. As this does not happen any more we have to pass addr_len to those functions as well and set it to the size of the corresponding sockaddr length. This broke traceroute and such. Fixes: bceaa90240b6 ("inet: prevent leakage of uninitialized memory to user in recv syscalls") Reported-by: Brad Spengler <spender@grsecurity.net> Reported-by: Tom Labanowski Cc: mpb <mpb.mail@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
0
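A sketch of the shape of the fix: once the recv_error functions write msg_name themselves, they must also report its length (kernel context; condensed, details are assumptions):

    #include <netinet/in.h>

    static void recv_error_set_addr(struct sockaddr_in *sin, int *addr_len)
    {
            if (sin) {
                    sin->sin_family = AF_INET;
                    /* ... address copied from the queued error skb ... */
                    *addr_len = sizeof(*sin);  /* the assignment the fix adds */
            }
    }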
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ /* RFC 7323 2.3 * The window field (SEG.WND) of every outgoing segment, with the * exception of <SYN> segments, MUST be right-shifted by * Rcv.Wind.Shift bits: */ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0, 0); }
Safe
[ "CWE-284" ]
linux
ac6e780070e30e4c35bd395acfe9191e6268bdd3
1.9483542859496622e+38
19
tcp: take care of truncations done by sk_filter() With syzkaller help, Marco Grassi found a bug in TCP stack, crashing in tcp_collapse() Root cause is that sk_filter() can truncate the incoming skb, but TCP stack was not really expecting this to happen. It probably was expecting a simple DROP or ACCEPT behavior. We first need to make sure no part of TCP header could be removed. Then we need to adjust TCP_SKB_CB(skb)->end_seq Many thanks to syzkaller team and Marco for giving us a reproducer. Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Marco Grassi <marco.gra@gmail.com> Reported-by: Vladis Dronov <vdronov@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
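The commit's core is a tcp_filter() wrapper that caps how far a socket filter may trim the skb, so the TCP header always survives; a sketch following the commit description (kernel context; sk_filter_trim_cap() is the existing helper):

    int tcp_filter(struct sock *sk, struct sk_buff *skb)
    {
            struct tcphdr *th = (struct tcphdr *)skb->data;

            /* A BPF filter may shrink the packet, but never below the header. */
            return sk_filter_trim_cap(sk, skb, th->doff * 4);
    }
    /* Callers then refresh TCP_SKB_CB(skb)->end_seq if the payload shrank. */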
bool bodyUsed_get(JSContext *cx, unsigned argc, Value *vp) { METHOD_HEADER(0) args.rval().setBoolean(RequestOrResponse::body_used(self)); return true; }
Safe
[ "CWE-94" ]
js-compute-runtime
65524ffc962644e9fc39f4b368a326b6253912a9
7.85680370456919e+37
5
use random_get instead of arc4random, as arc4random does not work correctly with wizer. wizer causes the seed in arc4random to be the same between executions, which is not random
0
static void dw_reader(struct dw_spi *dws) { u32 max; u16 rxw; spin_lock(&dws->buf_lock); max = rx_max(dws); while (max--) { rxw = dw_read_io_reg(dws, DW_SPI_DR); /* Care rx only if the transfer's original "rx" is not null */ if (dws->rx_end - dws->len) { if (dws->n_bytes == 1) *(u8 *)(dws->rx) = rxw; else *(u16 *)(dws->rx) = rxw; } dws->rx += dws->n_bytes; } spin_unlock(&dws->buf_lock); }
Safe
[ "CWE-662" ]
linux
19b61392c5a852b4e8a0bf35aecb969983c5932d
4.317595192345885e+37
20
spi: spi-dw: Add a lock protecting the dw_spi rx/tx buffers to prevent concurrent calls of dw_spi_irq() and dw_spi_transfer_one(). I found a panic in dw_writer(): txw = *(u8 *)(dws->tx), with dws->tx==null, dws->len==4, and dws->tx_end==1. When a TPM driver message times out, dw_spi_irq() and dw_spi_transfer_one() may access dw_spi concurrently, so the dw_spi structure lacks protection. Moreover, dw_spi_transfer_one() sets the dw rx/tx buffers and then enables the irq; without a lock, the stores to the rx/tx fields and the loads performed by another core handling the irq may be reordered. [ 1025.321302] Call trace: ... [ 1025.321319] __crash_kexec+0x98/0x148 [ 1025.321323] panic+0x17c/0x314 [ 1025.321329] die+0x29c/0x2e8 [ 1025.321334] die_kernel_fault+0x68/0x78 [ 1025.321337] __do_kernel_fault+0x90/0xb0 [ 1025.321346] do_page_fault+0x88/0x500 [ 1025.321347] do_translation_fault+0xa8/0xb8 [ 1025.321349] do_mem_abort+0x68/0x118 [ 1025.321351] el1_da+0x20/0x8c [ 1025.321362] dw_writer+0xc8/0xd0 [ 1025.321364] interrupt_transfer+0x60/0x110 [ 1025.321365] dw_spi_irq+0x48/0x70 ... Signed-off-by: wuxu.wu <wuxu.wu@huawei.com> Link: https://lore.kernel.org/r/1577849981-31489-1-git-send-email-wuxu.wu@huawei.com Signed-off-by: Mark Brown <broonie@kernel.org>
0
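The dw_reader() shown in this record already takes dws->buf_lock; its tx-side twin is the other half of the fix. A sketch mirroring the upstream dw_writer() under the same lock (kernel context):

    static void dw_writer(struct dw_spi *dws)
    {
            u32 max;
            u16 txw = 0;

            spin_lock(&dws->buf_lock);
            max = tx_max(dws);
            while (max--) {
                    /* Send "dummy" data when the transfer's original "tx" is null. */
                    if (dws->tx_end - dws->len) {
                            if (dws->n_bytes == 1)
                                    txw = *(u8 *)(dws->tx);
                            else
                                    txw = *(u16 *)(dws->tx);
                    }
                    dw_write_io_reg(dws, DW_SPI_DR, txw);
                    dws->tx += dws->n_bytes;
            }
            spin_unlock(&dws->buf_lock);
    }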
ieee80211_rx_h_action(struct ieee80211_rx_data *rx) { struct ieee80211_local *local = rx->local; struct ieee80211_sub_if_data *sdata = rx->sdata; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); int len = rx->skb->len; if (!ieee80211_is_action(mgmt->frame_control)) return RX_CONTINUE; /* drop too small frames */ if (len < IEEE80211_MIN_ACTION_SIZE) return RX_DROP_UNUSABLE; if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) return RX_DROP_UNUSABLE; switch (mgmt->u.action.category) { case WLAN_CATEGORY_HT: /* reject HT action frames from stations not supporting HT */ if (!rx->sta->sta.ht_cap.ht_supported) goto invalid; if (sdata->vif.type != NL80211_IFTYPE_STATION && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC) break; /* verify action & smps_control/chanwidth are present */ if (len < IEEE80211_MIN_ACTION_SIZE + 2) goto invalid; switch (mgmt->u.action.u.ht_smps.action) { case WLAN_HT_ACTION_SMPS: { struct ieee80211_supported_band *sband; enum ieee80211_smps_mode smps_mode; struct sta_opmode_info sta_opmode = {}; /* convert to HT capability */ switch (mgmt->u.action.u.ht_smps.smps_control) { case WLAN_HT_SMPS_CONTROL_DISABLED: smps_mode = IEEE80211_SMPS_OFF; break; case WLAN_HT_SMPS_CONTROL_STATIC: smps_mode = IEEE80211_SMPS_STATIC; break; case WLAN_HT_SMPS_CONTROL_DYNAMIC: smps_mode = IEEE80211_SMPS_DYNAMIC; break; default: goto invalid; } /* if no change do nothing */ if (rx->sta->sta.smps_mode == smps_mode) goto handled; rx->sta->sta.smps_mode = smps_mode; sta_opmode.smps_mode = ieee80211_smps_mode_to_smps_mode(smps_mode); sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; sband = rx->local->hw.wiphy->bands[status->band]; rate_control_rate_update(local, sband, rx->sta, IEEE80211_RC_SMPS_CHANGED); cfg80211_sta_opmode_change_notify(sdata->dev, rx->sta->addr, &sta_opmode, GFP_ATOMIC); goto handled; } case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { struct ieee80211_supported_band *sband; u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; enum ieee80211_sta_rx_bandwidth max_bw, new_bw; struct sta_opmode_info sta_opmode = {}; /* If it doesn't support 40 MHz it can't change ... 
*/ if (!(rx->sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) goto handled; if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) max_bw = IEEE80211_STA_RX_BW_20; else max_bw = ieee80211_sta_cap_rx_bw(rx->sta); /* set cur_max_bandwidth and recalc sta bw */ rx->sta->cur_max_bandwidth = max_bw; new_bw = ieee80211_sta_cur_vht_bw(rx->sta); if (rx->sta->sta.bandwidth == new_bw) goto handled; rx->sta->sta.bandwidth = new_bw; sband = rx->local->hw.wiphy->bands[status->band]; sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(rx->sta); sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED; rate_control_rate_update(local, sband, rx->sta, IEEE80211_RC_BW_CHANGED); cfg80211_sta_opmode_change_notify(sdata->dev, rx->sta->addr, &sta_opmode, GFP_ATOMIC); goto handled; } default: goto invalid; } break; case WLAN_CATEGORY_PUBLIC: if (len < IEEE80211_MIN_ACTION_SIZE + 1) goto invalid; if (sdata->vif.type != NL80211_IFTYPE_STATION) break; if (!rx->sta) break; if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) break; if (mgmt->u.action.u.ext_chan_switch.action_code != WLAN_PUB_ACTION_EXT_CHANSW_ANN) break; if (len < offsetof(struct ieee80211_mgmt, u.action.u.ext_chan_switch.variable)) goto invalid; goto queue; case WLAN_CATEGORY_VHT: if (sdata->vif.type != NL80211_IFTYPE_STATION && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC) break; /* verify action code is present */ if (len < IEEE80211_MIN_ACTION_SIZE + 1) goto invalid; switch (mgmt->u.action.u.vht_opmode_notif.action_code) { case WLAN_VHT_ACTION_OPMODE_NOTIF: { /* verify opmode is present */ if (len < IEEE80211_MIN_ACTION_SIZE + 2) goto invalid; goto queue; } case WLAN_VHT_ACTION_GROUPID_MGMT: { if (len < IEEE80211_MIN_ACTION_SIZE + 25) goto invalid; goto queue; } default: break; } break; case WLAN_CATEGORY_BACK: if (sdata->vif.type != NL80211_IFTYPE_STATION && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC) break; /* verify action_code is present */ if (len < IEEE80211_MIN_ACTION_SIZE + 1) break; switch (mgmt->u.action.u.addba_req.action_code) { case WLAN_ACTION_ADDBA_REQ: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.addba_req))) goto invalid; break; case WLAN_ACTION_ADDBA_RESP: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.addba_resp))) goto invalid; break; case WLAN_ACTION_DELBA: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.delba))) goto invalid; break; default: goto invalid; } goto queue; case WLAN_CATEGORY_SPECTRUM_MGMT: /* verify action_code is present */ if (len < IEEE80211_MIN_ACTION_SIZE + 1) break; switch (mgmt->u.action.u.measurement.action_code) { case WLAN_ACTION_SPCT_MSR_REQ: if (status->band != NL80211_BAND_5GHZ) break; if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.measurement))) break; if (sdata->vif.type != NL80211_IFTYPE_STATION) break; ieee80211_process_measurement_req(sdata, mgmt, len); goto handled; case WLAN_ACTION_SPCT_CHL_SWITCH: { u8 *bssid; if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.chan_switch))) break; if (sdata->vif.type != NL80211_IFTYPE_STATION && sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT) break; if (sdata->vif.type == NL80211_IFTYPE_STATION) bssid = sdata->u.mgd.bssid; else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) bssid = 
sdata->u.ibss.bssid; else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) bssid = mgmt->sa; else break; if (!ether_addr_equal(mgmt->bssid, bssid)) break; goto queue; } } break; case WLAN_CATEGORY_SA_QUERY: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.sa_query))) break; switch (mgmt->u.action.u.sa_query.action) { case WLAN_ACTION_SA_QUERY_REQUEST: if (sdata->vif.type != NL80211_IFTYPE_STATION) break; ieee80211_process_sa_query_req(sdata, mgmt, len); goto handled; } break; case WLAN_CATEGORY_SELF_PROTECTED: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.self_prot.action_code))) break; switch (mgmt->u.action.u.self_prot.action_code) { case WLAN_SP_MESH_PEERING_OPEN: case WLAN_SP_MESH_PEERING_CLOSE: case WLAN_SP_MESH_PEERING_CONFIRM: if (!ieee80211_vif_is_mesh(&sdata->vif)) goto invalid; if (sdata->u.mesh.user_mpm) /* userspace handles this frame */ break; goto queue; case WLAN_SP_MGK_INFORM: case WLAN_SP_MGK_ACK: if (!ieee80211_vif_is_mesh(&sdata->vif)) goto invalid; break; } break; case WLAN_CATEGORY_MESH_ACTION: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.mesh_action.action_code))) break; if (!ieee80211_vif_is_mesh(&sdata->vif)) break; if (mesh_action_is_path_sel(mgmt) && !mesh_path_sel_is_hwmp(sdata)) break; goto queue; } return RX_CONTINUE; invalid: status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; /* will return in the next handlers */ return RX_CONTINUE; handled: if (rx->sta) rx->sta->rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; queue: skb_queue_tail(&sdata->skb_queue, rx->skb); ieee80211_queue_work(&local->hw, &sdata->work); if (rx->sta) rx->sta->rx_stats.packets++; return RX_QUEUED; }
Safe
[]
linux
588f7d39b3592a36fb7702ae3b8bdd9be4621e2f
8.64105483668498e+37
309
mac80211: drop robust management frames from unknown TA When receiving a robust management frame, drop it if we don't have rx->sta since then we don't have a security association and thus couldn't possibly validate the frame. Cc: stable@vger.kernel.org Signed-off-by: Johannes Berg <johannes.berg@intel.com>
0
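The drop rule from the commit message fits in two lines; a sketch, with its exact placement in the rx handler chain left as an assumption:

    /* No rx->sta means no security association, so a robust management
     * frame cannot possibly be validated: drop it. */
    if (!rx->sta && ieee80211_is_robust_mgmt_frame(rx->skb))
            return RX_DROP_MONITOR;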
HeaderMapImpl::HeaderEntryImpl* HeaderMapImpl::getExistingInline(absl::string_view key) { EntryCb cb = ConstSingleton<StaticLookupTable>::get().find(key); if (cb) { StaticLookupResponse ref_lookup_response = cb(*this); return *ref_lookup_response.entry_; } return nullptr; }
Safe
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
3.2122783351394264e+36
8
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <asraa@google.com>
0
PJ_DEF(const char*) pjsip_transport_get_type_desc(pjsip_transport_type_e type) { /* Return the description. */ return get_tpname(type)->description; }
Safe
[ "CWE-297", "CWE-295" ]
pjproject
67e46c1ac45ad784db5b9080f5ed8b133c122872
9.962259844695754e+37
5
Merge pull request from GHSA-8hcp-hm38-mfph * Check hostname during TLS transport selection * revision based on feedback * remove the code in create_request that has been moved
0
static BOOL rfx_process_message_region(RFX_CONTEXT* context, RFX_MESSAGE* message, wStream* s, UINT16* pExpectedBlockType) { int i; UINT16 regionType; UINT16 numTileSets; RFX_RECT* tmpRects; if (*pExpectedBlockType != WBT_REGION) { WLog_ERR(TAG, "%s: message unexpected wants WBT_REGION", __FUNCTION__); return FALSE; } *pExpectedBlockType = WBT_EXTENSION; if (Stream_GetRemainingLength(s) < 3) { WLog_ERR(TAG, "%s: packet too small (regionFlags/numRects)", __FUNCTION__); return FALSE; } Stream_Seek_UINT8(s); /* regionFlags (1 byte) */ Stream_Read_UINT16(s, message->numRects); /* numRects (2 bytes) */ if (message->numRects < 1) { /* If numRects is zero the decoder must generate a rectangle with coordinates (0, 0, width, height). See [MS-RDPRFX] (revision >= 17.0) 2.2.2.3.3 TS_RFX_REGION https://msdn.microsoft.com/en-us/library/ff635233.aspx */ tmpRects = realloc(message->rects, sizeof(RFX_RECT)); if (!tmpRects) return FALSE; message->numRects = 1; message->rects = tmpRects; message->rects->x = 0; message->rects->y = 0; message->rects->width = context->width; message->rects->height = context->height; return TRUE; } if (Stream_GetRemainingLength(s) < (size_t)(8 * message->numRects)) { WLog_ERR(TAG, "%s: packet too small for num_rects=%" PRIu16 "", __FUNCTION__, message->numRects); return FALSE; } tmpRects = realloc(message->rects, message->numRects * sizeof(RFX_RECT)); if (!tmpRects) return FALSE; message->rects = tmpRects; /* rects */ for (i = 0; i < message->numRects; i++) { RFX_RECT* rect = rfx_message_get_rect(message, i); /* RFX_RECT */ Stream_Read_UINT16(s, rect->x); /* x (2 bytes) */ Stream_Read_UINT16(s, rect->y); /* y (2 bytes) */ Stream_Read_UINT16(s, rect->width); /* width (2 bytes) */ Stream_Read_UINT16(s, rect->height); /* height (2 bytes) */ WLog_Print(context->priv->log, WLOG_DEBUG, "rect %d (x,y=%" PRIu16 ",%" PRIu16 " w,h=%" PRIu16 " %" PRIu16 ").", i, rect->x, rect->y, rect->width, rect->height); } if (Stream_GetRemainingLength(s) < 4) { WLog_ERR(TAG, "%s: packet too small (regionType/numTileSets)", __FUNCTION__); return FALSE; } Stream_Read_UINT16(s, regionType); /*regionType (2 bytes): MUST be set to CBT_REGION (0xCAC1)*/ Stream_Read_UINT16(s, numTileSets); /*numTilesets (2 bytes): MUST be set to 0x0001.*/ if (regionType != CBT_REGION) { WLog_ERR(TAG, "%s: invalid region type 0x%04" PRIX16 "", __FUNCTION__, regionType); return TRUE; } if (numTileSets != 0x0001) { WLog_ERR(TAG, "%s: invalid number of tilesets (%" PRIu16 ")", __FUNCTION__, numTileSets); return FALSE; } return TRUE; }
Safe
[ "CWE-125" ]
FreeRDP
3a06ce058f690b7fc1edad2f352c453376c2ebfe
1.6385640746613526e+38
95
Fixed oob read in rfx_process_message_tileset Check input data length Thanks to hac425 CVE-2020-11043
0
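A sketch of the kind of length check the fix adds on the tileset path, in the same Stream_* style as the region parser shown in this record (field sizes follow the RFX block header; the exact bound is an assumption):

    UINT16 blockType;
    UINT32 blockLen;

    if (Stream_GetRemainingLength(s) < 6)
            return FALSE;               /* blockType (2) + blockLen (4) */
    Stream_Read_UINT16(s, blockType);
    Stream_Read_UINT32(s, blockLen);
    if ((blockLen < 6) || (Stream_GetRemainingLength(s) < blockLen - 6))
            return FALSE;               /* tile claims more data than remains */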
static uint16_t get_version(struct avdtp *session) { const sdp_record_t *rec; sdp_list_t *protos; sdp_data_t *proto_desc; uint16_t ver = 0x0000; rec = btd_device_get_record(session->device, A2DP_SINK_UUID); if (!rec) rec = btd_device_get_record(session->device, A2DP_SOURCE_UUID); if (!rec) return ver; if (sdp_get_access_protos(rec, &protos) < 0) return ver; proto_desc = sdp_get_proto_desc(protos, AVDTP_UUID); if (proto_desc && proto_desc->dtd == SDP_UINT16) ver = proto_desc->val.uint16; sdp_list_foreach(protos, (sdp_list_func_t) sdp_list_free, NULL); sdp_list_free(protos, NULL); return ver; }
Safe
[ "CWE-703" ]
bluez
7a80d2096f1b7125085e21448112aa02f49f5e9a
2.610513526558433e+38
26
avdtp: Fix accepting invalid/malformed capabilities Check if capabilities are valid before attempting to copy them.
0
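A sketch of validating capability TLVs before accepting them, as the message describes (struct name from BlueZ's avdtp.c; the loop shape is an assumption):

    while (size >= sizeof(struct avdtp_service_capability)) {
            struct avdtp_service_capability *cap = (void *) data;
            size_t cap_len = sizeof(*cap) + cap->length;

            if (cap_len > size)
                    return FALSE;   /* capability claims more bytes than remain */

            data += cap_len;
            size -= cap_len;
    }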
static void calculate_gunzip_crc(STATE_PARAM_ONLY) { gunzip_crc = crc32_block_endian0(gunzip_crc, gunzip_window, gunzip_outbuf_count, gunzip_crc_table); gunzip_bytes_out += gunzip_outbuf_count; }
Safe
[ "CWE-476" ]
busybox
1de25a6e87e0e627aa34298105a3d17c60a1f44e
4.877597719161641e+36
5
unzip: test for bad archive SEGVing function old new delta huft_build 1296 1300 +4 Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
0
_gdata_service_get_log_level (void) { static int level = -1; if (level < 0) { const gchar *envvar = g_getenv ("LIBGDATA_DEBUG"); if (envvar != NULL) level = atoi (envvar); level = MIN (MAX (level, 0), GDATA_LOG_FULL_UNREDACTED); } return level; }
Safe
[ "CWE-20" ]
libgdata
6799f2c525a584dc998821a6ce897e463dad7840
1.8470716545185974e+37
13
core: Validate SSL certificates for all connections This prevents MitM attacks which use spoofed SSL certificates. Note that this bumps our libsoup requirement to 2.37.91. Closes: https://bugzilla.gnome.org/show_bug.cgi?id=671535
0
**/ CImg<T>& min(const T& val) { if (is_empty()) return *this; cimg_pragma_openmp(parallel for cimg_openmp_if(size()>=65536)) cimg_rof(*this,ptrd,T) *ptrd = std::min(*ptrd,val); return *this;
Safe
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
7.88411291296251e+36
6
Fix other issues in 'CImg<T>::load_bmp()'.
0
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, int type, void *kvaddr, struct page *page, unsigned long offset, u16 len) { int pad_len, rval; dma_addr_t addr; rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); return rval; } /* If coalesce buffer is allocated, copy data into it */ if (tx->coalesce_buf) { if (type == SDMA_MAP_NONE) { __sdma_txclean(dd, tx); return -EINVAL; } if (type == SDMA_MAP_PAGE) { kvaddr = kmap(page); kvaddr += offset; } else if (WARN_ON(!kvaddr)) { __sdma_txclean(dd, tx); return -EINVAL; } memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len); tx->coalesce_idx += len; if (type == SDMA_MAP_PAGE) kunmap(page); /* If there is more data, return */ if (tx->tlen - tx->coalesce_idx) return 0; /* Whole packet is received; add any padding */ pad_len = tx->packet_len & (sizeof(u32) - 1); if (pad_len) { pad_len = sizeof(u32) - pad_len; memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len); /* padding is taken care of for coalescing case */ tx->packet_len += pad_len; tx->tlen += pad_len; } /* dma map the coalesce buffer */ addr = dma_map_single(&dd->pcidev->dev, tx->coalesce_buf, tx->tlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { __sdma_txclean(dd, tx); return -ENOSPC; } /* Add descriptor for coalesce buffer */ tx->desc_limit = MAX_DESC; return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, tx->tlen); } return 1; }
Safe
[ "CWE-400", "CWE-401" ]
linux
34b3be18a04ecdc610aae4c48e5d1b799d8689f6
1.8731308822934707e+38
66
RDMA/hfi1: Prevent memory leak in sdma_init In sdma_init if rhashtable_init fails the allocated memory for tmp_sdma_rht should be released. Fixes: 5a52a7acf7e2 ("IB/hfi1: NULL pointer dereference when freeing rhashtable") Link: https://lore.kernel.org/r/20190925144543.10141-1-navid.emamdoost@gmail.com Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com> Acked-by: Dennis Dalessandro <dennis.dalessandro@intel.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
0
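The leak itself is a missing kfree() on one error path of sdma_init(); a sketch of the corrected path (kernel context, condensed):

    tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
    if (!tmp_sdma_rht)
            return -ENOMEM;

    ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
    if (ret < 0) {
            kfree(tmp_sdma_rht);    /* previously leaked on this path */
            return ret;
    }
    dd->sdma_rht = tmp_sdma_rht;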
static int eb_create(struct i915_execbuffer *eb) { if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { unsigned int size = 1 + ilog2(eb->buffer_count); /* * Without a 1:1 association between relocation handles and * the execobject[] index, we instead create a hashtable. * We size it dynamically based on available memory, starting * first with 1:1 assocative hash and scaling back until * the allocation succeeds. * * Later on we use a positive lut_size to indicate we are * using this hashtable, and a negative value to indicate a * direct lookup. */ do { gfp_t flags; /* While we can still reduce the allocation size, don't * raise a warning and allow the allocation to fail. * On the last pass though, we want to try as hard * as possible to perform the allocation and warn * if it fails. */ flags = GFP_KERNEL; if (size > 1) flags |= __GFP_NORETRY | __GFP_NOWARN; eb->buckets = kzalloc(sizeof(struct hlist_head) << size, flags); if (eb->buckets) break; } while (--size); if (unlikely(!size)) return -ENOMEM; eb->lut_size = size; } else { eb->lut_size = -eb->buffer_count; } return 0; }
Safe
[ "CWE-20" ]
linux
594cc251fdd0d231d342d88b2fdff4bc42fb0690
1.3144552025207885e+38
45
make 'user_access_begin()' do 'access_ok()' Originally, the rule used to be that you'd have to do access_ok() separately, and then user_access_begin() before actually doing the direct (optimized) user access. But experience has shown that people then decide not to do access_ok() at all, and instead rely on it being implied by other operations or similar. Which makes it very hard to verify that the access has actually been range-checked. If you use the unsafe direct user accesses, hardware features (either SMAP - Supervisor Mode Access Protection - on x86, or PAN - Privileged Access Never - on ARM) do force you to use user_access_begin(). But nothing really forces the range check. By putting the range check into user_access_begin(), we actually force people to do the right thing (tm), and the range check will be visible near the actual accesses. We have way too long a history of people trying to avoid them. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
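A sketch of the semantic change, assuming the x86 flavour where __uaccess_begin() is the low-level primitive (the real definition differs in detail):

    /* user_access_begin() now performs the range check itself and
     * reports whether the unsafe accessors may be used at all. */
    #define user_access_begin(ptr, len) ({          \
            bool __ok = access_ok(ptr, len);        \
            if (__ok)                               \
                    __uaccess_begin();              \
            __ok;                                   \
    })
    /* Callers: if (!user_access_begin(uaddr, size)) return -EFAULT; */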
static inline Quantum ScaleMapToQuantum(const MagickRealType value) { if (value <= 0.0) return((Quantum) 0); if (value >= MaxMap) return(QuantumRange); return((Quantum) (281479271743489.0*value)); }
Safe
[ "CWE-190" ]
ImageMagick
f60d59cc3a7e3402d403361e0985ffa56f746a82
2.1850119995886405e+38
8
https://github.com/ImageMagick/ImageMagick/issues/1727
0
static int sched_read_attr(struct sched_attr __user *uattr, struct sched_attr *attr, unsigned int usize) { int ret; if (!access_ok(VERIFY_WRITE, uattr, usize)) return -EFAULT; /* * If we're handed a smaller struct than we know of, * ensure all the unknown bits are 0 - i.e. old * user-space does not get uncomplete information. */ if (usize < sizeof(*attr)) { unsigned char *addr; unsigned char *end; addr = (void *)attr + usize; end = (void *)attr + sizeof(*attr); for (; addr < end; addr++) { if (*addr) goto err_size; } attr->size = usize; } ret = copy_to_user(uattr, attr, attr->size); if (ret) return -EFAULT; out: return ret; err_size: ret = -E2BIG; goto out; }
Safe
[ "CWE-200" ]
linux
4efbc454ba68def5ef285b26ebfcfdb605b52755
1.6292402442097871e+38
40
sched: Fix information leak in sys_sched_getattr() We're copying the on-stack structure to userspace, but forgot to give the right number of bytes to copy. This allows the calling process to obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent kernel memory). This fix copies only as much as we actually have on the stack (attr->size defaults to the size of the struct) and leaves the rest of the userspace-provided buffer untouched. Found using kmemcheck + trinity. Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI") Cc: Dario Faggioli <raistlin@linux.it> Cc: Juri Lelli <juri.lelli@gmail.com> Cc: Ingo Molnar <mingo@kernel.org> Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1392585857-10725-1-git-send-email-vegard.nossum@oracle.com Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
0
static void build_dirs(char *src, char *dst, size_t src_prefix_len, size_t dst_prefix_len) { char *p = src + src_prefix_len + 1; char *q = dst + dst_prefix_len + 1; char *r = dst + dst_prefix_len; struct stat s; bool last = false; *r = '\0'; for (; !last; p++, q++) { if (*p == '\0') { last = true; } if (*p == '\0' || (*p == '/' && *(p - 1) != '/')) { // We found a new component of our src path. // Null-terminate it temporarily here so that we can work // with it. *p = '\0'; if (stat(src, &s) == 0 && S_ISDIR(s.st_mode)) { // Null-terminate the dst path and undo its previous // termination. *q = '\0'; *r = '/'; r = q; create_empty_dir_as_root(dst, s.st_mode); } if (!last) { // If we're not at the final terminating null, restore // the slash so that we can continue our traversal. *p = '/'; } } } }
Vulnerable
[ "CWE-269", "CWE-94" ]
firejail
dab835e7a0eb287822016f5ae4e87f46e1d363e7
7.87786192757538e+37
32
CVE-2022-31214: fixing the fix
1
perf_install_in_context(struct perf_event_context *ctx, struct perf_event *event, int cpu) { struct task_struct *task = ctx->task; lockdep_assert_held(&ctx->mutex); event->ctx = ctx; if (event->cpu != -1) event->cpu = cpu; if (!task) { /* * Per cpu events are installed via an smp call and * the install is always successful. */ cpu_function_call(cpu, __perf_install_in_context, event); return; } retry: if (!task_function_call(task, __perf_install_in_context, event)) return; raw_spin_lock_irq(&ctx->lock); /* * If we failed to find a running task, but find the context active now * that we've acquired the ctx->lock, retry. */ if (ctx->is_active) { raw_spin_unlock_irq(&ctx->lock); /* * Reload the task pointer, it might have been changed by * a concurrent perf_event_context_sched_out(). */ task = ctx->task; goto retry; } /* * Since the task isn't running, its safe to add the event, us holding * the ctx->lock ensures the task won't get scheduled in. */ add_event_to_ctx(event, ctx); raw_spin_unlock_irq(&ctx->lock); }
Safe
[ "CWE-284", "CWE-264" ]
linux
f63a8daa5812afef4f06c962351687e1ff9ccb2b
3.0444878025711288e+38
47
perf: Fix event->ctx locking There have been a few reported issues wrt. the lack of locking around changing event->ctx. This patch tries to address those. It avoids the whole rwsem thing; and while it appears to work, please give it some thought in review. What I did fail at is sensible runtime checks on the use of event->ctx, the RCU use makes it very hard. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/20150123125834.209535886@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
0
clutter_device_manager_xi2_class_init (ClutterDeviceManagerXI2Class *klass) { ClutterDeviceManagerClass *manager_class; GObjectClass *gobject_class; obj_props[PROP_OPCODE] = g_param_spec_int ("opcode", "Opcode", "The XI2 opcode", -1, G_MAXINT, -1, CLUTTER_PARAM_WRITABLE | G_PARAM_CONSTRUCT_ONLY); gobject_class = G_OBJECT_CLASS (klass); gobject_class->constructed = clutter_device_manager_xi2_constructed; gobject_class->set_property = clutter_device_manager_xi2_set_property; g_object_class_install_properties (gobject_class, PROP_LAST, obj_props); manager_class = CLUTTER_DEVICE_MANAGER_CLASS (klass); manager_class->add_device = clutter_device_manager_xi2_add_device; manager_class->remove_device = clutter_device_manager_xi2_remove_device; manager_class->get_devices = clutter_device_manager_xi2_get_devices; manager_class->get_core_device = clutter_device_manager_xi2_get_core_device; manager_class->get_device = clutter_device_manager_xi2_get_device; }
Safe
[ "CWE-264" ]
clutter
e310c68d7b38d521e341f4e8a36f54303079d74e
3.182595301853271e+38
26
x11: trap errors when calling XIQueryDevice Devices can disappear at any time, causing XIQueryDevice to throw an error. At the same time, plug a memory leak. https://bugzilla.gnome.org/show_bug.cgi?id=701974
0
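A sketch of the trap described in the message, using Clutter's public error-trap helpers around the XIQueryDevice() call (surrounding code is condensed):

    XIDeviceInfo *info;
    int n_devices;

    clutter_x11_trap_x_errors();
    info = XIQueryDevice(xdisplay, XIAllDevices, &n_devices);
    if (clutter_x11_untrap_x_errors() != 0 || info == NULL)
            return;                 /* device vanished mid-call */
    /* ... translate devices ... */
    XIFreeDeviceInfo(info);         /* also plugs the leak mentioned above */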
void Compute(OpKernelContext *ctx) override { const Tensor *indices_t, *values_t, *shape_t, *reduction_axes_t; OP_REQUIRES_OK(ctx, ctx->input("input_indices", &indices_t)); OP_REQUIRES_OK(ctx, ctx->input("input_values", &values_t)); OP_REQUIRES_OK(ctx, ctx->input("input_shape", &shape_t)); OP_REQUIRES_OK(ctx, ctx->input("reduction_axes", &reduction_axes_t)); OP_REQUIRES_OK(ctx, ValidateInputs(shape_t, reduction_axes_t)); SparseTensor sp; OP_REQUIRES_OK(ctx, SparseTensor::Create(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), TensorShape(shape_t->vec<int64>()), &sp)); ReduceDetails reduction = SparseTensorReduceHelper( sp, reduction_axes_t->flat<int32>(), keep_dims_); sp.Reorder<T>(reduction.reorder_dims); // Count nnzs in the output SparseTensor. int64_t nnz = 0; auto iter = sp.group(reduction.group_by_dims); for (auto it = iter.begin(); it != iter.end(); ++it) { nnz++; } Tensor *out_indices_t; OP_REQUIRES_OK(ctx, ctx->allocate_output( 0, TensorShape({nnz, reduction.reduced_shape.dims()}), &out_indices_t)); typename TTypes<int64>::Matrix out_indices_mat = out_indices_t->matrix<int64>(); // For keep_dims. We don't explicitly set dim fields for reduced dims below. out_indices_mat.setZero(); Tensor *out_values_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({nnz}), &out_values_t)); auto out_flat = out_values_t->flat<T>(); Tensor tmp_reduced_val; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({}), &tmp_reduced_val)); auto reduced_val = tmp_reduced_val.scalar<T>(); int64_t i = 0; for (const auto &g : sp.group(reduction.group_by_dims)) { Op::template Run<T>(ctx, reduced_val, g.template values<T>()); std::vector<int64> group = g.group(); for (int64_t j = 0; j < group.size(); j++) { if (keep_dims_) { out_indices_mat(i, reduction.group_by_dims[j]) = group[j]; } else { out_indices_mat(i, j) = group[j]; } } out_flat(i) = reduced_val(); i++; VLOG(2) << "coords: " << absl::StrJoin(g.group(), ",") << "; group " << Op::Name() << ": " << reduced_val(); } Tensor *out_shape_t; OP_REQUIRES_OK(ctx, ctx->allocate_output( 2, TensorShape({reduction.reduced_shape.dims()}), &out_shape_t)); auto out_shape_flat = out_shape_t->flat<int64>(); auto out_dim_sizes = reduction.reduced_shape.dim_sizes(); if (!out_dim_sizes.empty()) { std::copy(out_dim_sizes.begin(), out_dim_sizes.end(), &out_shape_flat(0)); } }
Safe
[ "CWE-125" ]
tensorflow
87158f43f05f2720a374f3e6d22a7aaa3a33f750
2.8199544499934314e+38
71
Prevent heap OOB in sparse reduction ops. PiperOrigin-RevId: 387934524 Change-Id: I894aa30f1e454f09b471d565b4a325da49322c1a
0
static int _expand_arg(pam_handle_t *pamh, char **value) { const char *orig=*value, *tmpptr=NULL; char *ptr; /* * Sure would be nice to use tmpptr but it needs to be * a constant so that the compiler will shut up when I * call pam_getenv and _pam_get_item_byname -- sigh */ /* No unexpanded variable can be bigger than BUF_SIZE */ char type, tmpval[BUF_SIZE]; /* I know this shouldn't be hard-coded but it's so much easier this way */ char tmp[MAX_ENV]; D(("Remember to initialize tmp!")); memset(tmp, 0, MAX_ENV); /* * (possibly non-existent) environment variables can be used as values * by prepending a "$" and wrapping in {} (ie: ${HOST}), can escape with "\" * (possibly non-existent) PAM items can be used as values * by prepending a "@" and wrapping in {} (ie: @{PAM_RHOST}, can escape * */ D(("Expanding <%s>",orig)); while (*orig) { /* while there is some input to deal with */ if ('\\' == *orig) { ++orig; if ('$' != *orig && '@' != *orig) { D(("Unrecognized escaped character: <%c> - ignoring", *orig)); pam_syslog(pamh, LOG_ERR, "Unrecognized escaped character: <%c> - ignoring", *orig); } else if ((strlen(tmp) + 1) < MAX_ENV) { tmp[strlen(tmp)] = *orig++; /* Note the increment */ } else { /* is it really a good idea to try to log this? */ D(("Variable buffer overflow: <%s> + <%s>", tmp, tmpptr)); pam_syslog (pamh, LOG_ERR, "Variable buffer overflow: <%s> + <%s>", tmp, tmpptr); } continue; } if ('$' == *orig || '@' == *orig) { if ('{' != *(orig+1)) { D(("Expandable variables must be wrapped in {}" " <%s> - ignoring", orig)); pam_syslog(pamh, LOG_ERR, "Expandable variables must be wrapped in {}" " <%s> - ignoring", orig); if ((strlen(tmp) + 1) < MAX_ENV) { tmp[strlen(tmp)] = *orig++; /* Note the increment */ } continue; } else { D(("Expandable argument: <%s>", orig)); type = *orig; orig+=2; /* skip the ${ or @{ characters */ ptr = strchr(orig, '}'); if (ptr) { *ptr++ = '\0'; } else { D(("Unterminated expandable variable: <%s>", orig-2)); pam_syslog(pamh, LOG_ERR, "Unterminated expandable variable: <%s>", orig-2); return PAM_ABORT; } strncpy(tmpval, orig, sizeof(tmpval)); tmpval[sizeof(tmpval)-1] = '\0'; orig=ptr; /* * so, we know we need to expand tmpval, it is either * an environment variable or a PAM_ITEM. type will tell us which */ switch (type) { case '$': D(("Expanding env var: <%s>",tmpval)); tmpptr = pam_getenv(pamh, tmpval); D(("Expanded to <%s>", tmpptr)); break; case '@': D(("Expanding pam item: <%s>",tmpval)); tmpptr = _pam_get_item_byname(pamh, tmpval); D(("Expanded to <%s>", tmpptr)); break; default: D(("Impossible error, type == <%c>", type)); pam_syslog(pamh, LOG_CRIT, "Impossible error, type == <%c>", type); return PAM_ABORT; } /* switch */ if (tmpptr) { if ((strlen(tmp) + strlen(tmpptr)) < MAX_ENV) { strcat(tmp, tmpptr); } else { /* is it really a good idea to try to log this? */ D(("Variable buffer overflow: <%s> + <%s>", tmp, tmpptr)); pam_syslog (pamh, LOG_ERR, "Variable buffer overflow: <%s> + <%s>", tmp, tmpptr); } } } /* if ('{' != *orig++) */ } else { /* if ( '$' == *orig || '@' == *orig) */ if ((strlen(tmp) + 1) < MAX_ENV) { tmp[strlen(tmp)] = *orig++; /* Note the increment */ } else { /* is it really a good idea to try to log this? 
*/ D(("Variable buffer overflow: <%s> + <%s>", tmp, tmpptr)); pam_syslog(pamh, LOG_ERR, "Variable buffer overflow: <%s> + <%s>", tmp, tmpptr); } } } /* for (;*orig;) */ if (strlen(tmp) > strlen(*value)) { free(*value); if ((*value = malloc(strlen(tmp) +1)) == NULL) { D(("Couldn't malloc %d bytes for expanded var", strlen(tmp)+1)); pam_syslog (pamh, LOG_ERR, "Couldn't malloc %lu bytes for expanded var", (unsigned long)strlen(tmp)+1); return PAM_BUF_ERR; } } strcpy(*value, tmp); memset(tmp,'\0',sizeof(tmp)); D(("Exit.")); return PAM_SUCCESS; }
Vulnerable
[ "CWE-119" ]
linux-pam
109823cb621c900c07c4b6cdc99070d354d19444
2.417333132458978e+37
132
pam_env: abort when encountering an overflowed environment variable expansion * modules/pam_env/pam_env.c (_expand_arg): Abort when encountering an overflowed environment variable expansion. Fixes CVE-2011-3149. Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/pam/+bug/874565
1
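The fix described in the commit message above makes the overflow path fatal instead of advisory. A minimal sketch of the patched branch in C, reusing names from the vulnerable function above (tmp, tmpptr, MAX_ENV, pamh); the wrapper function here is hypothetical, and the upstream patch applies the same change at each overflow site rather than factoring one out.

/* Hypothetical helper showing the hardened policy: append the expansion
 * only if it fits, otherwise log once and abort with PAM_BUF_ERR instead
 * of silently continuing to parse (the pre-fix behavior). */
static int append_or_abort(pam_handle_t *pamh, char *tmp, const char *tmpptr)
{
    if ((strlen(tmp) + strlen(tmpptr)) < MAX_ENV) {
        strcat(tmp, tmpptr);           /* still room, append as before */
        return PAM_SUCCESS;
    }
    pam_syslog(pamh, LOG_ERR,
               "Variable buffer overflow: <%s> + <%s>", tmp, tmpptr);
    return PAM_BUF_ERR;                /* abort expansion (CVE-2011-3149) */
}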
GF_Err cprt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CopyrightBox *ptr = (GF_CopyrightBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 1); if (ptr->packedLanguageCode[0]) { gf_bs_write_int(bs, ptr->packedLanguageCode[0] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguageCode[1] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguageCode[2] - 0x60, 5); } else { gf_bs_write_int(bs, 0, 15); } if (ptr->notice) { gf_bs_write_data(bs, ptr->notice, (u32) (strlen(ptr->notice) + 1) ); } return GF_OK; }
Safe
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
2.417166718913583e+38
20
fixed #1587
0
int llhttp__internal__c_test_flags_1( llhttp__internal_t* state, const unsigned char* p, const unsigned char* endp) { return (state->flags & 544) == 544; }
Safe
[ "CWE-444" ]
node
641f786bb1a1f6eb1ff8750782ed939780f2b31a
1.4007729907548799e+38
6
http: unset `F_CHUNKED` on new `Transfer-Encoding` A duplicate `Transfer-Encoding` header should be treated as a single header, with the original header values concatenated with a comma separator. In light of this, even if the previous `Transfer-Encoding` ended with `chunked`, we should not let `F_CHUNKED` leak into the next header, because the mere presence of another header indicates that `chunked` is not the last transfer-encoding token. CVE-ID: CVE-2020-8287 Refs: https://github.com/nodejs-private/llhttp-private/pull/3 Refs: https://hackerone.com/bugs?report_id=1002188&subject=nodejs PR-URL: https://github.com/nodejs-private/node-private/pull/228 Reviewed-By: Fedor Indutny <fedor.indutny@gmail.com> Reviewed-By: Rich Trott <rtrott@gmail.com>
0
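The rule in the commit message above is mechanical: when another Transfer-Encoding header begins, any chunked state inherited from the previous one must be dropped, because the values are logically joined by commas and `chunked` only matters as the final token. A sketch of that rule in plain C; the flag value and state shape are illustrative, not llhttp's generated parser code.

#define F_CHUNKED (1u << 3)   /* assumed bit position, for illustration only */

/* Invoked when a new Transfer-Encoding header line begins. */
static void on_transfer_encoding_start(unsigned int *flags)
{
    /* A further header means the earlier "chunked" token is no longer
     * last, so its flag must not leak into this header's parsing. */
    *flags &= ~F_CHUNKED;
}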
int RGWConfigBucketMetaSearch_ObjStore_S3::get_params() { auto iter = s->info.x_meta_map.find("x-amz-meta-search"); if (iter == s->info.x_meta_map.end()) { s->err.message = "X-Rgw-Meta-Search header not provided"; ldout(s->cct, 5) << s->err.message << dendl; return -EINVAL; } list<string> expressions; get_str_list(iter->second, ",", expressions); for (auto& expression : expressions) { vector<string> args; get_str_vec(expression, ";", args); if (args.empty()) { s->err.message = "invalid empty expression"; ldout(s->cct, 5) << s->err.message << dendl; return -EINVAL; } if (args.size() > 2) { s->err.message = string("invalid expression: ") + expression; ldout(s->cct, 5) << s->err.message << dendl; return -EINVAL; } string key = boost::algorithm::to_lower_copy(rgw_trim_whitespace(args[0])); string val; if (args.size() > 1) { val = boost::algorithm::to_lower_copy(rgw_trim_whitespace(args[1])); } if (!boost::algorithm::starts_with(key, RGW_AMZ_META_PREFIX)) { s->err.message = string("invalid expression, key must start with '" RGW_AMZ_META_PREFIX "' : ") + expression; ldout(s->cct, 5) << s->err.message << dendl; return -EINVAL; } key = key.substr(sizeof(RGW_AMZ_META_PREFIX) - 1); ESEntityTypeMap::EntityType entity_type; if (val.empty() || val == "str" || val == "string") { entity_type = ESEntityTypeMap::ES_ENTITY_STR; } else if (val == "int" || val == "integer") { entity_type = ESEntityTypeMap::ES_ENTITY_INT; } else if (val == "date" || val == "datetime") { entity_type = ESEntityTypeMap::ES_ENTITY_DATE; } else { s->err.message = string("invalid entity type: ") + val; ldout(s->cct, 5) << s->err.message << dendl; return -EINVAL; } mdsearch_config[key] = entity_type; } return 0; }
Safe
[ "CWE-79" ]
ceph
fce0b267446d6f3f631bb4680ebc3527bbbea002
2.4507248796488275e+38
60
rgw: reject unauthenticated response-header actions Signed-off-by: Matt Benjamin <mbenjamin@redhat.com> Reviewed-by: Casey Bodley <cbodley@redhat.com> (cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
0
const char *mg_unpack(const char *path, size_t *size, time_t *mtime) { (void) path, (void) size, (void) mtime; return NULL; }
Safe
[ "CWE-552" ]
mongoose
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
2.4966335750209295e+38
4
Protect against the directory traversal in mg_upload()
0
intrusive_ptr<Expression> ExpressionDateToParts::optimize() { _date = _date->optimize(); if (_timeZone) { _timeZone = _timeZone->optimize(); } if (_iso8601) { _iso8601 = _iso8601->optimize(); } if (ExpressionConstant::allNullOrConstant({_date, _iso8601, _timeZone})) { // Everything is a constant, so we can turn into a constant. return ExpressionConstant::create(getExpressionContext(), evaluate(Document{})); } return this; }
Safe
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
2.7877074612964386e+38
16
SERVER-38070 fix infinite loop in agg expression
0
failed(const char *s) { perror(s); ExitProgram(EXIT_FAILURE); }
Safe
[ "CWE-125" ]
ncurses
b025434573f466efe27862656a6a9d41dd2bd609
1.939121349903169e+38
5
ncurses 6.1 - patch 20191012 + amend recent changes to ncurses*-config and pc-files to filter out Debian linker-flags (report by Sven Joachim, cf: 20150516). + clarify relationship between tic, infocmp and captoinfo in manpage. + check for invalid hashcode in _nc_find_type_entry and _nc_find_name_entry. > fix several errata in tic (reports/testcases by "zjuchenyuan"): + check for invalid hashcode in _nc_find_entry. + check for missing character after backslash in fmt_entry + check for acsc with odd length in dump_entry in check for one-one mapping (cf: 20060415); + check length when converting from old AIX box_chars_1 capability, overlooked in changes to eliminate strcpy (cf: 20001007). + amend the ncurses*-config and pc-files to take into account the rpath
0
proto_tree_add_bits_item(proto_tree *tree, const int hfindex, tvbuff_t *tvb, const guint bit_offset, const gint no_of_bits, const guint encoding) { header_field_info *hfinfo; gint octet_length; gint octet_offset; PROTO_REGISTRAR_GET_NTH(hfindex, hfinfo); octet_length = (no_of_bits + 7) >> 3; octet_offset = bit_offset >> 3; test_length(hfinfo, tvb, octet_offset, octet_length, encoding); /* Yes, we try to fake this item again in proto_tree_add_bits_ret_val() * but only after doing a bunch more work (which we can, in the common * case, shortcut here). */ CHECK_FOR_NULL_TREE(tree); TRY_TO_FAKE_THIS_ITEM(tree, hfindex, hfinfo); return proto_tree_add_bits_ret_val(tree, hfindex, tvb, bit_offset, no_of_bits, NULL, encoding); }
Safe
[ "CWE-401" ]
wireshark
a9fc769d7bb4b491efb61c699d57c9f35269d871
1.0461606148780974e+38
23
epan: Fix a memory leak. Make sure _proto_tree_add_bits_ret_val allocates a bits array using the packet scope, otherwise we leak memory. Fixes #17032.
0
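The leak named in the commit message above comes from allocating the temporary bits array with no owner. A hedged before/after fragment, assuming the allocation happens inside a dissection helper that has the packet_info (pinfo) in scope; wmem then releases the buffer automatically when the packet's pool is torn down.

guint8 *bits;
/* before: bits = (guint8 *)g_malloc(length);  -- never freed, leaks per packet */
bits = (guint8 *)wmem_alloc(pinfo->pool, length);  /* freed with the packet pool */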
static INLINE UINT32 ExtractRunLength(UINT32 code, const BYTE* pbOrderHdr, UINT32* advance) { UINT32 runLength; UINT32 ladvance; ladvance = 1; runLength = 0; switch (code) { case REGULAR_FGBG_IMAGE: runLength = (*pbOrderHdr) & g_MaskRegularRunLength; if (runLength == 0) { runLength = (*(pbOrderHdr + 1)) + 1; ladvance += 1; } else { runLength = runLength * 8; } break; case LITE_SET_FG_FGBG_IMAGE: runLength = (*pbOrderHdr) & g_MaskLiteRunLength; if (runLength == 0) { runLength = (*(pbOrderHdr + 1)) + 1; ladvance += 1; } else { runLength = runLength * 8; } break; case REGULAR_BG_RUN: case REGULAR_FG_RUN: case REGULAR_COLOR_RUN: case REGULAR_COLOR_IMAGE: runLength = (*pbOrderHdr) & g_MaskRegularRunLength; if (runLength == 0) { /* An extended (MEGA) run. */ runLength = (*(pbOrderHdr + 1)) + 32; ladvance += 1; } break; case LITE_SET_FG_FG_RUN: case LITE_DITHERED_RUN: runLength = (*pbOrderHdr) & g_MaskLiteRunLength; if (runLength == 0) { /* An extended (MEGA) run. */ runLength = (*(pbOrderHdr + 1)) + 16; ladvance += 1; } break; case MEGA_MEGA_BG_RUN: case MEGA_MEGA_FG_RUN: case MEGA_MEGA_SET_FG_RUN: case MEGA_MEGA_DITHERED_RUN: case MEGA_MEGA_COLOR_RUN: case MEGA_MEGA_FGBG_IMAGE: case MEGA_MEGA_SET_FGBG_IMAGE: case MEGA_MEGA_COLOR_IMAGE: runLength = ((UINT16)pbOrderHdr[1]) | ((UINT16)(pbOrderHdr[2] << 8)); ladvance += 2; break; } *advance = ladvance; return runLength; }
Safe
[ "CWE-787" ]
FreeRDP
7b1d4b49391b4512402840431757703a96946820
1.1757775970217652e+38
83
Fix CVE-2020-11524: out of bounds access in interleaved Thanks to Sunglin and HuanGMz from Knownsec 404
0
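ExtractRunLength above reads pbOrderHdr[1] and pbOrderHdr[2] for MEGA runs without knowing where the input ends, which is the out-of-bounds access CVE-2020-11524 targets. A sketch of the bounds-checked shape for the two-byte case, assuming the caller threads an end pointer through; the actual upstream patch is structured differently.

static UINT32 extract_mega_run(const BYTE* p, const BYTE* pbEnd, UINT32* advance)
{
    /* A MEGA_MEGA run length is a 16-bit little-endian value in p[1..2];
     * require all three header bytes to exist before dereferencing. */
    if (pbEnd - p < 3)
    {
        *advance = 0;   /* caller treats 0 as a decode error */
        return 0;
    }
    *advance = 3;
    return ((UINT32)p[1]) | (((UINT32)p[2]) << 8);
}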
builtin_chr_impl(PyObject *module, int i) /*[clinic end generated code: output=c733afcd200afcb7 input=3f604ef45a70750d]*/ { return PyUnicode_FromOrdinal(i); }
Safe
[ "CWE-125" ]
cpython
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
2.3695251311387813e+37
5
bpo-35766: Merge typed_ast back into CPython (GH-11645)
0
static OPJ_BOOL opj_j2k_read_SPCod_SPCoc(opj_j2k_t *p_j2k, OPJ_UINT32 compno, OPJ_BYTE * p_header_data, OPJ_UINT32 * p_header_size, opj_event_mgr_t * p_manager) { OPJ_UINT32 i, l_tmp; opj_cp_t *l_cp = NULL; opj_tcp_t *l_tcp = NULL; opj_tccp_t *l_tccp = NULL; OPJ_BYTE * l_current_ptr = NULL; /* preconditions */ assert(p_j2k != 00); assert(p_manager != 00); assert(p_header_data != 00); l_cp = &(p_j2k->m_cp); l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ? &l_cp->tcps[p_j2k->m_current_tile_number] : p_j2k->m_specific_param.m_decoder.m_default_tcp; /* precondition again */ assert(compno < p_j2k->m_private_image->numcomps); l_tccp = &l_tcp->tccps[compno]; l_current_ptr = p_header_data; /* make sure room is sufficient */ if (*p_header_size < 5) { opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element\n"); return OPJ_FALSE; } /* SPcod (D) / SPcoc (A) */ opj_read_bytes(l_current_ptr, &l_tccp->numresolutions, 1); ++l_tccp->numresolutions; /* tccp->numresolutions = read() + 1 */ if (l_tccp->numresolutions > OPJ_J2K_MAXRLVLS) { opj_event_msg(p_manager, EVT_ERROR, "Invalid value for numresolutions : %d, max value is set in openjpeg.h at %d\n", l_tccp->numresolutions, OPJ_J2K_MAXRLVLS); return OPJ_FALSE; } ++l_current_ptr; /* If user wants to remove more resolutions than the codestream contains, return error */ if (l_cp->m_specific_param.m_dec.m_reduce >= l_tccp->numresolutions) { opj_event_msg(p_manager, EVT_ERROR, "Error decoding component %d.\nThe number of resolutions " "to remove (%d) is greater or equal than the number " "of resolutions of this component (%d)\nModify the cp_reduce parameter.\n\n", compno, l_cp->m_specific_param.m_dec.m_reduce, l_tccp->numresolutions); p_j2k->m_specific_param.m_decoder.m_state |= 0x8000;/* FIXME J2K_DEC_STATE_ERR;*/ return OPJ_FALSE; } /* SPcod (E) / SPcoc (B) */ opj_read_bytes(l_current_ptr, &l_tccp->cblkw, 1); ++l_current_ptr; l_tccp->cblkw += 2; /* SPcod (F) / SPcoc (C) */ opj_read_bytes(l_current_ptr, &l_tccp->cblkh, 1); ++l_current_ptr; l_tccp->cblkh += 2; if ((l_tccp->cblkw > 10) || (l_tccp->cblkh > 10) || ((l_tccp->cblkw + l_tccp->cblkh) > 12)) { opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element, Invalid cblkw/cblkh combination\n"); return OPJ_FALSE; } /* SPcod (G) / SPcoc (D) */ opj_read_bytes(l_current_ptr, &l_tccp->cblksty, 1); ++l_current_ptr; if (l_tccp->cblksty & 0xC0U) { /* 2 msb are reserved, assume we can't read */ opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element, Invalid code-block style found\n"); return OPJ_FALSE; } /* SPcod (H) / SPcoc (E) */ opj_read_bytes(l_current_ptr, &l_tccp->qmfbid, 1); ++l_current_ptr; if (l_tccp->qmfbid > 1) { opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element, Invalid transformation found\n"); return OPJ_FALSE; } *p_header_size = *p_header_size - 5; /* use custom precinct size ? 
*/ if (l_tccp->csty & J2K_CCP_CSTY_PRT) { if (*p_header_size < l_tccp->numresolutions) { opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element\n"); return OPJ_FALSE; } /* SPcod (I_i) / SPcoc (F_i) */ for (i = 0; i < l_tccp->numresolutions; ++i) { opj_read_bytes(l_current_ptr, &l_tmp, 1); ++l_current_ptr; /* Precinct exponent 0 is only allowed for lowest resolution level (Table A.21) */ if ((i != 0) && (((l_tmp & 0xf) == 0) || ((l_tmp >> 4) == 0))) { opj_event_msg(p_manager, EVT_ERROR, "Invalid precinct size\n"); return OPJ_FALSE; } l_tccp->prcw[i] = l_tmp & 0xf; l_tccp->prch[i] = l_tmp >> 4; } *p_header_size = *p_header_size - l_tccp->numresolutions; } else { /* set default size for the precinct width and height */ for (i = 0; i < l_tccp->numresolutions; ++i) { l_tccp->prcw[i] = 15; l_tccp->prch[i] = 15; } } #ifdef WIP_REMOVE_MSD /* INDEX >> */ if (p_j2k->cstr_info && compno == 0) { OPJ_UINT32 l_data_size = l_tccp->numresolutions * sizeof(OPJ_UINT32); p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblkh = l_tccp->cblkh; p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblkw = l_tccp->cblkw; p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].numresolutions = l_tccp->numresolutions; p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblksty = l_tccp->cblksty; p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].qmfbid = l_tccp->qmfbid; memcpy(p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].pdx, l_tccp->prcw, l_data_size); memcpy(p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].pdy, l_tccp->prch, l_data_size); } /* << INDEX */ #endif return OPJ_TRUE; }
Safe
[ "CWE-20" ]
openjpeg
73fdf28342e4594019af26eb6a347a34eceb6296
8.934775415812892e+37
150
opj_j2k_write_sod(): avoid potential heap buffer overflow (fixes #1299) (probably master only)
0
static void audit_set_auditable(struct audit_context *ctx) { if (!ctx->prio) { ctx->prio = 1; ctx->current_state = AUDIT_RECORD_CONTEXT; } }
Safe
[ "CWE-362" ]
linux
43761473c254b45883a64441dd0bc85a42f3645c
1.2673072327286794e+38
7
audit: fix a double fetch in audit_log_single_execve_arg() There is a double fetch problem in audit_log_single_execve_arg() where we first check the execve(2) arguments for any "bad" characters which would require hex encoding and then re-fetch the arguments for logging in the audit record[1]. Of course this leaves a window of opportunity for an unsavory application to munge with the data. This patch reworks things by only fetching the argument data once[2] into a buffer where it is scanned and logged into the audit record(s). In addition to fixing the double fetch, this patch improves on the original code in a few other ways: better handling of large arguments which require encoding, stricter record length checking, and some performance improvements (completely unverified, but we got rid of some strlen() calls, that's got to be a good thing). As part of the development of this patch, I've also created a basic regression test for the audit-testsuite, the test can be tracked on GitHub at the following link: * https://github.com/linux-audit/audit-testsuite/issues/25 [1] If you pay careful attention, there is actually a triple fetch problem due to a strnlen_user() call at the top of the function. [2] This is a tiny white lie, we do make a call to strnlen_user() prior to fetching the argument data. I don't like it, but due to the way the audit record is structured we really have no choice unless we copy the entire argument at once (which would require a rather wasteful allocation). The good news is that with this patch the kernel no longer relies on this strnlen_user() value for anything beyond recording it in the log, we also update it with a trustworthy value whenever possible. Reported-by: Pengfei Wang <wpengfeinudt@gmail.com> Cc: <stable@vger.kernel.org> Signed-off-by: Paul Moore <paul@paul-moore.com>
0
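The pattern that closes the double fetch described above: copy each argument from userspace exactly once, then run both the "bad character" scan and the logging over that single kernel copy. A simplified sketch; MAX_ARG_LEN and has_bad_chars() are hypothetical stand-ins, and the real patch additionally handles chunking of large arguments.

static int log_one_arg(struct audit_buffer *ab, const char __user *uarg,
                       size_t len, unsigned char *kbuf)
{
    if (len > MAX_ARG_LEN)                 /* hypothetical length cap */
        return -E2BIG;
    if (copy_from_user(kbuf, uarg, len))   /* the single fetch */
        return -EFAULT;
    /* Check and use now see the same bytes: userspace cannot swap the
     * data between the scan and the write to the audit record. */
    if (has_bad_chars(kbuf, len))
        audit_log_n_hex(ab, kbuf, len);
    else
        audit_log_n_string(ab, (const char *)kbuf, len);
    return 0;
}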
virDomainCheckVirtioOptions(virDomainVirtioOptionsPtr virtio) { if (!virtio) return 0; if (virtio->iommu != VIR_TRISTATE_SWITCH_ABSENT) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", _("iommu driver option is only supported " "for virtio devices")); return -1; } if (virtio->ats != VIR_TRISTATE_SWITCH_ABSENT) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", _("ats driver option is only supported " "for virtio devices")); return -1; } if (virtio->packed != VIR_TRISTATE_SWITCH_ABSENT) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", _("packed driver option is only supported " "for virtio devices")); return -1; } return 0; }
Safe
[ "CWE-212" ]
libvirt
a5b064bf4b17a9884d7d361733737fb614ad8979
2.049320827616795e+38
25
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted. Reported-by: Han Han <hhan@redhat.com> Signed-off-by: Peter Krempa <pkrempa@redhat.com> Reviewed-by: Erik Skultety <eskultet@redhat.com>
0
static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd, ALSChannelData **cd, int *reverted, unsigned int offset, int c) { ALSChannelData *ch = cd[c]; unsigned int dep = 0; unsigned int channels = ctx->avctx->channels; if (reverted[c]) return 0; reverted[c] = 1; while (dep < channels && !ch[dep].stop_flag) { revert_channel_correlation(ctx, bd, cd, reverted, offset, ch[dep].master_channel); dep++; } if (dep == channels) { av_log(ctx->avctx, AV_LOG_WARNING, "Invalid channel correlation.\n"); return AVERROR_INVALIDDATA; } bd->const_block = ctx->const_block + c; bd->shift_lsbs = ctx->shift_lsbs + c; bd->opt_order = ctx->opt_order + c; bd->store_prev_samples = ctx->store_prev_samples + c; bd->use_ltp = ctx->use_ltp + c; bd->ltp_lag = ctx->ltp_lag + c; bd->ltp_gain = ctx->ltp_gain[c]; bd->lpc_cof = ctx->lpc_cof[c]; bd->quant_cof = ctx->quant_cof[c]; bd->raw_samples = ctx->raw_samples[c] + offset; for (dep = 0; !ch[dep].stop_flag; dep++) { unsigned int smp; unsigned int begin = 1; unsigned int end = bd->block_length - 1; int64_t y; int32_t *master = ctx->raw_samples[ch[dep].master_channel] + offset; if (ch[dep].master_channel == c) continue; if (ch[dep].time_diff_flag) { int t = ch[dep].time_diff_index; if (ch[dep].time_diff_sign) { t = -t; begin -= t; } else { end -= t; } for (smp = begin; smp < end; smp++) { y = (1 << 6) + MUL64(ch[dep].weighting[0], master[smp - 1 ]) + MUL64(ch[dep].weighting[1], master[smp ]) + MUL64(ch[dep].weighting[2], master[smp + 1 ]) + MUL64(ch[dep].weighting[3], master[smp - 1 + t]) + MUL64(ch[dep].weighting[4], master[smp + t]) + MUL64(ch[dep].weighting[5], master[smp + 1 + t]); bd->raw_samples[smp] += y >> 7; } } else { for (smp = begin; smp < end; smp++) { y = (1 << 6) + MUL64(ch[dep].weighting[0], master[smp - 1]) + MUL64(ch[dep].weighting[1], master[smp ]) + MUL64(ch[dep].weighting[2], master[smp + 1]); bd->raw_samples[smp] += y >> 7; } } } return 0; }
Safe
[ "CWE-787" ]
FFmpeg
18f94df8af04f2c02a25a7dec512289feff6517f
2.252646738799591e+38
81
avcodec/alsdec: check predictor order against block length Fixes out of array access Fixes: abd3c041acbcb816be113455d138166b-asan_heap-oob_b11634_3707_cov_1707137151_als_05_2ch48k16b.mp4 Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
0
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *verbs_init_attr, struct ib_udata *udata) { struct mlx5_ib_dev *dev; struct mlx5_ib_qp *qp; u16 xrcdn = 0; int err; struct ib_qp_init_attr mlx_init_attr; struct ib_qp_init_attr *init_attr = verbs_init_attr; if (pd) { dev = to_mdev(pd->device); if (init_attr->qp_type == IB_QPT_RAW_PACKET) { if (!pd->uobject) { mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n"); return ERR_PTR(-EINVAL); } else if (!to_mucontext(pd->uobject->context)->cqe_version) { mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n"); return ERR_PTR(-EINVAL); } } } else { /* being cautious here */ if (init_attr->qp_type != IB_QPT_XRC_TGT && init_attr->qp_type != MLX5_IB_QPT_REG_UMR) { pr_warn("%s: no PD for transport %s\n", __func__, ib_qp_type_str(init_attr->qp_type)); return ERR_PTR(-EINVAL); } dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); } if (init_attr->qp_type == IB_QPT_DRIVER) { struct mlx5_ib_create_qp ucmd; init_attr = &mlx_init_attr; memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr)); err = set_mlx_qp_type(dev, init_attr, &ucmd, udata); if (err) return ERR_PTR(err); if (init_attr->qp_type == MLX5_IB_QPT_DCI) { if (init_attr->cap.max_recv_wr || init_attr->cap.max_recv_sge) { mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n"); return ERR_PTR(-EINVAL); } } else { return mlx5_ib_create_dct(pd, init_attr, &ucmd); } } switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: case IB_QPT_XRC_INI: if (!MLX5_CAP_GEN(dev->mdev, xrc)) { mlx5_ib_dbg(dev, "XRC not supported\n"); return ERR_PTR(-ENOSYS); } init_attr->recv_cq = NULL; if (init_attr->qp_type == IB_QPT_XRC_TGT) { xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = NULL; } /* fall through */ case IB_QPT_RAW_PACKET: case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: case IB_QPT_SMI: case MLX5_IB_QPT_HW_GSI: case MLX5_IB_QPT_REG_UMR: case MLX5_IB_QPT_DCI: qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); err = create_qp_common(dev, pd, init_attr, udata, qp); if (err) { mlx5_ib_dbg(dev, "create_qp_common failed\n"); kfree(qp); return ERR_PTR(err); } if (is_qp0(init_attr->qp_type)) qp->ibqp.qp_num = 0; else if (is_qp1(init_attr->qp_type)) qp->ibqp.qp_num = 1; else qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn; mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1, init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1); qp->trans_qp.xrcdn = xrcdn; break; case IB_QPT_GSI: return mlx5_ib_gsi_create_qp(pd, init_attr); case IB_QPT_RAW_IPV6: case IB_QPT_RAW_ETHERTYPE: case IB_QPT_MAX: default: mlx5_ib_dbg(dev, "unsupported qp type %d\n", init_attr->qp_type); /* Don't support raw QPs */ return ERR_PTR(-EINVAL); } if (verbs_init_attr->qp_type == IB_QPT_DRIVER) qp->qp_sub_type = init_attr->qp_type; return &qp->ibqp; }
Safe
[ "CWE-119", "CWE-787" ]
linux
0625b4ba1a5d4703c7fb01c497bd6c156908af00
1.9944728465710413e+38
121
IB/mlx5: Fix leaking stack memory to userspace mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes were written. Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp") Cc: <stable@vger.kernel.org> Acked-by: Leon Romanovsky <leonro@mellanox.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
0
static inline uint8_t *bcf_unpack_fmt_core1(uint8_t *ptr, int n_sample, bcf_fmt_t *fmt) { uint8_t *ptr_start = ptr; fmt->id = bcf_dec_typed_int1(ptr, &ptr); fmt->n = bcf_dec_size(ptr, &ptr, &fmt->type); fmt->size = fmt->n << bcf_type_shift[fmt->type]; fmt->p = ptr; fmt->p_off = ptr - ptr_start; fmt->p_free = 0; ptr += n_sample * fmt->size; fmt->p_len = ptr - fmt->p; return ptr; }
Safe
[ "CWE-787" ]
htslib
dcd4b7304941a8832fba2d0fc4c1e716e7a4e72c
2.3037999415640845e+38
13
Fix check for VCF record size The check for excessive record size in vcf_parse_format() only looked at individual fields. It was therefore possible to exceed the limit and overflow fmt_aux_t::offset by having multiple fields with a combined size that went over INT_MAX. Fix by including the amount of memory used so far in the check. Credit to OSS-Fuzz Fixes oss-fuzz 24097
0
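The one-line idea behind the fix above, sketched with illustrative variable names: the guard must count memory already consumed by earlier FORMAT fields, not just the current field, and the comparison itself must be overflow-safe.

/* Insufficient: each field can individually pass while the sum wraps. */
/*   if (fmt_size > INT_MAX) return -1;                                 */

/* Cumulative, overflow-safe form: reject before the addition can wrap. */
if (fmt_size > INT_MAX - used_so_far)
    return -1;               /* combined record too large, refuse it */
used_so_far += fmt_size;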
static inline void skcipher_map_dst(struct skcipher_walk *walk) { walk->dst.virt.addr = skcipher_map(&walk->out); }
Safe
[ "CWE-476", "CWE-703" ]
linux
9933e113c2e87a9f46a40fde8dafbf801dca1ab9
1.6017810979052791e+38
4
crypto: skcipher - Add missing API setkey checks The API setkey checks for key sizes and alignment went AWOL during the skcipher conversion. This patch restores them. Cc: <stable@vger.kernel.org> Fixes: 4e6c3df4d729 ("crypto: skcipher - Add low-level skcipher...") Reported-by: Baozeng <sploving1@gmail.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
0
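A sketch of the restored setkey validation, written against the 2017-era kernel crypto API that the commit targets (CRYPTO_TFM_RES_BAD_KEY_LEN has since been removed from the kernel); treat the exact helper names as assumptions rather than the patch's literal code.

static int skcipher_setkey_checked(struct crypto_skcipher *tfm,
                                   const u8 *key, unsigned int keylen)
{
    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

    /* The range check that went AWOL in the skcipher conversion. */
    if (keylen < alg->min_keysize || keylen > alg->max_keysize) {
        crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }
    return alg->setkey(tfm, key, keylen);
}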
static int mov_write_dfla_tag(AVIOContext *pb, MOVTrack *track) { int64_t pos = avio_tell(pb); avio_wb32(pb, 0); ffio_wfourcc(pb, "dfLa"); avio_w8(pb, 0); /* version */ avio_wb24(pb, 0); /* flags */ /* Expect the encoder to pass a METADATA_BLOCK_TYPE_STREAMINFO. */ if (track->par->extradata_size != FLAC_STREAMINFO_SIZE) return AVERROR_INVALIDDATA; /* TODO: Write other METADATA_BLOCK_TYPEs if the encoder makes them available. */ avio_w8(pb, 1 << 7 | FLAC_METADATA_TYPE_STREAMINFO); /* LastMetadataBlockFlag << 7 | BlockType */ avio_wb24(pb, track->par->extradata_size); /* Length */ avio_write(pb, track->par->extradata, track->par->extradata_size); /* BlockData[Length] */ return update_size(pb, pos); }
Safe
[ "CWE-125" ]
FFmpeg
95556e27e2c1d56d9e18f5db34d6f756f3011148
1.389121384758289e+38
19
avformat/movenc: Do not pass AVCodecParameters in avpriv_request_sample Fixes: out of array read Fixes: ffmpeg_crash_8.avi Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
0
void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream) { snd_pcm_set_ops(pcm, stream, stream == SNDRV_PCM_STREAM_PLAYBACK ? &snd_usb_playback_ops : &snd_usb_capture_ops); }
Safe
[]
sound
447d6275f0c21f6cc97a88b3a0c601436a4cdf2a
9.397175186918352e+37
6
ALSA: usb-audio: Add sanity checks for endpoint accesses Add some sanity check codes before actually accessing the endpoint via get_endpoint() in order to avoid the invalid access through a malformed USB descriptor. Mostly just checking bNumEndpoints, but in one place (snd_microii_spdif_default_get()), the validity of iface and altsetting index is checked as well. Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125 Cc: <stable@vger.kernel.org> Signed-off-by: Takashi Iwai <tiwai@suse.de>
0
int allow_signal(int sig) { if (!valid_signal(sig) || sig < 1) return -EINVAL; spin_lock_irq(&current->sighand->siglock); /* This is only needed for daemonize()'ed kthreads */ sigdelset(&current->blocked, sig); /* * Kernel threads handle their own signals. Let the signal code * know it'll be handled, so that they don't get converted to * SIGKILL or just silently dropped. */ current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); return 0; }
Safe
[ "CWE-20", "CWE-703", "CWE-400" ]
linux
b69f2292063d2caf37ca9aec7d63ded203701bf3
1.5560825734543787e+38
18
block: Fix io_context leak after failure of clone with CLONE_IO With CLONE_IO, parent's io_context->nr_tasks is incremented, but never decremented whenever copy_process() fails afterwards, which prevents exit_io_context() from calling IO schedulers exit functions. Give a task_struct to exit_io_context(), and call exit_io_context() instead of put_io_context() in copy_process() cleanup path. Signed-off-by: Louis Rilling <louis.rilling@kerlabs.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
0
void determinization(ctx_t &ctx) { const uint32_t INITIAL_TAGS = init_tag_versions(ctx); // initial state const clos_t c0(ctx.nfa.root, 0, INITIAL_TAGS, HROOT, HROOT); ctx.reach.push_back(c0); tagged_epsilon_closure(ctx); find_state(ctx); // Iterate while new kernels are added: for each alphabet symbol, build tagged // epsilon-closure of all reachable NFA states, then find identical or mappable DFA // state or add a new one. for (uint32_t i = 0; i < ctx.dc_kernels.size(); ++i) { ctx.dc_origin = i; clear_caches(ctx); for (uint32_t c = 0; c < ctx.dfa.nchars; ++c) { reach_on_symbol(ctx, c); tagged_epsilon_closure(ctx); find_state(ctx); // Abort if DFA grows too fast (either in the number of states, or in the // total size of all state kernels which may have many NFA substates). if (ctx.dc_kernels.size() > MAX_DFA_STATES) { error("DFA has too many states"); exit(1); } else if (ctx.kernels_total > MAX_DFA_SIZE) { error("DFA is too large"); exit(1); } } } warn_nondeterministic_tags(ctx); }
Safe
[ "CWE-787" ]
re2c
a3473fd7be829cb33907cb08612f955133c70a96
2.1717515317331122e+38
36
Limit maximum allowed NFA and DFA size. Instead of failing with an out of memory exception or crashing with a stack overflow, emit an error message and exit. This is a partial fix for bug #394 "Stack overflow due to recursion in src/dfa/dead_rules.cc", where re2c hit stack overflow on a counted repetition regexp with high upper bound. The patch adds the following limits: 1. the number of NFA states 2. NFA depth (maximum length of a non-looping path from start to end) 3. the number of DFA states 4. total DFA size (sum total of all NFA substates in all DFA states) There are tests for the first three limits, but not for the DFA size as all examples that trigger this behavior take a long time to finish (a few seconds), which increases test run time almost twice.
0
nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku) { struct xdr_stream *xdr = &resp->xdr; if (!nfserr) nfserr = nfsd4_encode_stateid(xdr, &locku->lu_stateid); return nfserr; }
Safe
[ "CWE-20", "CWE-129" ]
linux
f961e3f2acae94b727380c0b74e2d3954d0edf79
7.493389182280147e+37
9
nfsd: encoders mustn't use uninitialized values in error cases In error cases, lgp->lg_layout_type may be out of bounds; so we shouldn't be using it until after the check of nfserr. This was seen to crash nfsd threads when the server receives a LAYOUTGET request with a large layout type. GETDEVICEINFO has the same problem. Reported-by: Ari Kauppi <Ari.Kauppi@synopsys.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Cc: stable@vger.kernel.org Signed-off-by: J. Bruce Fields <bfields@redhat.com>
0
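nfsd4_encode_locku above already follows the rule from the commit message: check nfserr before touching anything derived from the request. For contrast, a sketch of the LAYOUTGET-shaped bug and its fix, using hypothetical types and helpers.

static __be32 encode_layoutget_result(struct xdr_stream *xdr, __be32 nfserr,
                                      const struct layout_res *res)
{
    if (nfserr)              /* bail first: on error, res->layout_type may */
        return nfserr;       /* be uninitialized or wildly out of bounds   */
    return encode_by_layout_type(xdr, res->layout_type);
}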
e_ews_connection_set_disconnected_flag (EEwsConnection *cnc, gboolean disconnected_flag) { g_return_if_fail (E_IS_EWS_CONNECTION (cnc)); cnc->priv->disconnected_flag = disconnected_flag; }
Safe
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
6.594308112564158e+37
7
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
0
SPL_METHOD(SplObjectStorage, serialize) { spl_SplObjectStorage *intern = (spl_SplObjectStorage*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_SplObjectStorageElement *element; zval members, *pmembers, *flags; HashPosition pos; php_serialize_data_t var_hash; smart_str buf = {0}; if (zend_parse_parameters_none() == FAILURE) { return; } PHP_VAR_SERIALIZE_INIT(var_hash); /* storage */ smart_str_appendl(&buf, "x:", 2); MAKE_STD_ZVAL(flags); ZVAL_LONG(flags, zend_hash_num_elements(&intern->storage)); php_var_serialize(&buf, &flags, &var_hash TSRMLS_CC); zval_ptr_dtor(&flags); zend_hash_internal_pointer_reset_ex(&intern->storage, &pos); while(zend_hash_has_more_elements_ex(&intern->storage, &pos) == SUCCESS) { if (zend_hash_get_current_data_ex(&intern->storage, (void**)&element, &pos) == FAILURE) { smart_str_free(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); RETURN_NULL(); } php_var_serialize(&buf, &element->obj, &var_hash TSRMLS_CC); smart_str_appendc(&buf, ','); php_var_serialize(&buf, &element->inf, &var_hash TSRMLS_CC); smart_str_appendc(&buf, ';'); zend_hash_move_forward_ex(&intern->storage, &pos); } /* members */ smart_str_appendl(&buf, "m:", 2); INIT_PZVAL(&members); Z_ARRVAL(members) = zend_std_get_properties(getThis() TSRMLS_CC); Z_TYPE(members) = IS_ARRAY; pmembers = &members; php_var_serialize(&buf, &pmembers, &var_hash TSRMLS_CC); /* finishes the string */ /* done */ PHP_VAR_SERIALIZE_DESTROY(var_hash); if (buf.c) { RETURN_STRINGL(buf.c, buf.len, 0); } else { RETURN_NULL(); } } /* }}} */
Safe
[ "CWE-416" ]
php-src
c2e197e4efc663ca55f393bf0e799848842286f3
3.347151858340254e+38
56
Fix bug #70168 - Use After Free Vulnerability in unserialize() with SplObjectStorage
0
void __fastcall TSaveSessionDialog::Init(bool CanSavePassword, bool NotRecommendedSavingPassword, TStrings * AdditionalFolders) { Caption = LoadStr(SAVE_SESSION_CAPTION); SessionNameEdit = new TEdit(this); AddEdit(SessionNameEdit, CreateLabel(LoadStr(SAVE_SESSION_PROMPT))); FRootFolder = LoadStr(SAVE_SESSION_ROOT_FOLDER2); std::unique_ptr<TStringList> Folders(new TStringList()); if (AdditionalFolders != NULL) { Folders->AddStrings(AdditionalFolders); } for (int Index = 0; Index < StoredSessions->Count; Index++) { TSessionData * Data = StoredSessions->Sessions[Index]; if (!Data->Special && !Data->IsWorkspace) { UnicodeString Folder = Data->FolderName; if (!Folder.IsEmpty() && Folders->IndexOf(Folder) < 0) { Folders->Add(Folder); } } } DebugAssert(!Folders->CaseSensitive); Folders->Sort(); FolderCombo = new TComboBox(this); AddComboBox(FolderCombo, CreateLabel(LoadStr(SAVE_SESSION_FOLDER))); FolderCombo->DropDownCount = Max(FolderCombo->DropDownCount, 16); FolderCombo->Items->Add(FRootFolder); FolderCombo->Items->AddStrings(Folders.get()); SavePasswordCheck = CreateAndAddCheckBox( LoadStr(NotRecommendedSavingPassword ? SAVE_SESSION_PASSWORD : (CustomWinConfiguration->UseMasterPassword ? SAVE_SESSION_PASSWORD_MASTER : SAVE_SESSION_PASSWORD_RECOMMENDED))); CreateShortcutCheck = CreateAndAddCheckBox(LoadStr(SAVE_SITE_WORKSPACE_SHORTCUT)); EnableControl(SavePasswordCheck, CanSavePassword); }
Safe
[ "CWE-787" ]
winscp
faa96e8144e6925a380f94a97aa382c9427f688d
3.1601900300609715e+38
46
Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs https://winscp.net/tracker/1943 (cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0) Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b
0
static const char *set_flush_max_pipelined(cmd_parms *cmd, void *d_, const char *arg) { core_server_config *conf = ap_get_core_module_config(cmd->server->module_config); apr_off_t num; char *end; if (apr_strtoff(&num, arg, &end, 10) || *end || num < -1 || num > APR_INT32_MAX) return apr_pstrcat(cmd->pool, "parameter must be a number between -1 and " APR_STRINGIFY(APR_INT32_MAX) ": ", arg, NULL); conf->flush_max_pipelined = (apr_int32_t)num; return NULL; }
Safe
[]
httpd
ecebcc035ccd8d0e2984fe41420d9e944f456b3c
1.7561799082684657e+38
19
Merged r1734009,r1734231,r1734281,r1838055,r1838079,r1840229,r1876664,r1876674,r1876784,r1879078,r1881620,r1887311,r1888871 from trunk: *) core: Split ap_create_request() from ap_read_request(). [Graham Leggett] *) core, h2: common ap_parse_request_line() and ap_check_request_header() code. [Yann Ylavic] *) core: Add StrictHostCheck to allow unconfigured hostnames to be rejected. [Eric Covener] git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1890245 13f79535-47bb-0310-9956-ffa450edef68
0
static void sco_chan_del(struct sock *sk, int err) { struct sco_conn *conn; conn = sco_pi(sk)->conn; BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { sco_conn_lock(conn); conn->sk = NULL; sco_pi(sk)->conn = NULL; sco_conn_unlock(conn); if (conn->hcon) hci_conn_put(conn->hcon); } sk->sk_state = BT_CLOSED; sk->sk_err = err; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_ZAPPED); }
Safe
[ "CWE-200" ]
linux
c8c499175f7d295ef867335bceb9a76a2c3cdc38
2.9164767344372853e+38
24
Bluetooth: SCO - Fix missing msg_namelen update in sco_sock_recvmsg() If the socket is in state BT_CONNECT2 and BT_SK_DEFER_SETUP is set in the flags, sco_sock_recvmsg() returns early with 0 without updating the possibly set msg_namelen member. This, in turn, leads to a 128 byte kernel stack leak in net/socket.c. Fix this by updating msg_namelen in this case. For all other cases it will be handled in bt_sock_recvmsg(). Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Gustavo Padovan <gustavo@padovan.org> Cc: Johan Hedberg <johan.hedberg@gmail.com> Signed-off-by: Mathias Krause <minipli@googlemail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
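The leak fixed by the commit above exists because an early return from recvmsg leaves msg_namelen untouched, and net/socket.c then copies that many uninitialized stack bytes back to userspace. A minimal sketch of the fix pattern, with a simplified condition name standing in for the BT_SK_DEFER_SETUP test.

/* Every early-return path must say how much of msg_name is valid. */
if (sk->sk_state == BT_CONNECT2 && defer_setup_flag_set) {
    msg->msg_namelen = 0;    /* the one-line fix: no address, say so */
    return 0;                /* previously returned with namelen unset */
}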
CImg<_cimg_Tt> operator+(const t value) const { return CImg<_cimg_Tt>(*this,false)+=value; }
Safe
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
7.649815455449599e+37
3
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
0
rdpsnd_record(const void *data, unsigned int size) { uint32 remain, chunk; assert(rec_device_open); while (size) { remain = sizeof(record_buffer) - record_buffer_size; if (size >= remain) chunk = remain; else chunk = size; memcpy(record_buffer + record_buffer_size, data, chunk); #ifdef B_ENDIAN if (current_driver->need_byteswap_on_be) rdpsnd_dsp_swapbytes(record_buffer + record_buffer_size, chunk, &rec_formats[rec_current_format]); #endif record_buffer_size += chunk; data = (const char *) data + chunk; size -= chunk; if (record_buffer_size == sizeof(record_buffer)) rdpsnd_flush_record(); } }
Safe
[ "CWE-787" ]
rdesktop
766ebcf6f23ccfe8323ac10242ae6e127d4505d2
2.221290968859255e+38
32
Malicious RDP server security fixes This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities were identified and reported by Eyal Itkin. * Add rdp_protocol_error function that is used in several fixes * Refactor of process_bitmap_updates * Fix possible integer overflow in s_check_rem() on 32bit arch * Fix memory corruption in process_bitmap_data - CVE-2018-8794 * Fix remote code execution in process_bitmap_data - CVE-2018-8795 * Fix remote code execution in process_plane - CVE-2018-8797 * Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175 * Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175 * Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176 * Fix Denial of Service in sec_recv - CVE-2018-20176 * Fix minor information leak in rdpdr_process - CVE-2018-8791 * Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792 * Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793 * Fix Denial of Service in process_bitmap_data - CVE-2018-8796 * Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798 * Fix Denial of Service in process_secondary_order - CVE-2018-8799 * Fix remote code execution in ui_clip_handle_data - CVE-2018-8800 * Fix major information leak in ui_clip_handle_data - CVE-2018-20174 * Fix memory corruption in rdp_in_unistr - CVE-2018-20177 * Fix Denial of Service in process_demand_active - CVE-2018-20178 * Fix remote code execution in lspci_process - CVE-2018-20179 * Fix remote code execution in rdpsnddbg_process - CVE-2018-20180 * Fix remote code execution in seamless_process - CVE-2018-20181 * Fix remote code execution in seamless_process_line - CVE-2018-20182
0
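One of the 21 items listed above is the 32-bit integer overflow in s_check_rem(): computing s->p + n can wrap past s->end and wrongly pass the check. A sketch of the overflow-safe rewrite; the macro body illustrates the technique and is not a verbatim copy of the upstream patch.

/* Overflow-prone original shape: pointer addition may wrap on 32-bit. */
/*   #define s_check_rem(s, n)  ((s)->p + (n) <= (s)->end)             */

/* Safe: compare the request against the space actually remaining. */
#define s_check_rem(s, n) \
    ((s)->p <= (s)->end && (size_t)(n) <= (size_t)((s)->end - (s)->p))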
dname_pkt_compare(sldns_buffer* pkt, uint8_t* d1, uint8_t* d2) { uint8_t len1, len2; log_assert(pkt && d1 && d2); len1 = *d1++; len2 = *d2++; while( len1 != 0 || len2 != 0 ) { /* resolve ptrs */ if(LABEL_IS_PTR(len1)) { d1 = sldns_buffer_at(pkt, PTR_OFFSET(len1, *d1)); len1 = *d1++; continue; } if(LABEL_IS_PTR(len2)) { d2 = sldns_buffer_at(pkt, PTR_OFFSET(len2, *d2)); len2 = *d2++; continue; } /* check label length */ log_assert(len1 <= LDNS_MAX_LABELLEN); log_assert(len2 <= LDNS_MAX_LABELLEN); if(len1 != len2) { if(len1 < len2) return -1; return 1; } log_assert(len1 == len2 && len1 != 0); /* compare labels */ while(len1--) { if(tolower((unsigned char)*d1) != tolower((unsigned char)*d2)) { if(tolower((unsigned char)*d1) < tolower((unsigned char)*d2)) return -1; return 1; } d1++; d2++; } len1 = *d1++; len2 = *d2++; } return 0; }
Vulnerable
[ "CWE-400" ]
unbound
ba0f382eee814e56900a535778d13206b86b6d49
1.983214470162146e+38
41
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming query into a large number of queries directed to a target. - CVE-2020-12663 Malformed answers from upstream name servers can be used to make Unbound unresponsive.
1
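This record is labeled Vulnerable: dname_pkt_compare follows DNS compression pointers with no bound, so a crafted packet containing a pointer cycle can keep the loop spinning, the resource-consumption side of the CVEs above. The standard mitigation is a chase counter; a sketch of the first branch, with MAX_PTR_CHASES as an illustrative limit rather than Unbound's actual constant.

int chases = 0;                         /* bound pointer-following per name */
while (len1 != 0 || len2 != 0) {
    if (LABEL_IS_PTR(len1)) {
        if (++chases > MAX_PTR_CHASES)  /* cycle or decompression bomb */
            return -1;                  /* treat the packet as malformed */
        d1 = sldns_buffer_at(pkt, PTR_OFFSET(len1, *d1));
        len1 = *d1++;
        continue;
    }
    /* ... remainder as in the original loop, mirrored for len2 ... */
}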
command_height(void) { int h; frame_T *frp; int old_p_ch = curtab->tp_ch_used; /* Use the value of p_ch that we remembered. This is needed for when the * GUI starts up, we can't be sure in what order things happen. And when * p_ch was changed in another tab page. */ curtab->tp_ch_used = p_ch; /* Find bottom frame with width of screen. */ frp = lastwin->w_frame; while (frp->fr_width != Columns && frp->fr_parent != NULL) frp = frp->fr_parent; /* Avoid changing the height of a window with 'winfixheight' set. */ while (frp->fr_prev != NULL && frp->fr_layout == FR_LEAF && frp->fr_win->w_p_wfh) frp = frp->fr_prev; if (starting != NO_SCREEN) { cmdline_row = Rows - p_ch; if (p_ch > old_p_ch) /* p_ch got bigger */ { while (p_ch > old_p_ch) { if (frp == NULL) { emsg(_(e_noroom)); p_ch = old_p_ch; curtab->tp_ch_used = p_ch; cmdline_row = Rows - p_ch; break; } h = frp->fr_height - frame_minheight(frp, NULL); if (h > p_ch - old_p_ch) h = p_ch - old_p_ch; old_p_ch += h; frame_add_height(frp, -h); frp = frp->fr_prev; } /* Recompute window positions. */ (void)win_comp_pos(); /* clear the lines added to cmdline */ if (full_screen) screen_fill((int)(cmdline_row), (int)Rows, 0, (int)Columns, ' ', ' ', 0); msg_row = cmdline_row; redraw_cmdline = TRUE; return; } if (msg_row < cmdline_row) msg_row = cmdline_row; redraw_cmdline = TRUE; } frame_add_height(frp, (int)(old_p_ch - p_ch)); /* Recompute window positions. */ if (frp != lastwin->w_frame) (void)win_comp_pos(); }
Safe
[ "CWE-416" ]
vim
ec66c41d84e574baf8009dbc0bd088d2bc5b2421
2.4478520026274852e+38
67
patch 8.1.2136: using freed memory with autocmd from fuzzer Problem: using freed memory with autocmd from fuzzer. (Dhiraj Mishra, Dominique Pelle) Solution: Avoid using "wp" after autocommands. (closes #5041)
0
static double mp_matrix_svd(_cimg_math_parser& mp) { double *ptrd = &_mp_arg(1) + 1; const double *ptr1 = &_mp_arg(2) + 1; const unsigned int k = (unsigned int)mp.opcode[3], l = (unsigned int)mp.opcode[4]; CImg<doubleT> U, S, V; CImg<doubleT>(ptr1,k,l,1,1,true).SVD(U,S,V); CImg<doubleT>(ptrd,k,l,1,1,true) = U; CImg<doubleT>(ptrd + k*l,1,k,1,1,true) = S; CImg<doubleT>(ptrd + k*l + k,k,k,1,1,true) = V; return cimg::type<double>::nan();
Safe
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
6.469687320424724e+37
13
Fix other issues in 'CImg<T>::load_bmp()'.
0
prepend_bwrap_argv_wrapper (GPtrArray *argv, int app_info_fd, int *bwrap_fd_out, GError **error) { int i = 0; g_auto(GLnxDirFdIterator) dir_iter = { 0 }; struct dirent *dent; g_autoptr(GPtrArray) bwrap_args = g_ptr_array_new_with_free_func (g_free); gsize bwrap_args_len; glnx_fd_close int bwrap_args_fd = -1; g_autofree char *bwrap_args_data = NULL; if (!glnx_dirfd_iterator_init_at (AT_FDCWD, "/", FALSE, &dir_iter, error)) return FALSE; while (TRUE) { if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dir_iter, &dent, NULL, error)) return FALSE; if (dent == NULL) break; if (strcmp (dent->d_name, ".flatpak-info") == 0) continue; if (dent->d_type == DT_DIR) { if (strcmp (dent->d_name, "tmp") == 0 || strcmp (dent->d_name, "var") == 0 || strcmp (dent->d_name, "run") == 0) g_ptr_array_add (bwrap_args, g_strdup ("--bind")); else g_ptr_array_add (bwrap_args, g_strdup ("--ro-bind")); g_ptr_array_add (bwrap_args, g_strconcat ("/", dent->d_name, NULL)); g_ptr_array_add (bwrap_args, g_strconcat ("/", dent->d_name, NULL)); } else if (dent->d_type == DT_LNK) { ssize_t symlink_size; char path_buffer[PATH_MAX + 1]; symlink_size = readlinkat (dir_iter.fd, dent->d_name, path_buffer, sizeof (path_buffer) - 1); if (symlink_size < 0) { glnx_set_error_from_errno (error); return FALSE; } path_buffer[symlink_size] = 0; g_ptr_array_add (bwrap_args, g_strdup ("--symlink")); g_ptr_array_add (bwrap_args, g_strdup (path_buffer)); g_ptr_array_add (bwrap_args, g_strconcat ("/", dent->d_name, NULL)); } } g_ptr_array_add (bwrap_args, g_strdup ("--ro-bind-data")); g_ptr_array_add (bwrap_args, g_strdup_printf ("%d", app_info_fd)); g_ptr_array_add (bwrap_args, g_strdup ("/.flatpak-info")); bwrap_args_data = join_args (bwrap_args, &bwrap_args_len); bwrap_args_fd = create_tmp_fd (bwrap_args_data, bwrap_args_len, error); if (bwrap_args_fd < 0) return FALSE; g_ptr_array_insert (argv, i++, g_strdup (flatpak_get_bwrap ())); g_ptr_array_insert (argv, i++, g_strdup ("--args")); g_ptr_array_insert (argv, i++, g_strdup_printf ("%d", bwrap_args_fd)); *bwrap_fd_out = bwrap_args_fd; bwrap_args_fd = -1; /* Steal it */ return TRUE; }
Safe
[ "CWE-20" ]
flatpak
902fb713990a8f968ea4350c7c2a27ff46f1a6c4
2.6105397637085283e+38
75
Use seccomp to filter out TIOCSTI ioctl This would otherwise let the sandbox add input to the controlling tty.
0
static void ffs_data_clear(struct ffs_data *ffs) { ENTER(); ffs_closed(ffs); BUG_ON(ffs->gadget); if (ffs->epfiles) ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); if (ffs->ffs_eventfd) eventfd_ctx_put(ffs->ffs_eventfd); kfree(ffs->raw_descs_data); kfree(ffs->raw_strings); kfree(ffs->stringtabs); }
Safe
[ "CWE-416", "CWE-362" ]
linux
38740a5b87d53ceb89eb2c970150f6e94e00373a
3.3517435986751307e+37
18
usb: gadget: f_fs: Fix use-after-free When using asynchronous read or write operations on the USB endpoints the issuer of the IO request is notified by calling the ki_complete() callback of the submitted kiocb when the URB has been completed. Calling this ki_complete() callback will free kiocb. Make sure that the structure is no longer accessed beyond that point, otherwise undefined behaviour might occur. Fixes: 2e4c7553cd6f ("usb: gadget: f_fs: add aio support") Cc: <stable@vger.kernel.org> # v3.15+ Signed-off-by: Lars-Peter Clausen <lars@metafoo.de> Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
0
static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) { if (static_key_false(&preempt_notifier_key)) __fire_sched_in_preempt_notifiers(curr); }
Safe
[ "CWE-119" ]
linux
29d6455178a09e1dc340380c582b13356227e8df
1.9738905431405513e+38
5
sched: panic on corrupted stack end Until now, hitting this BUG_ON caused a recursive oops (because oops handling involves do_exit(), which calls into the scheduler, which in turn raises an oops), which caused stuff below the stack to be overwritten until a panic happened (e.g. via an oops in interrupt context, caused by the overwritten CPU index in the thread_info). Just panic directly. Signed-off-by: Jann Horn <jannh@google.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
QPDF_Array::setDescription(QPDF* qpdf, std::string const& description) { this->QPDFObject::setDescription(qpdf, description); }
Safe
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
3.2678631852838937e+38
4
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
0
static void io_req_track_inflight(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; if (!(req->flags & REQ_F_INFLIGHT)) { req->flags |= REQ_F_INFLIGHT; spin_lock_irq(&ctx->inflight_lock); list_add(&req->inflight_entry, &ctx->inflight_list); spin_unlock_irq(&ctx->inflight_lock); } }
Safe
[ "CWE-667" ]
linux
3ebba796fa251d042be42b929a2d916ee5c34a49
2.711914572438705e+38
12
io_uring: ensure that SQPOLL thread is started for exit If we create it in a disabled state because IORING_SETUP_R_DISABLED is set on ring creation, we need to ensure that we've kicked the thread if we're exiting before it's been explicitly disabled. Otherwise we can run into a deadlock where exit is waiting go park the SQPOLL thread, but the SQPOLL thread itself is waiting to get a signal to start. That results in the below trace of both tasks hung, waiting on each other: INFO: task syz-executor458:8401 blocked for more than 143 seconds. Not tainted 5.11.0-next-20210226-syzkaller #0 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread_park fs/io_uring.c:7115 [inline] io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103 io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745 __io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840 io_uring_files_cancel include/linux/io_uring.h:47 [inline] do_exit+0x299/0x2a60 kernel/exit.c:780 do_group_exit+0x125/0x310 kernel/exit.c:922 __do_sys_exit_group kernel/exit.c:933 [inline] __se_sys_exit_group kernel/exit.c:931 [inline] __x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x43e899 RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7 RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899 RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000 RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000 R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0 R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001 INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds. task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294 INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds. Reported-by: syzbot+fb5458330b4442f2090d@syzkaller.appspotmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
static void txtin_process_send_text_sample(GF_TXTIn *ctx, GF_TextSample *txt_samp, u64 ts, u32 duration, Bool is_rap) { GF_FilterPacket *dst_pck; u8 *pck_data; u32 size; if (ctx->seek_state==2) { Double end = (Double) (ts+duration); end /= 1000; if (end < ctx->start_range) return; ctx->seek_state = 0; } size = gf_isom_text_sample_size(txt_samp); dst_pck = gf_filter_pck_new_alloc(ctx->opid, size, &pck_data); if (!dst_pck) return; gf_bs_reassign_buffer(ctx->bs_w, pck_data, size); gf_isom_text_sample_write_bs(txt_samp, ctx->bs_w); ts = gf_timestamp_rescale(ts, 1000, ctx->timescale); duration = (u32) gf_timestamp_rescale(duration, 1000, ctx->timescale); gf_filter_pck_set_sap(dst_pck, is_rap ? GF_FILTER_SAP_1 : GF_FILTER_SAP_NONE); gf_filter_pck_set_cts(dst_pck, ts); gf_filter_pck_set_duration(dst_pck, duration); gf_filter_pck_send(dst_pck); }
Safe
[ "CWE-276" ]
gpac
96699aabae042f8f55cf8a85fa5758e3db752bae
1.9647081483669794e+38
30
fixed #2061
0
int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, u8 *filter, bool add_filter) { struct sk_buff *skb; struct wmi_mcast_filter_add_del_cmd *cmd; int ret; if ((filter[0] != 0x33 || filter[1] != 0x33) && (filter[0] != 0x01 || filter[1] != 0x00 || filter[2] != 0x5e || filter[3] > 0x7f)) { ath6kl_warn("invalid multicast filter address\n"); return -EINVAL; } skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_mcast_filter_add_del_cmd *) skb->data; memcpy(cmd->mcast_mac, filter, ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE); ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, add_filter ? WMI_SET_MCAST_FILTER_CMDID : WMI_DEL_MCAST_FILTER_CMDID, NO_SYNC_WMIFLAG); return ret; }
Safe
[ "CWE-125" ]
linux
5d6751eaff672ea77642e74e92e6c0ac7f9709ab
8.899841874111074e+37
27
ath6kl: add some bounds checking The "ev->traffic_class" and "reply->ac" variables come from the network and they're used as an offset into the wmi->stream_exist_for_ac[] array. Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[] array only has WMM_NUM_AC (4) elements. We need to add a couple bounds checks to prevent array overflows. I also modified one existing check from "if (traffic_class > 3) {" to "if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent. Fixes: bdcd81707973 (" Add ath6kl cleaned up driver") Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
0
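The commit message names the exact checks; in C they are two comparisons performed before the attacker-influenced bytes are used to index stream_exist_for_ac[WMM_NUM_AC]:

if (ev->traffic_class >= WMM_NUM_AC)   /* u8 from the network: 0-255 */
    return -EINVAL;
if (reply->ac >= WMM_NUM_AC)           /* array only has 4 slots     */
    return -EINVAL;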
static int vhdx_create_new_headers(BlockDriverState *bs, uint64_t image_size, uint32_t log_size) { int ret = 0; VHDXHeader *hdr = NULL; hdr = g_malloc0(sizeof(VHDXHeader)); hdr->signature = VHDX_HEADER_SIGNATURE; hdr->sequence_number = g_random_int(); hdr->log_version = 0; hdr->version = 1; hdr->log_length = log_size; hdr->log_offset = VHDX_HEADER_SECTION_END; vhdx_guid_generate(&hdr->file_write_guid); vhdx_guid_generate(&hdr->data_write_guid); ret = vhdx_write_header(bs, hdr, VHDX_HEADER1_OFFSET, false); if (ret < 0) { goto exit; } hdr->sequence_number++; ret = vhdx_write_header(bs, hdr, VHDX_HEADER2_OFFSET, false); if (ret < 0) { goto exit; } exit: g_free(hdr); return ret; }
Safe
[ "CWE-835" ]
qemu
1d7678dec4761acdc43439da6ceda41a703ba1a6
2.477993385519319e+38
31
vhdx: Bounds checking for block_size and logical_sector_size (CVE-2014-0148) Other variables (e.g. sectors_per_block) are calculated using these variables, and if not range-checked illegal values could be obtained causing infinite loops and other potential issues when calculating BAT entries. The 1.00 VHDX spec requires BlockSize to be min 1MB, max 256MB. LogicalSectorSize is required to be either 512 or 4096 bytes. Reported-by: Kevin Wolf <kwolf@redhat.com> Signed-off-by: Jeff Cody <jcody@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com> Reviewed-by: Max Reitz <mreitz@redhat.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
0
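The ranges quoted in the commit message translate directly into validation code: the VHDX 1.00 spec requires BlockSize between 1 MiB and 256 MiB and LogicalSectorSize of 512 or 4096 bytes. A sketch with illustrative variable names.

if (block_size < (1u << 20) || block_size > (256u << 20))
    return -EINVAL;                       /* BlockSize out of spec range */
if (logical_sector_size != 512 && logical_sector_size != 4096)
    return -EINVAL;                       /* only two legal sector sizes */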
static int create_or_die(const char *filename) { int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_EXCL, DEFAULT_DUMP_DIR_MODE); if (fd >= 0) { IGNORE_RESULT(fchown(fd, dd->dd_uid, dd->dd_gid)); return fd; } int sv_errno = errno; if (dd) dd_delete(dd); if (user_core_fd >= 0) unlinkat(dirfd(proc_cwd), core_basename, /*unlink file*/0); errno = sv_errno; perror_msg_and_die("Can't open '%s'", filename); }
Safe
[ "CWE-200" ]
abrt
7269a2cc88735aee0d1fa62491b9efe73ab5c6e8
7.81400702948698e+37
18
ccpp: revert the UID/GID changes if user core fails Thanks Florian Weimer <fweimer@redhat.com> Signed-off-by: Jakub Filak <jfilak@redhat.com>
0
static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; int noblock; int err; noblock = flags & MSG_DONTWAIT; flags &= ~MSG_DONTWAIT; skb = skb_recv_datagram(sk, flags, noblock, &error); if (!skb) return error; if (skb->len < size) size = skb->len; err = memcpy_to_msg(msg, skb->data, size); if (err < 0) { skb_free_datagram(sk, skb); return err; } sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { __sockaddr_check_size(BCM_MIN_NAMELEN); msg->msg_namelen = BCM_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } skb_free_datagram(sk, skb); return size; }
Safe
[ "CWE-362" ]
linux
d5f9023fa61ee8b94f37a93f08e94b136cf1e463
1.8178783267325335e+37
36
can: bcm: delay release of struct bcm_op after synchronize_rcu() can_rx_register() callbacks may be called concurrently to the call to can_rx_unregister(). The callbacks and callback data, though, are protected by RCU and the struct sock reference count. So the callback data is really attached to the life of sk, meaning that it should be released on sk_destruct. However, bcm_remove_op() calls tasklet_kill(), and RCU callbacks may be called under RCU softirq, so that cannot be used on kernels before the introduction of HRTIMER_MODE_SOFT. However, bcm_rx_handler() is called under RCU protection, so after calling can_rx_unregister(), we may call synchronize_rcu() in order to wait for any RCU read-side critical sections to finish. That is, bcm_rx_handler() won't be called anymore for those ops. So, we only free them, after we do that synchronize_rcu(). Fixes: ffd980f976e7 ("[CAN]: Add broadcast manager (bcm) protocol") Link: https://lore.kernel.org/r/20210619161813.2098382-1-cascardo@canonical.com Cc: linux-stable <stable@vger.kernel.org> Reported-by: syzbot+0f7e7e5e2f4f40fa89c0@syzkaller.appspotmail.com Reported-by: Norbert Slusarek <nslusarek@gmx.net> Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com> Acked-by: Oliver Hartkopp <socketcan@hartkopp.net> Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
0
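The message above is about ordering: unregister the callback, wait for RCU readers with synchronize_rcu(), and only then free the callback data. The sketch below shows the same three steps in userspace with liburcu purely as an illustration (thread setup such as rcu_register_thread() is omitted); the kernel fix applies the identical ordering to struct bcm_op.

#include <urcu.h>    /* liburcu; assumed available for this illustration */
#include <stdlib.h>

struct cb_data { int payload; };
static struct cb_data *active_cb;   /* stands in for the registered op */

static void reader(void)
{
    rcu_read_lock();
    struct cb_data *d = rcu_dereference(active_cb);
    if (d) {
        /* like bcm_rx_handler(): d stays valid until rcu_read_unlock() */
    }
    rcu_read_unlock();
}

static void unregister_and_free(void)
{
    struct cb_data *d = active_cb;
    rcu_assign_pointer(active_cb, NULL); /* 1: no new reader can see d */
    synchronize_rcu();                   /* 2: wait out readers already in */
    free(d);                             /* 3: only now release it */
}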
static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, struct cma_hdr *hdr) { struct sockaddr_in *listen4, *ip4; listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr; ip4 = (struct sockaddr_in *) &id->route.addr.src_addr; ip4->sin_family = listen4->sin_family; ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; ip4->sin_port = listen4->sin_port; ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr; ip4->sin_family = listen4->sin_family; ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; ip4->sin_port = hdr->port; }
Safe
[ "CWE-20" ]
linux
b2853fd6c2d0f383dbdf7427e263eb576a633867
1.2307704726840715e+38
16
IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler The code that resolves the passive side source MAC within the rdma_cm connection request handler was both redundant and buggy, so remove it. It was redundant since later, when an RC QP is modified to RTR state, the resolution will take place in the ib_core module. It was buggy because this callback also deals with UD SIDR exchange, for which we incorrectly looked at the REQ member of the CM event and dereferenced a random value. Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures") Signed-off-by: Moni Shoua <monis@mellanox.com> Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
0
static void selinux_req_classify_flow(const struct request_sock *req, struct flowi_common *flic) { flic->flowic_secid = req->secid; }
Safe
[ "CWE-416" ]
linux
a3727a8bac0a9e77c70820655fd8715523ba3db7
1.62476290163944e+38
5
selinux,smack: fix subjective/objective credential use mixups Jann Horn reported a problem with commit eb1231f73c4d ("selinux: clarify task subjective and objective credentials") where some LSM hooks were attempting to access the subjective credentials of a task other than the current task. Generally speaking, it is not safe to access another task's subjective credentials and doing so can cause a number of problems. Further, while looking into the problem, I realized that Smack was suffering from a similar problem brought about by a similar commit 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials"). This patch addresses this problem by restoring the use of the task's objective credentials in those cases where the task is other than the current executing task. Not only does this resolve the problem reported by Jann, it is arguably the correct thing to do in these cases. Cc: stable@vger.kernel.org Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials") Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials") Reported-by: Jann Horn <jannh@google.com> Acked-by: Eric W. Biederman <ebiederm@xmission.com> Acked-by: Casey Schaufler <casey@schaufler-ca.com> Signed-off-by: Paul Moore <paul@paul-moore.com>
0
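The message draws a line between a task's subjective credentials (only the task itself may consult them) and its objective credentials (what others may use when acting on the task). A kernel-style sketch of the safe direction follows; it is not standalone-compilable and the helper name is an assumption, though __task_cred() and security_cred_getsecid() are the real kernel accessors.

/* when inspecting a task other than current, use its objective creds */
static u32 task_obj_sid(struct task_struct *p)
{
    u32 sid;

    rcu_read_lock();
    /* __task_cred(p) is p's objective credentials; p->cred (subjective)
     * must never be read on behalf of another task */
    security_cred_getsecid(__task_cred(p), &sid);
    rcu_read_unlock();
    return sid;
}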
int PE_(bin_pe_get_actual_checksum)(RBinPEObj* pe) { size_t i, j, checksum_offset = 0; ut64 computed_cs = 0; int remaining_bytes; int shift; ut32 cur; if (!pe || !pe->nt_header_offset) { return 0; } const size_t buf_sz = 0x1000; ut32 *buf = malloc (buf_sz); if (!buf) { return 0; } if (r_buf_read_at (pe->b, 0, (ut8 *)buf, buf_sz) < 0) { free (buf); return 0; } checksum_offset = pe->nt_header_offset + 4 + sizeof (PE_(image_file_header)) + 0x40; for (i = 0, j = 0; i < pe->size / 4; i++) { cur = r_read_at_ble32 (buf, j * 4, pe->endian); j++; // skip the checksum bytes if (i * 4 == checksum_offset) { continue; } computed_cs = (computed_cs & 0xFFFFFFFF) + cur + (computed_cs >> 32); if (computed_cs >> 32) { computed_cs = (computed_cs & 0xFFFFFFFF) + (computed_cs >> 32); } if (j == buf_sz / 4) { if (r_buf_read_at (pe->b, (i + 1) * 4, (ut8 *)buf, buf_sz) < 0) { break; } j = 0; } } // add resultant bytes to checksum remaining_bytes = pe->size % 4; i = i * 4; if (remaining_bytes != 0) { cur = r_buf_read8_at (pe->b, i); shift = 8; for (j = 1; j < remaining_bytes; j++, shift += 8) { cur |= r_buf_read8_at (pe->b, i + j) << shift; } computed_cs = (computed_cs & 0xFFFFFFFF) + cur + (computed_cs >> 32); if (computed_cs >> 32) { computed_cs = (computed_cs & 0xFFFFFFFF) + (computed_cs >> 32); } } // 32bits -> 16bits computed_cs = (computed_cs & 0xFFFF) + (computed_cs >> 16); computed_cs = (computed_cs) + (computed_cs >> 16); computed_cs = (computed_cs & 0xFFFF); // add filesize computed_cs += pe->size; free (buf); return computed_cs; }
Safe
[ "CWE-400", "CWE-703" ]
radare2
634b886e84a5c568d243e744becc6b3223e089cf
1.845123783595069e+38
64
Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash * Reported by lazymio * Reproducer: AAA4AAAAAB4=
0
static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc) { struct sk_buff *skb = NULL; struct net *net = dev_net(in_dev->dev); int type; if (!pmc) { rcu_read_lock(); for_each_pmc_rcu(in_dev, pmc) { if (pmc->multiaddr == IGMP_ALL_HOSTS) continue; if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) continue; spin_lock_bh(&pmc->lock); if (pmc->sfcount[MCAST_EXCLUDE]) type = IGMPV3_MODE_IS_EXCLUDE; else type = IGMPV3_MODE_IS_INCLUDE; skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->lock); } rcu_read_unlock(); } else { spin_lock_bh(&pmc->lock); if (pmc->sfcount[MCAST_EXCLUDE]) type = IGMPV3_MODE_IS_EXCLUDE; else type = IGMPV3_MODE_IS_INCLUDE; skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->lock); } if (!skb) return 0; return igmpv3_sendpack(skb); }
Safe
[ "CWE-362" ]
linux
23d2b94043ca8835bd1e67749020e839f396a1c2
2.0264510012256957e+38
36
igmp: Add ip_mc_list lock in ip_check_mc_rcu I got below panic when doing fuzz test: Kernel panic - not syncing: panic_on_warn set ... CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014 Call Trace: dump_stack_lvl+0x7a/0x9b panic+0x2cd/0x5af end_report.cold+0x5a/0x5a kasan_report+0xec/0x110 ip_check_mc_rcu+0x556/0x5d0 __mkroute_output+0x895/0x1740 ip_route_output_key_hash_rcu+0x2d0/0x1050 ip_route_output_key_hash+0x182/0x2e0 ip_route_output_flow+0x28/0x130 udp_sendmsg+0x165d/0x2280 udpv6_sendmsg+0x121e/0x24f0 inet6_sendmsg+0xf7/0x140 sock_sendmsg+0xe9/0x180 ____sys_sendmsg+0x2b8/0x7a0 ___sys_sendmsg+0xf0/0x160 __sys_sendmmsg+0x17e/0x3c0 __x64_sys_sendmmsg+0x9e/0x100 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x462eb9 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133 RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9 RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007 RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff It is one use-after-free in ip_check_mc_rcu. In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection. But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock. Signed-off-by: Liu Jian <liujian56@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
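The panic trace boils down to one rule: RCU keeps the ip_mc_list object itself alive, but its source-filter state (sfcount[], sources) is mutated under pmc->lock, so even an RCU reader must take that spinlock. A kernel-style sketch of the fixed shape (a simplified membership test, not the literal patch):

static int mc_member_sketch(struct in_device *in_dev, __be32 mc_addr)
{
    struct ip_mc_list *im;
    int rv = 0;

    rcu_read_lock();
    for_each_pmc_rcu(in_dev, im) {
        if (im->multiaddr != mc_addr)
            continue;
        spin_lock_bh(&im->lock);        /* the lock the commit adds */
        if (im->sfcount[MCAST_EXCLUDE]) /* filter state read under lock */
            rv = 1;
        spin_unlock_bh(&im->lock);
        break;
    }
    rcu_read_unlock();
    return rv;
}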
mml_add(MinMaxLen* to, MinMaxLen* add) { to->min = distance_add(to->min, add->min); to->max = distance_add(to->max, add->max); }
Safe
[ "CWE-787" ]
oniguruma
cbe9f8bd9cfc6c3c87a60fbae58fa1a85db59df0
2.382744028351604e+38
5
#207: Out-of-bounds write
0
callbacks_move_layer_down_button_clicked (GtkButton *button, gpointer user_data) { gint index=callbacks_get_selected_row_index(); if (index < 0) { show_no_layers_warning (); return; } if (index < mainProject->last_loaded) { gerbv_change_layer_order (mainProject, index, index + 1); callbacks_update_layer_tree (); callbacks_select_layer_row (index + 1); if (screenRenderInfo.renderType <= GERBV_RENDER_TYPE_GDK_XOR) { render_refresh_rendered_image_on_screen (); } else { render_recreate_composite_surface (screen.drawing_area); callbacks_force_expose_event_for_screen (); } } }
Safe
[ "CWE-200" ]
gerbv
319a8af890e4d0a5c38e6d08f510da8eefc42537
5.747810327950837e+37
21
Remove local alias to parameter array Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402
0
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end) { struct evbuffer_ptr pos; struct evbuffer_chain *chain, *last_chain = NULL; const unsigned char *p; char first; EVBUFFER_LOCK(buffer); if (start) { memcpy(&pos, start, sizeof(pos)); chain = pos.internal_.chain; } else { pos.pos = 0; chain = pos.internal_.chain = buffer->first; pos.internal_.pos_in_chain = 0; } if (end) last_chain = end->internal_.chain; if (!len || len > EV_SSIZE_MAX) goto done; first = what[0]; while (chain) { const unsigned char *start_at = chain->buffer + chain->misalign + pos.internal_.pos_in_chain; p = memchr(start_at, first, chain->off - pos.internal_.pos_in_chain); if (p) { pos.pos += p - start_at; pos.internal_.pos_in_chain += p - start_at; if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { if (end && pos.pos + (ev_ssize_t)len > end->pos) goto not_found; else goto done; } ++pos.pos; ++pos.internal_.pos_in_chain; if (pos.internal_.pos_in_chain == chain->off) { chain = pos.internal_.chain = chain->next; pos.internal_.pos_in_chain = 0; } } else { if (chain == last_chain) goto not_found; pos.pos += chain->off - pos.internal_.pos_in_chain; chain = pos.internal_.chain = chain->next; pos.internal_.pos_in_chain = 0; } } not_found: PTR_NOT_FOUND(&pos); done: EVBUFFER_UNLOCK(buffer); return pos; }
Safe
[ "CWE-189" ]
libevent
841ecbd96105c84ac2e7c9594aeadbcc6fb38bc4
3.3613953660470462e+38
62
Fix CVE-2014-6272 in Libevent 2.1 For this fix, we need to make sure that passing too-large inputs to the evbuffer functions can't make us do bad things with the heap. Also, lower the maximum chunk size to the lower of off_t, size_t maximum. This is necessary since otherwise we could get into an infinite loop if we make a chunk that 'misalign' cannot index into.
0
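The second half of the message is a concrete rule: cap every chunk at the lower of what off_t and size_t can represent, so the misalign arithmetic can never wrap into an infinite loop. A self-contained sketch of that cap (the macro and function names are assumptions, not libevent's identifiers):

#include <limits.h>
#include <stdint.h>
#include <sys/types.h>

/* largest value a signed off_t can hold, computed without overflow */
#define OFF_T_MAX ((((off_t)1 << (sizeof(off_t) * CHAR_BIT - 2)) - 1) * 2 + 1)

/* chunk cap = min(max off_t, max size_t) */
#define CHUNK_MAX ((uint64_t)OFF_T_MAX < (uint64_t)SIZE_MAX \
                       ? (size_t)OFF_T_MAX : (size_t)SIZE_MAX)

static size_t clamp_chunk_size(size_t requested)
{
    return requested > CHUNK_MAX ? CHUNK_MAX : requested;
}

On an LP64 system this clamps to OFF_T_MAX; on a 32-bit system with a 64-bit off_t it clamps to SIZE_MAX, which is exactly the "lower of" rule the message states.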
krb5_ldap_put_principal(krb5_context context, krb5_db_entry *entry, char **db_args) { int l=0, kerberos_principal_object_type=0; unsigned int ntrees=0, tre=0; krb5_error_code st=0, tempst=0; LDAP *ld=NULL; LDAPMessage *result=NULL, *ent=NULL; char **subtreelist = NULL; char *user=NULL, *subtree=NULL, *principal_dn=NULL; char **values=NULL, *strval[10]={NULL}, errbuf[1024]; char *filtuser=NULL; struct berval **bersecretkey=NULL; LDAPMod **mods=NULL; krb5_boolean create_standalone=FALSE; krb5_boolean krb_identity_exists=FALSE, establish_links=FALSE; char *standalone_principal_dn=NULL; krb5_tl_data *tl_data=NULL; krb5_key_data **keys=NULL; kdb5_dal_handle *dal_handle=NULL; krb5_ldap_context *ldap_context=NULL; krb5_ldap_server_handle *ldap_server_handle=NULL; osa_princ_ent_rec princ_ent = {0}; xargs_t xargs = {0}; char *polname = NULL; OPERATION optype; krb5_boolean found_entry = FALSE; /* Clear the global error string */ krb5_clear_error_message(context); SETUP_CONTEXT(); if (ldap_context->lrparams == NULL || ldap_context->container_dn == NULL) return EINVAL; /* get ldap handle */ GET_HANDLE(); if (!is_principal_in_realm(ldap_context, entry->princ)) { st = EINVAL; k5_setmsg(context, st, _("Principal does not belong to the default realm")); goto cleanup; } /* get the principal information to act on */ if (((st=krb5_unparse_name(context, entry->princ, &user)) != 0) || ((st=krb5_ldap_unparse_principal_name(user)) != 0)) goto cleanup; filtuser = ldap_filter_correct(user); if (filtuser == NULL) { st = ENOMEM; goto cleanup; } /* Identity the type of operation, it can be * add principal or modify principal. * hack if the entry->mask has KRB_PRINCIPAL flag set * then it is a add operation */ if (entry->mask & KADM5_PRINCIPAL) optype = ADD_PRINCIPAL; else optype = MODIFY_PRINCIPAL; if (((st=krb5_get_princ_type(context, entry, &kerberos_principal_object_type)) != 0) || ((st=krb5_get_userdn(context, entry, &principal_dn)) != 0)) goto cleanup; if ((st=process_db_args(context, db_args, &xargs, optype)) != 0) goto cleanup; if (entry->mask & KADM5_LOAD) { unsigned int tree = 0; int numlentries = 0; char *filter = NULL; /* A load operation is special, will do a mix-in (add krbprinc * attrs to a non-krb object entry) if an object exists with a * matching krbprincipalname attribute so try to find existing * object and set principal_dn. This assumes that the * krbprincipalname attribute is unique (only one object entry has * a particular krbprincipalname attribute). 
*/ if (asprintf(&filter, FILTER"%s))", filtuser) < 0) { filter = NULL; st = ENOMEM; goto cleanup; } /* get the current subtree list */ if ((st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees)) != 0) goto cleanup; found_entry = FALSE; /* search for entry with matching krbprincipalname attribute */ for (tree = 0; found_entry == FALSE && tree < ntrees; ++tree) { if (principal_dn == NULL) { LDAP_SEARCH_1(subtreelist[tree], ldap_context->lrparams->search_scope, filter, principal_attributes, IGNORE_STATUS); } else { /* just look for entry with principal_dn */ LDAP_SEARCH_1(principal_dn, LDAP_SCOPE_BASE, filter, principal_attributes, IGNORE_STATUS); } if (st == LDAP_SUCCESS) { numlentries = ldap_count_entries(ld, result); if (numlentries > 1) { free(filter); st = EINVAL; k5_setmsg(context, st, _("operation can not continue, more than one " "entry with principal name \"%s\" found"), user); goto cleanup; } else if (numlentries == 1) { found_entry = TRUE; if (principal_dn == NULL) { ent = ldap_first_entry(ld, result); if (ent != NULL) { /* setting principal_dn will cause that entry to be modified further down */ if ((principal_dn = ldap_get_dn(ld, ent)) == NULL) { ldap_get_option (ld, LDAP_OPT_RESULT_CODE, &st); st = set_ldap_error (context, st, 0); free(filter); goto cleanup; } } } } } else if (st != LDAP_NO_SUCH_OBJECT) { /* could not perform search, return with failure */ st = set_ldap_error (context, st, 0); free(filter); goto cleanup; } ldap_msgfree(result); result = NULL; /* * If it isn't found then assume a standalone princ entry is to * be created. */ } /* end for (tree = 0; principal_dn == ... */ free(filter); if (found_entry == FALSE && principal_dn != NULL) { /* * if principal_dn is null then there is code further down to * deal with setting standalone_principal_dn. Also note that * this will set create_standalone true for * non-mix-in entries which is okay if loading from a dump. 
*/ create_standalone = TRUE; standalone_principal_dn = strdup(principal_dn); CHECK_NULL(standalone_principal_dn); } } /* end if (entry->mask & KADM5_LOAD */ /* time to generate the DN information with the help of * containerdn, principalcontainerreference or * realmcontainerdn information */ if (principal_dn == NULL && xargs.dn == NULL) { /* creation of standalone principal */ /* get the subtree information */ if (entry->princ->length == 2 && entry->princ->data[0].length == strlen("krbtgt") && strncmp(entry->princ->data[0].data, "krbtgt", entry->princ->data[0].length) == 0) { /* if the principal is a inter-realm principal, always created in the realm container */ subtree = strdup(ldap_context->lrparams->realmdn); } else if (xargs.containerdn) { if ((st=checkattributevalue(ld, xargs.containerdn, NULL, NULL, NULL)) != 0) { if (st == KRB5_KDB_NOENTRY || st == KRB5_KDB_CONSTRAINT_VIOLATION) { int ost = st; st = EINVAL; k5_wrapmsg(context, ost, st, _("'%s' not found"), xargs.containerdn); } goto cleanup; } subtree = strdup(xargs.containerdn); } else if (ldap_context->lrparams->containerref && strlen(ldap_context->lrparams->containerref) != 0) { /* * Here the subtree should be changed with * principalcontainerreference attribute value */ subtree = strdup(ldap_context->lrparams->containerref); } else { subtree = strdup(ldap_context->lrparams->realmdn); } CHECK_NULL(subtree); if (asprintf(&standalone_principal_dn, "krbprincipalname=%s,%s", filtuser, subtree) < 0) standalone_principal_dn = NULL; CHECK_NULL(standalone_principal_dn); /* * free subtree when you are done using the subtree * set the boolean create_standalone to TRUE */ create_standalone = TRUE; free(subtree); subtree = NULL; } /* * If the DN information is presented by the user, time to * validate the input to ensure that the DN falls under * any of the subtrees */ if (xargs.dn_from_kbd == TRUE) { /* make sure the DN falls in the subtree */ int dnlen=0, subtreelen=0; char *dn=NULL; krb5_boolean outofsubtree=TRUE; if (xargs.dn != NULL) { dn = xargs.dn; } else if (xargs.linkdn != NULL) { dn = xargs.linkdn; } else if (standalone_principal_dn != NULL) { /* * Even though the standalone_principal_dn is constructed * within this function, there is the containerdn input * from the user that can become part of the it. */ dn = standalone_principal_dn; } /* Get the current subtree list if we haven't already done so. */ if (subtreelist == NULL) { st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees); if (st) goto cleanup; } for (tre=0; tre<ntrees; ++tre) { if (subtreelist[tre] == NULL || strlen(subtreelist[tre]) == 0) { outofsubtree = FALSE; break; } else { dnlen = strlen (dn); subtreelen = strlen(subtreelist[tre]); if ((dnlen >= subtreelen) && (strcasecmp((dn + dnlen - subtreelen), subtreelist[tre]) == 0)) { outofsubtree = FALSE; break; } } } if (outofsubtree == TRUE) { st = EINVAL; k5_setmsg(context, st, _("DN is out of the realm subtree")); goto cleanup; } /* * dn value will be set either by dn, linkdn or the standalone_principal_dn * In the first 2 cases, the dn should be existing and in the last case we * are supposed to create the ldap object. so the below should not be * executed for the last case. */ if (standalone_principal_dn == NULL) { /* * If the ldap object is missing, this results in an error. */ /* * Search for krbprincipalname attribute here. * This is to find if a kerberos identity is already present * on the ldap object, in which case adding a kerberos identity * on the ldap object should result in an error. 
*/ char *attributes[]={"krbticketpolicyreference", "krbprincipalname", NULL}; ldap_msgfree(result); result = NULL; LDAP_SEARCH_1(dn, LDAP_SCOPE_BASE, 0, attributes, IGNORE_STATUS); if (st == LDAP_SUCCESS) { ent = ldap_first_entry(ld, result); if (ent != NULL) { if ((values=ldap_get_values(ld, ent, "krbticketpolicyreference")) != NULL) { ldap_value_free(values); } if ((values=ldap_get_values(ld, ent, "krbprincipalname")) != NULL) { krb_identity_exists = TRUE; ldap_value_free(values); } } } else { st = set_ldap_error(context, st, OP_SEARCH); goto cleanup; } } } /* * If xargs.dn is set then the request is to add a * kerberos principal on a ldap object, but if * there is one already on the ldap object this * should result in an error. */ if (xargs.dn != NULL && krb_identity_exists == TRUE) { st = EINVAL; snprintf(errbuf, sizeof(errbuf), _("ldap object is already kerberized")); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } if (xargs.linkdn != NULL) { /* * link information can be changed using modprinc. * However, link information can be changed only on the * standalone kerberos principal objects. A standalone * kerberos principal object is of type krbprincipal * structural objectclass. * * NOTE: kerberos principals on an ldap object can't be * linked to other ldap objects. */ if (optype == MODIFY_PRINCIPAL && kerberos_principal_object_type != KDB_STANDALONE_PRINCIPAL_OBJECT) { st = EINVAL; snprintf(errbuf, sizeof(errbuf), _("link information can not be set/updated as the " "kerberos principal belongs to an ldap object")); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } /* * Check the link information. If there is already a link * existing then this operation is not allowed. */ { char **linkdns=NULL; int j=0; if ((st=krb5_get_linkdn(context, entry, &linkdns)) != 0) { snprintf(errbuf, sizeof(errbuf), _("Failed getting object references")); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } if (linkdns != NULL) { st = EINVAL; snprintf(errbuf, sizeof(errbuf), _("kerberos principal is already linked to a ldap " "object")); k5_setmsg(context, st, "%s", errbuf); for (j=0; linkdns[j] != NULL; ++j) free (linkdns[j]); free (linkdns); goto cleanup; } } establish_links = TRUE; } if (entry->mask & KADM5_LAST_SUCCESS) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->last_success)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastSuccessfulAuth", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } if (entry->mask & KADM5_LAST_FAILED) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->last_failed)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastFailedAuth", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free(strval[0]); } if (entry->mask & KADM5_FAIL_AUTH_COUNT) { krb5_kvno fail_auth_count; fail_auth_count = entry->fail_auth_count; if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) fail_auth_count++; st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_REPLACE, fail_auth_count); if (st != 0) goto cleanup; } else if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) { int attr_mask = 0; krb5_boolean has_fail_count; /* Check if the krbLoginFailedCount attribute exists. (Through * krb5 1.8.1, it wasn't set in new entries.) 
*/ st = krb5_get_attributes_mask(context, entry, &attr_mask); if (st != 0) goto cleanup; has_fail_count = ((attr_mask & KDB_FAIL_AUTH_COUNT_ATTR) != 0); /* * If the client library and server supports RFC 4525, * then use it to increment by one the value of the * krbLoginFailedCount attribute. Otherwise, assert the * (provided) old value by deleting it before adding. */ #ifdef LDAP_MOD_INCREMENT if (ldap_server_handle->server_info->modify_increment && has_fail_count) { st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_INCREMENT, 1); if (st != 0) goto cleanup; } else { #endif /* LDAP_MOD_INCREMENT */ if (has_fail_count) { st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_DELETE, entry->fail_auth_count); if (st != 0) goto cleanup; } st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_ADD, entry->fail_auth_count + 1); if (st != 0) goto cleanup; #ifdef LDAP_MOD_INCREMENT } #endif } else if (optype == ADD_PRINCIPAL) { /* Initialize krbLoginFailedCount in new entries to help avoid a * race during the first failed login. */ st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_ADD, 0); } if (entry->mask & KADM5_MAX_LIFE) { if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxticketlife", LDAP_MOD_REPLACE, entry->max_life)) != 0) goto cleanup; } if (entry->mask & KADM5_MAX_RLIFE) { if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxrenewableage", LDAP_MOD_REPLACE, entry->max_renewable_life)) != 0) goto cleanup; } if (entry->mask & KADM5_ATTRIBUTES) { if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbticketflags", LDAP_MOD_REPLACE, entry->attributes)) != 0) goto cleanup; } if (entry->mask & KADM5_PRINCIPAL) { memset(strval, 0, sizeof(strval)); strval[0] = user; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalname", LDAP_MOD_REPLACE, strval)) != 0) goto cleanup; } if (entry->mask & KADM5_PRINC_EXPIRE_TIME) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->expiration)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalexpiration", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } if (entry->mask & KADM5_PW_EXPIRATION) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpasswordexpiration", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } if (entry->mask & KADM5_POLICY || entry->mask & KADM5_KEY_HIST) { memset(&princ_ent, 0, sizeof(princ_ent)); for (tl_data=entry->tl_data; tl_data; tl_data=tl_data->tl_data_next) { if (tl_data->tl_data_type == KRB5_TL_KADM_DATA) { if ((st = krb5_lookup_tl_kadm_data(tl_data, &princ_ent)) != 0) { goto cleanup; } break; } } } if (entry->mask & KADM5_POLICY) { if (princ_ent.aux_attributes & KADM5_POLICY) { memset(strval, 0, sizeof(strval)); if ((st = krb5_ldap_name_to_policydn (context, princ_ent.policy, &polname)) != 0) goto cleanup; strval[0] = polname; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, strval)) != 0) goto cleanup; } else { st = EINVAL; k5_setmsg(context, st, "Password policy value null"); goto cleanup; } } else if (entry->mask & KADM5_LOAD && found_entry == TRUE) { /* * a load is special in that existing entries must have attrs that * removed. 
*/ if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, NULL)) != 0) goto cleanup; } if (entry->mask & KADM5_POLICY_CLR) { if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_DELETE, NULL)) != 0) goto cleanup; } if (entry->mask & KADM5_KEY_HIST) { bersecretkey = krb5_encode_histkey(&princ_ent); if (bersecretkey == NULL) { st = ENOMEM; goto cleanup; } st = krb5_add_ber_mem_ldap_mod(&mods, "krbpwdhistory", LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, bersecretkey); if (st != 0) goto cleanup; free_berdata(bersecretkey); bersecretkey = NULL; } if (entry->mask & KADM5_KEY_DATA || entry->mask & KADM5_KVNO) { krb5_kvno mkvno; if ((st=krb5_dbe_lookup_mkvno(context, entry, &mkvno)) != 0) goto cleanup; bersecretkey = krb5_encode_krbsecretkey (entry->key_data, entry->n_key_data, mkvno); if (bersecretkey == NULL) { st = ENOMEM; goto cleanup; } /* An empty list of bervals is only accepted for modify operations, * not add operations. */ if (bersecretkey[0] != NULL || !create_standalone) { st = krb5_add_ber_mem_ldap_mod(&mods, "krbprincipalkey", LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, bersecretkey); if (st != 0) goto cleanup; } if (!(entry->mask & KADM5_PRINCIPAL)) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpasswordexpiration", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } /* Update last password change whenever a new key is set */ { krb5_timestamp last_pw_changed; if ((st=krb5_dbe_lookup_last_pwd_change(context, entry, &last_pw_changed)) != 0) goto cleanup; memset(strval, 0, sizeof(strval)); if ((strval[0] = getstringtime(last_pw_changed)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastPwdChange", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } } /* Modify Key data ends here */ /* Auth indicators will also be stored in krbExtraData when processing * tl_data. 
*/ st = update_ldap_mod_auth_ind(context, entry, &mods); if (st != 0) goto cleanup; /* Set tl_data */ if (entry->tl_data != NULL) { int count = 0; struct berval **ber_tl_data = NULL; krb5_tl_data *ptr; krb5_timestamp unlock_time; for (ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) { if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE #ifdef SECURID || ptr->tl_data_type == KRB5_TL_DB_ARGS #endif || ptr->tl_data_type == KRB5_TL_KADM_DATA || ptr->tl_data_type == KDB_TL_USER_INFO || ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL || ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK) continue; count++; } if (count != 0) { int j; ber_tl_data = (struct berval **) calloc (count + 1, sizeof (struct berval*)); if (ber_tl_data == NULL) { st = ENOMEM; goto cleanup; } for (j = 0, ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) { /* Ignore tl_data that are stored in separate directory * attributes */ if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE #ifdef SECURID || ptr->tl_data_type == KRB5_TL_DB_ARGS #endif || ptr->tl_data_type == KRB5_TL_KADM_DATA || ptr->tl_data_type == KDB_TL_USER_INFO || ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL || ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK) continue; if ((st = tl_data2berval (ptr, &ber_tl_data[j])) != 0) break; j++; } if (st == 0) { ber_tl_data[count] = NULL; st=krb5_add_ber_mem_ldap_mod(&mods, "krbExtraData", LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, ber_tl_data); } free_berdata(ber_tl_data); if (st != 0) goto cleanup; } if ((st=krb5_dbe_lookup_last_admin_unlock(context, entry, &unlock_time)) != 0) goto cleanup; if (unlock_time != 0) { /* Update last admin unlock */ memset(strval, 0, sizeof(strval)); if ((strval[0] = getstringtime(unlock_time)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastAdminUnlock", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } } /* Directory specific attribute */ if (xargs.tktpolicydn != NULL) { int tmask=0; if (strlen(xargs.tktpolicydn) != 0) { st = checkattributevalue(ld, xargs.tktpolicydn, "objectclass", policyclass, &tmask); CHECK_CLASS_VALIDITY(st, tmask, _("ticket policy object value: ")); strval[0] = xargs.tktpolicydn; strval[1] = NULL; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_REPLACE, strval)) != 0) goto cleanup; } else { /* if xargs.tktpolicydn is a empty string, then delete * already existing krbticketpolicyreference attr */ if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_DELETE, NULL)) != 0) goto cleanup; } } if (establish_links == TRUE) { memset(strval, 0, sizeof(strval)); strval[0] = xargs.linkdn; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbObjectReferences", LDAP_MOD_REPLACE, strval)) != 0) goto cleanup; } /* * in case mods is NULL then return * not sure but can happen in a modprinc * so no need to return an error * addprinc will at least have the principal name * and the keys passed in */ if (mods == NULL) goto cleanup; if (create_standalone == TRUE) { memset(strval, 0, sizeof(strval)); strval[0] = "krbprincipal"; strval[1] = "krbprincipalaux"; strval[2] = "krbTicketPolicyAux"; if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0) goto cleanup; st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL); if (st == LDAP_ALREADY_EXISTS && entry->mask & KADM5_LOAD) { /* a load operation must replace an existing entry */ st = ldap_delete_ext_s(ld, standalone_principal_dn, NULL, NULL); if (st != LDAP_SUCCESS) { 
snprintf(errbuf, sizeof(errbuf), _("Principal delete failed (trying to replace " "entry): %s"), ldap_err2string(st)); st = translate_ldap_error (st, OP_ADD); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } else { st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL); } } if (st != LDAP_SUCCESS) { snprintf(errbuf, sizeof(errbuf), _("Principal add failed: %s"), ldap_err2string(st)); st = translate_ldap_error (st, OP_ADD); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } } else { /* * Here existing ldap object is modified and can be related * to any attribute, so always ensure that the ldap * object is extended with all the kerberos related * objectclasses so that there are no constraint * violations. */ { char *attrvalues[] = {"krbprincipalaux", "krbTicketPolicyAux", NULL}; int p, q, r=0, amask=0; if ((st=checkattributevalue(ld, (xargs.dn) ? xargs.dn : principal_dn, "objectclass", attrvalues, &amask)) != 0) goto cleanup; memset(strval, 0, sizeof(strval)); for (p=1, q=0; p<=2; p<<=1, ++q) { if ((p & amask) == 0) strval[r++] = attrvalues[q]; } if (r != 0) { if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0) goto cleanup; } } if (xargs.dn != NULL) st=ldap_modify_ext_s(ld, xargs.dn, mods, NULL, NULL); else st = ldap_modify_ext_s(ld, principal_dn, mods, NULL, NULL); if (st != LDAP_SUCCESS) { snprintf(errbuf, sizeof(errbuf), _("User modification failed: %s"), ldap_err2string(st)); st = translate_ldap_error (st, OP_MOD); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) entry->fail_auth_count++; } cleanup: if (user) free(user); if (filtuser) free(filtuser); free_xargs(xargs); if (standalone_principal_dn) free(standalone_principal_dn); if (principal_dn) free (principal_dn); if (polname != NULL) free(polname); for (tre = 0; tre < ntrees; tre++) free(subtreelist[tre]); free(subtreelist); if (subtree) free (subtree); if (bersecretkey) { for (l=0; bersecretkey[l]; ++l) { if (bersecretkey[l]->bv_val) free (bersecretkey[l]->bv_val); free (bersecretkey[l]); } free (bersecretkey); } if (keys) free (keys); ldap_mods_free(mods, 1); ldap_osa_free_princ_ent(&princ_ent); ldap_msgfree(result); krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle); return(st); }
Vulnerable
[ "CWE-476", "CWE-90" ]
krb5
e1caf6fb74981da62039846931ebdffed71309d1
1.116790692583288e+38
835
Fix flaws in LDAP DN checking KDB_TL_USER_INFO tl-data is intended to be internal to the LDAP KDB module, and not used in disk or wire principal entries. Prevent kadmin clients from sending KDB_TL_USER_INFO tl-data by giving it a type number less than 256 and filtering out type numbers less than 256 in kadm5_create_principal_3(). (We already filter out low type numbers in kadm5_modify_principal()). In the LDAP KDB module, if containerdn and linkdn are both specified in a put_principal operation, check both linkdn and the computed standalone_principal_dn for container membership. To that end, factor out the checks into helper functions and call them on all applicable client-influenced DNs. CVE-2018-5729: In MIT krb5 1.6 or later, an authenticated kadmin user with permission to add principals to an LDAP Kerberos database can cause a null dereference in kadmind, or circumvent a DN container check, by supplying tagged data intended to be internal to the database module. Thanks to Sharwan Ram and Pooja Anil for discovering the potential null dereference. CVE-2018-5730: In MIT krb5 1.6 or later, an authenticated kadmin user with permission to add principals to an LDAP Kerberos database can circumvent a DN containership check by supplying both a "linkdn" and "containerdn" database argument, or by supplying a DN string which is a left extension of a container DN string but is not hierarchically within the container DN. ticket: 8643 (new) tags: pullup target_version: 1.16-next target_version: 1.15-next
1
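CVE-2018-5730 above hinges on the difference between a suffix match and hierarchical containment: "cn=u,ou=x,dc=r" is inside "ou=x,dc=r", while "cn=uou=x,dc=r" merely ends with the container string. A self-contained sketch of a containment test that closes that gap (the helper name is an assumption; MIT's fix factors comparable checks into its own helpers):

#include <stdbool.h>
#include <string.h>
#include <strings.h>

static bool dn_in_container(const char *dn, const char *container)
{
    size_t dnlen = strlen(dn), clen = strlen(container);

    if (clen == 0)
        return true;                 /* empty container matches everything */
    if (dnlen < clen || strcasecmp(dn + dnlen - clen, container) != 0)
        return false;                /* not even a case-insensitive suffix */
    /* equal DNs pass; otherwise demand an RDN boundary (',') right before
     * the suffix -- the check the vulnerable left-extension test lacked */
    return dnlen == clen || dn[dnlen - clen - 1] == ',';
}

Production code would compare normalized DNs rather than raw strings; the boundary test is the part the CVE is about.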
typval2string(typval_T *tv, int convert) { garray_T ga; char_u *retval; #ifdef FEAT_FLOAT char_u numbuf[NUMBUFLEN]; #endif if (convert && tv->v_type == VAR_LIST) { ga_init2(&ga, sizeof(char), 80); if (tv->vval.v_list != NULL) { list_join(&ga, tv->vval.v_list, (char_u *)"\n", TRUE, FALSE, 0); if (tv->vval.v_list->lv_len > 0) ga_append(&ga, NL); } ga_append(&ga, NUL); retval = (char_u *)ga.ga_data; } #ifdef FEAT_FLOAT else if (convert && tv->v_type == VAR_FLOAT) { vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); retval = vim_strsave(numbuf); } #endif else retval = vim_strsave(tv_get_string(tv)); return retval; }
Safe
[ "CWE-786", "CWE-119", "CWE-787" ]
vim
fe6fb267e6ee5c5da2f41889e4e0e0ac5bf4b89d
1.5070003893267637e+38
31
patch 8.2.4206: condition with many "(" causes a crash Problem: Condition with many "(" causes a crash. Solution: Limit recursion to 1000.
0
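The fix description ("limit recursion to 1000") is a standard stack-depth guard. A toy recursive-descent parser showing the mechanism; this illustrates the technique and is not Vim's evaluator:

#include <stddef.h>

#define MAX_DEPTH 1000   /* the patch caps recursion at 1000 */

/* each '(' recurses once, so pathological nesting is rejected
 * instead of overflowing the C stack */
static int parse_parens(const char **s, int depth)
{
    if (depth > MAX_DEPTH)
        return -1;
    while (**s == '(') {
        ++*s;
        if (parse_parens(s, depth + 1) < 0)
            return -1;
        if (**s != ')')
            return -1;   /* unbalanced */
        ++*s;
    }
    return 0;
}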
void QPaintEngineEx::updateState(const QPaintEngineState &) { // do nothing... }
Safe
[ "CWE-787" ]
qtbase
6b400e3147dcfd8cc3a393ace1bd118c93762e0c
7.780796722424617e+37
4
Improve fix for avoiding huge number of tiny dashes Some pathological cases were not caught by the previous fix. Fixes: QTBUG-95239 Pick-to: 6.2 6.1 5.15 Change-Id: I0337ee3923ff93ccb36c4d7b810a9c0667354cc5 Reviewed-by: Robert Löhning <robert.loehning@qt.io>
0
TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsAcrossDatabaseWithTargetUuid) { _createCollection(_opCtx.get(), _sourceNss); auto dbName = _sourceNss.db().toString(); auto uuid = UUID::gen(); auto uuidDoc = BSON("ui" << uuid); auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _targetNssDifferentDb.ns() << "dropTarget" << true); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), dbName, uuidDoc["ui"], cmd, {})); ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss)); ASSERT_EQUALS(uuid, _getCollectionUuid(_opCtx.get(), _targetNssDifferentDb)); }
Safe
[ "CWE-20" ]
mongo
35c1b1f588f04926a958ad2fe4d9c59d79f81e8b
2.2292785604600706e+37
12
SERVER-35636 renameCollectionForApplyOps checks for complete namespace
0
ves_icall_System_CurrentSystemTimeZone_GetTimeZoneData (guint32 year, MonoArray **data, MonoArray **names) { #ifndef PLATFORM_WIN32 MonoDomain *domain = mono_domain_get (); struct tm start, tt; time_t t; long int gmtoff; int is_daylight = 0, day; char tzone [64]; MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (data); MONO_CHECK_ARG_NULL (names); mono_gc_wbarrier_generic_store (data, (MonoObject*) mono_array_new (domain, mono_defaults.int64_class, 4)); mono_gc_wbarrier_generic_store (names, (MonoObject*) mono_array_new (domain, mono_defaults.string_class, 2)); /* * no info is better than crashing: we'll need our own tz data * to make this work properly, anyway. The range is probably * reduced to 1970 .. 2037 because that is what mktime is * guaranteed to support (we get into an infinite loop * otherwise). */ memset (&start, 0, sizeof (start)); start.tm_mday = 1; start.tm_year = year-1900; t = mktime (&start); if ((year < 1970) || (year > 2037) || (t == -1)) { t = time (NULL); tt = *localtime (&t); strftime (tzone, sizeof (tzone), "%Z", &tt); mono_array_setref ((*names), 0, mono_string_new (domain, tzone)); mono_array_setref ((*names), 1, mono_string_new (domain, tzone)); return 1; } gmtoff = gmt_offset (&start, t); /* For each day of the year, calculate the tm_gmtoff. */ for (day = 0; day < 365; day++) { t += 3600*24; tt = *localtime (&t); /* Daylight saving starts or ends here. */ if (gmt_offset (&tt, t) != gmtoff) { struct tm tt1; time_t t1; /* Try to find the exact hour when daylight saving starts/ends. */ t1 = t; do { t1 -= 3600; tt1 = *localtime (&t1); } while (gmt_offset (&tt1, t1) != gmtoff); /* Try to find the exact minute when daylight saving starts/ends. */ do { t1 += 60; tt1 = *localtime (&t1); } while (gmt_offset (&tt1, t1) == gmtoff); t1+=gmtoff; strftime (tzone, sizeof (tzone), "%Z", &tt); /* Write data, if we're already in daylight saving, we're done. */ if (is_daylight) { mono_array_setref ((*names), 0, mono_string_new (domain, tzone)); mono_array_set ((*data), gint64, 1, ((gint64)t1 + EPOCH_ADJUST) * 10000000L); return 1; } else { mono_array_setref ((*names), 1, mono_string_new (domain, tzone)); mono_array_set ((*data), gint64, 0, ((gint64)t1 + EPOCH_ADJUST) * 10000000L); is_daylight = 1; } /* This is only set once when we enter daylight saving. 
*/ mono_array_set ((*data), gint64, 2, (gint64)gmtoff * 10000000L); mono_array_set ((*data), gint64, 3, (gint64)(gmt_offset (&tt, t) - gmtoff) * 10000000L); gmtoff = gmt_offset (&tt, t); } } if (!is_daylight) { strftime (tzone, sizeof (tzone), "%Z", &tt); mono_array_setref ((*names), 0, mono_string_new (domain, tzone)); mono_array_setref ((*names), 1, mono_string_new (domain, tzone)); mono_array_set ((*data), gint64, 0, 0); mono_array_set ((*data), gint64, 1, 0); mono_array_set ((*data), gint64, 2, (gint64) gmtoff * 10000000L); mono_array_set ((*data), gint64, 3, 0); } return 1; #else MonoDomain *domain = mono_domain_get (); TIME_ZONE_INFORMATION tz_info; FILETIME ft; int i; int err, tz_id; tz_id = GetTimeZoneInformation (&tz_info); if (tz_id == TIME_ZONE_ID_INVALID) return 0; MONO_CHECK_ARG_NULL (data); MONO_CHECK_ARG_NULL (names); mono_gc_wbarrier_generic_store (data, mono_array_new (domain, mono_defaults.int64_class, 4)); mono_gc_wbarrier_generic_store (names, mono_array_new (domain, mono_defaults.string_class, 2)); for (i = 0; i < 32; ++i) if (!tz_info.DaylightName [i]) break; mono_array_setref ((*names), 1, mono_string_new_utf16 (domain, tz_info.DaylightName, i)); for (i = 0; i < 32; ++i) if (!tz_info.StandardName [i]) break; mono_array_setref ((*names), 0, mono_string_new_utf16 (domain, tz_info.StandardName, i)); if ((year <= 1601) || (year > 30827)) { /* * According to MSDN, the MS time functions can't handle dates outside * this interval. */ return 1; } /* even if the timezone has no daylight savings it may have Bias (e.g. GMT+13 it seems) */ if (tz_id != TIME_ZONE_ID_UNKNOWN) { tz_info.StandardDate.wYear = year; convert_to_absolute_date(&tz_info.StandardDate); err = SystemTimeToFileTime (&tz_info.StandardDate, &ft); //g_assert(err); if (err == 0) return 0; mono_array_set ((*data), gint64, 1, FILETIME_ADJUST + (((guint64)ft.dwHighDateTime<<32) | ft.dwLowDateTime)); tz_info.DaylightDate.wYear = year; convert_to_absolute_date(&tz_info.DaylightDate); err = SystemTimeToFileTime (&tz_info.DaylightDate, &ft); //g_assert(err); if (err == 0) return 0; mono_array_set ((*data), gint64, 0, FILETIME_ADJUST + (((guint64)ft.dwHighDateTime<<32) | ft.dwLowDateTime)); } mono_array_set ((*data), gint64, 2, (tz_info.Bias + tz_info.StandardBias) * -600000000LL); mono_array_set ((*data), gint64, 3, (tz_info.DaylightBias - tz_info.StandardBias) * -600000000LL); return 1; #endif }
Safe
[ "CWE-264" ]
mono
035c8587c0d8d307e45f1b7171a0d337bb451f1e
1.3966086558274305e+38
160
Allow only primitive types/enums in RuntimeHelpers.InitializeArray ().
0
__execlists_context_pin(struct intel_context *ce, struct intel_engine_cs *engine) { void *vaddr; int ret; GEM_BUG_ON(!ce->state); ret = intel_context_active_acquire(ce); if (ret) goto err; GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); vaddr = i915_gem_object_pin_map(ce->state->obj, i915_coherent_map_type(engine->i915) | I915_MAP_OVERRIDE); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); goto unpin_active; } ce->lrc_desc = lrc_descriptor(ce, engine); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; __execlists_update_reg_state(ce, engine); return 0; unpin_active: intel_context_active_release(ce); err: return ret; }
Safe
[]
linux
bc8a76a152c5f9ef3b48104154a65a68a8b76946
1.4595914641764664e+38
32
drm/i915/gen9: Clear residual context state on context switch Intel ID: PSIRT-TA-201910-001 CVEID: CVE-2019-14615 Intel GPU Hardware prior to Gen11 does not clear EU state during a context switch. This can result in information leakage between contexts. For Gen8 and Gen9, hardware provides a mechanism for fast cleardown of the EU state, by issuing a PIPE_CONTROL with bit 27 set. We can use this in a context batch buffer to explicitly cleardown the state on every context switch. As this workaround is already in place for gen8, we can borrow the code verbatim for Gen9. Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> Signed-off-by: Akeem G Abodunrin <akeem.g.abodunrin@intel.com> Cc: Kumar Valsan Prathap <prathap.kumar.valsan@intel.com> Cc: Chris Wilson <chris.p.wilson@intel.com> Cc: Balestrieri Francesco <francesco.balestrieri@intel.com> Cc: Bloomfield Jon <jon.bloomfield@intel.com> Cc: Dutt Sudeep <sudeep.dutt@intel.com>
0
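The message says the Gen8/Gen9 workaround is a PIPE_CONTROL with bit 27 set, emitted from a context batch buffer. A heavily hedged sketch follows: the GFX_OP_PIPE_CONTROL encoding matches the driver's header, but the flag name and the helper below are assumptions reconstructed from the message alone, not the driver's code.

typedef unsigned int u32;

#define GFX_OP_PIPE_CONTROL(len) \
    ((0x3 << 29) | (0x3 << 27) | (0x2 << 24) | ((len) - 2))
#define PIPE_CONTROL_EU_STATE_CLEAR (1u << 27)  /* "bit 27" per the message */

/* emit a 6-dword PIPE_CONTROL requesting the EU state cleardown */
static u32 *emit_eu_state_clear(u32 *cs)
{
    *cs++ = GFX_OP_PIPE_CONTROL(6);
    *cs++ = PIPE_CONTROL_EU_STATE_CLEAR;
    *cs++ = 0;   /* no post-sync address */
    *cs++ = 0;
    *cs++ = 0;   /* no post-sync data */
    *cs++ = 0;
    return cs;
}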
bool field_is_partition_charset(Field *field) { if (!(field->type() == MYSQL_TYPE_STRING) && !(field->type() == MYSQL_TYPE_VARCHAR)) return FALSE; { CHARSET_INFO *cs= ((Field_str*)field)->charset(); if (!(field->type() == MYSQL_TYPE_STRING) || !(cs->state & MY_CS_BINSORT)) return TRUE; return FALSE; } }
Safe
[]
mysql-server
be901b60ae59c93848c829d1b0b2cb523ab8692e
3.0699851777062487e+38
13
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT. Analysis ======== CREATE TABLE of InnoDB table with a partition name which exceeds the path limit can cause the server to exit. During the preparation of the partition name, there was no check to identify whether the complete path name for partition exceeds the max supported path length, causing the server to exit during subsequent processing. Fix === During the preparation of partition name, check and report an error if the partition path name exceeds the maximum path name limit. This is a 5.5 patch.
0
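The analysis pinpoints the missing guard: the full on-disk path of a partition ("<dir>/<table>#P#<partition>", where dir includes the database directory) was never checked against the path limit. A hedged sketch of that guard; FN_REFLEN and the "#P#" infix are MySQL's real conventions, while the helper name and layout are assumptions:

#include <string.h>
#include <errno.h>

#define FN_REFLEN 512   /* MySQL's maximum file-path length */

static int check_partition_path(const char *dir, const char *table,
                                const char *part)
{
    size_t need = strlen(dir) + 1 + strlen(table)
                + strlen("#P#") + strlen(part) + 1;  /* trailing NUL */
    return need > FN_REFLEN ? ENAMETOOLONG : 0;      /* error, don't exit */
}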
int sctp_packet_transmit(struct sctp_packet *packet) { struct sctp_transport *tp = packet->transport; struct sctp_association *asoc = tp->asoc; struct sctphdr *sh; struct sk_buff *nskb; struct sctp_chunk *chunk, *tmp; struct sock *sk; int err = 0; int padding; /* How much padding do we need? */ __u8 has_data = 0; struct dst_entry *dst = tp->dst; unsigned char *auth = NULL; /* pointer to auth in skb data */ __u32 cksum_buf_len = sizeof(struct sctphdr); SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet); /* Do NOT generate a chunkless packet. */ if (list_empty(&packet->chunk_list)) return err; /* Set up convenience variables... */ chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); sk = chunk->skb->sk; /* Allocate the new skb. */ nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC); if (!nskb) goto nomem; /* Make sure the outbound skb has enough header room reserved. */ skb_reserve(nskb, packet->overhead + LL_MAX_HEADER); /* Set the owning socket so that we know where to get the * destination IP address. */ sctp_packet_set_owner_w(nskb, sk); if (!sctp_transport_dst_check(tp)) { sctp_transport_route(tp, NULL, sctp_sk(sk)); if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { sctp_assoc_sync_pmtu(sk, asoc); } } dst = dst_clone(tp->dst); skb_dst_set(nskb, dst); if (!dst) goto no_route; /* Build the SCTP header. */ sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); skb_reset_transport_header(nskb); sh->source = htons(packet->source_port); sh->dest = htons(packet->destination_port); /* From 6.8 Adler-32 Checksum Calculation: * After the packet is constructed (containing the SCTP common * header and one or more control or DATA chunks), the * transmitter shall: * * 1) Fill in the proper Verification Tag in the SCTP common * header and initialize the checksum field to 0's. */ sh->vtag = htonl(packet->vtag); sh->checksum = 0; /** * 6.10 Bundling * * An endpoint bundles chunks by simply including multiple * chunks in one outbound SCTP packet. ... */ /** * 3.2 Chunk Field Descriptions * * The total length of a chunk (including Type, Length and * Value fields) MUST be a multiple of 4 bytes. If the length * of the chunk is not a multiple of 4 bytes, the sender MUST * pad the chunk with all zero bytes and this padding is not * included in the chunk length field. The sender should * never pad with more than 3 bytes. * * [This whole comment explains WORD_ROUND() below.] */ SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { list_del_init(&chunk->list); if (sctp_chunk_is_data(chunk)) { /* 6.3.1 C4) When data is in flight and when allowed * by rule C5, a new RTT measurement MUST be made each * round trip. Furthermore, new RTT measurements * SHOULD be made no more than once per round-trip * for a given destination transport address. */ if (!tp->rto_pending) { chunk->rtt_in_progress = 1; tp->rto_pending = 1; } has_data = 1; } padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len; if (padding) memset(skb_put(chunk->skb, padding), 0, padding); /* if this is the auth chunk that we are adding, * store pointer where it will be added and put * the auth into the packet. */ if (chunk == packet->auth) auth = skb_tail_pointer(nskb); cksum_buf_len += chunk->skb->len; memcpy(skb_put(nskb, chunk->skb->len), chunk->skb->data, chunk->skb->len); SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n", "*** Chunk", chunk, sctp_cname(SCTP_ST_CHUNK( chunk->chunk_hdr->type)), chunk->has_tsn ? 
"TSN" : "No TSN", chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0, "length", ntohs(chunk->chunk_hdr->length), "chunk->skb->len", chunk->skb->len, "rtt_in_progress", chunk->rtt_in_progress); /* * If this is a control chunk, this is our last * reference. Free data chunks after they've been * acknowledged or have failed. */ if (!sctp_chunk_is_data(chunk)) sctp_chunk_free(chunk); } /* SCTP-AUTH, Section 6.2 * The sender MUST calculate the MAC as described in RFC2104 [2] * using the hash function H as described by the MAC Identifier and * the shared association key K based on the endpoint pair shared key * described by the shared key identifier. The 'data' used for the * computation of the AUTH-chunk is given by the AUTH chunk with its * HMAC field set to zero (as shown in Figure 6) followed by all * chunks that are placed after the AUTH chunk in the SCTP packet. */ if (auth) sctp_auth_calculate_hmac(asoc, nskb, (struct sctp_auth_chunk *)auth, GFP_ATOMIC); /* 2) Calculate the Adler-32 checksum of the whole packet, * including the SCTP common header and all the * chunks. * * Note: Adler-32 is no longer applicable, as has been replaced * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. */ if (!sctp_checksum_disable) { if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); /* 3) Put the resultant value into the checksum field in the * common header, and leave the rest of the bits unchanged. */ sh->checksum = sctp_end_cksum(crc32); } else { /* no need to seed pseudo checksum for SCTP */ nskb->ip_summed = CHECKSUM_PARTIAL; nskb->csum_start = (skb_transport_header(nskb) - nskb->head); nskb->csum_offset = offsetof(struct sctphdr, checksum); } } /* IP layer ECN support * From RFC 2481 * "The ECN-Capable Transport (ECT) bit would be set by the * data sender to indicate that the end-points of the * transport protocol are ECN-capable." * * Now setting the ECT bit all the time, as it should not cause * any problems protocol-wise even if our peer ignores it. * * Note: The works for IPv6 layer checks this bit too later * in transmission. See IP6_ECN_flow_xmit(). */ (*tp->af_specific->ecn_capable)(nskb->sk); /* Set up the IP options. */ /* BUG: not implemented * For v4 this all lives somewhere in sk->sk_opt... */ /* Dump that on IP! */ if (asoc) { asoc->stats.opackets++; if (asoc->peer.last_sent_to != tp) /* Considering the multiple CPU scenario, this is a * "correcter" place for last_sent_to. --xguo */ asoc->peer.last_sent_to = tp; } if (has_data) { struct timer_list *timer; unsigned long timeout; /* Restart the AUTOCLOSE timer when sending data. */ if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; if (!mod_timer(timer, jiffies + timeout)) sctp_association_hold(asoc); } } SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n", nskb->len); nskb->local_df = packet->ipfragok; (*tp->af_specific->sctp_xmit)(nskb, tp); out: sctp_packet_reset(packet); return err; no_route: kfree_skb(nskb); IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); /* FIXME: Returning the 'err' will effect all the associations * associated with a socket, although only one of the paths of the * association is unreachable. * The real failure of a transport or association can be passed on * to the user via notifications. So setting this error may not be * required. 
*/ /* err = -EHOSTUNREACH; */ err: /* Control chunks are unreliable so just drop them. DATA chunks * will get resent or dropped later. */ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { list_del_init(&chunk->list); if (!sctp_chunk_is_data(chunk)) sctp_chunk_free(chunk); } goto out; nomem: err = -ENOMEM; goto err; }
Safe
[]
linux
196d67593439b03088913227093e374235596e33
1.10764158523402e+38
255
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call The current SCTP stack is lacking a mechanism to have per association statistics. This is an implementation modeled after OpenSolaris' SCTP_GET_ASSOC_STATS. Userspace part will follow on lksctp if/when there is a general ACK on this. V4: - Move ipackets++ before q->immediate.func() for consistency reasons - Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid returning bogus RTO values - return asoc->rto_min when max_obs_rto value has not changed V3: - Increase ictrlchunks in sctp_assoc_bh_rcv() as well - Move ipackets++ to sctp_inq_push() - return 0 when no rto updates took place since the last call V2: - Implement partial retrieval of stat struct to cope for future expansion - Kill the rtxpackets counter as it cannot be precise anyway - Rename outseqtsns to outofseqtsns to make it clearer that these are out of sequence unexpected TSNs - Move asoc->ipackets++ under a lock to avoid potential miscounts - Fold asoc->opackets++ into the already existing asoc check - Kill unneeded (q->asoc) test when increasing rtxchunks - Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0) - Don't count SHUTDOWNs as SACKs - Move SCTP_GET_ASSOC_STATS to the private space API - Adjust the len check in sctp_getsockopt_assoc_stats() to allow for future struct growth - Move association statistics in their own struct - Update idupchunks when we send a SACK with dup TSNs - return min_rto in max_rto when RTO has not changed. Also return the transport when max_rto last changed. Signed-off: Michele Baldessari <michele@acksyn.org> Acked-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
poolClear(STRING_POOL *pool) { if (!pool->freeBlocks) pool->freeBlocks = pool->blocks; else { BLOCK *p = pool->blocks; while (p) { BLOCK *tem = p->next; p->next = pool->freeBlocks; pool->freeBlocks = p; p = tem; } } pool->blocks = NULL; pool->start = NULL; pool->ptr = NULL; pool->end = NULL; }
Safe
[ "CWE-119" ]
libexpat
ba0f9c3b40c264b8dd392e02a7a060a8fa54f032
6.107611305072614e+37
18
CVE-2015-1283 Sanity check size calculations. r=peterv, a=abillings https://sourceforge.net/p/expat/bugs/528/
0
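"Sanity check size calculations" refers to proving the pool-growth arithmetic cannot overflow before trusting its result. A self-contained sketch in that spirit (expat's actual fix guards its internal poolGrow computations):

#include <stdint.h>

static int checked_double(size_t n, size_t *out)
{
    if (n > SIZE_MAX / 2)
        return 0;        /* doubling would wrap size_t: refuse to grow */
    *out = n * 2;
    return 1;
}

The caller treats a 0 return as an allocation failure, which turns a silent heap overflow into a clean error path.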
valid_spell_word(char_u *word, char_u *end) { char_u *p; if (enc_utf8 && !utf_valid_string(word, end)) return FALSE; for (p = word; *p != NUL && p < end; p += mb_ptr2len(p)) if (*p < ' ' || (p[0] == '/' && p[1] == NUL)) return FALSE; return TRUE; }
Safe
[ "CWE-476", "CWE-787" ]
vim
6669de1b235843968e88844ca6d3c8dec4b01a9e
3.069201883180995e+38
11
patch 9.0.0240: crash when using ":mkspell" with an empty .dic file Problem: Crash when using ":mkspell" with an empty .dic file. Solution: Check for an empty word tree.
0
void AuthorizationManager::grantInternalAuthorization(const std::string& principalName) { Principal* principal = new Principal(PrincipalName(principalName, "local")); ActionSet actions; actions.addAllActions(); addAuthorizedPrincipal(principal); fassert(16581, acquirePrivilege(Privilege(PrivilegeSet::WILDCARD_RESOURCE, actions), principal->getName()).isOK()); }
Safe
[ "CWE-264" ]
mongo
23344f8b7506df694f66999693ee3c00dfd6afae
1.3093293229581465e+38
9
SERVER-9983 Do not needlessly lock when looking up privileges for the __system@local user. Uncorrected, this can cause replica set heartbeats to stall behind operations that hold the read lock for a long time.
0
static int mxf_parse_handle_essence(MXFContext *mxf) { AVIOContext *pb = mxf->fc->pb; int64_t ret; if (mxf->parsing_backward) { return mxf_seek_to_previous_partition(mxf); } else { if (!mxf->footer_partition) { av_log(mxf->fc, AV_LOG_TRACE, "no FooterPartition\n"); return 0; } av_log(mxf->fc, AV_LOG_TRACE, "seeking to FooterPartition\n"); /* remember where we were so we don't end up seeking further back than this */ mxf->last_forward_tell = avio_tell(pb); if (!(pb->seekable & AVIO_SEEKABLE_NORMAL)) { av_log(mxf->fc, AV_LOG_INFO, "file is not seekable - not parsing FooterPartition\n"); return -1; } /* seek to FooterPartition and parse backward */ if ((ret = avio_seek(pb, mxf->run_in + mxf->footer_partition, SEEK_SET)) < 0) { av_log(mxf->fc, AV_LOG_ERROR, "failed to seek to FooterPartition @ 0x%" PRIx64 " (%"PRId64") - partial file?\n", mxf->run_in + mxf->footer_partition, ret); return ret; } mxf->current_partition = NULL; mxf->parsing_backward = 1; } return 1; }
Safe
[ "CWE-703", "CWE-834" ]
FFmpeg
900f39692ca0337a98a7cf047e4e2611071810c2
1.451487813203953e+38
38
avformat/mxfdec: Fix DoS issues in mxf_read_index_entry_array() Fixes: 20170829A.mxf Co-Author: 张洪亮(望初)" <wangchu.zhl@alibaba-inc.com> Found-by: Xiaohei and Wangchu from Alibaba Security Team Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
0
command_process_versions_cell(var_cell_t *cell, or_connection_t *conn) { int highest_supported_version = 0; const uint8_t *cp, *end; if (conn->link_proto != 0 || conn->_base.state != OR_CONN_STATE_OR_HANDSHAKING || (conn->handshake_state && conn->handshake_state->received_versions)) { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Received a VERSIONS cell on a connection with its version " "already set to %d; dropping", (int) conn->link_proto); return; } tor_assert(conn->handshake_state); end = cell->payload + cell->payload_len; for (cp = cell->payload; cp+1 < end; ++cp) { uint16_t v = ntohs(get_uint16(cp)); if (is_or_protocol_version_known(v) && v > highest_supported_version) highest_supported_version = v; } if (!highest_supported_version) { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Couldn't find a version in common between my version list and the " "list in the VERSIONS cell; closing connection."); connection_mark_for_close(TO_CONN(conn)); return; } else if (highest_supported_version == 1) { /* Negotiating version 1 makes no sense, since version 1 has no VERSIONS * cells. */ log_fn(LOG_PROTOCOL_WARN, LD_OR, "Used version negotiation protocol to negotiate a v1 connection. " "That's crazily non-compliant. Closing connection."); connection_mark_for_close(TO_CONN(conn)); return; } conn->link_proto = highest_supported_version; conn->handshake_state->received_versions = 1; log_info(LD_OR, "Negotiated version %d with %s:%d; sending NETINFO.", highest_supported_version, safe_str(conn->_base.address), conn->_base.port); tor_assert(conn->link_proto >= 2); if (connection_or_send_netinfo(conn) < 0) { connection_mark_for_close(TO_CONN(conn)); return; } }
Safe
[ "CWE-264" ]
tor
a74e7fd40f1a77eb4000d8216bb5b80cdd8a6193
8.155721636926372e+37
47
Reject create cells on outgoing OR connections from bridges
0
e_ews_config_lookup_result_init (EEwsConfigLookupResult *ews_result) { }
Safe
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
1.4729531105635895e+38
3
I#27 - SSL Certificates are not validated. This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
0
void RGWListBucket_ObjStore_S3::send_response() { if (op_ret < 0) { set_req_state_err(s, op_ret); } dump_errno(s); // Explicitly use chunked transfer encoding so that we can stream the result // to the user without having to wait for the full length of it. end_header(s, this, "application/xml", CHUNKED_TRANSFER_ENCODING); dump_start(s); if (op_ret < 0) { return; } if (list_versions) { send_versioned_response(); return; } s->formatter->open_object_section_in_ns("ListBucketResult", XMLNS_AWS_S3); if (strcasecmp(encoding_type.c_str(), "url") == 0) { s->formatter->dump_string("EncodingType", "url"); encode_key = true; } RGWListBucket_ObjStore_S3::send_common_response(); if (op_ret >= 0) { vector<rgw_bucket_dir_entry>::iterator iter; for (iter = objs.begin(); iter != objs.end(); ++iter) { rgw_obj_key key(iter->key); s->formatter->open_array_section("Contents"); if (encode_key) { string key_name; url_encode(key.name, key_name); s->formatter->dump_string("Key", key_name); } else { s->formatter->dump_string("Key", key.name); } dump_time(s, "LastModified", &iter->meta.mtime); s->formatter->dump_format("ETag", "\"%s\"", iter->meta.etag.c_str()); s->formatter->dump_int("Size", iter->meta.accounted_size); auto& storage_class = rgw_placement_rule::get_canonical_storage_class(iter->meta.storage_class); s->formatter->dump_string("StorageClass", storage_class.c_str()); dump_owner(s, iter->meta.owner, iter->meta.owner_display_name); if (s->system_request) { s->formatter->dump_string("RgwxTag", iter->tag); } if (iter->meta.appendable) { s->formatter->dump_string("Type", "Appendable"); } else { s->formatter->dump_string("Type", "Normal"); } s->formatter->close_section(); } } s->formatter->dump_string("Marker", marker.name); if (is_truncated && !next_marker.empty()) { s->formatter->dump_string("NextMarker", next_marker.name); } s->formatter->close_section(); rgw_flush_formatter_and_reset(s, s->formatter); }
Safe
[ "CWE-79" ]
ceph
fce0b267446d6f3f631bb4680ebc3527bbbea002
2.3072761853242554e+38
61
rgw: reject unauthenticated response-header actions Signed-off-by: Matt Benjamin <mbenjamin@redhat.com> Reviewed-by: Casey Bodley <cbodley@redhat.com> (cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
0
select_result_interceptor(THD *thd_arg): select_result(thd_arg), suppress_my_ok(false) { DBUG_ENTER("select_result_interceptor::select_result_interceptor"); DBUG_PRINT("enter", ("this %p", this)); DBUG_VOID_RETURN; } /* Remove gcc warning */
Safe
[ "CWE-416" ]
server
4681b6f2d8c82b4ec5cf115e83698251963d80d5
1.819184614086639e+38
7
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob. The bug was that the in_vector array in Item_func_in was allocated in the statement arena, not in table->expr_arena. Revert part of 5acd391e8b2d; instead, change the arena correctly in fix_all_session_vcol_exprs(). Remove TABLE_ARENA, which was introduced in 5acd391e8b2d to force item tree changes to be rolled back (because they were allocated in the wrong arena and didn't persist; now they do).
0
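The MDEV-26281 message turns on allocating from an arena whose lifetime matches the object that points into it. A toy C sketch of the bug class and the fix's shape, with an illustrative region allocator; nothing here is MariaDB's real Query_arena API:

    #include <stddef.h>
    #include <string.h>

    /* Toy region allocator. The bug class: an object outliving the
     * region it was carved from (use-after-poison once the statement
     * arena is freed). */
    struct arena { char *buf; size_t used, cap; };

    void *arena_alloc(struct arena *a, size_t n)
    {
        if (a->used + n > a->cap)
            return NULL;
        void *p = a->buf + a->used;
        a->used += n;
        return p;
    }

    /* Fix shape: take the arena as a parameter so the caller picks the
     * one whose lifetime matches (the table's arena, not the statement's). */
    char *dup_into(struct arena *long_lived, const char *s)
    {
        size_t n = strlen(s) + 1;
        char *p = arena_alloc(long_lived, n);
        if (p)
            memcpy(p, s, n);
        return p;
    }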
CURLcode Curl_http(struct Curl_easy *data, bool *done) { struct connectdata *conn = data->conn; CURLcode result = CURLE_OK; struct HTTP *http; Curl_HttpReq httpreq; const char *te = ""; /* transfer-encoding */ const char *request; const char *httpstring; struct dynbuf req; char *altused = NULL; const char *p_accept; /* Accept: string */ /* Always consider the DO phase done after this function call, even if there may be parts of the request that are not yet sent, since we can deal with the rest of the request in the PERFORM phase. */ *done = TRUE; if(conn->transport != TRNSPRT_QUIC) { if(conn->httpversion < 20) { /* unless the connection is re-used and already http2 */ switch(conn->negnpn) { case CURL_HTTP_VERSION_2: conn->httpversion = 20; /* we know we're on HTTP/2 now */ result = Curl_http2_switched(data, NULL, 0); if(result) return result; break; case CURL_HTTP_VERSION_1_1: /* continue with HTTP/1.1 when explicitly requested */ break; default: /* Check if user wants to use HTTP/2 with clear TCP*/ #ifdef USE_NGHTTP2 if(data->state.httpwant == CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE) { #ifndef CURL_DISABLE_PROXY if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) { /* We don't support HTTP/2 proxies yet. Also it's debatable whether or not this setting should apply to HTTP/2 proxies. */ infof(data, "Ignoring HTTP/2 prior knowledge due to proxy"); break; } #endif DEBUGF(infof(data, "HTTP/2 over clean TCP")); conn->httpversion = 20; result = Curl_http2_switched(data, NULL, 0); if(result) return result; } #endif break; } } else { /* prepare for a http2 request */ result = Curl_http2_setup(data, conn); if(result) return result; } } http = data->req.p.http; DEBUGASSERT(http); result = Curl_http_host(data, conn); if(result) return result; result = Curl_http_useragent(data); if(result) return result; Curl_http_method(data, conn, &request, &httpreq); /* setup the authentication headers */ { char *pq = NULL; if(data->state.up.query) { pq = aprintf("%s?%s", data->state.up.path, data->state.up.query); if(!pq) return CURLE_OUT_OF_MEMORY; } result = Curl_http_output_auth(data, conn, request, httpreq, (pq ? pq : data->state.up.path), FALSE); free(pq); if(result) return result; } Curl_safefree(data->state.aptr.ref); if(data->state.referer && !Curl_checkheaders(data, STRCONST("Referer"))) { data->state.aptr.ref = aprintf("Referer: %s\r\n", data->state.referer); if(!data->state.aptr.ref) return CURLE_OUT_OF_MEMORY; } if(!Curl_checkheaders(data, STRCONST("Accept-Encoding")) && data->set.str[STRING_ENCODING]) { Curl_safefree(data->state.aptr.accept_encoding); data->state.aptr.accept_encoding = aprintf("Accept-Encoding: %s\r\n", data->set.str[STRING_ENCODING]); if(!data->state.aptr.accept_encoding) return CURLE_OUT_OF_MEMORY; } else Curl_safefree(data->state.aptr.accept_encoding); #ifdef HAVE_LIBZ /* we only consider transfer-encoding magic if libz support is built-in */ result = Curl_transferencode(data); if(result) return result; #endif result = Curl_http_body(data, conn, httpreq, &te); if(result) return result; p_accept = Curl_checkheaders(data, STRCONST("Accept"))?NULL:"Accept: */*\r\n"; result = Curl_http_resume(data, conn, httpreq); if(result) return result; result = Curl_http_range(data, httpreq); if(result) return result; httpstring = get_http_string(data, conn); /* initialize a dynamic send-buffer */ Curl_dyn_init(&req, DYN_HTTP_REQUEST); /* make sure the header buffer is reset - if there are leftovers from a previous transfer */ Curl_dyn_reset(&data->state.headerb); /* add the main request stuff */ /* GET/HEAD/POST/PUT */ result = Curl_dyn_addf(&req, "%s ", request); if(!result) result = Curl_http_target(data, conn, &req); if(result) { Curl_dyn_free(&req); return result; } #ifndef CURL_DISABLE_ALTSVC if(conn->bits.altused && !Curl_checkheaders(data, STRCONST("Alt-Used"))) { altused = aprintf("Alt-Used: %s:%d\r\n", conn->conn_to_host.name, conn->conn_to_port); if(!altused) { Curl_dyn_free(&req); return CURLE_OUT_OF_MEMORY; } } #endif result = Curl_dyn_addf(&req, " HTTP/%s\r\n" /* HTTP version */ "%s" /* host */ "%s" /* proxyuserpwd */ "%s" /* userpwd */ "%s" /* range */ "%s" /* user agent */ "%s" /* accept */ "%s" /* TE: */ "%s" /* accept-encoding */ "%s" /* referer */ "%s" /* Proxy-Connection */ "%s" /* transfer-encoding */ "%s",/* Alt-Used */ httpstring, (data->state.aptr.host?data->state.aptr.host:""), data->state.aptr.proxyuserpwd? data->state.aptr.proxyuserpwd:"", data->state.aptr.userpwd?data->state.aptr.userpwd:"", (data->state.use_range && data->state.aptr.rangeline)? data->state.aptr.rangeline:"", (data->set.str[STRING_USERAGENT] && *data->set.str[STRING_USERAGENT] && data->state.aptr.uagent)? data->state.aptr.uagent:"", p_accept?p_accept:"", data->state.aptr.te?data->state.aptr.te:"", (data->set.str[STRING_ENCODING] && *data->set.str[STRING_ENCODING] && data->state.aptr.accept_encoding)? data->state.aptr.accept_encoding:"", (data->state.referer && data->state.aptr.ref)? data->state.aptr.ref:"" /* Referer: <data> */, #ifndef CURL_DISABLE_PROXY (conn->bits.httpproxy && !conn->bits.tunnel_proxy && !Curl_checkheaders(data, STRCONST("Proxy-Connection")) && !Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection")))? "Proxy-Connection: Keep-Alive\r\n":"", #else "", #endif te, altused ? altused : "" ); /* clear userpwd and proxyuserpwd to avoid re-using old credentials * from re-used connections */ Curl_safefree(data->state.aptr.userpwd); Curl_safefree(data->state.aptr.proxyuserpwd); free(altused); if(result) { Curl_dyn_free(&req); return result; } if(!(conn->handler->flags&PROTOPT_SSL) && conn->httpversion != 20 && (data->state.httpwant == CURL_HTTP_VERSION_2)) { /* append HTTP2 upgrade magic stuff to the HTTP request if it isn't done over SSL */ result = Curl_http2_request_upgrade(&req, data); if(result) { Curl_dyn_free(&req); return result; } } result = Curl_http_cookies(data, conn, &req); if(!result) result = Curl_add_timecondition(data, &req); if(!result) result = Curl_add_custom_headers(data, FALSE, &req); if(!result) { http->postdata = NULL; /* nothing to post at this point */ if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) Curl_pgrsSetUploadSize(data, 0); /* nothing */ /* bodysend takes ownership of the 'req' memory on success */ result = Curl_http_bodysend(data, conn, &req, httpreq); } if(result) { Curl_dyn_free(&req); return result; } if((http->postsize > -1) && (http->postsize <= data->req.writebytecount) && (http->sending != HTTPSEND_REQUEST)) data->req.upload_done = TRUE; if(data->req.writebytecount) { /* if a request-body has been sent off, we make sure this progress is noted properly */ Curl_pgrsSetUploadCounter(data, data->req.writebytecount); if(Curl_pgrsUpdate(data)) result = CURLE_ABORTED_BY_CALLBACK; if(!http->postsize) { /* already sent the entire request body, mark the "upload" as complete */ infof(data, "upload completely sent off: %" CURL_FORMAT_CURL_OFF_T " out of %" CURL_FORMAT_CURL_OFF_T " bytes", data->req.writebytecount, http->postsize); data->req.upload_done = TRUE; data->req.keepon &= ~KEEP_SEND; /* we're done writing */ data->req.exp100 = EXP100_SEND_DATA; /* already sent */ Curl_expire_done(data, EXPIRE_100_TIMEOUT); } } if((conn->httpversion == 20) && data->req.upload_chunky) /* upload_chunky was set above to set up the request in a chunky fashion, but is disabled here again to avoid that the chunked encoded version is actually used when sending the request body over h2 */ data->req.upload_chunky = FALSE; return result; }
Safe
[]
curl
48d7064a49148f03942380967da739dcde1cdc24
3.1964172762150126e+38
284
cookie: apply limits - Send no more than 150 cookies per request - Cap the max length used for a cookie: header to 8K - Cap the max number of received Set-Cookie: headers to 50 Bug: https://curl.se/docs/CVE-2022-32205.html CVE-2022-32205 Reported-by: Harry Sintonen Closes #9048
0
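The CVE-2022-32205 message states three concrete caps: at most 150 cookies per request, an 8K limit on the generated cookie: header, and at most 50 received Set-Cookie: headers. A hypothetical C sketch applying the first two caps while building the header; this is an illustration of the stated limits, not libcurl's actual implementation:

    #include <stdio.h>
    #include <string.h>

    #define MAX_COOKIES_PER_REQUEST 150   /* cap named in the commit message */
    #define MAX_COOKIE_HEADER_LEN  8192   /* 8K cap named in the commit message */

    struct cookie { const char *name, *value; };

    /* Hypothetical header builder: stops adding cookies once either
     * documented limit would be exceeded. */
    size_t build_cookie_header(char *out, size_t outlen,
                               const struct cookie *jar, size_t count)
    {
        size_t used = 0, sent = 0;
        if (outlen == 0)
            return 0;
        for (size_t i = 0; i < count && sent < MAX_COOKIES_PER_REQUEST; i++) {
            char piece[4096];
            int n = snprintf(piece, sizeof(piece), "%s%s=%s",
                             used ? "; " : "", jar[i].name, jar[i].value);
            if (n < 0 || (size_t)n >= sizeof(piece))
                break;                    /* oversized single cookie */
            if (used + (size_t)n >= MAX_COOKIE_HEADER_LEN ||
                used + (size_t)n >= outlen)
                break;                    /* would blow the 8K header cap */
            memcpy(out + used, piece, (size_t)n);
            used += (size_t)n;
            sent++;
        }
        out[used] = '\0';
        return used;
    }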
static void tower_abort_transfers (struct lego_usb_tower *dev) { if (dev == NULL) return; /* shutdown transfer */ if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; mb(); if (dev->udev) usb_kill_urb (dev->interrupt_in_urb); } if (dev->interrupt_out_busy && dev->udev) usb_kill_urb(dev->interrupt_out_urb); }
Safe
[ "CWE-476" ]
linux
2fae9e5a7babada041e2e161699ade2447a01989
2.250865752079808e+37
15
usb: misc: legousbtower: Fix NULL pointer dereference This patch fixes a NULL pointer dereference caused by a race condition in the probe function of the legousbtower driver. It re-structures the probe function to only register the interface after successfully reading the board's firmware ID. The probe function does not deregister the usb interface after an error receiving the device's firmware ID. The device file registered (/dev/usb/legousbtower%d) may be read/written globally before the probe function returns. When tower_delete is called in the probe function (after an r/w has been initiated), core dev structures are deleted while the file operation functions are still running. If the 0 address is mappable on the machine, this vulnerability can be used to create a Local Privilege Escalation exploit via a write-what-where condition by remapping dev->interrupt_out_buffer in tower_write. A forged USB device and local program execution would be required for LPE. The USB device would have to delay the control message in tower_probe and accept the control urb in tower_open whilst guest code initiated a write to the device file as tower_delete is called from the error path in tower_probe. This bug has existed since 2003. Patch tested with an emulated device. Reported-by: James Patrick-Evans <james@jmp-e.com> Tested-by: James Patrick-Evans <james@jmp-e.com> Signed-off-by: James Patrick-Evans <james@jmp-e.com> Cc: stable <stable@vger.kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
0
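The legousbtower message describes re-ordering probe so the device node only becomes visible after every fallible step has succeeded. A generic, self-contained C sketch of that ordering pattern; all names are illustrative stubs, not the driver's real functions:

    /* All names here are illustrative, not the legousbtower API. */
    struct device_ctx { int firmware_id; int registered; };

    static int read_firmware_id(struct device_ctx *dev)
    {
        dev->firmware_id = 42;   /* pretend the control message succeeded */
        return 0;
    }
    static int register_chardev(struct device_ctx *dev)
    {
        dev->registered = 1;     /* from here on, userspace can open us */
        return 0;
    }
    static void cleanup(struct device_ctx *dev) { dev->registered = 0; }

    /* The fix's shape: do every step that can fail *before* anything
     * user-visible exists, so open()/write() can never race a
     * half-initialized device. */
    int device_probe(struct device_ctx *dev)
    {
        int err = read_firmware_id(dev);
        if (err)
            goto fail;               /* nothing registered yet: no race */
        err = register_chardev(dev);
        if (err)
            goto fail;
        return 0;
    fail:
        cleanup(dev);
        return err;
    }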
static int ZEND_FASTCALL ZEND_BW_NOT_SPEC_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); bitwise_not_function(&EX_T(opline->result.u.var).tmp_var, &opline->op1.u.constant TSRMLS_CC); ZEND_VM_NEXT_OPCODE(); }
Safe
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
2.8450722965134877e+38
10
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191; trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
0
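The php-src fix rejects paths with embedded NUL bytes. When the buffer length is known independently, the usual check is that strlen() must agree with it, since an interior '\0' makes strlen() stop early. A minimal sketch with a hypothetical helper, not PHP's actual macro:

    #include <string.h>
    #include <stdbool.h>

    /* Returns true only if 'path' of 'len' bytes contains no embedded
     * NUL: if strlen() stops early, a '\0' was smuggled inside. */
    bool path_is_clean(const char *path, size_t len)
    {
        return strlen(path) == len;
    }

For example, a 13-byte value "safe.txt\0.php" has strlen() 8, so it is rejected before it can reach any filesystem call that would silently truncate at the NUL.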
static void bdrv_bochs_init(void) { bdrv_register(&bdrv_bochs); }
Safe
[ "CWE-369" ]
qemu
8e53abbc20d08ae3ec30c2054e1161314ad9501d
1.1731434833832224e+37
4
bochs: Check extent_size header field (CVE-2014-0142) This fixes two possible division by zero crashes: In bochs_open() and in seek_to_sector(). Signed-off-by: Kevin Wolf <kwolf@redhat.com> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Reviewed-by: Max Reitz <mreitz@redhat.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
0
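CVE-2014-0142 above is a division by zero on a header field later used as a divisor in seek_to_sector(). The shape of the fix is to validate the untrusted extent_size once at open time; the power-of-two check below is an assumption for illustration, not necessarily QEMU's exact condition:

    #include <stdint.h>

    /* Minimal shape of the fix: validate an untrusted divisor once at
     * open time instead of dividing by it later. */
    int check_extent_size(uint32_t extent_size)
    {
        if (extent_size == 0)
            return -1;                       /* would divide by zero */
        if (extent_size & (extent_size - 1))
            return -1;                       /* expect a power of two (assumed) */
        return 0;
    }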
onig_name_to_group_numbers(regex_t* reg, const UChar* name, const UChar* name_end, int** nums) { NameEntry* e = name_find(reg, name, name_end); if (IS_NULL(e)) return ONIGERR_UNDEFINED_NAME_REFERENCE; switch (e->back_num) { case 0: break; case 1: *nums = &(e->back_ref1); break; default: *nums = e->back_refs; break; } return e->back_num; }
Safe
[ "CWE-400", "CWE-399", "CWE-674" ]
oniguruma
4097828d7cc87589864fecf452f2cd46c5f37180
4.738707008844504e+37
19
fix #147: Stack Exhaustion Problem caused by some parsing functions in regcomp.c making recursive calls to themselves.
0
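The oniguruma fix bounds the recursion depth of regcomp.c's self-recursive parsing functions. A generic recursive-descent sketch with a depth counter threaded through each self-call; the limit value and names are illustrative, not oniguruma's:

    #define MAX_PARSE_DEPTH 512   /* illustrative limit */

    /* Hypothetical recursive-descent parser over a bracketed input:
     * every self-call passes depth + 1 and bails out before the C
     * stack can be exhausted by crafted deeply nested input. */
    int parse_node(const char **pp, const char *end, int depth)
    {
        if (depth > MAX_PARSE_DEPTH)
            return -1;                 /* fail instead of overflowing */
        while (*pp < end) {
            if (**pp == '(') {
                (*pp)++;
                int r = parse_node(pp, end, depth + 1);
                if (r < 0)
                    return r;
            } else if (**pp == ')') {
                (*pp)++;
                return 0;
            } else {
                (*pp)++;
            }
        }
        return 0;
    }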
static void io_kill_linked_timeout(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *link; bool cancelled = false; unsigned long flags; spin_lock_irqsave(&ctx->completion_lock, flags); link = req->link; /* * Can happen if a linked timeout fired and link had been like * req -> link t-out -> link t-out [-> ...] */ if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) { struct io_timeout_data *io = link->async_data; int ret; io_remove_next_linked(req); link->timeout.head = NULL; ret = hrtimer_try_to_cancel(&io->timer); if (ret != -1) { io_cqring_fill_event(link, -ECANCELED); io_commit_cqring(ctx); cancelled = true; } } req->flags &= ~REQ_F_LINK_TIMEOUT; spin_unlock_irqrestore(&ctx->completion_lock, flags); if (cancelled) { io_cqring_ev_posted(ctx); io_put_req(link); } }
Safe
[ "CWE-667" ]
linux
3ebba796fa251d042be42b929a2d916ee5c34a49
2.2362365425033714e+38
35
io_uring: ensure that SQPOLL thread is started for exit If we create it in a disabled state because IORING_SETUP_R_DISABLED is set on ring creation, we need to ensure that we've kicked the thread if we're exiting before it's been explicitly disabled. Otherwise we can run into a deadlock where exit is waiting to park the SQPOLL thread, but the SQPOLL thread itself is waiting to get a signal to start. That results in the below trace of both tasks hung, waiting on each other: INFO: task syz-executor458:8401 blocked for more than 143 seconds. Not tainted 5.11.0-next-20210226-syzkaller #0 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread_park fs/io_uring.c:7115 [inline] io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103 io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745 __io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840 io_uring_files_cancel include/linux/io_uring.h:47 [inline] do_exit+0x299/0x2a60 kernel/exit.c:780 do_group_exit+0x125/0x310 kernel/exit.c:922 __do_sys_exit_group kernel/exit.c:933 [inline] __se_sys_exit_group kernel/exit.c:931 [inline] __x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x43e899 RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7 RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899 RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000 RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000 R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0 R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001 INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds. task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294 INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds. Reported-by: syzbot+fb5458330b4442f2090d@syzkaller.appspotmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
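The io_uring deadlock above is a join-versus-start inversion: exit parks (waits on) a thread that is itself still waiting for its start signal. A self-contained pthread sketch of the fix's shape (kick the worker before waiting on it); the names are illustrative, not the kernel's SQPOLL machinery:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static bool started = false;

    static void *worker(void *arg)
    {
        pthread_mutex_lock(&lk);
        while (!started)              /* parked until explicitly enabled */
            pthread_cond_wait(&cv, &lk);
        pthread_mutex_unlock(&lk);
        /* ... do work, then return ... */
        return arg;
    }

    /* Without setting 'started' first, pthread_join() below would wait
     * forever on a worker that is waiting forever on us. */
    void shutdown_worker(pthread_t t)
    {
        pthread_mutex_lock(&lk);
        started = true;               /* the fix: kick it before waiting */
        pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&lk);
        pthread_join(t, NULL);        /* now guaranteed to finish */
    }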