func: string
target: string
cwe: list
project: string
commit_id: string
hash: string
size: int64
message: string
vul: int64

func:
virtual void visit(CharacterClass & /*ope*/) {}
target: Safe
cwe: [ "CWE-125" ]
project: cpp-peglib
commit_id: b3b29ce8f3acf3a32733d930105a17d7b0ba347e
hash: 2.6322275363806645e+38
size: 1
message:
Fix #122
vul: 0

func:
static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data) { struct kvm_hv *hv = to_kvm_hv(kvm); size_t size = ARRAY_SIZE(hv->hv_crash_param); if (WARN_ON_ONCE(index >= size)) return -EINVAL; hv->hv_crash_param[array_index_nospec(index, size)] = data; return 0; }
target: Safe
cwe: [ "CWE-476" ]
project: linux
commit_id: 919f4ebc598701670e80e31573a58f1f2d2bf918
hash: 1.7231647168857393e+38
size: 11
message:
KVM: x86: hyper-v: Fix Hyper-V context null-ptr-deref Reported by syzkaller: KASAN: null-ptr-deref in range [0x0000000000000140-0x0000000000000147] CPU: 1 PID: 8370 Comm: syz-executor859 Not tainted 5.11.0-syzkaller #0 RIP: 0010:synic_get arch/x86/kvm/hyperv.c:165 [inline] RIP: 0010:kvm_hv_set_sint_gsi arch/x86/kvm/hyperv.c:475 [inline] RIP: 0010:kvm_hv_irq_routing_update+0x230/0x460 arch/x86/kvm/hyperv.c:498 Call Trace: kvm_set_irq_routing+0x69b/0x940 arch/x86/kvm/../../../virt/kvm/irqchip.c:223 kvm_vm_ioctl+0x12d0/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3959 vfs_ioctl fs/ioctl.c:48 [inline] __do_sys_ioctl fs/ioctl.c:753 [inline] __se_sys_ioctl fs/ioctl.c:739 [inline] __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae Hyper-V context is lazily allocated until Hyper-V specific MSRs are accessed or SynIC is enabled. However, the syzkaller testcase sets irq routing table directly w/o enabling SynIC. This results in null-ptr-deref when accessing SynIC Hyper-V context. This patch fixes it. syzkaller source: https://syzkaller.appspot.com/x/repro.c?x=163342ccd00000 Reported-by: syzbot+6987f3b2dbd9eda95f12@syzkaller.appspotmail.com Fixes: 8f014550dfb1 ("KVM: x86: hyper-v: Make Hyper-V emulation enablement conditional") Signed-off-by: Wanpeng Li <wanpengli@tencent.com> Message-Id: <1614326399-5762-1-git-send-email-wanpengli@tencent.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
vul: 0

func:
static int mov_imm_reg(RAnal* anal, RAnalOp* op, ut16 code){ op->type = R_ANAL_OP_TYPE_MOV; op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code)); op->src[0] = anal_fill_im (anal, (st8)(code & 0xFF)); return op->size; }
target: Safe
cwe: [ "CWE-125" ]
project: radare2
commit_id: 77c47cf873dd55b396da60baa2ca83bbd39e4add
hash: 3.1106218348688295e+38
size: 6
message:
Fix #9903 - oobread in RAnal.sh
vul: 0

func:
static SQInteger table_filter(HSQUIRRELVM v) { SQObject &o = stack_get(v,1); SQTable *tbl = _table(o); SQObjectPtr ret = SQTable::Create(_ss(v),0); SQObjectPtr itr, key, val; SQInteger nitr; while((nitr = tbl->Next(false, itr, key, val)) != -1) { itr = (SQInteger)nitr; v->Push(o); v->Push(key); v->Push(val); if(SQ_FAILED(sq_call(v,3,SQTrue,SQFalse))) { return SQ_ERROR; } if(!SQVM::IsFalse(v->GetUp(-1))) { _table(ret)->NewSlot(key, val); } v->Pop(); } v->Push(ret); return 1; }
target: Safe
cwe: [ "CWE-703", "CWE-787" ]
project: squirrel
commit_id: a6413aa690e0bdfef648c68693349a7b878fe60d
hash: 2.4615522492879736e+38
size: 26
message:
fix in thread.call
vul: 0

func:
explicit PhiloxRandomOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, generator_.Init(ctx)); }
target: Safe
cwe: [ "CWE-703", "CWE-197" ]
project: tensorflow
commit_id: 27b417360cbd671ef55915e4bb6bb06af8b8a832
hash: 2.222943478870509e+38
size: 3
message:
Prevent `int64` to `int` truncation in `Shard` API usage. The function argument in `Shard` must be a function of two `int64` arguments. However, we are passing in a function with two `int` arguments. Thus, for large workloads, these arguments get truncated from positive `int64` values to negative `int` ones, resulting in a buffer out of bounds write. PiperOrigin-RevId: 332557334 Change-Id: I236c9a2e7f53580e520571da8ba941a3aa9fa0b5
vul: 0

func:
size_t curlWriter(char* data, size_t size, size_t nmemb, std::string* writerData) { if (writerData == nullptr) return 0; writerData->append(data, size*nmemb); return size * nmemb; }
target: Safe
cwe: [ "CWE-190" ]
project: exiv2
commit_id: c73d1e27198a389ce7caf52ac30f8e2120acdafd
hash: 3.1042991445366235e+38
size: 7
message:
Avoid negative integer overflow when `filesize < io_->tell()`. This fixes #791.
vul: 0

func:
capi_ctr_get(struct capi_ctr *ctr) { if (!try_module_get(ctr->owner)) return NULL; return ctr; }
target: Safe
cwe: [ "CWE-125" ]
project: linux
commit_id: 1f3e2e97c003f80c4b087092b225c8787ff91e4d
hash: 3.2777829399646135e+37
size: 6
message:
isdn: cpai: check ctr->cnr to avoid array index out of bound The cmtp_add_connection() would add a cmtp session to a controller and run a kernel thread to process cmtp. __module_get(THIS_MODULE); session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d", session->num); During this process, the kernel thread would call detach_capi_ctr() to detach a register controller. if the controller was not attached yet, detach_capi_ctr() would trigger an array-index-out-bounds bug. [ 46.866069][ T6479] UBSAN: array-index-out-of-bounds in drivers/isdn/capi/kcapi.c:483:21 [ 46.867196][ T6479] index -1 is out of range for type 'capi_ctr *[32]' [ 46.867982][ T6479] CPU: 1 PID: 6479 Comm: kcmtpd_ctr_0 Not tainted 5.15.0-rc2+ #8 [ 46.869002][ T6479] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 [ 46.870107][ T6479] Call Trace: [ 46.870473][ T6479] dump_stack_lvl+0x57/0x7d [ 46.870974][ T6479] ubsan_epilogue+0x5/0x40 [ 46.871458][ T6479] __ubsan_handle_out_of_bounds.cold+0x43/0x48 [ 46.872135][ T6479] detach_capi_ctr+0x64/0xc0 [ 46.872639][ T6479] cmtp_session+0x5c8/0x5d0 [ 46.873131][ T6479] ? __init_waitqueue_head+0x60/0x60 [ 46.873712][ T6479] ? cmtp_add_msgpart+0x120/0x120 [ 46.874256][ T6479] kthread+0x147/0x170 [ 46.874709][ T6479] ? set_kthread_struct+0x40/0x40 [ 46.875248][ T6479] ret_from_fork+0x1f/0x30 [ 46.875773][ T6479] Signed-off-by: Xiaolong Huang <butterflyhuangxx@gmail.com> Acked-by: Arnd Bergmann <arnd@arndb.de> Link: https://lore.kernel.org/r/20211008065830.305057-1-butterflyhuangxx@gmail.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
vul: 0

func:
static void construct_reply_common(struct smb_request *req, const char *inbuf, char *outbuf) { srv_set_message(outbuf,0,0,false); SCVAL(outbuf, smb_com, req->cmd); SIVAL(outbuf,smb_rcls,0); SCVAL(outbuf,smb_flg, FLAG_REPLY | (CVAL(inbuf,smb_flg) & FLAG_CASELESS_PATHNAMES)); SSVAL(outbuf,smb_flg2, (SVAL(inbuf,smb_flg2) & FLAGS2_UNICODE_STRINGS) | common_flags2); memset(outbuf+smb_pidhigh,'\0',(smb_tid-smb_pidhigh)); SSVAL(outbuf,smb_tid,SVAL(inbuf,smb_tid)); SSVAL(outbuf,smb_pid,SVAL(inbuf,smb_pid)); SSVAL(outbuf,smb_uid,SVAL(inbuf,smb_uid)); SSVAL(outbuf,smb_mid,SVAL(inbuf,smb_mid)); }
target: Safe
cwe: []
project: samba
commit_id: c116652a3050a8549b722ae8ab5f9a2bf9a33b9f
hash: 2.441270452329892e+38
size: 18
message:
In chain_reply, copy the subrequests' error to the main request
vul: 0

func:
PHP_METHOD(Phar, mount) { char *fname, *arch = NULL, *entry = NULL, *path, *actual; int fname_len, arch_len, entry_len; size_t path_len, actual_len; phar_archive_data *pphar; if (zend_parse_parameters(ZEND_NUM_ARGS(), "pp", &path, &path_len, &actual, &actual_len) == FAILURE) { return; } if (ZEND_SIZE_T_INT_OVFL(path_len) || ZEND_SIZE_T_INT_OVFL(actual_len)) { RETURN_FALSE; } fname = (char*)zend_get_executed_filename(); fname_len = (int)strlen(fname); #ifdef PHP_WIN32 phar_unixify_path_separators(fname, fname_len); #endif if (fname_len > 7 && !memcmp(fname, "phar://", 7) && SUCCESS == phar_split_fname(fname, fname_len, &arch, &arch_len, &entry, &entry_len, 2, 0)) { efree(entry); entry = NULL; if (path_len > 7 && !memcmp(path, "phar://", 7)) { zend_throw_exception_ex(phar_ce_PharException, 0, "Can only mount internal paths within a phar archive, use a relative path instead of \"%s\"", path); efree(arch); return; } carry_on2: if (NULL == (pphar = zend_hash_str_find_ptr(&(PHAR_G(phar_fname_map)), arch, arch_len))) { if (PHAR_G(manifest_cached) && NULL != (pphar = zend_hash_str_find_ptr(&cached_phars, arch, arch_len))) { if (SUCCESS == phar_copy_on_write(&pphar)) { goto carry_on; } } zend_throw_exception_ex(phar_ce_PharException, 0, "%s is not a phar archive, cannot mount", arch); if (arch) { efree(arch); } return; } carry_on: if (SUCCESS != phar_mount_entry(pphar, actual, (int)actual_len, path, (int)path_len)) { zend_throw_exception_ex(phar_ce_PharException, 0, "Mounting of %s to %s within phar %s failed", path, actual, arch); if (path && path == entry) { efree(entry); } if (arch) { efree(arch); } return; } if (entry && path && path == entry) { efree(entry); } if (arch) { efree(arch); } return; } else if (PHAR_G(phar_fname_map.u.flags) && NULL != (pphar = zend_hash_str_find_ptr(&(PHAR_G(phar_fname_map)), fname, fname_len))) { goto carry_on; } else if (PHAR_G(manifest_cached) && NULL != (pphar = zend_hash_str_find_ptr(&cached_phars, fname, fname_len))) { if (SUCCESS == phar_copy_on_write(&pphar)) { goto carry_on; } goto carry_on; } else if (SUCCESS == phar_split_fname(path, (int)path_len, &arch, &arch_len, &entry, &entry_len, 2, 0)) { path = entry; path_len = entry_len; goto carry_on2; } zend_throw_exception_ex(phar_ce_PharException, 0, "Mounting of %s to %s failed", path, actual); }
target: Safe
cwe: [ "CWE-281" ]
project: php-src
commit_id: e5c95234d87fcb8f6b7569a96a89d1e1544749a6
hash: 3.2793341526711067e+37
size: 85
message:
Fix bug #79082 - Files added to tar with Phar::buildFromIterator have all-access permissions
vul: 0

func:
static int ieee80211_fragment(struct ieee80211_tx_data *tx, struct sk_buff *skb, int hdrlen, int frag_threshold) { struct ieee80211_local *local = tx->local; struct ieee80211_tx_info *info; struct sk_buff *tmp; int per_fragm = frag_threshold - hdrlen - FCS_LEN; int pos = hdrlen + per_fragm; int rem = skb->len - hdrlen - per_fragm; if (WARN_ON(rem < 0)) return -EINVAL; /* first fragment was already added to queue by caller */ while (rem) { int fraglen = per_fragm; if (fraglen > rem) fraglen = rem; rem -= fraglen; tmp = dev_alloc_skb(local->tx_headroom + frag_threshold + tx->sdata->encrypt_headroom + IEEE80211_ENCRYPT_TAILROOM); if (!tmp) return -ENOMEM; __skb_queue_tail(&tx->skbs, tmp); skb_reserve(tmp, local->tx_headroom + tx->sdata->encrypt_headroom); /* copy control information */ memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); info = IEEE80211_SKB_CB(tmp); info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT); if (rem) info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; skb_copy_queue_mapping(tmp, skb); tmp->priority = skb->priority; tmp->dev = skb->dev; /* copy header and data */ memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen); memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen); pos += fraglen; } /* adjust first fragment's length */ skb->len = hdrlen + per_fragm; return 0; }
target: Vulnerable
cwe: [ "CWE-200" ]
project: linux
commit_id: 338f977f4eb441e69bb9a46eaa0ac715c931a67f
hash: 2.6011678706689736e+38
size: 59
message:
mac80211: fix fragmentation code, particularly for encryption The "new" fragmentation code (since my rewrite almost 5 years ago) erroneously sets skb->len rather than using skb_trim() to adjust the length of the first fragment after copying out all the others. This leaves the skb tail pointer pointing to after where the data originally ended, and thus causes the encryption MIC to be written at that point, rather than where it belongs: immediately after the data. The impact of this is that if software encryption is done, then a) encryption doesn't work for the first fragment, the connection becomes unusable as the first fragment will never be properly verified at the receiver, the MIC is practically guaranteed to be wrong b) we leak up to 8 bytes of plaintext (!) of the packet out into the air This is only mitigated by the fact that many devices are capable of doing encryption in hardware, in which case this can't happen as the tail pointer is irrelevant in that case. Additionally, fragmentation is not used very frequently and would normally have to be configured manually. Fix this by using skb_trim() properly. Cc: stable@vger.kernel.org Fixes: 2de8e0d999b8 ("mac80211: rewrite fragmentation") Reported-by: Jouni Malinen <j@w1.fi> Signed-off-by: Johannes Berg <johannes.berg@intel.com>
vul: 1

func:
static ext3_fsblk_t descriptor_loc(struct super_block *sb, ext3_fsblk_t logic_sb_block, int nr) { struct ext3_sb_info *sbi = EXT3_SB(sb); unsigned long bg, first_meta_bg; int has_super = 0; first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) || nr < first_meta_bg) return (logic_sb_block + nr + 1); bg = sbi->s_desc_per_block * nr; if (ext3_bg_has_super(sb, bg)) has_super = 1; return (has_super + ext3_group_first_block_no(sb, bg)); }
target: Safe
cwe: [ "CWE-20" ]
project: linux
commit_id: 8d0c2d10dd72c5292eda7a06231056a4c972e4cc
hash: 3.3350867290140455e+38
size: 18
message:
ext3: Fix format string issues ext3_msg() takes the printk prefix as the second parameter and the format string as the third parameter. Two callers of ext3_msg omit the prefix and pass the format string as the second parameter and the first parameter to the format string as the third parameter. In both cases this string comes from an arbitrary source. Which means the string may contain format string characters, which will lead to undefined and potentially harmful behavior. The issue was introduced in commit 4cf46b67eb("ext3: Unify log messages in ext3") and is fixed by this patch. CC: stable@vger.kernel.org Signed-off-by: Lars-Peter Clausen <lars@metafoo.de> Signed-off-by: Jan Kara <jack@suse.cz>
vul: 0

func:
rend_service_parse_intro_for_v3( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out) { ssize_t adjust, v2_ver_specific_len, ts_offset; /* This should only be called on v3 cells */ if (intro->version != 3) { if (err_msg_out) tor_asprintf(err_msg_out, "rend_service_parse_intro_for_v3() called with " "bad version %d on INTRODUCE%d cell (this is a bug)", intro->version, (int)(intro->type)); goto err; } /* * Check that we have at least enough to get auth_len: * * 1 octet for version, 1 for auth_type, 2 for auth_len */ if (plaintext_len < 4) { if (err_msg_out) { tor_asprintf(err_msg_out, "truncated plaintext of encrypted parted of " "version %d INTRODUCE%d cell", intro->version, (int)(intro->type)); } goto err; } /* * The rend_client_send_introduction() function over in rendclient.c is * broken (i.e., fails to match the spec) in such a way that we can't * change it without breaking the protocol. Specifically, it doesn't * emit auth_len when auth-type is REND_NO_AUTH, so everything is off * by two bytes after that. Calculate ts_offset and do everything from * the timestamp on relative to that to handle this dain bramage. */ intro->u.v3.auth_type = buf[1]; if (intro->u.v3.auth_type != REND_NO_AUTH) { intro->u.v3.auth_len = ntohs(get_uint16(buf + 2)); ts_offset = 4 + intro->u.v3.auth_len; } else { intro->u.v3.auth_len = 0; ts_offset = 2; } /* Check that auth len makes sense for this auth type */ if (intro->u.v3.auth_type == REND_BASIC_AUTH || intro->u.v3.auth_type == REND_STEALTH_AUTH) { if (intro->u.v3.auth_len != REND_DESC_COOKIE_LEN) { if (err_msg_out) { tor_asprintf(err_msg_out, "wrong auth data size %d for INTRODUCE%d cell, " "should be %d", (int)(intro->u.v3.auth_len), (int)(intro->type), REND_DESC_COOKIE_LEN); } goto err; } } /* Check that we actually have everything up through the timestamp */ if (plaintext_len < (size_t)(ts_offset)+4) { if (err_msg_out) { tor_asprintf(err_msg_out, "truncated plaintext of encrypted parted of " "version %d INTRODUCE%d cell", intro->version, (int)(intro->type)); } goto err; } if (intro->u.v3.auth_type != REND_NO_AUTH && intro->u.v3.auth_len > 0) { /* Okay, we can go ahead and copy auth_data */ intro->u.v3.auth_data = tor_malloc(intro->u.v3.auth_len); /* * We know we had an auth_len field in this case, so 4 is * always right. */ memcpy(intro->u.v3.auth_data, buf + 4, intro->u.v3.auth_len); } /* * From here on, the format is as in v2, so we call the v2 parser with * adjusted buffer and length. We are 4 + ts_offset octets in, but the * v2 parser expects to skip over a version byte at the start, so we * adjust by 3 + ts_offset. */ adjust = 3 + ts_offset; v2_ver_specific_len = rend_service_parse_intro_for_v2(intro, buf + adjust, plaintext_len - adjust, err_msg_out); /* Success in v2 parser */ if (v2_ver_specific_len >= 0) return v2_ver_specific_len + adjust; /* Failure in v2 parser; it will have provided an err_msg */ else return v2_ver_specific_len; err: return -1; }
target: Safe
cwe: [ "CWE-532" ]
project: tor
commit_id: 09ea89764a4d3a907808ed7d4fe42abfe64bd486
hash: 9.60757948502179e+37
size: 116
message:
Fix log-uninitialized-stack bug in rend_service_intro_established. Fixes bug 23490; bugfix on 0.2.7.2-alpha. TROVE-2017-008 CVE-2017-0380
vul: 0

func:
PHP_FUNCTION(image_type_to_mime_type) { long p_image_type; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &p_image_type) == FAILURE) { return; } ZVAL_STRING(return_value, (char*)php_image_type_to_mime_type(p_image_type), 1); }
target: Safe
cwe: []
project: php-src
commit_id: 87829c09a1d9e39bee994460d7ccf19dd20eda14
hash: 2.908720859841715e+38
size: 10
message:
Fix #70052: getimagesize() fails for very large and very small WBMP Very large WBMP (width or height greater than 2**31-1) cause an overflow and circumvent the size limitation of 2048x2048 px. Very small WBMP (less than 12 bytes) cause a read error and are not recognized. This patch fixes both bugs.
vul: 0

func:
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; err = sk_filter(sk, skb); if (err) return err; return __sock_queue_rcv_skb(sk, skb); }
target: Safe
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: b98b0bc8c431e3ceb4b26b0dfc8db509518fb290
hash: 2.0824790999316296e+38
size: 10
message:
net: avoid signed overflows for SO_{SND|RCV}BUFFORCE CAP_NET_ADMIN users should not be allowed to set negative sk_sndbuf or sk_rcvbuf values, as it can lead to various memory corruptions, crashes, OOM... Note that before commit 82981930125a ("net: cleanups in sock_setsockopt()"), the bug was even more serious, since SO_SNDBUF and SO_RCVBUF were vulnerable. This needs to be backported to all known linux kernels. Again, many thanks to syzkaller team for discovering this gem. Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Andrey Konovalov <andreyknvl@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
vul: 0

func:
Object* global_proxy() { return context()->global_proxy(); }
target: Safe
cwe: [ "CWE-20", "CWE-119" ]
project: node
commit_id: 530af9cb8e700e7596b3ec812bad123c9fa06356
hash: 2.7006116888230932e+38
size: 3
message:
v8: Interrupts must not mask stack overflow. Backport of https://codereview.chromium.org/339883002
vul: 0

func:
Envoy::StatusOr<ParserStatus> ServerConnectionImpl::onHeadersCompleteBase() { // Handle the case where response happens prior to request complete. It's up to upper layer code // to disconnect the connection but we shouldn't fire any more events since it doesn't make // sense. if (active_request_) { auto& headers = absl::get<RequestHeaderMapPtr>(headers_or_trailers_); ENVOY_CONN_LOG(trace, "Server: onHeadersComplete size={}", connection_, headers->size()); if (!handling_upgrade_ && headers->Connection()) { // If we fail to sanitize the request, return a 400 to the client if (!Utility::sanitizeConnectionHeader(*headers)) { absl::string_view header_value = headers->getConnectionValue(); ENVOY_CONN_LOG(debug, "Invalid nominated headers in Connection: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; RETURN_IF_ERROR( sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization)); return codecProtocolError("Invalid nominated headers in Connection."); } } // Inform the response encoder about any HEAD method, so it can set content // length and transfer encoding headers correctly. const Http::HeaderValues& header_values = Http::Headers::get(); active_request_->response_encoder_.setIsResponseToHeadRequest(parser_->methodName() == header_values.MethodValues.Head); active_request_->response_encoder_.setIsResponseToConnectRequest( parser_->methodName() == header_values.MethodValues.Connect); RETURN_IF_ERROR(handlePath(*headers, parser_->methodName())); ASSERT(active_request_->request_url_.empty()); headers->setMethod(parser_->methodName()); // Make sure the host is valid. auto details = HeaderUtility::requestHeadersValid(*headers); if (details.has_value()) { RETURN_IF_ERROR(sendProtocolError(details.value().get())); return codecProtocolError( "http/1.1 protocol error: request headers failed spec compliance checks"); } // Determine here whether we have a body or not. This uses the new RFC semantics where the // presence of content-length or chunked transfer-encoding indicates a body vs. a particular // method. If there is no body, we defer raising decodeHeaders() until the parser is flushed // with message complete. This allows upper layers to behave like HTTP/2 and prevents a proxy // scenario where the higher layers stream through and implicitly switch to chunked transfer // encoding because end stream with zero body length has not yet been indicated. if (parser_->isChunked() || (parser_->contentLength().has_value() && parser_->contentLength().value() > 0) || handling_upgrade_) { active_request_->request_decoder_->decodeHeaders(std::move(headers), false); // If the connection has been closed (or is closing) after decoding headers, pause the parser // so we return control to the caller. if (connection_.state() != Network::Connection::State::Open) { return parser_->pause(); } } else { deferred_end_stream_headers_ = true; } } return ParserStatus::Success; }
target: Safe
cwe: [ "CWE-416" ]
project: envoy
commit_id: fe7c69c248f4fe5a9080c7ccb35275b5218bb5ab
hash: 4.748487690840827e+37
size: 65
message:
internal redirect: fix a lifetime bug (#785) Signed-off-by: Alyssa Wilk <alyssar@chromium.org> Signed-off-by: Matt Klein <mklein@lyft.com> Signed-off-by: Pradeep Rao <pcrao@google.com>
vul: 0

func:
static void minix_read_inode(struct inode * inode) { if (INODE_VERSION(inode) == MINIX_V1) V1_minix_read_inode(inode); else V2_minix_read_inode(inode); }
target: Safe
cwe: [ "CWE-189" ]
project: linux-2.6
commit_id: f5fb09fa3392ad43fbcfc2f4580752f383ab5996
hash: 1.5383939134106042e+38
size: 7
message:
[PATCH] Fix for minix crash Mounting a (corrupt) minix filesystem with zero s_zmap_blocks gives a spectacular crash on my 2.6.17.8 system, no doubt because minix/inode.c does an unconditional minix_set_bit(0,sbi->s_zmap[0]->b_data); [akpm@osdl.org: make labels conistent while we're there] Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
vul: 0

func:
static int run_userns_fn(void *data) { struct userns_fn_data *d = data; char c; // we're not sharing with the parent any more, if it was a thread close(d->p[1]); if (read(d->p[0], &c, 1) != 1) return -1; close(d->p[0]); return d->fn(d->arg); }
target: Safe
cwe: [ "CWE-59", "CWE-61" ]
project: lxc
commit_id: 592fd47a6245508b79fe6ac819fe6d3b2c1289be
hash: 8.982434736011185e+36
size: 12
message:
CVE-2015-1335: Protect container mounts against symlinks When a container starts up, lxc sets up the container's inital fstree by doing a bunch of mounting, guided by the container configuration file. The container config is owned by the admin or user on the host, so we do not try to guard against bad entries. However, since the mount target is in the container, it's possible that the container admin could divert the mount with symbolic links. This could bypass proper container startup (i.e. confinement of a root-owned container by the restrictive apparmor policy, by diverting the required write to /proc/self/attr/current), or bypass the (path-based) apparmor policy by diverting, say, /proc to /mnt in the container. To prevent this, 1. do not allow mounts to paths containing symbolic links 2. do not allow bind mounts from relative paths containing symbolic links. Details: Define safe_mount which ensures that the container has not inserted any symbolic links into any mount targets for mounts to be done during container setup. The host's mount path may contain symbolic links. As it is under the control of the administrator, that's ok. So safe_mount begins the check for symbolic links after the rootfs->mount, by opening that directory. It opens each directory along the path using openat() relative to the parent directory using O_NOFOLLOW. When the target is reached, it mounts onto /proc/self/fd/<targetfd>. Use safe_mount() in mount_entry(), when mounting container proc, and when needed. In particular, safe_mount() need not be used in any case where: 1. the mount is done in the container's namespace 2. the mount is for the container's rootfs 3. the mount is relative to a tmpfs or proc/sysfs which we have just safe_mount()ed ourselves Since we were using proc/net as a temporary placeholder for /proc/sys/net during container startup, and proc/net is a symbolic link, use proc/tty instead. Update the lxc.container.conf manpage with details about the new restrictions. Finally, add a testcase to test some symbolic link possibilities. Reported-by: Roman Fiedler Signed-off-by: Serge Hallyn <serge.hallyn@ubuntu.com> Acked-by: Stéphane Graber <stgraber@ubuntu.com>
vul: 0

func:
static u32 gen_reqid(struct net *net) { struct xfrm_policy_walk walk; u32 start; int rc; static u32 reqid = IPSEC_MANUAL_REQID_MAX; start = reqid; do { ++reqid; if (reqid == 0) reqid = IPSEC_MANUAL_REQID_MAX+1; xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN); rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid); xfrm_policy_walk_done(&walk); if (rc != -EEXIST) return reqid; } while (reqid != start); return 0; }
target: Safe
cwe: [ "CWE-20", "CWE-269" ]
project: linux
commit_id: f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
hash: 5.234063346124696e+37
size: 20
message:
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <davem@davemloft.net> Suggested-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
vul: 0

func:
TRIO_PUBLIC_STRING int trio_xstring_equal_case_max TRIO_ARGS3((self, max, other), trio_string_t* self, size_t max, TRIO_CONST char* other) { assert(self); assert(other); return trio_equal_case_max(self->content, max, other); }
target: Safe
cwe: [ "CWE-190", "CWE-125" ]
project: FreeRDP
commit_id: 05cd9ea2290d23931f615c1b004d4b2e69074e27
hash: 1.5311070117623215e+38
size: 9
message:
Fixed TrioParse and trio_length limts. CVE-2020-4030 thanks to @antonio-morales for finding this.
vul: 0

func:
JBIG2Bitmap *JBIG2Bitmap::getSlice(unsigned int x, unsigned int y, unsigned int wA, unsigned int hA) { JBIG2Bitmap *slice; unsigned int xx, yy; if (!data) { return nullptr; } slice = new JBIG2Bitmap(0, wA, hA); if (slice->isOk()) { slice->clearToZero(); for (yy = 0; yy < hA; ++yy) { for (xx = 0; xx < wA; ++xx) { if (getPixel(x + xx, y + yy)) { slice->setPixel(xx, yy); } } } } else { delete slice; slice = nullptr; } return slice; }
target: Safe
cwe: [ "CWE-476", "CWE-190" ]
project: poppler
commit_id: 27354e9d9696ee2bc063910a6c9a6b27c5184a52
hash: 2.2219358553546082e+38
size: 25
message:
JBIG2Stream: Fix crash on broken file https://github.com/jeffssh/CVE-2021-30860 Thanks to David Warren for the heads up
vul: 0

func:
QPDFWriter::disableIncompatibleEncryption(int major, int minor, int extension_level) { if (! this->m->encrypted) { return; } bool disable = false; if (compareVersions(major, minor, 1, 3) < 0) { disable = true; } else { int V = QUtil::string_to_int( this->m->encryption_dictionary["/V"].c_str()); int R = QUtil::string_to_int( this->m->encryption_dictionary["/R"].c_str()); if (compareVersions(major, minor, 1, 4) < 0) { if ((V > 1) || (R > 2)) { disable = true; } } else if (compareVersions(major, minor, 1, 5) < 0) { if ((V > 2) || (R > 3)) { disable = true; } } else if (compareVersions(major, minor, 1, 6) < 0) { if (this->m->encrypt_use_aes) { disable = true; } } else if ((compareVersions(major, minor, 1, 7) < 0) || ((compareVersions(major, minor, 1, 7) == 0) && extension_level < 3)) { if ((V >= 5) || (R >= 5)) { disable = true; } } } if (disable) { QTC::TC("qpdf", "QPDFWriter forced version disabled encryption"); this->m->encrypted = false; } }
target: Safe
cwe: [ "CWE-125" ]
project: qpdf
commit_id: 1868a10f8b06631362618bfc85ca8646da4b4b71
hash: 1.7723650254598735e+38
size: 56
message:
Replace all atoi calls with QUtil::string_to_int The latter catches underflow/overflow.
vul: 0

func:
static int ssl_decrypt_buf( ssl_context *ssl ) { size_t i, padlen = 0, correct = 1; unsigned char tmp[POLARSSL_SSL_MAX_MAC_SIZE]; SSL_DEBUG_MSG( 2, ( "=> decrypt buf" ) ); if( ssl->in_msglen < ssl->transform_in->minlen ) { SSL_DEBUG_MSG( 1, ( "in_msglen (%d) < minlen (%d)", ssl->in_msglen, ssl->transform_in->minlen ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } if( ssl->transform_in->ivlen == 0 ) { #if defined(POLARSSL_ARC4_C) if( ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_MD5 || ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_SHA ) { arc4_crypt( (arc4_context *) ssl->transform_in->ctx_dec, ssl->in_msglen, ssl->in_msg, ssl->in_msg ); } else #endif #if defined(POLARSSL_CIPHER_NULL_CIPHER) if( ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_MD5 || ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA256 ) { } else #endif return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } else if( ssl->transform_in->ivlen == 12 ) { unsigned char *dec_msg; unsigned char *dec_msg_result; size_t dec_msglen; unsigned char add_data[13]; int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE; #if defined(POLARSSL_AES_C) && defined(POLARSSL_GCM_C) if( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_GCM_SHA256 || ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 ) { dec_msglen = ssl->in_msglen - ( ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen ); dec_msglen -= 16; dec_msg = ssl->in_msg + ( ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen ); dec_msg_result = ssl->in_msg; ssl->in_msglen = dec_msglen; memcpy( add_data, ssl->in_ctr, 8 ); add_data[8] = ssl->in_msgtype; add_data[9] = ssl->major_ver; add_data[10] = ssl->minor_ver; add_data[11] = ( ssl->in_msglen >> 8 ) & 0xFF; add_data[12] = ssl->in_msglen & 0xFF; SSL_DEBUG_BUF( 4, "additional data used for AEAD", add_data, 13 ); memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen, ssl->in_msg, ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen ); SSL_DEBUG_BUF( 4, "IV used", ssl->transform_in->iv_dec, ssl->transform_in->ivlen ); SSL_DEBUG_BUF( 4, "TAG used", dec_msg + dec_msglen, 16 ); memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen, ssl->in_msg, ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen ); ret = gcm_auth_decrypt( (gcm_context *) ssl->transform_in->ctx_dec, dec_msglen, ssl->transform_in->iv_dec, ssl->transform_in->ivlen, add_data, 13, dec_msg + dec_msglen, 16, dec_msg, dec_msg_result ); if( ret != 0 ) { SSL_DEBUG_MSG( 1, ( "AEAD decrypt failed on validation (ret = -0x%02x)", -ret ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } } else #endif return( ret ); } else { /* * Decrypt and check the padding */ unsigned char *dec_msg; unsigned char *dec_msg_result; size_t dec_msglen; size_t minlen = 0, fake_padlen; /* * Check immediate ciphertext sanity */ if( ssl->in_msglen % ssl->transform_in->ivlen != 0 ) { SSL_DEBUG_MSG( 1, ( "msglen (%d) %% ivlen (%d) != 0", ssl->in_msglen, ssl->transform_in->ivlen ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } if( ssl->minor_ver >= SSL_MINOR_VERSION_2 ) minlen += ssl->transform_in->ivlen; if( ssl->in_msglen < minlen + ssl->transform_in->ivlen || ssl->in_msglen < minlen + ssl->transform_in->maclen + 1 ) { SSL_DEBUG_MSG( 1, ( "msglen (%d) < max( ivlen(%d), maclen (%d) + 1 ) ( + expl IV )", ssl->in_msglen, ssl->transform_in->ivlen, 
ssl->transform_in->maclen ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } dec_msglen = ssl->in_msglen; dec_msg = ssl->in_msg; dec_msg_result = ssl->in_msg; /* * Initialize for prepended IV for block cipher in TLS v1.1 and up */ if( ssl->minor_ver >= SSL_MINOR_VERSION_2 ) { dec_msg += ssl->transform_in->ivlen; dec_msglen -= ssl->transform_in->ivlen; ssl->in_msglen -= ssl->transform_in->ivlen; for( i = 0; i < ssl->transform_in->ivlen; i++ ) ssl->transform_in->iv_dec[i] = ssl->in_msg[i]; } switch( ssl->transform_in->ivlen ) { #if defined(POLARSSL_DES_C) case 8: #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) if( ssl->session_in->ciphersuite == TLS_RSA_WITH_DES_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_DES_CBC_SHA ) { des_crypt_cbc( (des_context *) ssl->transform_in->ctx_dec, DES_DECRYPT, dec_msglen, ssl->transform_in->iv_dec, dec_msg, dec_msg_result ); } else #endif des3_crypt_cbc( (des3_context *) ssl->transform_in->ctx_dec, DES_DECRYPT, dec_msglen, ssl->transform_in->iv_dec, dec_msg, dec_msg_result ); break; #endif case 16: #if defined(POLARSSL_AES_C) if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 ) { aes_crypt_cbc( (aes_context *) ssl->transform_in->ctx_dec, AES_DECRYPT, dec_msglen, ssl->transform_in->iv_dec, dec_msg, dec_msg_result ); break; } #endif #if defined(POLARSSL_CAMELLIA_C) if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 ) { camellia_crypt_cbc( (camellia_context *) ssl->transform_in->ctx_dec, CAMELLIA_DECRYPT, dec_msglen, ssl->transform_in->iv_dec, dec_msg, dec_msg_result ); break; } #endif default: return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } padlen = 1 + ssl->in_msg[ssl->in_msglen - 1]; fake_padlen = 256 - padlen; if( ssl->in_msglen < ssl->transform_in->maclen + padlen ) { SSL_DEBUG_MSG( 1, ( "msglen (%d) < maclen (%d) + padlen (%d)", ssl->in_msglen, ssl->transform_in->maclen, padlen ) ); padlen = 0; fake_padlen = 256; correct = 0; } if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { if( padlen > ssl->transform_in->ivlen ) { SSL_DEBUG_MSG( 1, ( "bad padding length: is %d, " "should be no more than %d", padlen, ssl->transform_in->ivlen ) ); correct = 0; } } else { /* * TLSv1+: always check the padding up to the first failure * and fake check up to 256 bytes of padding */ for( i = 1; i <= padlen; i++ ) { if( ssl->in_msg[ssl->in_msglen - i] != padlen - 1 ) { correct = 0; fake_padlen = 256 - i; padlen = 0; } } for( i = 1; i <= fake_padlen; i++ ) { if( ssl->in_msg[i + 1] != fake_padlen - 1 ) minlen = 0; else minlen = 1; } if( padlen > 0 && correct == 0) 
SSL_DEBUG_MSG( 1, ( "bad padding byte detected" ) ); } } SSL_DEBUG_BUF( 4, "raw buffer after decryption", ssl->in_msg, ssl->in_msglen ); /* * Always compute the MAC (RFC4346, CBCTIME). */ ssl->in_msglen -= ( ssl->transform_in->maclen + padlen ); ssl->in_hdr[3] = (unsigned char)( ssl->in_msglen >> 8 ); ssl->in_hdr[4] = (unsigned char)( ssl->in_msglen ); memcpy( tmp, ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen ); if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { if( ssl->transform_in->maclen == 16 ) ssl_mac_md5( ssl->transform_in->mac_dec, ssl->in_msg, ssl->in_msglen, ssl->in_ctr, ssl->in_msgtype ); else if( ssl->transform_in->maclen == 20 ) ssl_mac_sha1( ssl->transform_in->mac_dec, ssl->in_msg, ssl->in_msglen, ssl->in_ctr, ssl->in_msgtype ); else if( ssl->transform_in->maclen == 32 ) ssl_mac_sha2( ssl->transform_in->mac_dec, ssl->in_msg, ssl->in_msglen, ssl->in_ctr, ssl->in_msgtype ); else if( ssl->transform_in->maclen != 0 ) { SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d", ssl->transform_in->maclen ) ); return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } } else { /* * Process MAC and always update for padlen afterwards to make * total time independent of padlen */ if( ssl->transform_in->maclen == 16 ) md5_hmac( ssl->transform_in->mac_dec, 16, ssl->in_ctr, ssl->in_msglen + 13, ssl->in_msg + ssl->in_msglen ); else if( ssl->transform_in->maclen == 20 ) sha1_hmac( ssl->transform_in->mac_dec, 20, ssl->in_ctr, ssl->in_msglen + 13, ssl->in_msg + ssl->in_msglen ); else if( ssl->transform_in->maclen == 32 ) sha2_hmac( ssl->transform_in->mac_dec, 32, ssl->in_ctr, ssl->in_msglen + 13, ssl->in_msg + ssl->in_msglen, 0 ); else if( ssl->transform_in->maclen != 0 ) { SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d", ssl->transform_in->maclen ) ); return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } } SSL_DEBUG_BUF( 4, "message mac", tmp, ssl->transform_in->maclen ); SSL_DEBUG_BUF( 4, "computed mac", ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen ); if( memcmp( tmp, ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen ) != 0 ) { SSL_DEBUG_MSG( 1, ( "message mac does not match" ) ); correct = 0; } /* * Finally check the correct flag */ if( correct == 0 ) return( POLARSSL_ERR_SSL_INVALID_MAC ); if( ssl->in_msglen == 0 ) { ssl->nb_zero++; /* * Three or more empty messages may be a DoS attack * (excessive CPU consumption). */ if( ssl->nb_zero > 3 ) { SSL_DEBUG_MSG( 1, ( "received four consecutive empty " "messages, possible DoS attack" ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } } else ssl->nb_zero = 0; for( i = 8; i > 0; i-- ) if( ++ssl->in_ctr[i - 1] != 0 ) break; SSL_DEBUG_MSG( 2, ( "<= decrypt buf" ) ); return( 0 ); }
target: Vulnerable
cwe: [ "CWE-310" ]
project: polarssl
commit_id: d66f070d492ef75405baad9f0d018b1bd06862c8
hash: 8.316945656659467e+37
size: 357
message:
Disable debug messages that can introduce a timing side channel. Introduced the POLARSSL_SSL_DEBUG_ALL flag to enable all these debug messages in case somebody does want to see the reason checks fail.
vul: 1

func:
bool read_key_file(RSA **key_ptr, bool is_priv_key, char **key_text_buffer) { String key_file_path; char *key; const char *key_type; FILE *key_file= NULL; key= is_priv_key ? auth_rsa_private_key_path : auth_rsa_public_key_path; key_type= is_priv_key ? "private" : "public"; *key_ptr= NULL; get_key_file_path(key, &key_file_path); /* Check for existance of private key/public key file. */ if ((key_file= fopen(key_file_path.c_ptr(), "r")) == NULL) { sql_print_information("RSA %s key file not found: %s." " Some authentication plugins will not work.", key_type, key_file_path.c_ptr()); } else { *key_ptr= is_priv_key ? PEM_read_RSAPrivateKey(key_file, 0, 0, 0) : PEM_read_RSA_PUBKEY(key_file, 0, 0, 0); if (!(*key_ptr)) { char error_buf[MYSQL_ERRMSG_SIZE]; ERR_error_string_n(ERR_get_error(), error_buf, MYSQL_ERRMSG_SIZE); sql_print_error("Failure to parse RSA %s key (file exists): %s:" " %s", key_type, key_file_path.c_ptr(), error_buf); /* Call ERR_clear_error() just in case there are more than 1 entry in the OpenSSL thread's error queue. */ ERR_clear_error(); return true; } /* For public key, read key file content into a char buffer. */ if (!is_priv_key) { int filesize; fseek(key_file, 0, SEEK_END); filesize= ftell(key_file); fseek(key_file, 0, SEEK_SET); *key_text_buffer= new char[filesize+1]; (void) fread(*key_text_buffer, filesize, 1, key_file); (*key_text_buffer)[filesize]= '\0'; } fclose(key_file); } return false; }
target: Safe
cwe: []
project: mysql-server
commit_id: 25d1b7e03b9b375a243fabdf0556c063c7282361
hash: 2.32924974965319e+38
size: 58
message:
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
vul: 0

func:
void eval(CImg<t>& output, const char *const expression, const double x=0, const double y=0, const double z=0, const double c=0, CImgList<T> *const list_images=0) const { _eval(output,0,expression,x,y,z,c,list_images); }
target: Safe
cwe: [ "CWE-770" ]
project: cimg
commit_id: 619cb58dd90b4e03ac68286c70ed98acbefd1c90
hash: 3.5906871481814946e+37
size: 5
message:
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
vul: 0

func:
ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert_new(HashTable *ht, zval *pData ZEND_FILE_LINE_DC) { return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC); }
target: Safe
cwe: [ "CWE-190" ]
project: php-src
commit_id: 4cc0286f2f3780abc6084bcdae5dce595daa3c12
hash: 1.9083690290616888e+38
size: 4
message:
Fix #73832 - leave the table in a safe state if the size is too big.
vul: 0

func:
vrrp_lower_prio_no_advert_handler(vector_t *strvec) { vrrp_t *vrrp = LIST_TAIL_DATA(vrrp_data->vrrp); int res; if (vector_size(strvec) >= 2) { res = check_true_false(strvec_slot(strvec, 1)); if (res >= 0) vrrp->lower_prio_no_advert = (unsigned)res; else report_config_error(CONFIG_GENERAL_ERROR, "(%s) invalid lower_prio_no_advert %s specified", vrrp->iname, FMT_STR_VSLOT(strvec, 1)); } else { /* Defaults to true */ vrrp->lower_prio_no_advert = true; } }
target: Safe
cwe: [ "CWE-59", "CWE-61" ]
project: keepalived
commit_id: 04f2d32871bb3b11d7dc024039952f2fe2750306
hash: 2.9815585574220303e+38
size: 16
message:
When opening files for write, ensure they aren't symbolic links Issue #1048 identified that if, for example, a non privileged user created a symbolic link from /etc/keepalvied.data to /etc/passwd, writing to /etc/keepalived.data (which could be invoked via DBus) would cause /etc/passwd to be overwritten. This commit stops keepalived writing to pathnames where the ultimate component is a symbolic link, by setting O_NOFOLLOW whenever opening a file for writing. This might break some setups, where, for example, /etc/keepalived.data was a symbolic link to /home/fred/keepalived.data. If this was the case, instead create a symbolic link from /home/fred/keepalived.data to /tmp/keepalived.data, so that the file is still accessible via /home/fred/keepalived.data. There doesn't appear to be a way around this backward incompatibility, since even checking if the pathname is a symbolic link prior to opening for writing would create a race condition. Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
vul: 0

func:
get_obj_from_str(str) VALUE str; { const char *s = StringValueCStr(str); #if TCL_MAJOR_VERSION == 8 && TCL_MINOR_VERSION == 0 return Tcl_NewStringObj((char*)s, RSTRING_LEN(str)); #else /* TCL_VERSION >= 8.1 */ VALUE enc = rb_attr_get(str, ID_at_enc); if (!NIL_P(enc)) { StringValue(enc); if (strcmp(RSTRING_PTR(enc), "binary") == 0) { /* binary string */ return Tcl_NewByteArrayObj((const unsigned char *)s, RSTRING_LENINT(str)); } else { /* text string */ return Tcl_NewStringObj(s, RSTRING_LENINT(str)); } #ifdef HAVE_RUBY_ENCODING_H } else if (rb_enc_get_index(str) == ENCODING_INDEX_BINARY) { /* binary string */ return Tcl_NewByteArrayObj((const unsigned char *)s, RSTRING_LENINT(str)); #endif } else if (memchr(s, 0, RSTRING_LEN(str))) { /* probably binary string */ return Tcl_NewByteArrayObj((const unsigned char *)s, RSTRING_LENINT(str)); } else { /* probably text string */ return Tcl_NewStringObj(s, RSTRING_LENINT(str)); } #endif }
target: Safe
cwe: []
project: tk
commit_id: d098136e3f62a4879a7d7cd34bbd50f482ba3331
hash: 1.0343705325244331e+37
size: 33
message:
tcltklib.c: use StringValueCStr [ci skip] * ext/tk/tcltklib.c (set_max_block_time, tcl_protect_core, ip_init, ip_create_slave_core, get_obj_from_str, ip_cancel_eval_core, lib_set_system_encoding, alloc_invoke_arguments, lib_merge_tklist): use StringValueCStr instead of StringValuePtr for values to be passed to Tcl interperter. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@55842 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
vul: 0

func:
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) { struct futex_hash_bucket *hb; struct futex_q *this, *next; union futex_key key = FUTEX_KEY_INIT; u32 uval, vpid = task_pid_vnr(current); int ret; retry: if (get_user(uval, uaddr)) return -EFAULT; /* * We release only a lock we actually own: */ if ((uval & FUTEX_TID_MASK) != vpid) return -EPERM; ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE); if (unlikely(ret != 0)) goto out; hb = hash_futex(&key); spin_lock(&hb->lock); /* * To avoid races, try to do the TID -> 0 atomic transition * again. If it succeeds then we can return without waking * anyone else up: */ if (!(uval & FUTEX_OWNER_DIED) && cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) goto pi_faulted; /* * Rare case: we managed to release the lock atomically, * no need to wake anyone else up: */ if (unlikely(uval == vpid)) goto out_unlock; /* * Ok, other tasks may need to be woken up - check waiters * and do the wakeup if necessary: */ plist_for_each_entry_safe(this, next, &hb->chain, list) { if (!match_futex (&this->key, &key)) continue; ret = wake_futex_pi(uaddr, uval, this); /* * The atomic access to the futex value * generated a pagefault, so retry the * user-access and the wakeup: */ if (ret == -EFAULT) goto pi_faulted; goto out_unlock; } /* * No waiters - kernel unlocks the futex: */ if (!(uval & FUTEX_OWNER_DIED)) { ret = unlock_futex_pi(uaddr, uval); if (ret == -EFAULT) goto pi_faulted; } out_unlock: spin_unlock(&hb->lock); put_futex_key(&key); out: return ret; pi_faulted: spin_unlock(&hb->lock); put_futex_key(&key); ret = fault_in_user_writeable(uaddr); if (!ret) goto retry; return ret; }
target: Vulnerable
cwe: [ "CWE-269" ]
project: linux
commit_id: 13fbca4c6ecd96ec1a1cfa2e4f2ce191fe928a5e
hash: 3.2222418081622963e+38
size: 82
message:
futex: Always cleanup owner tid in unlock_pi If the owner died bit is set at futex_unlock_pi, we currently do not cleanup the user space futex. So the owner TID of the current owner (the unlocker) persists. That's observable inconsistant state, especially when the ownership of the pi state got transferred. Clean it up unconditionally. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Kees Cook <keescook@chromium.org> Cc: Will Drewry <wad@chromium.org> Cc: Darren Hart <dvhart@linux.intel.com> Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
vul: 1

func:
aodv_rerr(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i, dc; const struct aodv_rerr *ap = (const struct aodv_rerr *)dat; const struct rerr_unreach *dp; ND_TCHECK(*ap); if (length < sizeof(*ap)) goto trunc; ND_PRINT((ndo, " rerr %s [items %u] [%u]:", ap->rerr_flags & RERR_NODELETE ? "[D]" : "", ap->rerr_dc, length)); dp = (const struct rerr_unreach *)(dat + sizeof(*ap)); i = length - sizeof(*ap); for (dc = ap->rerr_dc; dc != 0; dc--) { ND_TCHECK(*dp); if (i < sizeof(*dp)) goto trunc; ND_PRINT((ndo, " {%s}(%ld)", ipaddr_string(ndo, &dp->u_da), (unsigned long)EXTRACT_32BITS(&dp->u_ds))); dp++; i -= sizeof(*dp); } return; trunc: ND_PRINT((ndo, "[|rerr]")); }
target: Safe
cwe: [ "CWE-125", "CWE-787" ]
project: tcpdump
commit_id: cbddb98484ea8ec1deece351abd56e063d775b38
hash: 3.13667531872697e+38
size: 28
message:
CVE-2017-13002/AODV: Add some missing bounds checks. In aodv_extension() do a bounds check on the extension header before we look at it. This fixes a buffer over-read discovered by Kamil Frankowicz. Add a test using the capture file supplied by the reporter(s). While we're at it, add the RFC number, and check the validity of the length for the Hello extension.
vul: 0

func:
gdm_display_finish (GdmDisplay *self) { GdmDisplayPrivate *priv; g_return_val_if_fail (GDM_IS_DISPLAY (self), FALSE); priv = gdm_display_get_instance_private (self); if (priv->finish_idle_id != 0) { g_source_remove (priv->finish_idle_id); priv->finish_idle_id = 0; } _gdm_display_set_status (self, GDM_DISPLAY_FINISHED); g_debug ("GdmDisplay: finish display"); return TRUE; }
target: Safe
cwe: [ "CWE-754" ]
project: gdm
commit_id: 4e6e5335d29c039bed820c43bfd1c19cb62539ff
hash: 2.5247965167138807e+38
size: 18
message:
display: Use autoptr to handle errors in look for existing users It will make things just cleaner
vul: 0

func:
void OSD::handle_pg_backfill_reserve(OpRequestRef op) { const MBackfillReserve *m = static_cast<const MBackfillReserve*>(op->get_req()); assert(m->get_type() == MSG_OSD_BACKFILL_RESERVE); if (!require_osd_peer(op->get_req())) return; if (!require_same_or_newer_map(op, m->query_epoch, false)) return; PG::CephPeeringEvtRef evt; if (m->type == MBackfillReserve::REQUEST) { evt = PG::CephPeeringEvtRef( new PG::CephPeeringEvt( m->query_epoch, m->query_epoch, PG::RequestBackfillPrio(m->priority))); } else if (m->type == MBackfillReserve::GRANT) { evt = PG::CephPeeringEvtRef( new PG::CephPeeringEvt( m->query_epoch, m->query_epoch, PG::RemoteBackfillReserved())); } else if (m->type == MBackfillReserve::REJECT) { // NOTE: this is replica -> primary "i reject your request" // and also primary -> replica "cancel my previously-granted request" evt = PG::CephPeeringEvtRef( new PG::CephPeeringEvt( m->query_epoch, m->query_epoch, PG::RemoteReservationRejected())); } else { ceph_abort(); } if (service.splitting(m->pgid)) { peering_wait_for_split[m->pgid].push_back(evt); return; } PG *pg = _lookup_lock_pg(m->pgid); if (!pg) { dout(10) << " don't have pg " << m->pgid << dendl; return; } pg->queue_peering_event(evt); pg->unlock(); }
target: Safe
cwe: [ "CWE-287", "CWE-284" ]
project: ceph
commit_id: 5ead97120e07054d80623dada90a5cc764c28468
hash: 2.187658544037379e+38
size: 49
message:
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <sage@redhat.com> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
vul: 0

func:
xkb_file_type_to_string(enum xkb_file_type type) { if (type > _FILE_TYPE_NUM_ENTRIES) return "unknown"; return xkb_file_type_strings[type]; }
target: Safe
cwe: [ "CWE-476" ]
project: libxkbcommon
commit_id: e3cacae7b1bfda0d839c280494f23284a1187adf
hash: 6.099840500505003e+37
size: 6
message:
xkbcomp: fix crashes in the parser when geometry tokens appear In the XKB format, floats and various keywords can only be used in the xkb_geometry section. xkbcommon removed support xkb_geometry, but still parses it for backward compatibility. As part of ignoring it, the float AST node and various keywords were removed, and instead NULL was returned by their parsing actions. However, the rest of the code does not handle NULLs, and so when they appear crashes usually ensue. To fix this, restore the float AST node and the ignored keywords. None of the evaluating code expects them, so nice error are displayed. Caught with the afl fuzzer. Signed-off-by: Ran Benita <ran234@gmail.com>
vul: 0

func:
binlog_trans_log_savepos(THD *thd, my_off_t *pos) { DBUG_ENTER("binlog_trans_log_savepos"); DBUG_ASSERT(pos != NULL); if (thd_get_ha_data(thd, binlog_hton) == NULL) thd->binlog_setup_trx_data(); binlog_cache_mngr *const cache_mngr= (binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton); DBUG_ASSERT(mysql_bin_log.is_open()); *pos= cache_mngr->trx_cache.get_byte_position(); DBUG_PRINT("return", ("*pos: %lu", (ulong) *pos)); DBUG_VOID_RETURN; }
target: Safe
cwe: [ "CWE-264" ]
project: mysql-server
commit_id: 48bd8b16fe382be302c6f0b45931be5aa6f29a0e
hash: 5.625261796277373e+37
size: 13
message:
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE [This is the 5.5/5.6 version of the bugfix]. The problem was that it was possible to write log files ending in .ini/.cnf that later could be parsed as an options file. This made it possible for users to specify startup options without the permissions to do so. This patch fixes the problem by disallowing general query log and slow query log to be written to files ending in .ini and .cnf.
vul: 0

func:
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow) { const void *p = flow; BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32)); return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET); }
target: Vulnerable
cwe: [ "CWE-330" ]
project: linux
commit_id: 55667441c84fa5e0911a0aac44fb059c15ba6da2
hash: 2.2042428642879366e+38
size: 7
message:
net/flow_dissector: switch to siphash UDP IPv6 packets auto flowlabels are using a 32bit secret (static u32 hashrnd in net/core/flow_dissector.c) and apply jhash() over fields known by the receivers. Attackers can easily infer the 32bit secret and use this information to identify a device and/or user, since this 32bit secret is only set at boot time. Really, using jhash() to generate cookies sent on the wire is a serious security concern. Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be a dead end. Trying to periodically change the secret (like in sch_sfq.c) could change paths taken in the network for long lived flows. Let's switch to siphash, as we did in commit df453700e8d8 ("inet: switch IP ID generator to siphash") Using a cryptographically strong pseudo random function will solve this privacy issue and more generally remove other weak points in the stack. Packet schedulers using skb_get_hash_perturb() benefit from this change. Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default") Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels") Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel") Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Jonathan Berger <jonathann1@walla.com> Reported-by: Amit Klein <aksecurity@gmail.com> Reported-by: Benny Pinkas <benny@pinkas.net> Cc: Tom Herbert <tom@herbertland.com> Signed-off-by: David S. Miller <davem@davemloft.net>
vul: 1

func:
f_resolve(typval_T *argvars, typval_T *rettv) { char_u *p; #ifdef HAVE_READLINK char_u *buf = NULL; #endif p = tv_get_string(&argvars[0]); #ifdef FEAT_SHORTCUT { char_u *v = NULL; v = mch_resolve_shortcut(p); if (v != NULL) rettv->vval.v_string = v; else rettv->vval.v_string = vim_strsave(p); } #else # ifdef HAVE_READLINK { char_u *cpy; int len; char_u *remain = NULL; char_u *q; int is_relative_to_current = FALSE; int has_trailing_pathsep = FALSE; int limit = 100; p = vim_strsave(p); if (p[0] == '.' && (vim_ispathsep(p[1]) || (p[1] == '.' && (vim_ispathsep(p[2]))))) is_relative_to_current = TRUE; len = STRLEN(p); if (len > 0 && after_pathsep(p, p + len)) { has_trailing_pathsep = TRUE; p[len - 1] = NUL; /* the trailing slash breaks readlink() */ } q = getnextcomp(p); if (*q != NUL) { /* Separate the first path component in "p", and keep the * remainder (beginning with the path separator). */ remain = vim_strsave(q - 1); q[-1] = NUL; } buf = alloc(MAXPATHL + 1); if (buf == NULL) goto fail; for (;;) { for (;;) { len = readlink((char *)p, (char *)buf, MAXPATHL); if (len <= 0) break; buf[len] = NUL; if (limit-- == 0) { vim_free(p); vim_free(remain); emsg(_("E655: Too many symbolic links (cycle?)")); rettv->vval.v_string = NULL; goto fail; } /* Ensure that the result will have a trailing path separator * if the argument has one. */ if (remain == NULL && has_trailing_pathsep) add_pathsep(buf); /* Separate the first path component in the link value and * concatenate the remainders. */ q = getnextcomp(vim_ispathsep(*buf) ? buf + 1 : buf); if (*q != NUL) { if (remain == NULL) remain = vim_strsave(q - 1); else { cpy = concat_str(q - 1, remain); if (cpy != NULL) { vim_free(remain); remain = cpy; } } q[-1] = NUL; } q = gettail(p); if (q > p && *q == NUL) { /* Ignore trailing path separator. */ q[-1] = NUL; q = gettail(p); } if (q > p && !mch_isFullName(buf)) { /* symlink is relative to directory of argument */ cpy = alloc((unsigned)(STRLEN(p) + STRLEN(buf) + 1)); if (cpy != NULL) { STRCPY(cpy, p); STRCPY(gettail(cpy), buf); vim_free(p); p = cpy; } } else { vim_free(p); p = vim_strsave(buf); } } if (remain == NULL) break; /* Append the first path component of "remain" to "p". */ q = getnextcomp(remain + 1); len = q - remain - (*q != NUL); cpy = vim_strnsave(p, STRLEN(p) + len); if (cpy != NULL) { STRNCAT(cpy, remain, len); vim_free(p); p = cpy; } /* Shorten "remain". */ if (*q != NUL) STRMOVE(remain, q - 1); else VIM_CLEAR(remain); } /* If the result is a relative path name, make it explicitly relative to * the current directory if and only if the argument had this form. */ if (!vim_ispathsep(*p)) { if (is_relative_to_current && *p != NUL && !(p[0] == '.' && (p[1] == NUL || vim_ispathsep(p[1]) || (p[1] == '.' && (p[2] == NUL || vim_ispathsep(p[2])))))) { /* Prepend "./". */ cpy = concat_str((char_u *)"./", p); if (cpy != NULL) { vim_free(p); p = cpy; } } else if (!is_relative_to_current) { /* Strip leading "./". */ q = p; while (q[0] == '.' && vim_ispathsep(q[1])) q += 2; if (q > p) STRMOVE(p, p + 2); } } /* Ensure that the result will have no trailing path separator * if the argument had none. But keep "/" or "//". */ if (!has_trailing_pathsep) { q = p + STRLEN(p); if (after_pathsep(p, q)) *gettail_sep(p) = NUL; } rettv->vval.v_string = p; } # else rettv->vval.v_string = vim_strsave(p); # endif #endif simplify_filename(rettv->vval.v_string); #ifdef HAVE_READLINK fail: vim_free(buf); #endif rettv->v_type = VAR_STRING; }
Safe
[ "CWE-78" ]
vim
8c62a08faf89663e5633dc5036cd8695c80f1075
3.3252051904559937e+38
199
patch 8.1.0881: can execute shell commands in rvim through interfaces Problem: Can execute shell commands in rvim through interfaces. Solution: Disable using interfaces in restricted mode. Allow for writing file with writefile(), histadd() and a few others.
0
void bdrv_set_io_limits(BlockDriverState *bs, ThrottleConfig *cfg) { int i; throttle_config(&bs->throttle_state, cfg); for (i = 0; i < 2; i++) { qemu_co_enter_next(&bs->throttled_reqs[i]); } }
Safe
[ "CWE-190" ]
qemu
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
3.758663522860955e+37
11
block: Limit request size (CVE-2014-0143) Limiting the size of a single request to INT_MAX not only fixes a direct integer overflow in bdrv_check_request() (which would only trigger bad behaviour with ridiculously huge images, as in close to 2^64 bytes), but can also prevent overflows in all block drivers. Signed-off-by: Kevin Wolf <kwolf@redhat.com> Reviewed-by: Max Reitz <mreitz@redhat.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
0
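The commit message above says request sizes are capped at INT_MAX so later per-driver arithmetic cannot overflow. A minimal sketch of that kind of guard is shown below; the function name and error codes are illustrative, not QEMU's actual bdrv_check_request().

#include <errno.h>
#include <limits.h>
#include <stdint.h>

#define SECTOR_SIZE 512

/* Reject any request whose byte count would not fit in an int, and any
 * offset computation that would overflow int64_t. */
static int check_request(int64_t sector_num, int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > INT_MAX / SECTOR_SIZE)
        return -EIO;
    if (sector_num < 0 ||
        sector_num > INT64_MAX / SECTOR_SIZE - nb_sectors)
        return -EIO;
    return 0;
}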
gif_get8(gif_context_t *s) { if (s->img_buffer < s->img_buffer_end) { return *s->img_buffer++; } return 0; }
Safe
[ "CWE-703", "CWE-787" ]
libsixel
7808a06b88c11dbc502318cdd51fa374f8cd47ee
5.583469633168989e+37
7
gif loader: check LZW code size (Issue #75)
0
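The libsixel fix above adds a check on the LZW code size read from the GIF stream. A hedged sketch of such a validation is below; the bounds follow the GIF89a format (color tables of at most 256 entries), and the function name is invented for illustration.

#include <stdint.h>

/* GIF image data starts with a "LZW minimum code size" byte.  A hostile
 * file can place an out-of-range value here; using it unchecked to size
 * decode tables or shift amounts leads to out-of-bounds accesses. */
static int gif_check_lzw_code_size(uint8_t min_code_size)
{
    if (min_code_size < 2 || min_code_size > 8)
        return -1;          /* malformed: reject before building tables */
    return 0;
}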
flatpak_dir_ensure_bundle_remote (FlatpakDir *self, GFile *file, GBytes *extra_gpg_data, char **out_ref, char **out_checksum, char **out_metadata, gboolean *out_created_remote, GCancellable *cancellable, GError **error) { g_autofree char *ref = NULL; gboolean created_remote = FALSE; g_autoptr(GVariant) deploy_data = NULL; g_autoptr(GVariant) metadata = NULL; g_autofree char *origin = NULL; g_autofree char *fp_metadata = NULL; g_auto(GStrv) parts = NULL; g_autofree char *basename = NULL; g_autoptr(GBytes) included_gpg_data = NULL; GBytes *gpg_data = NULL; g_autofree char *to_checksum = NULL; g_autofree char *remote = NULL; g_autofree char *collection_id = NULL; if (!flatpak_dir_ensure_repo (self, cancellable, error)) return NULL; metadata = flatpak_bundle_load (file, &to_checksum, &ref, &origin, NULL, &fp_metadata, NULL, &included_gpg_data, &collection_id, error); if (metadata == NULL) return NULL; gpg_data = extra_gpg_data ? extra_gpg_data : included_gpg_data; parts = flatpak_decompose_ref (ref, error); if (parts == NULL) return NULL; deploy_data = flatpak_dir_get_deploy_data (self, ref, FLATPAK_DEPLOY_VERSION_ANY, cancellable, NULL); if (deploy_data != NULL) { remote = g_strdup (flatpak_deploy_data_get_origin (deploy_data)); /* We need to import any gpg keys because otherwise the pull will fail */ if (gpg_data != NULL) { g_autoptr(GKeyFile) new_config = NULL; new_config = ostree_repo_copy_config (flatpak_dir_get_repo (self)); if (!flatpak_dir_modify_remote (self, remote, new_config, gpg_data, cancellable, error)) return NULL; } } else { /* Add a remote for later updates */ basename = g_file_get_basename (file); remote = flatpak_dir_create_origin_remote (self, origin, parts[1], basename, ref, gpg_data, collection_id, cancellable, error); if (remote == NULL) return NULL; /* From here we need to goto out on error, to clean up */ created_remote = TRUE; } if (out_created_remote) *out_created_remote = created_remote; if (out_ref) *out_ref = g_steal_pointer (&ref); if (out_checksum) *out_checksum = g_steal_pointer (&to_checksum); if (out_metadata) *out_metadata = g_steal_pointer (&fp_metadata); return g_steal_pointer (&remote); }
Safe
[ "CWE-668" ]
flatpak
cd2142888fc4c199723a0dfca1f15ea8788a5483
1.161984261131255e+37
96
Don't expose /proc when running apply_extra As shown by CVE-2019-5736, it is sometimes possible for the sandbox app to access outside files using /proc/self/exe. This is not typically an issue for flatpak as the sandbox runs as the user which has no permissions to e.g. modify the host files. However, when installing apps using extra-data into the system repo we *do* actually run a sandbox as root. So, in this case we disable mounting /proc in the sandbox, which will neuter attacks like this.
0
net_peer_address_is_trusted(const char *addr) { cfg_t *section; const char *network; int i; int n; if (!addr) return false; if (strncmp(addr, "::ffff:", strlen("::ffff:")) == 0) addr += strlen("::ffff:"); section = cfg_getsec(cfg, "general"); n = cfg_size(section, "trusted_networks"); for (i = 0; i < n; i++) { network = cfg_getnstr(section, "trusted_networks", i); if (!network || network[0] == '\0') return false; if (strncmp(network, addr, strlen(network)) == 0) return true; if ((strcmp(network, "localhost") == 0) && (strcmp(addr, "127.0.0.1") == 0 || strcmp(addr, "::1") == 0)) return true; if (strcmp(network, "any") == 0) return true; } return false; }
Safe
[ "CWE-416" ]
owntone-server
246d8ae0cef27377e5dfe9ee3ad87e864d6b6266
2.642434923269195e+38
35
[misc] Fix use-after-free in net_bind() Thanks to Ba Jinsheng for reporting this bug
0
set_arg(void __user *b, void *val, int len) { if (len <= 0) len = sizeof(void *); if (copy_to_user(b, val, len)) return -EFAULT; return 0; }
Safe
[]
linux
4ab42d78e37a294ac7bc56901d563c642e03c4ae
3.36432687471341e+37
8
ppp, slip: Validate VJ compression slot parameters completely Currently slhc_init() treats out-of-range values of rslots and tslots as equivalent to 0, except that if tslots is too large it will dereference a null pointer (CVE-2015-7799). Add a range-check at the top of the function and make it return an ERR_PTR() on error instead of NULL. Change the callers accordingly. Compile-tested only. Reported-by: 郭永刚 <guoyonggang@360.cn> References: http://article.gmane.org/gmane.comp.security.oss.general/17908 Signed-off-by: Ben Hutchings <ben@decadent.org.uk> Signed-off-by: David S. Miller <davem@davemloft.net>
0
_ipmi_get_channel_info(struct ipmi_intf *intf, struct channel_info_t *channel_info) { struct ipmi_rs *rsp; struct ipmi_rq req = {0}; uint8_t data[1]; if (!channel_info) { return (-3); } data[0] = channel_info->channel & 0x0F; req.msg.netfn = IPMI_NETFN_APP; req.msg.cmd = IPMI_GET_CHANNEL_INFO; req.msg.data = data; req.msg.data_len = 1; rsp = intf->sendrecv(intf, &req); if (!rsp) { return (-1); } else if (rsp->ccode) { return rsp->ccode; } else if (rsp->data_len != 9) { return (-2); } channel_info->channel = rsp->data[0] & 0x0F; channel_info->medium = rsp->data[1] & 0x7F; channel_info->protocol = rsp->data[2] & 0x1F; channel_info->session_support = rsp->data[3] & 0xC0; channel_info->active_sessions = rsp->data[3] & 0x3F; memcpy(channel_info->vendor_id, &rsp->data[4], sizeof(channel_info->vendor_id)); memcpy(channel_info->aux_info, &rsp->data[7], sizeof(channel_info->aux_info)); return 0; }
Safe
[ "CWE-120" ]
ipmitool
9452be87181a6e83cfcc768b3ed8321763db50e4
6.291550990820868e+37
35
channel: Fix buffer overflow Partial fix for CVE-2020-5208, see https://github.com/ipmitool/ipmitool/security/advisories/GHSA-g659-9qxw-p7cp The `ipmi_get_channel_cipher_suites` function does not properly check the final response’s `data_len`, which can lead to stack buffer overflow on the final copy.
0
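The advisory text above points at a missing length check before copying a response chunk into a fixed buffer. Below is a hedged sketch of the general pattern; the helper name and layout are assumptions, not ipmitool's actual code.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Append a response chunk to a caller-provided buffer only if it fits;
 * data_len comes from the wire and must never be trusted. */
static int append_chunk(uint8_t *buf, size_t cap, size_t *used,
                        const uint8_t *data, size_t data_len)
{
    if (*used > cap || data_len > cap - *used)
        return -1;                  /* would overflow the destination */
    memcpy(buf + *used, data, data_len);
    *used += data_len;
    return 0;
}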
eval_op_mult(uschar **sptr, BOOL decimal, uschar **error) { uschar *s = *sptr; int_eximarith_t x = eval_op_unary(&s, decimal, error); if (*error == NULL) { while (*s == '*' || *s == '/' || *s == '%') { int op = *s++; int_eximarith_t y = eval_op_unary(&s, decimal, error); if (*error != NULL) break; /* SIGFPE both on div/mod by zero and on INT_MIN / -1, which would give * a value of INT_MAX+1. Note that INT_MIN * -1 gives INT_MIN for me, which * is a bug somewhere in [gcc 4.2.1, FreeBSD, amd64]. In fact, -N*-M where * -N*M is INT_MIN will yielf INT_MIN. * Since we don't support floating point, this is somewhat simpler. * Ideally, we'd return an error, but since we overflow for all other * arithmetic, consistency suggests otherwise, but what's the correct value * to use? There is none. * The C standard guarantees overflow for unsigned arithmetic but signed * overflow invokes undefined behaviour; in practice, this is overflow * except for converting INT_MIN to INT_MAX+1. We also can't guarantee * that long/longlong larger than int are available, or we could just work * with larger types. We should consider whether to guarantee 32bit eval * and 64-bit working variables, with errors returned. For now ... * So, the only SIGFPEs occur with a non-shrinking div/mod, thus -1; we * can just let the other invalid results occur otherwise, as they have * until now. For this one case, we can coerce. */ if (y == -1 && x == EXIM_ARITH_MIN && op != '*') { DEBUG(D_expand) debug_printf("Integer exception dodging: " PR_EXIM_ARITH "%c-1 coerced to " PR_EXIM_ARITH "\n", EXIM_ARITH_MIN, op, EXIM_ARITH_MAX); x = EXIM_ARITH_MAX; continue; } if (op == '*') x *= y; else { if (y == 0) { *error = (op == '/') ? US"divide by zero" : US"modulo by zero"; x = 0; break; } if (op == '/') x /= y; else x %= y; } } } *sptr = s; return x; }
Safe
[ "CWE-189" ]
exim
7685ce68148a083d7759e78d01aa5198fc099c44
2.9620515888663503e+38
57
Only expand integers for integer math once
0
io_open_sockets(void) { static int already_opened; if (already_opened || HAVE_OPT( SAVECONFIGQUIT )) return; already_opened = 1; /* * Create the sockets */ BLOCKIO(); create_sockets(NTP_PORT); UNBLOCKIO(); init_async_notifications(); DPRINTF(3, ("io_open_sockets: maxactivefd %d\n", maxactivefd)); }
Safe
[ "CWE-287" ]
ntp
71a962710bfe066f76da9679cf4cfdeffe34e95e
2.947856195186799e+37
20
[Sec 2936] Skeleton Key: Any trusted key system can serve time. HStenn.
0
best_effort_strncat_in_locale(struct archive_string *as, const void *_p, size_t length, struct archive_string_conv *sc) { size_t remaining; const uint8_t *itp; int return_value = 0; /* success */ /* * If both from-locale and to-locale is the same, this makes a copy. * And then this checks all copied MBS can be WCS if so returns 0. */ if (sc->same) { if (archive_string_append(as, _p, length) == NULL) return (-1);/* No memory */ return (invalid_mbs(_p, length, sc)); } /* * If a character is ASCII, this just copies it. If not, this * assigns '?' character instead but in UTF-8 locale this assigns * byte sequence 0xEF 0xBD 0xBD, which are code point U+FFFD, * a Replacement Character in Unicode. */ remaining = length; itp = (const uint8_t *)_p; while (*itp && remaining > 0) { if (*itp > 127) { // Non-ASCII: Substitute with suitable replacement if (sc->flag & SCONV_TO_UTF8) { if (archive_string_append(as, utf8_replacement_char, sizeof(utf8_replacement_char)) == NULL) { __archive_errx(1, "Out of memory"); } } else { archive_strappend_char(as, '?'); } return_value = -1; } else { archive_strappend_char(as, *itp); } ++itp; } return (return_value); }
Safe
[ "CWE-125" ]
libarchive
22b1db9d46654afc6f0c28f90af8cdc84a199f41
1.946271842459908e+38
44
Bugfix and optimize archive_wstring_append_from_mbs() The call to mbrtowc() or mbtowc() should read up to mbs_length bytes and not wcs_length. This avoids out-of-bounds reads. mbrtowc() and mbtowc() return (size_t)-1 with errno EILSEQ when they encounter an invalid multibyte character and (size_t)-2 when they encounter an incomplete multibyte character. As we return failure and all our callers error out, it makes no sense to continue parsing mbs. As we allocate `len` wchars at the beginning and each wchar has at least one byte, there will never be a need to grow the buffer, so the code can be left out. On the other hand, we are always allocating more memory than we need. As long as wcs_length == mbs_length == len we can omit wcs_length. We keep the old code commented in case we decide to save memory and use autoexpanding wcs_length in the future. Fixes #1276
0
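To make the libarchive fix above concrete, here is a hedged sketch of a conversion loop that passes the remaining byte count to mbrtowc() and stops on (size_t)-1/(size_t)-2; it is a simplified stand-in, not the library's archive_wstring_append_from_mbs().

#include <stddef.h>
#include <string.h>
#include <wchar.h>

/* Convert at most src_len bytes of multibyte text into at most dst_len
 * wide characters.  Returns the number of wide characters written, or
 * (size_t)-1 on an invalid or incomplete sequence. */
static size_t mbs_to_wcs(wchar_t *dst, size_t dst_len,
                         const char *src, size_t src_len)
{
    mbstate_t st;
    size_t out = 0;

    memset(&st, 0, sizeof(st));
    while (src_len > 0 && out < dst_len) {
        size_t n = mbrtowc(&dst[out], src, src_len, &st);
        if (n == (size_t)-1 || n == (size_t)-2)
            return (size_t)-1;      /* stop instead of reading past the end */
        if (n == 0)
            break;                  /* embedded NUL ends the string */
        src += n;
        src_len -= n;
        out++;
    }
    return out;
}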
Array_Marshal(BYTE *sourceBuffer, UINT16 sourceSize, BYTE **buffer, INT32 *size) { if (buffer != NULL) { if ((size == NULL) || (*size >= sourceSize)) { memcpy(*buffer, sourceBuffer, sourceSize); *buffer += sourceSize; if (size != NULL) { *size -= sourceSize; } } else { pAssert(FALSE); } } return sourceSize; }
Safe
[ "CWE-787" ]
libtpms
3ef9b26cb9f28bd64d738bff9505a20d4eb56acd
5.998554577431005e+36
18
tpm2: Add maxSize parameter to TPM2B_Marshal for sanity checks Add maxSize parameter to TPM2B_Marshal and assert on it checking the size of the data intended to be marshaled versus the maximum buffer size. Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
0
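The commit above adds a maximum-size parameter to the marshaling path so the destination buffer cannot be overrun. A hedged sketch of a size-checked variant of the Array_Marshal pattern shown in the record follows; it fails instead of asserting, and the signature is illustrative rather than libtpms' actual one.

#include <stdint.h>
#include <string.h>

/* Copy src_size bytes into *buffer only if *remaining says they fit,
 * then advance the cursor and shrink the remaining count. */
static int array_marshal_checked(const uint8_t *src, uint16_t src_size,
                                 uint8_t **buffer, int32_t *remaining)
{
    if (remaining == NULL || *remaining < (int32_t)src_size)
        return -1;                  /* would write past the output buffer */
    memcpy(*buffer, src, src_size);
    *buffer += src_size;
    *remaining -= src_size;
    return 0;
}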
static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) { int ret = -EINVAL; struct scatterlist sgio; SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher); if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) { ret = -EAGAIN; goto error; } skcipher_request_set_tfm(req, big_key_skcipher); skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); sg_init_one(&sgio, data, datalen); skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL); if (op == BIG_KEY_ENC) ret = crypto_skcipher_encrypt(req); else ret = crypto_skcipher_decrypt(req); skcipher_request_zero(req); error: return ret; }
Safe
[ "CWE-476" ]
linux
7df3e59c3d1df4f87fe874c7956ef7a3d2f4d5fb
8.69360473567933e+37
28
KEYS: Sort out big_key initialisation big_key has two separate initialisation functions, one that registers the key type and one that registers the crypto. If the key type fails to register, there's no problem if the crypto registers successfully because there's no way to reach the crypto except through the key type. However, if the key type registers successfully but the crypto does not, big_key_rng and big_key_blkcipher may end up set to NULL - but the code neither checks for this nor unregisters the big key key type. Furthermore, since the key type is registered before the crypto, it is theoretically possible for the kernel to try adding a big_key before the crypto is set up, leading to the same effect. Fix this by merging big_key_crypto_init() and big_key_init() and calling the resulting function late. If they're going to be encrypted, we shouldn't be creating big_keys before we have the facilities to do the encryption available. The key type registration is also moved after the crypto initialisation. The fix also includes message printing on failure. If the big_key type isn't correctly set up, simply doing: dd if=/dev/zero bs=4096 count=1 | keyctl padd big_key a @s ought to cause an oops. Fixes: 13100a72f40f5748a04017e0ab3df4cf27c809ef ('Security: Keys: Big keys stored encrypted') Signed-off-by: David Howells <dhowells@redhat.com> cc: Peter Hlavaty <zer0mem@yahoo.com> cc: Kirill Marinushkin <k.marinushkin@gmail.com> cc: Artem Savkov <asavkov@redhat.com> cc: stable@vger.kernel.org Signed-off-by: James Morris <james.l.morris@oracle.com>
0
make_absent_tree_for_simple_one_char_repeat(Node** node, Node* absent, Node* quant, Node* body, int possessive, ScanEnv* env) { int r; int i; int id1; int lower, upper; Node* x; Node* ns[4]; *node = NULL_NODE; r = ONIGERR_MEMORY; ns[0] = ns[1] = NULL_NODE; ns[2] = body, ns[3] = absent; lower = QUANT_(quant)->lower; upper = QUANT_(quant)->upper; onig_node_free(quant); r = node_new_save_gimmick(&ns[0], SAVE_RIGHT_RANGE, env); if (r != 0) goto err; id1 = GIMMICK_(ns[0])->id; r = make_absent_engine(&ns[1], id1, absent, body, lower, upper, possessive, FALSE, env); if (r != 0) goto err; ns[2] = ns[3] = NULL_NODE; r = node_new_update_var_gimmick(&ns[2], UPDATE_VAR_RIGHT_RANGE_FROM_STACK, id1, env); if (r != 0) goto err; x = make_list(3, ns); if (IS_NULL(x)) goto err0; *node = x; return ONIG_NORMAL; err0: r = ONIGERR_MEMORY; err: for (i = 0; i < 4; i++) onig_node_free(ns[i]); return r; }
Safe
[ "CWE-125" ]
oniguruma
aa0188eaedc056dca8374ac03d0177429b495515
1.0293044011155604e+38
46
fix #163: heap-buffer-overflow in gb18030_mbc_enc_len
0
static int ath6kl_wmi_p2p_info_event_rx(u8 *datap, int len) { struct wmi_p2p_info_event *ev; u32 flags; u16 dlen; if (len < sizeof(*ev)) return -EINVAL; ev = (struct wmi_p2p_info_event *) datap; flags = le32_to_cpu(ev->info_req_flags); dlen = le16_to_cpu(ev->len); ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: flags=%x len=%d\n", flags, dlen); if (flags & P2P_FLAG_CAPABILITIES_REQ) { struct wmi_p2p_capabilities *cap; if (dlen < sizeof(*cap)) return -EINVAL; cap = (struct wmi_p2p_capabilities *) ev->data; ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: GO Power Save = %d\n", cap->go_power_save); } if (flags & P2P_FLAG_MACADDR_REQ) { struct wmi_p2p_macaddr *mac; if (dlen < sizeof(*mac)) return -EINVAL; mac = (struct wmi_p2p_macaddr *) ev->data; ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: MAC Address = %pM\n", mac->mac_addr); } if (flags & P2P_FLAG_HMODEL_REQ) { struct wmi_p2p_hmodel *mod; if (dlen < sizeof(*mod)) return -EINVAL; mod = (struct wmi_p2p_hmodel *) ev->data; ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: P2P Model = %d (%s)\n", mod->p2p_model, mod->p2p_model ? "host" : "firmware"); } return 0; }
Safe
[ "CWE-125" ]
linux
5d6751eaff672ea77642e74e92e6c0ac7f9709ab
4.673811385935828e+37
43
ath6kl: add some bounds checking The "ev->traffic_class" and "reply->ac" variables come from the network and they're used as an offset into the wmi->stream_exist_for_ac[] array. Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[] array only has WMM_NUM_AC (4) elements. We need to add a couple bounds checks to prevent array overflows. I also modified one existing check from "if (traffic_class > 3) {" to "if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent. Fixes: bdcd81707973 (" Add ath6kl cleaned up driver") Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
0
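The ath6kl fix above is a plain bounds check on a value received from the network before it indexes a 4-entry array. The sketch below shows that pattern in isolation; the array and function names mirror the commit message but are simplified for illustration.

#include <stdbool.h>
#include <stdint.h>

#define WMM_NUM_AC 4

static bool stream_exist_for_ac[WMM_NUM_AC];

/* traffic_class is a u8 from the wire (0-255); validate it before use. */
static int set_stream_active(uint8_t traffic_class, bool active)
{
    if (traffic_class >= WMM_NUM_AC)
        return -1;                  /* reject out-of-range index */
    stream_exist_for_ac[traffic_class] = active;
    return 0;
}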
mark_pages(fz_context *ctx, pdf_document *doc, pdf_write_state *opts, pdf_obj *val, int pagenum) { if (pdf_mark_obj(ctx, val)) return pagenum; fz_try(ctx) { if (pdf_is_dict(ctx, val)) { if (pdf_name_eq(ctx, PDF_NAME_Page, pdf_dict_get(ctx, val, PDF_NAME_Type))) { int num = pdf_to_num(ctx, val); pdf_unmark_obj(ctx, val); mark_all(ctx, doc, opts, val, pagenum == 0 ? USE_PAGE1 : (pagenum<<USE_PAGE_SHIFT), pagenum); page_objects_list_set_page_object(ctx, opts, pagenum, num); pagenum++; opts->use_list[num] |= USE_PAGE_OBJECT; } else { int i, n = pdf_dict_len(ctx, val); for (i = 0; i < n; i++) { pdf_obj *key = pdf_dict_get_key(ctx, val, i); pdf_obj *obj = pdf_dict_get_val(ctx, val, i); if (pdf_name_eq(ctx, PDF_NAME_Kids, key)) pagenum = mark_pages(ctx, doc, opts, obj, pagenum); else mark_all(ctx, doc, opts, obj, USE_CATALOGUE, -1); } if (pdf_is_indirect(ctx, val)) { int num = pdf_to_num(ctx, val); opts->use_list[num] |= USE_CATALOGUE; } } } else if (pdf_is_array(ctx, val)) { int i, n = pdf_array_len(ctx, val); for (i = 0; i < n; i++) { pagenum = mark_pages(ctx, doc, opts, pdf_array_get(ctx, val, i), pagenum); } if (pdf_is_indirect(ctx, val)) { int num = pdf_to_num(ctx, val); opts->use_list[num] |= USE_CATALOGUE; } } } fz_always(ctx) { pdf_unmark_obj(ctx, val); } fz_catch(ctx) { fz_rethrow(ctx); } return pagenum; }
Safe
[ "CWE-119" ]
mupdf
520cc26d18c9ee245b56e9e91f9d4fcae02be5f0
1.1395620942693807e+37
65
Bug 689699: Avoid buffer overrun. When cleaning a pdf file, various lists (of pdf_xref_len length) are defined early on. If we trigger a repair during the clean, this can cause pdf_xref_len to increase causing an overrun. Fix this by watching for changes in the length, and checking accesses to the list for validity. This also appears to fix bugs 698700-698703.
0
static struct commit *get_revision_1(struct rev_info *revs) { if (!revs->commits) return NULL; do { struct commit_list *entry = revs->commits; struct commit *commit = entry->item; revs->commits = entry->next; free(entry); if (revs->reflog_info) fake_reflog_parent(revs->reflog_info, commit); /* * If we haven't done the list limiting, we need to look at * the parents here. We also need to do the date-based limiting * that we'd otherwise have done in limit_list(). */ if (!revs->limited) { if (revs->max_age != -1 && (commit->date < revs->max_age)) continue; if (add_parents_to_list(revs, commit, &revs->commits) < 0) return NULL; } switch (simplify_commit(revs, commit)) { case commit_ignore: continue; case commit_error: return NULL; default: return commit; } } while (revs->commits); return NULL; }
Safe
[ "CWE-119" ]
git
fd55a19eb1d49ae54008d932a65f79cd6fda45c9
2.590069312826178e+38
39
Fix buffer overflow in git diff If PATH_MAX on your system is smaller than a path stored, it may cause buffer overflow and stack corruption in diff_addremove() and diff_change() functions when running git-diff Signed-off-by: Dmitry Potapov <dpotapov@gmail.com> Signed-off-by: Junio C Hamano <gitster@pobox.com>
0
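The git fix above addresses copying paths longer than PATH_MAX into fixed buffers. A hedged sketch of the safer pattern, sizing the destination from the actual inputs, is below; the helper is illustrative and not git's own code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Join a directory and file name into a freshly sized buffer instead of
 * assuming the result fits in a PATH_MAX array on the stack. */
static char *join_path(const char *dir, const char *name)
{
    size_t need = strlen(dir) + 1 + strlen(name) + 1;
    char *buf = malloc(need);

    if (buf != NULL)
        snprintf(buf, need, "%s/%s", dir, name);
    return buf;
}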
static inline bool is_external_interrupt(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); }
Safe
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
4.913958072756869e+37
5
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <avi@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
0
static struct SessionHandle* gethandleathead(struct curl_llist *pipeline) { struct curl_llist_element *curr = pipeline->head; if(curr) { return (struct SessionHandle *) curr->ptr; } return NULL; }
Safe
[ "CWE-200" ]
curl
b3875606925536f82fc61f3114ac42f29eaf6945
2.9867745996348307e+37
9
curl_easy_duphandle: CURLOPT_COPYPOSTFIELDS read out of bounds When duplicating a handle, the data to post was duplicated using strdup() when it could be binary and contain zeroes and it was not even zero terminated! This caused read out of bounds crashes/segfaults. Since the lib/strdup.c file no longer is easily shared with the curl tool with this change, it now uses its own version instead. Bug: http://curl.haxx.se/docs/adv_20141105.html CVE: CVE-2014-3707 Reported-By: Symeon Paraschoudis
0
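The curl advisory above is about duplicating binary POST data with strdup(), which both truncates at the first NUL and can read past an unterminated buffer. A hedged sketch of a length-aware duplicate is shown below; curl's own replacement lives in its strdup.c, and this helper is only an illustration.

#include <stdlib.h>
#include <string.h>

/* Duplicate exactly len bytes; never relies on NUL termination. */
static void *memdup(const void *src, size_t len)
{
    void *dst = malloc(len ? len : 1);  /* keep a valid pointer for len == 0 */

    if (dst != NULL && len > 0)
        memcpy(dst, src, len);
    return dst;
}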
int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr, unsigned int mode) { return __ip_mc_join_group(sk, imr, mode); }
Safe
[ "CWE-362" ]
linux
23d2b94043ca8835bd1e67749020e839f396a1c2
1.2758611114844205e+38
5
igmp: Add ip_mc_list lock in ip_check_mc_rcu I got below panic when doing fuzz test: Kernel panic - not syncing: panic_on_warn set ... CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014 Call Trace: dump_stack_lvl+0x7a/0x9b panic+0x2cd/0x5af end_report.cold+0x5a/0x5a kasan_report+0xec/0x110 ip_check_mc_rcu+0x556/0x5d0 __mkroute_output+0x895/0x1740 ip_route_output_key_hash_rcu+0x2d0/0x1050 ip_route_output_key_hash+0x182/0x2e0 ip_route_output_flow+0x28/0x130 udp_sendmsg+0x165d/0x2280 udpv6_sendmsg+0x121e/0x24f0 inet6_sendmsg+0xf7/0x140 sock_sendmsg+0xe9/0x180 ____sys_sendmsg+0x2b8/0x7a0 ___sys_sendmsg+0xf0/0x160 __sys_sendmmsg+0x17e/0x3c0 __x64_sys_sendmmsg+0x9e/0x100 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x462eb9 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133 RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9 RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007 RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff It is one use-after-free in ip_check_mc_rcu. In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection. But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock. Signed-off-by: Liu Jian <liujian56@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
static double Hamming(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Offset cosine window function: .54 + .46 cos(pi x). */ const double cosine = cos((double) (MagickPI*x)); magick_unreferenced(resize_filter); return(0.54+0.46*cosine); }
Safe
[ "CWE-369" ]
ImageMagick
43539e67a47d2f8de832d33a5b26dc2a7a12294f
5.384513463612174e+37
11
https://github.com/ImageMagick/ImageMagick/issues/1718
0
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) { size_t short_len; size_t complete_len; /* no space left for name (+ NULL + type + len) */ if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) return ad_len; /* use complete name if present and fits */ complete_len = strlen(hdev->dev_name); if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, hdev->dev_name, complete_len + 1); /* use short name if present */ short_len = strlen(hdev->short_name); if (short_len) return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, hdev->short_name, short_len + 1); /* use shortened full name if present, we already know that name * is longer then HCI_MAX_SHORT_NAME_LENGTH */ if (complete_len) { u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, sizeof(name)); } return ad_len; }
Safe
[ "CWE-362" ]
linux
e2cb6b891ad2b8caa9131e3be70f45243df82a80
3.303075217077456e+38
36
bluetooth: eliminate the potential race condition when removing the HCI controller There is a possible race condition vulnerability between issuing a HCI command and removing the controller. Specifically, functions hci_req_sync() and hci_dev_do_close() can race each other like below: thread-A in hci_req_sync() | thread-B in hci_dev_do_close() | hci_req_sync_lock(hdev); test_bit(HCI_UP, &hdev->flags); | ... | test_and_clear_bit(HCI_UP, &hdev->flags) hci_req_sync_lock(hdev); | | In this commit we alter the sequence in function hci_req_sync(), so thread-A cannot issue the command after thread-B has cleared HCI_UP and closed the controller. Signed-off-by: Lin Ma <linma@zju.edu.cn> Cc: Marcel Holtmann <marcel@holtmann.org> Fixes: 7c6a329e4447 ("[Bluetooth] Fix regression from using default link policy") Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
0
void Dispatcher::printSpeed() { ++m_countPrint; if( m_countPrint > m_vDevices.size() ) { std::string strGPUs; double speedTotal = 0; unsigned int i = 0; for (auto & e : m_vDevices) { const auto curSpeed = e->m_speed.getSpeed(); speedTotal += curSpeed; strGPUs += " GPU" + toString(e->m_index) + ": " + formatSpeed(curSpeed); ++i; } const std::string strVT100ClearLine = "\33[2K\r"; std::cerr << strVT100ClearLine << "Total: " << formatSpeed(speedTotal) << " -" << strGPUs << '\r' << std::flush; m_countPrint = 0; } }
Safe
[ "CWE-703" ]
profanity
69ff010c14ff80ec14246772db6a245aa59e6689
1.5610308324198899e+38
18
[FIX] private key seed.
0
bool WebContents::OnGoToEntryOffset(int offset) { GoToOffset(offset); return false; }
Safe
[]
electron
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
1.8942602620634396e+37
4
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344) * fix: ensure ElectronBrowser mojo service is only bound to authorized render frames Notes: no-notes * refactor: extract electron API IPC to its own mojo interface * fix: just check main frame not primary main frame Co-authored-by: Samuel Attard <samuel.r.attard@gmail.com> Co-authored-by: Samuel Attard <sattard@salesforce.com>
0
PHP_FUNCTION(radius_strerror) { char *msg; radius_descriptor *raddesc; zval *z_radh; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &z_radh) == FAILURE) { return; } ZEND_FETCH_RESOURCE(raddesc, radius_descriptor *, &z_radh, -1, "rad_handle", le_radius); msg = (char *)rad_strerror(raddesc->radh); RETURN_STRINGL(msg, strlen(msg), 1); }
Safe
[ "CWE-119", "CWE-787" ]
php-radius
13c149b051f82b709e8d7cc32111e84b49d57234
2.41319883608671e+37
14
Fix a security issue in radius_get_vendor_attr(). The underlying rad_get_vendor_attr() function assumed that it would always be given valid VSA data. Indeed, the buffer length wasn't even passed in; the assumption was that the length field within the VSA structure would be valid. This could result in denial of service by providing a length that would be beyond the memory limit, or potential arbitrary memory access by providing a length greater than the actual data given. rad_get_vendor_attr() has been changed to require the raw data length be provided, and this is then used to check that the VSA is valid. Conflicts: radlib_vs.h
0
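The php-radius fix above makes the vendor-specific attribute parser take the real buffer length and validate the embedded length octet against it. The sketch below illustrates that validation on the standard RADIUS VSA layout (4-byte vendor id, 1-byte type, 1-byte length, value); the structure and function names are assumptions, not the library's API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct vsa {
    uint32_t      vendor;
    uint8_t       type;
    const uint8_t *data;
    size_t        data_len;
};

static int parse_vendor_attr(struct vsa *out, const uint8_t *buf, size_t len)
{
    /* 4-byte vendor id + 1-byte type + 1-byte length is the minimum */
    if (len < 6)
        return -1;

    uint8_t vsa_len = buf[5];
    /* the embedded length covers type+len+data and must fit in the buffer */
    if (vsa_len < 2 || (size_t)vsa_len > len - 4)
        return -1;

    memcpy(&out->vendor, buf, 4);   /* network byte order left as-is here */
    out->type     = buf[4];
    out->data     = buf + 6;
    out->data_len = (size_t)vsa_len - 2;
    return 0;
}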
//! Return iterator to one position after the last image of the list \const. const_iterator end() const { return _data + _width;
Safe
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
1.5451963751904005e+38
3
Fix other issues in 'CImg<T>::load_bmp()'.
0
static bool is_version_0 (void *opaque, int version_id) { return version_id == 0; }
Safe
[ "CWE-119" ]
qemu
52f91c3723932f8340fe36c8ec8b18a757c37b2b
2.6915499516839263e+38
4
zaurus: fix buffer overrun on invalid state load CVE-2013-4540 Within scoop_gpio_handler_update, if prev_level has a high bit set, then we get bit > 16 and that causes a buffer overrun. Since prev_level comes from wire indirectly, this can happen on invalid state load. Similarly for gpio_level and gpio_dir. To fix, limit to 16 bit. Reported-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: Juan Quintela <quintela@redhat.com>
0
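The zaurus fix above limits state restored from a saved VM image to the 16 bits the device actually implements before bit indices are derived from it. A hedged sketch of that post-load sanitation follows; the structure is a trimmed stand-in for the real scoop device state.

#include <stdint.h>

struct scoop_state {            /* illustrative subset of the device state */
    uint32_t gpio_level;
    uint32_t gpio_dir;
    uint32_t prev_level;
};

static void scoop_post_load(struct scoop_state *s)
{
    s->gpio_level &= 0xffff;
    s->gpio_dir   &= 0xffff;
    s->prev_level &= 0xffff;    /* bit indices derived from these stay < 16 */
}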
UdfDelete ( IN EFI_FILE_PROTOCOL *This ) { PRIVATE_UDF_FILE_DATA *PrivFileData; if (This == NULL) { return EFI_INVALID_PARAMETER; } PrivFileData = PRIVATE_UDF_FILE_DATA_FROM_THIS (This); (VOID)PrivFileData->FileIo.Close(This); return EFI_WARN_DELETE_FAILURE; }
Safe
[]
edk2
b9ae1705adfdd43668027a25a2b03c2e81960219
2.995835135025268e+38
16
MdeModulePkg/UdfDxe: Refine boundary checks for file/path name string REF:https://bugzilla.tianocore.org/show_bug.cgi?id=828 The commit refines the boundary checks for file/path name string to prevent possible buffer overrun. Cc: Ruiyu Ni <ruiyu.ni@intel.com> Cc: Jiewen Yao <jiewen.yao@intel.com> Contributed-under: TianoCore Contribution Agreement 1.1 Signed-off-by: Hao Wu <hao.a.wu@intel.com> Reviewed-by: Paulo Alcantara <palcantara@suse.de> Acked-by: Star Zeng <star.zeng@intel.com>
0
static void tcg_commit(MemoryListener *listener) { CPUAddressSpace *cpuas; AddressSpaceDispatch *d; /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); cpu_reloading_memory_map(); /* The CPU and TLB are protected by the iothread lock. * We reload the dispatch pointer now because cpu_reloading_memory_map() * may have split the RCU critical section. */ d = atomic_rcu_read(&cpuas->as->dispatch); atomic_rcu_set(&cpuas->memory_dispatch, d); tlb_flush(cpuas->cpu); }
Safe
[ "CWE-125" ]
qemu
04bf2526ce87f21b32c9acba1c5518708c243ad0
3.0885704157618777e+38
17
exec: use qemu_ram_ptr_length to access guest ram When accessing guest's ram block during DMA operation, use 'qemu_ram_ptr_length' to get ram block pointer. It ensures that DMA operation of given length is possible; And avoids any OOB memory access situations. Reported-by: Alex <broscutamaker@gmail.com> Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> Message-Id: <20170712123840.29328-1-ppandit@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
static int btrfs_device_init_dev_stats(struct btrfs_device *device, struct btrfs_path *path) { struct btrfs_dev_stats_item *ptr; struct extent_buffer *eb; struct btrfs_key key; int item_size; int i, ret, slot; if (!device->fs_info->dev_root) return 0; key.objectid = BTRFS_DEV_STATS_OBJECTID; key.type = BTRFS_PERSISTENT_ITEM_KEY; key.offset = device->devid; ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); if (ret) { for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) btrfs_dev_stat_set(device, i, 0); device->dev_stats_valid = 1; btrfs_release_path(path); return ret < 0 ? ret : 0; } slot = path->slots[0]; eb = path->nodes[0]; item_size = btrfs_item_size_nr(eb, slot); ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { if (item_size >= (1 + i) * sizeof(__le64)) btrfs_dev_stat_set(device, i, btrfs_dev_stats_value(eb, ptr, i)); else btrfs_dev_stat_set(device, i, 0); } device->dev_stats_valid = 1; btrfs_dev_stat_print_on_load(device); btrfs_release_path(path); return 0; }
Safe
[ "CWE-476", "CWE-703" ]
linux
e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091
3.271053442074086e+38
43
btrfs: fix NULL pointer dereference when deleting device by invalid id [BUG] It's easy to trigger NULL pointer dereference, just by removing a non-existing device id: # mkfs.btrfs -f -m single -d single /dev/test/scratch1 \ /dev/test/scratch2 # mount /dev/test/scratch1 /mnt/btrfs # btrfs device remove 3 /mnt/btrfs Then we have the following kernel NULL pointer dereference: BUG: kernel NULL pointer dereference, address: 0000000000000000 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP NOPTI CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs] btrfs_ioctl+0x18bb/0x3190 [btrfs] ? lock_is_held_type+0xa5/0x120 ? find_held_lock.constprop.0+0x2b/0x80 ? do_user_addr_fault+0x201/0x6a0 ? lock_release+0xd2/0x2d0 ? __x64_sys_ioctl+0x83/0xb0 __x64_sys_ioctl+0x83/0xb0 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae [CAUSE] Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly") moves the "missing" device path check into btrfs_rm_device(). But btrfs_rm_device() itself can have case where it only receives @devid, with NULL as @device_path. In that case, calling strcmp() on NULL will trigger the NULL pointer dereference. Before that commit, we handle the "missing" case inside btrfs_find_device_by_devspec(), which will not check @device_path at all if @devid is provided, thus no way to trigger the bug. [FIX] Before calling strcmp(), also make sure @device_path is not NULL. Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly") CC: stable@vger.kernel.org # 5.4+ Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com> Reviewed-by: Anand Jain <anand.jain@oracle.com> Signed-off-by: Qu Wenruo <wqu@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
0
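The btrfs fix above boils down to testing the device path for NULL before comparing it with the "missing" keyword, since removal by numeric id passes no path at all. A minimal hedged sketch of that guard:

#include <stdbool.h>
#include <string.h>

static bool is_missing_device_path(const char *device_path)
{
    /* device_path may legitimately be NULL when the device is named by id */
    return device_path != NULL && strcmp(device_path, "missing") == 0;
}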
GF_Err gf_fs_check_gl_provider(GF_FilterSession *session) { GF_Event evt; GF_Err e; const char *sOpt; void *os_disp_handler; if (!session->nb_gl_filters) return GF_OK; if (gf_list_count(session->gl_providers)) return GF_OK; if (session->gl_driver) return GF_OK; session->gl_driver = (GF_VideoOutput *) gf_module_load(GF_VIDEO_OUTPUT_INTERFACE, gf_opts_get_key("core", "video-output") ); if (!session->gl_driver) { GF_LOG(GF_LOG_ERROR, GF_LOG_FILTER, ("Failed to load a video output for OpenGL context support !\n")); return GF_IO_ERR; } if (!gf_opts_get_key("core", "video-output")) { gf_opts_set_key("core", "video-output", session->gl_driver->module_name); } session->gl_driver->hw_caps |= GF_VIDEO_HW_INTERNAL; session->gl_driver->on_event = fsess_on_event; session->gl_driver->evt_cbk_hdl = session; os_disp_handler = NULL; sOpt = gf_opts_get_key("Temp", "OSDisp"); if (sOpt) sscanf(sOpt, "%p", &os_disp_handler); e = session->gl_driver->Setup(session->gl_driver, NULL, os_disp_handler, GF_TERM_INIT_HIDE); if (e!=GF_OK) { GF_LOG(GF_LOG_WARNING, GF_LOG_FILTER, ("Failed to setup Video Driver %s!\n", session->gl_driver->module_name)); gf_modules_close_interface((GF_BaseInterface *)session->gl_driver); session->gl_driver = NULL; return e; } //and initialize GL context memset(&evt, 0, sizeof(GF_Event)); evt.type = GF_EVENT_VIDEO_SETUP; evt.setup.width = 128; evt.setup.height = 128; evt.setup.use_opengl = GF_TRUE; evt.setup.back_buffer = 1; //we anyway should'nt call swapBuffer/flush on this object evt.setup.disable_vsync = GF_TRUE; session->gl_driver->ProcessEvent(session->gl_driver, &evt); if (evt.setup.use_opengl) { gf_opengl_init(); } return GF_OK; }
Safe
[ "CWE-787" ]
gpac
da37ec8582266983d0ec4b7550ec907401ec441e
9.262588590266771e+36
54
fixed crashes for very long path - cf #1908
0
static void display_refresh(struct DisplayState *ds) { if (qxl0->mode == QXL_MODE_VGA) { qemu_spice_display_refresh(&qxl0->ssd); } }
Safe
[]
qemu-kvm
5ff4e36c804157bd84af43c139f8cd3a59722db9
2.3823481910301908e+38
6
qxl: async io support using new spice api Some of the QXL port i/o commands are waiting for the spice server to complete certain actions. Add async versions for these commands, so we don't block the vcpu while the spice server processes the command. Instead the qxl device will raise an IRQ when done. The async command processing relies on an added QXLInterface::async_complete and added QXLWorker::*_async additions, in spice server qxl >= 3.1 Signed-off-by: Gerd Hoffmann <kraxel@redhat.com> Signed-off-by: Alon Levy <alevy@redhat.com>
0
static int order_lookups(const void *_otl1, const void *_otl2) { const OTLookup *otl1 = *(const OTLookup **) _otl1, *otl2 = *(const OTLookup **) _otl2; return( otl1->lookup_index - otl2->lookup_index ); }
Safe
[ "CWE-416" ]
fontforge
048a91e2682c1a8936ae34dbc7bd70291ec05410
2.5728471589917693e+38
4
Fix for #4084 Use-after-free (heap) in the SFD_GetFontMetaData() function Fix for #4086 NULL pointer dereference in the SFDGetSpiros() function Fix for #4088 NULL pointer dereference in the SFD_AssignLookups() function Add empty sf->fontname string if it isn't set, fixing #4089 #4090 and many other potential issues (many downstream calls to strlen() on the value).
0
static int legacy_init_fs_context(struct fs_context *fc) { fc->fs_private = kzalloc(sizeof(struct legacy_fs_context), GFP_KERNEL_ACCOUNT); if (!fc->fs_private) return -ENOMEM; fc->ops = &legacy_fs_context_ops; return 0; }
Safe
[ "CWE-787" ]
linux
722d94847de29310e8aa03fcbdb41fc92c521756
4.197507784369395e+37
8
vfs: fs_context: fix up param length parsing in legacy_parse_param The "PAGE_SIZE - 2 - size" calculation in legacy_parse_param() is an unsigned type so a large value of "size" results in a high positive value instead of a negative value as expected. Fix this by getting rid of the subtraction. Signed-off-by: Jamie Hill-Daniel <jamie@hill-daniel.co.uk> Signed-off-by: William Liu <willsroot@protonmail.com> Tested-by: Salvatore Bonaccorso <carnil@debian.org> Tested-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com> Acked-by: Dan Carpenter <dan.carpenter@oracle.com> Acked-by: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
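The fs_context fix above removes an unsigned subtraction whose result wraps instead of going negative. The sketch below contrasts the wrapping form with the addition form the commit adopts; PAGE_SIZE and the helper names are stand-ins for illustration.

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

/* Buggy form: when have > PAGE_SIZE - 2 the right-hand side wraps to a
 * huge unsigned value and the check always passes. */
static bool param_fits_buggy(size_t have, size_t add)
{
    return add <= PAGE_SIZE - 2 - have;
}

/* Fixed form: no subtraction, so nothing wraps for the bounded sizes the
 * caller already enforces. */
static bool param_fits_fixed(size_t have, size_t add)
{
    return have + add + 2 <= PAGE_SIZE;
}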
read_cupsd_conf(cups_file_t *fp) /* I - File to read from */ { int linenum; /* Current line number */ char line[HTTP_MAX_BUFFER], /* Line from file */ temp[HTTP_MAX_BUFFER], /* Temporary buffer for value */ *value, /* Pointer to value */ *valueptr; /* Pointer into value */ int valuelen; /* Length of value */ http_addrlist_t *addrlist, /* Address list */ *addr; /* Current address */ cups_file_t *incfile; /* Include file */ char incname[1024]; /* Include filename */ /* * Loop through each line in the file... */ linenum = 0; while (cupsFileGetConf(fp, line, sizeof(line), &value, &linenum)) { /* * Decode the directive... */ if (!_cups_strcasecmp(line, "Include") && value) { /* * Include filename */ if (value[0] == '/') strlcpy(incname, value, sizeof(incname)); else snprintf(incname, sizeof(incname), "%s/%s", ServerRoot, value); if ((incfile = cupsFileOpen(incname, "rb")) == NULL) cupsdLogMessage(CUPSD_LOG_ERROR, "Unable to include config file \"%s\" - %s", incname, strerror(errno)); else { read_cupsd_conf(incfile); cupsFileClose(incfile); } } else if (!_cups_strcasecmp(line, "<Location") && value) { /* * <Location path> */ linenum = read_location(fp, value, linenum); if (linenum == 0) return (0); } else if (!_cups_strcasecmp(line, "<Policy") && value) { /* * <Policy name> */ linenum = read_policy(fp, value, linenum); if (linenum == 0) return (0); } else if (!_cups_strcasecmp(line, "FaxRetryInterval") && value) { JobRetryInterval = atoi(value); cupsdLogMessage(CUPSD_LOG_WARN, "FaxRetryInterval is deprecated; use " "JobRetryInterval on line %d of %s.", linenum, ConfigurationFile); } else if (!_cups_strcasecmp(line, "FaxRetryLimit") && value) { JobRetryLimit = atoi(value); cupsdLogMessage(CUPSD_LOG_WARN, "FaxRetryLimit is deprecated; use " "JobRetryLimit on line %d of %s.", linenum, ConfigurationFile); } #ifdef HAVE_SSL else if (!_cups_strcasecmp(line, "SSLOptions")) { /* * SSLOptions [AllowRC4] [AllowSSL3] [AllowDH] [DenyCBC] [DenyTLS1.0] [None] */ int options = _HTTP_TLS_NONE,/* SSL/TLS options */ min_version = _HTTP_TLS_1_0, max_version = _HTTP_TLS_MAX; if (value) { char *start, /* Start of option */ *end; /* End of option */ for (start = value; *start; start = end) { /* * Find end of keyword... */ end = start; while (*end && !_cups_isspace(*end)) end ++; if (*end) *end++ = '\0'; /* * Compare... 
*/ if (!_cups_strcasecmp(start, "AllowRC4")) options |= _HTTP_TLS_ALLOW_RC4; else if (!_cups_strcasecmp(start, "AllowSSL3")) min_version = _HTTP_TLS_SSL3; else if (!_cups_strcasecmp(start, "AllowDH")) options |= _HTTP_TLS_ALLOW_DH; else if (!_cups_strcasecmp(start, "DenyCBC")) options |= _HTTP_TLS_DENY_CBC; else if (!_cups_strcasecmp(start, "DenyTLS1.0")) min_version = _HTTP_TLS_1_1; else if (!_cups_strcasecmp(start, "MaxTLS1.0")) max_version = _HTTP_TLS_1_0; else if (!_cups_strcasecmp(start, "MaxTLS1.1")) max_version = _HTTP_TLS_1_1; else if (!_cups_strcasecmp(start, "MaxTLS1.2")) max_version = _HTTP_TLS_1_2; else if (!_cups_strcasecmp(start, "MaxTLS1.3")) max_version = _HTTP_TLS_1_3; else if (!_cups_strcasecmp(start, "MinTLS1.0")) min_version = _HTTP_TLS_1_0; else if (!_cups_strcasecmp(start, "MinTLS1.1")) min_version = _HTTP_TLS_1_1; else if (!_cups_strcasecmp(start, "MinTLS1.2")) min_version = _HTTP_TLS_1_2; else if (!_cups_strcasecmp(start, "MinTLS1.3")) min_version = _HTTP_TLS_1_3; else if (!_cups_strcasecmp(start, "None")) options = _HTTP_TLS_NONE; else if (_cups_strcasecmp(start, "NoEmptyFragments")) cupsdLogMessage(CUPSD_LOG_WARN, "Unknown SSL option %s at line %d.", start, linenum); } } _httpTLSSetOptions(options, min_version, max_version); } #endif /* HAVE_SSL */ else if ((!_cups_strcasecmp(line, "Port") || !_cups_strcasecmp(line, "Listen") #ifdef HAVE_SSL || !_cups_strcasecmp(line, "SSLPort") || !_cups_strcasecmp(line, "SSLListen") #endif /* HAVE_SSL */ ) && value) { /* * Add listening address(es) to the list... */ cupsd_listener_t *lis; /* New listeners array */ /* * Get the address list... */ addrlist = get_address(value, IPP_PORT); if (!addrlist) { cupsdLogMessage(CUPSD_LOG_ERROR, "Bad %s address %s at line %d.", line, value, linenum); continue; } /* * Add each address... */ for (addr = addrlist; addr; addr = addr->next) { /* * See if this address is already present... */ for (lis = (cupsd_listener_t *)cupsArrayFirst(Listeners); lis; lis = (cupsd_listener_t *)cupsArrayNext(Listeners)) if (httpAddrEqual(&(addr->addr), &(lis->address)) && httpAddrPort(&(addr->addr)) == httpAddrPort(&(lis->address))) break; if (lis) { #ifdef HAVE_ONDEMAND if (!lis->on_demand) #endif /* HAVE_ONDEMAND */ { httpAddrString(&lis->address, temp, sizeof(temp)); cupsdLogMessage(CUPSD_LOG_WARN, "Duplicate listen address \"%s\" ignored.", temp); } continue; } /* * Allocate another listener... */ if (!Listeners) Listeners = cupsArrayNew(NULL, NULL); if (!Listeners) { cupsdLogMessage(CUPSD_LOG_ERROR, "Unable to allocate %s at line %d - %s.", line, linenum, strerror(errno)); break; } if ((lis = calloc(1, sizeof(cupsd_listener_t))) == NULL) { cupsdLogMessage(CUPSD_LOG_ERROR, "Unable to allocate %s at line %d - %s.", line, linenum, strerror(errno)); break; } cupsArrayAdd(Listeners, lis); /* * Copy the current address and log it... */ memcpy(&(lis->address), &(addr->addr), sizeof(lis->address)); lis->fd = -1; #ifdef HAVE_SSL if (!_cups_strcasecmp(line, "SSLPort") || !_cups_strcasecmp(line, "SSLListen")) lis->encryption = HTTP_ENCRYPT_ALWAYS; #endif /* HAVE_SSL */ httpAddrString(&lis->address, temp, sizeof(temp)); #ifdef AF_LOCAL if (lis->address.addr.sa_family == AF_LOCAL) cupsdLogMessage(CUPSD_LOG_INFO, "Listening to %s (Domain)", temp); else #endif /* AF_LOCAL */ cupsdLogMessage(CUPSD_LOG_INFO, "Listening to %s:%d (IPv%d)", temp, httpAddrPort(&(lis->address)), httpAddrFamily(&(lis->address)) == AF_INET ? 
4 : 6); if (!httpAddrLocalhost(&(lis->address))) RemotePort = httpAddrPort(&(lis->address)); } /* * Free the list... */ httpAddrFreeList(addrlist); } else if (!_cups_strcasecmp(line, "BrowseProtocols") || !_cups_strcasecmp(line, "BrowseLocalProtocols")) { /* * "BrowseProtocols name [... name]" * "BrowseLocalProtocols name [... name]" */ int protocols = parse_protocols(value); if (protocols < 0) { cupsdLogMessage(CUPSD_LOG_ERROR, "Unknown browse protocol \"%s\" on line %d of %s.", value, linenum, ConfigurationFile); break; } BrowseLocalProtocols = protocols; } else if (!_cups_strcasecmp(line, "DefaultAuthType") && value) { /* * DefaultAuthType {basic,digest,basicdigest,negotiate} */ if (!_cups_strcasecmp(value, "none")) default_auth_type = CUPSD_AUTH_NONE; else if (!_cups_strcasecmp(value, "basic")) default_auth_type = CUPSD_AUTH_BASIC; else if (!_cups_strcasecmp(value, "negotiate")) default_auth_type = CUPSD_AUTH_NEGOTIATE; else if (!_cups_strcasecmp(value, "auto")) default_auth_type = CUPSD_AUTH_AUTO; else { cupsdLogMessage(CUPSD_LOG_WARN, "Unknown default authorization type %s on line %d of %s.", value, linenum, ConfigurationFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } } #ifdef HAVE_SSL else if (!_cups_strcasecmp(line, "DefaultEncryption")) { /* * DefaultEncryption {Never,IfRequested,Required} */ if (!value || !_cups_strcasecmp(value, "never")) DefaultEncryption = HTTP_ENCRYPT_NEVER; else if (!_cups_strcasecmp(value, "required")) DefaultEncryption = HTTP_ENCRYPT_REQUIRED; else if (!_cups_strcasecmp(value, "ifrequested")) DefaultEncryption = HTTP_ENCRYPT_IF_REQUESTED; else { cupsdLogMessage(CUPSD_LOG_WARN, "Unknown default encryption %s on line %d of %s.", value, linenum, ConfigurationFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } } #endif /* HAVE_SSL */ else if (!_cups_strcasecmp(line, "HostNameLookups") && value) { /* * Do hostname lookups? */ if (!_cups_strcasecmp(value, "off") || !_cups_strcasecmp(value, "no") || !_cups_strcasecmp(value, "false")) HostNameLookups = 0; else if (!_cups_strcasecmp(value, "on") || !_cups_strcasecmp(value, "yes") || !_cups_strcasecmp(value, "true")) HostNameLookups = 1; else if (!_cups_strcasecmp(value, "double")) HostNameLookups = 2; else cupsdLogMessage(CUPSD_LOG_WARN, "Unknown HostNameLookups %s on line %d of %s.", value, linenum, ConfigurationFile); } else if (!_cups_strcasecmp(line, "AccessLogLevel") && value) { /* * Amount of logging to do to access log... */ if (!_cups_strcasecmp(value, "all")) AccessLogLevel = CUPSD_ACCESSLOG_ALL; else if (!_cups_strcasecmp(value, "actions")) AccessLogLevel = CUPSD_ACCESSLOG_ACTIONS; else if (!_cups_strcasecmp(value, "config")) AccessLogLevel = CUPSD_ACCESSLOG_CONFIG; else if (!_cups_strcasecmp(value, "none")) AccessLogLevel = CUPSD_ACCESSLOG_NONE; else cupsdLogMessage(CUPSD_LOG_WARN, "Unknown AccessLogLevel %s on line %d of %s.", value, linenum, ConfigurationFile); } else if (!_cups_strcasecmp(line, "LogLevel") && value) { /* * Amount of logging to do to error log... 
*/ if (!_cups_strcasecmp(value, "debug2")) LogLevel = CUPSD_LOG_DEBUG2; else if (!_cups_strcasecmp(value, "debug")) LogLevel = CUPSD_LOG_DEBUG; else if (!_cups_strcasecmp(value, "info")) LogLevel = CUPSD_LOG_INFO; else if (!_cups_strcasecmp(value, "notice")) LogLevel = CUPSD_LOG_NOTICE; else if (!_cups_strcasecmp(value, "warn")) LogLevel = CUPSD_LOG_WARN; else if (!_cups_strcasecmp(value, "error")) LogLevel = CUPSD_LOG_ERROR; else if (!_cups_strcasecmp(value, "crit")) LogLevel = CUPSD_LOG_CRIT; else if (!_cups_strcasecmp(value, "alert")) LogLevel = CUPSD_LOG_ALERT; else if (!_cups_strcasecmp(value, "emerg")) LogLevel = CUPSD_LOG_EMERG; else if (!_cups_strcasecmp(value, "none")) LogLevel = CUPSD_LOG_NONE; else cupsdLogMessage(CUPSD_LOG_WARN, "Unknown LogLevel %s on line %d of %s.", value, linenum, ConfigurationFile); } else if (!_cups_strcasecmp(line, "LogTimeFormat") && value) { /* * Amount of logging to do to error log... */ if (!_cups_strcasecmp(value, "standard")) LogTimeFormat = CUPSD_TIME_STANDARD; else if (!_cups_strcasecmp(value, "usecs")) LogTimeFormat = CUPSD_TIME_USECS; else cupsdLogMessage(CUPSD_LOG_WARN, "Unknown LogTimeFormat %s on line %d of %s.", value, linenum, ConfigurationFile); } else if (!_cups_strcasecmp(line, "ServerTokens") && value) { /* * Set the string used for the Server header... */ struct utsname plat; /* Platform info */ uname(&plat); if (!_cups_strcasecmp(value, "ProductOnly")) cupsdSetString(&ServerHeader, "CUPS IPP"); else if (!_cups_strcasecmp(value, "Major")) cupsdSetStringf(&ServerHeader, "CUPS/%d IPP/2", CUPS_VERSION_MAJOR); else if (!_cups_strcasecmp(value, "Minor")) cupsdSetStringf(&ServerHeader, "CUPS/%d.%d IPP/2.1", CUPS_VERSION_MAJOR, CUPS_VERSION_MINOR); else if (!_cups_strcasecmp(value, "Minimal")) cupsdSetString(&ServerHeader, CUPS_MINIMAL " IPP/2.1"); else if (!_cups_strcasecmp(value, "OS")) cupsdSetStringf(&ServerHeader, CUPS_MINIMAL " (%s %s) IPP/2.1", plat.sysname, plat.release); else if (!_cups_strcasecmp(value, "Full")) cupsdSetStringf(&ServerHeader, CUPS_MINIMAL " (%s %s; %s) IPP/2.1", plat.sysname, plat.release, plat.machine); else if (!_cups_strcasecmp(value, "None")) cupsdSetString(&ServerHeader, ""); else cupsdLogMessage(CUPSD_LOG_WARN, "Unknown ServerTokens %s on line %d of %s.", value, linenum, ConfigurationFile); } else if (!_cups_strcasecmp(line, "PassEnv") && value) { /* * PassEnv variable [... variable] */ for (; *value;) { for (valuelen = 0; value[valuelen]; valuelen ++) if (_cups_isspace(value[valuelen]) || value[valuelen] == ',') break; if (value[valuelen]) { value[valuelen] = '\0'; valuelen ++; } cupsdSetEnv(value, NULL); for (value += valuelen; *value; value ++) if (!_cups_isspace(*value) || *value != ',') break; } } else if (!_cups_strcasecmp(line, "ServerAlias") && value) { /* * ServerAlias name [... name] */ if (!ServerAlias) ServerAlias = cupsArrayNew(NULL, NULL); for (; *value;) { for (valuelen = 0; value[valuelen]; valuelen ++) if (_cups_isspace(value[valuelen]) || value[valuelen] == ',') break; if (value[valuelen]) { value[valuelen] = '\0'; valuelen ++; } cupsdAddAlias(ServerAlias, value); for (value += valuelen; *value; value ++) if (!_cups_isspace(*value) || *value != ',') break; } } else if (!_cups_strcasecmp(line, "SetEnv") && value) { /* * SetEnv variable value */ for (valueptr = value; *valueptr && !isspace(*valueptr & 255); valueptr ++); if (*valueptr) { /* * Found a value... 
*/ while (isspace(*valueptr & 255)) *valueptr++ = '\0'; cupsdSetEnv(value, valueptr); } else cupsdLogMessage(CUPSD_LOG_ERROR, "Missing value for SetEnv directive on line %d of %s.", linenum, ConfigurationFile); } else if (!_cups_strcasecmp(line, "AccessLog") || !_cups_strcasecmp(line, "CacheDir") || !_cups_strcasecmp(line, "ConfigFilePerm") || !_cups_strcasecmp(line, "DataDir") || !_cups_strcasecmp(line, "DocumentRoot") || !_cups_strcasecmp(line, "ErrorLog") || !_cups_strcasecmp(line, "FatalErrors") || !_cups_strcasecmp(line, "FileDevice") || !_cups_strcasecmp(line, "FontPath") || !_cups_strcasecmp(line, "Group") || !_cups_strcasecmp(line, "LogFilePerm") || !_cups_strcasecmp(line, "LPDConfigFile") || !_cups_strcasecmp(line, "PageLog") || !_cups_strcasecmp(line, "Printcap") || !_cups_strcasecmp(line, "PrintcapFormat") || !_cups_strcasecmp(line, "RemoteRoot") || !_cups_strcasecmp(line, "RequestRoot") || !_cups_strcasecmp(line, "ServerBin") || !_cups_strcasecmp(line, "ServerCertificate") || !_cups_strcasecmp(line, "ServerKey") || !_cups_strcasecmp(line, "ServerKeychain") || !_cups_strcasecmp(line, "ServerRoot") || !_cups_strcasecmp(line, "SMBConfigFile") || !_cups_strcasecmp(line, "StateDir") || !_cups_strcasecmp(line, "SystemGroup") || !_cups_strcasecmp(line, "SystemGroupAuthKey") || !_cups_strcasecmp(line, "TempDir") || !_cups_strcasecmp(line, "User")) { cupsdLogMessage(CUPSD_LOG_INFO, "Please move \"%s%s%s\" on line %d of %s to the %s file; " "this will become an error in a future release.", line, value ? " " : "", value ? value : "", linenum, ConfigurationFile, CupsFilesFile); } else parse_variable(ConfigurationFile, linenum, line, value, sizeof(cupsd_vars) / sizeof(cupsd_vars[0]), cupsd_vars); } return (1); }
Vulnerable
[]
cups
d47f6aec436e0e9df6554436e391471097686ecc
2.5883142763435275e+38
564
Fix local privilege escalation to root and sandbox bypasses in scheduler (rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581)
1
PHP_FUNCTION(openssl_x509_export_to_file) { X509 * cert; zval ** zcert; zend_bool notext = 1; BIO * bio_out; long certresource; char * filename; int filename_len; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Zp|b", &zcert, &filename, &filename_len, &notext) == FAILURE) { return; } RETVAL_FALSE; cert = php_openssl_x509_from_zval(zcert, 0, &certresource TSRMLS_CC); if (cert == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "cannot get cert from parameter 1"); return; } if (php_openssl_open_base_dir_chk(filename TSRMLS_CC)) { return; } bio_out = BIO_new_file(filename, "w"); if (bio_out) { if (!notext) { X509_print(bio_out, cert); } PEM_write_bio_X509(bio_out, cert); RETVAL_TRUE; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "error opening file %s", filename); } if (certresource == -1 && cert) { X509_free(cert); } BIO_free(bio_out); }
Safe
[ "CWE-20" ]
php-src
2874696a5a8d46639d261571f915c493cd875897
1.6472145851439837e+37
41
Fix CVE-2013-4073 - handling of certs with null bytes
0
void skb_set_dev(struct sk_buff *skb, struct net_device *dev) { skb_dst_drop(skb); if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) { secpath_reset(skb); nf_reset(skb); skb_init_secmark(skb); skb->mark = 0; skb->priority = 0; skb->nf_trace = 0; skb->ipvs_property = 0; #ifdef CONFIG_NET_SCHED skb->tc_index = 0; #endif } skb->dev = dev; }
Safe
[ "CWE-399" ]
linux
6ec82562ffc6f297d0de36d65776cff8e5704867
2.059669988987662e+38
17
veth: Dont kfree_skb() after dev_forward_skb() In case of congestion, netif_rx() frees the skb, so we must assume dev_forward_skb() also consume skb. Bug introduced by commit 445409602c092 (veth: move loopback logic to common location) We must change dev_forward_skb() to always consume skb, and veth to not double free it. Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3 Reported-by: Martín Ferrari <martin.ferrari@gmail.com> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
export_remote_greeter_interface (GdmSession *self, GDBusConnection *connection) { GdmDBusRemoteGreeter *remote_greeter_interface; remote_greeter_interface = GDM_DBUS_REMOTE_GREETER (gdm_dbus_remote_greeter_skeleton_new ()); g_signal_connect (remote_greeter_interface, "handle-disconnect", G_CALLBACK (gdm_session_handle_client_disconnect), self); g_dbus_interface_skeleton_export (G_DBUS_INTERFACE_SKELETON (remote_greeter_interface), connection, GDM_SESSION_DBUS_OBJECT_PATH, NULL); self->priv->remote_greeter_interface = remote_greeter_interface; }
Safe
[]
gdm
5ac224602f1d603aac5eaa72e1760d3e33a26f0a
1.8846886778739997e+38
20
session: disconnect signals from worker proxy when conversation is freed We don't want an outstanding reference on the worker proxy to lead to signal handlers getting dispatched after the conversation is freed. https://bugzilla.gnome.org/show_bug.cgi?id=758032
0
defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *attId, XML_Bool isCdata, XML_Bool isId, const XML_Char *value, XML_Parser parser) { DEFAULT_ATTRIBUTE *att; if (value || isId) { /* The handling of default attributes gets messed up if we have a default which duplicates a non-default. */ int i; for (i = 0; i < type->nDefaultAtts; i++) if (attId == type->defaultAtts[i].id) return 1; if (isId && !type->idAtt && !attId->xmlns) type->idAtt = attId; } if (type->nDefaultAtts == type->allocDefaultAtts) { if (type->allocDefaultAtts == 0) { type->allocDefaultAtts = 8; type->defaultAtts = (DEFAULT_ATTRIBUTE *)MALLOC(type->allocDefaultAtts * sizeof(DEFAULT_ATTRIBUTE)); if (!type->defaultAtts) return 0; } else { DEFAULT_ATTRIBUTE *temp; int count = type->allocDefaultAtts * 2; temp = (DEFAULT_ATTRIBUTE *) REALLOC(type->defaultAtts, (count * sizeof(DEFAULT_ATTRIBUTE))); if (temp == NULL) return 0; type->allocDefaultAtts = count; type->defaultAtts = temp; } } att = type->defaultAtts + type->nDefaultAtts; att->id = attId; att->value = value; att->isCdata = isCdata; if (!isCdata) attId->maybeTokenized = XML_TRUE; type->nDefaultAtts += 1; return 1; }
Safe
[ "CWE-119" ]
libexpat
ba0f9c3b40c264b8dd392e02a7a060a8fa54f032
1.946584737195108e+38
42
CVE-2015-1283 Sanity check size calculations. r=peterv, a=abillings https://sourceforge.net/p/expat/bugs/528/
0
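The expat record above (CVE-2015-1283) is about sanity-checking size calculations before growing the default-attribute array. A standalone sketch of an overflow-checked doubling reallocation, with hypothetical names; the original patch follows the same idea inside libexpat's own allocator wrappers.

    #include <stdint.h>
    #include <stdlib.h>

    /* Double the capacity of an array of 'elem_size'-byte elements,
     * refusing any growth whose allocation size would overflow. */
    static void *grow_array(void *p, size_t *cap, size_t elem_size)
    {
        size_t new_cap = *cap ? *cap * 2 : 8;
        if (new_cap < *cap || new_cap > SIZE_MAX / elem_size)
            return NULL;                  /* size calculation would overflow */
        void *q = realloc(p, new_cap * elem_size);
        if (q)
            *cap = new_cap;
        return q;
    }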
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) { return !fc->initialized || (for_background && fc->blocked); }
Safe
[ "CWE-416" ]
linux
15fab63e1e57be9fdb5eec1bbc5916e9825e9acb
5.189911409724859e+37
4
fs: prevent page refcount overflow in pipe_buf_get Change pipe_buf_get() to return a bool indicating whether it succeeded in raising the refcount of the page (if the thing in the pipe is a page). This removes another mechanism for overflowing the page refcount. All callers converted to handle a failure. Reported-by: Jann Horn <jannh@google.com> Signed-off-by: Matthew Wilcox <willy@infradead.org> Cc: stable@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
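The record above describes changing pipe_buf_get() to return a bool so callers learn whether the page refcount could actually be raised, closing a refcount-overflow path. A generic userspace sketch of a checked increment, with a hypothetical type rather than the kernel API:

    #include <limits.h>
    #include <stdbool.h>

    struct object { int refcount; };

    /* Refuse to raise the refcount if doing so could overflow it. */
    static bool object_try_get(struct object *o)
    {
        if (o->refcount >= INT_MAX - 1)
            return false;         /* caller must treat this as failure */
        o->refcount++;
        return true;
    }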
static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, const struct nfs_server *server, uint32_t *gid, int may_sleep) { uint32_t len; __be32 *p; int ret = 0; *gid = -2; if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; if (!may_sleep) { /* do nothing */ } else if (len < XDR_MAX_NETOBJ) { if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0) ret = NFS_ATTR_FATTR_GROUP; else dprintk("%s: nfs_map_group_to_gid failed!\n", __func__); } else dprintk("%s: name too long (%u)!\n", __func__, len); bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP; } dprintk("%s: gid=%d\n", __func__, (int)*gid); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; }
Safe
[ "CWE-703", "CWE-189" ]
linux
bf118a342f10dafe44b14451a1392c3254629a1f
1.853282321017135e+38
37
NFSv4: include bitmap in nfsv4 get acl data The NFSv4 bitmap size is unbounded: a server can return an arbitrary sized bitmap in an FATTR4_WORD0_ACL request. Replace using the nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data xdr length to the (cached) acl page data. This is a general solution to commit e5012d1f "NFSv4.1: update nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead when getting ACLs. Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved. Cc: stable@kernel.org Signed-off-by: Andy Adamson <andros@netapp.com> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
0
part_of_refkey(TABLE *table,Field *field) { JOIN_TAB *join_tab= table->reginfo.join_tab; if (!join_tab) return (Item*) 0; // field from outer non-select (UPDATE,...) uint ref_parts= join_tab->ref.key_parts; if (ref_parts) /* if it's ref/eq_ref/ref_or_null */ { uint key= join_tab->ref.key; KEY *key_info= join_tab->get_keyinfo_by_key_no(key); KEY_PART_INFO *key_part= key_info->key_part; for (uint part=0 ; part < ref_parts ; part++,key_part++) { if (field->eq(key_part->field)) { /* Found the field in the key. Check that 1. ref_or_null doesn't alternate this component between a value and a NULL 2. index fully covers the key */ if (part != join_tab->ref.null_ref_part && // (1) !(key_part->key_part_flag & HA_PART_KEY_SEG)) // (2) { return join_tab->ref.items[part]; } break; } } } return (Item*) 0; }
Safe
[]
server
ff77a09bda884fe6bf3917eb29b9d3a2f53f919b
3.2606228955037826e+38
34
MDEV-22464 Server crash on UPDATE with nested subquery Uninitialized ref_pointer_array[] because setup_fields() got empty fields list. mysql_multi_update() for some reason does that by substituting the fields list with empty total_list for the mysql_select() call (looks like wrong merge since total_list is not used anywhere else and is always empty). The fix would be to return back the original fields list. But this fails update_use_source.test case: --error ER_BAD_FIELD_ERROR update v1 set t1c1=2 order by 1; Actually not failing the above seems to be ok. The other fix would be to keep resolve_in_select_list false (and that keeps outer context from being resolved in Item_ref::fix_fields()). This fix is more consistent with how SELECT behaves: --error ER_SUBQUERY_NO_1_ROW select a from t1 where a= (select 2 from t1 having (a = 3)); So this patch implements this fix.
0
static uint8_t req_get_seid(struct pending_req *req) { if (req->signal_id == AVDTP_DISCOVER) return 0; return ((struct seid_req *) (req->data))->acp_seid; }
Safe
[ "CWE-703" ]
bluez
7a80d2096f1b7125085e21448112aa02f49f5e9a
1.879818474047966e+38
7
avdtp: Fix accepting invalid/malformed capabilities Check if capabilities are valid before attempting to copy them.
0
MONGO_EXPORT int mongo_write_concern_finish( mongo_write_concern *write_concern ) { bson *command; /* Destory any existing serialized write concern object and reuse it. */ if( write_concern->cmd ) { bson_destroy( write_concern->cmd ); command = write_concern->cmd; } else command = (bson *)bson_malloc( sizeof( bson ) ); if( !command ) { return MONGO_ERROR; } bson_init( command ); bson_append_int( command, "getlasterror", 1 ); if( write_concern->mode ) { bson_append_string( command, "w", write_concern->mode ); } else if( write_concern->w && write_concern->w > 1 ) { bson_append_int( command, "w", write_concern->w ); } if( write_concern->wtimeout ) { bson_append_int( command, "wtimeout", write_concern->wtimeout ); } if( write_concern->j ) { bson_append_int( command, "j", write_concern->j ); } if( write_concern->fsync ) { bson_append_int( command, "fsync", write_concern->fsync ); } bson_finish( command ); /* write_concern now owns the BSON command object. * This is freed in mongo_write_concern_destroy(). */ write_concern->cmd = command; return MONGO_OK; }
Safe
[ "CWE-190" ]
mongo-c-driver-legacy
1a1f5e26a4309480d88598913f9eebf9e9cba8ca
3.989105752254089e+37
47
don't mix up int and size_t (first pass to fix that)
0
void bio_unmap_user(struct bio *bio) { __bio_unmap_user(bio); bio_put(bio); }
Safe
[ "CWE-772", "CWE-787" ]
linux
95d78c28b5a85bacbc29b8dba7c04babb9b0d467
2.990886171487564e+38
5
fix unbalanced page refcounting in bio_map_user_iov bio_map_user_iov and bio_unmap_user do unbalanced pages refcounting if IO vector has small consecutive buffers belonging to the same page. bio_add_pc_page merges them into one, but the page reference is never dropped. Cc: stable@vger.kernel.org Signed-off-by: Vitaly Mayatskikh <v.mayatskih@gmail.com> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
0
float ComputeIntersectionOverUnion(const TfLiteTensor* decoded_boxes, const int i, const int j) { auto& box_i = ReInterpretTensor<const BoxCornerEncoding*>(decoded_boxes)[i]; auto& box_j = ReInterpretTensor<const BoxCornerEncoding*>(decoded_boxes)[j]; const float area_i = (box_i.ymax - box_i.ymin) * (box_i.xmax - box_i.xmin); const float area_j = (box_j.ymax - box_j.ymin) * (box_j.xmax - box_j.xmin); if (area_i <= 0 || area_j <= 0) return 0.0; const float intersection_ymin = std::max<float>(box_i.ymin, box_j.ymin); const float intersection_xmin = std::max<float>(box_i.xmin, box_j.xmin); const float intersection_ymax = std::min<float>(box_i.ymax, box_j.ymax); const float intersection_xmax = std::min<float>(box_i.xmax, box_j.xmax); const float intersection_area = std::max<float>(intersection_ymax - intersection_ymin, 0.0) * std::max<float>(intersection_xmax - intersection_xmin, 0.0); return intersection_area / (area_i + area_j - intersection_area); }
Safe
[ "CWE-125", "CWE-787" ]
tensorflow
1970c2158b1ffa416d159d03c3370b9a462aee35
7.047170976076981e+37
16
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
0
void sqlite3VdbeVerifyNoResultRow(Vdbe *p){ int i; for(i=0; i<p->nOp; i++){ assert( p->aOp[i].opcode!=OP_ResultRow ); } }
Safe
[ "CWE-755" ]
sqlite
8654186b0236d556aa85528c2573ee0b6ab71be3
2.8598030135299374e+38
6
When an error occurs while rewriting the parser tree for window functions in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set, and make sure that this shuts down any subsequent code generation that might depend on the transformations that were implemented. This fixes a problem discovered by the Yongheng and Rui fuzzer. FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
0
FLAC__bool file_eof_callback_(const FLAC__StreamDecoder *decoder, void *client_data) { (void)client_data; return feof(decoder->private_->file)? true : false; }
Safe
[ "CWE-119" ]
flac
5b3033a2b355068c11fe637e14ac742d273f076e
3.368970675683028e+38
6
src/libFLAC/stream_decoder.c : Fix buffer read overflow. This is CVE-2014-8962. Reported-by: Michele Spagnuolo, Google Security Team <mikispag@google.com>
0
void r_bin_le_free(r_bin_le_obj_t *bin) { r_return_if_fail (bin); free (bin->header); free (bin->objtbl); free (bin->filename); free (bin); }
Safe
[ "CWE-252" ]
radare2
d7ea20fb2e1433ebece9f004d87ad8f2377af23d
2.8247721750078472e+38
7
Fix #18923 - Fix resource exhaustion bug in LE binary (#18926)
0
static int kill_something_info(int sig, struct siginfo *info, pid_t pid) { int ret; if (pid > 0) { rcu_read_lock(); ret = kill_pid_info(sig, info, find_vpid(pid)); rcu_read_unlock(); return ret; } read_lock(&tasklist_lock); if (pid != -1) { ret = __kill_pgrp_info(sig, info, pid ? find_vpid(-pid) : task_pgrp(current)); } else { int retval = 0, count = 0; struct task_struct * p; for_each_process(p) { if (task_pid_vnr(p) > 1 && !same_thread_group(p, current)) { int err = group_send_sig_info(sig, info, p); ++count; if (err != -EPERM) retval = err; } } ret = count ? retval : -ESRCH; } read_unlock(&tasklist_lock); return ret; }
Vulnerable
[ "CWE-119", "CWE-787" ]
linux
4ea77014af0d6205b05503d1c7aac6eace11d473
8.000552686128805e+36
34
kernel/signal.c: avoid undefined behaviour in kill_something_info When running kill(72057458746458112, 0) in userspace I hit the following issue. UBSAN: Undefined behaviour in kernel/signal.c:1462:11 negation of -2147483648 cannot be represented in type 'int': CPU: 226 PID: 9849 Comm: test Tainted: G B ---- ------- 3.10.0-327.53.58.70.x86_64_ubsan+ #116 Hardware name: Huawei Technologies Co., Ltd. RH8100 V3/BC61PBIA, BIOS BLHSV028 11/11/2014 Call Trace: dump_stack+0x19/0x1b ubsan_epilogue+0xd/0x50 __ubsan_handle_negate_overflow+0x109/0x14e SYSC_kill+0x43e/0x4d0 SyS_kill+0xe/0x10 system_call_fastpath+0x16/0x1b Add code to avoid the UBSAN detection. [akpm@linux-foundation.org: tweak comment] Link: http://lkml.kernel.org/r/1496670008-59084-1-git-send-email-zhongjiang@huawei.com Signed-off-by: zhongjiang <zhongjiang@huawei.com> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Xishi Qiu <qiuxishi@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
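The record above fixes undefined behaviour from negating INT_MIN when kill() is handed pid == -2147483648. A standalone sketch of the guard, assuming a plain int pid; the kernel patch applies the same idea before computing -pid.

    #include <limits.h>

    /* Negating INT_MIN is undefined; reject it before computing -pid. */
    static int pgrp_from_pid(int pid, int *pgrp)
    {
        if (pid == INT_MIN)
            return -1;            /* treat as "no such process group" */
        *pgrp = -pid;
        return 0;
    }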
static void MapCharsetR(int n) { curr->w_ss = 0; if (curr->w_CharsetR != n) { curr->w_CharsetR = n; curr->w_FontR = curr->w_charsets[n]; } curr->w_gr = 1; }
Safe
[ "CWE-119" ]
screen
c336a32a1dcd445e6b83827f83531d4c6414e2cd
1.758799656077517e+38
9
Fix stack overflow due to too deep recursion Bug: 45713 How to reproduce: Run this command inside screen $ printf '\x1b[10000000T' screen will recursively call MScrollV to depth n/256. This is time consuming and will overflow stack if n is huge.
0
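The screen record above is about unbounded recursion when a huge scroll count arrives in an escape sequence. A sketch of the general mitigation, with hypothetical names: clamp the attacker-controlled count to what is actually meaningful (the region height) before recursing or looping, since any larger scroll clears the region anyway.

    /* A scroll of more than the region height clears it completely,
     * so larger counts can be clamped instead of recursed on. */
    static int clamp_scroll(int n, int region_height)
    {
        if (n > region_height)
            n = region_height;
        if (n < -region_height)
            n = -region_height;
        return n;
    }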
static void brotli_close_writer(struct Curl_easy *data, struct contenc_writer *writer) { struct brotli_params *bp = (struct brotli_params *) &writer->params; (void) data; if(bp->br) { BrotliDecoderDestroyInstance(bp->br); bp->br = NULL; } }
Safe
[]
curl
3a09fbb7f264c67c438d01a30669ce325aa508e2
2.51059720511297e+38
11
content_encoding: return error on too many compression steps The max allowed steps is arbitrarily set to 5. Bug: https://curl.se/docs/CVE-2022-32206.html CVE-2022-32206 Reported-by: Harry Sintonen Closes #9049
0
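The curl record above caps the number of chained content-encoding writers at an arbitrary 5 to stop unbounded decompression chains. A minimal sketch of such a counter, with hypothetical names; the limit value simply mirrors the one mentioned in the commit message.

    #define MAX_ENCODE_STEPS 5   /* arbitrary limit, as in the fix described above */

    static int add_decoder(unsigned int *nsteps)
    {
        if (++(*nsteps) > MAX_ENCODE_STEPS)
            return -1;           /* too many compression steps: refuse */
        return 0;
    }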
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, int type) { struct sock_exterr_skb *serr; struct sk_buff *skb, *skb2; int copied, err; err = -EAGAIN; skb = skb_dequeue(&sk->sk_error_queue); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free_skb; sock_recv_timestamp(msg, sk, skb); serr = SKB_EXT_ERR(skb); put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee); msg->msg_flags |= MSG_ERRQUEUE; err = copied; /* Reset and regenerate socket error */ spin_lock_bh(&sk->sk_error_queue.lock); sk->sk_err = 0; if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; spin_unlock_bh(&sk->sk_error_queue.lock); sk->sk_error_report(sk); } else spin_unlock_bh(&sk->sk_error_queue.lock); out_free_skb: kfree_skb(skb); out: return err; }
Safe
[]
linux
7bced397510ab569d31de4c70b39e13355046387
2.0612658023740533e+38
44
net_dma: simple removal Per commit "77873803363c net_dma: mark broken" net_dma is no longer used and there is no plan to fix it. This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards. Reverting the remainder of the net_dma induced changes is deferred to subsequent patches. Marked for stable due to Roman's report of a memory leak in dma_pin_iovec_pages(): https://lkml.org/lkml/2014/9/3/177 Cc: Dave Jiang <dave.jiang@intel.com> Cc: Vinod Koul <vinod.koul@intel.com> Cc: David Whipple <whipple@securedatainnovations.ch> Cc: Alexander Duyck <alexander.h.duyck@intel.com> Cc: <stable@vger.kernel.org> Reported-by: Roman Gushchin <klamm@yandex-team.ru> Acked-by: David S. Miller <davem@davemloft.net> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
0
ikev2_sa_print(netdissect_options *ndo, u_char tpay, const struct isakmp_gen *ext1, u_int osa_length, const u_char *ep, uint32_t phase _U_, uint32_t doi _U_, uint32_t proto _U_, int depth) { const struct isakmp_gen *ext; struct isakmp_gen e; u_int sa_length; const u_char *cp; int i; int pcount; u_char np; u_int item_len; ND_TCHECK(*ext1); UNALIGNED_MEMCPY(&e, ext1, sizeof(e)); ikev2_pay_print(ndo, "sa", e.critical); /* * ikev2_sub0_print() guarantees that this is >= 4. */ osa_length= ntohs(e.len); sa_length = osa_length - 4; ND_PRINT((ndo," len=%d", sa_length)); /* * Print the payloads. */ cp = (const u_char *)(ext1 + 1); pcount = 0; for (np = ISAKMP_NPTYPE_P; np != 0; np = e.np) { pcount++; ext = (const struct isakmp_gen *)cp; if (sa_length < sizeof(*ext)) goto toolong; ND_TCHECK(*ext); UNALIGNED_MEMCPY(&e, ext, sizeof(e)); /* * Since we can't have a payload length of less than 4 bytes, * we need to bail out here if the generic header is nonsensical * or truncated, otherwise we could loop forever processing * zero-length items or otherwise misdissect the packet. */ item_len = ntohs(e.len); if (item_len <= 4) goto trunc; if (sa_length < item_len) goto toolong; ND_TCHECK2(*cp, item_len); depth++; ND_PRINT((ndo,"\n")); for (i = 0; i < depth; i++) ND_PRINT((ndo," ")); ND_PRINT((ndo,"(")); if (np == ISAKMP_NPTYPE_P) { cp = ikev2_p_print(ndo, np, pcount, ext, item_len, ep, depth); if (cp == NULL) { /* error, already reported */ return NULL; } } else { ND_PRINT((ndo, "%s", NPSTR(np))); cp += item_len; } ND_PRINT((ndo,")")); depth--; sa_length -= item_len; } return cp; toolong: /* * Skip the rest of the SA. */ cp += sa_length; ND_PRINT((ndo," [|%s]", NPSTR(tpay))); return cp; trunc: ND_PRINT((ndo," [|%s]", NPSTR(tpay))); return NULL; }
Vulnerable
[ "CWE-125", "CWE-787" ]
tcpdump
8dca25d26c7ca2caf6138267f6f17111212c156e
1.6325357200873606e+38
86
CVE-2017-13690/IKEv2: Fix some bounds checks. Use a pointer of the correct type in ND_TCHECK(), or use ND_TCHECK2() and provide the correct length. While we're at it, remove the blank line between some checks and the UNALIGNED_MEMCPY()s they protect. Also, note the places where we print the entire payload. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s).
1
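The tcpdump record above is about verifying that a payload's declared item length is both sane and within the bytes actually remaining before advancing over it. A compact standalone sketch of that pattern, using a hypothetical parser rather than the tcpdump ND_TCHECK macros:

    #include <stddef.h>
    #include <stdint.h>

    /* Advance over one TLV item; fail if the declared length is too small
     * or runs past the bytes that are actually available. */
    static const uint8_t *next_item(const uint8_t *p, size_t remaining,
                                    size_t item_len)
    {
        if (item_len < 4 || item_len > remaining)
            return NULL;          /* malformed or truncated item */
        return p + item_len;
    }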
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, int node, bool hugepage) { struct mempolicy *pol; struct page *page; int preferred_nid; nodemask_t *nmask; pol = get_vma_policy(vma, addr); if (pol->mode == MPOL_INTERLEAVE) { unsigned nid; nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); mpol_cond_put(pol); page = alloc_page_interleave(gfp, order, nid); goto out; } if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { int hpage_node = node; /* * For hugepage allocation and non-interleave policy which * allows the current node (or other explicitly preferred * node) we only try to allocate from the current/preferred * node and don't fall back to other nodes, as the cost of * remote accesses would likely offset THP benefits. * * If the policy is interleave, or does not allow the current * node in its nodemask, we allocate the standard way. */ if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) hpage_node = pol->v.preferred_node; nmask = policy_nodemask(gfp, pol); if (!nmask || node_isset(hpage_node, *nmask)) { mpol_cond_put(pol); /* * First, try to allocate THP only on local node, but * don't reclaim unnecessarily, just compact. */ page = __alloc_pages_node(hpage_node, gfp | __GFP_THISNODE | __GFP_NORETRY, order); /* * If hugepage allocations are configured to always * synchronous compact or the vma has been madvised * to prefer hugepage backing, retry allowing remote * memory with both reclaim and compact as well. */ if (!page && (gfp & __GFP_DIRECT_RECLAIM)) page = __alloc_pages_node(hpage_node, gfp, order); goto out; } } nmask = policy_nodemask(gfp, pol); preferred_nid = policy_node(gfp, pol, node); page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); mpol_cond_put(pol); out: return page; }
Safe
[ "CWE-787" ]
linux
aa9f7d5172fac9bf1f09e678c35e287a40a7b7dd
1.073538905313863e+38
66
mm: mempolicy: require at least one nodeid for MPOL_PREFERRED Using an empty (malformed) nodelist that is not caught during mount option parsing leads to a stack-out-of-bounds access. The option string that was used was: "mpol=prefer:,". However, MPOL_PREFERRED requires a single node number, which is not being provided here. Add a check that 'nodes' is not empty after parsing for MPOL_PREFERRED's nodeid. Fixes: 095f1fc4ebf3 ("mempolicy: rework shmem mpol parsing and display") Reported-by: Entropy Moe <3ntr0py1337@gmail.com> Reported-by: syzbot+b055b1a6b2b958707a21@syzkaller.appspotmail.com Signed-off-by: Randy Dunlap <rdunlap@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Tested-by: syzbot+b055b1a6b2b958707a21@syzkaller.appspotmail.com Cc: Lee Schermerhorn <lee.schermerhorn@hp.com> Link: http://lkml.kernel.org/r/89526377-7eb6-b662-e1d8-4430928abde9@infradead.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
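The mempolicy record above rejects an empty nodelist for MPOL_PREFERRED, which previously led to an out-of-bounds access during mount-option parsing. A generic sketch of the parse-then-validate rule, with hypothetical names:

    #include <stddef.h>

    /* After parsing "prefer:<nodes>", require at least one node id. */
    static int check_preferred_nodes(size_t n_nodes)
    {
        return n_nodes == 0 ? -1 : 0;   /* malformed option: nothing to prefer */
    }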
grub_ext2_mount (grub_disk_t disk) { struct grub_ext2_data *data; data = grub_malloc (sizeof (struct grub_ext2_data)); if (!data) return 0; /* Read the superblock. */ grub_disk_read (disk, 1 * 2, 0, sizeof (struct grub_ext2_sblock), &data->sblock); if (grub_errno) goto fail; /* Make sure this is an ext2 filesystem. */ if (grub_le_to_cpu16 (data->sblock.magic) != EXT2_MAGIC) { grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); goto fail; } /* Check the FS doesn't have feature bits enabled that we don't support. */ if (grub_le_to_cpu32 (data->sblock.feature_incompat) & ~(EXT2_DRIVER_SUPPORTED_INCOMPAT | EXT2_DRIVER_IGNORED_INCOMPAT)) { grub_error (GRUB_ERR_BAD_FS, "filesystem has unsupported incompatible features"); goto fail; } data->disk = disk; data->diropen.data = data; data->diropen.ino = 2; data->diropen.inode_read = 1; data->inode = &data->diropen.inode; grub_ext2_read_inode (data, 2, data->inode); if (grub_errno) goto fail; return data; fail: if (grub_errno == GRUB_ERR_OUT_OF_RANGE) grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); grub_free (data); return 0; }
Safe
[ "CWE-703", "CWE-787" ]
radare2
796dd28aaa6b9fa76d99c42c4d5ff8b257cc2191
2.0778882744792352e+38
51
Fix ext2 buffer overflow in r2_sbu_grub_memmove
0
DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId) { char relname[NAMEDATALEN]; Oid namespaceId; List *schema = stmt->tableElts; Oid relationId; Oid tablespaceId; Relation rel; TupleDesc descriptor; List *inheritOids; List *old_constraints; bool localHasOids; int parentOidCount; List *rawDefaults; List *cookedDefaults; Datum reloptions; ListCell *listptr; AttrNumber attnum; static char *validnsps[] = HEAP_RELOPT_NAMESPACES; Oid ofTypeId; /* * Truncate relname to appropriate length (probably a waste of time, as * parser should have done this already). */ StrNCpy(relname, stmt->relation->relname, NAMEDATALEN); /* * Check consistency of arguments */ if (stmt->oncommit != ONCOMMIT_NOOP && stmt->relation->relpersistence != RELPERSISTENCE_TEMP) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("ON COMMIT can only be used on temporary tables"))); if (stmt->constraints != NIL && relkind == RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("constraints are not supported on foreign tables"))); /* * Look up the namespace in which we are supposed to create the relation, * check we have permission to create there, lock it against concurrent * drop, and mark stmt->relation as RELPERSISTENCE_TEMP if a temporary * namespace is selected. */ namespaceId = RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock, NULL); /* * Security check: disallow creating temp tables from security-restricted * code. This is needed because calling code might not expect untrusted * tables to appear in pg_temp at the front of its search path. */ if (stmt->relation->relpersistence == RELPERSISTENCE_TEMP && InSecurityRestrictedOperation()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("cannot create temporary table within security-restricted operation"))); /* * Select tablespace to use. If not specified, use default tablespace * (which may in turn default to database's default). */ if (stmt->tablespacename) { tablespaceId = get_tablespace_oid(stmt->tablespacename, false); } else { tablespaceId = GetDefaultTablespace(stmt->relation->relpersistence); /* note InvalidOid is OK in this case */ } /* Check permissions except when using database's default */ if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace) { AclResult aclresult; aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_TABLESPACE, get_tablespace_name(tablespaceId)); } /* In all cases disallow placing user relations in pg_global */ if (tablespaceId == GLOBALTABLESPACE_OID) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("only shared relations can be placed in pg_global tablespace"))); /* Identify user ID that will own the table */ if (!OidIsValid(ownerId)) ownerId = GetUserId(); /* * Parse and validate reloptions, if any. */ reloptions = transformRelOptions((Datum) 0, stmt->options, NULL, validnsps, true, false); (void) heap_reloptions(relkind, reloptions, true); if (stmt->ofTypename) { AclResult aclresult; ofTypeId = typenameTypeId(NULL, stmt->ofTypename); aclresult = pg_type_aclcheck(ofTypeId, GetUserId(), ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error_type(aclresult, ofTypeId); } else ofTypeId = InvalidOid; /* * Look up inheritance ancestors and generate relation schema, including * inherited attributes. 
*/ schema = MergeAttributes(schema, stmt->inhRelations, stmt->relation->relpersistence, &inheritOids, &old_constraints, &parentOidCount); /* * Create a tuple descriptor from the relation schema. Note that this * deals with column names, types, and NOT NULL constraints, but not * default values or CHECK constraints; we handle those below. */ descriptor = BuildDescForRelation(schema); localHasOids = interpretOidsOption(stmt->options, (relkind == RELKIND_RELATION || relkind == RELKIND_FOREIGN_TABLE)); descriptor->tdhasoid = (localHasOids || parentOidCount > 0); /* * Find columns with default values and prepare for insertion of the * defaults. Pre-cooked (that is, inherited) defaults go into a list of * CookedConstraint structs that we'll pass to heap_create_with_catalog, * while raw defaults go into a list of RawColumnDefault structs that will * be processed by AddRelationNewConstraints. (We can't deal with raw * expressions until we can do transformExpr.) * * We can set the atthasdef flags now in the tuple descriptor; this just * saves StoreAttrDefault from having to do an immediate update of the * pg_attribute rows. */ rawDefaults = NIL; cookedDefaults = NIL; attnum = 0; foreach(listptr, schema) { ColumnDef *colDef = lfirst(listptr); attnum++; if (colDef->raw_default != NULL) { RawColumnDefault *rawEnt; Assert(colDef->cooked_default == NULL); rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault)); rawEnt->attnum = attnum; rawEnt->raw_default = colDef->raw_default; rawDefaults = lappend(rawDefaults, rawEnt); descriptor->attrs[attnum - 1]->atthasdef = true; } else if (colDef->cooked_default != NULL) { CookedConstraint *cooked; cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint)); cooked->contype = CONSTR_DEFAULT; cooked->name = NULL; cooked->attnum = attnum; cooked->expr = colDef->cooked_default; cooked->skip_validation = false; cooked->is_local = true; /* not used for defaults */ cooked->inhcount = 0; /* ditto */ cooked->is_no_inherit = false; cookedDefaults = lappend(cookedDefaults, cooked); descriptor->attrs[attnum - 1]->atthasdef = true; } } /* * Create the relation. Inherited defaults and constraints are passed in * for immediate handling --- since they don't need parsing, they can be * stored immediately. */ relationId = heap_create_with_catalog(relname, namespaceId, tablespaceId, InvalidOid, InvalidOid, ofTypeId, ownerId, descriptor, list_concat(cookedDefaults, old_constraints), relkind, stmt->relation->relpersistence, false, false, localHasOids, parentOidCount, stmt->oncommit, reloptions, true, allowSystemTableMods, false); /* Store inheritance information for new rel. */ StoreCatalogInheritance(relationId, inheritOids); /* * We must bump the command counter to make the newly-created relation * tuple visible for opening. */ CommandCounterIncrement(); /* * Open the new relation and acquire exclusive lock on it. This isn't * really necessary for locking out other backends (since they can't see * the new rel anyway until we commit), but it keeps the lock manager from * complaining about deadlock risks. */ rel = relation_open(relationId, AccessExclusiveLock); /* * Now add any newly specified column default values and CHECK constraints * to the new relation. These are passed to us in the form of raw * parsetrees; we need to transform them to executable expression trees * before they can be added. The most convenient way to do that is to * apply the parser's transformExpr routine, but transformExpr doesn't * work unless we have a pre-existing relation. 
So, the transformation has * to be postponed to this final step of CREATE TABLE. */ if (rawDefaults || stmt->constraints) AddRelationNewConstraints(rel, rawDefaults, stmt->constraints, true, true, false); /* * Clean up. We keep lock on new relation (although it shouldn't be * visible to anyone else anyway, until commit). */ relation_close(rel, NoLock); return relationId; }
Safe
[ "CWE-362" ]
postgres
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
8.528213465093404e+36
254
Avoid repeated name lookups during table and index DDL. If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. This changes the calling convention for DefineIndex, CreateTrigger, transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible (in 9.2 and newer), and AlterTable (in 9.1 and older). In addition, CheckRelationOwnership is removed in 9.2 and newer and the calling convention is changed in older branches. A field has also been added to the Constraint node (FkConstraint in 8.4). Third-party code calling these functions or using the Constraint node will require updating. Report by Andres Freund. Patch by Robert Haas and Andres Freund, reviewed by Tom Lane. Security: CVE-2014-0062
0
static void cache_space_invalidate(JournalStorageSpace *space) { zero(*space); }
Safe
[ "CWE-770" ]
systemd
084eeb865ca63887098e0945fb4e93c852b91b0f
2.6732346839008118e+38
3
journald: do not store the iovec entry for process commandline on stack This fixes a crash where we would read the commandline, whose length is under control of the sending program, and then crash when trying to create a stack allocation for it. CVE-2018-16864 https://bugzilla.redhat.com/show_bug.cgi?id=1653855 The message actually doesn't get written to disk, because journal_file_append_entry() returns -E2BIG.
0
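The journald record above (CVE-2018-16864) is about a stack allocation whose size is controlled by the sending process. A standalone sketch of the mitigation, with hypothetical names: never size a stack buffer (VLA/alloca) from untrusted input; bound it or move it to the heap.

    #include <stdlib.h>
    #include <string.h>

    /* Copy an untrusted, attacker-sized field without a VLA/alloca. */
    static char *dup_field(const char *src, size_t len)
    {
        char *buf = malloc(len + 1);      /* heap, not stack */
        if (!buf)
            return NULL;
        memcpy(buf, src, len);
        buf[len] = '\0';
        return buf;
    }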
struct nfs_commit_data *nfs_commitdata_alloc(void) { struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); if (p) { memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->pages); } return p; }
Safe
[]
linux
c7559663e42f4294ffe31fe159da6b6a66b35d61
2.761432296619476e+38
10
NFS: Allow nfs_updatepage to extend a write under additional circumstances Currently nfs_updatepage allows a write to be extended to cover a full page only if we don't have a byte range lock lock on the file... but if we have a write delegation on the file or if we have the whole file locked for writing then we should be allowed to extend the write as well. Signed-off-by: Scott Mayhew <smayhew@redhat.com> [Trond: fix up call to nfs_have_delegation()] Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
0
sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; union sctp_addr from_addr; struct sctp_transport *link; sctp_sender_hb_info_t *hbinfo; unsigned long max_interval; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(ep, asoc, type, arg, commands); /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) return sctp_sf_violation_chunklen(ep, asoc, type, arg, commands); hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; /* Make sure that the length of the parameter is what we expect */ if (ntohs(hbinfo->param_hdr.length) != sizeof(sctp_sender_hb_info_t)) { return SCTP_DISPOSITION_DISCARD; } from_addr = hbinfo->daddr; link = sctp_assoc_lookup_paddr(asoc, &from_addr); /* This should never happen, but lets log it if so. */ if (unlikely(!link)) { if (from_addr.sa.sa_family == AF_INET6) { if (net_ratelimit()) printk(KERN_WARNING "%s association %p could not find address %pI6\n", __func__, asoc, &from_addr.v6.sin6_addr); } else { if (net_ratelimit()) printk(KERN_WARNING "%s association %p could not find address %pI4\n", __func__, asoc, &from_addr.v4.sin_addr.s_addr); } return SCTP_DISPOSITION_DISCARD; } /* Validate the 64-bit random nonce. */ if (hbinfo->hb_nonce != link->hb_nonce) return SCTP_DISPOSITION_DISCARD; max_interval = link->hbinterval + link->rto; /* Check if the timestamp looks valid. */ if (time_after(hbinfo->sent_at, jiffies) || time_after(jiffies, hbinfo->sent_at + max_interval)) { SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp " "received for transport: %p\n", __func__, link); return SCTP_DISPOSITION_DISCARD; } /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of * the HEARTBEAT should clear the error counter of the * destination transport address to which the HEARTBEAT was * sent and mark the destination transport address as active if * it is not so marked. */ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link)); return SCTP_DISPOSITION_CONSUME; }
Safe
[ "CWE-119" ]
linux-2.6
9fcb95a105758b81ef0131cd18e2db5149f13e95
4.494055154210933e+37
75
sctp: Avoid memory overflow while FWD-TSN chunk is received with bad stream ID If FWD-TSN chunk is received with bad stream ID, the sctp will not do the validity check, this may cause memory overflow when overwrite the TSN of the stream ID. The FORWARD-TSN chunk is like this: FORWARD-TSN chunk Type = 192 Flags = 0 Length = 172 NewTSN = 99 Stream = 10000 StreamSequence = 0xFFFF This patch fix this problem by discard the chunk if stream ID is not less than MIS. Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com> Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
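The sctp record above discards FORWARD-TSN chunks whose stream ID is not below the number of inbound streams (MIS), preventing a write past the per-stream array. A generic bounds-check sketch with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>

    /* Update a per-stream sequence number only for valid stream ids. */
    static int set_stream_ssn(uint16_t *ssn_table, size_t n_streams,
                              uint16_t stream_id, uint16_t ssn)
    {
        if (stream_id >= n_streams)
            return -1;            /* discard: stream id out of range */
        ssn_table[stream_id] = ssn;
        return 0;
    }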
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) { struct buffer_page *reader = NULL; unsigned long overwrite; unsigned long flags; int nr_loops = 0; int ret; local_irq_save(flags); arch_spin_lock(&cpu_buffer->lock); again: /* * This should normally only loop twice. But because the * start of the reader inserts an empty page, it causes * a case where we will loop three times. There should be no * reason to loop four times (that I know of). */ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { reader = NULL; goto out; } reader = cpu_buffer->reader_page; /* If there's more to read, return this page */ if (cpu_buffer->reader_page->read < rb_page_size(reader)) goto out; /* Never should we have an index greater than the size */ if (RB_WARN_ON(cpu_buffer, cpu_buffer->reader_page->read > rb_page_size(reader))) goto out; /* check if we caught up to the tail */ reader = NULL; if (cpu_buffer->commit_page == cpu_buffer->reader_page) goto out; /* Don't bother swapping if the ring buffer is empty */ if (rb_num_of_entries(cpu_buffer) == 0) goto out; /* * Reset the reader page to size zero. */ local_set(&cpu_buffer->reader_page->write, 0); local_set(&cpu_buffer->reader_page->entries, 0); local_set(&cpu_buffer->reader_page->page->commit, 0); cpu_buffer->reader_page->real_end = 0; spin: /* * Splice the empty reader page into the list around the head. */ reader = rb_set_head_page(cpu_buffer); if (!reader) goto out; cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); cpu_buffer->reader_page->list.prev = reader->list.prev; /* * cpu_buffer->pages just needs to point to the buffer, it * has no specific buffer page to point to. Lets move it out * of our way so we don't accidentally swap it. */ cpu_buffer->pages = reader->list.prev; /* The reader page will be pointing to the new head */ rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); /* * We want to make sure we read the overruns after we set up our * pointers to the next object. The writer side does a * cmpxchg to cross pages which acts as the mb on the writer * side. Note, the reader will constantly fail the swap * while the writer is updating the pointers, so this * guarantees that the overwrite recorded here is the one we * want to compare with the last_overrun. */ smp_mb(); overwrite = local_read(&(cpu_buffer->overrun)); /* * Here's the tricky part. * * We need to move the pointer past the header page. * But we can only do that if a writer is not currently * moving it. The page before the header page has the * flag bit '1' set if it is pointing to the page we want. * but if the writer is in the process of moving it * than it will be '2' or already moved '0'. */ ret = rb_head_page_replace(reader, cpu_buffer->reader_page); /* * If we did not convert it, then we must try again. */ if (!ret) goto spin; /* * Yeah! We succeeded in replacing the page. * * Now make the new head point back to the reader page. */ rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; rb_inc_page(cpu_buffer, &cpu_buffer->head_page); /* Finally update the reader page to the new head */ cpu_buffer->reader_page = reader; cpu_buffer->reader_page->read = 0; if (overwrite != cpu_buffer->last_overrun) { cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; cpu_buffer->last_overrun = overwrite; } goto again; out: /* Update the read_stamp on the first event */ if (reader && reader->read == 0) cpu_buffer->read_stamp = reader->page->time_stamp; arch_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); return reader; }
Safe
[ "CWE-190" ]
linux-stable
59643d1535eb220668692a5359de22545af579f6
1.4524254387658226e+38
131
ring-buffer: Prevent overflow of size in ring_buffer_resize() If the size passed to ring_buffer_resize() is greater than MAX_LONG - BUF_PAGE_SIZE then the DIV_ROUND_UP() will return zero. Here's the details: # echo 18014398509481980 > /sys/kernel/debug/tracing/buffer_size_kb tracing_entries_write() processes this and converts kb to bytes. 18014398509481980 << 10 = 18446744073709547520 and this is passed to ring_buffer_resize() as unsigned long size. size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); Where DIV_ROUND_UP(a, b) is (a + b - 1)/b BUF_PAGE_SIZE is 4080 and here 18446744073709547520 + 4080 - 1 = 18446744073709551599 where 18446744073709551599 is still smaller than 2^64 2^64 - 18446744073709551599 = 17 But now 18446744073709551599 / 4080 = 4521260802379792 and size = size * 4080 = 18446744073709551360 This is checked to make sure its still greater than 2 * 4080, which it is. Then we convert to the number of buffer pages needed. nr_page = DIV_ROUND_UP(size, BUF_PAGE_SIZE) but this time size is 18446744073709551360 and 2^64 - (18446744073709551360 + 4080 - 1) = -3823 Thus it overflows and the resulting number is less than 4080, which makes 3823 / 4080 = 0 an nr_pages is set to this. As we already checked against the minimum that nr_pages may be, this causes the logic to fail as well, and we crash the kernel. There's no reason to have the two DIV_ROUND_UP() (that's just result of historical code changes), clean up the code and fix this bug. Cc: stable@vger.kernel.org # 3.5+ Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic") Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
0
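The ring-buffer record above walks through how DIV_ROUND_UP(size, BUF_PAGE_SIZE), written as (a + b - 1) / b, wraps to zero when size is close to the top of the unsigned long range. A standalone sketch of an overflow-safe round-up division that mirrors that arithmetic; the page-size constant is illustrative.

    #include <limits.h>

    #define BUF_PAGE_SIZE 4080UL

    /* (a + b - 1) / b overflows when a > ULONG_MAX - (b - 1);
     * check first, then divide. */
    static int pages_needed(unsigned long size, unsigned long *pages)
    {
        if (size > ULONG_MAX - (BUF_PAGE_SIZE - 1))
            return -1;                    /* request is nonsensically large */
        *pages = (size + BUF_PAGE_SIZE - 1) / BUF_PAGE_SIZE;
        return 0;
    }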
char **file_lines_parse(char *p, size_t size, int *numlines, TALLOC_CTX *mem_ctx) { int i; char *s, **ret; if (!p) return NULL; for (s = p, i=0; s < p+size; s++) { if (s[0] == '\n') i++; } ret = talloc_array(mem_ctx, char *, i+2); if (!ret) { talloc_free(p); return NULL; } talloc_steal(ret, p); memset(ret, 0, sizeof(ret[0])*(i+2)); ret[0] = p; for (s = p, i=0; s < p+size; s++) { if (s[0] == '\n') { s[0] = 0; i++; ret[i] = s+1; } if (s[0] == '\r') s[0] = 0; } /* remove any blank lines at the end */ while (i > 0 && ret[i-1][0] == 0) { i--; } if (numlines) *numlines = i; return ret; }
Safe
[]
samba
63d98ed90466295d0e946f79868d3d7aad6e7589
8.019119612037256e+37
40
CVE-2013-4476: lib-util: split out file_save_mode() from file_save() file_save_mode() writes files with specified mode. Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234 Signed-off-by: Björn Baumbach <bb@sernet.de> Reviewed-by: Stefan Metzmacher <metze@samba.org>
0
static int ZEND_FASTCALL zend_binary_assign_op_helper_SPEC_UNUSED_VAR(int (*binary_op)(zval *result, zval *op1, zval *op2 TSRMLS_DC), ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op2, free_op_data2, free_op_data1; zval **var_ptr; zval *value; switch (opline->extended_value) { case ZEND_ASSIGN_OBJ: return zend_binary_assign_op_obj_helper_SPEC_UNUSED_VAR(binary_op, ZEND_OPCODE_HANDLER_ARGS_PASSTHRU); break; case ZEND_ASSIGN_DIM: { zval **container = _get_obj_zval_ptr_ptr_unused(TSRMLS_C); if (IS_UNUSED == IS_VAR && !container) { zend_error_noreturn(E_ERROR, "Cannot use string offset as an array"); } else if (Z_TYPE_PP(container) == IS_OBJECT) { if (IS_UNUSED == IS_VAR && !0) { Z_ADDREF_PP(container); /* undo the effect of get_obj_zval_ptr_ptr() */ } return zend_binary_assign_op_obj_helper_SPEC_UNUSED_VAR(binary_op, ZEND_OPCODE_HANDLER_ARGS_PASSTHRU); } else { zend_op *op_data = opline+1; zval *dim = _get_zval_ptr_var(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC); zend_fetch_dimension_address(&EX_T(op_data->op2.u.var), container, dim, 0, BP_VAR_RW TSRMLS_CC); value = get_zval_ptr(&op_data->op1, EX(Ts), &free_op_data1, BP_VAR_R); var_ptr = _get_zval_ptr_ptr_var(&op_data->op2, EX(Ts), &free_op_data2 TSRMLS_CC); ZEND_VM_INC_OPCODE(); } } break; default: value = _get_zval_ptr_var(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC); var_ptr = NULL; /* do nothing */ break; } if (!var_ptr) { zend_error_noreturn(E_ERROR, "Cannot use assign-op operators with overloaded objects nor string offsets"); } if (*var_ptr == EG(error_zval_ptr)) { if (!RETURN_VALUE_UNUSED(&opline->result)) { AI_SET_PTR(EX_T(opline->result.u.var).var, EG(uninitialized_zval_ptr)); PZVAL_LOCK(EG(uninitialized_zval_ptr)); } if (free_op2.var) {zval_ptr_dtor(&free_op2.var);}; ZEND_VM_NEXT_OPCODE(); } SEPARATE_ZVAL_IF_NOT_REF(var_ptr); if(Z_TYPE_PP(var_ptr) == IS_OBJECT && Z_OBJ_HANDLER_PP(var_ptr, get) && Z_OBJ_HANDLER_PP(var_ptr, set)) { /* proxy object */ zval *objval = Z_OBJ_HANDLER_PP(var_ptr, get)(*var_ptr TSRMLS_CC); Z_ADDREF_P(objval); binary_op(objval, objval, value TSRMLS_CC); Z_OBJ_HANDLER_PP(var_ptr, set)(var_ptr, objval TSRMLS_CC); zval_ptr_dtor(&objval); } else { binary_op(*var_ptr, *var_ptr, value TSRMLS_CC); } if (!RETURN_VALUE_UNUSED(&opline->result)) { AI_SET_PTR(EX_T(opline->result.u.var).var, *var_ptr); PZVAL_LOCK(*var_ptr); } if (free_op2.var) {zval_ptr_dtor(&free_op2.var);}; if (opline->extended_value == ZEND_ASSIGN_DIM) { FREE_OP(free_op_data1); FREE_OP_VAR_PTR(free_op_data2); } ZEND_VM_NEXT_OPCODE(); }
Safe
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
2.1283336014925765e+38
80
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
0
static bool is_ivb_ioat(struct pci_dev *pdev) { switch (pdev->device) { case PCI_DEVICE_ID_INTEL_IOAT_IVB0: case PCI_DEVICE_ID_INTEL_IOAT_IVB1: case PCI_DEVICE_ID_INTEL_IOAT_IVB2: case PCI_DEVICE_ID_INTEL_IOAT_IVB3: case PCI_DEVICE_ID_INTEL_IOAT_IVB4: case PCI_DEVICE_ID_INTEL_IOAT_IVB5: case PCI_DEVICE_ID_INTEL_IOAT_IVB6: case PCI_DEVICE_ID_INTEL_IOAT_IVB7: case PCI_DEVICE_ID_INTEL_IOAT_IVB8: case PCI_DEVICE_ID_INTEL_IOAT_IVB9: return true; default: return false; } }
Safe
[]
linux
7bced397510ab569d31de4c70b39e13355046387
2.6313528032267494e+38
19
net_dma: simple removal Per commit "77873803363c net_dma: mark broken" net_dma is no longer used and there is no plan to fix it. This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards. Reverting the remainder of the net_dma induced changes is deferred to subsequent patches. Marked for stable due to Roman's report of a memory leak in dma_pin_iovec_pages(): https://lkml.org/lkml/2014/9/3/177 Cc: Dave Jiang <dave.jiang@intel.com> Cc: Vinod Koul <vinod.koul@intel.com> Cc: David Whipple <whipple@securedatainnovations.ch> Cc: Alexander Duyck <alexander.h.duyck@intel.com> Cc: <stable@vger.kernel.org> Reported-by: Roman Gushchin <klamm@yandex-team.ru> Acked-by: David S. Miller <davem@davemloft.net> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
0
static int bus_add_match_full( sd_bus *bus, sd_bus_slot **slot, bool asynchronous, const char *match, sd_bus_message_handler_t callback, sd_bus_message_handler_t install_callback, void *userdata) { struct bus_match_component *components = NULL; unsigned n_components = 0; sd_bus_slot *s = NULL; int r = 0; assert_return(bus, -EINVAL); assert_return(bus = bus_resolve(bus), -ENOPKG); assert_return(match, -EINVAL); assert_return(!bus_pid_changed(bus), -ECHILD); r = bus_match_parse(match, &components, &n_components); if (r < 0) goto finish; s = bus_slot_allocate(bus, !slot, BUS_MATCH_CALLBACK, sizeof(struct match_callback), userdata); if (!s) { r = -ENOMEM; goto finish; } s->match_callback.callback = callback; s->match_callback.install_callback = install_callback; if (bus->bus_client) { enum bus_match_scope scope; scope = bus_match_get_scope(components, n_components); /* Do not install server-side matches for matches against the local service, interface or bus path. */ if (scope != BUS_MATCH_LOCAL) { /* We store the original match string, so that we can use it to remove the match again. */ s->match_callback.match_string = strdup(match); if (!s->match_callback.match_string) { r = -ENOMEM; goto finish; } if (asynchronous) { r = bus_add_match_internal_async(bus, &s->match_callback.install_slot, s->match_callback.match_string, add_match_callback, s); if (r < 0) return r; /* Make the slot of the match call floating now. We need the reference, but we don't * want that this match pins the bus object, hence we first create it non-floating, but * then make it floating. */ r = sd_bus_slot_set_floating(s->match_callback.install_slot, true); } else r = bus_add_match_internal(bus, s->match_callback.match_string, &s->match_callback.after); if (r < 0) goto finish; s->match_added = true; } } bus->match_callbacks_modified = true; r = bus_match_add(&bus->match_callbacks, components, n_components, &s->match_callback); if (r < 0) goto finish; if (slot) *slot = s; s = NULL; finish: bus_match_parse_free(components, n_components); sd_bus_slot_unref(s); return r; }
Safe
[ "CWE-416" ]
systemd
1068447e6954dc6ce52f099ed174c442cb89ed54
6.653227382590398e+37
86
sd-bus: introduce API for re-enqueuing incoming messages When authorizing via PolicyKit we want to process incoming method calls twice: once to process and figure out that we need PK authentication, and a second time after we aquired PK authentication to actually execute the operation. With this new call sd_bus_enqueue_for_read() we have a way to put an incoming message back into the read queue for this purpose. This might have other uses too, for example debugging.
0
static bool build_multipart( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message) { int i; char boundary[32]; unsigned char digest[16]; char *p = NULL, *last; char *content_type = NULL; char *json = NULL; ogs_assert(message); ogs_assert(http); ogs_random(digest, 16); strcpy(boundary, "=-"); ogs_base64_encode_binary(boundary + 2, digest, 16); p = http->content = ogs_calloc(1, OGS_HUGE_LEN); ogs_expect_or_return_val(p, false); last = p + OGS_HUGE_LEN; /* First boundary */ p = ogs_slprintf(p, last, "--%s\r\n", boundary); /* Encapsulated multipart part (application/json) */ json = build_json(message); ogs_expect_or_return_val(json, false); p = ogs_slprintf(p, last, "%s\r\n\r\n%s", OGS_SBI_CONTENT_TYPE ": " OGS_SBI_CONTENT_JSON_TYPE, json); ogs_free(json); /* Add part */ for (i = 0; i < message->num_of_part; i++) { p = ogs_slprintf(p, last, "\r\n--%s\r\n", boundary); p = ogs_slprintf(p, last, "%s: %s\r\n", OGS_SBI_CONTENT_ID, message->part[i].content_id); p = ogs_slprintf(p, last, "%s: %s\r\n\r\n", OGS_SBI_CONTENT_TYPE, message->part[i].content_type); memcpy(p, message->part[i].pkbuf->data, message->part[i].pkbuf->len); p += message->part[i].pkbuf->len; } /* Last boundary */ p = ogs_slprintf(p, last, "\r\n--%s--\r\n", boundary); http->content_length = p - http->content; content_type = ogs_msprintf("%s; boundary=\"%s\"", OGS_SBI_CONTENT_MULTIPART_TYPE, boundary); ogs_expect_or_return_val(content_type, false); ogs_sbi_header_set(http->headers, OGS_SBI_CONTENT_TYPE, content_type); ogs_free(content_type); return true; }
Safe
[ "CWE-476", "CWE-787" ]
open5gs
d919b2744cd05abae043490f0a3dd1946c1ccb8c
5.264648370535551e+37
61
[AMF] fix the memory problem (#1247) 1. memory corruption - Overflow num_of_part in SBI message 2. null pointer dereference - n2InfoContent->ngap_ie_type
0
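The open5gs record above mentions an overflow of num_of_part when handling multipart SBI messages. A generic sketch of bounding the part count before writing into a fixed-size array; the capacity and struct names here are hypothetical placeholders, not the Open5GS API.

    #include <stddef.h>

    #define MAX_NUM_OF_PART 8     /* hypothetical fixed capacity */

    struct part { const void *data; size_t len; };

    static int add_part(struct part *parts, size_t *num,
                        const void *data, size_t len)
    {
        if (*num >= MAX_NUM_OF_PART)
            return -1;            /* refuse: would overflow the parts array */
        parts[*num].data = data;
        parts[*num].len  = len;
        (*num)++;
        return 0;
    }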