func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64)
---|---|---|---|---|---|---|---|---
GF_Err gf_isom_get_watermark(GF_ISOFile *mov, bin128 UUID, u8** data, u32* length)
{
GF_UserDataMap *map;
GF_UnknownUUIDBox *wm;
if (!mov) return GF_BAD_PARAM;
if (!mov->moov || !mov->moov->udta) return GF_NOT_SUPPORTED;
map = udta_getEntry(mov->moov->udta, GF_ISOM_BOX_TYPE_UUID, (bin128 *) & UUID);
if (!map) return GF_NOT_SUPPORTED;
wm = (GF_UnknownUUIDBox*)gf_list_get(map->boxes, 0);
if (!wm) return GF_NOT_SUPPORTED;
*data = (u8 *) gf_malloc(sizeof(char)*wm->dataSize);
if (! *data) return GF_OUT_OF_MEM;
memcpy(*data, wm->data, wm->dataSize);
*length = wm->dataSize;
return GF_OK;
}
|
Safe
|
[
"CWE-476"
] |
gpac
|
ebfa346eff05049718f7b80041093b4c5581c24e
|
3.2314015881009595e+38
| 20 |
fixed #1706
| 0 |
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
struct xfrm_pol_inexact_node *node;
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (node)
xfrm_pol_inexact_node_init(node, addr, prefixlen);
return node;
}
|
Safe
|
[
"CWE-703"
] |
linux
|
f85daf0e725358be78dfd208dea5fd665d8cb901
|
6.880218776414294e+37
| 10 |
xfrm: xfrm_policy: fix a possible double xfrm_pols_put() in xfrm_bundle_lookup()
xfrm_policy_lookup() will call xfrm_pol_hold_rcu() to take a refcount on
pols[0]. This refcount can be dropped in xfrm_expand_policies() when
xfrm_expand_policies() returns an error, so pols[0]'s refcount is already
balanced there. But xfrm_bundle_lookup() will also call xfrm_pols_put() with
num_pols == 1 to drop this refcount when xfrm_expand_policies() returns an
error.
This patch also fixes an illegal address access: pols[0] will hold an error
pointer when xfrm_policy_lookup() fails, which leads xfrm_pols_put() to
dereference an invalid address in xfrm_bundle_lookup()'s error path.
Fix these by setting num_pols = 0 in xfrm_expand_policies()'s error path.
Fixes: 80c802f3073e ("xfrm: cache bundles instead of policies for outgoing flows")
Signed-off-by: Hangyu Hua <hbh25y@gmail.com>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
| 0 |
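The double put described in this row's commit message reduces to a caller and callee both dropping the same reference; a minimal standalone C sketch of the pattern and of the num_pols = 0 fix, with hypothetical names (not the kernel code):

#include <stdio.h>

struct policy { int refcnt; };

static void pol_put(struct policy *p) { p->refcnt--; }

/* On failure this helper already drops the reference it was given. */
static int expand(struct policy **pols, int *num_pols, int fail)
{
    if (fail) {
        pol_put(pols[0]);
        *num_pols = 0;   /* the fix: tell the caller nothing is held */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct policy p = { 1 };
    struct policy *pols[1] = { &p };
    int num_pols = 1;

    if (expand(pols, &num_pols, 1) < 0) {
        /* caller's error path: without num_pols = 0 above, this second
         * put would drop the same reference twice */
        for (int i = 0; i < num_pols; i++)
            pol_put(pols[i]);
    }
    printf("refcnt = %d\n", p.refcnt);   /* 0, not -1 */
    return 0;
}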
flatpak_context_sockets_to_args (FlatpakContextSockets sockets,
FlatpakContextSockets valid,
GPtrArray *args)
{
return flatpak_context_bitmask_to_args (sockets, valid, flatpak_context_sockets, "--socket", "--nosocket", args);
}
|
Safe
|
[
"CWE-94",
"CWE-74"
] |
flatpak
|
6e5ae7a109cdfa9735ea7ccbd8cb79f9e8d3ae8b
|
3.310015476032718e+38
| 6 |
context: Add --env-fd option
This allows environment variables to be added to the context without
making their values visible to processes running under a different uid,
which might be significant if the variable's value is a token or some
other secret value.
Signed-off-by: Simon McVittie <smcv@collabora.com>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
| 0 |
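A hedged sketch of the mechanism behind --env-fd as the commit message describes it: a value read from an inherited file descriptor never appears in the world-readable /proc/<pid>/cmdline, unlike one passed in argv. Hypothetical standalone example, not flatpak's implementation:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    char buf[64];
    ssize_t n;

    if (pipe(fds) != 0)
        return 1;
    /* writer side: put the assignment into the pipe ... */
    write(fds[1], "TOKEN=hunter2", 13);
    close(fds[1]);
    /* ... reader side: fetch it by fd number; the secret was never
     * placed on the command line */
    n = read(fds[0], buf, sizeof(buf) - 1);
    buf[n > 0 ? n : 0] = '\0';
    printf("received: %s\n", buf);
    close(fds[0]);
    return 0;
}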
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
gfn_t start, end;
int index;
if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
!kvm_arch_has_noncoherent_dma(vcpu->kvm))
return;
if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
return;
/* fixed MTRRs. */
if (fixed_msr_to_range(msr, &start, &end)) {
if (!fixed_mtrr_is_enabled(mtrr_state))
return;
} else if (msr == MSR_MTRRdefType) {
start = 0x0;
end = ~0ULL;
} else {
/* variable range MTRRs. */
index = (msr - 0x200) / 2;
var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
}
kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}
|
Safe
|
[
"CWE-284"
] |
linux
|
9842df62004f366b9fed2423e24df10542ee0dc5
|
3.3832378950534655e+38
| 28 |
KVM: MTRR: remove MSR 0x2f8
MSR 0x2f8 accessed the 124th Variable Range MTRR ever since MTRR support
was introduced by 9ba075a664df ("KVM: MTRR support").
0x2f8 became harmful when 910a6aae4e2e ("KVM: MTRR: exactly define the
size of variable MTRRs") shrank the array of VR MTRRs from 256 to 8,
which made access to index 124 out of bounds. The surrounding code only
WARNs in this situation, thus the guest gained a limited read/write
access to struct kvm_arch_vcpu.
0x2f8 is not a valid VR MTRR MSR, because KVM has/advertises only 16 VR
MTRR MSRs, 0x200-0x20f. Every VR MTRR is set up using two MSRs, 0x2f8
was treated as a PHYSBASE and 0x2f9 would be its PHYSMASK, but 0x2f9 was
not implemented in KVM, therefore 0x2f8 could never do anything useful
and getting rid of it is safe.
This fixes CVE-2016-3713.
Fixes: 910a6aae4e2e ("KVM: MTRR: exactly define the size of variable MTRRs")
Cc: stable@vger.kernel.org
Reported-by: David Matlack <dmatlack@google.com>
Signed-off-by: Andy Honig <ahonig@google.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
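The out-of-bounds index in this row's commit message follows directly from the variable-range arithmetic visible in update_mtrr() above; worked through as a standalone sketch:

#include <stdio.h>

int main(void)
{
    unsigned msr = 0x2f8;
    unsigned index = (msr - 0x200) / 2;   /* same formula as update_mtrr(): 124 */
    unsigned var_ranges = 8;              /* array size after commit 910a6aae4e2e */

    printf("index %u, array holds %u entries -> %s\n",
           index, var_ranges,
           index < var_ranges ? "in bounds" : "out of bounds");
    return 0;
}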
static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
const struct nfs_server *server, int may_sleep)
{
return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep);
}
|
Safe
|
[
"CWE-703",
"CWE-189"
] |
linux
|
bf118a342f10dafe44b14451a1392c3254629a1f
|
1.9243551346460656e+38
| 5 |
NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrarily
sized bitmap in an FATTR4_WORD0_ACL request. Replace the use of
nfs4_fattr_bitmap_maxsz as a guess at the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length in the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: stable@kernel.org
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
| 0 |
int imap_rename_mailbox(struct ImapData *idata, struct ImapMbox *mx, const char *newname)
{
char oldmbox[LONG_STRING];
char newmbox[LONG_STRING];
char buf[LONG_STRING];
imap_munge_mbox_name(idata, oldmbox, sizeof(oldmbox), mx->mbox);
imap_munge_mbox_name(idata, newmbox, sizeof(newmbox), newname);
snprintf(buf, sizeof(buf), "RENAME %s %s", oldmbox, newmbox);
if (imap_exec(idata, buf, 0) != 0)
return -1;
return 0;
}
|
Safe
|
[
"CWE-78",
"CWE-77"
] |
neomutt
|
95e80bf9ff10f68cb6443f760b85df4117cb15eb
|
1.2838755426479715e+38
| 16 |
Quote path in imap_subscribe
| 0 |
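The fix named in this row's one-line message is quoting; a generic sketch of quoting a mailbox name before splicing it into an IMAP command line, so quotes or line breaks in the name cannot inject a second command (hypothetical helper, not neomutt's actual implementation):

#include <stdio.h>

static void quote_mbox(char *dst, size_t dstlen, const char *src)
{
    size_t i = 0;
    if (dstlen < 4) { if (dstlen) dst[0] = '\0'; return; }
    dst[i++] = '"';
    for (; *src && i + 3 < dstlen; src++) {
        if (*src == '\r' || *src == '\n')
            continue;                      /* drop line breaks outright */
        if (*src == '"' || *src == '\\')
            dst[i++] = '\\';               /* escape quote characters */
        dst[i++] = *src;
    }
    dst[i++] = '"';
    dst[i] = '\0';
}

int main(void)
{
    char out[128];
    quote_mbox(out, sizeof(out), "INBOX\" EVIL \"x");
    printf("SUBSCRIBE %s\n", out);
    return 0;
}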
ip_create_slave_core(interp, argc, argv)
VALUE interp;
int argc;
VALUE *argv;
{
struct tcltkip *master = get_ip(interp);
struct tcltkip *slave;
/* struct tcltkip *slave = RbTk_ALLOC_N(struct tcltkip, 1); */
VALUE safemode;
VALUE name;
VALUE new_ip;
int safe;
int thr_crit_bup;
Tk_Window mainWin;
/* ip is deleted? */
if (deleted_ip(master)) {
return rb_exc_new2(rb_eRuntimeError,
"deleted master cannot create a new slave");
}
name = argv[0];
safemode = argv[1];
if (Tcl_IsSafe(master->ip) == 1) {
safe = 1;
} else if (safemode == Qfalse || NIL_P(safemode)) {
safe = 0;
} else {
safe = 1;
}
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
#if 0
/* init Tk */
if (RTEST(with_tk)) {
volatile VALUE exc;
if (!tk_stubs_init_p()) {
exc = tcltkip_init_tk(interp);
if (!NIL_P(exc)) {
rb_thread_critical = thr_crit_bup;
return exc;
}
}
}
#endif
new_ip = TypedData_Make_Struct(CLASS_OF(interp), struct tcltkip,
&tcltkip_type, slave);
/* create slave-ip */
#ifdef RUBY_USE_NATIVE_THREAD
/* slave->tk_thread_id = 0; */
slave->tk_thread_id = master->tk_thread_id; /* == current thread */
#endif
slave->ref_count = 0;
slave->allow_ruby_exit = 0;
slave->return_value = 0;
slave->ip = Tcl_CreateSlave(master->ip, StringValuePtr(name), safe);
if (slave->ip == NULL) {
rb_thread_critical = thr_crit_bup;
return rb_exc_new2(rb_eRuntimeError,
"fail to create the new slave interpreter");
}
#if TCL_MAJOR_VERSION >= 8
#if TCL_NAMESPACE_DEBUG
slave->default_ns = Tcl_GetCurrentNamespace(slave->ip);
#endif
#endif
rbtk_preserve_ip(slave);
slave->has_orig_exit
= Tcl_GetCommandInfo(slave->ip, "exit", &(slave->orig_exit_info));
/* replace 'exit' command --> 'interp_exit' command */
mainWin = (tk_stubs_init_p())? Tk_MainWindow(slave->ip): (Tk_Window)NULL;
#if TCL_MAJOR_VERSION >= 8
DUMP1("Tcl_CreateObjCommand(\"exit\") --> \"interp_exit\"");
Tcl_CreateObjCommand(slave->ip, "exit", ip_InterpExitObjCmd,
(ClientData)mainWin, (Tcl_CmdDeleteProc *)NULL);
#else /* TCL_MAJOR_VERSION < 8 */
DUMP1("Tcl_CreateCommand(\"exit\") --> \"interp_exit\"");
Tcl_CreateCommand(slave->ip, "exit", ip_InterpExitCommand,
(ClientData)mainWin, (Tcl_CmdDeleteProc *)NULL);
#endif
/* replace vwait and tkwait */
ip_replace_wait_commands(slave->ip, mainWin);
/* wrap namespace command */
ip_wrap_namespace_command(slave->ip);
/* define command to replace cmds which depend on slave-slave's MainWin */
#if TCL_MAJOR_VERSION >= 8
Tcl_CreateObjCommand(slave->ip, "__replace_slave_tk_commands__",
ip_rb_replaceSlaveTkCmdsObjCmd,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
#else /* TCL_MAJOR_VERSION < 8 */
Tcl_CreateCommand(slave->ip, "__replace_slave_tk_commands__",
ip_rb_replaceSlaveTkCmdsCommand,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
#endif
/* set finalizer */
Tcl_CallWhenDeleted(slave->ip, ip_CallWhenDeleted, (ClientData)mainWin);
rb_thread_critical = thr_crit_bup;
return new_ip;
}
|
Vulnerable
|
[] |
tk
|
d098136e3f62a4879a7d7cd34bbd50f482ba3331
|
2.1065984188863096e+38
| 112 |
tcltklib.c: use StringValueCStr [ci skip]
* ext/tk/tcltklib.c (set_max_block_time, tcl_protect_core,
ip_init, ip_create_slave_core, get_obj_from_str,
ip_cancel_eval_core, lib_set_system_encoding,
alloc_invoke_arguments, lib_merge_tklist): use StringValueCStr
instead of StringValuePtr for values to be passed to Tcl
interpreter.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@55842 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
| 1 |
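The StringValuePtr/StringValueCStr distinction the commit message relies on: StringValueCStr() guarantees a NUL-terminated C string and raises ArgumentError if the Ruby string contains an embedded NUL, which is the right contract for Tcl's char*-based APIs. An illustrative C-extension fragment (not the actual tcltklib change):

#include <ruby.h>

static VALUE pass_to_tcl(VALUE self, VALUE name)
{
    /* StringValuePtr() hands back the raw byte buffer; a Ruby string
     * such as "safe\0evil" would be silently truncated (or worse) by
     * any C API that stops at the first NUL byte. */
    /* const char *unsafe = StringValuePtr(name); */

    /* StringValueCStr() raises on embedded NULs and guarantees
     * NUL-termination before the pointer ever reaches Tcl. */
    const char *safe = StringValueCStr(name);
    (void)safe;   /* would be handed to a Tcl_* call here */
    return Qnil;
}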
int __init hci_sock_init(void)
{
int err;
err = proto_register(&hci_sk_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
if (err < 0) {
BT_ERR("HCI socket registration failed");
goto error;
}
err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
if (err < 0) {
BT_ERR("Failed to create HCI proc file");
bt_sock_unregister(BTPROTO_HCI);
goto error;
}
BT_INFO("HCI socket layer initialized");
return 0;
error:
proto_unregister(&hci_sk_proto);
return err;
}
|
Safe
|
[
"CWE-20",
"CWE-269"
] |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
1.1226160869916113e+38
| 29 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <davem@davemloft.net>
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
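A userspace sketch of the handler contract this commit message establishes: the core zeroes msg_namelen, and only a handler that actually writes msg_name reports a length (hypothetical stand-in types, not kernel code):

#include <stdio.h>
#include <string.h>

struct fake_msghdr { void *msg_name; int msg_namelen; };

static void handler(struct fake_msghdr *msg)
{
    if (msg->msg_name) {
        memset(msg->msg_name, 0xAB, 16);  /* pretend: the peer address */
        msg->msg_namelen = 16;            /* report exactly what was set */
    }
    /* if msg_name is NULL (plain read()), msg_namelen stays 0 and no
     * uninitialized bytes can reach userspace */
}

int main(void)
{
    char addr[128];
    struct fake_msghdr msg = { addr, 0 };  /* core now passes namelen = 0 */
    handler(&msg);
    printf("namelen = %d\n", msg.msg_namelen);
    return 0;
}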
Char *write_unsigned_decimal(UInt value, unsigned prefix_size = 0) {
unsigned num_digits = internal::count_digits(value);
Char *ptr = get(grow_buffer(prefix_size + num_digits));
internal::format_decimal(ptr + prefix_size, value, num_digits);
return ptr;
}
|
Safe
|
[
"CWE-134",
"CWE-119",
"CWE-787"
] |
fmt
|
8cf30aa2be256eba07bb1cefb998c52326e846e7
|
1.246600494201985e+38
| 6 |
Fix segfault on complex pointer formatting (#642)
| 0 |
TEST_F(HeaderToMetadataTest, EmptyHeaderValue) {
initializeFilter(request_config_yaml);
Http::TestRequestHeaderMapImpl incoming_headers{{"X-VERSION", ""}};
EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));
EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));
}
|
Safe
|
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
|
1.0670593381486283e+38
| 8 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporarily reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <mklein@lyft.com>
| 0 |
static void invalid_env(const char *p, void *userdata) {
InvalidEnvInfo *info = userdata;
log_unit_error(info->unit, "Ignoring invalid environment assignment '%s': %s", p, info->path);
}
|
Safe
|
[
"CWE-269"
] |
systemd
|
f69567cbe26d09eac9d387c0be0fc32c65a83ada
|
1.7194579683734082e+38
| 5 |
core: expose SUID/SGID restriction as new unit setting RestrictSUIDSGID=
| 0 |
rdpdr_handle_ok(int device, int handle)
{
switch (g_rdpdr_device[device].device_type)
{
case DEVICE_TYPE_PARALLEL:
case DEVICE_TYPE_SERIAL:
case DEVICE_TYPE_PRINTER:
case DEVICE_TYPE_SCARD:
if (g_rdpdr_device[device].handle != handle)
return False;
break;
case DEVICE_TYPE_DISK:
if (g_fileinfo[handle].device_id != device)
return False;
break;
}
return True;
}
|
Safe
|
[
"CWE-787"
] |
rdesktop
|
766ebcf6f23ccfe8323ac10242ae6e127d4505d2
|
7.82486685038326e+37
| 18 |
Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities were identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
| 0 |
static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
void __user *arg)
{
struct snd_seq_queue_status status;
struct snd_seq_queue *queue;
struct snd_seq_timer *tmr;
if (copy_from_user(&status, arg, sizeof(status)))
return -EFAULT;
queue = queueptr(status.queue);
if (queue == NULL)
return -EINVAL;
memset(&status, 0, sizeof(status));
status.queue = queue->queue;
tmr = queue->timer;
status.events = queue->tickq->cells + queue->timeq->cells;
status.time = snd_seq_timer_get_cur_time(tmr);
status.tick = snd_seq_timer_get_cur_tick(tmr);
status.running = tmr->running;
status.flags = queue->flags;
queuefree(queue);
if (copy_to_user(arg, &status, sizeof(status)))
return -EFAULT;
return 0;
}
|
Safe
|
[
"CWE-703"
] |
linux
|
030e2c78d3a91dd0d27fef37e91950dde333eba1
|
2.4559962079616166e+38
| 31 |
ALSA: seq: Fix missing NULL check at remove_events ioctl
snd_seq_ioctl_remove_events() calls snd_seq_fifo_clear()
unconditionally even if there is no FIFO assigned, and this leads to
an Oops due to NULL dereference. The fix is just to add a proper NULL
check.
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Dmitry Vyukov <dvyukov@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
| 0 |
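The shape of the missing NULL check the commit message describes, as a generic standalone sketch (hypothetical names, not the ALSA code):

#include <errno.h>
#include <stdio.h>

struct fifo { int depth; };
struct client { struct fifo *fifo; };

static void fifo_clear(struct fifo *f) { f->depth = 0; }

static int remove_events(struct client *c)
{
    if (c->fifo == NULL)      /* the added check: no FIFO was assigned */
        return -EINVAL;
    fifo_clear(c->fifo);
    return 0;
}

int main(void)
{
    struct client c = { NULL };
    printf("%d\n", remove_events(&c));   /* -EINVAL instead of an oops */
    return 0;
}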
fit(peer_t *p, double rd)
{
if ((p->reachable_bits & (p->reachable_bits-1)) == 0) {
/* One or zero bits in reachable_bits */
VERB4 bb_error_msg("peer %s unfit for selection: unreachable", p->p_dotted);
return 0;
}
#if 0 /* we filter out such packets earlier */
if ((p->lastpkt_status & LI_ALARM) == LI_ALARM
|| p->lastpkt_stratum >= MAXSTRAT
) {
VERB4 bb_error_msg("peer %s unfit for selection: bad status/stratum", p->p_dotted);
return 0;
}
#endif
/* rd is root_distance(p) */
if (rd > MAXDIST + FREQ_TOLERANCE * (1 << G.poll_exp)) {
VERB4 bb_error_msg("peer %s unfit for selection: root distance too high", p->p_dotted);
return 0;
}
//TODO
// /* Do we have a loop? */
// if (p->refid == p->dstaddr || p->refid == s.refid)
// return 0;
return 1;
}
|
Safe
|
[
"CWE-399"
] |
busybox
|
150dc7a2b483b8338a3e185c478b4b23ee884e71
|
3.3385512460763572e+38
| 26 |
ntpd: respond only to client and symmetric active packets
The busybox NTP implementation doesn't check the NTP mode of packets
received on the server port and responds to any packet with the right
size. This includes responses from another NTP server. An attacker can
send a packet with a spoofed source address in order to create an
infinite loop of responses between two busybox NTP servers. Adding
more packets to the loop increases the traffic between the servers
until one of them has a fully loaded CPU and/or network.
Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
| 0 |
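A sketch of the mode check this commit adds, based on the NTP header layout (mode is the low 3 bits of the first byte; client = 3, symmetric active = 1, server = 4). Hypothetical standalone code, not busybox's ntpd:

#include <stdio.h>

#define NTP_MODE_SYM_ACT 1
#define NTP_MODE_CLIENT  3

static int should_respond(unsigned char li_vn_mode)
{
    unsigned mode = li_vn_mode & 0x07;
    /* answering server (mode 4) replies is what enables the loop */
    return mode == NTP_MODE_CLIENT || mode == NTP_MODE_SYM_ACT;
}

int main(void)
{
    printf("client -> %d\n", should_respond(0x1b)); /* VN=3, mode=3: respond */
    printf("server -> %d\n", should_respond(0x1c)); /* VN=3, mode=4: drop */
    return 0;
}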
static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
unsigned length)
{
u32 align_buffer;
u32 *to_u32_ptr;
u16 *from_u16_ptr, *to_u16_ptr;
to_u32_ptr = dest_ptr;
from_u16_ptr = src_ptr;
align_buffer = 0;
for (; length > 3; length -= 4) {
to_u16_ptr = (u16 *)&align_buffer;
*to_u16_ptr++ = *from_u16_ptr++;
*to_u16_ptr++ = *from_u16_ptr++;
/* This barrier resolves occasional issues seen around
* cases where the data is not properly flushed out
* from the processor store buffers to the destination
* memory locations.
*/
wmb();
/* Output a word */
*to_u32_ptr++ = align_buffer;
}
if (length) {
u8 *from_u8_ptr, *to_u8_ptr;
/* Set up to output the remaining data */
align_buffer = 0;
to_u8_ptr = (u8 *)&align_buffer;
from_u8_ptr = (u8 *)from_u16_ptr;
/* Output the remaining data */
for (; length > 0; length--)
*to_u8_ptr++ = *from_u8_ptr++;
/* This barrier resolves occasional issues seen around
* cases where the data is not properly flushed out
* from the processor store buffers to the destination
* memory locations.
*/
wmb();
*to_u32_ptr = align_buffer;
}
}
|
Safe
|
[
"CWE-703",
"CWE-824"
] |
linux
|
d0d62baa7f505bd4c59cd169692ff07ec49dde37
|
1.929668737836425e+38
| 47 |
net: xilinx_emaclite: Do not print real IOMEM pointer
Printing kernel pointers is discouraged because they might leak kernel
memory layout. This fixes smatch warning:
drivers/net/ethernet/xilinx/xilinx_emaclite.c:1191 xemaclite_of_probe() warn:
argument 4 to %08lX specifier is cast from pointer
Signed-off-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static void nma_menu_show_cb (GtkWidget *menu, NMApplet *applet)
{
guint32 n_wireless;
g_return_if_fail (menu != NULL);
g_return_if_fail (applet != NULL);
#if GTK_CHECK_VERSION(2, 15, 0)
gtk_status_icon_set_tooltip_text (applet->status_icon, NULL);
#else
gtk_status_icon_set_tooltip (applet->status_icon, NULL);
#endif
if (!nm_client_get_manager_running (applet->nm_client)) {
nma_menu_add_text_item (menu, _("NetworkManager is not running..."));
return;
}
if (nm_client_get_state (applet->nm_client) == NM_STATE_ASLEEP) {
nma_menu_add_text_item (menu, _("Networking disabled"));
return;
}
n_wireless = nma_menu_add_devices (menu, applet);
nma_menu_add_vpn_submenu (menu, applet);
if (n_wireless > 0 && nm_client_wireless_get_enabled (applet->nm_client)) {
/* Add the "Hidden wireless network..." entry */
nma_menu_add_separator_item (menu);
nma_menu_add_hidden_network_item (menu, applet);
nma_menu_add_create_network_item (menu, applet);
}
gtk_widget_show_all (menu);
// nmi_dbus_signal_user_interface_activated (applet->connection);
}
|
Safe
|
[
"CWE-200"
] |
network-manager-applet
|
8627880e07c8345f69ed639325280c7f62a8f894
|
1.1654042405133644e+38
| 38 |
editor: prevent any registration of objects on the system bus
D-Bus access-control is name-based; so requests for a specific name
are allowed/denied based on the rules in /etc/dbus-1/system.d. But
apparently apps still get a non-named service on the bus, and if we
register *any* object even though we don't have a named service,
dbus and dbus-glib will happily proxy signals. Since the connection
editor shouldn't ever expose anything having to do with connections
on any bus, make sure that's the case.
| 0 |
TensorDescriptor CreateTensorDescriptor() {
cudnnTensorDescriptor_t result;
CHECK_CUDNN_OK(cudnnCreateTensorDescriptor(&result));
return TensorDescriptor(result);
}
|
Safe
|
[
"CWE-20"
] |
tensorflow
|
14755416e364f17fb1870882fa778c7fec7f16e3
|
1.4796680024497787e+38
| 5 |
Prevent CHECK-fail in LSTM/GRU with zero-length input.
PiperOrigin-RevId: 346239181
Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f
| 0 |
xmlRelaxNGParse(xmlRelaxNGParserCtxtPtr ctxt)
{
xmlRelaxNGPtr ret = NULL;
xmlDocPtr doc;
xmlNodePtr root;
xmlRelaxNGInitTypes();
if (ctxt == NULL)
return (NULL);
/*
* First step is to parse the input document into an DOM/Infoset
*/
if (ctxt->URL != NULL) {
doc = xmlReadFile((const char *) ctxt->URL,NULL,0);
if (doc == NULL) {
xmlRngPErr(ctxt, NULL, XML_RNGP_PARSE_ERROR,
"xmlRelaxNGParse: could not load %s\n", ctxt->URL,
NULL);
return (NULL);
}
} else if (ctxt->buffer != NULL) {
doc = xmlReadMemory(ctxt->buffer, ctxt->size,NULL,NULL,0);
if (doc == NULL) {
xmlRngPErr(ctxt, NULL, XML_RNGP_PARSE_ERROR,
"xmlRelaxNGParse: could not parse schemas\n", NULL,
NULL);
return (NULL);
}
doc->URL = xmlStrdup(BAD_CAST "in_memory_buffer");
ctxt->URL = xmlStrdup(BAD_CAST "in_memory_buffer");
} else if (ctxt->document != NULL) {
doc = ctxt->document;
} else {
xmlRngPErr(ctxt, NULL, XML_RNGP_EMPTY,
"xmlRelaxNGParse: nothing to parse\n", NULL, NULL);
return (NULL);
}
ctxt->document = doc;
/*
* Some preprocessing of the document content
*/
doc = xmlRelaxNGCleanupDoc(ctxt, doc);
if (doc == NULL) {
xmlFreeDoc(ctxt->document);
ctxt->document = NULL;
return (NULL);
}
/*
* Then do the parsing for good
*/
root = xmlDocGetRootElement(doc);
if (root == NULL) {
xmlRngPErr(ctxt, (xmlNodePtr) doc,
XML_RNGP_EMPTY, "xmlRelaxNGParse: %s is empty\n",
(ctxt->URL ? ctxt->URL : BAD_CAST "schemas"), NULL);
xmlFreeDoc(ctxt->document);
ctxt->document = NULL;
return (NULL);
}
ret = xmlRelaxNGParseDocument(ctxt, root);
if (ret == NULL) {
xmlFreeDoc(ctxt->document);
ctxt->document = NULL;
return (NULL);
}
/*
* Check the ref/defines links
*/
/*
* try to preprocess interleaves
*/
if (ctxt->interleaves != NULL) {
xmlHashScan(ctxt->interleaves,
(xmlHashScanner) xmlRelaxNGComputeInterleaves, ctxt);
}
/*
* if there was a parsing error return NULL
*/
if (ctxt->nbErrors > 0) {
xmlRelaxNGFree(ret);
ctxt->document = NULL;
xmlFreeDoc(doc);
return (NULL);
}
/*
* try to compile (parts of) the schemas
*/
if ((ret->topgrammar != NULL) && (ret->topgrammar->start != NULL)) {
if (ret->topgrammar->start->type != XML_RELAXNG_START) {
xmlRelaxNGDefinePtr def;
def = xmlRelaxNGNewDefine(ctxt, NULL);
if (def != NULL) {
def->type = XML_RELAXNG_START;
def->content = ret->topgrammar->start;
ret->topgrammar->start = def;
}
}
xmlRelaxNGTryCompile(ctxt, ret->topgrammar->start);
}
/*
* Transfer the pointer for cleanup at the schema level.
*/
ret->doc = doc;
ctxt->document = NULL;
ret->documents = ctxt->documents;
ctxt->documents = NULL;
ret->includes = ctxt->includes;
ctxt->includes = NULL;
ret->defNr = ctxt->defNr;
ret->defTab = ctxt->defTab;
ctxt->defTab = NULL;
if (ctxt->idref == 1)
ret->idref = 1;
return (ret);
}
|
Safe
|
[
"CWE-134"
] |
libxml2
|
502f6a6d08b08c04b3ddfb1cd21b2f699c1b7f5b
|
2.566604046964126e+38
| 127 |
More format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
this adds a new xmlEscapeFormatString() function to escape composed format
strings
| 0 |
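The class of bug xmlEscapeFormatString() addresses: attacker-influenced text that travels through a printf-style path as part of the format must have every '%' neutralized first. A generic sketch (hypothetical helper, not the libxml2 function):

#include <stdio.h>

static void escape_percent(char *dst, size_t dstlen, const char *src)
{
    size_t i = 0;
    for (; *src && i + 2 < dstlen; src++) {
        if (*src == '%')
            dst[i++] = '%';   /* "%%" prints a literal '%' */
        dst[i++] = *src;
    }
    dst[i] = '\0';
}

int main(void)
{
    char safe[128];
    escape_percent(safe, sizeof(safe), "evil %s %n name");
    printf(safe);   /* deliberately non-literal: now prints literally */
    printf("\n");
    return 0;
}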
**/
CImg<T>& select(CImgDisplay &disp,
const unsigned int feature_type=2, unsigned int *const XYZ=0,
const bool exit_on_anykey=false) {
return get_select(disp,feature_type,XYZ,exit_on_anykey).move_to(*this);
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
2.7378094653553666e+38
| 5 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
void __online_page_increment_counters(struct page *page)
{
totalram_pages++;
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page))
totalhigh_pages++;
#endif
}
|
Safe
|
[] |
linux-2.6
|
08dff7b7d629807dbb1f398c68dd9cd58dd657a1
|
3.088977628680004e+38
| 9 |
mm/hotplug: correctly add new zone to all other nodes' zone lists
When online_pages() is called to add new memory to an empty zone, it
rebuilds all zone lists by calling build_all_zonelists(). But there's a
bug which prevents the new zone from being added to other nodes' zone lists.
online_pages() {
build_all_zonelists()
.....
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY)
}
Here the node of the zone is put into N_HIGH_MEMORY state after calling
build_all_zonelists(), but build_all_zonelists() only adds zones from
nodes in N_HIGH_MEMORY state to the fallback zone lists.
build_all_zonelists()
->__build_all_zonelists()
->build_zonelists()
->find_next_best_node()
->for_each_node_state(n, N_HIGH_MEMORY)
So memory in the new zone will never be used by other nodes, and it may
cause strange behavior when the system is under memory pressure. So put the node
into N_HIGH_MEMORY state before calling build_all_zonelists().
Signed-off-by: Jianguo Wu <wujianguo@huawei.com>
Signed-off-by: Jiang Liu <liuj97@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Keping Chen <chenkeping@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
static int ZEND_FASTCALL ZEND_BW_NOT_SPEC_VAR_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op1;
bitwise_not_function(&EX_T(opline->result.u.var).tmp_var,
_get_zval_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC) TSRMLS_CC);
if (free_op1.var) {zval_ptr_dtor(&free_op1.var);};
ZEND_VM_NEXT_OPCODE();
}
|
Safe
|
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
|
1.2486447160464494e+38
| 10 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
| 0 |
static size_t tr_variantDictSize(tr_variant const* dict)
{
return tr_variantIsDict(dict) ? dict->val.l.count : 0;
}
|
Safe
|
[
"CWE-416",
"CWE-284"
] |
transmission
|
2123adf8e5e1c2b48791f9d22fc8c747e974180e
|
7.451738518650174e+37
| 4 |
CVE-2018-10756: Fix heap-use-after-free in tr_variantWalk
In libtransmission/variant.c, function tr_variantWalk, when the variant
stack is reallocated, a pointer to the previously allocated memory
region is kept. This address is later accessed (heap use-after-free)
while walking back down the stack, causing the application to crash.
The application can be any application which uses libtransmission, such
as transmission-daemon, transmission-gtk, transmission-show, etc.
Reported-by: Tom Richards <tom@tomrichards.net>
| 0 |
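A minimal sketch of the use-after-free class named in this CVE: a pointer taken into a block before realloc() may dangle afterwards, so re-derive it from the new base (or keep an index). Hypothetical code, not libtransmission's variant walker:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t cap = 4, top = 3;
    int *stack = malloc(cap * sizeof(*stack));
    if (!stack) return 1;

    int *node = &stack[top];          /* points into the old block */
    int *grown = realloc(stack, 2 * cap * sizeof(*stack));
    if (!grown) { free(stack); return 1; }
    stack = grown;

    /* dereferencing the old 'node' here may be a use-after-free; the
     * safe form recomputes it after every (re)allocation: */
    node = &stack[top];
    *node = 42;
    printf("%d\n", *node);
    free(stack);
    return 0;
}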
static void nfs4_fl_release_lock(struct file_lock *fl)
{
nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}
|
Safe
|
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
|
1.281717738576239e+38
| 4 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
| 0 |
static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
unsigned long state,
void *_notify)
{
struct netlink_notify *notify = _notify;
if (state != NETLINK_URELEASE)
return NOTIFY_DONE;
remove_user_radios(notify->portid);
if (notify->portid == hwsim_net_get_wmediumd(notify->net)) {
printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
" socket, switching to perfect channel medium\n");
hwsim_register_wmediumd(notify->net, 0);
}
return NOTIFY_DONE;
}
|
Safe
|
[
"CWE-703",
"CWE-772"
] |
linux
|
0ddcff49b672239dda94d70d0fcf50317a9f4b51
|
2.130057789720964e+38
| 19 |
mac80211_hwsim: fix possible memory leak in hwsim_new_radio_nl()
'hwname' is malloc'ed in hwsim_new_radio_nl() and should be freed
before leaving the error handling cases; otherwise it will cause a
memory leak.
Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length")
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Reviewed-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
| 0 |
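The leak pattern this commit message describes, sketched generically: anything allocated before a failing step must be released on every exit path, which is what the kernel's goto-cleanup style exists for (hypothetical userspace names):

#include <stdlib.h>
#include <string.h>

static int new_radio(const char *name_attr)
{
    int err = 0;
    char *hwname = strdup(name_attr);   /* stands in for the kmalloc'd name */
    if (!hwname)
        return -1;

    if (strlen(hwname) == 0) {          /* some later validation fails */
        err = -1;
        goto out_free;                  /* the fix: free before leaving */
    }
    /* ... use hwname ... */
out_free:
    free(hwname);
    return err;
}

int main(void) { return new_radio("radio0") ? 1 : 0; }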
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
int parse_extradata)
{
AVCodecContext *const avctx = h->avctx;
H264Context *hx; ///< thread context
int buf_index;
unsigned context_count;
int next_avc;
int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
int nal_index;
int idr_cleared=0;
int ret = 0;
h->nal_unit_type= 0;
if(!h->slice_context_count)
h->slice_context_count= 1;
h->max_contexts = h->slice_context_count;
if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
h->current_slice = 0;
if (!h->first_field)
h->cur_pic_ptr = NULL;
ff_h264_reset_sei(h);
}
if (h->nal_length_size == 4) {
if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
h->is_avc = 0;
}else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
h->is_avc = 1;
}
if (avctx->active_thread_type & FF_THREAD_FRAME)
nals_needed = get_last_needed_nal(h, buf, buf_size);
{
buf_index = 0;
context_count = 0;
next_avc = h->is_avc ? 0 : buf_size;
nal_index = 0;
for (;;) {
int consumed;
int dst_length;
int bit_length;
const uint8_t *ptr;
int nalsize = 0;
int err;
if (buf_index >= next_avc) {
nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
if (nalsize < 0)
break;
next_avc = buf_index + nalsize;
} else {
buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
if (buf_index >= buf_size)
break;
if (buf_index >= next_avc)
continue;
}
hx = h->thread_context[context_count];
ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
&consumed, next_avc - buf_index);
if (!ptr || dst_length < 0) {
ret = -1;
goto end;
}
bit_length = get_bit_length(h, buf, ptr, dst_length,
buf_index + consumed, next_avc);
if (h->avctx->debug & FF_DEBUG_STARTCODE)
av_log(h->avctx, AV_LOG_DEBUG,
"NAL %d/%d at %d/%d length %d\n",
hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length);
if (h->is_avc && (nalsize != consumed) && nalsize)
av_log(h->avctx, AV_LOG_DEBUG,
"AVC: Consumed only %d bytes instead of %d\n",
consumed, nalsize);
buf_index += consumed;
nal_index++;
if (avctx->skip_frame >= AVDISCARD_NONREF &&
h->nal_ref_idc == 0 &&
h->nal_unit_type != NAL_SEI)
continue;
again:
if ( !(avctx->active_thread_type & FF_THREAD_FRAME)
|| nals_needed >= nal_index)
h->au_pps_id = -1;
/* Ignore per frame NAL unit type during extradata
* parsing. Decoding slices is not possible in codec init
* with frame-mt */
if (parse_extradata) {
switch (hx->nal_unit_type) {
case NAL_IDR_SLICE:
case NAL_SLICE:
case NAL_DPA:
case NAL_DPB:
case NAL_DPC:
av_log(h->avctx, AV_LOG_WARNING,
"Ignoring NAL %d in global header/extradata\n",
hx->nal_unit_type);
// fall through to next case
case NAL_AUXILIARY_SLICE:
hx->nal_unit_type = NAL_FF_IGNORE;
}
}
err = 0;
switch (hx->nal_unit_type) {
case NAL_IDR_SLICE:
if ((ptr[0] & 0xFC) == 0x98) {
av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
h->next_outputed_poc = INT_MIN;
ret = -1;
goto end;
}
if (h->nal_unit_type != NAL_IDR_SLICE) {
av_log(h->avctx, AV_LOG_ERROR,
"Invalid mix of idr and non-idr slices\n");
ret = -1;
goto end;
}
if(!idr_cleared)
idr(h); // FIXME ensure we don't lose some frames if there is reordering
idr_cleared = 1;
h->has_recovery_point = 1;
case NAL_SLICE:
init_get_bits(&hx->gb, ptr, bit_length);
hx->intra_gb_ptr =
hx->inter_gb_ptr = &hx->gb;
hx->data_partitioning = 0;
if ((err = ff_h264_decode_slice_header(hx, h)))
break;
if (h->sei_recovery_frame_cnt >= 0) {
if (h->frame_num != h->sei_recovery_frame_cnt || hx->slice_type_nos != AV_PICTURE_TYPE_I)
h->valid_recovery_point = 1;
if ( h->recovery_frame < 0
|| ((h->recovery_frame - h->frame_num) & ((1 << h->sps.log2_max_frame_num)-1)) > h->sei_recovery_frame_cnt) {
h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) &
((1 << h->sps.log2_max_frame_num) - 1);
if (!h->valid_recovery_point)
h->recovery_frame = h->frame_num;
}
}
h->cur_pic_ptr->f.key_frame |=
(hx->nal_unit_type == NAL_IDR_SLICE);
if (hx->nal_unit_type == NAL_IDR_SLICE ||
h->recovery_frame == h->frame_num) {
h->recovery_frame = -1;
h->cur_pic_ptr->recovered = 1;
}
// If we have an IDR, all frames after it in decoded order are
// "recovered".
if (hx->nal_unit_type == NAL_IDR_SLICE)
h->frame_recovered |= FRAME_RECOVERED_IDR;
h->frame_recovered |= 3*!!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL);
h->frame_recovered |= 3*!!(avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT);
#if 1
h->cur_pic_ptr->recovered |= h->frame_recovered;
#else
h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
#endif
if (h->current_slice == 1) {
if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
decode_postinit(h, nal_index >= nals_needed);
if (h->avctx->hwaccel &&
(ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
return ret;
if (CONFIG_H264_VDPAU_DECODER &&
h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
ff_vdpau_h264_picture_start(h);
}
if (hx->redundant_pic_count == 0) {
if (avctx->hwaccel) {
ret = avctx->hwaccel->decode_slice(avctx,
&buf[buf_index - consumed],
consumed);
if (ret < 0)
return ret;
} else if (CONFIG_H264_VDPAU_DECODER &&
h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0],
start_code,
sizeof(start_code));
ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0],
&buf[buf_index - consumed],
consumed);
} else
context_count++;
}
break;
case NAL_DPA:
if (h->avctx->flags & CODEC_FLAG2_CHUNKS) {
av_log(h->avctx, AV_LOG_ERROR,
"Decoding in chunks is not supported for "
"partitioned slices.\n");
return AVERROR(ENOSYS);
}
init_get_bits(&hx->gb, ptr, bit_length);
hx->intra_gb_ptr =
hx->inter_gb_ptr = NULL;
if ((err = ff_h264_decode_slice_header(hx, h))) {
/* make sure data_partitioning is cleared if it was set
* before, so we don't try decoding a slice without a valid
* slice header later */
h->data_partitioning = 0;
break;
}
hx->data_partitioning = 1;
break;
case NAL_DPB:
init_get_bits(&hx->intra_gb, ptr, bit_length);
hx->intra_gb_ptr = &hx->intra_gb;
break;
case NAL_DPC:
init_get_bits(&hx->inter_gb, ptr, bit_length);
hx->inter_gb_ptr = &hx->inter_gb;
av_log(h->avctx, AV_LOG_ERROR, "Partitioned H.264 support is incomplete\n");
break;
if (hx->redundant_pic_count == 0 &&
hx->intra_gb_ptr &&
hx->data_partitioning &&
h->cur_pic_ptr && h->context_initialized &&
(avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
(avctx->skip_frame < AVDISCARD_BIDIR ||
hx->slice_type_nos != AV_PICTURE_TYPE_B) &&
(avctx->skip_frame < AVDISCARD_NONINTRA ||
hx->slice_type_nos == AV_PICTURE_TYPE_I) &&
avctx->skip_frame < AVDISCARD_ALL)
context_count++;
break;
case NAL_SEI:
init_get_bits(&h->gb, ptr, bit_length);
ret = ff_h264_decode_sei(h);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
goto end;
break;
case NAL_SPS:
init_get_bits(&h->gb, ptr, bit_length);
if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? nalsize : 1)) {
av_log(h->avctx, AV_LOG_DEBUG,
"SPS decoding failure, trying again with the complete NAL\n");
if (h->is_avc)
av_assert0(next_avc - buf_index + consumed == nalsize);
if ((next_avc - buf_index + consumed - 1) >= INT_MAX/8)
break;
init_get_bits(&h->gb, &buf[buf_index + 1 - consumed],
8*(next_avc - buf_index + consumed - 1));
ff_h264_decode_seq_parameter_set(h);
}
break;
case NAL_PPS:
init_get_bits(&h->gb, ptr, bit_length);
ret = ff_h264_decode_picture_parameter_set(h, bit_length);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
goto end;
break;
case NAL_AUD:
case NAL_END_SEQUENCE:
case NAL_END_STREAM:
case NAL_FILLER_DATA:
case NAL_SPS_EXT:
case NAL_AUXILIARY_SLICE:
break;
case NAL_FF_IGNORE:
break;
default:
av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
hx->nal_unit_type, bit_length);
}
if (context_count == h->max_contexts) {
ret = ff_h264_execute_decode_slices(h, context_count);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
goto end;
context_count = 0;
}
if (err < 0 || err == SLICE_SKIPED) {
if (err < 0)
av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
} else if (err == SLICE_SINGLETHREAD) {
/* Slice could not be decoded in parallel mode, copy down
* NAL unit stuff to context 0 and restart. Note that
* rbsp_buffer is not transferred, but since we no longer
* run in parallel mode this should not be an issue. */
h->nal_unit_type = hx->nal_unit_type;
h->nal_ref_idc = hx->nal_ref_idc;
hx = h;
goto again;
}
}
}
if (context_count) {
ret = ff_h264_execute_decode_slices(h, context_count);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
goto end;
}
ret = 0;
end:
/* clean up */
if (h->cur_pic_ptr && !h->droppable) {
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
return (ret < 0) ? ret : buf_index;
}
|
Safe
|
[
"CWE-703"
] |
FFmpeg
|
e8714f6f93d1a32f4e4655209960afcf4c185214
|
1.5318288433506993e+38
| 333 |
avcodec/h264: Clear delayed_pic on deallocation
Fixes use of freed memory
Fixes: case5_av_frame_copy_props.mp4
Found-by: Michal Zalewski <lcamtuf@coredump.cx>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
| 0 |
static void l2tp_ip6_close(struct sock *sk, long timeout)
{
write_lock_bh(&l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
sk_common_release(sk);
}
|
Safe
|
[
"CWE-20"
] |
net
|
85fbaa75037d0b6b786ff18658ddf0b4014ce2a4
|
5.172157010249545e+37
| 9 |
inet: fix addr_len/msg->msg_namelen assignment in recv_error and rxpmtu functions
Commit bceaa90240b6019ed73b49965eac7d167610be69 ("inet: prevent leakage
of uninitialized memory to user in recv syscalls") conditionally updated
addr_len if the msg_name is written to. The recv_error and rxpmtu
functions relied on the recvmsg functions to set up addr_len before.
As this does not happen any more we have to pass addr_len to those
functions as well and set it to the size of the corresponding sockaddr
length.
This broke traceroute and such.
Fixes: bceaa90240b6 ("inet: prevent leakage of uninitialized memory to user in recv syscalls")
Reported-by: Brad Spengler <spender@grsecurity.net>
Reported-by: Tom Labanowski
Cc: mpb <mpb.mail@gmail.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static u64 __sched_period(unsigned long nr_running)
{
if (unlikely(nr_running > sched_nr_latency))
return nr_running * sysctl_sched_min_granularity;
else
return sysctl_sched_latency;
}
|
Safe
|
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
c40f7d74c741a907cfaeb73a7697081881c497d0
|
2.3718461315490794e+38
| 7 |
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
has a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <xiexiuqi@huawei.com>
Analyzed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reported-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Reported-by: Sargun Dhillon <sargun@sargun.me>
Reported-by: Xie XiuQi <xiexiuqi@huawei.com>
Tested-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Tested-by: Sargun Dhillon <sargun@sargun.me>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: <stable@vger.kernel.org> # v4.13+
Cc: Bin Li <huawei.libin@huawei.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/1545879866-27809-1-git-send-email-xiexiuqi@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| 0 |
static inline int compute_score(struct sock *sk, struct net *net,
__be32 saddr, unsigned short hnum, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
ipv6_only_sock(sk))
return -1;
score = (sk->sk_family == PF_INET) ? 2 : 1;
inet = inet_sk(sk);
if (inet->inet_rcv_saddr) {
if (inet->inet_rcv_saddr != daddr)
return -1;
score += 4;
}
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score += 4;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
|
Safe
|
[
"CWE-358"
] |
linux
|
197c949e7798fbf28cfadc69d9ca0c2abbf93191
|
3.1272288384163086e+38
| 42 |
udp: properly support MSG_PEEK with truncated buffers
Backport of this upstream commit into stable kernels :
89c22d8c3b27 ("net: Fix skb csum races when peeking")
exposed a bug in udp stack vs MSG_PEEK support, when user provides
a buffer smaller than skb payload.
In this case,
skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
msg->msg_iov);
returns -EFAULT.
This bug does not happen in upstream kernels since Al Viro did a great
job replacing this with:
skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
This variant is safe vs short buffers.
For the time being, instead reverting Herbert Xu patch and add back
skb->ip_summed invalid changes, simply store the result of
udp_lib_checksum_complete() so that we avoid computing the checksum a
second time, and avoid the problematic
skb_copy_and_csum_datagram_iovec() call.
This patch can be applied on recent kernels as it avoids a double
checksumming, then backported to stable kernels as a bug fix.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
dissect_tb_data(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
dissector_handle_t *data_handle, void *data)
{
int chan, num_tbs = 0;
int bit_offset = 0;
int crci_bit_offset = (offset+1)<<3; /* Current offset + Quality estimate of 1 byte at the end*/
guint data_bits = 0;
guint8 crci_bit = 0;
proto_item *tree_ti = NULL;
proto_tree *data_tree = NULL;
gboolean dissected = FALSE;
if (tree) {
/* Add data subtree */
tree_ti = proto_tree_add_item(tree, hf_fp_data, tvb, offset, -1, ENC_NA);
proto_item_set_text(tree_ti, "TB data for %u chans", p_fp_info->num_chans);
data_tree = proto_item_add_subtree(tree_ti, ett_fp_data);
}
/* Calculate offset to CRCI bits */
if (p_fp_info->is_uplink) {
for (chan=0; chan < p_fp_info->num_chans; chan++) {
int n;
for (n=0; n < p_fp_info->chan_num_tbs[chan]; n++) {
/* Advance bit offset */
crci_bit_offset += p_fp_info->chan_tf_size[chan];
/* Pad out to next byte */
if (crci_bit_offset % 8) {
crci_bit_offset += (8 - (crci_bit_offset % 8));
}
}
}
}
/* Now for the TB data */
for (chan=0; chan < p_fp_info->num_chans; chan++) {
int n;
p_fp_info->cur_chan = chan; /*Set current channel?*/
/* Clearly show channels with no TBs */
if (p_fp_info->chan_num_tbs[chan] == 0) {
proto_item *no_tb_ti = proto_tree_add_uint(data_tree, hf_fp_chan_zero_tbs, tvb,
offset+(bit_offset/8),
0, chan+1);
proto_item_append_text(no_tb_ti, " (of size %d)",
p_fp_info->chan_tf_size[chan]);
PROTO_ITEM_SET_GENERATED(no_tb_ti);
}
/* Show TBs from non-empty channels */
pinfo->fd->subnum = chan; /* set subframe number to current TB */
for (n=0; n < p_fp_info->chan_num_tbs[chan]; n++) {
proto_item *ti;
p_fp_info->cur_tb = chan; /*Set current transport block?*/
if (data_tree) {
ti = proto_tree_add_item(data_tree, hf_fp_tb, tvb,
offset + (bit_offset/8),
((bit_offset % 8) + p_fp_info->chan_tf_size[chan] + 7) / 8,
ENC_NA);
proto_item_set_text(ti, "TB (chan %u, tb %u, %u bits)",
chan+1, n+1, p_fp_info->chan_tf_size[chan]);
}
if (preferences_call_mac_dissectors /*&& !rlc_is_ciphered(pinfo)*/ && data_handle &&
(p_fp_info->chan_tf_size[chan] > 0)) {
tvbuff_t *next_tvb;
proto_item *item;
/* If this is DL we should not care about crci bits (since they don't exists)*/
if (p_fp_info->is_uplink) {
if ( p_fp_info->channel == CHANNEL_RACH_FDD) { /*In RACH we don't have any QE field, hence go back 8 bits.*/
crci_bit = tvb_get_bits8(tvb, crci_bit_offset+n-8, 1);
item = proto_tree_add_item(data_tree, hf_fp_crci[n%8], tvb, (crci_bit_offset+n-8)/8, 1, ENC_BIG_ENDIAN);
PROTO_ITEM_SET_GENERATED(item);
} else {
crci_bit = tvb_get_bits8(tvb, crci_bit_offset+n, 1);
item = proto_tree_add_item(data_tree, hf_fp_crci[n%8], tvb, (crci_bit_offset+n)/8, 1, ENC_BIG_ENDIAN);
PROTO_ITEM_SET_GENERATED(item);
}
}
if (crci_bit == 0 || !p_fp_info->is_uplink) {
next_tvb = tvb_new_subset(tvb, offset + bit_offset/8,
((bit_offset % 8) + p_fp_info->chan_tf_size[chan] + 7) / 8, -1);
/****************/
/* TODO: maybe this decision can be based only on info available in fp_info */
call_dissector_with_data(*data_handle, next_tvb, pinfo, top_level_tree, data);
dissected = TRUE;
} else {
proto_tree_add_expert(tree, pinfo, &ei_fp_crci_no_subdissector, tvb, offset + bit_offset/8,
((bit_offset % 8) + p_fp_info->chan_tf_size[chan] + 7) / 8);
}
}
num_tbs++;
/* Advance bit offset */
bit_offset += p_fp_info->chan_tf_size[chan];
data_bits += p_fp_info->chan_tf_size[chan];
/* Pad out to next byte */
if (bit_offset % 8) {
bit_offset += (8 - (bit_offset % 8));
}
}
}
if (dissected == FALSE) {
col_append_fstr(pinfo->cinfo, COL_INFO, "(%u bits in %u tbs)",
data_bits, num_tbs);
}
/* Data tree should cover entire length */
if (data_tree) {
proto_item_set_len(tree_ti, bit_offset/8);
proto_item_append_text(tree_ti, " (%u bits in %u tbs)", data_bits, num_tbs);
}
/* Move offset past TBs (we know it's already padded out to next byte) */
offset += (bit_offset / 8);
return offset;
}
|
Safe
|
[
"CWE-20"
] |
wireshark
|
7d7190695ce2ff269fdffb04e87139995cde21f4
|
3.0059808872550746e+38
| 127 |
UMTS_FP: fix handling reserved C/T value
The spec puts the reserved value at 0xf but our internal table has 'unknown' at
0; since all the other values seem to be offset-by-one, just take the modulus
0xf to avoid running off the end of the table.
Bug: 12191
Change-Id: I83c8fb66797bbdee52a2246fb1eea6e37cbc7eb0
Reviewed-on: https://code.wireshark.org/review/15722
Reviewed-by: Evan Huus <eapache@gmail.com>
Petri-Dish: Evan Huus <eapache@gmail.com>
Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org>
Reviewed-by: Michael Mann <mmann78@netscape.net>
| 0 |
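The indexing fix described above, reduced to a sketch: the wire format reserves value 0xf while the table keeps 'unknown' at index 0, so the lookup wraps with % 0xf instead of running past the last entry (hypothetical table, not the wireshark dissector):

#include <stdio.h>

static const char *ct_names[15] = {
    "unknown", "1", "2", "3", "4", "5", "6", "7",
    "8", "9", "10", "11", "12", "13", "14"
};

int main(void)
{
    unsigned raw = 0x0f;                   /* reserved value on the wire */
    printf("%s\n", ct_names[raw % 0x0f]);  /* wraps to "unknown", in bounds */
    return 0;
}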
pkinit_process_td_dh_params(krb5_context context,
pkinit_plg_crypto_context cryptoctx,
pkinit_req_crypto_context req_cryptoctx,
pkinit_identity_crypto_context id_cryptoctx,
krb5_algorithm_identifier **algId,
int *new_dh_size)
{
krb5_error_code retval = KRB5KDC_ERR_DH_KEY_PARAMETERS_NOT_ACCEPTED;
int i = 0, use_sent_dh = 0, ok = 0;
pkiDebug("dh parameters\n");
while (algId[i] != NULL) {
DH *dh = NULL;
unsigned char *tmp = NULL;
int dh_prime_bits = 0;
if (algId[i]->algorithm.length != dh_oid.length ||
memcmp(algId[i]->algorithm.data, dh_oid.data, dh_oid.length))
goto cleanup;
tmp = (unsigned char *)algId[i]->parameters.data;
dh = DH_new();
dh = pkinit_decode_dh_params(&dh, &tmp, algId[i]->parameters.length);
dh_prime_bits = BN_num_bits(dh->p);
pkiDebug("client sent %d DH bits server prefers %d DH bits\n",
*new_dh_size, dh_prime_bits);
switch(dh_prime_bits) {
case 1024:
if (pkinit_check_dh_params(cryptoctx->dh_1024->p, dh->p,
dh->g, dh->q) == 0) {
*new_dh_size = 1024;
ok = 1;
}
break;
case 2048:
if (pkinit_check_dh_params(cryptoctx->dh_2048->p, dh->p,
dh->g, dh->q) == 0) {
*new_dh_size = 2048;
ok = 1;
}
break;
case 4096:
if (pkinit_check_dh_params(cryptoctx->dh_4096->p, dh->p,
dh->g, dh->q) == 0) {
*new_dh_size = 4096;
ok = 1;
}
break;
default:
break;
}
if (!ok) {
DH_check(dh, &retval);
if (retval != 0) {
pkiDebug("DH parameters provided by server are unacceptable\n");
retval = KRB5KDC_ERR_DH_KEY_PARAMETERS_NOT_ACCEPTED;
}
else {
use_sent_dh = 1;
ok = 1;
}
}
if (!use_sent_dh)
DH_free(dh);
if (ok) {
if (req_cryptoctx->dh != NULL) {
DH_free(req_cryptoctx->dh);
req_cryptoctx->dh = NULL;
}
if (use_sent_dh)
req_cryptoctx->dh = dh;
break;
}
i++;
}
if (ok)
retval = 0;
cleanup:
return retval;
}
|
Safe
|
[
"CWE-476"
] |
krb5
|
f249555301940c6df3a2cdda13b56b5674eebc2e
|
3.920374487580916e+37
| 83 |
PKINIT null pointer deref [CVE-2013-1415]
Don't dereference a null pointer when cleaning up.
The KDC plugin for PKINIT can dereference a null pointer when a
malformed packet causes processing to terminate early, leading to
a crash of the KDC process. An attacker would need to have a valid
PKINIT certificate or have observed a successful PKINIT authentication,
or an unauthenticated attacker could execute the attack if anonymous
PKINIT is enabled.
CVSSv2 vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:P/RL:O/RC:C
This is a minimal commit for pullup; style fixes in a followup.
[kaduk@mit.edu: reformat and edit commit message]
(cherry picked from commit c773d3c775e9b2d88bcdff5f8a8ba88d7ec4e8ed)
ticket: 7570
version_fixed: 1.11.1
status: resolved
| 0 |
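The missing check behind this CVE is visible in the row's code: pkinit_decode_dh_params()'s result is dereferenced (dh->p) without testing for NULL. A generic standalone sketch of the required pattern (hypothetical names, not the krb5 patch itself):

#include <stdio.h>

struct dh_params { int prime_bits; };

static struct dh_params *decode_dh(const unsigned char *buf, int len)
{
    static struct dh_params p = { 1024 };
    (void)buf;
    if (len <= 0)
        return NULL;          /* malformed packet */
    return &p;
}

int main(void)
{
    struct dh_params *dh = decode_dh(NULL, 0);
    if (dh == NULL) {         /* without this, dh->prime_bits would crash */
        fprintf(stderr, "rejecting malformed DH parameters\n");
        return 1;
    }
    printf("%d bits\n", dh->prime_bits);
    return 0;
}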
static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tcp_verify_retransmit_hint(tp, skb);
tp->lost_out += tcp_skb_pcount(skb);
tcp_sum_lost(tp, skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
}
}
|
Safe
|
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
|
2.565622365241398e+38
| 10 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertized the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jonathan Looney <jtl@netflix.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Bruce Curtis <brucec@netflix.com>
Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
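The overflow arithmetic from this commit message, worked through as a standalone sketch: with MSS 48, one maximally filled skb (17 fragments x 64KB on PowerPC) already accounts for roughly 23K segments, so SACK-coalescing a few of them exceeds the u16 field:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned mss = 48;                    /* smallest MSS Linux accepts */
    unsigned frag_cap = 17 * 64 * 1024;   /* 17 frags x 64KB */
    unsigned segs_per_skb = (frag_cap + mss - 1) / mss;   /* 23211 */

    uint16_t gso_segs = 0;                /* 16-bit field in TCP_SKB_CB */
    for (int i = 0; i < 3; i++)           /* coalesce three such skbs */
        gso_segs += segs_per_skb;

    printf("true count %u, stored u16 %u\n",
           3 * segs_per_skb, gso_segs);   /* 69633 vs 4097: wrapped */
    return 0;
}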
struct ndp_msg_opt_type_info *ndp_msg_opt_type_info_by_raw_type(uint8_t raw_type)
{
struct ndp_msg_opt_type_info *info;
int i;
for (i = 0; i < NDP_MSG_OPT_TYPE_LIST_SIZE; i++) {
info = &ndp_msg_opt_type_info_list[i];
if (info->raw_type == raw_type)
return info;
}
return NULL;
}
|
Safe
|
[
"CWE-284"
] |
libndp
|
a4892df306e0532487f1634ba6d4c6d4bb381c7f
|
7.513901123825534e+36
| 12 |
libndp: validate the IPv6 hop limit
None of the NDP messages should ever come from a non-local network; as
stated in RFC4861's 6.1.1 (RS), 6.1.2 (RA), 7.1.1 (NS), 7.1.2 (NA),
and 8.1. (redirect):
- The IP Hop Limit field has a value of 255, i.e., the packet
could not possibly have been forwarded by a router.
This fixes CVE-2016-3698.
Reported by: Julien BERNARD <julien.bernard@viagenie.ca>
Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
| 0 |
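RFC 4861's requirement turns into a one-line receive check: an NDP message whose IPv6 hop limit is anything but 255 has crossed a router and must be dropped. A minimal sketch of that validation:

#include <stdint.h>

#define NDP_REQUIRED_HOP_LIMIT 255

/* Returns nonzero when the NDP message may be processed further. */
static int ndp_hop_limit_ok(uint8_t hop_limit)
{
    return hop_limit == NDP_REQUIRED_HOP_LIMIT;
}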
sig_handler handle_ctrlc_signal(int sig)
{
sigint_received= 1;
/* Skip rest if --sigint-ignore is used. */
if (opt_sigint_ignore)
return;
if (executing_query)
kill_query("^C");
/* else, do nothing, just terminate the current line (like /c command). */
return;
}
|
Safe
|
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
|
3.11604417887552e+38
| 13 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
| 0 |
rsvg_filter_primitive_composite_free (gpointer impl)
{
RsvgFilterPrimitiveComposite *composite = impl;
g_string_free (composite->in2, TRUE);
rsvg_filter_primitive_free (impl);
}
|
Safe
|
[
"CWE-369"
] |
librsvg
|
ecf9267a24b2c3c0cd211dbdfa9ef2232511972a
|
6.423147674861378e+37
| 8 |
bgo#783835 - Don't divide by zero in box_blur_line() for gaussian blurs
We were making the decision to use box blurs, instead of a true
Gaussian kernel, based on the size of *both* x and y dimensions. Do
them individually instead.
| 0 |
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
int gdeleted, int sdeleted)
{
switch (type) {
case IGMPV3_MODE_IS_INCLUDE:
case IGMPV3_MODE_IS_EXCLUDE:
if (gdeleted || sdeleted)
return 0;
if (!(pmc->gsquery && !psf->sf_gsresp)) {
if (pmc->sfmode == MCAST_INCLUDE)
return 1;
/* don't include if this source is excluded
* in all filters
*/
if (psf->sf_count[MCAST_INCLUDE])
return type == IGMPV3_MODE_IS_INCLUDE;
return pmc->sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
}
return 0;
case IGMPV3_CHANGE_TO_INCLUDE:
if (gdeleted || sdeleted)
return 0;
return psf->sf_count[MCAST_INCLUDE] != 0;
case IGMPV3_CHANGE_TO_EXCLUDE:
if (gdeleted || sdeleted)
return 0;
if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
psf->sf_count[MCAST_INCLUDE])
return 0;
return pmc->sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
case IGMPV3_ALLOW_NEW_SOURCES:
if (gdeleted || !psf->sf_crcount)
return 0;
return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
case IGMPV3_BLOCK_OLD_SOURCES:
if (pmc->sfmode == MCAST_INCLUDE)
return gdeleted || (psf->sf_crcount && sdeleted);
return psf->sf_crcount && !gdeleted && !sdeleted;
}
return 0;
}
|
Safe
|
[
"CWE-362"
] |
linux
|
23d2b94043ca8835bd1e67749020e839f396a1c2
|
2.9229028348833542e+38
| 43 |
igmp: Add ip_mc_list lock in ip_check_mc_rcu
I got below panic when doing fuzz test:
Kernel panic - not syncing: panic_on_warn set ...
CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
dump_stack_lvl+0x7a/0x9b
panic+0x2cd/0x5af
end_report.cold+0x5a/0x5a
kasan_report+0xec/0x110
ip_check_mc_rcu+0x556/0x5d0
__mkroute_output+0x895/0x1740
ip_route_output_key_hash_rcu+0x2d0/0x1050
ip_route_output_key_hash+0x182/0x2e0
ip_route_output_flow+0x28/0x130
udp_sendmsg+0x165d/0x2280
udpv6_sendmsg+0x121e/0x24f0
inet6_sendmsg+0xf7/0x140
sock_sendmsg+0xe9/0x180
____sys_sendmsg+0x2b8/0x7a0
___sys_sendmsg+0xf0/0x160
__sys_sendmmsg+0x17e/0x3c0
__x64_sys_sendmmsg+0x9e/0x100
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x462eb9
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8
48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9
RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007
RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc
R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff
It is a use-after-free in ip_check_mc_rcu.
In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection.
But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock.
Signed-off-by: Liu Jian <liujian56@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static void window_themes_update(void)
{
GSList *tmp;
for (tmp = windows; tmp != NULL; tmp = tmp->next) {
WINDOW_REC *rec = tmp->data;
if (rec->theme_name != NULL)
rec->theme = theme_load(rec->theme_name);
}
}
|
Safe
|
[
"CWE-416"
] |
irssi
|
43e44d553d44e313003cee87e6ea5e24d68b84a1
|
3.041066782116363e+38
| 11 |
Merge branch 'security' into 'master'
Security
Closes GL#12, GL#13, GL#14, GL#15, GL#16
See merge request irssi/irssi!23
| 0 |
static int rdg_bio_free(BIO* bio)
{
WINPR_UNUSED(bio);
return 1;
}
|
Safe
|
[
"CWE-125"
] |
FreeRDP
|
6b485b146a1b9d6ce72dfd7b5f36456c166e7a16
|
3.249976357485436e+37
| 5 |
Fixed oob read in irp_write and similar
| 0 |
relpTcpEnableTLS(relpTcp_t __attribute__((unused)) *pThis)
{
ENTER_RELPFUNC;
RELPOBJ_assert(pThis, Tcp);
#ifdef ENABLE_TLS
pThis->bEnableTLS = 1;
#else
iRet = RELP_RET_ERR_NO_TLS;
#endif /* #ifdef ENABLE_TLS */
LEAVE_RELPFUNC;
}
|
Safe
|
[
"CWE-787"
] |
librelp
|
2cfe657672636aa5d7d2a14cfcb0a6ab9d1f00cf
|
3.9365199991736865e+37
| 11 |
unify error message generation
| 0 |
int SRP_Verify_B_mod_N(BIGNUM *B, BIGNUM *N)
{
BIGNUM *r;
BN_CTX *bn_ctx;
int ret = 0;
if (B == NULL || N == NULL ||
(bn_ctx = BN_CTX_new()) == NULL)
return 0;
if ((r = BN_new()) == NULL)
goto err;
/* Checks if B % N == 0 */
if (!BN_nnmod(r,B,N,bn_ctx))
goto err;
ret = !BN_is_zero(r);
err:
BN_CTX_free(bn_ctx);
BN_free(r);
return ret;
}
|
Safe
|
[] |
openssl
|
edc032b5e3f3ebb1006a9c89e0ae00504f47966f
|
2.1574364889160164e+38
| 21 |
Add SRP support.
| 0 |
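A short usage sketch for the verifier above, assuming OpenSSL's BIGNUM API: a server-supplied B that reduces to 0 mod N must be rejected before any secret derivation. The hex inputs and wrapper name are illustrative:

#include <openssl/bn.h>

int SRP_Verify_B_mod_N(BIGNUM *B, BIGNUM *N); /* shown above */

static int accept_server_B(const char *b_hex, const char *n_hex)
{
    BIGNUM *B = NULL, *N = NULL;
    int ok = 0;

    if (BN_hex2bn(&B, b_hex) && BN_hex2bn(&N, n_hex))
        ok = SRP_Verify_B_mod_N(B, N);  /* 1 = usable, 0 = reject */

    BN_free(B);
    BN_free(N);
    return ok;
}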
win_drag_status_line(win_T *dragwin, int offset)
{
frame_T *curfr;
frame_T *fr;
int room;
int row;
int up; // if TRUE, drag status line up, otherwise down
int n;
fr = dragwin->w_frame;
curfr = fr;
if (fr != topframe) // more than one window
{
fr = fr->fr_parent;
// When the parent frame is not a column of frames, its parent should
// be.
if (fr->fr_layout != FR_COL)
{
curfr = fr;
if (fr != topframe) // only a row of windows, may drag statusline
fr = fr->fr_parent;
}
}
// If this is the last frame in a column, may want to resize the parent
// frame instead (go two up to skip a row of frames).
while (curfr != topframe && curfr->fr_next == NULL)
{
if (fr != topframe)
fr = fr->fr_parent;
curfr = fr;
if (fr != topframe)
fr = fr->fr_parent;
}
if (offset < 0) // drag up
{
up = TRUE;
offset = -offset;
// sum up the room of the current frame and above it
if (fr == curfr)
{
// only one window
room = fr->fr_height - frame_minheight(fr, NULL);
}
else
{
room = 0;
for (fr = fr->fr_child; ; fr = fr->fr_next)
{
room += fr->fr_height - frame_minheight(fr, NULL);
if (fr == curfr)
break;
}
}
fr = curfr->fr_next; // put fr at frame that grows
}
else // drag down
{
up = FALSE;
/*
* Only dragging the last status line can reduce p_ch.
*/
room = Rows - cmdline_row;
if (curfr->fr_next == NULL)
room -= 1;
else
room -= p_ch;
if (room < 0)
room = 0;
// sum up the room of frames below of the current one
FOR_ALL_FRAMES(fr, curfr->fr_next)
room += fr->fr_height - frame_minheight(fr, NULL);
fr = curfr; // put fr at window that grows
}
if (room < offset) // Not enough room
offset = room; // Move as far as we can
if (offset <= 0)
return;
/*
* Grow frame fr by "offset" lines.
* Doesn't happen when dragging the last status line up.
*/
if (fr != NULL)
frame_new_height(fr, fr->fr_height + offset, up, FALSE);
if (up)
fr = curfr; // current frame gets smaller
else
fr = curfr->fr_next; // next frame gets smaller
/*
* Now make the other frames smaller.
*/
while (fr != NULL && offset > 0)
{
n = frame_minheight(fr, NULL);
if (fr->fr_height - offset <= n)
{
offset -= fr->fr_height - n;
frame_new_height(fr, n, !up, FALSE);
}
else
{
frame_new_height(fr, fr->fr_height - offset, !up, FALSE);
break;
}
if (up)
fr = fr->fr_prev;
else
fr = fr->fr_next;
}
row = win_comp_pos();
screen_fill(row, cmdline_row, 0, (int)Columns, ' ', ' ', 0);
cmdline_row = row;
p_ch = Rows - cmdline_row;
if (p_ch < 1)
p_ch = 1;
curtab->tp_ch_used = p_ch;
redraw_all_later(SOME_VALID);
showmode();
}
|
Safe
|
[
"CWE-476"
] |
vim
|
0f6e28f686dbb59ab3b562408ab9b2234797b9b1
|
1.0864655567680612e+38
| 124 |
patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window.
| 0 |
static int bpf_obj_get_next_id(const union bpf_attr *attr,
union bpf_attr __user *uattr,
struct idr *idr,
spinlock_t *lock)
{
u32 next_id = attr->start_id;
int err = 0;
if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
next_id++;
spin_lock_bh(lock);
if (!idr_get_next(idr, &next_id))
err = -ENOENT;
spin_unlock_bh(lock);
if (!err)
err = put_user(next_id, &uattr->next_id);
return err;
}
|
Safe
|
[
"CWE-307"
] |
linux
|
350a5c4dd2452ea999cc5e1d4a8dbf12de2f97ef
|
2.452712453624418e+38
| 25 |
bpf: Don't allow vmlinux BTF to be used in map_create and prog_load.
The syzbot got FD of vmlinux BTF and passed it into map_create which caused
crash in btf_type_id_size() when it tried to access resolved_ids. The vmlinux
BTF doesn't have 'resolved_ids' and 'resolved_sizes' initialized to save
memory. To avoid such issues disallow using vmlinux BTF in prog_load and
map_create commands.
Fixes: 5329722057d4 ("bpf: Assign ID to vmlinux BTF and return extra info for BTF in GET_OBJ_INFO")
Reported-by: syzbot+8bab8ed346746e7540e8@syzkaller.appspotmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20210307225248.79031-1-alexei.starovoitov@gmail.com
| 0 |
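The shape of the fix is a single predicate at the syscall boundary: before map_create or prog_load accepts a BTF object, reject the kernel's own vmlinux BTF. A hedged kernel-style sketch; btf_is_kernel() is assumed here as the predicate distinguishing vmlinux BTF:

#include <errno.h>

struct btf;                               /* opaque, kernel-internal */
int btf_is_kernel(const struct btf *btf); /* assumed helper */

/* Sketch: vmlinux BTF lacks the resolved_ids/resolved_sizes arrays
 * that user-supplied BTF carries, so refuse it outright. */
static int check_user_btf(const struct btf *btf)
{
    return btf_is_kernel(btf) ? -EACCES : 0;
}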
TfLiteRegistration* Register_SLICE() {
static TfLiteRegistration r = {nullptr, nullptr, slice::Prepare,
slice::Eval<slice::kGenericOptimized>};
return &r;
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
|
3.2583544826256375e+38
| 5 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
| 0 |
PHPAPI void php_session_start(TSRMLS_D) /* {{{ */
{
zval **ppid;
zval **data;
char *p, *value;
int nrand;
int lensess;
if (PS(use_only_cookies)) {
PS(apply_trans_sid) = 0;
} else {
PS(apply_trans_sid) = PS(use_trans_sid);
}
switch (PS(session_status)) {
case php_session_active:
php_error(E_NOTICE, "A session had already been started - ignoring session_start()");
return;
break;
case php_session_disabled:
value = zend_ini_string("session.save_handler", sizeof("session.save_handler"), 0);
if (!PS(mod) && value) {
PS(mod) = _php_find_ps_module(value TSRMLS_CC);
if (!PS(mod)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot find save handler '%s' - session startup failed", value);
return;
}
}
value = zend_ini_string("session.serialize_handler", sizeof("session.serialize_handler"), 0);
if (!PS(serializer) && value) {
PS(serializer) = _php_find_ps_serializer(value TSRMLS_CC);
if (!PS(serializer)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot find serialization handler '%s' - session startup failed", value);
return;
}
}
PS(session_status) = php_session_none;
/* fallthrough */
default:
case php_session_none:
PS(define_sid) = 1;
PS(send_cookie) = 1;
}
lensess = strlen(PS(session_name));
/* Cookies are preferred, because initially
* cookie and get variables will be available. */
if (!PS(id)) {
if (PS(use_cookies) && zend_hash_find(&EG(symbol_table), "_COOKIE", sizeof("_COOKIE"), (void **) &data) == SUCCESS &&
Z_TYPE_PP(data) == IS_ARRAY &&
zend_hash_find(Z_ARRVAL_PP(data), PS(session_name), lensess + 1, (void **) &ppid) == SUCCESS
) {
PPID2SID;
PS(apply_trans_sid) = 0;
PS(send_cookie) = 0;
PS(define_sid) = 0;
}
if (!PS(use_only_cookies) && !PS(id) &&
zend_hash_find(&EG(symbol_table), "_GET", sizeof("_GET"), (void **) &data) == SUCCESS &&
Z_TYPE_PP(data) == IS_ARRAY &&
zend_hash_find(Z_ARRVAL_PP(data), PS(session_name), lensess + 1, (void **) &ppid) == SUCCESS
) {
PPID2SID;
PS(send_cookie) = 0;
}
if (!PS(use_only_cookies) && !PS(id) &&
zend_hash_find(&EG(symbol_table), "_POST", sizeof("_POST"), (void **) &data) == SUCCESS &&
Z_TYPE_PP(data) == IS_ARRAY &&
zend_hash_find(Z_ARRVAL_PP(data), PS(session_name), lensess + 1, (void **) &ppid) == SUCCESS
) {
PPID2SID;
PS(send_cookie) = 0;
}
}
/* Check the REQUEST_URI symbol for a string of the form
* '<session-name>=<session-id>' to allow URLs of the form
* http://yoursite/<session-name>=<session-id>/script.php */
if (!PS(use_only_cookies) && !PS(id) && PG(http_globals)[TRACK_VARS_SERVER] &&
zend_hash_find(Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_SERVER]), "REQUEST_URI", sizeof("REQUEST_URI"), (void **) &data) == SUCCESS &&
Z_TYPE_PP(data) == IS_STRING &&
(p = strstr(Z_STRVAL_PP(data), PS(session_name))) &&
p[lensess] == '='
) {
char *q;
p += lensess + 1;
if ((q = strpbrk(p, "/?\\"))) {
PS(id) = estrndup(p, q - p);
PS(send_cookie) = 0;
}
}
/* Check whether the current request was referred to by
* an external site which invalidates the previously found id. */
if (PS(id) &&
PS(extern_referer_chk)[0] != '\0' &&
PG(http_globals)[TRACK_VARS_SERVER] &&
zend_hash_find(Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_SERVER]), "HTTP_REFERER", sizeof("HTTP_REFERER"), (void **) &data) == SUCCESS &&
Z_TYPE_PP(data) == IS_STRING &&
Z_STRLEN_PP(data) != 0 &&
strstr(Z_STRVAL_PP(data), PS(extern_referer_chk)) == NULL
) {
efree(PS(id));
PS(id) = NULL;
PS(send_cookie) = 1;
if (PS(use_trans_sid) && !PS(use_only_cookies)) {
PS(apply_trans_sid) = 1;
}
}
/* Finally check session id for dangerous characters
* Security note: session id may be embedded in HTML pages.*/
if (PS(id) && strpbrk(PS(id), "\r\n\t <>'\"\\")) {
efree(PS(id));
PS(id) = NULL;
}
php_session_initialize(TSRMLS_C);
php_session_cache_limiter(TSRMLS_C);
if ((PS(mod_data) || PS(mod_user_implemented)) && PS(gc_probability) > 0) {
int nrdels = -1;
nrand = (int) ((float) PS(gc_divisor) * php_combined_lcg(TSRMLS_C));
if (nrand < PS(gc_probability)) {
PS(mod)->s_gc(&PS(mod_data), PS(gc_maxlifetime), &nrdels TSRMLS_CC);
#ifdef SESSION_DEBUG
if (nrdels != -1) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "purged %d expired session objects", nrdels);
}
#endif
}
}
}
|
Safe
|
[
"CWE-264"
] |
php-src
|
25e8fcc88fa20dc9d4c47184471003f436927cde
|
8.540740235310919e+36
| 143 |
Strict session
| 0 |
CNB::~CNB()
{
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
if(m_SGL != nullptr)
{
NdisMFreeNetBufferSGList(m_Context->DmaHandle, m_SGL, m_NB);
}
}
|
Safe
|
[
"CWE-20"
] |
kvm-guest-drivers-windows
|
723416fa4210b7464b28eab89cc76252e6193ac1
|
3.3295160191546696e+38
| 9 |
NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <yhindin@rehat.com>
| 0 |
ArgParser::argEmpty()
{
o.infilename = "";
}
|
Safe
|
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
|
3.097491571571805e+38
| 4 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
| 0 |
static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
/* Initial reset is a superset of the normal reset */
kvm_arch_vcpu_ioctl_normal_reset(vcpu);
/* this equals initial cpu reset in pop, but we don't switch to ESA */
vcpu->arch.sie_block->gpsw.mask = 0;
vcpu->arch.sie_block->gpsw.addr = 0;
kvm_s390_set_prefix(vcpu, 0);
kvm_s390_set_cpu_timer(vcpu, 0);
vcpu->arch.sie_block->ckc = 0;
memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
vcpu->run->s.regs.fpc = 0;
/*
* Do not reset these registers in the protected case, as some of
* them are overlayed and they are not accessible in this case
* anyway.
*/
if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->todpr = 0;
}
}
|
Safe
|
[
"CWE-416"
] |
linux
|
0774a964ef561b7170d8d1b1bfe6f88002b6d219
|
4.75470827689157e+35
| 27 |
KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <cai@lca.pw>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320205546.2396-2-sean.j.christopherson@intel.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
QPDF_Stream::releaseResolved()
{
this->stream_provider = 0;
QPDFObjectHandle::ReleaseResolver::releaseResolved(this->stream_dict);
}
|
Safe
|
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
|
1.3339178822757651e+38
| 5 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
| 0 |
dp_packet_hwol_set_tx_ipv6(struct dp_packet *b)
{
*dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_IPV6;
}
|
Safe
|
[
"CWE-400"
] |
ovs
|
79349cbab0b2a755140eedb91833ad2760520a83
|
3.1030751879598762e+38
| 4 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <joakim.hindersson@elastx.se>
Acked-by: Ilya Maximets <i.maximets@ovn.org>
Signed-off-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
| 0 |
void unireg_end(void)
{
clean_up(1);
my_thread_end();
#if defined(SIGNALS_DONT_BREAK_READ)
exit(0);
#else
pthread_exit(0); // Exit is in main thread
#endif
}
|
Safe
|
[
"CWE-264"
] |
mysql-server
|
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
|
3.3443671360434717e+38
| 10 |
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing general query log
and slow query log to be written to files ending in .ini and .cnf.
| 0 |
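The heart of the fix is a name check: a general or slow query log must never be written to a path that the server would later parse as an options file. A minimal, hedged sketch of such a suffix test:

#include <string.h>
#include <strings.h>

/* Returns 1 if the name ends in a suffix mysqld parses as an options
 * file, so it must be rejected as a log file destination. */
static int is_options_file_name(const char *name)
{
    static const char *const bad[] = { ".ini", ".cnf" };
    size_t len = strlen(name);

    for (size_t i = 0; i < sizeof(bad) / sizeof(bad[0]); i++) {
        size_t slen = strlen(bad[i]);
        if (len >= slen && strcasecmp(name + len - slen, bad[i]) == 0)
            return 1;
    }
    return 0;
}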
int gnutls_x509_tlsfeatures_add(gnutls_x509_tlsfeatures_t f, unsigned int feature)
{
if (f == NULL) {
gnutls_assert();
return GNUTLS_E_INVALID_REQUEST;
}
if (feature > UINT16_MAX)
return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
if (f->size >= sizeof(f->feature)/sizeof(f->feature[0]))
return gnutls_assert_val(GNUTLS_E_INTERNAL_ERROR);
f->feature[f->size++] = feature;
return 0;
}
|
Safe
|
[
"CWE-415"
] |
gnutls
|
c5aaa488a3d6df712dc8dff23a049133cab5ec1b
|
3.36078738831264e+38
| 17 |
gnutls_x509_ext_import_proxy: fix issue reading the policy language
If the language was set but the policy wasn't, that could lead to
a double free, as the value returned to the user was freed.
| 0 |
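The double free described here follows a common shape: a buffer is handed to the caller, but a local pointer that still aliases it gets freed on a later error path. The usual remedy is to null the local alias at the ownership hand-off. A generic C sketch, not the gnutls patch itself:

#include <stdlib.h>

static int hand_off(char **out, char *value, int later_error)
{
    *out = value;
    value = NULL;        /* ownership transferred; drop the local alias */

    if (later_error) {
        free(value);     /* now free(NULL): harmless, no double free */
        return -1;
    }
    return 0;
}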
ex_redraw(exarg_T *eap)
{
int r = RedrawingDisabled;
int p = p_lz;
RedrawingDisabled = 0;
p_lz = FALSE;
validate_cursor();
update_topline();
update_screen(eap->forceit ? CLEAR : VIsual_active ? INVERTED : 0);
if (need_maketitle)
maketitle();
#if defined(MSWIN) && (!defined(FEAT_GUI_MSWIN) || defined(VIMDLL))
# ifdef VIMDLL
if (!gui.in_use)
# endif
resize_console_buf();
#endif
RedrawingDisabled = r;
p_lz = p;
// After drawing the statusline screen_attr may still be set.
screen_stop_highlight();
// Reset msg_didout, so that a message that's there is overwritten.
msg_didout = FALSE;
msg_col = 0;
// No need to wait after an intentional redraw.
need_wait_return = FALSE;
// When invoked from a callback or autocmd the command line may be active.
if (State & CMDLINE)
redrawcmdline();
out_flush();
}
|
Safe
|
[
"CWE-416"
] |
vim
|
d88934406c5375d88f8f1b65331c9f0cab68cc6c
|
1.7972131242734607e+38
| 37 |
patch 8.2.4895: buffer overflow with invalid command with composing chars
Problem: Buffer overflow with invalid command with composing chars.
Solution: Check that the whole character fits in the buffer.
| 0 |
static inline void nohz_newidle_balance(struct rq *this_rq) { }
|
Safe
|
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
c40f7d74c741a907cfaeb73a7697081881c497d0
|
1.1614170038849869e+37
| 1 |
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
has a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let's take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <xiexiuqi@huawei.com>
Analyzed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reported-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Reported-by: Sargun Dhillon <sargun@sargun.me>
Reported-by: Xie XiuQi <xiexiuqi@huawei.com>
Tested-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Tested-by: Sargun Dhillon <sargun@sargun.me>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: <stable@vger.kernel.org> # v4.13+
Cc: Bin Li <huawei.libin@huawei.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/1545879866-27809-1-git-send-email-xiexiuqi@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| 0 |
static int fsl_lpspi_suspend(struct device *dev)
{
int ret;
pinctrl_pm_select_sleep_state(dev);
ret = pm_runtime_force_suspend(dev);
return ret;
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
057b8945f78f76d0b04eeb5c27cd9225e5e7ad86
|
1.1406696592685802e+38
| 8 |
spi: lpspi: fix memory leak in fsl_lpspi_probe
In fsl_lpspi_probe an SPI controller is allocated either via
spi_alloc_slave or spi_alloc_master. In all but one error case, this
controller is put by going to the error handling code. This commit fixes the
case when pm_runtime_get_sync fails and it should go to the error
handling path.
Fixes: 944c01a889d9 ("spi: lpspi: enable runtime pm for lpspi")
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Link: https://lore.kernel.org/r/20190930034602.1467-1-navid.emamdoost@gmail.com
Signed-off-by: Mark Brown <broonie@kernel.org>
| 0 |
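The leak follows the classic probe pattern: a late failure returns directly instead of unwinding earlier allocations through the shared error labels. A hedged kernel-style sketch of the corrected shape (not the actual driver diff):

#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>

static int probe_sketch(struct device *dev)
{
    struct spi_controller *ctlr;
    int ret;

    ctlr = spi_alloc_master(dev, 0);
    if (!ctlr)
        return -ENOMEM;

    ret = pm_runtime_get_sync(dev);
    if (ret < 0)
        goto out_put_ctlr;        /* the unwind the bug skipped */

    return 0;

out_put_ctlr:
    spi_controller_put(ctlr);     /* releases the allocation above */
    return ret;
}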
print_command_failed (const unsigned char *msg)
{
const char *t;
char buffer[100];
int ec;
if (!debug_level)
return;
ec = CCID_ERROR_CODE (msg);
switch (ec)
{
case 0x00: t = "Command not supported"; break;
case 0xE0: t = "Slot busy"; break;
case 0xEF: t = "PIN cancelled"; break;
case 0xF0: t = "PIN timeout"; break;
case 0xF2: t = "Automatic sequence ongoing"; break;
case 0xF3: t = "Deactivated Protocol"; break;
case 0xF4: t = "Procedure byte conflict"; break;
case 0xF5: t = "ICC class not supported"; break;
case 0xF6: t = "ICC protocol not supported"; break;
case 0xF7: t = "Bad checksum in ATR"; break;
case 0xF8: t = "Bad TS in ATR"; break;
case 0xFB: t = "An all inclusive hardware error occurred"; break;
case 0xFC: t = "Overrun error while talking to the ICC"; break;
case 0xFD: t = "Parity error while talking to the ICC"; break;
case 0xFE: t = "CCID timed out while talking to the ICC"; break;
case 0xFF: t = "Host aborted the current activity"; break;
default:
if (ec > 0 && ec < 128)
sprintf (buffer, "Parameter error at offset %d", ec);
else
sprintf (buffer, "Error code %02X", ec);
t = buffer;
break;
}
DEBUGOUT_1 ("CCID command failed: %s\n", t);
}
|
Safe
|
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
|
1.9560751370076513e+38
| 42 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <wk@gnupg.org>
| 0 |
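The pattern the commit eliminates is (buf[0] << 24) on a char buffer: the byte is promoted to int before the shift, so a value >= 0x80 sign-extends. The inline helpers widen to an unsigned type first. A sketch in the spirit of host2net.h:

#include <stdint.h>

/* Convert 4 big-endian bytes without the sign extension that
 * (buf[0] << 24) performs after integer promotion. */
static inline uint32_t buf32_to_u32(const void *buffer)
{
    const unsigned char *p = buffer;
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}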
static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct i2c_rdwr_ioctl_data32 __user *udata = compat_ptr(arg);
struct i2c_rdwr_aligned __user *tdata;
struct i2c_msg __user *tmsgs;
struct i2c_msg32 __user *umsgs;
compat_caddr_t datap;
int nmsgs, i;
if (get_user(nmsgs, &udata->nmsgs))
return -EFAULT;
if (nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
return -EINVAL;
if (get_user(datap, &udata->msgs))
return -EFAULT;
umsgs = compat_ptr(datap);
tdata = compat_alloc_user_space(sizeof(*tdata) +
nmsgs * sizeof(struct i2c_msg));
tmsgs = &tdata->msgs[0];
if (put_user(nmsgs, &tdata->cmd.nmsgs) ||
put_user(tmsgs, &tdata->cmd.msgs))
return -EFAULT;
for (i = 0; i < nmsgs; i++) {
if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
return -EFAULT;
if (get_user(datap, &umsgs[i].buf) ||
put_user(compat_ptr(datap), &tmsgs[i].buf))
return -EFAULT;
}
return sys_ioctl(fd, cmd, (unsigned long)tdata);
}
|
Safe
|
[] |
linux-2.6
|
188f83dfe0eeecd1427d0d255cc97dbf7ef6b4b7
|
1.6187575152547709e+38
| 35 |
[PATCH] BLOCK: Move the msdos device ioctl compat stuff to the msdos driver [try #6]
Move the msdos device ioctl compat stuff from fs/compat_ioctl.c to the msdos
driver so that the msdos header file doesn't need to be included.
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
str_lower_case_match(OnigEncoding enc, int case_fold_flag,
const UChar* t, const UChar* tend,
const UChar* p, const UChar* end)
{
int lowlen;
UChar *q, lowbuf[ONIGENC_MBC_CASE_FOLD_MAXLEN];
while (t < tend) {
lowlen = ONIGENC_MBC_CASE_FOLD(enc, case_fold_flag, &p, end, lowbuf);
q = lowbuf;
while (lowlen > 0) {
if (*t++ != *q++) return 0;
lowlen--;
}
}
return 1;
}
|
Safe
|
[
"CWE-125"
] |
oniguruma
|
690313a061f7a4fa614ec5cc8368b4f2284e059b
|
1.6663534986685518e+38
| 18 |
fix #57 : DATA_ENSURE() check must be before data access
| 0 |
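The one-line lesson of the fix — check the remaining length before touching the bytes — looks like this in a generic decoder helper (a sketch, not oniguruma's DATA_ENSURE macro itself):

#include <stddef.h>

/* Reads a big-endian u16, failing instead of reading past 'end'. */
static int read_u16_checked(const unsigned char *p,
                            const unsigned char *end, unsigned int *out)
{
    if (end - p < 2)          /* the bounds check must come first */
        return -1;
    *out = ((unsigned int)p[0] << 8) | p[1];
    return 0;
}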
void KernelAndDeviceFunc::RunAsync(
ScopedStepContainer* step_container, const EagerKernelArgs& inputs,
std::vector<EagerKernelRet>* outputs,
CancellationManager* cancellation_manager,
const absl::optional<EagerRemoteFunctionParams>& remote_func_params,
std::function<void(const Status&)> done) {
std::shared_ptr<FunctionLibraryRuntime::Options> opts = nullptr;
if (remote_func_params.has_value()) {
const EagerRemoteFunctionParams& params = remote_func_params.value();
if (params.step_id.has_value()) {
// If the function is a remote component of a cross-process function,
// re-use the step id as its parent function's.
opts = std::make_shared<FunctionLibraryRuntime::Options>(
params.step_id.value());
} else {
opts = std::make_shared<FunctionLibraryRuntime::Options>();
}
// Reuse the op id if it exists.
opts->op_id = params.op_id;
} else {
opts = std::make_shared<FunctionLibraryRuntime::Options>();
if (get_op_id_ && is_cross_process_) {
// If the function is a cross-process function and the remote execution
// goes through eager service, create an eager op id for the function.
opts->op_id = get_op_id_();
}
}
// We don't pass rendezvous from eager context because we can get tensor
// name collisions in send/recv ops when running multiple instances
// of the same multi-device function concurrently.
Rendezvous* rendezvous = rendezvous_creator_(opts->step_id);
opts->rendezvous = rendezvous;
opts->create_rendezvous = false;
// Create a cancellation manager to be used by FLR options if caller does not
// pass in one. If the caller does provide one, pass it to process FLR and the
// locally created one will be unused.
std::shared_ptr<CancellationManager> local_cm;
if (cancellation_manager) {
opts->cancellation_manager = cancellation_manager;
} else {
local_cm = std::make_shared<CancellationManager>();
opts->cancellation_manager = local_cm.get();
}
opts->allow_dead_tensors = true;
opts->step_container =
step_container == nullptr ? &step_container_ : step_container;
opts->collective_executor =
collective_executor_ ? collective_executor_->get() : nullptr;
opts->stats_collector = nullptr;
opts->runner = get_runner();
outputs->clear();
pflr_->Run(*opts, handle_, inputs, outputs,
[opts, rendezvous, local_cm, step_container, this,
done = std::move(done)](const Status& s) {
rendezvous->Unref();
if (step_container == nullptr) {
this->step_container_.CleanUp();
}
done(s);
});
}
|
Safe
|
[
"CWE-476",
"CWE-369"
] |
tensorflow
|
da8558533d925694483d2c136a9220d6d49d843c
|
3.1266810312144054e+38
| 66 |
Fix undefined behavior in `tf.raw_ops.Switch` in eager mode.
PiperOrigin-RevId: 332578058
Change-Id: I9727571d2f21476b10d8aa27c1b7176564b76ac9
| 0 |
int main(int argc, char *argv[])
{
FILE *iplist = NULL;
plist_t root_node = NULL;
char *plist_out = NULL;
uint32_t size = 0;
int read_size = 0;
char *plist_entire = NULL;
struct stat filestats;
options_t *options = parse_arguments(argc, argv);
if (!options)
{
print_usage(argc, argv);
return 0;
}
// read input file
iplist = fopen(options->in_file, "rb");
if (!iplist) {
free(options);
return 1;
}
stat(options->in_file, &filestats);
if (filestats.st_size < 8) {
printf("ERROR: Input file is too small to contain valid plist data.\n");
return -1;
}
plist_entire = (char *) malloc(sizeof(char) * (filestats.st_size + 1));
read_size = fread(plist_entire, sizeof(char), filestats.st_size, iplist);
fclose(iplist);
// convert from binary to xml or vice-versa
if (memcmp(plist_entire, "bplist00", 8) == 0)
{
plist_from_bin(plist_entire, read_size, &root_node);
plist_to_xml(root_node, &plist_out, &size);
}
else
{
plist_from_xml(plist_entire, read_size, &root_node);
plist_to_bin(root_node, &plist_out, &size);
}
plist_free(root_node);
free(plist_entire);
if (plist_out)
{
if (options->out_file != NULL)
{
FILE *oplist = fopen(options->out_file, "wb");
if (!oplist) {
free(options);
return 1;
}
fwrite(plist_out, size, sizeof(char), oplist);
fclose(oplist);
}
// if no output file specified, write to stdout
else
fwrite(plist_out, size, sizeof(char), stdout);
free(plist_out);
}
else
printf("ERROR: Failed to convert input file.\n");
free(options);
return 0;
}
|
Safe
|
[
"CWE-399",
"CWE-125"
] |
libplist
|
7391a506352c009fe044dead7baad9e22dd279ee
|
2.3817358148380346e+38
| 73 |
plistutil: Prevent OOB heap buffer read by checking input size
As pointed out in #87 plistutil would do a memcmp with a heap buffer
without checking the size. If the size is less than 8 it would read
beyond the bounds of this heap buffer. This commit prevents that.
| 0 |
static int propfind_caldata(const xmlChar *name, xmlNsPtr ns,
struct propfind_ctx *fctx,
xmlNodePtr resp __attribute__((unused)),
struct propstat propstat[],
void *rock)
{
xmlNodePtr prop = (xmlNodePtr) rock;
struct buf buf = BUF_INITIALIZER;
const char *data = NULL;
size_t datalen = 0;
int r = 0;
if (propstat) {
if (!fctx->record) return HTTP_NOT_FOUND;
mailbox_map_record(fctx->mailbox, fctx->record, &buf);
data = buf_cstring(&buf) + fctx->record->header_size;
datalen = buf_len(&buf) - fctx->record->header_size;
}
r = propfind_getdata(name, ns, fctx, propstat, prop, caldav_mime_types,
CALDAV_SUPP_DATA, data, datalen);
buf_free(&buf);
return r;
}
|
Safe
|
[
"CWE-787"
] |
cyrus-imapd
|
a5779db8163b99463e25e7c476f9cbba438b65f3
|
1.6326415107524493e+38
| 26 |
HTTP: don't overrun buffer when parsing strings with sscanf()
| 0 |
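The overrun class fixed here stems from %s conversions without a field width, which copy until whitespace regardless of the destination size. The defensive form caps each conversion at the buffer size minus one:

#include <stdio.h>

static int parse_token(const char *line)
{
    char name[32];

    /* "%31s" writes at most 31 chars plus the NUL; a bare "%s" can
     * overrun name[] on attacker-controlled input. */
    if (sscanf(line, "%31s", name) != 1)
        return -1;
    return 0;
}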
SecurityFunctionTableA* SEC_ENTRY InitSecurityInterfaceA(void)
{
return &SSPI_SecurityFunctionTableA;
}
|
Safe
|
[
"CWE-476",
"CWE-125"
] |
FreeRDP
|
0773bb9303d24473fe1185d85a424dfe159aff53
|
1.555823019773443e+38
| 4 |
nla: invalidate sec handle after creation
If sec pointer isn't invalidated after creation it is not possible
to check if the upper and lower pointers are valid.
This fixes a segfault in the server part if the client disconnects before
the authentication was finished.
| 0 |
void Compute(OpKernelContext* context) override {
const auto& in_min_tensor = context->input(2);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()),
errors::InvalidArgument("min must be a scalar"));
const float in_min = in_min_tensor.flat<float>()(0);
const auto& in_max_tensor = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()),
errors::InvalidArgument("max must be a scalar"));
const float in_max = in_max_tensor.flat<float>()(0);
ImageResizerState st(align_corners_, false);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
// Return if the output is empty.
if (st.output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor image_data(
context->input(0).tensor<T, 4>());
typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>());
ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min,
in_max, half_pixel_centers_, &output_data);
Tensor* out_min = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min));
out_min->flat<float>()(0) = in_min;
Tensor* out_max = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max));
out_max->flat<float>()(0) = in_max;
}
|
Safe
|
[
"CWE-787"
] |
tensorflow
|
f6c40f0c6cbf00d46c7717a26419f2062f2f8694
|
2.306355969640079e+38
| 32 |
Validate min and max arguments to `QuantizedResizeBilinear`.
PiperOrigin-RevId: 369765091
Change-Id: I33be8b78273ab7d08b97541692fe05cb7f94963a
| 0 |
save_hsts (void)
{
if (hsts_store)
{
char *filename = get_hsts_database ();
if (filename && hsts_store_has_changed (hsts_store))
{
DEBUGP (("Saving HSTS entries to %s\n", filename));
hsts_store_save (hsts_store, filename);
}
hsts_store_close (hsts_store);
xfree (filename);
}
}
|
Safe
|
[
"CWE-200"
] |
wget
|
a933bdd31eee9c956a3b5cc142f004ef1fa94cb3
|
5.583168495972064e+37
| 17 |
Keep fetched URLs in POSIX extended attributes
* configure.ac: Check for xattr availability
* src/Makefile.am: Add xattr.c
* src/ftp.c: Include xattr.h.
(getftp): Set attributes if enabled.
* src/http.c: Include xattr.h.
(gethttp): Add parameter 'original_url',
set attributes if enabled.
(http_loop): Add 'original_url' to call of gethttp().
* src/init.c: Add new option --xattr.
* src/main.c: Add new option --xattr, add description to help text.
* src/options.h: Add new config member 'enable_xattr'.
* src/xatrr.c: New file.
* src/xattr.h: New file.
These attributes provide a lightweight method of later determining
where a file was downloaded from.
This patch changes:
* autoconf detects whether extended attributes are available and
enables the code if they are.
* The new flags --xattr and --no-xattr control whether xattr is enabled.
* The new command "xattr = (on|off)" can be used in ~/.wgetrc or /etc/wgetrc
* The original and redirected URLs are recorded as shown below.
* This works for both single fetches and recursive mode.
The attributes that are set are:
user.xdg.origin.url: The URL that the content was fetched from.
user.xdg.referrer.url: The URL that was originally requested.
Here is an example, where http://archive.org redirects to https://archive.org:
$ wget --xattr http://archive.org
...
$ getfattr -d index.html
user.xdg.origin.url="https://archive.org/"
user.xdg.referrer.url="http://archive.org/"
These attributes were chosen based on those stored by Google Chrome
https://bugs.chromium.org/p/chromium/issues/detail?id=45903
and curl https://github.com/curl/curl/blob/master/src/tool_xattr.c
| 0 |
int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_seclevel *sl)
{
struct mac802154_llsec_seclevel *pos;
pos = llsec_find_seclevel(sec, sl);
if (!pos)
return -ENOENT;
list_del_rcu(&pos->level.list);
kfree_rcu(pos, rcu);
return 0;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
1165affd484889d4986cf3b724318935a0b120d8
|
9.158799885250401e+37
| 14 |
net: mac802154: Fix general protection fault
syzbot found a general protection fault in crypto_destroy_tfm() [1].
It was caused by a wrong clean-up loop in llsec_key_alloc().
If one of the tfm array members is in the IS_ERR() range, it will
cause a general protection fault in the clean-up function [1].
Call Trace:
crypto_free_aead include/crypto/aead.h:191 [inline] [1]
llsec_key_alloc net/mac802154/llsec.c:156 [inline]
mac802154_llsec_key_add+0x9e0/0xcc0 net/mac802154/llsec.c:249
ieee802154_add_llsec_key+0x56/0x80 net/mac802154/cfg.c:338
rdev_add_llsec_key net/ieee802154/rdev-ops.h:260 [inline]
nl802154_add_llsec_key+0x3d3/0x560 net/ieee802154/nl802154.c:1584
genl_family_rcv_msg_doit+0x228/0x320 net/netlink/genetlink.c:739
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x328/0x580 net/netlink/genetlink.c:800
netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2502
genl_rcv+0x24/0x40 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1312 [inline]
netlink_unicast+0x533/0x7d0 net/netlink/af_netlink.c:1338
netlink_sendmsg+0x856/0xd90 net/netlink/af_netlink.c:1927
sock_sendmsg_nosec net/socket.c:654 [inline]
sock_sendmsg+0xcf/0x120 net/socket.c:674
____sys_sendmsg+0x6e8/0x810 net/socket.c:2350
___sys_sendmsg+0xf3/0x170 net/socket.c:2404
__sys_sendmsg+0xe5/0x1b0 net/socket.c:2433
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Signed-off-by: Pavel Skripkin <paskripkin@gmail.com>
Reported-by: syzbot+9ec037722d2603a9f52e@syzkaller.appspotmail.com
Acked-by: Alexander Aring <aahringo@redhat.com>
Link: https://lore.kernel.org/r/20210304152125.1052825-1-paskripkin@gmail.com
Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
| 0 |
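The faulty cleanup walked the whole tfm array and freed every slot, including slots holding IS_ERR() encodings rather than real objects. The corrected loop shape frees only successfully allocated entries (a kernel-style sketch; the array size is illustrative):

#include <linux/err.h>
#include <crypto/aead.h>

#define LLSEC_NKEYS 3

static void free_tfms(struct crypto_aead *tfm[LLSEC_NKEYS])
{
    for (int i = 0; i < LLSEC_NKEYS; i++) {
        /* A failed allocation leaves an error code in the slot;
         * handing that to crypto_free_aead() faults. */
        if (!IS_ERR_OR_NULL(tfm[i]))
            crypto_free_aead(tfm[i]);
    }
}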
l_noret luaG_concaterror (lua_State *L, const TValue *p1, const TValue *p2) {
if (ttisstring(p1) || cvt2str(p1)) p1 = p2;
luaG_typeerror(L, p1, "concatenate");
}
|
Safe
|
[
"CWE-703"
] |
lua
|
a2195644d89812e5b157ce7bac35543e06db05e3
|
6.234333749041648e+37
| 4 |
Fixed bug: invalid 'oldpc' when returning to a function
The field 'L->oldpc' is not always updated when control returns to a
function; an invalid value can seg. fault when computing 'changedline'.
(One example is an error in a finalizer; control can return to
'luaV_execute' without executing 'luaD_poscall'.) Instead of trying to
fix all possible corner cases, it seems safer to be resilient to invalid
values for 'oldpc'. Valid but wrong values at most cause an extra call
to a line hook.
| 0 |
unsigned long long CUser::BytesWritten() const {
unsigned long long uBytes = m_uBytesWritten;
for (const CIRCNetwork* pNetwork : m_vIRCNetworks) {
uBytes += pNetwork->BytesWritten();
}
return uBytes;
}
|
Safe
|
[
"CWE-20"
] |
znc
|
64613bc8b6b4adf1e32231f9844d99cd512b8973
|
2.3469053517749717e+38
| 7 |
Don't crash if user specified invalid encoding.
This is CVE-2019-9917
| 0 |
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
struct vsock_sock *vsk = vsock_sk(sk);
int retval;
/* First ensure this socket isn't already bound. */
if (vsock_addr_bound(&vsk->local_addr))
return -EINVAL;
/* Now bind to the provided address or select appropriate values if
* none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
* like AF_INET prevents binding to a non-local IP address (in most
* cases), we only allow binding to a local CID.
*/
if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
return -EADDRNOTAVAIL;
switch (sk->sk_socket->type) {
case SOCK_STREAM:
spin_lock_bh(&vsock_table_lock);
retval = __vsock_bind_stream(vsk, addr);
spin_unlock_bh(&vsock_table_lock);
break;
case SOCK_DGRAM:
retval = __vsock_bind_dgram(vsk, addr);
break;
default:
retval = -EINVAL;
break;
}
return retval;
}
|
Safe
|
[
"CWE-667"
] |
linux
|
c518adafa39f37858697ac9309c6cf1805581446
|
2.379415249468967e+38
| 35 |
vsock: fix the race conditions in multi-transport support
There are multiple similar bugs implicitly introduced by the
commit c0cfa2d8a788fcf4 ("vsock: add multi-transports support") and
commit 6a2c0962105ae8ce ("vsock: prevent transport modules unloading").
The bug pattern:
[1] vsock_sock.transport pointer is copied to a local variable,
[2] lock_sock() is called,
[3] the local variable is used.
VSOCK multi-transport support introduced the race condition:
vsock_sock.transport value may change between [1] and [2].
Let's copy vsock_sock.transport pointer to local variables after
the lock_sock() call.
Fixes: c0cfa2d8a788fcf4 ("vsock: add multi-transports support")
Signed-off-by: Alexander Popov <alex.popov@linux.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Link: https://lore.kernel.org/r/20210201084719.2257066-1-alex.popov@linux.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
| 0 |
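The bug pattern and its fix reduce to an ordering rule: take the socket lock first, then read vsock_sock.transport, never the reverse. A simplified kernel-style sketch of the corrected sequence:

#include <net/af_vsock.h>

static int do_op(struct sock *sk, struct vsock_sock *vsk)
{
    const struct vsock_transport *transport;
    int err;

    lock_sock(sk);
    /* Read the field only under the lock; a copy taken before
     * lock_sock() could be stale by the time it is used. */
    transport = vsk->transport;
    err = transport ? 0 : -ENODEV;
    release_sock(sk);
    return err;
}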
void Config::set_filter_modules(const ConfigModule * modbegin,
const ConfigModule * modend)
{
assert(filter_modules_ptrs.empty());
filter_modules.clear();
filter_modules.assign(modbegin, modend);
}
|
Safe
|
[
"CWE-125"
] |
aspell
|
80fa26c74279fced8d778351cff19d1d8f44fe4e
|
1.0041347040724923e+38
| 7 |
Fix various bugs found by OSS-Fuzz.
| 0 |
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
atomic_t *replies = &priv->reify_replies;
struct nfp_flower_repr_priv *repr_priv;
struct nfp_repr *nfp_repr;
struct sk_buff *ctrl_skb;
struct nfp_reprs *reprs;
int err, reify_cnt;
unsigned int i;
ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
if (!ctrl_skb)
return -ENOMEM;
reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
if (!reprs) {
err = -ENOMEM;
goto err_free_ctrl_skb;
}
for (i = 0; i < eth_tbl->count; i++) {
unsigned int phys_port = eth_tbl->ports[i].index;
struct net_device *repr;
struct nfp_port *port;
u32 cmsg_port_id;
repr = nfp_repr_alloc(app);
if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
if (!repr_priv) {
err = -ENOMEM;
goto err_reprs_clean;
}
nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv;
repr_priv->nfp_repr = nfp_repr;
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
nfp_repr_free(repr);
goto err_reprs_clean;
}
err = nfp_port_init_phy_port(app->pf, app, port, i);
if (err) {
nfp_port_free(port);
nfp_repr_free(repr);
goto err_reprs_clean;
}
SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
nfp_net_get_mac_addr(app->pf, repr, port);
cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
err = nfp_repr_init(app, repr,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
nfp_repr_free(repr);
goto err_reprs_clean;
}
nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
eth_tbl->ports[i].nbi,
eth_tbl->ports[i].base,
phys_port);
RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
phys_port, repr->name);
}
nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
/* The REIFY/MAC_REPR control messages should be sent after the MAC
* representors are registered using nfp_app_reprs_set(). This is
* because the firmware may respond with control messages for the
* MAC representors, f.e. to provide the driver with information
* about their state, and without registration the driver will drop
* any such messages.
*/
atomic_set(replies, 0);
reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
if (reify_cnt < 0) {
err = reify_cnt;
nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
goto err_reprs_remove;
}
err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
if (err)
goto err_reprs_remove;
nfp_ctrl_tx(app->ctrl, ctrl_skb);
return 0;
err_reprs_remove:
reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
kfree_skb(ctrl_skb);
return err;
}
|
Vulnerable
|
[
"CWE-400",
"CWE-401"
] |
linux
|
8572cea1461a006bce1d06c0c4b0575869125fa4
|
2.1931075001891945e+38
| 110 |
nfp: flower: prevent memory leak in nfp_flower_spawn_phy_reprs
In nfp_flower_spawn_phy_reprs, in the for loop over eth_tbl, if any of
the intermediate allocations or initializations fail, memory is leaked.
The required releases are added.
Fixes: b94524529741 ("nfp: flower: add per repr private data for LAG offload")
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 1 |
TEST_F(HttpConnectionManagerImplTest, OverlyLongHeadersAcceptedIfConfigured) {
max_request_headers_kb_ = 62;
setup(false, "");
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {
StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_);
HeaderMapPtr headers{
new TestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
headers->addCopy(LowerCaseString("Foo"), std::string(60 * 1024, 'a'));
EXPECT_CALL(response_encoder_, encodeHeaders(_, _)).Times(0);
decoder->decodeHeaders(std::move(headers), true);
conn_manager_->newStream(response_encoder_);
}));
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false); // kick off request
}
|
Vulnerable
|
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
|
8.62127640359941e+37
| 18 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <asraa@google.com>
| 1 |
int wc_BerToDer(const byte* ber, word32 berSz, byte* der, word32* derSz)
{
int ret = 0;
word32 i, j;
#ifdef WOLFSSL_SMALL_STACK
IndefItems* indefItems = NULL;
#else
IndefItems indefItems[1];
#endif
byte tag, basic;
word32 length;
int indef;
if (ber == NULL || derSz == NULL)
return BAD_FUNC_ARG;
#ifdef WOLFSSL_SMALL_STACK
indefItems = (IndefItems *)XMALLOC(sizeof(IndefItems), NULL, DYNAMIC_TYPE_TMP_BUFFER);
if (indefItems == NULL) {
ret = MEMORY_E;
goto end;
}
#endif
XMEMSET(indefItems, 0, sizeof(*indefItems));
/* Calculate indefinite item lengths */
for (i = 0; i < berSz; ) {
word32 start = i;
/* Get next BER item */
ret = GetBerHeader(ber, &i, berSz, &tag, &length, &indef);
if (ret != 0) {
goto end;
}
if (indef) {
/* Indefinite item - add to list */
ret = IndefItems_AddItem(indefItems, i);
if (ret != 0) {
goto end;
}
if ((tag & 0xC0) == 0 &&
tag != (ASN_SEQUENCE | ASN_CONSTRUCTED) &&
tag != (ASN_SET | ASN_CONSTRUCTED)) {
/* Constructed basic type - get repeating tag */
basic = tag & (~ASN_CONSTRUCTED);
/* Add up lengths of each item below */
for (; i < berSz; ) {
/* Get next BER_item */
ret = GetBerHeader(ber, &i, berSz, &tag, &length, &indef);
if (ret != 0) {
goto end;
}
/* End of content closes item */
if (tag == ASN_EOC) {
/* Must be zero length */
if (length != 0) {
ret = ASN_PARSE_E;
goto end;
}
break;
}
/* Must not be indefinite and tag must match parent */
if (indef || tag != basic) {
ret = ASN_PARSE_E;
goto end;
}
/* Add to length */
IndefItems_AddData(indefItems, length);
/* Skip data */
i += length;
}
/* Ensure we got an EOC and not end of data */
if (tag != ASN_EOC) {
ret = ASN_PARSE_E;
goto end;
}
/* Set the header length to include the length field */
IndefItems_UpdateHeaderLen(indefItems);
      /* Go to indefinite parent item */
IndefItems_Up(indefItems);
}
}
else if (tag == ASN_EOC) {
/* End-of-content must be 0 length */
if (length != 0) {
ret = ASN_PARSE_E;
goto end;
}
/* Check there is an item to close - missing EOC */
if (indefItems->depth == 0) {
ret = ASN_PARSE_E;
goto end;
}
/* Finish calculation of data length for indefinite item */
IndefItems_CalcLength(indefItems);
      /* Go to indefinite parent item */
IndefItems_Up(indefItems);
}
else {
/* Known length item to add in - make sure enough data for it */
if (i + length > berSz) {
ret = ASN_PARSE_E;
goto end;
}
/* Include all data - can't have indefinite inside definite */
i += length;
/* Add entire item to current indefinite item */
IndefItems_MoreData(indefItems, i - start);
}
}
/* Check we had a EOC for each indefinite item */
if (indefItems->depth != 0) {
ret = ASN_PARSE_E;
goto end;
}
/* Write out DER */
j = 0;
/* Reset index */
indefItems->idx = 0;
for (i = 0; i < berSz; ) {
word32 start = i;
/* Get item - checked above */
(void)GetBerHeader(ber, &i, berSz, &tag, &length, &indef);
if (indef) {
if (der != NULL) {
/* Check enough space for header */
if (j + IndefItems_HeaderLen(indefItems) > *derSz) {
ret = BUFFER_E;
goto end;
}
if ((tag & 0xC0) == 0 &&
tag != (ASN_SEQUENCE | ASN_CONSTRUCTED) &&
tag != (ASN_SET | ASN_CONSTRUCTED)) {
/* Remove constructed tag for basic types */
tag &= ~ASN_CONSTRUCTED;
}
/* Add tag and length */
der[j] = tag;
(void)SetLength(IndefItems_Len(indefItems), der + j + 1);
}
/* Add header length of indefinite item */
j += IndefItems_HeaderLen(indefItems);
if ((tag & 0xC0) == 0 &&
tag != (ASN_SEQUENCE | ASN_CONSTRUCTED) &&
tag != (ASN_SET | ASN_CONSTRUCTED)) {
/* For basic type - get each child item and add data */
for (; i < berSz; ) {
(void)GetBerHeader(ber, &i, berSz, &tag, &length, &indef);
if (tag == ASN_EOC) {
break;
}
if (der != NULL) {
if (j + length > *derSz) {
ret = BUFFER_E;
goto end;
}
XMEMCPY(der + j, ber + i, length);
}
j += length;
i += length;
}
}
/* Move to next indef item in list */
indefItems->idx++;
}
else if (tag == ASN_EOC) {
/* End-Of-Content is not written out in DER */
}
else {
/* Write out definite length item as is. */
i += length;
if (der != NULL) {
/* Ensure space for item */
if (j + i - start > *derSz) {
ret = BUFFER_E;
goto end;
}
/* Copy item as is */
XMEMCPY(der + j, ber + start, i - start);
}
j += i - start;
}
}
/* Return the length of the DER encoded ASN.1 */
*derSz = j;
if (der == NULL) {
ret = LENGTH_ONLY_E;
}
end:
#ifdef WOLFSSL_SMALL_STACK
if (indefItems != NULL) {
XFREE(indefItems, NULL, DYNAMIC_TYPE_TMP_BUFFER);
}
#endif
return ret;
}
|
Safe
|
[
"CWE-125",
"CWE-345"
] |
wolfssl
|
f93083be72a3b3d956b52a7ec13f307a27b6e093
|
5.713491236986147e+37
| 214 |
OCSP: improve handling of OCSP no check extension
| 0 |
static char *console_kit_check_active_session_change(struct session_info *info)
{
si_dbus_read_signals(info);
if (info->verbose)
syslog(LOG_DEBUG, "(console-kit) active-session: '%s'",
(info->active_session ? info->active_session : "None"));
return info->active_session;
}
|
Safe
|
[
"CWE-362"
] |
spice-vd_agent
|
5c50131797e985d0a5654c1fd7000ae945ed29a7
|
2.1747426846111428e+38
| 9 |
Better check for sessions
Do not allow other users to hijack a session by checking that
the process is launched by the owner of the session.
Signed-off-by: Frediano Ziglio <freddy77@gmail.com>
Acked-by: Uri Lublin <uril@redhat.com>
| 0 |
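The hardening the commit describes amounts to verifying that the requesting process belongs to the session owner. A minimal user-space sketch of that ownership check, assuming the uid is read from the process's /proc entry; the helper name is illustrative, not the agent's actual code:

```c
/* Hedged sketch: reject a session-change request unless the requesting
 * process is owned by the expected session owner. */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

static int process_owned_by(pid_t pid, uid_t owner)
{
    char path[64];
    struct stat st;

    snprintf(path, sizeof(path), "/proc/%d", (int)pid);
    if (stat(path, &st) != 0)
        return 0;               /* process vanished: treat as not owned */
    return st.st_uid == owner;  /* /proc entries carry the task's uid */
}
```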
int mysql_store_result_for_lazy(MYSQL_RES **result)
{
if ((*result=mysql_store_result(&mysql)))
return 0;
if (mysql_error(&mysql)[0])
return put_error(&mysql);
return 0;
}
|
Safe
|
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
|
1.6930839409866639e+37
| 9 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
| 0 |
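The commit message above introduces MYSQL_OPT_SSL_ENFORCE and mysql_get_option(). A hedged sketch of how a client of that library generation would use them; note MYSQL_OPT_SSL_ENFORCE was removed again in later MySQL releases, so this only applies to the era the commit targets:

```c
/* Hedged sketch: enforce TLS before connecting and read the setting
 * back with mysql_get_option(). */
#include <stdio.h>
#include <mysql.h>

int connect_enforced(const char *host, const char *user, const char *pass)
{
    MYSQL *conn = mysql_init(NULL);
    my_bool enforce = 1, readback = 0;

    if (conn == NULL)
        return -1;
    mysql_options(conn, MYSQL_OPT_SSL_ENFORCE, &enforce);
    mysql_get_option(conn, MYSQL_OPT_SSL_ENFORCE, &readback);
    printf("ssl enforced: %d\n", (int)readback);

    if (!mysql_real_connect(conn, host, user, pass, NULL, 0, NULL, 0)) {
        fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
        mysql_close(conn);
        return -1;              /* no fallback to an unencrypted link */
    }
    mysql_close(conn);
    return 0;
}
```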
bool IsSupported(const string& op_name, DataType dtype) const {
const auto it = supported_ops_.find(op_name);
return it != supported_ops_.end() && it->second.count(dtype) > 0;
}
|
Safe
|
[
"CWE-476"
] |
tensorflow
|
e6340f0665d53716ef3197ada88936c2a5f7a2d3
|
5.901013582637869e+36
| 4 |
Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
| 0 |
void flush_all_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
BUG_ON(tsk != current);
#ifdef CONFIG_SPE
if (tsk->thread.regs->msr & MSR_SPE)
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
save_all(tsk);
preempt_enable();
}
}
|
Safe
|
[
"CWE-862"
] |
linux
|
8205d5d98ef7f155de211f5e2eb6ca03d95a5a60
|
3.3646711596947206e+38
| 14 |
powerpc/tm: Fix FP/VMX unavailable exceptions inside a transaction
When we take an FP unavailable exception in a transaction we have to
account for the hardware FP TM checkpointed registers being
incorrect. In this case for this process we know the current and
checkpointed FP registers must be the same (since FP wasn't used
inside the transaction) hence in the thread_struct we copy the current
FP registers to the checkpointed ones.
This copy is done in tm_reclaim_thread(). We use thread->ckpt_regs.msr
to determine if FP was on when in userspace. thread->ckpt_regs.msr
represents the state of the MSR when exiting userspace. This is setup
by check_if_tm_restore_required().
Unfortunately there is an optimisation in giveup_all() which returns
early if tsk->thread.regs->msr (via local variable `usermsr`) has
FP=VEC=VSX=SPE=0. This optimisation means that
check_if_tm_restore_required() is not called and hence
thread->ckpt_regs.msr is not updated and will contain an old value.
This can happen if due to load_fp=255 we start a userspace process
with MSR FP=1 and then we are context switched out. In this case
thread->ckpt_regs.msr will contain FP=1. If that same process is then
context switched in and load_fp overflows, MSR will have FP=0. If that
process now enters a transaction and does an FP instruction, the FP
unavailable will not update thread->ckpt_regs.msr (the bug) and MSR
FP=1 will be retained in thread->ckpt_regs.msr. tm_reclaim_thread()
will then not perform the required memcpy and the checkpointed FP regs
in the thread struct will contain the wrong values.
The code path for this happening is:
Userspace: Kernel
Start userspace
with MSR FP/VEC/VSX/SPE=0 TM=1
< -----
...
tbegin
bne
fp instruction
FP unavailable
---- >
fp_unavailable_tm()
tm_reclaim_current()
tm_reclaim_thread()
giveup_all()
return early since FP/VMX/VSX=0
/* ckpt MSR not updated (Incorrect) */
tm_reclaim()
/* thread_struct ckpt FP regs contain junk (OK) */
/* Sees ckpt MSR FP=1 (Incorrect) */
no memcpy() performed
/* thread_struct ckpt FP regs not fixed (Incorrect) */
tm_recheckpoint()
/* Put junk in hardware checkpoint FP regs */
....
< -----
Return to userspace
with MSR TM=1 FP=1
with junk in the FP TM checkpoint
TM rollback
reads FP junk
This is a data integrity problem for the current process as the FP
registers are corrupted. It's also a security problem as the FP
registers from one process may be leaked to another.
This patch moves up check_if_tm_restore_required() in giveup_all() to
ensure thread->ckpt_regs.msr is updated correctly.
A simple testcase to replicate this will be posted to
tools/testing/selftests/powerpc/tm/tm-poison.c
Similarly for VMX.
This fixes CVE-2019-15030.
Fixes: f48e91e87e67 ("powerpc/tm: Fix FP and VMX register corruption")
Cc: stable@vger.kernel.org # 4.12+
Signed-off-by: Gustavo Romero <gromero@linux.vnet.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190904045529.23002-1-gromero@linux.vnet.ibm.com
| 0 |
static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_product_id, const u16 bcdDevice)
{
const u16 reported_product_id = hid->product;
int ret;
*real_product_id = lg4ff_identify_multimode_wheel(hid, reported_product_id, bcdDevice);
/* Probed wheel is not a multimode wheel */
if (!*real_product_id) {
*real_product_id = reported_product_id;
dbg_hid("Wheel is not a multimode wheel\n");
return LG4FF_MMODE_NOT_MULTIMODE;
}
/* Switch from "Driving Force" mode to native mode automatically.
* Otherwise keep the wheel in its current mode */
if (reported_product_id == USB_DEVICE_ID_LOGITECH_WHEEL &&
reported_product_id != *real_product_id &&
!lg4ff_no_autoswitch) {
const struct lg4ff_compat_mode_switch *s = lg4ff_get_mode_switch_command(*real_product_id, *real_product_id);
if (!s) {
hid_err(hid, "Invalid product id %X\n", *real_product_id);
return LG4FF_MMODE_NOT_MULTIMODE;
}
ret = lg4ff_switch_compatibility_mode(hid, s);
if (ret) {
/* Wheel could not have been switched to native mode,
* leave it in "Driving Force" mode and continue */
hid_err(hid, "Unable to switch wheel mode, errno %d\n", ret);
return LG4FF_MMODE_IS_MULTIMODE;
}
return LG4FF_MMODE_SWITCHED;
}
return LG4FF_MMODE_IS_MULTIMODE;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
|
2.4266140562339255e+37
| 37 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: syzbot+403741a091bf41d4ae79@syzkaller.appspotmail.com
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@vger.kernel.org>
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
| 0 |
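The guard the commit describes is to verify the hid_device's input list is non-empty before taking its first entry. A hedged kernel-context sketch of that pattern, per the patch description rather than any one driver's exact diff:

```c
/* Hedged kernel-context sketch: never take the first entry of
 * hid->inputs without checking that the list is non-empty first. */
static int example_probe_guard(struct hid_device *hid)
{
	struct hid_input *hidinput;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;	/* malicious device: no input report */
	}
	hidinput = list_entry(hid->inputs.next, struct hid_input, list);
	/* ... safe to use hidinput->input from here on ... */
	return 0;
}
```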
int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
{
return memcmp(key1, key2, *(uint *) arg);
}
|
Safe
|
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
|
1.7471421496642703e+38
| 4 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
| 0 |
xcf_skip_unknown_prop (XcfInfo *info,
gsize size)
{
guint8 buf[16];
guint amount;
while (size > 0)
{
if (g_input_stream_is_closed (info->input))
return FALSE;
amount = MIN (16, size);
amount = xcf_read_int8 (info, buf, amount);
if (amount == 0)
return FALSE;
size -= amount;
}
return TRUE;
}
|
Safe
|
[
"CWE-120"
] |
gimp
|
4f99f1fcfd892ead19831b5adcd38a99d71214b6
|
1.233786314647284e+38
| 21 |
app: fix #8120 GIMP 2.10.30 crashed when allocating large memory
GIMP could crash if the information regarding old path properties read
from XCF was incorrect. It did not check if xcf_old_path succeeded and
kept trying to load more paths even if the last one failed to load.
Instead we now stop loading paths as soon as that function fails.
In case we have a failure here we also try to skip to the next property
based on the size of the path property, in hopes that the only problem
was this property.
| 0 |
int32_t ID() const { return id_; }
|
Safe
|
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
|
1.105375618777625e+38
| 1 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <samuel.r.attard@gmail.com>
Co-authored-by: Samuel Attard <sattard@salesforce.com>
| 0 |
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
for_each_sched_entity(se) {
delta = calc_delta_mine(delta,
cfs_rq_of(se)->load.weight, &se->load);
}
return delta;
}
|
Safe
|
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
|
3.486692710887741e+37
| 9 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
A
/ \
B 1
/ \
2 3
To compute 1's load we do:
weight(1)
--------------
rq_weight(A)
To compute 2's load we do:
weight(2) weight(B)
------------ * -----------
rq_weight(B) rw_weight(A)
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
time_{i}
vtime_{i} = ------------
weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| 0 |
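The fractions in the commit message above are easy to check numerically: task 2's root-level load is weight(2)/rq_weight(B) * weight(B)/rq_weight(A). A small runnable illustration with made-up example weights:

```c
/* Hedged numeric illustration of the commit's load fractions; the
 * weights are invented example numbers, not kernel defaults. */
#include <stdio.h>

int main(void)
{
    double w1 = 1024, w2 = 1024, w3 = 1024, wB = 1024;
    double rqA = wB + w1;        /* A's runqueue holds B and task 1 */
    double rqB = w2 + w3;        /* B's runqueue holds tasks 2 and 3 */

    double load1 = w1 / rqA;                 /* task 1, directly on A */
    double load2 = (w2 / rqB) * (wB / rqA);  /* task 2, through B     */

    printf("load(1)=%.3f load(2)=%.3f\n", load1, load2);
    return 0;
}
```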
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
struct stub_priv *priv = (struct stub_priv *) urb->context;
setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}
|
Safe
|
[
"CWE-476"
] |
linux
|
be6123df1ea8f01ee2f896a16c2b7be3e4557a5a
|
2.6711156326710137e+38
| 7 |
usbip: fix stub_send_ret_submit() vulnerability to null transfer_buffer
stub_send_ret_submit() handles urb with a potential null transfer_buffer,
when it replays a packet with potential malicious data that could contain
a null buffer. Add a check for the condition when actual_length > 0 and
transfer_buffer is null.
Reported-by: Secunia Research <vuln@secunia.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Shuah Khan <shuahkh@osg.samsung.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
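The fix the commit describes is a sanity check before touching the urb's data. A hedged kernel-context sketch of that check as described, not the exact stub_send_ret_submit() diff:

```c
/* Hedged sketch: refuse to touch a urb that claims data but carries
 * no buffer, as a replayed/malicious packet can arrange. */
static int urb_buffer_sane(struct urb *urb)
{
	if (urb->actual_length > 0 && !urb->transfer_buffer) {
		pr_err("urb: actual_length %d but transfer_buffer is NULL\n",
		       urb->actual_length);
		return 0;	/* reject instead of dereferencing NULL */
	}
	return 1;
}
```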
readDelayed(void *context, CommRead const &)
{
HttpStateData *state = static_cast<HttpStateData*>(context);
state->flags.do_next_read = true;
state->maybeReadVirginBody();
}
|
Safe
|
[
"CWE-444"
] |
squid
|
fd68382860633aca92065e6c343cfd1b12b126e7
|
1.6818225860553986e+38
| 6 |
Improve Transfer-Encoding handling (#702)
Reject messages containing Transfer-Encoding header with coding other
than chunked or identity. Squid does not support other codings.
For simplicity and security sake, also reject messages where
Transfer-Encoding contains unnecessary complex values that are
technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or
"identity, chunked").
RFC 7230 formally deprecated and removed identity coding, but it is
still used by some agents.
| 0 |
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
struct ib_ucq_object, uobject);
ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
event->event, &uobj->async_list,
&uobj->async_events_reported);
}
|
Safe
|
[
"CWE-362",
"CWE-703",
"CWE-667"
] |
linux
|
04f5866e41fb70690e28397487d8bd8eea7d712a
|
5.540193404084674e+37
| 9 |
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that being the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/alpine.LSU.2.11.1707191716030.2055@eggly.anvils
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/20190325224949.11068-1-aarcange@redhat.com
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Jann Horn <jannh@google.com>
Acked-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
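The recurring pattern the commit adds is: after taking mmap_sem for reading on a foreign mm obtained via mmget_not_zero()/get_task_mm(), bail out if a core dump has started. A hedged sketch of that shape, assuming the mmget_still_valid() helper the message names:

```c
/* Hedged kernel-context sketch of the pattern the commit describes:
 * do not mutate the vma layout under a concurrent core dump. */
static void touch_foreign_mm(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
	if (!mmget_still_valid(mm))	/* core dump in flight: hands off */
		goto out;
	/* ... safe to walk or extend vmas here ... */
out:
	up_read(&mm->mmap_sem);
}
```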
static int pit_get_out(struct kvm *kvm, int channel)
{
struct kvm_kpit_channel_state *c =
&kvm->arch.vpit->pit_state.channels[channel];
s64 d, t;
int out;
WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
t = kpit_elapsed(kvm, c, channel);
d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
switch (c->mode) {
default:
case 0:
out = (d >= c->count);
break;
case 1:
out = (d < c->count);
break;
case 2:
out = ((mod_64(d, c->count) == 0) && (d != 0));
break;
case 3:
out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
break;
case 4:
case 5:
out = (d == c->count);
break;
}
return out;
}
|
Safe
|
[
"CWE-362"
] |
kvm
|
2febc839133280d5a5e8e1179c94ea674489dae2
|
7.432316961609625e+37
| 34 |
KVM: x86: Improve thread safety in pit
There's a race condition in the PIT emulation code in KVM. In
__kvm_migrate_pit_timer the pit_timer object is accessed without
synchronization. If the race condition occurs at the wrong time this
can crash the host kernel.
This fixes CVE-2014-3611.
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
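pit_get_out() above asserts the pit_state lock is held; the CVE-2014-3611 fix is to make the timer-migration path honor the same lock. A hedged sketch of that locking shape, assuming the pit_state field names of that kernel era:

```c
/* Hedged sketch: serialize timer migration with pit_state.lock so the
 * WARN_ON(!mutex_is_locked(...)) invariant above always holds. */
static void migrate_pit_timer_locked(struct kvm_kpit_state *ps)
{
	mutex_lock(&ps->lock);
	if (hrtimer_cancel(&ps->timer))
		hrtimer_start_expires(&ps->timer, HRTIMER_MODE_ABS);
	mutex_unlock(&ps->lock);
}
```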
static int check_subprogs(struct bpf_verifier_env *env)
{
int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
/* Add entry function. */
ret = add_subprog(env, 0);
if (ret < 0)
return ret;
/* determine subprog starts. The end is one before the next starts */
for (i = 0; i < insn_cnt; i++) {
if (insn[i].code != (BPF_JMP | BPF_CALL))
continue;
if (insn[i].src_reg != BPF_PSEUDO_CALL)
continue;
if (!env->allow_ptr_leaks) {
verbose(env, "function calls to other bpf functions are allowed for root only\n");
return -EPERM;
}
ret = add_subprog(env, i + insn[i].imm + 1);
if (ret < 0)
return ret;
}
/* Add a fake 'exit' subprog which could simplify subprog iteration
* logic. 'subprog_cnt' should not be increased.
*/
subprog[env->subprog_cnt].start = insn_cnt;
if (env->log.level & BPF_LOG_LEVEL2)
for (i = 0; i < env->subprog_cnt; i++)
verbose(env, "func#%d @%d\n", i, subprog[i].start);
/* now check that all jumps are within the same subprog */
subprog_start = subprog[cur_subprog].start;
subprog_end = subprog[cur_subprog + 1].start;
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
off = i + insn[i].off + 1;
if (off < subprog_start || off >= subprog_end) {
verbose(env, "jump out of range from insn %d to %d\n", i, off);
return -EINVAL;
}
next:
if (i == subprog_end - 1) {
/* to avoid fall-through from one subprog into another
* the last insn of the subprog should be either exit
* or unconditional jump back
*/
if (code != (BPF_JMP | BPF_EXIT) &&
code != (BPF_JMP | BPF_JA)) {
verbose(env, "last insn is not an exit or jmp\n");
return -EINVAL;
}
subprog_start = subprog_end;
cur_subprog++;
if (cur_subprog < env->subprog_cnt)
subprog_end = subprog[cur_subprog + 1].start;
}
}
return 0;
}
|
Safe
|
[] |
linux
|
294f2fc6da27620a506e6c050241655459ccd6bd
|
1.2577869778066832e+38
| 70 |
bpf: Verifer, adjust_scalar_min_max_vals to always call update_reg_bounds()
Currently, for all op verification we call __reg_deduce_bounds() and
__reg_bound_offset(), but we only call __update_reg_bounds() in bitwise
ops. However, we could benefit from calling __update_reg_bounds() in
BPF_ADD, BPF_SUB, and BPF_MUL cases as well.
For example, a register with state 'R1_w=invP0' when we subtract from
it,
w1 -= 2
Before coerce we will now have an smin_value=S64_MIN, smax_value=U64_MAX
and unsigned bounds umin_value=0, umax_value=U64_MAX. These will then
be clamped to S32_MIN, U32_MAX values by coerce in the case of alu32 op
as done in the above example. However, tnum will be a constant because the
ALU op is done on a constant.
Without update_reg_bounds() we have a scenario where tnum is a const
but our unsigned bounds do not reflect this. By calling update_reg_bounds
after coerce to 32bit we further refine the umin_value to U64_MAX in the
alu64 case or U32_MAX in the alu32 case above.
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/158507151689.15666.566796274289413203.stgit@john-Precision-5820-Tower
| 0 |
cdio_generic_stdio_free (void *p_user_data)
{
generic_img_private_t *p_env = p_user_data;
if (NULL == p_env) return;
if (NULL != p_env->source_name)
free (p_env->source_name);
if (p_env->data_source)
cdio_stdio_destroy (p_env->data_source);
}
|
Safe
|
[
"CWE-415"
] |
libcdio
|
dec2f876c2d7162da213429bce1a7140cdbdd734
|
2.8936121765052327e+37
| 11 |
Removed wrong line
| 0 |
static void pp_init(struct parallel_processes *pp,
int n,
get_next_task_fn get_next_task,
start_failure_fn start_failure,
task_finished_fn task_finished,
void *data)
{
int i;
if (n < 1)
n = online_cpus();
pp->max_processes = n;
trace_printf("run_processes_parallel: preparing to run up to %d tasks", n);
pp->data = data;
if (!get_next_task)
die("BUG: you need to specify a get_next_task function");
pp->get_next_task = get_next_task;
pp->start_failure = start_failure ? start_failure : default_start_failure;
pp->task_finished = task_finished ? task_finished : default_task_finished;
pp->nr_processes = 0;
pp->output_owner = 0;
pp->shutdown = 0;
pp->children = xcalloc(n, sizeof(*pp->children));
pp->pfd = xcalloc(n, sizeof(*pp->pfd));
strbuf_init(&pp->buffered_output, 0);
for (i = 0; i < n; i++) {
strbuf_init(&pp->children[i].err, 0);
child_process_init(&pp->children[i].process);
pp->pfd[i].events = POLLIN | POLLHUP;
pp->pfd[i].fd = -1;
}
pp_for_signal = pp;
sigchain_push_common(handle_children_on_signal);
}
|
Safe
|
[] |
git
|
321fd82389742398d2924640ce3a61791fd27d60
|
2.8242562771755478e+38
| 41 |
run-command: mark path lookup errors with ENOENT
Since commit e3a434468f (run-command: use the
async-signal-safe execv instead of execvp, 2017-04-19),
prepare_cmd() does its own PATH lookup for any commands we
run (on non-Windows platforms).
However, its logic does not match the old execvp call when
we fail to find a matching entry in the PATH. Instead of
feeding the name directly to execv, execvp would consider
that an ENOENT error. By continuing and passing the name
directly to execv, we effectively behave as if "." was
included at the end of the PATH. This can have confusing and
even dangerous results.
The fix itself is pretty straight-forward. There's a new
test in t0061 to cover this explicitly, and I've also added
a duplicate of the ENOENT test to ensure that we return the
correct errno for this case.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
| 0 |
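The semantic the commit restores: when the command is not found on PATH, fail with ENOENT rather than handing the bare name to execv(), which would behave as if "." were on PATH. A hedged, runnable user-space sketch of that rule; locate_in_path() is an illustrative helper, not git's actual code:

```c
/* Hedged sketch: a not-found PATH lookup reports ENOENT and never
 * falls back to executing the bare name from the cwd. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static char *locate_in_path(const char *cmd)
{
	const char *path = getenv("PATH");
	static char buf[4096];
	if (path == NULL)
		return NULL;
	while (*path) {
		size_t n = strcspn(path, ":");
		snprintf(buf, sizeof(buf), "%.*s/%s", (int)n, path, cmd);
		if (access(buf, X_OK) == 0)
			return buf;	/* found an executable candidate */
		path += n + (path[n] == ':');
	}
	return NULL;
}

int run(const char *cmd, char *const argv[])
{
	char *resolved = locate_in_path(cmd);
	if (resolved == NULL) {
		errno = ENOENT;	/* match what execvp would report */
		return -1;	/* and never execv() the bare name */
	}
	return execv(resolved, argv);
}
```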
GF_Box *tfdt_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_TFBaseMediaDecodeTimeBox, GF_ISOM_BOX_TYPE_TFDT);
return (GF_Box *)tmp;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
77510778516803b7f7402d7423c6d6bef50254c3
|
3.2731913827277296e+38
| 5 |
fixed #2255
| 0 |
xmlOutputBufferWriteEscape(xmlOutputBufferPtr out, const xmlChar *str,
xmlCharEncodingOutputFunc escaping) {
int nbchars = 0; /* number of chars to output to I/O */
int ret; /* return from function call */
int written = 0; /* number of char written to I/O so far */
int oldwritten=0;/* loop guard */
int chunk; /* number of byte currently processed from str */
int len; /* number of bytes in str */
int cons; /* byte from str consumed */
if ((out == NULL) || (out->error) || (str == NULL) ||
(out->buffer == NULL) ||
(xmlBufGetAllocationScheme(out->buffer) == XML_BUFFER_ALLOC_IMMUTABLE))
return(-1);
len = strlen((const char *)str);
if (len < 0) return(0);
if (out->error) return(-1);
if (escaping == NULL) escaping = xmlEscapeContent;
do {
oldwritten = written;
/*
* how many bytes to consume and how many bytes to store.
*/
cons = len;
chunk = xmlBufAvail(out->buffer) - 1;
/*
* make sure we have enough room to save first, if this is
* not the case force a flush, but make sure we stay in the loop
*/
if (chunk < 40) {
if (xmlBufGrow(out->buffer, 100) < 0)
return(-1);
oldwritten = -1;
continue;
}
/*
* first handle encoding stuff.
*/
if (out->encoder != NULL) {
/*
* Store the data in the incoming raw buffer
*/
if (out->conv == NULL) {
out->conv = xmlBufCreate();
}
ret = escaping(xmlBufEnd(out->buffer) ,
&chunk, str, &cons);
if ((ret < 0) || (chunk == 0)) /* chunk==0 => nothing done */
return(-1);
xmlBufAddLen(out->buffer, chunk);
if ((xmlBufUse(out->buffer) < MINLEN) && (cons == len))
goto done;
/*
* convert as much as possible to the output buffer.
*/
ret = xmlCharEncOutput(out, 0);
if ((ret < 0) && (ret != -3)) {
xmlIOErr(XML_IO_ENCODER, NULL);
out->error = XML_IO_ENCODER;
return(-1);
}
nbchars = xmlBufUse(out->conv);
} else {
ret = escaping(xmlBufEnd(out->buffer), &chunk, str, &cons);
if ((ret < 0) || (chunk == 0)) /* chunk==0 => nothing done */
return(-1);
xmlBufAddLen(out->buffer, chunk);
nbchars = xmlBufUse(out->buffer);
}
str += cons;
len -= cons;
if ((nbchars < MINLEN) && (len <= 0))
goto done;
if (out->writecallback) {
/*
* second write the stuff to the I/O channel
*/
if (out->encoder != NULL) {
ret = out->writecallback(out->context,
(const char *)xmlBufContent(out->conv), nbchars);
if (ret >= 0)
xmlBufShrink(out->conv, ret);
} else {
ret = out->writecallback(out->context,
(const char *)xmlBufContent(out->buffer), nbchars);
if (ret >= 0)
xmlBufShrink(out->buffer, ret);
}
if (ret < 0) {
xmlIOErr(XML_IO_WRITE, NULL);
out->error = XML_IO_WRITE;
return(ret);
}
out->written += ret;
} else if (xmlBufAvail(out->buffer) < MINLEN) {
xmlBufGrow(out->buffer, MINLEN);
}
written += nbchars;
} while ((len > 0) && (oldwritten != written));
done:
#ifdef DEBUG_INPUT
xmlGenericError(xmlGenericErrorContext,
"I/O: wrote %d chars\n", written);
#endif
return(written);
}
|
Safe
|
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
|
1.1837681600240044e+38
| 115 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
| 0 |
const char *pop_get_field(enum ConnAccountField field)
{
switch (field)
{
case MUTT_CA_LOGIN:
case MUTT_CA_USER:
return C_PopUser;
case MUTT_CA_PASS:
return C_PopPass;
case MUTT_CA_OAUTH_CMD:
return C_PopOauthRefreshCommand;
case MUTT_CA_HOST:
default:
return NULL;
}
}
|
Safe
|
[
"CWE-94",
"CWE-74"
] |
neomutt
|
fb013ec666759cb8a9e294347c7b4c1f597639cc
|
2.232954904974451e+38
| 16 |
tls: clear data after a starttls acknowledgement
After a starttls acknowledgement message, clear the buffers of any
incoming data / commands. This will ensure that all future data is
handled securely.
Co-authored-by: Pietro Cerutti <gahr@gahr.ch>
| 0 |
irqreturn_t floppy_interrupt(int irq, void *dev_id)
{
int do_print;
unsigned long f;
void (*handler)(void) = do_floppy;
lasthandler = handler;
interruptjiffies = jiffies;
f = claim_dma_lock();
fd_disable_dma();
release_dma_lock(f);
do_floppy = NULL;
if (fdc >= N_FDC || FDCS->address == -1) {
/* we don't even know which FDC is the culprit */
pr_info("DOR0=%x\n", fdc_state[0].dor);
pr_info("floppy interrupt on bizarre fdc %d\n", fdc);
pr_info("handler=%pf\n", handler);
is_alive(__func__, "bizarre fdc");
return IRQ_NONE;
}
FDCS->reset = 0;
/* We have to clear the reset flag here, because apparently on boxes
* with level triggered interrupts (PS/2, Sparc, ...), it is needed to
* emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
* emission of the SENSEI's.
* It is OK to emit floppy commands because we are in an interrupt
* handler here, and thus we have to fear no interference of other
* activity.
*/
do_print = !handler && print_unex && initialized;
inr = result();
if (do_print)
print_result("unexpected interrupt", inr);
if (inr == 0) {
int max_sensei = 4;
do {
output_byte(FD_SENSEI);
inr = result();
if (do_print)
print_result("sensei", inr);
max_sensei--;
} while ((ST0 & 0x83) != UNIT(current_drive) &&
inr == 2 && max_sensei);
}
if (!handler) {
FDCS->reset = 1;
return IRQ_NONE;
}
schedule_bh(handler);
is_alive(__func__, "normal interrupt end");
/* FIXME! Was it really for us? */
return IRQ_HANDLED;
}
|
Safe
|
[
"CWE-264",
"CWE-754"
] |
linux
|
ef87dbe7614341c2e7bfe8d32fcb7028cc97442c
|
3.137793712447858e+37
| 59 |
floppy: ignore kernel-only members in FDRAWCMD ioctl input
Always clear out these floppy_raw_cmd struct members after copying the
entire structure from userspace so that the in-kernel version is always
valid and never left in an interdeterminate state.
Signed-off-by: Matthew Daley <mattd@bugfuzz.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
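The hardening the commit describes: after copying the whole floppy_raw_cmd struct from userspace, stomp the kernel-only members so they can never arrive in an attacker-chosen state. A hedged kernel-context sketch; member names follow the commit description:

```c
/* Hedged sketch of the FDRAWCMD copyin hardening. */
static int raw_cmd_copyin_hardened(struct floppy_raw_cmd *ptr,
				   void __user *param)
{
	if (copy_from_user(ptr, param, sizeof(*ptr)))
		return -EFAULT;
	ptr->next = NULL;		/* kernel-managed chain pointer */
	ptr->buffer_length = 0;		/* kernel-managed DMA bookkeeping */
	ptr->kernel_data = NULL;	/* never trust a user pointer here */
	return 0;
}
```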
TIFFWriteDirectoryTagCheckedSshort(TIFF* tif, uint32* ndir, TIFFDirEntry* dir, uint16 tag, int16 value)
{
int16 m;
assert(sizeof(int16)==2);
m=value;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabShort((uint16*)(&m));
return(TIFFWriteDirectoryTagData(tif,ndir,dir,tag,TIFF_SSHORT,1,2,&m));
}
|
Safe
|
[
"CWE-617"
] |
libtiff
|
de144fd228e4be8aa484c3caf3d814b6fa88c6d9
|
2.815484880408103e+38
| 9 |
TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963
| 0 |
read_viminfo_barline(vir_T *virp, int got_encoding, int force, int writing)
{
char_u *p = virp->vir_line + 1;
int bartype;
garray_T values;
bval_T *vp;
int i;
int read_next = TRUE;
/* The format is: |{bartype},{value},...
* For a very long string:
* |{bartype},>{length of "{text}{text2}"}
* |<{text1}
* |<{text2},{value}
* For a long line not using a string
* |{bartype},{lots of values},>
* |<{value},{value}
*/
if (*p == '<')
{
/* Continuation line of an unrecognized item. */
if (writing)
ga_add_string(&virp->vir_barlines, virp->vir_line);
}
else
{
ga_init2(&values, sizeof(bval_T), 20);
bartype = getdigits(&p);
switch (bartype)
{
case BARTYPE_VERSION:
/* Only use the version when it comes before the encoding.
* If it comes later it was copied by a Vim version that
* doesn't understand the version. */
if (!got_encoding)
{
read_next = barline_parse(virp, p, &values);
vp = (bval_T *)values.ga_data;
if (values.ga_len > 0 && vp->bv_type == BVAL_NR)
virp->vir_version = vp->bv_nr;
}
break;
case BARTYPE_HISTORY:
read_next = barline_parse(virp, p, &values);
handle_viminfo_history(&values, writing);
break;
case BARTYPE_REGISTER:
read_next = barline_parse(virp, p, &values);
handle_viminfo_register(&values, force);
break;
case BARTYPE_MARK:
read_next = barline_parse(virp, p, &values);
handle_viminfo_mark(&values, force);
break;
default:
/* copy unrecognized line (for future use) */
if (writing)
ga_add_string(&virp->vir_barlines, virp->vir_line);
}
for (i = 0; i < values.ga_len; ++i)
{
vp = (bval_T *)values.ga_data + i;
if (vp->bv_type == BVAL_STRING && vp->bv_allocated)
vim_free(vp->bv_string);
}
ga_clear(&values);
}
if (read_next)
return viminfo_readline(virp);
return FALSE;
}
|
Safe
|
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
|
3.2946045628547422e+38
| 76 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
| 0 |
static int xenvif_poll(struct napi_struct *napi, int budget)
{
struct xenvif *vif = container_of(napi, struct xenvif, napi);
int work_done;
work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) {
int more_to_do = 0;
unsigned long flags;
/* It is necessary to disable IRQ before calling
* RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
* lose event from the frontend.
*
* Consider:
* RING_HAS_UNCONSUMED_REQUESTS
* <frontend generates event to trigger napi_schedule>
* __napi_complete
*
* This handler is still in scheduled state so the
* event has no effect at all. After __napi_complete
* this handler is descheduled and cannot get
* scheduled again. We lose event in this case and the ring
* will be completely stalled.
*/
local_irq_save(flags);
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
if (!more_to_do)
__napi_complete(napi);
local_irq_restore(flags);
}
return work_done;
}
|
Vulnerable
|
[
"CWE-399"
] |
net-next
|
e9d8b2c2968499c1f96563e6522c56958d5a1d0d
|
9.458739106895488e+34
| 38 |
xen-netback: disable rogue vif in kthread context
When netback discovers frontend is sending malformed packet it will
disables the interface which serves that frontend.
However disabling a network interface involving taking a mutex which
cannot be done in softirq context, so we need to defer this process to
kthread context.
This patch does the following:
1. introduce a flag to indicate the interface is disabled.
2. check that flag in TX path, don't do any work if it's true.
3. check that flag in RX path, turn off that interface if it's true.
The reason to disable it in RX path is because RX uses kthread. After
this change the behavior of netback is still consistent -- it won't do
any TX work for a rogue frontend, and the interface will be eventually
turned off.
Also change a "continue" to "break" after xenvif_fatal_tx_err, as it
doesn't make sense to continue processing packets if frontend is rogue.
This is a fix for XSA-90.
Reported-by: Török Edwin <edwin@etorok.net>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 1 |
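The scheme in the XSA-90 commit message: set a flag when a malformed packet is seen, skip TX work when it is set, and let the RX kthread (which may take a mutex, unlike softirq context) actually turn the interface off. A hedged sketch of both checks as described:

```c
/* Hedged sketch of the rogue-vif flag checks. */
static int xenvif_tx_guard(struct xenvif *vif)
{
	if (unlikely(vif->disabled))
		return 0;	/* no TX work for a rogue frontend */
	/* ... normal tx_action work ... */
	return 1;
}

static void xenvif_rx_guard(struct xenvif *vif)
{
	if (unlikely(vif->disabled) && netif_carrier_ok(vif->dev))
		xenvif_carrier_off(vif);	/* safe in kthread context */
}
```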
proto_tree_get_parent(proto_tree *tree) {
if (!tree)
return NULL;
return (proto_item *)tree;
}
|
Safe
|
[
"CWE-401"
] |
wireshark
|
a9fc769d7bb4b491efb61c699d57c9f35269d871
|
1.4791073553975714e+38
| 5 |
epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032.
| 0 |
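The leak fix the commit describes is to allocate the bits array from wmem packet scope, which Wireshark frees automatically when dissection of the packet ends. A hedged dissector-context sketch of that allocation:

```c
/* Hedged sketch: packet-scoped allocation instead of a bare
 * allocation nobody releases. */
static guint8 *alloc_bits_packet_scoped(guint no_of_bits)
{
    return (guint8 *)wmem_alloc(wmem_packet_scope(), no_of_bits / 8 + 1);
}
```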
static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
unsigned rxbufsize)
{
uint32_t reg;
bool matched, mismatched;
int i, j;
for (i = 0; i < s->num_type1_screeners; i++) {
reg = s->regs[GEM_SCREENING_TYPE1_REGISTER_0 + i];
matched = false;
mismatched = false;
/* Screening is based on UDP Port */
if (reg & GEM_ST1R_UDP_PORT_MATCH_ENABLE) {
uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23];
if (udp_port == extract32(reg, GEM_ST1R_UDP_PORT_MATCH_SHIFT,
GEM_ST1R_UDP_PORT_MATCH_WIDTH)) {
matched = true;
} else {
mismatched = true;
}
}
/* Screening is based on DS/TC */
if (reg & GEM_ST1R_DSTC_ENABLE) {
uint8_t dscp = rxbuf_ptr[14 + 1];
if (dscp == extract32(reg, GEM_ST1R_DSTC_MATCH_SHIFT,
GEM_ST1R_DSTC_MATCH_WIDTH)) {
matched = true;
} else {
mismatched = true;
}
}
if (matched && !mismatched) {
return extract32(reg, GEM_ST1R_QUEUE_SHIFT, GEM_ST1R_QUEUE_WIDTH);
}
}
for (i = 0; i < s->num_type2_screeners; i++) {
reg = s->regs[GEM_SCREENING_TYPE2_REGISTER_0 + i];
matched = false;
mismatched = false;
if (reg & GEM_ST2R_ETHERTYPE_ENABLE) {
uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13];
int et_idx = extract32(reg, GEM_ST2R_ETHERTYPE_INDEX_SHIFT,
GEM_ST2R_ETHERTYPE_INDEX_WIDTH);
if (et_idx > s->num_type2_screeners) {
qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype "
"register index: %d\n", et_idx);
}
if (type == s->regs[GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 +
et_idx]) {
matched = true;
} else {
mismatched = true;
}
}
/* Compare A, B, C */
for (j = 0; j < 3; j++) {
uint32_t cr0, cr1, mask;
uint16_t rx_cmp;
int offset;
int cr_idx = extract32(reg, GEM_ST2R_COMPARE_A_SHIFT + j * 6,
GEM_ST2R_COMPARE_WIDTH);
if (!(reg & (GEM_ST2R_COMPARE_A_ENABLE << (j * 6)))) {
continue;
}
if (cr_idx > s->num_type2_screeners) {
qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare "
"register index: %d\n", cr_idx);
}
cr0 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2];
cr1 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2 + 1];
offset = extract32(cr1, GEM_T2CW1_OFFSET_VALUE_SHIFT,
GEM_T2CW1_OFFSET_VALUE_WIDTH);
switch (extract32(cr1, GEM_T2CW1_COMPARE_OFFSET_SHIFT,
GEM_T2CW1_COMPARE_OFFSET_WIDTH)) {
case 3: /* Skip UDP header */
qemu_log_mask(LOG_UNIMP, "TCP compare offsets"
"unimplemented - assuming UDP\n");
offset += 8;
/* Fallthrough */
case 2: /* skip the IP header */
offset += 20;
/* Fallthrough */
case 1: /* Count from after the ethertype */
offset += 14;
break;
case 0:
/* Offset from start of frame */
break;
}
rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset];
mask = extract32(cr0, 0, 16);
if ((rx_cmp & mask) == (extract32(cr0, 16, 16) & mask)) {
matched = true;
} else {
mismatched = true;
}
}
if (matched && !mismatched) {
return extract32(reg, GEM_ST2R_QUEUE_SHIFT, GEM_ST2R_QUEUE_WIDTH);
}
}
/* We made it here, assume it's queue 0 */
return 0;
}
|
Safe
|
[
"CWE-835"
] |
qemu
|
e73adfbeec9d4e008630c814759052ed945c3fed
|
2.5439364652696636e+38
| 118 |
cadence_gem: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <ppandit@redhat.com>
Cc: qemu-stable@nongnu.org
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
Signed-off-by: Jason Wang <jasowang@redhat.com>
| 0 |
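The CVE-2021-3416 pattern: when the NIC model loops a frame back to itself, deliver it through qemu_receive_packet(), which detects reentrancy, instead of qemu_send_packet(). A hedged sketch of that switch; the function name is illustrative:

```c
/* Hedged sketch of the loopback path change. */
static void gem_transmit_frame(CadenceGEMState *s,
                               const uint8_t *buf, int len, bool loopback)
{
    NetClientState *nc = qemu_get_queue(s->nic);

    if (loopback) {
        qemu_receive_packet(nc, buf, len);  /* reentrancy-safe path */
    } else {
        qemu_send_packet(nc, buf, len);
    }
}
```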
static void __exit dn_rtmsg_fini(void)
{
nf_unregister_hook(&dnrmg_ops);
netlink_kernel_release(dnrmg);
}
|
Safe
|
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
|
2.826976472406267e+38
| 5 |
net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible, by passing a netlink socket to a more privileged
executable and then fooling that executable into writing to the socket
data that happens to be a valid netlink message, to make that
executable do something it did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
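The check the commit introduces validates the capability of whoever opened the netlink socket, not just the current process. A hedged sketch of a message handler using that guard:

```c
/* Hedged sketch: check the socket opener's capability before acting. */
static int handle_msg_guard(struct sk_buff *skb)
{
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;	/* socket opener lacked CAP_NET_ADMIN */
	/* ... process the netlink request ... */
	return 0;
}
```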
static BROTLI_INLINE uint16_t GetInsertLengthCode(size_t insertlen) {
if (insertlen < 6) {
return (uint16_t)insertlen;
} else if (insertlen < 130) {
uint32_t nbits = Log2FloorNonZero(insertlen - 2) - 1u;
return (uint16_t)((nbits << 1) + ((insertlen - 2) >> nbits) + 2);
} else if (insertlen < 2114) {
return (uint16_t)(Log2FloorNonZero(insertlen - 66) + 10);
} else if (insertlen < 6210) {
return 21u;
} else if (insertlen < 22594) {
return 22u;
} else {
return 23u;
}
}
|
Safe
|
[
"CWE-120"
] |
brotli
|
223d80cfbec8fd346e32906c732c8ede21f0cea6
|
2.460373533101831e+38
| 16 |
Update (#826)
* IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB
* simplify max Huffman table size calculation
* eliminate symbol duplicates (static arrays in .h files)
* minor combing in research/ code
| 0 |
static bool wsrep_command_no_result(char command)
{
return (command == COM_STMT_FETCH ||
command == COM_STMT_SEND_LONG_DATA ||
command == COM_STMT_CLOSE);
}
|
Safe
|
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
|
4.915097044531934e+35
| 6 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
If an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <sanja@mariadb.com>
| 0 |
static void dev_disable_gro_hw(struct net_device *dev)
{
dev->wanted_features &= ~NETIF_F_GRO_HW;
netdev_update_features(dev);
if (unlikely(dev->features & NETIF_F_GRO_HW))
netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
|
Safe
|
[
"CWE-416"
] |
linux
|
a4270d6795b0580287453ea55974d948393e66ef
|
1.7966510516542434e+38
| 8 |
net-gro: fix use-after-free read in napi_gro_frags()
If a network driver provides to napi_gro_frags() an
skb with a page fragment of exactly 14 bytes, the call
to gro_pull_from_frag0() will 'consume' the fragment
by calling skb_frag_unref(skb, 0), and the page might
be freed and reused.
Reading eth->h_proto at the end of napi_frags_skb() might
read mangled data, or crash under specific debugging features.
BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline]
BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
Read of size 2 at addr ffff88809366840c by task syz-executor599/8957
CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188
__kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
kasan_report+0x12/0x20 mm/kasan/common.c:614
__asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142
napi_frags_skb net/core/dev.c:5833 [inline]
napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037
call_write_iter include/linux/fs.h:1872 [inline]
do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693
do_iter_write fs/read_write.c:970 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:951
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015
do_writev+0x15b/0x330 fs/read_write.c:1058
Fixes: a50e233c50db ("net-gro: restore frag0 optimization")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
int ssl3_send_certificate_request(SSL *s)
{
unsigned char *p, *d;
int i, j, nl, off, n;
STACK_OF(X509_NAME) *sk = NULL;
X509_NAME *name;
BUF_MEM *buf;
if (s->state == SSL3_ST_SW_CERT_REQ_A) {
buf = s->init_buf;
d = p = ssl_handshake_start(s);
/* get the list of acceptable cert types */
p++;
n = ssl3_get_req_cert_type(s, p);
d[0] = n;
p += n;
n++;
if (SSL_USE_SIGALGS(s)) {
const unsigned char *psigs;
nl = tls12_get_psigalgs(s, &psigs);
s2n(nl, p);
memcpy(p, psigs, nl);
p += nl;
n += nl + 2;
}
off = n;
p += 2;
n += 2;
sk = SSL_get_client_CA_list(s);
nl = 0;
if (sk != NULL) {
for (i = 0; i < sk_X509_NAME_num(sk); i++) {
name = sk_X509_NAME_value(sk, i);
j = i2d_X509_NAME(name, NULL);
if (!BUF_MEM_grow_clean
(buf, SSL_HM_HEADER_LENGTH(s) + n + j + 2)) {
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,
ERR_R_BUF_LIB);
goto err;
}
p = ssl_handshake_start(s) + n;
if (!(s->options & SSL_OP_NETSCAPE_CA_DN_BUG)) {
s2n(j, p);
i2d_X509_NAME(name, &p);
n += 2 + j;
nl += 2 + j;
} else {
d = p;
i2d_X509_NAME(name, &p);
j -= 2;
s2n(j, d);
j += 2;
n += j;
nl += j;
}
}
}
/* else no CA names */
p = ssl_handshake_start(s) + off;
s2n(nl, p);
ssl_set_handshake_header(s, SSL3_MT_CERTIFICATE_REQUEST, n);
#ifdef NETSCAPE_HANG_BUG
if (!SSL_IS_DTLS(s)) {
if (!BUF_MEM_grow_clean(buf, s->init_num + 4)) {
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST, ERR_R_BUF_LIB);
goto err;
}
p = (unsigned char *)s->init_buf->data + s->init_num;
/* do the header */
*(p++) = SSL3_MT_SERVER_DONE;
*(p++) = 0;
*(p++) = 0;
*(p++) = 0;
s->init_num += 4;
}
#endif
s->state = SSL3_ST_SW_CERT_REQ_B;
}
/* SSL3_ST_SW_CERT_REQ_B */
return ssl_do_write(s);
err:
return (-1);
}
|
Safe
|
[
"CWE-20"
] |
openssl
|
b19d8143212ae5fbc9cebfd51c01f802fabccd33
|
1.982196941793581e+38
| 92 |
Fix DHE Null CKE vulnerability
If client auth is used then a server can seg fault in the event of a DHE
cipher being used and a zero length ClientKeyExchange message being sent
by the client. This could be exploited in a DoS attack.
CVE-2015-1787
Reviewed-by: Richard Levitte <levitte@openssl.org>
| 0 |