func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64)
---|---|---|---|---|---|---|---|---|
static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
return dev->msix_pba + vector / 8;
}
|
Safe
|
[] |
qemu
|
43b11a91dd861a946b231b89b7542856ade23d1b
|
1.173918606990186e+38
| 4 |
msix: implement pba write (but read-only)
qpci_msix_pending() writes on pba region, causing qemu to SEGV:
Program received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7ffff7fba8c0 (LWP 25882)]
0x0000000000000000 in ?? ()
(gdb) bt
#0 0x0000000000000000 in ()
#1 0x00005555556556c5 in memory_region_oldmmio_write_accessor (mr=0x5555579f3f80, addr=0, value=0x7fffffffbf68, size=4, shift=0, mask=4294967295, attrs=...) at /home/elmarco/src/qemu/memory.c:434
#2 0x00005555556558e1 in access_with_adjusted_size (addr=0, value=0x7fffffffbf68, size=4, access_size_min=1, access_size_max=4, access=0x55555565563e <memory_region_oldmmio_write_accessor>, mr=0x5555579f3f80, attrs=...) at /home/elmarco/src/qemu/memory.c:506
#3 0x00005555556581eb in memory_region_dispatch_write (mr=0x5555579f3f80, addr=0, data=0, size=4, attrs=...) at /home/elmarco/src/qemu/memory.c:1176
#4 0x000055555560b6f9 in address_space_rw (as=0x555555eff4e0 <address_space_memory>, addr=3759147008, attrs=..., buf=0x7fffffffc1b0 "", len=4, is_write=true) at /home/elmarco/src/qemu/exec.c:2439
#5 0x000055555560baa2 in cpu_physical_memory_rw (addr=3759147008, buf=0x7fffffffc1b0 "", len=4, is_write=1) at /home/elmarco/src/qemu/exec.c:2534
#6 0x000055555564c005 in cpu_physical_memory_write (addr=3759147008, buf=0x7fffffffc1b0, len=4) at /home/elmarco/src/qemu/include/exec/cpu-common.h:80
#7 0x000055555564cd9c in qtest_process_command (chr=0x55555642b890, words=0x5555578de4b0) at /home/elmarco/src/qemu/qtest.c:378
#8 0x000055555564db77 in qtest_process_inbuf (chr=0x55555642b890, inbuf=0x55555641b340) at /home/elmarco/src/qemu/qtest.c:569
#9 0x000055555564dc07 in qtest_read (opaque=0x55555642b890, buf=0x7fffffffc2e0 "writel 0xe0100800 0x0\n", size=22) at /home/elmarco/src/qemu/qtest.c:581
#10 0x000055555574ce3e in qemu_chr_be_write (s=0x55555642b890, buf=0x7fffffffc2e0 "writel 0xe0100800 0x0\n", len=22) at qemu-char.c:306
#11 0x0000555555751263 in tcp_chr_read (chan=0x55555642bcf0, cond=G_IO_IN, opaque=0x55555642b890) at qemu-char.c:2876
#12 0x00007ffff64c9a8a in g_main_context_dispatch (context=0x55555641c400) at gmain.c:3122
(without this patch, this can be reproduced with the ivshmem qtest)
Implement an empty mmio write to avoid the crash.
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
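The fix named in the commit message above is essentially an empty MMIO write handler, so guest writes to the PBA region no longer dispatch through a NULL function pointer. A minimal sketch of that shape (standalone C with a stand-in hwaddr typedef; illustrative rather than the exact QEMU patch):

```c
#include <stdint.h>

/* Stand-in for QEMU's hwaddr type. */
typedef uint64_t hwaddr;

/* The PBA is read-only from the guest's point of view: accept and
 * discard writes instead of leaving the write callback NULL, which
 * is what sent the dispatch in the backtrace above to address 0. */
static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    (void)opaque; (void)addr; (void)val; (void)size;
    /* intentionally empty */
}
```

In QEMU terms, wiring a handler like this into the region's MemoryRegionOps turns the crashing write into a harmless no-op.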
static BOOL gdi_Glyph_BeginDraw(rdpContext* context, INT32 x, INT32 y,
INT32 width, INT32 height, UINT32 bgcolor,
UINT32 fgcolor, BOOL fOpRedundant)
{
rdpGdi* gdi;
if (!context || !context->gdi)
return FALSE;
gdi = context->gdi;
if (!gdi->drawing || !gdi->drawing->hdc)
return FALSE;
if (!fOpRedundant)
{
if (!gdi_decode_color(gdi, bgcolor, &bgcolor, NULL))
return FALSE;
if (!gdi_decode_color(gdi, fgcolor, &fgcolor, NULL))
return FALSE;
gdi_SetClipRgn(gdi->drawing->hdc, x, y, width, height);
gdi_SetTextColor(gdi->drawing->hdc, bgcolor);
gdi_SetBkColor(gdi->drawing->hdc, fgcolor);
if (1)
{
GDI_RECT rect = { 0 };
HGDI_BRUSH brush = gdi_CreateSolidBrush(fgcolor);
if (!brush)
return FALSE;
if (x > 0)
rect.left = x;
if (y > 0)
rect.top = y;
rect.right = x + width - 1;
rect.bottom = y + height - 1;
if ((x + width > rect.left) && (y + height > rect.top))
gdi_FillRect(gdi->drawing->hdc, &rect, brush);
gdi_DeleteObject((HGDIOBJECT)brush);
}
return gdi_SetNullClipRgn(gdi->drawing->hdc);
}
return TRUE;
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
FreeRDP
|
09b9d4f1994a674c4ec85b4947aa656eda1aed8a
|
3.0785085337060564e+38
| 54 |
Fixed CVE-2018-8787
Thanks to Eyal Itkin from Check Point Software Technologies.
| 0 |
static int oidc_handle_logout(request_rec *r, oidc_cfg *c,
oidc_session_t *session) {
oidc_provider_t *provider = NULL;
/* pickup the command or URL where the user wants to go after logout */
char *url = NULL;
char *error_str = NULL;
char *error_description = NULL;
oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_LOGOUT, &url);
oidc_debug(r, "enter (url=%s)", url);
if (oidc_is_front_channel_logout(url)) {
return oidc_handle_logout_request(r, c, session, url);
} else if (oidc_is_back_channel_logout(url)) {
return oidc_handle_logout_backchannel(r, c);
}
if ((url == NULL) || (apr_strnatcmp(url, "") == 0)) {
url = c->default_slo_url;
} else {
/* do input validation on the logout parameter value */
if (oidc_validate_redirect_url(r, c, url, TRUE, &error_str,
&error_description) == FALSE) {
return oidc_util_html_send_error(r, c->error_template, error_str,
error_description,
HTTP_BAD_REQUEST);
}
}
oidc_get_provider_from_session(r, c, session, &provider);
if ((provider != NULL) && (provider->end_session_endpoint != NULL)) {
const char *id_token_hint = oidc_session_get_idtoken(r, session);
char *logout_request = apr_pstrdup(r->pool,
provider->end_session_endpoint);
if (id_token_hint != NULL) {
logout_request = apr_psprintf(r->pool, "%s%sid_token_hint=%s",
logout_request, strchr(logout_request ? logout_request : "",
OIDC_CHAR_QUERY) != NULL ?
OIDC_STR_AMP :
OIDC_STR_QUERY,
oidc_util_escape_string(r, id_token_hint));
}
if (url != NULL) {
logout_request = apr_psprintf(r->pool,
"%s%spost_logout_redirect_uri=%s", logout_request,
strchr(logout_request ? logout_request : "",
OIDC_CHAR_QUERY) != NULL ?
OIDC_STR_AMP :
OIDC_STR_QUERY,
oidc_util_escape_string(r, url));
}
url = logout_request;
}
return oidc_handle_logout_request(r, c, session, url);
}
|
Safe
|
[
"CWE-79"
] |
mod_auth_openidc
|
55ea0a085290cd2c8cdfdd960a230cbc38ba8b56
|
2.5476373175125845e+38
| 65 |
Add a function to escape Javascript characters
| 0 |
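The one-line message above doesn't show the escaping itself. As a rough idea of what a JavaScript-string escaper for this kind of CWE-79 fix typically covers (a hypothetical helper, not mod_auth_openidc's actual function):

```c
#include <stdio.h>

/* Escape characters that could break out of a JS string literal
 * embedded in an HTML error template -- the XSS vector in CWE-79. */
static void js_escape_into(FILE *out, const char *s)
{
    for (; *s; s++) {
        switch (*s) {
        case '"': case '\'': case '\\': case '/':
            fprintf(out, "\\%c", *s);   break;
        case '\n': fputs("\\n", out);   break;
        case '\r': fputs("\\r", out);   break;
        case '<':  fputs("\\x3c", out); break;  /* blocks </script> */
        default:   fputc(*s, out);      break;
        }
    }
}
```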
SV* dbd_db_quote(SV *dbh, SV *str, SV *type)
{
dTHX;
SV *result;
if (SvGMAGICAL(str))
mg_get(str);
if (!SvOK(str))
result= newSVpv("NULL", 4);
else
{
char *ptr, *sptr;
STRLEN len;
D_imp_dbh(dbh);
if (type && SvMAGICAL(type))
mg_get(type);
if (type && SvOK(type))
{
int i;
int tp= SvIV(type);
for (i= 0; i < (int)SQL_GET_TYPE_INFO_num; i++)
{
const sql_type_info_t *t= &SQL_GET_TYPE_INFO_values[i];
if (t->data_type == tp)
{
if (!t->literal_prefix)
return Nullsv;
break;
}
}
}
ptr= SvPV(str, len);
result= newSV(len*2+3);
#ifdef SvUTF8
if (SvUTF8(str)) SvUTF8_on(result);
#endif
sptr= SvPVX(result);
*sptr++ = '\'';
sptr+= mysql_real_escape_string(imp_dbh->pmysql, sptr,
ptr, len);
*sptr++= '\'';
SvPOK_on(result);
SvCUR_set(result, sptr - SvPVX(result));
/* Never hurts NUL terminating a Perl string */
*sptr++= '\0';
}
return result;
}
|
Safe
|
[
"CWE-416"
] |
DBD-mysql
|
a56ae87a4c1c1fead7d09c3653905841ccccf1cc
|
1.7783172777300163e+38
| 54 |
fix use-after-free crash in RT #97625
| 0 |
DEFUN(accessKey, ACCESSKEY, "Pop up accesskey menu")
{
anchorMn(accesskey_menu, TRUE);
}
|
Safe
|
[
"CWE-59",
"CWE-241"
] |
w3m
|
18dcbadf2771cdb0c18509b14e4e73505b242753
|
1.6943995477168428e+38
| 4 |
Make temporary directory safely when ~/.w3m is unwritable
| 0 |
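For the commit above, the standard safe way to create a temporary directory in C is mkdtemp(), which creates it mode 0700 with an unpredictable name and so avoids the symlink-following races behind CWE-59. A small sketch (hypothetical helper; w3m's actual fallback logic differs):

```c
#include <stdlib.h>
#include <string.h>

/* Create a private fallback directory when the usual one (~/.w3m)
 * is unwritable. mkdtemp() fills in the XXXXXX atomically. */
static char *make_safe_tmpdir(char *buf, size_t bufsize)
{
    static const char templ[] = "/tmp/w3m-XXXXXX";

    if (bufsize < sizeof(templ))
        return NULL;
    memcpy(buf, templ, sizeof(templ));
    return mkdtemp(buf);   /* NULL on failure, buf on success */
}
```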
_nm_singleton_instance_weak_cb(gpointer data, GObject *where_the_object_was)
{
nm_assert(g_slist_find(_singletons, where_the_object_was));
_singletons = g_slist_remove(_singletons, where_the_object_was);
}
|
Safe
|
[
"CWE-20"
] |
NetworkManager
|
420784e342da4883f6debdfe10cde68507b10d27
|
2.8179728751649864e+38
| 6 |
core: fix crash in nm_wildcard_match_check()
It's not entirely clear how to treat %NULL.
Clearly "match.interface-name=eth0" should not
match with an interface %NULL. But what about
"match.interface-name=!eth0"? It's now implemented
that negative matches still succeed against %NULL.
What about "match.interface-name=*"? That probably
should also match with %NULL. So we treat %NULL really
like "".
Against commit 11cd443448bc ('iwd: Don't call IWD methods when device
unmanaged'), we got this backtrace:
#0 0x00007f1c164069f1 in __strnlen_avx2 () at ../sysdeps/x86_64/multiarch/strlen-avx2.S:62
#1 0x00007f1c1637ac9e in __fnmatch (pattern=<optimized out>, string=<optimized out>, string@entry=0x0, flags=flags@entry=0) at fnmatch.c:379
p = 0x0
res = <optimized out>
orig_pattern = <optimized out>
n = <optimized out>
wpattern = 0x7fff8d860730 L"pci-0000:03:00.0"
ps = {__count = 0, __value = {__wch = 0, __wchb = "\000\000\000"}}
wpattern_malloc = 0x0
wstring_malloc = 0x0
wstring = <optimized out>
alloca_used = 80
__PRETTY_FUNCTION__ = "__fnmatch"
#2 0x0000564484a978bf in nm_wildcard_match_check (str=0x0, patterns=<optimized out>, num_patterns=<optimized out>) at src/core/nm-core-utils.c:1959
is_inverted = 0
is_mandatory = 0
match = <optimized out>
p = 0x564486c43fa0 "pci-0000:03:00.0"
has_optional = 0
has_any_optional = 0
i = <optimized out>
#3 0x0000564484bf4797 in check_connection_compatible (self=<optimized out>, connection=<optimized out>, error=0x0) at src/core/devices/nm-device.c:7499
patterns = <optimized out>
device_driver = 0x564486c76bd0 "veth"
num_patterns = 1
priv = 0x564486cbe0b0
__func__ = "check_connection_compatible"
device_iface = <optimized out>
local = 0x564486c99a60
conn_iface = 0x0
klass = <optimized out>
s_match = 0x564486c63df0 [NMSettingMatch]
#4 0x0000564484c38491 in check_connection_compatible (device=0x564486cbe590 [NMDeviceVeth], connection=0x564486c6b160, error=0x0) at src/core/devices/nm-device-ethernet.c:348
self = 0x564486cbe590 [NMDeviceVeth]
s_wired = <optimized out>
Fixes: 3ced486f4162 ('libnm/match: extend syntax for match patterns with '|', '&', '!' and '\\'')
https://bugzilla.redhat.com/show_bug.cgi?id=1942741
| 0 |
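Given the commit message's decision to treat %NULL like "", the crash reduces to keeping NULL away from fnmatch(). A minimal standalone sketch of that guard (not NetworkManager's actual code):

```c
#include <fnmatch.h>
#include <stdbool.h>
#include <stddef.h>

/* fnmatch() must never see a NULL string: normalize NULL to "" so
 * "match.interface-name=eth0" fails against a NULL interface while
 * a negative match like "!eth0" still succeeds. */
static bool pattern_matches(const char *pattern, const char *str)
{
    return fnmatch(pattern, str ? str : "", 0) == 0;
}
```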
virNodeDevPCICapSRIOVPhysicalParseXML(xmlXPathContextPtr ctxt,
virNodeDevCapPCIDevPtr pci_dev)
{
xmlNodePtr address = virXPathNode("./address[1]", ctxt);
pci_dev->physical_function = g_new0(virPCIDeviceAddress, 1);
if (!address) {
virReportError(VIR_ERR_XML_ERROR, "%s",
_("Missing address in 'phys_function' capability"));
return -1;
}
if (virPCIDeviceAddressParseXML(address,
pci_dev->physical_function) < 0)
return -1;
pci_dev->flags |= VIR_NODE_DEV_CAP_FLAG_PCI_PHYSICAL_FUNCTION;
return 0;
}
|
Safe
|
[
"CWE-119"
] |
libvirt
|
4c4d0e2da07b5a035b26a0ff13ec27070f7c7b1a
|
2.49141300212329e+38
| 21 |
conf: Fix segfault when parsing mdev types
Commit f1b0890 introduced a potential crash due to incorrect operator
precedence when accessing an element from a pointer to an array.
Backtrace below:
#0 virNodeDeviceGetMdevTypesCaps (sysfspath=0x7fff801661e0 "/sys/devices/pci0000:00/0000:00:02.0", mdev_types=0x7fff801c9b40, nmdev_types=0x7fff801c9b48) at ../src/conf/node_device_conf.c:2676
#1 0x00007ffff7caf53d in virNodeDeviceGetPCIDynamicCaps (sysfsPath=0x7fff801661e0 "/sys/devices/pci0000:00/0000:00:02.0", pci_dev=0x7fff801c9ac8) at ../src/conf/node_device_conf.c:2705
#2 0x00007ffff7cae38f in virNodeDeviceUpdateCaps (def=0x7fff80168a10) at ../src/conf/node_device_conf.c:2342
#3 0x00007ffff7cb11c0 in virNodeDeviceObjMatch (obj=0x7fff84002e50, flags=0) at ../src/conf/virnodedeviceobj.c:850
#4 0x00007ffff7cb153d in virNodeDeviceObjListExportCallback (payload=0x7fff84002e50, name=0x7fff801cbc20 "pci_0000_00_02_0", opaque=0x7fffe2ffc6a0) at ../src/conf/virnodedeviceobj.c:909
#5 0x00007ffff7b69146 in virHashForEach (table=0x7fff9814b700 = {...}, iter=0x7ffff7cb149e <virNodeDeviceObjListExportCallback>, opaque=0x7fffe2ffc6a0) at ../src/util/virhash.c:394
#6 0x00007ffff7cb1694 in virNodeDeviceObjListExport (conn=0x7fff98013170, devs=0x7fff98154430, devices=0x7fffe2ffc798, filter=0x7ffff7cf47a1 <virConnectListAllNodeDevicesCheckACL>, flags=0)
at ../src/conf/virnodedeviceobj.c:943
#7 0x00007fffe00694b2 in nodeConnectListAllNodeDevices (conn=0x7fff98013170, devices=0x7fffe2ffc798, flags=0) at ../src/node_device/node_device_driver.c:228
#8 0x00007ffff7e703aa in virConnectListAllNodeDevices (conn=0x7fff98013170, devices=0x7fffe2ffc798, flags=0) at ../src/libvirt-nodedev.c:130
#9 0x000055555557f796 in remoteDispatchConnectListAllNodeDevices (server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000, rerr=0x7fffe2ffc8a0, args=0x7fffd4008470, ret=0x7fffd40084e0)
at src/remote/remote_daemon_dispatch_stubs.h:1613
#10 0x000055555557f6f9 in remoteDispatchConnectListAllNodeDevicesHelper (server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000, rerr=0x7fffe2ffc8a0, args=0x7fffd4008470, ret=0x7fffd40084e0)
at src/remote/remote_daemon_dispatch_stubs.h:1591
#11 0x00007ffff7ce9542 in virNetServerProgramDispatchCall (prog=0x555555690c10, server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000) at ../src/rpc/virnetserverprogram.c:428
#12 0x00007ffff7ce90bd in virNetServerProgramDispatch (prog=0x555555690c10, server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000) at ../src/rpc/virnetserverprogram.c:302
#13 0x00007ffff7cf042b in virNetServerProcessMsg (srv=0x555555627080, client=0x5555556bf050, prog=0x555555690c10, msg=0x5555556c0000) at ../src/rpc/virnetserver.c:137
#14 0x00007ffff7cf04eb in virNetServerHandleJob (jobOpaque=0x5555556b66b0, opaque=0x555555627080) at ../src/rpc/virnetserver.c:154
#15 0x00007ffff7bd912f in virThreadPoolWorker (opaque=0x55555562bc70) at ../src/util/virthreadpool.c:163
#16 0x00007ffff7bd8645 in virThreadHelper (data=0x55555562bc90) at ../src/util/virthread.c:233
#17 0x00007ffff6d90432 in start_thread () at /lib64/libpthread.so.0
#18 0x00007ffff75c5913 in clone () at /lib64/libc.so.6
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
Signed-off-by: Ján Tomko <jtomko@redhat.com>
| 0 |
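The precedence bug the libvirt message describes is the classic *ptr[i] versus (*ptr)[i] mix-up: [] binds tighter than unary *, so the unparenthesized form indexes the pointer first. A small self-contained illustration (hypothetical variables, not libvirt's code):

```c
#include <stdio.h>

int main(void)
{
    int arr[3] = { 10, 20, 30 };
    int (*parr)[3] = &arr;          /* pointer to an array of 3 ints */

    /* (*parr)[1]: dereference first, then index -- reads arr[1]. */
    printf("%d\n", (*parr)[1]);

    /* *parr[1] parses as *(parr[1]): it indexes the pointer as if it
     * pointed at an array of arrays, stepping past arr entirely and
     * dereferencing unrelated memory -- the segfault pattern fixed by
     * the commit above. */
    return 0;
}
```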
static int submodule_has_commits(const char *path, struct oid_array *commits)
{
int has_commit = 1;
/*
* Perform a cheap, but incorrect check for the existence of 'commits'.
* This is done by adding the submodule's object store to the in-core
* object store, and then querying for each commit's existence. If we
* do not have the commit object anywhere, there is no chance we have
* it in the object store of the correct submodule and have it
* reachable from a ref, so we can fail early without spawning rev-list
* which is expensive.
*/
if (add_submodule_odb(path))
return 0;
oid_array_for_each_unique(commits, check_has_commit, &has_commit);
if (has_commit) {
/*
* Even if the submodule is checked out and the commit is
* present, make sure it exists in the submodule's object store
* and that it is reachable from a ref.
*/
struct child_process cp = CHILD_PROCESS_INIT;
struct strbuf out = STRBUF_INIT;
argv_array_pushl(&cp.args, "rev-list", "-n", "1", NULL);
oid_array_for_each_unique(commits, append_oid_to_argv, &cp.args);
argv_array_pushl(&cp.args, "--not", "--all", NULL);
prepare_submodule_repo_env(&cp.env_array);
cp.git_cmd = 1;
cp.no_stdin = 1;
cp.dir = path;
if (capture_command(&cp, &out, GIT_MAX_HEXSZ + 1) || out.len)
has_commit = 0;
strbuf_release(&out);
}
return has_commit;
}
|
Safe
|
[] |
git
|
a8dee3ca610f5a1d403634492136c887f83b59d2
|
1.898840252742092e+38
| 44 |
Disallow dubiously-nested submodule git directories
Currently it is technically possible to let a submodule's git
directory point right into the git dir of a sibling submodule.
Example: the git directories of two submodules with the names `hippo`
and `hippo/hooks` would be `.git/modules/hippo/` and
`.git/modules/hippo/hooks/`, respectively, but the latter is already
intended to house the former's hooks.
In most cases, this is just confusing, but there is also a (quite
contrived) attack vector where Git can be fooled into mistaking remote
content for file contents it wrote itself during a recursive clone.
Let's plug this bug.
To do so, we introduce the new function `validate_submodule_git_dir()`
which simply verifies that no git dir exists for any leading directories
of the submodule name (if there are any).
Note: this patch specifically continues to allow sibling module names
of the form `core/lib`, `core/doc`, etc, as long as `core` is not a
submodule name.
This fixes CVE-2019-1387.
Reported-by: Nicolas Joly <Nicolas.Joly@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
| 0 |
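The message above describes validate_submodule_git_dir() as refusing a submodule whose name has a leading directory that is itself a registered module. A rough standalone sketch of that prefix walk (illustrative names and paths; git's real check operates on the resolved git dir):

```c
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* Reject "hippo/hooks" if ".git/modules/hippo" already exists:
 * probe every leading directory of the submodule name. */
static int validate_submodule_name(const char *name)
{
    char path[4096];
    const char *slash = name;
    struct stat st;

    while ((slash = strchr(slash, '/')) != NULL) {
        snprintf(path, sizeof(path), ".git/modules/%.*s",
                 (int)(slash - name), name);
        if (stat(path, &st) == 0)
            return -1;   /* a leading directory is already a module */
        slash++;
    }
    return 0;
}
```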
void Discovered_table_list::sort_desc()
{
tables->sort(cmp_table_names_desc);
}
|
Safe
|
[
"CWE-416"
] |
server
|
af810407f78b7f792a9bb8c47c8c532eb3b3a758
|
1.2163799558790328e+38
| 4 |
MDEV-28098 incorrect key in "dup value" error after long unique
reset errkey after using it, so that it wouldn't affect
the next error message in the next statement
| 0 |
static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
/* Common GRO receive for SIT and IP6IP6 */
if (NAPI_GRO_CB(skb)->encap_mark) {
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
NAPI_GRO_CB(skb)->encap_mark = 1;
return ipv6_gro_receive(head, skb);
}
|
Safe
|
[
"CWE-125"
] |
linux
|
2423496af35d94a87156b063ea5cedffc10a70a1
|
2.175096703912579e+38
| 14 |
ipv6: Prevent overrun when parsing v6 header options
The KASAN warning reported below was discovered with a syzkaller
program. The reproducer is basically:
int s = socket(AF_INET6, SOCK_RAW, NEXTHDR_HOP);
send(s, &one_byte_of_data, 1, MSG_MORE);
send(s, &more_than_mtu_bytes_data, 2000, 0);
The socket() call sets the nexthdr field of the v6 header to
NEXTHDR_HOP, the first send call primes the payload with a non-zero
byte of data, and the second send call triggers the fragmentation path.
The fragmentation code tries to parse the header options in order
to figure out where to insert the fragment option. Since nexthdr points
to an invalid option, the calculation of the size of the network header
can be made much larger than the linear section of the skb, and data
is read outside of it.
This fix makes ip6_find_1stfrag return an error if it detects that it
is running out of bounds.
[ 42.361487] ==================================================================
[ 42.364412] BUG: KASAN: slab-out-of-bounds in ip6_fragment+0x11c8/0x3730
[ 42.365471] Read of size 840 at addr ffff88000969e798 by task ip6_fragment-oo/3789
[ 42.366469]
[ 42.366696] CPU: 1 PID: 3789 Comm: ip6_fragment-oo Not tainted 4.11.0+ #41
[ 42.367628] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.1-1ubuntu1 04/01/2014
[ 42.368824] Call Trace:
[ 42.369183] dump_stack+0xb3/0x10b
[ 42.369664] print_address_description+0x73/0x290
[ 42.370325] kasan_report+0x252/0x370
[ 42.370839] ? ip6_fragment+0x11c8/0x3730
[ 42.371396] check_memory_region+0x13c/0x1a0
[ 42.371978] memcpy+0x23/0x50
[ 42.372395] ip6_fragment+0x11c8/0x3730
[ 42.372920] ? nf_ct_expect_unregister_notifier+0x110/0x110
[ 42.373681] ? ip6_copy_metadata+0x7f0/0x7f0
[ 42.374263] ? ip6_forward+0x2e30/0x2e30
[ 42.374803] ip6_finish_output+0x584/0x990
[ 42.375350] ip6_output+0x1b7/0x690
[ 42.375836] ? ip6_finish_output+0x990/0x990
[ 42.376411] ? ip6_fragment+0x3730/0x3730
[ 42.376968] ip6_local_out+0x95/0x160
[ 42.377471] ip6_send_skb+0xa1/0x330
[ 42.377969] ip6_push_pending_frames+0xb3/0xe0
[ 42.378589] rawv6_sendmsg+0x2051/0x2db0
[ 42.379129] ? rawv6_bind+0x8b0/0x8b0
[ 42.379633] ? _copy_from_user+0x84/0xe0
[ 42.380193] ? debug_check_no_locks_freed+0x290/0x290
[ 42.380878] ? ___sys_sendmsg+0x162/0x930
[ 42.381427] ? rcu_read_lock_sched_held+0xa3/0x120
[ 42.382074] ? sock_has_perm+0x1f6/0x290
[ 42.382614] ? ___sys_sendmsg+0x167/0x930
[ 42.383173] ? lock_downgrade+0x660/0x660
[ 42.383727] inet_sendmsg+0x123/0x500
[ 42.384226] ? inet_sendmsg+0x123/0x500
[ 42.384748] ? inet_recvmsg+0x540/0x540
[ 42.385263] sock_sendmsg+0xca/0x110
[ 42.385758] SYSC_sendto+0x217/0x380
[ 42.386249] ? SYSC_connect+0x310/0x310
[ 42.386783] ? __might_fault+0x110/0x1d0
[ 42.387324] ? lock_downgrade+0x660/0x660
[ 42.387880] ? __fget_light+0xa1/0x1f0
[ 42.388403] ? __fdget+0x18/0x20
[ 42.388851] ? sock_common_setsockopt+0x95/0xd0
[ 42.389472] ? SyS_setsockopt+0x17f/0x260
[ 42.390021] ? entry_SYSCALL_64_fastpath+0x5/0xbe
[ 42.390650] SyS_sendto+0x40/0x50
[ 42.391103] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.391731] RIP: 0033:0x7fbbb711e383
[ 42.392217] RSP: 002b:00007ffff4d34f28 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
[ 42.393235] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbbb711e383
[ 42.394195] RDX: 0000000000001000 RSI: 00007ffff4d34f60 RDI: 0000000000000003
[ 42.395145] RBP: 0000000000000046 R08: 00007ffff4d34f40 R09: 0000000000000018
[ 42.396056] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000400aad
[ 42.396598] R13: 0000000000000066 R14: 00007ffff4d34ee0 R15: 00007fbbb717af00
[ 42.397257]
[ 42.397411] Allocated by task 3789:
[ 42.397702] save_stack_trace+0x16/0x20
[ 42.398005] save_stack+0x46/0xd0
[ 42.398267] kasan_kmalloc+0xad/0xe0
[ 42.398548] kasan_slab_alloc+0x12/0x20
[ 42.398848] __kmalloc_node_track_caller+0xcb/0x380
[ 42.399224] __kmalloc_reserve.isra.32+0x41/0xe0
[ 42.399654] __alloc_skb+0xf8/0x580
[ 42.400003] sock_wmalloc+0xab/0xf0
[ 42.400346] __ip6_append_data.isra.41+0x2472/0x33d0
[ 42.400813] ip6_append_data+0x1a8/0x2f0
[ 42.401122] rawv6_sendmsg+0x11ee/0x2db0
[ 42.401505] inet_sendmsg+0x123/0x500
[ 42.401860] sock_sendmsg+0xca/0x110
[ 42.402209] ___sys_sendmsg+0x7cb/0x930
[ 42.402582] __sys_sendmsg+0xd9/0x190
[ 42.402941] SyS_sendmsg+0x2d/0x50
[ 42.403273] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.403718]
[ 42.403871] Freed by task 1794:
[ 42.404146] save_stack_trace+0x16/0x20
[ 42.404515] save_stack+0x46/0xd0
[ 42.404827] kasan_slab_free+0x72/0xc0
[ 42.405167] kfree+0xe8/0x2b0
[ 42.405462] skb_free_head+0x74/0xb0
[ 42.405806] skb_release_data+0x30e/0x3a0
[ 42.406198] skb_release_all+0x4a/0x60
[ 42.406563] consume_skb+0x113/0x2e0
[ 42.406910] skb_free_datagram+0x1a/0xe0
[ 42.407288] netlink_recvmsg+0x60d/0xe40
[ 42.407667] sock_recvmsg+0xd7/0x110
[ 42.408022] ___sys_recvmsg+0x25c/0x580
[ 42.408395] __sys_recvmsg+0xd6/0x190
[ 42.408753] SyS_recvmsg+0x2d/0x50
[ 42.409086] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.409513]
[ 42.409665] The buggy address belongs to the object at ffff88000969e780
[ 42.409665] which belongs to the cache kmalloc-512 of size 512
[ 42.410846] The buggy address is located 24 bytes inside of
[ 42.410846] 512-byte region [ffff88000969e780, ffff88000969e980)
[ 42.411941] The buggy address belongs to the page:
[ 42.412405] page:ffffea000025a780 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0
[ 42.413298] flags: 0x100000000008100(slab|head)
[ 42.413729] raw: 0100000000008100 0000000000000000 0000000000000000 00000001800c000c
[ 42.414387] raw: ffffea00002a9500 0000000900000007 ffff88000c401280 0000000000000000
[ 42.415074] page dumped because: kasan: bad access detected
[ 42.415604]
[ 42.415757] Memory state around the buggy address:
[ 42.416222] ffff88000969e880: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.416904] ffff88000969e900: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.417591] >ffff88000969e980: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 42.418273] ^
[ 42.418588] ffff88000969ea00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419273] ffff88000969ea80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419882] ==================================================================
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Craig Gallek <kraig@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
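The fix the message above hints at amounts to bounds-checking every step of the extension-header walk instead of trusting nexthdr. A simplified, self-contained sketch of the pattern (generic C, not the kernel's ip6_find_1stfrag):

```c
#include <stddef.h>
#include <stdint.h>

struct ext_hdr {
    uint8_t nexthdr;
    uint8_t hdrlen;     /* in 8-octet units, excluding the first 8 */
};

/* Walk extension headers without ever stepping past 'len': return
 * the offset of the fragment header, or -1 instead of reading
 * outside the buffer. */
static int find_frag_offset(const uint8_t *buf, size_t len)
{
    size_t off = 0;

    while (off + sizeof(struct ext_hdr) <= len) {
        const struct ext_hdr *h = (const struct ext_hdr *)(buf + off);
        size_t hlen = ((size_t)h->hdrlen + 1) * 8;

        if (off + hlen > len)
            return -1;            /* would read out of bounds */
        if (h->nexthdr == 44)     /* NEXTHDR_FRAGMENT */
            return (int)off;
        off += hlen;
    }
    return -1;                    /* ran out of data: error, not guess */
}
```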
static int vfswrap_statvfs(struct vfs_handle_struct *handle, const char *path, vfs_statvfs_struct *statbuf)
{
return sys_statvfs(path, statbuf);
}
|
Safe
|
[
"CWE-665"
] |
samba
|
30e724cbff1ecd90e5a676831902d1e41ec1b347
|
1.304607454056103e+38
| 4 |
FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero
Otherwise num_volumes and the end marker can return uninitialized data
to the client.
Signed-off-by: Christof Schmitt <christof.schmitt@us.ibm.com>
Reviewed-by: Jeremy Allison <jra@samba.org>
Reviewed-by: Simo Sorce <idra@samba.org>
| 0 |
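The samba fix above is a zero-initialization: if the reply buffer starts zeroed, fields the server never fills (num_volumes, the end marker) cannot carry stale heap contents back to the client. In generic terms that is calloc (or an explicit memset) instead of a bare malloc; a sketch with hypothetical types:

```c
#include <stdlib.h>

struct shadow_copy_entry { char label[25]; };

/* calloc guarantees the reply array starts zeroed, so unset fields
 * cannot leak whatever the allocator left behind in the memory. */
static struct shadow_copy_entry *alloc_reply(size_t n)
{
    return calloc(n, sizeof(struct shadow_copy_entry));
}
```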
void sqlite3ExprListCheckLength(
Parse *pParse,
ExprList *pEList,
const char *zObject
){
int mx = pParse->db->aLimit[SQLITE_LIMIT_COLUMN];
testcase( pEList && pEList->nExpr==mx );
testcase( pEList && pEList->nExpr==mx+1 );
if( pEList && pEList->nExpr>mx ){
sqlite3ErrorMsg(pParse, "too many columns in %s", zObject);
}
}
|
Safe
|
[
"CWE-476"
] |
sqlite
|
57f7ece78410a8aae86aa4625fb7556897db384c
|
2.9154698446651464e+38
| 12 |
Fix a problem that comes up when using generated columns that evaluate to a
constant in an index and then making use of that index in a join.
FossilOrigin-Name: 8b12e95fec7ce6e0de82a04ca3dfcf1a8e62e233b7382aa28a8a9be6e862b1af
| 0 |
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
typedef struct _LineSegment
{
double
p,
q;
} LineSegment;
LineSegment
dx,
dy,
inverse_slope,
slope,
theta;
MagickBooleanType
closed_path;
double
delta_theta,
dot_product,
mid,
miterlimit;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*path_p,
*path_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
max_strokes,
number_vertices;
ssize_t
j,
n,
p,
q;
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
max_strokes=2*number_vertices+6*BezierQuantum+360;
path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_p));
path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_q));
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) ||
(polygon_primitive == (PrimitiveInfo *) NULL))
return((PrimitiveInfo *) NULL);
(void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t)
number_vertices*sizeof(*polygon_primitive));
closed_path=
(primitive_info[number_vertices-1].point.x == primitive_info[0].point.x) &&
(primitive_info[number_vertices-1].point.y == primitive_info[0].point.y) ?
MagickTrue : MagickFalse;
if ((draw_info->linejoin == RoundJoin) ||
((draw_info->linejoin == MiterJoin) && (closed_path != MagickFalse)))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
n=(ssize_t) number_vertices-1L;
slope.p=DrawEpsilonReciprocal(dx.p)*dy.p;
inverse_slope.p=(-1.0*DrawEpsilonReciprocal(slope.p));
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*
mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
path_q[p++]=box_q[0];
path_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=DrawEpsilonReciprocal(dx.q)*dy.q;
inverse_slope.q=(-1.0*DrawEpsilonReciprocal(slope.q));
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360))
{
if (~max_strokes < (6*BezierQuantum+360))
{
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
}
else
{
max_strokes+=6*BezierQuantum+360;
path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes,
sizeof(*path_p));
path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes,
sizeof(*path_q));
}
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL))
{
if (path_p != (PointInfo *) NULL)
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
if (path_q != (PointInfo *) NULL)
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
return((PrimitiveInfo *) NULL);
}
}
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=(double) (2.0*MagickPI);
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
path_q[q].x=box_q[1].x;
path_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
path_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=(double) (2.0*MagickPI);
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
path_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
path_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
path_p[p++]=box_p[1];
path_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
Safe
|
[
"CWE-399",
"CWE-119"
] |
ImageMagick
|
726812fa2fa7ce16bcf58f6e115f65427a1c0950
|
1.94513313532408e+38
| 403 |
Prevent buffer overflow in magick/draw.c
| 0 |
static bool dce110_validate_surface_sets(
struct dc_state *context)
{
int i, j;
for (i = 0; i < context->stream_count; i++) {
if (context->stream_status[i].plane_count == 0)
continue;
if (context->stream_status[i].plane_count > 2)
return false;
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =
context->stream_status[i].plane_states[j];
/* underlay validation */
if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
if ((plane->src_rect.width > 1920 ||
plane->src_rect.height > 1080))
return false;
/* we don't have the logic to support underlay
* only yet so block the use case where we get
* NV12 plane as top layer
*/
if (j == 0)
return false;
/* irrespective of plane format,
* stream should be RGB encoded
*/
if (context->streams[i]->timing.pixel_encoding
!= PIXEL_ENCODING_RGB)
return false;
}
}
}
return true;
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
104c307147ad379617472dd91a5bcb368d72bd6d
|
5.003503595247223e+37
| 44 |
drm/amd/display: prevent memory leak
In dcn*_create_resource_pool, the allocated memory should be released if
pool construction fails.
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
| 0 |
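The leak pattern named in the drm/amd/display message — allocate a pool, then return NULL on a failed construct without freeing — is fixed by releasing the allocation on the error path. A generic sketch (hypothetical names; the real change is in the dcn*_create_resource_pool functions):

```c
#include <stdbool.h>
#include <stdlib.h>

struct resource_pool { int placeholder; };

static bool construct_pool(struct resource_pool *pool)
{
    (void)pool;
    return false;   /* pretend construction failed */
}

static struct resource_pool *create_resource_pool(void)
{
    struct resource_pool *pool = calloc(1, sizeof(*pool));

    if (!pool)
        return NULL;
    if (!construct_pool(pool)) {
        free(pool);   /* the missing release: don't leak on failure */
        return NULL;
    }
    return pool;
}
```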
TS_RESP *d2i_TS_RESP_bio(BIO *bp, TS_RESP **a)
{
return (TS_RESP *) ASN1_d2i_bio((char *(*)()) TS_RESP_new,
(char *(*)()) d2i_TS_RESP,
bp, (unsigned char **) a);
}
|
Safe
|
[] |
openssl
|
c7235be6e36c4bef84594aa3b2f0561db84b63d8
|
2.9608666678922815e+38
| 6 |
RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <zglozik@opentsa.org>
Reviewed by: Ulf Moeller
| 0 |
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
MagickRealType
*quantize_error;
/*
Enable rapid reduction of the number of unique colors.
*/
quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (MagickRealType *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
MagickRealTypeCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(MagickRealType *) RelinquishMagickMemory(
quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
|
Safe
|
[
"CWE-125"
] |
ImageMagick6
|
e2a21735e3a3f3930bd431585ec36334c4c2eb77
|
1.4103458344466706e+38
| 50 |
https://github.com/ImageMagick/ImageMagick/issues/1540
| 0 |
isdn_ppp_dial_slave(char *name)
{
#ifdef CONFIG_ISDN_MPP
isdn_net_dev *ndev;
isdn_net_local *lp;
struct net_device *sdev;
if (!(ndev = isdn_net_findif(name)))
return 1;
lp = ndev->local;
if (!(lp->flags & ISDN_NET_CONNECTED))
return 5;
sdev = lp->slave;
while (sdev) {
isdn_net_local *mlp = netdev_priv(sdev);
if (!(mlp->flags & ISDN_NET_CONNECTED))
break;
sdev = mlp->slave;
}
if (!sdev)
return 2;
isdn_net_dial_req(netdev_priv(sdev));
return 0;
#else
return -1;
#endif
}
|
Safe
|
[] |
linux
|
4ab42d78e37a294ac7bc56901d563c642e03c4ae
|
8.461697930617442e+37
| 29 |
ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <guoyonggang@360.cn>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
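The range check described above sits at the top of slhc_init() and turns bad slot counts into an error pointer instead of a silent clamp or a later NULL dereference. A self-contained sketch of that shape, with a minimal stand-in for the kernel's ERR_PTR convention:

```c
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Minimal stand-in for the kernel's ERR_PTR convention. */
static inline void *ERR_PTR(long err) { return (void *)(intptr_t)err; }

struct slcompress { int rslots, tslots; };

struct slcompress *slhc_init_sketch(int rslots, int tslots)
{
    /* Validate both slot counts up front (CVE-2015-7799): an
     * out-of-range value is an error, not "treat as 0". VJ slot
     * IDs fit in a byte, hence the 255 cap. */
    if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
        return ERR_PTR(-EINVAL);

    /* ... allocation of the compression state would follow ... */
    return NULL;   /* elided in this sketch */
}
```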
static void mqtt_cb(struct mg_connection *c, int ev, void *ev_data,
void *fn_data) {
if (ev == MG_EV_READ) {
for (;;) {
struct mg_mqtt_message mm;
int rc = mg_mqtt_parse(c->recv.buf, c->recv.len, &mm);
if (rc == MQTT_MALFORMED) {
LOG(LL_ERROR, ("%lu MQTT malformed message", c->id));
c->is_closing = 1;
break;
} else if (rc == MQTT_OK) {
LOG(LL_VERBOSE_DEBUG,
("%p MQTT CMD %d len %d [%.*s]", c->fd, mm.cmd, (int) mm.dgram.len,
(int) mm.data.len, mm.data.ptr));
switch (mm.cmd) {
case MQTT_CMD_CONNACK:
mg_call(c, MG_EV_MQTT_OPEN, &mm.ack);
if (mm.ack == 0) {
LOG(LL_INFO, ("%lu Connected", c->id));
} else {
LOG(LL_ERROR, ("%lu MQTT auth failed, code %d", c->id, mm.ack));
c->is_closing = 1;
}
break;
case MQTT_CMD_PUBLISH: {
LOG(LL_DEBUG, ("%lu [%.*s] -> [%.*s]", c->id, (int) mm.topic.len,
mm.topic.ptr, (int) mm.data.len, mm.data.ptr));
mg_call(c, MG_EV_MQTT_MSG, &mm);
break;
}
}
mg_call(c, MG_EV_MQTT_CMD, &mm);
mg_iobuf_del(&c->recv, 0, mm.dgram.len);
} else {
break;
}
}
}
(void) ev_data;
(void) fn_data;
}
|
Safe
|
[
"CWE-552"
] |
mongoose
|
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
|
3.339833896253595e+38
| 41 |
Protect against the directory traversal in mg_upload()
| 0 |
ga_grow(garray_T *gap, int n)
{
if (gap->ga_maxlen - gap->ga_len < n)
return ga_grow_inner(gap, n);
return OK;
}
|
Safe
|
[
"CWE-416"
] |
vim
|
9f1a39a5d1cd7989ada2d1cb32f97d84360e050f
|
3.5291964814208935e+37
| 6 |
patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end.
| 0 |
static inline void native_load_gdt(const struct desc_ptr *dtr)
{
asm volatile("lgdt %0"::"m" (*dtr));
}
|
Safe
|
[
"CWE-119"
] |
linux-2.6
|
5ac37f87ff18843aabab84cf75b2f8504c2d81fe
|
3.3762003998424696e+38
| 4 |
x86: fix ldt limit for 64 bit
Fix size of LDT entries. On x86-64, ldt_desc is a double-sized descriptor.
Signed-off-by: Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| 0 |
skip_sfx(struct archive_read *a)
{
const void *h;
const char *p, *q;
size_t skip, total;
ssize_t bytes, window;
total = 0;
window = 4096;
while (total + window <= (1024 * 128)) {
h = __archive_read_ahead(a, window, &bytes);
if (h == NULL) {
/* Remaining bytes are less than window. */
window >>= 1;
if (window < 0x40)
goto fatal;
continue;
}
if (bytes < 0x40)
goto fatal;
p = h;
q = p + bytes;
/*
* Scan ahead until we find something that looks
* like the RAR header.
*/
while (p + 7 < q) {
if (memcmp(p, RAR_SIGNATURE, 7) == 0) {
skip = p - (const char *)h;
__archive_read_consume(a, skip);
return (ARCHIVE_OK);
}
p += 0x10;
}
skip = p - (const char *)h;
__archive_read_consume(a, skip);
total += skip;
}
fatal:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Couldn't find out RAR header");
return (ARCHIVE_FATAL);
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
libarchive
|
05caadc7eedbef471ac9610809ba683f0c698700
|
2.3417573649543265e+38
| 44 |
Issue 719: Fix for TALOS-CAN-154
A RAR file with an invalid zero dictionary size was not being
rejected, leading to a zero-sized allocation for the dictionary
storage which was then overwritten during the dictionary initialization.
Thanks to the Open Source and Threat Intelligence project at Cisco for
reporting this.
| 0 |
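Per the libarchive message, the core of the fix is rejecting the zero dictionary size before any allocation, so a crafted RAR header can no longer yield a zero-byte buffer that the dictionary initialization then overruns. A generic sketch of the guard (not libarchive's exact code):

```c
#include <stdlib.h>

/* Refuse a declared dictionary size of zero: allocating 0 bytes and
 * then initializing the dictionary writes out of bounds (CWE-787). */
static void *alloc_dictionary(size_t dict_size)
{
    if (dict_size == 0)
        return NULL;   /* invalid header field: reject the archive */
    return calloc(dict_size, 1);
}
```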
static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
struct dst_entry *dst = &xdst->u.dst;
if (xdst->route == NULL) {
/* Dummy bundle - if it has xfrms we were not
* able to build bundle as template resolution failed.
* It means we need to try again resolving. */
if (xdst->num_xfrms > 0)
return NULL;
} else if (dst->flags & DST_XFRM_QUEUE) {
return NULL;
} else {
/* Real bundle */
if (stale_bundle(dst))
return NULL;
}
dst_hold(dst);
return flo;
}
|
Safe
|
[
"CWE-125"
] |
ipsec
|
7bab09631c2a303f87a7eb7e3d69e888673b9b7e
|
5.6185377674835715e+37
| 22 |
xfrm: policy: check policy direction value
The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used
as an array index. This can lead to an out-of-bounds access, kernel lockup and
DoS. Add a check for the 'dir' value.
This fixes CVE-2017-11600.
References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928
Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
Cc: <stable@vger.kernel.org> # v2.6.21-rc1
Reported-by: "bo Zhang" <zhangbo5891001@gmail.com>
Signed-off-by: Vladis Dronov <vdronov@redhat.com>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
| 0 |
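Since 'dir' is user-controlled and indexes fixed-size per-direction state, the xfrm fix reduces to one bounds check before the index is used. An illustrative standalone sketch (XFRM_POLICY_MAX mirrors the kernel constant; the function body is hypothetical):

```c
#include <errno.h>

#define XFRM_POLICY_MAX 3   /* in, out, fwd */

struct dir_state { int count; };

static struct dir_state per_dir[XFRM_POLICY_MAX];

/* 'dir' arrives from userspace: validate it before using it as an
 * array index, otherwise a large value reads past per_dir[]. */
int xfrm_migrate_sketch(unsigned int dir)
{
    if (dir >= XFRM_POLICY_MAX)
        return -EINVAL;
    per_dir[dir].count++;
    return 0;
}
```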
generate_pac(kdc_request_t r, Key *skey)
{
krb5_error_code ret;
krb5_pac p = NULL;
krb5_data data;
ret = _kdc_pac_generate(r->context, r->client, &p);
if (ret) {
_kdc_r_log(r, 0, "PAC generation failed for -- %s",
r->client_name);
return ret;
}
if (p == NULL)
return 0;
ret = _krb5_pac_sign(r->context, p, r->et.authtime,
r->client->entry.principal,
&skey->key, /* Server key */
&skey->key, /* FIXME: should be krbtgt key */
&data);
krb5_pac_free(r->context, p);
if (ret) {
_kdc_r_log(r, 0, "PAC signing failed for -- %s",
r->client_name);
return ret;
}
ret = _kdc_tkt_add_if_relevant_ad(r->context, &r->et,
KRB5_AUTHDATA_WIN2K_PAC,
&data);
krb5_data_free(&data);
return ret;
}
|
Safe
|
[
"CWE-476"
] |
heimdal
|
1a6a6e462dc2ac6111f9e02c6852ddec4849b887
|
2.367343385355025e+38
| 34 |
Security: Avoid NULL structure pointer member dereference
This can happen in the error path when processing malformed AS
requests with a NULL client name. Bug originally introduced on
Fri Feb 13 09:26:01 2015 +0100 in commit:
a873e21d7c06f22943a90a41dc733ae76799390d
kdc: base _kdc_fast_mk_error() on krb5_mk_error_ext()
Original patch by Jeffrey Altman <jaltman@secure-endpoints.com>
| 0 |
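The heimdal crash above is a NULL structure-member dereference in an error path; the remedy is guarding the pointer before reaching through it. A minimal sketch of the pattern (hypothetical types, not heimdal's structures):

```c
#include <stdio.h>

struct client_entry { const char *principal_name; };
struct kdc_request  { struct client_entry *client; };

/* For a malformed AS request, r->client can be NULL: check the
 * pointer chain before dereferencing it for the log message. */
static void log_failure(const struct kdc_request *r)
{
    const char *name = (r->client && r->client->principal_name)
                           ? r->client->principal_name
                           : "<unknown client>";
    fprintf(stderr, "PAC generation failed for -- %s\n", name);
}
```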
static void _php_curl_close(zend_resource *rsrc)
{
php_curl *ch = (php_curl *) rsrc->ptr;
_php_curl_close_ex(ch);
}
|
Safe
|
[] |
php-src
|
124fb22a13fafa3648e4e15b4f207c7096d8155e
|
1.2407060011397133e+38
| 5 |
Fixed bug #68739 #68740 #68741
| 0 |
relay_websocket_send_http (struct t_relay_client *client,
const char *http)
{
char *message;
int length;
length = 32 + strlen (http) + 1;
message = malloc (length);
if (message)
{
snprintf (message, length, "HTTP/1.1 %s\r\n\r\n", http);
relay_client_send (client, RELAY_CLIENT_MSG_STANDARD,
message, strlen (message), NULL);
free (message);
}
}
|
Safe
|
[
"CWE-125"
] |
weechat
|
8b1331f98de1714bae15a9ca2e2b393ba49d735b
|
3.2180756225567106e+36
| 16 |
relay: fix crash when decoding a malformed websocket frame
| 0 |
uint32 pack_length() const { return Type_handler_datetime::hires_bytes(dec); }
|
Safe
|
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
|
1.2143867750102402e+38
| 1 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at the parsing stage. Since
the expr item is allocated on expr_arena, all the items it contains must
be allocated on expr_arena too; otherwise fix_session_expr() will
encounter a prematurely freed item.
When a table is reopened from the cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs(), but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above, the expr update must be done on expr_arena, as new
items may be created. This was a bug in fix_session_expr_for_read() and
was just not reproduced because there was no second refix. Now the refix
is done in more cases, so it does reproduce. Tests affected: vcol.binlog
2. Also, the name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <serg@mariadb.org>
| 0 |
vte_sequence_handler_ts (VteTerminal *terminal, GValueArray *params)
{
terminal->pvt->screen->status_line = TRUE;
terminal->pvt->screen->status_line_changed = TRUE;
g_string_truncate(terminal->pvt->screen->status_line_contents, 0);
}
|
Safe
|
[] |
vte
|
58bc3a942f198a1a8788553ca72c19d7c1702b74
|
9.342963439227367e+37
| 6 |
fix bug #548272
svn path=/trunk/; revision=2365
| 0 |
static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_fwd_netdev *priv = nft_expr_priv(expr);
int oif = ctx->regs[priv->sreg_dev].data.data[0];
return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif);
}
|
Safe
|
[
"CWE-269"
] |
nf
|
b1a5983f56e371046dcf164f90bfaf704d2b89f6
|
1.0609942286502013e+38
| 9 |
netfilter: nf_tables_offload: incorrect flow offload action array size
The immediate verdict expression needs to allocate one slot in the flow
offload action array; the immediate data expression, however, does not.
The fwd and dup expressions also need to allocate one slot, but this is missing.
Add a new offload_action interface to report if this expression needs to
allocate one slot in the flow offload action array.
Fixes: be2861dc36d7 ("netfilter: nft_{fwd,dup}_netdev: add offload support")
Reported-and-tested-by: Nick Gregory <Nick.Gregory@Sophos.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
| 0 |
void set_pose_yaw(float pose_yaw) { pose_yaw_ = pose_yaw; }
|
Safe
|
[
"CWE-20"
] |
libvpx
|
f00890eecdf8365ea125ac16769a83aa6b68792d
|
2.392938787714131e+38
| 1 |
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf
https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf
Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
| 0 |
void emplace_attr(std::string&& key, buffer::list&& bl) {
attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
}
|
Safe
|
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
|
1.6135939908280045e+38
| 3 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <joao@suse.de>
Signed-off-by: Abhishek Lekshmanan <abhishek@suse.com>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master, g_conf(), isn't in mimic yet, use the g_conf
global structure; also make rgw_op use the value from the req_info ceph context,
as we do for all the requests.
| 0 |
_copyInsertStmt(const InsertStmt *from)
{
InsertStmt *newnode = makeNode(InsertStmt);
COPY_NODE_FIELD(relation);
COPY_NODE_FIELD(cols);
COPY_NODE_FIELD(selectStmt);
COPY_NODE_FIELD(returningList);
COPY_NODE_FIELD(withClause);
return newnode;
}
|
Safe
|
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
|
2.278839050596092e+37
| 12 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
| 0 |
bit_read_BE (Bit_Chain *restrict dat, double *restrict x, double *restrict y,
double *restrict z)
{
if (dat->version >= R_2000 && bit_read_B (dat))
{
*x = 0.0;
*y = 0.0;
*z = 1.0;
}
else
{
*x = bit_read_BD (dat);
*y = bit_read_BD (dat);
*z = bit_read_BD (dat);
}
}
|
Safe
|
[
"CWE-703",
"CWE-125"
] |
libredwg
|
95cc9300430d35feb05b06a9badf678419463dbe
|
3.829815249993916e+37
| 16 |
encode: protect from stack under-flow
From GH #178 fuzzing
| 0 |
static void Fill(const Eigen::half* data, size_t n, TensorProto* proto) {
proto->mutable_half_val()->Reserve(n);
for (size_t i = 0; i < n; ++i) {
proto->mutable_half_val()->AddAlreadyReserved(
Eigen::numext::bit_cast<uint16>(data[i]));
}
}
|
Safe
|
[
"CWE-345"
] |
tensorflow
|
abcced051cb1bd8fb05046ac3b6023a7ebcc4578
|
3.2270336426833517e+38
| 7 |
Prevent crashes when loading tensor slices with unsupported types.
Also fix the `Tensor(const TensorShape&)` constructor swapping the LOG(FATAL)
messages for the unset and unsupported types.
PiperOrigin-RevId: 392695027
Change-Id: I4beda7db950db951d273e3259a7c8534ece49354
| 0 |
static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}
|
Safe
|
[
"CWE-119"
] |
net
|
5294b83086cc1c35b4efeca03644cf9d12282e5b
|
7.922979619981016e+37
| 4 |
macsec: dynamically allocate space for sglist
We call skb_cow_data, which is good anyway to ensure we can actually
modify the skb as such (another error from prior). Now that we have the
number of fragments required, we can safely allocate exactly that amount
of memory.
Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
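The macsec message's approach — ask skb_cow_data() how many fragments there are, then size the scatterlist from that answer — replaces a fixed-size array with an exact allocation. A generic, self-contained sketch of the sizing step (stub types; the real code uses the kernel's scatterlist API):

```c
#include <stdlib.h>

struct scatterlist { void *page; unsigned int offset, length; };

/* Allocate exactly as many sg entries as the packet has fragments,
 * instead of a fixed-size array a long fragment chain can overflow.
 * 'num_frags' stands in for skb_cow_data()'s return value. */
static struct scatterlist *alloc_sg_table(int num_frags)
{
    if (num_frags < 0)
        return NULL;   /* skb_cow_data() reported an error */
    return calloc((size_t)num_frags, sizeof(struct scatterlist));
}
```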
static int proc_pid_syscall(struct task_struct *task, char *buffer)
{
long nr;
unsigned long args[6], sp, pc;
int res = lock_trace(task);
if (res)
return res;
if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
res = sprintf(buffer, "running\n");
else if (nr < 0)
res = sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
else
res = sprintf(buffer,
"%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
nr,
args[0], args[1], args[2], args[3], args[4], args[5],
sp, pc);
unlock_trace(task);
return res;
}
|
Safe
|
[] |
linux
|
0499680a42141d86417a8fbaa8c8db806bea1201
|
1.4180821343759608e+38
| 21 |
procfs: add hidepid= and gid= mount options
Add support for mount options to restrict access to /proc/PID/
directories. The default backward-compatible "relaxed" behaviour is left
untouched.
The first mount option is called "hidepid" and its value defines how much
info about processes we want to be available for non-owners:
hidepid=0 (default) means the old behavior - anybody may read all
world-readable /proc/PID/* files.
hidepid=1 means users may not access any /proc/<pid>/ directories, but
their own. Sensitive files like cmdline, sched*, status are now protected
against other users. As permission checking is done in proc_pid_permission()
and files' permissions are left untouched, programs expecting specific
files' modes are not confused.
hidepid=2 means hidepid=1 plus all /proc/PID/ will be invisible to other
users. It doesn't mean that it hides whether a process exists (it can be
learned by other means, e.g. by kill -0 $PID), but it hides the process's euid
and egid. It complicates an intruder's task of gathering info about running
processes, whether some daemon runs with elevated privileges, whether
another user runs some sensitive program, whether other users run any
program at all, etc.
gid=XXX defines a group that will be able to gather all processes' info
(as in hidepid=0 mode). This group should be used instead of putting
nonroot user in sudoers file or something. However, untrusted users (like
daemons, etc.) which are not supposed to monitor the tasks in the whole
system should not be added to the group.
hidepid=1 or higher is designed to restrict access to procfs files, which
might reveal some sensitive private information like precise keystrokes
timings:
http://www.openwall.com/lists/oss-security/2011/11/05/3
hidepid=1/2 doesn't break monitoring userspace tools. ps, top, pgrep, and
conky gracefully handle EPERM/ENOENT and behave as if the current user is
the only user running processes. pstree shows the process subtree which
contains "pstree" process.
Note: the patch doesn't deal with setuid/setgid issues of keeping
preopened descriptors of procfs files (like
https://lkml.org/lkml/2011/2/7/368). We rely on the fact that leaked
information like the scheduling counters of setuid apps doesn't threaten
anybody's privacy - only the user who started the setuid program may read the
counters.
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Randy Dunlap <rdunlap@xenotime.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Greg KH <greg@kroah.com>
Cc: Theodore Tso <tytso@MIT.EDU>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: James Morris <jmorris@namei.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
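A hedged usage sketch (not part of the patch): remounting /proc with the new options from C via mount(2); the gid value 1000 is an assumed example monitoring group.
/* Remount /proc with hidepid=2, exempting an assumed monitoring group. */
#include <stdio.h>
#include <sys/mount.h>
int main(void)
{
    if (mount("proc", "/proc", "proc", MS_REMOUNT,
              "hidepid=2,gid=1000") != 0) {
        perror("mount");
        return 1;
    }
    return 0;
}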
| 0 |
rb_str_sub_bang(argc, argv, str)
int argc;
VALUE *argv;
VALUE str;
{
VALUE pat, repl, match;
struct re_registers *regs;
int iter = 0;
int tainted = 0;
long plen;
if (argc == 1 && rb_block_given_p()) {
iter = 1;
}
else if (argc == 2) {
repl = argv[1];
StringValue(repl);
if (OBJ_TAINTED(repl)) tainted = 1;
}
else {
rb_raise(rb_eArgError, "wrong number of arguments (%d for 2)", argc);
}
pat = get_pat(argv[0], 1);
if (rb_reg_search(pat, str, 0, 0) >= 0) {
match = rb_backref_get();
regs = RMATCH(match)->regs;
if (iter) {
char *p = RSTRING(str)->ptr; long len = RSTRING(str)->len;
rb_match_busy(match);
repl = rb_obj_as_string(rb_yield(rb_reg_nth_match(0, match)));
str_mod_check(str, p, len);
str_frozen_check(str);
rb_backref_set(match);
}
else {
repl = rb_reg_regsub(repl, str, regs);
}
rb_str_modify(str);
if (OBJ_TAINTED(repl)) tainted = 1;
plen = END(0) - BEG(0);
if (RSTRING(repl)->len > plen) {
RESIZE_CAPA(str, RSTRING(str)->len + RSTRING(repl)->len - plen);
}
if (RSTRING(repl)->len != plen) {
memmove(RSTRING(str)->ptr + BEG(0) + RSTRING(repl)->len,
RSTRING(str)->ptr + BEG(0) + plen,
RSTRING(str)->len - BEG(0) - plen);
}
memcpy(RSTRING(str)->ptr + BEG(0),
RSTRING(repl)->ptr, RSTRING(repl)->len);
RSTRING(str)->len += RSTRING(repl)->len - plen;
RSTRING(str)->ptr[RSTRING(str)->len] = '\0';
if (tainted) OBJ_TAINT(str);
return str;
}
return Qnil;
}
|
Safe
|
[
"CWE-20"
] |
ruby
|
e926ef5233cc9f1035d3d51068abe9df8b5429da
|
1.859245825249349e+38
| 61 |
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export.
* string.c (rb_str_tmp_new), intern.h: New function.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
| 0 |
static bool change_password(const char *remote_machine, const char *user_name,
const char *old_passwd, const char *new_passwd,
int local_flags)
{
NTSTATUS ret;
char *err_str = NULL;
char *msg_str = NULL;
if (demo_mode) {
printf("%s\n<p>", _("password change in demo mode rejected"));
return False;
}
if (remote_machine != NULL) {
ret = remote_password_change(remote_machine, user_name,
old_passwd, new_passwd, &err_str);
if (err_str != NULL)
printf("%s\n<p>", err_str);
SAFE_FREE(err_str);
return NT_STATUS_IS_OK(ret);
}
if(!initialize_password_db(True, NULL)) {
printf("%s\n<p>", _("Can't setup password database vectors."));
return False;
}
ret = local_password_change(user_name, local_flags, new_passwd,
&err_str, &msg_str);
if(msg_str)
printf("%s\n<p>", msg_str);
if(err_str)
printf("%s\n<p>", err_str);
SAFE_FREE(msg_str);
SAFE_FREE(err_str);
return NT_STATUS_IS_OK(ret);
}
|
Safe
|
[] |
samba
|
71225948a249f079120282740fcc39fd6faa880e
|
3.134631882646822e+38
| 39 |
swat: Use X-Frame-Options header to avoid clickjacking
Jann Horn reported a potential clickjacking vulnerability in SWAT where
the SWAT page could be embedded into an attacker's page using a frame or
iframe and then used to trick the user to change Samba settings.
Avoid this by telling the browser to refuse the frame embedding via the
X-Frame-Options: DENY header.
Signed-off-by: Kai Blin <kai@samba.org>
Fix bug #9576 - CVE-2013-0213: Clickjacking issue in SWAT.
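A minimal sketch of the idea behind the fix; the function name is an assumption, not SWAT's actual API.
#include <stdio.h>
/* Emit the anti-clickjacking header before the response body so
 * browsers refuse to render the page inside a frame or iframe. */
static void print_http_headers(void)
{
    printf("Content-Type: text/html\r\n");
    printf("X-Frame-Options: DENY\r\n");
    printf("\r\n");
}
int main(void)
{
    print_http_headers();
    return 0;
}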
| 0 |
dp_packet_reset_cutlen(struct dp_packet *b)
{
b->cutlen = 0;
}
|
Safe
|
[
"CWE-400"
] |
ovs
|
79349cbab0b2a755140eedb91833ad2760520a83
|
9.62515685568837e+37
| 4 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <joakim.hindersson@elastx.se>
Acked-by: Ilya Maximets <i.maximets@ovn.org>
Signed-off-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
| 0 |
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
int rc;
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
goto check_asce_limit;
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
if (filp || (flags & MAP_SHARED))
info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
else
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
if (addr & ~PAGE_MASK)
return addr;
check_asce_limit:
if (addr + len > current->mm->context.asce_limit) {
rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
}
return addr;
}
|
Safe
|
[
"CWE-119"
] |
linux
|
1be7107fbe18eed3e319a6c3e83c78254b693acb
|
1.2259602225814899e+38
| 44 |
mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <oleg@redhat.com>
Original-patch-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
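A simplified sketch of the gap accounting described above, close to the kernel's vm_start_gap(); the 1MB default corresponds to 256 pages at 4k.
/* Report the start of a stack vma as if it began stack_guard_gap bytes
 * earlier, so placement decisions keep the gap free. */
unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
    unsigned long vm_start = vma->vm_start;
    if (vma->vm_flags & VM_GROWSDOWN) {
        vm_start -= stack_guard_gap;
        if (vm_start > vma->vm_start)   /* clamp on underflow */
            vm_start = 0;
    }
    return vm_start;
}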
| 0 |
ClientHttpRequest::httpStart()
{
PROF_start(httpStart);
logType = LOG_TAG_NONE;
debugs(85, 4, logType.c_str() << " for '" << uri << "'");
/* no one should have touched this */
assert(out.offset == 0);
/* Use the Stream Luke */
clientStreamNode *node = (clientStreamNode *)client_stream.tail->data;
clientStreamRead(node, this, node->readBuffer);
PROF_stop(httpStart);
}
|
Safe
|
[
"CWE-116"
] |
squid
|
e7cf864f938f24eea8af0692c04d16790983c823
|
1.3296459721613068e+38
| 13 |
Handle more Range requests (#790)
Also removed some effectively unused code.
| 0 |
static void filter181(int16_t *data, int width, int height, ptrdiff_t stride)
{
int x, y;
/* horizontal filter */
for (y = 1; y < height - 1; y++) {
int prev_dc = data[0 + y * stride];
for (x = 1; x < width - 1; x++) {
int dc;
dc = -prev_dc +
data[x + y * stride] * 8 -
data[x + 1 + y * stride];
dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) * 10923 + 32768) >> 16;
prev_dc = data[x + y * stride];
data[x + y * stride] = dc;
}
}
/* vertical filter */
for (x = 1; x < width - 1; x++) {
int prev_dc = data[x];
for (y = 1; y < height - 1; y++) {
int dc;
dc = -prev_dc +
data[x + y * stride] * 8 -
data[x + (y + 1) * stride];
dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) * 10923 + 32768) >> 16;
prev_dc = data[x + y * stride];
data[x + y * stride] = dc;
}
}
}
|
Safe
|
[
"CWE-20",
"CWE-617"
] |
FFmpeg
|
bd27a9364ca274ca97f1df6d984e88a0700fb235
|
1.787743496922536e+38
| 35 |
avcodec/mpeg4videodec: Remove use of FF_PROFILE_MPEG4_SIMPLE_STUDIO as indicator of studio profile
The profile field is changed by code inside and outside the decoder,
so it's not a reliable indicator of the internal codec state.
Maintaining its consistency with studio_profile is messy.
It's easier to just avoid it and use only studio_profile
Fixes: assertion failure
Fixes: ffmpeg_crash_9.avi
Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
| 0 |
mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
{
u32 winner = 0;
int ret = 0;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) {
ret = -1;
} else if (!winner) {
mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n");
adapter->winner = 1;
} else {
mwifiex_dbg(adapter, ERROR,
"PCI-E is not the winner <%#x>", winner);
}
return ret;
}
|
Safe
|
[
"CWE-400",
"CWE-200",
"CWE-401"
] |
linux
|
d10dcb615c8e29d403a24d35f8310a7a53e3050c
|
2.730836657401263e+38
| 19 |
mwifiex: pcie: Fix memory leak in mwifiex_pcie_init_evt_ring
In mwifiex_pcie_init_evt_ring, a new skb is allocated which should be
released if mwifiex_map_pci_memory() fails. The release for skb and
card->evtbd_ring_vbase is added.
Fixes: 0732484b47b5 ("mwifiex: separate ring initialization and ring creation routines")
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Acked-by: Ganapathi Bhat <gbhat@marvell.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
| 0 |
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
u64 chunk_offset, u64 devid,
u64 physical_offset, u64 physical_len)
{
struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
struct extent_map *em;
struct map_lookup *map;
struct btrfs_device *dev;
u64 stripe_len;
bool found = false;
int ret = 0;
int i;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_offset, 1);
read_unlock(&em_tree->lock);
if (!em) {
btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
physical_offset, devid);
ret = -EUCLEAN;
goto out;
}
map = em->map_lookup;
stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
if (physical_len != stripe_len) {
btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
physical_offset, devid, em->start, physical_len,
stripe_len);
ret = -EUCLEAN;
goto out;
}
for (i = 0; i < map->num_stripes; i++) {
if (map->stripes[i].dev->devid == devid &&
map->stripes[i].physical == physical_offset) {
found = true;
if (map->verified_stripes >= map->num_stripes) {
btrfs_err(fs_info,
"too many dev extents for chunk %llu found",
em->start);
ret = -EUCLEAN;
goto out;
}
map->verified_stripes++;
break;
}
}
if (!found) {
btrfs_err(fs_info,
"dev extent physical offset %llu devid %llu has no corresponding chunk",
physical_offset, devid);
ret = -EUCLEAN;
}
/* Make sure no dev extent is beyond device boundary */
dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
if (!dev) {
btrfs_err(fs_info, "failed to find devid %llu", devid);
ret = -EUCLEAN;
goto out;
}
/* It's possible this device is a dummy for seed device */
if (dev->disk_total_bytes == 0) {
dev = find_device(fs_info->fs_devices->seed, devid, NULL);
if (!dev) {
btrfs_err(fs_info, "failed to find seed devid %llu",
devid);
ret = -EUCLEAN;
goto out;
}
}
if (physical_offset + physical_len > dev->disk_total_bytes) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
devid, physical_offset, physical_len,
dev->disk_total_bytes);
ret = -EUCLEAN;
goto out;
}
out:
free_extent_map(em);
return ret;
}
|
Vulnerable
|
[
"CWE-476",
"CWE-284"
] |
linux
|
09ba3bc9dd150457c506e4661380a6183af651c1
|
3.2943696453684054e+38
| 89 |
btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() do the same thing except
that the latter does not take the seed device into account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
| 1 |
ftrace_allocate_pages(unsigned long num_to_init)
{
struct ftrace_page *start_pg;
struct ftrace_page *pg;
int order;
int cnt;
if (!num_to_init)
return 0;
start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
return NULL;
/*
* Try to allocate as much as possible in one contiguous
* location that fills in all of the space. We want to
* waste as little space as possible.
*/
for (;;) {
cnt = ftrace_allocate_records(pg, num_to_init);
if (cnt < 0)
goto free_pages;
num_to_init -= cnt;
if (!num_to_init)
break;
pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg->next)
goto free_pages;
pg = pg->next;
}
return start_pg;
free_pages:
while (start_pg) {
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
free_pages((unsigned long)pg->records, order);
start_pg = pg->next;
kfree(pg);
pg = start_pg;
}
pr_info("ftrace: FAILED to allocate memory for functions\n");
return NULL;
}
|
Safe
|
[
"CWE-703"
] |
linux
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
|
1.734547682722656e+38
| 48 |
tracing: Fix possible NULL pointer dereferences
Currently set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However seq_open() is called only for FMODE_READ in
the fops->open() so that if an user tries to seek one of those file
when she open it for writing, it sees NULL seq_file and then panic.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(, "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/1365663302-2170-1-git-send-email-namhyung@kernel.org
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: stable@vger.kernel.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
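A sketch close to the ftrace_filter_lseek() the patch introduces: only forward to seq_lseek() when the file was opened for reading, since that is the only case in which a seq_file exists.
static loff_t ftrace_filter_lseek(struct file *file, loff_t offset,
                                  int whence)
{
    loff_t ret;
    if (file->f_mode & FMODE_READ)
        ret = seq_lseek(file, offset, whence);
    else
        file->f_pos = ret = 1;    /* no seq_file; nothing to seek */
    return ret;
}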
| 0 |
TfLiteStatus AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
TfLitePoolParams* params, OpData* data,
const TfLiteTensor* input,
TfLiteTensor* output) {
int32_t activation_min;
int32_t activation_max;
(void)CalculateActivationRangeQuantized(context, params->activation, output,
&activation_min, &activation_max);
#define TF_LITE_AVERAGE_POOL(type) \
tflite::PoolParams op_params; \
op_params.stride_height = params->stride_height; \
op_params.stride_width = params->stride_width; \
op_params.filter_height = params->filter_height; \
op_params.filter_width = params->filter_width; \
op_params.padding_values.height = data->padding.height; \
op_params.padding_values.width = data->padding.width; \
op_params.quantized_activation_min = activation_min; \
op_params.quantized_activation_max = activation_max; \
TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
GetTensorData<int8_t>(input), \
GetTensorShape(output), \
GetTensorData<int8_t>(output)))
if (kernel_type == kReference) {
TF_LITE_AVERAGE_POOL(reference_integer_ops);
} else {
TF_LITE_AVERAGE_POOL(optimized_integer_ops);
}
#undef TF_LITE_AVERAGE_POOL
return kTfLiteOk;
}
|
Safe
|
[
"CWE-703",
"CWE-835"
] |
tensorflow
|
dfa22b348b70bb89d6d6ec0ff53973bacb4f4695
|
1.9039624487323368e+38
| 31 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
| 0 |
pcx24b_print_page(gx_device_printer * pdev, gp_file * file)
{
pcx_header header;
header = pcx_header_prototype;
header.version = version_3_0;
header.bpp = 8;
header.nplanes = 3;
assign_ushort(header.palinfo, palinfo_color);
return pcx_write_page(pdev, file, &header, true);
}
|
Safe
|
[
"CWE-787"
] |
ghostpdl
|
2793769ff107d8d22dadd30c6e68cd781b569550
|
1.913798770821855e+38
| 11 |
Bug 701819: fixed ordering in if expression to avoid out-of-bounds access.
Fixes:
./sanbin/gs -dBATCH -dNOPAUSE -r965 -sOutputFile=tmp -sDEVICE=pcx16 ../bug-701819.pdf
| 0 |
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
return sbi->total_valid_block_count;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6
|
9.287223053437628e+37
| 4 |
f2fs: support swap file w/ DIO
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
| 0 |
get_policy_2_svc(gpol_arg *arg, struct svc_req *rqstp)
{
static gpol_ret ret;
kadm5_ret_t ret2;
char *prime_arg, *funcname;
gss_buffer_desc client_name,
service_name;
OM_uint32 minor_stat;
kadm5_principal_ent_rec caller_ent;
kadm5_server_handle_t handle;
const char *errmsg = NULL;
xdr_free(xdr_gpol_ret, &ret);
if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle)))
goto exit_func;
if ((ret.code = check_handle((void *)handle)))
goto exit_func;
ret.api_version = handle->api_version;
funcname = "kadm5_get_policy";
if (setup_gss_names(rqstp, &client_name, &service_name) < 0) {
ret.code = KADM5_FAILURE;
goto exit_func;
}
prime_arg = arg->name;
ret.code = KADM5_AUTH_GET;
if (!CHANGEPW_SERVICE(rqstp) && kadm5int_acl_check(handle->context,
rqst2name(rqstp),
ACL_INQUIRE, NULL, NULL))
ret.code = KADM5_OK;
else {
ret.code = kadm5_get_principal(handle->lhandle,
handle->current_caller,
&caller_ent,
KADM5_PRINCIPAL_NORMAL_MASK);
if (ret.code == KADM5_OK) {
if (caller_ent.aux_attributes & KADM5_POLICY &&
strcmp(caller_ent.policy, arg->name) == 0) {
ret.code = KADM5_OK;
} else ret.code = KADM5_AUTH_GET;
ret2 = kadm5_free_principal_ent(handle->lhandle,
&caller_ent);
ret.code = ret.code ? ret.code : ret2;
}
}
if (ret.code == KADM5_OK) {
ret.code = kadm5_get_policy(handle, arg->name, &ret.rec);
if( ret.code != 0 )
errmsg = krb5_get_error_message(handle->context, ret.code);
log_done(funcname,
((prime_arg == NULL) ? "(null)" : prime_arg), errmsg,
&client_name, &service_name, rqstp);
if (errmsg != NULL)
krb5_free_error_message(handle->context, errmsg);
} else {
log_unauth(funcname, prime_arg,
&client_name, &service_name, rqstp);
}
gss_release_buffer(&minor_stat, &client_name);
gss_release_buffer(&minor_stat, &service_name);
exit_func:
free_server_handle(handle);
return &ret;
}
|
Vulnerable
|
[
"CWE-119",
"CWE-772",
"CWE-401"
] |
krb5
|
83ed75feba32e46f736fcce0d96a0445f29b96c2
|
3.1535518113122447e+38
| 74 |
Fix leaks in kadmin server stubs [CVE-2015-8631]
In each kadmind server stub, initialize the client_name and
server_name variables, and release them in the cleanup handler. Many
of the stubs will otherwise leak the client and server name if
krb5_unparse_name() fails. Also make sure to free the prime_arg
variables in rename_principal_2_svc(), or we can leak the first one if
unparsing the second one fails. Discovered by Simo Sorce.
CVE-2015-8631:
In all versions of MIT krb5, an authenticated attacker can cause
kadmind to leak memory by supplying a null principal name in a request
which uses one. Repeating these requests will eventually cause
kadmind to exhaust all available memory.
CVSSv2 Vector: AV:N/AC:L/Au:S/C:N/I:N/A:C/E:POC/RL:OF/RC:C
ticket: 8343 (new)
target_version: 1.14-next
target_version: 1.13-next
tags: pullup
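A skeleton of the pattern the fix applies in each stub (the function name is assumed): buffers start as GSS_C_EMPTY_BUFFER and are released unconditionally in the cleanup path, so an early failure cannot leak them.
#include <gssapi/gssapi.h>
static int stub_skeleton(void)
{
    OM_uint32 minor_stat;
    gss_buffer_desc client_name = GSS_C_EMPTY_BUFFER;
    gss_buffer_desc service_name = GSS_C_EMPTY_BUFFER;
    int ret = 0;
    /* setup_gss_names() and the RPC work would go here; failures fall
     * through to the releases below instead of returning directly. */
    gss_release_buffer(&minor_stat, &client_name);
    gss_release_buffer(&minor_stat, &service_name);
    return ret;
}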
| 1 |
static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
ext4_lblk_t num, ext_prepare_callback func,
void *cbdata)
{
struct ext4_ext_path *path = NULL;
struct ext4_ext_cache cbex;
struct ext4_extent *ex;
ext4_lblk_t next, start = 0, end = 0;
ext4_lblk_t last = block + num;
int depth, exists, err = 0;
BUG_ON(func == NULL);
BUG_ON(inode == NULL);
while (block < last && block != EXT_MAX_BLOCK) {
num = last - block;
/* find extent for this block */
down_read(&EXT4_I(inode)->i_data_sem);
path = ext4_ext_find_extent(inode, block, path);
up_read(&EXT4_I(inode)->i_data_sem);
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
break;
}
depth = ext_depth(inode);
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
err = -EIO;
break;
}
ex = path[depth].p_ext;
next = ext4_ext_next_allocated_block(path);
exists = 0;
if (!ex) {
/* there is no extent yet, so try to allocate
* all requested space */
start = block;
end = block + num;
} else if (le32_to_cpu(ex->ee_block) > block) {
/* need to allocate space before found extent */
start = block;
end = le32_to_cpu(ex->ee_block);
if (block + num < end)
end = block + num;
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
/* need to allocate space after found extent */
start = block;
end = block + num;
if (end >= next)
end = next;
} else if (block >= le32_to_cpu(ex->ee_block)) {
/*
* some part of requested space is covered
* by found extent
*/
start = block;
end = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
if (block + num < end)
end = block + num;
exists = 1;
} else {
BUG();
}
BUG_ON(end <= start);
if (!exists) {
cbex.ec_block = start;
cbex.ec_len = end - start;
cbex.ec_start = 0;
} else {
cbex.ec_block = le32_to_cpu(ex->ee_block);
cbex.ec_len = ext4_ext_get_actual_len(ex);
cbex.ec_start = ext4_ext_pblock(ex);
}
if (unlikely(cbex.ec_len == 0)) {
EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
err = -EIO;
break;
}
err = func(inode, path, &cbex, ex, cbdata);
ext4_ext_drop_refs(path);
if (err < 0)
break;
if (err == EXT_REPEAT)
continue;
else if (err == EXT_BREAK) {
err = 0;
break;
}
if (ext_depth(inode) != depth) {
/* depth was changed. we have to realloc path */
kfree(path);
path = NULL;
}
block = cbex.ec_block + cbex.ec_len;
}
if (path) {
ext4_ext_drop_refs(path);
kfree(path);
}
return err;
}
|
Safe
|
[
"CWE-703"
] |
linux
|
667eff35a1f56fa74ce98a0c7c29a40adc1ba4e3
|
7.447627617064173e+37
| 114 |
ext4: reimplement convert and split_unwritten
Reimplement ext4_ext_convert_to_initialized() and
ext4_split_unwritten_extents() using ext4_split_extent()
Signed-off-by: Yongqiang Yang <xiaoqiangnk@gmail.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Tested-by: Allison Henderson <achender@linux.vnet.ibm.com>
| 0 |
getline_peek_skip_comments(evalarg_T *evalarg)
{
for (;;)
{
char_u *next = getline_peek(evalarg->eval_getline,
evalarg->eval_cookie);
char_u *p;
if (next == NULL)
break;
p = skipwhite(next);
if (*p != NUL && !vim9_comment_start(p))
return next;
(void)eval_next_line(evalarg);
}
return NULL;
}
|
Safe
|
[
"CWE-122",
"CWE-787"
] |
vim
|
605ec91e5a7330d61be313637e495fa02a6dc264
|
1.598828445119477e+38
| 17 |
patch 8.2.3847: illegal memory access when using a lambda with an error
Problem: Illegal memory access when using a lambda with an error.
Solution: Avoid skipping over the NUL after a string.
| 0 |
static inline int security_capset(struct cred *new,
const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
return cap_capset(new, old, effective, inheritable, permitted);
}
|
Safe
|
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
|
9.03758281662381e+36
| 8 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
| 0 |
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
unsigned long data)
{
unsigned long tmp;
int copied;
copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
return -EIO;
return put_user(tmp, (unsigned long __user *)data);
}
|
Safe
|
[
"CWE-264",
"CWE-269"
] |
linux
|
6994eefb0053799d2e07cd140df6c2ea106c41ee
|
1.7927331611000262e+38
| 11 |
ptrace: Fix ->ptracer_cred handling for PTRACE_TRACEME
Fix two issues:
When called for PTRACE_TRACEME, ptrace_link() would obtain an RCU
reference to the parent's objective credentials, then give that pointer
to get_cred(). However, the object lifetime rules for things like
struct cred do not permit unconditionally turning an RCU reference into
a stable reference.
PTRACE_TRACEME records the parent's credentials as if the parent was
acting as the subject, but that's not the case. If a malicious
unprivileged child uses PTRACE_TRACEME and the parent is privileged, and
at a later point, the parent process becomes attacker-controlled
(because it drops privileges and calls execve()), the attacker ends up
with control over two processes with a privileged ptrace relationship,
which can be abused to ptrace a suid binary and obtain root privileges.
Fix both of these by always recording the credentials of the process
that is requesting the creation of the ptrace relationship:
current_cred() can't change under us, and current is the proper subject
for access control.
This change is theoretically userspace-visible, but I am not aware of
any code that it will actually break.
Fixes: 64b875f7ac8a ("ptrace: Capture the ptracer's creds not PT_PTRACE_CAP")
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
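A simplified sketch of the fix's core change: both attach directions now record the credentials of the task requesting the relationship, which is always current, so current_cred() is stable and the proper subject for access control.
static void ptrace_link(struct task_struct *child,
                        struct task_struct *new_parent)
{
    __ptrace_link(child, new_parent, current_cred());
}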
| 0 |
void ConnectionManagerImpl::ActiveStream::addStreamEncoderFilterWorker(
StreamEncoderFilterSharedPtr filter, bool dual_filter) {
ActiveStreamEncoderFilterPtr wrapper(new ActiveStreamEncoderFilter(*this, filter, dual_filter));
filter->setEncoderFilterCallbacks(*wrapper);
// Note: configured encoder filters are prepended to encoder_filters_.
// This means that if filters are configured in the following order (assume all three filters are
// both decoder/encoder filters):
// http_filters:
// - A
// - B
// - C
// The encoder filter chain will iterate through filters C, B, A.
wrapper->moveIntoList(std::move(wrapper), encoder_filters_);
}
|
Safe
|
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
|
1.973967423495034e+38
| 14 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <mklein@lyft.com>
| 0 |
archive_wstrcat(struct archive_wstring *as, const wchar_t *p)
{
/* Ditto. */
return archive_wstrncat(as, p, 0x1000000);
}
|
Safe
|
[
"CWE-476"
] |
libarchive
|
42a3408ac7df1e69bea9ea12b72e14f59f7400c0
|
4.1191762252423497e+37
| 5 |
archive_strncat_l(): allocate and do not convert if length == 0
This ensures e.g. that archive_mstring_copy_mbs_len_l() does not set
aes_set = AES_SET_MBS with aes_mbs.s == NULL.
Resolves possible null-pointer dereference reported by OSS-Fuzz.
Reported-By: OSS-Fuzz issue 286
| 0 |
int ssl3_get_certificate_request(SSL *s)
{
int ok,ret=0;
unsigned long n,nc,l;
unsigned int llen, ctype_num,i;
X509_NAME *xn=NULL;
const unsigned char *p,*q;
unsigned char *d;
STACK_OF(X509_NAME) *ca_sk=NULL;
n=s->method->ssl_get_message(s,
SSL3_ST_CR_CERT_REQ_A,
SSL3_ST_CR_CERT_REQ_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
s->s3->tmp.cert_req=0;
if (s->s3->tmp.message_type == SSL3_MT_SERVER_DONE)
{
s->s3->tmp.reuse_message=1;
/* If we get here we don't need any cached handshake records
* as we won't be doing client auth.
*/
if (s->s3->handshake_buffer)
{
if (!ssl3_digest_cached_records(s))
goto err;
}
return(1);
}
if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE_REQUEST)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_WRONG_MESSAGE_TYPE);
goto err;
}
/* TLS does not like anon-DH with client cert */
if (s->version > SSL3_VERSION)
{
if (s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER);
goto err;
}
}
p=d=(unsigned char *)s->init_msg;
if ((ca_sk=sk_X509_NAME_new(ca_dn_cmp)) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_MALLOC_FAILURE);
goto err;
}
/* get the certificate types */
ctype_num= *(p++);
if (s->cert->ctypes)
{
OPENSSL_free(s->cert->ctypes);
s->cert->ctypes = NULL;
}
if (ctype_num > SSL3_CT_NUMBER)
{
/* If we exceed static buffer copy all to cert structure */
s->cert->ctypes = OPENSSL_malloc(ctype_num);
if (s->cert->ctypes == NULL)
{
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_MALLOC_FAILURE);
goto err;
}
memcpy(s->cert->ctypes, p, ctype_num);
s->cert->ctype_num = (size_t)ctype_num;
ctype_num=SSL3_CT_NUMBER;
}
for (i=0; i<ctype_num; i++)
s->s3->tmp.ctype[i]= p[i];
p+=p[-1];
if (SSL_USE_SIGALGS(s))
{
n2s(p, llen);
/* Check we have enough room for signature algorithms and
* following length value.
*/
if ((unsigned long)(p - d + llen + 2) > n)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
/* Clear certificate digests and validity flags */
for (i = 0; i < SSL_PKEY_NUM; i++)
{
s->cert->pkeys[i].digest = NULL;
s->cert->pkeys[i].valid_flags = 0;
}
if ((llen & 1) || !tls1_save_sigalgs(s, p, llen))
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_SIGNATURE_ALGORITHMS_ERROR);
goto err;
}
if (!tls1_process_sigalgs(s))
{
ssl3_send_alert(s,SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE);
goto err;
}
p += llen;
}
/* get the CA RDNs */
n2s(p,llen);
#if 0
{
FILE *out;
out=fopen("/tmp/vsign.der","w");
fwrite(p,1,llen,out);
fclose(out);
}
#endif
if ((unsigned long)(p - d + llen) != n)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_LENGTH_MISMATCH);
goto err;
}
for (nc=0; nc<llen; )
{
n2s(p,l);
if ((l+nc+2) > llen)
{
if ((s->options & SSL_OP_NETSCAPE_CA_DN_BUG))
goto cont; /* netscape bugs */
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_CA_DN_TOO_LONG);
goto err;
}
q=p;
if ((xn=d2i_X509_NAME(NULL,&q,l)) == NULL)
{
/* If netscape tolerance is on, ignore errors */
if (s->options & SSL_OP_NETSCAPE_CA_DN_BUG)
goto cont;
else
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_ASN1_LIB);
goto err;
}
}
if (q != (p+l))
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_CA_DN_LENGTH_MISMATCH);
goto err;
}
if (!sk_X509_NAME_push(ca_sk,xn))
{
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_MALLOC_FAILURE);
goto err;
}
p+=l;
nc+=l+2;
}
if (0)
{
cont:
ERR_clear_error();
}
/* we should setup a certificate to return.... */
s->s3->tmp.cert_req=1;
s->s3->tmp.ctype_num=ctype_num;
if (s->s3->tmp.ca_names != NULL)
sk_X509_NAME_pop_free(s->s3->tmp.ca_names,X509_NAME_free);
s->s3->tmp.ca_names=ca_sk;
ca_sk=NULL;
ret=1;
err:
if (ca_sk != NULL) sk_X509_NAME_pop_free(ca_sk,X509_NAME_free);
return(ret);
}
|
Safe
|
[
"CWE-310"
] |
openssl
|
ce325c60c74b0fa784f5872404b722e120e5cab0
|
4.239832064883689e+37
| 197 |
Only allow ephemeral RSA keys in export ciphersuites.
OpenSSL clients would tolerate temporary RSA keys in non-export
ciphersuites. It also had an option SSL_OP_EPHEMERAL_RSA which
enabled this server side. Remove both options as they are a
protocol violation.
Thanks to Karthikeyan Bhargavan for reporting this issue.
(CVE-2015-0204)
Reviewed-by: Matt Caswell <matt@openssl.org>
| 0 |
usage (int status)
{
if (su_mode == RUNUSER_MODE) {
fputs(USAGE_HEADER, stdout);
printf (_(" %s [options] -u <user> [[--] <command>]\n"), program_invocation_short_name);
printf (_(" %s [options] [-] [<user> [<argument>...]]\n"), program_invocation_short_name);
fputs (_("\n"
"Run <command> with the effective user ID and group ID of <user>. If -u is\n"
"not given, fall back to su(1)-compatible semantics and execute standard shell.\n"
"The options -c, -f, -l, and -s are mutually exclusive with -u.\n"), stdout);
fputs(USAGE_OPTIONS, stdout);
fputs (_(" -u, --user <user> username\n"), stdout);
} else {
fputs(USAGE_HEADER, stdout);
printf (_(" %s [options] [-] [<user> [<argument>...]]\n"), program_invocation_short_name);
fputs (_("\n"
"Change the effective user ID and group ID to that of <user>.\n"
"A mere - implies -l. If <user> is not given, root is assumed.\n"), stdout);
fputs(USAGE_OPTIONS, stdout);
}
fputs (_(" -m, -p, --preserve-environment do not reset environment variables\n"), stdout);
fputs (_(" -g, --group <group> specify the primary group\n"), stdout);
fputs (_(" -G, --supp-group <group> specify a supplemental group\n\n"), stdout);
fputs (_(" -, -l, --login make the shell a login shell\n"), stdout);
fputs (_(" -c, --command <command> pass a single command to the shell with -c\n"), stdout);
fputs (_(" --session-command <command> pass a single command to the shell with -c\n"
" and do not create a new session\n"), stdout);
fputs (_(" -f, --fast pass -f to the shell (for csh or tcsh)\n"), stdout);
fputs (_(" -s, --shell <shell> run <shell> if /etc/shells allows it\n"), stdout);
fputs(USAGE_SEPARATOR, stdout);
fputs(USAGE_HELP, stdout);
fputs(USAGE_VERSION, stdout);
printf(USAGE_MAN_TAIL(su_mode == SU_MODE ? "su(1)" : "runuser(1)"));
exit (status);
}
|
Safe
|
[
"CWE-362"
] |
util-linux
|
dffab154d29a288aa171ff50263ecc8f2e14a891
|
2.418940803946901e+38
| 42 |
su: properly clear child PID
Reported-by: Tobias Stöckmann <tobias@stoeckmann.org>
Signed-off-by: Karel Zak <kzak@redhat.com>
| 0 |
static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
|
Safe
|
[
"CWE-399"
] |
linux
|
6ec82562ffc6f297d0de36d65776cff8e5704867
|
3.1220458108885518e+38
| 4 |
veth: Don't kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consume skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <martin.ferrari@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
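A simplified sketch of the contract after the fix (the real function also orphans the skb and resets its metadata): dev_forward_skb() consumes the skb on every path, so callers must never free it again.
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
    if (!(dev->flags & IFF_UP) ||
        skb->len > (dev->mtu + dev->hard_header_len)) {
        kfree_skb(skb);     /* consume on the error path, too */
        return NET_RX_DROP;
    }
    skb->dev = dev;
    return netif_rx(skb);   /* netif_rx() frees the skb on congestion */
}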
| 0 |
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
struct list_head *tail_page, *to_remove, *next_page;
struct buffer_page *to_remove_page, *tmp_iter_page;
struct buffer_page *last_page, *first_page;
unsigned long nr_removed;
unsigned long head_bit;
int page_entries;
head_bit = 0;
raw_spin_lock_irq(&cpu_buffer->reader_lock);
atomic_inc(&cpu_buffer->record_disabled);
/*
* We don't race with the readers since we have acquired the reader
* lock. We also don't race with writers after disabling recording.
* This makes it easy to figure out the first and the last page to be
* removed from the list. We unlink all the pages in between including
* the first and last pages. This is done in a busy loop so that we
* lose the least number of traces.
* The pages are freed after we restart recording and unlock readers.
*/
tail_page = &cpu_buffer->tail_page->list;
/*
* tail page might be on reader page, we remove the next page
* from the ring buffer
*/
if (cpu_buffer->tail_page == cpu_buffer->reader_page)
tail_page = rb_list_head(tail_page->next);
to_remove = tail_page;
/* start of pages to remove */
first_page = list_entry(rb_list_head(to_remove->next),
struct buffer_page, list);
for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
to_remove = rb_list_head(to_remove)->next;
head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
}
next_page = rb_list_head(to_remove)->next;
/*
* Now we remove all pages between tail_page and next_page.
* Make sure that we have head_bit value preserved for the
* next page
*/
tail_page->next = (struct list_head *)((unsigned long)next_page |
head_bit);
next_page = rb_list_head(next_page);
next_page->prev = tail_page;
/* make sure pages points to a valid page in the ring buffer */
cpu_buffer->pages = next_page;
/* update head page */
if (head_bit)
cpu_buffer->head_page = list_entry(next_page,
struct buffer_page, list);
/*
* change read pointer to make sure any read iterators reset
* themselves
*/
cpu_buffer->read = 0;
/* pages are removed, resume tracing and then free the pages */
atomic_dec(&cpu_buffer->record_disabled);
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
/* last buffer page to remove */
last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
list);
tmp_iter_page = first_page;
do {
cond_resched();
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
/* update the counters */
page_entries = rb_page_entries(to_remove_page);
if (page_entries) {
/*
* If something was added to this page, it was full
* since it is not the tail page. So we deduct the
* bytes consumed in ring buffer from here.
* Increment overrun to account for the lost events.
*/
local_add(page_entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
}
/*
* We have already removed references to this list item, just
* free up the buffer_page and its page
*/
free_buffer_page(to_remove_page);
nr_removed--;
} while (to_remove_page != last_page);
RB_WARN_ON(cpu_buffer, nr_removed);
return nr_removed == 0;
}
|
Safe
|
[
"CWE-362"
] |
linux
|
bbeb97464eefc65f506084fd9f18f21653e01137
|
2.176731190540002e+38
| 110 |
tracing: Fix race in trace_open and buffer resize call
The race below can occur if trace_open and a resize of the
cpu buffer run in parallel on different CPUs:
CPUX CPUY
ring_buffer_resize
atomic_read(&buffer->resize_disabled)
tracing_open
tracing_reset_online_cpus
ring_buffer_reset_cpu
rb_reset_cpu
rb_update_pages
remove/insert pages
resetting pointer
This race can cause a data abort or sometimes an infinite loop in
rb_remove_pages and rb_insert_pages while checking pages
for sanity.
Take buffer lock to fix this.
Link: https://lkml.kernel.org/r/1601976833-24377-1-git-send-email-gkohli@codeaurora.org
Cc: stable@vger.kernel.org
Fixes: b23d7a5f4a07a ("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU")
Signed-off-by: Gaurav Kohli <gkohli@codeaurora.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
| 0 |
ASC_acceptContextsWithTransferSyntax(
T_ASC_Parameters * params,
const char* transferSyntax,
int abstractSyntaxCount, const char* abstractSyntaxes[],
T_ASC_SC_ROLE acceptedRole)
{
OFCondition cond = EC_Normal;
int n, i, j, k;
DUL_PRESENTATIONCONTEXT *dpc;
T_ASC_PresentationContext pc;
OFBool accepted = OFFalse;
OFBool abstractOK = OFFalse;
n = ASC_countPresentationContexts(params);
// No presentation context proposed at all? Return error.
if (n == 0)
{
return ASC_NOPRESENTATIONCONTEXTPROPOSED;
}
for (i = 0; i < n; i++) {
cond = ASC_getPresentationContext(params, i, &pc);
if (cond.bad()) return cond;
abstractOK = OFFalse;
accepted = OFFalse;
for (j = 0; j < abstractSyntaxCount && !accepted; j++) {
if (strcmp(pc.abstractSyntax, abstractSyntaxes[j]) == 0) {
abstractOK = OFTrue;
/* check the transfer syntax */
for (k = 0; (k < (int)pc.transferSyntaxCount) && !accepted; k++) {
if (strcmp(pc.proposedTransferSyntaxes[k], transferSyntax) == 0) {
accepted = OFTrue;
}
}
}
}
if (accepted) {
cond = ASC_acceptPresentationContext(
params, pc.presentationContextID,
transferSyntax, acceptedRole);
// SCP/SCU role selection failed, reject presentation context
if (cond == ASC_SCPSCUROLESELECTIONFAILED) {
cond = ASC_refusePresentationContext(params,
pc.presentationContextID, ASC_P_NOREASON);
}
if (cond.bad()) return cond;
} else {
T_ASC_P_ResultReason reason;
/* do not refuse if already accepted */
dpc = findPresentationContextID(
params->DULparams.acceptedPresentationContext,
pc.presentationContextID);
if ((dpc == NULL) ||
((dpc != NULL) && (dpc->result != ASC_P_ACCEPTANCE))) {
if (abstractOK) {
reason = ASC_P_TRANSFERSYNTAXESNOTSUPPORTED;
} else {
reason = ASC_P_ABSTRACTSYNTAXNOTSUPPORTED;
}
/*
* If previously this presentation context was refused
* because of bad transfer syntax let it stay that way.
*/
if ((dpc != NULL) &&
(dpc->result == ASC_P_TRANSFERSYNTAXESNOTSUPPORTED))
reason = ASC_P_TRANSFERSYNTAXESNOTSUPPORTED;
cond = ASC_refusePresentationContext(params,
pc.presentationContextID,
reason);
if (cond.bad()) return cond;
}
}
}
return EC_Normal;
}
|
Safe
|
[
"CWE-415",
"CWE-703",
"CWE-401"
] |
dcmtk
|
a9697dfeb672b0b9412c00c7d36d801e27ec85cb
|
3.207238058603716e+38
| 79 |
Fixed possible NULL pointer dereference/double free.
Thanks to Jinsheng Ba <bajinsheng@u.nus.edu> for the report and some patches.
| 0 |
static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
unsigned size)
{
OpenPICState *opp = opaque;
int idx = opp->irq_msi;
int srs, ibs;
DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64 "\n",
__func__, addr, val);
if (addr & 0xF) {
return;
}
switch (addr) {
case MSIIR_OFFSET:
srs = val >> MSIIR_SRS_SHIFT;
idx += srs;
ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
opp->msi[srs].msir |= 1 << ibs;
openpic_set_irq(opp, idx, 1);
break;
default:
/* most registers are read-only, thus ignored */
break;
}
}
|
Safe
|
[
"CWE-119"
] |
qemu
|
73d963c0a75cb99c6aaa3f6f25e427aa0b35a02e
|
3.3067576432738375e+38
| 26 |
openpic: avoid buffer overrun on incoming migration
CVE-2013-4534
opp->nb_cpus is read from the wire and used to determine how many
IRQDest elements to read into opp->dst[]. If the value exceeds the
length of opp->dst[], MAX_CPU, opp->dst[] can be overrun with arbitrary
data from the wire.
Fix this by failing migration if the value read from the wire exceeds
MAX_CPU.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
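A sketch of the bounds check the fix adds (the hook name is assumed): a wire-supplied nb_cpus larger than MAX_CPU would index past opp->dst[], so the migration is refused instead.
static int openpic_post_load(void *opaque, int version_id)
{
    OpenPICState *opp = opaque;
    if (opp->nb_cpus > MAX_CPU)
        return -EINVAL;     /* fail migration rather than overrun dst[] */
    return 0;
}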
| 0 |
ProcessStandbyReplyMessage(void)
{
XLogRecPtr writePtr,
flushPtr,
applyPtr;
bool replyRequested;
/* the caller already consumed the msgtype byte */
writePtr = pq_getmsgint64(&reply_message);
flushPtr = pq_getmsgint64(&reply_message);
applyPtr = pq_getmsgint64(&reply_message);
(void) pq_getmsgint64(&reply_message); /* sendTime; not used ATM */
replyRequested = pq_getmsgbyte(&reply_message);
elog(DEBUG2, "write %X/%X flush %X/%X apply %X/%X%s",
(uint32) (writePtr >> 32), (uint32) writePtr,
(uint32) (flushPtr >> 32), (uint32) flushPtr,
(uint32) (applyPtr >> 32), (uint32) applyPtr,
replyRequested ? " (reply requested)" : "");
/* Send a reply if the standby requested one. */
if (replyRequested)
WalSndKeepalive(false);
/*
* Update shared state for this WalSender process based on reply data from
* standby.
*/
{
/* use volatile pointer to prevent code rearrangement */
volatile WalSnd *walsnd = MyWalSnd;
SpinLockAcquire(&walsnd->mutex);
walsnd->write = writePtr;
walsnd->flush = flushPtr;
walsnd->apply = applyPtr;
SpinLockRelease(&walsnd->mutex);
}
if (!am_cascading_walsender)
SyncRepReleaseWaiters();
/*
* Advance our local xmin horizon when the client confirmed a flush.
*/
if (MyReplicationSlot && flushPtr != InvalidXLogRecPtr)
{
if (MyReplicationSlot->data.database != InvalidOid)
LogicalConfirmReceivedLocation(flushPtr);
else
PhysicalConfirmReceivedLocation(flushPtr);
}
}
|
Safe
|
[
"CWE-89"
] |
postgres
|
2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b
|
2.7528492000726077e+38
| 53 |
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
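A simplified sketch of the PqCommReadingMsg mechanism: bracket every message read so the error machinery knows a partial read may have occurred and can terminate the connection instead of losing protocol sync.
static bool PqCommReadingMsg = false;
void pq_startmsgread(void)
{
    PqCommReadingMsg = true;    /* an ERROR here must become FATAL */
}
void pq_endmsgread(void)
{
    PqCommReadingMsg = false;   /* whole message consumed; safe again */
}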
| 0 |
static bool vfswrap_brl_cancel_windows(struct vfs_handle_struct *handle,
struct byte_range_lock *br_lck,
struct lock_struct *plock,
struct blocking_lock_record *blr)
{
SMB_ASSERT(plock->lock_flav == WINDOWS_LOCK);
/* Note: blr is not used in the default implementation. */
return brl_lock_cancel_default(br_lck, plock);
}
|
Safe
|
[
"CWE-665"
] |
samba
|
30e724cbff1ecd90e5a676831902d1e41ec1b347
|
1.294143012897766e+38
| 10 |
FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero
Otherwise num_volumes and the end marker can return uninitialized data
to the client.
Signed-off-by: Christof Schmitt <christof.schmitt@us.ibm.com>
Reviewed-by: Jeremy Allison <jra@samba.org>
Reviewed-by: Simo Sorce <idra@samba.org>
| 0 |
int jemalloc_purge() {
return 0;
}
|
Safe
|
[
"CWE-190"
] |
redis
|
d32f2e9999ce003bad0bd2c3bca29f64dcce4433
|
2.923339214608469e+38
| 3 |
Fix integer overflow (CVE-2021-21309). (#8522)
On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result in an integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309).
This fix has two parts:
Set a reasonable limit to the config parameter.
Add additional checks to prevent the problem in other potential but unknown code paths.
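A hedged sketch of the second part (the function name is assumed): reject a bulk length before the "+2" for the trailing CRLF can wrap a 32-bit long.
#include <limits.h>
static int check_bulk_len(long long ll, long long proto_max_bulk_len)
{
    if (ll < 0 || ll > proto_max_bulk_len || ll > LONG_MAX - 2)
        return -1;      /* would overflow when sized as len + 2 */
    return 0;
}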
| 0 |
static ssize_t ucma_resolve_addr(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_resolve_addr cmd;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
if (cmd.reserved ||
(cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
!cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
ucma_put_ctx(ctx);
return ret;
}
|
Safe
|
[
"CWE-416",
"CWE-703"
] |
linux
|
cb2595c1393b4a5211534e6f0a0fbad369e21ad8
|
2.614046976320868e+38
| 25 |
infiniband: fix a possible use-after-free bug
ucma_process_join() will free the newly allocated "mc" struct,
if there is any error after that, especially the copy_to_user().
But in parallel, ucma_leave_multicast() could find this "mc"
through idr_find() before ucma_process_join() frees it, since it
is already published.
So "mc" could be used in ucma_leave_multicast() after it is been
allocated and freed in ucma_process_join(), since we don't refcnt
it.
Fix this by separating "publish" from ID allocation, so that we
can get an ID first and publish it later after copy_to_user().
Fixes: c8f6a362bf3e ("RDMA/cma: Add multicast communication support")
Reported-by: Noam Rathaus <noamr@beyondsecurity.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
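A simplified sketch of the allocate-then-publish split (the function name is assumed): reserve an idr slot holding NULL first, and install the real pointer only after the fallible copy_to_user() step, so a concurrent ucma_leave_multicast() cannot look up a half-initialized "mc".
static int ucma_alloc_then_publish(struct ucma_multicast *mc)
{
    mutex_lock(&mut);
    mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
    mutex_unlock(&mut);
    if (mc->id < 0)
        return mc->id;
    /* ... copy_to_user() and other fallible work happens here ... */
    mutex_lock(&mut);
    idr_replace(&multicast_idr, mc, mc->id);    /* now visible */
    mutex_unlock(&mut);
    return 0;
}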
| 0 |
x_catch_free_colors(Display * dpy, XErrorEvent * err)
{
if (err->request_code == X_FreeColors ||
x_error_handler.orighandler == x_catch_free_colors)
return 0;
return x_error_handler.orighandler(dpy, err);
}
|
Safe
|
[] |
ghostpdl
|
c432131c3fdb2143e148e8ba88555f7f7a63b25e
|
1.4266358674572871e+38
| 7 |
Bug 699661: Avoid sharing pointers between pdf14 compositors
If a copydevice is triggered when the pdf14 compositor is the device, we make
a copy of the device, then throw an error because, by default, we're only allowed
to copy the device prototype; freeing the copy then calls the finalize, which frees
several pointers shared with the parent.
Make a pdf14 specific finish_copydevice() which NULLs the relevant pointers,
before, possibly, throwing the same error as the default method.
This also highlighted a problem with reopening the X11 devices, where a custom
error handler could be replaced with itself, meaning it also called itself,
and infinite recursion resulted.
Keep a note of if the handler replacement has been done, and don't do it a
second time.
| 0 |
bool AreTensorProtosEqual(const TensorProto& lhs, const TensorProto& rhs) {
Tensor lhs_t(lhs.dtype());
bool success = lhs_t.FromProto(lhs);
DCHECK(success);
Tensor rhs_t(rhs.dtype());
success = rhs_t.FromProto(rhs);
DCHECK(success);
TensorProto lhs_tp;
lhs_t.AsProtoTensorContent(&lhs_tp);
TensorProto rhs_tp;
rhs_t.AsProtoTensorContent(&rhs_tp);
return AreSerializedProtosEqual(lhs_tp, rhs_tp);
}
|
Safe
|
[
"CWE-369",
"CWE-674"
] |
tensorflow
|
e07e1c3d26492c06f078c7e5bf2d138043e199c1
|
2.2459795094671294e+38
| 17 |
Prevent memory overflow in ParseAttrValue from nested tensors.
PiperOrigin-RevId: 370108442
Change-Id: I84d64a5e8895a6aeffbf4749841b4c54d51b5889
| 0 |
void Item_sum_hybrid::clear()
{
value->clear();
null_value= 1;
}
|
Safe
|
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
|
6.451203529612819e+37
| 5 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
| 0 |
explicit Conv2DCustomBackpropInputOp(OpKernelConstruction* context)
: OpKernel(context) {
string data_format;
OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format));
OP_REQUIRES(context, FormatFromString(data_format, &data_format_),
errors::InvalidArgument("Invalid data format"));
OP_REQUIRES(context, data_format_ == FORMAT_NHWC,
errors::InvalidArgument(
"Conv2DCustomBackpropInputOp only supports NHWC."));
OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_));
OP_REQUIRES(context, strides_.size() == 4,
errors::InvalidArgument("Sliding window strides field must "
"specify 4 dimensions"));
OP_REQUIRES(
context, (strides_[0] == 1 && strides_[3] == 1),
errors::Unimplemented("Current implementation does not yet support "
"strides in the batch and depth dimensions."));
OP_REQUIRES(context, strides_[1] > 0 && strides_[2] > 0,
errors::InvalidArgument(
"Row and column strides should be larger than 0."));
OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_));
OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_));
OP_REQUIRES(context, dilations_.size() == 4,
errors::InvalidArgument("Sliding window dilations field must "
"specify 4 dimensions"));
OP_REQUIRES(
context, (dilations_[0] == 1 && dilations_[3] == 1),
errors::Unimplemented("Current implementation does not yet support "
"dilations in the batch and depth dimensions."));
// TODO(yangzihao): Add a CPU implementation for dilated convolution.
OP_REQUIRES(context, (dilations_[1] == 1 && dilations_[2] == 1),
errors::InvalidArgument(
"Current libxsmm and customized CPU implementations do "
"not yet support dilation rates larger than 1."));
OP_REQUIRES_OK(context,
context->GetAttr("explicit_paddings", &explicit_paddings_));
OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_,
/*num_dims=*/4, data_format_));
}
|
Safe
|
[
"CWE-369"
] |
tensorflow
|
2be2cdf3a123e231b16f766aa0e27d56b4606535
|
2.093341952425776e+38
| 39 |
Prevent yet another division by zero
PiperOrigin-RevId: 369343977
Change-Id: I1a60da4cf512e60fd91e069c16e026544632fe7f
| 0 |
FITS_HDU_LIST *fits_image_info (FITS_FILE *ff, int picind, int *hdupicind)
{FITS_HDU_LIST *hdulist;
int firstpic, lastpic;
if (ff == NULL)
FITS_RETURN ("fits_image_info: ff is NULL", NULL);
if (ff->openmode != 'r')
FITS_RETURN ("fits_image_info: file not open for reading", NULL);
if ((picind < 1) || (picind > ff->n_pic))
FITS_RETURN ("fits_image_info: picind out of range", NULL);
firstpic = 1;
for (hdulist = ff->hdu_list; hdulist != NULL; hdulist = hdulist->next_hdu)
{
if (hdulist->numpic <= 0) continue;
lastpic = firstpic+hdulist->numpic-1;
if (picind <= lastpic) /* Found image in current HDU ? */
break;
firstpic = lastpic+1;
}
*hdupicind = picind - firstpic + 1;
return (hdulist);
}
|
Safe
|
[
"CWE-476"
] |
gimp
|
ace45631595e8781a1420842582d67160097163c
|
9.416356691592378e+37
| 27 |
Bug 676804 - file handling DoS for fit file format
Apply patch from joe@reactionis.co.uk which fixes a buffer overflow on
broken/malicious fits files.
| 0 |
ldbm_config_legacy_errcode_set(void *arg,
void *value,
char *errorbuf __attribute__((unused)),
int phase __attribute__((unused)),
int apply)
{
struct ldbminfo *li = (struct ldbminfo *)arg;
if (apply) {
li->li_legacy_errcode = (int)((uintptr_t)value);
}
return LDAP_SUCCESS;
}
|
Safe
|
[
"CWE-399",
"CWE-203"
] |
389-ds-base
|
cc0f69283abc082488824702dae485b8eae938bc
|
9.116322563476363e+37
| 14 |
Issue 4480 - Unexpected info returned to ldap request (#4491)
Bug description:
If the bind entry does not exist, the bind result info
reports that 'No such entry'. It should not give any
information if the target entry exists or not
Fix description:
Does not return any additional information during a bind
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31
| 0 |
static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct open_how __user *how;
const char __user *fname;
size_t len;
int ret;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
if (sqe->flags & IOSQE_FIXED_FILE)
return -EBADF;
req->open.dfd = READ_ONCE(sqe->fd);
fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
len = READ_ONCE(sqe->len);
if (len < OPEN_HOW_SIZE_VER0)
return -EINVAL;
ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
len);
if (ret)
return ret;
if (!(req->open.how.flags & O_PATH) && force_o_largefile())
req->open.how.flags |= O_LARGEFILE;
req->open.filename = getname(fname);
if (IS_ERR(req->open.filename)) {
ret = PTR_ERR(req->open.filename);
req->open.filename = NULL;
return ret;
}
req->flags |= REQ_F_NEED_CLEANUP;
return 0;
}
|
Safe
|
[] |
linux
|
ff002b30181d30cdfbca316dadd099c3ca0d739c
|
3.776862769982162e+37
| 38 |
io_uring: grab ->fs as part of async preparation
This passes it in to io-wq, so it assumes the right fs_struct when
executing async work that may need to do lookups.
Cc: stable@vger.kernel.org # 5.3+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
skb_reset_mac_header(skb);
	skb->mac_header += offset;
}
|
Safe
|
[
"CWE-20"
] |
linux
|
2b16f048729bf35e6c28a40cbfad07239f9dcd90
|
6.657892021027671e+37
| 5 |
net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
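The message defines the MAC length of a segment as L2 + L3 + L4 headers plus payload. A hedged sketch of that arithmetic, with plain offsets standing in for the skb header offsets:

/* MAC length of one post-GSO segment: the L2 header size (network
 * header offset minus MAC header offset) plus the network-layer
 * segment length (L3 + L4 headers + payload). */
static unsigned int mac_seglen(unsigned int mac_off, unsigned int net_off,
                               unsigned int network_seglen)
{
    unsigned int l2_hdrlen = net_off - mac_off;
    return l2_hdrlen + network_seglen;
}

Validating a GSO skb then reduces to checking this value against the given limit for each would-be segment.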
**/
CImg<T>& normalize() {
const ulongT whd = (ulongT)_width*_height*_depth;
cimg_pragma_openmp(parallel for cimg_openmp_collapse(2) cimg_openmp_if(_width>=(cimg_openmp_sizefactor)*512 &&
_height*_depth>=16))
cimg_forYZ(*this,y,z) {
T *ptrd = data(0,y,z,0);
cimg_forX(*this,x) {
const T *ptrs = ptrd;
float n = 0;
cimg_forC(*this,c) { n+=cimg::sqr((float)*ptrs); ptrs+=whd; }
n = (float)std::sqrt(n);
T *_ptrd = ptrd++;
if (n>0) cimg_forC(*this,c) { *_ptrd = (T)(*_ptrd/n); _ptrd+=whd; }
else cimg_forC(*this,c) { *_ptrd = (T)0; _ptrd+=whd; }
}
}
return *this;
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
|
1.4257694819320824e+38
| 18 |
.
| 0 |
ConnStateData::notes()
{
if (!theNotes)
theNotes = new NotePairs;
return theNotes;
}
|
Safe
|
[
"CWE-116"
] |
squid
|
7024fb734a59409889e53df2257b3fc817809fb4
|
1.9464214806306315e+38
| 6 |
Handle more Range requests (#790)
Also removed some effectively unused code.
| 0 |
static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
else
svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
nested_svm_init_mmu_context(&svm->vcpu);
}
/* Load the nested guest state */
svm->vmcb->save.es = nested_vmcb->save.es;
svm->vmcb->save.cs = nested_vmcb->save.cs;
svm->vmcb->save.ss = nested_vmcb->save.ss;
svm->vmcb->save.ds = nested_vmcb->save.ds;
svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
svm->vmcb->save.idtr = nested_vmcb->save.idtr;
kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
if (npt_enabled) {
svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
} else
(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
/* Guest paging mode is active - reset mmu */
kvm_mmu_reset_context(&svm->vcpu);
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
svm->vmcb->save.rip = nested_vmcb->save.rip;
svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
/* cache intercepts */
svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
svm->nested.intercept = nested_vmcb->control.intercept;
svm_flush_tlb(&svm->vcpu, true);
svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
svm->vcpu.arch.hflags |= HF_VINTR_MASK;
else
svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
/* We only want the cr8 intercept bits of the guest */
clr_cr_intercept(svm, INTERCEPT_CR8_READ);
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
/* We don't want to see VMMCALLs from a nested guest */
clr_intercept(svm, INTERCEPT_VMMCALL);
svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
svm->vmcb->control.pause_filter_count =
nested_vmcb->control.pause_filter_count;
svm->vmcb->control.pause_filter_thresh =
nested_vmcb->control.pause_filter_thresh;
kvm_vcpu_unmap(&svm->vcpu, map, true);
/* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu);
/*
* Merge guest and host intercepts - must be called with vcpu in
* guest-mode to take affect here
*/
recalc_intercepts(svm);
svm->nested.vmcb = vmcb_gpa;
enable_gif(svm);
mark_all_dirty(svm->vmcb);
}
|
Safe
|
[
"CWE-401"
] |
linux
|
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
|
2.2334123313866964e+38
| 102 |
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When kmalloc memory for sd->sev_vmcbs failed, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
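The leak described above has the classic shape of a late allocation failing while an earlier one is forgotten. A self-contained sketch of the corrected error path, using standard C allocators as stand-ins for the kernel's:

#include <errno.h>
#include <stdlib.h>

struct cpu_data {
    void  *save_area;
    void **sev_vmcbs;
};

static int cpu_init(struct cpu_data *sd, size_t nguests)
{
    sd->save_area = malloc(4096);
    if (sd->save_area == NULL)
        return -ENOMEM;
    sd->sev_vmcbs = calloc(nguests, sizeof(void *));
    if (sd->sev_vmcbs == NULL) {
        free(sd->save_area);          /* the path that used to leak */
        sd->save_area = NULL;
        return -ENOMEM;               /* the only possible failure here */
    }
    return 0;
}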
static void multi_start_tx(struct sb_uart_port *port)
{
struct mp_port *mtpt = (struct mp_port *)port;
if (!(mtpt->ier & UART_IER_THRI)) {
mtpt->ier |= UART_IER_THRI;
serial_out(mtpt, UART_IER, mtpt->ier);
}
}
|
Safe
|
[
"CWE-200"
] |
linux
|
a8b33654b1e3b0c74d4a1fed041c9aae50b3c427
|
2.2496915994231837e+38
| 9 |
Staging: sb105x: info leak in mp_get_count()
The icount.reserved[] array isn't initialized so it leaks stack
information to userspace.
Reported-by: Nico Golde <nico@ngolde.de>
Reported-by: Fabian Yamaguchi <fabs@goesec.de>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
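The leak above comes from copying a struct whose icount.reserved[] members were never initialized. A hedged sketch of the usual remedy: zero the whole object before filling only the fields you mean to report.

#include <string.h>

struct icounter {
    int rx, tx;
    int reserved[9];                  /* never filled explicitly */
};

static void fill_icount(struct icounter *ic, int rx, int tx)
{
    memset(ic, 0, sizeof(*ic));       /* no stale stack bytes escape */
    ic->rx = rx;
    ic->tx = tx;
}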
static void cjson_replace_item_in_object_should_preserve_name(void)
{
cJSON root[1] = {{ NULL, NULL, NULL, 0, NULL, 0, 0, NULL }};
cJSON *child = NULL;
cJSON *replacement = NULL;
child = cJSON_CreateNumber(1);
TEST_ASSERT_NOT_NULL(child);
replacement = cJSON_CreateNumber(2);
TEST_ASSERT_NOT_NULL(replacement);
cJSON_AddItemToObject(root, "child", child);
cJSON_ReplaceItemInObject(root, "child", replacement);
TEST_ASSERT_TRUE(root->child == replacement);
TEST_ASSERT_EQUAL_STRING("child", replacement->string);
cJSON_Delete(replacement);
}
|
Safe
|
[
"CWE-754",
"CWE-787"
] |
cJSON
|
be749d7efa7c9021da746e685bd6dec79f9dd99b
|
1.466250209815442e+38
| 19 |
Fix crash of cJSON_GetObjectItemCaseSensitive when calling it on arrays
| 0 |
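Array elements carry no name, so a by-name lookup over them ends up comparing against a NULL string. A minimal sketch of the fix shape, with a simplified item type rather than cJSON's actual struct:

#include <stddef.h>
#include <string.h>

struct item {
    const char  *string;              /* NULL for array elements */
    struct item *next;
};

static struct item *get_by_name(struct item *child, const char *name)
{
    while (child != NULL) {
        /* skip unnamed (array) entries instead of strcmp(NULL, ...) */
        if (child->string != NULL && strcmp(child->string, name) == 0)
            return child;
        child = child->next;
    }
    return NULL;
}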
static int compat_do_replace(struct net *net, void __user *user,
unsigned int len)
{
int ret;
struct compat_arpt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
struct arpt_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
/* overflow check */
if (tmp.size >= INT_MAX / num_possible_cpus())
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
/* choose the copy that is on our node/cpu */
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
ret = -EFAULT;
goto free_newinfo;
}
ret = translate_compat_table(tmp.name, tmp.valid_hooks,
&newinfo, &loc_cpu_entry, tmp.size,
tmp.num_entries, tmp.hook_entry,
tmp.underflow);
if (ret != 0)
goto free_newinfo;
duprintf("compat_do_replace: Translated table\n");
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, compat_ptr(tmp.counters));
if (ret)
goto free_newinfo_untrans;
return 0;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
|
Safe
|
[
"CWE-200"
] |
linux-2.6
|
42eab94fff18cb1091d3501cd284d6bd6cc9c143
|
1.6784024935855702e+38
| 52 |
netfilter: arp_tables: fix infoleak to userspace
Structures ipt_replace, compat_ipt_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via argument of spawned modprobe
process.
The first bug was introduced before the git epoch; the second is
introduced by 6b7d31fc (v2.6.15-rc1); the third is introduced by
6b7d31fc (v2.6.15-rc1). To trigger the bug one should have
CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
| 0 |
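The key hardening line in the code above is tmp.name[sizeof(tmp.name)-1] = 0. A self-contained sketch of why that matters before a "%s" format:

#include <stdio.h>

#define TABLE_NAME_LEN 32

/* A fixed-size name copied verbatim from an untrusted source may lack
 * a terminating NUL; force one before handing it to "%s", or the read
 * runs past the buffer into adjacent memory. */
static void print_table_name(char name[TABLE_NAME_LEN])
{
    name[TABLE_NAME_LEN - 1] = '\0';
    printf("table: %s\n", name);
}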
static void edge_bulk_in_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
struct device *dev = &edge_port->port->dev;
unsigned char *data = urb->transfer_buffer;
int retval = 0;
int port_number;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n", __func__, status);
}
if (status == -EPIPE)
goto exit;
if (status) {
dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__);
return;
}
port_number = edge_port->port->port_number;
if (urb->actual_length > 0 && edge_port->lsr_event) {
edge_port->lsr_event = 0;
dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
__func__, port_number, edge_port->lsr_mask, *data);
handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
/* Adjust buffer length/pointer */
--urb->actual_length;
++data;
}
if (urb->actual_length) {
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
if (edge_port->close_pending)
dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
__func__);
else
edge_tty_recv(edge_port->port, data,
urb->actual_length);
edge_port->port->icount.rx += urb->actual_length;
}
exit:
/* continue read unless stopped */
spin_lock(&edge_port->ep_lock);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
spin_unlock(&edge_port->ep_lock);
if (retval)
dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval);
}
|
Safe
|
[
"CWE-191"
] |
linux
|
654b404f2a222f918af9b0cd18ad469d0c941a8e
|
3.0900847409899155e+38
| 66 |
USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <stable@vger.kernel.org> # 2.6.30
Signed-off-by: Johan Hovold <johan@kernel.org>
| 0 |
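The underflow the message describes comes from decrementing an unsigned length that may already be zero; the urb->actual_length > 0 test in the code above is the added guard. The same pattern in a self-contained form:

#include <stddef.h>

/* Consume one status byte from a buffer. The length check must come
 * first: decrementing an unsigned zero wraps to SIZE_MAX, and a later
 * copy with that length leaks whatever follows the buffer. */
static size_t consume_status_byte(const unsigned char **data, size_t len)
{
    if (len == 0)
        return 0;                     /* nothing to consume */
    (*data)++;
    return len - 1;
}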
ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
__be16 port)
{
register unsigned porth = ntohs(port);
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
& IP_VS_SVC_TAB_MASK;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
04bcef2a83f40c6db24222b27a52892cba39dffb
|
1.0741118256054569e+38
| 15 |
ipvs: Add boundary check on ioctl arguments
The ipvs code has a nifty system for doing the size of ioctl command
copies; it defines an array with values into which it indexes the cmd
to find the right length.
Unfortunately, the ipvs code forgot to check if the cmd was in the
range that the array provides, allowing for an index outside of the
array, which then gives a "garbage" result into the length, which
then gets used for copying into a stack buffer.
Fix this by adding sanity checks on these as well as the copy size.
[ horms@verge.net.au: adjusted limit to IP_VS_SO_GET_MAX ]
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
Signed-off-by: Patrick McHardy <kaber@trash.net>
| 0 |
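The bug class above is an unchecked index into a per-command copy-length table. A self-contained sketch of the bounds check; the command numbering and table contents are illustrative:

#define CMD_BASE      100             /* illustrative command numbering */
#define MAX_CMD_INDEX 7

static const unsigned int copylen_table[MAX_CMD_INDEX + 1] = {
    4, 8, 16, 8, 4, 32, 8, 8
};

/* Reject commands outside the table before indexing. With unsigned
 * arithmetic a too-small cmd also wraps to a huge idx and is caught
 * by the same comparison. */
static int get_copy_len(unsigned int cmd)
{
    unsigned int idx = cmd - CMD_BASE;
    if (idx > MAX_CMD_INDEX)
        return -1;
    return (int)copylen_table[idx];
}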
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret = -EFAULT;
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
return -EFAULT;
if (address > TASK_SIZE)
pgd = pgd_offset_k(address);
else
pgd = pgd_offset_gate(mm, address);
BUG_ON(pgd_none(*pgd));
p4d = p4d_offset(pgd, address);
BUG_ON(p4d_none(*p4d));
pud = pud_offset(p4d, address);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
return -EFAULT;
VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map(pmd, address);
if (pte_none(*pte))
goto unmap;
*vma = get_gate_vma(mm);
if (!page)
goto out;
*page = vm_normal_page(*vma, address, *pte);
if (!*page) {
if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
goto unmap;
*page = pte_page(*pte);
}
get_page(*page);
out:
ret = 0;
unmap:
pte_unmap(pte);
return ret;
}
|
Safe
|
[
"CWE-119"
] |
linux
|
1be7107fbe18eed3e319a6c3e83c78254b693acb
|
1.0896375815719376e+38
| 46 |
mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <oleg@redhat.com>
Original-patch-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
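A hedged sketch of the vm_start_gap() arithmetic the message describes: for a downward-growing stack the effective start is pulled back by the guard gap, saturating at zero so the subtraction cannot wrap.

static unsigned long stack_guard_gap = 256UL << 12;  /* 1MB on 4k pages */

static unsigned long vm_start_gap(unsigned long vm_start, int grows_down)
{
    if (grows_down) {
        unsigned long gap_start = vm_start - stack_guard_gap;
        if (gap_start > vm_start)     /* underflow wrapped around */
            return 0;
        return gap_start;
    }
    return vm_start;                  /* non-stack VMAs are unchanged */
}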
static unsigned long intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
{
unsigned long iova_pfn = 0;
/* Restrict dma_mask to the width that the iommu can handle */
dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
/* Ensure we reserve the whole size-aligned region */
nrpages = __roundup_pow_of_two(nrpages);
if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
/*
* First try to allocate an io virtual address in
* DMA_BIT_MASK(32) and if that fails then try allocating
* from higher range
*/
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(DMA_BIT_MASK(32)), false);
if (iova_pfn)
return iova_pfn;
}
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(dma_mask), true);
if (unlikely(!iova_pfn)) {
pr_err("Allocating %ld-page iova for %s failed",
nrpages, dev_name(dev));
return 0;
}
return iova_pfn;
}
|
Safe
|
[] |
linux
|
fb58fdcd295b914ece1d829b24df00a17a9624bc
|
1.38884310974013e+38
| 32 |
iommu/vt-d: Do not enable ATS for untrusted devices
Currently Linux automatically enables ATS (Address Translation Service)
for any device that supports it (and IOMMU is turned on). ATS is used to
accelerate DMA access as the device can cache translations locally so
there is no need to do full translation on IOMMU side. However, as
pointed out in [1] ATS can be used to bypass IOMMU based security
completely by simply sending PCIe read/write transaction with AT
(Address Translation) field set to "translated".
To mitigate this modify the Intel IOMMU code so that it does not enable
ATS for any device that is marked as being untrusted. In case this turns
out to cause performance issues we may selectively allow ATS based on
user decision but currently use big hammer and disable it completely to
be on the safe side.
[1] https://www.repository.cam.ac.uk/handle/1810/274352
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Ashok Raj <ashok.raj@intel.com>
Reviewed-by: Joerg Roedel <jroedel@suse.de>
Acked-by: Joerg Roedel <jroedel@suse.de>
| 0 |
int ssl3_mac(SSL *ssl, unsigned char *md, int send)
{
SSL3_RECORD *rec;
unsigned char *mac_sec,*seq;
EVP_MD_CTX md_ctx;
const EVP_MD *hash;
unsigned char *p,rec_char;
unsigned int md_size;
int npad;
if (send)
{
rec= &(ssl->s3->wrec);
mac_sec= &(ssl->s3->write_mac_secret[0]);
seq= &(ssl->s3->write_sequence[0]);
hash=ssl->write_hash;
}
else
{
rec= &(ssl->s3->rrec);
mac_sec= &(ssl->s3->read_mac_secret[0]);
seq= &(ssl->s3->read_sequence[0]);
hash=ssl->read_hash;
}
md_size=EVP_MD_size(hash);
npad=(48/md_size)*md_size;
/* Chop the digest off the end :-) */
EVP_MD_CTX_init(&md_ctx);
EVP_DigestInit_ex( &md_ctx,hash, NULL);
EVP_DigestUpdate(&md_ctx,mac_sec,md_size);
EVP_DigestUpdate(&md_ctx,ssl3_pad_1,npad);
EVP_DigestUpdate(&md_ctx,seq,8);
rec_char=rec->type;
EVP_DigestUpdate(&md_ctx,&rec_char,1);
p=md;
s2n(rec->length,p);
EVP_DigestUpdate(&md_ctx,md,2);
EVP_DigestUpdate(&md_ctx,rec->input,rec->length);
EVP_DigestFinal_ex( &md_ctx,md,NULL);
EVP_DigestInit_ex( &md_ctx,hash, NULL);
EVP_DigestUpdate(&md_ctx,mac_sec,md_size);
EVP_DigestUpdate(&md_ctx,ssl3_pad_2,npad);
EVP_DigestUpdate(&md_ctx,md,md_size);
EVP_DigestFinal_ex( &md_ctx,md,&md_size);
EVP_MD_CTX_cleanup(&md_ctx);
ssl3_record_sequence_update(seq);
return(md_size);
}
|
Vulnerable
|
[
"CWE-310"
] |
openssl
|
35a65e814beb899fa1c69a7673a8956c6059dce7
|
3.384017493820774e+38
| 54 |
Make CBC decoding constant time.
This patch makes the decoding of SSLv3 and TLS CBC records constant
time. Without this, a timing side-channel can be used to build a padding
oracle and mount Vaudenay's attack.
This patch also disables the stitched AESNI+SHA mode pending a similar
fix to that code.
In order to be easy to backport, this change is implemented in ssl/,
rather than as a generic AEAD mode. In the future this should be changed
around so that HMAC isn't in ssl/, but crypto/ as FIPS expects.
(cherry picked from commit e130841bccfc0bb9da254dc84e23bc6a1c78a64e)
Conflicts:
crypto/evp/c_allc.c
ssl/ssl_algs.c
ssl/ssl_locl.h
ssl/t1_enc.c
(cherry picked from commit 3622239826698a0e534dcf0473204c724bb9b4b4)
Conflicts:
ssl/d1_enc.c
ssl/s3_enc.c
ssl/s3_pkt.c
ssl/ssl3.h
ssl/ssl_algs.c
ssl/t1_enc.c
| 1 |
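The patch above makes CBC record processing constant time. The core idea, shown here as a minimal self-contained helper rather than OpenSSL's actual code, is to replace early-exit comparisons with ones whose running time depends only on the length:

#include <stddef.h>

/* Compare two byte strings in time that depends only on n, never on
 * where the first mismatch occurs, so timing cannot serve as a
 * padding oracle. Returns nonzero when equal. */
static int ct_memeq(const unsigned char *a, const unsigned char *b, size_t n)
{
    unsigned char diff = 0;
    size_t i;
    for (i = 0; i < n; i++)
        diff |= (unsigned char)(a[i] ^ b[i]);
    return diff == 0;
}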
const SSL_CIPHER *SSL_get_current_cipher(const SSL *s)
{
if ((s->session != NULL) && (s->session->cipher != NULL))
return(s->session->cipher);
return(NULL);
}
|
Safe
|
[] |
openssl
|
ee2ffc279417f15fef3b1073c7dc81a908991516
|
3.2060312952855804e+38
| 6 |
Add Next Protocol Negotiation.
| 0 |
static int hidpp_touchpad_fw_items_set(struct hidpp_device *hidpp,
u8 feature_index,
struct hidpp_touchpad_fw_items *items)
{
struct hidpp_report response;
int ret;
u8 *params = (u8 *)response.fap.params;
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
CMD_TOUCHPAD_FW_ITEMS_SET, &items->state, 1, &response);
if (ret > 0) {
hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
__func__, ret);
return -EPROTO;
}
if (ret)
return ret;
items->presence = params[0];
items->desired_state = params[1];
items->state = params[2];
items->persistent = params[3];
return 0;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
|
2.633539995322667e+38
| 26 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: syzbot+403741a091bf41d4ae79@syzkaller.appspotmail.com
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@vger.kernel.org>
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
| 0 |
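The assumption called out above, that every HID device exposes at least one input report, fails for a malicious device. A self-contained analogue of the fix, with a plain linked list standing in for the kernel's hid->inputs list:

#include <stdio.h>

struct input_report { int id; struct input_report *next; };

/* Return the first input report, or fail cleanly when the device
 * (illegally) exposes none, instead of dereferencing a missing entry. */
static int first_input(struct input_report *inputs, int *id_out)
{
    if (inputs == NULL)
        return -1;                    /* device violated the assumption */
    *id_out = inputs->id;
    return 0;
}

int main(void)
{
    int id;
    if (first_input(NULL, &id) < 0)
        printf("no inputs found, rejecting device\n");
    return 0;
}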
//! Test if image height is equal to specified value.
bool is_sameY(const unsigned int size_y) const {
      return _height==size_y;
    }
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
3.3103644532450965e+38
| 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
int megasas_update_device_list(struct megasas_instance *instance,
int event_type)
{
int dcmd_ret = DCMD_SUCCESS;
if (instance->enable_fw_dev_list) {
dcmd_ret = megasas_host_device_list_query(instance, false);
if (dcmd_ret != DCMD_SUCCESS)
goto out;
} else {
if (event_type & SCAN_PD_CHANNEL) {
dcmd_ret = megasas_get_pd_list(instance);
if (dcmd_ret != DCMD_SUCCESS)
goto out;
}
if (event_type & SCAN_VD_CHANNEL) {
if (!instance->requestorId ||
(instance->requestorId &&
megasas_get_ld_vf_affiliation(instance, 0))) {
dcmd_ret = megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
if (dcmd_ret != DCMD_SUCCESS)
goto out;
}
}
}
out:
return dcmd_ret;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
bcf3b67d16a4c8ffae0aa79de5853435e683945c
|
1.3356856860117993e+38
| 32 |
scsi: megaraid_sas: return error when create DMA pool failed
when create DMA pool for cmd frames failed, we should return -ENOMEM,
instead of 0.
In some case in:
megasas_init_adapter_fusion()
-->megasas_alloc_cmds()
-->megasas_create_frame_pool
create DMA pool failed,
--> megasas_free_cmds() [1]
-->megasas_alloc_cmds_fusion()
failed, then goto fail_alloc_cmds.
-->megasas_free_cmds() [2]
we will call megasas_free_cmds twice, [1] will kfree cmd_list,
[2] will use cmd_list. It will cause a problem:
Unable to handle kernel NULL pointer dereference at virtual address
00000000
pgd = ffffffc000f70000
[00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003,
*pmd=0000001fbf894003, *pte=006000006d000707
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 18 PID: 1 Comm: swapper/0 Not tainted
task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000
PC is at megasas_free_cmds+0x30/0x70
LR is at megasas_free_cmds+0x24/0x70
...
Call trace:
[<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70
[<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8
[<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760
[<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8
[<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4
[<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c
[<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430
[<ffffffc00053a92c>] __driver_attach+0xa8/0xb0
[<ffffffc000538178>] bus_for_each_dev+0x74/0xc8
[<ffffffc000539e88>] driver_attach+0x28/0x34
[<ffffffc000539a18>] bus_add_driver+0x16c/0x248
[<ffffffc00053b234>] driver_register+0x6c/0x138
[<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c
[<ffffffc000ce3868>] megasas_init+0xc0/0x1a8
[<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec
[<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284
[<ffffffc0008d90b8>] kernel_init+0x1c/0xe4
Signed-off-by: Jason Yan <yanaijie@huawei.com>
Acked-by: Sumit Saxena <sumit.saxena@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
| 0 |
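The double free above starts with a helper that reports success even when its allocation fails. A minimal sketch of the corrected contract, with a standard C allocator standing in for the DMA pool API:

#include <errno.h>
#include <stdlib.h>

struct instance { void *frame_pool; };

/* Report the failure instead of returning 0; otherwise the caller
 * proceeds, fails later, and frees the same state a second time. */
static int create_frame_pool(struct instance *inst)
{
    inst->frame_pool = malloc(4096);
    if (inst->frame_pool == NULL)
        return -ENOMEM;               /* this path used to return 0 */
    return 0;
}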
guard_has_descriptor(const entry_guard_t *guard)
{
const node_t *node = node_get_by_id(guard->identity);
if (!node)
return 0;
return node_has_descriptor(node);
}
|
Safe
|
[
"CWE-200"
] |
tor
|
665baf5ed5c6186d973c46cdea165c0548027350
|
1.6136454247891614e+38
| 7 |
Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377.
| 0 |
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr=msg->msg_name;
struct sock *other = NULL;
int namelen = 0; /* fake GCC */
int err;
unsigned hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
if (msg->msg_namelen) {
err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
if (err < 0)
goto out;
namelen = err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer_get(sk);
if (!other)
goto out;
}
if (test_bit(SOCK_PASSCRED, &sock->flags)
&& !u->addr && (err = unix_autobind(sock)) != 0)
goto out;
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb==NULL)
goto out;
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
if (siocb->scm->fp) {
err = unix_attach_fds(siocb->scm, skb);
if (err)
goto out_free;
}
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
if (err)
goto out_free;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
restart:
if (!other) {
err = -ECONNRESET;
if (sunaddr == NULL)
goto out_free;
other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
hash, &err);
if (other==NULL)
goto out_free;
}
unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
if (sock_flag(other, SOCK_DEAD)) {
/*
* Check with 1003.1g - what should
* datagram error
*/
unix_state_unlock(other);
sock_put(other);
err = 0;
unix_state_lock(sk);
if (unix_peer(sk) == other) {
unix_peer(sk)=NULL;
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
} else {
unix_state_unlock(sk);
}
other = NULL;
if (err)
goto out_free;
goto restart;
}
err = -EPIPE;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
}
if (unix_peer(other) != sk && unix_recvq_full(other)) {
if (!timeo) {
err = -EAGAIN;
goto out_unlock;
}
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out_free;
goto restart;
}
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
return len;
out_unlock:
unix_state_unlock(other);
out_free:
kfree_skb(skb);
out:
if (other)
sock_put(other);
scm_destroy(siocb->scm);
return err;
}
|
Safe
|
[
"CWE-399"
] |
linux-2.6
|
5f23b734963ec7eaa3ebcd9050da0c9b7d143dd3
|
1.2548360865035309e+38
| 153 |
net: Fix soft lockups/OOM issues w/ unix garbage collector
This is an implementation of David Miller's suggested fix in:
https://bugzilla.redhat.com/show_bug.cgi?id=470201
It has been updated to use wait_event() instead of
wait_event_interruptible().
Paraphrasing the description from the above report, it makes sendmsg()
block while UNIX garbage collection is in progress. This avoids a
situation where child processes continue to queue new FDs over a
AF_UNIX socket to a parent which is in the exit path and running
garbage collection on these FDs. This contention can result in soft
lockups and oom-killing of unrelated processes.
Signed-off-by: dann frazier <dannf@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static void dump_data_string(FILE *trace, char *data, u32 dataLength)
{
u32 i;
for (i=0; i<dataLength; i++) {
switch ((unsigned char) data[i]) {
case '\'':
gf_fprintf(trace, "&apos;");
break;
case '\"':
gf_fprintf(trace, "&quot;");
break;
case '&':
gf_fprintf(trace, "&amp;");
break;
case '>':
gf_fprintf(trace, "&gt;");
break;
case '<':
gf_fprintf(trace, "&lt;");
break;
default:
gf_fprintf(trace, "%c", (u8) data[i]);
break;
}
}
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
|
1.2939397118797363e+38
| 26 |
fixed #2138
| 0 |
static inline unsigned short SinglePrecisionToHalf(const float value)
{
typedef union _SinglePrecision
{
unsigned int
fixed_point;
float
single_precision;
} SinglePrecision;
register int
exponent;
register unsigned int
significand,
sign_bit;
SinglePrecision
map;
unsigned short
half;
/*
The IEEE 754 standard specifies half precision as having:
Sign bit: 1 bit
Exponent width: 5 bits
Significand precision: 11 (10 explicitly stored)
*/
map.single_precision=value;
sign_bit=(map.fixed_point >> 16) & 0x00008000;
exponent=(int) ((map.fixed_point >> ExponentShift) & 0x000000ff)-ExponentBias;
significand=map.fixed_point & 0x007fffff;
if (exponent <= 0)
{
int
shift;
if (exponent < -10)
return((unsigned short) sign_bit);
significand=significand | 0x00800000;
shift=(int) (14-exponent);
significand=(unsigned int) ((significand+((1 << (shift-1))-1)+
((significand >> shift) & 0x01)) >> shift);
return((unsigned short) (sign_bit | significand));
}
else
if (exponent == (0xff-ExponentBias))
{
if (significand == 0)
return((unsigned short) (sign_bit | ExponentMask));
else
{
significand>>=SignificandShift;
half=(unsigned short) (sign_bit | significand |
(significand == 0) | ExponentMask);
return(half);
}
}
significand=significand+((significand >> SignificandShift) & 0x01)+0x00000fff;
if ((significand & 0x00800000) != 0)
{
significand=0;
exponent++;
}
if (exponent > 30)
{
float
alpha;
register int
i;
/*
Float overflow.
*/
alpha=1.0e10;
for (i=0; i < 10; i++)
alpha*=alpha;
return((unsigned short) (sign_bit | ExponentMask));
}
half=(unsigned short) (sign_bit | (exponent << 10) |
(significand >> SignificandShift));
return(half);
}
|
Safe
|
[
"CWE-190"
] |
ImageMagick
|
f60d59cc3a7e3402d403361e0985ffa56f746a82
|
3.4526097946053947e+37
| 87 |
https://github.com/ImageMagick/ImageMagick/issues/1727
| 0 |
copy_viminfo_marks(
vir_T *virp,
FILE *fp_out,
garray_T *buflist,
int eof,
int flags)
{
char_u *line = virp->vir_line;
buf_T *buf;
int num_marked_files;
int load_marks;
int copy_marks_out;
char_u *str;
int i;
char_u *p;
char_u *name_buf;
pos_T pos;
#ifdef FEAT_EVAL
list_T *list = NULL;
#endif
int count = 0;
int buflist_used = 0;
buf_T *buflist_buf = NULL;
if ((name_buf = alloc(LSIZE)) == NULL)
return;
*name_buf = NUL;
if (fp_out != NULL && buflist->ga_len > 0)
{
// Sort the list of buffers on b_last_used.
qsort(buflist->ga_data, (size_t)buflist->ga_len,
sizeof(buf_T *), buf_compare);
buflist_buf = ((buf_T **)buflist->ga_data)[0];
}
#ifdef FEAT_EVAL
if (fp_out == NULL && (flags & (VIF_GET_OLDFILES | VIF_FORCEIT)))
{
list = list_alloc();
if (list != NULL)
set_vim_var_list(VV_OLDFILES, list);
}
#endif
num_marked_files = get_viminfo_parameter('\'');
while (!eof && (count < num_marked_files || fp_out == NULL))
{
if (line[0] != '>')
{
if (line[0] != '\n' && line[0] != '\r' && line[0] != '#')
{
if (viminfo_error("E576: ", _(e_nonr_missing_gt), line))
break; // too many errors, return now
}
eof = vim_fgets(line, LSIZE, virp->vir_fd);
continue; // Skip this dud line
}
// Handle long line and translate escaped characters.
// Find file name, set str to start.
// Ignore leading and trailing white space.
str = skipwhite(line + 1);
str = viminfo_readstring(virp, (int)(str - virp->vir_line), FALSE);
if (str == NULL)
continue;
p = str + STRLEN(str);
while (p != str && (*p == NUL || vim_isspace(*p)))
p--;
if (*p)
p++;
*p = NUL;
#ifdef FEAT_EVAL
if (list != NULL)
list_append_string(list, str, -1);
#endif
// If fp_out == NULL, load marks for current buffer.
// If fp_out != NULL, copy marks for buffers not in buflist.
load_marks = copy_marks_out = FALSE;
if (fp_out == NULL)
{
if ((flags & VIF_WANT_MARKS) && curbuf->b_ffname != NULL)
{
if (*name_buf == NUL) // only need to do this once
home_replace(NULL, curbuf->b_ffname, name_buf, LSIZE, TRUE);
if (fnamecmp(str, name_buf) == 0)
load_marks = TRUE;
}
}
else // fp_out != NULL
{
// This is slow if there are many buffers!!
FOR_ALL_BUFFERS(buf)
if (buf->b_ffname != NULL)
{
home_replace(NULL, buf->b_ffname, name_buf, LSIZE, TRUE);
if (fnamecmp(str, name_buf) == 0)
break;
}
// Copy marks if the buffer has not been loaded.
if (buf == NULL || !buf->b_marks_read)
{
int did_read_line = FALSE;
if (buflist_buf != NULL)
{
// Read the next line. If it has the "*" mark compare the
// time stamps. Write entries from "buflist" that are
// newer.
if (!(eof = viminfo_readline(virp)) && line[0] == TAB)
{
did_read_line = TRUE;
if (line[1] == '*')
{
long ltime;
sscanf((char *)line + 2, "%ld ", &ltime);
while ((time_T)ltime < buflist_buf->b_last_used)
{
write_buffer_marks(buflist_buf, fp_out);
if (++count >= num_marked_files)
break;
if (++buflist_used == buflist->ga_len)
{
buflist_buf = NULL;
break;
}
buflist_buf =
((buf_T **)buflist->ga_data)[buflist_used];
}
}
else
{
// No timestamp, must be written by an older Vim.
// Assume all remaining buffers are older than
// ours.
while (count < num_marked_files
&& buflist_used < buflist->ga_len)
{
buflist_buf = ((buf_T **)buflist->ga_data)
[buflist_used++];
write_buffer_marks(buflist_buf, fp_out);
++count;
}
buflist_buf = NULL;
}
if (count >= num_marked_files)
{
vim_free(str);
break;
}
}
}
fputs("\n> ", fp_out);
viminfo_writestring(fp_out, str);
if (did_read_line)
fputs((char *)line, fp_out);
count++;
copy_marks_out = TRUE;
}
}
vim_free(str);
pos.coladd = 0;
while (!(eof = viminfo_readline(virp)) && line[0] == TAB)
{
if (load_marks)
{
if (line[1] != NUL)
{
unsigned u;
sscanf((char *)line + 2, "%ld %u", &pos.lnum, &u);
pos.col = u;
switch (line[1])
{
case '"': curbuf->b_last_cursor = pos; break;
case '^': curbuf->b_last_insert = pos; break;
case '.': curbuf->b_last_change = pos; break;
case '+':
// changelist positions are stored oldest
// first
if (curbuf->b_changelistlen == JUMPLISTSIZE)
// list is full, remove oldest entry
mch_memmove(curbuf->b_changelist,
curbuf->b_changelist + 1,
sizeof(pos_T) * (JUMPLISTSIZE - 1));
else
++curbuf->b_changelistlen;
curbuf->b_changelist[
curbuf->b_changelistlen - 1] = pos;
break;
// Using the line number for the last-used
// timestamp.
case '*': curbuf->b_last_used = pos.lnum; break;
default: if ((i = line[1] - 'a') >= 0 && i < NMARKS)
curbuf->b_namedm[i] = pos;
}
}
}
else if (copy_marks_out)
fputs((char *)line, fp_out);
}
if (load_marks)
{
win_T *wp;
FOR_ALL_WINDOWS(wp)
{
if (wp->w_buffer == curbuf)
wp->w_changelistidx = curbuf->b_changelistlen;
}
if (flags & VIF_ONLY_CURBUF)
break;
}
}
if (fp_out != NULL)
// Write any remaining entries from buflist.
while (count < num_marked_files && buflist_used < buflist->ga_len)
{
buflist_buf = ((buf_T **)buflist->ga_data)[buflist_used++];
write_buffer_marks(buflist_buf, fp_out);
++count;
}
vim_free(name_buf);
}
|
Safe
|
[
"CWE-416"
] |
vim
|
9f1a39a5d1cd7989ada2d1cb32f97d84360e050f
|
2.0431202311137823e+38
| 237 |
patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end.
| 0 |
_XimEncodingNegotiation(
Xim im)
{
char *name_ptr = 0;
int name_len = 0;
char *detail_ptr = 0;
int detail_len = 0;
CARD8 *buf;
CARD16 *buf_s;
INT16 len;
CARD32 reply32[BUFSIZE/4];
char *reply = (char *)reply32;
XPointer preply;
int buf_size;
int ret_code;
if (!(_XimSetEncodingByName(im, &name_ptr, &name_len)))
return False;
if (!(_XimSetEncodingByDetail(im, &detail_ptr, &detail_len)))
goto free_name_ptr;
len = sizeof(CARD16)
+ sizeof(INT16)
+ name_len
+ XIM_PAD(name_len)
+ sizeof(INT16)
+ sizeof(CARD16)
+ detail_len;
if (!(buf = Xcalloc(XIM_HEADER_SIZE + len, 1)))
goto free_detail_ptr;
buf_s = (CARD16 *)&buf[XIM_HEADER_SIZE];
buf_s[0] = im->private.proto.imid;
buf_s[1] = (INT16)name_len;
if (name_ptr)
(void)memcpy((char *)&buf_s[2], name_ptr, name_len);
XIM_SET_PAD(&buf_s[2], name_len);
buf_s = (CARD16 *)((char *)&buf_s[2] + name_len);
buf_s[0] = detail_len;
buf_s[1] = 0;
if (detail_ptr)
(void)memcpy((char *)&buf_s[2], detail_ptr, detail_len);
_XimSetHeader((XPointer)buf, XIM_ENCODING_NEGOTIATION, 0, &len);
if (!(_XimWrite(im, len, (XPointer)buf))) {
Xfree(buf);
goto free_detail_ptr;
}
_XimFlush(im);
Xfree(buf);
buf_size = BUFSIZE;
ret_code = _XimRead(im, &len, (XPointer)reply, buf_size,
_XimEncodingNegoCheck, 0);
if(ret_code == XIM_TRUE) {
preply = reply;
} else if(ret_code == XIM_OVERFLOW) {
if(len <= 0) {
preply = reply;
} else {
buf_size = len;
preply = Xmalloc(buf_size);
ret_code = _XimRead(im, &len, preply, buf_size,
_XimEncodingNegoCheck, 0);
if(ret_code != XIM_TRUE)
goto free_preply;
}
} else
goto free_detail_ptr;
buf_s = (CARD16 *)((char *)preply + XIM_HEADER_SIZE);
if (*((CARD8 *)preply) == XIM_ERROR) {
_XimProcError(im, 0, (XPointer)&buf_s[3]);
goto free_preply;
}
if (!(_XimGetEncoding(im, &buf_s[1], name_ptr, name_len,
detail_ptr, detail_len)))
goto free_preply;
Xfree(name_ptr);
Xfree(detail_ptr);
if(reply != preply)
Xfree(preply);
return True;
free_preply:
if (reply != preply)
Xfree(preply);
free_detail_ptr:
Xfree(detail_ptr);
free_name_ptr:
Xfree(name_ptr);
return False;
}
|
Safe
|
[
"CWE-190"
] |
libx11
|
1a566c9e00e5f35c1f9e7f3d741a02e5170852b2
|
2.7801979818295628e+38
| 101 |
Zero out buffers in functions
It looks like uninitialized stack or heap memory can leak
out via padding bytes.
Signed-off-by: Matthieu Herrb <matthieu@herrb.eu>
Reviewed-by: Matthieu Herrb <matthieu@herrb.eu>
| 0 |
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
int ret = -1;
uint8_t *host_startaddr = rb->host + start;
if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
error_report("ram_block_discard_range: Unaligned start address: %p",
host_startaddr);
goto err;
}
if ((start + length) <= rb->used_length) {
bool need_madvise, need_fallocate;
if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
error_report("ram_block_discard_range: Unaligned length: %zx",
length);
goto err;
}
errno = ENOTSUP; /* If we are missing MADVISE etc */
/* The logic here is messy;
* madvise DONTNEED fails for hugepages
* fallocate works on hugepages and shmem
*/
need_madvise = (rb->page_size == qemu_host_page_size);
need_fallocate = rb->fd != -1;
if (need_fallocate) {
/* For a file, this causes the area of the file to be zero'd
* if read, and for hugetlbfs also causes it to be unmapped
* so a userfault will trigger.
*/
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, length);
if (ret) {
ret = -errno;
error_report("ram_block_discard_range: Failed to fallocate "
"%s:%" PRIx64 " +%zx (%d)",
rb->idstr, start, length, ret);
goto err;
}
#else
ret = -ENOSYS;
error_report("ram_block_discard_range: fallocate not available/file"
"%s:%" PRIx64 " +%zx (%d)",
rb->idstr, start, length, ret);
goto err;
#endif
}
if (need_madvise) {
/* For normal RAM this causes it to be unmapped,
* for shared memory it causes the local mapping to disappear
* and to fall back on the file contents (which we just
* fallocate'd away).
*/
#if defined(CONFIG_MADVISE)
ret = madvise(host_startaddr, length, MADV_DONTNEED);
if (ret) {
ret = -errno;
error_report("ram_block_discard_range: Failed to discard range "
"%s:%" PRIx64 " +%zx (%d)",
rb->idstr, start, length, ret);
goto err;
}
#else
ret = -ENOSYS;
error_report("ram_block_discard_range: MADVISE not available"
"%s:%" PRIx64 " +%zx (%d)",
rb->idstr, start, length, ret);
goto err;
#endif
}
trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
need_madvise, need_fallocate, ret);
} else {
error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
"/%zx/" RAM_ADDR_FMT")",
rb->idstr, start, length, rb->used_length);
}
err:
return ret;
}
|
Safe
|
[
"CWE-787"
] |
qemu
|
4bfb024bc76973d40a359476dc0291f46e435442
|
2.214630803319175e+38
| 85 |
memory: clamp cached translation in case it points to an MMIO region
In using the address_space_translate_internal API, address_space_cache_init
forgot one piece of advice that can be found in the code for
address_space_translate_internal:
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
address_space_cache_init is exactly one such case where "the incoming length
is large", therefore we need to clamp the resulting length---not to
memory_access_size though, since we are not doing an access yet, but to
the size of the resulting section. This ensures that subsequent accesses
to the cached MemoryRegionSection will be in range.
With this patch, the enclosed testcase notices that the used ring does
not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
error.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
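The fix the message describes is a clamp of the cached translation length to the size of the resolved section. A hedged, self-contained sketch of just that arithmetic, with illustrative names:

/* Clamp a requested mapping length so the cached region never extends
 * past the section it was resolved in. */
static unsigned long long clamp_cached_len(unsigned long long requested,
                                           unsigned long long section_size,
                                           unsigned long long offset_in_section)
{
    unsigned long long avail = section_size - offset_in_section;
    return requested < avail ? requested : avail;
}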
void HierarchicalBitmapRequester::DefineRegion(LONG x,const struct Line *const *line,
const LONG *buffer,UBYTE comp)
{
int cnt = 8;
assert(comp < m_ucCount);
NOREF(comp);
x <<= 3;
do {
if (*line) memcpy((*line)->m_pData + x,buffer,8 * sizeof(LONG));
buffer += 8;
line++;
} while(--cnt);
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
libjpeg
|
187035b9726710b4fe11d565c7808975c930895d
|
1.0164498788824262e+38
| 16 |
The code now checks for consistency of the MCU sizes across
hierarchical levels, and fails in case they are different.
| 0 |
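A hedged sketch of the consistency check the message describes, with illustrative types: compare the MCU dimensions of every hierarchical level against level 0 and fail on mismatch before any per-line copying happens.

/* Return 0 when all levels agree on MCU width/height, -1 otherwise,
 * so later per-MCU copies never index lines that do not exist. */
static int check_mcu_consistency(const int *mcu_w, const int *mcu_h,
                                 int nlevels)
{
    int i;
    for (i = 1; i < nlevels; i++) {
        if (mcu_w[i] != mcu_w[0] || mcu_h[i] != mcu_h[0])
            return -1;
    }
    return 0;
}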
TEST(ArrayOpsTest, BatchToSpaceND_ShapeFn) {
ShapeInferenceTestOp op("BatchToSpaceND");
op.input_tensors.resize(3);
TF_ASSERT_OK(NodeDefBuilder("test", "BatchToSpaceND")
.Input("input", 0, DT_FLOAT)
.Input("block_shape", 1, DT_INT32)
.Input("crops", 2, DT_INT32)
.Finalize(&op.node_def));
// Verify that input shape and crops shape can be unknown.
INFER_OK(op, "?;[2];?", "?");
// Only number of input dimensions is known.
INFER_OK(op, "[?,?,?,?];[2];?", "[?,?,?,d0_3]");
{
// Dimensions are partially known, block_shape known.
Tensor block_shape = test::AsTensor<int32>({2, 3});
op.input_tensors[1] = &block_shape;
INFER_OK(op, "[?,?,?,2];[2];?", "[?,?,?,d0_3]");
INFER_OK(op, "[18,?,?,2];[2];?", "[3,?,?,d0_3]");
// Dimensions are partially known, block_shape and crops known.
{
Tensor crops = test::AsTensor<int32>({1, 1, 0, 1}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_OK(op, "[18,?,2,2];[2];[2,2]", "[3,?,5,d0_3]");
op.input_tensors[2] = nullptr;
}
// Dimensions are fully known, block_shape and crops are known.
{
Tensor crops = test::AsTensor<int32>({1, 1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_OK(op, "[18,2,1,2];[2];[2,2]", "[3,2,3,d0_3]");
op.input_tensors[2] = nullptr;
}
op.input_tensors[1] = nullptr;
}
INFER_ERROR("block_shape must have rank 1", op, "?;[1,1];?");
INFER_ERROR("block_shape must have known size", op, "?;[?];?");
INFER_ERROR("rank", op, "[2,2];[2];[2,2]");
INFER_ERROR("rank", op, "[2,2,3];[3];[3,2]");
{
Tensor block_shape = test::AsTensor<int32>({0, 2});
op.input_tensors[1] = &block_shape;
INFER_ERROR("block_shape must be positive", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({1, 1});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({0, -1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_ERROR("crops cannot be negative", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
// The amount to crop exceeds the padded size.
{
Tensor block_shape = test::AsTensor<int32>({2, 2});
op.input_tensors[1] = &block_shape;
Tensor crops = test::AsTensor<int32>({3, 2, 0, 0}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_ERROR("Negative", op, "[4,2,3,1];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
// The batch size is not divisible by the product of the block_shape.
{
Tensor block_shape = test::AsTensor<int32>({2, 3});
op.input_tensors[1] = &block_shape;
INFER_ERROR("divisible", op, "[3,1,1,1];[2];[2,2]");
op.input_tensors[1] = nullptr;
}
}
|
Safe
|
[
"CWE-125"
] |
tensorflow
|
7cf73a2274732c9d82af51c2bc2cf90d13cd7e6d
|
2.8996556788752652e+38
| 83 |
Address QuantizeAndDequantizeV* heap oob. Added additional checks for the 'axis' attribute.
PiperOrigin-RevId: 402446942
Change-Id: Id2f6b82e4e740d0550329be02621c46466b5a5b9
| 0 |
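The message mentions added checks for the 'axis' attribute. A hedged, plain-C sketch of the invariant rather than TensorFlow's API: an axis must be -1 (meaning none) or a valid dimension index for the input's rank.

/* Validate a user-supplied axis against a tensor of the given rank.
 * Anything outside [-1, rank) would index past the shape and produce
 * a heap out-of-bounds access downstream. */
static int validate_axis(int axis, int rank)
{
    if (axis < -1 || axis >= rank)
        return -1;                    /* out of range: reject the op */
    return 0;
}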
static VncServerInfo2List *qmp_query_server_entry(QIOChannelSocket *ioc,
bool websocket,
int auth,
int subauth,
VncServerInfo2List *prev)
{
VncServerInfo2List *list;
VncServerInfo2 *info;
Error *err = NULL;
SocketAddress *addr;
addr = qio_channel_socket_get_local_address(ioc, &err);
if (!addr) {
error_free(err);
return prev;
}
info = g_new0(VncServerInfo2, 1);
vnc_init_basic_info(addr, qapi_VncServerInfo2_base(info), &err);
qapi_free_SocketAddress(addr);
if (err) {
qapi_free_VncServerInfo2(info);
error_free(err);
return prev;
}
info->websocket = websocket;
qmp_query_auth(auth, subauth, &info->auth,
&info->vencrypt, &info->has_vencrypt);
list = g_new0(VncServerInfo2List, 1);
list->value = info;
list->next = prev;
return list;
}
|
Safe
|
[
"CWE-401"
] |
qemu
|
6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0
|
3.0932178291255966e+38
| 35 |
vnc: fix memory leak when vnc disconnect
Currently when qemu receives a vnc connect, it creates a 'VncState' to
represent this connection. In 'vnc_worker_thread_loop' it creates a
local 'VncState'. The connection 'VcnState' and local 'VncState' exchange
data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'.
In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library
opaque data. The 'VncState' used in 'zrle_compress_data' is the local
'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz
library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection
'VncState'. In the current implementation there is a memory leak when the
vnc client disconnects. Following is the asan output backtrace:
Direct leak of 29760 byte(s) in 5 object(s) allocated from:
0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3)
1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb)
2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7)
3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87
4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344
5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919
6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271
7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340
8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502
9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb)
10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb)
This is because the opaque allocated in 'deflateInit2' is not freed in
'deflateEnd'. The reason is that 'deflateEnd' calls 'deflateStateCheck',
which checks whether 's->strm != strm' (libz's data structure). This check
is true, so 'deflateEnd' just returns 'Z_STREAM_ERROR' and does not free
the data allocated in 'deflateInit2'.
The reason this happens is that the 'VncState' contains the whole 'VncZrle',
so when calling 'deflateInit2', the 's->strm' will be the local address.
So 's->strm != strm' will be true.
To fix this issue, we need to make the 'zrle' member of 'VncState' a pointer.
Then the connection 'VncState' and local 'VncState' exchange mechanism will
work as expected. The 'tight' member of 'VncState' has the same issue; let's
also turn it into a pointer.
Reported-by: Ying Fang <fangying1@huawei.com>
Signed-off-by: Li Qiang <liq3ea@163.com>
Message-id: 20190831153922.121308-1-liq3ea@163.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
| 0 |
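The leak above hinges on a z_stream embedded by value and copied between two VncState objects, so deflateEnd() later sees a stream whose internal back-pointer does not match. A self-contained sketch of the fix shape, assuming zlib: hold the compressor behind a pointer so every copy of the state shares one canonical z_stream.

#include <stdlib.h>
#include <zlib.h>

struct conn_state {
    z_stream *strm;                   /* pointer, not embedded by value */
};

static int conn_init(struct conn_state *cs)
{
    cs->strm = calloc(1, sizeof(*cs->strm));
    if (cs->strm == NULL)
        return -1;
    return deflateInit2(cs->strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                        MAX_WBITS, 8, Z_DEFAULT_STRATEGY);
}

static void conn_fini(struct conn_state *cs)
{
    /* deflateEnd() now receives the very z_stream it initialized, so
     * its internal state check passes and the opaque data is freed. */
    deflateEnd(cs->strm);
    free(cs->strm);
    cs->strm = NULL;
}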
static Image *ReadICONImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
IconFile
icon_file;
IconInfo
icon_info;
Image
*image;
MagickBooleanType
status;
register ssize_t
i,
x;
register Quantum
*q;
register unsigned char
*p;
size_t
bit,
byte,
bytes_per_line,
one,
scanline_pad;
ssize_t
count,
offset,
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"%s",image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
icon_file.reserved=(short) ReadBlobLSBShort(image);
icon_file.resource_type=(short) ReadBlobLSBShort(image);
icon_file.count=(short) ReadBlobLSBShort(image);
if ((icon_file.reserved != 0) ||
((icon_file.resource_type != 1) && (icon_file.resource_type != 2)) ||
(icon_file.count > MaxIcons))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
for (i=0; i < icon_file.count; i++)
{
icon_file.directory[i].width=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].height=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].colors=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].reserved=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].planes=(unsigned short) ReadBlobLSBShort(image);
icon_file.directory[i].bits_per_pixel=(unsigned short)
ReadBlobLSBShort(image);
icon_file.directory[i].size=ReadBlobLSBLong(image);
icon_file.directory[i].offset=ReadBlobLSBLong(image);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
}
one=1;
for (i=0; i < icon_file.count; i++)
{
/*
Verify Icon identifier.
*/
offset=(ssize_t) SeekBlob(image,(MagickOffsetType)
icon_file.directory[i].offset,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
icon_info.size=ReadBlobLSBLong(image);
icon_info.width=(unsigned char) ((int) ReadBlobLSBLong(image));
icon_info.height=(unsigned char) ((int) ReadBlobLSBLong(image)/2);
icon_info.planes=ReadBlobLSBShort(image);
icon_info.bits_per_pixel=ReadBlobLSBShort(image);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if (((icon_info.planes == 18505) && (icon_info.bits_per_pixel == 21060)) ||
(icon_info.size == 0x474e5089))
{
Image
*icon_image;
ImageInfo
*read_info;
size_t
length;
unsigned char
*png;
/*
Icon image encoded as a compressed PNG image.
*/
length=icon_file.directory[i].size;
png=(unsigned char *) AcquireQuantumMemory(length+16,sizeof(*png));
if (png == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) CopyMagickMemory(png,"\211PNG\r\n\032\n\000\000\000\015",12);
png[12]=(unsigned char) icon_info.planes;
png[13]=(unsigned char) (icon_info.planes >> 8);
png[14]=(unsigned char) icon_info.bits_per_pixel;
png[15]=(unsigned char) (icon_info.bits_per_pixel >> 8);
count=ReadBlob(image,length-16,png+16);
icon_image=(Image *) NULL;
if (count > 0)
{
read_info=CloneImageInfo(image_info);
(void) CopyMagickString(read_info->magick,"PNG",MagickPathExtent);
icon_image=BlobToImage(read_info,png,length+16,exception);
read_info=DestroyImageInfo(read_info);
}
png=(unsigned char *) RelinquishMagickMemory(png);
if (icon_image == (Image *) NULL)
{
if (count != (ssize_t) (length-16))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
image=DestroyImageList(image);
return((Image *) NULL);
}
DestroyBlob(icon_image);
icon_image->blob=ReferenceBlob(image->blob);
ReplaceImageInList(&image,icon_image);
}
else
{
if (icon_info.bits_per_pixel > 32)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
icon_info.compression=ReadBlobLSBLong(image);
icon_info.image_size=ReadBlobLSBLong(image);
icon_info.x_pixels=ReadBlobLSBLong(image);
icon_info.y_pixels=ReadBlobLSBLong(image);
icon_info.number_colors=ReadBlobLSBLong(image);
icon_info.colors_important=ReadBlobLSBLong(image);
image->alpha_trait=BlendPixelTrait;
image->columns=(size_t) icon_file.directory[i].width;
if ((ssize_t) image->columns > icon_info.width)
image->columns=(size_t) icon_info.width;
if (image->columns == 0)
image->columns=256;
image->rows=(size_t) icon_file.directory[i].height;
if ((ssize_t) image->rows > icon_info.height)
image->rows=(size_t) icon_info.height;
if (image->rows == 0)
image->rows=256;
image->depth=icon_info.bits_per_pixel;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" scene = %.20g",(double) i);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" size = %.20g",(double) icon_info.size);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" width = %.20g",(double) icon_file.directory[i].width);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" height = %.20g",(double) icon_file.directory[i].height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" colors = %.20g",(double ) icon_info.number_colors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" planes = %.20g",(double) icon_info.planes);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" bpp = %.20g",(double) icon_info.bits_per_pixel);
}
if ((icon_info.number_colors != 0) || (icon_info.bits_per_pixel <= 16U))
{
image->storage_class=PseudoClass;
image->colors=icon_info.number_colors;
if (image->colors == 0)
image->colors=one << icon_info.bits_per_pixel;
}
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
unsigned char
*icon_colormap;
/*
Read Icon raster colormap.
*/
if (AcquireImageColormap(image,image->colors,exception) ==
MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
icon_colormap=(unsigned char *) AcquireQuantumMemory((size_t)
image->colors,4UL*sizeof(*icon_colormap));
if (icon_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) (4*image->colors),icon_colormap);
if (count != (ssize_t) (4*image->colors))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
p=icon_colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].blue=(Quantum) ScaleCharToQuantum(*p++);
image->colormap[i].green=(Quantum) ScaleCharToQuantum(*p++);
image->colormap[i].red=(Quantum) ScaleCharToQuantum(*p++);
p++;
}
icon_colormap=(unsigned char *) RelinquishMagickMemory(icon_colormap);
}
/*
Convert Icon raster image to pixel packets.
*/
if ((image_info->ping != MagickFalse) &&
(image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
bytes_per_line=(((image->columns*icon_info.bits_per_pixel)+31) &
~31) >> 3;
(void) bytes_per_line;
scanline_pad=((((image->columns*icon_info.bits_per_pixel)+31) & ~31)-
(image->columns*icon_info.bits_per_pixel)) >> 3;
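/*
  BMP scanlines are padded to 32-bit boundaries; scanline_pad is the
  number of trailing bytes to skip after each row.
*/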
switch (icon_info.bits_per_pixel)
{
case 1:
{
/*
Convert bitmap scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) (image->columns-7); x+=8)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < 8; bit++)
{
SetPixelIndex(image,((byte & (0x80 >> bit)) != 0 ? 0x01 :
0x00),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 8) != 0)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < (image->columns % 8); bit++)
{
SetPixelIndex(image,((byte & (0x80 >> bit)) != 0 ? 0x01 :
0x00),q);
q+=GetPixelChannels(image);
}
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 4:
{
/*
Read 4-bit Icon scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,((byte >> 4) & 0xf),q);
q+=GetPixelChannels(image);
SetPixelIndex(image,((byte) & 0xf),q);
q+=GetPixelChannels(image);
}
if ((image->columns % 2) != 0)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,((byte >> 4) & 0xf),q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 8:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,byte,q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 16:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte=(size_t) ReadBlobByte(image);
byte|=(size_t) (ReadBlobByte(image) << 8);
SetPixelIndex(image,byte,q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
case 32:
{
/*
Convert DirectColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
if (icon_info.bits_per_pixel == 32)
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
q+=GetPixelChannels(image);
}
if (icon_info.bits_per_pixel == 24)
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (image_info->ping == MagickFalse)
(void) SyncImage(image,exception);
if (icon_info.bits_per_pixel != 32)
{
/*
Read the ICON alpha mask.
*/
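/*
  The AND mask is 1 bit per pixel: a set bit marks a transparent
  pixel, a clear bit an opaque one.
*/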
image->storage_class=DirectClass;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < 8; bit++)
{
SetPixelAlpha(image,(((byte & (0x80 >> bit)) != 0) ?
TransparentAlpha : OpaqueAlpha),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 8) != 0)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < (image->columns % 8); bit++)
{
SetPixelAlpha(image,(((byte & (0x80 >> bit)) != 0) ?
TransparentAlpha : OpaqueAlpha),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 32) != 0)
for (x=0; x < (ssize_t) ((32-(image->columns % 32))/8); x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (i < (ssize_t) (icon_file.count-1))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
|
Vulnerable
|
[
"CWE-190",
"CWE-189",
"CWE-703"
] |
ImageMagick
|
0f6fc2d5bf8f500820c3dbcf0d23ee14f2d9f734
|
1.2796815355407305e+38
| 501 | 1 |
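The CWE-190 label on this row points at the PNG branch above: length comes straight from the icon directory, and ReadBlob(image,length-16,png+16) is reached without first checking that length >= 16. A minimal standalone demonstration of the unsigned wrap, using hypothetical values rather than ImageMagick's types:

#include <stdio.h>

int main(void)
{
  /* Hypothetical directory entry claiming fewer than 16 bytes. */
  size_t length = 8;
  size_t request = length - 16;   /* unsigned wrap: SIZE_MAX - 7 */
  printf("length-16 wraps to %zu\n", request);
  /* Passing 'request' as a read size against an (8+16)-byte buffer
     is a huge over-read; the obvious guard is a lower bound check. */
  if (length < 16)
    puts("guard: reject sizes smaller than the 16-byte PNG prefix");
  return 0;
}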
|
int __init nfc_llcp_sock_init(void)
{
return nfc_proto_register(&llcp_nfc_proto);
}
|
Safe
|
[
"CWE-276"
] |
linux
|
3a359798b176183ef09efb7a3dc59abad1cc7104
|
2.70870617669665e+38
| 4 |
nfc: enforce CAP_NET_RAW for raw sockets
When creating a raw AF_NFC socket, CAP_NET_RAW needs to be checked
first.
Signed-off-by: Ori Nimron <orinimron123@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
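The patch described in that message is a single capability check at socket-creation time. A sketch of its shape, modeled on net/nfc/llcp_sock.c; the function and ops names are recalled rather than quoted from the diff, so treat this as illustrative only:

static int llcp_sock_create(struct net *net, struct socket *sock,
                            const struct nfc_protocol *nfc_proto,
                            int kern)
{
        if (sock->type != SOCK_STREAM &&
            sock->type != SOCK_DGRAM &&
            sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        if (sock->type == SOCK_RAW) {
                /* The fix: creating a raw LLCP socket now requires
                   CAP_NET_RAW, checked before anything is set up. */
                if (!capable(CAP_NET_RAW))
                        return -EPERM;
                sock->ops = &llcp_rawsock_ops;
        } else {
                sock->ops = &llcp_sock_ops;
        }

        /* ...socket allocation continues as before... */
        return 0;
}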
MagickExport MagickRealType GetPixelInfoIntensity(
const Image *magick_restrict image,const PixelInfo *magick_restrict pixel)
{
MagickRealType
  blue,
  green,
  red,
  intensity;

PixelIntensityMethod
  method;
method=Rec709LumaPixelIntensityMethod;
if (image != (const Image *) NULL)
method=image->intensity;
red=pixel->red;
green=pixel->green;
blue=pixel->blue;
switch (method)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
intensity=(MagickRealType) (((double) red*red+green*green+blue*blue)/
(3.0*QuantumRange));
break;
}
case Rec601LumaPixelIntensityMethod:
{
if (pixel->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
if (pixel->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:
{
if (pixel->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if (pixel->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
intensity=(MagickRealType) (sqrt((double) red*red+green*green+blue*blue)/
sqrt(3.0));
break;
}
}
return(intensity);
}
|
Safe
|
[
"CWE-190"
] |
ImageMagick
|
406da3af9e09649cda152663c179902edf5ab3ac
|
1.2227275572497234e+38
| 96 |
https://github.com/ImageMagick/ImageMagick/issues/1732
| 0 |
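The Rec.601/709 branches above differ only in their channel weights and in whether the channels are linearized first. A small self-contained program contrasting Rec.709 luma (weights applied to gamma-encoded values) with Rec.709 luminance (weights applied to linear light), using the standard sRGB transfer function as a stand-in for ImageMagick's DecodePixelGamma; compile with -lm:

#include <math.h>
#include <stdio.h>

/* Standard sRGB-to-linear transfer; ImageMagick's internal
   implementation may differ in detail. */
static double srgb_to_linear(double v)
{
  return (v <= 0.04045) ? v/12.92 : pow((v+0.055)/1.055, 2.4);
}

int main(void)
{
  double r = 1.0, g = 0.5, b = 0.25;  /* normalized sRGB sample */
  /* Rec709Luma: weights on the encoded values. */
  double luma = 0.212656*r + 0.715158*g + 0.072186*b;
  /* Rec709Luminance: same weights on linearized values. */
  double lum  = 0.212656*srgb_to_linear(r) +
                0.715158*srgb_to_linear(g) +
                0.072186*srgb_to_linear(b);
  printf("luma=%.6f luminance=%.6f\n", luma, lum);
  return 0;
}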