func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64)
---|---|---|---|---|---|---|---|---|
void free_task(struct task_struct *tsk)
{
account_kernel_stack(tsk->stack, -1);
arch_release_thread_info(tsk->stack);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
put_seccomp_filter(tsk);
arch_release_task_struct(tsk);
free_task_struct(tsk);
}
|
Safe
|
[
"CWE-284",
"CWE-264"
] |
linux
|
e66eded8309ebf679d3d3c1f5820d1f2ca332c71
|
1.955689826265255e+38
| 11 |
userns: Don't allow CLONE_NEWUSER | CLONE_FS
Don't allow sharing the root directory with processes in a
different user namespace. There doesn't seem to be any point, and to
allow it would require the overhead of putting a user namespace
reference in fs_struct (for permission checks) and incrementing that
reference count on practically every call to fork.
So just perform the inexpensive test of forbidding sharing fs_struct
across processes in different user namespaces. We already disallow
other forms of threading when unsharing a user namespace so this
should be no real burden in practice.
This updates setns, clone, and unshare to disallow multiple user
namespaces sharing an fs_struct.
Cc: stable@vger.kernel.org
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
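A minimal sketch of the guard this commit describes, assuming it lands in copy_process() as the message implies (the full patch also covers unshare and setns):

```c
/* Refuse to combine a new user namespace with a shared fs_struct:
 * CLONE_NEWUSER|CLONE_FS together would share one root directory
 * across user namespaces. */
if ((clone_flags & (CLONE_NEWUSER | CLONE_FS)) == (CLONE_NEWUSER | CLONE_FS))
	return ERR_PTR(-EINVAL);
```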
DEFFUNC_llExecFunc(flushRptdMsgsActions)
{
action_t *pAction = (action_t*) pData;
assert(pAction != NULL);
BEGINfunc
LockObj(pAction);
/* TODO: time() performance: the call below could be moved to
* the beginning of llExec(). This makes it slightly less correct, but
* in an acceptable way. -- rgerhards, 2008-09-16
*/
if (pAction->f_prevcount && time(NULL) >= REPEATTIME(pAction)) {
DBGPRINTF("flush %s: repeated %d times, %d sec.\n",
module.GetStateName(pAction->pMod), pAction->f_prevcount,
repeatinterval[pAction->f_repeatcount]);
actionWriteToAction(pAction);
BACKOFF(pAction);
}
UnlockObj(pAction);
ENDfunc
return RS_RET_OK; /* we ignore errors, we can not do anything either way */
}
|
Safe
|
[
"CWE-119"
] |
rsyslog
|
1ca6cc236d1dabf1633238b873fb1c057e52f95e
|
1.5163745682416602e+38
| 23 |
bugfix: off-by-one(two) bug in legacy syslog parser
| 0 |
static int processcompl_compat(struct async *as, void __user * __user *arg)
{
struct urb *urb = as->urb;
struct usbdevfs_urb32 __user *userurb = as->userurb;
void __user *addr = as->userurb;
unsigned int i;
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
return -EFAULT;
}
if (put_user(as->status, &userurb->status))
return -EFAULT;
if (put_user(urb->actual_length, &userurb->actual_length))
return -EFAULT;
if (put_user(urb->error_count, &userurb->error_count))
return -EFAULT;
if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
for (i = 0; i < urb->number_of_packets; i++) {
if (put_user(urb->iso_frame_desc[i].actual_length,
&userurb->iso_frame_desc[i].actual_length))
return -EFAULT;
if (put_user(urb->iso_frame_desc[i].status,
&userurb->iso_frame_desc[i].status))
return -EFAULT;
}
}
if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
return -EFAULT;
return 0;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
681fef8380eb818c0b845fca5d2ab1dcbab114ee
|
7.127887267999603e+37
| 33 |
USB: usbfs: fix potential infoleak in devio
The stack object “ci” has a total size of 8 bytes. Its last 3 bytes
are padding bytes which are not initialized and leaked to userland
via “copy_to_user”.
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
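processcompl_compat() above is from the same commit but is not where the leak lived; a hedged sketch of the fix pattern for the "ci" object the message describes, zeroing the whole struct (padding included) before copying it out:

```c
struct usbdevfs_connectinfo ci;

/* A designated initializer does not guarantee that padding is zeroed,
 * so clear the whole object explicitly before filling it. */
memset(&ci, 0, sizeof(ci));
ci.devnum = ps->dev->devnum;
ci.slow = ps->dev->speed == USB_SPEED_LOW;

if (copy_to_user(arg, &ci, sizeof(ci)))
	ret = -EFAULT;
```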
static void GTextFieldDrawDDCursor(GTextField *gt, int pos) {
GRect old;
int x, y, l;
l = GTextFieldFindLine(gt,pos);
if ( l<gt->loff_top || l>=gt->loff_top + (gt->g.inner.height/gt->fh))
return;
_gt_cursor_pos(gt,pos,&x,&y);
if ( x<0 || x>=gt->g.inner.width )
return;
GDrawPushClip(gt->g.base,&gt->g.inner,&old);
GDrawSetDifferenceMode(gt->g.base);
GDrawSetFont(gt->g.base,gt->font);
GDrawSetLineWidth(gt->g.base,0);
GDrawSetDashedLine(gt->g.base,2,2,0);
GDrawDrawLine(gt->g.base,gt->g.inner.x+x,gt->g.inner.y+y,
gt->g.inner.x+x,gt->g.inner.y+y+gt->fh,
COLOR_WHITE);
GDrawPopClip(gt->g.base,&old);
GDrawSetDashedLine(gt->g.base,0,0,0);
gt->has_dd_cursor = !gt->has_dd_cursor;
gt->dd_cursor_pos = pos;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
fontforge
|
626f751752875a0ddd74b9e217b6f4828713573c
|
1.958807714608685e+38
| 24 |
Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846.
| 0 |
GF_Err udta_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e = gf_isom_box_array_read(s, bs, udta_on_child_box);
if (e) return e;
if (s->size==4) {
u32 val = gf_bs_read_u32(bs);
s->size = 0;
if (val) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] udta has 4 remaining bytes set to %08X but they should be 0\n", val));
}
}
return GF_OK;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
|
1.6891070374760536e+38
| 13 |
fixed #1587
| 0 |
static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
return container_of(kvm, struct kvm_svm, kvm);
}
|
Safe
|
[
"CWE-401"
] |
linux
|
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
|
9.87504932764705e+35
| 4 |
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When the kmalloc for sd->sev_vmcbs fails, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
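to_kvm_svm() above is incidental context from this commit; a sketch of the corrected error path in svm_cpu_init() as the message describes it (details reconstructed, treat as an approximation of the patch):

```c
static int svm_cpu_init(int cpu)	/* abridged sketch */
{
	struct svm_cpu_data *sd = kzalloc(sizeof(*sd), GFP_KERNEL);

	if (!sd)
		return -ENOMEM;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto free_cpu_data;
	if (svm_sev_enabled()) {
		sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *),
					GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto free_save_area;	/* previously leaked save_area */
	}
	per_cpu(svm_data, cpu) = sd;
	return 0;

free_save_area:
	__free_page(sd->save_area);
free_cpu_data:
	kfree(sd);
	return -ENOMEM;		/* '-ENOMEM' is the only possible outcome */
}
```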
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_spec *spec = rbd_dev->spec;
const char *pool_name;
const char *image_name;
const char *snap_name;
int ret;
rbd_assert(spec->pool_id != CEPH_NOPOOL);
rbd_assert(spec->image_id);
rbd_assert(spec->snap_id != CEPH_NOSNAP);
/* Get the pool name; we have to make our own copy of this */
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
if (!pool_name) {
rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
return -EIO;
}
pool_name = kstrdup(pool_name, GFP_KERNEL);
if (!pool_name)
return -ENOMEM;
/* Fetch the image name; tolerate failure here */
image_name = rbd_dev_image_name(rbd_dev);
if (!image_name)
rbd_warn(rbd_dev, "unable to get image name");
/* Fetch the snapshot name */
snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
if (IS_ERR(snap_name)) {
ret = PTR_ERR(snap_name);
goto out_err;
}
spec->pool_name = pool_name;
spec->image_name = image_name;
spec->snap_name = snap_name;
return 0;
out_err:
kfree(image_name);
kfree(pool_name);
return ret;
}
|
Safe
|
[
"CWE-863"
] |
linux
|
f44d04e696feaf13d192d942c4f14ad2e117065a
|
3.3047500493636954e+38
| 49 |
rbd: require global CAP_SYS_ADMIN for mapping and unmapping
It turns out that currently we rely only on sysfs attribute
permissions:
$ ll /sys/bus/rbd/{add*,remove*}
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove
--w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major
This means that images can be mapped and unmapped (i.e. block devices
can be created and deleted) by a UID 0 process even after it drops all
privileges or by any process with CAP_DAC_OVERRIDE in its user namespace
as long as UID 0 is mapped into that user namespace.
Be consistent with other virtual block devices (loop, nbd, dm, md, etc)
and require CAP_SYS_ADMIN in the initial user namespace for mapping and
unmapping, and also for dumping the configuration string and refreshing
the image header.
Cc: stable@vger.kernel.org
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
| 0 |
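rbd_spec_fill_names() is untouched context; the commit's actual change is a capability gate at the entry points it lists (add, remove, config info, refresh). A minimal sketch (the helper called after the check is hypothetical):

```c
static ssize_t do_rbd_add(struct bus_type *bus, const char *buf, size_t count)
{
	/* Require CAP_SYS_ADMIN in the initial user namespace, consistent
	 * with loop, nbd, dm and md. */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return rbd_parse_and_map(buf, count);	/* hypothetical remainder */
}
```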
static std::vector<std::string> AllDirectoryPrefixes(const std::string& d) {
std::vector<std::string> dirs;
const std::string patched = PatchPattern(d);
StringPiece dir(patched);
// If the pattern ends with a `/` (or `\\` on Windows), we need to strip it
// otherwise we would have one additional matching step and the result set
// would be empty.
bool is_directory = d[d.size() - 1] == '/';
#ifdef PLATFORM_WINDOWS
is_directory = is_directory || (d[d.size() - 1] == '\\');
#endif
if (is_directory) {
dir = io::Dirname(dir);
}
while (!dir.empty()) {
dirs.emplace_back(dir);
StringPiece new_dir(io::Dirname(dir));
// io::Dirname("/") returns "/" so we need to break the loop.
// On Windows, io::Dirname("C:\\") would return "C:\\", so we check for
// identity of the result instead of checking for dir[0] == `/`.
if (dir == new_dir) break;
dir = new_dir;
}
// Order the array from parent to ancestor (reverse order).
std::reverse(dirs.begin(), dirs.end());
return dirs;
}
|
Safe
|
[
"CWE-125"
] |
tensorflow
|
8b5b9dc96666a3a5d27fad7179ff215e3b74b67c
|
6.499829682852051e+37
| 31 |
Completely rewrite `GetMatchingPaths`.
The current parallel implementation is too complex (lambda inside lambda, two levels of parallelism) and has an out-of-bounds read issue.
The new implementation cleans up artifacts from the previous implementations that were left in the code as it evolved. We add multiple helper functions, and document invariants and preconditions as well as every major step. This way, we fix the security issue and a potential new one which was not caught before.
PiperOrigin-RevId: 346146220
Change-Id: Iec0f44673f43349797bf9944dffe9b2f779137d8
| 0 |
referral_mode_reply(Slapi_PBlock *pb)
{
struct slapdplugin *plugin;
plugin = (struct slapdplugin *)slapi_ch_calloc(1, sizeof(struct slapdplugin));
if (plugin != NULL) {
struct berval *urls[2], url;
char *refer;
refer = config_get_referral_mode();
slapi_pblock_set(pb, SLAPI_PLUGIN, plugin);
set_db_default_result_handlers(pb);
urls[0] = &url;
urls[1] = NULL;
url.bv_val = refer;
url.bv_len = refer ? strlen(refer) : 0;
slapi_send_ldap_result(pb, LDAP_REFERRAL, NULL, NULL, 0, urls);
slapi_ch_free((void **)&plugin);
slapi_ch_free((void **)&refer);
}
}
|
Safe
|
[
"CWE-415"
] |
389-ds-base
|
a3c298f8140d3e4fa1bd5a670f1bb965a21a9b7b
|
5.818327473289399e+37
| 19 |
Issue 5218 - double-free of the virtual attribute context in persistent search (#5219)
description:
A search is processed by a worker using a private pblock.
If the search is persistent, the worker spawns a thread
and kind of duplicates its private pblock so that the spawned
thread can continue to process the persistent search.
Then the worker ends the initial search, reinits (frees) its private pblock,
and returns to monitoring the wait_queue.
When the persistent search completes, it frees the duplicated
pblock.
The problem is that private pblock and duplicated pblock
are referring to a same structure (pb_vattr_context).
That can lead to a double free
Fix:
When cloning the pblock (slapi_pblock_clone) make sure
to transfer the references inside the original (private)
pblock to the target (cloned) one
That includes pb_vattr_context pointer.
Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
Co-authored-by: Mark Reynolds <mreynolds@redhat.com>
| 0 |
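referral_mode_reply() is surrounding context; the fix itself lives in slapi_pblock_clone(). A hedged sketch of the ownership transfer the message describes (the field access path is illustrative, not the exact structure layout):

```c
/* Transfer, don't share: the clone steals the vattr context so that
 * exactly one pblock ever frees it. */
new_pb->pb_vattr_context = old_pb->pb_vattr_context;
old_pb->pb_vattr_context = NULL;	/* original no longer owns it */
```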
sql_real_connect(char *host,char *database,char *user,char *password,
uint silent)
{
if (connected)
{
connected= 0;
mysql_close(&mysql);
}
mysql_init(&mysql);
if (opt_init_command)
mysql_options(&mysql, MYSQL_INIT_COMMAND, opt_init_command);
if (opt_connect_timeout)
{
uint timeout=opt_connect_timeout;
mysql_options(&mysql,MYSQL_OPT_CONNECT_TIMEOUT,
(char*) &timeout);
}
if (opt_compress)
mysql_options(&mysql,MYSQL_OPT_COMPRESS,NullS);
if (opt_secure_auth)
mysql_options(&mysql, MYSQL_SECURE_AUTH, (char *) &opt_secure_auth);
if (using_opt_local_infile)
mysql_options(&mysql,MYSQL_OPT_LOCAL_INFILE, (char*) &opt_local_infile);
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
if (opt_use_ssl)
mysql_ssl_set(&mysql, opt_ssl_key, opt_ssl_cert, opt_ssl_ca,
opt_ssl_capath, opt_ssl_cipher);
mysql_options(&mysql,MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
(char*)&opt_ssl_verify_server_cert);
#endif
if (opt_protocol)
mysql_options(&mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
#ifdef HAVE_SMEM
if (shared_memory_base_name)
mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
#endif
if (safe_updates)
{
char init_command[100];
sprintf(init_command,
"SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=%lu,MAX_JOIN_SIZE=%lu",
select_limit,max_join_size);
mysql_options(&mysql, MYSQL_INIT_COMMAND, init_command);
}
mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);
if (opt_plugin_dir && *opt_plugin_dir)
mysql_options(&mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir);
if (opt_default_auth && *opt_default_auth)
mysql_options(&mysql, MYSQL_DEFAULT_AUTH, opt_default_auth);
if (using_opt_enable_cleartext_plugin)
mysql_options(&mysql, MYSQL_ENABLE_CLEARTEXT_PLUGIN,
(char*) &opt_enable_cleartext_plugin);
if (!mysql_connect_ssl_check(&mysql, host, user, password,
database, opt_mysql_port, opt_mysql_unix_port,
connect_flag | CLIENT_MULTI_STATEMENTS,
opt_ssl_mode == SSL_MODE_REQUIRED))
{
if (!silent ||
(mysql_errno(&mysql) != CR_CONN_HOST_ERROR &&
mysql_errno(&mysql) != CR_CONNECTION_ERROR))
{
(void) put_error(&mysql);
(void) fflush(stdout);
return ignore_errors ? -1 : 1; // Abort
}
return -1; // Retryable
}
charset_info= mysql.charset;
connected=1;
#ifndef EMBEDDED_LIBRARY
mysql.reconnect= debug_info_flag; // We want to know if this happens
#else
mysql.reconnect= 1;
#endif
#ifdef HAVE_READLINE
build_completion_hash(opt_rehash, 1);
#endif
return 0;
}
|
Safe
|
[
"CWE-319"
] |
mysql-server
|
060b1eadf4913f7066484ea34ec62feead1bca44
|
1.8380080132287513e+38
| 86 |
BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit 3b2d28578c526f347f5cfe763681eff365731f99)
| 0 |
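The function above shows the call site (opt_ssl_mode == SSL_MODE_REQUIRED); a minimal sketch of how a client selects the option introduced here, assuming the MYSQL_OPT_SSL_MODE API:

```c
unsigned int ssl_mode = SSL_MODE_REQUIRED;

/* Refuse to proceed unless TLS is negotiated, so credentials are never
 * sent over a cleartext connection. */
mysql_options(&mysql, MYSQL_OPT_SSL_MODE, &ssl_mode);
```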
static void ieee80211_enable_ps(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_conf *conf = &local->hw.conf;
/*
* If we are scanning right now then the parameters will
* take effect when scan finishes.
*/
if (local->scanning)
return;
if (conf->dynamic_ps_timeout > 0 &&
!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(conf->dynamic_ps_timeout));
} else {
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
ieee80211_send_nullfunc(local, sdata, true);
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
return;
conf->flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
}
|
Safe
|
[] |
linux
|
79c92ca42b5a3e0ea172ea2ce8df8e125af237da
|
1.1842091073949032e+38
| 28 |
mac80211: handle deauthentication/disassociation from TDLS peer
When receiving a deauthentication/disassociation frame from a TDLS
peer, a station should not disconnect the current AP, but only
disable the current TDLS link if it's enabled.
Without this change, a TDLS issue can be reproduced by following the
steps as below:
1. STA-1 and STA-2 are connected to AP, bidirection traffic is running
between STA-1 and STA-2.
2. Set up TDLS link between STA-1 and STA-2, stay for a while, then
teardown TDLS link.
3. Repeat step #2 and monitor the connection between STA and AP.
During the test, one STA may send a deauthentication/disassociation
frame to another, after TDLS teardown, with reason code 6/7, which
means: Class 2/3 frame received from nonassociated STA.
On receiving this frame, the receiver STA will disconnect from the current
AP and then reconnect. This is not the expected behavior; the purpose of this
frame should be to disable the TDLS link, not the link with the AP.
Cc: stable@vger.kernel.org
Signed-off-by: Yu Wang <yyuwang@codeaurora.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
| 0 |
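ieee80211_enable_ps() is context from the same commit; a hedged sketch of the core idea in the deauth receive path (the helper name is my best recollection of the patch, treat it as an assumption):

```c
/* If the deauth/disassoc came from a TDLS peer rather than our AP,
 * tear down only the TDLS link and keep the AP association. */
if (!ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
	ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
	return;
}
```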
int mailimf_msg_id_parse(const char * message, size_t length,
size_t * indx,
char ** result)
{
size_t cur_token;
#if 0
char * id_left;
char * id_right;
#endif
char * msg_id;
int r;
int res;
cur_token = * indx;
r = mailimf_cfws_parse(message, length, &cur_token);
if ((r != MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE))
return r;
r = mailimf_lower_parse(message, length, &cur_token);
if (r == MAILIMF_ERROR_PARSE) {
r = mailimf_addr_spec_msg_id_parse(message, length, &cur_token, &msg_id);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
* result = msg_id;
* indx = cur_token;
return MAILIMF_NO_ERROR;
}
else if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
// workaround for mbox mail
r = mailimf_lower_parse(message, length, &cur_token);
if (r == MAILIMF_NO_ERROR) {
// ok
}
else if (r == MAILIMF_ERROR_PARSE) {
// ok
}
else {
res = r;
goto err;
}
r = mailimf_addr_spec_msg_id_parse(message, length, &cur_token, &msg_id);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
r = mailimf_greater_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR) {
free(msg_id);
res = r;
goto err;
}
r = mailimf_greater_parse(message, length, &cur_token);
if (r == MAILIMF_NO_ERROR) {
// ok
}
else if (r == MAILIMF_ERROR_PARSE) {
// ok
}
else {
free(msg_id);
res = r;
goto err;
}
#if 0
r = mailimf_id_left_parse(message, length, &cur_token, &id_left);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
r = mailimf_at_sign_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto free_id_left;
}
r = mailimf_id_right_parse(message, length, &cur_token, &id_right);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto free_id_left;
}
r = mailimf_greater_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto free_id_right;
}
msg_id = malloc(strlen(id_left) + strlen(id_right) + 2);
if (msg_id == NULL) {
res = MAILIMF_ERROR_MEMORY;
goto free_id_right;
}
strcpy(msg_id, id_left);
strcat(msg_id, "@");
strcat(msg_id, id_right);
mailimf_id_left_free(id_left);
mailimf_id_right_free(id_right);
#endif
* result = msg_id;
* indx = cur_token;
return MAILIMF_NO_ERROR;
#if 0
free_id_right:
mailimf_id_right_free(id_right);
free_id_left:
mailimf_id_left_free(id_left);
#endif
/*
free:
mailimf_atom_free(msg_id);
*/
err:
return res;
}
|
Safe
|
[
"CWE-476"
] |
libetpan
|
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
|
1.3013626531137283e+38
| 130 |
Fixed crash #274
| 0 |
bool samdb_set_ntds_objectGUID(struct ldb_context *ldb, const struct GUID *ntds_guid_in)
{
return samdb_set_ntds_GUID(ldb,
ntds_guid_in,
"objectGUID",
"cache.ntds_guid");
}
|
Safe
|
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
|
2.82684838576534e+38
| 7 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
| 0 |
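samdb_set_ntds_objectGUID() is incidental context; the pattern CVE-2022-32746's fix targets is sketched below. The unsafe variant searches for an element it just added (and may find a different one with the same name); the safer variant appends in a single call:

```c
struct ldb_message_element *el;
int ret;

/* Error-prone: add an empty element, then search for it by name. */
ldb_msg_add_empty(msg, "attr", 0, NULL);
el = ldb_msg_find_element(msg, "attr");	/* may find the wrong element */

/* Preferred: one call that adds name and value together. */
ret = ldb_msg_add_string(msg, "attr", "value");
if (ret != LDB_SUCCESS)
	return ret;
```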
cssp_read_tsrequest(STREAM token, STREAM pubkey)
{
STREAM s;
int length;
int tagval;
s = tcp_recv(NULL, 4);
if (s == NULL)
return False;
// verify ASN.1 header
if (s->p[0] != (BER_TAG_SEQUENCE | BER_TAG_CONSTRUCTED))
{
error("Expected BER_TAG_SEQUENCE|BER_TAG_CONSTRUCTED, got %x", s->p[0]);
return False;
}
// peek at first 4 bytes to get full message length
if (s->p[1] < 0x80)
length = s->p[1] - 2;
else if (s->p[1] == 0x81)
length = s->p[2] - 1;
else if (s->p[1] == 0x82)
length = (s->p[2] << 8) | s->p[3];
else
return False;
// receive the remainder of the message
s = tcp_recv(s, length);
#if WITH_DEBUG_CREDSSP
streamsave(s, "tsrequest_in.raw");
printf("In TSRequest token %ld bytes\n", s_length(s));
hexdump(s->data, s_length(s));
#endif
// parse the response and into nego token
if (!ber_in_header(s, &tagval, &length) ||
tagval != (BER_TAG_SEQUENCE | BER_TAG_CONSTRUCTED))
return False;
// version [0]
if (!ber_in_header(s, &tagval, &length) ||
tagval != (BER_TAG_CTXT_SPECIFIC | BER_TAG_CONSTRUCTED | 0))
return False;
in_uint8s(s, length);
// negoToken [1]
if (token)
{
if (!ber_in_header(s, &tagval, &length)
|| tagval != (BER_TAG_CTXT_SPECIFIC | BER_TAG_CONSTRUCTED | 1))
return False;
if (!ber_in_header(s, &tagval, &length)
|| tagval != (BER_TAG_SEQUENCE | BER_TAG_CONSTRUCTED))
return False;
if (!ber_in_header(s, &tagval, &length)
|| tagval != (BER_TAG_SEQUENCE | BER_TAG_CONSTRUCTED))
return False;
if (!ber_in_header(s, &tagval, &length)
|| tagval != (BER_TAG_CTXT_SPECIFIC | BER_TAG_CONSTRUCTED | 0))
return False;
if (!ber_in_header(s, &tagval, &length) || tagval != BER_TAG_OCTET_STRING)
return False;
token->end = token->p = token->data;
out_uint8p(token, s->p, length);
s_mark_end(token);
}
// pubKey [3]
if (pubkey)
{
if (!ber_in_header(s, &tagval, &length)
|| tagval != (BER_TAG_CTXT_SPECIFIC | BER_TAG_CONSTRUCTED | 3))
return False;
if (!ber_in_header(s, &tagval, &length) || tagval != BER_TAG_OCTET_STRING)
return False;
pubkey->data = pubkey->p = s->p;
pubkey->end = pubkey->data + length;
pubkey->size = length;
}
return True;
}
|
Vulnerable
|
[
"CWE-787"
] |
rdesktop
|
766ebcf6f23ccfe8323ac10242ae6e127d4505d2
|
1.8780420618993665e+38
| 90 |
Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities were identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
| 1 |
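In this vulnerable row, the code after `s = tcp_recv(s, length);` never re-checks s (CVE-2018-8792) and trusts BER-supplied lengths when copying into token (CVE-2018-8793). A hedged sketch of the hardening, using the rdp_protocol_error() helper the commit message introduces:

```c
/* 1. The second recv can also fail. */
s = tcp_recv(s, length);
if (s == NULL)
	return False;

/* 2. Never consume a BER-supplied length without checking that the
 *    stream actually holds that many bytes. */
if (!s_check_rem(s, length))
	rdp_protocol_error("consume of token would overrun stream", s);
out_uint8p(token, s->p, length);
```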
void std_format(const T &value, std::basic_string<Char> &result) {
std::basic_ostringstream<Char> os;
os << value;
result = os.str();
}
|
Safe
|
[
"CWE-134",
"CWE-119",
"CWE-787"
] |
fmt
|
8cf30aa2be256eba07bb1cefb998c52326e846e7
|
3.336073710751672e+38
| 5 |
Fix segfault on complex pointer formatting (#642)
| 0 |
static long usbdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret;
ret = usbdev_do_ioctl(file, cmd, (void __user *)arg);
return ret;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
681fef8380eb818c0b845fca5d2ab1dcbab114ee
|
8.638031391894079e+37
| 9 |
USB: usbfs: fix potential infoleak in devio
The stack object “ci” has a total size of 8 bytes. Its last 3 bytes
are padding bytes which are not initialized and leaked to userland
via “copy_to_user”.
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
DEFUN(redoPos, REDO, "Cancel the last undo")
{
BufferPos *b = Currentbuf->undo;
int i;
if (!Currentbuf->firstLine)
return;
if (!b || !b->next)
return;
for (i = 0; i < PREC_NUM && b->next; i++, b = b->next) ;
resetPos(b);
}
|
Safe
|
[
"CWE-59",
"CWE-241"
] |
w3m
|
18dcbadf2771cdb0c18509b14e4e73505b242753
|
3.198054504410139e+38
| 12 |
Make temporary directory safely when ~/.w3m is unwritable
| 0 |
same_headers(header_line *one, header_line *two)
{
for (;; one = one->next, two = two->next)
{
if (one == two) return TRUE; /* Includes the case where both NULL */
if (!one || !two) return FALSE;
if (Ustrcmp(one->text, two->text) != 0) return FALSE;
}
}
|
Safe
|
[
"CWE-78"
] |
exim
|
d740d2111f189760593a303124ff6b9b1f83453d
|
2.0516807738163733e+38
| 9 |
Fix CVE-2019-10149
| 0 |
static int ndp_sock_recv(struct ndp *ndp)
{
struct ndp_msg *msg;
enum ndp_msg_type msg_type;
size_t len;
int err;
msg = ndp_msg_alloc();
if (!msg)
return -ENOMEM;
len = ndp_msg_payload_maxlen(msg);
err = myrecvfrom6(ndp->sock, msg->buf, &len, 0,
&msg->addrto, &msg->ifindex, &msg->hoplimit);
if (err) {
err(ndp, "Failed to receive message");
goto free_msg;
}
dbg(ndp, "rcvd from: %s, ifindex: %u, hoplimit: %d",
str_in6_addr(&msg->addrto), msg->ifindex, msg->hoplimit);
if (msg->hoplimit != 255) {
warn(ndp, "ignoring packet with bad hop limit (%d)", msg->hoplimit);
err = 0;
goto free_msg;
}
if (len < sizeof(*msg->icmp6_hdr)) {
warn(ndp, "rcvd icmp6 packet too short (%luB)", len);
err = 0;
goto free_msg;
}
err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type);
if (err) {
err = 0;
goto free_msg;
}
ndp_msg_init(msg, msg_type);
ndp_msg_payload_len_set(msg, len);
if (!ndp_msg_check_valid(msg)) {
warn(ndp, "rcvd invalid ND message");
err = 0;
goto free_msg;
}
dbg(ndp, "rcvd %s, len: %zuB",
ndp_msg_type_info(msg_type)->strabbr, len);
if (!ndp_msg_check_opts(msg)) {
err = 0;
goto free_msg;
}
err = ndp_call_handlers(ndp, msg);
free_msg:
ndp_msg_destroy(msg);
return err;
}
|
Safe
|
[
"CWE-284"
] |
libndp
|
a4892df306e0532487f1634ba6d4c6d4bb381c7f
|
1.752004472964389e+38
| 60 |
libndp: validate the IPv6 hop limit
None of the NDP messages should ever come from a non-local network; as
stated in RFC4861's 6.1.1 (RS), 6.1.2 (RA), 7.1.1 (NS), 7.1.2 (NA),
and 8.1. (redirect):
- The IP Hop Limit field has a value of 255, i.e., the packet
could not possibly have been forwarded by a router.
This fixes CVE-2016-3698.
Reported by: Julien BERNARD <julien.bernard@viagenie.ca>
Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
| 0 |
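The hop-limit check itself is visible in ndp_sock_recv() above; what the row does not show is how myrecvfrom6() obtains the value. A sketch assuming the standard IPV6_RECVHOPLIMIT ancillary-data mechanism:

```c
struct msghdr mhdr;		/* prepared for recvmsg() */
struct cmsghdr *cmsg;
int hoplimit = -1, on = 1;

/* Ask the kernel to attach the hop limit to each received datagram. */
setsockopt(sock, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));

/* After recvmsg(sock, &mhdr, 0), read it from the control messages. */
for (cmsg = CMSG_FIRSTHDR(&mhdr); cmsg; cmsg = CMSG_NXTHDR(&mhdr, cmsg))
	if (cmsg->cmsg_level == IPPROTO_IPV6 &&
	    cmsg->cmsg_type == IPV6_HOPLIMIT)
		hoplimit = *(int *)CMSG_DATA(cmsg);
```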
void CLASS parse_kodak_ifd(int base)
{
unsigned entries, tag, type, len, save;
int i, c, wbi = -2, wbtemp = 6500;
float mul[3] = {1, 1, 1}, num;
static const int wbtag[] = {64037, 64040, 64039, 64041, -1, -1, 64042};
entries = get2();
if (entries > 1024)
return;
while (entries--)
{
tiff_get(base, &tag, &type, &len, &save);
if (tag == 1020)
wbi = getint(type);
if (tag == 1021 && len == 72)
{ /* WB set in software */
fseek(ifp, 40, SEEK_CUR);
FORC3 cam_mul[c] = 2048.0 / fMAX(1.0, get2());
wbi = -2;
}
if (tag == 2118)
wbtemp = getint(type);
if (tag == 2120 + wbi && wbi >= 0)
FORC3 cam_mul[c] = 2048.0 / fMAX(1.0, getreal(type));
if (tag == 2130 + wbi)
FORC3 mul[c] = getreal(type);
if (tag == 2140 + wbi && wbi >= 0)
FORC3
{
for (num = i = 0; i < 4; i++)
num += getreal(type) * pow(wbtemp / 100.0, i);
cam_mul[c] = 2048 / fMAX(1.0, (num * mul[c]));
}
if (tag == 2317)
linear_table(len);
if (tag == 6020)
iso_speed = getint(type);
if (tag == 64013)
wbi = fgetc(ifp);
if ((unsigned)wbi < 7 && tag == wbtag[wbi])
FORC3 cam_mul[c] = get4();
if (tag == 64019)
width = getint(type);
if (tag == 64020)
height = (getint(type) + 1) & -2;
fseek(ifp, save, SEEK_SET);
}
}
|
Safe
|
[
"CWE-476",
"CWE-119"
] |
LibRaw
|
d7c3d2cb460be10a3ea7b32e9443a83c243b2251
|
2.786115057734521e+38
| 49 |
Secunia SA75000 advisory: several buffer overruns
| 0 |
static unsigned long cmd_input_size(unsigned int cmd)
{
/* Size of structure up to and including 'field' */
#define CMDINSIZE(cmd, type, field) \
case VIDIOC_##cmd: \
return offsetof(struct v4l2_##type, field) + \
sizeof(((struct v4l2_##type *)0)->field);
switch (cmd) {
CMDINSIZE(ENUM_FMT, fmtdesc, type);
CMDINSIZE(G_FMT, format, type);
CMDINSIZE(QUERYBUF, buffer, length);
CMDINSIZE(G_PARM, streamparm, type);
CMDINSIZE(ENUMSTD, standard, index);
CMDINSIZE(ENUMINPUT, input, index);
CMDINSIZE(G_CTRL, control, id);
CMDINSIZE(G_TUNER, tuner, index);
CMDINSIZE(QUERYCTRL, queryctrl, id);
CMDINSIZE(QUERYMENU, querymenu, index);
CMDINSIZE(ENUMOUTPUT, output, index);
CMDINSIZE(G_MODULATOR, modulator, index);
CMDINSIZE(G_FREQUENCY, frequency, tuner);
CMDINSIZE(CROPCAP, cropcap, type);
CMDINSIZE(G_CROP, crop, type);
CMDINSIZE(ENUMAUDIO, audio, index);
CMDINSIZE(ENUMAUDOUT, audioout, index);
CMDINSIZE(ENCODER_CMD, encoder_cmd, flags);
CMDINSIZE(TRY_ENCODER_CMD, encoder_cmd, flags);
CMDINSIZE(G_SLICED_VBI_CAP, sliced_vbi_cap, type);
CMDINSIZE(ENUM_FRAMESIZES, frmsizeenum, pixel_format);
CMDINSIZE(ENUM_FRAMEINTERVALS, frmivalenum, height);
default:
return _IOC_SIZE(cmd);
}
}
|
Safe
|
[
"CWE-399"
] |
linux
|
fc0a80798576f80ca10b3f6c9c7097f12fd1d64e
|
5.414379219511864e+37
| 35 |
[media] v4l: Share code between video_usercopy and video_ioctl2
The two functions are mostly identical. They handle the copy_from_user
and copy_to_user operations related with V4L2 ioctls and call the real
ioctl handler.
Create a __video_usercopy function that implements the core of
video_usercopy and video_ioctl2, and call that function from both.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
| 0 |
void udscs_destroy_server(struct udscs_server *server)
{
if (!server)
return;
g_list_free_full(server->connections, vdagent_connection_destroy);
g_object_unref(server->service);
g_free(server);
}
|
Safe
|
[
"CWE-770"
] |
spice-vd_agent
|
91caa9223857708475d29df1768208fed1675340
|
2.78840467086522e+38
| 9 |
Avoids unlimited agent connections
Limit the number of agents that can be connected.
Avoids reaching the maximum number of files in a process.
Besides one file descriptor per agent, the daemon opens just some
other fixed number of files.
This issue was reported by SUSE security team.
Signed-off-by: Frediano Ziglio <freddy77@gmail.com>
| 0 |
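udscs_destroy_server() is teardown context; the limiting logic the message describes belongs in the accept path. A sketch with an illustrative cap (the real constant's name and value are assumptions):

```c
#define MAX_AGENT_CONNECTIONS 256	/* hypothetical limit */

/* Reject a new agent once the cap is reached, so the daemon cannot be
 * driven to its open-file limit by unlimited connections. */
if (g_list_length(server->connections) >= MAX_AGENT_CONNECTIONS) {
	g_object_unref(connection);	/* drop the new client */
	return;
}
server->connections = g_list_prepend(server->connections, connection);
```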
void Ogg::XiphComment::setAlbum(const String &s)
{
addField("ALBUM", s);
}
|
Safe
|
[
"CWE-20"
] |
taglib
|
ab8a0ee8937256311e649a88e8ddd7c7f870ad59
|
2.722446561867061e+38
| 4 |
Don't store the output of ByteVector::toUInt() in int, use uint instead
| 0 |
TEST_P(ProtocolIntegrationTest, RouterVirtualClusters) { testRouterVirtualClusters(); }
|
Safe
|
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
|
3.3454367232331544e+38
| 1 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <avd@google.com>
| 0 |
static bool check_simple_equality(THD *thd, const Item::Context &ctx,
Item *left_item, Item *right_item,
COND_EQUAL *cond_equal)
{
Item *orig_left_item= left_item;
Item *orig_right_item= right_item;
if (left_item->type() == Item::REF_ITEM &&
((Item_ref*)left_item)->ref_type() == Item_ref::VIEW_REF)
{
if (((Item_ref*)left_item)->get_depended_from())
return FALSE;
if (((Item_direct_view_ref*)left_item)->get_null_ref_table() !=
NO_NULL_TABLE && !left_item->real_item()->used_tables())
return FALSE;
left_item= left_item->real_item();
}
if (right_item->type() == Item::REF_ITEM &&
((Item_ref*)right_item)->ref_type() == Item_ref::VIEW_REF)
{
if (((Item_ref*)right_item)->get_depended_from())
return FALSE;
if (((Item_direct_view_ref*)right_item)->get_null_ref_table() !=
NO_NULL_TABLE && !right_item->real_item()->used_tables())
return FALSE;
right_item= right_item->real_item();
}
if (left_item->type() == Item::FIELD_ITEM &&
right_item->type() == Item::FIELD_ITEM &&
!((Item_field*)left_item)->get_depended_from() &&
!((Item_field*)right_item)->get_depended_from())
{
/* The predicate the form field1=field2 is processed */
Field *left_field= ((Item_field*) left_item)->field;
Field *right_field= ((Item_field*) right_item)->field;
if (!left_field->eq_def(right_field))
return FALSE;
/* Search for multiple equalities containing field1 and/or field2 */
bool left_copyfl, right_copyfl;
Item_equal *left_item_equal=
find_item_equal(cond_equal, left_field, &left_copyfl);
Item_equal *right_item_equal=
find_item_equal(cond_equal, right_field, &right_copyfl);
/* As (NULL=NULL) != TRUE we can't just remove the predicate f=f */
if (left_field->eq(right_field)) /* f = f */
return (!(left_field->maybe_null() && !left_item_equal));
if (left_item_equal && left_item_equal == right_item_equal)
{
/*
The equality predicate is inference of one of the existing
multiple equalities, i.e the condition is already covered
by upper level equalities
*/
return TRUE;
}
/* Copy the found multiple equalities at the current level if needed */
if (left_copyfl)
{
/* left_item_equal of an upper level contains left_item */
left_item_equal= new (thd->mem_root) Item_equal(thd, left_item_equal);
left_item_equal->set_context_field(((Item_field*) left_item));
cond_equal->current_level.push_back(left_item_equal, thd->mem_root);
}
if (right_copyfl)
{
/* right_item_equal of an upper level contains right_item */
right_item_equal= new (thd->mem_root) Item_equal(thd, right_item_equal);
right_item_equal->set_context_field(((Item_field*) right_item));
cond_equal->current_level.push_back(right_item_equal, thd->mem_root);
}
if (left_item_equal)
{
/* left item was found in the current or one of the upper levels */
if (! right_item_equal)
left_item_equal->add(orig_right_item, thd->mem_root);
else
{
/* Merge two multiple equalities forming a new one */
left_item_equal->merge(thd, right_item_equal);
/* Remove the merged multiple equality from the list */
List_iterator<Item_equal> li(cond_equal->current_level);
while ((li++) != right_item_equal) ;
li.remove();
}
}
else
{
/* left item was not found neither the current nor in upper levels */
if (right_item_equal)
right_item_equal->add(orig_left_item, thd->mem_root);
else
{
/* None of the fields was found in multiple equalities */
Item_equal *item_equal= new (thd->mem_root) Item_equal(thd,
orig_left_item,
orig_right_item,
FALSE);
item_equal->set_context_field((Item_field*)left_item);
cond_equal->current_level.push_back(item_equal, thd->mem_root);
}
}
return TRUE;
}
{
/* The predicate of the form field=const/const=field is processed */
Item *const_item= 0;
Item_field *field_item= 0;
Item *orig_field_item= 0;
if (left_item->type() == Item::FIELD_ITEM &&
!((Item_field*)left_item)->get_depended_from() &&
right_item->const_item() && !right_item->is_expensive())
{
orig_field_item= orig_left_item;
field_item= (Item_field *) left_item;
const_item= right_item;
}
else if (right_item->type() == Item::FIELD_ITEM &&
!((Item_field*)right_item)->get_depended_from() &&
left_item->const_item() && !left_item->is_expensive())
{
orig_field_item= orig_right_item;
field_item= (Item_field *) right_item;
const_item= left_item;
}
if (const_item &&
field_item->field->test_if_equality_guarantees_uniqueness(const_item))
{
/*
field_item and const_item are arguments of a scalar or a row
comparison function:
WHERE column=constant
WHERE (column, ...) = (constant, ...)
The owner comparison function has previously called fix_fields(),
so field_item and const_item should be directly comparable items,
field_item->cmp_context and const_item->cmp_context should be set.
In case of string comparison, charsets and collations of
field_item and const_item should have already be aggregated
for comparison, all necessary character set converters installed
and fixed.
In case of string comparison, const_item can be either:
- a weaker constant that does not need to be converted to field_item:
WHERE latin1_field = 'latin1_const'
WHERE varbinary_field = 'latin1_const'
WHERE latin1_bin_field = 'latin1_general_ci_const'
- a stronger constant that does not need to be converted to field_item:
WHERE latin1_field = binary 0xDF
WHERE latin1_field = 'a' COLLATE latin1_bin
- a result of conversion (e.g. from the session character set)
to the character set of field_item:
WHERE latin1_field = 'utf8_string_with_latin1_repertoire'
*/
bool copyfl;
Item_equal *item_equal = find_item_equal(cond_equal,
field_item->field, &copyfl);
if (copyfl)
{
item_equal= new (thd->mem_root) Item_equal(thd, item_equal);
cond_equal->current_level.push_back(item_equal, thd->mem_root);
item_equal->set_context_field(field_item);
}
Item *const_item2= field_item->field->get_equal_const_item(thd, ctx,
const_item);
if (!const_item2)
return false;
if (item_equal)
{
/*
The flag cond_false will be set to 1 after this, if item_equal
already contains a constant and its value is not equal to
the value of const_item.
*/
item_equal->add_const(thd, const_item2);
}
else
{
item_equal= new (thd->mem_root) Item_equal(thd, const_item2,
orig_field_item, TRUE);
item_equal->set_context_field(field_item);
cond_equal->current_level.push_back(item_equal, thd->mem_root);
}
return TRUE;
}
}
return FALSE;
}
|
Safe
|
[
"CWE-89"
] |
server
|
5ba77222e9fe7af8ff403816b5338b18b342053c
|
2.734954380443161e+38
| 197 |
MDEV-21028 Server crashes in Query_arena::set_query_arena upon SELECT from view
if the view has algorithm=temptable it is not updatable,
so DEFAULT() for its fields is meaningless,
and thus it's NULL or 0/'' for NOT NULL columns.
| 0 |
verifier_load_method (VerifyContext *ctx, int token, const char *opcode) {
MonoMethod* method;
if (!IS_METHOD_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid method token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
method = mono_get_method_full (ctx->image, token, NULL, ctx->generic_context);
if (!method) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Cannot load method from token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
if (mono_method_is_valid_in_context (ctx, method) == RESULT_INVALID)
return NULL;
return method;
}
|
Safe
|
[
"CWE-20"
] |
mono
|
4905ef1130feb26c3150b28b97e4a96752e0d399
|
1.3930568418724428e+38
| 20 |
Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847
| 0 |
WriteCompressedCellArrayField(mat_t *mat,matvar_t *matvar,z_streamp z)
{
mat_uint32_t comp_buf[512];
mat_uint32_t uncomp_buf[512] = {0,};
int buf_size = 512;
size_t byteswritten = 0, field_buf_size;
if ( NULL == matvar || NULL == mat || NULL == z)
return 0;
uncomp_buf[0] = MAT_T_MATRIX;
if ( MAT_C_EMPTY != matvar->class_type ) {
int err = GetCellArrayFieldBufSize(matvar, &field_buf_size);
if (err || field_buf_size > UINT32_MAX)
return 0;
uncomp_buf[1] = field_buf_size;
} else {
uncomp_buf[1] = 0;
}
z->next_in = ZLIB_BYTE_PTR(uncomp_buf);
z->avail_in = 8;
do {
z->next_out = ZLIB_BYTE_PTR(comp_buf);
z->avail_out = buf_size*sizeof(*comp_buf);
deflate(z,Z_NO_FLUSH);
byteswritten += fwrite(comp_buf,1,buf_size*sizeof(*comp_buf)-z->avail_out,
(FILE*)mat->fp);
} while ( z->avail_out == 0 );
byteswritten += WriteCompressedTypeArrayFlags(mat,matvar,z);
return byteswritten;
}
|
Safe
|
[
"CWE-190",
"CWE-401"
] |
matio
|
5fa49ef9fc4368fe3d19b5fdaa36d8fa5e7f4606
|
3.06074256961502e+38
| 33 |
Fix integer addition overflow
As reported by https://github.com/tbeu/matio/issues/121
| 0 |
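The guard `err || field_buf_size > UINT32_MAX` above is half of the fix; the other half is checking the addition itself before it wraps. The overflow-safe pattern, sketched with hypothetical operands:

```c
size_t a = base_size, b = extra, total;	/* illustrative values */

/* a + b wraps around size_t exactly when a > SIZE_MAX - b,
 * so test before adding (SIZE_MAX is from <stdint.h>). */
if (a > SIZE_MAX - b)
	return 1;
total = a + b;
```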
static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
struct net_device *dev = mc->idev->dev;
char buf[MAX_ADDR_LEN];
if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
IPV6_ADDR_SCOPE_LINKLOCAL)
return;
if (mc->mca_flags&MAF_LOADED) {
mc->mca_flags &= ~MAF_LOADED;
if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
dev_mc_del(dev, buf);
}
if (mc->mca_flags & MAF_NOREPORT)
return;
if (!mc->idev->dead)
igmp6_leave_group(mc);
if (cancel_delayed_work(&mc->mca_work))
refcount_dec(&mc->mca_refcnt);
}
|
Safe
|
[
"CWE-703"
] |
linux
|
2d3916f3189172d5c69d33065c3c21119fe539fc
|
1.7416036563623e+38
| 24 |
ipv6: fix skb drops in igmp6_event_query() and igmp6_event_report()
While investigating why a synchronize_net() was recently added
in ipv6_mc_down(), I found that igmp6_event_query() and igmp6_event_report()
might drop skbs in some cases.
Discussion about removing synchronize_net() from ipv6_mc_down()
will happen in a different thread.
Fixes: f185de28d9ae ("mld: add new workqueues for process mld events")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Taehee Yoo <ap420073@gmail.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20220303173728.937869-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
| 0 |
static void ucma_close_id(struct work_struct *work)
{
struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);
/* once all inflight tasks are finished, we close all underlying
* resources. The context is still alive till its explicit destruction
* by its creator.
*/
ucma_put_ctx(ctx);
wait_for_completion(&ctx->comp);
/* No new events will be generated after destroying the id. */
rdma_destroy_id(ctx->cm_id);
}
|
Safe
|
[
"CWE-416",
"CWE-703"
] |
linux
|
cb2595c1393b4a5211534e6f0a0fbad369e21ad8
|
2.341391799762249e+38
| 13 |
infiniband: fix a possible use-after-free bug
ucma_process_join() will free the newly allocated "mc" struct,
if there is any error after that, especially the copy_to_user().
But in parallel, ucma_leave_multicast() could find this "mc"
through idr_find() before ucma_process_join() frees it, since it
is already published.
So "mc" could be used in ucma_leave_multicast() after it is been
allocated and freed in ucma_process_join(), since we don't refcnt
it.
Fix this by separating "publish" from ID allocation, so that we
can get an ID first and publish it later after copy_to_user().
Fixes: c8f6a362bf3e ("RDMA/cma: Add multicast communication support")
Reported-by: Noam Rathaus <noamr@beyondsecurity.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
| 0 |
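ucma_close_id() is context; the core of the fix is the allocate-then-publish split the message describes. A hedged sketch using the idr reserve/replace idiom:

```c
/* Reserve an ID but publish NULL, so concurrent lookups cannot
 * find "mc" before it is fully initialized and reported. */
mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
if (mc->id < 0)
	goto err;

/* ... fill in "mc" and copy_to_user() the response ... */

/* Only now make the object reachable through the idr. */
idr_replace(&multicast_idr, mc, mc->id);
```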
static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
{
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id;
trace_pci_nvme_identify_ctrl_csi(c->csi);
switch (c->csi) {
case NVME_CSI_NVM:
id_nvm->vsl = n->params.vsl;
id_nvm->dmrsl = cpu_to_le32(n->dmrsl);
break;
case NVME_CSI_ZONED:
((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl;
break;
default:
return NVME_INVALID_FIELD | NVME_DNR;
}
return nvme_c2h(n, id, sizeof(id), req);
}
|
Safe
|
[] |
qemu
|
736b01642d85be832385063f278fe7cd4ffb5221
|
2.2070529929472928e+38
| 24 |
hw/nvme: fix CVE-2021-3929
This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
device itself. This still allows DMA to MMIO regions of other devices
(e.g. doing P2P DMA to the controller memory buffer of another NVMe
device).
Fixes: CVE-2021-3929
Reported-by: Qiuhao Li <Qiuhao.Li@outlook.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
| 0 |
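nvme_identify_ctrl_csi() is incidental; the CVE-2021-3929 fix adds an address filter on the DMA path. A sketch close to the patch as I recall it (field names are assumptions):

```c
static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr)
{
    /* Reject "local" DMA: addresses inside the controller's own BAR0
     * (registers and MSI-X table) must not be used as data buffers. */
    hwaddr lo = n->bar0.addr;
    hwaddr hi = lo + int128_get64(n->bar0.size);

    return addr >= lo && addr < hi;
}
```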
static inline int kmem_cache_close(struct kmem_cache *s)
{
int node;
flush_all(s);
/* Attempt to free all objects */
free_kmem_cache_cpus(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
free_partial(s, n);
if (n->nr_partial || slabs_node(s, node))
return 1;
}
free_kmem_cache_nodes(s);
return 0;
}
|
Safe
|
[
"CWE-189"
] |
linux
|
f8bd2258e2d520dff28c855658bd24bdafb5102d
|
5.552205995890531e+37
| 18 |
remove div_long_long_rem
x86 is currently the only arch that provides an optimized implementation of
div_long_long_rem, and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
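kmem_cache_close() above is unrelated slab code touched in the same tree; the conversion the message describes replaces div_long_long_rem() with the math64 helpers. A sketch of the replacement idiom, assuming the div_u64_rem() API from linux/math64.h:

```c
u64 dividend = 1000000007ULL;	/* illustrative operands */
u32 divisor = 86400, rem;
u64 quot;

/* Unsigned 64/32 divide with remainder: no signed-argument oddity
 * and no overflow trap on 64-bit archs. */
quot = div_u64_rem(dividend, divisor, &rem);
```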
static av_always_inline void predict_slice(SnowContext *s, IDWTELEM *buf, int plane_index, int add, int mb_y){
Plane *p= &s->plane[plane_index];
const int mb_w= s->b_width << s->block_max_depth;
const int mb_h= s->b_height << s->block_max_depth;
int x, y, mb_x;
int block_size = MB_SIZE >> s->block_max_depth;
int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
int ref_stride= s->current_picture->linesize[plane_index];
uint8_t *dst8= s->current_picture->data[plane_index];
int w= p->width;
int h= p->height;
av_assert2(s->chroma_h_shift == s->chroma_v_shift); // obmc params assume squares
if(s->keyframe || (s->avctx->debug&512)){
if(mb_y==mb_h)
return;
if(add){
for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
for(x=0; x<w; x++){
int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
v >>= FRAC_BITS;
if(v&(~255)) v= ~(v>>31);
dst8[x + y*ref_stride]= v;
}
}
}else{
for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
for(x=0; x<w; x++){
buf[x + y*w]-= 128<<FRAC_BITS;
}
}
}
return;
}
for(mb_x=0; mb_x<=mb_w; mb_x++){
add_yblock(s, 0, NULL, buf, dst8, obmc,
block_w*mb_x - block_w/2,
block_h*mb_y - block_h/2,
block_w, block_h,
w, h,
w, ref_stride, obmc_stride,
mb_x - 1, mb_y - 1,
add, 1, plane_index);
}
}
|
Safe
|
[
"CWE-703"
] |
FFmpeg
|
61d59703c91869f4e5cdacd8d6be52f8b89d4ba4
|
2.9752070557085664e+38
| 50 |
avcodec/snow: split block clipping checks
Fixes out of array read
Fixes: d4476f68ca1c1c57afbc45806f581963-asan_heap-oob_2266b27_8607_cov_4044577381_snow_chroma_bug.avi
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
| 0 |
static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
vmx_decache_cr0_guest_bits(vcpu);
vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits = 0;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}
|
Safe
|
[
"CWE-400"
] |
linux-2.6
|
9581d442b9058d3699b4be568b6e5eae38a41493
|
7.116553193198133e+37
| 9 |
KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly; however, the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
| 0 |
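vmx_fpu_deactivate() is neighboring context; the fix the message describes swaps blind segment reloads for the safe accessors. A sketch of the host-state reload as the commit applies it (surrounding conditions elided):

```c
/* loadsegment()/load_gs_index() tolerate a selector made invalid by a
 * concurrent LDT modification instead of faulting on a raw mov. */
loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
#endif
```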
GF_Err rssr_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s;
ISOM_DECREASE_SIZE(ptr, 4)
ptr->ssrc = gf_bs_read_u32(bs);
return GF_OK;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
|
2.266134872922485e+38
| 7 |
fixed #1587
| 0 |
flatpak_context_load_metadata (FlatpakContext *context,
GKeyFile *metakey,
GError **error)
{
gboolean remove;
g_auto(GStrv) groups = NULL;
int i;
if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_SHARED, NULL))
{
g_auto(GStrv) shares = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT,
FLATPAK_METADATA_KEY_SHARED, NULL, error);
if (shares == NULL)
return FALSE;
for (i = 0; shares[i] != NULL; i++)
{
FlatpakContextShares share;
share = flatpak_context_share_from_string (parse_negated (shares[i], &remove), error);
if (share == 0)
return FALSE;
if (remove)
flatpak_context_remove_shares (context, share);
else
flatpak_context_add_shares (context, share);
}
}
if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_SOCKETS, NULL))
{
g_auto(GStrv) sockets = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT,
FLATPAK_METADATA_KEY_SOCKETS, NULL, error);
if (sockets == NULL)
return FALSE;
for (i = 0; sockets[i] != NULL; i++)
{
FlatpakContextSockets socket = flatpak_context_socket_from_string (parse_negated (sockets[i], &remove), error);
if (socket == 0)
return FALSE;
if (remove)
flatpak_context_remove_sockets (context, socket);
else
flatpak_context_add_sockets (context, socket);
}
}
if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_DEVICES, NULL))
{
g_auto(GStrv) devices = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT,
FLATPAK_METADATA_KEY_DEVICES, NULL, error);
if (devices == NULL)
return FALSE;
for (i = 0; devices[i] != NULL; i++)
{
FlatpakContextDevices device = flatpak_context_device_from_string (parse_negated (devices[i], &remove), error);
if (device == 0)
return FALSE;
if (remove)
flatpak_context_remove_devices (context, device);
else
flatpak_context_add_devices (context, device);
}
}
if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_FEATURES, NULL))
{
g_auto(GStrv) features = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT,
FLATPAK_METADATA_KEY_FEATURES, NULL, error);
if (features == NULL)
return FALSE;
for (i = 0; features[i] != NULL; i++)
{
FlatpakContextFeatures feature = flatpak_context_feature_from_string (parse_negated (features[i], &remove), error);
if (feature == 0)
return FALSE;
if (remove)
flatpak_context_remove_features (context, feature);
else
flatpak_context_add_features (context, feature);
}
}
if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_FILESYSTEMS, NULL))
{
g_auto(GStrv) filesystems = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT,
FLATPAK_METADATA_KEY_FILESYSTEMS, NULL, error);
if (filesystems == NULL)
return FALSE;
for (i = 0; filesystems[i] != NULL; i++)
{
const char *fs = parse_negated (filesystems[i], &remove);
if (!flatpak_context_verify_filesystem (fs, error))
return FALSE;
if (remove)
flatpak_context_remove_filesystem (context, fs);
else
flatpak_context_add_filesystem (context, fs);
}
}
if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_PERSISTENT, NULL))
{
g_auto(GStrv) persistent = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT,
FLATPAK_METADATA_KEY_PERSISTENT, NULL, error);
if (persistent == NULL)
return FALSE;
for (i = 0; persistent[i] != NULL; i++)
flatpak_context_set_persistent (context, persistent[i]);
}
if (g_key_file_has_group (metakey, FLATPAK_METADATA_GROUP_SESSION_BUS_POLICY))
{
g_auto(GStrv) keys = NULL;
gsize i, keys_count;
keys = g_key_file_get_keys (metakey, FLATPAK_METADATA_GROUP_SESSION_BUS_POLICY, &keys_count, NULL);
for (i = 0; i < keys_count; i++)
{
const char *key = keys[i];
g_autofree char *value = g_key_file_get_string (metakey, FLATPAK_METADATA_GROUP_SESSION_BUS_POLICY, key, NULL);
FlatpakPolicy policy;
if (!flatpak_verify_dbus_name (key, error))
return FALSE;
policy = flatpak_policy_from_string (value, error);
if ((int) policy == -1)
return FALSE;
flatpak_context_set_session_bus_policy (context, key, policy);
}
}
if (g_key_file_has_group (metakey, FLATPAK_METADATA_GROUP_SYSTEM_BUS_POLICY))
{
g_auto(GStrv) keys = NULL;
gsize i, keys_count;
keys = g_key_file_get_keys (metakey, FLATPAK_METADATA_GROUP_SYSTEM_BUS_POLICY, &keys_count, NULL);
for (i = 0; i < keys_count; i++)
{
const char *key = keys[i];
g_autofree char *value = g_key_file_get_string (metakey, FLATPAK_METADATA_GROUP_SYSTEM_BUS_POLICY, key, NULL);
FlatpakPolicy policy;
if (!flatpak_verify_dbus_name (key, error))
return FALSE;
policy = flatpak_policy_from_string (value, error);
if ((int) policy == -1)
return FALSE;
flatpak_context_set_system_bus_policy (context, key, policy);
}
}
if (g_key_file_has_group (metakey, FLATPAK_METADATA_GROUP_ENVIRONMENT))
{
g_auto(GStrv) keys = NULL;
gsize i, keys_count;
keys = g_key_file_get_keys (metakey, FLATPAK_METADATA_GROUP_ENVIRONMENT, &keys_count, NULL);
for (i = 0; i < keys_count; i++)
{
const char *key = keys[i];
g_autofree char *value = g_key_file_get_string (metakey, FLATPAK_METADATA_GROUP_ENVIRONMENT, key, NULL);
flatpak_context_set_env_var (context, key, value);
}
}
groups = g_key_file_get_groups (metakey, NULL);
for (i = 0; groups[i] != NULL; i++)
{
const char *group = groups[i];
const char *subsystem;
int j;
if (g_str_has_prefix (group, FLATPAK_METADATA_GROUP_PREFIX_POLICY))
{
g_auto(GStrv) keys = NULL;
subsystem = group + strlen (FLATPAK_METADATA_GROUP_PREFIX_POLICY);
keys = g_key_file_get_keys (metakey, group, NULL, NULL);
for (j = 0; keys != NULL && keys[j] != NULL; j++)
{
const char *key = keys[j];
g_autofree char *policy_key = g_strdup_printf ("%s.%s", subsystem, key);
g_auto(GStrv) values = NULL;
int k;
values = g_key_file_get_string_list (metakey, group, key, NULL, NULL);
for (k = 0; values != NULL && values[k] != NULL; k++)
flatpak_context_apply_generic_policy (context, policy_key,
values[k]);
}
}
}
return TRUE;
}
|
Safe
|
[
"CWE-20"
] |
flatpak
|
902fb713990a8f968ea4350c7c2a27ff46f1a6c4
|
1.044220763104088e+38
| 208 |
Use seccomp to filter out TIOCSTI ioctl
This would otherwise let the sandbox add input to the controlling tty.
| 0 |
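flatpak_context_load_metadata() is neighboring context; the TIOCSTI filter itself is a seccomp-bpf rule. A sketch with libseccomp matching the pattern the message implies (the argument mask is an assumption):

```c
#include <errno.h>
#include <seccomp.h>
#include <sys/ioctl.h>

static int block_tiocsti(void)
{
	scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);

	/* Fail ioctl(fd, TIOCSTI, ...) with EPERM so the sandbox cannot
	 * inject input into the controlling tty. */
	if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(ioctl), 1,
			     SCMP_A1(SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu,
				     TIOCSTI)) < 0)
		return -1;
	return seccomp_load(ctx);
}
```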
ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
char **field, int *ftype, int maxfields, int *numfields)
{
int nf = 0;
const char *cp = timestr;
char *bufp = workbuf;
const char *bufend = workbuf + buflen;
/*
* Set the character pointed-to by "bufptr" to "newchar", and increment
* "bufptr". "end" gives the end of the buffer -- we return an error if
* there is no space left to append a character to the buffer. Note that
* "bufptr" is evaluated twice.
*/
#define APPEND_CHAR(bufptr, end, newchar) \
do \
{ \
if (((bufptr) + 1) >= (end)) \
return DTERR_BAD_FORMAT; \
*(bufptr)++ = newchar; \
} while (0)
/* outer loop through fields */
while (*cp != '\0')
{
/* Ignore spaces between fields */
if (isspace((unsigned char) *cp))
{
cp++;
continue;
}
/* Record start of current field */
if (nf >= maxfields)
return DTERR_BAD_FORMAT;
field[nf] = bufp;
/* leading digit? then date or time */
if (isdigit((unsigned char) *cp))
{
APPEND_CHAR(bufp, bufend, *cp++);
while (isdigit((unsigned char) *cp))
APPEND_CHAR(bufp, bufend, *cp++);
/* time field? */
if (*cp == ':')
{
ftype[nf] = DTK_TIME;
APPEND_CHAR(bufp, bufend, *cp++);
while (isdigit((unsigned char) *cp) ||
(*cp == ':') || (*cp == '.'))
APPEND_CHAR(bufp, bufend, *cp++);
}
/* date field? allow embedded text month */
else if (*cp == '-' || *cp == '/' || *cp == '.')
{
/* save delimiting character to use later */
char delim = *cp;
APPEND_CHAR(bufp, bufend, *cp++);
/* second field is all digits? then no embedded text month */
if (isdigit((unsigned char) *cp))
{
ftype[nf] = ((delim == '.') ? DTK_NUMBER : DTK_DATE);
while (isdigit((unsigned char) *cp))
APPEND_CHAR(bufp, bufend, *cp++);
/*
* insist that the delimiters match to get a three-field
* date.
*/
if (*cp == delim)
{
ftype[nf] = DTK_DATE;
APPEND_CHAR(bufp, bufend, *cp++);
while (isdigit((unsigned char) *cp) || *cp == delim)
APPEND_CHAR(bufp, bufend, *cp++);
}
}
else
{
ftype[nf] = DTK_DATE;
while (isalnum((unsigned char) *cp) || *cp == delim)
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
}
}
/*
* otherwise, number only and will determine year, month, day, or
* concatenated fields later...
*/
else
ftype[nf] = DTK_NUMBER;
}
/* Leading decimal point? Then fractional seconds... */
else if (*cp == '.')
{
APPEND_CHAR(bufp, bufend, *cp++);
while (isdigit((unsigned char) *cp))
APPEND_CHAR(bufp, bufend, *cp++);
ftype[nf] = DTK_NUMBER;
}
/*
* text? then date string, month, day of week, special, or timezone
*/
else if (isalpha((unsigned char) *cp))
{
bool is_date;
ftype[nf] = DTK_STRING;
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
while (isalpha((unsigned char) *cp))
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
/*
* Dates can have embedded '-', '/', or '.' separators. It could
* also be a timezone name containing embedded '/', '+', '-', '_',
* or ':' (but '_' or ':' can't be the first punctuation). If the
* next character is a digit or '+', we need to check whether what
* we have so far is a recognized non-timezone keyword --- if so,
* don't believe that this is the start of a timezone.
*/
is_date = false;
if (*cp == '-' || *cp == '/' || *cp == '.')
is_date = true;
else if (*cp == '+' || isdigit((unsigned char) *cp))
{
*bufp = '\0'; /* null-terminate current field value */
/* we need search only the core token table, not TZ names */
if (datebsearch(field[nf], datetktbl, szdatetktbl) == NULL)
is_date = true;
}
if (is_date)
{
ftype[nf] = DTK_DATE;
do
{
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
} while (*cp == '+' || *cp == '-' ||
*cp == '/' || *cp == '_' ||
*cp == '.' || *cp == ':' ||
isalnum((unsigned char) *cp));
}
}
/* sign? then special or numeric timezone */
else if (*cp == '+' || *cp == '-')
{
APPEND_CHAR(bufp, bufend, *cp++);
/* soak up leading whitespace */
while (isspace((unsigned char) *cp))
cp++;
/* numeric timezone? */
/* note that "DTK_TZ" could also be a signed float or yyyy-mm */
if (isdigit((unsigned char) *cp))
{
ftype[nf] = DTK_TZ;
APPEND_CHAR(bufp, bufend, *cp++);
while (isdigit((unsigned char) *cp) ||
*cp == ':' || *cp == '.' || *cp == '-')
APPEND_CHAR(bufp, bufend, *cp++);
}
/* special? */
else if (isalpha((unsigned char) *cp))
{
ftype[nf] = DTK_SPECIAL;
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
while (isalpha((unsigned char) *cp))
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
}
/* otherwise something wrong... */
else
return DTERR_BAD_FORMAT;
}
/* ignore other punctuation but use as delimiter */
else if (ispunct((unsigned char) *cp))
{
cp++;
continue;
}
/* otherwise, something is not right... */
else
return DTERR_BAD_FORMAT;
/* force in a delimiter after each field */
*bufp++ = '\0';
nf++;
}
*numfields = nf;
return 0;
}
|
Safe
|
[
"CWE-119"
] |
postgres
|
01824385aead50e557ca1af28640460fa9877d51
|
1.8994736808106075e+38
| 194 |
Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()
| 0 |
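The strlcpy() pattern the commit message recommends can be sketched as follows. This is a generic fallback implementation (not PostgreSQL's own code), shown because not every libc ships strlcpy:

```c
#include <stddef.h>
#include <string.h>

/* Bounded copy into a fixed-size buffer; always NUL-terminates when
 * dstsize > 0.  Returns strlen(src), so a return value >= dstsize
 * tells the caller the copy was truncated. */
static size_t my_strlcpy(char *dst, const char *src, size_t dstsize)
{
    size_t srclen = strlen(src);
    if (dstsize != 0) {
        size_t n = srclen < dstsize - 1 ? srclen : dstsize - 1;
        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return srclen;
}
```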
GF_Err ireftype_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_ItemReferenceTypeBox *ptr = (GF_ItemReferenceTypeBox *)s;
ptr->type = ptr->reference_type;
e = gf_isom_box_write_header(s, bs);
ptr->type = GF_ISOM_BOX_TYPE_REFI;
if (e) return e;
gf_bs_write_u16(bs, ptr->from_item_id);
gf_bs_write_u16(bs, ptr->reference_count);
for (i = 0; i < ptr->reference_count; i++) {
gf_bs_write_u16(bs, ptr->to_item_IDs[i]);
}
return GF_OK;
}
|
Safe
|
[
"CWE-401",
"CWE-787"
] |
gpac
|
ec64c7b8966d7e4642d12debb888be5acf18efb9
|
8.641441620444012e+37
| 16 |
fixed #1786 (fuzz)
| 0 |
static int createFromTiffLines(TIFF *tif, gdImagePtr im, uint16 bps, uint16 photometric,
char has_alpha, char is_bw, int extra)
{
uint16 planar;
uint32 im_height, im_width, y;
unsigned char *buffer;
if (!TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planar)) {
planar = PLANARCONFIG_CONTIG;
}
if (!TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &im_height)) {
gd_error("Can't fetch TIFF height\n");
return FALSE;
}
if (!TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &im_width)) {
gd_error("Can't fetch TIFF width \n");
return FALSE;
}
buffer = (unsigned char *)gdMalloc(im_width * 4);
if (!buffer) {
return GD_FAILURE;
}
if (planar == PLANARCONFIG_CONTIG) {
switch (bps) {
case 16:
/* TODO
* or simply use force_rgba
*/
break;
case 8:
for (y = 0; y < im_height; y++ ) {
if (!TIFFReadScanline (tif, buffer, y, 0)) {
gd_error("Error while reading scanline %i", y);
break;
}
/* reading one line at a time */
readTiff8bit(buffer, im, photometric, 0, y, im_width, 1, has_alpha, extra, 0);
}
break;
default:
if (is_bw) {
for (y = 0; y < im_height; y++ ) {
if (!TIFFReadScanline (tif, buffer, y, 0)) {
gd_error("Error while reading scanline %i", y);
break;
}
/* reading one line at a time */
readTiffBw(buffer, im, photometric, 0, y, im_width, 1, has_alpha, extra, 0);
}
} else {
/* TODO: implement some default reader or detect this case earlier > force_rgb */
}
break;
}
} else {
/* TODO: implement a reader for separate panes. We detect this case earlier for now and use force_rgb */
}
gdFree(buffer);
return GD_SUCCESS;
}
|
Safe
|
[
"CWE-125"
] |
libgd
|
4859d69e07504d4b0a4bdf9bcb4d9e3769ca35ae
|
7.547158949026561e+37
| 67 |
Fix invalid read in gdImageCreateFromTiffPtr()
tiff_invalid_read.tiff is corrupt, and causes an invalid read in
gdImageCreateFromTiffPtr(), but not in gdImageCreateFromTiff(). The culprit
is dynamicGetbuf(), which doesn't check for out-of-bound reads. In this case,
dynamicGetbuf() is called with a negative dp->pos, but also positive buffer
overflows have to be handled, in which case 0 has to be returned (cf. commit
75e29a9).
Fixing dynamicGetbuf() exhibits that the corrupt TIFF would still create
the image, because the return value of TIFFReadRGBAImage() is not checked.
We do that, and let createFromTiffRgba() fail if TIFFReadRGBAImage() fails.
This issue had been reported by Ibrahim El-Sayed to security@libgd.org.
CVE-2016-6911
| 0 |
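A bounds-checked read in the spirit of the dynamicGetbuf() fix might look like the sketch below; the struct and names are illustrative, not libgd's actual types. A negative position or an exhausted buffer yields 0, and oversized requests are clamped to the bytes that remain:

```c
#include <string.h>

typedef struct { unsigned char *data; int size, pos; } dynbuf;

static int checked_getbuf(dynbuf *b, void *out, int len)
{
    if (b->pos < 0 || b->pos >= b->size || len <= 0)
        return 0;                    /* nothing safely readable */
    if (len > b->size - b->pos)
        len = b->size - b->pos;      /* clamp to what remains */
    memcpy(out, b->data + b->pos, len);
    b->pos += len;
    return len;
}
```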
void ConnectionImpl::flushOutput(bool end_encode) {
if (end_encode) {
// If this is an HTTP response in ServerConnectionImpl, track outbound responses for flood
// protection
maybeAddSentinelBufferFragment(output_buffer_);
}
connection().write(output_buffer_, false);
ASSERT(0UL == output_buffer_.length());
}
|
Safe
|
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
|
2.2907974318807512e+38
| 9 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <avd@google.com>
| 0 |
static inline int chomp_trailing_dir_sep(const char *path, int len)
{
while (len && is_dir_sep(path[len - 1]))
len--;
return len;
}
|
Safe
|
[
"CWE-125"
] |
git
|
11a9f4d807a0d71dc6eff51bb87baf4ca2cccf1d
|
2.707812719758406e+38
| 6 |
is_ntfs_dotgit: use a size_t for traversing string
We walk through the "name" string using an int, which can
wrap to a negative value and cause us to read random memory
before our array (e.g., by creating a tree with a name >2GB,
since "int" is still 32 bits even on most 64-bit platforms).
Worse, this is easy to trigger during the fsck_tree() check,
which is supposed to be protecting us from malicious
garbage.
Note one bit of trickiness in the existing code: we
sometimes assign -1 to "len" at the end of the loop, and
then rely on the "len++" in the for-loop's increment to take
it back to 0. This is still legal with a size_t, since
assigning -1 will turn into SIZE_MAX, which then wraps
around to 0 on increment.
Signed-off-by: Jeff King <peff@peff.net>
| 0 |
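The wraparound trick the commit message mentions is easy to demonstrate: unsigned overflow is well defined in C, so `(size_t)-1` is SIZE_MAX and incrementing it yields 0.

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
    size_t len = (size_t)-1;   /* assigning -1 gives SIZE_MAX */
    assert(len == SIZE_MAX);
    len++;                     /* unsigned wraparound is well defined */
    assert(len == 0);          /* the "reset to 0 via ++" idiom stays legal */
    return 0;
}
```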
class_ptr_p(mrb_value obj)
{
switch (mrb_type(obj)) {
case MRB_TT_CLASS:
case MRB_TT_SCLASS:
case MRB_TT_MODULE:
return TRUE;
default:
return FALSE;
}
}
|
Safe
|
[
"CWE-476",
"CWE-415"
] |
mruby
|
faa4eaf6803bd11669bc324b4c34e7162286bfa3
|
2.655181153373933e+37
| 11 |
`mrb_class_real()` did not work for `BasicObject`; fix #4037
| 0 |
void SNC_io_parser<EW>::add_infi_box() {
for(i=0; i<8; ++i) Vertex_of.push_back(this->sncp()->new_vertex_only());
for(i=0; i<24; ++i) Edge_of.push_back(this->sncp()->new_halfedge_only());
for(i=0; i<12; ++i) Halffacet_of.push_back(this->sncp()->new_halffacet_only());
for(i=0; i<48; ++i) SEdge_of.push_back(this->sncp()->new_shalfedge_only());
for(i=0; i<16; ++i) SFace_of.push_back(this->sncp()->new_sface_only());
typename Standard_kernel::RT hx,hy,hz,hw;
for(int i=0; i<8; ++i) {
Vertex_handle vh = Vertex_of[vn+i];
vh->svertices_begin() = Edge_of[en+3*i];
vh->svertices_last() = Edge_of[en+3*i+2];
vh->shalfedges_begin() = SEdge_of[sen+6*i];
vh->shalfedges_last() = SEdge_of[sen+6*i+5];
vh->sfaces_begin() = SFace_of[sfn+2*i];
vh->sfaces_last() = SFace_of[sfn+2*i+1];
vh->shalfloop() = this->shalfloops_end();
hx = i % 2 ? -1 : 1;
hy = i % 4 > 1 ? -1 : 1;
hz = i > 3 ? -1 : 1;
vh->point() = Infi_box::create_extended_point(hx, hy, hz);
vh->mark() = 1;
vh->sncp() = this->sncp();
}
int seOff[3] = {0, 1, 3};
int twinIdx[24] = { 3, 7,14,
0,10,17,
9, 1,20,
6, 4,23,
15,19, 2,
12,22, 5,
21,13, 8,
18,16,11};
for(int i = 0; i < 24; ++i) {
Halfedge_handle eh = Edge_of[en+i];
eh->twin() = Edge_of[en+twinIdx[i]];
eh->center_vertex() = Vertex_of[vn+(i/3)];
eh->out_sedge() = SEdge_of[sen+(i/3*6)+seOff[i%3]];
switch(i%3) {
case 0 :
hx = i % 6 ? 1 : -1;
hy = hz = 0;
break;
case 1:
hy = i % 12 >= 6 ? 1 : -1;
hx = hz = 0;
break;
case 2:
hz = i >= 12 ? 1 : -1;
hx = hy = 0;
break;
}
eh->point() = Sphere_point(hx,hy,hz);
eh->mark() = 1;
}
int bnd[12] = {19, 18, 43, 42, 35, 34,
47, 46, 39, 38, 45, 44};
for(int i = 0; i < 12; ++i) {
Halffacet_handle fh = Halffacet_of[fn+i];
fh->twin() = Halffacet_of[fn+(i/2*2)+((i+1)%2)];
fh->boundary_entry_objects().push_back(make_object(SEdge_of[sen+bnd[i]]));
fh->incident_volume() = Volume_of[((i%4) == 1 || (i%4 == 2)) ? 1 : 0];
if(i<4) {
hz = i % 2 ? -1 : 1;
hx = hy = 0;
}
else if(i<8) {
hy = i % 2 ? -1 : 1;
hx = hz = 0;
}
else {
hx = i % 2 ? -1 : 1;
hz = hy = 0;
}
hw = ((i%4) == 1 || (i%4) == 2) ? 1 : -1;
fh->plane() = Infi_box::create_extended_plane(hx,hy,hz,hw);
fh->mark() = 1;
}
Volume_of[0]->shell_entry_objects().push_back(make_object(SFace_of[sfn]));
Volume_of[0]->mark() = 0;
Volume_of[1]->shell_entry_objects().push_front(make_object(SFace_of[sfn+1]));
int sprevOff[6] = {4,3,0,5,2,1};
int snextOff[6] = {2,5,4,1,0,3};
int prevIdx[48] = {7,12,15,26,29,10,
1,18,21,32,35,4,
19,0,3,38,41,22,
13,6,9,44,47,16,
31,36,39,2,5,34,
25,42,45,8,11,28,
43,24,27,14,17,46,
37,30,33,20,23,40};
int nextIdx[48] = {13,6,27,14,11,28,
19,0,33,20,5,34,
1,18,39,2,23,40,
7,12,45,8,17,46,
37,30,3,38,35,4,
43,24,9,44,29,10,
25,42,15,26,47,16,
31,36,21,32,41,22};
int factIdx[48] = {1,0,9,8,5,4,
0,1,11,10,4,5,
0,1,8,9,7,6,
1,0,10,11,6,7,
3,2,8,9,4,5,
2,3,10,11,5,4,
2,3,9,8,6,7,
3,2,11,10,7,6};
int sgn[24] = {1,1,1,-1,1,-1,
-1,-1,1,1,-1,-1,
1,-1,-1,-1,-1,1,
-1,1,-1,1,1,1};
for(int i = 0; i < 48; ++i) {
SHalfedge_handle seh = SEdge_of[sen+i];
seh->twin() = SEdge_of[sen+(i/2*2)+((i+1)%2)];
seh->sprev() = SEdge_of[sen+sprevOff[i%6]+(i/6*6)];
seh->snext() = SEdge_of[sen+snextOff[i%6]+(i/6*6)];
seh->source() = Edge_of[en+((i+1)%6)/2+(i/6)*3];
seh->incident_sface() = SFace_of[sfn+(i%2)+(i/6)*2];
seh->prev() = SEdge_of[sen+prevIdx[i]];
seh->next() = SEdge_of[sen+nextIdx[i]];
seh->facet() = Halffacet_of[fn+factIdx[i]];
if(i%6 < 2) {
hz = (i%2) ? sgn[i/2] * (-1) : sgn[i/2];
hx = hy = 0;
}
else if(i%6 < 4) {
hx = (i%2) ? sgn[i/2] * (-1) : sgn[i/2];
hz = hy = 0;
}
else {
hy = (i%2) ? sgn[i/2] * (-1) : sgn[i/2];
hx = hz = 0;
}
seh->circle() = Sphere_circle(Plane_3(RT(hx),RT(hy),RT(hz),RT(0)));
seh->mark() = 1;
}
int volIdx[8] = {0,1,1,0,1,0,0,1};
for(int i = 0; i < 16; ++i) {
SFace_handle sfh = SFace_of[sfn+i];
sfh->center_vertex() = Vertex_of[vn+(i/2)];
sfh->boundary_entry_objects().push_back(make_object(SEdge_of[sen+(i/2*6)+(i%2)]));
this->sncp()->store_sm_boundary_item(SEdge_of[sen+(i/2*6)+(i%2)],
--(sfh->sface_cycles_end()));
int cIdx = i%2 ? 1-volIdx[i/2] : volIdx[i/2];
sfh->volume() = Volume_of[cIdx];
sfh->mark() = cIdx ? Volume_of[1]->mark() : 0;
}
}
|
Safe
|
[
"CWE-125"
] |
cgal
|
5a1ab45058112f8647c14c02f58905ecc597ec76
|
2.8552745161662705e+38
| 158 |
Fix Nef_3
| 0 |
flatpak_dir_update_remote_configuration (FlatpakDir *self,
const char *remote,
GCancellable *cancellable,
GError **error)
{
gboolean is_oci;
g_autoptr(FlatpakRemoteState) state = NULL;
if (flatpak_dir_get_remote_disabled (self, remote))
return TRUE;
is_oci = flatpak_dir_get_remote_oci (self, remote);
if (is_oci)
return TRUE;
state = flatpak_dir_get_remote_state (self, remote, cancellable, error);
if (state == NULL)
return FALSE;
if (flatpak_dir_use_system_helper (self, NULL))
{
gboolean has_changed = FALSE;
gboolean gpg_verify_summary;
gboolean gpg_verify;
if (!ostree_repo_remote_get_gpg_verify_summary (self->repo, remote, &gpg_verify_summary, error))
return FALSE;
if (!ostree_repo_remote_get_gpg_verify (self->repo, remote, &gpg_verify, error))
return FALSE;
if ((!gpg_verify_summary && state->collection_id == NULL) || !gpg_verify)
{
g_debug ("Ignoring automatic updates for system-helper remotes without gpg signatures");
return TRUE;
}
if (!flatpak_dir_update_remote_configuration_for_state (self, state, TRUE, &has_changed, cancellable, error))
return FALSE;
if (state->collection_id == NULL && state->summary_sig_bytes == NULL)
{
g_debug ("Can't update remote configuration as user, no GPG signature");
return TRUE;
}
if (has_changed)
{
g_autoptr(GBytes) bytes = g_variant_get_data_as_bytes (state->summary);
glnx_autofd int summary_fd = -1;
g_autofree char *summary_path = NULL;
glnx_autofd int summary_sig_fd = -1;
g_autofree char *summary_sig_path = NULL;
const char *installation;
summary_fd = g_file_open_tmp ("remote-summary.XXXXXX", &summary_path, error);
if (summary_fd == -1)
return FALSE;
if (glnx_loop_write (summary_fd, g_bytes_get_data (bytes, NULL), g_bytes_get_size (bytes)) < 0)
return glnx_throw_errno (error);
if (state->summary_sig_bytes != NULL)
{
summary_sig_fd = g_file_open_tmp ("remote-summary-sig.XXXXXX", &summary_sig_path, error);
if (summary_sig_fd == -1)
return FALSE;
if (glnx_loop_write (summary_sig_fd, g_bytes_get_data (state->summary_sig_bytes, NULL), g_bytes_get_size (state->summary_sig_bytes)) < 0)
return glnx_throw_errno (error);
}
installation = flatpak_dir_get_id (self);
if (!flatpak_dir_system_helper_call_update_remote (self, 0, remote,
installation ? installation : "",
summary_path, summary_sig_path ? summary_sig_path : "",
cancellable, error))
return FALSE;
unlink (summary_path);
if (summary_sig_path)
unlink (summary_sig_path);
}
return TRUE;
}
return flatpak_dir_update_remote_configuration_for_state (self, state, FALSE, NULL, cancellable, error);
}
|
Safe
|
[
"CWE-668"
] |
flatpak
|
cd2142888fc4c199723a0dfca1f15ea8788a5483
|
3.0110501824217804e+38
| 89 |
Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this.
| 0 |
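As a rough illustration of the mitigation, a bubblewrap invocation for the root-run apply_extra helper can simply omit the procfs mount. The flags are real bwrap options, but the argv below is hypothetical, not flatpak's actual command line:

```c
/* Hypothetical sketch: with no "--proc /proc" pair, the sandboxed
 * helper has no /proc/self/exe to reach back through. */
static const char *argv_no_proc[] = {
    "bwrap",
    "--unshare-all",
    "--ro-bind", "/usr", "/usr",
    /* intentionally no "--proc", "/proc" entry here */
    "/app/bin/apply_extra",
    NULL,
};
```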
mark_fill_rect16_add1_no_spots_fast(int w, int h, uint16_t *gs_restrict dst_ptr, uint16_t *gs_restrict src, int num_comp, int num_spots, int first_blend_spot,
uint16_t src_alpha, int rowstride, int planestride, bool additive, pdf14_device *pdev, gs_blend_mode_t blend_mode,
bool overprint, gx_color_index drawn_comps, int tag_off, gs_graphics_type_tag_t curr_tag,
int alpha_g_off, int shape_off, uint16_t shape)
{
int i;
for (; h > 0; --h) {
for (i = w; i > 0; --i) {
/* background empty, nothing to change, or solid source */
uint16_t a_s = src[1];
int a_b = dst_ptr[planestride];
if (a_s == 0xffff || a_b == 0) {
dst_ptr[0] = src[0];
dst_ptr[planestride] = a_s;
} else if (a_s != 0) {
/* Result alpha is Union of backdrop and source alpha */
int tmp, src_scale, c_s, c_b;
unsigned int a_r;
a_b += a_b>>15;
tmp = (0x10000 - a_b) * (0xffff - a_s) + 0x8000;
a_r = 0xffff - (tmp >> 16);
/* Compute a_s / a_r in 16.16 format */
src_scale = ((a_s << 16) + (a_r >> 1)) / a_r;
src_scale >>= 1; /* Lose a bit to avoid overflow */
/* Do simple compositing of source over backdrop */
c_s = src[0];
c_b = dst_ptr[0];
tmp = src_scale * (c_s - c_b) + 0x4000;
dst_ptr[0] = c_b + (tmp >> 15);
dst_ptr[planestride] = a_r;
}
++dst_ptr;
}
dst_ptr += rowstride;
}
}
|
Safe
|
[
"CWE-476"
] |
ghostpdl
|
7870f4951bcc6a153f317e3439e14d0e929fd231
|
1.119598284461157e+38
| 40 |
Bug 701795: Segv due to image mask issue
| 0 |
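The "union of backdrop and source alpha" computed in the loop above is 1 - (1-a_b)(1-a_s) in 16-bit fixed point. A small self-contained version with a worked case, using unsigned arithmetic to stay within range: two 50% alphas (0x8000) compose to 75% (0xC000).

```c
#include <assert.h>

/* a_b, a_s in 0..0xffff; returns the union alpha, as in the loop above */
static unsigned alpha_union16(unsigned a_b, unsigned a_s)
{
    a_b += a_b >> 15;                                   /* scale to 0..0x10000 */
    unsigned tmp = (0x10000u - a_b) * (0xffffu - a_s) + 0x8000u;
    return 0xffffu - (tmp >> 16);
}

int main(void)
{
    assert(alpha_union16(0x8000, 0x8000) == 0xC000);    /* 50% over 50% = 75% */
    return 0;
}
```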
void luaV_finishOp (lua_State *L) {
CallInfo *ci = L->ci;
StkId base = ci->func + 1;
Instruction inst = *(ci->u.l.savedpc - 1); /* interrupted instruction */
OpCode op = GET_OPCODE(inst);
switch (op) { /* finish its execution */
case OP_MMBIN: case OP_MMBINI: case OP_MMBINK: {
setobjs2s(L, base + GETARG_A(*(ci->u.l.savedpc - 2)), --L->top);
break;
}
case OP_UNM: case OP_BNOT: case OP_LEN:
case OP_GETTABUP: case OP_GETTABLE: case OP_GETI:
case OP_GETFIELD: case OP_SELF: {
setobjs2s(L, base + GETARG_A(inst), --L->top);
break;
}
case OP_LT: case OP_LE:
case OP_LTI: case OP_LEI:
case OP_GTI: case OP_GEI:
case OP_EQ: { /* note that 'OP_EQI'/'OP_EQK' cannot yield */
int res = !l_isfalse(s2v(L->top - 1));
L->top--;
#if defined(LUA_COMPAT_LT_LE)
if (ci->callstatus & CIST_LEQ) { /* "<=" using "<" instead? */
ci->callstatus ^= CIST_LEQ; /* clear mark */
res = !res; /* negate result */
}
#endif
lua_assert(GET_OPCODE(*ci->u.l.savedpc) == OP_JMP);
if (res != GETARG_k(inst)) /* condition failed? */
ci->u.l.savedpc++; /* skip jump instruction */
break;
}
case OP_CONCAT: {
StkId top = L->top - 1; /* top when 'luaT_tryconcatTM' was called */
int a = GETARG_A(inst); /* first element to concatenate */
int total = cast_int(top - 1 - (base + a)); /* yet to concatenate */
setobjs2s(L, top - 2, top); /* put TM result in proper position */
L->top = top - 1; /* top is one after last element (at top-2) */
luaV_concat(L, total); /* concat them (may yield again) */
break;
}
default: {
/* only these other opcodes can yield */
lua_assert(op == OP_TFORCALL || op == OP_CALL ||
op == OP_TAILCALL || op == OP_SETTABUP || op == OP_SETTABLE ||
op == OP_SETI || op == OP_SETFIELD);
break;
}
}
}
|
Safe
|
[
"CWE-416",
"CWE-125",
"CWE-787"
] |
lua
|
eb41999461b6f428186c55abd95f4ce1a76217d5
|
2.317912217729688e+38
| 51 |
Fixed bugs of stack reallocation x GC
Macro 'checkstackGC' was doing a GC step after resizing the stack;
the GC could shrink the stack and undo the resize. Moreover, macro
'checkstackp' also does a GC step, which could remove the preallocated
CallInfo when calling a function. (Its name has been changed to
'checkstackGCp' to emphasize that it calls the GC.)
| 0 |
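The bug class the commit describes -- a GC step moving the stack out from under a raw pointer -- generalizes to any container that may reallocate. A hedged sketch in plain C (not Lua's internals): keep an index across calls that may resize, and re-derive the address afterwards.

```c
#include <stdlib.h>

struct gstack { int *data; size_t len, cap; };

/* stands in for a GC step that may shrink (and move) the stack */
static void maybe_shrink(struct gstack *s)
{
    if (s->cap > 2 * s->len) {
        int *p = realloc(s->data, s->len * sizeof *p);
        if (p) { s->data = p; s->cap = s->len; }  /* storage may have moved */
    }
}

void safe_update(struct gstack *s)
{
    size_t idx = s->len - 1;   /* an index survives reallocation... */
    maybe_shrink(s);
    s->data[idx] = 42;         /* ...a pointer taken before the call would not */
}
```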
int dma_async_device_register(struct dma_device *device)
{
int chancnt = 0, rc;
struct dma_chan* chan;
atomic_t *idr_ref;
if (!device)
return -ENODEV;
/* validate device routines */
BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
!device->device_prep_dma_memcpy);
BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
!device->device_prep_dma_xor);
BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
!device->device_prep_dma_xor_val);
BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
!device->device_prep_dma_pq);
BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
!device->device_prep_dma_pq_val);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
!device->device_prep_interleaved_dma);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
BUG_ON(!device->device_tx_status);
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
if (!idr_ref)
return -ENOMEM;
rc = get_dma_id(device);
if (rc != 0) {
kfree(idr_ref);
return rc;
}
atomic_set(idr_ref, 0);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
rc = -ENOMEM;
chan->local = alloc_percpu(typeof(*chan->local));
if (chan->local == NULL)
goto err_out;
chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
if (chan->dev == NULL) {
free_percpu(chan->local);
chan->local = NULL;
goto err_out;
}
chan->chan_id = chancnt++;
chan->dev->device.class = &dma_devclass;
chan->dev->device.parent = device->dev;
chan->dev->chan = chan;
chan->dev->idr_ref = idr_ref;
chan->dev->dev_id = device->dev_id;
atomic_inc(idr_ref);
dev_set_name(&chan->dev->device, "dma%dchan%d",
device->dev_id, chan->chan_id);
rc = device_register(&chan->dev->device);
if (rc) {
free_percpu(chan->local);
chan->local = NULL;
kfree(chan->dev);
atomic_dec(idr_ref);
goto err_out;
}
chan->client_count = 0;
}
device->chancnt = chancnt;
mutex_lock(&dma_list_mutex);
/* take references on public channels */
if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
list_for_each_entry(chan, &device->channels, device_node) {
/* if clients are already waiting for channels we need
* to take references on their behalf
*/
if (dma_chan_get(chan) == -ENODEV) {
/* note we can only get here for the first
* channel as the remaining channels are
* guaranteed to get a reference
*/
rc = -ENODEV;
mutex_unlock(&dma_list_mutex);
goto err_out;
}
}
list_add_tail_rcu(&device->global_node, &dma_device_list);
if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
device->privatecnt++; /* Always private */
dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
return 0;
err_out:
/* if we never registered a channel just release the idr */
if (atomic_read(idr_ref) == 0) {
mutex_lock(&dma_list_mutex);
idr_remove(&dma_idr, device->dev_id);
mutex_unlock(&dma_list_mutex);
kfree(idr_ref);
return rc;
}
list_for_each_entry(chan, &device->channels, device_node) {
if (chan->local == NULL)
continue;
mutex_lock(&dma_list_mutex);
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
device_unregister(&chan->dev->device);
free_percpu(chan->local);
}
return rc;
}
|
Safe
|
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
|
9.757067008744749e+37
| 135 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: David Whipple <whipple@securedatainnovations.ch>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: <stable@vger.kernel.org>
Reported-by: Roman Gushchin <klamm@yandex-team.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
| 0 |
static int __net_init packet_net_init(struct net *net)
{
mutex_init(&net->packet.sklist_lock);
INIT_HLIST_HEAD(&net->packet.sklist);
if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
return -ENOMEM;
return 0;
}
|
Safe
|
[
"CWE-416",
"CWE-362"
] |
linux
|
84ac7260236a49c79eede91617700174c2c19b0c
|
1.1920626746638148e+38
| 10 |
packet: fix race condition in packet_set_ring
When packet_set_ring creates a ring buffer it will initialize a
struct timer_list if the packet version is TPACKET_V3. This value
can then be raced by a different thread calling setsockopt to
set the version to TPACKET_V1 before packet_set_ring has finished.
This leads to a use-after-free on a function pointer in the
struct timer_list when the socket is closed as the previously
initialized timer will not be deleted.
The bug is fixed by taking lock_sock(sk) in packet_setsockopt when
changing the packet version while also taking the lock at the start
of packet_set_ring.
Fixes: f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation.")
Signed-off-by: Philip Pettersson <philip.pettersson@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
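The shape of the fix can be sketched as follows. This mirrors the commit message rather than the actual diff, and it assumes kernel context (pkt_sk, lock_sock) -- it is not standalone code:

```c
/* Serialize version changes against packet_set_ring(), which takes
 * lock_sock(sk) as well under the fixed code. */
static int set_packet_version(struct sock *sk, int val)
{
    struct packet_sock *po = pkt_sk(sk);
    int ret = 0;

    lock_sock(sk);
    if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
        ret = -EBUSY;          /* a ring exists: refuse to switch versions */
    else
        po->tp_version = val;
    release_sock(sk);
    return ret;
}
```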
hstore_fetchval(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
HEntry *entries = ARRPTR(hs);
text *out;
int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
if (idx < 0 || HS_VALISNULL(entries, idx))
PG_RETURN_NULL();
out = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), idx),
HS_VALLEN(entries, idx));
PG_RETURN_TEXT_P(out);
}
|
Safe
|
[
"CWE-703",
"CWE-189"
] |
postgres
|
31400a673325147e1205326008e32135a78b4d8a
|
2.566307075805215e+38
| 17 |
Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
| 0 |
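The defensive pattern behind the fix is to validate the multiplication before allocating, so a huge element count cannot wrap to a small request. A minimal generic sketch (not the PostgreSQL code itself):

```c
#include <stdint.h>
#include <stdlib.h>

void *alloc_array(size_t nelems, size_t elemsize)
{
    if (elemsize != 0 && nelems > SIZE_MAX / elemsize)
        return NULL;                 /* would overflow: refuse the request */
    return malloc(nelems * elemsize);
}
```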
rubytk_kitpathObjCmd(ClientData dummy, Tcl_Interp *interp, int objc, Tcl_Obj *const objv[])
{
const char* str;
if (objc == 2) {
set_rubytk_kitpath(Tcl_GetString(objv[1]));
} else if (objc > 2) {
Tcl_WrongNumArgs(interp, 1, objv, "?path?");
}
str = rubytk_kitpath ? rubytk_kitpath : Tcl_GetNameOfExecutable();
Tcl_SetObjResult(interp, Tcl_NewStringObj(str, -1));
return TCL_OK;
}
|
Safe
|
[] |
tk
|
ebd0fc80d62eeb7b8556522256f8d035e013eb65
|
7.557074293414785e+37
| 12 |
tcltklib.c: check argument
* ext/tk/tcltklib.c (ip_cancel_eval_core): check argument type and
length.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51468 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
| 0 |
static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
{
struct {
__le32 len;
struct brcmf_bss_info_le bss_le;
} *buf;
u16 capability;
int err;
buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (!buf)
return;
buf->len = cpu_to_le32(WL_BSS_INFO_MAX);
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, buf,
WL_BSS_INFO_MAX);
if (err) {
brcmf_err("Failed to get bss info (%d)\n", err);
goto out_kfree;
}
si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
si->bss_param.dtim_period = buf->bss_le.dtim_period;
capability = le16_to_cpu(buf->bss_le.capability);
if (capability & IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT)
si->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
if (capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
out_kfree:
kfree(buf);
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
8f44c9a41386729fea410e688959ddaa9d51be7c
|
3.3967641528070536e+38
| 34 |
brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx()
The lower level nl80211 code in cfg80211 ensures that "len" is between
25 and NL80211_ATTR_FRAME (2304). We subtract DOT11_MGMT_HDR_LEN (24) from
"len" so that's a max of 2280. However, the action_frame->data[] buffer is
only BRCMF_FIL_ACTION_FRAME_SIZE (1800) bytes long so this memcpy() can
overflow.
memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
le16_to_cpu(action_frame->len));
Cc: stable@vger.kernel.org # 3.9.x
Fixes: 18e2f61db3b70 ("brcmfmac: P2P action frame tx.")
Reported-by: "freenerguo(郭大兴)" <freenerguo@tencent.com>
Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
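The missing check amounts to validating the payload length against the destination before the memcpy(). A hedged sketch using the sizes from the commit message (the real function has more context than shown here):

```c
#include <errno.h>
#include <string.h>

#define DOT11_MGMT_HDR_LEN          24
#define BRCMF_FIL_ACTION_FRAME_SIZE 1800

/* returns 0 on success, -EINVAL if the payload would overflow dst */
static int copy_action_frame(unsigned char *dst, const unsigned char *buf,
                             size_t len)
{
    if (len < DOT11_MGMT_HDR_LEN ||
        len - DOT11_MGMT_HDR_LEN > BRCMF_FIL_ACTION_FRAME_SIZE)
        return -EINVAL;              /* frame too large for the buffer */
    memcpy(dst, buf + DOT11_MGMT_HDR_LEN, len - DOT11_MGMT_HDR_LEN);
    return 0;
}
```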
static int sgi_timer_del(struct k_itimer *timr)
{
cnodeid_t nodeid = timr->it.mmtimer.node;
unsigned long irqflags;
spin_lock_irqsave(&timers[nodeid].lock, irqflags);
if (timr->it.mmtimer.clock != TIMER_OFF) {
unsigned long expires = timr->it.mmtimer.expires;
struct rb_node *n = timers[nodeid].timer_head.rb_node;
struct mmtimer *uninitialized_var(t);
int r = 0;
timr->it.mmtimer.clock = TIMER_OFF;
timr->it.mmtimer.expires = 0;
while (n) {
t = rb_entry(n, struct mmtimer, list);
if (t->timer == timr)
break;
if (expires < t->timer->it.mmtimer.expires)
n = n->rb_left;
else
n = n->rb_right;
}
if (!n) {
spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
return 0;
}
if (timers[nodeid].next == n) {
timers[nodeid].next = rb_next(n);
r = 1;
}
rb_erase(n, &timers[nodeid].timer_head);
kfree(t);
if (r) {
mmtimer_disable_int(cnodeid_to_nasid(nodeid),
COMPARATOR);
mmtimer_set_next_timer(nodeid);
}
}
spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
return 0;
}
|
Safe
|
[
"CWE-189"
] |
linux
|
f8bd2258e2d520dff28c855658bd24bdafb5102d
|
4.261211554094137e+37
| 48 |
remove div_long_long_rem
x86 is the only arch right now that provides an optimized
div_long_long_rem, and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
static char *get_local_name(struct file_list *flist, char *dest_path)
{
STRUCT_STAT st;
int statret;
char *cp;
if (DEBUG_GTE(RECV, 1)) {
rprintf(FINFO, "get_local_name count=%d %s\n",
file_total, NS(dest_path));
}
if (!dest_path || list_only)
return NULL;
/* Treat an empty string as a copy into the current directory. */
if (!*dest_path)
dest_path = ".";
if (daemon_filter_list.head) {
char *slash = strrchr(dest_path, '/');
if (slash && (slash[1] == '\0' || (slash[1] == '.' && slash[2] == '\0')))
*slash = '\0';
else
slash = NULL;
if ((*dest_path != '.' || dest_path[1] != '\0')
&& (check_filter(&daemon_filter_list, FLOG, dest_path, 0) < 0
|| check_filter(&daemon_filter_list, FLOG, dest_path, 1) < 0)) {
rprintf(FERROR, "ERROR: daemon has excluded destination \"%s\"\n",
dest_path);
exit_cleanup(RERR_FILESELECT);
}
if (slash)
*slash = '/';
}
/* See what currently exists at the destination. */
if ((statret = do_stat(dest_path, &st)) == 0) {
/* If the destination is a dir, enter it and use mode 1. */
if (S_ISDIR(st.st_mode)) {
if (!change_dir(dest_path, CD_NORMAL)) {
rsyserr(FERROR, errno, "change_dir#1 %s failed",
full_fname(dest_path));
exit_cleanup(RERR_FILESELECT);
}
filesystem_dev = st.st_dev; /* ensures --force works right w/-x */
return NULL;
}
if (file_total > 1) {
rprintf(FERROR,
"ERROR: destination must be a directory when"
" copying more than 1 file\n");
exit_cleanup(RERR_FILESELECT);
}
if (file_total == 1 && S_ISDIR(flist->files[0]->mode)) {
rprintf(FERROR,
"ERROR: cannot overwrite non-directory"
" with a directory\n");
exit_cleanup(RERR_FILESELECT);
}
} else if (errno != ENOENT) {
/* If we don't know what's at the destination, fail. */
rsyserr(FERROR, errno, "ERROR: cannot stat destination %s",
full_fname(dest_path));
exit_cleanup(RERR_FILESELECT);
}
cp = strrchr(dest_path, '/');
/* If we need a destination directory because the transfer is not
* of a single non-directory or the user has requested one via a
* destination path ending in a slash, create one and use mode 1. */
if (file_total > 1 || (cp && !cp[1])) {
/* Lop off the final slash (if any). */
if (cp && !cp[1])
*cp = '\0';
if (statret == 0) {
rprintf(FERROR,
"ERROR: destination path is not a directory\n");
exit_cleanup(RERR_SYNTAX);
}
if (do_mkdir(dest_path, ACCESSPERMS) != 0) {
rsyserr(FERROR, errno, "mkdir %s failed",
full_fname(dest_path));
exit_cleanup(RERR_FILEIO);
}
if (flist->high >= flist->low
&& strcmp(flist->files[flist->low]->basename, ".") == 0)
flist->files[0]->flags |= FLAG_DIR_CREATED;
if (INFO_GTE(NAME, 1))
rprintf(FINFO, "created directory %s\n", dest_path);
if (dry_run) {
/* Indicate that dest dir doesn't really exist. */
dry_run++;
}
if (!change_dir(dest_path, dry_run > 1 ? CD_SKIP_CHDIR : CD_NORMAL)) {
rsyserr(FERROR, errno, "change_dir#2 %s failed",
full_fname(dest_path));
exit_cleanup(RERR_FILESELECT);
}
return NULL;
}
/* Otherwise, we are writing a single file, possibly on top of an
* existing non-directory. Change to the item's parent directory
* (if it has a path component), return the basename of the
* destination file as the local name, and use mode 2. */
if (!cp)
return dest_path;
if (cp == dest_path)
dest_path = "/";
*cp = '\0';
if (!change_dir(dest_path, CD_NORMAL)) {
rsyserr(FERROR, errno, "change_dir#3 %s failed",
full_fname(dest_path));
exit_cleanup(RERR_FILESELECT);
}
*cp = '/';
return cp + 1;
}
|
Safe
|
[
"CWE-59"
] |
rsync
|
962f8b90045ab331fc04c9e65f80f1a53e68243b
|
1.0612989776966043e+37
| 129 |
Complain if an inc-recursive path is not right for its dir.
This ensures that a malicious sender can't use a just-sent
symlink as a transfer path.
| 0 |
static struct module *mod_find(unsigned long addr)
{
struct latch_tree_node *ltn;
ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
if (!ltn)
return NULL;
return container_of(ltn, struct mod_tree_node, node)->mod;
}
|
Safe
|
[
"CWE-362",
"CWE-347"
] |
linux
|
0c18f29aae7ce3dadd26d8ee3505d07cc982df75
|
3.820465885768376e+37
| 10 |
module: limit enabling module.sig_enforce
Irrespective as to whether CONFIG_MODULE_SIG is configured, specifying
"module.sig_enforce=1" on the boot command line sets "sig_enforce".
Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured.
This patch makes the presence of /sys/module/module/parameters/sig_enforce
dependent on CONFIG_MODULE_SIG=y.
Fixes: fda784e50aac ("module: export module signature enforcement status")
Reported-by: Nayna Jain <nayna@linux.ibm.com>
Tested-by: Mimi Zohar <zohar@linux.ibm.com>
Tested-by: Jessica Yu <jeyu@kernel.org>
Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
Signed-off-by: Jessica Yu <jeyu@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
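The gating the commit message describes can be sketched as below; this follows kernel conventions (module_param, IS_ENABLED) but is an illustration, not the exact diff. With CONFIG_MODULE_SIG unset, neither the writable parameter nor its sysfs entry exists.

```c
#ifdef CONFIG_MODULE_SIG
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
/* bool_enable_only: userspace may flip 0 -> 1, never 1 -> 0 */
module_param(sig_enforce, bool_enable_only, 0644);
#endif
```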
static git_commit_list_node **alloc_parents(
git_revwalk *walk, git_commit_list_node *commit, size_t n_parents)
{
if (n_parents <= PARENTS_PER_COMMIT)
return (git_commit_list_node **)((char *)commit + sizeof(git_commit_list_node));
return (git_commit_list_node **)git_pool_malloc(
&walk->commit_pool, (uint32_t)(n_parents * sizeof(git_commit_list_node *)));
}
|
Vulnerable
|
[] |
libgit2
|
3316f666566f768eb8aa8de521a5262524dc3424
|
3.3472284612657952e+38
| 9 |
commit_list: fix possible buffer overflow in `commit_quick_parse`
The function `commit_quick_parse` provides a way to quickly parse
parts of a commit without storing or verifying most of its
metadata. The first thing it does is calculating the number of
parents by skipping "parent " lines until it finds the first
non-parent line. Afterwards, this parent count is passed to
`alloc_parents`, which will allocate an array to store all the
parent.
To calculate the amount of storage required for the parents
array, `alloc_parents` simply multiplicates the number of parents
with the respective elements's size. This already screams "buffer
overflow", and in fact this problem is getting worse by the
result being cast to an `uint32_t`.
In fact, triggering this is possible: git-hash-object(1) will
happily write a commit with multiple millions of parents for you.
I've stopped at 67,108,864 parents as git-hash-object(1)
unfortunately soaks up the complete object without streaming
anything to disk and thus will cause an OOM situation at a later
point. The point here is: this commit was about 4.1GB of size but
compressed down to 24MB and thus easy to distribute.
The above doesn't yet trigger the buffer overflow, thus. As the
array's elements are all pointers which are 8 bytes on 64 bit, we
need a total of 536,870,912 parents to trigger the overflow to
`0`. The effect is that we're now underallocating the array
and do an out-of-bound writes. As the buffer is kindly provided
by the adversary, this may easily result in code execution.
Extrapolating from the test file with 67m commits to the one with
536m commits results in a factor of 8. Thus the uncompressed
contents would be about 32GB in size and the compressed ones
192MB. While still easily distributable via the network, only
servers will have that amount of RAM and not cause an
out-of-memory condition previous to triggering the overflow. This
at least makes this attack not an easy vector for client-side use
of libgit2.
| 1 |
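The overflow the analysis walks through is avoided by checking the multiplication instead of casting its result down. A minimal sketch, assuming the GCC/Clang __builtin_mul_overflow builtin:

```c
#include <stddef.h>
#include <stdlib.h>

void *alloc_parents_checked(size_t n_parents, size_t elem_size)
{
    size_t bytes;
    if (__builtin_mul_overflow(n_parents, elem_size, &bytes))
        return NULL;           /* 536M parents can no longer wrap to 0 */
    return malloc(bytes);
}
```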
xmlHTMLValidityWarning(void *ctx, const char *msg, ...)
{
xmlParserCtxtPtr ctxt = (xmlParserCtxtPtr) ctx;
xmlParserInputPtr input;
va_list args;
int len;
buffer[0] = 0;
input = ctxt->input;
if ((input->filename == NULL) && (ctxt->inputNr > 1))
input = ctxt->inputTab[ctxt->inputNr - 2];
xmlHTMLPrintFileInfo(input);
xmlGenericError(xmlGenericErrorContext, "<b>validity warning</b>: ");
va_start(args, msg);
len = strlen(buffer);
vsnprintf(&buffer[len], sizeof(buffer) - len, msg, args);
va_end(args);
xmlHTMLEncodeSend();
xmlGenericError(xmlGenericErrorContext, "</p>\n");
xmlHTMLPrintFileContext(input);
xmlHTMLEncodeSend();
}
|
Safe
|
[
"CWE-416"
] |
libxml2
|
1358d157d0bd83be1dfe356a69213df9fac0b539
|
9.168637114084226e+37
| 25 |
Fix use-after-free with `xmllint --html --push`
Call htmlCtxtUseOptions to make sure that names aren't stored in
dictionaries.
Note that this issue only affects xmllint using the HTML push parser.
Fixes #230.
| 0 |
static void wait(CImgDisplay& disp1) {
disp1._is_event = false;
while (!disp1._is_closed && !disp1._is_event) wait_all();
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
2.4360415754547286e+38
| 4 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
| 0 |
static netdev_features_t xenvif_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct xenvif *vif = netdev_priv(dev);
if (!vif->can_sg)
features &= ~NETIF_F_SG;
if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
features &= ~NETIF_F_TSO;
if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
features &= ~NETIF_F_TSO6;
if (!vif->ip_csum)
features &= ~NETIF_F_IP_CSUM;
if (!vif->ipv6_csum)
features &= ~NETIF_F_IPV6_CSUM;
return features;
}
|
Safe
|
[
"CWE-399"
] |
net-next
|
e9d8b2c2968499c1f96563e6522c56958d5a1d0d
|
8.637322035421608e+37
| 18 |
xen-netback: disable rogue vif in kthread context
When netback discovers a frontend is sending malformed packets it will
disable the interface which serves that frontend.
However, disabling a network interface involves taking a mutex, which
cannot be done in softirq context, so we need to defer this process to
kthread context.
This patch does the following:
1. introduce a flag to indicate the interface is disabled.
2. check that flag in TX path, don't do any work if it's true.
3. check that flag in RX path, turn off that interface if it's true.
The reason to disable it in RX path is because RX uses kthread. After
this change the behavior of netback is still consistent -- it won't do
any TX work for a rogue frontend, and the interface will be eventually
turned off.
Also change a "continue" to "break" after xenvif_fatal_tx_err, as it
doesn't make sense to continue processing packets if frontend is rogue.
This is a fix for XSA-90.
Reported-by: Török Edwin <edwin@etorok.net>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
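Steps 2 and 3 of the scheme can be sketched as below. The field and helpers are modeled on the commit description (xenvif_carrier_off is the mutex-taking path); this is illustrative kernel-style code, not the applied diff.

```c
/* step 2: the TX path does no work for a disabled (rogue) vif */
static bool xenvif_tx_disabled(struct xenvif *vif)
{
    return vif->disabled;      /* set where the malformed packet is detected */
}

/* step 3: the RX kthread, which may sleep, performs the actual turn-off */
static void xenvif_rx_check_disabled(struct xenvif *vif)
{
    if (vif->disabled && netif_carrier_ok(vif->dev))
        xenvif_carrier_off(vif);   /* takes a mutex: safe only in kthread */
}
```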
void push_args(std::vector<std::shared_ptr<Ope>> &&args) {
args_stack.emplace_back(args);
}
|
Safe
|
[
"CWE-125"
] |
cpp-peglib
|
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
|
1.7383215674162703e+38
| 3 |
Fix #122
| 0 |
static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc)
{
struct sctp_transport *t;
/* Stop all heartbeat timers. */
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (del_timer(&t->hb_timer))
sctp_transport_put(t);
}
}
|
Safe
|
[] |
linux
|
196d67593439b03088913227093e374235596e33
|
1.983429936538761e+38
| 13 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <michele@acksyn.org>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
void CLASS kodak_radc_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
// All kodak radc images are 768x512
if (width > 768 || raw_width > 768 || height > 512 || raw_height > 512)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
static const signed char src[] = {
1, 1, 2, 3, 3, 4, 4, 2, 5, 7, 6, 5, 7, 6, 7, 8, 1, 0, 2, 1, 3, 3, 4, 4, 5, 2, 6, 7, 7, 6,
8, 5, 8, 8, 2, 1, 2, 3, 3, 0, 3, 2, 3, 4, 4, 6, 5, 5, 6, 7, 6, 8, 2, 0, 2, 1, 2, 3, 3, 2,
4, 4, 5, 6, 6, 7, 7, 5, 7, 8, 2, 1, 2, 4, 3, 0, 3, 2, 3, 3, 4, 7, 5, 5, 6, 6, 6, 8, 2, 3,
3, 1, 3, 2, 3, 4, 3, 5, 3, 6, 4, 7, 5, 0, 5, 8, 2, 3, 2, 6, 3, 0, 3, 1, 4, 4, 4, 5, 4, 7,
5, 2, 5, 8, 2, 4, 2, 7, 3, 3, 3, 6, 4, 1, 4, 2, 4, 5, 5, 0, 5, 8, 2, 6, 3, 1, 3, 3, 3, 5,
3, 7, 3, 8, 4, 0, 5, 2, 5, 4, 2, 0, 2, 1, 3, 2, 3, 3, 4, 4, 4, 5, 5, 6, 5, 7, 4, 8, 1, 0,
2, 2, 2, -2, 1, -3, 1, 3, 2, -17, 2, -5, 2, 5, 2, 17, 2, -7, 2, 2, 2, 9, 2, 18, 2, -18, 2, -9, 2, -2,
2, 7, 2, -28, 2, 28, 3, -49, 3, -9, 3, 9, 4, 49, 5, -79, 5, 79, 2, -1, 2, 13, 2, 26, 3, 39, 4, -16, 5, 55,
6, -37, 6, 76, 2, -26, 2, -13, 2, 1, 3, -39, 4, 16, 5, -55, 6, -76, 6, 37};
ushort huff[19][256];
int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
short last[3] = {16, 16, 16}, mul[3], buf[3][3][386];
static const ushort pt[] = {0, 0, 1280, 1344, 2320, 3616, 3328, 8000, 4095, 16383, 65535, 16383};
for (i = 2; i < 12; i += 2)
for (c = pt[i - 2]; c <= pt[i]; c++)
curve[c] = (float)(c - pt[i - 2]) / (pt[i] - pt[i - 2]) * (pt[i + 1] - pt[i - 1]) + pt[i - 1] + 0.5;
for (s = i = 0; i < sizeof src; i += 2)
FORC(256 >> src[i])
((ushort *)huff)[s++] = src[i] << 8 | (uchar)src[i + 1];
s = kodak_cbpp == 243 ? 2 : 3;
FORC(256) huff[18][c] = (8 - s) << 8 | c >> s << s | 1 << (s - 1);
getbits(-1);
for (i = 0; i < sizeof(buf) / sizeof(short); i++)
((short *)buf)[i] = 2048;
for (row = 0; row < height; row += 4)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
FORC3 mul[c] = getbits(6);
#ifdef LIBRAW_LIBRARY_BUILD
if (!mul[0] || !mul[1] || !mul[2])
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
FORC3
{
val = ((0x1000000 / last[c] + 0x7ff) >> 12) * mul[c];
s = val > 65564 ? 10 : 12;
x = ~((~0u) << (s - 1));
val <<= 12 - s;
for (i = 0; i < sizeof(buf[0]) / sizeof(short); i++)
((short *)buf[c])[i] = (((short *)buf[c])[i] * val + x) >> s;
last[c] = mul[c];
for (r = 0; r <= !c; r++)
{
buf[c][1][width / 2] = buf[c][2][width / 2] = mul[c] << 7;
for (tree = 1, col = width / 2; col > 0;)
{
if ((tree = radc_token(tree)))
{
col -= 2;
if(col>=0)
{
if (tree == 8)
FORYX buf[c][y][x] = (uchar)radc_token(18) * mul[c];
else
FORYX buf[c][y][x] = radc_token(tree + 10) * 16 + PREDICTOR;
}
}
else
do
{
nreps = (col > 2) ? radc_token(9) + 1 : 1;
for (rep = 0; rep < 8 && rep < nreps && col > 0; rep++)
{
col -= 2;
if(col>=0)
FORYX buf[c][y][x] = PREDICTOR;
if (rep & 1)
{
step = radc_token(10) << 4;
FORYX buf[c][y][x] += step;
}
}
} while (nreps == 9);
}
for (y = 0; y < 2; y++)
for (x = 0; x < width / 2; x++)
{
val = (buf[c][y + 1][x] << 4) / mul[c];
if (val < 0)
val = 0;
if (c)
RAW(row + y * 2 + c - 1, x * 2 + 2 - c) = val;
else
RAW(row + r * 2 + y, x * 2 + y) = val;
}
memcpy(buf[c][0] + !c, buf[c][2], sizeof buf[c][0] - 2 * !c);
}
}
for (y = row; y < row + 4; y++)
for (x = 0; x < width; x++)
if ((x + y) & 1)
{
r = x ? x - 1 : x + 1;
s = x + 1 < width ? x + 1 : x - 1;
val = (RAW(y, x) - 2048) * 2 + (RAW(y, r) + RAW(y, s)) / 2;
if (val < 0)
val = 0;
RAW(y, x) = val;
}
}
for (i = 0; i < height * width; i++)
raw_image[i] = curve[raw_image[i]];
maximum = 0x3fff;
}
|
Safe
|
[
"CWE-190"
] |
LibRaw
|
4554e24ce24beaef5d0ef48372801cfd91039076
|
2.312930552843822e+38
| 115 |
parse_qt: possible integer overflow
| 0 |
bfad_im_optionrom_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
char optrom_ver[BFA_VERSION_LEN];
bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
0e62395da2bd5166d7c9e14cbc7503b256a34cb0
|
1.5010352199502179e+38
| 12 |
scsi: bfa: release allocated memory in case of error
In bfad_im_get_stats if bfa_port_get_stats fails, allocated memory needs to
be released.
Link: https://lore.kernel.org/r/20190910234417.22151-1-navid.emamdoost@gmail.com
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
| 0 |
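The leak fix boils down to the usual release-on-error pattern: every failure path after the allocation funnels through a single free. A self-contained sketch with a stand-in for bfa_port_get_stats():

```c
#include <stdlib.h>

static int collect_stats(void *buf) { (void)buf; return 0; }  /* stand-in */

int get_stats_sketch(void **out)
{
    void *stats = malloc(128);
    if (stats == NULL)
        return -1;
    if (collect_stats(stats) != 0)
        goto out_free;          /* release on *every* error path */
    *out = stats;
    return 0;
out_free:
    free(stats);
    return -1;
}
```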
int init_aliases(void)
{
FILE *fp;
char alias[MAXALIASLEN + 1U];
char dir[PATH_MAX + 1U];
if ((fp = fopen(ALIASES_FILE, "r")) == NULL) {
return 0;
}
while (fgets(alias, sizeof alias, fp) != NULL) {
if (*alias == '#' || *alias == '\n' || *alias == 0) {
continue;
}
{
char * const z = alias + strlen(alias) - 1U;
if (*z != '\n') {
goto bad;
}
*z = 0;
}
do {
if (fgets(dir, sizeof dir, fp) == NULL || *dir == 0) {
goto bad;
}
{
char * const z = dir + strlen(dir) - 1U;
if (*z == '\n') {
*z = 0;
}
}
} while (*dir == '#' || *dir == 0);
if (head == NULL) {
if ((head = tail = malloc(sizeof *head)) == NULL ||
(tail->alias = strdup(alias)) == NULL ||
(tail->dir = strdup(dir)) == NULL) {
die_mem();
}
} else {
DirAlias *curr;
if ((curr = malloc(sizeof *curr)) == NULL ||
(curr->alias = strdup(alias)) == NULL ||
(curr->dir = strdup(dir)) == NULL) {
die_mem();
}
tail->next = curr;
tail = curr;
}
tail->next = NULL;
}
fclose(fp);
aliases_up++;
return 0;
bad:
fclose(fp);
logfile(LOG_ERR, MSG_ALIASES_BROKEN_FILE " [" ALIASES_FILE "]");
return -1;
}
|
Safe
|
[
"CWE-824"
] |
pure-ftpd
|
8d0d42542e2cb7a56d645fbe4d0ef436e38bcefa
|
1.090883552986582e+38
| 63 |
diraliases: always set the tail of the list to NULL
Spotted and reported by Antonio Norales from GitHub Security Labs.
Thanks!
| 0 |
rb_str_hash(str)
VALUE str;
{
register long len = RSTRING(str)->len;
register char *p = RSTRING(str)->ptr;
register int key = 0;
#if defined(HASH_ELFHASH)
register unsigned int g;
while (len--) {
key = (key << 4) + *p++;
if (g = key & 0xF0000000)
key ^= g >> 24;
key &= ~g;
}
#elif defined(HASH_PERL)
while (len--) {
key += *p++;
key += (key << 10);
key ^= (key >> 6);
}
key += (key << 3);
key ^= (key >> 11);
key += (key << 15);
#else
while (len--) {
key = key*65599 + *p;
p++;
}
key = key + (key>>5);
#endif
return key;
}
|
Safe
|
[
"CWE-20"
] |
ruby
|
e926ef5233cc9f1035d3d51068abe9df8b5429da
|
1.0019485740987744e+38
| 34 |
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export.
* string.c (rb_str_tmp_new), intern.h: New function.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
| 0 |
TEST_F(Http1ServerConnectionImplTest, Http10Absolute) {
initialize();
TestRequestHeaderMapImpl expected_headers{
{":authority", "www.somewhere.com"}, {":path", "/foobar"}, {":method", "GET"}};
Buffer::OwnedImpl buffer("GET http://www.somewhere.com/foobar HTTP/1.0\r\n\r\n");
expectHeadersTest(Protocol::Http10, true, buffer, expected_headers);
}
|
Safe
|
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
|
1.3082406583212276e+38
| 8 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <avd@google.com>
| 0 |
xmlXPtrRangeToFunction(xmlXPathParserContextPtr ctxt,
int nargs ATTRIBUTE_UNUSED) {
XP_ERROR(XPATH_EXPR_ERROR);
}
|
Safe
|
[
"CWE-416"
] |
libxml2
|
9ab01a277d71f54d3143c2cf333c5c2e9aaedd9e
|
1.2585589056804162e+38
| 4 |
Fix XPointer paths beginning with range-to
The old code would invoke the broken xmlXPtrRangeToFunction. range-to
isn't really a function but a special kind of location step. Remove
this function and always handle range-to in the XPath code.
The old xmlXPtrRangeToFunction could also be abused to trigger a
use-after-free error with the potential for remote code execution.
Found with afl-fuzz.
Fixes CVE-2016-5131.
| 0 |
listener_check_activation (GSListener *listener)
{
gboolean inhibited;
gboolean res;
gs_debug ("Checking for activation");
if (! listener->priv->activation_enabled) {
return TRUE;
}
if (! listener->priv->session_idle) {
return TRUE;
}
/* if we aren't inhibited then activate */
inhibited = listener_ref_entry_is_present (listener, REF_ENTRY_TYPE_INHIBIT);
res = FALSE;
if (! inhibited) {
gs_debug ("Trying to activate");
res = gs_listener_set_active (listener, TRUE);
}
return res;
}
|
Safe
|
[] |
gnome-screensaver
|
284c9924969a49dbf2d5fae1d680d3310c4df4a3
|
9.922959400900422e+37
| 26 |
Remove session inhibitors if the originator falls off the bus
This fixes a problem where totem leaves inhibitors behind, see
bug 600488.
| 0 |
winlink_clear_flags(struct winlink *wl)
{
struct winlink *loop;
wl->window->flags &= ~WINDOW_ALERTFLAGS;
TAILQ_FOREACH(loop, &wl->window->winlinks, wentry) {
if ((loop->flags & WINLINK_ALERTFLAGS) != 0) {
loop->flags &= ~WINLINK_ALERTFLAGS;
server_status_session(loop->session);
}
}
}
|
Safe
|
[] |
src
|
b32e1d34e10a0da806823f57f02a4ae6e93d756e
|
2.73128156973462e+38
| 12 |
evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547.
| 0 |
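A short sketch of the check the message calls for, using the libevent 2 API: test the allocator's return value before use and propagate the failure.

```c
#include <event2/buffer.h>

struct evbuffer *make_greeting(void)
{
    struct evbuffer *evb = evbuffer_new();
    if (evb == NULL)                    /* malloc failed: don't dereference */
        return NULL;
    if (evbuffer_add(evb, "hi\n", 3) < 0) {
        evbuffer_free(evb);
        return NULL;
    }
    return evb;
}
```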
tee_print_sized_data(const char *data, unsigned int data_length, unsigned int total_bytes_to_send, bool right_justified)
{
/*
For '\0's print ASCII spaces instead, as '\0' is eaten by (at
least my) console driver, and that messes up the pretty table
grid. (The \0 is also the reason we can't use fprintf() .)
*/
unsigned int i;
const char *p;
if (right_justified)
for (i= data_length; i < total_bytes_to_send; i++)
tee_putc((int)' ', PAGER);
for (i= 0, p= data; i < data_length; i+= 1, p+= 1)
{
if (*p == '\0')
tee_putc((int)' ', PAGER);
else
tee_putc((int)*p, PAGER);
}
if (! right_justified)
for (i= data_length; i < total_bytes_to_send; i++)
tee_putc((int)' ', PAGER);
}
|
Safe
|
[
"CWE-295"
] |
mysql-server
|
b3e9211e48a3fb586e88b0270a175d2348935424
|
2.2558046568770802e+38
| 26 |
WL#9072: Backport WL#8785 to 5.5
| 0 |
gchar* tcp_follow_index_filter(int stream)
{
return g_strdup_printf("tcp.stream eq %d", stream);
}
|
Safe
|
[
"CWE-354"
] |
wireshark
|
7f3fe6164a68b76d9988c4253b24d43f498f1753
|
7.553684990839659e+37
| 4 |
TCP: do not use an unknown status when the checksum is 0xffff
Otherwise it triggers an assert when adding the column as the field is
defined as BASE_NONE and not BASE_DEC or BASE_HEX. Thus an unknown value
(not in the proto_checksum_vals[] array) cannot be represented.
Mark the checksum as bad even if we process the packet.
Closes #16816
Conflicts:
epan/dissectors/packet-tcp.c
| 0 |
rtadv_prefix_set (struct zebra_if *zif, struct rtadv_prefix *rp)
{
struct rtadv_prefix *rprefix;
rprefix = rtadv_prefix_get (zif->rtadv.AdvPrefixList, &rp->prefix);
/* Set parameters. */
rprefix->AdvValidLifetime = rp->AdvValidLifetime;
rprefix->AdvPreferredLifetime = rp->AdvPreferredLifetime;
rprefix->AdvOnLinkFlag = rp->AdvOnLinkFlag;
rprefix->AdvAutonomousFlag = rp->AdvAutonomousFlag;
rprefix->AdvRouterAddressFlag = rp->AdvRouterAddressFlag;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
quagga
|
cfb1fae25f8c092e0d17073eaf7bd428ce1cd546
|
2.0681620657457766e+38
| 13 |
zebra: stack overrun in IPv6 RA receive code (CVE-2016-1245)
The IPv6 RA code also receives ICMPv6 RS and RA messages.
Unfortunately, by bad coding practice, the buffer size specified on
receiving such messages mixed up 2 constants that in fact have
different values.
The code itself has:
#define RTADV_MSG_SIZE 4096
While BUFSIZ is system-dependent, in my case (x86_64 glibc):
/usr/include/_G_config.h:#define _G_BUFSIZ 8192
/usr/include/libio.h:#define _IO_BUFSIZ _G_BUFSIZ
/usr/include/stdio.h:# define BUFSIZ _IO_BUFSIZ
FreeBSD, OpenBSD, NetBSD and Illumos are not affected, since all of them
have BUFSIZ == 1024.
As the latter is passed to the kernel on recvmsg(), it's possible to
overwrite 4kB of stack -- with ICMPv6 packets that can be globally sent
to any of the system's addresses (using fragmentation to get to 8k).
(The socket has filters installed limiting this to RS and RA packets,
but does not have a filter for source address or TTL.)
Issue discovered by trying to test other stuff, which randomly caused
the stack to be smaller than 8kB in that code location, which then
causes the kernel to report EFAULT (Bad address).
Signed-off-by: David Lamparter <equinox@opensourcerouting.org>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
| 0 |
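The fix is to keep the length passed to the kernel tied to the buffer's real size. A minimal sketch of the corrected pairing (RTADV_MSG_SIZE as in the code; the rest of the recvmsg() setup is elided):

```c
#include <sys/uio.h>

#define RTADV_MSG_SIZE 4096

static unsigned char buf[RTADV_MSG_SIZE];
static struct iovec iov = {
    .iov_base = buf,
    .iov_len  = sizeof(buf),   /* passing BUFSIZ (8192 on glibc) here let
                                * recvmsg() write past the 4096-byte buffer */
};
```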
inline Decimal128 BSONElement::numberDecimal() const {
switch (type()) {
case NumberDouble:
return Decimal128(_numberDouble());
case NumberInt:
return Decimal128(_numberInt());
case NumberLong:
return Decimal128(static_cast<int64_t>(_numberLong()));
case NumberDecimal:
return _numberDecimal();
default:
return Decimal128::kNormalizedZero;
}
}
|
Safe
|
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
|
2.192704313295753e+38
| 14 |
SERVER-38984 Validate unique User ID on UserCache hit
| 0 |
count_slots_of_windows (GList *window_list)
{
NautilusWindow *window;
GList *slots, *l;
int count;
count = 0;
for (l = window_list; l != NULL; l = l->next) {
window = NAUTILUS_WINDOW (l->data);
slots = nautilus_window_get_slots (window);
count += g_list_length (slots);
g_list_free (slots);
}
return count;
}
|
Safe
|
[] |
nautilus
|
1e1c916f5537eb5e4144950f291f4a3962fc2395
|
3.026714852043374e+38
| 18 |
Add "interactive" argument to nautilus_file_mark_desktop_file_trusted.
2009-02-24 Alexander Larsson <alexl@redhat.com>
* libnautilus-private/nautilus-file-operations.c:
* libnautilus-private/nautilus-file-operations.h:
* libnautilus-private/nautilus-mime-actions.c:
Add "interactive" argument to
nautilus_file_mark_desktop_file_trusted.
* src/nautilus-application.c:
Mark all desktopfiles on the desktop trusted on first
run.
svn path=/trunk/; revision=15009
| 0 |
static void sfq_free(void *addr)
{
kvfree(addr);
}
|
Safe
|
[
"CWE-330"
] |
linux
|
55667441c84fa5e0911a0aac44fb059c15ba6da2
|
1.7807699571391587e+38
| 4 |
net/flow_dissector: switch to siphash
UDP IPv6 packets auto flowlabels are using a 32bit secret
(static u32 hashrnd in net/core/flow_dissector.c) and
apply jhash() over fields known by the receivers.
Attackers can easily infer the 32bit secret and use this information
to identify a device and/or user, since this 32bit secret is only
set at boot time.
Really, using jhash() to generate cookies sent on the wire
is a serious security concern.
Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
a dead end. Trying to periodically change the secret (like in sch_sfq.c)
could change paths taken in the network for long lived flows.
Let's switch to siphash, as we did in commit df453700e8d8
("inet: switch IP ID generator to siphash")
Using a cryptographically strong pseudo random function will solve this
privacy issue and more generally remove other weak points in the stack.
Packet schedulers using skb_get_hash_perturb() benefit from this change.
Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jonathan Berger <jonathann1@walla.com>
Reported-by: Amit Klein <aksecurity@gmail.com>
Reported-by: Benny Pinkas <benny@pinkas.net>
Cc: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
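A hedged sketch of the switch the message describes, in kernel style (siphash() and siphash_key_t are from the kernel's <linux/siphash.h>; the flow-tuple struct and helper names here are illustrative): the guessable 32-bit jhash seed becomes a 128-bit SipHash key.

```c
#include <linux/siphash.h>

static siphash_key_t flow_hash_key __read_mostly;

/* Illustrative tuple; the real code hashes struct flow_keys. */
struct flow_tuple {
	__be32 saddr, daddr;
	__be16 sport, dport;
};

static u32 flow_hash(const struct flow_tuple *t)
{
	net_get_random_once(&flow_hash_key, sizeof(flow_hash_key));
	/* jhash keyed by a 32-bit secret can be brute-forced from
	 * observed outputs; SipHash with a 128-bit key cannot. */
	return (u32)siphash(t, sizeof(*t), &flow_hash_key);
}
```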
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len)
{
int r;
unsigned long addr;
addr = gfn_to_hva_read(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = kvm_read_hva(data, (void __user *)addr + offset, len);
if (r)
return -EFAULT;
return 0;
}
|
Safe
|
[
"CWE-399"
] |
linux
|
12d6e7538e2d418c08f082b1b44ffa5fb7270ed8
|
2.0770276233899897e+38
| 14 |
KVM: perform an invalid memslot step for gpa base change
PPC must flush all translations before the new memory slot
is visible.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
| 0 |
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
ops->get_msr(ctxt, MSR_EFER, &efer);
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
/*
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
/* sysenter/sysexit have not been tested in 64bit mode. */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (ctxt->mode) {
case X86EMUL_MODE_PROT32:
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
break;
case X86EMUL_MODE_PROT64:
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
break;
default:
break;
}
ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
cs_sel = (u16)msr_data;
cs_sel &= ~SELECTOR_RPL_MASK;
ss_sel = cs_sel + 8;
ss_sel &= ~SELECTOR_RPL_MASK;
if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
return X86EMUL_CONTINUE;
}
|
Vulnerable
|
[
"CWE-362",
"CWE-269"
] |
linux
|
f3747379accba8e95d70cec0eae0582c8c182050
|
3.7847963362824325e+37
| 62 |
KVM: x86: SYSENTER emulation is broken
SYSENTER emulation is broken in several ways:
1. It misses the case of 16-bit code segments completely (CVE-2015-0239).
2. MSR_IA32_SYSENTER_CS is checked in 64-bit mode incorrectly (bits 0 and 1 can
still be set without causing #GP).
3. MSR_IA32_SYSENTER_EIP and MSR_IA32_SYSENTER_ESP are not masked in
legacy-mode.
4. There is some unneeded code.
Fix it.
Cc: stable@vger.linux.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 1 |
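Points 2 and 3 of that list reduce to small local checks; a hedged sketch of the corrected em_sysenter() logic (not the verbatim patch):

```c
/* (2) Bits 1:0 of SYSENTER_CS are ignored by hardware, so only a
 * selector that is zero apart from the RPL bits should raise #GP. */
if ((msr_data & 0xfffc) == 0)
	return emulate_gp(ctxt, 0);

/* (3) Outside long mode, SYSENTER loads 32-bit EIP/ESP: truncate
 * the MSR values instead of using them as-is. */
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data
					: (u32)msr_data;
```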
bool LEX::only_view_structure()
{
switch (sql_command) {
case SQLCOM_SHOW_CREATE:
case SQLCOM_SHOW_TABLES:
case SQLCOM_SHOW_FIELDS:
case SQLCOM_REVOKE_ALL:
case SQLCOM_REVOKE:
case SQLCOM_GRANT:
case SQLCOM_CREATE_VIEW:
return TRUE;
default:
return FALSE;
}
}
|
Safe
|
[
"CWE-476"
] |
server
|
3a52569499e2f0c4d1f25db1e81617a9d9755400
|
3.2753553011576824e+38
| 15 |
MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294
The assertion failure was caused by this query
select /*id=1*/ * from t1
where
col= ( select /*id=2*/ * from ... where corr_cond1
union
select /*id=4*/ * from ... where corr_cond2)
Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
the optimizer optimized away the correlation, making the select with id=4
uncorrelated.
However, since select with id=2 remained correlated, the execution had to
re-compute the whole UNION. When it tried to execute select with id=4, it
hit an assertion (join buffer already free'd).
This is because select with id=4 has freed its execution structures after
it has been executed once. The select is uncorrelated, so it did not expect
it would need to be executed for the second time.
Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times.
| 0 |
static int ZEND_FASTCALL ZEND_BW_AND_SPEC_VAR_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op1;
bitwise_and_function(&EX_T(opline->result.u.var).tmp_var,
_get_zval_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC),
&opline->op2.u.constant TSRMLS_CC);
if (free_op1.var) {zval_ptr_dtor(&free_op1.var);};
ZEND_VM_NEXT_OPCODE();
}
|
Safe
|
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
|
1.171272223411967e+38
| 12 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
| 0 |
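The one-line message corresponds to rejecting paths that contain an embedded NUL byte before they reach the filesystem layer. A hedged sketch of the macro this fix family introduced in PHP 5.x (treat the exact usage as an assumption):

```c
/* A path is only valid if its C-string length equals the length PHP
 * tracked for the value, i.e. it contains no embedded '\0'. */
#define CHECK_NULL_PATH(p, l) (strlen(p) == (size_t)(l))

if (!CHECK_NULL_PATH(Z_STRVAL_PP(arg), Z_STRLEN_PP(arg))) {
	RETURN_FALSE;	/* rejects "safe.php\0.jpg"-style names */
}
```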
STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
{
if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
+ UNIQUE_THRESHOLD;
}
|
Safe
|
[
"CWE-119"
] |
bdwgc
|
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
|
6.409904210653422e+37
| 8 |
Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now).
| 0 |
vrrp_shutdown_backstop_thread(thread_t *thread)
{
int count = 0;
thread_t *t;
/* Force terminate all script processes */
if (thread->master->child.rb_root.rb_node)
script_killall(thread->master, SIGKILL, true);
rb_for_each_entry_cached(t, &thread->master->child, n)
count++;
log_message(LOG_ERR, "Backstop thread invoked: shutdown timer %srunning, child count %d",
thread->master->shutdown_timer_running ? "" : "not ", count);
thread_add_terminate_event(thread->master);
return 0;
}
|
Safe
|
[
"CWE-200"
] |
keepalived
|
26c8d6374db33bcfcdcd758b1282f12ceef4b94f
|
8.581728927352472e+37
| 19 |
Disable fopen_safe() append mode by default
If a non privileged user creates /tmp/keepalived.log and has it open
for read (e.g. tail -f), then even though keepalived will change the
owner to root and remove all read/write permissions from non owners,
the application which already has the file open will be able to read
the added log entries.
Accordingly, opening a file in append mode is disabled by default, and
only enabled if --enable-smtp-alert-debug or --enable-log-file (which
are debugging options and unset by default) are enabled.
This should further alleviate security concerns related to CVE-2018-19046.
Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
| 0 |
static int b43_chip_init(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
int err;
u32 macctl;
u16 value16;
/* Initialize the MAC control */
macctl = B43_MACCTL_IHR_ENABLED | B43_MACCTL_SHM_ENABLED;
if (dev->phy.gmode)
macctl |= B43_MACCTL_GMODE;
macctl |= B43_MACCTL_INFRA;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
err = b43_upload_microcode(dev);
if (err)
goto out; /* firmware is released later */
err = b43_gpio_init(dev);
if (err)
goto out; /* firmware is released later */
err = b43_upload_initvals(dev);
if (err)
goto err_gpio_clean;
/* Turn the Analog on and initialize the PHY. */
phy->ops->switch_analog(dev, 1);
err = b43_phy_init(dev);
if (err)
goto err_gpio_clean;
/* Disable Interference Mitigation. */
if (phy->ops->interf_mitigation)
phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);
/* Select the antennae */
if (phy->ops->set_rx_antenna)
phy->ops->set_rx_antenna(dev, B43_ANTENNA_DEFAULT);
b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT);
if (phy->type == B43_PHYTYPE_B) {
value16 = b43_read16(dev, 0x005E);
value16 |= 0x0004;
b43_write16(dev, 0x005E, value16);
}
b43_write32(dev, 0x0100, 0x01000000);
if (dev->dev->core_rev < 5)
b43_write32(dev, 0x010C, 0x01000000);
b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_INFRA, 0);
b43_maskset32(dev, B43_MMIO_MACCTL, ~0, B43_MACCTL_INFRA);
/* Probe Response Timeout value */
/* FIXME: Default to 0, has to be set by ioctl probably... :-/ */
b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 0);
/* Initially set the wireless operation mode. */
b43_adjust_opmode(dev);
if (dev->dev->core_rev < 3) {
b43_write16(dev, 0x060E, 0x0000);
b43_write16(dev, 0x0610, 0x8000);
b43_write16(dev, 0x0604, 0x0000);
b43_write16(dev, 0x0606, 0x0200);
} else {
b43_write32(dev, 0x0188, 0x80000000);
b43_write32(dev, 0x018C, 0x02000000);
}
b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
b43_mac_phy_clock_set(dev, true);
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
/* FIXME: 0xE74 is quite common, but should be read from CC */
b43_write16(dev, B43_MMIO_POWERUP_DELAY, 0xE74);
break;
#endif
#ifdef CONFIG_B43_SSB
case B43_BUS_SSB:
b43_write16(dev, B43_MMIO_POWERUP_DELAY,
dev->dev->sdev->bus->chipco.fast_pwrup_delay);
break;
#endif
}
err = 0;
b43dbg(dev->wl, "Chip initialized\n");
out:
return err;
err_gpio_clean:
b43_gpio_cleanup(dev);
return err;
}
|
Safe
|
[
"CWE-134"
] |
wireless
|
9538cbaab6e8b8046039b4b2eb6c9d614dc782bd
|
7.856692202301513e+36
| 103 |
b43: stop format string leaking into error msgs
The module parameter "fwpostfix" is userspace controllable, unfiltered,
and is used to define the firmware filename. b43_do_request_fw() populates
ctx->errors[] on error, containing the firmware filename. b43err()
parses its arguments as a format string. For systems with b43 hardware,
this could lead to a uid-0 to ring-0 escalation.
CVE-2013-2852
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: stable@vger.kernel.org
Signed-off-by: John W. Linville <linville@tuxdriver.com>
| 0 |
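The bug class is the classic format-string leak: attacker-influenced text reaches a printf-style function in format position. A minimal before/after sketch (b43err() and ctx->errors[] stand in for the driver's own names):

```c
/* Vulnerable: "%x"/"%n" sequences in fwpostfix are interpreted,
 * leaking or corrupting kernel memory. */
b43err(dev->wl, ctx->errors[i]);

/* Fixed: a constant format; the untrusted text is mere data. */
b43err(dev->wl, "%s", ctx->errors[i]);
```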
void Item_param::set_double(double d)
{
DBUG_ENTER("Item_param::set_double");
value.real= d;
state= REAL_VALUE;
collation.set_numeric();
max_length= DBL_DIG + 8;
decimals= NOT_FIXED_DEC;
maybe_null= 0;
null_value= 0;
fix_type(Item::REAL_ITEM);
DBUG_VOID_RETURN;
}
|
Safe
|
[
"CWE-89"
] |
server
|
b5e16a6e0381b28b598da80b414168ce9a5016e5
|
1.0517278906774616e+38
| 13 |
MDEV-26061 MariaDB server crash at Field::set_default
* Item_default_value::fix_fields creates a copy of its argument's field.
* Field::default_value is changed when its expression is prepared in
unpack_vcol_info_from_frm()
This means we must unpack any vcol expression that includes DEFAULT(x)
strictly after unpacking x->default_value.
To avoid building and solving this dependency graph on every table open,
we update Item_default_value::field->default_value after all vcols
are unpacked and fixed.
| 0 |
void test_curveto(bezctx *bc, double x1, double y1, double x2, double y2,
double x3, double y3) {
printf("test_curveto(%g,%g, %g,%g, %g,%g)\n",x1,y1,x2,y2,x3,y3);
}
|
Safe
|
[
"CWE-787"
] |
libspiro
|
35233450c922787dad42321e359e5229ff470a1e
|
8.126511811509011e+37
| 4 |
CVE-2019-19847, Stack-based buffer overflow in the spiro_to_bpath0()
Frederic Cambus (@fcambus) discovered a bug in call-test.c using:
./configure CFLAGS="-fsanitize=address"
make
./tests/call-test[14,15,16,17,18,19]
Fredrick Brennan (@ctrlcctrlv) provided bugfix. See issue #21
| 0 |
static unsigned usb_bus_is_wusb(struct usb_bus *bus)
{
struct usb_hcd *hcd = bus_to_hcd(bus);
return hcd->wireless;
}
|
Safe
|
[
"CWE-400",
"CWE-703"
] |
linux
|
704620afc70cf47abb9d6a1a57f3825d2bca49cf
|
2.8452611880379396e+38
| 5 |
USB: check usb_get_extra_descriptor for proper size
When reading an extra descriptor, we need to properly check the minimum
and maximum size allowed, to prevent from invalid data being sent by a
device.
Reported-by: Hui Peng <benquike@gmail.com>
Reported-by: Mathias Payer <mathias.payer@nebelwelt.net>
Co-developed-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Hui Peng <benquike@gmail.com>
Signed-off-by: Mathias Payer <mathias.payer@nebelwelt.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: stable <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
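A self-contained sketch of the check the message calls for, with illustrative names (the real patch threads a minimum-size argument through the kernel helper): a descriptor's device-supplied bLength must cover both the structure to be read and stay within the remaining buffer.

```c
#include <stddef.h>

/* Return the descriptor start, or NULL if the device-declared length
 * is too small for the caller's structure or overruns the buffer. */
static const void *get_extra_descriptor(const unsigned char *buf,
                                        size_t buflen, size_t need)
{
    if (buflen < 2)          /* bLength + bDescriptorType header */
        return NULL;
    size_t blen = buf[0];    /* bLength is device-controlled */
    if (blen < need || blen > buflen)
        return NULL;
    return buf;
}
```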
RGWStatBucket() {}
|
Safe
|
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
|
2.0208263152972466e+37
| 1 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <joao@suse.de>
Signed-off-by: Abhishek Lekshmanan <abhishek@suse.com>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
| 0 |
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
/*
* Avoid migrating a page that is shared with others.
*/
if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
if (!isolate_lru_page(page)) {
list_add_tail(&page->lru, pagelist);
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
}
}
}
|
Safe
|
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
|
3.3178256098120394e+38
| 14 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: <stable@vger.kernel.org> [2.6.38+]
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
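The remedy described above was condensed into a helper; a hedged sketch modeled on the pmd_none_or_trans_huge_or_clear_bad() the patch adds: snapshot the pmd once, and never treat a hugepmd as "bad".

```c
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	/* Single read into a local; barrier() stops the compiler from
	 * re-reading *pmd, which may change under mmap_sem held read. */
	pmd_t pmdval = *pmd;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	if (pmd_none(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		/* A hugepmd materialized by a racing fault is not "bad";
		 * clearing it here is what corrupted the map counts. */
		if (!pmd_trans_huge(pmdval))
			pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
```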
void DefaultEnv::SetLogLevel( const std::string &level )
{
Log *log = GetLog();
log->SetLevel( level );
}
|
Safe
|
[
"CWE-78"
] |
xrootd
|
befa2e627a5a33a38c92db3e57c07d8246a24acf
|
3.2529472556898098e+38
| 5 |
secgsi: do not build/package libXrdSecgsiGMAPLDAP-4.so
The way the LDAP query is implemented may represent a security threat.
Any related building and packaging reference is removed.
The code is left in place (for the time being) to remind its functionality
in the case a sanitized version is required.
| 0 |
int mg_asprintf(char **buf, size_t size, const char *fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = mg_vasprintf(buf, size, fmt, ap);
va_end(ap);
return ret;
}
|
Safe
|
[
"CWE-552"
] |
mongoose
|
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
|
2.7414945814192494e+38
| 8 |
Protect against the directory traversal in mg_upload()
| 0 |
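One way to express such a guard, as a self-contained sketch (the helper name is illustrative, not mongoose's API): reject any client-supplied file name containing a ".." path segment before joining it to the upload directory.

```c
#include <string.h>
#include <stdbool.h>

/* Reject names that could escape the upload root: absolute paths,
 * backslashes, and any standalone ".." path component. */
static bool is_safe_upload_name(const char *name)
{
    if (name[0] == '\0' || name[0] == '/' || strchr(name, '\\'))
        return false;
    for (const char *p = name; (p = strstr(p, "..")) != NULL; p++) {
        bool at_start = (p == name) || (p[-1] == '/');
        bool at_end   = (p[2] == '\0') || (p[2] == '/');
        if (at_start && at_end)
            return false;   /* "..", "../x", "x/../y", "x/.." */
    }
    return true;
}
```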
bool fix_partition_func(THD *thd, TABLE *table,
bool is_create_table_ind)
{
bool result= TRUE;
partition_info *part_info= table->part_info;
enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
DBUG_ENTER("fix_partition_func");
if (part_info->fixed)
{
DBUG_RETURN(FALSE);
}
thd->mark_used_columns= MARK_COLUMNS_NONE;
DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
if (!is_create_table_ind ||
thd->lex->sql_command != SQLCOM_CREATE_TABLE)
{
if (partition_default_handling(table, part_info,
is_create_table_ind,
table->s->normalized_path.str))
{
DBUG_RETURN(TRUE);
}
}
if (part_info->is_sub_partitioned())
{
DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION);
/*
Subpartition is defined. We need to verify that subpartitioning
function is correct.
*/
if (part_info->linear_hash_ind)
set_linear_hash_mask(part_info, part_info->num_subparts);
if (part_info->list_of_subpart_fields)
{
List_iterator<char> it(part_info->subpart_field_list);
if (unlikely(handle_list_of_fields(it, table, part_info, TRUE)))
goto end;
}
else
{
if (unlikely(fix_fields_part_func(thd, part_info->subpart_expr,
table, TRUE, is_create_table_ind)))
goto end;
if (unlikely(part_info->subpart_expr->result_type() != INT_RESULT))
{
part_info->report_part_expr_error(TRUE);
goto end;
}
}
}
DBUG_ASSERT(part_info->part_type != NOT_A_PARTITION);
/*
Partition is defined. We need to verify that partitioning
function is correct.
*/
if (part_info->part_type == HASH_PARTITION)
{
if (part_info->linear_hash_ind)
set_linear_hash_mask(part_info, part_info->num_parts);
if (part_info->list_of_part_fields)
{
List_iterator<char> it(part_info->part_field_list);
if (unlikely(handle_list_of_fields(it, table, part_info, FALSE)))
goto end;
}
else
{
if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
table, FALSE, is_create_table_ind)))
goto end;
if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
{
part_info->report_part_expr_error(FALSE);
goto end;
}
}
part_info->fixed= TRUE;
}
else
{
const char *error_str;
if (part_info->column_list)
{
List_iterator<char> it(part_info->part_field_list);
if (unlikely(handle_list_of_fields(it, table, part_info, FALSE)))
goto end;
}
else
{
if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
table, FALSE, is_create_table_ind)))
goto end;
}
part_info->fixed= TRUE;
if (part_info->part_type == RANGE_PARTITION)
{
error_str= partition_keywords[PKW_RANGE].str;
if (unlikely(part_info->check_range_constants(thd)))
goto end;
}
else if (part_info->part_type == LIST_PARTITION)
{
error_str= partition_keywords[PKW_LIST].str;
if (unlikely(part_info->check_list_constants(thd)))
goto end;
}
else
{
DBUG_ASSERT(0);
my_error(ER_INCONSISTENT_PARTITION_INFO_ERROR, MYF(0));
goto end;
}
if (unlikely(part_info->num_parts < 1))
{
my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_str);
goto end;
}
if (unlikely(!part_info->column_list &&
part_info->part_expr->result_type() != INT_RESULT))
{
part_info->report_part_expr_error(FALSE);
goto end;
}
}
if (((part_info->part_type != HASH_PARTITION ||
part_info->list_of_part_fields == FALSE) &&
!part_info->column_list &&
check_part_func_fields(part_info->part_field_array, TRUE)) ||
(part_info->list_of_subpart_fields == FALSE &&
part_info->is_sub_partitioned() &&
check_part_func_fields(part_info->subpart_field_array, TRUE)))
{
/*
Range/List/HASH (but not KEY) and not COLUMNS or HASH subpartitioning
with columns in the partitioning expression using unallowed charset.
*/
my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
goto end;
}
if (unlikely(create_full_part_field_array(thd, table, part_info)))
goto end;
if (unlikely(check_primary_key(table)))
goto end;
if (unlikely((!(table->s->db_type()->partition_flags &&
(table->s->db_type()->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
check_unique_keys(table)))
goto end;
if (unlikely(set_up_partition_bitmap(thd, part_info)))
goto end;
if (unlikely(part_info->set_up_charset_field_preps()))
{
my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
goto end;
}
if (unlikely(part_info->check_partition_field_length()))
{
my_error(ER_PARTITION_FIELDS_TOO_LONG, MYF(0));
goto end;
}
check_range_capable_PF(table);
set_up_partition_key_maps(table, part_info);
set_up_partition_func_pointers(part_info);
set_up_range_analysis_info(part_info);
result= FALSE;
end:
thd->mark_used_columns= save_mark_used_columns;
DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
DBUG_RETURN(result);
}
|
Safe
|
[] |
mysql-server
|
be901b60ae59c93848c829d1b0b2cb523ab8692e
|
2.2445398861263632e+38
| 171 |
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT.
Analysis
========
CREATE TABLE of InnoDB table with a partition name
which exceeds the path limit can cause the server
to exit.
During the preparation of the partition name,
there was no check to identify whether the complete
path name for partition exceeds the max supported
path length, causing the server to exit during
subsequent processing.
Fix
===
During the preparation of partition name, check and report
an error if the partition path name exceeds the maximum path
name limit.
This is a 5.5 patch.
| 0 |
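The fix section translates to a length guard before the partition file name is composed; a hedged sketch with illustrative pieces (FN_REFLEN is MySQL's path-buffer limit, ER_PATH_LENGTH its overlong-path error; the exact call site is assumed):

```c
/* Reject a partition whose on-disk data file path would overflow
 * the fixed-size buffers used later (illustrative check). */
if (strlen(table_path) + strlen("#P#") + strlen(partition_name) +
    strlen(file_ext) >= FN_REFLEN)
{
    my_error(ER_PATH_LENGTH, MYF(0), partition_name);
    return true;   /* abort CREATE TABLE instead of crashing */
}
```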
Tensor OutputSlice(Tensor* t, int pos, const string& name) {
Tensor res = UnalignedSlice(*t, pos);
if (res.IsAligned()) {
return res;
} else {
Tensor aligned = AlignTensor(res, name);
copy_out_.emplace_back(res, aligned);
return aligned;
}
}
|
Safe
|
[
"CWE-20",
"CWE-703"
] |
tensorflow
|
803404044ae7a1efac48ba82d74111fce1ddb09a
|
1.6115812371374009e+38
| 10 |
Fix security vulnerability with LSTMBlockCellOp
PiperOrigin-RevId: 446028341
| 0 |
WandExport void DrawPathCurveToRelative(DrawingWand *wand,const double x1,
const double y1,const double x2,const double y2,const double x,const double y)
{
assert(wand != (DrawingWand *) NULL);
assert(wand->signature == MagickWandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
DrawPathCurveTo(wand,RelativePathMode,x1,y1,x2,y2,x,y);
}
|
Safe
|
[
"CWE-476"
] |
ImageMagick
|
6ad5fc3c9b652eec27fc0b1a0817159f8547d5d9
|
6.166511389526699e+37
| 9 |
https://github.com/ImageMagick/ImageMagick/issues/716
| 0 |
int tls1_set_sigalgs(CERT *c, const int *psig_nids, size_t salglen)
{
unsigned char *sigalgs, *sptr;
int rhash, rsign;
size_t i;
if (salglen & 1)
return 0;
sigalgs = OPENSSL_malloc(salglen);
if (sigalgs == NULL)
return 0;
for (i = 0, sptr = sigalgs; i < salglen; i+=2)
{
rhash = tls12_find_id(*psig_nids++, tls12_md,
sizeof(tls12_md)/sizeof(tls12_lookup));
rsign = tls12_find_id(*psig_nids++, tls12_sig,
sizeof(tls12_sig)/sizeof(tls12_lookup));
if (rhash == -1 || rsign == -1)
goto err;
*sptr++ = rhash;
*sptr++ = rsign;
}
if (c->conf_sigalgs)
OPENSSL_free(c->conf_sigalgs);
c->conf_sigalgs = sigalgs;
c->conf_sigalgslen = salglen;
return 1;
err:
OPENSSL_free(sigalgs);
return 0;
}
|
Safe
|
[] |
openssl
|
c70a1fee71119a9005b1f304a3bf47694b4a53ac
|
4.8219285676912725e+37
| 34 |
Reorganise supported signature algorithm extension processing.
Only store encoded versions of peer and configured signature algorithms.
Determine shared signature algorithms and cache the result along with NID
equivalents of each algorithm.
(backport from HEAD)
| 0 |
static Expr *removeUnindexableInClauseTerms(
Parse *pParse, /* The parsing context */
int iEq, /* Look at loop terms starting here */
WhereLoop *pLoop, /* The current loop */
Expr *pX /* The IN expression to be reduced */
){
sqlite3 *db = pParse->db;
Expr *pNew = sqlite3ExprDup(db, pX, 0);
if( db->mallocFailed==0 ){
ExprList *pOrigRhs = pNew->x.pSelect->pEList; /* Original unmodified RHS */
ExprList *pOrigLhs = pNew->pLeft->x.pList; /* Original unmodified LHS */
ExprList *pRhs = 0; /* New RHS after modifications */
ExprList *pLhs = 0; /* New LHS after mods */
int i; /* Loop counter */
Select *pSelect; /* Pointer to the SELECT on the RHS */
for(i=iEq; i<pLoop->nLTerm; i++){
if( pLoop->aLTerm[i]->pExpr==pX ){
int iField = pLoop->aLTerm[i]->iField - 1;
if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */
pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr);
pOrigRhs->a[iField].pExpr = 0;
assert( pOrigLhs->a[iField].pExpr!=0 );
pLhs = sqlite3ExprListAppend(pParse, pLhs, pOrigLhs->a[iField].pExpr);
pOrigLhs->a[iField].pExpr = 0;
}
}
sqlite3ExprListDelete(db, pOrigRhs);
sqlite3ExprListDelete(db, pOrigLhs);
pNew->pLeft->x.pList = pLhs;
pNew->x.pSelect->pEList = pRhs;
if( pLhs && pLhs->nExpr==1 ){
/* Take care here not to generate a TK_VECTOR containing only a
** single value. Since the parser never creates such a vector, some
** of the subroutines do not handle this case. */
Expr *p = pLhs->a[0].pExpr;
pLhs->a[0].pExpr = 0;
sqlite3ExprDelete(db, pNew->pLeft);
pNew->pLeft = p;
}
pSelect = pNew->x.pSelect;
if( pSelect->pOrderBy ){
/* If the SELECT statement has an ORDER BY clause, zero the
** iOrderByCol variables. These are set to non-zero when an
** ORDER BY term exactly matches one of the terms of the
** result-set. Since the result-set of the SELECT statement may
** have been modified or reordered, these variables are no longer
** set correctly. Since setting them is just an optimization,
** it's easiest just to zero them here. */
ExprList *pOrderBy = pSelect->pOrderBy;
for(i=0; i<pOrderBy->nExpr; i++){
pOrderBy->a[i].u.x.iOrderByCol = 0;
}
}
#if 0
printf("For indexing, change the IN expr:\n");
sqlite3TreeViewExpr(0, pX, 0);
printf("Into:\n");
sqlite3TreeViewExpr(0, pNew, 0);
#endif
}
return pNew;
}
|
Safe
|
[
"CWE-476"
] |
sqlite
|
57f7ece78410a8aae86aa4625fb7556897db384c
|
1.5180520817579323e+38
| 64 |
Fix a problem that comes up when using generated columns that evaluate to a
constant in an index and then making use of that index in a join.
FossilOrigin-Name: 8b12e95fec7ce6e0de82a04ca3dfcf1a8e62e233b7382aa28a8a9be6e862b1af
| 0 |
static bool fdrive_perpendicular_needed(void *opaque)
{
FDrive *drive = opaque;
return drive->perpendicular != 0;
}
|
Safe
|
[
"CWE-119"
] |
qemu
|
e907746266721f305d67bc0718795fedee2e824c
|
3.1333203649409277e+38
| 6 |
fdc: force the fifo access to be in bounds of the allocated buffer
During processing of certain commands such as FD_CMD_READ_ID and
FD_CMD_DRIVE_SPECIFICATION_COMMAND the fifo memory access could
get out of bounds leading to memory corruption with values coming
from the guest.
Fix this by making sure that the index is always bounded by the
allocated memory.
This is CVE-2015-3456.
Signed-off-by: Petr Matousek <pmatouse@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Signed-off-by: John Snow <jsnow@redhat.com>
| 0 |
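The shape of the CVE-2015-3456 (VENOM) fix, sketched: every guest-advanced FIFO index is folded back into the allocation before the array access (the real patch applies this in the FIFO read/write paths; FD_SECTOR_LEN is the fifo size).

```c
uint32_t pos;
uint8_t value;

/* data_pos advances under guest control; reduce it modulo the
 * buffer size so the access cannot leave fdctrl->fifo[]. */
pos = fdctrl->data_pos++;
pos %= FD_SECTOR_LEN;
value = fdctrl->fifo[pos];
```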
handle_server_data_cookie_sha1_mech (DBusAuth *auth,
const DBusString *data)
{
if (auth->cookie_id < 0)
return sha1_handle_first_client_response (auth, data);
else
return sha1_handle_second_client_response (auth, data);
}
|
Safe
|
[
"CWE-59"
] |
dbus
|
47b1a4c41004bf494b87370987b222c934b19016
|
4.2111129419685066e+37
| 8 |
auth: Reject DBUS_COOKIE_SHA1 for users other than the server owner
The DBUS_COOKIE_SHA1 authentication mechanism aims to prove ownership
of a shared home directory by having the server write a secret "cookie"
into a .dbus-keyrings subdirectory of the desired identity's home
directory with 0700 permissions, and having the client prove that it can
read the cookie. This never actually worked for non-malicious clients in
the case where server uid != client uid (unless the server and client
both have privileges, such as Linux CAP_DAC_OVERRIDE or traditional
Unix uid 0) because an unprivileged server would fail to write out the
cookie, and an unprivileged client would be unable to read the resulting
file owned by the server.
Additionally, since dbus 1.7.10 we have checked that ~/.dbus-keyrings
is owned by the uid of the server (a side-effect of a check added to
harden our use of XDG_RUNTIME_DIR), further ruling out successful use
by a non-malicious client with a uid differing from the server's.
Joe Vennix of Apple Information Security discovered that the
implementation of DBUS_COOKIE_SHA1 was susceptible to a symbolic link
attack: a malicious client with write access to its own home directory
could manipulate a ~/.dbus-keyrings symlink to cause the DBusServer to
read and write in unintended locations. In the worst case this could
result in the DBusServer reusing a cookie that is known to the
malicious client, and treating that cookie as evidence that a subsequent
client connection came from an attacker-chosen uid, allowing
authentication bypass.
This is mitigated by the fact that by default, the well-known system
dbus-daemon (since 2003) and the well-known session dbus-daemon (in
stable releases since dbus 1.10.0 in 2015) only accept the EXTERNAL
authentication mechanism, and as a result will reject DBUS_COOKIE_SHA1
at an early stage, before manipulating cookies. As a result, this
vulnerability only applies to:
* system or session dbus-daemons with non-standard configuration
* third-party dbus-daemon invocations such as at-spi2-core (although
in practice at-spi2-core also only accepts EXTERNAL by default)
* third-party uses of DBusServer such as the one in Upstart
Avoiding symlink attacks in a portable way is difficult, because APIs
like openat() and Linux /proc/self/fd are not universally available.
However, because DBUS_COOKIE_SHA1 already doesn't work in practice for
a non-matching uid, we can solve this vulnerability in an easier way
without regressions, by rejecting it early (before looking at
~/.dbus-keyrings) whenever the requested identity doesn't match the
identity of the process hosting the DBusServer.
Signed-off-by: Simon McVittie <smcv@collabora.com>
Closes: https://gitlab.freedesktop.org/dbus/dbus/issues/269
Closes: CVE-2019-12749
| 0 |
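The early rejection the message describes sits exactly in the function shown above; a hedged sketch with assumed internals (the credential-comparison helper and fields are modeled on libdbus naming, not quoted from the patch):

```c
/* Before any keyring I/O: cookies can only prove ownership of the
 * server's own home directory, so reject other identities early. */
if (!_dbus_credentials_same_user (auth->desired_identity,
                                  auth->my_credentials))
  {
    _dbus_verbose ("DBUS_COOKIE_SHA1 for a different user rejected\n");
    return send_rejected (auth);
  }
```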
int handler::ha_rnd_init_with_error(bool scan)
{
int error;
if (likely(!(error= ha_rnd_init(scan))))
return 0;
table->file->print_error(error, MYF(0));
return error;
}
|
Safe
|
[
"CWE-416"
] |
server
|
af810407f78b7f792a9bb8c47c8c532eb3b3a758
|
7.112488051519909e+37
| 8 |
MDEV-28098 incorrect key in "dup value" error after long unique
reset errkey after using it, so that it wouldn't affect
the next error message in the next statement
| 0 |
R_API RBinJavaCPTypeObj *r_bin_java_find_cp_name_and_type_info(RBinJavaObj *bin, ut16 name_idx, ut16 descriptor_idx) {
RListIter *iter, *iter_tmp;
RBinJavaCPTypeObj *res = NULL, *obj = NULL;
IFDBG eprintf("Looking for name_idx: %d and descriptor_idx: %d\n", name_idx, descriptor_idx);
r_list_foreach_safe (bin->cp_list, iter, iter_tmp, obj) {
if (obj && obj->tag == R_BIN_JAVA_CP_NAMEANDTYPE) {
IFDBG eprintf("RBinJavaCPTypeNameAndType has name_idx: %d and descriptor_idx: %d\n",
obj->info.cp_name_and_type.name_idx, obj->info.cp_name_and_type.descriptor_idx);
if (obj->info.cp_name_and_type.name_idx == name_idx &&
obj->info.cp_name_and_type.descriptor_idx == descriptor_idx) {
res = obj;
break;
}
}
}
return res;
}
|
Safe
|
[
"CWE-787"
] |
radare2
|
9650e3c352f675687bf6c6f65ff2c4a3d0e288fa
|
2.3930990795020747e+38
| 17 |
Fix oobread segfault in java arith8.class ##crash
* Reported by Cen Zhang via huntr.dev
| 0 |
static double mp_list_set_Joff_v(_cimg_math_parser& mp) {
if (!mp.imglist.width()) return cimg::type<double>::nan();
const unsigned int ind = (unsigned int)cimg::mod((int)_mp_arg(2),mp.imglist.width());
CImg<T> &img = mp.imglist[ind];
const int
ox = (int)mp.mem[_cimg_mp_slot_x], oy = (int)mp.mem[_cimg_mp_slot_y],
oz = (int)mp.mem[_cimg_mp_slot_z], oc = (int)mp.mem[_cimg_mp_slot_c];
const longT
off = img.offset(ox,oy,oz,oc) + (longT)_mp_arg(3),
whd = (longT)img.width()*img.height()*img.depth();
const double *ptrs = &_mp_arg(1) + 1;
if (off>=0 && off<whd) {
const unsigned int vsiz = (unsigned int)mp.opcode[4];
T *ptrd = &img[off];
cimg_for_inC(img,0,vsiz - 1,c) { *ptrd = (T)*(ptrs++); ptrd+=whd; }
}
return cimg::type<double>::nan();
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
2.603457532861011e+37
| 18 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
| 0 |
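That check generalizes to any header-driven loader; a self-contained sketch (names illustrative): multiply the header dimensions with explicit overflow guards, then compare against the bytes actually present in the file.

```c
#include <stdint.h>
#include <stddef.h>

/* Return nonzero iff a w x h x d x c image with `bpp` bytes per
 * sample can fit in a file of `file_size` bytes (headers aside). */
static int dims_fit_file(uint64_t w, uint64_t h, uint64_t d,
                         uint64_t c, uint64_t bpp, uint64_t file_size)
{
    uint64_t need = w;
    const uint64_t f[] = { h, d, c, bpp };
    for (size_t i = 0; i < sizeof(f)/sizeof(f[0]); i++) {
        if (f[i] != 0 && need > UINT64_MAX / f[i])
            return 0;            /* product would overflow */
        need *= f[i];
    }
    return need <= file_size;
}
```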
STATIC void GC_init_size_map(void)
{
size_t i;
/* Map size 0 to something bigger. */
/* This avoids problems at lower levels. */
GC_size_map[0] = 1;
for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) {
GC_size_map[i] = ROUNDED_UP_GRANULES(i);
# ifndef _MSC_VER
GC_ASSERT(GC_size_map[i] < TINY_FREELISTS);
/* Seems to tickle bug in VC++ 2008 for AMD64 */
# endif
}
/* We leave the rest of the array to be filled in on demand. */
}
|
Safe
|
[
"CWE-119"
] |
bdwgc
|
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
|
1.3250973830573412e+38
| 16 |
Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now).
| 0 |
static ssize_t proc_sys_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 1);
}
|
Safe
|
[
"CWE-20",
"CWE-399"
] |
linux
|
93362fa47fe98b62e4a34ab408c4a418432e7939
|
1.7550298116532982e+38
| 5 |
sysctl: Drop reference added by grab_header in proc_sys_readdir
Fixes CVE-2016-9191, proc_sys_readdir doesn't drop reference
added by grab_header when return from !dir_emit_dots path.
It can cause any path called unregister_sysctl_table will
wait forever.
The calltrace of CVE-2016-9191:
[ 5535.960522] Call Trace:
[ 5535.963265] [<ffffffff817cdaaf>] schedule+0x3f/0xa0
[ 5535.968817] [<ffffffff817d33fb>] schedule_timeout+0x3db/0x6f0
[ 5535.975346] [<ffffffff817cf055>] ? wait_for_completion+0x45/0x130
[ 5535.982256] [<ffffffff817cf0d3>] wait_for_completion+0xc3/0x130
[ 5535.988972] [<ffffffff810d1fd0>] ? wake_up_q+0x80/0x80
[ 5535.994804] [<ffffffff8130de64>] drop_sysctl_table+0xc4/0xe0
[ 5536.001227] [<ffffffff8130de17>] drop_sysctl_table+0x77/0xe0
[ 5536.007648] [<ffffffff8130decd>] unregister_sysctl_table+0x4d/0xa0
[ 5536.014654] [<ffffffff8130deff>] unregister_sysctl_table+0x7f/0xa0
[ 5536.021657] [<ffffffff810f57f5>] unregister_sched_domain_sysctl+0x15/0x40
[ 5536.029344] [<ffffffff810d7704>] partition_sched_domains+0x44/0x450
[ 5536.036447] [<ffffffff817d0761>] ? __mutex_unlock_slowpath+0x111/0x1f0
[ 5536.043844] [<ffffffff81167684>] rebuild_sched_domains_locked+0x64/0xb0
[ 5536.051336] [<ffffffff8116789d>] update_flag+0x11d/0x210
[ 5536.057373] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.064186] [<ffffffff81167acb>] ? cpuset_css_offline+0x1b/0x60
[ 5536.070899] [<ffffffff810fce3d>] ? trace_hardirqs_on+0xd/0x10
[ 5536.077420] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.084234] [<ffffffff8115a9f5>] ? css_killed_work_fn+0x25/0x220
[ 5536.091049] [<ffffffff81167ae5>] cpuset_css_offline+0x35/0x60
[ 5536.097571] [<ffffffff8115aa2c>] css_killed_work_fn+0x5c/0x220
[ 5536.104207] [<ffffffff810bc83f>] process_one_work+0x1df/0x710
[ 5536.110736] [<ffffffff810bc7c0>] ? process_one_work+0x160/0x710
[ 5536.117461] [<ffffffff810bce9b>] worker_thread+0x12b/0x4a0
[ 5536.123697] [<ffffffff810bcd70>] ? process_one_work+0x710/0x710
[ 5536.130426] [<ffffffff810c3f7e>] kthread+0xfe/0x120
[ 5536.135991] [<ffffffff817d4baf>] ret_from_fork+0x1f/0x40
[ 5536.142041] [<ffffffff810c3e80>] ? kthread_create_on_node+0x230/0x230
One cgroup maintainer mentioned that "cgroup is trying to offline
a cpuset css, which takes place under cgroup_mutex. The offlining
ends up trying to drain active usages of a sysctl table which apparently
is not happening."
The real reason is that proc_sys_readdir doesn't drop reference added
by grab_header when return from !dir_emit_dots path. So this cpuset
offline path will wait here forever.
See here for details: http://www.openwall.com/lists/oss-security/2016/11/04/13
Fixes: f0c3b5093add ("[readdir] convert procfs")
Cc: stable@vger.kernel.org
Reported-by: CAI Qian <caiqian@redhat.com>
Tested-by: Yang Shukui <yangshukui@huawei.com>
Signed-off-by: Zhou Chengming <zhouchengming1@huawei.com>
Acked-by: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
| 0 |
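The fix itself is a one-line change of control flow; a hedged sketch of the corrected proc_sys_readdir() (abridged): the early return must travel through the same cleanup label that drops the reference grab_header() took.

```c
head = grab_header(file_inode(file));	/* takes a reference */
if (IS_ERR(head))
	return PTR_ERR(head);

ctl_dir = container_of(head, struct ctl_dir, header);

if (!dir_emit_dots(file, ctx))
	goto out;			/* was: return 0 -- leaked head */

/* ... emit the remaining sysctl entries ... */

out:
	sysctl_head_finish(head);	/* drop the reference */
	return 0;
```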
xemaclite_poll_controller(struct net_device *ndev)
{
disable_irq(ndev->irq);
xemaclite_interrupt(ndev->irq, ndev);
enable_irq(ndev->irq);
}
|
Safe
|
[
"CWE-703",
"CWE-824"
] |
linux
|
d0d62baa7f505bd4c59cd169692ff07ec49dde37
|
2.0613641469052153e+38
| 6 |
net: xilinx_emaclite: Do not print real IOMEM pointer
Printing kernel pointers is discouraged because they might leak kernel
memory layout. This fixes smatch warning:
drivers/net/ethernet/xilinx/xilinx_emaclite.c:1191 xemaclite_of_probe() warn:
argument 4 to %08lX specifier is cast from pointer
Signed-off-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |