func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64) |
---|---|---|---|---|---|---|---|---|
tta_type_find (GstTypeFind * tf, gpointer unused)
{
const guint8 *data = gst_type_find_peek (tf, 0, 3);
if (data) {
if (memcmp (data, "TTA", 3) == 0) {
gst_type_find_suggest (tf, GST_TYPE_FIND_MAXIMUM, TTA_CAPS);
return;
}
}
}
|
Safe
|
[
"CWE-125"
] |
gst-plugins-base
|
2fdccfd64fc609e44e9c4b8eed5bfdc0ab9c9095
|
2.1249614830930675e+38
| 11 |
typefind: bounds check windows ico detection
Fixes out of bounds read
https://bugzilla.gnome.org/show_bug.cgi?id=774902
| 0 |
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
struct inode **delegated_inode, unsigned int flags)
{
int error;
bool is_dir = d_is_dir(old_dentry);
const unsigned char *old_name;
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
/*
* Check source == target.
* On overlayfs need to look at underlying inodes.
*/
if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
error = may_create(new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
error = may_delete(new_dir, new_dentry, is_dir);
else
error = may_delete(new_dir, new_dentry, new_is_dir);
}
if (error)
return error;
if (!old_dir->i_op->rename && !old_dir->i_op->rename2)
return -EPERM;
if (flags && !old_dir->i_op->rename2)
return -EINVAL;
/*
* If we are going to change the parent - check write permissions,
* we'll need to flip '..'.
*/
if (new_dir != old_dir) {
if (is_dir) {
error = inode_permission(source, MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
error = inode_permission(target, MAY_WRITE);
if (error)
return error;
}
}
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
if (error)
return error;
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
dget(new_dentry);
if (!is_dir || (flags & RENAME_EXCHANGE))
lock_two_nondirectories(source, target);
else if (target)
inode_lock(target);
error = -EBUSY;
if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
goto out;
if (max_links && new_dir != old_dir) {
error = -EMLINK;
if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
goto out;
if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
old_dir->i_nlink >= max_links)
goto out;
}
if (is_dir && !(flags & RENAME_EXCHANGE) && target)
shrink_dcache_parent(new_dentry);
if (!is_dir) {
error = try_break_deleg(source, delegated_inode);
if (error)
goto out;
}
if (target && !new_is_dir) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
}
if (!old_dir->i_op->rename2) {
error = old_dir->i_op->rename(old_dir, old_dentry,
new_dir, new_dentry);
} else {
WARN_ON(old_dir->i_op->rename != NULL);
error = old_dir->i_op->rename2(old_dir, old_dentry,
new_dir, new_dentry, flags);
}
if (error)
goto out;
if (!(flags & RENAME_EXCHANGE) && target) {
if (is_dir)
target->i_flags |= S_DEAD;
dont_mount(new_dentry);
detach_mounts(new_dentry);
}
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
if (!(flags & RENAME_EXCHANGE))
d_move(old_dentry, new_dentry);
else
d_exchange(old_dentry, new_dentry);
}
out:
if (!is_dir || (flags & RENAME_EXCHANGE))
unlock_two_nondirectories(source, target);
else if (target)
inode_unlock(target);
dput(new_dentry);
if (!error) {
fsnotify_move(old_dir, new_dir, old_name, is_dir,
!(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
if (flags & RENAME_EXCHANGE) {
fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
new_is_dir, NULL, new_dentry);
}
}
fsnotify_oldname_free(old_name);
return error;
}
|
Safe
|
[
"CWE-284"
] |
linux
|
9409e22acdfc9153f88d9b1ed2bd2a5b34d2d3ca
|
6.023282690720863e+37
| 136 |
vfs: rename: check backing inode being equal
If a file is renamed to a hardlink of itself, POSIX specifies that rename(2)
should do nothing and return success.
This condition is checked in vfs_rename(). However it won't detect hard
links on overlayfs where these are given separate inodes on the overlayfs
layer.
Overlayfs itself detects this condition and returns success without doing
anything, but then vfs_rename() will proceed as if this was a successful
rename (detach_mounts(), d_move()).
The correct thing to do is to detect this condition before even calling
into overlayfs. This patch does this by calling vfs_select_inode() to get
the underlying inodes.
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
Cc: <stable@vger.kernel.org> # v4.2+
| 0 |
iperf_dump_fdset(FILE *fp, char *str, int nfds, fd_set *fds)
{
int fd;
int comma;
fprintf(fp, "%s: [", str);
comma = 0;
for (fd = 0; fd < nfds; ++fd) {
if (FD_ISSET(fd, fds)) {
if (comma)
fprintf(fp, ", ");
fprintf(fp, "%d", fd);
comma = 1;
}
}
fprintf(fp, "]\n");
}
|
Safe
|
[
"CWE-120",
"CWE-119",
"CWE-787"
] |
iperf
|
91f2fa59e8ed80dfbf400add0164ee0e508e412a
|
1.5742088767279975e+38
| 17 |
Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <bmah@es.net>
| 0 |
entering_window(win_T *win)
{
// Only matters for a prompt window.
if (!bt_prompt(win->w_buffer))
return;
// When switching to a prompt buffer that was in Insert mode, don't stop
// Insert mode, it may have been set in leaving_window().
if (win->w_buffer->b_prompt_insert != NUL)
stop_insert_mode = FALSE;
// When entering the prompt window restart Insert mode if we were in Insert
// mode when we left it and not already in Insert mode.
if ((State & INSERT) == 0)
restart_edit = win->w_buffer->b_prompt_insert;
}
|
Safe
|
[
"CWE-476"
] |
vim
|
0f6e28f686dbb59ab3b562408ab9b2234797b9b1
|
2.8332160702044835e+37
| 16 |
patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window.
| 0 |
rdpsnd_queue_write(STREAM s, uint16 tick, uint8 index)
{
struct audio_packet *packet = &packet_queue[queue_hi];
unsigned int next_hi = (queue_hi + 1) % MAX_QUEUE;
if (next_hi == queue_pending)
{
logger(Sound, Error, "rdpsnd_queue_write(), no space to queue audio packet");
return;
}
queue_hi = next_hi;
packet->s = *s;
packet->tick = tick;
packet->index = index;
gettimeofday(&packet->arrive_tv, NULL);
}
|
Safe
|
[
"CWE-119",
"CWE-125",
"CWE-703",
"CWE-787"
] |
rdesktop
|
4dca546d04321a610c1835010b5dad85163b65e1
|
6.6886875481390035e+37
| 19 |
Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities were identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
| 0 |
static int rtreeDestroy(sqlite3_vtab *pVtab){
Rtree *pRtree = (Rtree *)pVtab;
int rc;
char *zCreate = sqlite3_mprintf(
"DROP TABLE '%q'.'%q_node';"
"DROP TABLE '%q'.'%q_rowid';"
"DROP TABLE '%q'.'%q_parent';",
pRtree->zDb, pRtree->zName,
pRtree->zDb, pRtree->zName,
pRtree->zDb, pRtree->zName
);
if( !zCreate ){
rc = SQLITE_NOMEM;
}else{
nodeBlobReset(pRtree);
rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0);
sqlite3_free(zCreate);
}
if( rc==SQLITE_OK ){
rtreeRelease(pRtree);
}
return rc;
}
|
Safe
|
[
"CWE-125"
] |
sqlite
|
e41fd72acc7a06ce5a6a7d28154db1ffe8ba37a8
|
3.0134065343480867e+38
| 24 |
Enhance the rtreenode() function of rtree (used for testing) so that it
uses the newer sqlite3_str object for better performance and improved
error reporting.
FossilOrigin-Name: 90acdbfce9c088582d5165589f7eac462b00062bbfffacdcc786eb9cf3ea5377
| 0 |
build_collating_symbol (bitset_t sbcset, const unsigned char *name)
# endif /* not RE_ENABLE_I18N */
{
size_t name_len = strlen ((const char *) name);
if (BE (name_len != 1, 0))
return REG_ECOLLATE;
else
{
bitset_set (sbcset, name[0]);
return REG_NOERROR;
}
}
|
Safe
|
[
"CWE-19"
] |
gnulib
|
5513b40999149090987a0341c018d05d3eea1272
|
1.053466279507318e+38
| 12 |
Diagnose ERE '()|\1'
Problem reported by Hanno Böck in: http://bugs.gnu.org/21513
* lib/regcomp.c (parse_reg_exp): While parsing alternatives, keep
track of the set of previously-completed subexpressions available
before the first alternative, and restore this set just before
parsing each subsequent alternative. This lets us diagnose the
invalid back-reference in the ERE '()|\1'.
| 0 |
static void perf_pmu_sched_task(struct task_struct *prev,
struct task_struct *next,
bool sched_in)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
if (prev == next)
return;
list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
if (WARN_ON_ONCE(!pmu->sched_task))
continue;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
pmu->sched_task(cpuctx->task_ctx, sched_in);
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
|
Safe
|
[
"CWE-190"
] |
linux
|
1572e45a924f254d9570093abde46430c3172e3d
|
1.117752435904506e+38
| 25 |
perf/core: Fix the perf_cpu_time_max_percent check
Use "proc_dointvec_minmax" instead of "proc_dointvec" to check the input
value from user-space.
If not, we can set a big value and some vars will overflow like
"sysctl_perf_event_sample_rate" which will cause a lot of unexpected
problems.
Signed-off-by: Tan Xiaojun <tanxiaojun@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <acme@kernel.org>
Cc: <alexander.shishkin@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/1487829879-56237-1-git-send-email-tanxiaojun@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| 0 |
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
int rc;
unsigned size, max_size;
unsigned long linear;
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
/*
* We do not know exactly how many bytes will be needed, and
* __linearize is expensive, so fetch as much as possible. We
* just have to avoid going beyond the 15 byte limit, the end
* of the segment, or the end of the page.
*
* __linearize is called with size 0 so that it does not do any
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
&linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
size = min_t(unsigned, 15UL ^ cur_size, max_size);
size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
/*
* One instruction can only straddle two pages,
* and one has been loaded at the beginning of
* x86_decode_insn. So, if not enough bytes
* still, we must have hit the 15-byte boundary.
*/
if (unlikely(size < op_size))
return emulate_gp(ctxt, 0);
rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
ctxt->fetch.end += size;
return X86EMUL_CONTINUE;
}
|
Safe
|
[
"CWE-362",
"CWE-269"
] |
linux
|
f3747379accba8e95d70cec0eae0582c8c182050
|
1.689567202625558e+37
| 43 |
KVM: x86: SYSENTER emulation is broken
SYSENTER emulation is broken in several ways:
1. It misses the case of 16-bit code segments completely (CVE-2015-0239).
2. MSR_IA32_SYSENTER_CS is checked in 64-bit mode incorrectly (bits 0 and 1 can
still be set without causing #GP).
3. MSR_IA32_SYSENTER_EIP and MSR_IA32_SYSENTER_ESP are not masked in
legacy-mode.
4. There is some unneeded code.
Fix it.
Cc: stable@vger.linux.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
encode_secinfo_no_name(struct xdr_stream *xdr,
const struct nfs41_secinfo_no_name_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(OP_SECINFO_NO_NAME);
*p++ = cpu_to_be32(args->style);
hdr->nops++;
hdr->replen += decode_secinfo_no_name_maxsz;
return 0;
}
|
Safe
|
[
"CWE-703",
"CWE-189"
] |
linux
|
bf118a342f10dafe44b14451a1392c3254629a1f
|
1.3603553059832286e+38
| 12 |
NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrary
sized bitmap in an FATTR4_WORD0_ACL request. Replace using the
nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length to the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: stable@kernel.org
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
| 0 |
static bool check_show_access(THD *thd, TABLE_LIST *table)
{
/*
This is a SHOW command using an INFORMATION_SCHEMA table.
check_access() has not been called for 'table',
and SELECT is currently always granted on the I_S, so we automatically
grant SELECT on table here, to bypass a call to check_access().
Note that not calling check_access(table) is an optimization,
which needs to be revisited if the INFORMATION_SCHEMA does
not always automatically grant SELECT but use the grant tables.
See Bug#38837 need a way to disable information_schema for security
*/
table->grant.privilege= SELECT_ACL;
switch (get_schema_table_idx(table->schema_table)) {
case SCH_SCHEMATA:
return (specialflag & SPECIAL_SKIP_SHOW_DB) &&
check_global_access(thd, SHOW_DB_ACL);
case SCH_TABLE_NAMES:
case SCH_TABLES:
case SCH_VIEWS:
case SCH_TRIGGERS:
case SCH_EVENTS:
{
const char *dst_db_name= table->schema_select_lex->db.str;
DBUG_ASSERT(dst_db_name);
if (check_access(thd, SELECT_ACL, dst_db_name,
&thd->col_access, NULL, FALSE, FALSE))
return TRUE;
if (!thd->col_access && check_grant_db(thd, dst_db_name))
{
status_var_increment(thd->status_var.access_denied_errors);
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
thd->security_ctx->priv_user,
thd->security_ctx->priv_host,
dst_db_name);
return TRUE;
}
return FALSE;
}
case SCH_COLUMNS:
case SCH_STATISTICS:
{
TABLE_LIST *dst_table;
dst_table= table->schema_select_lex->table_list.first;
DBUG_ASSERT(dst_table);
/*
Open temporary tables to be able to detect them during privilege check.
*/
if (thd->open_temporary_tables(dst_table))
return TRUE;
if (check_access(thd, SELECT_ACL, dst_table->db.str,
&dst_table->grant.privilege,
&dst_table->grant.m_internal,
FALSE, FALSE))
return TRUE; /* Access denied */
/*
Check_grant will grant access if there is any column privileges on
all of the tables thanks to the fourth parameter (bool show_table).
*/
if (check_grant(thd, SELECT_ACL, dst_table, TRUE, 1, FALSE))
return TRUE; /* Access denied */
close_thread_tables(thd);
dst_table->table= NULL;
/* Access granted */
return FALSE;
}
default:
break;
}
return FALSE;
}
|
Safe
|
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
|
1.7654015401987433e+38
| 85 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <sanja@mariadb.com>
| 0 |
TEST(GtOp, MatchesWholeArray) {
BSONObj operand = BSON("$gt" << BSON_ARRAY(5));
GTMatchExpression gt;
ASSERT(gt.init("a", operand["$gt"]).isOK());
ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL));
ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL));
// Nested array.
// XXX: The following assertion documents current behavior.
ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL));
// XXX: The following assertion documents current behavior.
ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL));
}
|
Safe
|
[] |
mongo
|
b0ef26c639112b50648a02d969298650fbd402a4
|
3.207401541744771e+38
| 14 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
| 0 |
static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
struct radeon_gpio_rec *gpio)
{
struct radeon_hpd hpd;
u32 reg;
if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
reg = AVIVO_DC_GPIO_HPD_A;
hpd.gpio = *gpio;
if (gpio->reg == reg) {
switch(gpio->mask) {
case (1 << 0):
hpd.hpd = RADEON_HPD_1;
break;
case (1 << 8):
hpd.hpd = RADEON_HPD_2;
break;
case (1 << 16):
hpd.hpd = RADEON_HPD_3;
break;
case (1 << 24):
hpd.hpd = RADEON_HPD_4;
break;
case (1 << 26):
hpd.hpd = RADEON_HPD_5;
break;
case (1 << 28):
hpd.hpd = RADEON_HPD_6;
break;
default:
hpd.hpd = RADEON_HPD_NONE;
break;
}
} else
hpd.hpd = RADEON_HPD_NONE;
return hpd;
}
|
Safe
|
[
"CWE-119",
"CWE-193"
] |
linux
|
0031c41be5c529f8329e327b63cde92ba1284842
|
2.6577684588784876e+38
| 40 |
drivers/gpu/drm/radeon/radeon_atombios.c: range check issues
This change makes the array larger, "MAX_SUPPORTED_TV_TIMING_V1_2" is 3
and the original size "MAX_SUPPORTED_TV_TIMING" is 2.
Also there were checks that were off by one.
Signed-off-by: Dan Carpenter <error27@gmail.com>
Acked-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
| 0 |
ldns_rdf2native_int16(const ldns_rdf *rd)
{
uint16_t data;
/* only allow 16 bit rdfs */
if (ldns_rdf_size(rd) != LDNS_RDF_SIZE_WORD) {
return 0;
}
memcpy(&data, ldns_rdf_data(rd), sizeof(data));
return ntohs(data);
}
|
Safe
|
[
"CWE-415"
] |
ldns
|
070b4595981f48a21cc6b4f5047fdc2d09d3da91
|
7.304944338721203e+37
| 12 |
CAA and URI
| 0 |
static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
RESERVE_SPACE(4);
switch (arg->open_flags & O_CREAT) {
case 0:
WRITE32(NFS4_OPEN_NOCREATE);
break;
default:
BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
WRITE32(NFS4_OPEN_CREATE);
encode_createmode(xdr, arg);
}
}
|
Safe
|
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
|
5.637650512977068e+37
| 15 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
| 0 |
am_cache_entry_t *am_cache_lock(server_rec *s,
am_cache_key_t type,
const char *key)
{
am_mod_cfg_rec *mod_cfg;
void *table;
apr_size_t i;
int rv;
char buffer[512];
/* Check if we have a valid session key. We abort if we don't. */
if (key == NULL)
return NULL;
switch (type) {
case AM_CACHE_SESSION:
if (strlen(key) != AM_ID_LENGTH)
return NULL;
break;
case AM_CACHE_NAMEID:
break;
default:
return NULL;
break;
}
mod_cfg = am_get_mod_cfg(s);
/* Lock the table. */
if((rv = apr_global_mutex_lock(mod_cfg->lock)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
"apr_global_mutex_lock() failed [%d]: %s",
rv, apr_strerror(rv, buffer, sizeof(buffer)));
return NULL;
}
table = apr_shm_baseaddr_get(mod_cfg->cache);
for(i = 0; i < mod_cfg->init_cache_size; i++) {
am_cache_entry_t *e = am_cache_entry_ptr(mod_cfg, table, i);
const char *tablekey;
if (e->key[0] == '\0') {
/* This entry is empty. Skip it. */
continue;
}
switch (type) {
case AM_CACHE_SESSION:
tablekey = e->key;
break;
case AM_CACHE_NAMEID:
/* tablekey may be NULL */
tablekey = am_cache_env_fetch_first(e, "NAME_ID");
break;
default:
tablekey = NULL;
break;
}
if (tablekey == NULL)
continue;
if(strcmp(tablekey, key) == 0) {
/* We found the entry. */
if(e->expires > apr_time_now()) {
/* And it hasn't expired. */
return e;
}
}
}
/* We didn't find an entry matching the key. Unlock the table and
* return NULL;
*/
apr_global_mutex_unlock(mod_cfg->lock);
return NULL;
}
|
Safe
|
[
"CWE-79"
] |
mod_auth_mellon
|
7af21c53da7bb1de024274ee6da30bc22316a079
|
1.4900019873523179e+38
| 82 |
Fix Cross-Site Session Transfer vulnerability
mod_auth_mellon did not verify that the site the session was created
for was the same site as the site the user accessed. This allows an
attacker with access to one web site on a server to use the same
session to get access to a different site running on the same server.
This patch fixes this vulnerability by storing the cookie parameters
used when creating the session in the session, and verifying those
parameters when the session is loaded.
Thanks to François Kooman for reporting this vulnerability.
This vulnerability has been assigned CVE-2017-6807.
| 0 |
getBuf(const std::string& hex, size_t headroom = 0, size_t tailroom = 0) {
auto data = unhexlify(hex);
return IOBuf::copyBuffer(data.data(), data.size(), headroom, tailroom);
}
|
Safe
|
[
"CWE-400",
"CWE-703",
"CWE-770"
] |
fizz
|
6bf67137ef1ee5cd70c842b014c322b7deaf994b
|
1.0796166473599376e+38
| 4 |
Reject zero length handshake records.
Summary:
Zero length (all padding) handshake records are forbidden by RFC. Allowing
these was a regression in D13754697 (2c6f78a).
This is a partial fix for CVE-2019-11924
Reviewed By: xybu
Differential Revision: D16285100
fbshipit-source-id: 05a19d31ad74601ce89156a0e59517aaad8dd928
| 0 |
static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr)
{
struct ethtool_value edata = { ETHTOOL_GTSO };
if (!dev->ethtool_ops->get_ufo)
return -EOPNOTSUPP;
edata.data = dev->ethtool_ops->get_ufo(dev);
if (copy_to_user(useraddr, &edata, sizeof(edata)))
return -EFAULT;
return 0;
}
|
Safe
|
[] |
linux
|
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
|
3.0893034848715388e+38
| 11 |
[IPv4/IPv6]: UFO Scatter-gather approach
Attached is kernel patch for UDP Fragmentation Offload (UFO) feature.
1. This patch incorporate the review comments by Jeff Garzik.
2. Renamed USO as UFO (UDP Fragmentation Offload)
3. udp sendfile support with UFO
This patches uses scatter-gather feature of skb to generate large UDP
datagram. Below is a "how-to" on changes required in network device
driver to use the UFO interface.
UDP Fragmentation Offload (UFO) Interface:
-------------------------------------------
UFO is a feature wherein the Linux kernel network stack will offload the
IP fragmentation functionality of large UDP datagram to hardware. This
will reduce the overhead of stack in fragmenting the large UDP datagram to
MTU sized packets
1) Drivers indicate their capability of UFO using
dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG
NETIF_F_HW_CSUM is required for UFO over ipv6.
2) UFO packet will be submitted for transmission using driver xmit routine.
UFO packet will have a non-zero value for
"skb_shinfo(skb)->ufo_size"
skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP
fragment going out of the adapter after IP fragmentation by hardware.
skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[]
contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW
indicating that hardware has to do checksum calculation. Hardware should
compute the UDP checksum of complete datagram and also ip header checksum of
each fragmented IP packet.
For IPV6 the UFO provides the fragment identification-id in
skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating
IPv6 fragments.
Signed-off-by: Ananda Raju <ananda.raju@neterion.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (forwarded)
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
| 0 |
bool IsSupported(const NodeDef* node) const override {
// Note: div_no_nan(a, sqrt(b)) => mul_no_nan(a, rsqrt(b))
// for b == 0 would result in a / Inf instead of 0.
return IsAnyDiv(*node) && !IsDivNoNan(*node) && !IsFloorDiv(*node);
}
|
Safe
|
[
"CWE-476"
] |
tensorflow
|
e6340f0665d53716ef3197ada88936c2a5f7a2d3
|
9.616590595604768e+37
| 5 |
Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
| 0 |
get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
unsigned int num_blks)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
ext4_lblk_t lblk_from, lblk_to, c_offset;
unsigned int allocated_clusters = 0;
alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
/* max possible clusters for this allocation */
allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
/* Check towards left side */
c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
if (c_offset) {
lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
lblk_to = lblk_from + c_offset - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
allocated_clusters--;
}
/* Now check towards right. */
c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
if (allocated_clusters && c_offset) {
lblk_from = lblk_start + num_blks;
lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
allocated_clusters--;
}
return allocated_clusters;
}
|
Safe
|
[
"CWE-362"
] |
linux-2.6
|
dee1f973ca341c266229faa5a1a5bb268bed3531
|
1.921771781414506e+38
| 38 |
ext4: race-condition protection for ext4_convert_unwritten_extents_endio
We assumed that at the time we call ext4_convert_unwritten_extents_endio()
extent in question is fully inside [map.m_lblk, map->m_len] because
it was already split during submission. But this may not be true due to
a race between writeback vs fallocate.
If extent in question is larger than requested we will split it again.
Special precautions should be taken if zeroout is required because
[map.m_lblk, map->m_len] already contains valid data.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
| 0 |
static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
{
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
int i;
for (i = 0; i < ch->num_queue_pairs; i++) {
pf_q = ch->base_queue + i;
tx_ring = vsi->tx_rings[pf_q];
tx_ring->ch = NULL;
rx_ring = vsi->rx_rings[pf_q];
rx_ring->ch = NULL;
}
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
27d461333459d282ffa4a2bdb6b215a59d493a8f
|
3.449823526360756e+37
| 14 |
i40e: prevent memory leak in i40e_setup_macvlans
In i40e_setup_macvlans if i40e_setup_channel fails the allocated memory
for ch should be released.
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
| 0 |
void Compute(OpKernelContext* ctx) override {
auto x = ctx->input(0);
auto i = ctx->input(1);
auto v = ctx->input(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(i.shape()),
errors::InvalidArgument("i must be a vector. ",
i.shape().DebugString()));
OP_REQUIRES(ctx, x.dims() == v.dims(),
errors::InvalidArgument(
"x and v shape doesn't match (ranks differ): ",
x.shape().DebugString(), " vs. ", v.shape().DebugString()));
for (int i = 1; i < x.dims(); ++i) {
OP_REQUIRES(
ctx, x.dim_size(i) == v.dim_size(i),
errors::InvalidArgument("x and v shape doesn't match at index ", i,
" : ", x.shape().DebugString(), " vs. ",
v.shape().DebugString()));
}
OP_REQUIRES(ctx, i.dim_size(0) == v.dim_size(0),
errors::InvalidArgument(
"i and x shape doesn't match at index 0: ",
i.shape().DebugString(), " vs. ", v.shape().DebugString()));
Tensor y = x; // This creates an alias intentionally.
// Skip processing if tensors are empty.
if (x.NumElements() > 0 || v.NumElements() > 0) {
OP_REQUIRES_OK(ctx, DoCompute(ctx, i, v, &y));
}
ctx->set_output(0, y);
}
|
Vulnerable
|
[
"CWE-369"
] |
tensorflow
|
e86605c0a336c088b638da02135ea6f9f6753618
|
9.366368449414025e+37
| 31 |
Fix FPE in in-place update ops.
PiperOrigin-RevId: 388303197
Change-Id: Ib48309b6213ffe53eba81004b00e889d653e4b83
| 1 |
static void cil_reset_typeattr(struct cil_typeattribute *attr)
{
/* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from an attributetypes statement */
if (attr->expr_list != NULL) {
/* we don't want to destroy the expression stacks (cil_list) inside
* this list cil_list_destroy destroys sublists, so we need to do it
* manually */
struct cil_list_item *expr = attr->expr_list->head;
while (expr != NULL) {
struct cil_list_item *next = expr->next;
cil_list_item_destroy(&expr, CIL_FALSE);
expr = next;
}
free(attr->expr_list);
attr->expr_list = NULL;
}
attr->used = CIL_FALSE;
attr->keep = CIL_FALSE;
}
|
Safe
|
[
"CWE-416"
] |
selinux
|
f34d3d30c8325e4847a6b696fe7a3936a8a361f3
|
2.69813341660385e+38
| 19 |
libsepol/cil: Destroy classperms list when resetting classpermission
Nicolas Iooss reports:
A few months ago, OSS-Fuzz found a crash in the CIL compiler, which
got reported as
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28648 (the title
is misleading, or is caused by another issue that conflicts with the
one I report in this message). Here is a minimized CIL policy which
reproduces the issue:
(class CLASS (PERM))
(classorder (CLASS))
(sid SID)
(sidorder (SID))
(user USER)
(role ROLE)
(type TYPE)
(category CAT)
(categoryorder (CAT))
(sensitivity SENS)
(sensitivityorder (SENS))
(sensitivitycategory SENS (CAT))
(allow TYPE self (CLASS (PERM)))
(roletype ROLE TYPE)
(userrole USER ROLE)
(userlevel USER (SENS))
(userrange USER ((SENS)(SENS (CAT))))
(sidcontext SID (USER ROLE TYPE ((SENS)(SENS))))
(classpermission CLAPERM)
(optional OPT
(roletype nonexistingrole nonexistingtype)
(classpermissionset CLAPERM (CLASS (PERM)))
)
The CIL policy fuzzer (which mimics secilc built with clang Address
Sanitizer) reports:
==36541==ERROR: AddressSanitizer: heap-use-after-free on address
0x603000004f98 at pc 0x56445134c842 bp 0x7ffe2a256590 sp
0x7ffe2a256588
READ of size 8 at 0x603000004f98 thread T0
#0 0x56445134c841 in __cil_verify_classperms
/selinux/libsepol/src/../cil/src/cil_verify.c:1620:8
#1 0x56445134a43e in __cil_verify_classpermission
/selinux/libsepol/src/../cil/src/cil_verify.c:1650:9
#2 0x56445134a43e in __cil_pre_verify_helper
/selinux/libsepol/src/../cil/src/cil_verify.c:1715:8
#3 0x5644513225ac in cil_tree_walk_core
/selinux/libsepol/src/../cil/src/cil_tree.c:272:9
#4 0x564451322ab1 in cil_tree_walk
/selinux/libsepol/src/../cil/src/cil_tree.c:316:7
#5 0x5644513226af in cil_tree_walk_core
/selinux/libsepol/src/../cil/src/cil_tree.c:284:9
#6 0x564451322ab1 in cil_tree_walk
/selinux/libsepol/src/../cil/src/cil_tree.c:316:7
#7 0x5644512b88fd in cil_pre_verify
/selinux/libsepol/src/../cil/src/cil_post.c:2510:7
#8 0x5644512b88fd in cil_post_process
/selinux/libsepol/src/../cil/src/cil_post.c:2524:7
#9 0x5644511856ff in cil_compile
/selinux/libsepol/src/../cil/src/cil.c:564:7
The classperms list of a classpermission rule is created and filled
in when classpermissionset rules are processed, so it doesn't own any
part of the list and shouldn't retain any of it when it is reset.
Destroy the classperms list (without destroying the data in it) when
resetting a classpermission rule.
Reported-by: Nicolas Iooss <nicolas.iooss@m4x.org>
Signed-off-by: James Carter <jwcart2@gmail.com>
| 0 |
static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p, *arData;
arData = ht->arData;
nIndex = h | ht->nTableMask;
idx = HT_HASH_EX(arData, nIndex);
while (idx != HT_INVALID_IDX) {
ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
p = HT_HASH_TO_BUCKET_EX(arData, idx);
if ((p->h == h)
&& p->key
&& (ZSTR_LEN(p->key) == len)
&& !memcmp(ZSTR_VAL(p->key), str, len)) {
return p;
}
idx = Z_NEXT(p->val);
}
return NULL;
}
|
Safe
|
[
"CWE-190"
] |
php-src
|
4cc0286f2f3780abc6084bcdae5dce595daa3c12
|
2.7698854965926668e+38
| 22 |
Fix #73832 - leave the table in a safe state if the size is too big.
| 0 |
//! Fill sequentially all pixel values with specified values \newinstance.
CImg<T> get_fill(const T& val0, const T& val1, const T& val2, const T& val3, const T& val4, const T& val5,
const T& val6, const T& val7, const T& val8, const T& val9, const T& val10) const {
return CImg<T>(_width,_height,_depth,_spectrum).fill(val0,val1,val2,val3,val4,val5,val6,val7,val8,val9,val10);
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
1.1516338898072166e+38
| 4 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
static int ocsp_match_issuerid(X509 *cert, OCSP_CERTID *cid,
STACK_OF(OCSP_SINGLERESP) *sresp)
{
/* If only one ID to match then do it */
if(cid)
{
const EVP_MD *dgst;
X509_NAME *iname;
int mdlen;
unsigned char md[EVP_MAX_MD_SIZE];
if (!(dgst = EVP_get_digestbyobj(cid->hashAlgorithm->algorithm)))
{
OCSPerr(OCSP_F_OCSP_MATCH_ISSUERID, OCSP_R_UNKNOWN_MESSAGE_DIGEST);
return -1;
}
mdlen = EVP_MD_size(dgst);
if (mdlen < 0)
return -1;
if ((cid->issuerNameHash->length != mdlen) ||
(cid->issuerKeyHash->length != mdlen))
return 0;
iname = X509_get_subject_name(cert);
if (!X509_NAME_digest(iname, dgst, md, NULL))
return -1;
if (memcmp(md, cid->issuerNameHash->data, mdlen))
return 0;
X509_pubkey_digest(cert, dgst, md, NULL);
if (memcmp(md, cid->issuerKeyHash->data, mdlen))
return 0;
return 1;
}
else
{
/* We have to match the whole lot */
int i, ret;
OCSP_CERTID *tmpid;
for (i = 0; i < sk_OCSP_SINGLERESP_num(sresp); i++)
{
tmpid = sk_OCSP_SINGLERESP_value(sresp, i)->certId;
ret = ocsp_match_issuerid(cert, tmpid, NULL);
if (ret <= 0) return ret;
}
return 1;
}
}
|
Safe
|
[
"CWE-310"
] |
openssl
|
ebc71865f0506a293242bd4aec97cdc7a8ef24b0
|
1.9945464925818507e+38
| 49 |
Don't try and verify signatures if key is NULL (CVE-2013-0166)
Add additional check to catch this in ASN1_item_verify too.
| 0 |
std::vector<uint32_t> bsonArrayToBitPositions(const BSONArray& ba) {
std::vector<uint32_t> bitPositions;
// Convert BSONArray of bit positions to int vector
for (const auto& elt : ba) {
bitPositions.push_back(elt._numberInt());
}
return bitPositions;
}
|
Safe
|
[] |
mongo
|
64095239f41e9f3841d8be9088347db56d35c891
|
1.6038317940702927e+38
| 10 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
| 0 |
rtadv_timer (struct thread *thread)
{
struct zebra_vrf *zvrf = THREAD_ARG (thread);
struct listnode *node, *nnode;
struct interface *ifp;
struct zebra_if *zif;
int period;
zvrf->rtadv.ra_timer = NULL;
if (zvrf->rtadv.adv_msec_if_count == 0)
{
period = 1000; /* 1 s */
rtadv_event (zvrf, RTADV_TIMER, 1 /* 1 s */);
}
else
{
period = 10; /* 10 ms */
rtadv_event (zvrf, RTADV_TIMER_MSEC, 10 /* 10 ms */);
}
for (ALL_LIST_ELEMENTS (vrf_iflist (zvrf->vrf_id), node, nnode, ifp))
{
if (if_is_loopback (ifp) || ! if_is_operative (ifp))
continue;
zif = ifp->info;
if (zif->rtadv.AdvSendAdvertisements)
{
zif->rtadv.AdvIntervalTimer -= period;
if (zif->rtadv.AdvIntervalTimer <= 0)
{
/* FIXME: using MaxRtrAdvInterval each time isn't what section
6.2.4 of RFC4861 tells to do. */
zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval;
rtadv_send_packet (zvrf->rtadv.sock, ifp);
}
}
}
return 0;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
quagga
|
cfb1fae25f8c092e0d17073eaf7bd428ce1cd546
|
6.722480139038816e+37
| 41 |
zebra: stack overrun in IPv6 RA receive code (CVE-2016-1245)
The IPv6 RA code also receives ICMPv6 RS and RA messages.
Unfortunately, by bad coding practice, the buffer size specified on
receiving such messages mixed up 2 constants that in fact have
different values.
The code itself has:
#define RTADV_MSG_SIZE 4096
While BUFSIZ is system-dependent, in my case (x86_64 glibc):
/usr/include/_G_config.h:#define _G_BUFSIZ 8192
/usr/include/libio.h:#define _IO_BUFSIZ _G_BUFSIZ
/usr/include/stdio.h:# define BUFSIZ _IO_BUFSIZ
FreeBSD, OpenBSD, NetBSD and Illumos are not affected, since all of them
have BUFSIZ == 1024.
As the latter is passed to the kernel on recvmsg(), it's possible to
overwrite 4kB of stack -- with ICMPv6 packets that can be globally sent
to any of the system's addresses (using fragmentation to get to 8k).
(The socket has filters installed limiting this to RS and RA packets,
but does not have a filter for source address or TTL.)
Issue discovered by trying to test other stuff, which randomly caused
the stack to be smaller than 8kB in that code location, which then
causes the kernel to report EFAULT (Bad address).
Signed-off-by: David Lamparter <equinox@opensourcerouting.org>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
| 0 |
void do_perl(struct st_command *command)
{
int error;
File fd;
FILE *res_file;
char buf[FN_REFLEN];
char temp_file_path[FN_REFLEN];
static DYNAMIC_STRING ds_script;
static DYNAMIC_STRING ds_delimiter;
const struct command_arg perl_args[] = {
{ "delimiter", ARG_STRING, FALSE, &ds_delimiter, "Delimiter to read until" }
};
DBUG_ENTER("do_perl");
check_command_args(command,
command->first_argument,
perl_args,
sizeof(perl_args)/sizeof(struct command_arg),
' ');
ds_script= command->content;
/* If it hasn't been done already by a loop iteration, fill it in */
if (! ds_script.str)
{
/* If no delimiter was provided, use EOF */
if (ds_delimiter.length == 0)
dynstr_set(&ds_delimiter, "EOF");
init_dynamic_string(&ds_script, "", 1024, 1024);
read_until_delimiter(&ds_script, &ds_delimiter);
command->content= ds_script;
}
/* This function could be called even if "false", so check before doing */
if (cur_block->ok)
{
DBUG_PRINT("info", ("Executing perl: %s", ds_script.str));
/* Create temporary file name */
if ((fd= create_temp_file(temp_file_path, getenv("MYSQLTEST_VARDIR"),
"tmp", O_CREAT | O_SHARE | O_RDWR,
MYF(MY_WME))) < 0)
die("Failed to create temporary file for perl command");
my_close(fd, MYF(0));
str_to_file(temp_file_path, ds_script.str, ds_script.length);
/* Format the "perl <filename>" command */
my_snprintf(buf, sizeof(buf), "perl %s", temp_file_path);
if (!(res_file= popen(buf, "r")) && command->abort_on_error)
die("popen(\"%s\", \"r\") failed", buf);
while (fgets(buf, sizeof(buf), res_file))
{
if (disable_result_log)
{
buf[strlen(buf)-1]=0;
DBUG_PRINT("exec_result",("%s", buf));
}
else
{
replace_dynstr_append(&ds_res, buf);
}
}
error= pclose(res_file);
/* Remove the temporary file, but keep it if perl failed */
if (!error)
my_delete(temp_file_path, MYF(0));
/* Check for error code that indicates perl could not be started */
int exstat= WEXITSTATUS(error);
#ifdef __WIN__
if (exstat == 1)
/* Text must begin 'perl not found' as mtr looks for it */
abort_not_supported_test("perl not found in path or did not start");
#else
if (exstat == 127)
abort_not_supported_test("perl not found in path");
#endif
else
handle_command_error(command, exstat);
}
dynstr_free(&ds_delimiter);
DBUG_VOID_RETURN;
}
|
Safe
|
[
"CWE-295"
] |
mysql-server
|
b3e9211e48a3fb586e88b0270a175d2348935424
|
4.520428739260253e+37
| 87 |
WL#9072: Backport WL#8785 to 5.5
| 0 |
int roce_resolve_route_from_path(struct sa_path_rec *rec,
const struct ib_gid_attr *attr)
{
union {
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid, dgid;
struct rdma_dev_addr dev_addr = {};
int ret;
if (rec->roce.route_resolved)
return 0;
rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid);
rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid);
if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
return -EINVAL;
if (!attr || !attr->ndev)
return -EINVAL;
dev_addr.net = &init_net;
dev_addr.sgid_attr = attr;
ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid,
&dev_addr, false, true, 0);
if (ret)
return ret;
if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
dev_addr.network == RDMA_NETWORK_IPV6) &&
rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
return -EINVAL;
rec->roce.route_resolved = true;
return 0;
}
|
Safe
|
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
|
3.8046607413377216e+37
| 39 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <xmu@redhat.com>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
struct nft_set *nft_set_lookup_global(const struct net *net,
const struct nft_table *table,
const struct nlattr *nla_set_name,
const struct nlattr *nla_set_id,
u8 genmask)
{
struct nft_set *set;
set = nft_set_lookup(table, nla_set_name, genmask);
if (IS_ERR(set)) {
if (!nla_set_id)
return set;
set = nft_set_lookup_byid(net, nla_set_id, genmask);
}
return set;
}
|
Safe
|
[
"CWE-665"
] |
linux
|
ad9f151e560b016b6ad3280b48e42fa11e1a5440
|
3.309202712011358e+38
| 17 |
netfilter: nf_tables: initialize set before expression setup
nft_set_elem_expr_alloc() needs an initialized set if expression sets on
the NFT_EXPR_GC flag. Move set fields initialization before expression
setup.
[4512935.019450] ==================================================================
[4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532
[4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48
[...]
[4512935.019502] Call Trace:
[4512935.019505] dump_stack+0x89/0xb4
[4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019560] kasan_report.cold.12+0x5f/0xd8
[4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables]
Reported-by: syzbot+ce96ca2b1d0b37c6422d@syzkaller.appspotmail.com
Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
| 0 |
make_text_segment(Node** node, ScanEnv* env)
{
int r;
int i;
Node* x;
Node* ns[2];
/* \X == (?>\O(?:\Y\O)*) */
ns[1] = NULL_NODE;
r = ONIGERR_MEMORY;
ns[0] = onig_node_new_anchor(ANCR_NO_TEXT_SEGMENT_BOUNDARY, FALSE);
if (IS_NULL(ns[0])) goto err;
r = node_new_true_anychar(&ns[1], env);
if (r != 0) goto err1;
x = make_list(2, ns);
if (IS_NULL(x)) goto err;
ns[0] = x;
ns[1] = NULL_NODE;
x = node_new_quantifier(0, INFINITE_REPEAT, TRUE);
if (IS_NULL(x)) goto err;
NODE_BODY(x) = ns[0];
ns[0] = NULL_NODE;
ns[1] = x;
r = node_new_true_anychar(&ns[0], env);
if (r != 0) goto err1;
x = make_list(2, ns);
if (IS_NULL(x)) goto err;
ns[0] = x;
ns[1] = NULL_NODE;
x = node_new_bag(BAG_STOP_BACKTRACK);
if (IS_NULL(x)) goto err;
NODE_BODY(x) = ns[0];
*node = x;
return ONIG_NORMAL;
err:
r = ONIGERR_MEMORY;
err1:
for (i = 0; i < 2; i++) onig_node_free(ns[i]);
return r;
}
|
Safe
|
[
"CWE-125"
] |
oniguruma
|
aa0188eaedc056dca8374ac03d0177429b495515
|
1.862844915498915e+38
| 53 |
fix #163: heap-buffer-overflow in gb18030_mbc_enc_len
| 0 |
static int ensure_iov_space(conn *c) {
assert(c != NULL);
if (c->iovused >= c->iovsize) {
int i, iovnum;
struct iovec *new_iov = (struct iovec *)realloc(c->iov,
(c->iovsize * 2) * sizeof(struct iovec));
if (! new_iov) {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
return -1;
}
c->iov = new_iov;
c->iovsize *= 2;
/* Point all the msghdr structures at the new list. */
for (i = 0, iovnum = 0; i < c->msgused; i++) {
c->msglist[i].msg_iov = &c->iov[iovnum];
iovnum += c->msglist[i].msg_iovlen;
}
}
return 0;
}
|
Safe
|
[
"CWE-287"
] |
memcached
|
87c1cf0f20be20608d3becf854e9cf0910f4ad32
|
3.0450572030368068e+38
| 25 |
explicitly record sasl auth states
It was previously possible to bypass authentication due to implicit
state management. Now we explicitly consider ourselves
unauthenticated on any new connections and authentication attempts.
bug316
Signed-off-by: Dustin Sallings <dustin@spy.net>
| 0 |
do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
#ifdef DEBUG_SIG
printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n",
thread_info_flags, regs->ip, regs->sp, __builtin_return_address(0),signal_pending(current));
#endif
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP) {
regs->flags |= X86_EFLAGS_TF;
clear_thread_flag(TIF_SINGLESTEP);
}
#ifdef CONFIG_X86_MCE
/* notify userspace of pending MCEs */
if (thread_info_flags & _TIF_MCE_NOTIFY)
mce_notify_user();
#endif /* CONFIG_X86_MCE */
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
do_signal(regs);
if (thread_info_flags & _TIF_HRTICK_RESCHED)
hrtick_resched();
}
|
Safe
|
[
"CWE-399"
] |
linux-2.6
|
e40cd10ccff3d9fbffd57b93780bee4b7b9bff51
|
1.6396957961172024e+38
| 26 |
x86: clear DF before calling signal handler
The Linux kernel currently does not clear the direction flag before
calling a signal handler, whereas the x86/x86-64 ABI requires that.
Linux had this behavior/bug forever, but this becomes a real problem
with gcc version 4.3, which assumes that the direction flag is
correctly cleared at the entry of a function.
This patches changes the setup_frame() functions to clear the
direction before entering the signal handler.
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: H. Peter Anvin <hpa@zytor.com>
| 0 |
static void nf_ct_frag6_expire(unsigned long data)
{
struct nf_ct_frag6_queue *fq;
fq = container_of((struct inet_frag_queue *)data,
struct nf_ct_frag6_queue, q);
spin_lock(&fq->q.lock);
if (fq->q.last_in & INET_FRAG_COMPLETE)
goto out;
fq_kill(fq);
out:
spin_unlock(&fq->q.lock);
fq_put(fq);
}
|
Safe
|
[] |
linux-2.6
|
9e2dcf72023d1447f09c47d77c99b0c49659e5ce
|
2.926408031598766e+38
| 18 |
netfilter: nf_conntrack_reasm: properly handle packets fragmented into a single fragment
When an ICMPV6_PKT_TOOBIG message is received with a MTU below 1280,
all further packets include a fragment header.
Unlike regular defragmentation, conntrack also needs to "reassemble"
those fragments in order to obtain a packet without the fragment
header for connection tracking. Currently nf_conntrack_reasm checks
whether a fragment has either IP6_MF set or an offset != 0, which
makes it ignore those fragments.
Remove the invalid check and make reassembly handle fragment queues
containing only a single fragment.
Reported-and-tested-by: Ulrich Weber <uweber@astaro.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
| 0 |
select_result_text_buffer(THD *thd_arg): select_result_sink(thd_arg) {}
|
Safe
|
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
|
6.615532256217404e+37
| 1 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
| 0 |
PHP_FUNCTION(imageinterlace)
{
zval *IM;
int argc = ZEND_NUM_ARGS();
long INT = 0;
gdImagePtr im;
if (zend_parse_parameters(argc TSRMLS_CC, "r|l", &IM, &INT) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (argc > 1) {
gdImageInterlace(im, INT);
}
RETURN_LONG(gdImageGetInterlaced(im));
}
|
Safe
|
[
"CWE-703",
"CWE-189"
] |
php-src
|
2938329ce19cb8c4197dec146c3ec887c6f61d01
|
3.191474722241588e+38
| 19 |
Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop())
And also fixed the bug: arguments are altered after some calls
| 0 |
virDomainSEVDefFree(virDomainSEVDefPtr def)
{
if (!def)
return;
VIR_FREE(def->dh_cert);
VIR_FREE(def->session);
VIR_FREE(def);
}
|
Safe
|
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
|
4.354417722837821e+37
| 10 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <hhan@redhat.com>
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
| 0 |
int handler::compare_key2(key_range *range) const
{
int cmp;
if (!range)
return 0; // no max range
cmp= key_cmp(range_key_part, range->key, range->length);
if (!cmp)
cmp= key_compare_result_on_equal;
return cmp;
}
|
Safe
|
[
"CWE-416"
] |
server
|
af810407f78b7f792a9bb8c47c8c532eb3b3a758
|
4.796102603349359e+37
| 10 |
MDEV-28098 incorrect key in "dup value" error after long unique
reset errkey after using it, so that it wouldn't affect
the next error message in the next statement
| 0 |
static int gather_surplus_pages(struct hstate *h, int delta)
{
struct list_head surplus_list;
struct page *page, *tmp;
int ret, i;
int needed, allocated;
bool alloc_ok = true;
needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
if (needed <= 0) {
h->resv_huge_pages += delta;
return 0;
}
allocated = 0;
INIT_LIST_HEAD(&surplus_list);
ret = -ENOMEM;
retry:
spin_unlock(&hugetlb_lock);
for (i = 0; i < needed; i++) {
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
alloc_ok = false;
break;
}
list_add(&page->lru, &surplus_list);
}
allocated += i;
/*
* After retaking hugetlb_lock, we need to recalculate 'needed'
* because either resv_huge_pages or free_huge_pages may have changed.
*/
spin_lock(&hugetlb_lock);
needed = (h->resv_huge_pages + delta) -
(h->free_huge_pages + allocated);
if (needed > 0) {
if (alloc_ok)
goto retry;
/*
* We were not able to allocate enough pages to
* satisfy the entire reservation so we free what
* we've allocated so far.
*/
goto free;
}
/*
* The surplus_list now contains _at_least_ the number of extra pages
* needed to accommodate the reservation. Add the appropriate number
* of pages to the hugetlb pool and free the extras back to the buddy
* allocator. Commit the entire reservation here to prevent another
* process from stealing the pages as they are added to the pool but
* before they are reserved.
*/
needed += allocated;
h->resv_huge_pages += delta;
ret = 0;
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
break;
list_del(&page->lru);
/*
* This page is now managed by the hugetlb allocator and has
* no users -- drop the buddy allocator's reference.
*/
put_page_testzero(page);
VM_BUG_ON(page_count(page));
enqueue_huge_page(h, page);
}
free:
spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
if (!list_empty(&surplus_list)) {
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
list_del(&page->lru);
put_page(page);
}
}
spin_lock(&hugetlb_lock);
return ret;
}
|
Safe
|
[
"CWE-399"
] |
linux
|
90481622d75715bfcb68501280a917dbfe516029
|
1.4175615329654054e+38
| 86 |
hugepages: fix use after free bug in "quota" handling
hugetlbfs_{get,put}_quota() are badly named. They don't interact with the
general quota handling code, and they don't much resemble its behaviour.
Rather than being about maintaining limits on on-disk block usage by
particular users, they are instead about maintaining limits on in-memory
page usage (including anonymous MAP_PRIVATE copied-on-write pages)
associated with a particular hugetlbfs filesystem instance.
Worse, they work by having callbacks to the hugetlbfs filesystem code from
the low-level page handling code, in particular from free_huge_page().
This is a layering violation of itself, but more importantly, if the
kernel does a get_user_pages() on hugepages (which can happen from KVM
amongst others), then the free_huge_page() can be delayed until after the
associated inode has already been freed. If an unmount occurs at the
wrong time, even the hugetlbfs superblock where the "quota" limits are
stored may have been freed.
Andrew Barry proposed a patch to fix this by having hugepages, instead of
storing a pointer to their address_space and reaching the superblock from
there, had the hugepages store pointers directly to the superblock,
bumping the reference count as appropriate to avoid it being freed.
Andrew Morton rejected that version, however, on the grounds that it made
the existing layering violation worse.
This is a reworked version of Andrew's patch, which removes the extra, and
some of the existing, layering violation. It works by introducing the
concept of a hugepage "subpool" at the lower hugepage mm layer - that is a
finite logical pool of hugepages to allocate from. hugetlbfs now creates
a subpool for each filesystem instance with a page limit set, and a
pointer to the subpool gets added to each allocated hugepage, instead of
the address_space pointer used now. The subpool has its own lifetime and
is only freed once all pages in it _and_ all other references to it (i.e.
superblocks) are gone.
subpools are optional - a NULL subpool pointer is taken by the code to
mean that no subpool limits are in effect.
Previous discussion of this bug found in: "Fix refcounting in hugetlbfs
quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or
http://marc.info/?l=linux-mm&m=126928970510627&w=1
v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to
alloc_huge_page() - since it already takes the vma, it is not necessary.
Signed-off-by: Andrew Barry <abarry@cray.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
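The subpool lifetime rule this commit introduces can be sketched in a few lines of user-space C. Every name here (hpage_subpool, spool_get/spool_put) is an illustrative assumption rather than the kernel's identifier, and locking is omitted.

#include <stdlib.h>

/* Sketch of the subpool lifetime rule described above.  The superblock
 * holds one reference and every allocated page holds another, so the
 * pool outlives any delayed free_huge_page(). */
struct hpage_subpool {
    long max_pages;   /* page limit for this filesystem instance */
    long used_pages;  /* pages currently drawn from the pool */
    long refs;        /* superblock + every outstanding page */
};

static struct hpage_subpool *spool_create(long max_pages)
{
    struct hpage_subpool *s = calloc(1, sizeof(*s));
    if (s) {
        s->max_pages = max_pages;
        s->refs = 1;              /* the superblock's reference */
    }
    return s;
}

static void spool_get(struct hpage_subpool *s)
{
    if (s)                        /* NULL means "no limits in effect" */
        s->refs++;
}

static void spool_put(struct hpage_subpool *s)
{
    if (s && --s->refs == 0)      /* last page or superblock gone */
        free(s);
}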
static ZIPARCHIVE_METHOD(setCommentIndex)
{
struct zip *intern;
zval *self = getThis();
zend_long index;
size_t comment_len;
char * comment;
struct zip_stat sb;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ls",
&index, &comment, &comment_len) == FAILURE) {
return;
}
PHP_ZIP_STAT_INDEX(intern, index, 0, sb);
PHP_ZIP_SET_FILE_COMMENT(intern, index, comment, comment_len);
}
|
Safe
|
[
"CWE-190"
] |
php-src
|
3b8d4de300854b3517c7acb239b84f7726c1353c
|
2.1966596928407454e+38
| 23 |
Fix bug #71923 - integer overflow in ZipArchive::getFrom*
| 0 |
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
ptrace_area parea;
int copied, ret;
switch (request) {
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
return peek_user(child, addr, data);
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
return poke_user(child, addr, data);
case PTRACE_PEEKUSR_AREA:
case PTRACE_POKEUSR_AREA:
if (copy_from_user(&parea, (void __force __user *) addr,
sizeof(parea)))
return -EFAULT;
addr = parea.kernel_addr;
data = parea.process_addr;
copied = 0;
while (copied < parea.len) {
if (request == PTRACE_PEEKUSR_AREA)
ret = peek_user(child, addr, data);
else {
addr_t utmp;
if (get_user(utmp,
(addr_t __force __user *) data))
return -EFAULT;
ret = poke_user(child, addr, utmp);
}
if (ret)
return ret;
addr += sizeof(unsigned long);
data += sizeof(unsigned long);
copied += sizeof(unsigned long);
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(task_thread_info(child)->last_break,
(unsigned long __user *) data);
return 0;
case PTRACE_ENABLE_TE:
if (!MACHINE_HAS_TE)
return -EIO;
child->thread.per_flags &= ~PER_FLAG_NO_TE;
return 0;
case PTRACE_DISABLE_TE:
if (!MACHINE_HAS_TE)
return -EIO;
child->thread.per_flags |= PER_FLAG_NO_TE;
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
return 0;
case PTRACE_TE_ABORT_RAND:
if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
return -EIO;
switch (data) {
case 0UL:
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
break;
case 1UL:
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
break;
case 2UL:
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
break;
default:
return -EINVAL;
}
return 0;
default:
/* Removing high order bit from addr (only for 31 bit). */
addr &= PSW_ADDR_INSN;
return ptrace_request(child, request, addr, data);
}
}
|
Safe
|
[
"CWE-264",
"CWE-269"
] |
linux
|
dab6cf55f81a6e16b8147aed9a843e1691dcd318
|
2.676025792545326e+37
| 80 |
s390/ptrace: fix PSW mask check
The PSW mask check of the PTRACE_POKEUSR_AREA command is incorrect.
The PSW_MASK_USER define contains the PSW_MASK_ASC bits; the ptrace
interface accepts all combinations of the address-space-control
bits. To protect the kernel space the PSW mask check in ptrace needs
to reject the address-space-control bit combination for home space.
Fixes CVE-2014-3534
Cc: stable@vger.kernel.org
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
| 0 |
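The check the fix adds amounts to rejecting a user-supplied PSW mask whose address-space-control bits select home space. A hedged sketch follows, with placeholder bit values rather than the real s390 encoding.

/* Placeholder constants -- the real s390 PSW layout differs. */
#define PSW_MASK_ASC 0x0000c00000000000UL
#define PSW_ASC_HOME 0x0000c00000000000UL

static int psw_mask_ok(unsigned long mask)
{
    /* Home space maps the kernel, so a tracee running with it would
     * turn ptrace pokes into kernel-space accesses. */
    if ((mask & PSW_MASK_ASC) == PSW_ASC_HOME)
        return 0;
    return 1;
}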
static long mem_seek(jas_stream_obj_t *obj, long offset, int origin)
{
jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;
long newpos;
switch (origin) {
case SEEK_SET:
newpos = offset;
break;
case SEEK_END:
newpos = m->len_ - offset;
break;
case SEEK_CUR:
newpos = m->pos_ + offset;
break;
default:
abort();
break;
}
if (newpos < 0) {
return -1;
}
m->pos_ = newpos;
return m->pos_;
}
|
Safe
|
[
"CWE-189"
] |
jasper
|
3c55b399c36ef46befcb21e4ebc4799367f89684
|
1.3570931148331478e+38
| 26 |
At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems.
| 0 |
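The checked allocators the message refers to (jas_alloc2 and friends) follow a standard pattern; here is a minimal sketch assuming only SIZE_MAX from <stdint.h>.

#include <stdint.h>
#include <stdlib.h>

/* Fail instead of wrapping when nmemb * size overflows, in the spirit
 * of jas_alloc2. */
static void *checked_alloc2(size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL;              /* multiplication would overflow */
    return malloc(nmemb * size);
}

A call such as checked_alloc2(numrows, rowsize) then fails cleanly where jas_malloc(numrows * rowsize) would silently wrap and under-allocate.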
struct sc_card_driver * sc_get_tcos_driver(void)
{
struct sc_card_driver *iso_drv = sc_get_iso7816_driver();
if (iso_ops == NULL) iso_ops = iso_drv->ops;
tcos_ops = *iso_drv->ops;
tcos_ops.match_card = tcos_match_card;
tcos_ops.init = tcos_init;
tcos_ops.finish = tcos_finish;
tcos_ops.create_file = tcos_create_file;
tcos_ops.set_security_env = tcos_set_security_env;
tcos_ops.select_file = tcos_select_file;
tcos_ops.list_files = tcos_list_files;
tcos_ops.delete_file = tcos_delete_file;
tcos_ops.compute_signature = tcos_compute_signature;
tcos_ops.decipher = tcos_decipher;
tcos_ops.restore_security_env = tcos_restore_security_env;
tcos_ops.card_ctl = tcos_card_ctl;
return &tcos_drv;
}
|
Safe
|
[
"CWE-787"
] |
OpenSC
|
9d294de90d1cc66956389856e60b6944b27b4817
|
8.0805254200391235e+37
| 22 |
prevent out of bounds write
fixes https://oss-fuzz.com/testcase-detail/5226571123392512
| 0 |
static int nghttp2_session_predicate_data_send(nghttp2_session *session,
nghttp2_stream *stream) {
int rv;
rv = session_predicate_for_stream_send(session, stream);
if (rv != 0) {
return rv;
}
assert(stream);
if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) {
/* Request body data */
/* If stream->state is NGHTTP2_STREAM_CLOSING, RST_STREAM was
queued but not yet sent. In this case, we won't send DATA
frames. */
if (stream->state == NGHTTP2_STREAM_CLOSING) {
return NGHTTP2_ERR_STREAM_CLOSING;
}
if (stream->state == NGHTTP2_STREAM_RESERVED) {
return NGHTTP2_ERR_INVALID_STREAM_STATE;
}
return 0;
}
/* Response body data */
if (stream->state == NGHTTP2_STREAM_OPENED) {
return 0;
}
if (stream->state == NGHTTP2_STREAM_CLOSING) {
return NGHTTP2_ERR_STREAM_CLOSING;
}
return NGHTTP2_ERR_INVALID_STREAM_STATE;
}
|
Safe
|
[] |
nghttp2
|
0a6ce87c22c69438ecbffe52a2859c3a32f1620f
|
6.789102445973393e+37
| 30 |
Add nghttp2_option_set_max_outbound_ack
| 0 |
mm_answer_pam_free_ctx(int sock, Buffer *m)
{
debug3("%s", __func__);
(sshpam_device.free_ctx)(sshpam_ctxt);
buffer_clear(m);
mm_request_send(sock, MONITOR_ANS_PAM_FREE_CTX, m);
auth_method = "keyboard-interactive";
auth_submethod = "pam";
return (sshpam_authok == sshpam_ctxt);
}
|
Vulnerable
|
[
"CWE-284",
"CWE-264"
] |
openssh-portable
|
5e75f5198769056089fb06c4d738ab0e5abc66f7
|
7.595582662624174e+37
| 11 |
set sshpam_ctxt to NULL after free
Avoids use-after-free in monitor when privsep child is compromised.
Reported by Moritz Jodeit; ok dtucker@
| 1 |
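The one-line fix named in the message -- clearing the pointer at the free site -- is a general use-after-free hardening idiom. A sketch with assumed names; the real code frees sshpam_ctxt through a keyboard-interactive device callback.

#include <stdlib.h>

/* NULL the pointer where it is freed, so a later (possibly
 * attacker-driven) monitor request sees NULL, not a stale context. */
static void free_ctx(void **ctxt)
{
    free(*ctxt);
    *ctxt = NULL;                 /* the essential added line */
}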
HttpTransact::handle_content_length_header(State* s, HTTPHdr* header, HTTPHdr* base)
{
int64_t cl = HTTP_UNDEFINED_CL;
ink_assert(header->type_get() == HTTP_TYPE_RESPONSE);
if (base->presence(MIME_PRESENCE_CONTENT_LENGTH)) {
cl = base->get_content_length();
if (cl >= 0) {
// header->set_content_length(cl);
ink_assert(header->get_content_length() == cl);
switch (s->source) {
case SOURCE_HTTP_ORIGIN_SERVER:
// We made our decision about whether to trust the
// response content length in init_state_vars_from_response()
if (s->range_setup != HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED)
break;
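      // fall through: a single-range origin response gets the cache-style C-L handling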
case SOURCE_CACHE:
// if we are doing a single Range: request, calculate the new
// C-L: header
if (s->range_setup == HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED) {
change_response_header_because_of_range_request(s,header);
s->hdr_info.trust_response_cl = true;
}
////////////////////////////////////////////////
// Make sure that the cache's object size //
// agrees with the Content-Length //
// Otherwise, set the state's machine view //
// of c-l to undefined to turn off K-A //
////////////////////////////////////////////////
else if ((int64_t) s->cache_info.object_read->object_size_get() == cl) {
s->hdr_info.trust_response_cl = true;
} else {
DebugTxn("http_trans", "Content Length header and cache object size mismatch." "Disabling keep-alive");
s->hdr_info.trust_response_cl = false;
}
break;
case SOURCE_TRANSFORM:
if (s->range_setup == HttpTransact::RANGE_REQUESTED) {
header->set_content_length(s->range_output_cl);
s->hdr_info.trust_response_cl = true;
} else if (s->hdr_info.transform_response_cl == HTTP_UNDEFINED_CL) {
s->hdr_info.trust_response_cl = false;
} else {
s->hdr_info.trust_response_cl = true;
}
break;
default:
ink_release_assert(0);
break;
}
} else {
header->field_delete(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH);
s->hdr_info.trust_response_cl = false;
}
Debug("http_trans", "[handle_content_length_header] RESPONSE cont len in hdr is %" PRId64, header->get_content_length());
} else {
// No content length header
if (s->source == SOURCE_CACHE) {
// If there is no content-length header, we can
// insert one since the cache knows definitely
// how long the object is unless we're in a
// read-while-write mode and object hasn't been
// written into a cache completely.
cl = s->cache_info.object_read->object_size_get();
if (cl == INT64_MAX) { //INT64_MAX cl in cache indicates rww in progress
header->field_delete(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH);
s->hdr_info.trust_response_cl = false;
s->hdr_info.request_content_length = HTTP_UNDEFINED_CL;
ink_assert(s->range_setup == RANGE_NONE);
}
else if (s->range_setup == RANGE_NOT_TRANSFORM_REQUESTED) {
// if we are doing a single Range: request, calculate the new
// C-L: header
change_response_header_because_of_range_request(s,header);
s->hdr_info.trust_response_cl = true;
}
else {
header->set_content_length(cl);
s->hdr_info.trust_response_cl = true;
}
} else {
// Check to see if there is no content length
// header because the response precludes a
// body
if (is_response_body_precluded(header->status_get(), s->method)) {
// We want to be able to do keep-alive here since
// there can't be body so we don't have any
// issues about trusting the body length
s->hdr_info.trust_response_cl = true;
} else {
s->hdr_info.trust_response_cl = false;
}
header->field_delete(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH);
ink_assert(s->range_setup != RANGE_NOT_TRANSFORM_REQUESTED);
}
}
return;
} /* End HttpTransact::handle_content_length_header */
|
Safe
|
[
"CWE-119"
] |
trafficserver
|
8b5f0345dade6b2822d9b52c8ad12e63011a5c12
|
3.2652552282000163e+38
| 101 |
Fix the internal buffer sizing. Thanks to Sudheer for helping isolate this bug
| 0 |
create_seq_hashtable(void)
{
HASHCTL ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(SeqTableData);
seqhashtab = hash_create("Sequence values", 16, &ctl,
HASH_ELEM | HASH_BLOBS);
}
|
Safe
|
[
"CWE-94"
] |
postgres
|
7e92f78abe80e4b30e648a40073abb59057e21f8
|
4.767344802462016e+37
| 11 |
In extensions, don't replace objects not belonging to the extension.
Previously, if an extension script did CREATE OR REPLACE and there was
an existing object not belonging to the extension, it would overwrite
the object and adopt it into the extension. This is problematic, first
because the overwrite is probably unintentional, and second because we
didn't change the object's ownership. Thus a hostile user could create
an object in advance of an expected CREATE EXTENSION command, and would
then have ownership rights on an extension object, which could be
modified for trojan-horse-type attacks.
Hence, forbid CREATE OR REPLACE of an existing object unless it already
belongs to the extension. (Note that we've always forbidden replacing
an object that belongs to some other extension; only the behavior for
previously-free-standing objects changes here.)
For the same reason, also fail CREATE IF NOT EXISTS when there is
an existing object that doesn't belong to the extension.
Our thanks to Sven Klemm for reporting this problem.
Security: CVE-2022-2625
| 0 |
static struct sock *unix_find_socket_byinode(struct inode *i)
{
struct sock *s;
spin_lock(&unix_table_lock);
sk_for_each(s,
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
if (dentry && dentry->d_inode == i) {
sock_hold(s);
goto found;
}
}
s = NULL;
found:
spin_unlock(&unix_table_lock);
return s;
}
|
Safe
|
[
"CWE-20",
"CWE-269"
] |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
4.630538395781592e+37
| 19 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <davem@davemloft.net>
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
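The contract this rework establishes for recvmsg handlers fits in a few lines; the handler below is an illustrative sketch, not actual kernel code.

#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* The caller passes msg_namelen == 0; the handler sets it only when it
 * actually writes msg_name, and never beyond sockaddr_storage. */
static void fill_name_sketch(struct msghdr *msg,
                             const struct sockaddr_in *peer)
{
    if (msg->msg_name) {
        memcpy(msg->msg_name, peer, sizeof(*peer));
        msg->msg_namelen = sizeof(*peer);
    }
    /* When msg_name is NULL (a plain read()), msg_namelen stays 0 and
     * no uninitialized memory can leak to user space. */
}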
static pj_status_t ssl_do_handshake(pj_ssl_sock_t *ssock)
{
ossl_sock_t *ossock = (ossl_sock_t *)ssock;
pj_status_t status;
int err;
/* Perform SSL handshake */
pj_lock_acquire(ssock->write_mutex);
err = SSL_do_handshake(ossock->ossl_ssl);
pj_lock_release(ssock->write_mutex);
/* SSL_do_handshake() may put some pending data into SSL write BIO,
* flush it if any.
*/
status = flush_circ_buf_output(ssock, &ssock->handshake_op_key, 0, 0);
if (status != PJ_SUCCESS && status != PJ_EPENDING) {
return status;
}
if (err < 0) {
int err2 = SSL_get_error(ossock->ossl_ssl, err);
if (err2 != SSL_ERROR_NONE && err2 != SSL_ERROR_WANT_READ)
{
/* Handshake fails */
status = STATUS_FROM_SSL_ERR2("Handshake", ssock, err, err2, 0);
return status;
}
}
/* Check if handshake has been completed */
if (SSL_is_init_finished(ossock->ossl_ssl)) {
ssock->ssl_state = SSL_STATE_ESTABLISHED;
return PJ_SUCCESS;
}
return PJ_EPENDING;
}
|
Safe
|
[
"CWE-362",
"CWE-703"
] |
pjproject
|
d5f95aa066f878b0aef6a64e60b61e8626e664cd
|
2.9566294292831278e+38
| 37 |
Merge pull request from GHSA-cv8x-p47p-99wr
* - Avoid SSL socket parent/listener getting destroyed during handshake by increasing parent's reference count.
- Add missing SSL socket close when the newly accepted SSL socket is discarded in SIP TLS transport.
* - Fix silly mistake: accepted active socket created without group lock in SSL socket.
- Replace assertion with normal validation check of SSL socket instance in OpenSSL verification callback (verify_cb()) to avoid a crash, e.g. if a race condition with SSL socket destroy somehow happens or the OpenSSL application data index somehow gets corrupted.
| 0 |
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *nested_vmcb;
struct kvm_host_map map;
u64 guest;
u64 vmcb;
guest = GET_SMSTATE(u64, smstate, 0x7ed8);
vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
if (guest) {
if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
return 1;
nested_vmcb = map.hva;
enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
}
return 0;
}
|
Safe
|
[
"CWE-401"
] |
linux
|
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
|
3.0643038676014215e+38
| 19 |
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When kmalloc memory for sd->sev_vmcbs failed, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
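The leak being fixed is the classic error-path unwind problem: an early allocation must be released when a later one fails. A sketch with assumed names and sizes:

#include <stdlib.h>

struct cpu_data { void *save_area; void **sev_vmcbs; };

static int cpu_init_sketch(struct cpu_data *sd, size_t nvmcbs)
{
    sd->save_area = malloc(4096);
    if (!sd->save_area)
        return -1;                     /* -ENOMEM in the kernel */

    sd->sev_vmcbs = calloc(nvmcbs, sizeof(void *));
    if (!sd->sev_vmcbs) {
        free(sd->save_area);           /* the previously missing free */
        sd->save_area = NULL;
        return -1;
    }
    return 0;
}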
server_client_suspend(struct client *c)
{
struct session *s = c->session;
if (s == NULL || (c->flags & CLIENT_DETACHING))
return;
tty_stop_tty(&c->tty);
c->flags |= CLIENT_SUSPENDED;
proc_send(c->peer, MSG_SUSPEND, -1, NULL, 0);
}
|
Safe
|
[] |
src
|
b32e1d34e10a0da806823f57f02a4ae6e93d756e
|
2.3728711981190017e+38
| 11 |
evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547.
| 0 |
static unsigned int tcp_synack_options(const struct sock *sk,
struct request_sock *req,
unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
const struct tcp_md5sig_key *md5,
struct tcp_fastopen_cookie *foc)
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
#ifdef CONFIG_TCP_MD5SIG
if (md5) {
opts->options |= OPTION_MD5;
remaining -= TCPOLEN_MD5SIG_ALIGNED;
/* We can't fit any SACK blocks in a packet with MD5 + TS
* options. There was discussion about disabling SACK
* rather than TS in order to fit in better with old,
* buggy kernels, but that was deemed to be unnecessary.
*/
ireq->tstamp_ok &= !ireq->sack_ok;
}
#endif
/* We always send an MSS option. */
opts->mss = mss;
remaining -= TCPOLEN_MSS_ALIGNED;
if (likely(ireq->wscale_ok)) {
opts->ws = ireq->rcv_wscale;
opts->options |= OPTION_WSCALE;
remaining -= TCPOLEN_WSCALE_ALIGNED;
}
if (likely(ireq->tstamp_ok)) {
opts->options |= OPTION_TS;
opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
opts->tsecr = req->ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
if (likely(ireq->sack_ok)) {
opts->options |= OPTION_SACK_ADVERTISE;
if (unlikely(!ireq->tstamp_ok))
remaining -= TCPOLEN_SACKPERM_ALIGNED;
}
if (foc != NULL && foc->len >= 0) {
u32 need = foc->len;
need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
TCPOLEN_FASTOPEN_BASE;
need = (need + 3) & ~3U; /* Align to 32 bits */
if (remaining >= need) {
opts->options |= OPTION_FAST_OPEN_COOKIE;
opts->fastopen_cookie = foc;
remaining -= need;
}
}
smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
return MAX_TCP_OPTION_SPACE - remaining;
}
|
Safe
|
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
|
2.9690874924210165e+38
| 61 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertised the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jonathan Looney <jtl@netflix.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Bruce Curtis <brucec@netflix.com>
Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
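The arithmetic is easy to verify: 17 fragments of 64KB at the 48-byte minimum MSS give tens of thousands of segments per skb, and SACK coalescing of a few such skbs exceeds the 65535 a u16 can hold. A worked example:

#include <stdio.h>

int main(void)
{
    unsigned long payload = 17UL * 64 * 1024; /* 17 frags x 64KB (PowerPC) */
    unsigned long mss = 48;                   /* smallest MSS Linux accepts */
    unsigned long segs = payload / mss;

    printf("segments per max-size skb: %lu\n", segs);          /* 23210 */
    printf("3 coalesced skbs: %lu (u16 max 65535)\n", 3 * segs);
    return 0;
}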
void WebContents::InspectElement(int x, int y) {
if (type_ == Type::kRemote)
return;
if (!enable_devtools_)
return;
DCHECK(inspectable_web_contents_);
if (!inspectable_web_contents_->GetDevToolsWebContents())
OpenDevTools(nullptr);
inspectable_web_contents_->InspectElement(x, y);
}
|
Safe
|
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
|
7.97392967213627e+37
| 12 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <samuel.r.attard@gmail.com>
Co-authored-by: Samuel Attard <sattard@salesforce.com>
| 0 |
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
struct fuse_lk_in inarg;
struct fuse_lk_out outarg;
int err;
fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
args.out.numargs = 1;
args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
err = convert_fuse_file_lock(&outarg.lk, fl);
return err;
}
|
Safe
|
[
"CWE-399",
"CWE-835"
] |
linux
|
3ca8138f014a913f98e6ef40e939868e1e9ea876
|
3.657073542565159e+37
| 19 |
fuse: break infinite loop in fuse_fill_write_pages()
I got a report about an unkillable task eating CPU. Further
investigation shows that the problem is in the fuse_fill_write_pages()
function. If iov's first segment has zero length, we get an infinite
loop, because we never reach iov_iter_advance() call.
Fix this by calling iov_iter_advance() before repeating an attempt to
copy data from userspace.
A similar problem is described in 124d3b7041f ("fix writev regression:
pan hanging unkillable and un-straceable"). If a zero-length segment
is followed by a segment with an invalid address,
iov_iter_fault_in_readable() checks only first segment (zero-length),
iov_iter_copy_from_user_atomic() skips it, fails at second and
returns zero -> goto again without skipping zero-length segment.
The patch calls iov_iter_advance() before goto again: we'll skip the
zero-length segment on the second iteration and iov_iter_fault_in_readable()
will detect the invalid address.
Special thanks to Konstantin Khlebnikov, who helped a lot with the commit
description.
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Maxim Patlasov <mpatlasov@parallels.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Roman Gushchin <klamm@yandex-team.ru>
Signed-off-by: Miklos Szeredi <miklos@szeredi.hu>
Fixes: ea9b9907b82a ("fuse: implement perform_write")
Cc: <stable@vger.kernel.org>
| 0 |
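The shape of the fix can be modelled without any fuse internals: the iterator must advance before the retry, so a zero-length segment can never stall progress. Below, the iov machinery is reduced to an array of segment lengths; this is an analogy, not the patch itself.

#include <stddef.h>

struct iter { size_t idx, nsegs; const size_t *lens; };

static size_t fill_pages_fixed(struct iter *it)
{
    size_t total = 0;
    while (it->idx < it->nsegs) {
        size_t copied = it->lens[it->idx]; /* 0 for a zero-length segment */
        it->idx++;      /* mirrors calling iov_iter_advance() before the
                         * "goto again": progress even when copied == 0 */
        total += copied;
    }
    return total;
}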
DefResize(wi, he)
int wi, he;
{
return -1;
}
|
Safe
|
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
|
2.4511056767244837e+38
| 5 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <amade@asmblr.net>
| 0 |
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
siginfo_t __user *, info, unsigned int, flags)
{
int ret;
struct fd f;
struct pid *pid;
kernel_siginfo_t kinfo;
/* Enforce flags be set to 0 until we add an extension. */
if (flags)
return -EINVAL;
f = fdget(pidfd);
if (!f.file)
return -EBADF;
/* Is this a pidfd? */
pid = pidfd_to_pid(f.file);
if (IS_ERR(pid)) {
ret = PTR_ERR(pid);
goto err;
}
ret = -EINVAL;
if (!access_pidfd_pidns(pid))
goto err;
if (info) {
ret = copy_siginfo_from_user_any(&kinfo, info);
if (unlikely(ret))
goto err;
ret = -EINVAL;
if (unlikely(sig != kinfo.si_signo))
goto err;
/* Only allow sending arbitrary signals to yourself. */
ret = -EPERM;
if ((task_pid(current) != pid) &&
(kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
goto err;
} else {
prepare_kill_siginfo(sig, &kinfo);
}
ret = kill_pid_info(sig, &kinfo, pid);
err:
fdput(f);
return ret;
}
|
Safe
|
[
"CWE-190"
] |
linux
|
d1e7fd6462ca9fc76650fbe6ca800e35b24267da
|
3.3412842885709255e+37
| 51 |
signal: Extend exec_id to 64bits
Replace the 32bit exec_id with a 64bit exec_id to make it impossible
to wrap the exec_id counter. With care an attacker can cause exec_id
wrap and send arbitrary signals to a newly exec'd parent. This
bypasses the signal sending checks if the parent changes their
credentials during exec.
The severity of this problem can be seen in that, in my limited testing
of a 32bit exec_id, it can take as little as 19s to exec 65536 times.
Which means that it can take as little as 14 days to wrap a 32bit
exec_id. Adam Zabrocki has succeeded wrapping the self_exe_id in 7
days. Even my slower timing is in the uptime of a typical server.
Which means self_exec_id is simply a speed bump today, and if exec
gets noticeably faster self_exec_id won't even be a speed bump.
Extending self_exec_id to 64bits introduces a problem on 32bit
architectures where reading self_exec_id is no longer atomic and can
take two read instructions. Which means that it is possible to hit
a window where the read value of exec_id does not match the written
value. So with very lucky timing after this change this still
remains exploitable.
I have updated the update of exec_id on exec to use WRITE_ONCE
and the read of exec_id in do_notify_parent to use READ_ONCE
to make it clear that there is no locking between these two
locations.
Link: https://lore.kernel.org/kernel-hardening/20200324215049.GA3710@pi3.com.pl
Fixes: 2.3.23pre2
Cc: stable@vger.kernel.org
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
| 0 |
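The torn-read concern on 32-bit targets maps naturally onto C11 atomics; the kernel uses WRITE_ONCE/READ_ONCE instead, so the sketch below is an analogy rather than the actual patch.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t self_exec_id;

static void on_exec(void)
{
    /* 64-bit counter: wrapping now takes ages, not days */
    atomic_fetch_add_explicit(&self_exec_id, 1, memory_order_relaxed);
}

static uint64_t read_exec_id(void)
{
    /* one atomic load -- no torn two-instruction read on 32-bit */
    return atomic_load_explicit(&self_exec_id, memory_order_relaxed);
}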
d_lite_ajd(VALUE self)
{
get_d1(self);
return m_ajd(dat);
}
|
Safe
|
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
|
7.779382767781221e+37
| 5 |
Add length limit option for methods that parse date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128 characters. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
| 0 |
sldns_is_last_of_string(const char* str)
{
if(*str == 0) return 1;
while(isspace((unsigned char)*str))
str++;
if(*str == 0) return 1;
return 0;
}
|
Safe
|
[] |
unbound
|
3f3cadd416d6efa92ff2d548ac090f42cd79fee9
|
4.910848740833612e+37
| 8 |
- Fix Out of Bounds Write in sldns_str2wire_str_buf(),
reported by X41 D-Sec.
| 0 |
void RGWCORSRule::erase_origin_if_present(string& origin, bool *rule_empty) {
set<string>::iterator it = allowed_origins.find(origin);
if (!rule_empty)
return;
*rule_empty = false;
if (it != allowed_origins.end()) {
dout(10) << "Found origin " << origin << ", set size:" <<
allowed_origins.size() << dendl;
allowed_origins.erase(it);
*rule_empty = (allowed_origins.empty());
}
}
|
Safe
|
[
"CWE-113"
] |
ceph
|
46817f30cee60bc5df8354ab326762e7c783fe2c
|
2.4703620570949744e+38
| 12 |
rgw: sanitize newlines in s3 CORSConfiguration's ExposeHeader
the values in the <ExposeHeader> element are sent back to clients in an
Access-Control-Expose-Headers response header. If the values are allowed
to have newlines in them, they can be used to inject arbitrary response
headers.
This issue only affects s3, which gets these values from an xml document;
in swift, they're given in the request header
X-Container-Meta-Access-Control-Expose-Headers, so the value itself
cannot contain newlines
Signed-off-by: Casey Bodley <cbodley@redhat.com>
Reported-by: Adam Mohammed <amohammed@linode.com>
| 0 |
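The sanitization reduces to refusing CR or LF in any value that will be echoed into a response header; a minimal check:

#include <string.h>

/* Returns nonzero iff the value is safe to emit in a response header. */
static int header_value_ok(const char *v)
{
    return strpbrk(v, "\r\n") == NULL;
}

Stripping the offending characters instead of rejecting the value would also close the injection, at the cost of silently altering user input.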
cmd_http_timeout(CMD_ARGS)
{
struct http *hp;
(void)cmd;
(void)vl;
CAST_OBJ_NOTNULL(hp, priv, HTTP_MAGIC);
AN(av[1]);
AZ(av[2]);
hp->timeout = (int)(strtod(av[1], NULL) * 1000.0);
}
|
Safe
|
[
"CWE-269"
] |
Varnish-Cache
|
85e8468bec9416bd7e16b0d80cb820ecd2b330c3
|
2.5270171920473282e+38
| 11 |
Do not consider a CR by itself as a valid line terminator
Varnish (prior to version 4.0) was not following the standard with
regard to the line separator.
Spotted and analyzed by: Régis Leroy [regilero] regis.leroy@makina-corpus.com
| 0 |
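A sketch of the stricter rule: only LF (optionally preceded by CR) terminates a line, and a bare CR is rejected rather than treated as a terminator.

#include <stddef.h>

static int line_is_clean(const char *line, size_t len)
{
    size_t i;
    for (i = 0; i < len; i++)
        if (line[i] == '\r' && (i + 1 == len || line[i + 1] != '\n'))
            return 0;   /* bare CR: reject, don't let it split the line */
    return 1;
}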
extend_integer_type (void *source, int type)
{
switch (type)
{
case FFI_TYPE_UINT8:
return *(UINT8 *) source;
case FFI_TYPE_SINT8:
return *(SINT8 *) source;
case FFI_TYPE_UINT16:
return *(UINT16 *) source;
case FFI_TYPE_SINT16:
return *(SINT16 *) source;
case FFI_TYPE_UINT32:
return *(UINT32 *) source;
case FFI_TYPE_INT:
case FFI_TYPE_SINT32:
return *(SINT32 *) source;
case FFI_TYPE_UINT64:
case FFI_TYPE_SINT64:
return *(UINT64 *) source;
break;
case FFI_TYPE_POINTER:
return *(uintptr_t *) source;
default:
abort();
}
}
|
Safe
|
[
"CWE-787"
] |
libffi
|
44a6c28545186d78642487927952844156fc7ab5
|
2.7391436445047554e+38
| 27 |
aarch64: Flush code mapping in addition to data mapping (#471)
This needs a new function, ffi_data_to_code_pointer, to translate
from data pointers to code pointers.
Fixes issue #470.
| 0 |
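The flush can be sketched with the cache-coherence builtin that both GCC and clang provide; the data-to-code offset scheme here is an assumption for illustration.

#include <stddef.h>

/* When a trampoline is written through a writable data mapping that
 * aliases an executable code mapping, the *code* address range is what
 * must be made coherent before jumping to it. */
static void flush_trampoline(char *data_ptr, size_t len, ptrdiff_t code_offset)
{
    char *code_ptr = data_ptr + code_offset;   /* data -> code alias */
    __builtin___clear_cache(code_ptr, code_ptr + len);
}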
int x509_get_rsassa_pss_params( const x509_buf *params,
md_type_t *md_alg, md_type_t *mgf_md,
int *salt_len )
{
int ret;
unsigned char *p;
const unsigned char *end, *end2;
size_t len;
x509_buf alg_id, alg_params;
/* First set everything to defaults */
*md_alg = POLARSSL_MD_SHA1;
*mgf_md = POLARSSL_MD_SHA1;
*salt_len = 20;
/* Make sure params is a SEQUENCE and setup bounds */
if( params->tag != ( ASN1_CONSTRUCTED | ASN1_SEQUENCE ) )
return( POLARSSL_ERR_X509_INVALID_ALG +
POLARSSL_ERR_ASN1_UNEXPECTED_TAG );
p = (unsigned char *) params->p;
end = p + params->len;
if( p == end )
return( 0 );
/*
* HashAlgorithm
*/
if( ( ret = asn1_get_tag( &p, end, &len,
ASN1_CONTEXT_SPECIFIC | ASN1_CONSTRUCTED | 0 ) ) == 0 )
{
end2 = p + len;
/* HashAlgorithm ::= AlgorithmIdentifier (without parameters) */
if( ( ret = x509_get_alg_null( &p, end2, &alg_id ) ) != 0 )
return( ret );
if( ( ret = oid_get_md_alg( &alg_id, md_alg ) ) != 0 )
return( POLARSSL_ERR_X509_INVALID_ALG + ret );
if( p != end2 )
return( POLARSSL_ERR_X509_INVALID_ALG +
POLARSSL_ERR_ASN1_LENGTH_MISMATCH );
}
else if( ret != POLARSSL_ERR_ASN1_UNEXPECTED_TAG )
return( POLARSSL_ERR_X509_INVALID_ALG + ret );
if( p == end )
return( 0 );
/*
* MaskGenAlgorithm
*/
if( ( ret = asn1_get_tag( &p, end, &len,
ASN1_CONTEXT_SPECIFIC | ASN1_CONSTRUCTED | 1 ) ) == 0 )
{
end2 = p + len;
/* MaskGenAlgorithm ::= AlgorithmIdentifier (params = HashAlgorithm) */
if( ( ret = x509_get_alg( &p, end2, &alg_id, &alg_params ) ) != 0 )
return( ret );
/* Only MFG1 is recognised for now */
if( ! OID_CMP( OID_MGF1, &alg_id ) )
return( POLARSSL_ERR_X509_FEATURE_UNAVAILABLE +
POLARSSL_ERR_OID_NOT_FOUND );
/* Parse HashAlgorithm */
if( ( ret = x509_get_hash_alg( &alg_params, mgf_md ) ) != 0 )
return( ret );
if( p != end2 )
return( POLARSSL_ERR_X509_INVALID_ALG +
POLARSSL_ERR_ASN1_LENGTH_MISMATCH );
}
else if( ret != POLARSSL_ERR_ASN1_UNEXPECTED_TAG )
return( POLARSSL_ERR_X509_INVALID_ALG + ret );
if( p == end )
return( 0 );
/*
* salt_len
*/
if( ( ret = asn1_get_tag( &p, end, &len,
ASN1_CONTEXT_SPECIFIC | ASN1_CONSTRUCTED | 2 ) ) == 0 )
{
end2 = p + len;
if( ( ret = asn1_get_int( &p, end2, salt_len ) ) != 0 )
return( POLARSSL_ERR_X509_INVALID_ALG + ret );
if( p != end2 )
return( POLARSSL_ERR_X509_INVALID_ALG +
POLARSSL_ERR_ASN1_LENGTH_MISMATCH );
}
else if( ret != POLARSSL_ERR_ASN1_UNEXPECTED_TAG )
return( POLARSSL_ERR_X509_INVALID_ALG + ret );
if( p == end )
return( 0 );
/*
* trailer_field (if present, must be 1)
*/
if( ( ret = asn1_get_tag( &p, end, &len,
ASN1_CONTEXT_SPECIFIC | ASN1_CONSTRUCTED | 3 ) ) == 0 )
{
int trailer_field;
end2 = p + len;
if( ( ret = asn1_get_int( &p, end2, &trailer_field ) ) != 0 )
return( POLARSSL_ERR_X509_INVALID_ALG + ret );
if( p != end2 )
return( POLARSSL_ERR_X509_INVALID_ALG +
POLARSSL_ERR_ASN1_LENGTH_MISMATCH );
if( trailer_field != 1 )
return( POLARSSL_ERR_X509_INVALID_ALG );
}
else if( ret != POLARSSL_ERR_ASN1_UNEXPECTED_TAG )
return( POLARSSL_ERR_X509_INVALID_ALG + ret );
if( p != end )
return( POLARSSL_ERR_X509_INVALID_ALG +
POLARSSL_ERR_ASN1_LENGTH_MISMATCH );
return( 0 );
}
|
Safe
|
[
"CWE-310"
] |
polarssl
|
5d8618539f8e186c1b2c1b5a548d6f85936fe41f
|
3.2569459551677818e+38
| 132 |
Fix memory leak while parsing some X.509 certs
| 0 |
TEST(ArrayOpsTest, Reshape_ShapeFn) {
ShapeInferenceTestOp op("Reshape");
op.input_tensors.resize(2);
// No valid shape provided.
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?];?", "?");
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[?];[?]", "?");
INFER_OK(op, "[4];[?]", "?");
// All dimensions provided.
Tensor new_shape = test::AsTensor<int32>({1, 2, 3});
op.input_tensors[1] = &new_shape;
INFER_OK(op, "?;[3]", "[1,2,3]");
INFER_OK(op, "[?];[3]", "[1,2,3]");
INFER_OK(op, "[6];[3]", "[1,2,3]");
// The number of elements should match for the reshape to succeed.
INFER_ERROR(
"Cannot reshape a tensor with 12 elements to shape [1,2,3] (6 elements)",
op, "[3,4];[3]");
// Unknown dimensions.
// Flatten:
new_shape = test::AsTensor<int32>({-1});
INFER_OK(op, "?;[1]", "[?]");
INFER_OK(op, "[?];[1]", "[d0_0]");
INFER_OK(op, "[2,2];[1]", "[4]");
// The first dimension is inferred:
new_shape = test::AsTensor<int32>({2, -1});
INFER_OK(op, "[3,4];[2]", "[2,6]");
// The total number of elements must be evenly divisible by the known
// dimensions.
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 7", op,
"[7];[2]");
// Multiple missing dimensions cannot be inferred.
new_shape = test::AsTensor<int32>({-1, -1, 2});
INFER_OK(op, "[8];[3]", "[?,?,2]");
INFER_OK(op, "?;[3]", "[?,?,2]");
// Symbolic shape propagation
new_shape = test::AsTensor<int32>({-1, 2, 3});
INFER_OK(op, "[?,2,3];[3]", "[d0_0,2,3]");
// Reshaping to a scalar.
new_shape = test::AsTensor<int32>({});
INFER_OK(op, "[1];[0]", "[]");
INFER_ERROR(
"Cannot reshape a tensor with 2 elements to shape [] (1 elements)", op,
"[1,2];[0]");
// Reshaping a tensor with no elements.
new_shape = test::AsTensor<int32>({-1});
INFER_OK(op, "[0];[1]", "[0]");
new_shape = test::AsTensor<int32>({-1, 6});
INFER_OK(op, "[0,2];[1]", "[0,6]");
new_shape = test::AsTensor<int32>({0, -1});
INFER_OK(op, "[0,2];[1]", "[0,?]");
}
|
Safe
|
[
"CWE-125"
] |
tensorflow
|
7cf73a2274732c9d82af51c2bc2cf90d13cd7e6d
|
2.8588696638224126e+38
| 59 |
Address QuantizeAndDequantizeV* heap oob. Added additional checks for the 'axis' attribute.
PiperOrigin-RevId: 402446942
Change-Id: Id2f6b82e4e740d0550329be02621c46466b5a5b9
| 0 |
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
IntervalTree *node)
{
if (node == (IntervalTree *) NULL)
return;
if (node->child == (IntervalTree *) NULL)
list[(*number_nodes)++]=node;
InitializeList(list,number_nodes,node->sibling);
InitializeList(list,number_nodes,node->child);
}
|
Safe
|
[
"CWE-369"
] |
ImageMagick
|
a4c89f2a61069ad7637bc7749cc1a839de442526
|
1.9234553381947157e+38
| 10 |
https://github.com/ImageMagick/ImageMagick/issues/1730
| 0 |
unsigned char *ssl_add_serverhello_tlsext(SSL *s, unsigned char *p, unsigned char *limit)
{
int extdatalen=0;
unsigned char *ret = p;
#ifndef OPENSSL_NO_NEXTPROTONEG
int next_proto_neg_seen;
#endif
#ifndef OPENSSL_NO_EC
unsigned long alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
unsigned long alg_a = s->s3->tmp.new_cipher->algorithm_auth;
int using_ecc = (alg_k & (SSL_kEECDH|SSL_kECDHr|SSL_kECDHe)) || (alg_a & SSL_aECDSA);
using_ecc = using_ecc && (s->session->tlsext_ecpointformatlist != NULL);
#endif
/* don't add extensions for SSLv3, unless doing secure renegotiation */
if (s->version == SSL3_VERSION && !s->s3->send_connection_binding)
return p;
ret+=2;
if (ret>=limit) return NULL; /* this really never occurs, but ... */
if (!s->hit && s->servername_done == 1 && s->session->tlsext_hostname != NULL)
{
if ((long)(limit - ret - 4) < 0) return NULL;
s2n(TLSEXT_TYPE_server_name,ret);
s2n(0,ret);
}
if(s->s3->send_connection_binding)
{
int el;
if(!ssl_add_serverhello_renegotiate_ext(s, 0, &el, 0))
{
SSLerr(SSL_F_SSL_ADD_SERVERHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
if((limit - p - 4 - el) < 0) return NULL;
s2n(TLSEXT_TYPE_renegotiate,ret);
s2n(el,ret);
if(!ssl_add_serverhello_renegotiate_ext(s, ret, &el, el))
{
SSLerr(SSL_F_SSL_ADD_SERVERHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
ret += el;
}
#ifndef OPENSSL_NO_EC
if (using_ecc)
{
const unsigned char *plist;
size_t plistlen;
/* Add TLS extension ECPointFormats to the ServerHello message */
long lenmax;
tls1_get_formatlist(s, &plist, &plistlen);
if ((lenmax = limit - ret - 5) < 0) return NULL;
if (plistlen > (size_t)lenmax) return NULL;
if (plistlen > 255)
{
SSLerr(SSL_F_SSL_ADD_SERVERHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
s2n(TLSEXT_TYPE_ec_point_formats,ret);
s2n(plistlen + 1,ret);
*(ret++) = (unsigned char) plistlen;
memcpy(ret, plist, plistlen);
ret+=plistlen;
}
/* Currently the server should not respond with a SupportedCurves extension */
#endif /* OPENSSL_NO_EC */
if (s->tlsext_ticket_expected
&& !(SSL_get_options(s) & SSL_OP_NO_TICKET))
{
if ((long)(limit - ret - 4) < 0) return NULL;
s2n(TLSEXT_TYPE_session_ticket,ret);
s2n(0,ret);
}
if (s->tlsext_status_expected)
{
if ((long)(limit - ret - 4) < 0) return NULL;
s2n(TLSEXT_TYPE_status_request,ret);
s2n(0,ret);
}
#ifdef TLSEXT_TYPE_opaque_prf_input
if (s->s3->server_opaque_prf_input != NULL)
{
size_t sol = s->s3->server_opaque_prf_input_len;
if ((long)(limit - ret - 6 - sol) < 0)
return NULL;
if (sol > 0xFFFD) /* can't happen */
return NULL;
s2n(TLSEXT_TYPE_opaque_prf_input, ret);
s2n(sol + 2, ret);
s2n(sol, ret);
memcpy(ret, s->s3->server_opaque_prf_input, sol);
ret += sol;
}
#endif
if(s->srtp_profile)
{
int el;
ssl_add_serverhello_use_srtp_ext(s, 0, &el, 0);
if((limit - p - 4 - el) < 0) return NULL;
s2n(TLSEXT_TYPE_use_srtp,ret);
s2n(el,ret);
if(ssl_add_serverhello_use_srtp_ext(s, ret, &el, el))
{
SSLerr(SSL_F_SSL_ADD_SERVERHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
ret+=el;
}
if (((s->s3->tmp.new_cipher->id & 0xFFFF)==0x80 || (s->s3->tmp.new_cipher->id & 0xFFFF)==0x81)
&& (SSL_get_options(s) & SSL_OP_CRYPTOPRO_TLSEXT_BUG))
{ const unsigned char cryptopro_ext[36] = {
0xfd, 0xe8, /*65000*/
0x00, 0x20, /*32 bytes length*/
0x30, 0x1e, 0x30, 0x08, 0x06, 0x06, 0x2a, 0x85,
0x03, 0x02, 0x02, 0x09, 0x30, 0x08, 0x06, 0x06,
0x2a, 0x85, 0x03, 0x02, 0x02, 0x16, 0x30, 0x08,
0x06, 0x06, 0x2a, 0x85, 0x03, 0x02, 0x02, 0x17};
if (limit-ret<36) return NULL;
memcpy(ret,cryptopro_ext,36);
ret+=36;
}
#ifndef OPENSSL_NO_HEARTBEATS
/* Add Heartbeat extension if we've received one */
if (s->tlsext_heartbeat & SSL_TLSEXT_HB_ENABLED)
{
s2n(TLSEXT_TYPE_heartbeat,ret);
s2n(1,ret);
/* Set mode:
* 1: peer may send requests
* 2: peer not allowed to send requests
*/
if (s->tlsext_heartbeat & SSL_TLSEXT_HB_DONT_RECV_REQUESTS)
*(ret++) = SSL_TLSEXT_HB_DONT_SEND_REQUESTS;
else
*(ret++) = SSL_TLSEXT_HB_ENABLED;
}
#endif
#ifndef OPENSSL_NO_NEXTPROTONEG
next_proto_neg_seen = s->s3->next_proto_neg_seen;
s->s3->next_proto_neg_seen = 0;
if (next_proto_neg_seen && s->ctx->next_protos_advertised_cb)
{
const unsigned char *npa;
unsigned int npalen;
int r;
r = s->ctx->next_protos_advertised_cb(s, &npa, &npalen, s->ctx->next_protos_advertised_cb_arg);
if (r == SSL_TLSEXT_ERR_OK)
{
if ((long)(limit - ret - 4 - npalen) < 0) return NULL;
s2n(TLSEXT_TYPE_next_proto_neg,ret);
s2n(npalen,ret);
memcpy(ret, npa, npalen);
ret += npalen;
s->s3->next_proto_neg_seen = 1;
}
}
#endif
/* If custom types were sent in ClientHello, add ServerHello responses */
if (s->s3->tlsext_custom_types_count)
{
size_t i;
for (i = 0; i < s->s3->tlsext_custom_types_count; i++)
{
size_t j;
custom_srv_ext_record *record;
for (j = 0; j < s->ctx->custom_srv_ext_records_count; j++)
{
record = &s->ctx->custom_srv_ext_records[j];
if (s->s3->tlsext_custom_types[i] == record->ext_type)
{
const unsigned char *out = NULL;
unsigned short outlen = 0;
int cb_retval = 0;
/* NULL callback or -1 omits extension */
if (!record->fn2)
break;
cb_retval = record->fn2(s, record->ext_type,
&out, &outlen,
record->arg);
if (cb_retval == 0)
return NULL; /* error */
if (cb_retval == -1)
break; /* skip this extension */
if (limit < ret + 4 + outlen)
return NULL;
s2n(record->ext_type, ret);
s2n(outlen, ret);
memcpy(ret, out, outlen);
ret += outlen;
break;
}
}
}
}
#ifdef TLSEXT_TYPE_encrypt_then_mac
if (s->s3->flags & TLS1_FLAGS_ENCRYPT_THEN_MAC)
{
/* Don't use encrypt_then_mac if AEAD: might want
* to disable for other ciphersuites too.
*/
if (s->s3->tmp.new_cipher->algorithm_mac == SSL_AEAD)
s->s3->flags &= ~TLS1_FLAGS_ENCRYPT_THEN_MAC;
else
{
s2n(TLSEXT_TYPE_encrypt_then_mac,ret);
s2n(0,ret);
}
}
#endif
if (s->s3->alpn_selected)
{
const unsigned char *selected = s->s3->alpn_selected;
unsigned len = s->s3->alpn_selected_len;
if ((long)(limit - ret - 4 - 2 - 1 - len) < 0)
return NULL;
s2n(TLSEXT_TYPE_application_layer_protocol_negotiation,ret);
s2n(3 + len,ret);
s2n(1 + len,ret);
*ret++ = len;
memcpy(ret, selected, len);
ret += len;
}
if ((extdatalen = ret-p-2)== 0)
return p;
s2n(extdatalen,p);
return ret;
}
|
Safe
|
[
"CWE-19"
] |
openssl
|
45473632c54947859a731dfe2db087c002ef7aa7
|
2.6578945660677038e+38
| 264 |
Prevent use of RSA+MD5 in TLS 1.2 by default.
Removing RSA+MD5 from the default signature algorithm list
prevents its use by default.
If a broken implementation attempts to use RSA+MD5 anyway the sanity
checking of signature algorithms will cause a fatal alert.
| 0 |
static void tcp_connect_init(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
__u8 rcv_wscale;
u32 rcv_wnd;
/* We'll fix this up when we get a response from the other end.
* See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
*/
tp->tcp_header_len = sizeof(struct tcphdr);
if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
if (tp->af_specific->md5_lookup(sk, sk))
tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
/* If user gave his TCP_MAXSEG, record it to clamp */
if (tp->rx_opt.user_mss)
tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
tp->max_window = 0;
tcp_mtup_init(sk);
tcp_sync_mss(sk, dst_mtu(dst));
tcp_ca_dst_init(sk, dst);
if (!tp->window_clamp)
tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
tcp_initialize_rcv_mss(sk);
/* limit the window selection if the user enforce a smaller rx buffer */
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
(tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
tp->window_clamp = tcp_full_space(sk);
rcv_wnd = tcp_rwnd_init_bpf(sk);
if (rcv_wnd == 0)
rcv_wnd = dst_metric(dst, RTAX_INITRWND);
tcp_select_initial_window(sk, tcp_full_space(sk),
tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd,
&tp->window_clamp,
sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
&rcv_wscale,
rcv_wnd);
tp->rx_opt.rcv_wscale = rcv_wscale;
tp->rcv_ssthresh = tp->rcv_wnd;
sk->sk_err = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->snd_wnd = 0;
tcp_init_wl(tp, 0);
tcp_write_queue_purge(sk);
tp->snd_una = tp->write_seq;
tp->snd_sml = tp->write_seq;
tp->snd_up = tp->write_seq;
tp->snd_nxt = tp->write_seq;
if (likely(!tp->repair))
tp->rcv_nxt = 0;
else
tp->rcv_tstamp = tcp_jiffies32;
tp->rcv_wup = tp->rcv_nxt;
tp->copied_seq = tp->rcv_nxt;
inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
inet_csk(sk)->icsk_retransmits = 0;
tcp_clear_retrans(tp);
}
|
Safe
|
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
|
2.9162645948139966e+38
| 75 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertised the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jonathan Looney <jtl@netflix.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Bruce Curtis <brucec@netflix.com>
Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
TEST_F(HttpConnectionManagerConfigTest, UserDefinedSettingsDisallowServerPush) {
const std::string yaml_string = R"EOF(
codec_type: http2
stat_prefix: my_stat_prefix
route_config:
virtual_hosts:
- name: default
domains:
- "*"
routes:
- match:
prefix: "/"
route:
cluster: fake_cluster
http_filters:
- name: encoder-decoder-buffer-filter
typed_config: {}
http2_protocol_options:
custom_settings_parameters: { identifier: 2, value: 1 }
)EOF";
EXPECT_THROW_WITH_REGEX(
createHttpConnectionManagerConfig(yaml_string), EnvoyException,
"server push is not supported by Envoy and can not be enabled via a SETTINGS parameter.");
// Specify both the server push parameter and colliding named and user defined parameters.
const std::string yaml_string2 = R"EOF(
codec_type: http2
stat_prefix: my_stat_prefix
route_config:
virtual_hosts:
- name: default
domains:
- "*"
routes:
- match:
prefix: "/"
route:
cluster: fake_cluster
http_filters:
- name: encoder-decoder-buffer-filter
typed_config: {}
http2_protocol_options:
hpack_table_size: 2048
max_concurrent_streams: 4096
custom_settings_parameters:
- { identifier: 1, value: 2048 }
- { identifier: 2, value: 1 }
- { identifier: 3, value: 1024 }
)EOF";
// The server push exception is thrown first.
EXPECT_THROW_WITH_REGEX(
createHttpConnectionManagerConfig(yaml_string), EnvoyException,
"server push is not supported by Envoy and can not be enabled via a SETTINGS parameter.");
}
|
Safe
|
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
|
1.734109749434834e+38
| 56 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <yavlasov@google.com>
| 0 |
onig_reduce_nested_quantifier(Node* pnode, Node* cnode)
{
int pnum, cnum;
QuantNode *p, *c;
p = QUANT_(pnode);
c = QUANT_(cnode);
pnum = quantifier_type_num(p);
cnum = quantifier_type_num(c);
if (pnum < 0 || cnum < 0) {
if ((p->lower == p->upper) && ! IS_REPEAT_INFINITE(p->upper)) {
if ((c->lower == c->upper) && ! IS_REPEAT_INFINITE(c->upper)) {
int n = positive_int_multiply(p->lower, c->lower);
if (n >= 0) {
p->lower = p->upper = n;
NODE_BODY(pnode) = NODE_BODY(cnode);
goto remove_cnode;
}
}
}
return ;
}
switch(ReduceTypeTable[cnum][pnum]) {
case RQ_DEL:
*pnode = *cnode;
break;
case RQ_A:
NODE_BODY(pnode) = NODE_BODY(cnode);
p->lower = 0; p->upper = REPEAT_INFINITE; p->greedy = 1;
break;
case RQ_AQ:
NODE_BODY(pnode) = NODE_BODY(cnode);
p->lower = 0; p->upper = REPEAT_INFINITE; p->greedy = 0;
break;
case RQ_QQ:
NODE_BODY(pnode) = NODE_BODY(cnode);
p->lower = 0; p->upper = 1; p->greedy = 0;
break;
case RQ_P_QQ:
NODE_BODY(pnode) = cnode;
p->lower = 0; p->upper = 1; p->greedy = 0;
c->lower = 1; c->upper = REPEAT_INFINITE; c->greedy = 1;
return ;
break;
case RQ_PQ_Q:
NODE_BODY(pnode) = cnode;
p->lower = 0; p->upper = 1; p->greedy = 1;
c->lower = 1; c->upper = REPEAT_INFINITE; c->greedy = 0;
return ;
break;
case RQ_ASIS:
NODE_BODY(pnode) = cnode;
return ;
break;
}
remove_cnode:
NODE_BODY(cnode) = NULL_NODE;
onig_node_free(cnode);
}
|
Safe
|
[
"CWE-476"
] |
oniguruma
|
850bd9b0d8186eb1637722b46b12656814ab4ad2
|
8.35943254480383e+37
| 62 |
fix #87: Read unknown address in onig_error_code_to_str()
| 0 |
smtp_server_connection_get_transaction(struct smtp_server_connection *conn)
{
return conn->state.trans;
}
|
Safe
|
[
"CWE-77"
] |
core
|
321c339756f9b2b98fb7326359d1333adebb5295
|
1.824776497346255e+38
| 4 |
lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability.
The input handler kept reading more commands even though the input was locked by
the STARTTLS command, thereby causing it to read commands pipelined beyond
STARTTLS. This causes a STARTTLS command injection vulnerability.
| 0 |
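The generic fix for STARTTLS injection is the same across protocols: discard anything the client pipelined in plaintext before handing the socket to the TLS layer. A sketch:

#include <stddef.h>

struct conn { char inbuf[4096]; size_t inlen; };

static void start_tls_sketch(struct conn *c)
{
    c->inlen = 0;   /* drop plaintext leftovers -- they must never be
                     * parsed as post-handshake commands */
    /* ... then begin the TLS handshake on the raw socket ... */
}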
static int get_recurse_data_length(compiler_common *common, PCRE2_SPTR cc, PCRE2_SPTR ccend,
BOOL *needs_control_head, BOOL *has_quit, BOOL *has_accept)
{
int length = 1;
int size;
PCRE2_SPTR alternative;
BOOL quit_found = FALSE;
BOOL accept_found = FALSE;
BOOL setsom_found = FALSE;
BOOL setmark_found = FALSE;
BOOL capture_last_found = FALSE;
BOOL control_head_found = FALSE;
#if defined DEBUG_FORCE_CONTROL_HEAD && DEBUG_FORCE_CONTROL_HEAD
SLJIT_ASSERT(common->control_head_ptr != 0);
control_head_found = TRUE;
#endif
/* Calculate the sum of the private machine words. */
while (cc < ccend)
{
size = 0;
switch(*cc)
{
case OP_SET_SOM:
SLJIT_ASSERT(common->has_set_som);
setsom_found = TRUE;
cc += 1;
break;
case OP_RECURSE:
if (common->has_set_som)
setsom_found = TRUE;
if (common->mark_ptr != 0)
setmark_found = TRUE;
if (common->capture_last_ptr != 0)
capture_last_found = TRUE;
cc += 1 + LINK_SIZE;
break;
case OP_KET:
if (PRIVATE_DATA(cc) != 0)
{
length++;
SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0);
cc += PRIVATE_DATA(cc + 1);
}
cc += 1 + LINK_SIZE;
break;
case OP_ASSERT:
case OP_ASSERT_NOT:
case OP_ASSERTBACK:
case OP_ASSERTBACK_NOT:
case OP_ONCE:
case OP_BRAPOS:
case OP_SBRA:
case OP_SBRAPOS:
case OP_SCOND:
length++;
SLJIT_ASSERT(PRIVATE_DATA(cc) != 0);
cc += 1 + LINK_SIZE;
break;
case OP_CBRA:
case OP_SCBRA:
length += 2;
if (common->capture_last_ptr != 0)
capture_last_found = TRUE;
if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0)
length++;
cc += 1 + LINK_SIZE + IMM2_SIZE;
break;
case OP_CBRAPOS:
case OP_SCBRAPOS:
length += 2 + 2;
if (common->capture_last_ptr != 0)
capture_last_found = TRUE;
cc += 1 + LINK_SIZE + IMM2_SIZE;
break;
case OP_COND:
/* Might be a hidden SCOND. */
alternative = cc + GET(cc, 1);
if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN)
length++;
cc += 1 + LINK_SIZE;
break;
CASE_ITERATOR_PRIVATE_DATA_1
if (PRIVATE_DATA(cc) != 0)
length++;
cc += 2;
#ifdef SUPPORT_UNICODE
if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
break;
CASE_ITERATOR_PRIVATE_DATA_2A
if (PRIVATE_DATA(cc) != 0)
length += 2;
cc += 2;
#ifdef SUPPORT_UNICODE
if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
break;
CASE_ITERATOR_PRIVATE_DATA_2B
if (PRIVATE_DATA(cc) != 0)
length += 2;
cc += 2 + IMM2_SIZE;
#ifdef SUPPORT_UNICODE
if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
break;
CASE_ITERATOR_TYPE_PRIVATE_DATA_1
if (PRIVATE_DATA(cc) != 0)
length++;
cc += 1;
break;
CASE_ITERATOR_TYPE_PRIVATE_DATA_2A
if (PRIVATE_DATA(cc) != 0)
length += 2;
cc += 1;
break;
CASE_ITERATOR_TYPE_PRIVATE_DATA_2B
if (PRIVATE_DATA(cc) != 0)
length += 2;
cc += 1 + IMM2_SIZE;
break;
case OP_CLASS:
case OP_NCLASS:
#if defined SUPPORT_UNICODE || PCRE2_CODE_UNIT_WIDTH != 8
case OP_XCLASS:
size = (*cc == OP_XCLASS) ? GET(cc, 1) : 1 + 32 / (int)sizeof(PCRE2_UCHAR);
#else
size = 1 + 32 / (int)sizeof(PCRE2_UCHAR);
#endif
if (PRIVATE_DATA(cc) != 0)
length += get_class_iterator_size(cc + size);
cc += size;
break;
case OP_MARK:
case OP_COMMIT_ARG:
case OP_PRUNE_ARG:
case OP_THEN_ARG:
SLJIT_ASSERT(common->mark_ptr != 0);
if (!setmark_found)
setmark_found = TRUE;
if (common->control_head_ptr != 0)
control_head_found = TRUE;
if (*cc != OP_MARK)
quit_found = TRUE;
cc += 1 + 2 + cc[1];
break;
case OP_PRUNE:
case OP_SKIP:
case OP_COMMIT:
quit_found = TRUE;
cc++;
break;
case OP_SKIP_ARG:
quit_found = TRUE;
cc += 1 + 2 + cc[1];
break;
case OP_THEN:
SLJIT_ASSERT(common->control_head_ptr != 0);
quit_found = TRUE;
if (!control_head_found)
control_head_found = TRUE;
cc++;
break;
case OP_ACCEPT:
case OP_ASSERT_ACCEPT:
accept_found = TRUE;
cc++;
break;
default:
cc = next_opcode(common, cc);
SLJIT_ASSERT(cc != NULL);
break;
}
}
SLJIT_ASSERT(cc == ccend);
if (control_head_found)
length++;
if (capture_last_found)
length++;
if (quit_found)
{
if (setsom_found)
length++;
if (setmark_found)
length++;
}
*needs_control_head = control_head_found;
*has_quit = quit_found;
*has_accept = accept_found;
return length;
}
|
Safe
|
[
"CWE-125"
] |
php-src
|
8947fd9e9fdce87cd6c59817b1db58e789538fe9
|
2.1490766021465295e+38
| 214 |
Fix #78338: Array cross-border reading in PCRE
We backport r1092 from pcre2.
| 0 |
static void call_rtas_display_status(unsigned char c)
{
unsigned long s;
if (!rtas.base)
return;
s = lock_rtas();
rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
unlock_rtas(s);
}
|
Safe
|
[
"CWE-862"
] |
linux
|
bd59380c5ba4147dcbaad3e582b55ccfd120b764
|
1.9330784736082482e+38
| 11 |
powerpc/rtas: Restrict RTAS requests from userspace
A number of userspace utilities depend on making calls to RTAS to retrieve
information and update various things.
The existing API through which we expose RTAS to userspace exposes more
RTAS functionality than we actually need, through the sys_rtas syscall,
which allows root (or anyone with CAP_SYS_ADMIN) to make any RTAS call they
want with arbitrary arguments.
Many RTAS calls take the address of a buffer as an argument, and it's up to
the caller to specify the physical address of the buffer as an argument. We
allocate a buffer (the "RMO buffer") in the Real Memory Area that RTAS can
access, and then expose the physical address and size of this buffer in
/proc/powerpc/rtas/rmo_buffer. Userspace is expected to read this address,
poke at the buffer using /dev/mem, and pass an address in the RMO buffer to
the RTAS call.
However, there's nothing stopping the caller from specifying whatever
address they want in the RTAS call, and it's easy to construct a series of
RTAS calls that can overwrite arbitrary bytes (even without /dev/mem
access).
Additionally, there are some RTAS calls that do potentially dangerous
things and for which there are no legitimate userspace use cases.
In the past, this would not have been a particularly big deal as it was
assumed that root could modify all system state freely, but with Secure
Boot and lockdown we need to care about this.
We can't fundamentally change the ABI at this point, however we can address
this by implementing a filter that checks RTAS calls against a list
of permitted calls and forces the caller to use addresses within the RMO
buffer.
The list is based off the list of calls that are used by the librtas
userspace library, and has been tested with a number of existing userspace
RTAS utilities. For compatibility with any applications we are not aware of
that require other calls, the filter can be turned off at build time.
Cc: stable@vger.kernel.org
Reported-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200820044512.7543-1-ajd@linux.ibm.com
| 0 |
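A sketch of the two-part filter described above -- a token allowlist plus confinement of buffer arguments to the RMO buffer -- with illustrative constants rather than real token numbers.

#include <stddef.h>
#include <stdint.h>

#define RMO_BUF_BASE 0x1000000UL
#define RMO_BUF_SIZE 0x10000UL

static const uint32_t allowed_tokens[] = { 3, 9, 10, 38 };

static int rtas_call_allowed(uint32_t token, unsigned long buf,
                             unsigned long len)
{
    size_t i;
    int ok = 0;

    for (i = 0; i < sizeof(allowed_tokens) / sizeof(allowed_tokens[0]); i++)
        if (allowed_tokens[i] == token)
            ok = 1;
    if (!ok)
        return 0;                  /* not on the allowlist */

    /* Overflow-safe containment check: buf..buf+len must lie inside
     * the RMO buffer. */
    if (buf < RMO_BUF_BASE || len > RMO_BUF_SIZE ||
        buf - RMO_BUF_BASE > RMO_BUF_SIZE - len)
        return 0;
    return 1;
}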
Perl_repeatcpy(char *to, const char *from, I32 len, IV count)
{
PERL_ARGS_ASSERT_REPEATCPY;
assert(len >= 0);
if (count < 0)
croak_memory_wrap();
if (len == 1)
memset(to, *from, count);
else if (count) {
char *p = to;
IV items, linear, half;
linear = count < PERL_REPEATCPY_LINEAR ? count : PERL_REPEATCPY_LINEAR;
for (items = 0; items < linear; ++items) {
const char *q = from;
IV todo;
for (todo = len; todo > 0; todo--)
*p++ = *q++;
}
half = count / 2;
while (items <= half) {
IV size = items * len;
memcpy(p, to, size);
p += size;
items *= 2;
}
if (count > items)
memcpy(p, to, (count - items) * len);
}
}
|
Safe
|
[
"CWE-119",
"CWE-703",
"CWE-787"
] |
perl5
|
34716e2a6ee2af96078d62b065b7785c001194be
|
4.924280438739693e+37
| 35 |
Perl_my_setenv(); handle integer wrap
RT #133204
Wean this function off int/I32 and onto UV/Size_t.
Also, replace all malloc-ish calls with a wrapper that does
overflow checks.
In particular, it was doing (nlen + vlen + 2) which could wrap when
the combined length of the environment variable name and value
exceeded around 0x7fffffff.
The wrapper check function is probably overkill, but belt and braces...
NB this function has several variant parts, #ifdef'ed by platform
type; I have blindly changed the parts that aren't compiled under linux.
| 0 |
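The heart of the fix described above is refusing to compute a wrapped allocation size. A minimal sketch of such an overflow-checked allocation, assuming an abort-on-wrap policy; the real Perl wrapper and its croak_memory_wrap() path differ in detail:
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical checked allocator: fail hard instead of letting
 * (nlen + vlen + 2) wrap around on addition. */
static void *safe_env_alloc(size_t nlen, size_t vlen)
{
    /* We need nlen + vlen + 2 bytes for "NAME=VALUE\0". */
    if (nlen > SIZE_MAX - 2 || vlen > SIZE_MAX - 2 - nlen)
        abort();    /* stand-in for croak_memory_wrap() */
    return malloc(nlen + vlen + 2);
}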
ecc_mod_mul (const struct ecc_modulo *m, mp_limb_t *rp,
const mp_limb_t *ap, const mp_limb_t *bp, mp_limb_t *tp)
{
mpn_mul_n (tp, ap, bp, m->size);
m->reduce (m, rp, tp);
}
|
Safe
|
[
"CWE-787"
] |
nettle
|
a63893791280d441c713293491da97c79c0950fe
|
6.601391924323729e+37
| 6 |
New functions ecc_mod_mul_canonical and ecc_mod_sqr_canonical.
* ecc-mod-arith.c (ecc_mod_mul_canonical, ecc_mod_sqr_canonical):
New functions.
* ecc-internal.h: Declare and document new functions.
* curve448-eh-to-x.c (curve448_eh_to_x): Use ecc_mod_sqr_canonical.
* curve25519-eh-to-x.c (curve25519_eh_to_x): Use ecc_mod_mul_canonical.
* ecc-eh-to-a.c (ecc_eh_to_a): Likewise.
* ecc-j-to-a.c (ecc_j_to_a): Likewise.
* ecc-mul-m.c (ecc_mul_m): Likewise.
(cherry picked from commit 2bf497ba4d6acc6f352bca015837fad33008565c)
| 0 |
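Per the ChangeLog entry above, the plain ecc_mod_mul() shown here may leave the result only partially reduced; the new canonical variants fold it fully into [0, m). A sketch of the multiply variant consistent with that description, assuming nettle's mpn-style helpers (mpn_sub_n, cnd_copy) and 3*size limbs of scratch in tp; the actual implementation may differ:
void
ecc_mod_mul_canonical_sketch (const struct ecc_modulo *m, mp_limb_t *rp,
                              const mp_limb_t *ap, const mp_limb_t *bp,
                              mp_limb_t *tp)
{
  mp_limb_t cy;
  mpn_mul_n (tp + m->size, ap, bp, m->size);   /* 2n-limb product */
  m->reduce (m, tp, tp + m->size);             /* partially reduced in tp */
  /* Constant-time canonicalisation: subtract m, keep the difference
     only when the subtraction did not borrow (i.e. tp >= m). */
  cy = mpn_sub_n (rp, tp, m->m, m->size);
  cnd_copy (cy, rp, tp, m->size);
}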
pgp_write_binary(sc_card_t *card, unsigned int idx,
const u8 *buf, size_t count, unsigned long flags)
{
LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED);
}
|
Safe
|
[
"CWE-125"
] |
OpenSC
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
1.3336245400350608e+38
| 5 |
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
| 0 |
int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
struct cred *new;
long error = 0;
new = prepare_creds();
if (!new)
return -ENOMEM;
switch (option) {
case PR_CAPBSET_READ:
error = -EINVAL;
if (!cap_valid(arg2))
goto error;
error = !!cap_raised(new->cap_bset, arg2);
goto no_change;
case PR_CAPBSET_DROP:
error = cap_prctl_drop(new, arg2);
if (error < 0)
goto error;
goto changed;
/*
* The next four prctl's remain to assist with transitioning a
* system from legacy UID=0 based privilege (when filesystem
* capabilities are not in use) to a system using filesystem
* capabilities only - as the POSIX.1e draft intended.
*
* Note:
*
* PR_SET_SECUREBITS =
* issecure_mask(SECURE_KEEP_CAPS_LOCKED)
* | issecure_mask(SECURE_NOROOT)
* | issecure_mask(SECURE_NOROOT_LOCKED)
* | issecure_mask(SECURE_NO_SETUID_FIXUP)
* | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
*
* will ensure that the current process and all of its
* children will be locked into a pure
* capability-based-privilege environment.
*/
case PR_SET_SECUREBITS:
error = -EPERM;
if ((((new->securebits & SECURE_ALL_LOCKS) >> 1)
& (new->securebits ^ arg2)) /*[1]*/
|| ((new->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
|| (cap_capable(current_cred(),
current_cred()->user->user_ns, CAP_SETPCAP,
SECURITY_CAP_AUDIT) != 0) /*[4]*/
/*
* [1] no changing of bits that are locked
* [2] no unlocking of locks
* [3] no setting of unsupported bits
* [4] doing anything requires privilege (go read about
* the "sendmail capabilities bug")
*/
)
/* cannot change a locked bit */
goto error;
new->securebits = arg2;
goto changed;
case PR_GET_SECUREBITS:
error = new->securebits;
goto no_change;
case PR_GET_KEEPCAPS:
if (issecure(SECURE_KEEP_CAPS))
error = 1;
goto no_change;
case PR_SET_KEEPCAPS:
error = -EINVAL;
if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
goto error;
error = -EPERM;
if (issecure(SECURE_KEEP_CAPS_LOCKED))
goto error;
if (arg2)
new->securebits |= issecure_mask(SECURE_KEEP_CAPS);
else
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
goto changed;
default:
/* No functionality available - continue with default */
error = -ENOSYS;
goto error;
}
/* Functionality provided */
changed:
return commit_creds(new);
no_change:
error:
abort_creds(new);
return error;
}
|
Safe
|
[
"CWE-264"
] |
linux
|
259e5e6c75a910f3b5e656151dc602f53f9d7548
|
1.2413252763802252e+38
| 102 |
Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Drewry <wad@chromium.org>
Acked-by: Eric Paris <eparis@redhat.com>
Acked-by: Kees Cook <keescook@chromium.org>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <james.l.morris@oracle.com>
| 0 |
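For reference, the userspace side described above is a single prctl() call pair. A small example, defining the constants locally in case the libc headers predate this patch:
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
    /* One-way switch: no execve() from here on may grant privileges. */
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("PR_SET_NO_NEW_PRIVS");
        return 1;
    }
    /* Returns 1 once set; all extra arguments must be zero. */
    printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
    return 0;
}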
RZ_API const RzList /*<RzBinField *>*/ *rz_bin_object_get_fields(RZ_NONNULL RzBinObject *obj) {
rz_return_val_if_fail(obj, NULL);
return obj->fields;
}
|
Safe
|
[
"CWE-200",
"CWE-787"
] |
rizin
|
07b43bc8aa1ffebd9b68d60624c9610cf7e460c7
|
2.383702467825843e+38
| 4 |
fix oob read on luac
| 0 |
int connect_n_handle_errors(struct st_command *command,
MYSQL* con, const char* host,
const char* user, const char* pass,
const char* db, int port, const char* sock)
{
DYNAMIC_STRING *ds;
int failed_attempts= 0;
ds= &ds_res;
/* Only log if an error is expected */
if (command->expected_errors.count > 0 &&
!disable_query_log)
{
/*
Log the connect to result log
*/
dynstr_append_mem(ds, "connect(", 8);
replace_dynstr_append(ds, host);
dynstr_append_mem(ds, ",", 1);
replace_dynstr_append(ds, user);
dynstr_append_mem(ds, ",", 1);
replace_dynstr_append(ds, pass);
dynstr_append_mem(ds, ",", 1);
if (db)
replace_dynstr_append(ds, db);
dynstr_append_mem(ds, ",", 1);
replace_dynstr_append_uint(ds, port);
dynstr_append_mem(ds, ",", 1);
if (sock)
replace_dynstr_append(ds, sock);
dynstr_append_mem(ds, ")", 1);
dynstr_append_mem(ds, delimiter, delimiter_length);
dynstr_append_mem(ds, "\n", 1);
}
/* Simplified logging if enabled */
if (!disable_connect_log && !disable_query_log)
{
replace_dynstr_append(ds, command->query);
dynstr_append_mem(ds, ";\n", 2);
}
mysql_options(con, MYSQL_OPT_CONNECT_ATTR_RESET, 0);
mysql_options4(con, MYSQL_OPT_CONNECT_ATTR_ADD, "program_name", "mysqltest");
mysql_options(con, MYSQL_OPT_CAN_HANDLE_EXPIRED_PASSWORDS,
&can_handle_expired_passwords);
while (!mysql_connect_ssl_check(con, host, user, pass, db, port,
sock ? sock: 0, CLIENT_MULTI_STATEMENTS,
opt_ssl_mode == SSL_MODE_REQUIRED))
{
/*
If we have used up all our connections check whether this
is expected (by --error). If so, handle the error right away.
Otherwise, give it some extra time to rule out race-conditions.
If extra-time doesn't help, we have an unexpected error and
must abort -- just proceeding to handle_error() when second
and third chances are used up will handle that for us.
There are various user-limits of which only max_user_connections
and max_connections_per_hour apply at connect time. For the
second to create a race in our logic, we'd need a limits
test that runs without a FLUSH for longer than an hour, so we'll
stay clear of trying to work out which exact user-limit was
exceeded.
*/
if (((mysql_errno(con) == ER_TOO_MANY_USER_CONNECTIONS) ||
(mysql_errno(con) == ER_USER_LIMIT_REACHED)) &&
(failed_attempts++ < opt_max_connect_retries))
{
int i;
i= match_expected_error(command, mysql_errno(con), mysql_sqlstate(con));
if (i >= 0)
goto do_handle_error; /* expected error, handle */
my_sleep(connection_retry_sleep); /* unexpected error, wait */
continue; /* and give it 1 more chance */
}
do_handle_error:
var_set_errno(mysql_errno(con));
handle_error(command, mysql_errno(con), mysql_error(con),
mysql_sqlstate(con), ds);
return 0; /* Not connected */
}
var_set_errno(0);
handle_no_error(command);
revert_properties();
return 1; /* Connected */
}
|
Safe
|
[
"CWE-319"
] |
mysql-server
|
0002e1380d5f8c113b6bce91f2cf3f75136fd7c7
|
2.6885340256346247e+38
| 93 |
BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec)
| 0 |
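Client code opts into the fixed behaviour with the option named above. A hedged sketch of the documented usage, with placeholder host and credentials:
#include <mysql.h>
#include <stdio.h>

int main(void)
{
    MYSQL mysql;
    unsigned int ssl_mode = SSL_MODE_REQUIRED;

    mysql_init(&mysql);
    /* Refuse to proceed (and to send credentials) unless the
     * connection is actually encrypted. */
    mysql_options(&mysql, MYSQL_OPT_SSL_MODE, &ssl_mode);
    if (!mysql_real_connect(&mysql, "localhost", "user", "pass",
                            NULL, 0, NULL, 0)) {
        fprintf(stderr, "connect failed: %s\n", mysql_error(&mysql));
        return 1;
    }
    mysql_close(&mysql);
    return 0;
}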
static bool share_conflict(struct share_mode_entry *entry,
uint32 access_mask,
uint32 share_access)
{
DEBUG(10,("share_conflict: entry->access_mask = 0x%x, "
"entry->share_access = 0x%x, "
"entry->private_options = 0x%x\n",
(unsigned int)entry->access_mask,
(unsigned int)entry->share_access,
(unsigned int)entry->private_options));
if (server_id_is_disconnected(&entry->pid)) {
/*
* note: cleanup should have been done by
* delay_for_batch_oplocks()
*/
return false;
}
DEBUG(10,("share_conflict: access_mask = 0x%x, share_access = 0x%x\n",
(unsigned int)access_mask, (unsigned int)share_access));
if ((entry->access_mask & (FILE_WRITE_DATA|
FILE_APPEND_DATA|
FILE_READ_DATA|
FILE_EXECUTE|
DELETE_ACCESS)) == 0) {
DEBUG(10,("share_conflict: No conflict due to "
"entry->access_mask = 0x%x\n",
(unsigned int)entry->access_mask ));
return False;
}
if ((access_mask & (FILE_WRITE_DATA|
FILE_APPEND_DATA|
FILE_READ_DATA|
FILE_EXECUTE|
DELETE_ACCESS)) == 0) {
DEBUG(10,("share_conflict: No conflict due to "
"access_mask = 0x%x\n",
(unsigned int)access_mask ));
return False;
}
#if 1 /* JRA TEST - Superdebug. */
#define CHECK_MASK(num, am, right, sa, share) \
DEBUG(10,("share_conflict: [%d] am (0x%x) & right (0x%x) = 0x%x\n", \
(unsigned int)(num), (unsigned int)(am), \
(unsigned int)(right), (unsigned int)(am)&(right) )); \
DEBUG(10,("share_conflict: [%d] sa (0x%x) & share (0x%x) = 0x%x\n", \
(unsigned int)(num), (unsigned int)(sa), \
(unsigned int)(share), (unsigned int)(sa)&(share) )); \
if (((am) & (right)) && !((sa) & (share))) { \
DEBUG(10,("share_conflict: check %d conflict am = 0x%x, right = 0x%x, \
sa = 0x%x, share = 0x%x\n", (num), (unsigned int)(am), (unsigned int)(right), (unsigned int)(sa), \
(unsigned int)(share) )); \
return True; \
}
#else
#define CHECK_MASK(num, am, right, sa, share) \
if (((am) & (right)) && !((sa) & (share))) { \
DEBUG(10,("share_conflict: check %d conflict am = 0x%x, right = 0x%x, \
sa = 0x%x, share = 0x%x\n", (num), (unsigned int)(am), (unsigned int)(right), (unsigned int)(sa), \
(unsigned int)(share) )); \
return True; \
}
#endif
CHECK_MASK(1, entry->access_mask, FILE_WRITE_DATA | FILE_APPEND_DATA,
share_access, FILE_SHARE_WRITE);
CHECK_MASK(2, access_mask, FILE_WRITE_DATA | FILE_APPEND_DATA,
entry->share_access, FILE_SHARE_WRITE);
CHECK_MASK(3, entry->access_mask, FILE_READ_DATA | FILE_EXECUTE,
share_access, FILE_SHARE_READ);
CHECK_MASK(4, access_mask, FILE_READ_DATA | FILE_EXECUTE,
entry->share_access, FILE_SHARE_READ);
CHECK_MASK(5, entry->access_mask, DELETE_ACCESS,
share_access, FILE_SHARE_DELETE);
CHECK_MASK(6, access_mask, DELETE_ACCESS,
entry->share_access, FILE_SHARE_DELETE);
DEBUG(10,("share_conflict: No conflict.\n"));
return False;
}
|
Safe
|
[] |
samba
|
60f922bf1bd8816eacbb32c24793ad1f97a1d9f2
|
2.223586015023215e+38
| 86 |
Fix bug #10229 - No access check verification on stream files.
https://bugzilla.samba.org/show_bug.cgi?id=10229
We need to check if the requested access mask
could be used to open the underlying file (if
it existed), as we're passing in zero for the
access mask to the base filename.
Signed-off-by: Jeremy Allison <jra@samba.org>
Reviewed-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: David Disseldorp <ddiss@suse.de>
| 0 |
int smb_vfs_call_sys_acl_delete_def_file(struct vfs_handle_struct *handle,
const char *path)
{
VFS_FIND(sys_acl_delete_def_file);
return handle->fns->sys_acl_delete_def_file(handle, path);
}
|
Safe
|
[
"CWE-22"
] |
samba
|
bd269443e311d96ef495a9db47d1b95eb83bb8f4
|
3.2800369699133824e+38
| 6 |
Fix bug 7104 - "wide links" and "unix extensions" are incompatible.
Change parameter "wide links" to default to "no".
Ensure "wide links = no" if "unix extensions = yes" on a share.
Fix man pages to reflect this.
Remove "within share" checks for a UNIX symlink set - even if
widelinks = no. The server will not follow that link anyway.
Correct DEBUG message in check_reduced_name() to add missing "\n"
so it's really clear when a path is being denied as it's outside
the enclosing share path.
Jeremy.
| 0 |
TEST_F(ExprMatchTest, ComplexExprMatchesCorrectly) {
createMatcher(
fromjson("{"
" $expr: {"
" $and: ["
" {$eq: ['$a', 1]},"
" {$eq: ['$b', '$c']},"
" {"
" $or: ["
" {$eq: ['$d', 1]},"
" {$eq: ['$e', 3]},"
" {"
" $and: ["
" {$eq: ['$f', 1]},"
" {$eq: ['$g', '$h']},"
" {$or: [{$eq: ['$i', 3]}, {$eq: ['$j', '$k']}]}"
" ]"
" }"
" ]"
" }"
" ]"
" }"
"}"));
ASSERT_TRUE(matches(BSON("a" << 1 << "b" << 3 << "c" << 3 << "d" << 1)));
ASSERT_TRUE(matches(BSON("a" << 1 << "b" << 3 << "c" << 3 << "e" << 3)));
ASSERT_TRUE(matches(BSON("a" << 1 << "b" << 3 << "c" << 3 << "f" << 1 << "i" << 3)));
ASSERT_TRUE(
matches(BSON("a" << 1 << "b" << 3 << "c" << 3 << "f" << 1 << "j" << 5 << "k" << 5)));
ASSERT_FALSE(matches(BSON("a" << 1)));
ASSERT_FALSE(matches(BSON("a" << 1 << "b" << 3 << "c" << 3)));
ASSERT_FALSE(matches(BSON("a" << 1 << "b" << 3 << "c" << 3 << "d" << 5)));
ASSERT_FALSE(matches(BSON("a" << 1 << "b" << 3 << "c" << 3 << "j" << 5 << "k" << 10)));
}
|
Safe
|
[] |
mongo
|
ee97c0699fd55b498310996ee002328e533681a3
|
2.9332584440889782e+38
| 35 |
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
| 0 |
//! Save image as a BMP file \overloading.
const CImg<T>& save_bmp(std::FILE *const file) const {
return _save_bmp(file,0);
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
1.8752505889265494e+38
| 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
**/
CImgList<Tfloat> get_FFT(const bool is_invert=false) const {
CImgList<Tfloat> res(*this,CImg<Tfloat>());
CImg<Tfloat>::FFT(res[0],res[1],is_invert);
return res;
}
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
7.608260336966171e+36
| 5 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
fr_clipboard_get (GtkClipboard *clipboard,
GtkSelectionData *selection_data,
guint info,
gpointer user_data_or_owner)
{
FrWindow *window = user_data_or_owner;
char *data;
if (gtk_selection_data_get_target (selection_data) != FR_SPECIAL_URI_LIST)
return;
data = get_selection_data_from_clipboard_data (window, window->priv->copy_data);
if (data != NULL) {
gtk_selection_data_set (selection_data,
gtk_selection_data_get_target (selection_data),
8,
(guchar *) data,
strlen (data));
g_free (data);
}
}
|
Safe
|
[
"CWE-22"
] |
file-roller
|
b147281293a8307808475e102a14857055f81631
|
1.281954033405989e+38
| 21 |
libarchive: sanitize filenames before extracting
| 0 |
static int doCancelBundleTask(struct nc_state_t *nc, ncMetadata * pMeta, char *instanceId)
{
ncInstance *instance = find_instance(&global_instances, instanceId);
if (instance == NULL) {
LOGERROR("[%s] instance not found\n", instanceId);
return EUCA_NOT_FOUND_ERROR;
}
instance->bundleCanceled = 1; // record the intent to cancel bundling so that bundling thread can abort
if ((instance->bundlePid > 0) && !check_process(instance->bundlePid, "euca-bundle-upload")) {
LOGDEBUG("[%s] found bundlePid '%d', sending kill signal...\n", instanceId, instance->bundlePid);
kill(instance->bundlePid, 9);
instance->bundlePid = 0;
}
return (EUCA_OK);
}
|
Safe
|
[] |
eucalyptus
|
c252889a46f41b4c396b89e005ec89836f2524be
|
2.6473195888611744e+38
| 17 |
Input validation, shellout hardening on back-end
- validating bucketName and bucketPath in BundleInstance
- validating device name in Attach and DetachVolume
- removed some uses of system() and popen()
Fixes EUCA-7572, EUCA-7520
| 0 |
QPDFTokenizer::includeIgnorable()
{
this->m->include_ignorable = true;
}
|
Safe
|
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
|
3.35492483252478e+38
| 4 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
| 0 |
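As a rough illustration of the range-checked conversions the message describes (qpdf's real helpers are C++ templates in QIntC.hh that throw; this stand-in is a plain C sketch that aborts instead):
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Generic stand-in for a range-checked size_t -> int narrowing
 * conversion: raise an error at the conversion site rather than
 * silently truncating or going negative. */
static int checked_to_int(size_t v)
{
    if (v > (size_t)INT_MAX) {
        fprintf(stderr, "integer conversion out of range\n");
        abort();
    }
    return (int)v;
}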
int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
if (vpd->irr[0] & (1UL << NMI_VECTOR))
return NMI_VECTOR;
if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
return ExtINT_VECTOR;
return find_highest_bits((int *)&vpd->irr[0]);
}
|
Safe
|
[
"CWE-399"
] |
kvm
|
5b40572ed5f0344b9dbee486a17c589ce1abe1a3
|
2.5396295314478678e+38
| 11 |
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Avi Kivity <avi@redhat.com>
| 0 |
static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
u64 *phys_complete)
{
*phys_complete = ioat3_get_current_completion(chan);
if (*phys_complete == chan->last_completion)
return false;
clear_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
return true;
}
|
Safe
|
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
|
1.655957338730491e+38
| 12 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: David Whipple <whipple@securedatainnovations.ch>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: <stable@vger.kernel.org>
Reported-by: Roman Gushchin <klamm@yandex-team.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
| 0 |
GF_Err tref_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read_ex(s, bs, gf_isom_box_add_default, s->type);
}
|
Safe
|
[
"CWE-125"
] |
gpac
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
3.2394246455391063e+38
| 4 |
fixed 2 possible heap overflows (inc. #1088)
| 0 |
static inline size_t printCharTo(char c, Print &p) {
char specialChar = getSpecialChar(c);
return specialChar ? p.write('\\') + p.write(specialChar) : p.write(c);
}
|
Safe
|
[
"CWE-415",
"CWE-119"
] |
ArduinoJson
|
5e7b9ec688d79e7b16ec7064e1d37e8481a31e72
|
1.3130709893347683e+38
| 5 |
Fix buffer overflow (pull request #81)
| 0 |
xmlSchemaResolveAttrUseReferences(xmlSchemaAttributeUsePtr ause,
xmlSchemaParserCtxtPtr ctxt)
{
if ((ctxt == NULL) || (ause == NULL))
return(-1);
if ((ause->attrDecl == NULL) ||
(ause->attrDecl->type != XML_SCHEMA_EXTRA_QNAMEREF))
return(0);
{
xmlSchemaQNameRefPtr ref = WXS_QNAME_CAST ause->attrDecl;
/*
* TODO: Evaluate, what errors could occur if the declaration is not
* found.
*/
ause->attrDecl = xmlSchemaGetAttributeDecl(ctxt->schema,
ref->name, ref->targetNamespace);
if (ause->attrDecl == NULL) {
xmlSchemaPResCompAttrErr(ctxt,
XML_SCHEMAP_SRC_RESOLVE,
WXS_BASIC_CAST ause, ause->node,
"ref", ref->name, ref->targetNamespace,
XML_SCHEMA_TYPE_ATTRIBUTE, NULL);
return(ctxt->err);
}
}
return(0);
}
|
Safe
|
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
|
1.922026577487448e+38
| 29 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
| 0 |
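The decoration the message describes looks roughly like the sketch below; the macro name comes from the message, while the expansion and the example declaration are assumptions based on the usual GCC printf-format attribute:
/* Expansion sketch: on GCC/Clang the macro maps to the printf
 * format-checking attribute; elsewhere it expands to nothing. */
#ifdef __GNUC__
#define LIBXML_ATTR_FORMAT(fmt, args) \
    __attribute__((__format__(__printf__, fmt, args)))
#else
#define LIBXML_ATTR_FORMAT(fmt, args)
#endif

/* Hypothetical example: argument 2 is the format string, variadic
 * arguments start at 3, so the compiler can flag mismatches. */
void xmlSchemaErrSketch(void *ctxt, const char *fmt, ...)
    LIBXML_ATTR_FORMAT(2, 3);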
NO_INLINE JsVar *jspeStatementFor() {
JSP_ASSERT_MATCH(LEX_R_FOR);
JSP_MATCH('(');
bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0;
execInfo.execute |= EXEC_FOR_INIT;
// initialisation
JsVar *forStatement = 0;
// we could have 'for (;;)' - so don't munch up our semicolon if that's all we have
if (lex->tk != ';')
forStatement = jspeStatement();
if (jspIsInterrupted()) {
jsvUnLock(forStatement);
return 0;
}
execInfo.execute &= (JsExecFlags)~EXEC_FOR_INIT;
if (lex->tk == LEX_R_IN) {
// for (i in array)
// where i = jsvUnLock(forStatement);
if (JSP_SHOULD_EXECUTE && !jsvIsName(forStatement)) {
jsvUnLock(forStatement);
jsExceptionHere(JSET_ERROR, "FOR a IN b - 'a' must be a variable name, not %t", forStatement);
return 0;
}
bool addedIteratorToScope = false;
if (JSP_SHOULD_EXECUTE && !jsvGetRefs(forStatement)) {
// if the variable did not exist, add it to the scope
addedIteratorToScope = true;
jsvAddName(execInfo.root, forStatement);
}
JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_IN, jsvUnLock(forStatement), 0);
JsVar *array = jsvSkipNameAndUnLock(jspeExpression());
JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(forStatement, array), 0);
JslCharPos forBodyStart = jslCharPosClone(&lex->tokenStart);
JSP_SAVE_EXECUTE();
jspSetNoExecute();
execInfo.execute |= EXEC_IN_LOOP;
jsvUnLock(jspeBlockOrStatement());
JslCharPos forBodyEnd = jslCharPosClone(&lex->tokenStart);
if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
JSP_RESTORE_EXECUTE();
if (JSP_SHOULD_EXECUTE) {
if (jsvIsIterable(array)) {
JsvIsInternalChecker checkerFunction = jsvGetInternalFunctionCheckerFor(array);
JsVar *foundPrototype = 0;
JsvIterator it;
jsvIteratorNew(&it, array, JSIF_DEFINED_ARRAY_ElEMENTS);
bool hasHadBreak = false;
while (JSP_SHOULD_EXECUTE && jsvIteratorHasElement(&it) && !hasHadBreak) {
JsVar *loopIndexVar = jsvIteratorGetKey(&it);
bool ignore = false;
if (checkerFunction && checkerFunction(loopIndexVar)) {
ignore = true;
if (jsvIsString(loopIndexVar) &&
jsvIsStringEqual(loopIndexVar, JSPARSE_INHERITS_VAR))
foundPrototype = jsvSkipName(loopIndexVar);
}
if (!ignore) {
JsVar *indexValue = jsvIsName(loopIndexVar) ?
jsvCopyNameOnly(loopIndexVar, false/*no copy children*/, false/*not a name*/) :
loopIndexVar;
if (indexValue) { // could be out of memory
assert(!jsvIsName(indexValue) && jsvGetRefs(indexValue)==0);
jsvSetValueOfName(forStatement, indexValue);
if (indexValue!=loopIndexVar) jsvUnLock(indexValue);
jsvIteratorNext(&it);
jslSeekToP(&forBodyStart);
execInfo.execute |= EXEC_IN_LOOP;
jspDebuggerLoopIfCtrlC();
jsvUnLock(jspeBlockOrStatement());
if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
if (execInfo.execute & EXEC_CONTINUE)
execInfo.execute = EXEC_YES;
else if (execInfo.execute & EXEC_BREAK) {
execInfo.execute = EXEC_YES;
hasHadBreak = true;
}
}
} else
jsvIteratorNext(&it);
jsvUnLock(loopIndexVar);
if (!jsvIteratorHasElement(&it) && foundPrototype) {
jsvIteratorFree(&it);
jsvIteratorNew(&it, foundPrototype, JSIF_DEFINED_ARRAY_ElEMENTS);
jsvUnLock(foundPrototype);
foundPrototype = 0;
}
}
assert(!foundPrototype);
jsvIteratorFree(&it);
} else if (!jsvIsUndefined(array)) {
jsExceptionHere(JSET_ERROR, "FOR loop can only iterate over Arrays, Strings or Objects, not %t", array);
}
}
jslSeekToP(&forBodyEnd);
jslCharPosFree(&forBodyStart);
jslCharPosFree(&forBodyEnd);
if (addedIteratorToScope) {
jsvRemoveChild(execInfo.root, forStatement);
}
jsvUnLock2(forStatement, array);
} else { // ----------------------------------------------- NORMAL FOR LOOP
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
int loopCount = JSPARSE_MAX_LOOP_ITERATIONS;
#endif
bool loopCond = true;
bool hasHadBreak = false;
jsvUnLock(forStatement);
JSP_MATCH(';');
JslCharPos forCondStart = jslCharPosClone(&lex->tokenStart);
if (lex->tk != ';') {
JsVar *cond = jspeAssignmentExpression(); // condition
loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond));
jsvUnLock(cond);
}
JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);,0);
JslCharPos forIterStart = jslCharPosClone(&lex->tokenStart);
if (lex->tk != ')') { // we could have 'for (;;)'
JSP_SAVE_EXECUTE();
jspSetNoExecute();
jsvUnLock(jspeExpression()); // iterator
JSP_RESTORE_EXECUTE();
}
JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);,0);
JslCharPos forBodyStart = jslCharPosClone(&lex->tokenStart); // actual for body
JSP_SAVE_EXECUTE();
if (!loopCond) jspSetNoExecute();
execInfo.execute |= EXEC_IN_LOOP;
jsvUnLock(jspeBlockOrStatement());
JslCharPos forBodyEnd = jslCharPosClone(&lex->tokenStart);
if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
if (loopCond || !JSP_SHOULD_EXECUTE) {
if (execInfo.execute & EXEC_CONTINUE)
execInfo.execute = EXEC_YES;
else if (execInfo.execute & EXEC_BREAK) {
execInfo.execute = EXEC_YES;
hasHadBreak = true;
}
}
if (!loopCond) JSP_RESTORE_EXECUTE();
if (loopCond) {
jslSeekToP(&forIterStart);
if (lex->tk != ')') jsvUnLock(jspeExpression());
}
while (!hasHadBreak && JSP_SHOULD_EXECUTE && loopCond
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
&& loopCount-->0
#endif
) {
jslSeekToP(&forCondStart);
if (lex->tk == ';') {
loopCond = true;
} else {
JsVar *cond = jspeAssignmentExpression();
loopCond = jsvGetBoolAndUnLock(jsvSkipName(cond));
jsvUnLock(cond);
}
if (JSP_SHOULD_EXECUTE && loopCond) {
jslSeekToP(&forBodyStart);
execInfo.execute |= EXEC_IN_LOOP;
jspDebuggerLoopIfCtrlC();
jsvUnLock(jspeBlockOrStatement());
if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
if (execInfo.execute & EXEC_CONTINUE)
execInfo.execute = EXEC_YES;
else if (execInfo.execute & EXEC_BREAK) {
execInfo.execute = EXEC_YES;
hasHadBreak = true;
}
}
if (JSP_SHOULD_EXECUTE && loopCond && !hasHadBreak) {
jslSeekToP(&forIterStart);
if (lex->tk != ')') jsvUnLock(jspeExpression());
}
}
jslSeekToP(&forBodyEnd);
jslCharPosFree(&forCondStart);
jslCharPosFree(&forIterStart);
jslCharPosFree(&forBodyStart);
jslCharPosFree(&forBodyEnd);
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
if (loopCount<=0) {
jsExceptionHere(JSET_ERROR, "FOR Loop exceeded the maximum number of iterations ("STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS)")");
}
#endif
}
return 0;
}
|
Vulnerable
|
[
"CWE-125"
] |
Espruino
|
bf4416ab9129ee3afd56739ea4e3cd0da5484b6b
|
6.800343474801877e+37
| 199 |
Fix bug if using an undefined member of an object for for..in (fix #1437)
| 1 |
static void compute_string_bbox(TextInfo *text, DBBox *bbox)
{
int i;
if (text->length > 0) {
bbox->xMin = 32000;
bbox->xMax = -32000;
bbox->yMin = -1 * text->lines[0].asc + d6_to_double(text->glyphs[0].pos.y);
bbox->yMax = text->height - text->lines[0].asc +
d6_to_double(text->glyphs[0].pos.y);
for (i = 0; i < text->length; ++i) {
GlyphInfo *info = text->glyphs + i;
if (info->skip) continue;
double s = d6_to_double(info->pos.x);
double e = s + d6_to_double(info->cluster_advance.x);
bbox->xMin = FFMIN(bbox->xMin, s);
bbox->xMax = FFMAX(bbox->xMax, e);
}
} else
bbox->xMin = bbox->xMax = bbox->yMin = bbox->yMax = 0.;
}
|
Safe
|
[
"CWE-125"
] |
libass
|
f4f48950788b91c6a30029cc28a240b834713ea7
|
2.453455347947015e+38
| 22 |
Fix line wrapping mode 0/3 bugs
This fixes two separate bugs:
a) Don't move a linebreak into the first symbol. This results in an empty
line at the front, which does not help to equalize line lengths at all.
Instead, merge the line with the second one.
b) When moving a linebreak into a symbol that already is a break, the
number of lines must be decremented. Otherwise, uninitialized memory
is possibly used for later layout operations.
Found by fuzzer test case
id:000085,sig:11,src:003377+003350,op:splice,rep:8.
This might also affect and hopefully fix libass#229.
v2: change semantics according to review
| 0 |
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
unsigned long start = (unsigned long) page_address(page);
unsigned end = start + size;
apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
flush_tlb_kernel_range(start, end);
}
|
Safe
|
[
"CWE-284",
"CWE-264"
] |
linux
|
0ea1ec713f04bdfac343c9702b21cd3a7c711826
|
4.902423641575283e+37
| 8 |
ARM: dma-mapping: don't allow DMA mappings to be marked executable
DMA mapping permissions were being derived from pgprot_kernel directly
without using PAGE_KERNEL. This causes them to be marked with executable
permission, which is not what we want. Fix this.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
| 0 |
std::unique_ptr<folly::IOBuf> finalize() {
return encoder_.finalize();
}
|
Safe
|
[
"CWE-703",
"CWE-770"
] |
fbthrift
|
3f156207e8a6583d88999487e954320dc18955e6
|
6.839771509702718e+37
| 3 |
Better handling of truncated data when reading containers
Summary:
Currently we read the container size and blindly pre-allocate the container of that size. This allows a malicious attacker to send a message of a few bytes and cause the server to allocate GBs of memory.
This diff changes the logic to check that we have at least 1 byte per element in our buffer, thus forcing the attacker to send that much data.
This is a partial fix for CVE-2019-3553.
Reviewed By: yfeldblum, vitaut
Differential Revision: D14392438
fbshipit-source-id: b92e300a98e29faee564e2f5069027b28cb2cca4
| 0 |
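The pre-check the summary describes can be as simple as comparing the declared element count against the bytes still unread. A minimal sketch with hypothetical names (fbthrift's actual reader plumbing differs):
#include <stdbool.h>
#include <stddef.h>

/* Sketch: before reserving 'count' container slots, require that the
 * unread part of the buffer could plausibly hold them, at a minimum
 * of one byte per element. A handful of bytes can then no longer
 * trigger a multi-gigabyte reserve. */
static bool container_size_plausible(size_t count, size_t bytes_remaining)
{
    return count <= bytes_remaining;
}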
void CompactProtocolReader::readMapEnd() {}
|
Safe
|
[
"CWE-703",
"CWE-770"
] |
fbthrift
|
c9a903e5902834e95bbd4ab0e9fa53ba0189f351
|
9.588929891207257e+37
| 1 |
Better handling of truncated data when reading strings
Summary:
Currently we read the string size and blindly pre-allocate it. This allows a malicious attacker to send a message of a few bytes and cause the server to allocate a huge amount of memory (>1GB).
This diff changes the logic to check if we have enough data in the buffer before allocating the string.
This is a second part of a fix for CVE-2019-3553.
Reviewed By: vitaut
Differential Revision: D14393393
fbshipit-source-id: e2046d2f5b087d3abc9a9d2c6c107cf088673057
| 0 |
double my_double_round(double value, longlong dec, bool dec_unsigned,
bool truncate)
{
double tmp;
bool dec_negative= (dec < 0) && !dec_unsigned;
ulonglong abs_dec= dec_negative ? -dec : dec;
/*
tmp2 is here to avoid return the value with 80 bit precision
This will fix that the test round(0.1,1) = round(0.1,1) is true
Tagging with volatile is no guarantee, it may still be optimized away...
*/
volatile double tmp2;
tmp=(abs_dec < array_elements(log_10) ?
log_10[abs_dec] : pow(10.0,(double) abs_dec));
// Pre-compute these, to avoid optimizing away e.g. 'floor(v/tmp) * tmp'.
volatile double value_div_tmp= value / tmp;
volatile double value_mul_tmp= value * tmp;
if (!dec_negative && std::isinf(tmp)) // "dec" is too large positive number
return value;
if (dec_negative && std::isinf(tmp))
tmp2= 0.0;
else if (!dec_negative && std::isinf(value_mul_tmp))
tmp2= value;
else if (truncate)
{
if (value >= 0.0)
tmp2= dec < 0 ? floor(value_div_tmp) * tmp : floor(value_mul_tmp) / tmp;
else
tmp2= dec < 0 ? ceil(value_div_tmp) * tmp : ceil(value_mul_tmp) / tmp;
}
else
tmp2=dec < 0 ? rint(value_div_tmp) * tmp : rint(value_mul_tmp) / tmp;
return tmp2;
}
|
Safe
|
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
|
2.9148928653058457e+37
| 39 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
| 0 |
static int io_madvise(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
struct io_madvise *ma = &req->madvise;
int ret;
if (force_nonblock)
return -EAGAIN;
ret = do_madvise(ma->addr, ma->len, ma->advice);
if (ret < 0)
req_set_fail_links(req);
io_req_complete(req, ret);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
|
Safe
|
[] |
linux
|
0f2122045b946241a9e549c2a76cea54fa58a7ff
|
1.80270668096761e+38
| 18 |
io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular reference
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the task execs or exits, we
cancel its requests based on this tracking.
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: stable@vger.kernel.org # v5.5+
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
dec_zone_page_state(page, NR_PAGETABLE);
}
|
Safe
|
[
"CWE-119"
] |
linux
|
1be7107fbe18eed3e319a6c3e83c78254b693acb
|
2.60660799686751e+38
| 5 |
mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <oleg@redhat.com>
Original-patch-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
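A sketch of the vm_start_gap() idea described above, assuming a global stack_guard_gap expressed in bytes; this follows the commit description, though the real kernel code may differ in detail:
/* Report a downward-growing stack vma's start lowered by the guard
 * gap, so arch_get_unmapped_area() and neighbouring vmas keep their
 * distance. Clamp to 0 if the subtraction wraps below zero. */
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}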
bool BSONObj::valid() const {
int mySize = objsize();
try {
BSONObjIterator it(*this);
while( it.moreWithEOO() ) {
// both throw exception on failure
BSONElement e = it.next(true);
if ( e.size() >= mySize )
return false;
e.validate();
if (e.eoo()) {
if (it.moreWithEOO())
return false;
return true;
}
else if (e.isABSONObj()) {
if(!e.embeddedObject().valid())
return false;
}
else if (e.type() == CodeWScope) {
if(!e.codeWScopeObject().valid())
return false;
}
}
}
catch (...) {
}
return false;
}
|
Vulnerable
|
[
"CWE-20"
] |
mongo
|
f9817a6cf64bdba8e1e1cef30a798110df746b58
|
7.02100274594498e+37
| 32 |
SERVER-7769 - turn objcheck on by default and use new fast bson validate
| 1 |