func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64) |
---|---|---|---|---|---|---|---|---|
CImg<T>& operator>>=(const char *const expression) {
return *this>>=(+*this)._fill(expression,true,1,0,0,"operator>>=",this);
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
2.6813301315473e+38
| 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
| 0 |
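The fix summarized in the message above is a plausibility check of header-declared dimensions against the actual file size. A minimal sketch of that idea, with invented names rather than CImg's real identifiers:

```c
#include <stdint.h>

/* Reject images whose declared pixel data could not fit in the file. */
static int bmp_dimensions_plausible(uint32_t w, uint32_t h,
                                    uint32_t bytes_per_pixel, long file_size)
{
    /* Cap each dimension first so the 64-bit product below cannot wrap. */
    if (w > 0xFFFFFF || h > 0xFFFFFF || bytes_per_pixel > 16)
        return 0;
    uint64_t needed = (uint64_t)w * h * bytes_per_pixel;
    return file_size > 0 && needed <= (uint64_t)file_size;
}
```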
MouseLimit(TScreen *screen)
{
int mouse_limit;
switch (screen->extend_coords) {
default:
mouse_limit = MOUSE_LIMIT;
break;
case SET_EXT_MODE_MOUSE:
mouse_limit = EXT_MOUSE_LIMIT;
break;
case SET_SGR_EXT_MODE_MOUSE:
case SET_URXVT_EXT_MODE_MOUSE:
case SET_PIXEL_POSITION_MOUSE:
mouse_limit = -1;
break;
}
return mouse_limit;
}
|
Safe
|
[
"CWE-399"
] |
xterm-snapshots
|
82ba55b8f994ab30ff561a347b82ea340ba7075c
|
1.542472010308159e+37
| 19 |
snapshot of project "xterm", label xterm-365d
| 0 |
get_application_no_mime_type_handler_message (NautilusFile *file,
char *uri)
{
char *uri_for_display;
char *name;
char *error_message;
name = nautilus_file_get_display_name (file);
/* Truncate the URI so it doesn't get insanely wide. Note that even
* though the dialog uses wrapped text, if the URI doesn't contain
* white space then the text-wrapping code is too stupid to wrap it.
*/
uri_for_display = eel_str_middle_truncate (name, MAX_URI_IN_DIALOG_LENGTH);
error_message = g_strdup_printf (_("Could not display “%s”."), uri_for_display);
g_free (uri_for_display);
g_free (name);
return error_message;
}
|
Safe
|
[
"CWE-20"
] |
nautilus
|
1630f53481f445ada0a455e9979236d31a8d3bb0
|
8.73615996155803e+37
| 20 |
mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents running random programs via a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added without access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security, as it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991
| 0 |
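The mechanism the message describes is a GIO metadata attribute written once the user confirms trust. A sketch of what setting such an attribute looks like (error handling trimmed, and the attribute value "true" is an assumption, not necessarily what nautilus writes; builds against gio-2.0):

```c
#include <gio/gio.h>

/* Record the user's trust decision as file metadata, which an archive
 * cannot carry along the way the executable bit can. */
static void mark_trusted(GFile *file)
{
    g_file_set_attribute_string(file, "metadata::trusted", "true",
                                G_FILE_QUERY_INFO_NONE, NULL, NULL);
}
```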
static int vhost_user_set_log_fd(struct virtio_net **pdev,
struct vhu_msg_context *ctx,
int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
if (validate_msg_fds(dev, ctx, 1) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
close(ctx->fds[0]);
VHOST_LOG_CONFIG(INFO, "(%s) not implemented.\n", dev->ifname);
return RTE_VHOST_MSG_RESULT_OK;
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
dpdk
|
6442c329b9d2ded0f44b27d2016aaba8ba5844c5
|
2.9325147811850563e+38
| 14 |
vhost: fix queue number check when setting inflight FD
In function vhost_user_set_inflight_fd, queue number in inflight
message is used to access virtqueue. However, queue number could
be larger than VHOST_MAX_VRING and cause write OOB as this number
will be used to write inflight info in virtqueue structure. This
patch checks the queue number to avoid the issue and also makes
sure virtqueues are allocated before setting inflight information.
Fixes: ad0a4ae491fe ("vhost: checkout resubmit inflight information")
Cc: stable@dpdk.org
Reported-by: Wenxiang Qian <leonwxqian@gmail.com>
Signed-off-by: Chenbo Xia <chenbo.xia@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
| 0 |
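The message above describes two checks: the queue number must stay below VHOST_MAX_VRING before it is used as an index, and the indexed virtqueues must already be allocated. A simplified sketch of that shape, with stand-in types rather than the real DPDK structures:

```c
#include <stddef.h>

#define VHOST_MAX_VRING 256  /* assumed limit for this sketch */

struct fake_dev {
    void *virtqueue[VHOST_MAX_VRING];
};

static int inflight_queues_valid(struct fake_dev *dev, unsigned int num_queues)
{
    unsigned int i;
    /* Reject queue counts beyond the vring array before indexing it ... */
    if (num_queues > VHOST_MAX_VRING)
        return 0;
    /* ... and make sure each virtqueue was actually allocated. */
    for (i = 0; i < num_queues; i++)
        if (dev->virtqueue[i] == NULL)
            return 0;
    return 1;
}
```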
ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq)
{
/* funcdef: 'def' NAME parameters ['->' test] ':' suite */
return ast_for_funcdef_impl(c, n, decorator_seq,
false /* is_async */);
}
|
Safe
|
[
"CWE-125"
] |
cpython
|
a4d78362397fc3bced6ea80fbc7b5f4827aec55e
|
1.5048971160265954e+38
| 6 |
bpo-36495: Fix two out-of-bounds array reads (GH-12641)
Research and fix by @bradlarsen.
| 0 |
static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
{
struct gs_device_mode *dm;
struct usb_interface *intf = gsdev->iface;
int rc;
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
if (!dm)
return -ENOMEM;
dm->mode = GS_CAN_MODE_RESET;
rc = usb_control_msg(interface_to_usbdev(intf),
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_MODE,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
gsdev->channel,
0,
dm,
sizeof(*dm),
1000);
return rc;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
c919a3069c775c1c876bec55e00b2305d5125caa
|
9.350813066447978e+37
| 24 |
can: gs_usb: Don't use stack memory for USB transfers
Fixes: 05ca5270005c ("can: gs_usb: add ethtool set_phys_id callback to locate physical device")
The gs_usb driver is performing USB transfers using buffers allocated on
the stack. This causes the driver to not function with vmapped stacks.
Instead, allocate memory for the transfer buffers.
Signed-off-by: Ethan Zonca <e@ethanzonca.com>
Cc: linux-stable <stable@vger.kernel.org> # >= v4.8
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
| 0 |
static RECTANGLE_16* next_band(RECTANGLE_16* band1, RECTANGLE_16* endPtr, int* nbItems)
{
UINT16 refY = band1->top;
*nbItems = 0;
while ((band1 < endPtr) && (band1->top == refY))
{
band1++;
*nbItems += 1;
}
return band1;
}
|
Safe
|
[
"CWE-401"
] |
FreeRDP
|
9fee4ae076b1ec97b97efb79ece08d1dab4df29a
|
1.069574899364659e+38
| 13 |
Fixed #5645: realloc return handling
| 0 |
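The one-line message above refers to the classic realloc pitfall: assigning the result directly to the only pointer loses (and leaks) the original block when realloc fails. A generic sketch of the safe pattern, not FreeRDP's actual code:

```c
#include <stdlib.h>

/* Keep the old pointer until realloc is known to have succeeded. */
static int grow_array(int **items, size_t new_count)
{
    int *tmp = realloc(*items, new_count * sizeof(**items));
    if (tmp == NULL)
        return -1;   /* *items is still valid and still owned by the caller */
    *items = tmp;
    return 0;
}
```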
test_bson_iter_key_len (void)
{
bson_t *bson = bson_with_all_types ();
bson_iter_t iter;
BSON_ASSERT (bson_iter_init (&iter, bson));
while (bson_iter_next (&iter)) {
ASSERT_WITH_MSG (strlen (bson_iter_key (&iter)) ==
bson_iter_key_len (&iter),
"iter_key_len differs from real key length. got %d but "
"expected %d for key %s\n",
bson_iter_key_len (&iter),
(int) strlen (bson_iter_key (&iter)),
bson_iter_key (&iter));
}
}
|
Safe
|
[
"CWE-125"
] |
mongo-c-driver
|
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
3.1317977510542044e+38
| 16 |
Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due to a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example.
| 0 |
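The message above pins the bug to an off-by-sizeof(int32) in a bounds check. A minimal sketch of the corrected check, assuming a BSON-style binary field (int32 length, one subtype byte, then payload); `o` and `len` follow the message's naming, everything else is illustrative:

```c
#include <stdint.h>
#include <string.h>

static int read_binary_len(const uint8_t *data, size_t len, size_t o,
                           int32_t *out_l)
{
    int32_t l;
    if (len < o || len - o < sizeof(l))
        return 0;                     /* no room for the length itself */
    memcpy(&l, data + o, sizeof(l));  /* assumes little-endian layout */
    o += sizeof(l);
    /* Compare against the bytes left AFTER the int32: one subtype byte
     * plus l payload bytes must fit in len - o. */
    if (l < 0 || (uint64_t)l + 1 > len - o)
        return 0;
    *out_l = l;
    return 1;
}
```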
static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
{
BDRVBochsState *s = bs->opaque;
int64_t offset = sector_num * 512;
int64_t extent_index, extent_offset, bitmap_offset;
char bitmap_entry;
// seek to sector
extent_index = offset / s->extent_size;
extent_offset = (offset % s->extent_size) / 512;
if (s->catalog_bitmap[extent_index] == 0xffffffff) {
return -1; /* not allocated */
}
bitmap_offset = s->data_offset + (512 * s->catalog_bitmap[extent_index] *
(s->extent_blocks + s->bitmap_blocks));
/* read in bitmap for current extent */
if (bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8),
&bitmap_entry, 1) != 1) {
return -1;
}
if (!((bitmap_entry >> (extent_offset % 8)) & 1)) {
return -1; /* not allocated */
}
return bitmap_offset + (512 * (s->bitmap_blocks + extent_offset));
}
|
Vulnerable
|
[
"CWE-190"
] |
qemu
|
246f65838d19db6db55bfb41117c35645a2c4789
|
1.9587603955586135e+38
| 30 |
bochs: Use unsigned variables for offsets and sizes (CVE-2014-0147)
Gets us rid of integer overflows resulting in negative sizes which
aren't correctly checked.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
| 1 |
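This row is labeled Vulnerable, and the commit message explains why: with signed 64-bit arithmetic, a crafted sector_num can make `offset`, and hence `extent_index`, negative, turning the catalog lookup above into an out-of-bounds read. A schematic of the type change the fix relies on (names mirror the function above; the unsigned types are the point, and the actual fix also range-checks the result):

```c
#include <stdint.h>

static uint64_t extent_index_unsigned(uint64_t sector_num,
                                      uint64_t extent_size)
{
    uint64_t offset = sector_num * 512;  /* wraps, never goes negative */
    return offset / extent_size;         /* always non-negative, so a later
                                            bound check against the catalog
                                            size is meaningful */
}
```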
bool testSingleInterval(IndexBounds bounds) {
BSONObj startKey;
bool startKeyIn;
BSONObj endKey;
bool endKeyIn;
return IndexBoundsBuilder::isSingleInterval(bounds, &startKey, &startKeyIn, &endKey, &endKeyIn);
}
|
Safe
|
[
"CWE-754"
] |
mongo
|
f8f55e1825ee5c7bdb3208fc7c5b54321d172732
|
1.162197762714964e+38
| 7 |
SERVER-44377 generate correct plan for indexed inequalities to null
| 0 |
static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
int rc;
struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = NULL;
int connected = 0;
__be32 daddr;
if (sock_flag(sk, SOCK_DEAD))
return -ENOTCONN;
/* Get and verify the address. */
if (msg->msg_name) {
struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
if (msg->msg_namelen < sizeof(*lip))
return -EINVAL;
if (lip->l2tp_family != AF_INET) {
if (lip->l2tp_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = lip->l2tp_addr.s_addr;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
connected = 1;
}
/* Allocate a socket buffer */
rc = -ENOMEM;
skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
4 + len, 0, GFP_KERNEL);
if (!skb)
goto error;
/* Reserve space for headers, putting IP header on 4-byte boundary. */
skb_reserve(skb, 2 + NET_SKB_PAD);
skb_reset_network_header(skb);
skb_reserve(skb, sizeof(struct iphdr));
skb_reset_transport_header(skb);
/* Insert 0 session_id */
*((__be32 *) skb_put(skb, 4)) = 0;
/* Copy user data into skb */
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc < 0) {
kfree_skb(skb);
goto error;
}
if (connected)
rt = (struct rtable *) __sk_dst_check(sk, 0);
if (rt == NULL) {
struct ip_options_rcu *inet_opt;
inet_opt = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
/* Use correct destination address if we have options. */
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
/* If this fails, retransmit mechanism of transport layer will
* keep trying until route appears or the connection times
* itself out.
*/
rt = ip_route_output_ports(sock_net(sk), sk,
daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
sk_setup_caps(sk, &rt->dst);
}
skb_dst_set(skb, dst_clone(&rt->dst));
/* Queue the packet to IP for output */
rc = ip_queue_xmit(skb);
error:
/* Update stats */
if (rc >= 0) {
lsa->tx_packets++;
lsa->tx_bytes += len;
rc = len;
} else {
lsa->tx_errors++;
}
return rc;
no_route:
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EHOSTUNREACH;
}
|
Safe
|
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
|
2.891386433927776e+38
| 104 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
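The message above is about pointer-publication discipline: readers take one snapshot of inet->opt under RCU protection and never reuse the pointer after the read section ends. A userspace analogy using C11 atomics (the grace-period machinery that makes freeing the old copy safe is RCU-specific and not shown; all types here are invented):

```c
#include <stdatomic.h>
#include <stdio.h>

struct ip_opts { int srr; unsigned int faddr; };

static _Atomic(struct ip_opts *) current_opts;

static void reader(void)
{
    /* Load the pointer once (acquire pairs with the writer's release
     * store) and work only on that snapshot. */
    struct ip_opts *opt = atomic_load_explicit(&current_opts,
                                               memory_order_acquire);
    if (opt && opt->srr)
        printf("use faddr %u\n", opt->faddr);
    /* opt must not be dereferenced after the read section ends */
}
```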
static void cli_feat_write_cb(struct gatt_db_attribute *attrib,
unsigned int id, uint16_t offset,
const uint8_t *value, size_t len,
uint8_t opcode, struct bt_att *att,
void *user_data)
{
struct btd_gatt_database *database = user_data;
struct device_state *state;
uint8_t bits[] = { BT_GATT_CHRC_CLI_FEAT_ROBUST_CACHING,
BT_GATT_CHRC_CLI_FEAT_EATT,
BT_GATT_CHRC_CLI_FEAT_NFY_MULTI };
uint8_t ecode = 0;
unsigned int i;
DBG("Client Features write");
state = get_device_state(database, att);
if (!state) {
ecode = BT_ATT_ERROR_UNLIKELY;
goto done;
}
if (!value || !len) {
ecode = BT_ATT_ERROR_INVALID_ATTRIBUTE_VALUE_LEN;
goto done;
}
for (i = 0; i < sizeof(bits); i++) {
/* A client shall never clear a bit it has set */
if (state->cli_feat[0] & (1 << i) && !(value[0] & (1 << i))) {
ecode = BT_ATT_ERROR_VALUE_NOT_ALLOWED;
goto done;
}
}
/* Shall we reallocate the feat array if bigger? */
len = MIN(sizeof(state->cli_feat), len);
while (len) {
state->cli_feat[len - 1] |= value[len - 1];
len--;
}
state->cli_feat[0] &= ((1 << sizeof(bits)) - 1);
state->change_aware = true;
done:
gatt_db_attribute_write_result(attrib, id, ecode);
}
|
Safe
|
[
"CWE-416"
] |
bluez
|
838c0dc7641e1c991c0f3027bf94bee4606012f8
|
2.1658084373624955e+38
| 48 |
gatt: Fix not cleaning up when disconnected
There is a current use after free possible on a gatt server if a client
disconnects while a WriteValue call is being processed with dbus.
This patch includes the addition of a pending disconnect callback to handle
cleanup better if a disconnect occurs during a write, an acquire write
or read operation using bt_att_register_disconnect with the cb.
| 0 |
static ESS_SIGNING_CERT *ESS_SIGNING_CERT_new_init(X509 *signcert,
STACK_OF(X509) *certs)
{
ESS_CERT_ID *cid;
ESS_SIGNING_CERT *sc = NULL;
int i;
/* Creating the ESS_CERT_ID stack. */
if (!(sc = ESS_SIGNING_CERT_new())) goto err;
if (!sc->cert_ids && !(sc->cert_ids = sk_ESS_CERT_ID_new_null()))
goto err;
/* Adding the signing certificate id. */
if (!(cid = ESS_CERT_ID_new_init(signcert, 0))
|| !sk_ESS_CERT_ID_push(sc->cert_ids, cid))
goto err;
/* Adding the certificate chain ids. */
for (i = 0; i < sk_X509_num(certs); ++i)
{
X509 *cert = sk_X509_value(certs, i);
if (!(cid = ESS_CERT_ID_new_init(cert, 1))
|| !sk_ESS_CERT_ID_push(sc->cert_ids, cid))
goto err;
}
return sc;
err:
ESS_SIGNING_CERT_free(sc);
TSerr(TS_F_ESS_SIGNING_CERT_NEW_INIT, ERR_R_MALLOC_FAILURE);
return NULL;
}
|
Safe
|
[] |
openssl
|
c7235be6e36c4bef84594aa3b2f0561db84b63d8
|
1.8782676516583045e+38
| 31 |
RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <zglozik@opentsa.org>
Reviewed by: Ulf Moeller
| 0 |
LineBufferTask::LineBufferTask
(TaskGroup *group,
DeepScanLineInputFile::Data *ifd,
LineBuffer *lineBuffer,
int scanLineMin,
int scanLineMax)
:
Task (group),
_ifd (ifd),
_lineBuffer (lineBuffer),
_scanLineMin (scanLineMin),
_scanLineMax (scanLineMax)
{
// empty
}
|
Safe
|
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
|
3.3680773423022252e+38
| 15 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
| 0 |
static SDL_INLINE void BG_Blended_NEON(const TTF_Image *image, Uint32 *destination, Sint32 srcskip, Uint32 dstskip, Uint8 fg_alpha)
{
const Uint32 *src = (Uint32 *)image->buffer;
Uint32 *dst = destination;
Uint32 width = image->width / 16;
Uint32 height = image->rows;
uint32x4_t s, d0, d1, d2, d3, r0, r1, r2, r3;
uint16x8_t Ls8, Hs8;
uint8x16x2_t sx, sx01, sx23;
const uint16x8_t alpha = vmovq_n_u16(fg_alpha);
const uint16x8_t one = vmovq_n_u16(1);
const uint32x4_t zero = vmovq_n_u32(0);
while (height--) {
/* *INDENT-OFF* */
DUFFS_LOOP4(
/* Read 4 Uint32 and put 16 Uint8 into uint32x4x2_t (uint8x16x2_t)
* takes advantage of vzipq_u8 which produces two lanes */
s = vld1q_u32(src); // load
d0 = vld1q_u32(dst); // load
d1 = vld1q_u32(dst + 4); // load
d2 = vld1q_u32(dst + 8); // load
d3 = vld1q_u32(dst + 12); // load
sx = vzipq_u8(s, zero); // interleave, no shifting
// enough room to multiply
/* Apply: alpha_table[i] = ((i * fg.a / 255) << 24; */
/* Divide by 255 is done as: (x + 1 + (x >> 8)) >> 8 */
sx.val[0] = vmulq_u16(sx.val[0], alpha); // x := i * fg.a
sx.val[1] = vmulq_u16(sx.val[1], alpha);
Ls8 = vshrq_n_u16(sx.val[0], 8); // x >> 8
Hs8 = vshrq_n_u16(sx.val[1], 8);
sx.val[0] = vaddq_u16(sx.val[0], one); // x + 1
sx.val[1] = vaddq_u16(sx.val[1], one);
sx.val[0] = vaddq_u16(sx.val[0], Ls8); // x + 1 + (x >> 8)
sx.val[1] = vaddq_u16(sx.val[1], Hs8);
sx.val[0] = vshrq_n_u16(sx.val[0], 8); // ((x + 1 + (x >> 8)) >> 8
sx.val[1] = vshrq_n_u16(sx.val[1], 8);
sx.val[0] = vshlq_n_u16(sx.val[0], 8); // shift << 8, so we're prepared
sx.val[1] = vshlq_n_u16(sx.val[1], 8); // to have final format << 24
sx01 = vzipq_u8(zero, sx.val[0]); // interleave
sx23 = vzipq_u8(zero, sx.val[1]); // interleave
// already shifted by 24
r0 = vorrq_u32(d0, sx01.val[0]); // or
r1 = vorrq_u32(d1, sx01.val[1]); // or
r2 = vorrq_u32(d2, sx23.val[0]); // or
r3 = vorrq_u32(d3, sx23.val[1]); // or
vst1q_u32(dst, r0); // store
vst1q_u32(dst + 4, r1); // store
vst1q_u32(dst + 8, r2); // store
vst1q_u32(dst + 12, r3); // store
dst += 16;
src += 4;
, width);
/* *INDENT-ON* */
src = (const Uint32 *)((const Uint8 *)src + srcskip);
dst = (Uint32 *)((Uint8 *)dst + dstskip);
}
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
SDL_ttf
|
db1b41ab8bde6723c24b866e466cad78c2fa0448
|
4.332028012440323e+37
| 73 |
More integer overflow (see bug #187)
Make sure that 'width + alignment' doesn't overflow, otherwise
it could create a SDL_Surface of 'width' but with wrong 'pitch'
| 0 |
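The overflow the message warns about is the pre-addition kind: `width + alignment` must be validated before it is computed. A generic sketch of that check (names invented, not SDL_ttf's code):

```c
#include <limits.h>

static int padded_width(int width, int alignment, int *out)
{
    /* Check before adding: width + alignment must not exceed INT_MAX. */
    if (width < 0 || alignment < 0 || width > INT_MAX - alignment)
        return -1;   /* would overflow: refuse */
    *out = width + alignment;
    return 0;
}
```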
void CLASS apply_tiff()
{
int max_samp=0, raw=-1, thm=-1, i;
struct jhead jh;
thumb_misc = 16;
if (thumb_offset) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_misc = jh.bits;
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
for (i=0; i < (int) tiff_nifds; i++) {
if (max_samp < tiff_ifd[i].samples)
max_samp = tiff_ifd[i].samples;
if (max_samp > 3) max_samp = 3;
if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
(tiff_ifd[i].width | tiff_ifd[i].height) < 0x10000 &&
tiff_ifd[i].width*tiff_ifd[i].height > raw_width*raw_height) {
raw_width = tiff_ifd[i].width;
raw_height = tiff_ifd[i].height;
tiff_bps = tiff_ifd[i].bps;
tiff_compress = tiff_ifd[i].comp;
data_offset = tiff_ifd[i].offset;
tiff_flip = tiff_ifd[i].flip;
tiff_samples = tiff_ifd[i].samples;
raw = i;
}
}
for (i=tiff_nifds; i--; )
if (tiff_ifd[i].flip) tiff_flip = tiff_ifd[i].flip;
if (raw >= 0 && !load_raw)
switch (tiff_compress) {
case 0: case 1:
switch (tiff_bps) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 12: load_raw = &CLASS packed_load_raw;
if (tiff_ifd[raw].phint == 2)
load_flags = 6;
if (strncmp(make,"PENTAX",6)) break;
case 14:
case 16: load_raw = &CLASS unpacked_load_raw; break;
}
if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
load_flags = 81;
}
break;
case 6: case 7: case 99:
load_raw = &CLASS lossless_jpeg_load_raw; break;
case 262:
load_raw = &CLASS kodak_262_load_raw; break;
case 32767:
if (tiff_ifd[raw].bytes == raw_width*raw_height) {
tiff_bps = 12;
load_raw = &CLASS sony_arw2_load_raw; break;
}
if (tiff_ifd[raw].bytes*8 != (int)(raw_width*raw_height*tiff_bps)) {
raw_height += 8;
load_raw = &CLASS sony_arw_load_raw; break;
}
load_flags = 79;
case 32769:
load_flags++;
case 32770:
case 32773:
load_raw = &CLASS packed_load_raw; break;
case 34713:
load_raw = &CLASS nikon_compressed_load_raw; break;
case 65535:
load_raw = &CLASS pentax_load_raw; break;
case 65000:
switch (tiff_ifd[raw].phint) {
case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break;
case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break;
case 32803: load_raw = &CLASS kodak_65000_load_raw;
}
case 32867: break;
default: is_raw = 0;
}
if (!dng_version)
if ( (tiff_samples == 3 && tiff_ifd[raw].bytes &&
tiff_bps != 14 && tiff_bps != 2048)
|| (tiff_bps == 8 && !strstr(make,"KODAK") && !strstr(make,"Kodak") &&
!strstr(model2,"DEBUG RAW")))
is_raw = 0;
for (i=0; i < (int) tiff_nifds; i++)
if (i != raw && tiff_ifd[i].samples == max_samp &&
tiff_ifd[i].width * tiff_ifd[i].height / SQR(tiff_ifd[i].bps+1) >
(int)(thumb_width * thumb_height / SQR(thumb_misc+1))) {
thumb_width = tiff_ifd[i].width;
thumb_height = tiff_ifd[i].height;
thumb_offset = tiff_ifd[i].offset;
thumb_length = tiff_ifd[i].bytes;
thumb_misc = tiff_ifd[i].bps;
thm = i;
}
if (thm >= 0) {
thumb_misc |= tiff_ifd[thm].samples << 5;
switch (tiff_ifd[thm].comp) {
case 0:
write_thumb = &CLASS layer_thumb;
break;
case 1:
if (tiff_ifd[thm].bps > 8)
thumb_load_raw = &CLASS kodak_thumb_load_raw;
else
write_thumb = &CLASS ppm_thumb;
break;
case 65000:
thumb_load_raw = tiff_ifd[thm].phint == 6 ?
&CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
}
}
}
|
Safe
|
[
"CWE-189"
] |
rawstudio
|
983bda1f0fa5fa86884381208274198a620f006e
|
3.084595199477945e+38
| 118 |
Avoid overflow in ljpeg_start().
| 0 |
static void vgacon_save_screen(struct vc_data *c)
{
static int vga_bootup_console = 0;
if (!vga_bootup_console) {
/* This is a gross hack, but here is the only place we can
* set bootup console parameters without messing up generic
* console initialization routines.
*/
vga_bootup_console = 1;
c->state.x = screen_info.orig_x;
c->state.y = screen_info.orig_y;
}
/* We can't copy in more than the size of the video buffer,
* or we'll be copying in VGA BIOS */
if (!vga_is_gfx)
scr_memcpyw((u16 *) c->vc_screenbuf, (u16 *) c->vc_origin,
c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size);
}
|
Safe
|
[
"CWE-125"
] |
linux
|
973c096f6a85e5b5f2a295126ba6928d9a6afd45
|
1.1960209487146733e+38
| 21 |
vgacon: remove software scrollback support
Yunhai Zhang recently fixed a VGA software scrollback bug in commit
ebfdfeeae8c0 ("vgacon: Fix for missing check in scrollback handling"),
but that then made people look more closely at some of this code, and
there were more problems on the vgacon side, but also the fbcon software
scrollback.
We don't really have anybody who maintains this code - probably because
nobody actually _uses_ it any more. Sure, people still use both VGA and
the framebuffer consoles, but they are no longer the main user
interfaces to the kernel, and haven't been for decades, so these kinds
of extra features end up bitrotting and not really being used.
So rather than try to maintain a likely unused set of code, I'll just
aggressively remove it, and see if anybody even notices. Maybe there
are people who haven't jumped on the whole GUI bandwagon yet, and think
it's just a fad. And maybe those people use the scrollback code.
If that turns out to be the case, we can resurrect this again, once
we've found the sucker^Wmaintainer for it who actually uses it.
Reported-by: NopNop Nop <nopitydays@gmail.com>
Tested-by: Willy Tarreau <w@1wt.eu>
Cc: 张云海 <zhangyunhai@nsfocus.com>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Acked-by: Willy Tarreau <w@1wt.eu>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
void *netdev_adjacent_get_private(struct list_head *adj_list)
{
struct netdev_adjacent *adj;
adj = list_entry(adj_list, struct netdev_adjacent, list);
return adj->private;
}
|
Safe
|
[
"CWE-400",
"CWE-703"
] |
linux
|
fac8e0f579695a3ecbc4d3cac369139d7f819971
|
1.1153664825577547e+38
| 8 |
tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <jesse@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
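The generalized guard the message describes amounts to a per-packet flag that the first tunnel layer sets and any further layer refuses; the kernel keeps it in its GRO control block as encap_mark. The sketch below shows only the flag discipline, with invented surrounding types:

```c
struct gro_state { unsigned char encap_mark; };

static int tunnel_gro_receive(struct gro_state *st)
{
    if (st->encap_mark)
        return -1;          /* already inside a tunnel: don't aggregate */
    st->encap_mark = 1;     /* first (and only allowed) encapsulation layer */
    return 0;
}
```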
unsigned long get_pmcs_ext_regs(int idx)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
return cpuhw->pmcs[idx];
}
|
Safe
|
[
"CWE-476"
] |
linux
|
60b7ed54a41b550d50caf7f2418db4a7e75b5bdc
|
3.929467460836104e+37
| 6 |
powerpc/perf: Fix crash in perf_instruction_pointer() when ppmu is not set
On systems without any specific PMU driver support registered, running
perf record causes Oops.
The relevant portion from call trace:
BUG: Kernel NULL pointer dereference on read at 0x00000040
Faulting instruction address: 0xc0021f0c
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K PREEMPT CMPCPRO
SAF3000 DIE NOTIFICATION
CPU: 0 PID: 442 Comm: null_syscall Not tainted 5.13.0-rc6-s3k-dev-01645-g7649ee3d2957 #5164
NIP: c0021f0c LR: c00e8ad8 CTR: c00d8a5c
NIP perf_instruction_pointer+0x10/0x60
LR perf_prepare_sample+0x344/0x674
Call Trace:
perf_prepare_sample+0x7c/0x674 (unreliable)
perf_event_output_forward+0x3c/0x94
__perf_event_overflow+0x74/0x14c
perf_swevent_hrtimer+0xf8/0x170
__hrtimer_run_queues.constprop.0+0x160/0x318
hrtimer_interrupt+0x148/0x3b0
timer_interrupt+0xc4/0x22c
Decrementer_virt+0xb8/0xbc
During perf record session, perf_instruction_pointer() is called to
capture the sample IP. This function in core-book3s accesses
ppmu->flags. If a platform specific PMU driver is not registered, ppmu
is set to NULL and accessing its members results in a crash. Fix this
crash by checking if ppmu is set.
Fixes: 2ca13a4cc56c ("powerpc/perf: Use regs->nip when SIAR is zero")
Cc: stable@vger.kernel.org # v5.11+
Reported-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Tested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1623952506-1431-1-git-send-email-atrajeev@linux.vnet.ibm.com
| 0 |
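As the last paragraph says, the fix is a guard on the global PMU descriptor before dereferencing it. A schematic version with invented types:

```c
struct pmu_desc { unsigned long flags; };

static struct pmu_desc *ppmu;   /* NULL when no PMU driver registered */

static unsigned long pmu_flags_or_default(void)
{
    if (!ppmu)          /* no platform PMU driver: avoid the NULL deref */
        return 0;
    return ppmu->flags;
}
```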
static int ipip_err(struct sk_buff *skb, u32 info)
{
/* All the routers (except for Linux) return only
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
*/
struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
int err;
switch (type) {
default:
case ICMP_PARAMETERPROB:
return 0;
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
case ICMP_FRAG_NEEDED:
/* Soft state for pmtu is maintained by IP core. */
return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
I believe they are just ether pollution. --ANK
*/
break;
}
break;
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
return 0;
break;
}
err = -ENOENT;
rcu_read_lock();
t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
if (t == NULL || t->parms.iph.daddr == 0)
goto out;
err = 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
out:
rcu_read_unlock();
return err;
}
|
Safe
|
[] |
linux-2.6
|
d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
|
2.155155866472493e+38
| 61 |
tunnels: fix netns vs proto registration ordering
Same stuff as in ip_gre patch: receive hook can be called before netns
setup is done, oopsing in net_generic().
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
void WebContents::OnPaint(const gfx::Rect& dirty_rect, const SkBitmap& bitmap) {
Emit("paint", dirty_rect, gfx::Image::CreateFrom1xBitmap(bitmap));
}
|
Safe
|
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
|
2.2059752817431053e+38
| 3 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <samuel.r.attard@gmail.com>
Co-authored-by: Samuel Attard <sattard@salesforce.com>
| 0 |
JOIN::prepare(TABLE_LIST *tables_init,
uint wild_num, COND *conds_init, uint og_num,
ORDER *order_init, bool skip_order_by,
ORDER *group_init, Item *having_init,
ORDER *proc_param_init, SELECT_LEX *select_lex_arg,
SELECT_LEX_UNIT *unit_arg)
{
DBUG_ENTER("JOIN::prepare");
// to prevent double initialization on EXPLAIN
if (optimization_state != JOIN::NOT_OPTIMIZED)
DBUG_RETURN(0);
conds= conds_init;
order= order_init;
group_list= group_init;
having= having_init;
proc_param= proc_param_init;
tables_list= tables_init;
select_lex= select_lex_arg;
select_lex->join= this;
join_list= &select_lex->top_join_list;
union_part= unit_arg->is_unit_op();
// simple check that we got usable conds
dbug_print_item(conds);
if (select_lex->handle_derived(thd->lex, DT_PREPARE))
DBUG_RETURN(-1);
thd->lex->current_select->context_analysis_place= NO_MATTER;
thd->lex->current_select->is_item_list_lookup= 1;
/*
If we have already executed SELECT, then it have not sense to prevent
its table from update (see unique_table())
Affects only materialized derived tables.
*/
/* Check that all tables, fields, conds and order are ok */
if (!(select_options & OPTION_SETUP_TABLES_DONE) &&
setup_tables_and_check_access(thd, &select_lex->context, join_list,
tables_list, select_lex->leaf_tables,
FALSE, SELECT_ACL, SELECT_ACL, FALSE))
DBUG_RETURN(-1);
/* System Versioning: handle FOR SYSTEM_TIME clause. */
if (select_lex->vers_setup_conds(thd, tables_list) < 0)
DBUG_RETURN(-1);
/*
TRUE if the SELECT list mixes elements with and without grouping,
and there is no GROUP BY clause. Mixing non-aggregated fields with
aggregate functions in the SELECT list is a MySQL extenstion that
is allowed only if the ONLY_FULL_GROUP_BY sql mode is not set.
*/
mixed_implicit_grouping= false;
if ((~thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) &&
select_lex->with_sum_func && !group_list)
{
List_iterator_fast <Item> select_it(fields_list);
Item *select_el; /* Element of the SELECT clause, can be an expression. */
bool found_field_elem= false;
bool found_sum_func_elem= false;
while ((select_el= select_it++))
{
if (select_el->with_sum_func)
found_sum_func_elem= true;
if (select_el->with_field)
found_field_elem= true;
if (found_sum_func_elem && found_field_elem)
{
mixed_implicit_grouping= true;
break;
}
}
}
table_count= select_lex->leaf_tables.elements;
TABLE_LIST *tbl;
List_iterator_fast<TABLE_LIST> li(select_lex->leaf_tables);
while ((tbl= li++))
{
/*
If the query uses implicit grouping where the select list contains both
aggregate functions and non-aggregate fields, any non-aggregated field
may produce a NULL value. Set all fields of each table as nullable before
semantic analysis to take into account this change of nullability.
Note: this loop doesn't touch tables inside merged semi-joins, because
subquery-to-semijoin conversion has not been done yet. This is intended.
*/
if (mixed_implicit_grouping && tbl->table)
tbl->table->maybe_null= 1;
}
uint real_og_num= og_num;
if (skip_order_by &&
select_lex != select_lex->master_unit()->global_parameters())
real_og_num+= select_lex->order_list.elements;
DBUG_ASSERT(select_lex->hidden_bit_fields == 0);
if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num,
&select_lex->hidden_bit_fields))
DBUG_RETURN(-1);
if (select_lex->setup_ref_array(thd, real_og_num))
DBUG_RETURN(-1);
ref_ptrs= ref_ptr_array_slice(0);
enum_parsing_place save_place=
thd->lex->current_select->context_analysis_place;
thd->lex->current_select->context_analysis_place= SELECT_LIST;
if (setup_fields(thd, ref_ptrs, fields_list, MARK_COLUMNS_READ,
&all_fields, &select_lex->pre_fix, 1))
DBUG_RETURN(-1);
thd->lex->current_select->context_analysis_place= save_place;
if (setup_without_group(thd, ref_ptrs, tables_list,
select_lex->leaf_tables, fields_list,
all_fields, &conds, order, group_list,
select_lex->window_specs,
select_lex->window_funcs,
&hidden_group_fields,
&select_lex->select_n_reserved))
DBUG_RETURN(-1);
/*
Permanently remove redundant parts from the query if
1) This is a subquery
2) This is the first time this query is optimized (since the
transformation is permanent
3) Not normalizing a view. Removal should take place when a
query involving a view is optimized, not when the view
is created
*/
if (select_lex->master_unit()->item && // 1)
select_lex->first_cond_optimization && // 2)
!thd->lex->is_view_context_analysis()) // 3)
{
remove_redundant_subquery_clauses(select_lex);
}
/* Resolve the ORDER BY that was skipped, then remove it. */
if (skip_order_by && select_lex !=
select_lex->master_unit()->global_parameters())
{
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
thd->lex->allow_sum_func.set_bit(select_lex->nest_level);
thd->where= "order clause";
for (ORDER *order= select_lex->order_list.first; order; order= order->next)
{
/* Don't add the order items to all fields. Just resolve them to ensure
the query is valid, we'll drop them immediately after. */
if (find_order_in_list(thd, ref_ptrs, tables_list, order,
fields_list, all_fields, false, false, false))
DBUG_RETURN(-1);
}
thd->lex->allow_sum_func= save_allow_sum_func;
select_lex->order_list.empty();
}
if (having)
{
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
thd->where="having clause";
thd->lex->allow_sum_func.set_bit(select_lex_arg->nest_level);
select_lex->having_fix_field= 1;
/*
Wrap alone field in HAVING clause in case it will be outer field
of subquery which need persistent pointer on it, but having
could be changed by optimizer
*/
if (having->type() == Item::REF_ITEM &&
((Item_ref *)having)->ref_type() == Item_ref::REF)
wrap_ident(thd, &having);
bool having_fix_rc= having->fix_fields_if_needed_for_bool(thd, &having);
select_lex->having_fix_field= 0;
if (unlikely(having_fix_rc || thd->is_error()))
DBUG_RETURN(-1); /* purecov: inspected */
thd->lex->allow_sum_func= save_allow_sum_func;
if (having->with_window_func)
{
my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0));
DBUG_RETURN(-1);
}
}
/*
After setting up window functions, we may have discovered additional
used tables from the PARTITION BY and ORDER BY list. Update all items
that contain window functions.
*/
if (select_lex->have_window_funcs())
{
List_iterator_fast<Item> it(select_lex->item_list);
Item *item;
while ((item= it++))
{
if (item->with_window_func)
item->update_used_tables();
}
}
With_clause *with_clause=select_lex->get_with_clause();
if (with_clause && with_clause->prepare_unreferenced_elements(thd))
DBUG_RETURN(1);
With_element *with_elem= select_lex->get_with_element();
if (with_elem &&
select_lex->check_unrestricted_recursive(
thd->variables.only_standard_compliant_cte))
DBUG_RETURN(-1);
if (!(select_lex->changed_elements & TOUCHED_SEL_COND))
select_lex->check_subqueries_with_recursive_references();
int res= check_and_do_in_subquery_rewrites(this);
select_lex->fix_prepare_information(thd, &conds, &having);
if (res)
DBUG_RETURN(res);
if (order)
{
bool real_order= FALSE;
ORDER *ord;
for (ord= order; ord; ord= ord->next)
{
Item *item= *ord->item;
/*
Disregard sort order if there's only
zero length NOT NULL fields (e.g. {VAR}CHAR(0) NOT NULL") or
zero length NOT NULL string functions there.
Such tuples don't contain any data to sort.
*/
if (!real_order &&
/* Not a zero length NOT NULL field */
((item->type() != Item::FIELD_ITEM ||
((Item_field *) item)->field->maybe_null() ||
((Item_field *) item)->field->sort_length()) &&
/* AND not a zero length NOT NULL string function. */
(item->type() != Item::FUNC_ITEM ||
item->maybe_null ||
item->result_type() != STRING_RESULT ||
item->max_length)))
real_order= TRUE;
if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) ||
item->with_window_func)
item->split_sum_func(thd, ref_ptrs, all_fields, SPLIT_SUM_SELECT);
}
if (!real_order)
order= NULL;
}
if (having && having->with_sum_func)
having->split_sum_func2(thd, ref_ptrs, all_fields,
&having, SPLIT_SUM_SKIP_REGISTERED);
if (select_lex->inner_sum_func_list)
{
Item_sum *end=select_lex->inner_sum_func_list;
Item_sum *item_sum= end;
do
{
item_sum= item_sum->next;
item_sum->split_sum_func2(thd, ref_ptrs,
all_fields, item_sum->ref_by, 0);
} while (item_sum != end);
}
if (select_lex->inner_refs_list.elements &&
fix_inner_refs(thd, all_fields, select_lex, ref_ptrs))
DBUG_RETURN(-1);
if (group_list)
{
/*
Because HEAP tables can't index BIT fields we need to use an
additional hidden field for grouping because later it will be
converted to a LONG field. Original field will remain of the
BIT type and will be returned to a client.
*/
for (ORDER *ord= group_list; ord; ord= ord->next)
{
if ((*ord->item)->type() == Item::FIELD_ITEM &&
(*ord->item)->field_type() == MYSQL_TYPE_BIT)
{
Item_field *field= new (thd->mem_root) Item_field(thd, *(Item_field**)ord->item);
if (!field)
DBUG_RETURN(-1);
int el= all_fields.elements;
ref_ptrs[el]= field;
all_fields.push_front(field, thd->mem_root);
ord->item= &ref_ptrs[el];
}
}
}
/*
Check if there are references to un-aggregated columns when computing
aggregate functions with implicit grouping (there is no GROUP BY).
*/
if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY && !group_list &&
!(select_lex->master_unit()->item &&
select_lex->master_unit()->item->is_in_predicate() &&
((Item_in_subselect*)select_lex->master_unit()->item)->
test_set_strategy(SUBS_MAXMIN_INJECTED)) &&
select_lex->non_agg_field_used() &&
select_lex->agg_func_used())
{
my_message(ER_MIX_OF_GROUP_FUNC_AND_FIELDS,
ER_THD(thd, ER_MIX_OF_GROUP_FUNC_AND_FIELDS), MYF(0));
DBUG_RETURN(-1);
}
{
/* Caclulate the number of groups */
send_group_parts= 0;
for (ORDER *group_tmp= group_list ; group_tmp ; group_tmp= group_tmp->next)
send_group_parts++;
}
procedure= setup_procedure(thd, proc_param, result, fields_list, &error);
if (unlikely(error))
goto err; /* purecov: inspected */
if (procedure)
{
if (setup_new_fields(thd, fields_list, all_fields,
procedure->param_fields))
goto err; /* purecov: inspected */
if (procedure->group)
{
if (!test_if_subpart(procedure->group,group_list))
{ /* purecov: inspected */
my_message(ER_DIFF_GROUPS_PROC, ER_THD(thd, ER_DIFF_GROUPS_PROC),
MYF(0)); /* purecov: inspected */
goto err; /* purecov: inspected */
}
}
if (order && (procedure->flags & PROC_NO_SORT))
{ /* purecov: inspected */
my_message(ER_ORDER_WITH_PROC, ER_THD(thd, ER_ORDER_WITH_PROC),
MYF(0)); /* purecov: inspected */
goto err; /* purecov: inspected */
}
if (thd->lex->derived_tables)
{
/*
Queries with derived tables and PROCEDURE are not allowed.
Many of such queries are disallowed grammatically, but there
are still some complex cases:
SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE()
*/
my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE",
thd->lex->derived_tables & DERIVED_VIEW ?
"view" : "subquery");
goto err;
}
if (thd->lex->sql_command != SQLCOM_SELECT)
{
// EXPLAIN SELECT * FROM t1 PROCEDURE ANALYSE()
my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "non-SELECT");
goto err;
}
}
if (!procedure && result && result->prepare(fields_list, unit_arg))
goto err; /* purecov: inspected */
unit= unit_arg;
if (prepare_stage2())
goto err;
DBUG_RETURN(0); // All OK
err:
delete procedure; /* purecov: inspected */
procedure= 0;
DBUG_RETURN(-1); /* purecov: inspected */
}
|
Safe
|
[] |
server
|
ff77a09bda884fe6bf3917eb29b9d3a2f53f919b
|
2.9342393720580764e+38
| 382 |
MDEV-22464 Server crash on UPDATE with nested subquery
Uninitialized ref_pointer_array[] because setup_fields() got empty
fields list. mysql_multi_update() for some reason does that by
substituting the fields list with empty total_list for the
mysql_select() call (looks like wrong merge since total_list is not
used anywhere else and is always empty). The fix would be to return
back the original fields list. But this fails update_use_source.test
case:
--error ER_BAD_FIELD_ERROR
update v1 set t1c1=2 order by 1;
Actually not failing the above seems to be ok.
The other fix would be to keep resolve_in_select_list false (and that
keeps outer context from being resolved in
Item_ref::fix_fields()). This fix is more consistent with how SELECT
behaves:
--error ER_SUBQUERY_NO_1_ROW
select a from t1 where a= (select 2 from t1 having (a = 3));
So this patch implements this fix.
| 0 |
static int swevent_hlist_get_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
int err = 0;
mutex_lock(&swhash->hlist_mutex);
if (!swevent_hlist_deref(swhash) &&
cpumask_test_cpu(cpu, perf_online_mask)) {
struct swevent_hlist *hlist;
hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
if (!hlist) {
err = -ENOMEM;
goto exit;
}
rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
swhash->hlist_refcount++;
exit:
mutex_unlock(&swhash->hlist_mutex);
return err;
}
|
Safe
|
[
"CWE-401"
] |
tip
|
7bdb157cdebbf95a1cd94ed2e01b338714075d00
|
2.1859395494708033e+37
| 23 |
perf/core: Fix a memory leak in perf_event_parse_addr_filter()
As shown through runtime testing, the "filename" allocation is not
always freed in perf_event_parse_addr_filter().
There are three possible ways that this could happen:
- It could be allocated twice on subsequent iterations through the loop,
- or leaked on the success path,
- or on the failure path.
Clean up the code flow to make it obvious that 'filename' is always
freed in the reallocation path and in the two return paths as well.
We rely on the fact that kfree(NULL) is NOP and filename is initialized
with NULL.
This fixes the leak. No other side effects expected.
[ Dan Carpenter: cleaned up the code flow & added a changelog. ]
[ Ingo Molnar: updated the changelog some more. ]
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Signed-off-by: "kiyin(尹亮)" <kiyin@tencent.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: "Srivatsa S. Bhat" <srivatsa@csail.mit.edu>
Cc: Anthony Liguori <aliguori@amazon.com>
--
kernel/events/core.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
| 0 |
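The cleanup discipline described above (NULL initialization, a free before each reallocation inside the loop, and one free on every exit path, relying on kfree(NULL) being a no-op) looks roughly like this in plain C; the parsing details are invented:

```c
#include <stdlib.h>
#include <string.h>

static int parse_filters(const char *const *tokens, size_t n)
{
    char *filename = NULL;
    int ret = -1;

    for (size_t i = 0; i < n; i++) {
        free(filename);                 /* no-op on the first iteration */
        filename = strdup(tokens[i]);   /* strdup is POSIX */
        if (!filename)
            goto out;
        /* ... use filename ... */
    }
    ret = 0;
out:
    free(filename);                     /* single free on both paths */
    return ret;
}
```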
TEST(FormatterTest, FormatStringView) {
EXPECT_EQ("test", format("{0}", std::string_view("test")));
}
|
Safe
|
[
"CWE-134",
"CWE-119",
"CWE-787"
] |
fmt
|
8cf30aa2be256eba07bb1cefb998c52326e846e7
|
1.3955360946070435e+38
| 3 |
Fix segfault on complex pointer formatting (#642)
| 0 |
void MirrorJob::JobFinished(Job *j)
{
if(j->ExitCode()!=0)
stats.error_count++;
RemoveWaiting(j);
Delete(j);
assert(transfer_count>0);
transfer_count--;
}
|
Safe
|
[
"CWE-20",
"CWE-401"
] |
lftp
|
a27e07d90a4608ceaf928b1babb27d4d803e1992
|
2.4814796306876867e+38
| 9 |
mirror: prepend ./ to rm and chmod arguments to avoid URL recognition (fix #452)
| 0 |
http_session_ref (http_session_t sess)
{
if (sess)
{
sess->refcount++;
if (opt_debug > 1)
log_debug ("http.c:session_ref: sess %p ref now %d\n",
sess, sess->refcount);
}
return sess;
}
|
Safe
|
[
"CWE-352"
] |
gnupg
|
4a4bb874f63741026bd26264c43bb32b1099f060
|
1.0968226139227766e+38
| 11 |
dirmngr: Avoid possible CSRF attacks via http redirects.
* dirmngr/http.h (parsed_uri_s): Add fields off_host and off_path.
(http_redir_info_t): New.
* dirmngr/http.c (do_parse_uri): Set new fields.
(same_host_p): New.
(http_prepare_redirect): New.
* dirmngr/t-http-basic.c: New test.
* dirmngr/ks-engine-hkp.c (send_request): Use http_prepare_redirect
instead of the open code.
* dirmngr/ks-engine-http.c (ks_http_fetch): Ditto.
--
With this change a http query will not follow a redirect unless the
Location header gives the same host. If the host is different only
the host and port is taken from the Location header and the original
path and query parts are kept.
Signed-off-by: Werner Koch <wk@gnupg.org>
(cherry picked from commit fa1b1eaa4241ff3f0634c8bdf8591cbc7c464144)
| 0 |
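The policy in the message, follow a redirect verbatim only when the Location header names the same host, needs a host comparison at its core. A rough standalone sketch; the toy URL parsing and helper names are invented, not dirmngr's same_host_p():

```c
#include <string.h>

/* Extract "host" from "scheme://host[:port]/path"; returns 0 on success. */
static int url_host(const char *url, char *buf, size_t buflen)
{
    const char *p = strstr(url, "://");
    if (!p)
        return -1;
    p += 3;
    size_t n = strcspn(p, "/:");
    if (n + 1 > buflen)
        return -1;
    memcpy(buf, p, n);
    buf[n] = '\0';
    return 0;
}

static int same_host(const char *orig_url, const char *location)
{
    char a[256], b[256];
    if (url_host(orig_url, a, sizeof a) || url_host(location, b, sizeof b))
        return 0;
    return strcmp(a, b) == 0;   /* only then follow the redirect verbatim */
}
```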
static int cx23888_ir_tx_g_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx23888_ir_state *state = to_state(sd);
mutex_lock(&state->tx_params_lock);
memcpy(p, &state->tx_params, sizeof(struct v4l2_subdev_ir_parameters));
mutex_unlock(&state->tx_params_lock);
return 0;
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
a7b2df76b42bdd026e3106cf2ba97db41345a177
|
6.001862792003734e+37
| 9 |
media: rc: prevent memory leak in cx23888_ir_probe
In cx23888_ir_probe if kfifo_alloc fails the allocated memory for state
should be released.
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Signed-off-by: Sean Young <sean@mess.org>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
| 0 |
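The leak pattern named above, an early allocation not released when a later allocation fails during probe, has this generic shape (all names are placeholders; malloc stands in for kfifo_alloc):

```c
#include <stdlib.h>

struct state { void *fifo; };

static int probe_like(struct state **out)
{
    struct state *st = calloc(1, sizeof(*st));
    if (!st)
        return -1;
    st->fifo = malloc(4096);        /* stand-in for kfifo_alloc() */
    if (!st->fifo) {
        free(st);                   /* the missing release the commit adds */
        return -1;
    }
    *out = st;
    return 0;
}
```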
static irqreturn_t wanxl_intr(int irq, void* dev_id)
{
card_t *card = dev_id;
int i;
u32 stat;
int handled = 0;
while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
handled = 1;
writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
for (i = 0; i < card->n_ports; i++) {
if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
wanxl_tx_intr(&card->ports[i]);
if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
wanxl_cable_intr(&card->ports[i]);
}
if (stat & (1 << DOORBELL_FROM_CARD_RX))
wanxl_rx_intr(card);
}
return IRQ_RETVAL(handled);
}
|
Safe
|
[
"CWE-399"
] |
linux
|
2b13d06c9584b4eb773f1e80bbaedab9a1c344e1
|
2.528573194124762e+38
| 24 |
wanxl: fix info leak in ioctl
The wanxl_ioctl() code fails to initialize the two padding bytes of
struct sync_serial_settings after the ->loopback member. Add an explicit
memset(0) before filling the structure to avoid the info leak.
Signed-off-by: Salva Peiró <speiro@ai2.upv.es>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
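The fix described above zeroes the whole structure before filling it, so compiler-inserted padding bytes never carry stale kernel memory out to userspace. A minimal illustration with an invented struct:

```c
#include <string.h>

struct sync_settings {
    unsigned int  clock_rate;
    unsigned char loopback;
    /* padding bytes typically follow here */
};

static void fill_settings(struct sync_settings *s, unsigned int rate)
{
    memset(s, 0, sizeof(*s));   /* clears padding that member-by-member
                                   assignment would leave untouched */
    s->clock_rate = rate;
    s->loopback = 0;
}
```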
static Variant HHVM_FUNCTION(zip_read, const Resource& zip) {
auto zipDir = cast<ZipDirectory>(zip);
FAIL_IF_INVALID_ZIPDIRECTORY(zip_read, zipDir);
return zipDir->nextFile();
}
|
Safe
|
[
"CWE-22"
] |
hhvm
|
65c95a01541dd2fbc9c978ac53bed235b5376686
|
3.2390699756305307e+38
| 7 |
ZipArchive::extractTo bug 70350
Summary: Don't allow upward directory traversal when extracting zip archive files.
Files in zip files with `..` or starting at main root `/` should be normalized
to something where the file being extracted winds up within the directory or
a subdirectory where the actual extraction is taking place.
http://git.php.net/?p=php-src.git;a=commit;h=f9c2bf73adb2ede0a486b0db466c264f2b27e0bb
Reviewed By: FBNeal
Differential Revision: D2798452
fb-gh-sync-id: 844549c93e011d1e991bb322bf85822246b04e30
shipit-source-id: 844549c93e011d1e991bb322bf85822246b04e30
| 0 |
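The normalization requirement in the summary boils down to refusing entry names that escape the extraction directory: absolute paths and `..` path components. A deliberately simplified guard (a real implementation would canonicalize the joined path instead of just rejecting):

```c
#include <string.h>

static int entry_name_is_safe(const char *name)
{
    if (name[0] == '/')
        return 0;                            /* absolute path: reject */
    for (const char *p = name; (p = strstr(p, "..")) != NULL; p += 2) {
        int at_start = (p == name) || p[-1] == '/';
        int at_end   = p[2] == '\0' || p[2] == '/';
        if (at_start && at_end)
            return 0;                        /* ".." component: reject */
    }
    return 1;
}
```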
static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
int dr, dr7, reg;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
/* First, if DR does not exist, trigger UD */
if (!kvm_require_dr(vcpu, dr))
return 1;
/* Do not handle if the CPL > 0, will trigger GP on re-entry */
if (!kvm_require_cpl(vcpu, 0))
return 1;
dr7 = vmcs_readl(GUEST_DR7);
if (dr7 & DR7_GD) {
/*
* As the vm-exit takes precedence over the debug trap, we
* need to emulate the latter, either for the host or the
* guest debugging itself.
*/
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
vcpu->run->debug.arch.dr7 = dr7;
vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
vcpu->run->debug.arch.exception = DB_VECTOR;
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
return 0;
} else {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
kvm_queue_exception(vcpu, DB_VECTOR);
return 1;
}
}
if (vcpu->guest_debug == 0) {
u32 cpu_based_vm_exec_control;
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
/*
* No more DR vmexits; force a reload of the debug registers
* and reenter on this instruction. The next vmexit will
* retrieve the full state of the debug registers.
*/
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
return 1;
}
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
unsigned long val;
if (kvm_get_dr(vcpu, dr, &val))
return 1;
kvm_register_write(vcpu, reg, val);
} else
if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
return 1;
skip_emulated_instruction(vcpu);
return 1;
}
|
Safe
|
[
"CWE-399"
] |
linux
|
54a20552e1eae07aa240fa370a0293e006b5faed
|
1.0376311601613688e+38
| 67 |
KVM: x86: work around infinite loop in microcode when #AC is delivered
It was found that a guest can DoS a host by triggering an infinite
stream of "alignment check" (#AC) exceptions. This causes the
microcode to enter an infinite loop where the core never receives
another interrupt. The host kernel panics pretty quickly due to the
effects (CVE-2015-5307).
Signed-off-by: Eric Northup <digitaleric@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
static void bnx2x_link_attn(struct bnx2x *bp)
{
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
bnx2x_link_update(&bp->link_params, &bp->link_vars);
bnx2x_init_dropless_fc(bp);
if (bp->link_vars.link_up) {
if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
struct host_port_stats *pstats;
pstats = bnx2x_sp(bp, port_stats);
/* reset old mac stats */
memset(&(pstats->mac_stx[0]), 0,
sizeof(struct mac_stx));
}
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
if (bp->link_vars.link_up && bp->link_vars.line_speed)
bnx2x_set_local_cmng(bp);
__bnx2x_link_report(bp);
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
}
|
Safe
|
[
"CWE-20"
] |
linux
|
8914a595110a6eca69a5e275b323f5d09e18f4f9
|
2.373183536766669e+37
| 31 |
bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <dja@axtens.net>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
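Conceptually, the check the message describes compares the largest frame a GSO segment would produce against the firmware's limit and disables the offload when it doesn't fit. A schematic version (the 9700-byte figure comes from the message; names and the exact formula are illustrative, not the driver's code):

```c
#define HW_MAX_PKT 9700  /* firmware limit quoted in the message */

/* Headers plus one MSS of payload must fit the firmware's maximum frame;
 * otherwise the caller should clear the GSO feature bits for this skb. */
static int gso_fits_hw(unsigned int header_len, unsigned int gso_size)
{
    return header_len + gso_size <= HW_MAX_PKT;
}
```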
static int stv06xx_init_controls(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
gspca_dbg(gspca_dev, D_PROBE, "Initializing controls\n");
gspca_dev->vdev.ctrl_handler = &gspca_dev->ctrl_handler;
return sd->sensor->init_controls(sd);
}
|
Safe
|
[
"CWE-476"
] |
linux
|
485b06aadb933190f4bc44e006076bc27a23f205
|
2.845063147808532e+38
| 9 |
media: stv06xx: add missing descriptor sanity checks
Make sure to check that we have two alternate settings and at least one
endpoint before accessing the second altsetting structure and
dereferencing the endpoint arrays.
This specifically avoids dereferencing NULL-pointers or corrupting
memory when a device does not have the expected descriptors.
Note that the sanity checks in stv06xx_start() and pb0100_start() are
not redundant as the driver is mixing looking up altsettings by index
and by number, which may not coincide.
Fixes: 8668d504d72c ("V4L/DVB (12082): gspca_stv06xx: Add support for st6422 bridge and sensor")
Fixes: c0b33bdc5b8d ("[media] gspca-stv06xx: support bandwidth changing")
Cc: stable <stable@vger.kernel.org> # 2.6.31
Cc: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Johan Hovold <johan@kernel.org>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
| 0 |
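The sanity checks the message calls for verify descriptor counts before any array access. A schematic with simplified stand-ins for the USB descriptor structures:

```c
struct alt  { int num_endpoints; };
struct intf { int num_altsetting; struct alt *altsetting; };

static int descriptors_ok(const struct intf *i)
{
    if (i->num_altsetting < 2)
        return 0;                   /* second altsetting missing */
    if (i->altsetting[1].num_endpoints < 1)
        return 0;                   /* endpoint array would be empty */
    return 1;
}
```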
static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
{
struct keyring_list *klist;
int loop;
klist = container_of(rcu, struct keyring_list, rcu);
for (loop = klist->nkeys - 1; loop >= 0; loop--)
key_put(klist->keys[loop]);
kfree(klist);
} /* end keyring_clear_rcu_disposal() */
|
Safe
|
[
"CWE-362"
] |
linux-2.6
|
cea7daa3589d6b550546a8c8963599f7c1a3ae5c
|
2.3785938147841914e+38
| 13 |
KEYS: find_keyring_by_name() can gain access to a freed keyring
find_keyring_by_name() can gain access to a keyring that has had its reference
count reduced to zero, and is thus ready to be freed. This then allows the
dead keyring to be brought back into use whilst it is being destroyed.
The following timeline illustrates the process:
|(cleaner) (user)
|
| free_user(user) sys_keyctl()
| | |
| key_put(user->session_keyring) keyctl_get_keyring_ID()
| || //=> keyring->usage = 0 |
| |schedule_work(&key_cleanup_task) lookup_user_key()
| || |
| kmem_cache_free(,user) |
| . |[KEY_SPEC_USER_KEYRING]
| . install_user_keyrings()
| . ||
| key_cleanup() [<= worker_thread()] ||
| | ||
| [spin_lock(&key_serial_lock)] |[mutex_lock(&key_user_keyr..mutex)]
| | ||
| atomic_read() == 0 ||
| |{ rb_ease(&key->serial_node,) } ||
| | ||
| [spin_unlock(&key_serial_lock)] |find_keyring_by_name()
| | |||
| keyring_destroy(keyring) ||[read_lock(&keyring_name_lock)]
| || |||
| |[write_lock(&keyring_name_lock)] ||atomic_inc(&keyring->usage)
| |. ||| *** GET freeing keyring ***
| |. ||[read_unlock(&keyring_name_lock)]
| || ||
| |list_del() |[mutex_unlock(&key_user_k..mutex)]
| || |
| |[write_unlock(&keyring_name_lock)] ** INVALID keyring is returned **
| | .
| kmem_cache_free(,keyring) .
| .
| atomic_dec(&keyring->usage)
v *** DESTROYED ***
TIME
If CONFIG_SLUB_DEBUG=y then we may see the following message generated:
=============================================================================
BUG key_jar: Poison overwritten
-----------------------------------------------------------------------------
INFO: 0xffff880197a7e200-0xffff880197a7e200. First byte 0x6a instead of 0x6b
INFO: Allocated in key_alloc+0x10b/0x35f age=25 cpu=1 pid=5086
INFO: Freed in key_cleanup+0xd0/0xd5 age=12 cpu=1 pid=10
INFO: Slab 0xffffea000592cb90 objects=16 used=2 fp=0xffff880197a7e200 flags=0x200000000000c3
INFO: Object 0xffff880197a7e200 @offset=512 fp=0xffff880197a7e300
Bytes b4 0xffff880197a7e1f0: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZZZZZ
Object 0xffff880197a7e200: 6a 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b jkkkkkkkkkkkkkkk
Alternatively, we may see a system panic happen, such as:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000001
IP: [<ffffffff810e61a3>] kmem_cache_alloc+0x5b/0xe9
PGD 6b2b4067 PUD 6a80d067 PMD 0
Oops: 0000 [#1] SMP
last sysfs file: /sys/kernel/kexec_crash_loaded
CPU 1
...
Pid: 31245, comm: su Not tainted 2.6.34-rc5-nofixed-nodebug #2 D2089/PRIMERGY
RIP: 0010:[<ffffffff810e61a3>] [<ffffffff810e61a3>] kmem_cache_alloc+0x5b/0xe9
RSP: 0018:ffff88006af3bd98 EFLAGS: 00010002
RAX: 0000000000000000 RBX: 0000000000000001 RCX: ffff88007d19900b
RDX: 0000000100000000 RSI: 00000000000080d0 RDI: ffffffff81828430
RBP: ffffffff81828430 R08: ffff88000a293750 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000100000 R12: 00000000000080d0
R13: 00000000000080d0 R14: 0000000000000296 R15: ffffffff810f20ce
FS: 00007f97116bc700(0000) GS:ffff88000a280000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000000000001 CR3: 000000006a91c000 CR4: 00000000000006e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Process su (pid: 31245, threadinfo ffff88006af3a000, task ffff8800374414c0)
Stack:
0000000512e0958e 0000000000008000 ffff880037f8d180 0000000000000001
0000000000000000 0000000000008001 ffff88007d199000 ffffffff810f20ce
0000000000008000 ffff88006af3be48 0000000000000024 ffffffff810face3
Call Trace:
[<ffffffff810f20ce>] ? get_empty_filp+0x70/0x12f
[<ffffffff810face3>] ? do_filp_open+0x145/0x590
[<ffffffff810ce208>] ? tlb_finish_mmu+0x2a/0x33
[<ffffffff810ce43c>] ? unmap_region+0xd3/0xe2
[<ffffffff810e4393>] ? virt_to_head_page+0x9/0x2d
[<ffffffff81103916>] ? alloc_fd+0x69/0x10e
[<ffffffff810ef4ed>] ? do_sys_open+0x56/0xfc
[<ffffffff81008a02>] ? system_call_fastpath+0x16/0x1b
Code: 0f 1f 44 00 00 49 89 c6 fa 66 0f 1f 44 00 00 65 4c 8b 04 25 60 e8 00 00 48 8b 45 00 49 01 c0 49 8b 18 48 85 db 74 0d 48 63 45 18 <48> 8b 04 03 49 89 00 eb 14 4c 89 f9 83 ca ff 44 89 e6 48 89 ef
RIP [<ffffffff810e61a3>] kmem_cache_alloc+0x5b/0xe9
This problem is that find_keyring_by_name does not confirm that the keyring is
valid before accepting it.
Skipping keyrings that have been reduced to a zero count seems the way to go.
To this end, use atomic_inc_not_zero() to increment the usage count and skip
the candidate keyring if that returns false.
The following script _may_ cause the bug to happen, but there's no guarantee
as the window of opportunity is small:
#!/bin/sh
LOOP=100000
USER=dummy_user
/bin/su -c "exit;" $USER || { /usr/sbin/adduser -m $USER; add=1; }
for ((i=0; i<LOOP; i++))
do
/bin/su -c "echo '$i' > /dev/null" $USER
done
(( add == 1 )) && /usr/sbin/userdel -r $USER
exit
Note that the nominated user must not be in use.
An alternative way of testing this may be:
for ((i=0; i<100000; i++))
do
keyctl session foo /bin/true || break
done >&/dev/null
as that uses a keyring named "foo" rather than relying on the user and
user-session named keyrings.
Reported-by: Toshiyuki Okajima <toshi.okajima@jp.fujitsu.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Toshiyuki Okajima <toshi.okajima@jp.fujitsu.com>
Acked-by: Serge Hallyn <serue@us.ibm.com>
Signed-off-by: James Morris <jmorris@namei.org>
| 0 |
onig_regset_search_with_param(OnigRegSet* set,
const UChar* str, const UChar* end,
const UChar* start, const UChar* range,
OnigRegSetLead lead, OnigOptionType option, OnigMatchParam* mps[],
int* rmatch_pos)
{
int r;
int i;
UChar *s, *prev;
regex_t* reg;
OnigEncoding enc;
OnigRegion* region;
MatchArg* msas;
const UChar *orig_start = start;
const UChar *orig_range = range;
if (set->n == 0)
return ONIG_MISMATCH;
if (IS_POSIX_REGION(option))
return ONIGERR_INVALID_ARGUMENT;
r = 0;
enc = set->enc;
msas = (MatchArg* )NULL;
for (i = 0; i < set->n; i++) {
reg = set->rs[i].reg;
region = set->rs[i].region;
ADJUST_MATCH_PARAM(reg, mps[i]);
if (IS_NOT_NULL(region)) {
r = onig_region_resize_clear(region, reg->num_mem + 1);
if (r != 0) goto finish_no_msa;
}
}
if (start > end || start < str) goto mismatch_no_msa;
if (str < end) {
/* forward search only */
if (range <= start)
return ONIGERR_INVALID_ARGUMENT;
}
if (ONIG_IS_OPTION_ON(option, ONIG_OPTION_CHECK_VALIDITY_OF_STRING)) {
if (! ONIGENC_IS_VALID_MBC_STRING(enc, str, end)) {
r = ONIGERR_INVALID_WIDE_CHAR_VALUE;
goto finish_no_msa;
}
}
if (set->anchor != OPTIMIZE_NONE && str < end) {
UChar *min_semi_end, *max_semi_end;
if ((set->anchor & ANCR_BEGIN_POSITION) != 0) {
/* search start-position only */
begin_position:
range = start + 1;
}
else if ((set->anchor & ANCR_BEGIN_BUF) != 0) {
/* search str-position only */
if (start != str) goto mismatch_no_msa;
range = str + 1;
}
else if ((set->anchor & ANCR_END_BUF) != 0) {
min_semi_end = max_semi_end = (UChar* )end;
end_buf:
if ((OnigLen )(max_semi_end - str) < set->anc_dmin)
goto mismatch_no_msa;
if ((OnigLen )(min_semi_end - start) > set->anc_dmax) {
start = min_semi_end - set->anc_dmax;
if (start < end)
start = onigenc_get_right_adjust_char_head(enc, str, start);
}
if ((OnigLen )(max_semi_end - (range - 1)) < set->anc_dmin) {
range = max_semi_end - set->anc_dmin + 1;
}
if (start > range) goto mismatch_no_msa;
}
else if ((set->anchor & ANCR_SEMI_END_BUF) != 0) {
UChar* pre_end = ONIGENC_STEP_BACK(enc, str, end, 1);
max_semi_end = (UChar* )end;
if (ONIGENC_IS_MBC_NEWLINE(enc, pre_end, end)) {
min_semi_end = pre_end;
#ifdef USE_CRNL_AS_LINE_TERMINATOR
pre_end = ONIGENC_STEP_BACK(enc, str, pre_end, 1);
if (IS_NOT_NULL(pre_end) &&
ONIGENC_IS_MBC_CRNL(enc, pre_end, end)) {
min_semi_end = pre_end;
}
#endif
if (min_semi_end > str && start <= min_semi_end) {
goto end_buf;
}
}
else {
min_semi_end = (UChar* )end;
goto end_buf;
}
}
else if ((set->anchor & ANCR_ANYCHAR_INF_ML) != 0) {
goto begin_position;
}
}
else if (str == end) { /* empty string */
start = end = str;
s = (UChar* )start;
prev = (UChar* )NULL;
msas = (MatchArg* )xmalloc(sizeof(*msas) * set->n);
CHECK_NULL_RETURN_MEMERR(msas);
for (i = 0; i < set->n; i++) {
reg = set->rs[i].reg;
MATCH_ARG_INIT(msas[i], reg, option, set->rs[i].region, start, mps[i]);
}
for (i = 0; i < set->n; i++) {
reg = set->rs[i].reg;
if (reg->threshold_len == 0) {
REGSET_MATCH_AND_RETURN_CHECK(end);
}
}
goto mismatch;
}
if (lead == ONIG_REGSET_POSITION_LEAD) {
msas = (MatchArg* )xmalloc(sizeof(*msas) * set->n);
CHECK_NULL_RETURN_MEMERR(msas);
for (i = 0; i < set->n; i++) {
MATCH_ARG_INIT(msas[i], set->rs[i].reg, option, set->rs[i].region,
orig_start, mps[i]);
}
r = regset_search_body_position_lead(set, str, end, start, range,
orig_range, option, msas, rmatch_pos);
}
else {
r = regset_search_body_regex_lead(set, str, end, start, orig_range,
lead, option, mps, rmatch_pos);
}
if (r < 0) goto finish;
else goto match2;
mismatch:
r = ONIG_MISMATCH;
finish:
for (i = 0; i < set->n; i++) {
if (IS_NOT_NULL(msas))
MATCH_ARG_FREE(msas[i]);
if (IS_FIND_NOT_EMPTY(set->rs[i].reg->options) &&
IS_NOT_NULL(set->rs[i].region)) {
onig_region_clear(set->rs[i].region);
}
}
if (IS_NOT_NULL(msas)) xfree(msas);
return r;
mismatch_no_msa:
r = ONIG_MISMATCH;
finish_no_msa:
return r;
match:
*rmatch_pos = (int )(s - str);
match2:
for (i = 0; i < set->n; i++) {
if (IS_NOT_NULL(msas))
MATCH_ARG_FREE(msas[i]);
if (IS_FIND_NOT_EMPTY(set->rs[i].reg->options) &&
IS_NOT_NULL(set->rs[i].region)) {
onig_region_clear(set->rs[i].region);
}
}
if (IS_NOT_NULL(msas)) xfree(msas);
return r; /* regex index */
}
|
Safe
|
[
"CWE-125"
] |
oniguruma
|
0463e21432515631a9bc925ce5eb95b097c73719
|
1.7712433098110153e+38
| 180 |
fix #164: Integer overflow related to reg->dmax in search_in_range()
| 0 |
EXPORTED int annotate_msg_copy(struct mailbox *oldmailbox, uint32_t olduid,
struct mailbox *newmailbox, uint32_t newuid,
const char *userid)
{
annotate_db_t *d = NULL;
int r;
init_internal();
r = _annotate_getdb(newmailbox->name, newuid, CYRUSDB_CREATE, &d);
if (r) return r;
annotate_begin(d);
/* If these are not true, nobody will ever commit the data we're
* about to copy, and that would be sad */
assert(newmailbox->annot_state != NULL);
assert(newmailbox->annot_state->d == d);
r = _annotate_rewrite(oldmailbox, olduid, userid,
newmailbox, newuid, userid,
/*copy*/1);
annotate_putdb(&d);
return r;
}
|
Safe
|
[
"CWE-732"
] |
cyrus-imapd
|
621f9e41465b521399f691c241181300fab55995
|
1.3289354865716243e+38
| 26 |
annotate: don't allow everyone to write shared server entries
| 0 |
DEFUN (neighbor_disable_connected_check,
neighbor_disable_connected_check_cmd,
NEIGHBOR_CMD2 "disable-connected-check",
NEIGHBOR_STR
NEIGHBOR_ADDR_STR2
"one-hop away EBGP peer using loopback address\n")
{
return peer_flag_set_vty (vty, argv[0], PEER_FLAG_DISABLE_CONNECTED_CHECK);
}
|
Safe
|
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
|
2.8345298501241587e+37
| 9 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <paul.jakma@sun.com>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap-specific
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible. (A sketch of
the validate-before-copy pattern follows this log.)
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontiguous number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
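The length-validation-before-memcpy pattern that this log consolidates can be
sketched as follows; the TLV header struct, the cap_min_size[] table and
parse_cap() are illustrative stand-ins, not frr's actual definitions.
#include <string.h>
#include <stdint.h>
#include <stddef.h>

struct cap_tlv_hdr {     /* generic capability TLV header (illustrative) */
    uint8_t code;
    uint8_t length;
};

/* hypothetical per-code minimum payload sizes, in the spirit of
 * cap_minsizes[]; code 1 (Multiprotocol) carries afi + reserved + safi */
static const size_t cap_min_size[256] = { [1] = 4 };

static int parse_cap(const uint8_t *buf, size_t remaining,
                     uint8_t *out, size_t outsz)
{
    struct cap_tlv_hdr hdr;

    if (remaining < sizeof(hdr))
        return -1;                       /* truncated header */
    memcpy(&hdr, buf, sizeof(hdr));

    if (hdr.length > remaining - sizeof(hdr))
        return -1;                       /* claimed length overruns the buffer */
    if (hdr.length < cap_min_size[hdr.code] || hdr.length > outsz)
        return -1;                       /* too short for this code, or too big for us */

    memcpy(out, buf + sizeof(hdr), hdr.length);  /* copy only after all checks */
    return hdr.length;
}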
| 0 |
bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg)
{
SELECT_LEX *first_sl= first_select();
DBUG_ENTER("st_select_lex_unit::add_fake_select_lex");
DBUG_ASSERT(!fake_select_lex);
if (!(fake_select_lex= new (thd_arg->mem_root) SELECT_LEX()))
DBUG_RETURN(1);
fake_select_lex->include_standalone(this,
(SELECT_LEX_NODE**)&fake_select_lex);
fake_select_lex->select_number= INT_MAX;
fake_select_lex->parent_lex= thd_arg->lex; /* Used in init_query. */
fake_select_lex->make_empty_select();
fake_select_lex->set_linkage(GLOBAL_OPTIONS_TYPE);
fake_select_lex->select_limit= 0;
fake_select_lex->no_table_names_allowed= 1;
fake_select_lex->context.outer_context=first_sl->context.outer_context;
/* allow item list resolving in fake select for ORDER BY */
fake_select_lex->context.resolve_in_select_list= TRUE;
fake_select_lex->context.select_lex= fake_select_lex;
fake_select_lex->nest_level_base= first_select()->nest_level_base;
if (fake_select_lex->set_nest_level(first_select()->nest_level))
DBUG_RETURN(1);
if (!is_unit_op())
{
/*
This works only for
(SELECT ... ORDER BY list [LIMIT n]) ORDER BY order_list [LIMIT m],
(SELECT ... LIMIT n) ORDER BY order_list [LIMIT m]
just before the parser starts processing order_list
*/
fake_select_lex->no_table_names_allowed= 1;
thd_arg->lex->current_select= fake_select_lex;
}
//thd_arg->lex->pop_context("add fake");
DBUG_RETURN(0);
}
|
Safe
|
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
|
5.27766269202654e+37
| 41 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
If an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <sanja@mariadb.com>
| 0 |
rsvg_standard_element_start (RsvgHandle * ctx, const char *name, RsvgPropertyBag * atts)
{
/*replace this stuff with a hash for fast reading! */
RsvgNode *newnode = NULL;
if (!strcmp (name, "g"))
newnode = rsvg_new_group ();
else if (!strcmp (name, "a")) /*treat anchors as groups for now */
newnode = rsvg_new_group ();
else if (!strcmp (name, "switch"))
newnode = rsvg_new_switch ();
else if (!strcmp (name, "defs"))
newnode = rsvg_new_defs ();
else if (!strcmp (name, "use"))
newnode = rsvg_new_use ();
else if (!strcmp (name, "path"))
newnode = rsvg_new_path ();
else if (!strcmp (name, "line"))
newnode = rsvg_new_line ();
else if (!strcmp (name, "rect"))
newnode = rsvg_new_rect ();
else if (!strcmp (name, "ellipse"))
newnode = rsvg_new_ellipse ();
else if (!strcmp (name, "circle"))
newnode = rsvg_new_circle ();
else if (!strcmp (name, "polygon"))
newnode = rsvg_new_polygon ();
else if (!strcmp (name, "polyline"))
newnode = rsvg_new_polyline ();
else if (!strcmp (name, "symbol"))
newnode = rsvg_new_symbol ();
else if (!strcmp (name, "svg"))
newnode = rsvg_new_svg ();
else if (!strcmp (name, "mask"))
newnode = rsvg_new_mask ();
else if (!strcmp (name, "clipPath"))
newnode = rsvg_new_clip_path ();
else if (!strcmp (name, "image"))
newnode = rsvg_new_image ();
else if (!strcmp (name, "marker"))
newnode = rsvg_new_marker ();
else if (!strcmp (name, "stop"))
newnode = rsvg_new_stop ();
else if (!strcmp (name, "pattern"))
newnode = rsvg_new_pattern ();
else if (!strcmp (name, "linearGradient"))
newnode = rsvg_new_linear_gradient ();
else if (!strcmp (name, "radialGradient"))
newnode = rsvg_new_radial_gradient ();
else if (!strcmp (name, "conicalGradient"))
newnode = rsvg_new_radial_gradient ();
else if (!strcmp (name, "filter"))
newnode = rsvg_new_filter ();
else if (!strcmp (name, "feBlend"))
newnode = rsvg_new_filter_primitive_blend ();
else if (!strcmp (name, "feColorMatrix"))
newnode = rsvg_new_filter_primitive_colour_matrix ();
else if (!strcmp (name, "feComponentTransfer"))
newnode = rsvg_new_filter_primitive_component_transfer ();
else if (!strcmp (name, "feComposite"))
newnode = rsvg_new_filter_primitive_composite ();
else if (!strcmp (name, "feConvolveMatrix"))
newnode = rsvg_new_filter_primitive_convolve_matrix ();
else if (!strcmp (name, "feDiffuseLighting"))
newnode = rsvg_new_filter_primitive_diffuse_lighting ();
else if (!strcmp (name, "feDisplacementMap"))
newnode = rsvg_new_filter_primitive_displacement_map ();
else if (!strcmp (name, "feFlood"))
newnode = rsvg_new_filter_primitive_flood ();
else if (!strcmp (name, "feGaussianBlur"))
newnode = rsvg_new_filter_primitive_gaussian_blur ();
else if (!strcmp (name, "feImage"))
newnode = rsvg_new_filter_primitive_image ();
else if (!strcmp (name, "feMerge"))
newnode = rsvg_new_filter_primitive_merge ();
else if (!strcmp (name, "feMorphology"))
newnode = rsvg_new_filter_primitive_erode ();
else if (!strcmp (name, "feOffset"))
newnode = rsvg_new_filter_primitive_offset ();
else if (!strcmp (name, "feSpecularLighting"))
newnode = rsvg_new_filter_primitive_specular_lighting ();
else if (!strcmp (name, "feTile"))
newnode = rsvg_new_filter_primitive_tile ();
else if (!strcmp (name, "feTurbulence"))
newnode = rsvg_new_filter_primitive_turbulence ();
else if (!strcmp (name, "feMergeNode"))
newnode = rsvg_new_filter_primitive_merge_node ();
else if (!strcmp (name, "feFuncR"))
newnode = rsvg_new_node_component_transfer_function ('r');
else if (!strcmp (name, "feFuncG"))
newnode = rsvg_new_node_component_transfer_function ('g');
else if (!strcmp (name, "feFuncB"))
newnode = rsvg_new_node_component_transfer_function ('b');
else if (!strcmp (name, "feFuncA"))
newnode = rsvg_new_node_component_transfer_function ('a');
else if (!strcmp (name, "feDistantLight"))
newnode = rsvg_new_filter_primitive_light_source ('d');
else if (!strcmp (name, "feSpotLight"))
newnode = rsvg_new_filter_primitive_light_source ('s');
else if (!strcmp (name, "fePointLight"))
newnode = rsvg_new_filter_primitive_light_source ('p');
/* hack to make multiImage sort-of work */
else if (!strcmp (name, "multiImage"))
newnode = rsvg_new_switch ();
else if (!strcmp (name, "subImageRef"))
newnode = rsvg_new_image ();
else if (!strcmp (name, "subImage"))
newnode = rsvg_new_group ();
else if (!strcmp (name, "text"))
newnode = rsvg_new_text ();
else if (!strcmp (name, "tspan"))
newnode = rsvg_new_tspan ();
else if (!strcmp (name, "tref"))
newnode = rsvg_new_tref ();
else {
/* hack for bug 401115. whenever we encounter a node we don't understand, push it into a group.
this will allow us to handle things like conditionals properly. */
newnode = rsvg_new_group ();
}
if (newnode) {
newnode->type = g_string_new (name);
newnode->parent = ctx->priv->currentnode;
rsvg_node_set_atts (newnode, ctx, atts);
rsvg_defs_register_memory (ctx->priv->defs, newnode);
if (ctx->priv->currentnode) {
rsvg_node_group_pack (ctx->priv->currentnode, newnode);
ctx->priv->currentnode = newnode;
} else if (!strcmp (name, "svg")) {
ctx->priv->treebase = newnode;
ctx->priv->currentnode = newnode;
}
}
}
|
Vulnerable
|
[] |
librsvg
|
34c95743ca692ea0e44778e41a7c0a129363de84
|
2.245288562657404e+38
| 134 |
Store node type separately in RsvgNode
The node name (formerly RsvgNode:type) cannot be used to infer
the sub-type of RsvgNode that we're dealing with, since for unknown
elements we put type = node-name. This led to a (potentially exploitable)
crash e.g. when the element name started with "fe" which tricked
the old code into considering it as a RsvgFilterPrimitive.
CVE-2011-3146
https://bugzilla.gnome.org/show_bug.cgi?id=658014
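A minimal sketch of the separation this commit describes: dispatch on an
explicit enum set at construction, and keep the element name only for output.
The type and function names here are illustrative, not librsvg's actual API.
typedef enum {
    NODE_TYPE_GROUP,
    NODE_TYPE_FILTER_PRIMITIVE,
    NODE_TYPE_UNKNOWN,     /* unrecognized elements land here, never in a sub-type */
} NodeType;

typedef struct {
    NodeType type;         /* authoritative sub-type, set at construction */
    const char *name;      /* element name, kept for serialization only */
} Node;

/* Safe "downcast" check: consults the enum, never the name prefix, so an
 * unknown element whose name happens to start with "fe" cannot be mistaken
 * for a filter primitive. */
static int node_is_filter_primitive(const Node *n)
{
    return n->type == NODE_TYPE_FILTER_PRIMITIVE;
}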
| 1 |
TEST(Random, FixedSeed) {
// clang-format off
struct ConstantRNG {
typedef uint32_t result_type;
result_type operator()() {
return 4; // chosen by fair dice roll.
// guaranteed to be random.
}
static constexpr result_type min() {
return std::numeric_limits<result_type>::min();
}
static constexpr result_type max() {
return std::numeric_limits<result_type>::max();
}
};
// clang-format on
ConstantRNG gen;
// Pick a constant random number...
auto value = Random::rand32(10, gen);
// Loop to make sure it really is constant.
for (int i = 0; i < 1024; ++i) {
auto result = Random::rand32(10, gen);
EXPECT_EQ(value, result);
}
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
folly
|
8e927ee48b114c8a2f90d0cbd5ac753795a6761f
|
2.4859719395292657e+38
| 28 |
Flush secureRandom buffer on fork
Summary: On fork, flush the secureRandom buffer, so that we don't share entropy between the parent and child.
Reviewed By: ricklavoie
Differential Revision: D9196474
fbshipit-source-id: 12ff8488d814466186df61328a5f1d4000beb27f
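A C sketch of the idea (folly itself is C++): register a child-side fork
handler that discards any buffered entropy so parent and child never hand out
the same bytes. The buffer names are made up for the example.
#include <pthread.h>
#include <string.h>

static unsigned char entropy_buf[4096];   /* hypothetical buffered random bytes */
static size_t entropy_avail;              /* how many buffered bytes remain */

static void flush_entropy_in_child(void)
{
    /* The child must not consume bytes the parent may also hand out. */
    memset(entropy_buf, 0, sizeof(entropy_buf));
    entropy_avail = 0;
}

static void secure_random_init(void)
{
    /* Run flush_entropy_in_child() in the child after every fork(). */
    (void)pthread_atfork(NULL, NULL, flush_entropy_in_child);
}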
| 0 |
int Field_real::store_time_dec(MYSQL_TIME *ltime, uint dec_arg)
{
return store(TIME_to_double(ltime));
}
|
Safe
|
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
|
4.210770248239009e+37
| 4 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
| 0 |
static int snd_emu0204_ch_switch_update(struct usb_mixer_interface *mixer,
int value)
{
struct snd_usb_audio *chip = mixer->chip;
int err;
unsigned char buf[2];
err = snd_usb_lock_shutdown(chip);
if (err < 0)
return err;
buf[0] = 0x01;
buf[1] = value ? 0x02 : 0x01;
err = snd_usb_ctl_msg(chip->dev,
usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
0x0400, 0x0e00, buf, 2);
snd_usb_unlock_shutdown(chip);
return err;
}
|
Safe
|
[] |
sound
|
447d6275f0c21f6cc97a88b3a0c601436a4cdf2a
|
8.601326443775323e+37
| 20 |
ALSA: usb-audio: Add sanity checks for endpoint accesses
Add some sanity check codes before actually accessing the endpoint via
get_endpoint() in order to avoid the invalid access through a
malformed USB descriptor. Mostly just checking bNumEndpoints, but in
one place (snd_microii_spdif_default_get()), the validity of iface and
altsetting index is checked as well.
Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
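The shape of the added sanity check, as a self-contained sketch with
illustrative stand-ins for the USB descriptor structures: validate
bNumEndpoints before dereferencing an endpoint by index.
struct ep_desc { unsigned char bEndpointAddress; };

struct alt_setting {               /* illustrative stand-in for a USB altsetting */
    unsigned char bNumEndpoints;   /* count declared by the (untrusted) descriptor */
    struct ep_desc endpoint[16];
};

/* Return the endpoint descriptor, or NULL when the possibly malformed
 * descriptor set does not actually contain that many endpoints. */
static struct ep_desc *get_endpoint_checked(struct alt_setting *alts,
                                            unsigned int idx)
{
    /* check both the declared count and the real storage bound, since a
     * malformed descriptor may declare more endpoints than exist */
    if (idx >= alts->bNumEndpoints ||
        idx >= sizeof(alts->endpoint) / sizeof(alts->endpoint[0]))
        return NULL;
    return &alts->endpoint[idx];
}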
| 0 |
void decoder_context::calc_tid_and_framerate_ratio()
{
int highestTID = get_highest_TID();
// if number of temporal layers changed, we have to recompute the framedrop table
if (framedrop_tab[100].tid != highestTID) {
compute_framedrop_table();
}
goal_HighestTid = framedrop_tab[framerate_ratio].tid;
layer_framerate_ratio = framedrop_tab[framerate_ratio].ratio;
// TODO: for now, we switch immediately
current_HighestTid = goal_HighestTid;
}
|
Safe
|
[
"CWE-416"
] |
libde265
|
f538254e4658ef5ea4e233c2185dcbfd165e8911
|
5.023518306810733e+37
| 17 |
fix streams where SPS image size changes without refreshing PPS (#299)
| 0 |
ZEND_METHOD(exception, __wakeup)
{
zval *value;
zval *object = getThis();
HashTable *intern_ht = zend_std_get_properties(getThis() TSRMLS_CC);
CHECK_EXC_TYPE("message", IS_STRING);
CHECK_EXC_TYPE("string", IS_STRING);
CHECK_EXC_TYPE("code", IS_LONG);
CHECK_EXC_TYPE("file", IS_STRING);
CHECK_EXC_TYPE("line", IS_LONG);
CHECK_EXC_TYPE("trace", IS_ARRAY);
CHECK_EXC_TYPE("previous", IS_OBJECT);
}
|
Safe
|
[
"CWE-20"
] |
php-src
|
4d2278143a08b7522de9471d0f014d7357c28fea
|
1.9938415816338943e+38
| 13 |
Fix #69793 - limit what we accept when unserializing exception
| 0 |
void psi_trigger_destroy(struct psi_trigger *t)
{
struct psi_group *group;
struct task_struct *task_to_destroy = NULL;
/*
* We do not check psi_disabled since it might have been disabled after
* the trigger got created.
*/
if (!t)
return;
group = t->group;
/*
* Wakeup waiters to stop polling. Can happen if cgroup is deleted
* from under a polling process.
*/
wake_up_interruptible(&t->event_wait);
mutex_lock(&group->trigger_lock);
if (!list_empty(&t->node)) {
struct psi_trigger *tmp;
u64 period = ULLONG_MAX;
list_del(&t->node);
group->nr_triggers[t->state]--;
if (!group->nr_triggers[t->state])
group->poll_states &= ~(1 << t->state);
/* reset min update period for the remaining triggers */
list_for_each_entry(tmp, &group->triggers, node)
period = min(period, div_u64(tmp->win.size,
UPDATES_PER_WINDOW));
group->poll_min_period = period;
/* Destroy poll_task when the last trigger is destroyed */
if (group->poll_states == 0) {
group->polling_until = 0;
task_to_destroy = rcu_dereference_protected(
group->poll_task,
lockdep_is_held(&group->trigger_lock));
rcu_assign_pointer(group->poll_task, NULL);
del_timer(&group->poll_timer);
}
}
mutex_unlock(&group->trigger_lock);
/*
* Wait for psi_schedule_poll_work RCU to complete its read-side
* critical section before destroying the trigger and optionally the
* poll_task.
*/
synchronize_rcu();
/*
* Stop kthread 'psimon' after releasing trigger_lock to prevent a
* deadlock while waiting for psi_poll_work to acquire trigger_lock
*/
if (task_to_destroy) {
/*
* After the RCU grace period has expired, the worker
* can no longer be found through group->poll_task.
*/
kthread_stop(task_to_destroy);
}
kfree(t);
}
|
Safe
|
[
"CWE-416"
] |
linux
|
a06247c6804f1a7c86a2e5398a4c1f1db1471848
|
1.82617533083729e+38
| 66 |
psi: Fix uaf issue when psi trigger is destroyed while being polled
With write operation on psi files replacing old trigger with a new one,
the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed and pending poll()
will stumble on trigger->event_wait which was destroyed.
Fix this by disallowing to redefine an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: syzbot+cdb5dd11c97cc532efad@syzkaller.appspotmail.com
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Analyzed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20220111232309.1786347-1-surenb@google.com
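A simplified sketch of the write-path rule this fix adds, with stand-in
structures: if a trigger is already published for the file, refuse with
-EBUSY rather than swapping it out from under a concurrent poll().
#include <errno.h>
#include <stddef.h>

struct psi_trigger;                    /* opaque for the sketch */

struct psi_file {
    struct psi_trigger *trigger;       /* at most one trigger per open file */
};

static int psi_file_set_trigger(struct psi_file *f, struct psi_trigger *t)
{
    if (f->trigger != NULL)
        return -EBUSY;   /* redefining would free a waitqueue someone may be polling */
    f->trigger = t;      /* the kernel publishes this under appropriate locking */
    return 0;
}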
| 0 |
dirserv_get_routerdescs(smartlist_t *descs_out, const char *key,
const char **msg)
{
*msg = NULL;
if (!strcmp(key, "/tor/server/all")) {
routerlist_t *rl = router_get_routerlist();
SMARTLIST_FOREACH(rl->routers, routerinfo_t *, r,
smartlist_add(descs_out, &(r->cache_info)));
} else if (!strcmp(key, "/tor/server/authority")) {
routerinfo_t *ri = router_get_my_routerinfo();
if (ri)
smartlist_add(descs_out, &(ri->cache_info));
} else if (!strcmpstart(key, "/tor/server/d/")) {
smartlist_t *digests = smartlist_create();
key += strlen("/tor/server/d/");
dir_split_resource_into_fingerprints(key, digests, NULL,
DSR_HEX|DSR_SORT_UNIQ);
SMARTLIST_FOREACH(digests, const char *, d,
{
signed_descriptor_t *sd = router_get_by_descriptor_digest(d);
if (sd)
smartlist_add(descs_out,sd);
});
SMARTLIST_FOREACH(digests, char *, d, tor_free(d));
smartlist_free(digests);
} else if (!strcmpstart(key, "/tor/server/fp/")) {
smartlist_t *digests = smartlist_create();
time_t cutoff = time(NULL) - ROUTER_MAX_AGE_TO_PUBLISH;
key += strlen("/tor/server/fp/");
dir_split_resource_into_fingerprints(key, digests, NULL,
DSR_HEX|DSR_SORT_UNIQ);
SMARTLIST_FOREACH(digests, const char *, d,
{
if (router_digest_is_me(d)) {
/* make sure desc_routerinfo exists */
routerinfo_t *ri = router_get_my_routerinfo();
if (ri)
smartlist_add(descs_out, &(ri->cache_info));
} else {
routerinfo_t *ri = router_get_by_digest(d);
/* Don't actually serve a descriptor that everyone will think is
* expired. This is an (ugly) workaround to keep buggy 0.1.1.10
* Tors from downloading descriptors that they will throw away.
*/
if (ri && ri->cache_info.published_on > cutoff)
smartlist_add(descs_out, &(ri->cache_info));
}
});
SMARTLIST_FOREACH(digests, char *, d, tor_free(d));
smartlist_free(digests);
} else {
*msg = "Key not recognized";
return -1;
}
if (!smartlist_len(descs_out)) {
*msg = "Servers unavailable";
return -1;
}
return 0;
}
|
Safe
|
[
"CWE-264"
] |
tor
|
00fffbc1a15e2696a89c721d0c94dc333ff419ef
|
2.0045825059295602e+38
| 62 |
Don't give the Guard flag to relays without the CVE-2011-2768 fix
| 0 |
sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
(struct sctp_transport *)arg, commands))
return SCTP_DISPOSITION_NOMEM;
/*
* RFC 2960 (bis), section 8.3
*
* D) Request an on-demand HEARTBEAT on a specific destination
* transport address of a given association.
*
* The endpoint should increment the respective error counter of
* the destination transport address each time a HEARTBEAT is sent
* to that address and not acknowledged within one RTO.
*
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
SCTP_TRANSPORT(arg));
return SCTP_DISPOSITION_CONSUME;
}
|
Safe
|
[
"CWE-20"
] |
linux-2.6
|
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
|
1.2272855876395864e+38
| 26 |
sctp: Fix kernel panic while process protocol violation parameter
Since a call to sctp_sf_abort_violation() needs the parameter 'arg' to be of
'struct sctp_chunk' type, it will read the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() is always
called with a 'struct sctp_paramhdr' typed parameter, which is then passed to
sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes this problem. It also fixes two places which called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com>
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static MYSQL *db_connect(char *host, char *database,
char *user, char *passwd)
{
MYSQL *mysql;
if (verbose)
fprintf(stdout, "Connecting to %s\n", host ? host : "localhost");
if (!(mysql= mysql_init(NULL)))
return 0;
if (opt_compress)
mysql_options(mysql,MYSQL_OPT_COMPRESS,NullS);
if (opt_local_file)
mysql_options(mysql,MYSQL_OPT_LOCAL_INFILE,
(char*) &opt_local_file);
#ifdef HAVE_OPENSSL
if (opt_use_ssl)
{
mysql_ssl_set(mysql, opt_ssl_key, opt_ssl_cert, opt_ssl_ca,
opt_ssl_capath, opt_ssl_cipher);
mysql_options(mysql, MYSQL_OPT_SSL_CRL, opt_ssl_crl);
mysql_options(mysql, MYSQL_OPT_SSL_CRLPATH, opt_ssl_crlpath);
}
mysql_options(mysql,MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
(char*)&opt_ssl_verify_server_cert);
#endif
if (opt_protocol)
mysql_options(mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
if (opt_bind_addr)
mysql_options(mysql,MYSQL_OPT_BIND,opt_bind_addr);
#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
if (shared_memory_base_name)
mysql_options(mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
#endif
if (opt_plugin_dir && *opt_plugin_dir)
mysql_options(mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir);
if (opt_default_auth && *opt_default_auth)
mysql_options(mysql, MYSQL_DEFAULT_AUTH, opt_default_auth);
mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset);
mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_RESET, 0);
mysql_options4(mysql, MYSQL_OPT_CONNECT_ATTR_ADD,
"program_name", "mysqlimport");
if (!(mysql_real_connect(mysql,host,user,passwd,
database,opt_mysql_port,opt_mysql_unix_port,
0)))
{
ignore_errors=0; /* NO RETURN FROM db_error */
db_error(mysql);
}
mysql->reconnect= 0;
if (verbose)
fprintf(stdout, "Selecting database %s\n", database);
if (mysql_select_db(mysql, database))
{
ignore_errors=0;
db_error(mysql);
}
return mysql;
}
|
Vulnerable
|
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
|
2.5815606827319215e+38
| 60 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
| 1 |
do_delete (GVfsBackend *backend,
GVfsJobDelete *delete_job,
const char *filename)
{
GVfsBackendAdmin *self = G_VFS_BACKEND_ADMIN (backend);
GVfsJob *job = G_VFS_JOB (delete_job);
GError *error = NULL;
GFile *file;
if (!check_permission (self, job))
return;
file = g_file_new_for_path (filename);
g_file_delete (file, job->cancellable, &error);
g_object_unref (file);
complete_job (job, error);
}
|
Safe
|
[] |
gvfs
|
d7d362995aa0cb8905c8d5c2a2a4c305d2ffff80
|
2.082476293036142e+38
| 18 |
admin: Use fsuid to ensure correct file ownership
Files created over the admin backend should be owned by root, but they are
owned by the user itself. This is because the daemon drops the uid to
make the D-Bus connection work. Use fsuid and euid to fix this issue.
Closes: https://gitlab.gnome.org/GNOME/gvfs/issues/21
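A minimal Linux-specific sketch of the fsuid approach, assuming a glibc
environment; error handling is elided and the helper name is invented for the
example.
#define _GNU_SOURCE
#include <sys/fsuid.h>
#include <stdio.h>

static void create_as_root(const char *path)
{
    int old_fsuid = setfsuid(0);   /* file ops now act with root's uid... */
    int old_fsgid = setfsgid(0);   /* ...and root's gid */

    FILE *f = fopen(path, "w");    /* the created file is owned by root */
    if (f)
        fclose(f);

    setfsuid(old_fsuid);           /* drop back to the caller's identity */
    setfsgid(old_fsgid);
}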
| 0 |
static void task_numa_compare(struct task_numa_env *env,
long taskimp, long groupimp, bool maymove)
{
struct rq *dst_rq = cpu_rq(env->dst_cpu);
struct task_struct *cur;
long src_load, dst_load;
long load;
long imp = env->p->numa_group ? groupimp : taskimp;
long moveimp = imp;
int dist = env->dist;
if (READ_ONCE(dst_rq->numa_migrate_on))
return;
rcu_read_lock();
cur = task_rcu_dereference(&dst_rq->curr);
if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
cur = NULL;
/*
* Because we have preemption enabled we can get migrated around and
* end up trying to select ourselves (current == env->p) as a swap candidate.
*/
if (cur == env->p)
goto unlock;
if (!cur) {
if (maymove && moveimp >= env->best_imp)
goto assign;
else
goto unlock;
}
/*
* "imp" is the fault differential for the source task between the
* source and destination node. Calculate the total differential for
* the source task and potential destination task. The more negative
* the value is, the more remote accesses that would be expected to
* be incurred if the tasks were swapped.
*/
/* Skip this swap candidate if cannot move to the source cpu */
if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
goto unlock;
/*
* If dst and source tasks are in the same NUMA group, or not
* in any group then look only at task weights.
*/
if (cur->numa_group == env->p->numa_group) {
imp = taskimp + task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
/*
* Add some hysteresis to prevent swapping the
* tasks within a group over tiny differences.
*/
if (cur->numa_group)
imp -= imp / 16;
} else {
/*
* Compare the group weights. If a task is all by itself
* (not part of a group), use the task weight instead.
*/
if (cur->numa_group && env->p->numa_group)
imp += group_weight(cur, env->src_nid, dist) -
group_weight(cur, env->dst_nid, dist);
else
imp += task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
}
if (maymove && moveimp > imp && moveimp > env->best_imp) {
imp = moveimp;
cur = NULL;
goto assign;
}
/*
* If the NUMA importance is less than SMALLIMP,
* task migration might only result in ping pong
* of tasks and also hurt performance due to cache
* misses.
*/
if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
goto unlock;
/*
* In the overloaded case, try and keep the load balanced.
*/
load = task_h_load(env->p) - task_h_load(cur);
if (!load)
goto assign;
dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
if (load_too_imbalanced(src_load, dst_load, env))
goto unlock;
assign:
/*
* One idle CPU per node is evaluated for a task numa move.
* Call select_idle_sibling to maybe find a better one.
*/
if (!cur) {
/*
* select_idle_sibling() uses a per-CPU cpumask that
* can be used from IRQ context.
*/
local_irq_disable();
env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
env->dst_cpu);
local_irq_enable();
}
task_numa_assign(env, cur, imp);
unlock:
rcu_read_unlock();
}
|
Safe
|
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
c40f7d74c741a907cfaeb73a7697081881c497d0
|
2.9532648566886268e+38
| 118 |
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <xiexiuqi@huawei.com>
Analyzed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reported-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Reported-by: Sargun Dhillon <sargun@sargun.me>
Reported-by: Xie XiuQi <xiexiuqi@huawei.com>
Tested-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Tested-by: Sargun Dhillon <sargun@sargun.me>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: <stable@vger.kernel.org> # v4.13+
Cc: Bin Li <huawei.libin@huawei.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/1545879866-27809-1-git-send-email-xiexiuqi@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| 0 |
ikev1_n_print(netdissect_options *ndo, u_char tpay _U_,
const struct isakmp_gen *ext, u_int item_len,
const u_char *ep, uint32_t phase _U_, uint32_t doi0 _U_,
uint32_t proto0 _U_, int depth _U_)
{
const struct ikev1_pl_n *p;
struct ikev1_pl_n n;
const u_char *cp;
const u_char *ep2;
uint32_t doi;
uint32_t proto;
static const char *notify_error_str[] = {
NULL, "INVALID-PAYLOAD-TYPE",
"DOI-NOT-SUPPORTED", "SITUATION-NOT-SUPPORTED",
"INVALID-COOKIE", "INVALID-MAJOR-VERSION",
"INVALID-MINOR-VERSION", "INVALID-EXCHANGE-TYPE",
"INVALID-FLAGS", "INVALID-MESSAGE-ID",
"INVALID-PROTOCOL-ID", "INVALID-SPI",
"INVALID-TRANSFORM-ID", "ATTRIBUTES-NOT-SUPPORTED",
"NO-PROPOSAL-CHOSEN", "BAD-PROPOSAL-SYNTAX",
"PAYLOAD-MALFORMED", "INVALID-KEY-INFORMATION",
"INVALID-ID-INFORMATION", "INVALID-CERT-ENCODING",
"INVALID-CERTIFICATE", "CERT-TYPE-UNSUPPORTED",
"INVALID-CERT-AUTHORITY", "INVALID-HASH-INFORMATION",
"AUTHENTICATION-FAILED", "INVALID-SIGNATURE",
"ADDRESS-NOTIFICATION", "NOTIFY-SA-LIFETIME",
"CERTIFICATE-UNAVAILABLE", "UNSUPPORTED-EXCHANGE-TYPE",
"UNEQUAL-PAYLOAD-LENGTHS",
};
static const char *ipsec_notify_error_str[] = {
"RESERVED",
};
static const char *notify_status_str[] = {
"CONNECTED",
};
static const char *ipsec_notify_status_str[] = {
"RESPONDER-LIFETIME", "REPLAY-STATUS",
"INITIAL-CONTACT",
};
/* NOTE: these macros must be called with x in the proper range */
/* 0 - 8191 */
#define NOTIFY_ERROR_STR(x) \
STR_OR_ID((x), notify_error_str)
/* 8192 - 16383 */
#define IPSEC_NOTIFY_ERROR_STR(x) \
STR_OR_ID((u_int)((x) - 8192), ipsec_notify_error_str)
/* 16384 - 24575 */
#define NOTIFY_STATUS_STR(x) \
STR_OR_ID((u_int)((x) - 16384), notify_status_str)
/* 24576 - 32767 */
#define IPSEC_NOTIFY_STATUS_STR(x) \
STR_OR_ID((u_int)((x) - 24576), ipsec_notify_status_str)
ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_N)));
p = (const struct ikev1_pl_n *)ext;
ND_TCHECK(*p);
UNALIGNED_MEMCPY(&n, ext, sizeof(n));
doi = ntohl(n.doi);
proto = n.prot_id;
if (doi != 1) {
ND_PRINT((ndo," doi=%d", doi));
ND_PRINT((ndo," proto=%d", proto));
if (ntohs(n.type) < 8192)
ND_PRINT((ndo," type=%s", NOTIFY_ERROR_STR(ntohs(n.type))));
else if (ntohs(n.type) < 16384)
ND_PRINT((ndo," type=%s", numstr(ntohs(n.type))));
else if (ntohs(n.type) < 24576)
ND_PRINT((ndo," type=%s", NOTIFY_STATUS_STR(ntohs(n.type))));
else
ND_PRINT((ndo," type=%s", numstr(ntohs(n.type))));
if (n.spi_size) {
ND_PRINT((ndo," spi="));
if (!rawprint(ndo, (const uint8_t *)(p + 1), n.spi_size))
goto trunc;
}
return (const u_char *)(p + 1) + n.spi_size;
}
ND_PRINT((ndo," doi=ipsec"));
ND_PRINT((ndo," proto=%s", PROTOIDSTR(proto)));
if (ntohs(n.type) < 8192)
ND_PRINT((ndo," type=%s", NOTIFY_ERROR_STR(ntohs(n.type))));
else if (ntohs(n.type) < 16384)
ND_PRINT((ndo," type=%s", IPSEC_NOTIFY_ERROR_STR(ntohs(n.type))));
else if (ntohs(n.type) < 24576)
ND_PRINT((ndo," type=%s", NOTIFY_STATUS_STR(ntohs(n.type))));
else if (ntohs(n.type) < 32768)
ND_PRINT((ndo," type=%s", IPSEC_NOTIFY_STATUS_STR(ntohs(n.type))));
else
ND_PRINT((ndo," type=%s", numstr(ntohs(n.type))));
if (n.spi_size) {
ND_PRINT((ndo," spi="));
if (!rawprint(ndo, (const uint8_t *)(p + 1), n.spi_size))
goto trunc;
}
cp = (const u_char *)(p + 1) + n.spi_size;
ep2 = (const u_char *)p + item_len;
if (cp < ep) {
switch (ntohs(n.type)) {
case IPSECDOI_NTYPE_RESPONDER_LIFETIME:
{
const struct attrmap *map = oakley_t_map;
size_t nmap = sizeof(oakley_t_map)/sizeof(oakley_t_map[0]);
ND_PRINT((ndo," attrs=("));
while (cp < ep && cp < ep2) {
cp = ikev1_attrmap_print(ndo, cp, ep2, map, nmap);
if (cp == NULL) {
ND_PRINT((ndo,")"));
goto trunc;
}
}
ND_PRINT((ndo,")"));
break;
}
case IPSECDOI_NTYPE_REPLAY_STATUS:
ND_PRINT((ndo," status=("));
ND_TCHECK_32BITS(cp);
ND_PRINT((ndo,"replay detection %sabled",
EXTRACT_32BITS(cp) ? "en" : "dis"));
ND_PRINT((ndo,")"));
break;
default:
/*
* XXX - fill in more types here; see, for example,
* draft-ietf-ipsec-notifymsg-04.
*/
if (ndo->ndo_vflag > 3) {
ND_PRINT((ndo," data=("));
if (!rawprint(ndo, (const uint8_t *)(cp), ep - cp))
goto trunc;
ND_PRINT((ndo,")"));
} else {
if (!ike_show_somedata(ndo, cp, ep))
goto trunc;
}
break;
}
}
return (const u_char *)ext + item_len;
trunc:
ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_N)));
return NULL;
}
|
Safe
|
[
"CWE-125"
] |
tcpdump
|
396e94ff55a80d554b1fe46bf107db1e91008d6c
|
1.7219751843044313e+38
| 150 |
(for 4.9.3) CVE-2018-14469/ISAKMP: Add a missing bounds check
In ikev1_n_print() check bounds before trying to fetch the replay detection
status.
This fixes a buffer over-read discovered by Bhargava Shastry.
Add a test using the capture file supplied by the reporter(s).
| 0 |
int restrict_link_reject(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key)
{
return -EPERM;
}
|
Safe
|
[
"CWE-20"
] |
linux
|
363b02dab09b3226f3bd1420dad9c72b79a42a76
|
4.36886388098836e+37
| 7 |
KEYS: Fix race between updating and finding a negative key
Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection
error into one field such that:
(1) The instantiation state can be modified/read atomically.
(2) The error can be accessed atomically with the state.
(3) The error isn't stored unioned with the payload pointers.
This deals with the problem that the state is spread over three different
objects (two bits and a separate variable) and reading or updating them
atomically isn't practical, given that not only can uninstantiated keys
change into instantiated or rejected keys, but rejected keys can also turn
into instantiated keys - and someone accessing the key might not be using
any locking.
The main side effect of this problem is that what was held in the payload
may change, depending on the state. For instance, you might observe the
key to be in the rejected state. You then read the cached error, but if
the key semaphore wasn't locked, the key might've become instantiated
between the two reads - and you might now have something in hand that isn't
actually an error code.
The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error
code if the key is negatively instantiated. The key_is_instantiated()
function is replaced with key_is_positive() to avoid confusion as negative
keys are also 'instantiated'.
Additionally, barriering is included (a C11 sketch follows this message):
(1) Order payload-set before state-set during instantiation.
(2) Order state-read before payload-read when using the key.
Further separate barriering is necessary if RCU is being used to access the
payload content after reading the payload pointers.
Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data")
Cc: stable@vger.kernel.org # v4.4+
Reported-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
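Barriering rules (1) and (2) above, as a self-contained C11 sketch; the state
encoding mirrors the description in the message, but the struct and helpers
are illustrative, not the kernel's actual key code.
#include <stdatomic.h>
#include <stddef.h>

#define KEY_IS_UNINSTANTIATED 0
#define KEY_IS_POSITIVE       1
/* negative values: the cached rejection error, e.g. -ENOKEY */

struct key_sketch {
    void *payload;
    atomic_int state;
};

static void key_instantiate(struct key_sketch *k, void *payload)
{
    k->payload = payload;                            /* (1) publish payload first... */
    atomic_store_explicit(&k->state, KEY_IS_POSITIVE,
                          memory_order_release);     /* ...then flip the state */
}

static void *key_payload_if_positive(struct key_sketch *k)
{
    /* (2) acquire-load the state before touching the payload, so the
     * payload read cannot be reordered ahead of the state check */
    int s = atomic_load_explicit(&k->state, memory_order_acquire);
    return (s == KEY_IS_POSITIVE) ? k->payload : NULL;
}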
| 0 |
static inline void ok_png_premultiply(uint8_t *dst) {
const uint8_t a = dst[3];
if (a == 0) {
dst[0] = 0;
dst[1] = 0;
dst[2] = 0;
} else if (a < 255) {
dst[0] = (a * dst[0] + 127) / 255;
dst[1] = (a * dst[1] + 127) / 255;
dst[2] = (a * dst[2] + 127) / 255;
}
}
|
Safe
|
[
"CWE-787"
] |
ok-file-formats
|
e49cdfb84fb5eca2a6261f3c51a3c793fab9f62e
|
2.6306238392795788e+38
| 12 |
ok_png: Disallow multiple IHDR chunks (#15)
| 0 |
static void read_reply_cb(DBusMessage *message, void *user_data)
{
struct pending_op *op = user_data;
DBusError err;
DBusMessageIter iter, array;
uint8_t ecode = 0;
uint8_t *value = NULL;
int len = 0;
if (!op->owner_queue) {
DBG("Pending read was canceled when object got removed");
return;
}
dbus_error_init(&err);
if (dbus_set_error_from_message(&err, message) == TRUE) {
DBG("Failed to read value: %s: %s", err.name, err.message);
ecode = dbus_error_to_att_ecode(err.name,
BT_ATT_ERROR_READ_NOT_PERMITTED);
dbus_error_free(&err);
goto done;
}
dbus_message_iter_init(message, &iter);
if (dbus_message_iter_get_arg_type(&iter) != DBUS_TYPE_ARRAY) {
/*
* Return not supported for this, as the external app basically
* doesn't properly support reading from this characteristic.
*/
ecode = BT_ATT_ERROR_REQUEST_NOT_SUPPORTED;
error("Invalid return value received for \"ReadValue\"");
goto done;
}
dbus_message_iter_recurse(&iter, &array);
dbus_message_iter_get_fixed_array(&array, &value, &len);
if (len < 0) {
ecode = BT_ATT_ERROR_REQUEST_NOT_SUPPORTED;
value = NULL;
len = 0;
goto done;
}
/* Truncate the value if it's too large */
len = MIN(BT_ATT_MAX_VALUE_LEN, len);
value = len ? value : NULL;
done:
gatt_db_attribute_read_result(op->attrib, op->id, ecode, value, len);
}
|
Safe
|
[
"CWE-416"
] |
bluez
|
838c0dc7641e1c991c0f3027bf94bee4606012f8
|
1.4765295110137207e+38
| 53 |
gatt: Fix not cleaning up when disconnected
A use-after-free is currently possible on a GATT server if a client
disconnects while a WriteValue call is being processed over D-Bus.
This patch adds a pending disconnect callback, registered with
bt_att_register_disconnect, to handle cleanup better if a disconnect
occurs during a write, an acquire-write, or a read operation.
| 0 |
multi_learn_addr(struct multi_context *m,
struct multi_instance *mi,
const struct mroute_addr *addr,
const unsigned int flags)
{
struct hash_element *he;
const uint32_t hv = hash_value(m->vhash, addr);
struct hash_bucket *bucket = hash_bucket(m->vhash, hv);
struct multi_route *oldroute = NULL;
struct multi_instance *owner = NULL;
struct gc_arena gc = gc_new();
/* if route currently exists, get the instance which owns it */
he = hash_lookup_fast(m->vhash, bucket, addr, hv);
if (he)
{
oldroute = (struct multi_route *) he->value;
}
if (oldroute && multi_route_defined(m, oldroute))
{
owner = oldroute->instance;
}
/* do we need to add address to hash table? */
if ((!owner || owner != mi) && mroute_learnable_address(addr, &gc)
&& !mroute_addr_equal(addr, &m->local))
{
struct multi_route *newroute;
bool learn_succeeded = false;
ALLOC_OBJ(newroute, struct multi_route);
newroute->addr = *addr;
newroute->instance = mi;
newroute->flags = flags;
newroute->last_reference = now;
newroute->cache_generation = 0;
/* The cache is invalidated when cache_generation is incremented */
if (flags & MULTI_ROUTE_CACHE)
{
newroute->cache_generation = m->route_helper->cache_generation;
}
if (oldroute) /* route already exists? */
{
if (route_quota_test(mi) && learn_address_script(m, mi, "update", &newroute->addr))
{
learn_succeeded = true;
owner = mi;
multi_instance_inc_refcount(mi);
route_quota_inc(mi);
/* delete old route */
multi_route_del(oldroute);
/* modify hash table entry, replacing old route */
he->key = &newroute->addr;
he->value = newroute;
}
}
else
{
if (route_quota_test(mi) && learn_address_script(m, mi, "add", &newroute->addr))
{
learn_succeeded = true;
owner = mi;
multi_instance_inc_refcount(mi);
route_quota_inc(mi);
/* add new route */
hash_add_fast(m->vhash, bucket, &newroute->addr, hv, newroute);
}
}
msg(D_MULTI_LOW, "MULTI: Learn%s: %s -> %s",
learn_succeeded ? "" : " FAILED",
mroute_addr_print(&newroute->addr, &gc),
multi_instance_string(mi, false, &gc));
if (!learn_succeeded)
{
free(newroute);
}
}
gc_free(&gc);
return owner;
}
|
Safe
|
[
"CWE-362",
"CWE-476"
] |
openvpn
|
37bc691e7d26ea4eb61a8a434ebd7a9ae76225ab
|
2.009503504052704e+37
| 88 |
Fix illegal client float (CVE-2020-11810)
There is a time frame between allocating peer-id and initializing data
channel key (which is performed on receiving push request or on async
push-reply) in which the existing peer-id float checks do not work right.
If a "rogue" data channel packet arrives during that time frame from
another address and with the same peer-id, this would cause the client to float
to that new address. This is because:
- tls_pre_decrypt() sets packet length to zero if
data channel key has not been initialized, which leads to
- openvpn_decrypt() returns true if packet length is zero,
which leads to
- process_incoming_link_part1() returns true, which
calls multi_process_float(), which commits float
Note that problem doesn't happen when data channel key is initialized,
since in this case openvpn_decrypt() returns false.
The net effect of this behaviour is that the VPN session for the
"victim client" is broken. Since the "attacker client" does not have
suitable keys, it can not inject or steal VPN traffic from the other
session. The time window is small and it can not be used to attack
a specific client's session, unless some other way is found to make it
disconnect and reconnect first.
CVE-2020-11810 has been assigned to acknowledge this risk.
Fix illegal float by adding buffer length check ("is this packet still
considered valid") before calling multi_process_float().
Trac: #1272
CVE: 2020-11810
Signed-off-by: Lev Stipakov <lev@openvpn.net>
Acked-by: Arne Schwabe <arne@rfc2549.org>
Acked-by: Antonio Quartulli <antonio@openvpn.net>
Acked-by: Gert Doering <gert@greenie.muc.de>
Message-Id: <20200415073017.22839-1-lstipakov@gmail.com>
URL: https://www.mail-archive.com/openvpn-devel@lists.sourceforge.net/msg19720.html
Signed-off-by: Gert Doering <gert@greenie.muc.de>
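A hedged sketch of that guard; buf_len_after_decrypt is a hypothetical name
for the post-decrypt buffer length, and multi_process_float() is stubbed out
(with an assumed two-argument signature) so the sketch stands alone.
struct multi_context;                /* opaque stand-ins for openvpn's types */
struct multi_instance;

/* stub for openvpn's multi_process_float(); the real one re-binds the
 * client instance to the packet's new source address */
static void multi_process_float(struct multi_context *m, struct multi_instance *mi)
{
    (void)m; (void)mi;
}

static void maybe_float(struct multi_context *m, struct multi_instance *mi,
                        int buf_len_after_decrypt)
{
    /* tls_pre_decrypt() zeroes the buffer length when no data channel key
     * exists yet, so a zero length means "unauthenticated - must not float" */
    if (buf_len_after_decrypt > 0)   /* "is this packet still considered valid" */
        multi_process_float(m, mi);  /* only now commit the new source address */
}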
| 0 |
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
}
|
Safe
|
[
"CWE-416"
] |
linux
|
c3e2219216c92919a6bd1711f340f5faa98695e6
|
1.709496052851766e+38
| 5 |
block: free sched's request pool in blk_cleanup_queue
In theory, IO scheduler belongs to request queue, and the request pool
of sched tags belongs to the request queue too.
However, the current tag allocation interfaces are re-used for both
driver tags and sched tags, and driver tags are definitely host-wide
and don't belong to any request queue; the same holds for their request pool.
So we need the tagset instance for freeing requests of sched tags.
Meanwhile, blk_mq_free_tag_set() often follows blk_cleanup_queue() in the
non-BLK_MQ_F_TAG_SHARED case, which requires that the request pool of sched
tags be freed before calling blk_mq_free_tag_set().
Commit 47cdee29ef9d94e ("block: move blk_exit_queue into __blk_release_queue")
moves blk_exit_queue into __blk_release_queue for simplifying the fast
path in generic_make_request(), which causes an oops while freeing requests
of sched tags in __blk_release_queue().
Fix the above issue by moving the freeing of the sched tags request pool into
blk_cleanup_queue(); this is safe because the queue has been frozen and there
are no in-queue requests at that time. Freeing sched tags has to be kept in
the queue's release handler because there might be uncompleted dispatch
activity which might refer to sched tags.
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Christoph Hellwig <hch@lst.de>
Fixes: 47cdee29ef9d94e485eb08f962c74943023a5271 ("block: move blk_exit_queue into __blk_release_queue")
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Reported-by: kernel test robot <rong.a.chen@intel.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
size_t vterm_screen_get_text(const VTermScreen *screen, char *str, size_t len, const VTermRect rect)
{
return _get_chars(screen, 1, str, len, rect);
}
|
Safe
|
[
"CWE-476"
] |
vim
|
cd929f7ba8cc5b6d6dcf35c8b34124e969fed6b8
|
3.1902903558855633e+38
| 4 |
patch 8.1.0633: crash when out of memory while opening a terminal window
Problem: Crash when out of memory while opening a terminal window.
Solution: Handle out-of-memory more gracefully.
| 0 |
static void vhost_net_vq_reset(struct vhost_net *n)
{
int i;
vhost_net_clear_ubuf_info(n);
for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
n->vqs[i].done_idx = 0;
n->vqs[i].upend_idx = 0;
n->vqs[i].ubufs = NULL;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
}
}
|
Safe
|
[
"CWE-20",
"CWE-787"
] |
linux
|
d8316f3991d207fe32881a9ac20241be8fa2bad0
|
1.4137132034008013e+38
| 15 |
vhost: fix total length when packets are too short
When mergeable buffers are disabled, and the
incoming packet is too large for the rx buffer,
get_rx_bufs returns success.
This was intentional, in order to make recvmsg
truncate the packet so that handle_rx would then
detect err != sock_len and drop it.
Unfortunately we pass the original sock_len to
recvmsg - which means we use parts of iov not fully
validated.
Fix this up by detecting this overrun and doing packet drop
immediately.
CVE-2014-0077
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
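The shape of the fix, as a hedged userspace sketch: compare what recvmsg()
actually returned against the length the buffers were prepared for, and drop
on mismatch instead of exposing the unvalidated iov tail. Names are
illustrative, not vhost's actual code.
#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>

/* Illustrative receive step: sock_len is what the guest buffers were
 * prepared for, ret is what recvmsg() actually delivered. */
static int rx_one(int fd, struct msghdr *msg, ssize_t sock_len)
{
    ssize_t ret = recvmsg(fd, msg, MSG_DONTWAIT | MSG_TRUNC);
    if (ret != sock_len) {
        /* the packet was truncated or over-long: drop it immediately
         * rather than letting partially validated iov slots through */
        fprintf(stderr, "discarded short/overrun packet (%zd != %zd)\n",
                ret, sock_len);
        return -1;
    }
    return 0;
}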
| 0 |
static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
struct sctp_chunk *asconf,
sctp_addip_param_t *asconf_param)
{
struct sctp_transport *peer;
struct sctp_af *af;
union sctp_addr addr;
union sctp_addr_param *addr_param;
addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t);
if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP &&
asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP &&
asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
return SCTP_ERROR_UNKNOWN_PARAM;
switch (addr_param->p.type) {
case SCTP_PARAM_IPV6_ADDRESS:
if (!asoc->peer.ipv6_address)
return SCTP_ERROR_DNS_FAILED;
break;
case SCTP_PARAM_IPV4_ADDRESS:
if (!asoc->peer.ipv4_address)
return SCTP_ERROR_DNS_FAILED;
break;
default:
return SCTP_ERROR_DNS_FAILED;
}
af = sctp_get_af_specific(param_type2af(addr_param->p.type));
if (unlikely(!af))
return SCTP_ERROR_DNS_FAILED;
af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
/* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast
* or multicast address.
* (note: wildcard is permitted and requires special handling so
* make sure we check for that)
*/
if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb))
return SCTP_ERROR_DNS_FAILED;
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
/* Section 4.2.1:
* If the address 0.0.0.0 or ::0 is provided, the source
* address of the packet MUST be added.
*/
if (af->is_any(&addr))
memcpy(&addr, &asconf->source, sizeof(addr));
/* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
* request and does not have the local resources to add this
* new address to the association, it MUST return an Error
* Cause TLV set to the new error code 'Operation Refused
* Due to Resource Shortage'.
*/
peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
if (!peer)
return SCTP_ERROR_RSRC_LOW;
/* Start the heartbeat timer. */
if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
sctp_transport_hold(peer);
asoc->new_transport = peer;
break;
case SCTP_PARAM_DEL_IP:
/* ADDIP 4.3 D7) If a request is received to delete the
* last remaining IP address of a peer endpoint, the receiver
* MUST send an Error Cause TLV with the error cause set to the
* new error code 'Request to Delete Last Remaining IP Address'.
*/
if (asoc->peer.transport_count == 1)
return SCTP_ERROR_DEL_LAST_IP;
/* ADDIP 4.3 D8) If a request is received to delete an IP
* address which is also the source address of the IP packet
* which contained the ASCONF chunk, the receiver MUST reject
* this request. To reject the request the receiver MUST send
* an Error Cause TLV set to the new error code 'Request to
* Delete Source IP Address'
*/
if (sctp_cmp_addr_exact(&asconf->source, &addr))
return SCTP_ERROR_DEL_SRC_IP;
/* Section 4.2.2
* If the address 0.0.0.0 or ::0 is provided, all
* addresses of the peer except the source address of the
* packet MUST be deleted.
*/
if (af->is_any(&addr)) {
sctp_assoc_set_primary(asoc, asconf->transport);
sctp_assoc_del_nonprimary_peers(asoc,
asconf->transport);
} else
sctp_assoc_del_peer(asoc, &addr);
break;
case SCTP_PARAM_SET_PRIMARY:
/* ADDIP Section 4.2.4
* If the address 0.0.0.0 or ::0 is provided, the receiver
* MAY mark the source address of the packet as its
* primary.
*/
if (af->is_any(&addr))
memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
peer = sctp_assoc_lookup_paddr(asoc, &addr);
if (!peer)
return SCTP_ERROR_DNS_FAILED;
sctp_assoc_set_primary(asoc, peer);
break;
}
return SCTP_ERROR_NO_ERROR;
}
|
Safe
|
[] |
linux
|
196d67593439b03088913227093e374235596e33
|
3.1031326953558215e+38
| 118 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off-by: Michele Baldessari <michele@acksyn.org>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
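Editor's note: a sketch of a per-association statistics block using only the counter names mentioned in the message above (ipackets, opackets, rtxchunks, idupchunks, outofseqtsns, max_obs_rto). The field layout is an assumption for illustration, not the kernel's actual struct.

```c
#include <stdint.h>

struct assoc_stats {
    uint64_t ipackets;      /* packets received for the association */
    uint64_t opackets;      /* packets sent */
    uint64_t rtxchunks;     /* retransmitted chunks */
    uint64_t idupchunks;    /* duplicate chunks (incl. SACK dup TSNs) */
    uint64_t outofseqtsns;  /* out-of-sequence, unexpected TSNs */
    uint64_t max_obs_rto;   /* largest RTO observed since last query */
};
```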
static void page_objects_list_renumber(pdf_write_state *opts)
{
int i, j;
for (i = 0; i < opts->page_object_lists->len; i++)
{
page_objects *po = opts->page_object_lists->page[i];
for (j = 0; j < po->len; j++)
{
po->object[j] = opts->renumber_map[po->object[j]];
}
po->page_object_number = opts->renumber_map[po->page_object_number];
}
}
|
Safe
|
[
"CWE-119"
] |
mupdf
|
520cc26d18c9ee245b56e9e91f9d4fcae02be5f0
|
1.5543628214526627e+38
| 14 |
Bug 689699: Avoid buffer overrun.
When cleaning a pdf file, various lists (of pdf_xref_len length) are
defined early on.
If we trigger a repair during the clean, this can cause pdf_xref_len
to increase, causing an overrun.
Fix this by watching for changes in the length, and checking accesses
to the list for validity.
This also appears to fix bugs 698700-698703.
| 0 |
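Editor's note: a sketch of the "watch for length changes" guard the fix message describes. Lists were sized from an earlier xref length, so any index derived after a repair must be re-checked against that original size. Names are illustrative, not mupdf's actual code.

```c
/* Return the renumbered object, or the object unchanged when it
 * falls outside the range the map was allocated for (a repair may
 * have introduced object numbers beyond the original xref length). */
static int renumber_checked(const int *map, int map_len, int obj)
{
    if (obj < 0 || obj >= map_len)
        return obj;
    return map[obj];
}
```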
void CL_ForwardCommandToServer( const char *string ) {
char *cmd;
cmd = Cmd_Argv(0);
// ignore key up commands
if ( cmd[0] == '-' ) {
return;
}
if ( clc.demoplaying || clc.state < CA_CONNECTED || cmd[0] == '+' ) {
Com_Printf ("Unknown command \"%s" S_COLOR_WHITE "\"\n", cmd);
return;
}
if ( Cmd_Argc() > 1 ) {
CL_AddReliableCommand(string, qfalse);
} else {
CL_AddReliableCommand(cmd, qfalse);
}
}
|
Safe
|
[
"CWE-269"
] |
ioq3
|
376267d534476a875d8b9228149c4ee18b74a4fd
|
2.860618935192126e+38
| 21 |
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
| 0 |
int r_jwe_encrypt_payload(jwe_t * jwe) {
int ret = RHN_OK, res;
gnutls_cipher_hd_t handle;
gnutls_datum_t key, iv;
unsigned char * ptext = NULL, * text_zip = NULL, * ciphertext_b64url = NULL, tag[128] = {0}, * tag_b64url = NULL, * aad = NULL;
size_t ptext_len = 0, ciphertext_b64url_len = 0, tag_len = 0, text_zip_len = 0;
char * str_header = NULL;
int cipher_cbc;
struct _o_datum dat = {0, NULL};
if (jwe != NULL &&
jwe->payload != NULL &&
jwe->payload_len &&
jwe->enc != R_JWA_ENC_UNKNOWN &&
jwe->key != NULL &&
jwe->iv != NULL &&
jwe->iv_len &&
jwe->key_len == _r_get_key_size(jwe->enc) &&
r_jwe_set_enc_header(jwe, jwe->j_header) == RHN_OK) {
cipher_cbc = (jwe->enc == R_JWA_ENC_A128CBC || jwe->enc == R_JWA_ENC_A192CBC || jwe->enc == R_JWA_ENC_A256CBC);
if ((str_header = json_dumps(jwe->j_header, JSON_COMPACT)) != NULL) {
if (o_base64url_encode_alloc((const unsigned char *)str_header, o_strlen(str_header), &dat)) {
o_free(jwe->header_b64url);
jwe->header_b64url = (unsigned char *)o_strndup((const char *)dat.data, dat.size);
o_free(dat.data);
dat.data = NULL;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error o_base64url_encode str_header");
ret = RHN_ERROR;
}
o_free(str_header);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error json_dumps j_header");
ret = RHN_ERROR;
}
ptext_len = gnutls_cipher_get_block_size(_r_get_alg_from_enc(jwe->enc));
if (0 == o_strcmp("DEF", r_jwe_get_header_str_value(jwe, "zip"))) {
if (_r_deflate_payload(jwe->payload, jwe->payload_len, &text_zip, &text_zip_len) == RHN_OK) {
if (r_jwe_set_ptext_with_block(text_zip, text_zip_len, &ptext, &ptext_len, _r_get_alg_from_enc(jwe->enc), cipher_cbc) != RHN_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error r_jwe_set_ptext_with_block");
ret = RHN_ERROR;
}
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error _r_deflate_payload");
ret = RHN_ERROR;
}
o_free(text_zip);
} else {
if (r_jwe_set_ptext_with_block(jwe->payload, jwe->payload_len, &ptext, &ptext_len, _r_get_alg_from_enc(jwe->enc), cipher_cbc) != RHN_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error r_jwe_set_ptext_with_block");
ret = RHN_ERROR;
}
}
if (ret == RHN_OK) {
if (cipher_cbc) {
key.data = jwe->key+(jwe->key_len/2);
key.size = jwe->key_len/2;
} else {
key.data = jwe->key;
key.size = jwe->key_len;
}
iv.data = jwe->iv;
iv.size = jwe->iv_len;
if (!(res = gnutls_cipher_init(&handle, _r_get_alg_from_enc(jwe->enc), &key, &iv))) {
if (jwe->aad_b64url == NULL || jwe->token_mode == R_JSON_MODE_COMPACT) {
aad = (unsigned char *)o_strdup((const char *)jwe->header_b64url);
} else {
aad = (unsigned char *)msprintf("%s.%s", jwe->header_b64url, jwe->aad_b64url);
}
if (!cipher_cbc && (res = gnutls_cipher_add_auth(handle, aad, o_strlen((const char *)aad)))) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error gnutls_cipher_add_auth: '%s'", gnutls_strerror(res));
ret = RHN_ERROR;
}
if (ret == RHN_OK) {
if (!(res = gnutls_cipher_encrypt(handle, ptext, ptext_len))) {
if ((ciphertext_b64url = o_malloc(2*ptext_len)) != NULL) {
if (o_base64url_encode(ptext, ptext_len, ciphertext_b64url, &ciphertext_b64url_len)) {
o_free(jwe->ciphertext_b64url);
jwe->ciphertext_b64url = (unsigned char *)o_strndup((const char *)ciphertext_b64url, ciphertext_b64url_len);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error o_base64url_encode ciphertext");
ret = RHN_ERROR;
}
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error allocating resources for ciphertext_b64url");
ret = RHN_ERROR_MEMORY;
}
o_free(ciphertext_b64url);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error gnutls_cipher_encrypt: '%s'", gnutls_strerror(res));
ret = RHN_ERROR;
}
} else if (!cipher_cbc) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error gnutls_cipher_add_auth: '%s'", gnutls_strerror(res));
ret = RHN_ERROR;
}
if (ret == RHN_OK) {
if (cipher_cbc) {
if (r_jwe_compute_hmac_tag(jwe, ptext, ptext_len, aad, tag, &tag_len) != RHN_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error r_jwe_compute_hmac_tag");
ret = RHN_ERROR;
}
} else {
tag_len = gnutls_cipher_get_tag_size(_r_get_alg_from_enc(jwe->enc));
memset(tag, 0, tag_len);
if ((res = gnutls_cipher_tag(handle, tag, tag_len))) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error gnutls_cipher_tag: '%s'", gnutls_strerror(res));
ret = RHN_ERROR;
}
}
if (ret == RHN_OK && tag_len) {
if ((tag_b64url = o_malloc(tag_len*2)) != NULL) {
if (o_base64url_encode_alloc(tag, tag_len, &dat)) {
o_free(jwe->auth_tag_b64url);
jwe->auth_tag_b64url = (unsigned char *)o_strndup((const char *)dat.data, dat.size);
o_free(dat.data);
dat.data = NULL;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error o_base64url_encode tag_b64url");
ret = RHN_ERROR;
}
o_free(tag_b64url);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error allocating resources for tag_b64url");
ret = RHN_ERROR_MEMORY;
}
}
}
o_free(aad);
gnutls_cipher_deinit(handle);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error gnutls_cipher_init: '%s'", gnutls_strerror(res));
ret = RHN_ERROR;
}
}
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_encrypt_payload - Error input parameters");
ret = RHN_ERROR_PARAM;
}
o_free(ptext);
return ret;
}
|
Safe
|
[
"CWE-703"
] |
rhonabwy
|
dd528b3aabd13863f855a68e76966e4e019fc399
|
5.700193776396833e+37
| 145 |
Fix rsa oaep key length check before decryption
| 0 |
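Editor's note: a sketch of the RSA-OAEP pre-decryption length check in the spirit of the fix message. Per RFC 8017, the ciphertext must be exactly the modulus size k, and k must satisfy k >= 2*hLen + 2 for the chosen hash. Standalone illustration, not rhonabwy's code.

```c
#include <stddef.h>

/* Reject the input before any decryption work is attempted. */
static int oaep_ciphertext_ok(size_t ctext_len, size_t k, size_t hlen)
{
    return ctext_len == k && k >= 2 * hlen + 2;
}
```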
void jpc_qmfb_join_colgrp(jpc_fix_t *a, int numrows, int stride,
int parity)
{
int bufsize = JPC_CEILDIVPOW2(numrows, 1);
#if !defined(HAVE_VLA)
jpc_fix_t joinbuf[QMFB_JOINBUFSIZE * JPC_QMFB_COLGRPSIZE];
#else
jpc_fix_t joinbuf[bufsize * JPC_QMFB_COLGRPSIZE];
#endif
jpc_fix_t *buf = joinbuf;
jpc_fix_t *srcptr;
jpc_fix_t *dstptr;
register jpc_fix_t *srcptr2;
register jpc_fix_t *dstptr2;
register int n;
register int i;
int hstartcol;
#if !defined(HAVE_VLA)
/* Allocate memory for the join buffer from the heap. */
if (bufsize > QMFB_JOINBUFSIZE) {
if (!(buf = jas_malloc(bufsize * JPC_QMFB_COLGRPSIZE * sizeof(jpc_fix_t)))) {
/* We have no choice but to commit suicide. */
abort();
}
}
#endif
hstartcol = (numrows + 1 - parity) >> 1;
/* Save the samples from the lowpass channel. */
n = hstartcol;
srcptr = &a[0];
dstptr = buf;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
srcptr += stride;
dstptr += JPC_QMFB_COLGRPSIZE;
}
/* Copy the samples from the highpass channel into place. */
srcptr = &a[hstartcol * stride];
dstptr = &a[(1 - parity) * stride];
n = numrows - hstartcol;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += 2 * stride;
srcptr += stride;
}
/* Copy the samples from the lowpass channel into place. */
srcptr = buf;
dstptr = &a[parity * stride];
n = hstartcol;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += 2 * stride;
srcptr += JPC_QMFB_COLGRPSIZE;
}
#if !defined(HAVE_VLA)
/* If the join buffer was allocated on the heap, free this memory. */
if (buf != joinbuf) {
jas_free(buf);
}
#endif
}
|
Vulnerable
|
[
"CWE-119"
] |
jasper
|
0d64bde2b3ba7e1450710d540136a8ce4199ef30
|
4.662692126335228e+37
| 85 |
CVE-2014-8158
| 1 |
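Editor's note: a sketch of an overflow-checked buffer allocation for a join buffer like the one above. Instead of a variable-length array on the stack sized from untrusted dimensions, compute count*size with an explicit overflow check and take the heap path whenever the fixed buffer is too small. Illustrative only.

```c
#include <stdint.h>
#include <stdlib.h>

/* Returns NULL if count*size would overflow or allocation fails,
 * letting the caller report an error instead of calling abort(). */
static void *alloc_elems(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL;            /* multiplication would overflow */
    return malloc(count * size);
}
```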
static void bn_GF2m_mul_2x2(BN_ULONG *r, const BN_ULONG a1, const BN_ULONG a0,
const BN_ULONG b1, const BN_ULONG b0)
{
BN_ULONG m1, m0;
/* r[3] = h1, r[2] = h0; r[1] = l1; r[0] = l0 */
bn_GF2m_mul_1x1(r + 3, r + 2, a1, b1);
bn_GF2m_mul_1x1(r + 1, r, a0, b0);
bn_GF2m_mul_1x1(&m1, &m0, a0 ^ a1, b0 ^ b1);
/* Correction on m1 ^= l1 ^ h1; m0 ^= l0 ^ h0; */
r[2] ^= m1 ^ r[1] ^ r[3]; /* h0 ^= m1 ^ l1 ^ h1; */
r[1] = r[3] ^ r[2] ^ r[0] ^ m1 ^ m0; /* l1 ^= l0 ^ h0 ^ m0; */
}
|
Safe
|
[
"CWE-399"
] |
openssl
|
f61bbf8da532038ed0eae16a9a11771f3da22d30
|
2.289575582545997e+38
| 12 |
bn/bn_gf2m.c: avoid infinite loop with malformed ECParameters.
CVE-2015-1788
Reviewed-by: Matt Caswell <matt@openssl.org>
(cherry picked from commit 4924b37ee01f71ae19c94a8934b80eeb2f677932)
| 0 |
subselect_uniquesubquery_engine::~subselect_uniquesubquery_engine()
{
/* Tell handler we don't need the index anymore */
//psergey-merge-todo: the following was gone in 6.0:
//psergey-merge: don't need this after all: tab->table->file->ha_index_end();
}
|
Safe
|
[
"CWE-89"
] |
server
|
3c209bfc040ddfc41ece8357d772547432353fd2
|
9.321837746805089e+37
| 6 |
MDEV-25994: Crash with union of my_decimal type in ORDER BY clause
When single-row subquery fails with "Subquery returns more than 1 row"
error, it will raise an error and return NULL.
On the other hand, Item_singlerow_subselect sets item->maybe_null=0
for table-less subqueries like "(SELECT not_null_value)" (*)
This discrepancy (item with maybe_null=0 returning NULL) causes the
code in Type_handler_decimal_result::make_sort_key_part() to crash.
Fixed this by allowing inference (*) only when the subquery is NOT a
UNION.
| 0 |
static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
int width, int height, int stride,
const uint8_t *src, int src_size)
{
int i = 0;
int read = 0;
uint32_t length;
uint32_t offset = 1;
int esc_count = src[0];
GetBitContext gb;
lag_rac rac;
const uint8_t *src_end = src + src_size;
rac.avctx = l->avctx;
l->zeros = 0;
if (esc_count < 4) {
length = width * height;
if (esc_count && AV_RL32(src + 1) < length) {
length = AV_RL32(src + 1);
offset += 4;
}
init_get_bits(&gb, src + offset, src_size * 8);
if (lag_read_prob_header(&rac, &gb) < 0)
return -1;
ff_lag_rac_init(&rac, &gb, length - stride);
for (i = 0; i < height; i++)
read += lag_decode_line(l, &rac, dst + (i * stride), width,
stride, esc_count);
if (read > length)
av_log(l->avctx, AV_LOG_WARNING,
"Output more bytes than length (%d of %d)\n", read,
length);
} else if (esc_count < 8) {
esc_count -= 4;
if (esc_count > 0) {
/* Zero run coding only, no range coding. */
for (i = 0; i < height; i++) {
int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
src_end, width, esc_count);
if (res < 0)
return res;
src += res;
}
} else {
if (src_size < width * height)
return AVERROR_INVALIDDATA; // buffer not big enough
/* Plane is stored uncompressed */
for (i = 0; i < height; i++) {
memcpy(dst + (i * stride), src, width);
src += width;
}
}
} else if (esc_count == 0xff) {
/* Plane is a solid run of given value */
for (i = 0; i < height; i++)
memset(dst + i * stride, src[1], width);
/* Do not apply prediction.
Note: memset to 0 above, setting first value to src[1]
and applying prediction gives the same result. */
return 0;
} else {
av_log(l->avctx, AV_LOG_ERROR,
"Invalid zero run escape code! (%#x)\n", esc_count);
return -1;
}
if (l->avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
for (i = 0; i < height; i++) {
lag_pred_line(l, dst, width, stride, i);
dst += stride;
}
} else {
for (i = 0; i < height; i++) {
lag_pred_line_yuy2(l, dst, width, stride, i,
width == l->avctx->width);
dst += stride;
}
}
return 0;
}
|
Safe
|
[
"CWE-787"
] |
FFmpeg
|
4c3e1956ee35fdcc5ffdb28782050164b4623c0b
|
1.9689703806190015e+38
| 87 |
lagarith: reallocate rgb_planes when needed
Fixes invalid writes on pixel format changes.
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
CC:libav-stable@libav.org
| 0 |
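Editor's note: a sketch of the reallocate-on-change idea from the fix message. A scratch plane buffer kept across frames must be resized whenever the frame geometry changes, otherwise a larger frame writes past the old allocation. Names are illustrative, not FFmpeg's actual code.

```c
#include <stdint.h>
#include <stdlib.h>

struct plane_buf { uint8_t *data; size_t cap; };

/* Grow the buffer when the required size exceeds its capacity. */
static int ensure_plane(struct plane_buf *pb, size_t needed)
{
    if (pb->cap >= needed)
        return 0;
    uint8_t *p = realloc(pb->data, needed);
    if (!p)
        return -1;
    pb->data = p;
    pb->cap = needed;
    return 0;
}
```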
static int ZEND_FASTCALL ZEND_IS_SMALLER_SPEC_CV_TMP_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op2;
zval *result = &EX_T(opline->result.u.var).tmp_var;
compare_function(result,
_get_zval_ptr_cv(&opline->op1, EX(Ts), BP_VAR_R TSRMLS_CC),
_get_zval_ptr_tmp(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC) TSRMLS_CC);
ZVAL_BOOL(result, (Z_LVAL_P(result) < 0));
zval_dtor(free_op2.var);
ZEND_VM_NEXT_OPCODE();
}
|
Safe
|
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
|
2.15729247055433e+38
| 14 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
| 0 |
static int is_response_status_relevant(modsec_rec *msr, int status) {
char *my_error_msg = NULL;
apr_status_t rc;
char buf[32];
/* ENH: Setting is_relevant here will cause an audit even if noauditlog
* was set for the last rule that matched. Is this what we want?
*/
if ((msr->txcfg->auditlog_relevant_regex == NULL)
||(msr->txcfg->auditlog_relevant_regex == NOT_SET_P))
{
return 0;
}
apr_snprintf(buf, sizeof(buf), "%d", status);
rc = msc_regexec(msr->txcfg->auditlog_relevant_regex, buf, strlen(buf), &my_error_msg);
if (rc >= 0) return 1;
if (rc == PCRE_ERROR_NOMATCH) return 0;
msr_log(msr, 1, "Regex processing failed (rc %d): %s", rc, my_error_msg);
return 0;
}
|
Safe
|
[
"CWE-703",
"CWE-264"
] |
ModSecurity
|
f8d441cd25172fdfe5b613442fedfc0da3cc333d
|
1.4490614073069546e+38
| 25 |
Fix Chunked string case-sensitivity issue - CVE-2013-5705
| 0 |
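Editor's note: a sketch of the kind of case-insensitive comparison the CVE-2013-5705 fix message implies: "chunked", "Chunked" and "CHUNKED" must all be treated alike when inspecting Transfer-Encoding.

```c
#include <strings.h>

/* strcasecmp() performs an ASCII case-insensitive comparison. */
static int is_chunked(const char *te)
{
    return te != NULL && strcasecmp(te, "chunked") == 0;
}
```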
virDomainSetBlkioParameters(virDomainPtr domain,
virTypedParameterPtr params,
int nparams, unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(domain, "params=%p, nparams=%d, flags=%x",
params, nparams, flags);
VIR_TYPED_PARAMS_DEBUG(params, nparams);
virResetLastError();
virCheckDomainReturn(domain, -1);
conn = domain->conn;
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(params, error);
virCheckNonNegativeArgGoto(nparams, error);
if (virTypedParameterValidateSet(conn, params, nparams) < 0)
goto error;
if (conn->driver->domainSetBlkioParameters) {
int ret;
ret = conn->driver->domainSetBlkioParameters(domain, params, nparams, flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(domain->conn);
return -1;
}
|
Safe
|
[
"CWE-254"
] |
libvirt
|
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
|
2.8822840296481767e+38
| 36 |
virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
| 0 |
static int oidc_cache_crypto_decrypt(request_rec *r, const char *cache_value,
unsigned char *key, unsigned char **plaintext) {
int len = -1;
/* grab the base64url-encoded tag after the "." */
char *encoded_tag = strstr(cache_value, ".");
if (encoded_tag == NULL) {
oidc_error(r,
"corrupted cache value: no tag separator found in encrypted value");
return FALSE;
}
/* make sure we don't modify the original string since it may be just a pointer into the cache (shm) */
cache_value = apr_pstrmemdup(r->pool, cache_value,
strlen(cache_value) - strlen(encoded_tag));
encoded_tag++;
/* base64url decode the ciphertext */
char *d_bytes = NULL;
int d_len = oidc_base64url_decode(r->pool, &d_bytes, cache_value);
/* base64url decode the tag */
char *t_bytes = NULL;
int t_len = oidc_base64url_decode(r->pool, &t_bytes, encoded_tag);
/* see if we're still good to go */
if ((d_len > 0) && (t_len > 0)) {
/* allocated space for the plaintext */
*plaintext = apr_pcalloc(r->pool,
(d_len + EVP_CIPHER_block_size(OIDC_CACHE_CIPHER) - 1));
/* decrypt the ciphertext providing the tag value */
len = oidc_cache_crypto_decrypt_impl(r, (unsigned char *) d_bytes,
d_len, OIDC_CACHE_CRYPTO_GCM_AAD,
sizeof(OIDC_CACHE_CRYPTO_GCM_AAD), (unsigned char *) t_bytes,
t_len, key, OIDC_CACHE_CRYPTO_GCM_IV,
sizeof(OIDC_CACHE_CRYPTO_GCM_IV), *plaintext);
/* check the result and make sure it is \0 terminated */
if (len > -1) {
(*plaintext)[len] = '\0';
} else {
*plaintext = NULL;
}
}
return len;
}
|
Vulnerable
|
[
"CWE-330"
] |
mod_auth_openidc
|
375407c16c61a70b56fdbe13b0d2c8f11398e92c
|
1.3178003925756344e+37
| 52 |
use encrypted JWTs for storing encrypted cache contents
- avoid using static AAD/IV; thanks @niebardzo
- bump to 2.4.9-dev
Signed-off-by: Hans Zandbelt <hans.zandbelt@zmartzone.eu>
| 1 |
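Editor's note: a sketch of per-message IV generation, addressing the static-AAD/IV concern in the fix message. A fresh random IV must be drawn for every GCM encryption and stored alongside the ciphertext. This is an illustration, not mod_auth_openidc's actual code.

```c
#include <openssl/rand.h>

/* RAND_bytes() returns 1 on success; never reuse an IV under the
 * same key with GCM. */
static int make_gcm_iv(unsigned char *iv, int iv_len)
{
    return RAND_bytes(iv, iv_len) == 1;
}
```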
ciEnv::ciEnv(CompileTask* task)
: _ciEnv_arena(mtCompiler) {
VM_ENTRY_MARK;
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
thread->set_env(this);
assert(ciEnv::current() == this, "sanity");
_oop_recorder = NULL;
_debug_info = NULL;
_dependencies = NULL;
_failure_reason = NULL;
_inc_decompile_count_on_failure = true;
_compilable = MethodCompilable;
_break_at_compile = false;
_compiler_data = NULL;
#ifndef PRODUCT
assert(!firstEnv, "not initialized properly");
#endif /* !PRODUCT */
_num_inlined_bytecodes = 0;
assert(task == NULL || thread->task() == task, "sanity");
if (task != NULL) {
task->mark_started(os::elapsed_counter());
}
_task = task;
_log = NULL;
// Temporary buffer for creating symbols and such.
_name_buffer = NULL;
_name_buffer_len = 0;
_arena = &_ciEnv_arena;
_factory = new (_arena) ciObjectFactory(_arena, 128);
// Preload commonly referenced system ciObjects.
// During VM initialization, these instances have not yet been created.
// Assertions ensure that these instances are not accessed before
// their initialization.
assert(Universe::is_fully_initialized(), "should be complete");
oop o = Universe::null_ptr_exception_instance();
assert(o != NULL, "should have been initialized");
_NullPointerException_instance = get_object(o)->as_instance();
o = Universe::arithmetic_exception_instance();
assert(o != NULL, "should have been initialized");
_ArithmeticException_instance = get_object(o)->as_instance();
_ArrayIndexOutOfBoundsException_instance = NULL;
_ArrayStoreException_instance = NULL;
_ClassCastException_instance = NULL;
_the_null_string = NULL;
_the_min_jint_string = NULL;
_jvmti_can_hotswap_or_post_breakpoint = false;
_jvmti_can_access_local_variables = false;
_jvmti_can_post_on_exceptions = false;
_jvmti_can_pop_frame = false;
}
|
Safe
|
[] |
jdk11u
|
6c0ba0785a2f0900be301f72764cf4dcfa720991
|
2.4777032462517403e+38
| 61 |
8281859: Improve class compilation
Reviewed-by: mbaesken
Backport-of: 3ac62a66efd05d0842076dd4cfbea0e53b12630f
| 0 |
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm)
{
memset(scm, 0, sizeof(*scm));
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
return 0;
return __scm_send(sock, msg, scm);
}
|
Vulnerable
|
[
"CWE-287",
"CWE-284"
] |
linux
|
e0e3cea46d31d23dc40df0a49a7a2c04fe8edfea
|
4.949314025340195e+37
| 9 |
af_netlink: force credentials passing [CVE-2012-3520]
Pablo Neira Ayuso discovered that avahi and
potentially NetworkManager accept spoofed Netlink messages because of a
kernel bug. The kernel passes all-zero SCM_CREDENTIALS ancillary data
to the receiver if the sender did not provide such data, instead of not
including any such data at all or including the correct data from the
peer (as it is the case with AF_UNIX).
This bug was introduced in commit 16e572626961
(af_unix: dont send SCM_CREDENTIALS by default)
This patch forces passing credentials for netlink, as
before the regression.
Another fix would be to not add SCM_CREDENTIALS in
netlink messages if not provided by the sender, but it
might break some programs.
With help from Florian Weimer & Petr Matousek
This issue is designated as CVE-2012-3520
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Petr Matousek <pmatouse@redhat.com>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 1 |
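Editor's note: a sketch of how a userspace receiver extracts SCM_CREDENTIALS ancillary data after recvmsg(); the CVE-2012-3520 message is about the kernel handing such receivers all-zero credentials for netlink instead of real peer credentials. Standard Linux cmsg walking, illustrative only.

```c
#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>

/* Copy the sender's credentials out of the control buffer, or
 * return -1 when none were attached. */
static int peer_creds(struct msghdr *msg, struct ucred *out)
{
    for (struct cmsghdr *c = CMSG_FIRSTHDR(msg); c; c = CMSG_NXTHDR(msg, c)) {
        if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_CREDENTIALS) {
            memcpy(out, CMSG_DATA(c), sizeof(*out));
            return 0;
        }
    }
    return -1;
}
```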
int rfc822_remove_from_adrlist (ADDRESS **a, const char *mailbox)
{
ADDRESS *p, *last = NULL, *t;
int rv = -1;
p = *a;
last = NULL;
while (p)
{
if (ascii_strcasecmp (mailbox, p->mailbox) == 0)
{
if (last)
last->next = p->next;
else
(*a) = p->next;
t = p;
p = p->next;
free_address (t);
rv = 0;
}
else
{
last = p;
p = p->next;
}
}
return (rv);
}
|
Safe
|
[
"CWE-401"
] |
mutt
|
4a2becbdb4422aaffe3ce314991b9d670b7adf17
|
1.658572648542016e+38
| 29 |
Fix memory leak parsing group addresses without a display name.
When there was a group address terminator with no previous
addresses (including the group display-name), an address would be
allocated but not attached to the address list.
Change this to only allocate when last exists.
It would be more correct to not allocate at all unless we are inside a
group list, but I will address that in a separate commit to master.
| 0 |
TEST_F(QuantizedConv2DTest, Small) {
const int stride = 1;
TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
// The image data should always be able to represent zero, to allow a fast
// implementation of border padding, so we set the min value to 0.
const float image_min = 0.0f;
const float image_max = 12.0f;
// The image matrix is:
// | 1 | 2 | 3 | 4 |
// | 5 | 6 | 7 | 8 |
// | 9 | 10 | 11 | 12 |
Tensor image_float(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image_float,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Tensor image_quantized =
FloatTensorToQuantized<quint8>(image_float, image_min, image_max);
// The filter matrix is:
// | 1 | 4 | 7 |
// | 2 | 5 | 8 |
// | 3 | 6 | 9 |
const int filter_size = 3;
const int filter_count = 1;
const float filter_min = 1.0f;
const float filter_max = 9.0f;
Tensor filter_float(DT_FLOAT,
{filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter_float, {1, 4, 7, 2, 5, 8, 3, 6, 9});
Tensor filter_quantized =
FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);
AddInputFromArray<quint8>(image_quantized.shape(),
image_quantized.flat<quint8>());
AddInputFromArray<quint8>(filter_quantized.shape(),
filter_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({}), {image_min});
AddInputFromArray<float>(TensorShape({}), {image_max});
AddInputFromArray<float>(TensorShape({}), {filter_min});
AddInputFromArray<float>(TensorShape({}), {filter_max});
TF_ASSERT_OK(RunOpKernel());
// We're sliding the 3x3 filter across the 3x4 image, with accesses outside
// the input set to zero because we're using the 'SAME' padding mode.
// The calculations behind the expected output are:
// (1*0)+(4*0)+(7*0)+(2*0)+(5*1)+(8*2)+(3*0)+(6*5)+(9*6)=105
// (1*0)+(4*0)+(7*0)+(2*1)+(5*2)+(8*3)+(3*5)+(6*6)+(9*7)=150
// (1*0)+(4*0)+(7*0)+(2*2)+(5*3)+(8*4)+(3*6)+(6*7)+(9*8)=183
// (1*0)+(4*0)+(7*0)+(2*3)+(5*4)+(8*0)+(3*7)+(6*8)+(9*0)=95
// (1*0)+(4*1)+(7*2)+(2*0)+(5*5)+(8*6)+(3*0)+(6*9)+(9*10)=235
// (1*1)+(4*2)+(7*3)+(2*5)+(5*6)+(8*7)+(3*9)+(6*10)+(9*11)=312
// (1*2)+(4*3)+(7*4)+(2*6)+(5*7)+(8*8)+(3*10)+(6*11)+(9*12)=357
// (1*3)+(4*4)+(7*0)+(2*7)+(5*8)+(8*0)+(3*11)+(6*12)+(9*0)=178
// (1*0)+(4*5)+(7*6)+(2*0)+(5*9)+(8*10)+(3*0)+(6*0)+(9*0)=187
// (1*5)+(4*6)+(7*7)+(2*9)+(5*10)+(8*11)+(3*0)+(6*0)+(9*0)=234
// (1*6)+(4*7)+(7*8)+(2*10)+(5*11)+(8*12)+(3*0)+(6*0)+(9*0)=261
// (1*7)+(4*11)+(7*0)+(2*8)+(5*12)+(8*0)+(3*0)+(6*0)+(9*0)=121
// This means we should end up with this matrix:
// | 105 | 150 | 183 | 95 |
// | 235 | 312 | 357 | 178 |
// | 187 | 234 | 261 | 121 |
const int expected_width = image_width;
const int expected_height = image_height * filter_count;
Tensor expected_float(
DT_FLOAT, TensorShape({image_batch_count, expected_height, expected_width,
filter_count}));
test::FillValues<float>(&expected_float, {105, 150, 183, 95, 235, 312, 357,
178, 187, 234, 261, 121});
const Tensor& output_quantized = *GetOutput(0);
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
Tensor output_float =
QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
}
|
Safe
|
[
"CWE-20",
"CWE-476"
] |
tensorflow
|
0f0b080ecde4d3dfec158d6f60da34d5e31693c4
|
1.3154612963954025e+38
| 91 |
Fix undefined behavior in QuantizedConv2D
Added more input validation and tests. Prior to this, we could get
`nullptr` exceptions when attempting to access 0th elements of 0-sized
inputs, leading to security vulnerability bugs.
Also needed to modify `quantized_conv_ops_test.cc` for consistency.
Previously the CPU kernel did technically support passing tensors
of rank larger than 0 for min/max values. However, the XLA kernels do not.
PiperOrigin-RevId: 445518507
| 0 |
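Editor's note: the input validation the TensorFlow fix message calls for, reduced to a plain C sketch. Before reading min/max "scalars", verify the backing array holds exactly one element so that element 0 actually exists. Illustrative shape of the check, not TensorFlow code.

```c
#include <stddef.h>

/* Reject empty or non-scalar input before dereferencing data[0]. */
static int read_scalar(const float *data, size_t num_elements, float *out)
{
    if (data == NULL || num_elements != 1)
        return -1;
    *out = data[0];
    return 0;
}
```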
static int do_show_master_status(MYSQL *mysql_con)
{
MYSQL_ROW row;
MYSQL_RES *master;
const char *comment_prefix=
(opt_master_data == MYSQL_OPT_MASTER_DATA_COMMENTED_SQL) ? "-- " : "";
if (mysql_query_with_error_report(mysql_con, &master, "SHOW MASTER STATUS"))
{
return 1;
}
else
{
row= mysql_fetch_row(master);
if (row && row[0] && row[1])
{
/* SHOW MASTER STATUS reports file and position */
print_comment(md_result_file, 0,
"\n--\n-- Position to start replication or point-in-time "
"recovery from\n--\n\n");
fprintf(md_result_file,
"%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n",
comment_prefix, row[0], row[1]);
check_io(md_result_file);
}
else if (!ignore_errors)
{
/* SHOW MASTER STATUS reports nothing and --force is not enabled */
my_printf_error(0, "Error: Binlogging on server not active",
MYF(0));
mysql_free_result(master);
maybe_exit(EX_MYSQLERR);
return 1;
}
mysql_free_result(master);
}
return 0;
}
|
Safe
|
[
"CWE-295"
] |
mysql-server
|
b3e9211e48a3fb586e88b0270a175d2348935424
|
1.993167120399932e+38
| 37 |
WL#9072: Backport WL#8785 to 5.5
| 0 |
void intel_lr_context_reset(struct intel_engine_cs *engine,
struct intel_context *ce,
u32 head,
bool scrub)
{
GEM_BUG_ON(!intel_context_is_pinned(ce));
/*
* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
* so clear it and rebuild just what we need for the breadcrumb.
* All pending requests for this context will be zapped, and any
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
if (scrub)
restore_default_state(ce, engine);
/* Rerun the request; its payload has been neutered (if guilty). */
ce->ring->head = head;
intel_ring_update_space(ce->ring);
__execlists_update_reg_state(ce, engine);
}
|
Safe
|
[] |
linux
|
bc8a76a152c5f9ef3b48104154a65a68a8b76946
|
1.0993687347290926e+38
| 24 |
drm/i915/gen9: Clear residual context state on context switch
Intel ID: PSIRT-TA-201910-001
CVEID: CVE-2019-14615
Intel GPU Hardware prior to Gen11 does not clear EU state
during a context switch. This can result in information
leakage between contexts.
For Gen8 and Gen9, hardware provides a mechanism for
fast cleardown of the EU state, by issuing a PIPE_CONTROL
with bit 27 set. We can use this in a context batch buffer
to explicitly cleardown the state on every context switch.
As this workaround is already in place for gen8, we can borrow
the code verbatim for Gen9.
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
Cc: Kumar Valsan Prathap <prathap.kumar.valsan@intel.com>
Cc: Chris Wilson <chris.p.wilson@intel.com>
Cc: Balestrieri Francesco <francesco.balestrieri@intel.com>
Cc: Bloomfield Jon <jon.bloomfield@intel.com>
Cc: Dutt Sudeep <sudeep.dutt@intel.com>
| 0 |
void trun_box_del(GF_Box *s)
{
GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s;
if (ptr == NULL) return;
if (ptr->samples) gf_free(ptr->samples);
if (ptr->cache) gf_bs_del(ptr->cache);
if (ptr->sample_order) gf_free(ptr->sample_order);
gf_free(ptr);
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
77510778516803b7f7402d7423c6d6bef50254c3
|
2.4845458960962157e+38
| 10 |
fixed #2255
| 0 |
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
u16 sel, old_cs;
ulong old_eip;
int rc;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
int cpl = ctxt->ops->cpl(ctxt);
old_eip = ctxt->_eip;
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_cs;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
/* If we failed, we tainted the memory, but the very least we should
restore cs */
if (rc != X86EMUL_CONTINUE)
goto fail;
return rc;
fail:
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
return rc;
}
|
Safe
|
[] |
kvm
|
d1442d85cc30ea75f7d399474ca738e0bc96f715
|
2.229187065779274e+38
| 39 |
KVM: x86: Handle errors when RIP is set during far jumps
Far jmp/call/ret may fault while loading a new RIP. Currently KVM does not
handle this case, and may result in failed vm-entry once the assignment is
done. The tricky part of doing so is that loading the new CS affects the
VMCS/VMCB state, so if we fail during loading the new RIP, we are left in
unconsistent state. Therefore, this patch saves on 64-bit the old CS
descriptor and restores it if loading RIP failed.
This fixes CVE-2014-3647.
Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
rfbProcessClientTunnelingType(rfbClientPtr cl)
{
/* If we were called, then something's really wrong. */
rfbLog("rfbProcessClientTunnelingType: not implemented\n");
rfbCloseClient(cl);
return;
}
|
Safe
|
[] |
libvncserver
|
804335f9d296440bb708ca844f5d89b58b50b0c6
|
1.3708337465615978e+38
| 7 |
Thread safety for zrle, zlib, tight.
Proposed tight security type fix for debian bug 517422.
| 0 |
void requestHeadersInitialized() {
if (Http::Headers::get().MethodValues.Head ==
filter_manager_callbacks_.requestHeaders()->getMethodValue()) {
state_.is_head_request_ = true;
}
state_.is_grpc_request_ =
Grpc::Common::isGrpcRequestHeaders(filter_manager_callbacks_.requestHeaders().ref());
}
|
Safe
|
[
"CWE-416"
] |
envoy
|
148de954ed3585d8b4298b424aa24916d0de6136
|
1.855598993508439e+38
| 8 |
CVE-2021-43825
Response filter manager crash
Signed-off-by: Yan Avlasov <yavlasov@google.com>
| 0 |
fpad4(OutputFile *fo)
{
unsigned len = fo->st_size();
unsigned d = 3u & (0 - len);
unsigned zero = 0;
fo->write(&zero, d);
return d + len;
}
|
Safe
|
[
"CWE-476"
] |
upx
|
ef336dbcc6dc8344482f8cf6c909ae96c3286317
|
1.9177938245346207e+38
| 8 |
Protect against badly crafted input.
https://github.com/upx/upx/issues/128
modified: p_lx_elf.cpp
| 0 |
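Editor's note: a worked check of the padding arithmetic in fpad4 above. With unsigned wraparound, 3u & (0 - len) equals (4 - len % 4) % 4, i.e. the number of zero bytes needed to round len up to a 4-byte boundary.

```c
#include <assert.h>

int main(void)
{
    for (unsigned len = 0; len < 64; ++len) {
        unsigned d = 3u & (0u - len);      /* same trick as fpad4 */
        assert(d == (4 - len % 4) % 4);    /* pad size to next multiple of 4 */
        assert((len + d) % 4 == 0);        /* result is 4-byte aligned */
    }
    return 0;
}
```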
bool delete_precheck(THD *thd, TABLE_LIST *tables)
{
DBUG_ENTER("delete_precheck");
if (check_one_table_access(thd, DELETE_ACL, tables))
DBUG_RETURN(TRUE);
/* Set privilege for the WHERE clause */
tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege);
DBUG_RETURN(FALSE);
}
|
Safe
|
[] |
server
|
ba4927e520190bbad763bb5260ae154f29a61231
|
1.8140576466160308e+37
| 9 |
MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ...
Window Functions code tries to minimize the number of times it
needs to sort the select's resultset by finding "compatible"
OVER (PARTITION BY ... ORDER BY ...) clauses.
This employs compare_order_elements(). That function assumed that
the order expressions are Item_field-derived objects (that refer
to a temp.table). But this is not always the case: one can
construct queries where the order expressions are arbitrary item expressions.
Add handling for such expressions: sort them according to the window
specification they appeared in.
This means we cannot detect that two compatible PARTITION BY clauses
that use expressions can share the sorting step.
But at least we won't crash.
| 0 |
bool Item_field::collect_item_field_processor(void *arg)
{
DBUG_ENTER("Item_field::collect_item_field_processor");
DBUG_PRINT("info", ("%s", field->field_name.str ?
field->field_name.str : "noname"));
List<Item_field> *item_list= (List<Item_field>*) arg;
List_iterator<Item_field> item_list_it(*item_list);
Item_field *curr_item;
while ((curr_item= item_list_it++))
{
if (curr_item->eq(this, 1))
DBUG_RETURN(FALSE); /* Already in the set. */
}
item_list->push_back(this);
DBUG_RETURN(FALSE);
}
|
Safe
|
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
|
2.1221692789271616e+38
| 16 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables(), so it cannot benefit from
fix_vcol_exprs() being called there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
| 0 |
void set_reconnect(MYSQL* mysql, int val)
{
my_bool reconnect= val;
DBUG_ENTER("set_reconnect");
DBUG_PRINT("info", ("val: %d", val));
#if MYSQL_VERSION_ID < 50000
mysql->reconnect= reconnect;
#else
mysql_options(mysql, MYSQL_OPT_RECONNECT, (char *)&reconnect);
#endif
DBUG_VOID_RETURN;
}
|
Safe
|
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
|
3.712633542831244e+37
| 12 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
| 0 |
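Editor's note: a sketch of client-side use of the MYSQL_OPT_SSL_ENFORCE option the worklog above introduces: require an SSL connection instead of silently falling back to plaintext. Availability of the option and the my_bool type depends on the client library version; treat both as assumptions.

```c
#include <mysql.h>

/* Ask the client library to fail the connection rather than
 * downgrade to an unencrypted transport. */
static void require_ssl(MYSQL *conn)
{
    my_bool enforce = 1;
    mysql_options(conn, MYSQL_OPT_SSL_ENFORCE, &enforce);
}
```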
CreateForeignServer(CreateForeignServerStmt *stmt)
{
Relation rel;
Datum srvoptions;
Datum values[Natts_pg_foreign_server];
bool nulls[Natts_pg_foreign_server];
HeapTuple tuple;
Oid srvId;
Oid ownerId;
AclResult aclresult;
ObjectAddress myself;
ObjectAddress referenced;
ForeignDataWrapper *fdw;
rel = heap_open(ForeignServerRelationId, RowExclusiveLock);
/* For now the owner cannot be specified on create. Use effective user ID. */
ownerId = GetUserId();
/*
* Check that there is no other foreign server by this name. If there is
* one, do nothing if IF NOT EXISTS was specified.
*/
srvId = get_foreign_server_oid(stmt->servername, true);
if (OidIsValid(srvId))
{
if (stmt->if_not_exists)
{
/*
* If we are in an extension script, insist that the pre-existing
* object be a member of the extension, to avoid security risks.
*/
ObjectAddressSet(myself, ForeignServerRelationId, srvId);
checkMembershipInCurrentExtension(&myself);
/* OK to skip */
ereport(NOTICE,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("server \"%s\" already exists, skipping",
stmt->servername)));
heap_close(rel, RowExclusiveLock);
return InvalidObjectAddress;
}
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("server \"%s\" already exists",
stmt->servername)));
}
/*
* Check that the FDW exists and that we have USAGE on it. Also get the
* actual FDW for option validation etc.
*/
fdw = GetForeignDataWrapperByName(stmt->fdwname, false);
aclresult = pg_foreign_data_wrapper_aclcheck(fdw->fdwid, ownerId, ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FDW, fdw->fdwname);
/*
* Insert tuple into pg_foreign_server.
*/
memset(values, 0, sizeof(values));
memset(nulls, false, sizeof(nulls));
values[Anum_pg_foreign_server_srvname - 1] =
DirectFunctionCall1(namein, CStringGetDatum(stmt->servername));
values[Anum_pg_foreign_server_srvowner - 1] = ObjectIdGetDatum(ownerId);
values[Anum_pg_foreign_server_srvfdw - 1] = ObjectIdGetDatum(fdw->fdwid);
/* Add server type if supplied */
if (stmt->servertype)
values[Anum_pg_foreign_server_srvtype - 1] =
CStringGetTextDatum(stmt->servertype);
else
nulls[Anum_pg_foreign_server_srvtype - 1] = true;
/* Add server version if supplied */
if (stmt->version)
values[Anum_pg_foreign_server_srvversion - 1] =
CStringGetTextDatum(stmt->version);
else
nulls[Anum_pg_foreign_server_srvversion - 1] = true;
/* Start with a blank acl */
nulls[Anum_pg_foreign_server_srvacl - 1] = true;
/* Add server options */
srvoptions = transformGenericOptions(ForeignServerRelationId,
PointerGetDatum(NULL),
stmt->options,
fdw->fdwvalidator);
if (PointerIsValid(DatumGetPointer(srvoptions)))
values[Anum_pg_foreign_server_srvoptions - 1] = srvoptions;
else
nulls[Anum_pg_foreign_server_srvoptions - 1] = true;
tuple = heap_form_tuple(rel->rd_att, values, nulls);
srvId = CatalogTupleInsert(rel, tuple);
heap_freetuple(tuple);
/* record dependencies */
myself.classId = ForeignServerRelationId;
myself.objectId = srvId;
myself.objectSubId = 0;
referenced.classId = ForeignDataWrapperRelationId;
referenced.objectId = fdw->fdwid;
referenced.objectSubId = 0;
recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
recordDependencyOnOwner(ForeignServerRelationId, srvId, ownerId);
/* dependency on extension */
recordDependencyOnCurrentExtension(&myself, false);
/* Post creation hook for new foreign server */
InvokeObjectPostCreateHook(ForeignServerRelationId, srvId, 0);
heap_close(rel, RowExclusiveLock);
return myself;
}
|
Safe
|
[
"CWE-94"
] |
postgres
|
f52d2fbd8c62f667191b61228acf9d8aa53607b9
|
2.3154097504719633e+38
| 127 |
In extensions, don't replace objects not belonging to the extension.
Previously, if an extension script did CREATE OR REPLACE and there was
an existing object not belonging to the extension, it would overwrite
the object and adopt it into the extension. This is problematic, first
because the overwrite is probably unintentional, and second because we
didn't change the object's ownership. Thus a hostile user could create
an object in advance of an expected CREATE EXTENSION command, and would
then have ownership rights on an extension object, which could be
modified for trojan-horse-type attacks.
Hence, forbid CREATE OR REPLACE of an existing object unless it already
belongs to the extension. (Note that we've always forbidden replacing
an object that belongs to some other extension; only the behavior for
previously-free-standing objects changes here.)
For the same reason, also fail CREATE IF NOT EXISTS when there is
an existing object that doesn't belong to the extension.
Our thanks to Sven Klemm for reporting this problem.
Security: CVE-2022-2625
| 0 |
includeFile(const FileInfo *file, CharsString *includedFile,
TranslationTableHeader **table, DisplayTableHeader **displayTable) {
int k;
char includeThis[MAXSTRING];
char **tableFiles;
int rv;
for (k = 0; k < includedFile->length; k++)
includeThis[k] = (char)includedFile->chars[k];
if (k >= MAXSTRING) {
compileError(file, "Include statement too long: 'include %s'", includeThis);
return 0;
}
includeThis[k] = 0;
tableFiles = _lou_resolveTable(includeThis, file->fileName);
if (tableFiles == NULL) {
errorCount++;
return 0;
}
if (tableFiles[1] != NULL) {
free_tablefiles(tableFiles);
compileError(file, "Table list not supported in include statement: 'include %s'",
includeThis);
return 0;
}
rv = compileFile(*tableFiles, table, displayTable);
free_tablefiles(tableFiles);
if (!rv)
_lou_logMessage(LOU_LOG_ERROR, "%s:%d: Error in included file", file->fileName,
file->lineNumber);
return rv;
}
|
Safe
|
[
"CWE-787"
] |
liblouis
|
2e4772befb2b1c37cb4b9d6572945115ee28630a
|
2.2490176151473658e+38
| 31 |
Prevent invalid memory writes in compileRule
Thanks to Han Zheng for reporting it
Fixes #1214
| 0 |
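Editor's note: a sketch of a check-before-copy ordering for an include-name copy like the one above. The length test runs before the copy loop, rather than testing k only after the writes have happened. Illustrative, not liblouis's actual fix.

```c
#include <stddef.h>

/* Copy a wide-char name into a fixed char buffer, failing up front
 * when it would not fit (including the terminating NUL). */
static int copy_name(char *dst, size_t dst_size,
                     const unsigned short *src, size_t src_len)
{
    if (src_len >= dst_size)
        return 0;               /* too long: fail before writing */
    for (size_t k = 0; k < src_len; k++)
        dst[k] = (char)src[k];
    dst[src_len] = 0;
    return 1;
}
```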
void line6_start_timer(struct timer_list *timer, unsigned long msecs,
void (*function)(struct timer_list *t))
{
timer->function = function;
mod_timer(timer, jiffies + msecs_to_jiffies(msecs));
}
|
Safe
|
[
"CWE-476"
] |
linux
|
0b074ab7fc0d575247b9cc9f93bb7e007ca38840
|
3.0483638546699745e+38
| 6 |
ALSA: line6: Assure canceling delayed work at disconnection
The current code performs the cancel of a delayed work at the late
stage of disconnection procedure, which may lead to the access to the
already cleared state.
This patch assures to call cancel_delayed_work_sync() at the beginning
of the disconnection procedure for avoiding that race. The delayed
work object is now assigned in the common line6 object instead of its
derivative, so that we can call cancel_delayed_work_sync().
Along with the change, the startup function is called via the new
callback instead. This will make it easier to port other LINE6
drivers to use the delayed work for startup in later patches.
Reported-by: syzbot+5255458d5e0a2b10bbb9@syzkaller.appspotmail.com
Fixes: 7f84ff68be05 ("ALSA: line6: toneport: Fix broken usage of timer for delayed execution")
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
| 0 |
static inline void _gdScaleRow(gdImagePtr pSrc, unsigned int src_width, gdImagePtr dst, unsigned int dst_width, unsigned int row, LineContribType *contrib)
{
int *p_src_row = pSrc->tpixels[row];
int *p_dst_row = dst->tpixels[row];
unsigned int x;
for (x = 0; x < dst_width - 1; x++) {
register unsigned char r = 0, g = 0, b = 0, a = 0;
const int left = contrib->ContribRow[x].Left;
const int right = contrib->ContribRow[x].Right;
int i;
/* Accumulate each channel */
for (i = left; i <= right; i++) {
const int left_channel = i - left;
r += (unsigned char)(contrib->ContribRow[x].Weights[left_channel] * (double)(gdTrueColorGetRed(p_src_row[i])));
g += (unsigned char)(contrib->ContribRow[x].Weights[left_channel] * (double)(gdTrueColorGetGreen(p_src_row[i])));
b += (unsigned char)(contrib->ContribRow[x].Weights[left_channel] * (double)(gdTrueColorGetBlue(p_src_row[i])));
a += (unsigned char)(contrib->ContribRow[x].Weights[left_channel] * (double)(gdTrueColorGetAlpha(p_src_row[i])));
}
p_dst_row[x] = gdTrueColorAlpha(r, g, b, a);
}
}
|
Safe
|
[
"CWE-125"
] |
libgd
|
4f65a3e4eedaffa1efcf9ee1eb08f0b504fbc31a
|
2.3181517559899547e+38
| 23 |
Fixed memory overrun bug in gdImageScaleTwoPass
_gdContributionsCalc would compute a window size and then adjust
the left and right positions of the window to make a window within
that size. However, it was storing the values in the struct *before*
it made the adjustment. This change fixes that.
| 0 |
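Editor's note: a sketch of the clamp-then-store ordering the fix message describes. The window's left/right bounds are adjusted to the valid range first and only then recorded, so later loops never index outside the source row. Names are illustrative, not libgd's code.

```c
struct window { int left, right; };

/* Build a contribution window centered on `center`, clamped to
 * [0, src_len - 1] before it is returned/stored anywhere. */
static struct window make_window(int center, int radius, int src_len)
{
    struct window w;
    w.left  = center - radius;
    w.right = center + radius;
    if (w.left < 0)
        w.left = 0;
    if (w.right > src_len - 1)
        w.right = src_len - 1;
    return w;
}
```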
void ConnectionManagerImpl::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {
read_callbacks_ = &callbacks;
stats_.named_.downstream_cx_total_.inc();
stats_.named_.downstream_cx_active_.inc();
if (read_callbacks_->connection().ssl()) {
stats_.named_.downstream_cx_ssl_total_.inc();
stats_.named_.downstream_cx_ssl_active_.inc();
}
read_callbacks_->connection().addConnectionCallbacks(*this);
if (config_.idleTimeout()) {
connection_idle_timer_ = read_callbacks_->connection().dispatcher().createScaledTimer(
Event::ScaledTimerType::HttpDownstreamIdleConnectionTimeout,
[this]() -> void { onIdleTimeout(); });
connection_idle_timer_->enableTimer(config_.idleTimeout().value());
}
if (config_.maxConnectionDuration()) {
connection_duration_timer_ = read_callbacks_->connection().dispatcher().createTimer(
[this]() -> void { onConnectionDurationTimeout(); });
connection_duration_timer_->enableTimer(config_.maxConnectionDuration().value());
}
read_callbacks_->connection().setDelayedCloseTimeout(config_.delayedCloseTimeout());
read_callbacks_->connection().setConnectionStats(
{stats_.named_.downstream_cx_rx_bytes_total_, stats_.named_.downstream_cx_rx_bytes_buffered_,
stats_.named_.downstream_cx_tx_bytes_total_, stats_.named_.downstream_cx_tx_bytes_buffered_,
nullptr, &stats_.named_.downstream_cx_delayed_close_timeout_});
}
|
Safe
|
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
|
3.0479796740261474e+38
| 31 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <yavlasov@google.com>
| 0 |
static void vxlan_config_apply(struct net_device *dev,
struct vxlan_config *conf,
struct net_device *lowerdev,
struct net *src_net,
bool changelink)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
unsigned short needed_headroom = ETH_HLEN;
bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
int max_mtu = ETH_MAX_MTU;
if (!changelink) {
if (conf->flags & VXLAN_F_GPE)
vxlan_raw_setup(dev);
else
vxlan_ether_setup(dev);
if (conf->mtu)
dev->mtu = conf->mtu;
vxlan->net = src_net;
}
dst->remote_vni = conf->vni;
memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
if (lowerdev) {
dst->remote_ifindex = conf->remote_ifindex;
dev->gso_max_size = lowerdev->gso_max_size;
dev->gso_max_segs = lowerdev->gso_max_segs;
needed_headroom = lowerdev->hard_header_len;
max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
VXLAN_HEADROOM);
if (max_mtu < ETH_MIN_MTU)
max_mtu = ETH_MIN_MTU;
if (!changelink && !conf->mtu)
dev->mtu = max_mtu;
}
if (dev->mtu > max_mtu)
dev->mtu = max_mtu;
if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
needed_headroom += VXLAN6_HEADROOM;
else
needed_headroom += VXLAN_HEADROOM;
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
}
|
Safe
|
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
|
1.0023888939601073e+38
| 56 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <xmu@redhat.com>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const struct btrfs_key *cpu_key, u32 *data_size,
int nr)
{
int ret = 0;
int slot;
int i;
u32 total_size = 0;
u32 total_data = 0;
for (i = 0; i < nr; i++)
total_data += data_size[i];
total_size = total_data + (nr * sizeof(struct btrfs_item));
ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
if (ret == 0)
return -EEXIST;
if (ret < 0)
return ret;
slot = path->slots[0];
BUG_ON(slot < 0);
setup_items_for_insert(root, path, cpu_key, data_size, nr);
return 0;
}
|
Safe
|
[
"CWE-362"
] |
linux
|
dbcc7d57bffc0c8cac9dac11bec548597d59a6a5
|
1.800665544143151e+38
| 28 |
btrfs: fix race when cloning extent buffer during rewind of an old root
While resolving backreferences, as part of a logical ino ioctl call or
fiemap, we can end up hitting a BUG_ON() when replaying tree mod log
operations of a root, triggering a stack trace like the following:
------------[ cut here ]------------
kernel BUG at fs/btrfs/ctree.c:1210!
invalid opcode: 0000 [#1] SMP KASAN PTI
CPU: 1 PID: 19054 Comm: crawl_335 Tainted: G W 5.11.0-2d11c0084b02-misc-next+ #89
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
RIP: 0010:__tree_mod_log_rewind+0x3b1/0x3c0
Code: 05 48 8d 74 10 (...)
RSP: 0018:ffffc90001eb70b8 EFLAGS: 00010297
RAX: 0000000000000000 RBX: ffff88812344e400 RCX: ffffffffb28933b6
RDX: 0000000000000007 RSI: dffffc0000000000 RDI: ffff88812344e42c
RBP: ffffc90001eb7108 R08: 1ffff11020b60a20 R09: ffffed1020b60a20
R10: ffff888105b050f9 R11: ffffed1020b60a1f R12: 00000000000000ee
R13: ffff8880195520c0 R14: ffff8881bc958500 R15: ffff88812344e42c
FS: 00007fd1955e8700(0000) GS:ffff8881f5600000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007efdb7928718 CR3: 000000010103a006 CR4: 0000000000170ee0
Call Trace:
btrfs_search_old_slot+0x265/0x10d0
? lock_acquired+0xbb/0x600
? btrfs_search_slot+0x1090/0x1090
? free_extent_buffer.part.61+0xd7/0x140
? free_extent_buffer+0x13/0x20
resolve_indirect_refs+0x3e9/0xfc0
? lock_downgrade+0x3d0/0x3d0
? __kasan_check_read+0x11/0x20
? add_prelim_ref.part.11+0x150/0x150
? lock_downgrade+0x3d0/0x3d0
? __kasan_check_read+0x11/0x20
? lock_acquired+0xbb/0x600
? __kasan_check_write+0x14/0x20
? do_raw_spin_unlock+0xa8/0x140
? rb_insert_color+0x30/0x360
? prelim_ref_insert+0x12d/0x430
find_parent_nodes+0x5c3/0x1830
? resolve_indirect_refs+0xfc0/0xfc0
? lock_release+0xc8/0x620
? fs_reclaim_acquire+0x67/0xf0
? lock_acquire+0xc7/0x510
? lock_downgrade+0x3d0/0x3d0
? lockdep_hardirqs_on_prepare+0x160/0x210
? lock_release+0xc8/0x620
? fs_reclaim_acquire+0x67/0xf0
? lock_acquire+0xc7/0x510
? poison_range+0x38/0x40
? unpoison_range+0x14/0x40
? trace_hardirqs_on+0x55/0x120
btrfs_find_all_roots_safe+0x142/0x1e0
? find_parent_nodes+0x1830/0x1830
? btrfs_inode_flags_to_xflags+0x50/0x50
iterate_extent_inodes+0x20e/0x580
? tree_backref_for_extent+0x230/0x230
? lock_downgrade+0x3d0/0x3d0
? read_extent_buffer+0xdd/0x110
? lock_downgrade+0x3d0/0x3d0
? __kasan_check_read+0x11/0x20
? lock_acquired+0xbb/0x600
? __kasan_check_write+0x14/0x20
? _raw_spin_unlock+0x22/0x30
? __kasan_check_write+0x14/0x20
iterate_inodes_from_logical+0x129/0x170
? iterate_inodes_from_logical+0x129/0x170
? btrfs_inode_flags_to_xflags+0x50/0x50
? iterate_extent_inodes+0x580/0x580
? __vmalloc_node+0x92/0xb0
? init_data_container+0x34/0xb0
? init_data_container+0x34/0xb0
? kvmalloc_node+0x60/0x80
btrfs_ioctl_logical_to_ino+0x158/0x230
btrfs_ioctl+0x205e/0x4040
? __might_sleep+0x71/0xe0
? btrfs_ioctl_get_supported_features+0x30/0x30
? getrusage+0x4b6/0x9c0
? __kasan_check_read+0x11/0x20
? lock_release+0xc8/0x620
? __might_fault+0x64/0xd0
? lock_acquire+0xc7/0x510
? lock_downgrade+0x3d0/0x3d0
? lockdep_hardirqs_on_prepare+0x210/0x210
? lockdep_hardirqs_on_prepare+0x210/0x210
? __kasan_check_read+0x11/0x20
? do_vfs_ioctl+0xfc/0x9d0
? ioctl_file_clone+0xe0/0xe0
? lock_downgrade+0x3d0/0x3d0
? lockdep_hardirqs_on_prepare+0x210/0x210
? __kasan_check_read+0x11/0x20
? lock_release+0xc8/0x620
? __task_pid_nr_ns+0xd3/0x250
? lock_acquire+0xc7/0x510
? __fget_files+0x160/0x230
? __fget_light+0xf2/0x110
__x64_sys_ioctl+0xc3/0x100
do_syscall_64+0x37/0x80
entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7fd1976e2427
Code: 00 00 90 48 8b 05 (...)
RSP: 002b:00007fd1955e5cf8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 00007fd1955e5f40 RCX: 00007fd1976e2427
RDX: 00007fd1955e5f48 RSI: 00000000c038943b RDI: 0000000000000004
RBP: 0000000001000000 R08: 0000000000000000 R09: 00007fd1955e6120
R10: 0000557835366b00 R11: 0000000000000246 R12: 0000000000000004
R13: 00007fd1955e5f48 R14: 00007fd1955e5f40 R15: 00007fd1955e5ef8
Modules linked in:
---[ end trace ec8931a1c36e57be ]---
(gdb) l *(__tree_mod_log_rewind+0x3b1)
0xffffffff81893521 is in __tree_mod_log_rewind (fs/btrfs/ctree.c:1210).
1205 * the modification. as we're going backwards, we do the
1206 * opposite of each operation here.
1207 */
1208 switch (tm->op) {
1209 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1210 BUG_ON(tm->slot < n);
1211 fallthrough;
1212 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1213 case MOD_LOG_KEY_REMOVE:
1214 btrfs_set_node_key(eb, &tm->key, tm->slot);
Here's what happens to hit that BUG_ON():
1) We have one tree mod log user (through fiemap or the logical ino ioctl),
with a sequence number of 1, so we have fs_info->tree_mod_seq == 1;
2) Another task is at ctree.c:balance_level() and we have eb X currently as
the root of the tree, and we promote its single child, eb Y, as the new
root.
Then, at ctree.c:balance_level(), we call:
tree_mod_log_insert_root(eb X, eb Y, 1);
3) At tree_mod_log_insert_root() we create tree mod log elements for each
slot of eb X, of operation type MOD_LOG_KEY_REMOVE_WHILE_FREEING each
with a ->logical pointing to ebX->start. These are placed in an array
named tm_list.
Lets assume there are N elements (N pointers in eb X);
4) Then, still at tree_mod_log_insert_root(), we create a tree mod log
element of operation type MOD_LOG_ROOT_REPLACE, ->logical set to
ebY->start, ->old_root.logical set to ebX->start, ->old_root.level set
to the level of eb X and ->generation set to the generation of eb X;
5) Then tree_mod_log_insert_root() calls tree_mod_log_free_eb() with
tm_list as argument. After that, tree_mod_log_free_eb() calls
__tree_mod_log_insert() for each member of tm_list in reverse order,
from highest slot in eb X, slot N - 1, to slot 0 of eb X;
6) __tree_mod_log_insert() sets the sequence number of each given tree mod
log operation - it increments fs_info->tree_mod_seq and sets
fs_info->tree_mod_seq as the sequence number of the given tree mod log
operation.
This means that for the tm_list created at tree_mod_log_insert_root(),
the element corresponding to slot 0 of eb X has the highest sequence
number (1 + N), and the element corresponding to the last slot has the
lowest sequence number (2);
7) Then, after inserting tm_list's elements into the tree mod log rbtree,
the MOD_LOG_ROOT_REPLACE element is inserted, which gets the highest
sequence number, which is N + 2;
8) Back to ctree.c:balance_level(), we free eb X by calling
btrfs_free_tree_block() on it. Because eb X was created in the current
transaction, has no other references and writeback did not happen for
it, we add it back to the free space cache/tree;
9) Later some other task T allocates the metadata extent from eb X, since
it is marked as free space in the space cache/tree, and uses it as a
node for some other btree;
10) The tree mod log user task calls btrfs_search_old_slot(), which calls
get_old_root(), and finally that calls __tree_mod_log_oldest_root()
with time_seq == 1 and eb_root == eb Y;
11) First iteration of the while loop finds the tree mod log element with
sequence number N + 2, for the logical address of eb Y and of type
MOD_LOG_ROOT_REPLACE;
12) Because the operation type is MOD_LOG_ROOT_REPLACE, we don't break out
of the loop, and set root_logical to point to tm->old_root.logical
which corresponds to the logical address of eb X;
13) On the next iteration of the while loop, the call to
tree_mod_log_search_oldest() returns the smallest tree mod log element
for the logical address of eb X, which has a sequence number of 2, an
operation type of MOD_LOG_KEY_REMOVE_WHILE_FREEING and corresponds to
the old slot N - 1 of eb X (eb X had N items in it before being freed);
14) We then break out of the while loop and return the tree mod log operation
of type MOD_LOG_ROOT_REPLACE (eb Y), and not the one for slot N - 1 of
eb X, to get_old_root();
15) At get_old_root(), we process the MOD_LOG_ROOT_REPLACE operation
and set "logical" to the logical address of eb X, which was the old
root. We then call tree_mod_log_search() passing it the logical
address of eb X and time_seq == 1;
16) Then before calling tree_mod_log_search(), task T adds a key to eb X,
which results in adding a tree mod log operation of type
MOD_LOG_KEY_ADD to the tree mod log - this is done at
ctree.c:insert_ptr() - but after adding the tree mod log operation
and before updating the number of items in eb X from 0 to 1...
17) The task at get_old_root() calls tree_mod_log_search() and gets the
tree mod log operation of type MOD_LOG_KEY_ADD just added by task T.
Then it enters the following if branch:
if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
(...)
} (...)
Calls read_tree_block() for eb X, which gets a reference on eb X but
does not lock it - task T has it locked.
Then it clones eb X while it has nritems set to 0 in its header, before
task T sets nritems to 1 in eb X's header. From here on we use the
clone of eb X, which no other task has access to;
18) Then we call __tree_mod_log_rewind(), passing it the MOD_LOG_KEY_ADD
mod log operation we just got from tree_mod_log_search() in the
previous step and the cloned version of eb X;
19) At __tree_mod_log_rewind(), we set the local variable "n" to the number
of items set in eb X's clone, which is 0. Then we enter the while loop,
and in its first iteration we process the MOD_LOG_KEY_ADD operation,
which just decrements "n" from 0 to (u32)-1, since "n" is declared with
a type of u32. At the end of this iteration we call rb_next() to find the
next tree mod log operation for eb X, that gives us the mod log operation
of type MOD_LOG_KEY_REMOVE_WHILE_FREEING, for slot 0, with a sequence
number of N + 1 (steps 3 to 6);
20) Then we go back to the top of the while loop and trigger the following
BUG_ON():
(...)
switch (tm->op) {
case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
BUG_ON(tm->slot < n);
fallthrough;
(...)
Because "n" has a value of (u32)-1 (4294967295) and tm->slot is 0.
Fix this by taking a read lock on the extent buffer before cloning it at
ctree.c:get_old_root(). This should be done regardless of the extent
buffer having been freed and reused, as a concurrent task might be
modifying it (while holding a write lock on it). A sketch of this locking
pattern follows this record.
Reported-by: Zygo Blaxell <ce3g8jdj@umail.furryterror.org>
Link: https://lore.kernel.org/linux-btrfs/20210227155037.GN28049@hungrycats.org/
Fixes: 834328a8493079 ("Btrfs: tree mod log's old roots could still be part of the tree")
CC: stable@vger.kernel.org # 4.4+
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
| 0 |
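The fix described above reduces to a general rule: never clone a buffer that a concurrent writer may be modifying without holding at least a read lock across the copy. Below is a minimal user-space C analogue of that pattern using pthreads; struct node, nritems, and the function names are illustrative stand-ins for the btrfs extent buffer, not kernel API.

#include <pthread.h>
#include <string.h>

/* Illustrative stand-in for an extent buffer: an item count plus items. */
struct node {
    pthread_rwlock_t lock;
    unsigned int nritems;
    int items[16];
};

/* Buggy pattern (what get_old_root() effectively did): copying without
 * the lock can observe a torn state, e.g. nritems still 0 while a
 * writer has already logged its MOD_LOG_KEY_ADD operation. */
static void clone_unlocked(struct node *dst, const struct node *src)
{
    dst->nritems = src->nritems;
    memcpy(dst->items, src->items, sizeof(src->items));
}

/* Fixed pattern: hold the read lock for the whole copy, so a writer
 * (which holds the write lock) can never be mid-update while we clone. */
static void clone_locked(struct node *dst, struct node *src)
{
    pthread_rwlock_rdlock(&src->lock);
    dst->nritems = src->nritems;
    memcpy(dst->items, src->items, sizeof(src->items));
    pthread_rwlock_unlock(&src->lock);
}

In the kernel the fix presumably has the same shape: bracket the btrfs_clone_extent_buffer() call on the old root in get_old_root() with btrfs_tree_read_lock()/btrfs_tree_read_unlock().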
resolve_iffeature_getsizes(struct lys_iffeature *iffeat, unsigned int *expr_size, unsigned int *feat_size)
{
unsigned int e = 0, f = 0, r = 0;
uint8_t op;
assert(iffeat);
if (!iffeat->expr) {
goto result;
}
do {
op = iff_getop(iffeat->expr, e++);
switch (op) {
case LYS_IFF_NOT:
if (!r) {
r += 1;
}
break;
case LYS_IFF_AND:
case LYS_IFF_OR:
if (!r) {
r += 2;
} else {
r += 1;
}
break;
case LYS_IFF_F:
f++;
if (r) {
r--;
}
break;
}
} while(r);
result:
if (expr_size) {
*expr_size = e;
}
if (feat_size) {
*feat_size = f;
}
}
|
Safe
|
[
"CWE-119"
] |
libyang
|
32fb4993bc8bb49e93e84016af3c10ea53964be5
|
2.5650832204891754e+38
| 44 |
schema tree BUGFIX do not check features while still resolving schema
Fixes #723
| 0 |
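For context on the function in the record above: it walks an if-feature expression stored in prefix notation, using a pending-operand counter to find where the expression ends. The following standalone sketch reproduces that counting over a plain op array; iff_getop()'s packed encoding is replaced by an array lookup purely for illustration.

#include <stdio.h>

enum op { OP_F, OP_NOT, OP_AND, OP_OR };

/* Count expression length and feature references for a well-formed
 * prefix-notation expression, mirroring the pending-operand counter
 * "r" in the record above. */
static void getsizes(const enum op *expr, unsigned int *expr_size,
                     unsigned int *feat_size)
{
    unsigned int e = 0, f = 0, r = 0;

    do {
        switch (expr[e++]) {
        case OP_NOT:            /* unary: only opens a slot at the start */
            if (!r) r += 1;
            break;
        case OP_AND:
        case OP_OR:             /* binary: consumes one slot, opens two */
            r += r ? 1 : 2;
            break;
        case OP_F:              /* feature ref: fills one pending slot */
            f++;
            if (r) r--;
            break;
        }
    } while (r);

    *expr_size = e;
    *feat_size = f;
}

int main(void)
{
    /* "f1 and (not f2)" in prefix notation: AND F NOT F */
    const enum op expr[] = { OP_AND, OP_F, OP_NOT, OP_F };
    unsigned int e, f;

    getsizes(expr, &e, &f);
    printf("ops=%u features=%u\n", e, f);   /* prints: ops=4 features=2 */
    return 0;
}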
xmlBufDumpEntityDecl(xmlBufPtr buf, xmlEntityPtr ent) {
xmlBufferPtr buffer;
buffer = xmlBufferCreate();
if (buffer == NULL) {
/*
* TODO set the error in buf
*/
return;
}
xmlDumpEntityDecl(buffer, ent);
xmlBufMergeBuffer(buf, buffer);
}
|
Safe
|
[
"CWE-502"
] |
libxml2
|
c97750d11bb8b6f3303e7131fe526a61ac65bcfd
|
3.1577642600886093e+38
| 13 |
Avoid an out of bound access when serializing malformed strings
For https://bugzilla.gnome.org/show_bug.cgi?id=766414
* xmlsave.c: in xmlBufAttrSerializeTxtContent(), if an attribute value
is not UTF-8, be more careful when serializing it, as we may otherwise
perform an out-of-bounds access (a bounds-checking sketch follows this
record).
| 0 |
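The defensive idea behind the fix above is to never consume UTF-8 continuation bytes past the end of the attribute value. A hedged sketch of such a bounds check; this mirrors the principle, not libxml2's actual code.

#include <stddef.h>

/* Return the byte length of the (possibly malformed) UTF-8 sequence
 * starting at s, given that only "avail" bytes remain readable; -1 on
 * any malformation. The key point: a truncated sequence must stop the
 * scan rather than read past the buffer. */
static int utf8_seq_len(const unsigned char *s, size_t avail)
{
    int len, i;

    if (avail == 0)
        return 0;
    if (s[0] < 0x80) len = 1;
    else if ((s[0] & 0xE0) == 0xC0) len = 2;
    else if ((s[0] & 0xF0) == 0xE0) len = 3;
    else if ((s[0] & 0xF8) == 0xF0) len = 4;
    else return -1;                  /* invalid lead byte */

    if ((size_t)len > avail)
        return -1;                   /* truncated: do not overread */
    for (i = 1; i < len; i++)
        if ((s[i] & 0xC0) != 0x80)
            return -1;               /* bad continuation byte */
    return len;
}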
void rfbVncAuthProcessResponse(rfbClientPtr cl)
{
char passwdFullControl[MAXPWLEN + 1] = "\0";
char passwdViewOnly[MAXPWLEN + 1] = "\0";
int numPasswords;
Bool ok;
int n;
CARD8 response[CHALLENGESIZE];
n = ReadExact(cl, (char *)response, CHALLENGESIZE);
if (n <= 0) {
if (n != 0)
rfbLogPerror("rfbVncAuthProcessResponse: read");
rfbCloseClient(cl);
return;
}
ok = FALSE;
if (rfbOptOtpAuth()) {
if (rfbAuthOTPValue == NULL) {
if (nSecTypesEnabled == 1) {
rfbClientAuthFailed(cl, "The one-time password has not been set on the server");
return;
}
} else {
memcpy(passwdFullControl, rfbAuthOTPValue, MAXPWLEN);
passwdFullControl[MAXPWLEN] = '\0';
numPasswords = rfbAuthOTPValueLen / MAXPWLEN;
if (numPasswords > 1) {
memcpy(passwdViewOnly, rfbAuthOTPValue + MAXPWLEN, MAXPWLEN);
passwdViewOnly[MAXPWLEN] = '\0';
}
ok = CheckResponse(cl, numPasswords, passwdFullControl, passwdViewOnly,
response);
if (ok) {
memset(rfbAuthOTPValue, 0, rfbAuthOTPValueLen);
free(rfbAuthOTPValue);
rfbAuthOTPValue = NULL;
}
}
}
if ((ok == FALSE) && rfbOptRfbAuth()) {
if (!rfbAuthPasswdFile) {
rfbClientAuthFailed(cl, "No VNC password file specified on the server (did you forget -rfbauth?)");
return;
}
numPasswords = vncDecryptPasswdFromFile2(rfbAuthPasswdFile,
passwdFullControl,
passwdViewOnly);
if (numPasswords == 0) {
rfbLog("rfbVncAuthProcessResponse: could not get password from %s\n",
rfbAuthPasswdFile);
if (nSecTypesEnabled == 1) {
rfbClientAuthFailed(cl, "The server could not read the VNC password file");
return;
}
}
ok = CheckResponse(cl, numPasswords, passwdFullControl, passwdViewOnly,
response);
}
if (ok) {
rfbAuthUnblock();
rfbClientAuthSucceeded(cl, rfbAuthVNC);
} else {
rfbLog("rfbVncAuthProcessResponse: authentication failed from %s\n",
cl->host);
if (rfbAuthConsiderBlocking())
rfbClientAuthFailed(cl, "Authentication failed. Too many tries");
else
rfbClientAuthFailed(cl, "Authentication failed");
}
}
|
Safe
|
[
"CWE-787"
] |
turbovnc
|
cea98166008301e614e0d36776bf9435a536136e
|
2.274829949305864e+38
| 79 |
Server: Fix two issues identified by ASan
1. If the TLSPlain and X509Plain security types were both disabled, then
rfbOptPamAuth() would overflow the name field in the secTypes
structure when testing the "none" security type, since the name of
that security type has less than five characters. This issue was
innocuous, since the overflow was fully contained within the secTypes
structure, but the ASan error caused Xvnc to abort, which made it
difficult to detect other errors.
2. If an ill-behaved RFB client sent the TurboVNC Server a fence
message with more than 64 bytes, then the TurboVNC Server would
try to read that message and subsequently overflow the stack before
it detected that the payload was too large. This could never have
occurred with any of the VNC viewers that currently support the RFB
flow control extensions (TigerVNC and TurboVNC, namely.) This issue
was also innocuous, since the stack overflow affected two variables
(newScreens and errMsg) that were never accessed before the function
returned.
| 0 |
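Issue 2 in the record above is the classic "validate the length before reading into a fixed buffer" bug. Below is a simplified sketch of the safe order of operations; ReadExact() is declared here as a stand-in with an assumed signature, not the exact TurboVNC helper (the real one takes a client pointer).

#include <stdint.h>

/* Assumed stand-in for the server's blocking socket-read helper. */
extern int ReadExact(int sock, char *buf, int n);

#define FENCE_PAYLOAD_MAX 64    /* limit from the RFB fence extension */

/* Validate the advertised payload length *before* reading it into a
 * fixed-size stack buffer, which is the ordering the fix enforces. */
static int read_fence(int sock, uint8_t len)
{
    char payload[FENCE_PAYLOAD_MAX];

    if (len > FENCE_PAYLOAD_MAX)
        return -1;              /* reject: would overflow payload[] */
    if (ReadExact(sock, payload, len) <= 0)
        return -1;
    /* ... process the fence message ... */
    return 0;
}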
network_pass_proxy (const char *proxy, int sock, const char *address, int port)
{
int rc;
struct t_proxy *ptr_proxy;
rc = 0;
ptr_proxy = proxy_search (proxy);
if (ptr_proxy)
{
switch (CONFIG_INTEGER(ptr_proxy->options[PROXY_OPTION_TYPE]))
{
case PROXY_TYPE_HTTP:
rc = network_pass_httpproxy (ptr_proxy, sock, address, port);
break;
case PROXY_TYPE_SOCKS4:
rc = network_pass_socks4proxy (ptr_proxy, sock, address, port);
break;
case PROXY_TYPE_SOCKS5:
rc = network_pass_socks5proxy (ptr_proxy, sock, address, port);
break;
}
}
return rc;
}
|
Safe
|
[
"CWE-20"
] |
weechat
|
c265cad1c95b84abfd4e8d861f25926ef13b5d91
|
2.3445524552684822e+38
| 25 |
Fix verification of SSL certificates by calling gnutls verify callback (patch #7459)
| 0 |
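The commit message refers to gnutls's verification callback mechanism. A minimal sketch of wiring one up is below; WeeChat's real callback does considerably more (hostname matching, user options, fingerprint checks), so treat this as the general mechanism only.

#include <gnutls/gnutls.h>

/* Reject the handshake unless peer certificate verification passes.
 * Returning non-zero from this callback aborts the TLS handshake. */
static int verify_cb(gnutls_session_t session)
{
    unsigned int status = 0;

    if (gnutls_certificate_verify_peers2(session, &status) < 0)
        return -1;                    /* verification could not run */
    return (status == 0) ? 0 : -1;    /* any flagged problem => reject */
}

/* Without this registration, the handshake succeeds even when the
 * peer's certificate is invalid. */
void setup_credentials(gnutls_certificate_credentials_t cred)
{
    gnutls_certificate_set_verify_function(cred, verify_cb);
}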
get_quote_count(const char *line)
{
int quote_count;
const char *ptr= line;
for(quote_count= 0; ptr ++ && *ptr; ptr= strpbrk(ptr, "\"\'`"))
quote_count ++;
return quote_count;
}
|
Vulnerable
|
[] |
mysql-server
|
20addb05e58fdf822896f490fcaaf2ec5ed4bcb5
|
1.8681916226058052e+38
| 10 |
Bug# 25998635: Client does not escape the USE statement
When there are quotes in the USE statement, the mysql client does
not correctly escape them.
The USE statement is processed line by line by the client's parser,
which, unlike the server, cannot handle multi-line commands.
The fix is to escape the USE parameters whenever quotes are used
(an escaping sketch follows this record).
| 1 |
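Given the bug above (quote characters in a USE argument passed through unescaped), the fix direction is to escape the identifier before sending it. Here is a hypothetical, self-contained C sketch of backtick-doubling, which is how MySQL quotes backticks inside a `quoted identifier`; the real client uses its own buffer and quoting helpers.

#include <stdio.h>
#include <stddef.h>

/* Escape embedded backticks by doubling them. Hypothetical helper. */
static void escape_use_arg(const char *in, char *out, size_t outsz)
{
    size_t o = 0;
    const char *p;

    for (p = in; *p && o + 2 < outsz; p++) {
        if (*p == '`')
            out[o++] = '`';     /* double the backtick */
        out[o++] = *p;
    }
    out[o] = '\0';
}

int main(void)
{
    char buf[128];

    escape_use_arg("my`db", buf, sizeof(buf));
    printf("USE `%s`\n", buf);   /* prints: USE `my``db` */
    return 0;
}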
AP4_AvccAtom::GetProfileName(AP4_UI08 profile)
{
switch (profile) {
case AP4_AVC_PROFILE_BASELINE: return "Baseline";
case AP4_AVC_PROFILE_MAIN: return "Main";
case AP4_AVC_PROFILE_EXTENDED: return "Extended";
case AP4_AVC_PROFILE_HIGH: return "High";
case AP4_AVC_PROFILE_HIGH_10: return "High 10";
case AP4_AVC_PROFILE_HIGH_422: return "High 4:2:2";
case AP4_AVC_PROFILE_HIGH_444: return "High 4:4:4";
}
return NULL;
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
Bento4
|
53499d8d4c69142137c7c7f0097a444783fdeb90
|
2.7234895035708933e+38
| 14 |
fix for #188
| 0 |
static double mp_memcopy(_cimg_math_parser& mp) {
longT siz = (longT)_mp_arg(4);
const longT inc_d = (longT)_mp_arg(5), inc_s = (longT)_mp_arg(6);
const float
_opacity = (float)_mp_arg(7),
opacity = (float)cimg::abs(_opacity),
omopacity = 1 - std::max(_opacity,0.0f);
if (siz>0) {
const bool
is_doubled = mp.opcode[8]<=1,
is_doubles = mp.opcode[15]<=1;
if (is_doubled && is_doubles) { // (double*) <- (double*)
double *ptrd = _mp_memcopy_double(mp,(unsigned int)mp.opcode[2],&mp.opcode[8],siz,inc_d);
const double *ptrs = _mp_memcopy_double(mp,(unsigned int)mp.opcode[3],&mp.opcode[15],siz,inc_s);
if (inc_d==1 && inc_s==1 && _opacity>=1) {
if (ptrs + siz - 1<ptrd || ptrs>ptrd + siz - 1) std::memcpy(ptrd,ptrs,siz*sizeof(double));
else std::memmove(ptrd,ptrs,siz*sizeof(double));
} else {
if (ptrs + (siz - 1)*inc_s<ptrd || ptrs>ptrd + (siz - 1)*inc_d) {
if (_opacity>=1) while (siz-->0) { *ptrd = *ptrs; ptrd+=inc_d; ptrs+=inc_s; }
else while (siz-->0) { *ptrd = omopacity**ptrd + opacity**ptrs; ptrd+=inc_d; ptrs+=inc_s; }
} else { // Overlapping buffers
CImg<doubleT> buf((unsigned int)siz);
cimg_for(buf,ptr,double) { *ptr = *ptrs; ptrs+=inc_s; }
ptrs = buf;
if (_opacity>=1) while (siz-->0) { *ptrd = *(ptrs++); ptrd+=inc_d; }
else while (siz-->0) { *ptrd = omopacity**ptrd + opacity**(ptrs++); ptrd+=inc_d; }
}
}
} else if (is_doubled && !is_doubles) { // (double*) <- (float*)
double *ptrd = _mp_memcopy_double(mp,(unsigned int)mp.opcode[2],&mp.opcode[8],siz,inc_d);
const float *ptrs = _mp_memcopy_float(mp,&mp.opcode[15],siz,inc_s);
if (_opacity>=1) while (siz-->0) { *ptrd = *ptrs; ptrd+=inc_d; ptrs+=inc_s; }
else while (siz-->0) { *ptrd = omopacity**ptrd + _opacity**ptrs; ptrd+=inc_d; ptrs+=inc_s; }
} else if (!is_doubled && is_doubles) { // (float*) <- (double*)
float *ptrd = _mp_memcopy_float(mp,&mp.opcode[8],siz,inc_d);
const double *ptrs = _mp_memcopy_double(mp,(unsigned int)mp.opcode[3],&mp.opcode[15],siz,inc_s);
if (_opacity>=1) while (siz-->0) { *ptrd = (float)*ptrs; ptrd+=inc_d; ptrs+=inc_s; }
else while (siz-->0) { *ptrd = (float)(omopacity**ptrd + opacity**ptrs); ptrd+=inc_d; ptrs+=inc_s; }
} else { // (float*) <- (float*)
float *ptrd = _mp_memcopy_float(mp,&mp.opcode[8],siz,inc_d);
const float *ptrs = _mp_memcopy_float(mp,&mp.opcode[15],siz,inc_s);
if (inc_d==1 && inc_s==1 && _opacity>=1) {
if (ptrs + siz - 1<ptrd || ptrs>ptrd + siz - 1) std::memcpy(ptrd,ptrs,siz*sizeof(float));
else std::memmove(ptrd,ptrs,siz*sizeof(float));
} else {
if (ptrs + (siz - 1)*inc_s<ptrd || ptrs>ptrd + (siz - 1)*inc_d) {
if (_opacity>=1) while (siz-->0) { *ptrd = *ptrs; ptrd+=inc_d; ptrs+=inc_s; }
else while (siz-->0) { *ptrd = omopacity**ptrd + opacity**ptrs; ptrd+=inc_d; ptrs+=inc_s; }
} else { // Overlapping buffers
CImg<floatT> buf((unsigned int)siz);
cimg_for(buf,ptr,float) { *ptr = *ptrs; ptrs+=inc_s; }
ptrs = buf;
if (_opacity>=1) while (siz-->0) { *ptrd = *(ptrs++); ptrd+=inc_d; }
else while (siz-->0) { *ptrd = omopacity**ptrd + opacity**(ptrs++); ptrd+=inc_d; }
}
}
}
}
return _mp_arg(1);
}
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
8.944073927105609e+37
| 61 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
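The recurring test in mp_memcopy() above deserves a note: for unit strides, two n-element ranges are provably disjoint iff the source ends before the destination starts or begins after it ends, in which case memcpy is safe; otherwise memmove is required. Extracted as a standalone C sketch:

#include <string.h>
#include <stddef.h>

/* Unit-stride version of the overlap test used throughout the record
 * above. */
static void copy_doubles(double *dst, const double *src, size_t n)
{
    if (n == 0)
        return;
    if (src + n - 1 < dst || src > dst + n - 1)
        memcpy(dst, src, n * sizeof(double));    /* disjoint: memcpy is fine */
    else
        memmove(dst, src, n * sizeof(double));   /* overlapping: memmove */
}

For non-unit strides the original falls back to staging the source in a temporary buffer, since memmove's guarantee only covers flat overlapping copies.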
int udev_monitor_enable_receiving(struct udev_monitor *udev_monitor)
{
int err;
const int on = 1;
if (udev_monitor->snl.nl_family != 0) {
err = bind(udev_monitor->sock,
(struct sockaddr *)&udev_monitor->snl, sizeof(struct sockaddr_nl));
if (err < 0) {
err(udev_monitor->udev, "bind failed: %m\n");
return err;
}
dbg(udev_monitor->udev, "monitor %p listening on netlink\n", udev_monitor);
} else if (udev_monitor->sun.sun_family != 0) {
err = bind(udev_monitor->sock,
(struct sockaddr *)&udev_monitor->sun, udev_monitor->addrlen);
if (err < 0) {
err(udev_monitor->udev, "bind failed: %m\n");
return err;
}
/* enable receiving of the sender credentials */
setsockopt(udev_monitor->sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
dbg(udev_monitor->udev, "monitor %p listening on socket\n", udev_monitor);
}
return 0;
}
|
Vulnerable
|
[
"CWE-346"
] |
udev
|
e2b362d9f23d4c63018709ab5f81a02f72b91e75
|
4.514299789452442e+37
| 26 |
libudev: monitor - unify socket message handling
| 1 |
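CWE-346 (origin validation error) on a netlink uevent socket is typically addressed on the receive path: only trust messages whose sender address says "kernel". The sketch below shows that direction; it illustrates the hardening idea, not the literal udev patch.

#include <linux/netlink.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>

/* Receive a uevent, but only accept it if the sender address says
 * "kernel": nl_pid == 0 and a kernel multicast group set. Unicast
 * messages from other user-space processes are dropped. */
ssize_t recv_uevent(int sock, char *buf, size_t len)
{
    struct sockaddr_nl snl;
    socklen_t addrlen = sizeof(snl);
    ssize_t n;

    memset(&snl, 0, sizeof(snl));
    n = recvfrom(sock, buf, len, 0, (struct sockaddr *)&snl, &addrlen);
    if (n <= 0)
        return n;
    if (addrlen != sizeof(snl) || snl.nl_pid != 0 || snl.nl_groups == 0)
        return -1;    /* origin check failed: not from the kernel */
    return n;
}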
camel_imapx_server_set_tagprefix (CamelIMAPXServer *is,
gchar tagprefix)
{
g_return_if_fail (CAMEL_IS_IMAPX_SERVER (is));
g_return_if_fail ((tagprefix >= 'A' && tagprefix <= 'Z') || (tagprefix >= 'a' && tagprefix <= 'z'));
is->priv->tagprefix = tagprefix;
}
|
Safe
|
[] |
evolution-data-server
|
f26a6f672096790d0bbd76903db4c9a2e44f116b
|
2.3587530252314238e+38
| 8 |
[IMAPx] 'STARTTLS not supported' error ignored
When a user has set up the STARTTLS encryption method, but the server doesn't
support it, an error should be shown to the user instead of falling back
to an unencrypted connection. Two bugs in the existing code prevented
this error from being raised and the failure from being reported properly
(a schematic of the intended behavior follows this record).
This had been filed at:
https://bugzilla.redhat.com/show_bug.cgi?id=1334842
| 0 |
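The intended behavior from the bug report above, in schematic form: a configured STARTTLS that the server does not advertise must be a hard error, never a silent plaintext fallback. Function and flag names here are illustrative, not the camel/IMAPx API.

#include <stdbool.h>
#include <stdio.h>

/* If the user configured STARTTLS and the server does not advertise
 * it, fail the connection instead of silently continuing in plaintext. */
int negotiate(bool want_starttls, bool server_has_starttls)
{
    if (want_starttls && !server_has_starttls) {
        fprintf(stderr, "STARTTLS not supported by server; refusing to "
                        "continue over an unencrypted connection\n");
        return -1;    /* surface the error instead of falling back */
    }
    /* ... proceed with STARTTLS, or plaintext if that was configured ... */
    return 0;
}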
CModule::EModRet CModule::OnModuleLoading(const CString& sModName,
const CString& sArgs,
CModInfo::EModuleType eType,
bool& bSuccess, CString& sRetMsg) {
return CONTINUE;
}
|
Safe
|
[
"CWE-20",
"CWE-264"
] |
znc
|
8de9e376ce531fe7f3c8b0aa4876d15b479b7311
|
2.955446755490408e+38
| 6 |
Fix remote code execution and privilege escalation vulnerability.
To trigger this, an attacker already needs to have a user account.
Thanks to Jeriko One <jeriko.one@gmx.us> for finding and reporting this
(a generic validation sketch follows this record).
CVE-2019-12816
| 0 |
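The commit message does not spell out the mechanism, but module loading by user-supplied name is classically hardened by rejecting names that could escape the module directory. A generic plain-C validation sketch in that spirit (not ZNC's actual C++ patch):

#include <string.h>

/* Accept a module name only if it cannot escape the module directory:
 * no path separators and no parent-directory components. */
int module_name_ok(const char *name)
{
    if (name == NULL || name[0] == '\0')
        return 0;
    if (strchr(name, '/') != NULL || strchr(name, '\\') != NULL)
        return 0;    /* no path separators */
    if (strstr(name, "..") != NULL)
        return 0;    /* no parent-directory escapes */
    return 1;
}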