func
string | target
string | cwe
list | project
string | commit_id
string | hash
string | size
int64 | message
string | vul
int64 |
---|---|---|---|---|---|---|---|---|
check_symlinks_fsobj(char *path, int *a_eno, struct archive_string *a_estr,
int flags, int checking_linkname)
{
#if !defined(HAVE_LSTAT) && \
!(defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT))
/* Platform doesn't have lstat, so we can't look for symlinks. */
(void)path; /* UNUSED */
(void)error_number; /* UNUSED */
(void)error_string; /* UNUSED */
(void)flags; /* UNUSED */
(void)checking_linkname; /* UNUSED */
return (ARCHIVE_OK);
#else
int res = ARCHIVE_OK;
char *tail;
char *head;
int last;
char c;
int r;
struct stat st;
int chdir_fd;
#if defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT)
int fd;
#endif
/* Nothing to do here if name is empty */
if(path[0] == '\0')
return (ARCHIVE_OK);
/*
* Guard against symlink tricks. Reject any archive entry whose
* destination would be altered by a symlink.
*
* Walk the filename in chunks separated by '/'. For each segment:
* - if it doesn't exist, continue
* - if it's symlink, abort or remove it
* - if it's a directory and it's not the last chunk, cd into it
* As we go:
* head points to the current (relative) path
* tail points to the temporary \0 terminating the segment we're
* currently examining
* c holds what used to be in *tail
* last is 1 if this is the last tail
*/
chdir_fd = la_opendirat(AT_FDCWD, ".");
__archive_ensure_cloexec_flag(chdir_fd);
if (chdir_fd < 0) {
fsobj_error(a_eno, a_estr, errno,
"Could not open ", path);
return (ARCHIVE_FATAL);
}
head = path;
tail = path;
last = 0;
/* TODO: reintroduce a safe cache here? */
/* Skip the root directory if the path is absolute. */
if(tail == path && tail[0] == '/')
++tail;
/* Keep going until we've checked the entire name.
* head, tail, path all alias the same string, which is
* temporarily zeroed at tail, so be careful restoring the
* stashed (c=tail[0]) for error messages.
* Exiting the loop with break is okay; continue is not.
*/
while (!last) {
/*
* Skip the separator we just consumed, plus any adjacent ones
*/
while (*tail == '/')
++tail;
/* Skip the next path element. */
while (*tail != '\0' && *tail != '/')
++tail;
/* is this the last path component? */
last = (tail[0] == '\0') || (tail[0] == '/' && tail[1] == '\0');
/* temporarily truncate the string here */
c = tail[0];
tail[0] = '\0';
/* Check that we haven't hit a symlink. */
#if defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT)
r = fstatat(chdir_fd, head, &st, AT_SYMLINK_NOFOLLOW);
#else
r = lstat(head, &st);
#endif
if (r != 0) {
tail[0] = c;
/* We've hit a dir that doesn't exist; stop now. */
if (errno == ENOENT) {
break;
} else {
/*
* Treat any other error as fatal - best to be
* paranoid here.
* Note: This effectively disables deep
* directory support when security checks are
* enabled. Otherwise, very long pathnames that
* trigger an error here could evade the
* sandbox.
* TODO: We could do better, but it would
* probably require merging the symlink checks
* with the deep-directory editing.
*/
fsobj_error(a_eno, a_estr, errno,
"Could not stat ", path);
res = ARCHIVE_FAILED;
break;
}
} else if (S_ISDIR(st.st_mode)) {
if (!last) {
#if defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT)
fd = la_opendirat(chdir_fd, head);
if (fd < 0)
r = -1;
else {
r = 0;
close(chdir_fd);
chdir_fd = fd;
}
#else
r = chdir(head);
#endif
if (r != 0) {
tail[0] = c;
fsobj_error(a_eno, a_estr, errno,
"Could not chdir ", path);
res = (ARCHIVE_FATAL);
break;
}
/* Our view is now from inside this dir: */
head = tail + 1;
}
} else if (S_ISLNK(st.st_mode)) {
if (last && checking_linkname) {
#ifdef HAVE_LINKAT
/*
* Hardlinks to symlinks are safe to write
* if linkat() is supported as it does not
* follow symlinks.
*/
res = ARCHIVE_OK;
#else
/*
* We return ARCHIVE_FAILED here as we are
* not able to safely write hardlinks
* to symlinks.
*/
tail[0] = c;
fsobj_error(a_eno, a_estr, errno,
"Cannot write hardlink to symlink ",
path);
res = ARCHIVE_FAILED;
#endif
break;
} else
if (last) {
/*
* Last element is symlink; remove it
* so we can overwrite it with the
* item being extracted.
*/
#if defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT)
r = unlinkat(chdir_fd, head, 0);
#else
r = unlink(head);
#endif
if (r != 0) {
tail[0] = c;
fsobj_error(a_eno, a_estr, errno,
"Could not remove symlink ",
path);
res = ARCHIVE_FAILED;
break;
}
/*
* Even if we did remove it, a warning
* is in order. The warning is silly,
* though, if we're just replacing one
* symlink with another symlink.
*/
tail[0] = c;
/*
* FIXME: not sure how important this is to
* restore
*/
/*
if (!S_ISLNK(path)) {
fsobj_error(a_eno, a_estr, 0,
"Removing symlink ", path);
}
*/
/* Symlink gone. No more problem! */
res = ARCHIVE_OK;
break;
} else if (flags & ARCHIVE_EXTRACT_UNLINK) {
/* User asked us to remove problems. */
#if defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT)
r = unlinkat(chdir_fd, head, 0);
#else
r = unlink(head);
#endif
if (r != 0) {
tail[0] = c;
fsobj_error(a_eno, a_estr, 0,
"Cannot remove intervening "
"symlink ", path);
res = ARCHIVE_FAILED;
break;
}
tail[0] = c;
} else if ((flags &
ARCHIVE_EXTRACT_SECURE_SYMLINKS) == 0) {
/*
* We are not the last element and we want to
* follow symlinks if they are a directory.
*
* This is needed to extract hardlinks over
* symlinks.
*/
#if defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT)
r = fstatat(chdir_fd, head, &st, 0);
#else
r = la_stat(head, &st);
#endif
if (r != 0) {
tail[0] = c;
if (errno == ENOENT) {
break;
} else {
fsobj_error(a_eno, a_estr,
errno,
"Could not stat ", path);
res = (ARCHIVE_FAILED);
break;
}
} else if (S_ISDIR(st.st_mode)) {
#if defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT)
fd = la_opendirat(chdir_fd, head);
if (fd < 0)
r = -1;
else {
r = 0;
close(chdir_fd);
chdir_fd = fd;
}
#else
r = chdir(head);
#endif
if (r != 0) {
tail[0] = c;
fsobj_error(a_eno, a_estr,
errno,
"Could not chdir ", path);
res = (ARCHIVE_FATAL);
break;
}
/*
* Our view is now from inside
* this dir:
*/
head = tail + 1;
} else {
tail[0] = c;
fsobj_error(a_eno, a_estr, 0,
"Cannot extract through "
"symlink ", path);
res = ARCHIVE_FAILED;
break;
}
} else {
tail[0] = c;
fsobj_error(a_eno, a_estr, 0,
"Cannot extract through symlink ", path);
res = ARCHIVE_FAILED;
break;
}
}
/* be sure to always maintain this */
tail[0] = c;
if (tail[0] != '\0')
tail++; /* Advance to the next segment. */
}
/* Catches loop exits via break */
tail[0] = c;
#if defined(HAVE_OPENAT) && defined(HAVE_FSTATAT) && defined(HAVE_UNLINKAT)
/* If we operate with openat(), fstatat() and unlinkat() there was
* no chdir(), so just close the fd */
if (chdir_fd >= 0)
close(chdir_fd);
#elif HAVE_FCHDIR
/* If we changed directory above, restore it here. */
if (chdir_fd >= 0) {
r = fchdir(chdir_fd);
if (r != 0) {
fsobj_error(a_eno, a_estr, errno,
"chdir() failure", "");
}
close(chdir_fd);
chdir_fd = -1;
if (r != 0) {
res = (ARCHIVE_FATAL);
}
}
#endif
/* TODO: reintroduce a safe cache here? */
return res;
#endif
}
|
Safe
|
[
"CWE-59",
"CWE-269"
] |
libarchive
|
b41daecb5ccb4c8e3b2c53fd6147109fc12c3043
|
2.6449521042107702e+38
| 307 |
Do not follow symlinks when processing the fixup list
Use lchmod() instead of chmod() and tell the remaining functions that the
real file to be modified is a symbolic link.
Fixes #1566
| 0 |
static ssize_t snd_pcm_oss_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
struct snd_pcm_oss_file *pcm_oss_file;
struct snd_pcm_substream *substream;
pcm_oss_file = file->private_data;
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
if (substream == NULL)
return -ENXIO;
substream->f_flags = file->f_flags & O_NONBLOCK;
#ifndef OSS_DEBUG
return snd_pcm_oss_read1(substream, buf, count);
#else
{
ssize_t res = snd_pcm_oss_read1(substream, buf, count);
pcm_dbg(substream->pcm,
"pcm_oss: read %li bytes (returned %li bytes)\n",
(long)count, (long)res);
return res;
}
#endif
}
|
Safe
|
[
"CWE-362"
] |
linux
|
8423f0b6d513b259fdab9c9bf4aaa6188d054c2d
|
6.904949680364804e+36
| 22 |
ALSA: pcm: oss: Fix race at SNDCTL_DSP_SYNC
There is a small race window at snd_pcm_oss_sync() that is called from
OSS PCM SNDCTL_DSP_SYNC ioctl; namely the function calls
snd_pcm_oss_make_ready() at first, then takes the params_lock mutex
for the rest. When the stream is set up again by another thread
between them, it leads to inconsistency, and may result in unexpected
results such as NULL dereference of OSS buffer as a fuzzer spotted
recently.
The fix is simply to cover snd_pcm_oss_make_ready() call into the same
params_lock mutex with snd_pcm_oss_make_ready_locked() variant.
Reported-and-tested-by: butt3rflyh4ck <butterflyhuangxx@gmail.com>
Reviewed-by: Jaroslav Kysela <perex@perex.cz>
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/CAFcO6XN7JDM4xSXGhtusQfS2mSBcx50VJKwQpCq=WeLt57aaZA@mail.gmail.com
Link: https://lore.kernel.org/r/20220905060714.22549-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
| 0 |
vim9_aborting(int prev_uncaught_emsg)
{
return uncaught_emsg > prev_uncaught_emsg || got_int || did_throw;
}
|
Safe
|
[
"CWE-416"
] |
vim
|
9c23f9bb5fe435b28245ba8ac65aa0ca6b902c04
|
1.4836076471306725e+38
| 4 |
patch 8.2.3902: Vim9: double free with nested :def function
Problem: Vim9: double free with nested :def function.
Solution: Pass "line_to_free" from compile_def_function() and make sure
cmdlinep is valid.
| 0 |
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
size_t copied;
int err;
BT_DBG("sock %p sk %p len %zu", sock, sk, len);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err == 0)
sock_recv_ts_and_drops(msg, sk, skb);
skb_free_datagram(sk, skb);
return err ? : copied;
}
|
Vulnerable
|
[
"CWE-200"
] |
linux
|
4683f42fde3977bdb4e8a09622788cc8b5313778
|
1.9941110067346586e+37
| 38 |
Bluetooth: fix possible info leak in bt_sock_recvmsg()
In case the socket is already shutting down, bt_sock_recvmsg() returns
with 0 without updating msg_namelen leading to net/socket.c leaking the
local, uninitialized sockaddr_storage variable to userland -- 128 bytes
of kernel stack memory.
Fix this by moving the msg_namelen assignment in front of the shutdown
test.
Cc: Marcel Holtmann <marcel@holtmann.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Johan Hedberg <johan.hedberg@gmail.com>
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 1 |
static int zero_clientid(clientid_t *clid)
{
return (clid->cl_boot == 0) && (clid->cl_id == 0);
}
|
Safe
|
[
"CWE-20",
"CWE-129"
] |
linux
|
f961e3f2acae94b727380c0b74e2d3954d0edf79
|
3.056293135721937e+38
| 4 |
nfsd: encoders mustn't use unitialized values in error cases
In error cases, lgp->lg_layout_type may be out of bounds; so we
shouldn't be using it until after the check of nfserr.
This was seen to crash nfsd threads when the server receives a LAYOUTGET
request with a large layout type.
GETDEVICEINFO has the same problem.
Reported-by: Ari Kauppi <Ari.Kauppi@synopsys.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: stable@vger.kernel.org
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
| 0 |
void dtls1_reset_seq_numbers(SSL *s, int rw)
{
unsigned char *seq;
unsigned int seq_bytes = sizeof(s->s3->read_sequence);
if (rw & SSL3_CC_READ) {
seq = s->s3->read_sequence;
s->d1->r_epoch++;
memcpy(&(s->d1->bitmap), &(s->d1->next_bitmap), sizeof(DTLS1_BITMAP));
memset(&(s->d1->next_bitmap), 0x00, sizeof(DTLS1_BITMAP));
/*
* We must not use any buffered messages received from the previous
* epoch
*/
dtls1_clear_received_buffer(s);
} else {
seq = s->s3->write_sequence;
memcpy(s->d1->last_write_sequence, seq,
sizeof(s->s3->write_sequence));
s->d1->w_epoch++;
}
memset(seq, 0x00, seq_bytes);
}
|
Safe
|
[
"CWE-399"
] |
openssl
|
cfd40fd39e69f5e3c654ae8fbf9acb1d2a051144
|
6.0797014542262695e+37
| 25 |
Prevent DTLS Finished message injection
Follow on from CVE-2016-2179
The investigation and analysis of CVE-2016-2179 highlighted a related flaw.
This commit fixes a security "near miss" in the buffered message handling
code. Ultimately this is not currently believed to be exploitable due to
the reasons outlined below, and therefore there is no CVE for this on its
own.
The issue this commit fixes is a MITM attack where the attacker can inject
a Finished message into the handshake. In the description below it is
assumed that the attacker injects the Finished message for the server to
receive it. The attack could work equally well the other way around (i.e
where the client receives the injected Finished message).
The MITM requires the following capabilities:
- The ability to manipulate the MTU that the client selects such that it
is small enough for the client to fragment Finished messages.
- The ability to selectively drop and modify records sent from the client
- The ability to inject its own records and send them to the server
The MITM forces the client to select a small MTU such that the client
will fragment the Finished message. Ideally for the attacker the first
fragment will contain all but the last byte of the Finished message,
with the second fragment containing the final byte.
During the handshake and prior to the client sending the CCS the MITM
injects a plaintext Finished message fragment to the server containing
all but the final byte of the Finished message. The message sequence
number should be the one expected to be used for the real Finished message.
OpenSSL will recognise that the received fragment is for the future and
will buffer it for later use.
After the client sends the CCS it then sends its own Finished message in
two fragments. The MITM causes the first of these fragments to be
dropped. The OpenSSL server will then receive the second of the fragments
and reassemble the complete Finished message consisting of the MITM
fragment and the final byte from the real client.
The advantage to the attacker in injecting a Finished message is that
this provides the capability to modify other handshake messages (e.g.
the ClientHello) undetected. A difficulty for the attacker is knowing in
advance what impact any of those changes might have on the final byte of
the handshake hash that is going to be sent in the "real" Finished
message. In the worst case for the attacker this means that only 1 in
256 of such injection attempts will succeed.
It may be possible in some situations for the attacker to improve this such
that all attempts succeed. For example if the handshake includes client
authentication then the final message flight sent by the client will
include a Certificate. Certificates are ASN.1 objects where the signed
portion is DER encoded. The non-signed portion could be BER encoded and so
the attacker could re-encode the certificate such that the hash for the
whole handshake comes to a different value. The certificate re-encoding
would not be detectable because only the non-signed portion is changed. As
this is the final flight of messages sent from the client the attacker
knows what the complete hanshake hash value will be that the client will
send - and therefore knows what the final byte will be. Through a process
of trial and error the attacker can re-encode the certificate until the
modified handhshake also has a hash with the same final byte. This means
that when the Finished message is verified by the server it will be
correct in all cases.
In practice the MITM would need to be able to perform the same attack
against both the client and the server. If the attack is only performed
against the server (say) then the server will not detect the modified
handshake, but the client will and will abort the connection.
Fortunately, although OpenSSL is vulnerable to Finished message
injection, it is not vulnerable if *both* client and server are OpenSSL.
The reason is that OpenSSL has a hard "floor" for a minimum MTU size
that it will never go below. This minimum means that a Finished message
will never be sent in a fragmented form and therefore the MITM does not
have one of its pre-requisites. Therefore this could only be exploited
if using OpenSSL and some other DTLS peer that had its own and separate
Finished message injection flaw.
The fix is to ensure buffered messages are cleared on epoch change.
Reviewed-by: Richard Levitte <levitte@openssl.org>
| 0 |
GetFileDownloadLengthErrResponseMsg()
{
char reason [] = "Path length exceeds PATH_MAX (4096) bytes";
int reasonLen = strlen(reason);
return CreateFileDownloadErrMsg(reason, reasonLen);
}
|
Safe
|
[
"CWE-416"
] |
libvncserver
|
73cb96fec028a576a5a24417b57723b55854ad7b
|
2.833196360881357e+38
| 7 |
tightvnc-filetransfer: wait for download thread end in CloseUndoneFileDownload()
...and use it when deregistering the file transfer extension.
Closes #242
| 0 |
set_pfd(char *s) {
if (!isdigit(*s))
die(EX_USAGE,
_("mount: argument to -p or --pass-fd must be a number"));
pfd = atoi(optarg);
}
|
Safe
|
[
"CWE-399"
] |
util-linux
|
4b39b6aefd5dd8ac68a92adc650dc13d5d54d704
|
8.662785558653121e+37
| 6 |
mount: use fflush() and temporary file for mtab updates (CVE-2011-1089)
http://thread.gmane.org/gmane.comp.security.oss.general/4374
Changes:
- force mount(8) to use /etc/mtab.tmp file every time. The original
code used the tmp file for remount/move operations only.
- call and check fflush() return code for the tmp file
Note mount(8) blocks all signals when writing to mtab, so it's not
affected by SIGXFSZ and the mtab lock file is always removed.
This patch does not fix the same issue in umount(8) and libmount.
Signed-off-by: Karel Zak <kzak@redhat.com>
| 0 |
GF_Err gitn_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GroupIdToNameBox *ptr = (GroupIdToNameBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->nb_entries);
for (i=0; i<ptr->nb_entries; i++) {
gf_bs_write_u32(bs, ptr->entries[i].group_id);
if (ptr->entries[i].name) gf_bs_write_data(bs, ptr->entries[i].name, (u32)strlen(ptr->entries[i].name) );
gf_bs_write_u8(bs, 0);
}
return GF_OK;
|
Safe
|
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
|
2.5435052420936267e+38
| 17 |
fixed #1587
| 0 |
static inline int input_available_p(struct tty_struct *tty, int poll)
{
struct n_tty_data *ldata = tty->disc_data;
int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
if (ldata->icanon && !L_EXTPROC(tty))
return ldata->canon_head != ldata->read_tail;
else
return ldata->commit_head - ldata->read_tail >= amt;
}
|
Safe
|
[
"CWE-704"
] |
linux
|
966031f340185eddd05affcf72b740549f056348
|
2.046611858138718e+38
| 10 |
n_tty: fix EXTPROC vs ICANON interaction with TIOCINQ (aka FIONREAD)
We added support for EXTPROC back in 2010 in commit 26df6d13406d ("tty:
Add EXTPROC support for LINEMODE") and the intent was to allow it to
override some (all?) ICANON behavior. Quoting from that original commit
message:
There is a new bit in the termios local flag word, EXTPROC.
When this bit is set, several aspects of the terminal driver
are disabled. Input line editing, character echo, and mapping
of signals are all disabled. This allows the telnetd to turn
off these functions when in linemode, but still keep track of
what state the user wants the terminal to be in.
but the problem turns out that "several aspects of the terminal driver
are disabled" is a bit ambiguous, and you can really confuse the n_tty
layer by setting EXTPROC and then causing some of the ICANON invariants
to no longer be maintained.
This fixes at least one such case (TIOCINQ) becoming unhappy because of
the confusion over whether ICANON really means ICANON when EXTPROC is set.
This basically makes TIOCINQ match the case of read: if EXTPROC is set,
we ignore ICANON. Also, make sure to reset the ICANON state ie EXTPROC
changes, not just if ICANON changes.
Fixes: 26df6d13406d ("tty: Add EXTPROC support for LINEMODE")
Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Reported-by: syzkaller <syzkaller@googlegroups.com>
Cc: Jiri Slaby <jslaby@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
parse_CT(char *arg, struct ofpbuf *ofpacts,
enum ofputil_protocol *usable_protocols)
{
const size_t ct_offset = ofpacts_pull(ofpacts);
struct ofpact_conntrack *oc;
char *error = NULL;
char *key, *value;
oc = ofpact_put_CT(ofpacts);
oc->flags = 0;
oc->recirc_table = NX_CT_RECIRC_NONE;
while (ofputil_parse_key_value(&arg, &key, &value)) {
if (!strcmp(key, "commit")) {
oc->flags |= NX_CT_F_COMMIT;
} else if (!strcmp(key, "table")) {
error = str_to_u8(value, "recirc_table", &oc->recirc_table);
if (!error && oc->recirc_table == NX_CT_RECIRC_NONE) {
error = xasprintf("invalid table %#"PRIx16, oc->recirc_table);
}
} else if (!strcmp(key, "zone")) {
error = str_to_u16(value, "zone", &oc->zone_imm);
if (error) {
free(error);
error = mf_parse_subfield(&oc->zone_src, value);
if (error) {
return error;
}
}
} else if (!strcmp(key, "alg")) {
error = str_to_connhelper(value, &oc->alg);
} else if (!strcmp(key, "nat")) {
const size_t nat_offset = ofpacts_pull(ofpacts);
error = parse_NAT(value, ofpacts, usable_protocols);
/* Update CT action pointer and length. */
ofpacts->header = ofpbuf_push_uninit(ofpacts, nat_offset);
oc = ofpacts->header;
} else if (!strcmp(key, "exec")) {
/* Hide existing actions from ofpacts_parse_copy(), so the
* nesting can be handled transparently. */
enum ofputil_protocol usable_protocols2;
const size_t exec_offset = ofpacts_pull(ofpacts);
/* Initializes 'usable_protocol2', fold it back to
* '*usable_protocols' afterwards, so that we do not lose
* restrictions already in there. */
error = ofpacts_parse_copy(value, ofpacts, &usable_protocols2,
false, OFPACT_CT);
*usable_protocols &= usable_protocols2;
ofpacts->header = ofpbuf_push_uninit(ofpacts, exec_offset);
oc = ofpacts->header;
} else {
error = xasprintf("invalid argument to \"ct\" action: `%s'", key);
}
if (error) {
break;
}
}
ofpact_finish_CT(ofpacts, &oc);
ofpbuf_push_uninit(ofpacts, ct_offset);
return error;
}
|
Safe
|
[
"CWE-125"
] |
ovs
|
9237a63c47bd314b807cda0bd2216264e82edbe8
|
1.8891867050541084e+38
| 64 |
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Justin Pettit <jpettit@ovn.org>
| 0 |
int input_register_device(struct input_dev *dev)
{
struct input_devres *devres = NULL;
struct input_handler *handler;
unsigned int packet_size;
const char *path;
int error;
if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
dev_err(&dev->dev,
"Absolute device without dev->absinfo, refusing to register\n");
return -EINVAL;
}
if (dev->devres_managed) {
devres = devres_alloc(devm_input_device_unregister,
sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
devres->input = dev;
}
/* Every input device generates EV_SYN/SYN_REPORT events. */
__set_bit(EV_SYN, dev->evbit);
/* KEY_RESERVED is not supposed to be transmitted to userspace. */
__clear_bit(KEY_RESERVED, dev->keybit);
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
packet_size = input_estimate_events_per_packet(dev);
if (dev->hint_events_per_packet < packet_size)
dev->hint_events_per_packet = packet_size;
dev->max_vals = dev->hint_events_per_packet + 2;
dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
if (!dev->vals) {
error = -ENOMEM;
goto err_devres_free;
}
/*
* If delay and period are pre-set by the driver, then autorepeating
* is handled by the driver itself and we don't do it in input.c.
*/
if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
input_enable_softrepeat(dev, 250, 33);
if (!dev->getkeycode)
dev->getkeycode = input_default_getkeycode;
if (!dev->setkeycode)
dev->setkeycode = input_default_setkeycode;
if (dev->poller)
input_dev_poller_finalize(dev->poller);
error = device_add(&dev->dev);
if (error)
goto err_free_vals;
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
pr_info("%s as %s\n",
dev->name ? dev->name : "Unspecified device",
path ? path : "N/A");
kfree(path);
error = mutex_lock_interruptible(&input_mutex);
if (error)
goto err_device_del;
list_add_tail(&dev->node, &input_dev_list);
list_for_each_entry(handler, &input_handler_list, node)
input_attach_handler(dev, handler);
input_wakeup_procfs_readers();
mutex_unlock(&input_mutex);
if (dev->devres_managed) {
dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
__func__, dev_name(&dev->dev));
devres_add(dev->dev.parent, devres);
}
return 0;
err_device_del:
device_del(&dev->dev);
err_free_vals:
kfree(dev->vals);
dev->vals = NULL;
err_devres_free:
devres_free(devres);
return error;
}
|
Safe
|
[
"CWE-703",
"CWE-787"
] |
linux
|
cb222aed03d798fc074be55e59d9a112338ee784
|
1.5321831125451978e+38
| 98 |
Input: add safety guards to input_set_keycode()
If we happen to have a garbage in input device's keycode table with values
too big we'll end up doing clear_bit() with offset way outside of our
bitmaps, damaging other objects within an input device or even outside of
it. Let's add sanity checks to the returned old keycodes.
Reported-by: syzbot+c769968809f9359b07aa@syzkaller.appspotmail.com
Reported-by: syzbot+76f3a30e88d256644c78@syzkaller.appspotmail.com
Link: https://lore.kernel.org/r/20191207212757.GA245964@dtor-ws
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
| 0 |
ptaRemovePt(PTA *pta,
l_int32 index)
{
l_int32 i, n;
PROCNAME("ptaRemovePt");
if (!pta)
return ERROR_INT("pta not defined", procName, 1);
n = ptaGetCount(pta);
if (index < 0 || index >= n)
return ERROR_INT("index not in {0...n - 1}", procName, 1);
/* Remove the point */
for (i = index + 1; i < n; i++) {
pta->x[i - 1] = pta->x[i];
pta->y[i - 1] = pta->y[i];
}
pta->n--;
return 0;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
leptonica
|
ee301cb2029db8a6289c5295daa42bba7715e99a
|
1.3340457013044813e+38
| 21 |
Security fixes: expect final changes for release 1.75.3.
* Fixed a debian security issue with fscanf() reading a string with
possible buffer overflow.
* There were also a few similar situations with sscanf().
| 0 |
static void stab_kfree_rcu(struct rcu_head *head)
{
kfree(container_of(head, struct qdisc_size_table, rcu));
}
|
Safe
|
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
|
1.22486493413509e+38
| 4 |
net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
int32_t yang_dnode_get_int32(const struct lyd_node *dnode,
const char *xpath_fmt, ...)
{
const struct lyd_value *dvalue;
dvalue = YANG_DNODE_XPATH_GET_VALUE(dnode, xpath_fmt);
assert(dvalue->realtype->basetype == LY_TYPE_INT32);
return dvalue->int32;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
frr
|
ac3133450de12ba86c051265fc0f1b12bc57b40c
|
2.3081092746051853e+38
| 8 |
isisd: fix #10505 using base64 encoding
Using base64 instead of the raw string to encode
the binary data.
Signed-off-by: whichbug <whichbug@github.com>
| 0 |
static GFINLINE Bool isor_is_local(const char *url)
{
if (!strnicmp(url, "file://", 7)) return GF_TRUE;
if (!strnicmp(url, "gmem://", 7)) return GF_TRUE;
if (!strnicmp(url, "gfio://", 7)) return GF_TRUE;
if (!strnicmp(url, "isobmff://", 10)) return GF_TRUE;
if (strstr(url, "://")) return GF_FALSE;
/*the rest is local (mounted on FS)*/
return GF_TRUE;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
da37ec8582266983d0ec4b7550ec907401ec441e
|
3.7701712222062746e+37
| 10 |
fixed crashes for very long path - cf #1908
| 0 |
isofs_export_encode_fh(struct inode *inode,
__u32 *fh32,
int *max_len,
struct inode *parent)
{
struct iso_inode_info * ei = ISOFS_I(inode);
int len = *max_len;
int type = 1;
__u16 *fh16 = (__u16*)fh32;
/*
* WARNING: max_len is 5 for NFSv2. Because of this
* limitation, we use the lower 16 bits of fh32[1] to hold the
* offset of the inode and the upper 16 bits of fh32[1] to
* hold the offset of the parent.
*/
if (parent && (len < 5)) {
*max_len = 5;
return 255;
} else if (len < 3) {
*max_len = 3;
return 255;
}
len = 3;
fh32[0] = ei->i_iget5_block;
fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
fh16[3] = 0; /* avoid leaking uninitialized data */
fh32[2] = inode->i_generation;
if (parent) {
struct iso_inode_info *eparent;
eparent = ISOFS_I(parent);
fh32[3] = eparent->i_iget5_block;
fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */
fh32[4] = parent->i_generation;
len = 5;
type = 2;
}
*max_len = len;
return type;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
fe685aabf7c8c9f138e5ea900954d295bf229175
|
1.6777931714953154e+37
| 41 |
isofs: avoid info leak on export
For type 1 the parent_offset member in struct isofs_fid gets copied
uninitialized to userland. Fix this by initializing it to 0.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
| 0 |
static int sctp_setsockopt_bindx(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size, int op)
{
struct sockaddr *kaddrs;
int err;
int addrcnt = 0;
int walk_size = 0;
struct sockaddr *sa_addr;
void *addr_buf;
struct sctp_af *af;
pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
__func__, sk, addrs, addrs_size, op);
if (unlikely(addrs_size <= 0))
return -EINVAL;
/* Check the user passed a healthy pointer. */
if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
kaddrs = kmalloc(addrs_size, GFP_KERNEL);
if (unlikely(!kaddrs))
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
kfree(kaddrs);
return -EFAULT;
}
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
if (walk_size + sizeof(sa_family_t) > addrs_size) {
kfree(kaddrs);
return -EINVAL;
}
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
kfree(kaddrs);
return -EINVAL;
}
addrcnt++;
addr_buf += af->sockaddr_len;
walk_size += af->sockaddr_len;
}
/* Do the work. */
switch (op) {
case SCTP_BINDX_ADD_ADDR:
err = sctp_bindx_add(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
break;
case SCTP_BINDX_REM_ADDR:
err = sctp_bindx_rem(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
break;
default:
err = -EINVAL;
break;
}
out:
kfree(kaddrs);
return err;
}
|
Safe
|
[
"CWE-362",
"CWE-703"
] |
linux
|
2d45a02d0166caf2627fe91897c6ffc3b19514c4
|
3.6982777205318536e+37
| 81 |
sctp: fix ASCONF list handling
->auto_asconf_splist is per namespace and mangled by functions like
sctp_setsockopt_auto_asconf() which doesn't guarantee any serialization.
Also, the call to inet_sk_copy_descendant() was backuping
->auto_asconf_list through the copy but was not honoring
->do_auto_asconf, which could lead to list corruption if it was
different between both sockets.
This commit thus fixes the list handling by using ->addr_wq_lock
spinlock to protect the list. A special handling is done upon socket
creation and destruction for that. Error handlig on sctp_init_sock()
will never return an error after having initialized asconf, so
sctp_destroy_sock() can be called without addrq_wq_lock. The lock now
will be take on sctp_close_sock(), before locking the socket, so we
don't do it in inverse order compared to sctp_addr_wq_timeout_handler().
Instead of taking the lock on sctp_sock_migrate() for copying and
restoring the list values, it's preferred to avoid rewritting it by
implementing sctp_copy_descendant().
Issue was found with a test application that kept flipping sysctl
default_auto_asconf on and off, but one could trigger it by issuing
simultaneous setsockopt() calls on multiple sockets or by
creating/destroying sockets fast enough. This is only triggerable
locally.
Fixes: 9f7d653b67ae ("sctp: Add Auto-ASCONF support (core).")
Reported-by: Ji Jianwen <jiji@redhat.com>
Suggested-by: Neil Horman <nhorman@tuxdriver.com>
Suggested-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn,
int level, pte_t unused)
{
u64 *sptep;
struct rmap_iterator iter;
for_each_rmap_spte(rmap_head, &iter, sptep)
if (is_accessed_spte(*sptep))
return true;
return false;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
9f46c187e2e680ecd9de7983e4d081c3391acc76
|
2.3120686661364783e+38
| 12 |
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <kangel@zju.edu.cn>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
static int create_intf_ep_devs(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
if (intf->ep_devs_created || intf->unregistering)
return 0;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
(void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev);
intf->ep_devs_created = 1;
return 0;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
2e1c42391ff2556387b3cb6308b24f6f65619feb
|
1.0448850781794316e+38
| 14 |
USB: core: harden cdc_parse_cdc_header
Andrey Konovalov reported a possible out-of-bounds problem for the
cdc_parse_cdc_header function. He writes:
It looks like cdc_parse_cdc_header() doesn't validate buflen
before accessing buffer[1], buffer[2] and so on. The only check
present is while (buflen > 0).
So fix this issue up by properly validating the buffer length matches
what the descriptor says it is.
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
bash_kill_shellword (count, key)
int count, key;
{
int p;
if (count < 0)
return (bash_backward_kill_shellword (-count, key));
p = rl_point;
bash_forward_shellword (count, key);
if (rl_point != p)
rl_kill_text (p, rl_point);
rl_point = p;
if (rl_editing_mode == 1) /* 1 == emacs_mode */
rl_mark = rl_point;
return 0;
}
|
Safe
|
[
"CWE-20"
] |
bash
|
4f747edc625815f449048579f6e65869914dd715
|
2.5074790418633896e+38
| 20 |
Bash-4.4 patch 7
| 0 |
static int dispatch_raw_connection_event(sd_event_source *event,
int fd,
uint32_t revents,
void *userdata) {
RemoteServer *s = userdata;
int fd2;
SocketAddress addr = {
.size = sizeof(union sockaddr_union),
.type = SOCK_STREAM,
};
char *hostname = NULL;
fd2 = accept_connection("raw", fd, &addr, &hostname);
if (fd2 < 0)
return fd2;
return journal_remote_add_source(s, fd2, hostname, true);
}
|
Safe
|
[
"CWE-770"
] |
systemd
|
ef4d6abe7c7fab6cbff975b32e76b09feee56074
|
3.110566233153028e+38
| 18 |
journal-remote: set a limit on the number of fields in a message
Existing use of E2BIG is replaced with ENOBUFS (entry too long), and E2BIG is
reused for the new error condition (too many fields).
This matches the change done for systemd-journald, hence forming the second
part of the fix for CVE-2018-16865
(https://bugzilla.redhat.com/show_bug.cgi?id=1653861).
| 0 |
int X509_STORE_CTX_get_num_untrusted(X509_STORE_CTX *ctx)
{
return ctx->num_untrusted;
}
|
Safe
|
[] |
openssl
|
33cc5dde478ba5ad79f8fd4acd8737f0e60e236e
|
2.6306346203019316e+37
| 4 |
Compat self-signed trust with reject-only aux data
When auxiliary data contains only reject entries, continue to trust
self-signed objects just as when no auxiliary data is present.
This makes it possible to reject specific uses without changing
what's accepted (and thus overring the underlying EKU).
Added new supported certs and doubled test count from 38 to 76.
Reviewed-by: Dr. Stephen Henson <steve@openssl.org>
| 0 |
static void FVMenuLoadNamelist(GWindow UNUSED(gw), struct gmenuitem *UNUSED(mi), GEvent *UNUSED(e)) {
/* Read in a name list and copy it into the prefs dir so that we'll find */
/* it in the future */
/* Be prepared to update what we've already got if names match */
char buffer[1025];
char *ret = gwwv_open_filename(_("Load Namelist"),NULL,
"*.nam",NULL);
char *temp, *pt;
char *buts[3];
FILE *old, *new;
int ch, ans;
NameList *nl;
if ( ret==NULL )
return; /* Cancelled */
temp = utf82def_copy(ret);
pt = strrchr(temp,'/');
if ( pt==NULL )
pt = temp;
else
++pt;
snprintf(buffer,sizeof(buffer),"%s/%s", getFontForgeUserDir(Config), pt);
if ( access(buffer,F_OK)==0 ) {
buts[0] = _("_Replace");
buts[1] = _("_Cancel");
buts[2] = NULL;
ans = gwwv_ask( _("Replace"),(const char **) buts,0,1,_("A name list with this name already exists. Replace it?"));
if ( ans==1 ) {
free(temp);
free(ret);
return;
}
}
old = fopen( temp,"r");
if ( old==NULL ) {
ff_post_error(_("No such file"),_("Could not read %s"), ret );
free(ret); free(temp);
return;
}
if ( (nl = LoadNamelist(temp))==NULL ) {
ff_post_error(_("Bad namelist file"),_("Could not parse %s"), ret );
free(ret); free(temp);
fclose(old);
return;
}
free(ret); free(temp);
if ( nl->uses_unicode ) {
if ( nl->a_utf8_name!=NULL )
ff_post_notice(_("Non-ASCII glyphnames"),_("This namelist contains at least one non-ASCII glyph name, namely: %s"), nl->a_utf8_name );
else
ff_post_notice(_("Non-ASCII glyphnames"),_("This namelist is based on a namelist which contains non-ASCII glyph names"));
}
new = fopen( buffer,"w");
if ( new==NULL ) {
ff_post_error(_("Create failed"),_("Could not write %s"), buffer );
fclose(old);
return;
}
while ( (ch=getc(old))!=EOF )
putc(ch,new);
fclose(old);
fclose(new);
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
fontforge
|
626f751752875a0ddd74b9e217b6f4828713573c
|
2.1326468817681048e+38
| 66 |
Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846.
| 0 |
static CURLcode pubkey_pem_to_der(const char *pem,
unsigned char **der, size_t *der_len)
{
char *stripped_pem, *begin_pos, *end_pos;
size_t pem_count, stripped_pem_count = 0, pem_len;
CURLcode result;
/* if no pem, exit. */
if(!pem)
return CURLE_BAD_CONTENT_ENCODING;
begin_pos = strstr(pem, "-----BEGIN PUBLIC KEY-----");
if(!begin_pos)
return CURLE_BAD_CONTENT_ENCODING;
pem_count = begin_pos - pem;
/* Invalid if not at beginning AND not directly following \n */
if(0 != pem_count && '\n' != pem[pem_count - 1])
return CURLE_BAD_CONTENT_ENCODING;
/* 26 is length of "-----BEGIN PUBLIC KEY-----" */
pem_count += 26;
/* Invalid if not directly following \n */
end_pos = strstr(pem + pem_count, "\n-----END PUBLIC KEY-----");
if(!end_pos)
return CURLE_BAD_CONTENT_ENCODING;
pem_len = end_pos - pem;
stripped_pem = malloc(pem_len - pem_count + 1);
if(!stripped_pem)
return CURLE_OUT_OF_MEMORY;
/*
* Here we loop through the pem array one character at a time between the
* correct indices, and place each character that is not '\n' or '\r'
* into the stripped_pem array, which should represent the raw base64 string
*/
while(pem_count < pem_len) {
if('\n' != pem[pem_count] && '\r' != pem[pem_count])
stripped_pem[stripped_pem_count++] = pem[pem_count];
++pem_count;
}
/* Place the null terminator in the correct place */
stripped_pem[stripped_pem_count] = '\0';
result = Curl_base64_decode(stripped_pem, der, der_len);
Curl_safefree(stripped_pem);
return result;
}
|
Safe
|
[
"CWE-290"
] |
curl
|
b09c8ee15771c614c4bf3ddac893cdb12187c844
|
6.380035194937967e+37
| 53 |
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890
| 0 |
Item_iterator_ref_list(List_iterator<Item*> &arg_list):
list(arg_list) {}
|
Safe
|
[
"CWE-617"
] |
server
|
2e7891080667c59ac80f788eef4d59d447595772
|
3.123438236260211e+38
| 2 |
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <sergey.petrunya@mariadb.com>
| 0 |
static int adu_open(struct inode *inode, struct file *file)
{
struct adu_device *dev = NULL;
struct usb_interface *interface;
int subminor;
int retval;
subminor = iminor(inode);
retval = mutex_lock_interruptible(&adutux_mutex);
if (retval)
goto exit_no_lock;
interface = usb_find_interface(&adu_driver, subminor);
if (!interface) {
pr_err("%s - error, can't find device for minor %d\n",
__func__, subminor);
retval = -ENODEV;
goto exit_no_device;
}
dev = usb_get_intfdata(interface);
if (!dev || !dev->udev) {
retval = -ENODEV;
goto exit_no_device;
}
/* check that nobody else is using the device */
if (dev->open_count) {
retval = -EBUSY;
goto exit_no_device;
}
++dev->open_count;
dev_dbg(&dev->udev->dev, "%s: open count %d\n", __func__,
dev->open_count);
/* save device in the file's private structure */
file->private_data = dev;
/* initialize in direction */
dev->read_buffer_length = 0;
/* fixup first read by having urb waiting for it */
usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
usb_rcvintpipe(dev->udev,
dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
usb_endpoint_maxp(dev->interrupt_in_endpoint),
adu_interrupt_in_callback, dev,
dev->interrupt_in_endpoint->bInterval);
dev->read_urb_finished = 0;
if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL))
dev->read_urb_finished = 1;
/* we ignore failure */
/* end of fixup for first read */
/* initialize out direction */
dev->out_urb_finished = 1;
retval = 0;
exit_no_device:
mutex_unlock(&adutux_mutex);
exit_no_lock:
return retval;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
44efc269db7929f6275a1fa927ef082e533ecde0
|
3.237348032706966e+38
| 67 |
USB: adutux: fix use-after-free on disconnect
The driver was clearing its struct usb_device pointer, which it used as
an inverted disconnected flag, before deregistering the character device
and without serialising against racing release().
This could lead to a use-after-free if a racing release() callback
observes the cleared pointer and frees the driver data before
disconnect() is finished with it.
This could also lead to NULL-pointer dereferences in a racing open().
Fixes: f08812d5eb8f ("USB: FIx locks and urb->status in adutux (updated)")
Cc: stable <stable@vger.kernel.org> # 2.6.24
Reported-by: syzbot+0243cb250a51eeefb8cc@syzkaller.appspotmail.com
Tested-by: syzbot+0243cb250a51eeefb8cc@syzkaller.appspotmail.com
Signed-off-by: Johan Hovold <johan@kernel.org>
Link: https://lore.kernel.org/r/20190925092913.8608-1-johan@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
int _cmsSearchTag(_cmsICCPROFILE* Icc, cmsTagSignature sig, cmsBool lFollowLinks)
{
int n;
cmsTagSignature LinkedSig;
do {
// Search for given tag in ICC profile directory
n = SearchOneTag(Icc, sig);
if (n < 0)
return -1; // Not found
if (!lFollowLinks)
return n; // Found, don't follow links
// Is this a linked tag?
LinkedSig = Icc ->TagLinked[n];
// Yes, follow link
if (LinkedSig != (cmsTagSignature) 0) {
sig = LinkedSig;
}
} while (LinkedSig != (cmsTagSignature) 0);
return n;
}
|
Safe
|
[] |
Little-CMS
|
d2d902b9a03583ae482c782b2f243f7e5268a47d
|
3.370644956468106e+38
| 27 |
>Changes from Richard Hughes
| 0 |
static void dec_pending(struct dm_io *io, blk_status_t error)
{
unsigned long flags;
blk_status_t io_error;
struct bio *bio;
struct mapped_device *md = io->md;
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags);
if (!(io->status == BLK_STS_DM_REQUEUE &&
__noflush_suspending(md)))
io->status = error;
spin_unlock_irqrestore(&io->endio_lock, flags);
}
if (atomic_dec_and_test(&io->io_count)) {
if (io->status == BLK_STS_DM_REQUEUE) {
/*
* Target requested pushing back the I/O.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md))
bio_list_add_head(&md->deferred, io->bio);
else
/* noflush suspend was interrupted. */
io->status = BLK_STS_IOERR;
spin_unlock_irqrestore(&md->deferred_lock, flags);
}
io_error = io->status;
bio = io->bio;
end_io_acct(io);
free_io(md, io);
if (io_error == BLK_STS_DM_REQUEUE)
return;
if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_PREFLUSH.
*/
bio->bi_opf &= ~REQ_PREFLUSH;
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
bio->bi_status = io_error;
bio_endio(bio);
}
}
}
|
Safe
|
[
"CWE-362"
] |
linux
|
b9a41d21dceadf8104812626ef85dc56ee8a60ed
|
8.470558781669973e+37
| 52 |
dm: fix race between dm_get_from_kobject() and __dm_destroy()
The following BUG_ON was hit when testing repeat creation and removal of
DM devices:
kernel BUG at drivers/md/dm.c:2919!
CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44
Call Trace:
[<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a
[<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e
[<ffffffff817b46d1>] ? mutex_lock+0x26/0x44
[<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf
[<ffffffff811de257>] kernfs_seq_show+0x23/0x25
[<ffffffff81199118>] seq_read+0x16f/0x325
[<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f
[<ffffffff8117b625>] __vfs_read+0x26/0x9d
[<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44
[<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9
[<ffffffff8117be9d>] vfs_read+0x8f/0xcf
[<ffffffff81193e34>] ? __fdget_pos+0x12/0x41
[<ffffffff8117c686>] SyS_read+0x4b/0x76
[<ffffffff817b606e>] system_call_fastpath+0x12/0x71
The bug can be easily triggered, if an extra delay (e.g. 10ms) is added
between the test of DMF_FREEING & DMF_DELETING and dm_get() in
dm_get_from_kobject().
To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and
dm_get() are done in an atomic way, so _minor_lock is used.
The other callers of dm_get() have also been checked to be OK: some
callers invoke dm_get() under _minor_lock, some callers invoke it under
_hash_lock, and dm_start_request() invoke it after increasing
md->open_count.
Cc: stable@vger.kernel.org
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
| 0 |
static void sp_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->netdev_ops = &sp_netdev_ops;
dev->needs_free_netdev = true;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &ax25_header_ops;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
/* Only activated in AX.25 mode */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev->flags = 0;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
19d1532a187669ce86d5a2696eb7275310070793
|
1.2246665944429101e+38
| 19 |
net: 6pack: fix slab-out-of-bounds in decode_data
Syzbot reported slab-out-of bounds write in decode_data().
The problem was in missing validation checks.
Syzbot's reproducer generated malicious input, which caused
decode_data() to be called a lot in sixpack_decode(). Since
rx_count_cooked is only 400 bytes and noone reported before,
that 400 bytes is not enough, let's just check if input is malicious
and complain about buffer overrun.
Fail log:
==================================================================
BUG: KASAN: slab-out-of-bounds in drivers/net/hamradio/6pack.c:843
Write of size 1 at addr ffff888087c5544e by task kworker/u4:0/7
CPU: 0 PID: 7 Comm: kworker/u4:0 Not tainted 5.6.0-rc3-syzkaller #0
...
Workqueue: events_unbound flush_to_ldisc
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x197/0x210 lib/dump_stack.c:118
print_address_description.constprop.0.cold+0xd4/0x30b mm/kasan/report.c:374
__kasan_report.cold+0x1b/0x32 mm/kasan/report.c:506
kasan_report+0x12/0x20 mm/kasan/common.c:641
__asan_report_store1_noabort+0x17/0x20 mm/kasan/generic_report.c:137
decode_data.part.0+0x23b/0x270 drivers/net/hamradio/6pack.c:843
decode_data drivers/net/hamradio/6pack.c:965 [inline]
sixpack_decode drivers/net/hamradio/6pack.c:968 [inline]
Reported-and-tested-by: syzbot+fc8cd9a673d4577fb2e4@syzkaller.appspotmail.com
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Pavel Skripkin <paskripkin@gmail.com>
Reviewed-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
hostkey_method_ssh_ed25519_initPEMFromMemory(LIBSSH2_SESSION * session,
const char *privkeyfiledata,
size_t privkeyfiledata_len,
unsigned const char *passphrase,
void **abstract)
{
libssh2_ed25519_ctx *ed_ctx = NULL;
int ret;
if(abstract != NULL && *abstract) {
hostkey_method_ssh_ed25519_dtor(session, abstract);
*abstract = NULL;
}
ret = _libssh2_ed25519_new_private_frommemory(&ed_ctx, session,
privkeyfiledata,
privkeyfiledata_len, passphrase);
if(ret) {
return -1;
}
if(abstract != NULL)
*abstract = ed_ctx;
return 0;
}
|
Safe
|
[
"CWE-787"
] |
libssh2
|
dc109a7f518757741590bb993c0c8412928ccec2
|
1.7086203404693002e+38
| 26 |
Security fixes (#315)
* Bounds checks
Fixes for CVEs
https://www.libssh2.org/CVE-2019-3863.html
https://www.libssh2.org/CVE-2019-3856.html
* Packet length bounds check
CVE
https://www.libssh2.org/CVE-2019-3855.html
* Response length check
CVE
https://www.libssh2.org/CVE-2019-3859.html
* Bounds check
CVE
https://www.libssh2.org/CVE-2019-3857.html
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
and additional data validation
* Check bounds before reading into buffers
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
* declare SIZE_MAX and UINT_MAX if needed
| 0 |
static inline PixelTrait GetPixelTraits(const Image *magick_restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].traits);
}
|
Safe
|
[
"CWE-20",
"CWE-125"
] |
ImageMagick
|
8187d2d8fd010d2d6b1a3a8edd935beec404dddc
|
2.5816944305634936e+38
| 5 |
https://github.com/ImageMagick/ImageMagick/issues/1610
| 0 |
void FIPS_drbg_stick(int onoff)
{
drbg_stick = onoff;
}
|
Safe
|
[] |
openssl
|
200f249b8c3b6439e0200d01caadc24806f1a983
|
4.538010286449361e+37
| 4 |
Remove Dual EC DRBG from FIPS module.
| 0 |
pktap_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
uint32_t dlt, hdrlen, rectype;
u_int caplen = h->caplen;
u_int length = h->len;
if_printer printer;
const pktap_header_t *hdr;
struct pcap_pkthdr nhdr;
if (caplen < sizeof(pktap_header_t) || length < sizeof(pktap_header_t)) {
ND_PRINT((ndo, "[|pktap]"));
return (0);
}
hdr = (const pktap_header_t *)p;
dlt = EXTRACT_LE_32BITS(&hdr->pkt_dlt);
hdrlen = EXTRACT_LE_32BITS(&hdr->pkt_len);
if (hdrlen < sizeof(pktap_header_t)) {
/*
* Claimed header length < structure length.
* XXX - does this just mean some fields aren't
* being supplied, or is it truly an error (i.e.,
* is the length supplied so that the header can
* be expanded in the future)?
*/
ND_PRINT((ndo, "[|pktap]"));
return (0);
}
if (caplen < hdrlen || length < hdrlen) {
ND_PRINT((ndo, "[|pktap]"));
return (hdrlen);
}
if (ndo->ndo_eflag)
pktap_header_print(ndo, p, length);
length -= hdrlen;
caplen -= hdrlen;
p += hdrlen;
rectype = EXTRACT_LE_32BITS(&hdr->pkt_rectype);
switch (rectype) {
case PKT_REC_NONE:
ND_PRINT((ndo, "no data"));
break;
case PKT_REC_PACKET:
if ((printer = lookup_printer(dlt)) != NULL) {
nhdr = *h;
nhdr.caplen = caplen;
nhdr.len = length;
hdrlen += printer(ndo, &nhdr, p);
} else {
if (!ndo->ndo_eflag)
pktap_header_print(ndo, (const u_char *)hdr,
length + hdrlen);
if (!ndo->ndo_suppress_default_print)
ND_DEFAULTPRINT(p, caplen);
}
break;
}
return (hdrlen);
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
tcpdump
|
ca336198e8bebccc18502de27672fdbd6eb34856
|
5.05139079500692e+37
| 66 |
CVE-2017-13007/PKTAP: Pass a properly updated struct pcap_pkthdr to the sub-dissector.
The sub-dissector expects that the length and captured length will
reflect the actual remaining data in the packet, not the raw amount
including the PKTAP header; pass an updated header, just as we do for
PPI.
This fixes a buffer over-read discovered by Yannick Formaggio.
Add a test using the capture file supplied by the reporter(s).
| 0 |
Item *clone_item()
{
return new Item_decimal(name, &decimal_value, decimals, max_length);
}
|
Safe
|
[] |
mysql-server
|
f7316aa0c9a3909fc7498e7b95d5d3af044a7e21
|
1.4627433747133235e+38
| 4 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
| 0 |
static char *TraceSVGClippath(const unsigned char *blob,size_t length,
const size_t columns,const size_t rows)
{
char
*path,
*message;
MagickBooleanType
in_subpath;
PointInfo
first[3],
last[3],
point[3];
ssize_t
i;
ssize_t
knot_count,
selector,
x,
y;
path=AcquireString((char *) NULL);
if (path == (char *) NULL)
return((char *) NULL);
message=AcquireString((char *) NULL);
(void) FormatLocaleString(message,MagickPathExtent,(
"<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n"
"<svg xmlns=\"http://www.w3.org/2000/svg\""
" width=\"%.20g\" height=\"%.20g\">\n"
"<g>\n"
"<path fill-rule=\"evenodd\" style=\"fill:#000000;stroke:#000000;"
"stroke-width:0;stroke-antialiasing:false\" d=\"\n"),(double) columns,
(double) rows);
(void) ConcatenateString(&path,message);
(void) memset(point,0,sizeof(point));
(void) memset(first,0,sizeof(first));
(void) memset(last,0,sizeof(last));
knot_count=0;
in_subpath=MagickFalse;
while (length != 0)
{
selector=(ssize_t) ReadPropertyMSBShort(&blob,&length);
switch (selector)
{
case 0:
case 3:
{
if (knot_count != 0)
{
blob+=24;
length-=MagickMin(24,(ssize_t) length);
break;
}
/*
Expected subpath length record.
*/
knot_count=(ssize_t) ReadPropertyMSBShort(&blob,&length);
blob+=22;
length-=MagickMin(22,(ssize_t) length);
break;
}
case 1:
case 2:
case 4:
case 5:
{
if (knot_count == 0)
{
/*
Unexpected subpath knot.
*/
blob+=24;
length-=MagickMin(24,(ssize_t) length);
break;
}
/*
Add sub-path knot
*/
for (i=0; i < 3; i++)
{
y=(ssize_t) ReadPropertyMSBLong(&blob,&length);
x=(ssize_t) ReadPropertyMSBLong(&blob,&length);
point[i].x=(double) x*columns/4096.0/4096.0;
point[i].y=(double) y*rows/4096.0/4096.0;
}
if (in_subpath == MagickFalse)
{
(void) FormatLocaleString(message,MagickPathExtent,"M %g %g\n",
point[1].x,point[1].y);
for (i=0; i < 3; i++)
{
first[i]=point[i];
last[i]=point[i];
}
}
else
{
TraceBezierCurve(message,last,point);
for (i=0; i < 3; i++)
last[i]=point[i];
}
(void) ConcatenateString(&path,message);
in_subpath=MagickTrue;
knot_count--;
/*
Close the subpath if there are no more knots.
*/
if (knot_count == 0)
{
TraceBezierCurve(message,last,first);
(void) ConcatenateString(&path,message);
in_subpath=MagickFalse;
}
break;
}
case 6:
case 7:
case 8:
default:
{
blob+=24;
length-=MagickMin(24,(ssize_t) length);
break;
}
}
}
/*
Return an empty SVG image if the path does not have knots.
*/
(void) ConcatenateString(&path,"\"/>\n</g>\n</svg>\n");
message=DestroyString(message);
return(path);
}
|
Safe
|
[
"CWE-703",
"CWE-704"
] |
ImageMagick
|
eac8ce4d873f28bb6a46aa3a662fb196b49b95d0
|
2.6855943276524684e+38
| 136 |
fix #5033: runtime error: load of misaligned address (#5034)
* fix Division by zero in XMenuWidget() of MagickCore/widget.c
* Fix memory leak in AnimateImageCommand() of MagickWand/animate.c and DisplayImageCommand() of MagickWand/display.c
* fix Division by zero in ReadEnhMetaFile() of coders/emf.c
* Resolve conflicts
* fix issue: outside the range of representable values of type 'unsigned char' at coders/psd.c:1025
* fix error: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299
* fix #5033:runtime error: load of misaligned address
Co-authored-by: zhailiangliang <zhailiangliang@loongson.cn>
| 0 |
auto make_stack_allocator() {
return boost::context::protected_fixedsize_stack{512*1024};
}
|
Safe
|
[
"CWE-400"
] |
ceph
|
ff72c50a2c43c57aead933eb4903ad1ca6d1748a
|
1.5218350905208232e+38
| 3 |
rgw: improve beast
Avoid leaking connections that had partially-consumed
client data on unexpected disconnect.
Resolves CVE-2020-1700 (moderate impact flaw).
Fixes: https://tracker.ceph.com/issues/42531
Signed-off-by: Or Friedmann <ofriedma@redhat.com>
Signed-off-by: Matt Benjamin <mbenjamin@redhat.com>
| 0 |
static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
struct net_local *lp = bus->priv;
u32 ctrl_reg;
u32 rc;
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
/* Write the PHY address, register number and set the OP bit in the
* MDIO Address register. Set the Status bit in the MDIO Control
* register to start a MDIO read transaction.
*/
ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
xemaclite_writel(XEL_MDIOADDR_OP_MASK |
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"%s(phy_id=%i, reg=%x) == %x\n", __func__,
phy_id, reg, rc);
return rc;
}
|
Safe
|
[
"CWE-703",
"CWE-824"
] |
linux
|
d0d62baa7f505bd4c59cd169692ff07ec49dde37
|
2.8145390971775855e+38
| 31 |
net: xilinx_emaclite: Do not print real IOMEM pointer
Printing kernel pointers is discouraged because they might leak kernel
memory layout. This fixes smatch warning:
drivers/net/ethernet/xilinx/xilinx_emaclite.c:1191 xemaclite_of_probe() warn:
argument 4 to %08lX specifier is cast from pointer
Signed-off-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
ReadFilter(TestDnsServerQuery& parent) : parent_(parent) {}
|
Safe
|
[
"CWE-400"
] |
envoy
|
542f84c66e9f6479bc31c6f53157c60472b25240
|
1.5715690884170087e+38
| 1 |
overload: Runtime configurable global connection limits (#147)
Signed-off-by: Tony Allen <tony@allen.gg>
| 0 |
static void swizzle_to_dml_params(
enum swizzle_mode_values swizzle,
unsigned int *sw_mode)
{
switch (swizzle) {
case DC_SW_LINEAR:
*sw_mode = dm_sw_linear;
break;
case DC_SW_4KB_S:
*sw_mode = dm_sw_4kb_s;
break;
case DC_SW_4KB_S_X:
*sw_mode = dm_sw_4kb_s_x;
break;
case DC_SW_4KB_D:
*sw_mode = dm_sw_4kb_d;
break;
case DC_SW_4KB_D_X:
*sw_mode = dm_sw_4kb_d_x;
break;
case DC_SW_64KB_S:
*sw_mode = dm_sw_64kb_s;
break;
case DC_SW_64KB_S_X:
*sw_mode = dm_sw_64kb_s_x;
break;
case DC_SW_64KB_S_T:
*sw_mode = dm_sw_64kb_s_t;
break;
case DC_SW_64KB_D:
*sw_mode = dm_sw_64kb_d;
break;
case DC_SW_64KB_D_X:
*sw_mode = dm_sw_64kb_d_x;
break;
case DC_SW_64KB_D_T:
*sw_mode = dm_sw_64kb_d_t;
break;
case DC_SW_64KB_R_X:
*sw_mode = dm_sw_64kb_r_x;
break;
case DC_SW_VAR_S:
*sw_mode = dm_sw_var_s;
break;
case DC_SW_VAR_S_X:
*sw_mode = dm_sw_var_s_x;
break;
case DC_SW_VAR_D:
*sw_mode = dm_sw_var_d;
break;
case DC_SW_VAR_D_X:
*sw_mode = dm_sw_var_d_x;
break;
default:
ASSERT(0); /* Not supported */
break;
}
}
|
Safe
|
[
"CWE-400",
"CWE-703",
"CWE-401"
] |
linux
|
055e547478a11a6360c7ce05e2afc3e366968a12
|
2.096467285507999e+38
| 59 |
drm/amd/display: memory leak
In dcn*_clock_source_create when dcn20_clk_src_construct fails allocated
clk_src needs release.
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
| 0 |
static int get_free_dqblk(struct quota_handle *h)
{
dqbuf_t buf = getdqbuf();
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
struct qtree_mem_dqinfo *info = &h->qh_info.u.v2_mdqi.dqi_qtree;
int blk;
if (!buf)
return -ENOMEM;
if (info->dqi_free_blk) {
blk = info->dqi_free_blk;
read_blk(h, blk, buf);
info->dqi_free_blk = ext2fs_le32_to_cpu(dh->dqdh_next_free);
} else {
memset(buf, 0, QT_BLKSIZE);
/* Assure block allocation... */
if (write_blk(h, info->dqi_blocks, buf) < 0) {
freedqbuf(buf);
log_err("Cannot allocate new quota block "
"(out of disk space).");
return -ENOSPC;
}
blk = info->dqi_blocks++;
}
mark_quotafile_info_dirty(h);
freedqbuf(buf);
return blk;
}
|
Safe
|
[
"CWE-787"
] |
e2fsprogs
|
8dbe7b475ec5e91ed767239f0e85880f416fc384
|
3.2841566145050986e+38
| 29 |
libsupport: add checks to prevent buffer overrun bugs in quota code
A maliciously corrupted file systems can trigger buffer overruns in
the quota code used by e2fsck. To fix this, add sanity checks to the
quota header fields as well as to block number references in the quota
tree.
Addresses: CVE-2019-5094
Addresses: TALOS-2019-0887
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
| 0 |
static long snd_seq_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_seq_client *client = file->private_data;
/* To use kernel stack for ioctl data. */
union {
int pversion;
int client_id;
struct snd_seq_system_info system_info;
struct snd_seq_running_info running_info;
struct snd_seq_client_info client_info;
struct snd_seq_port_info port_info;
struct snd_seq_port_subscribe port_subscribe;
struct snd_seq_queue_info queue_info;
struct snd_seq_queue_status queue_status;
struct snd_seq_queue_tempo tempo;
struct snd_seq_queue_timer queue_timer;
struct snd_seq_queue_client queue_client;
struct snd_seq_client_pool client_pool;
struct snd_seq_remove_events remove_events;
struct snd_seq_query_subs query_subs;
} buf;
const struct ioctl_handler *handler;
unsigned long size;
int err;
if (snd_BUG_ON(!client))
return -ENXIO;
for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
if (handler->cmd == cmd)
break;
}
if (handler->cmd == 0)
return -ENOTTY;
memset(&buf, 0, sizeof(buf));
/*
* All of ioctl commands for ALSA sequencer get an argument of size
* within 13 bits. We can safely pick up the size from the command.
*/
size = _IOC_SIZE(handler->cmd);
if (handler->cmd & IOC_IN) {
if (copy_from_user(&buf, (const void __user *)arg, size))
return -EFAULT;
}
mutex_lock(&client->ioctl_mutex);
err = handler->func(client, &buf);
mutex_unlock(&client->ioctl_mutex);
if (err >= 0) {
/* Some commands includes a bug in 'dir' field. */
if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL ||
(handler->cmd & IOC_OUT))
if (copy_to_user((void __user *)arg, &buf, size))
return -EFAULT;
}
return err;
}
|
Safe
|
[
"CWE-362"
] |
linux
|
b3defb791b26ea0683a93a4f49c77ec45ec96f10
|
3.3766842221627797e+38
| 62 |
ALSA: seq: Make ioctls race-free
The ALSA sequencer ioctls have no protection against racy calls while
the concurrent operations may lead to interfere with each other. As
reported recently, for example, the concurrent calls of setting client
pool with a combination of write calls may lead to either the
unkillable dead-lock or UAF.
As a slightly big hammer solution, this patch introduces the mutex to
make each ioctl exclusive. Although this may reduce performance via
parallel ioctl calls, usually it's not demanded for sequencer usages,
hence it should be negligible.
Reported-by: Luo Quan <a4651386@163.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
| 0 |
/* Notify every registered QXL display interface that the VM has stopped.
 * FOREACH_QXL_INSTANCE is a project macro that iterates the QXL instances
 * of 'reds' and presumably declares the loop variable 'qxl' itself --
 * defined elsewhere, so the iteration details are not visible here. */
void reds_on_vm_stop(RedsState *reds)
{
FOREACH_QXL_INSTANCE(reds, qxl) {
red_qxl_stop(qxl);
}
}
|
Safe
|
[] |
spice
|
ca5bbc5692e052159bce1a75f55dc60b36078749
|
2.243914375740071e+37
| 6 |
With OpenSSL 1.1: Disable client-initiated renegotiation.
Fixes issue #49
Fixes BZ#1904459
Signed-off-by: Julien Ropé <jrope@redhat.com>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <fziglio@redhat.com>
| 0 |
static int network_set_ttl (const sockent_t *se, const struct addrinfo *ai)
{
DEBUG ("network plugin: network_set_ttl: network_config_ttl = %i;",
network_config_ttl);
assert (se->type == SOCKENT_TYPE_CLIENT);
if ((network_config_ttl < 1) || (network_config_ttl > 255))
return (-1);
if (ai->ai_family == AF_INET)
{
struct sockaddr_in *addr = (struct sockaddr_in *) ai->ai_addr;
int optname;
if (IN_MULTICAST (ntohl (addr->sin_addr.s_addr)))
optname = IP_MULTICAST_TTL;
else
optname = IP_TTL;
if (setsockopt (se->data.client.fd, IPPROTO_IP, optname,
&network_config_ttl,
sizeof (network_config_ttl)) != 0)
{
char errbuf[1024];
ERROR ("network plugin: setsockopt (ipv4-ttl): %s",
sstrerror (errno, errbuf, sizeof (errbuf)));
return (-1);
}
}
else if (ai->ai_family == AF_INET6)
{
/* Useful example: http://gsyc.escet.urjc.es/~eva/IPv6-web/examples/mcast.html */
struct sockaddr_in6 *addr = (struct sockaddr_in6 *) ai->ai_addr;
int optname;
if (IN6_IS_ADDR_MULTICAST (&addr->sin6_addr))
optname = IPV6_MULTICAST_HOPS;
else
optname = IPV6_UNICAST_HOPS;
if (setsockopt (se->data.client.fd, IPPROTO_IPV6, optname,
&network_config_ttl,
sizeof (network_config_ttl)) != 0)
{
char errbuf[1024];
ERROR ("network plugin: setsockopt(ipv6-ttl): %s",
sstrerror (errno, errbuf,
sizeof (errbuf)));
return (-1);
}
}
return (0);
} /* int network_set_ttl */
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
collectd
|
b589096f907052b3a4da2b9ccc9b0e2e888dfc18
|
2.9426263224110804e+38
| 55 |
network plugin: Fix heap overflow in parse_packet().
Emilien Gaspar has identified a heap overflow in parse_packet(), the
function used by the network plugin to parse incoming network packets.
This is a vulnerability in collectd, though the scope is not clear at
this point. At the very least specially crafted network packets can be
used to crash the daemon. We can't rule out a potential remote code
execution though.
Fixes: CVE-2016-6254
| 0 |
/**
 * mutt_seqset_iterator_next - Yield the next UID from a sequence-set iterator
 * @iter: Iterator state (initialised elsewhere over a string like "1:5,10,12:14")
 * @next: Out-parameter receiving the next value
 *
 * The backing string is consumed destructively: substring separators are
 * overwritten with NUL bytes as they are parsed.
 *
 * Returns: 0 on success, 1 when the set is exhausted, -1 on bad arguments
 * or an unparsable number.
 */
int mutt_seqset_iterator_next(struct SeqsetIterator *iter, unsigned int *next)
{
  if (!iter || !next)
    return -1;

  if (iter->in_range)
  {
    /* Detect stepping one past the end of the current range */
    if ((iter->down && (iter->range_cur == (iter->range_end - 1))) ||
        (!iter->down && (iter->range_cur == (iter->range_end + 1))))
    {
      iter->in_range = 0;
    }
  }

  if (!iter->in_range)
  {
    iter->substr_cur = iter->substr_end;
    if (iter->substr_cur == iter->eostr)
      return 1;

    /* Terminate this substring and advance substr_end past the written
     * NUL immediately.  The previous implementation left substr_end ON
     * the NUL and skipped it lazily with a "while (!*substr_cur)" loop on
     * the next call; when the seqset ended in a comma that loop walked
     * past the end of the buffer (out-of-bounds read, CWE-125). */
    iter->substr_end = strchr(iter->substr_cur, ',');
    if (!iter->substr_end)
      iter->substr_end = iter->eostr;
    else
      *(iter->substr_end++) = '\0';

    /* "a:b" denotes a range; split at the colon */
    char *range_sep = strchr(iter->substr_cur, ':');
    if (range_sep)
      *range_sep++ = '\0';

    if (mutt_str_atoui(iter->substr_cur, &iter->range_cur) != 0)
      return -1;
    if (range_sep)
    {
      if (mutt_str_atoui(range_sep, &iter->range_end) != 0)
        return -1;
    }
    else
      iter->range_end = iter->range_cur;

    iter->down = (iter->range_end < iter->range_cur);
    iter->in_range = 1;
  }

  *next = iter->range_cur;
  if (iter->down)
    iter->range_cur--;
  else
    iter->range_cur++;

  return 0;
}
|
Vulnerable
|
[
"CWE-125"
] |
neomutt
|
fa1db5785e5cfd9d3cd27b7571b9fe268d2ec2dc
|
2.555634233734735e+38
| 54 |
Fix seqset iterator when it ends in a comma
If the seqset ended with a comma, the substr_end marker would be just
before the trailing nul. In the next call, the loop to skip the
marker would iterate right past the end of string too.
The fix is simple: place the substr_end marker and skip past it
immediately.
| 1 |
/* Atomically replace the bits of inode->i_flags selected by @mask with
 * @flags via set_mask_bits().  Warns once if @flags carries bits outside
 * @mask, since such bits would otherwise be silently dropped. */
void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask)
{
WARN_ON_ONCE(flags & ~mask);
set_mask_bits(&inode->i_flags, mask, flags);
}
|
Safe
|
[
"CWE-416"
] |
tip
|
8019ad13ef7f64be44d4f892af9c840179009254
|
2.0418203010483357e+38
| 6 |
futex: Fix inode life-time issue
As reported by Jann, ihold() does not in fact guarantee inode
persistence. And instead of making it so, replace the usage of inode
pointers with a per boot, machine wide, unique inode identifier.
This sequence number is global, but shared (file backed) futexes are
rare enough that this should not become a performance issue.
Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
| 0 |
// Task that writes queued chunks to a multi-part output file.  The
// constructor only stores its arguments; the actual writing happens in
// the task's execute() override (not visible here).
// NOTE(review): ownership of the WritingTaskData pointers and of
// 'tiledFrameBuffers' is not taken here -- confirm lifetime at the caller.
WritingTask (TaskGroup *group, MultiPartOutputFile* file, vector<WritingTaskData*> data,
Array2D<FrameBuffer>* tiledFrameBuffers):
Task(group),
file(file),
data(data),
tiledFrameBuffers(tiledFrameBuffers)
{}
|
Safe
|
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
|
1.358072164637247e+38
| 7 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
| 0 |
// Accessor: the encryption level this write record layer was configured
// with.  Pure getter; no side effects.
EncryptionLevel EncryptedWriteRecordLayer::getEncryptionLevel() const {
return encryptionLevel_;
}
|
Safe
|
[
"CWE-400",
"CWE-703",
"CWE-770"
] |
fizz
|
3eaddb33619eaaf74a760872850c550ad8f5c52f
|
5.828766046849604e+37
| 3 |
Coalesce handshake buffers
Summary:
It is possible that a peer might send us records in a manner such
that there is a 16KB record and only 1 byte of handshake message in
each record. Since we normally just trim the IOBuf, we would end up
holding 16K of data per actual byte of data. To prevent this we allocate a contiguous
buffer to copy over these bytes for handshake messages for now.
This is a partial fix for CVE-2019-11924
Reviewed By: ngoyal
Differential Revision: D16478044
fbshipit-source-id: 464bc68eaefda065d9a327818100427377293fbd
| 0 |
// Accessor: whether session-cache flushing has been disabled on this
// SSL context.  Pure getter; no side effects.
bool SSL_CTX::GetSessionCacheFlushOff() const
{
return sessionCacheFlushOff_;
}
|
Safe
|
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
|
3.284414767828108e+37
| 4 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
| 0 |
/**
 * sock_register - register a socket protocol family handler
 * @ops: protocol family description; ops->family indexes net_families[]
 *
 * Installs @ops into the RCU-protected net_families[] table under
 * net_family_lock.
 *
 * Returns: -ENOBUFS if the family number is >= NPROTO, -EEXIST if a
 * handler is already registered for that family, 0 on success.
 */
int sock_register(const struct net_proto_family *ops)
{
int err;
if (ops->family >= NPROTO) {
printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
NPROTO);
return -ENOBUFS;
}
spin_lock(&net_family_lock);
/* Plain read is safe here: we hold the writer-side lock */
if (rcu_dereference_protected(net_families[ops->family],
lockdep_is_held(&net_family_lock)))
err = -EEXIST;
else {
rcu_assign_pointer(net_families[ops->family], ops);
err = 0;
}
spin_unlock(&net_family_lock);
/* NOTE(review): this "Registered" message is printed even on the
 * -EEXIST path above -- confirm that is intentional */
printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
return err;
}
|
Safe
|
[] |
linux-2.6
|
644595f89620ba8446cc555be336d24a34464950
|
2.066520802374826e+38
| 23 |
compat: Handle COMPAT_USE_64BIT_TIME in net/socket.c
Use helper functions aware of COMPAT_USE_64BIT_TIME to write struct
timeval and struct timespec to userspace in net/socket.c.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
| 0 |
WORD_LIST *
expand_words_no_vars (list)
WORD_LIST *list;
{
return (expand_word_list_internal (list, WEXP_NOVARS));
|
Safe
|
[
"CWE-20"
] |
bash
|
4f747edc625815f449048579f6e65869914dd715
|
1.6655850568600661e+38
| 5 |
Bash-4.4 patch 7
| 0 |
/* Register the IPVS generic-netlink family together with its full
 * operations table.  Returns 0 or a negative errno from
 * genl_register_family_with_ops(). */
static int __init ip_vs_genl_register(void)
{
return genl_register_family_with_ops(&ip_vs_genl_family,
ip_vs_genl_ops, ARRAY_SIZE(ip_vs_genl_ops));
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
04bcef2a83f40c6db24222b27a52892cba39dffb
|
2.2312533158400774e+38
| 5 |
ipvs: Add boundary check on ioctl arguments
The ipvs code has a nifty system for doing the size of ioctl command
copies; it defines an array with values into which it indexes the cmd
to find the right length.
Unfortunately, the ipvs code forgot to check if the cmd was in the
range that the array provides, allowing for an index outside of the
array, which then gives a "garbage" result into the length, which
then gets used for copying into a stack buffer.
Fix this by adding sanity checks on these as well as the copy size.
[ horms@verge.net.au: adjusted limit to IP_VS_SO_GET_MAX ]
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
Signed-off-by: Patrick McHardy <kaber@trash.net>
| 0 |
/*
 * ext4 direct I/O entry point.
 *
 * For a write that extends i_size, the inode is first put on the orphan
 * list (with i_disksize pinned at the old i_size) so that a crash in the
 * middle of the write cannot leave allocated blocks past the recorded
 * size.  After blockdev_direct_IO() completes, the orphan entry is
 * removed and i_size/i_disksize are advanced if data was actually
 * written past the old end of file.
 */
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ext4_inode_info *ei = EXT4_I(inode);
handle_t *handle;
ssize_t ret;
int orphan = 0;
size_t count = iov_length(iov, nr_segs);
if (rw == WRITE) {
loff_t final_size = offset + count;
if (final_size > inode->i_size) {
/* Credits for sb + inode write */
handle = ext4_journal_start(inode, 2);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
ret = ext4_orphan_add(handle, inode);
if (ret) {
ext4_journal_stop(handle);
goto out;
}
orphan = 1;
ei->i_disksize = inode->i_size;
ext4_journal_stop(handle);
}
}
/* Hand the actual transfer to the generic block layer */
ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext4_get_block, NULL);
if (orphan) {
int err;
/* Credits for sb + inode write */
handle = ext4_journal_start(inode, 2);
if (IS_ERR(handle)) {
/* This is really bad luck. We've written the data
* but cannot extend i_size. Bail out and pretend
* the write failed... */
ret = PTR_ERR(handle);
goto out;
}
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
if (ret > 0) {
loff_t end = offset + ret;
if (end > inode->i_size) {
/* Write extended the file: publish the new size */
ei->i_disksize = end;
i_size_write(inode, end);
/*
* We're going to return a positive `ret'
* here due to non-zero-length I/O, so there's
* no way of reporting error returns from
* ext4_mark_inode_dirty() to userspace. So
* ignore it.
*/
ext4_mark_inode_dirty(handle, inode);
}
}
err = ext4_journal_stop(handle);
if (ret == 0)
ret = err;
}
out:
return ret;
}
|
Safe
|
[
"CWE-399"
] |
linux-2.6
|
06a279d636734da32bb62dd2f7b0ade666f65d7c
|
1.0007087044459107e+38
| 73 |
ext4: only use i_size_high for regular files
Directories are not allowed to be bigger than 2GB, so don't use
i_size_high for anything other than regular files. E2fsck should
complain about these inodes, but the simplest thing to do for the
kernel is to only use i_size_high for regular files.
This prevents an intentially corrupted filesystem from causing the
kernel to burn a huge amount of CPU and issuing error messages such
as:
EXT4-fs warning (device loop0): ext4_block_to_path: block 135090028 > max
Thanks to David Maciejak from Fortinet's FortiGuard Global Security
Research Team for reporting this issue.
http://bugzilla.kernel.org/show_bug.cgi?id=12375
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@kernel.org
| 0 |
/*
 * Synchronous wrapper around the coroutine-based discard implementation.
 *
 * If we are already in coroutine context the request runs inline;
 * otherwise a coroutine is spawned and we pump the AIO event loop until
 * rwco.ret leaves the NOT_DONE sentinel.  Returns the coroutine's
 * result code.
 */
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
Coroutine *co;
DiscardCo rwco = {
.bs = bs,
.sector_num = sector_num,
.nb_sectors = nb_sectors,
.ret = NOT_DONE,
};
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
bdrv_discard_co_entry(&rwco);
} else {
co = qemu_coroutine_create(bdrv_discard_co_entry);
qemu_coroutine_enter(co, &rwco);
while (rwco.ret == NOT_DONE) {
qemu_aio_wait();
}
}
return rwco.ret;
}
|
Safe
|
[
"CWE-190"
] |
qemu
|
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
|
1.6474729955134246e+38
| 23 |
block: Limit request size (CVE-2014-0143)
Limiting the size of a single request to INT_MAX not only fixes a
direct integer overflow in bdrv_check_request() (which would only
trigger bad behaviour with ridiculously huge images, as in close to
2^64 bytes), but can also prevent overflows in all block drivers.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
| 0 |
/* Bind (or unbind) the index buffer of the current sub-context.
 *
 * index_size and offset are always recorded.  A zero res_handle unbinds
 * the current buffer.  Otherwise the handle is looked up; an unknown
 * handle drops the old binding and reports an ILLEGAL_RESOURCE context
 * error, while a valid one takes a reference on the resource. */
void vrend_set_index_buffer(struct vrend_context *ctx,
                            uint32_t res_handle,
                            uint32_t index_size,
                            uint32_t offset)
{
   struct vrend_resource *res;

   ctx->sub->ib.index_size = index_size;
   ctx->sub->ib.offset = offset;

   if (!res_handle) {
      /* Explicit unbind. */
      vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL);
      ctx->sub->index_buffer_res_id = 0;
      return;
   }

   /* Same buffer already bound: nothing to do. */
   if (ctx->sub->index_buffer_res_id == res_handle)
      return;

   res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
   if (!res) {
      /* Unknown handle: clear any stale binding and flag the error. */
      vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL);
      ctx->sub->index_buffer_res_id = 0;
      vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
      return;
   }

   vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, res);
   ctx->sub->index_buffer_res_id = res_handle;
}
|
Safe
|
[
"CWE-787"
] |
virglrenderer
|
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
|
3.9016752214397364e+37
| 26 |
vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been send when none is due
Closes: #250
Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
| 0 |
/*
 * Restore a bntseq_t index from its .ann, .amb and .pac files.
 *
 * Parses the annotation records from the .ann file and the ambiguity
 * records from the .amb file, cross-checks that both agree on l_pac and
 * n_seqs, and opens the .pac file for later reads.  On any read or parse
 * failure err_fatal() is invoked (which terminates the process).  The
 * caller owns the returned structure.
 *
 * Fix: the original used unbounded "%s" conversions into str[8192]; a
 * crafted or corrupt index file with an oversized name could overflow
 * the stack buffer (CWE-787).  Field widths now bound both reads.
 */
bntseq_t *bns_restore_core(const char *ann_filename, const char* amb_filename, const char* pac_filename)
{
	char str[8192];
	FILE *fp;
	const char *fname;
	bntseq_t *bns;
	long long xx;
	int i;
	int scanres;
	bns = (bntseq_t*)calloc(1, sizeof(bntseq_t));
	{ // read .ann
		fp = xopen(fname = ann_filename, "r");
		scanres = fscanf(fp, "%lld%d%u", &xx, &bns->n_seqs, &bns->seed);
		if (scanres != 3) goto badread;
		bns->l_pac = xx;
		// NOTE(review): calloc result is not checked and n_seqs comes
		// straight from the file -- a negative or huge value would
		// misbehave here; confirm upstream validation.
		bns->anns = (bntann1_t*)calloc(bns->n_seqs, sizeof(bntann1_t));
		for (i = 0; i < bns->n_seqs; ++i) {
			bntann1_t *p = bns->anns + i;
			char *q = str;
			int c;
			// read gi and sequence name; bound %s to sizeof(str)-1
			// so an oversized name cannot overflow the buffer
			scanres = fscanf(fp, "%u%8191s", &p->gi, str);
			if (scanres != 2) goto badread;
			p->name = strdup(str);
			// read fasta comments
			while (q - str < sizeof(str) - 1 && (c = fgetc(fp)) != '\n' && c != EOF) *q++ = c;
			while (c != '\n' && c != EOF) c = fgetc(fp);
			if (c == EOF) {
				scanres = EOF;
				goto badread;
			}
			*q = 0;
			if (q - str > 1 && strcmp(str, " (null)") != 0) p->anno = strdup(str + 1); // skip leading space
			else p->anno = strdup("");
			// read the rest
			scanres = fscanf(fp, "%lld%d%d", &xx, &p->len, &p->n_ambs);
			if (scanres != 3) goto badread;
			p->offset = xx;
		}
		err_fclose(fp);
	}
	{ // read .amb
		int64_t l_pac;
		int32_t n_seqs;
		fp = xopen(fname = amb_filename, "r");
		scanres = fscanf(fp, "%lld%d%d", &xx, &n_seqs, &bns->n_holes);
		if (scanres != 3) goto badread;
		l_pac = xx;
		xassert(l_pac == bns->l_pac && n_seqs == bns->n_seqs, "inconsistent .ann and .amb files.");
		bns->ambs = bns->n_holes? (bntamb1_t*)calloc(bns->n_holes, sizeof(bntamb1_t)) : 0;
		for (i = 0; i < bns->n_holes; ++i) {
			bntamb1_t *p = bns->ambs + i;
			// bounded %s: only the first character is kept below, but
			// the scan itself must not overflow str
			scanres = fscanf(fp, "%lld%d%8191s", &xx, &p->len, str);
			if (scanres != 3) goto badread;
			p->offset = xx;
			p->amb = str[0];
		}
		err_fclose(fp);
	}
	{ // open .pac
		bns->fp_pac = xopen(pac_filename, "rb");
	}
	return bns;
badread:
	if (EOF == scanres) {
		err_fatal(__func__, "Error reading %s : %s\n", fname, ferror(fp) ? strerror(errno) : "Unexpected end of file");
	}
	err_fatal(__func__, "Parse error reading %s\n", fname);
}
|
Safe
|
[
"CWE-787"
] |
bwa
|
20d0a13092aa4cb73230492b05f9697d5ef0b88e
|
3.3584718550845224e+36
| 70 |
r1198: exit if .alt is malformatted
Resolves #232
| 0 |
/*
 * Return the value of attribute @attr_name in @msg parsed as a double.
 * Falls back to @default_value when the attribute is absent, allocation
 * fails, or the value is not a completely valid number (trailing junk
 * or out-of-range per strtod()/errno).
 */
double ldb_msg_find_attr_as_double(const struct ldb_message *msg,
				   const char *attr_name,
				   double default_value)
{
	const struct ldb_val *val = ldb_msg_find_ldb_val(msg, attr_name);
	char *tmp;
	char *endp = NULL;
	double parsed;

	if (val == NULL || val->data == NULL) {
		return default_value;
	}

	/* ldb values are length-counted, not guaranteed NUL-terminated:
	 * make a terminated copy before handing it to strtod(). */
	tmp = talloc_strndup(msg, (const char *)val->data, val->length);
	if (tmp == NULL) {
		return default_value;
	}

	errno = 0;
	parsed = strtod(tmp, &endp);
	talloc_free(tmp);

	/* Reject range errors and any unconsumed trailing characters. */
	if (errno != 0 || (endp != NULL && endp[0] != '\0')) {
		return default_value;
	}

	return parsed;
}
|
Safe
|
[
"CWE-200"
] |
samba
|
7efe8182c165fbf17d2f88c173527a7a554e214b
|
3.203573065308179e+38
| 28 |
CVE-2022-32746 ldb: Add flag to mark message element values as shared
When making a shallow copy of an ldb message, mark the message elements
of the copy as sharing their values with the message elements in the
original message.
This flag value will be heeded in the next commit.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
| 0 |
// Populate 'ls' with MonCap instances for encode/decode round-trip
// testing: one default-constructed (unparsed) cap, then one instance per
// representative capability string below.
void MonCap::generate_test_instances(list<MonCap*>& ls)
{
  static const char *cap_specs[] = {
    "allow *",
    "allow rwx",
    "allow service foo x",
    "allow command bar x",
    "allow service foo r, allow command bar x",
    "allow command bar with k1=v1 x",
    "allow command bar with k1=v1 k2=v2 x",
  };

  // First instance stays in its default state.
  ls.push_back(new MonCap);
  for (size_t i = 0; i < sizeof(cap_specs) / sizeof(cap_specs[0]); ++i) {
    ls.push_back(new MonCap);
    ls.back()->parse(cap_specs[i]);
  }
}
|
Safe
|
[
"CWE-285"
] |
ceph
|
a2acedd2a7e12d58af6db35edbd8a9d29c557578
|
2.6464833331776575e+38
| 18 |
mon/config-key: limit caps allowed to access the store
Henceforth, we'll require explicit `allow` caps for commands, or for the
config-key service. Blanket caps are no longer allowed for the
config-key service, except for 'allow *'.
(for luminous and mimic, we're also ensuring MonCap's parser is able to
understand forward slashes '/' when parsing prefixes)
Signed-off-by: Joao Eduardo Luis <joao@suse.de>
(cherry picked from commit 5fff611041c5afeaf3c8eb09e4de0cc919d69237)
| 0 |
/*
 * Parse a ContentStorage local tag: 0x1901 (packages strong-reference
 * array) or 0x1902 (essence container data strong-reference array).
 * Any previously stored array is freed first to avoid a leak, with a
 * verbose log when a duplicate tag is encountered.  Returns 0 for
 * unhandled tags, otherwise the result of mxf_read_strong_ref_array().
 */
static int mxf_read_content_storage(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFContext *mxf = arg;
    switch (tag) {
    case 0x1901:
        if (mxf->packages_refs)
            av_log(mxf->fc, AV_LOG_VERBOSE, "Multiple packages_refs\n");
        av_free(mxf->packages_refs);
        return mxf_read_strong_ref_array(pb, &mxf->packages_refs, &mxf->packages_count);
    case 0x1902:
        /* Mirror the 0x1901 handling: previously this branch freed the
         * old array silently, unlike the packages case above. */
        if (mxf->essence_container_data_refs)
            av_log(mxf->fc, AV_LOG_VERBOSE, "Multiple essence_container_data_refs\n");
        av_free(mxf->essence_container_data_refs);
        return mxf_read_strong_ref_array(pb, &mxf->essence_container_data_refs, &mxf->essence_container_data_count);
    }
    return 0;
}
|
Safe
|
[
"CWE-125"
] |
FFmpeg
|
bab0716c7f4793ec42e05a5aa7e80d82a0dd4e75
|
8.922697366352129e+37
| 15 |
avformat/mxfdec: Fix av_log context
Fixes: out of array access
Fixes: mxf-crash-1c2e59bf07a34675bfb3ada5e1ec22fa9f38f923
Found-by: Paul Ch <paulcher@icloud.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
| 0 |
/*
 * 'ipp_length()' - Compute the number of bytes needed to serialize an
 * IPP message, or a member collection when 'collection' is 1.
 *
 * Walks every attribute, adding group-tag, value-tag, name-length and
 * value-length overhead plus the encoded size of each value according
 * to its tag type.  Returns the total byte count, or 0 if 'ipp' is NULL.
 */
ipp_length(ipp_t *ipp, /* I - IPP message or collection */
int collection) /* I - 1 if a collection, 0 otherwise */
{
int i; /* Looping var */
size_t bytes; /* Number of bytes */
ipp_attribute_t *attr; /* Current attribute */
ipp_tag_t group; /* Current group */
_ipp_value_t *value; /* Current value */
DEBUG_printf(("3ipp_length(ipp=%p, collection=%d)", (void *)ipp, collection));
if (!ipp)
{
DEBUG_puts("4ipp_length: Returning 0 bytes");
return (0);
}
/*
* Start with 8 bytes for the IPP message header...
*/
bytes = collection ? 0 : 8;
/*
* Then add the lengths of each attribute...
*/
group = IPP_TAG_ZERO;
for (attr = ipp->attrs; attr != NULL; attr = attr->next)
{
if (attr->group_tag != group && !collection)
{
group = attr->group_tag;
if (group == IPP_TAG_ZERO)
continue;
bytes ++; /* Group tag */
}
if (!attr->name)
continue;
DEBUG_printf(("5ipp_length: attr->name=\"%s\", attr->num_values=%d, "
"bytes=" CUPS_LLFMT, attr->name, attr->num_values, CUPS_LLCAST bytes));
if ((attr->value_tag & ~IPP_TAG_CUPS_CONST) < IPP_TAG_EXTENSION)
bytes += (size_t)attr->num_values;/* Value tag for each value */
else
bytes += (size_t)(5 * attr->num_values);
/* Value tag for each value */
bytes += (size_t)(2 * attr->num_values);
/* Name lengths */
bytes += strlen(attr->name); /* Name */
bytes += (size_t)(2 * attr->num_values);
/* Value lengths */
if (collection)
bytes += 5; /* Add membername overhead */
switch (attr->value_tag & ~IPP_TAG_CUPS_CONST)
{
case IPP_TAG_UNSUPPORTED_VALUE :
case IPP_TAG_DEFAULT :
case IPP_TAG_UNKNOWN :
case IPP_TAG_NOVALUE :
case IPP_TAG_NOTSETTABLE :
case IPP_TAG_DELETEATTR :
case IPP_TAG_ADMINDEFINE :
break;
case IPP_TAG_INTEGER :
case IPP_TAG_ENUM :
bytes += (size_t)(4 * attr->num_values);
break;
case IPP_TAG_BOOLEAN :
bytes += (size_t)attr->num_values;
break;
case IPP_TAG_TEXT :
case IPP_TAG_NAME :
case IPP_TAG_KEYWORD :
case IPP_TAG_URI :
case IPP_TAG_URISCHEME :
case IPP_TAG_CHARSET :
case IPP_TAG_LANGUAGE :
case IPP_TAG_MIMETYPE :
for (i = 0, value = attr->values;
i < attr->num_values;
i ++, value ++)
if (value->string.text)
bytes += strlen(value->string.text);
break;
case IPP_TAG_DATE :
bytes += (size_t)(11 * attr->num_values);
break;
case IPP_TAG_RESOLUTION :
bytes += (size_t)(9 * attr->num_values);
break;
case IPP_TAG_RANGE :
bytes += (size_t)(8 * attr->num_values);
break;
case IPP_TAG_TEXTLANG :
case IPP_TAG_NAMELANG :
bytes += (size_t)(4 * attr->num_values);
/* Charset + text length */
for (i = 0, value = attr->values;
i < attr->num_values;
i ++, value ++)
{
if (value->string.language)
bytes += strlen(value->string.language);
if (value->string.text)
bytes += strlen(value->string.text);
}
break;
case IPP_TAG_BEGIN_COLLECTION :
/* Recurse to size each member collection */
for (i = 0, value = attr->values;
i < attr->num_values;
i ++, value ++)
bytes += ipp_length(value->collection, 1);
break;
default :
for (i = 0, value = attr->values;
i < attr->num_values;
i ++, value ++)
bytes += (size_t)value->unknown.length;
break;
}
}
/*
* Finally, add 1 byte for the "end of attributes" tag or 5 bytes
* for the "end of collection" tag and return...
*/
if (collection)
bytes += 5;
else
bytes ++;
DEBUG_printf(("4ipp_length: Returning " CUPS_LLFMT " bytes", CUPS_LLCAST bytes));
return (bytes);
}
|
Safe
|
[
"CWE-120"
] |
cups
|
f24e6cf6a39300ad0c3726a41a4aab51ad54c109
|
9.584198509046124e+37
| 155 |
Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929)
| 0 |
/* Return true when halt-polling should be skipped for this vCPU, i.e.
 * when bit 0 of its MSR_KVM_POLL_CONTROL shadow value is clear. */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.msr_kvm_poll_control & 1);
}
|
Safe
|
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
|
1.8161748797872325e+38
| 4 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20211210163625.2886-6-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/* Wrap a PKCS#11 private key in a freshly allocated gnutls_privkey_t.
 * When 'deinit' is non-zero the import uses
 * GNUTLS_PRIVKEY_IMPORT_AUTO_RELEASE so the PKCS#11 key is released
 * together with the wrapper.  Returns the new key, or NULL if 'key' is
 * NULL or init/import fails (the partially built wrapper is freed). */
alloc_and_load_pkcs11_key (gnutls_pkcs11_privkey_t key, int deinit)
{
gnutls_privkey_t local_key;
int ret = 0;
if (key == NULL)
return NULL;
ret = gnutls_privkey_init (&local_key);
if (ret < 0)
{
gnutls_assert ();
return NULL;
}
ret =
gnutls_privkey_import_pkcs11 (local_key, key,
deinit ? GNUTLS_PRIVKEY_IMPORT_AUTO_RELEASE
: 0);
if (ret < 0)
{
gnutls_assert ();
gnutls_privkey_deinit (local_key);
return NULL;
}
return local_key;
}
|
Safe
|
[
"CWE-399"
] |
gnutls
|
9c62f4feb2bdd6fbbb06eb0c60bfdea80d21bbb8
|
2.4708523375084387e+38
| 28 |
Deinitialize the correct number of certificates. Reported by Remi Gacogne.
| 0 |
// Parse a single function-call argument.
//
// First rejects obviously invalid starts (',', '{', ';') and the
// special "#{}" empty-interpolation sequence with a css_error.  A
// "$name: value" form yields a named Argument; otherwise a space list
// is parsed, and a trailing "..." marks the argument as an arglist --
// or as a keyword arglist when the value is a map or a hash-separated
// list.  Parser position advances as a side effect.
Argument_Obj Parser::parse_argument()
{
if (peek< alternatives< exactly<','>, exactly< '{' >, exactly<';'> > >()) {
css_error("Invalid CSS", " after ", ": expected \")\", was ");
}
if (peek_css< sequence < exactly< hash_lbrace >, exactly< rbrace > > >()) {
position += 2;
css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was ");
}
Argument_Obj arg;
if (peek_css< sequence < variable, optional_css_comments, exactly<':'> > >()) {
lex_css< variable >();
std::string name(Util::normalize_underscores(lexed));
ParserState p = pstate;
lex_css< exactly<':'> >();
Expression_Obj val = parse_space_list();
arg = SASS_MEMORY_NEW(Argument, p, val, name);
}
else {
bool is_arglist = false;
bool is_keyword = false;
Expression_Obj val = parse_space_list();
List_Ptr l = Cast<List>(val);
if (lex_css< exactly< ellipsis > >()) {
// A map value or hash-separated list becomes a keyword arglist
if (val->concrete_type() == Expression::MAP || (
(l != NULL && l->separator() == SASS_HASH)
)) is_keyword = true;
else is_arglist = true;
}
arg = SASS_MEMORY_NEW(Argument, pstate, val, "", is_arglist, is_keyword);
}
return arg;
}
|
Safe
|
[
"CWE-125"
] |
libsass
|
eb15533b07773c30dc03c9d742865604f47120ef
|
1.5888585993212708e+38
| 34 |
Fix memory leak in `parse_ie_keyword_arg`
`kwd_arg` would never get freed when there was a parse error in
`parse_ie_keyword_arg`.
Closes #2656
| 0 |
// Kernel body for UnicodeEncode: converts a ragged tensor of Unicode
// code points (flat int32 values in input 0, row splits in input 1)
// into one encoded string per row.
//
// Validates the splits tensor up front (non-empty, first element zero,
// last element equal to the value count) and again per row (ascending,
// in bounds) before indexing, then either fails or substitutes
// error_options_.subst for invalid code points depending on
// error_options_.  Output is a rank-1 string tensor with one entry per
// split interval.
void Compute(OpKernelContext* context) override {
// Get inputs
const Tensor& input_tensor = context->input(0);
const auto input_tensor_flat = input_tensor.flat<int32>();
const Tensor& input_splits = context->input(1);
const auto input_splits_flat = input_splits.flat<SPLITS_TYPE>();
OP_REQUIRES(
context, input_splits.NumElements() > 0,
errors::InvalidArgument("Input_splits should contain elements, but "
"given input_values has 0 elements"));
// Operation will treat first argument in input_splits as if it were zero
// regardless of its actual value since splits should begin with zero and
// end with the length of the input values vector.
OP_REQUIRES(
context, input_splits_flat(0) == 0,
errors::InvalidArgument("First value in input_splits must be zero."));
OP_REQUIRES(context,
input_splits_flat(input_splits_flat.size() - 1) ==
input_tensor_flat.size(),
errors::InvalidArgument("Last value in input_splits must be "
"equal to length of input_tensor."));
// Since we limit to a 2-D input (flat_values of rank 1 and a single splits
// tensor), our output dimension will be 1 with it's size equal to the
// number of splits (outer dimension or ragged tensor).
TensorShape output_shape({input_splits.dim_size(0) - 1});
Tensor* output_tensor;
OP_REQUIRES_OK(context, context->allocate_output("output", output_shape,
&output_tensor));
auto output_tensor_flat = output_tensor->flat<tstring>();
// Use a single index over the flattened input values tensor.
int idx = 0;
// Loop through our split dimension to create a new string at each split.
for (int i = 1; i < input_splits_flat.size(); ++i) {
icu::UnicodeString unicode_string;
icu::UnicodeStringAppendable appendable_unicode_string(unicode_string);
OP_REQUIRES(
context, input_splits_flat(i - 1) <= input_splits_flat(i),
errors::InvalidArgument(
"Values in input_splits must be equal or in ascending order."));
OP_REQUIRES(
context, input_splits_flat(i) <= input_tensor_flat.size(),
errors::InvalidArgument("Values in input_splits must be less than or "
"equal to input_tensor length."));
for (; idx < input_splits_flat(i); ++idx) {
int32_t code_point = input_tensor_flat(idx);
// Check for invalid code point
if (!U_IS_UNICODE_CHAR(code_point)) {
if (error_options_.error_on_malformatting) {
context->CtxFailure(errors::InvalidArgument(
"Code point is out of range for Unicode, or a noncharacter."));
return;
} else if (!error_options_.elide_replacement) {
code_point = error_options_.subst;
}
}
appendable_unicode_string.appendCodePoint(code_point);
}
// Encode our string and save in the output.
tstring result;
Encode(encoding_, unicode_string, &result);
output_tensor_flat(i - 1) = std::move(result);
}
}
|
Safe
|
[
"CWE-824",
"CWE-787"
] |
tensorflow
|
2e0ee46f1a47675152d3d865797a18358881d7a6
|
1.9158207844484898e+38
| 65 |
Ensure non-empty input_splits in tf.raw_ops.UnicodeEncode
PiperOrigin-RevId: 387170080
Change-Id: I3b489acc51c5cb4124c535b9df7cc6e62ef21766
| 0 |
/*
 * Queue the HCI commands needed to bring a just-powered controller in
 * line with the host's flag state: enable SSP/Secure Connections and LE
 * host support where flagged, (re)program advertising or the first
 * advertising instance, sync link security with the AUTH bit, and
 * refresh BR/EDR scan/class/name/EIR settings.  Commands are only
 * queued on 'req' here; execution happens elsewhere.  Runs under the
 * hdev lock and always returns 0.
 */
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
u8 link_sec;
hci_dev_lock(hdev);
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
!lmp_host_ssp_capable(hdev)) {
u8 mode = 0x01;
hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
u8 support = 0x01;
hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
sizeof(support), &support);
}
}
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
cp.le = 0x01;
cp.simul = 0x00;
/* Check first if we already have the right
* host state (host features set)
*/
if (cp.le != lmp_host_le_capable(hdev) ||
cp.simul != lmp_host_le_br_capable(hdev))
hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
sizeof(cp), &cp);
}
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
/* Make sure the controller has a good default for
* advertising data. This also applies to the case
* where BR/EDR was toggled during the AUTO_OFF phase.
*/
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
list_empty(&hdev->adv_instances)) {
int err;
if (ext_adv_capable(hdev)) {
err = __hci_req_setup_ext_adv_instance(req,
0x00);
if (!err)
__hci_req_update_scan_rsp_data(req,
0x00);
} else {
err = 0;
__hci_req_update_adv_data(req, 0x00);
__hci_req_update_scan_rsp_data(req, 0x00);
}
if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
if (!ext_adv_capable(hdev))
__hci_req_enable_advertising(req);
else if (!err)
__hci_req_enable_ext_advertising(req,
0x00);
}
} else if (!list_empty(&hdev->adv_instances)) {
/* Instances exist but global advertising is off:
* schedule the first configured instance instead */
struct adv_info *adv_instance;
adv_instance = list_first_entry(&hdev->adv_instances,
struct adv_info, list);
__hci_req_schedule_adv_instance(req,
adv_instance->instance,
true);
}
}
link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
sizeof(link_sec), &link_sec);
if (lmp_bredr_capable(hdev)) {
if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
__hci_req_write_fast_connectable(req, true);
else
__hci_req_write_fast_connectable(req, false);
__hci_req_update_scan(req);
__hci_req_update_class(req);
__hci_req_update_name(req);
__hci_req_update_eir(req);
}
hci_dev_unlock(hdev);
return 0;
}
|
Safe
|
[
"CWE-362"
] |
linux
|
e2cb6b891ad2b8caa9131e3be70f45243df82a80
|
2.131306403706717e+38
| 95 |
bluetooth: eliminate the potential race condition when removing the HCI controller
There is a possible race condition vulnerability between issuing a HCI
command and removing the controller. Specifically, functions hci_req_sync()
and hci_dev_do_close() can race each other like below:
thread-A in hci_req_sync() | thread-B in hci_dev_do_close()
| hci_req_sync_lock(hdev);
test_bit(HCI_UP, &hdev->flags); |
... | test_and_clear_bit(HCI_UP, &hdev->flags)
hci_req_sync_lock(hdev); |
|
In this commit we alter the sequence in function hci_req_sync(). Hence,
the thread-A cannot issue the HCI command while thread-B is closing the controller.
Signed-off-by: Lin Ma <linma@zju.edu.cn>
Cc: Marcel Holtmann <marcel@holtmann.org>
Fixes: 7c6a329e4447 ("[Bluetooth] Fix regression from using default link policy")
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
/* Kick off reconnection for every domain known to the driver by
 * invoking qemuProcessReconnectHelper on each entry of
 * driver->domains, passing the driver through the shared data struct. */
qemuProcessReconnectAll(virQEMUDriverPtr driver)
{
struct qemuProcessReconnectData data = {.driver = driver};
virDomainObjListForEach(driver->domains, true,
qemuProcessReconnectHelper, &data);
}
|
Safe
|
[
"CWE-416"
] |
libvirt
|
1ac703a7d0789e46833f4013a3876c2e3af18ec7
|
2.2123797021334724e+38
| 6 |
qemu: Add missing lock in qemuProcessHandleMonitorEOF
qemuMonitorUnregister will be called in multiple threads (e.g. threads
in rpc worker pool and the vm event thread). In some cases, it isn't
protected by the monitor lock, which may lead to call g_source_unref
more than one time and a use-after-free problem eventually.
Add the missing lock in qemuProcessHandleMonitorEOF (which is the only
position missing lock of monitor I found).
Suggested-by: Michal Privoznik <mprivozn@redhat.com>
Signed-off-by: Peng Liang <liangpeng10@huawei.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
| 0 |
/*
 * Hooks an infolist: registers 'callback' to build the infolist named
 * 'infolist_name'.  The name may carry a priority prefix which
 * hook_get_priority_and_name() splits off; only the bare name is stored.
 * NULL description strings are stored as "".
 *
 * Returns the new hook, or NULL if the name/callback is missing or
 * allocation of the hook structures fails.
 * NOTE(review): the strdup() results are not checked for NULL, so the
 * string fields could be NULL after OOM -- confirm callers tolerate that.
 */
hook_infolist (struct t_weechat_plugin *plugin, const char *infolist_name,
const char *description, const char *pointer_description,
const char *args_description,
t_hook_callback_infolist *callback, void *callback_data)
{
struct t_hook *new_hook;
struct t_hook_infolist *new_hook_infolist;
int priority;
const char *ptr_infolist_name;
if (!infolist_name || !infolist_name[0] || !callback)
return NULL;
new_hook = malloc (sizeof (*new_hook));
if (!new_hook)
return NULL;
new_hook_infolist = malloc (sizeof (*new_hook_infolist));
if (!new_hook_infolist)
{
free (new_hook);
return NULL;
}
hook_get_priority_and_name (infolist_name, &priority, &ptr_infolist_name);
hook_init_data (new_hook, plugin, HOOK_TYPE_INFOLIST, priority,
callback_data);
new_hook->hook_data = new_hook_infolist;
new_hook_infolist->callback = callback;
new_hook_infolist->infolist_name = strdup ((ptr_infolist_name) ?
ptr_infolist_name : infolist_name);
new_hook_infolist->description = strdup ((description) ? description : "");
new_hook_infolist->pointer_description = strdup ((pointer_description) ?
pointer_description : "");
new_hook_infolist->args_description = strdup ((args_description) ?
args_description : "");
hook_add_to_list (new_hook);
return new_hook;
}
|
Safe
|
[
"CWE-20"
] |
weechat
|
c265cad1c95b84abfd4e8d861f25926ef13b5d91
|
3.2125210260647644e+38
| 41 |
Fix verification of SSL certificates by calling gnutls verify callback (patch #7459)
| 0 |
int hidp_connection_add(struct hidp_connadd_req *req,
			struct socket *ctrl_sock,
			struct socket *intr_sock)
{
	struct hidp_session *session;
	struct l2cap_conn *conn;
	struct l2cap_chan *chan;
	int ret;

	/*
	 * Verify both sockets really are bound L2CAP sockets BEFORE touching
	 * l2cap_pi(): l2cap_pi() blindly reinterprets the socket's private
	 * data, so calling it on an unverified socket is a type confusion
	 * (CWE-843).  The original code read the channel pointer at
	 * declaration time, ahead of this check.
	 */
	ret = hidp_verify_sockets(ctrl_sock, intr_sock);
	if (ret)
		return ret;

	/* Safe only now that the sockets have been verified. */
	chan = l2cap_pi(ctrl_sock->sk)->chan;

	conn = NULL;
	l2cap_chan_lock(chan);
	if (chan->conn)
		conn = l2cap_conn_get(chan->conn);
	l2cap_chan_unlock(chan);

	if (!conn)
		return -EBADFD;

	ret = hidp_session_new(&session, &chan->dst, ctrl_sock,
			       intr_sock, req, conn);
	if (ret)
		goto out_conn;

	ret = l2cap_register_user(conn, &session->user);
	if (ret)
		goto out_session;

	ret = 0;

out_session:
	/* Registration took its own reference (on success); drop ours. */
	hidp_session_put(session);
out_conn:
	l2cap_conn_put(conn);
	return ret;
}
|
Vulnerable
|
[
"CWE-843"
] |
linux
|
51bda2bca53b265715ca1852528f38dc67429d9a
|
9.931306649801744e+37
| 39 |
Bluetooth: hidp_connection_add() unsafe use of l2cap_pi()
it's OK after we'd verified the sockets, but not before that.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
| 1 |
static inline bool gif_set(struct vcpu_svm *svm)
{
	/* The Global Interrupt Flag lives in the VMCB's int_ctl field when
	 * virtual GIF is enabled, otherwise in the architectural hflags. */
	return vgif_enabled(svm)
		? !!(svm->vmcb->control.int_ctl & V_GIF_MASK)
		: !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
|
Safe
|
[
"CWE-401"
] |
linux
|
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
|
1.5775322462528926e+38
| 7 |
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When kmalloc memory for sd->sev_vmcbs failed, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/* Expose an MXF data/metadata track as an AVStream of type AVMEDIA_TYPE_DATA.
 * Returns 0 on success or when no resolvable component exists (not an error),
 * AVERROR(ENOMEM) if the stream cannot be allocated. */
static int mxf_add_metadata_stream(MXFContext *mxf, MXFTrack *track)
{
    MXFStructuralComponent *component = NULL;
    const MXFCodecUL *codec_ul = NULL;
    MXFPackage tmp_package;
    AVStream *st;
    int j;

    /* Find the first resolvable source clip among the track's structural
     * components; unresolvable references are skipped. */
    for (j = 0; j < track->sequence->structural_components_count; j++) {
        component = mxf_resolve_sourceclip(mxf, &track->sequence->structural_components_refs[j]);
        if (!component)
            continue;
        break;
    }
    if (!component)
        return 0;  /* nothing to expose */

    st = avformat_new_stream(mxf->fc, NULL);
    if (!st) {
        av_log(mxf->fc, AV_LOG_ERROR, "could not allocate metadata stream\n");
        return AVERROR(ENOMEM);
    }

    st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
    st->codecpar->codec_id = AV_CODEC_ID_NONE;
    st->id = track->track_id;

    /* Build a throwaway package just to format the source UMID metadata. */
    memcpy(&tmp_package.package_ul, component->source_package_ul, 16);
    memcpy(&tmp_package.package_uid, component->source_package_uid, 16);
    mxf_add_umid_metadata(&st->metadata, "file_package_umid", &tmp_package);
    if (track->name && track->name[0])
        av_dict_set(&st->metadata, "track_name", track->name, 0);

    codec_ul = mxf_get_codec_ul(ff_mxf_data_definition_uls, &track->sequence->data_definition_ul);
    /* NOTE(review): av_get_media_type_string() can return NULL for unknown
     * ids — presumably the UL table always maps to a known media type here;
     * confirm before relying on it. */
    av_dict_set(&st->metadata, "data_type", av_get_media_type_string(codec_ul->id), 0);
    return 0;
}
|
Safe
|
[
"CWE-703",
"CWE-834"
] |
FFmpeg
|
900f39692ca0337a98a7cf047e4e2611071810c2
|
2.171905800797105e+38
| 37 |
avformat/mxfdec: Fix DoS issues in mxf_read_index_entry_array()
Fixes: 20170829A.mxf
Co-Author: 张洪亮(望初)" <wangchu.zhl@alibaba-inc.com>
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
| 0 |
void init_tree_desc(struct tree_desc *desc, const void *buffer, unsigned long size)
{
struct strbuf err = STRBUF_INIT;
if (init_tree_desc_internal(desc, buffer, size, &err))
die("%s", err.buf);
strbuf_release(&err);
}
|
Safe
|
[
"CWE-20"
] |
git
|
e1d911dd4c7b76a5a8cec0f5c8de15981e34da83
|
1.3207751822577826e+38
| 7 |
mingw: disallow backslash characters in tree objects' file names
The backslash character is not a valid part of a file name on Windows.
Hence it is dangerous to allow writing files that were unpacked from
tree objects, when the stored file name contains a backslash character:
it will be misinterpreted as directory separator.
This not only causes ambiguity when a tree contains a blob `a\b` and a
tree `a` that contains a blob `b`, but it also can be used as part of an
attack vector to side-step the careful protections against writing into
the `.git/` directory during a clone of a maliciously-crafted
repository.
Let's prevent that, addressing CVE-2019-1354.
Note: we guard against backslash characters in tree objects' file names
_only_ on Windows (because on other platforms, even on those where NTFS
volumes can be mounted, the backslash character is _not_ a directory
separator), and _only_ when `core.protectNTFS = true` (because users
might need to generate tree objects for other platforms, of course
without touching the worktree, e.g. using `git update-index
--cacheinfo`).
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
| 0 |
/* One-time initialization of the NFSv4-specific fields of an nfs_inode.
 * Compiles to an empty function when CONFIG_NFS_V4 is not set. */
static inline void nfs4_init_once(struct nfs_inode *nfsi)
{
#ifdef CONFIG_NFS_V4
	INIT_LIST_HEAD(&nfsi->open_states); /* no open state tracked yet */
	nfsi->delegation = NULL;            /* no delegation held */
	nfsi->delegation_state = 0;
	init_rwsem(&nfsi->rwsem);
#endif
}
|
Safe
|
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
|
7.615520408124218e+37
| 9 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
| 0 |
/* Add a real-server destination to an IPVS virtual service.
 * Validates the user-supplied parameters, reuses a matching entry from the
 * trash list when one exists (refreshing it), otherwise allocates a new one.
 * Returns 0 on success, -ERANGE on invalid parameters, -EEXIST if the
 * destination is already active, or the error from ip_vs_new_dest(). */
ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
	struct ip_vs_dest *dest;
	union nf_inet_addr daddr;
	__be16 dport = udest->port;
	int ret;

	EnterFunction(2);

	/* Reject obviously invalid user-supplied parameters up front. */
	if (udest->weight < 0) {
		pr_err("%s(): server weight less than zero\n", __func__);
		return -ERANGE;
	}

	if (udest->l_threshold > udest->u_threshold) {
		pr_err("%s(): lower threshold is higher than upper threshold\n",
			__func__);
		return -ERANGE;
	}

	ip_vs_addr_copy(svc->af, &daddr, &udest->addr);

	/*
	 * Check if the dest already exists in the list
	 */
	dest = ip_vs_lookup_dest(svc, &daddr, dport);
	if (dest != NULL) {
		IP_VS_DBG(1, "%s(): dest already exists\n", __func__);
		return -EEXIST;
	}

	/*
	 * Check if the dest already exists in the trash and
	 * is from the same service
	 */
	dest = ip_vs_trash_get_dest(svc, &daddr, dport);
	if (dest != NULL) {
		IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
			"dest->refcnt=%d, service %u/%s:%u\n",
			IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport),
			atomic_read(&dest->refcnt),
			dest->vfwmark,
			IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
			ntohs(dest->vport));
		/* Refresh the recycled entry with the new user settings
		 * before relinking it into the active list. */
		__ip_vs_update_dest(svc, dest, udest);

		/*
		 * Get the destination from the trash
		 */
		list_del(&dest->n_list);

		ip_vs_new_estimator(&dest->stats);

		write_lock_bh(&__ip_vs_svc_lock);

		/*
		 * Wait until all other svc users go away.
		 */
		IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

		list_add(&dest->n_list, &svc->destinations);
		svc->num_dests++;

		/* call the update_service function of its scheduler */
		if (svc->scheduler->update_service)
			svc->scheduler->update_service(svc);

		write_unlock_bh(&__ip_vs_svc_lock);
		return 0;
	}

	/*
	 * Allocate and initialize the dest structure
	 */
	ret = ip_vs_new_dest(svc, udest, &dest);
	if (ret) {
		return ret;
	}

	/*
	 * Add the dest entry into the list
	 */
	atomic_inc(&dest->refcnt);

	write_lock_bh(&__ip_vs_svc_lock);

	/*
	 * Wait until all other svc users go away.
	 */
	IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

	list_add(&dest->n_list, &svc->destinations);
	svc->num_dests++;

	/* call the update_service function of its scheduler */
	if (svc->scheduler->update_service)
		svc->scheduler->update_service(svc);

	write_unlock_bh(&__ip_vs_svc_lock);

	LeaveFunction(2);

	return 0;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
04bcef2a83f40c6db24222b27a52892cba39dffb
|
1.5292319764578334e+38
| 107 |
ipvs: Add boundary check on ioctl arguments
The ipvs code has a nifty system for doing the size of ioctl command
copies; it defines an array with values into which it indexes the cmd
to find the right length.
Unfortunately, the ipvs code forgot to check if the cmd was in the
range that the array provides, allowing for an index outside of the
array, which then gives a "garbage" result into the length, which
then gets used for copying into a stack buffer.
Fix this by adding sanity checks on these as well as the copy size.
[ horms@verge.net.au: adjusted limit to IP_VS_SO_GET_MAX ]
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
Signed-off-by: Patrick McHardy <kaber@trash.net>
| 0 |
njs_vmcode_try_break(njs_vm_t *vm, njs_value_t *exit_value,
    njs_value_t *offset)
{
    /* Preserve any value already stashed by vmcode_try_return; only when
     * the slot is still invalid do we mark the break with the number 1. */
    if (njs_is_valid(exit_value)) {
        return (njs_jump_off_t) offset;
    }

    njs_number(exit_value) = 1;

    return (njs_jump_off_t) offset;
}
|
Safe
|
[
"CWE-703",
"CWE-754"
] |
njs
|
222d6fdcf0c6485ec8e175f3a7b70d650c234b4e
|
2.9459318196816395e+38
| 10 |
Fixed njs_vmcode_interpreter() when "toString" conversion fails.
Previously, while interpreting a user function, njs_vmcode_interpreter()
might return prematurely when an error happens. This is not correct
because the current frame has to be unwound (or exception caught)
first.
The fix is exit through only 5 appropriate exit points to ensure
proper unwinding.
This closes #467 issue on Github.
| 0 |
/* Encrypt one SSH packet with chacha20-poly1305@openssh.com.
 * The 4-byte length field is encrypted with the header cipher, the payload
 * with the main cipher (counter already advanced past block 0, which was
 * consumed deriving the Poly1305 key), and the MAC is computed over the
 * full encrypted packet into 'tag'.  Errors are logged and the function
 * returns early; NOTE(review): callers apparently cannot observe failure
 * (void return) — confirm that partial output is acceptable upstream. */
chacha20_poly1305_aead_encrypt(struct ssh_cipher_struct *cipher,
                               void *in,
                               void *out,
                               size_t len,
                               uint8_t *tag,
                               uint64_t seq)
{
    struct ssh_packet_header *in_packet = in, *out_packet = out;
    struct chacha20_poly1305_keysched *ctx = cipher->chacha20_schedule;
    size_t taglen = POLY1305_TAGLEN;
    int ret, outlen = 0;

    /* Step 1: derive the per-packet Poly1305 key from the sequence number. */
    ret = chacha20_poly1305_packet_setup(cipher, seq, 1);
    if (ret != SSH_OK) {
        SSH_LOG(SSH_LOG_WARNING, "Failed to setup packet");
        return;
    }

#ifdef DEBUG_CRYPTO
    ssh_log_hexdump("plaintext length",
                    (unsigned char *)&in_packet->length, sizeof(uint32_t));
#endif /* DEBUG_CRYPTO */

    /* step 2, encrypt length field */
    ret = EVP_CipherUpdate(ctx->header_evp,
                           (unsigned char *)&out_packet->length,
                           &outlen,
                           (unsigned char *)&in_packet->length,
                           sizeof(uint32_t));
    if (ret != 1 || outlen != sizeof(uint32_t)) {
        SSH_LOG(SSH_LOG_WARNING, "EVP_CipherUpdate failed");
        return;
    }

#ifdef DEBUG_CRYPTO
    ssh_log_hexdump("encrypted length",
                    (unsigned char *)&out_packet->length, outlen);
#endif /* DEBUG_CRYPTO */

    /* Finalize the header cipher; stream mode must produce no extra bytes. */
    ret = EVP_CipherFinal_ex(ctx->header_evp, (uint8_t *)out + outlen, &outlen);
    if (ret != 1 || outlen != 0) {
        SSH_LOG(SSH_LOG_PACKET, "EVP_EncryptFinal_ex failed");
        return;
    }

    /* step 3, encrypt packet payload (main_evp counter == 1) */
    /* We already did encrypt one block so the counter should be in the correct position */
    ret = EVP_CipherUpdate(ctx->main_evp,
                           out_packet->payload,
                           &outlen,
                           in_packet->payload,
                           len - sizeof(uint32_t));
    if (ret != 1) {
        SSH_LOG(SSH_LOG_WARNING, "EVP_CipherUpdate failed");
        return;
    }

    /* step 4, compute the MAC over the encrypted length + payload */
    ret = EVP_DigestSignUpdate(ctx->mctx, out_packet, len);
    if (ret <= 0) {
        SSH_LOG(SSH_LOG_WARNING, "EVP_DigestSignUpdate failed");
        return;
    }
    ret = EVP_DigestSignFinal(ctx->mctx, tag, &taglen);
    if (ret <= 0) {
        SSH_LOG(SSH_LOG_WARNING, "EVP_DigestSignFinal failed");
        return;
    }
}
|
Safe
|
[
"CWE-476"
] |
libssh
|
b36272eac1b36982598c10de7af0a501582de07a
|
8.083693225220036e+37
| 67 |
CVE-2020-1730: Fix a possible segfault when zeroing AES-CTR key
Fixes T213
Signed-off-by: Andreas Schneider <asn@cryptomilk.org>
Reviewed-by: Anderson Toshiyuki Sasaki <ansasaki@redhat.com>
| 0 |
void RGWCopyObj_ObjStore_SWIFT::send_response()
{
  if (sent_header) {
    /* Headers already went out: just close the open formatter section
     * and flush the remainder of the body. */
    s->formatter->close_section();
    rgw_flush_formatter(s, s->formatter);
    return;
  }

  /* First (and only) header emission for this copy request. */
  string content_type;
  if (op_ret == 0)
    op_ret = STATUS_CREATED;
  set_req_state_err(s, op_ret);
  dump_errno(s);
  dump_etag(s, etag);
  dump_last_modified(s, mtime);
  dump_copy_info();
  get_contype_from_attrs(attrs, content_type);
  dump_object_metadata(s, attrs);
  end_header(s, this, content_type.empty() ? "binary/octet-stream"
                                           : content_type.c_str());
}
|
Safe
|
[
"CWE-617"
] |
ceph
|
f44a8ae8aa27ecef69528db9aec220f12492810e
|
2.3286716215853528e+38
| 20 |
rgw: RGWSwiftWebsiteHandler::is_web_dir checks empty subdir_name
checking for empty name avoids later assertion in RGWObjectCtx::set_atomic
Fixes: CVE-2021-3531
Reviewed-by: Casey Bodley <cbodley@redhat.com>
Signed-off-by: Casey Bodley <cbodley@redhat.com>
(cherry picked from commit 7196a469b4470f3c8628489df9a41ec8b00a5610)
| 0 |
/* Scan a floating-point number (decimal or C99 hex-float), "nan", or
 * "inf"/"infinite" from the trio input stream into *target.
 * 'flags' selects the destination type (float / double / long double);
 * 'width' bounds how many characters may be consumed (clamped to the
 * local 512-byte collection buffer).  Returns TRUE on a successful
 * conversion, FALSE on malformed input or a NULL target. */
TRIO_PRIVATE BOOLEAN_T TrioReadDouble TRIO_ARGS4((self, target, flags, width), trio_class_t* self,
                                                 trio_pointer_t target, trio_flags_t flags,
                                                 int width)
{
  int ch;
  char doubleString[512];   /* collected characters; trio_to_double needs a string */
  int offset = 0;
  int start;
#if TRIO_FEATURE_QUOTE
  int j;
#endif
  BOOLEAN_T isHex = FALSE;
  trio_long_double_t infinity;

  doubleString[0] = 0;

  /* Clamp the field width to what the collection buffer can hold. */
  if ((width == NO_WIDTH) || (width > (int)sizeof(doubleString) - 1))
    width = sizeof(doubleString) - 1;

  TrioSkipWhitespaces(self);

  /*
   * Read entire double number from stream. trio_to_double requires
   * a string as input, but InStream can be anything, so we have to
   * collect all characters.
   */
  ch = self->current;
  if ((ch == '+') || (ch == '-'))
  {
    doubleString[offset++] = (char)ch;
    self->InStream(self, &ch);
    width--;
  }

  /* 'start' marks the first character after an optional sign; the
   * width accounting below is relative to it. */
  start = offset;
  switch (ch)
  {
    case 'n':
    case 'N':
      /* Not-a-number */
      if (offset != 0)
        break;
      /* FALLTHROUGH */
    case 'i':
    case 'I':
      /* Infinity */
      while (isalpha(ch) && (offset - start < width))
      {
        doubleString[offset++] = (char)ch;
        self->InStream(self, &ch);
      }
      doubleString[offset] = NIL;

      /* Case insensitive string comparison */
      if (trio_equal(&doubleString[start], INFINITE_UPPER) ||
          trio_equal(&doubleString[start], LONG_INFINITE_UPPER))
      {
        infinity = ((start == 1) && (doubleString[0] == '-')) ? trio_ninf() : trio_pinf();
        if (!target)
          return FALSE;
        if (flags & FLAGS_LONGDOUBLE)
        {
          *((trio_long_double_t*)target) = infinity;
        }
        else if (flags & FLAGS_LONG)
        {
          *((double*)target) = infinity;
        }
        else
        {
          *((float*)target) = infinity;
        }
        return TRUE;
      }
      if (trio_equal(doubleString, NAN_UPPER))
      {
        if (!target)
          return FALSE;
        /* NaN must not have a preceeding + nor - */
        if (flags & FLAGS_LONGDOUBLE)
        {
          *((trio_long_double_t*)target) = trio_nan();
        }
        else if (flags & FLAGS_LONG)
        {
          *((double*)target) = trio_nan();
        }
        else
        {
          *((float*)target) = trio_nan();
        }
        return TRUE;
      }
      return FALSE;

    case '0':
      /* A leading "0x"/"0X" switches the scanner into hex-float mode. */
      doubleString[offset++] = (char)ch;
      self->InStream(self, &ch);
      if (trio_to_upper(ch) == 'X')
      {
        isHex = TRUE;
        doubleString[offset++] = (char)ch;
        self->InStream(self, &ch);
      }
      break;

    default:
      break;
  }

  /* Integer part (hex digits in hex mode, optionally with thousands
   * separators when FLAGS_QUOTE is active). */
  while ((ch != EOF) && (offset - start < width))
  {
    /* Integer part */
    if (isHex ? isxdigit(ch) : isdigit(ch))
    {
      doubleString[offset++] = (char)ch;
      self->InStream(self, &ch);
    }
#if TRIO_FEATURE_QUOTE
    else if (flags & FLAGS_QUOTE)
    {
      /* Compare with thousands separator */
      for (j = 0; internalThousandSeparator[j] && self->current; j++)
      {
        if (internalThousandSeparator[j] != self->current)
          break;

        self->InStream(self, &ch);
      }
      if (internalThousandSeparator[j])
        break; /* Mismatch */
      else
        continue; /* Match */
    }
#endif
    else
      break; /* while */
  }

  if (ch == '.')
  {
    /* Decimal part */
    doubleString[offset++] = (char)ch;
    self->InStream(self, &ch);
    while ((isHex ? isxdigit(ch) : isdigit(ch)) && (offset - start < width))
    {
      doubleString[offset++] = (char)ch;
      self->InStream(self, &ch);
    }
  }

  /* Exponent marker: 'p'/'P' for hex floats, 'e'/'E' for decimal. */
  if (isHex ? (trio_to_upper(ch) == 'P') : (trio_to_upper(ch) == 'E'))
  {
    /* Exponent */
    doubleString[offset++] = (char)ch;
    self->InStream(self, &ch);
    if ((ch == '+') || (ch == '-'))
    {
      doubleString[offset++] = (char)ch;
      self->InStream(self, &ch);
    }
    while (isdigit(ch) && (offset - start < width))
    {
      doubleString[offset++] = (char)ch;
      self->InStream(self, &ch);
    }
  }

  /* Nothing collected past the sign means malformed input. */
  if ((offset == start) || (*doubleString == NIL))
    return FALSE;

  doubleString[offset] = 0;

  if (flags & FLAGS_LONGDOUBLE)
  {
    if (!target)
      return FALSE;
    *((trio_long_double_t*)target) = trio_to_long_double(doubleString, NULL);
  }
  else if (flags & FLAGS_LONG)
  {
    if (!target)
      return FALSE;
    *((double*)target) = trio_to_double(doubleString, NULL);
  }
  else
  {
    if (!target)
      return FALSE;
    *((float*)target) = trio_to_float(doubleString, NULL);
  }
  return TRUE;
}
|
Safe
|
[
"CWE-190",
"CWE-125"
] |
FreeRDP
|
05cd9ea2290d23931f615c1b004d4b2e69074e27
|
3.361507972910537e+38
| 196 |
Fixed TrioParse and trio_length limts.
CVE-2020-4030 thanks to @antonio-morales for finding this.
| 0 |
/* Clear a rectangular region of the display, choosing the cheapest
 * terminal capability available (CL full clear, CD clear-to-end-of-
 * display, CCD, CB clear-to-beginning-of-line, CE clear-to-end-of-line)
 * and falling back to per-line clears.  (x1,y1)-(x2,y2) delimit the
 * region; xs/xe give the left/right bounds of the interior lines (-1
 * means "same as x1/x2").  'bce' is the background-color-erase color;
 * when 'uselayfn' is set, lines owned by a layer are cleared through
 * the layer's own clear function so it can repaint correctly. */
ClearArea(x1, y1, xs, xe, x2, y2, bce, uselayfn)
int x1, y1, xs, xe, x2, y2, bce, uselayfn;
{
  int y, xxe;
  struct canvas *cv;
  struct viewport *vp;

  debug2("Clear %d,%d", x1, y1);
  debug2(" %d-%d", xs, xe);
  debug2(" %d,%d", x2, y2);
  debug2(" uselayfn=%d bce=%d\n", uselayfn, bce);
  ASSERT(display);
  /* Clamp column == width down to the last real column. */
  if (x1 == D_width)
    x1--;
  if (x2 == D_width)
    x2--;
  if (xs == -1)
    xs = x1;
  if (xe == -1)
    xe = x2;
  if (D_UT)	/* Safe to erase ? */
    SetRendition(&mchar_null);
#ifdef COLOR
  if (D_BE)
    SetBackColor(bce);
#endif
  /* Erasing over the magic bottom-right cell cancels the pending
   * last-position trick. */
  if (D_lp_missing && y1 <= D_bot && xe >= D_width - 1)
    {
      if (y2 > D_bot || (y2 == D_bot && x2 >= D_width - 1))
	D_lp_missing = 0;
    }
  /* Whole-tail clear: try the cheap full-screen / clear-to-end caps. */
  if (x2 == D_width - 1 && (xs == 0 || y1 == y2) && xe == D_width - 1 && y2 == D_height - 1 && (!bce || D_BE))
    {
#ifdef AUTO_NUKE
      if (x1 == 0 && y1 == 0 && D_auto_nuke)
	NukePending();
#endif
      if (x1 == 0 && y1 == 0 && D_CL)
	{
	  AddCStr(D_CL);
	  D_y = D_x = 0;
	  return;
	}
      /*
       * Workaround a hp700/22 terminal bug. Do not use CD where CE
       * is also appropriate.
       */
      if (D_CD && (y1 < y2 || !D_CE))
	{
	  GotoPos(x1, y1);
	  AddCStr(D_CD);
	  return;
	}
    }
  if (x1 == 0 && xs == 0 && (xe == D_width - 1 || y1 == y2) && y1 == 0 && D_CCD && (!bce || D_BE))
    {
      GotoPos(x1, y1);
      AddCStr(D_CCD);
      return;
    }
  /* Line-by-line fallback; xxe is the right edge of the current line. */
  xxe = xe;
  for (y = y1; y <= y2; y++, x1 = xs)
    {
      if (y == y2)
	xxe = x2;
      if (x1 == 0 && D_CB && (xxe != D_width - 1 || (D_x == xxe && D_y == y)) && (!bce || D_BE))
	{
	  GotoPos(xxe, y);
	  AddCStr(D_CB);
	  continue;
	}
      if (xxe == D_width - 1 && D_CE && (!bce || D_BE))
	{
	  GotoPos(x1, y);
	  AddCStr(D_CE);
	  continue;
	}
      if (uselayfn)
	{
	  /* Find the viewport (if any) covering this span so the owning
	   * layer can clear it itself. */
	  vp = 0;
	  for (cv = D_cvlist; cv; cv = cv->c_next)
	    {
	      if (y < cv->c_ys || y > cv->c_ye || xxe < cv->c_xs || x1 > cv->c_xe)
		continue;
	      for (vp = cv->c_vplist; vp; vp = vp->v_next)
		if (y >= vp->v_ys && y <= vp->v_ye && xxe >= vp->v_xs && x1 <= vp->v_xe)
		  break;
	      if (vp)
		break;
	    }
	  if (cv && cv->c_layer && x1 >= vp->v_xs && xxe <= vp->v_xe &&
	      y - vp->v_yoff >= 0 && y - vp->v_yoff < cv->c_layer->l_height &&
	      xxe - vp->v_xoff >= 0 && x1 - vp->v_xoff < cv->c_layer->l_width)
	    {
	      /* Temporarily restrict the layer's canvas list to this one
	       * canvas so LayClearLine only touches our viewport. */
	      struct layer *oldflayer = flayer;
	      struct canvas *cvlist, *cvlnext;
	      flayer = cv->c_layer;
	      cvlist = flayer->l_cvlist;
	      cvlnext = cv->c_lnext;
	      flayer->l_cvlist = cv;
	      cv->c_lnext = 0;
	      LayClearLine(y - vp->v_yoff, x1 - vp->v_xoff, xxe - vp->v_xoff, bce);
	      flayer->l_cvlist = cvlist;
	      cv->c_lnext = cvlnext;
	      flayer = oldflayer;
	      continue;
	    }
	}
      ClearLine((struct mline *)0, y, x1, xxe, bce);
    }
}
|
Safe
|
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
|
1.0700462284395532e+38
| 111 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <amade@asmblr.net
| 0 |
psutil_net_if_stats(PyObject* self, PyObject* args) {
    char *nic_name;
    int sock = -1;   /* -1 == "no socket"; 0 is a valid fd */
    int ret;
    int mtu;
    struct ifreq ifr;
    PyObject *py_is_up = NULL;
    PyObject *py_retlist = NULL;

    if (! PyArg_ParseTuple(args, "s", &nic_name))
        return NULL;

    sock = socket(AF_INET, SOCK_DGRAM, 0);
    if (sock == -1)
        goto error;

    /* Bounded copy with guaranteed NUL termination: strncpy alone may
     * leave ifr_name unterminated when nic_name >= IFNAMSIZ chars. */
    strncpy(ifr.ifr_name, nic_name, sizeof(ifr.ifr_name) - 1);
    ifr.ifr_name[sizeof(ifr.ifr_name) - 1] = '\0';

    // is up?
    ret = ioctl(sock, SIOCGIFFLAGS, &ifr);
    if (ret == -1)
        goto error;
    if ((ifr.ifr_flags & IFF_UP) != 0)
        py_is_up = Py_True;
    else
        py_is_up = Py_False;
    Py_INCREF(py_is_up);

    // MTU
    ret = ioctl(sock, SIOCGIFMTU, &ifr);
    if (ret == -1)
        goto error;
    mtu = ifr.ifr_mtu;

    close(sock);
    py_retlist = Py_BuildValue("[Oi]", py_is_up, mtu);
    if (!py_retlist)
        goto error;
    Py_DECREF(py_is_up);
    return py_retlist;

error:
    /* Capture errno for the exception BEFORE close() can clobber it.
     * (py_is_up is always Py_True/Py_False, so the XDECREF runs no
     * destructor that could touch errno either.) */
    PyErr_SetFromErrno(PyExc_OSError);
    Py_XDECREF(py_is_up);
    if (sock != -1)   /* was 'sock != 0': a failed socket() (-1) reached close(-1) */
        close(sock);
    return NULL;
}
|
Safe
|
[
"CWE-415"
] |
psutil
|
7d512c8e4442a896d56505be3e78f1156f443465
|
6.88895484830856e+37
| 48 |
Use Py_CLEAR instead of Py_DECREF to also set the variable to NULL (#1616)
These files contain loops that convert system data into python objects
and during the process they create objects and dereference their
refcounts after they have been added to the resulting list.
However, in case of errors during the creation of those python objects,
the refcount to previously allocated objects is dropped again with
Py_XDECREF, which should be a no-op in case the paramater is NULL. Even
so, in most of these loops the variables pointing to the objects are
never set to NULL, even after Py_DECREF is called at the end of the loop
iteration. This means, after the first iteration, if an error occurs
those python objects will get their refcount dropped two times,
resulting in a possible double-free.
| 0 |
/* Convenience wrapper: resolve a DEX class's name via its class_id.
 * NOTE(review): ownership of the returned string is defined by
 * dex_class_name_byid (presumably a caller-owned heap string) — confirm. */
static char *dex_class_name(RBinDexObj *bin, RBinDexClass *c) {
	return dex_class_name_byid (bin, c->class_id);
}
|
Safe
|
[
"CWE-125"
] |
radare2
|
ead645853a63bf83d8386702cad0cf23b31d7eeb
|
2.9098989708199147e+38
| 3 |
fix #6857
| 0 |
}
void dynstr_append_sorted(DYNAMIC_STRING* ds, DYNAMIC_STRING *ds_input,
bool keep_header)
{
unsigned i;
char *start= ds_input->str;
DYNAMIC_ARRAY lines;
DBUG_ENTER("dynstr_append_sorted");
if (!*start)
DBUG_VOID_RETURN; /* No input */
my_init_dynamic_array(&lines, sizeof(const char*), 32, 32);
if (keep_header)
{
/* First line is result header, skip past it */
while (*start && *start != '\n')
start++;
start++; /* Skip past \n */
dynstr_append_mem(ds, ds_input->str, start - ds_input->str);
}
/* Insert line(s) in array */
while (*start)
{
char* line_end= (char*)start;
/* Find end of line */
while (*line_end && *line_end != '\n')
line_end++;
*line_end= 0;
/* Insert pointer to the line in array */
if (insert_dynamic(&lines, (uchar*) &start))
die("Out of memory inserting lines to sort");
start= line_end+1;
}
/* Sort array */
qsort(lines.buffer, lines.elements,
sizeof(char**), (qsort_cmp)comp_lines);
/* Create new result */
for (i= 0; i < lines.elements ; i++)
{
const char **line= dynamic_element(&lines, i, const char**);
dynstr_append(ds, *line);
dynstr_append(ds, "\n");
}
delete_dynamic(&lines);
|
Safe
|
[] |
server
|
01b39b7b0730102b88d8ea43ec719a75e9316a1e
|
2.661894063985631e+37
| 54 |
mysqltest: don't eat new lines in --exec
pass them through as is
| 0 |
epoch_t OSDService::get_peer_epoch(int peer)
{
  // Last osdmap epoch we believe this peer has seen; 0 means unknown.
  Mutex::Locker l(peer_map_epoch_lock);
  map<int,epoch_t>::iterator it = peer_map_epoch.find(peer);
  return (it == peer_map_epoch.end()) ? 0 : it->second;
}
|
Safe
|
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
|
2.859627469758981e+38
| 8 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <sage@redhat.com>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
| 0 |
// Computes the gradient of a 2-D convolution with respect to its input,
// using an im2col/col2im-based MatMul formulation on CPU.  Validates the
// shapes, then either runs one parallel tensor contraction per image or
// shards batches of single-threaded matmuls across the thread pool,
// depending on the per-thread working-set size.
void Compute(OpKernelContext* context) override {
  const Tensor& input_sizes = context->input(0);
  const Tensor& filter = context->input(1);
  const Tensor& out_backprop = context->input(2);

  TensorShape input_shape;
  OP_REQUIRES_OK(context,
                 Conv2DBackpropComputeInputShape(input_sizes, filter.shape(),
                                                 out_backprop.shape(),
                                                 data_format_, &input_shape));

  ConvBackpropDimensions dims;
  OP_REQUIRES_OK(context,
                 ConvBackpropComputeDimensionsV2(
                     "Conv2DCustomBackpropInput", /*num_spatial_dims=*/2,
                     input_shape, filter.shape(), out_backprop.shape(),
                     /*dilations=*/{1, 1, 1, 1}, strides_, padding_,
                     explicit_paddings_, data_format_, &dims));

  // Depth consistency between the computed dims and the filter tensor.
  OP_REQUIRES(context, dims.in_depth == filter.shape().dim_size(2),
              errors::InvalidArgument("Computed input depth ", dims.in_depth,
                                      " doesn't match filter input depth ",
                                      filter.shape().dim_size(2)));
  OP_REQUIRES(
      context, dims.out_depth == filter.shape().dim_size(3),
      errors::InvalidArgument("Computed output depth ", dims.out_depth,
                              " doesn't match filter output depth ",
                              filter.shape().dim_size(3)));

  Tensor* in_backprop = nullptr;
  OP_REQUIRES_OK(context,
                 context->allocate_output(0, input_shape, &in_backprop));

  // If there is nothing to compute, return.
  if (input_shape.num_elements() == 0) {
    return;
  }

// TODO(ezhulenev): Remove custom kernel and move XSMM support to
// LaunchConv2DBackpropInputOp functor.
#if defined TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS && \
    defined TENSORFLOW_USE_LIBXSMM_BACKWARD_CONVOLUTIONS
  int64 pad_top, pad_bottom;
  int64 pad_left, pad_right;
  OP_REQUIRES_OK(
      context,
      GetWindowedOutputSizeVerbose(
          dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size,
          dims.spatial_dims[0].stride, padding_,
          &dims.spatial_dims[0].output_size, &pad_top, &pad_bottom));
  OP_REQUIRES_OK(
      context,
      GetWindowedOutputSizeVerbose(
          dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size,
          dims.spatial_dims[1].stride, padding_,
          &dims.spatial_dims[1].output_size, &pad_left, &pad_right));

  // XSMM fast path only supports symmetric padding.
  if (pad_left == pad_right && pad_top == pad_bottom) {
    if (LaunchXsmmBackwardInputConvolution<Device, T>()(
            context, context->eigen_device<Device>(),
            in_backprop->tensor<T, 4>(), filter.tensor<T, 4>(),
            out_backprop.tensor<T, 4>(), dims.spatial_dims[0].input_size,
            dims.spatial_dims[1].input_size,
            static_cast<int>(dims.spatial_dims[0].stride),
            static_cast<int>(dims.spatial_dims[1].stride),
            static_cast<int>(pad_top), static_cast<int>(pad_left),
            data_format_)) {
      return;
    }
  }
#else
  int64 pad_top, pad_bottom;
  int64 pad_left, pad_right;
#endif
  // Explicit padding overrides the computed values below.
  if (padding_ == Padding::EXPLICIT) {
    pad_top = explicit_paddings_[2];
    pad_bottom = explicit_paddings_[3];
    pad_left = explicit_paddings_[4];
    pad_right = explicit_paddings_[5];
  }
  OP_REQUIRES_OK(
      context,
      GetWindowedOutputSizeVerbose(
          dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size,
          dims.spatial_dims[0].stride, padding_,
          &dims.spatial_dims[0].output_size, &pad_top, &pad_bottom));
  OP_REQUIRES_OK(
      context,
      GetWindowedOutputSizeVerbose(
          dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size,
          dims.spatial_dims[1].stride, padding_,
          &dims.spatial_dims[1].output_size, &pad_left, &pad_right));

  // The total dimension size of each kernel.
  const int filter_total_size = dims.spatial_dims[0].filter_size *
                                dims.spatial_dims[1].filter_size *
                                dims.in_depth;
  // The output image size is the spatial size of the output.
  const int output_image_size =
      dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size;

  // TODO(andydavis) Get L2/L3 cache sizes from device.
  const size_t l2_cache_size = 256LL << 10;
  const size_t l3_cache_size = 30LL << 20;

  // Use L3 cache size as target working set size.
  const size_t target_working_set_size = l3_cache_size / sizeof(T);

  // Calculate size of matrices involved in MatMul: C = A x B.
  const size_t size_A = output_image_size * dims.out_depth;

  const size_t size_B = filter_total_size * dims.out_depth;

  const size_t size_C = output_image_size * filter_total_size;

  const size_t work_unit_size = size_A + size_B + size_C;

  auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());

  // Calculate per-thread work unit size.
  const size_t thread_work_unit_size =
      work_unit_size / worker_threads.num_threads;

  // Set minimum per-thread work unit size to size of L2 cache.
  const size_t min_thread_work_unit_size = l2_cache_size / sizeof(T);

  // Use parallel tensor contractions if there is no batching, or if the
  // minimum per-thread work unit size threshold has been exceeded.
  // Otherwise, revert to multiple single-threaded matmul ops running in
  // parallel to keep all threads busy.
  // TODO(andydavis) Explore alternatives to branching the code in this way
  // (i.e. run multiple, parallel tensor contractions in another thread pool).
  const bool use_parallel_contraction =
      dims.batch_size == 1 ||
      thread_work_unit_size >= min_thread_work_unit_size;

  // Guards the shard_size division below against a zero work unit.
  OP_REQUIRES(
      context, work_unit_size > 0,
      errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
                              "must all have at least 1 element"));

  const size_t shard_size =
      use_parallel_contraction
          ? 1
          : (target_working_set_size + work_unit_size - 1) / work_unit_size;

  // Scratch buffer holding 'shard_size' col2im input matrices.
  Tensor col_buffer;
  OP_REQUIRES_OK(context,
                 context->allocate_temp(
                     DataTypeToEnum<T>::value,
                     TensorShape({static_cast<int64>(shard_size),
                                  static_cast<int64>(output_image_size),
                                  static_cast<int64>(filter_total_size)}),
                     &col_buffer));

  // The input offset corresponding to a single input image.
  const int input_offset = dims.spatial_dims[0].input_size *
                           dims.spatial_dims[1].input_size * dims.in_depth;

  // The output offset corresponding to a single output image.
  const int output_offset = dims.spatial_dims[0].output_size *
                            dims.spatial_dims[1].output_size * dims.out_depth;

  const T* filter_data = filter.template flat<T>().data();
  T* col_buffer_data = col_buffer.template flat<T>().data();
  const T* out_backprop_data = out_backprop.template flat<T>().data();

  auto in_backprop_flat = in_backprop->template flat<T>();
  T* input_backprop_data = in_backprop_flat.data();
  // col2im accumulates, so the gradient buffer must start zeroed.
  in_backprop_flat.device(context->eigen_device<Device>()) =
      in_backprop_flat.constant(T(0));

  if (use_parallel_contraction) {
    typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>,
                             Eigen::Unaligned>
        TensorMap;
    typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>,
                             Eigen::Unaligned>
        ConstTensorMap;

    // Initialize contraction dims (we need to transpose 'B' below).
    Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_dims;
    contract_dims[0].first = 1;
    contract_dims[0].second = 1;

    for (int image_id = 0; image_id < dims.batch_size; ++image_id) {
      // Compute gradient into col_buffer.
      TensorMap C(col_buffer_data, output_image_size, filter_total_size);

      ConstTensorMap A(out_backprop_data + output_offset * image_id,
                       output_image_size, dims.out_depth);
      ConstTensorMap B(filter_data, filter_total_size, dims.out_depth);

      C.device(context->eigen_cpu_device()) = A.contract(B, contract_dims);

      Col2im<T>(
          col_buffer_data, dims.in_depth, dims.spatial_dims[0].input_size,
          dims.spatial_dims[1].input_size, dims.spatial_dims[0].filter_size,
          dims.spatial_dims[1].filter_size, pad_top, pad_left, pad_bottom,
          pad_right, dims.spatial_dims[0].stride, dims.spatial_dims[1].stride,
          input_backprop_data);

      input_backprop_data += input_offset;
    }
  } else {
    // Shard the batch: each shard does a single-threaded matmul + col2im
    // for its slice, with all shards running in parallel.
    for (int image_id = 0; image_id < dims.batch_size;
         image_id += shard_size) {
      const int shard_limit =
          std::min(static_cast<int>(shard_size),
                   static_cast<int>(dims.batch_size) - image_id);

      auto shard = [&context, &dims, &pad_top, &pad_left, &pad_bottom,
                    &pad_right, &output_image_size, &filter_total_size,
                    &input_backprop_data, &col_buffer_data,
                    &out_backprop_data, &filter_data, &input_offset,
                    &output_offset, &size_C](int64 start, int64 limit) {
        for (int shard_id = start; shard_id < limit; ++shard_id) {
          T* im2col_buf = col_buffer_data + shard_id * size_C;
          T* input_data = input_backprop_data + shard_id * input_offset;
          const T* out_data = out_backprop_data + shard_id * output_offset;

          Conv2DCustomBackpropInputMatMulFunctor<T>()(
              context, out_data, filter_data, filter_total_size,
              output_image_size, dims.out_depth, im2col_buf);

          Col2im<T>(im2col_buf, dims.in_depth,
                    dims.spatial_dims[0].input_size,
                    dims.spatial_dims[1].input_size,
                    dims.spatial_dims[0].filter_size,
                    dims.spatial_dims[1].filter_size, pad_top, pad_left,
                    pad_bottom, pad_right, dims.spatial_dims[0].stride,
                    dims.spatial_dims[1].stride, input_data);
        }
      };
      Shard(worker_threads.num_threads, worker_threads.workers, shard_limit,
            work_unit_size, shard);

      input_backprop_data += input_offset * shard_limit;
      out_backprop_data += output_offset * shard_limit;
    }
  }
}
|
Safe
|
[
"CWE-369"
] |
tensorflow
|
2be2cdf3a123e231b16f766aa0e27d56b4606535
|
1.6518892515972335e+38
| 241 |
Prevent yet another division by zero
PiperOrigin-RevId: 369343977
Change-Id: I1a60da4cf512e60fd91e069c16e026544632fe7f
| 0 |
/* Fetch the component ranges for a CIEBasedABC color space.
 * Fills ptr[0..5] with three {low, high} pairs, taken from the space's
 * RangeABC entry when present, otherwise from the [0 1] defaults.
 * Returns 0 on success or a negative error code. */
static int cieabcrange(i_ctx_t * i_ctx_p, ref *space, float *ptr)
{
    ref CIEdict, *tempref;
    int code = array_get(imemory, space, 1, &CIEdict);

    if (code < 0)
        return code;

    code = dict_find_string(&CIEdict, "RangeABC", &tempref);
    if (code <= 0 || r_has_type(tempref, t_null)) {
        /* No usable RangeABC: fall back to the CIEBasedABC defaults. */
        memcpy(ptr, default_0_1, 6 * sizeof(float));
        return 0;
    }

    code = get_cie_param_array(imemory, tempref, 6, ptr);
    return (code < 0) ? code : 0;
}
|
Safe
|
[] |
ghostpdl
|
b326a71659b7837d3acde954b18bda1a6f5e9498
|
7.763515150865687e+37
| 21 |
Bug 699655: Properly check the return value....
...when getting a value from a dictionary
| 0 |
/* Propagate a trigger notification: walk every unit that has a
 * UNIT_TRIGGERED_BY dependency on 'u' and invoke its per-type
 * trigger_notify() vtable hook, passing the triggering unit 'u'. */
void unit_trigger_notify(Unit *u) {
Unit *other;
Iterator i;
void *v;
assert(u);
/* Not every unit type implements trigger_notify; skip those that don't. */
HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
if (UNIT_VTABLE(other)->trigger_notify)
UNIT_VTABLE(other)->trigger_notify(other, u);
}
|
Safe
|
[
"CWE-269"
] |
systemd
|
bf65b7e0c9fc215897b676ab9a7c9d1c688143ba
|
2.7058681759209605e+38
| 11 |
core: imply NNP and SUID/SGID restriction for DynamicUser=yes service
Let's be safe, rather than sorry. This way DynamicUser=yes services can
neither take benefit of, nor create SUID/SGID binaries.
Given that DynamicUser= is a recent addition only we should be able to
get away with turning this on, even though this is strictly speaking a
binary compatibility breakage.
| 0 |
/* Return the minimum record length for the partitioned table.
 * Since partitions may differ, report the LARGEST per-partition minimum so
 * the value is valid for every underlying handler.
 * m_file is a NULL-terminated array with at least one handler. */
uint ha_partition::min_record_length(uint options) const
{
  uint longest= (*m_file)->min_record_length(options);

  for (handler **part= m_file + 1; *part; part++)
  {
    uint len= (*part)->min_record_length(options);
    if (len > longest)
      longest= len;
  }
  return longest;
}
|
Safe
|
[] |
mysql-server
|
be901b60ae59c93848c829d1b0b2cb523ab8692e
|
3.0694359751650347e+37
| 10 |
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT.
Analysis
========
CREATE TABLE of InnoDB table with a partition name
which exceeds the path limit can cause the server
to exit.
During the preparation of the partition name,
there was no check to identify whether the complete
path name for partition exceeds the max supported
path length, causing the server to exit during
subsequent processing.
Fix
===
During the preparation of partition name, check and report
an error if the partition path name exceeds the maximum path
name limit.
This is a 5.5 patch.
| 0 |
/* Look up the route that a received ICMPv6 redirect applies to.
 * Returns a route with a reference held; falls back to
 * net->ipv6.ip6_null_entry when no acceptable route is found or the
 * matched route carries an error. */
static struct rt6_info *__ip6_route_redirect(struct net *net,
struct fib6_table *table,
struct flowi6 *fl6,
int flags)
{
/* Caller embeds the redirect's gateway address alongside the flow info. */
struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
struct rt6_info *rt;
struct fib6_node *fn;
/* Get the "current" route for this destination and
 * check if the redirect has come from approriate router.
 *
 * RFC 4861 specifies that redirects should only be
 * accepted if they come from the nexthop to the target.
 * Due to the way the routes are chosen, this notion
 * is a bit fuzzy and one might need to check all possible
 * routes.
 */
read_lock_bh(&table->tb6_lock);
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
/* Skip routes that have already expired. */
if (rt6_check_expired(rt))
continue;
/* An error route terminates the scan; handled below. */
if (rt->dst.error)
break;
/* A redirect must come via a router, i.e. a gateway route. */
if (!(rt->rt6i_flags & RTF_GATEWAY))
continue;
/* The route must go out the interface the redirect arrived on. */
if (fl6->flowi6_oif != rt->dst.dev->ifindex)
continue;
/* Only accept the redirect if it came from this route's nexthop. */
if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
continue;
break;
}
if (!rt)
rt = net->ipv6.ip6_null_entry;
else if (rt->dst.error) {
rt = net->ipv6.ip6_null_entry;
} else if (rt == net->ipv6.ip6_null_entry) {
/* Nothing matched at this node; back up the fib tree and retry. */
fn = fib6_backtrack(fn, &fl6->saddr);
if (fn)
goto restart;
}
/* Take a reference before dropping the table lock. */
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
return rt;
};
|
Safe
|
[
"CWE-17"
] |
linux-stable
|
9d289715eb5c252ae15bd547cb252ca547a3c4f2
|
8.140535337180838e+37
| 52 |
ipv6: stop sending PTB packets for MTU < 1280
Reduce the attack vector and stop generating IPv6 Fragment Header for
paths with an MTU smaller than the minimum required IPv6 MTU
size (1280 byte) - called atomic fragments.
See IETF I-D "Deprecating the Generation of IPv6 Atomic Fragments" [1]
for more information and how this "feature" can be misused.
[1] https://tools.ietf.org/html/draft-ietf-6man-deprecate-atomfrag-generation-00
Signed-off-by: Fernando Gont <fgont@si6networks.com>
Signed-off-by: Hagen Paul Pfeifer <hagen@jauu.net>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/* Compute the serialized size of a SoundMediaHeaderBox ('smhd').
 * Adds the 4 payload bytes (balance + reserved) on top of the base box
 * size and zeroes the reserved field before writing. */
GF_Err smhd_box_size(GF_Box *s)
{
	GF_SoundMediaHeaderBox *smhd = (GF_SoundMediaHeaderBox *)s;

	smhd->reserved = 0;
	smhd->size += 4; /* balance (2 bytes) + reserved (2 bytes) */
	return GF_OK;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
|
3.2642029812433815e+38
| 8 |
fixed #1587
| 0 |
/* Handle a client WOLFSSH_FTP_READDIR request.
 *
 * Parses the 4-byte handle-length field plus the 8-byte directory handle
 * from 'data' (total length 'maxSz'), locates the matching open directory
 * in 'dirList', reads as many entries as possible and replies either with
 * a name list or with an EOF status once the directory is exhausted.
 *
 * Returns WS_SUCCESS on success or a negative WS_* error code.
 *
 * Fixes two size-check issues in the incoming-message parsing:
 *  - the first ato32() used to read 4 bytes from 'data' before any check
 *    that maxSz is large enough (out-of-bounds read on a short packet);
 *  - "sz + idx > maxSz" could wrap around for a huge attacker-supplied sz,
 *    so the bound is now expressed as "sz > maxSz - idx". */
int wolfSSH_SFTP_RecvReadDir(WOLFSSH* ssh, int reqId, byte* data, word32 maxSz)
{
    WDIR dir;
    word32 handle[2] = {0, 0};
    word32 sz;
    word32 idx = 0;
    int count = 0;
    int ret;
    WS_SFTPNAME* name = NULL;
    WS_SFTPNAME* list = NULL;
    word32 outSz = 0;
    DIR_HANDLE* cur = dirList;
    char* dirName = NULL;
    byte* out;

    if (ssh == NULL) {
        return WS_BAD_ARGUMENT;
    }

    WLOG(WS_LOG_SFTP, "Receiving WOLFSSH_FTP_READDIR");

#ifdef USE_WINDOWS_API
    dir = INVALID_HANDLE_VALUE;
#endif

    /* Make sure the 4-byte length field itself is present before reading
     * it; otherwise the ato32() below reads past the end of 'data'. */
    if (maxSz < UINT32_SZ) {
        return WS_BUFFER_E;
    }

    /* get directory handle */
    ato32(data + idx, &sz); idx += UINT32_SZ;
    /* Bound the handle payload by the remaining bytes. Using subtraction
     * (maxSz - idx, with idx <= maxSz guaranteed above) avoids the integer
     * wrap that "sz + idx" could suffer for sz near UINT32 max. */
    if (sz > WOLFSSH_MAX_HANDLE || sz > maxSz - idx) {
        return WS_BUFFER_E;
    }
    if (sz != (sizeof(word32) * 2)) {
        WLOG(WS_LOG_SFTP, "Unexpected handle size");
        return WS_FATAL_ERROR;
    }
    ato32(data + idx, &handle[0]);
    ato32(data + idx + UINT32_SZ, &handle[1]);

    /* find DIR given handle */
    while (cur != NULL) {
        if (cur->id[0] == handle[0] && cur->id[1] == handle[1]) {
            dir = cur->dir;
            dirName = cur->dirName;
            break;
        }
        cur = cur->next;
    }
    if (cur == NULL) {
        /* unable to find handle */
        WLOG(WS_LOG_SFTP, "Unable to find handle");
        return WS_FATAL_ERROR;
    }

    /* get directory information */
    outSz += UINT32_SZ + WOLFSSH_SFTP_HEADER; /* hold header+number of files */
    if (!cur->isEof) {
        do {
            /* NOTE(review): the wolfSSH_SFTPNAME_new() result is passed to
             * readdir without a NULL check -- confirm allocation failure is
             * handled downstream or add an explicit check. */
            name = wolfSSH_SFTPNAME_new(ssh->ctx->heap);
            ret = wolfSSH_SFTPNAME_readdir(ssh, &dir, name, dirName);
            if (ret == WS_SUCCESS || ret == WS_NEXT_ERROR) {
                count++;
                outSz += name->fSz + name->lSz + (UINT32_SZ * 2);
                outSz += SFTP_AtributesSz(ssh, &name->atrb);
                name->next = list;
                list = name;
            }
            else {
                wolfSSH_SFTPNAME_free(name);
            }
        } while (ret == WS_SUCCESS);
    }

    if (list == NULL || cur->isEof) {
        /* Directory exhausted: reply with an EOF status message. The first
         * call only sizes the message, the second fills the buffer. */
        if (wolfSSH_SFTP_CreateStatus(ssh, WOLFSSH_FTP_EOF, reqId,
                    "No More Files In Directory", "English", NULL, &outSz)
                != WS_SIZE_ONLY) {
            return WS_FATAL_ERROR;
        }
        out = (byte*)WMALLOC(outSz, ssh->ctx->heap, DYNTYPE_BUFFER);
        if (out == NULL) {
            return WS_MEMORY_E;
        }
        if (wolfSSH_SFTP_CreateStatus(ssh, WOLFSSH_FTP_EOF, reqId,
                    "No More Files In Directory", "English", out, &outSz)
                != WS_SUCCESS) {
            WFREE(out, ssh->ctx->heap, DYNTYPE_BUFFER);
            return WS_FATAL_ERROR;
        }
        /* set send out buffer, "out" is taken by ssh */
        wolfSSH_SFTP_RecvSetSend(ssh, out, outSz);
        return WS_SUCCESS;
    }

    /* if next state would cause an error then set EOF flag for when called
     * again */
    cur->isEof = 1;

    out = (byte*)WMALLOC(outSz, ssh->ctx->heap, DYNTYPE_BUFFER);
    if (out == NULL) {
        return WS_MEMORY_E;
    }
    if (wolfSSH_SFTP_SendName(ssh, list, count, out, &outSz, reqId)
            != WS_SUCCESS) {
        WFREE(out, ssh->ctx->heap, DYNTYPE_BUFFER);
        return WS_FATAL_ERROR;
    }
    wolfSSH_SFTPNAME_list_free(list);

    /* set send out buffer, "out" is taken by ssh */
    wolfSSH_SFTP_RecvSetSend(ssh, out, outSz);
    return WS_SUCCESS;
}
|
Safe
|
[
"CWE-190"
] |
wolfssh
|
edb272e35ee57e7b89f3e127222c6981b6a1e730
|
8.375088518585155e+37
| 113 |
ASAN SFTP Fixes
When decoding SFTP messages, fix the size checks so they don't wrap. (ZD12766)
| 0 |
/* Return non-zero when 'event' has opted into ksymbol records
 * (attr.ksymbol set), i.e. it should receive these side-band events. */
static int perf_event_ksymbol_match(struct perf_event *event)
{
return event->attr.ksymbol;
}
|
Safe
|
[
"CWE-401"
] |
tip
|
7bdb157cdebbf95a1cd94ed2e01b338714075d00
|
7.564972569933282e+37
| 4 |
perf/core: Fix a memory leak in perf_event_parse_addr_filter()
As shown through runtime testing, the "filename" allocation is not
always freed in perf_event_parse_addr_filter().
There are three possible ways that this could happen:
- It could be allocated twice on subsequent iterations through the loop,
- or leaked on the success path,
- or on the failure path.
Clean up the code flow to make it obvious that 'filename' is always
freed in the reallocation path and in the two return paths as well.
We rely on the fact that kfree(NULL) is NOP and filename is initialized
with NULL.
This fixes the leak. No other side effects expected.
[ Dan Carpenter: cleaned up the code flow & added a changelog. ]
[ Ingo Molnar: updated the changelog some more. ]
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Signed-off-by: "kiyin(尹亮)" <kiyin@tencent.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: "Srivatsa S. Bhat" <srivatsa@csail.mit.edu>
Cc: Anthony Liguori <aliguori@amazon.com>
--
kernel/events/core.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
| 0 |
/* Decode one global-motion warp parameter (AV1 global_param()) for
 * reference frame 'ref', coefficient 'idx', and store the value scaled to
 * WARPEDMODEL_PREC_BITS precision into state->GmParams.coefs[ref][idx]. */
static void av1_read_global_param(AV1State *state, GF_BitStream *bs, u8 type, u8 ref, u8 idx)
{
u8 absBits = GM_ABS_ALPHA_BITS;
u8 precBits = GM_ALPHA_PREC_BITS;
/* The translation components (idx 0/1) use different bit budgets,
 * further reduced when high-precision MVs are disabled. */
if (idx < 2) {
if (type == AV1_GMC_TRANSLATION) {
absBits = GM_ABS_TRANS_ONLY_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0);
precBits = GM_TRANS_ONLY_PREC_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0);
}
else {
absBits = GM_ABS_TRANS_BITS;
precBits = GM_TRANS_PREC_BITS;
}
}
/* Number of bits needed to raise the coded value to model precision. */
s32 precDiff = WARPEDMODEL_PREC_BITS - precBits;
/* For idx % 3 == 2 the value is coded as an offset from identity (1.0),
 * per the AV1 spec's global_param() -- hence the round/sub terms. */
s32 round = (idx % 3) == 2 ? (1 << WARPEDMODEL_PREC_BITS) : 0;
s32 sub = (idx % 3) == 2 ? (1 << precBits) : 0;
s32 mx = (1 << absBits);
/* Previous frame's parameter reduced to coding precision, used as the
 * reference for the signed subexponential decode. */
s32 r = (state->PrevGmParams.coefs[ref][idx] >> precDiff) - sub;
s32 val = av1_decode_signed_subexp_with_ref(bs, -mx, mx + 1, r);
/* Re-expand to full precision; shift the magnitude rather than a
 * negative value (left-shifting a negative s32 is undefined in C). */
if (val < 0) {
val = -val;
state->GmParams.coefs[ref][idx] = (-(val << precDiff) + round);
}
else {
state->GmParams.coefs[ref][idx] = (val << precDiff) + round;
}
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
|
1.0029808729270289e+38
| 29 |
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
| 0 |
/* Claim the whole I/O port space with a reserved resource so that no
 * driver can allocate legacy ioports (paravirt guests have no real I/O
 * space). Returns the request_resource() result: 0 on success. */
int paravirt_disable_iospace(void)
{
return request_resource(&ioport_resource, &reserve_ioports);
}
|
Safe
|
[
"CWE-200"
] |
linux
|
5800dc5c19f34e6e03b5adab1282535cb102fafd
|
8.587467691033538e+37
| 4 |
x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
Nadav reported that on guests we're failing to rewrite the indirect
calls to CALLEE_SAVE paravirt functions. In particular the
pv_queued_spin_unlock() call is left unpatched and that is all over the
place. This obviously wrecks Spectre-v2 mitigation (for paravirt
guests) which relies on not actually having indirect calls around.
The reason is an incorrect clobber test in paravirt_patch_call(); this
function rewrites an indirect call with a direct call to the _SAME_
function, there is no possible way the clobbers can be different
because of this.
Therefore remove this clobber check. Also put WARNs on the other patch
failure case (not enough room for the instruction) which I've not seen
trigger in my (limited) testing.
Three live kernel image disassemblies for lock_sock_nested (as a small
function that illustrates the problem nicely). PRE is the current
situation for guests, POST is with this patch applied and NATIVE is with
or without the patch for !guests.
PRE:
(gdb) disassemble lock_sock_nested
Dump of assembler code for function lock_sock_nested:
0xffffffff817be970 <+0>: push %rbp
0xffffffff817be971 <+1>: mov %rdi,%rbp
0xffffffff817be974 <+4>: push %rbx
0xffffffff817be975 <+5>: lea 0x88(%rbp),%rbx
0xffffffff817be97c <+12>: callq 0xffffffff819f7160 <_cond_resched>
0xffffffff817be981 <+17>: mov %rbx,%rdi
0xffffffff817be984 <+20>: callq 0xffffffff819fbb00 <_raw_spin_lock_bh>
0xffffffff817be989 <+25>: mov 0x8c(%rbp),%eax
0xffffffff817be98f <+31>: test %eax,%eax
0xffffffff817be991 <+33>: jne 0xffffffff817be9ba <lock_sock_nested+74>
0xffffffff817be993 <+35>: movl $0x1,0x8c(%rbp)
0xffffffff817be99d <+45>: mov %rbx,%rdi
0xffffffff817be9a0 <+48>: callq *0xffffffff822299e8
0xffffffff817be9a7 <+55>: pop %rbx
0xffffffff817be9a8 <+56>: pop %rbp
0xffffffff817be9a9 <+57>: mov $0x200,%esi
0xffffffff817be9ae <+62>: mov $0xffffffff817be993,%rdi
0xffffffff817be9b5 <+69>: jmpq 0xffffffff81063ae0 <__local_bh_enable_ip>
0xffffffff817be9ba <+74>: mov %rbp,%rdi
0xffffffff817be9bd <+77>: callq 0xffffffff817be8c0 <__lock_sock>
0xffffffff817be9c2 <+82>: jmp 0xffffffff817be993 <lock_sock_nested+35>
End of assembler dump.
POST:
(gdb) disassemble lock_sock_nested
Dump of assembler code for function lock_sock_nested:
0xffffffff817be970 <+0>: push %rbp
0xffffffff817be971 <+1>: mov %rdi,%rbp
0xffffffff817be974 <+4>: push %rbx
0xffffffff817be975 <+5>: lea 0x88(%rbp),%rbx
0xffffffff817be97c <+12>: callq 0xffffffff819f7160 <_cond_resched>
0xffffffff817be981 <+17>: mov %rbx,%rdi
0xffffffff817be984 <+20>: callq 0xffffffff819fbb00 <_raw_spin_lock_bh>
0xffffffff817be989 <+25>: mov 0x8c(%rbp),%eax
0xffffffff817be98f <+31>: test %eax,%eax
0xffffffff817be991 <+33>: jne 0xffffffff817be9ba <lock_sock_nested+74>
0xffffffff817be993 <+35>: movl $0x1,0x8c(%rbp)
0xffffffff817be99d <+45>: mov %rbx,%rdi
0xffffffff817be9a0 <+48>: callq 0xffffffff810a0c20 <__raw_callee_save___pv_queued_spin_unlock>
0xffffffff817be9a5 <+53>: xchg %ax,%ax
0xffffffff817be9a7 <+55>: pop %rbx
0xffffffff817be9a8 <+56>: pop %rbp
0xffffffff817be9a9 <+57>: mov $0x200,%esi
0xffffffff817be9ae <+62>: mov $0xffffffff817be993,%rdi
0xffffffff817be9b5 <+69>: jmpq 0xffffffff81063aa0 <__local_bh_enable_ip>
0xffffffff817be9ba <+74>: mov %rbp,%rdi
0xffffffff817be9bd <+77>: callq 0xffffffff817be8c0 <__lock_sock>
0xffffffff817be9c2 <+82>: jmp 0xffffffff817be993 <lock_sock_nested+35>
End of assembler dump.
NATIVE:
(gdb) disassemble lock_sock_nested
Dump of assembler code for function lock_sock_nested:
0xffffffff817be970 <+0>: push %rbp
0xffffffff817be971 <+1>: mov %rdi,%rbp
0xffffffff817be974 <+4>: push %rbx
0xffffffff817be975 <+5>: lea 0x88(%rbp),%rbx
0xffffffff817be97c <+12>: callq 0xffffffff819f7160 <_cond_resched>
0xffffffff817be981 <+17>: mov %rbx,%rdi
0xffffffff817be984 <+20>: callq 0xffffffff819fbb00 <_raw_spin_lock_bh>
0xffffffff817be989 <+25>: mov 0x8c(%rbp),%eax
0xffffffff817be98f <+31>: test %eax,%eax
0xffffffff817be991 <+33>: jne 0xffffffff817be9ba <lock_sock_nested+74>
0xffffffff817be993 <+35>: movl $0x1,0x8c(%rbp)
0xffffffff817be99d <+45>: mov %rbx,%rdi
0xffffffff817be9a0 <+48>: movb $0x0,(%rdi)
0xffffffff817be9a3 <+51>: nopl 0x0(%rax)
0xffffffff817be9a7 <+55>: pop %rbx
0xffffffff817be9a8 <+56>: pop %rbp
0xffffffff817be9a9 <+57>: mov $0x200,%esi
0xffffffff817be9ae <+62>: mov $0xffffffff817be993,%rdi
0xffffffff817be9b5 <+69>: jmpq 0xffffffff81063ae0 <__local_bh_enable_ip>
0xffffffff817be9ba <+74>: mov %rbp,%rdi
0xffffffff817be9bd <+77>: callq 0xffffffff817be8c0 <__lock_sock>
0xffffffff817be9c2 <+82>: jmp 0xffffffff817be993 <lock_sock_nested+35>
End of assembler dump.
Fixes: 63f70270ccd9 ("[PATCH] i386: PARAVIRT: add common patching machinery")
Fixes: 3010a0663fd9 ("x86/paravirt, objtool: Annotate indirect calls")
Reported-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: stable@vger.kernel.org
| 0 |
/* A task is "detached" when its exit_signal is -1, i.e. it will not
 * signal its parent on exit and is reaped automatically. */
static inline int task_detached(struct task_struct *p)
{
return p->exit_signal == -1;
}
|
Safe
|
[
"CWE-284",
"CWE-264"
] |
linux
|
8141c7f3e7aee618312fa1c15109e1219de784a7
|
2.0842651290032736e+38
| 4 |
Move "exit_robust_list" into mm_release()
We don't want to get rid of the futexes just at exit() time, we want to
drop them when doing an execve() too, since that gets rid of the
previous VM image too.
Doing it at mm_release() time means that we automatically always do it
when we disassociate a VM map from the task.
Reported-by: pageexec@freemail.hu
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Brad Spengler <spender@grsecurity.net>
Cc: Alex Efros <powerman@powerman.name>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
/* Decide whether an io_uring_register() opcode requires the ring to be
 * quiesced first. A small set of opcodes is safe to apply while the ring
 * is live; everything else must wait for outstanding references to drop. */
static bool io_register_op_must_quiesce(int op)
{
	bool safe_without_quiesce =
		op == IORING_UNREGISTER_FILES ||
		op == IORING_REGISTER_FILES_UPDATE ||
		op == IORING_REGISTER_PROBE ||
		op == IORING_REGISTER_PERSONALITY ||
		op == IORING_UNREGISTER_PERSONALITY;

	return !safe_without_quiesce;
}
|
Safe
|
[] |
linux
|
ff002b30181d30cdfbca316dadd099c3ca0d739c
|
3.251965117317613e+38
| 13 |
io_uring: grab ->fs as part of async preparation
This passes it in to io-wq, so it assumes the right fs_struct when
executing async work that may need to do lookups.
Cc: stable@vger.kernel.org # 5.3+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
/* Test whether a job of type 'j' may be enqueued for unit 'u'. */
bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(u);
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        /* Start-ish jobs are always permitted. Note that we don't check
         * unit_can_start() here: .device units and suchlike are not
         * startable by us but may appear due to external events, and it
         * thus makes sense to permit enqueuing jobs for them. */
        if (j == JOB_VERIFY_ACTIVE || j == JOB_START || j == JOB_NOP)
                return true;

        /* Similar as above. However, perpetual units can never be stopped
         * (neither explicitly nor due to external events), hence it makes
         * no sense to permit enqueuing such a request. */
        if (j == JOB_STOP)
                return !u->perpetual;

        if (j == JOB_RESTART || j == JOB_TRY_RESTART)
                return unit_can_stop(u) && unit_can_start(u);

        if (j == JOB_RELOAD || j == JOB_TRY_RELOAD)
                return unit_can_reload(u);

        if (j == JOB_RELOAD_OR_START)
                return unit_can_reload(u) && unit_can_start(u);

        assert_not_reached("Invalid job type");
}
|
Safe
|
[
"CWE-269"
] |
systemd
|
bf65b7e0c9fc215897b676ab9a7c9d1c688143ba
|
2.0600521493952174e+38
| 34 |
core: imply NNP and SUID/SGID restriction for DynamicUser=yes service
Let's be safe, rather than sorry. This way DynamicUser=yes services can
neither take benefit of, nor create SUID/SGID binaries.
Given that DynamicUser= is a recent addition only we should be able to
get away with turning this on, even though this is strictly speaking a
binary compatibility breakage.
| 0 |
f_trunc(typval_T *argvars, typval_T *rettv)
{
    float_T	val = 0.0;

    /* "trunc({expr})": truncate a Float toward zero. */
    rettv->v_type = VAR_FLOAT;
    if (get_float_arg(argvars, &val) != OK)
    {
	rettv->vval.v_float = 0.0;
	return;
    }
    /* trunc() is not in C90; emulate it with floor()/ceil() so the result
     * always moves toward zero regardless of sign. */
    rettv->vval.v_float = val > 0 ? floor(val) : ceil(val);
}
|
Safe
|
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
|
8.359794426120292e+37
| 11 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
| 0 |
/* SIGINT handler: clear the global 'running' flag so the main loop can
 * terminate cleanly on the next iteration. */
sigint(int sig)
{
running = 0;
}
|
Safe
|
[] |
iodine
|
b715be5cf3978fbe589b03b09c9398d0d791f850
|
9.049038216460536e+37
| 4 |
Fix authentication bypass bug
The client could bypass the password check by continuing after getting error
from the server and guessing the network parameters. The server would still
accept the rest of the setup and also network traffic.
Add checks for normal and raw mode that user has authenticated before allowing
any other communication.
Problem found by Oscar Reparaz.
| 0 |
/* Handle a Create_file_log_event: strip any directory prefix from the
 * event's file name and hand the base name plus the first data block to
 * process_first_event(). Returns its Exit_status. */
Exit_status Load_log_processor::process(Create_file_log_event *ce)
{
  const char *fname= ce->fname;
  const char *base= fname + dirname_length(fname);
  uint base_len= ce->fname_len - (uint) (base - fname);

  return process_first_event(base, base_len, ce->block, ce->block_len,
                             ce->file_id, ce);
}
|
Safe
|
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
|
2.650163512347531e+38
| 8 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
| 0 |
/* Resolve the function-call frame that register state 'reg' belongs to,
 * using the frame number recorded in the register against the verifier's
 * current state. */
static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	return env->cur_state->frame[reg->frameno];
}
|
Safe
|
[
"CWE-125"
] |
linux
|
b799207e1e1816b09e7a5920fbb2d5fcf6edd681
|
3.6831360105434343e+36
| 7 |
bpf: 32-bit RSH verification must truncate input before the ALU op
When I wrote commit 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification"), I
assumed that, in order to emulate 64-bit arithmetic with 32-bit logic, it
is sufficient to just truncate the output to 32 bits; and so I just moved
the register size coercion that used to be at the start of the function to
the end of the function.
That assumption is true for almost every op, but not for 32-bit right
shifts, because those can propagate information towards the least
significant bit. Fix it by always truncating inputs for 32-bit ops to 32
bits.
Also get rid of the coerce_reg_to_size() after the ALU op, since that has
no effect.
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
| 0 |
/* Serialize variant 'v' into a freshly allocated evbuffer using the
 * requested format. Caller owns (and must free) the returned buffer. */
struct evbuffer* tr_variantToBuf(tr_variant const* v, tr_variant_fmt fmt)
{
    struct locale_context locale_ctx;
    struct evbuffer* out = evbuffer_new();

    /* parse with LC_NUMERIC="C" to ensure a "." decimal separator */
    use_numeric_locale(&locale_ctx, "C");

    /* alloc a little memory to start off with */
    evbuffer_expand(out, 4096);

    if (fmt == TR_VARIANT_FMT_BENC)
    {
        tr_variantToBufBenc(v, out);
    }
    else if (fmt == TR_VARIANT_FMT_JSON)
    {
        tr_variantToBufJson(v, out, false);
    }
    else if (fmt == TR_VARIANT_FMT_JSON_LEAN)
    {
        tr_variantToBufJson(v, out, true);
    }

    /* restore the previous locale */
    restore_locale(&locale_ctx);
    return out;
}
|
Safe
|
[
"CWE-416",
"CWE-284"
] |
transmission
|
2123adf8e5e1c2b48791f9d22fc8c747e974180e
|
2.3788312734664208e+38
| 29 |
CVE-2018-10756: Fix heap-use-after-free in tr_variantWalk
In libtransmission/variant.c, function tr_variantWalk, when the variant
stack is reallocated, a pointer to the previously allocated memory
region is kept. This address is later accessed (heap use-after-free)
while walking back down the stack, causing the application to crash.
The application can be any application which uses libtransmission, such
as transmission-daemon, transmission-gtk, transmission-show, etc.
Reported-by: Tom Richards <tom@tomrichards.net>
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.