func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64) |
---|---|---|---|---|---|---|---|---|
check_fmt(struct magic_set *ms, const char *fmt)
{
file_regex_t rx;
int rc, rv = -1;
if (strchr(fmt, '%') == NULL)
return 0;
rc = file_regcomp(&rx, "%[-0-9\\.]*s", REG_EXTENDED|REG_NOSUB);
if (rc) {
file_regerror(&rx, rc, ms);
} else {
rc = file_regexec(&rx, fmt, 0, 0, 0);
rv = !rc;
}
file_regfree(&rx);
return rv;
}
|
Safe
|
[
"CWE-787"
] |
file
|
d65781527c8134a1202b2649695d48d5701ac60b
|
2.310043735943982e+38
| 18 |
PR/62: spinpx: limit size of file_printable.
| 0 |
test_bson_append_maxkey (void)
{
bson_t *b;
bson_t *b2;
b = bson_new ();
BSON_ASSERT (bson_append_maxkey (b, "maxkey", -1));
b2 = get_bson ("test37.bson");
BSON_ASSERT_BSON_EQUAL (b, b2);
bson_destroy (b);
bson_destroy (b2);
}
|
Safe
|
[
"CWE-125"
] |
libbson
|
42900956dc461dfe7fb91d93361d10737c1602b3
|
4.0953111918435694e+37
| 12 |
CDRIVER-2269 Check for zero string length in codewscope
| 0 |
static bool rgw_find_host_in_domains(const string& host, string *domain, string *subdomain, set<string> valid_hostnames_set)
{
set<string>::iterator iter;
/** TODO, Future optimization
* store hostnames_set elements _reversed_, and look for a prefix match,
* which is much faster than a suffix match.
*/
for (iter = valid_hostnames_set.begin(); iter != valid_hostnames_set.end(); ++iter) {
size_t pos;
if (!str_ends_with(host, *iter, &pos))
continue;
if (pos == 0) {
*domain = host;
subdomain->clear();
} else {
if (host[pos - 1] != '.') {
continue;
}
*domain = host.substr(pos);
*subdomain = host.substr(0, pos - 1);
}
return true;
}
return false;
}
|
Safe
|
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
|
8.298232820310994e+37
| 27 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <joao@suse.de>
Signed-off-by: Abhishek Lekshmanan <abhishek@suse.com>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
| 0 |
lr_get_best_checksum(const LrMetalink *metalink,
GSList **checksums)
{
gboolean ret;
LrChecksumType ch_type;
gchar *ch_value;
// From the metalink itself
ret = lr_best_checksum(metalink->hashes, &ch_type, &ch_value);
if (ret)
{
LrDownloadTargetChecksum *dtch;
dtch = lr_downloadtargetchecksum_new(ch_type, ch_value);
*checksums = g_slist_prepend(*checksums, dtch);
g_debug("%s: Expected checksum for repomd.xml: (%s) %s",
__func__, lr_checksum_type_to_str(ch_type), ch_value);
}
// From the alternates entries
for (GSList *elem = metalink->alternates; elem; elem = g_slist_next(elem))
{
LrMetalinkAlternate *alt = elem->data;
ret = lr_best_checksum(alt->hashes, &ch_type, &ch_value);
if (ret) {
LrDownloadTargetChecksum *dtch;
dtch = lr_downloadtargetchecksum_new(ch_type, ch_value);
*checksums = g_slist_prepend(*checksums, dtch);
g_debug("%s: Expected alternate checksum for repomd.xml: (%s) %s",
__func__, lr_checksum_type_to_str(ch_type), ch_value);
}
}
}
|
Safe
|
[
"CWE-22"
] |
librepo
|
7daea2a2429a54dad68b1de9b37a5f65c5cf2600
|
1.2919725150234368e+38
| 32 |
Validate path read from repomd.xml (RhBug:1868639)
= changelog =
msg: Validate path read from repomd.xml
type: security
resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1868639
| 0 |
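The librepo entry above says the fix validates the path read from repomd.xml (RhBug:1868639). The actual librepo check is not shown in this row; the sketch below only illustrates the kind of validation such a fix typically performs, rejecting absolute paths and `..` components so untrusted metadata cannot point outside the destination directory. The function name and its exact rules are assumptions, not librepo's API.

```c
/* Hypothetical sketch (not librepo's actual code): reject paths from
 * untrusted metadata that are absolute or contain ".." components. */
#include <stdbool.h>
#include <string.h>

static bool path_is_safe(const char *path)
{
    if (path == NULL || path[0] == '\0' || path[0] == '/')
        return false;                      /* empty or absolute path */

    for (const char *p = path; *p; ) {
        size_t seg = strcspn(p, "/");      /* length of this component */
        if (seg == 2 && p[0] == '.' && p[1] == '.')
            return false;                  /* ".." escapes the target dir */
        p += seg;
        while (*p == '/')
            p++;                           /* skip consecutive separators */
    }
    return true;
}
```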
parse_path_arg(const struct lys_module *mod, const char *id, const char **prefix, int *pref_len,
const char **name, int *nam_len, int *parent_times, int *has_predicate)
{
int parsed = 0, ret, par_times = 0;
assert(id);
assert(parent_times);
if (prefix) {
*prefix = NULL;
}
if (pref_len) {
*pref_len = 0;
}
if (name) {
*name = NULL;
}
if (nam_len) {
*nam_len = 0;
}
if (has_predicate) {
*has_predicate = 0;
}
if (!*parent_times && !strncmp(id, "..", 2)) {
++par_times;
parsed += 2;
id += 2;
while (!strncmp(id, "/..", 3)) {
++par_times;
parsed += 3;
id += 3;
}
}
if (!*parent_times) {
if (par_times) {
*parent_times = par_times;
} else {
*parent_times = -1;
}
}
if (id[0] != '/') {
return -parsed;
}
/* skip '/' */
++parsed;
++id;
/* node-identifier ([prefix:]identifier) */
if ((ret = parse_node_identifier(id, prefix, pref_len, name, nam_len, NULL, 0)) < 1) {
return -parsed + ret;
}
if (prefix && !(*prefix)) {
/* actually we always need prefix even it is not specified */
*prefix = lys_main_module(mod)->name;
*pref_len = strlen(*prefix);
}
parsed += ret;
id += ret;
/* there is no predicate */
if ((id[0] == '/') || !id[0]) {
return parsed;
} else if (id[0] != '[') {
return -parsed;
}
if (has_predicate) {
*has_predicate = 1;
}
return parsed;
}
|
Safe
|
[
"CWE-617"
] |
libyang
|
5ce30801f9ccc372bbe9b7c98bb5324b15fb010a
|
2.9109428788159295e+37
| 79 |
schema tree BUGFIX freeing nodes with no module set
Context must be passed explicitly for these cases.
Fixes #1452
| 0 |
static void trace_hotside(jit_State *J, const BCIns *pc)
{
SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
snap->count != SNAPCOUNT_DONE &&
++snap->count >= J->param[JIT_P_hotexit]) {
lua_assert(J->state == LJ_TRACE_IDLE);
/* J->parent is non-zero for a side trace. */
J->state = LJ_TRACE_START;
lj_trace_ins(J, pc);
}
}
|
Safe
|
[
"CWE-125"
] |
LuaJIT
|
12ab596997b9cb27846a5b254d11230c3f9c50c8
|
1.796844097774719e+38
| 12 |
Fix handling of errors during snapshot restore.
| 0 |
BGD_DECLARE(gdImagePtr) gdImageCreateFromXbm(FILE * fd)
{
char fline[MAX_XBM_LINE_SIZE];
char iname[MAX_XBM_LINE_SIZE];
char *type;
int value;
unsigned int width = 0, height = 0;
int fail = 0;
int max_bit = 0;
gdImagePtr im;
int bytes = 0, i;
int bit, x = 0, y = 0;
int ch;
char h[8];
unsigned int b;
rewind(fd);
while (fgets(fline, MAX_XBM_LINE_SIZE, fd)) {
fline[MAX_XBM_LINE_SIZE-1] = '\0';
if (strlen(fline) == MAX_XBM_LINE_SIZE-1) {
return 0;
}
if (sscanf(fline, "#define %s %d", iname, &value) == 2) {
if (!(type = strrchr(iname, '_'))) {
type = iname;
} else {
type++;
}
if (!strcmp("width", type)) {
width = (unsigned int) value;
}
if (!strcmp("height", type)) {
height = (unsigned int) value;
}
} else {
if ( sscanf(fline, "static unsigned char %s = {", iname) == 1
|| sscanf(fline, "static char %s = {", iname) == 1)
{
max_bit = 128;
} else if (sscanf(fline, "static unsigned short %s = {", iname) == 1
|| sscanf(fline, "static short %s = {", iname) == 1)
{
max_bit = 32768;
}
if (max_bit) {
bytes = (width * height / 8) + 1;
if (!bytes) {
return 0;
}
if (!(type = strrchr(iname, '_'))) {
type = iname;
} else {
type++;
}
if (!strcmp("bits[]", type)) {
break;
}
}
}
}
if (!bytes || !max_bit) {
return 0;
}
if(!(im = gdImageCreate(width, height))) {
return 0;
}
gdImageColorAllocate(im, 255, 255, 255);
gdImageColorAllocate(im, 0, 0, 0);
h[2] = '\0';
h[4] = '\0';
for (i = 0; i < bytes; i++) {
while (1) {
if ((ch=getc(fd)) == EOF) {
fail = 1;
break;
}
if (ch == 'x') {
break;
}
}
if (fail) {
break;
}
/* Get hex value */
if ((ch=getc(fd)) == EOF) {
break;
}
h[0] = ch;
if ((ch=getc(fd)) == EOF) {
break;
}
h[1] = ch;
if (max_bit == 32768) {
if ((ch=getc(fd)) == EOF) {
break;
}
h[2] = ch;
if ((ch=getc(fd)) == EOF) {
break;
}
h[3] = ch;
}
sscanf(h, "%x", &b);
for (bit = 1; bit <= max_bit; bit = bit << 1) {
gdImageSetPixel(im, x++, y, (b & bit) ? 1 : 0);
if (x == im->sx) {
x = 0;
y++;
if (y == im->sy) {
return im;
}
break;
}
}
}
gd_error("EOF before image was complete");
gdImageDestroy(im);
return 0;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
libgd
|
4dc1a2d7931017d3625f2d7cff70a17ce58b53b4
|
6.668887205372035e+37
| 123 |
xbm: avoid stack overflow (read) with large names #211
We use the name passed in to printf into a local stack buffer which is
limited to 4000 bytes. So given a large enough value, lots of stack
data is leaked. Rewrite the code to do simple memory copies with most
of the strings to avoid that issue, and only use stack buffer for small
numbers of constant size.
This closes #211.
| 0 |
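The libgd message above explains that the XBM writer used to printf a caller-controlled name into a fixed stack buffer of about 4000 bytes and was rewritten to use plain memory copies, keeping stack buffers only for small, constant-size pieces. The sketch below is a hedged illustration of that general pattern, sizing a heap buffer from the input length; `emit_header` and its format string are illustrative, not the actual libgd code.

```c
/* Hedged sketch of the "copy, don't printf into a fixed stack buffer"
 * pattern; emit_header() and its buffer sizing are illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *emit_header(const char *name, int width, int height)
{
    /* worst case: the name appears twice, plus two numbers and fixed text */
    size_t len = 2 * strlen(name) + 128;
    char *out = malloc(len);
    if (out == NULL)
        return NULL;
    /* snprintf never writes past the buffer sized from the input */
    snprintf(out, len, "#define %s_width %d\n#define %s_height %d\n",
             name, width, name, height);
    return out;   /* caller frees */
}
```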
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
vcpu->arch.nmi_pending = false;
vcpu->arch.nmi_injected = false;
vcpu->arch.switch_db_regs = 0;
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = DR6_FIXED_1;
vcpu->arch.dr7 = DR7_FIXED_1;
return kvm_x86_ops->vcpu_reset(vcpu);
}
|
Safe
|
[
"CWE-476"
] |
linux-2.6
|
59839dfff5eabca01cc4e20b45797a60a80af8cb
|
3.3292682595298173e+38
| 12 |
KVM: x86: check for cr3 validity in ioctl_set_sregs
Matt T. Yourst notes that kvm_arch_vcpu_ioctl_set_sregs lacks validity
checking for the new cr3 value:
"Userspace callers of KVM_SET_SREGS can pass a bogus value of cr3 to
the kernel. This will trigger a NULL pointer access in gfn_to_rmap()
when userspace next tries to call KVM_RUN on the affected VCPU and kvm
attempts to activate the new non-existent page table root.
This happens since kvm only validates that cr3 points to a valid guest
physical memory page when code *inside* the guest sets cr3. However, kvm
currently trusts the userspace caller (e.g. QEMU) on the host machine to
always supply a valid page table root, rather than properly validating
it along with the rest of the reloaded guest state."
http://sourceforge.net/tracker/?func=detail&atid=893831&aid=2687641&group_id=180599
Check for a valid cr3 address in kvm_arch_vcpu_ioctl_set_sregs, triple
fault in case of failure.
Cc: stable@kernel.org
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
| 0 |
static unsigned long account_pipe_buffers(struct user_struct *user,
unsigned long old, unsigned long new)
{
return atomic_long_add_return(new - old, &user->pipe_bufs);
}
|
Safe
|
[
"CWE-416"
] |
linux
|
15fab63e1e57be9fdb5eec1bbc5916e9825e9acb
|
1.4159378006116109e+38
| 5 |
fs: prevent page refcount overflow in pipe_buf_get
Change pipe_buf_get() to return a bool indicating whether it succeeded
in raising the refcount of the page (if the thing in the pipe is a page).
This removes another mechanism for overflowing the page refcount. All
callers converted to handle a failure.
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
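The commit message above describes changing pipe_buf_get() to return a bool so that a failed attempt to take a page reference (refused when the refcount is close to overflowing) is reported to callers instead of silently wrapping. Below is a minimal, userspace-style sketch of that shape; the names and the saturation threshold are assumptions used only to illustrate the pattern, not the kernel diff itself.

```c
/* Hedged sketch: a reference grab that can fail near overflow, and a
 * wrapper whose callers must handle the failure. Not kernel code. */
#include <limits.h>
#include <stdbool.h>

struct page_ref { unsigned int count; };

static bool try_get_ref(struct page_ref *r)
{
    /* refuse to take a reference when the counter is close to wrapping */
    if (r->count >= UINT_MAX - 1024)
        return false;
    r->count++;
    return true;
}

static bool pipe_buf_get_example(struct page_ref *r)
{
    return try_get_ref(r);   /* callers must check the result */
}
```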
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{
/* Fix illegal checksum combinations */
if ((features & NETIF_F_HW_CSUM) &&
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
netdev_warn(dev, "mixed HW and IP checksum settings.\n");
features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
features &= ~NETIF_F_ALL_TSO;
}
if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
!(features & NETIF_F_IP_CSUM)) {
netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
features &= ~NETIF_F_TSO;
features &= ~NETIF_F_TSO_ECN;
}
if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
!(features & NETIF_F_IPV6_CSUM)) {
netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
features &= ~NETIF_F_TSO6;
}
/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
features &= ~NETIF_F_TSO_MANGLEID;
/* TSO ECN requires that TSO is present as well. */
if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
features &= ~NETIF_F_TSO_ECN;
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
features &= ~NETIF_F_GSO;
}
/* GSO partial features require GSO partial be set */
if ((features & dev->gso_partial_features) &&
!(features & NETIF_F_GSO_PARTIAL)) {
netdev_dbg(dev,
"Dropping partially supported GSO features since no GSO partial.\n");
features &= ~dev->gso_partial_features;
}
if (!(features & NETIF_F_RXCSUM)) {
/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
* successfully merged by hardware must also have the
* checksum verified by hardware. If the user does not
* want to enable RXCSUM, logically, we should disable GRO_HW.
*/
if (features & NETIF_F_GRO_HW) {
netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
features &= ~NETIF_F_GRO_HW;
}
}
/* LRO/HW-GRO features cannot be combined with RX-FCS */
if (features & NETIF_F_RXFCS) {
if (features & NETIF_F_LRO) {
netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
features &= ~NETIF_F_LRO;
}
if (features & NETIF_F_GRO_HW) {
netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
features &= ~NETIF_F_GRO_HW;
}
}
	return features;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
a4270d6795b0580287453ea55974d948393e66ef
|
2.772804339375741e+38
| 78 |
net-gro: fix use-after-free read in napi_gro_frags()
If a network driver provides to napi_gro_frags() an
skb with a page fragment of exactly 14 bytes, the call
to gro_pull_from_frag0() will 'consume' the fragment
by calling skb_frag_unref(skb, 0), and the page might
be freed and reused.
Reading eth->h_proto at the end of napi_frags_skb() might
read mangled data, or crash under specific debugging features.
BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline]
BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
Read of size 2 at addr ffff88809366840c by task syz-executor599/8957
CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188
__kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
kasan_report+0x12/0x20 mm/kasan/common.c:614
__asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142
napi_frags_skb net/core/dev.c:5833 [inline]
napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037
call_write_iter include/linux/fs.h:1872 [inline]
do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693
do_iter_write fs/read_write.c:970 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:951
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015
do_writev+0x15b/0x330 fs/read_write.c:1058
Fixes: a50e233c50db ("net-gro: restore frag0 optimization")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
{
int i;
__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
int ret;
__le16 le_data = cpu_to_le16(data);
set_registers(pegasus, EpromOffset, 4, d);
enable_eprom_write(pegasus);
set_register(pegasus, EpromOffset, index);
set_registers(pegasus, EpromData, 2, &le_data);
set_register(pegasus, EpromCtrl, EPROM_WRITE);
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
if (ret == -ESHUTDOWN)
goto fail;
if (tmp & EPROM_DONE)
break;
}
disable_eprom_write(pegasus);
if (i >= REG_TIMEOUT)
goto fail;
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
|
Safe
|
[
"CWE-119",
"CWE-284"
] |
linux
|
5593523f968bc86d42a035c6df47d5e0979b5ace
|
2.0487852222642536e+38
| 30 |
pegasus: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
References: https://bugs.debian.org/852556
Reported-by: Lisandro Damián Nicanor Pérez Meyer <lisandro@debian.org>
Tested-by: Lisandro Damián Nicanor Pérez Meyer <lisandro@debian.org>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
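The pegasus message above says stack buffers were replaced with heap buffers for all register access, since USB transfer buffers must be DMA-capable and on-stack memory (with VMAP_STACK) is not. The sketch below shows that bounce-buffer pattern for a control-message read in hedged form; `DEMO_REQ_GET_REGS`, `DEMO_REQT_READ`, and the function name are placeholders, not the driver's real definitions.

```c
/* Hedged sketch (not the pegasus driver's actual code): read device
 * registers through a kmalloc'ed bounce buffer instead of the stack,
 * because USB transfer buffers must be DMA-capable. */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

#define DEMO_REQ_GET_REGS   0xf0   /* placeholder request code */
#define DEMO_REQT_READ      (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)

static int demo_get_registers(struct usb_device *udev, u16 indx,
			      u16 size, void *data)
{
	u8 *buf;
	int ret;

	buf = kmalloc(size, GFP_NOIO);          /* heap, never the stack */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      DEMO_REQ_GET_REGS, DEMO_REQT_READ,
			      0, indx, buf, size, 1000);
	if (ret >= 0)
		memcpy(data, buf, size);        /* copy out to the caller */

	kfree(buf);
	return ret;
}
```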
GetEmptyMatrixMaxBufSize(const char *name, int rank, size_t *size)
{
int err = 0;
size_t nBytes = 0, len, rank_size;
size_t tag_size = 8, array_flags_size = 8;
/* Add the Array Flags tag and space to the number of bytes */
nBytes += tag_size + array_flags_size;
/* Get size of variable name, pad it to an 8 byte block, and add it to nBytes */
if ( NULL != name )
len = strlen(name);
else
len = 4;
if ( len <= 4 ) {
nBytes += tag_size;
} else {
nBytes += tag_size;
if ( len % 8 )
err |= SafeAdd(&len, len, 8 - len % 8);
err |= SafeAdd(&nBytes, nBytes, len);
}
/* Add rank and dimensions, padded to an 8 byte block */
err |= SafeMul(&rank_size, rank, 4);
if ( rank % 2 )
err |= SafeAdd(&nBytes, nBytes, tag_size + 4);
else
err |= SafeAdd(&nBytes, nBytes, tag_size);
err |= SafeAdd(&nBytes, nBytes, rank_size);
/* Data tag */
err |= SafeAdd(&nBytes, nBytes, tag_size);
if ( err )
return 1;
*size = nBytes;
return 0;
}
|
Safe
|
[
"CWE-190",
"CWE-401"
] |
matio
|
5fa49ef9fc4368fe3d19b5fdaa36d8fa5e7f4606
|
2.010647424500464e+38
| 42 |
Fix integer addition overflow
As reported by https://github.com/tbeu/matio/issues/121
| 0 |
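GetEmptyMatrixMaxBufSize above accumulates sizes exclusively through SafeAdd/SafeMul, which is the point of the "Fix integer addition overflow" commit. matio's real helpers are not included in this row; the sketch below shows how such overflow-checked size_t helpers are commonly written, and the `Sketch` suffix marks them as illustrations rather than matio's implementations.

```c
/* Hedged sketch of overflow-checked size_t arithmetic in the style the
 * function above relies on; not matio's actual SafeAdd/SafeMul. */
#include <stddef.h>
#include <stdint.h>

static int SafeAddSketch(size_t *res, size_t a, size_t b)
{
    if (a > SIZE_MAX - b)
        return 1;          /* would overflow */
    *res = a + b;
    return 0;
}

static int SafeMulSketch(size_t *res, size_t a, size_t b)
{
    if (b != 0 && a > SIZE_MAX / b)
        return 1;          /* would overflow */
    *res = a * b;
    return 0;
}
```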
Tfloat cubic_atX(const float fx, const int y, const int z, const int c, const T& out_value) const {
const int
x = (int)fx - (fx>=0?0:1), px = x - 1, nx = x + 1, ax = x + 2;
const float
dx = fx - x;
const Tfloat
Ip = (Tfloat)atX(px,y,z,c,out_value), Ic = (Tfloat)atX(x,y,z,c,out_value),
In = (Tfloat)atX(nx,y,z,c,out_value), Ia = (Tfloat)atX(ax,y,z,c,out_value);
return Ic + 0.5f*(dx*(-Ip + In) + dx*dx*(2*Ip - 5*Ic + 4*In - Ia) + dx*dx*dx*(-Ip + 3*Ic - 3*In + Ia));
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
3.901248806110541e+37
| 10 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
| 0 |
static void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{
if (dev)
dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
dev_driver_string(dev), dev_name(dev), vaf);
else
printk("%s(NULL device *): %pV", level, vaf);
}
|
Safe
|
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
|
2.471057925856064e+38
| 9 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf family calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <joe@perches.com>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
xsltCopyComp(xsltStylesheetPtr style, xmlNodePtr inst) {
#ifdef XSLT_REFACTORED
xsltStyleItemCopyPtr comp;
#else
xsltStylePreCompPtr comp;
#endif
if ((style == NULL) || (inst == NULL) || (inst->type != XML_ELEMENT_NODE))
return;
#ifdef XSLT_REFACTORED
comp = (xsltStyleItemCopyPtr) xsltNewStylePreComp(style, XSLT_FUNC_COPY);
#else
comp = xsltNewStylePreComp(style, XSLT_FUNC_COPY);
#endif
if (comp == NULL)
return;
inst->psvi = comp;
comp->inst = inst;
comp->use = xsltGetCNsProp(style, inst, (const xmlChar *)"use-attribute-sets",
XSLT_NAMESPACE);
if (comp->use == NULL)
comp->has_use = 0;
else
comp->has_use = 1;
}
|
Safe
|
[] |
libxslt
|
7ca19df892ca22d9314e95d59ce2abdeff46b617
|
2.4167235202532497e+38
| 28 |
Fix for type confusion in preprocessing attributes
CVE-2015-7995 http://www.openwall.com/lists/oss-security/2015/10/27/10
We need to check that the parent node is an element before dereferencing
its namespace
| 0 |
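The CVE-2015-7995 note above says the fix checks that a node's parent is an element before dereferencing its namespace. The exact libxslt hunk is not part of this row; the sketch below only illustrates the shape of such a guard using libxml2's public node fields, and `safe_parent_ns` is a made-up helper name.

```c
/* Hedged sketch of the kind of guard the commit message describes;
 * not the actual libxslt patch. */
#include <libxml/tree.h>

static xmlNsPtr safe_parent_ns(xmlNodePtr node)
{
    if (node == NULL || node->parent == NULL ||
        node->parent->type != XML_ELEMENT_NODE)
        return NULL;            /* parent is not an element: no namespace */
    return node->parent->ns;
}
```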
static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_cmd *dc)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
if (dc->error == -EOPNOTSUPP)
dc->error = 0;
if (dc->error)
f2fs_msg(sbi->sb, KERN_INFO,
"Issue discard(%u, %u, %u) failed, ret: %d",
dc->lstart, dc->start, dc->len, dc->error);
__detach_discard_cmd(dcc, dc);
}
|
Safe
|
[
"CWE-200",
"CWE-476"
] |
linux
|
d4fdf8ba0e5808ba9ad6b44337783bd9935e0982
|
3.363938868392124e+36
| 14 |
f2fs: fix a panic caused by NULL flush_cmd_control
Mount fs with option noflush_merge, boot failed for illegal address
fcc in function f2fs_issue_flush:
if (!test_opt(sbi, FLUSH_MERGE)) {
ret = submit_flush_wait(sbi);
atomic_inc(&fcc->issued_flush); -> Here, fcc illegal
return ret;
}
Signed-off-by: Yunlei He <heyunlei@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
| 0 |
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
return vmcs_readl(GUEST_RFLAGS);
}
|
Safe
|
[
"CWE-20"
] |
linux-2.6
|
16175a796d061833aacfbd9672235f2d2725df65
|
3.010597962801452e+38
| 4 |
KVM: VMX: Don't allow uninhibited access to EFER on i386
vmx_set_msr() does not allow i386 guests to touch EFER, but they can still
do so through the default: label in the switch. If they set EFER_LME, they
can oops the host.
Fix by having EFER access through the normal channel (which will check for
EFER_LME) even on i386.
Reported-and-tested-by: Benjamin Gilbert <bgilbert@cs.cmu.edu>
Cc: stable@kernel.org
Signed-off-by: Avi Kivity <avi@redhat.com>
| 0 |
static void tstream_tls_push_trigger_write(struct tevent_context *ev,
struct tevent_immediate *im,
void *private_data)
{
struct tstream_context *stream =
talloc_get_type_abort(private_data,
struct tstream_context);
struct tstream_tls *tlss =
tstream_context_data(stream,
struct tstream_tls);
struct tevent_req *subreq;
if (tlss->push.subreq) {
/* nothing todo */
return;
}
tlss->push.iov.iov_base = (char *)tlss->push.buf;
tlss->push.iov.iov_len = tlss->push.ofs;
subreq = tstream_writev_send(tlss,
tlss->current_ev,
tlss->plain_stream,
&tlss->push.iov, 1);
if (subreq == NULL) {
tlss->error = ENOMEM;
tstream_tls_retry(stream, false);
return;
}
tevent_req_set_callback(subreq, tstream_tls_push_done, stream);
tlss->push.subreq = subreq;
}
|
Safe
|
[] |
samba
|
22af043d2f20760f27150d7d469c7c7b944c6b55
|
1.4252242084229372e+38
| 33 |
CVE-2013-4476: s4:libtls: check for safe permissions of tls private key file (key.pem)
If the tls key is not owned by root or does not have mode 0600, samba will not
start up.
Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234
Pair-Programmed-With: Stefan Metzmacher <metze@samba.org>
Signed-off-by: Björn Baumbach <bb@sernet.de>
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Stefan Metzmacher <metze@samba.org>
Autobuild-User(master): Karolin Seeger <kseeger@samba.org>
Autobuild-Date(master): Mon Nov 11 13:07:16 CET 2013 on sn-devel-104
| 0 |
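The CVE-2013-4476 message above states that startup is refused unless the TLS private key file is owned by root and has mode 0600. Below is a hedged, portable sketch of that check; samba's own implementation and its error reporting differ, and `key_file_is_safe` is an illustrative name.

```c
/* Hedged sketch of the permission check described above; not samba's code. */
#include <stdbool.h>
#include <sys/stat.h>

static bool key_file_is_safe(const char *path)
{
    struct stat st;

    if (stat(path, &st) != 0)
        return false;                       /* missing or unreadable */
    if (st.st_uid != 0)
        return false;                       /* must be owned by root */
    if ((st.st_mode & 0777) != 0600)
        return false;                       /* must be mode 0600 exactly */
    return true;
}
```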
read_mysql_one_value(MYSQL *connection, const char *query)
{
MYSQL_RES *mysql_result;
MYSQL_ROW row;
char *result = NULL;
mysql_result = xb_mysql_query(connection, query, true);
ut_ad(mysql_num_fields(mysql_result) == 1);
if ((row = mysql_fetch_row(mysql_result))) {
result = strdup(row[0]);
}
mysql_free_result(mysql_result);
return(result);
}
|
Safe
|
[
"CWE-200"
] |
percona-xtrabackup
|
7742f875bb289a874246fb4653b7cd9f14b588fe
|
3.0099525375254932e+38
| 18 |
PXB-2722 password is written into xtrabackup_info
https://jira.percona.com/browse/PXB-2722
Analysis:
password passed with -p option is written into backup tool_command in xtrabackup_info
Fix:
mask password before writing into xtrabackup_info
| 0 |
do_close_reader (ccid_driver_t handle)
{
int rc;
unsigned char msg[100];
size_t msglen;
unsigned char seqno;
if (!handle->powered_off)
{
msg[0] = PC_to_RDR_IccPowerOff;
msg[5] = 0; /* slot */
msg[6] = seqno = handle->seqno++;
msg[7] = 0; /* RFU */
msg[8] = 0; /* RFU */
msg[9] = 0; /* RFU */
set_msg_len (msg, 0);
msglen = 10;
rc = bulk_out (handle, msg, msglen, 0);
if (!rc)
bulk_in (handle, msg, sizeof msg, &msglen, RDR_to_PC_SlotStatus,
seqno, 2000, 0);
handle->powered_off = 1;
}
if (handle->idev)
{
usb_release_interface (handle->idev, handle->ifc_no);
usb_close (handle->idev);
handle->idev = NULL;
}
if (handle->dev_fd != -1)
{
close (handle->dev_fd);
handle->dev_fd = -1;
}
}
|
Safe
|
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
|
3.365387618722313e+38
| 36 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <wk@gnupg.org>
| 0 |
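The GnuPG commit above introduces host2net.h helpers such as buf32_to_u32 so byte-to-scalar conversions never shift a value that has been sign-extended by integer promotion. The sketch below shows one such helper in that spirit; the real header defines several width and return-type variants.

```c
/* Hedged sketch in the spirit of common/host2net.h: convert four
 * big-endian bytes to an unsigned scalar without sign extension. */
static inline unsigned int buf32_to_uint_sketch(const void *buffer)
{
    const unsigned char *p = buffer;

    return ((unsigned int)p[0] << 24)
         | ((unsigned int)p[1] << 16)
         | ((unsigned int)p[2] << 8)
         |  (unsigned int)p[3];
}
```

The cast before each shift is what removes the hazard the message refers to: without it, `p[0]` is promoted to a signed int before the `<< 24`.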
static void FVMenuCenter(GWindow gw, struct gmenuitem *mi, GEvent *UNUSED(e)) {
FontViewBase *fv = (FontViewBase *) GDrawGetUserData(gw);
FVMetricsCenter(fv,mi->mid==MID_Center);
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
fontforge
|
626f751752875a0ddd74b9e217b6f4828713573c
|
2.7659238446250525e+38
| 4 |
Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846.
| 0 |
set_string_option_global(
int opt_idx, // option index
char_u **varp) // pointer to option variable
{
char_u **p, *s;
// the global value is always allocated
if (is_window_local_option(opt_idx))
p = (char_u **)GLOBAL_WO(varp);
else
p = (char_u **)get_option_var(opt_idx);
if (!is_global_option(opt_idx)
&& p != varp
&& (s = vim_strsave(*varp)) != NULL)
{
free_string_option(*p);
*p = s;
}
}
|
Safe
|
[
"CWE-122"
] |
vim
|
b7081e135a16091c93f6f5f7525a5c58fb7ca9f9
|
1.6691644290573741e+38
| 19 |
patch 8.2.3402: invalid memory access when using :retab with large value
Problem: Invalid memory access when using :retab with large value.
Solution: Check the number is positive.
| 0 |
int service_main(int argc __attribute__((unused)),
char **argv __attribute__((unused)),
char **envp __attribute__((unused)))
{
sasl_security_properties_t *secprops=NULL;
const char *mechlist, *mech;
int mechcount = 0;
size_t mechlen;
struct auth_scheme_t *scheme;
struct http_connection http_conn;
prometheus_decrement(CYRUS_HTTP_READY_LISTENERS);
session_new_id();
signals_poll();
httpd_in = prot_new(0, 0);
httpd_out = prot_new(1, 1);
protgroup_insert(protin, httpd_in);
/* Setup HTTP connection */
memset(&http_conn, 0, sizeof(struct http_connection));
http_conn.pin = httpd_in;
http_conn.pout = httpd_out;
http_conn.logfd = -1;
/* Create XML parser context */
if (!(http_conn.xml = xmlNewParserCtxt())) {
fatal("Unable to create XML parser", EX_TEMPFAIL);
}
/* Find out name of client host */
http_conn.clienthost = get_clienthost(0, &httpd_localip, &httpd_remoteip);
if (httpd_localip && httpd_remoteip) {
buf_setcstr(&saslprops.ipremoteport, httpd_remoteip);
buf_setcstr(&saslprops.iplocalport, httpd_localip);
}
/* other params should be filled in */
if (sasl_server_new("HTTP", config_servername, NULL,
buf_cstringnull_ifempty(&saslprops.iplocalport),
buf_cstringnull_ifempty(&saslprops.ipremoteport),
NULL, SASL_USAGE_FLAGS, &httpd_saslconn) != SASL_OK)
fatal("SASL failed initializing: sasl_server_new()",EX_TEMPFAIL);
/* will always return something valid */
secprops = mysasl_secprops(0);
/* no HTTP clients seem to use "auth-int" */
secprops->max_ssf = 0; /* "auth" only */
secprops->maxbufsize = 0; /* don't need maxbuf */
if (sasl_setprop(httpd_saslconn, SASL_SEC_PROPS, secprops) != SASL_OK)
fatal("Failed to set SASL property", EX_TEMPFAIL);
if (sasl_setprop(httpd_saslconn, SASL_SSF_EXTERNAL, &extprops_ssf) != SASL_OK)
fatal("Failed to set SASL property", EX_TEMPFAIL);
if (httpd_remoteip) {
char hbuf[NI_MAXHOST], *p;
/* Create pre-authentication telemetry log based on client IP */
strlcpy(hbuf, httpd_remoteip, NI_MAXHOST);
if ((p = strchr(hbuf, ';'))) *p = '\0';
http_conn.logfd = telemetry_log(hbuf, httpd_in, httpd_out, 0);
}
/* See which auth schemes are available to us */
avail_auth_schemes = 0; /* Reset auth schemes for each connection */
if ((extprops_ssf >= 2) || config_getswitch(IMAPOPT_ALLOWPLAINTEXT)) {
avail_auth_schemes |= AUTH_BASIC;
}
sasl_listmech(httpd_saslconn, NULL, NULL, " ", NULL,
&mechlist, NULL, &mechcount);
for (mech = mechlist; mechcount--; mech += ++mechlen) {
mechlen = strcspn(mech, " \0");
for (scheme = auth_schemes; scheme->name; scheme++) {
if (scheme->saslmech && !strncmp(mech, scheme->saslmech, mechlen)) {
avail_auth_schemes |= scheme->id;
break;
}
}
}
httpd_tls_required =
config_getswitch(IMAPOPT_TLS_REQUIRED) || !avail_auth_schemes;
proc_register(config_ident, http_conn.clienthost, NULL, NULL, NULL);
/* Set inactivity timer */
httpd_timeout = config_getduration(IMAPOPT_HTTPTIMEOUT, 'm');
if (httpd_timeout < 0) httpd_timeout = 0;
prot_settimeout(httpd_in, httpd_timeout);
prot_setflushonread(httpd_in, httpd_out);
/* we were connected on https port so we should do
TLS negotiation immediately */
if (https == 1) {
if (starttls(NULL, &http_conn) != 0) shut_down(0);
}
else if (http2_preface(&http_conn)) {
/* HTTP/2 client connection preface */
if (http2_start_session(NULL, &http_conn) != 0)
fatal("Failed initializing HTTP/2 session", EX_TEMPFAIL);
}
/* Setup the signal handler for keepalive heartbeat */
httpd_keepalive = config_getduration(IMAPOPT_HTTPKEEPALIVE, 's');
if (httpd_keepalive < 0) httpd_keepalive = 0;
if (httpd_keepalive) {
struct sigaction action;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
#ifdef SA_RESTART
action.sa_flags |= SA_RESTART;
#endif
action.sa_handler = sigalrm_handler;
if (sigaction(SIGALRM, &action, NULL) < 0) {
syslog(LOG_ERR, "unable to install signal handler for %d: %m", SIGALRM);
httpd_keepalive = 0;
}
}
index_text_extractor_init(httpd_in);
prometheus_increment(CYRUS_HTTP_CONNECTIONS_TOTAL);
prometheus_increment(CYRUS_HTTP_ACTIVE_CONNECTIONS);
cmdloop(&http_conn);
prometheus_decrement(CYRUS_HTTP_ACTIVE_CONNECTIONS);
/* Closing connection */
/* cleanup */
signal(SIGALRM, SIG_IGN);
httpd_reset(&http_conn);
prometheus_increment(CYRUS_HTTP_READY_LISTENERS);
return 0;
}
|
Safe
|
[] |
cyrus-imapd
|
602f12ed2af0a49ac4a58affbfea57d0fc23dea5
|
2.2557003335227757e+38
| 142 |
httpd.c: only allow reuse of auth creds on a persistent connection against a backend server in a Murder
| 0 |
MagickExport Image *ImplodeImage(const Image *image,const double amount,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"
CacheView
*canvas_view,
*implode_view,
*interpolate_view;
double
radius;
Image
*canvas_image,
*implode_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize implode image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (implode_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
implode_image=DestroyImage(implode_image);
return((Image *) NULL);
}
/*
Compute scaling factor.
*/
scale.x=1.0;
scale.y=1.0;
center.x=0.5*canvas_image->columns;
center.y=0.5*canvas_image->rows;
radius=center.x;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns*PerceptibleReciprocal((double) canvas_image->rows);
else
if (canvas_image->columns < canvas_image->rows)
{
scale.x=(double) canvas_image->rows*PerceptibleReciprocal((double) canvas_image->columns);
radius=center.y;
}
/*
Implode image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
ssize_t
i;
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(implode_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(implode_image,channel,p[i],q);
}
else
{
double
factor;
/*
Implode the pixel.
*/
factor=1.0;
if (distance > 0.0)
factor=pow(sin(MagickPI*sqrt((double) distance)*PerceptibleReciprocal(radius)/2),-amount);
status=InterpolatePixelChannels(canvas_image,interpolate_view,
implode_image,method,(double) (factor*delta.x*PerceptibleReciprocal(scale.x)+center.x),
(double) (factor*delta.y*PerceptibleReciprocal(scale.y)+center.y),q,exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(implode_image);
}
if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
implode_view=DestroyCacheView(implode_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
implode_image=DestroyImage(implode_image);
return(implode_image);
}
|
Safe
|
[
"CWE-369"
] |
ImageMagick
|
329dd528ab79531d884c0ba131e97d43f872ab5d
|
2.1924818353020854e+38
| 181 |
uses the PerceptibleReciprocal() to prevent the divide-by-zero from occurring (#3194)
Co-authored-by: Zhang Xiaohui <ruc_zhangxiaohui@163.com>
| 0 |
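ImplodeImage above avoids raw division by going through PerceptibleReciprocal(), which is what the commit message credits for preventing the divide-by-zero. The helper itself is not included in this row; the sketch below only illustrates how such a clamped reciprocal behaves, and the epsilon constant is an assumption rather than ImageMagick's MagickEpsilon value.

```c
/* Hedged sketch of a divide-by-zero-safe reciprocal in the spirit of
 * PerceptibleReciprocal(); the epsilon value is illustrative. */
static inline double perceptible_reciprocal_sketch(double x)
{
    const double epsilon = 1.0e-12;
    double sign = (x < 0.0) ? -1.0 : 1.0;

    if (sign * x >= epsilon)
        return 1.0 / x;          /* safe to divide */
    return sign / epsilon;       /* clamp tiny denominators */
}
```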
Supports_Condition_Obj Parser::parse_supports_negation()
{
if (!lex < kwd_not >()) return {};
Supports_Condition_Obj cond = parse_supports_condition_in_parens();
return SASS_MEMORY_NEW(Supports_Negation, pstate, cond);
}
|
Safe
|
[
"CWE-125"
] |
libsass
|
eb15533b07773c30dc03c9d742865604f47120ef
|
1.0116976935237277e+38
| 6 |
Fix memory leak in `parse_ie_keyword_arg`
`kwd_arg` would never get freed when there was a parse error in
`parse_ie_keyword_arg`.
Closes #2656
| 0 |
dissect_kafka_list_partition_reassignments_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, kafka_api_version_t api_version)
{
proto_item *subti, *subsubti;
proto_tree *subtree, *subsubtree;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, 0, NULL, NULL);
subsubtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, 0, api_version,
&dissect_kafka_list_partition_reassignments_request_partition, NULL);
proto_item_set_end(subti, tvb, offset);
return offset;
}
|
Safe
|
[
"CWE-401"
] |
wireshark
|
f4374967bbf9c12746b8ec3cd54dddada9dd353e
|
1.8707948223867355e+38
| 19 |
Kafka: Limit our decompression size.
Don't assume that the Internet has our best interests at heart when it
gives us the size of our decompression buffer. Assign an arbitrary limit
of 50 MB.
This fixes #16739 in that it takes care of
** (process:17681): WARNING **: 20:03:07.440: Dissector bug, protocol Kafka, in packet 31: ../epan/proto.c:7043: failed assertion "end >= fi->start"
which is different from the original error output. It looks like *that*
might have taken care of in one of the other recent Kafka bug fixes.
The decompression routines return a success or failure status. Use
gbooleans instead of ints for that.
| 0 |
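The Wireshark message above says the Kafka dissector now caps decompressed payloads at an arbitrary 50 MB instead of trusting the size read from the packet, and that the decompression routines return gboolean. The sketch below is a hedged illustration of that guard; only the 50 MB figure comes from the message, the function name and its use are invented for the example.

```c
/* Hedged sketch of the "don't trust the advertised size" guard; the
 * constant matches the 50 MB limit mentioned above, the rest is
 * illustrative. */
#include <glib.h>

#define DEMO_MAX_DECOMPRESSED (50 * 1024 * 1024)

static gboolean demo_check_uncompressed_size(guint64 claimed_size)
{
    if (claimed_size == 0 || claimed_size > DEMO_MAX_DECOMPRESSED)
        return FALSE;   /* refuse absurd or empty allocations */
    return TRUE;
}
```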
krb5_init_creds_set_keyblock(krb5_context context,
krb5_init_creds_context ctx,
krb5_keyblock *keyblock)
{
ctx->keyseed = (void *)keyblock;
ctx->keyproc = keyblock_key_proc;
return 0;
}
|
Safe
|
[
"CWE-320"
] |
heimdal
|
2f7f3d9960aa6ea21358bdf3687cee5149aa35cf
|
1.4954506828081333e+37
| 9 |
CVE-2019-12098: krb5: always confirm PA-PKINIT-KX for anon PKINIT
RFC8062 Section 7 requires verification of the PA-PKINIT-KX key exchange
when anonymous PKINIT is used. Failure to do so can permit an active
attacker to become a man-in-the-middle.
Introduced by a1ef548600c5bb51cf52a9a9ea12676506ede19f. First tagged
release Heimdal 1.4.0.
CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N (4.8)
Change-Id: I6cc1c0c24985936468af08693839ac6c3edda133
Signed-off-by: Jeffrey Altman <jaltman@auristor.com>
Approved-by: Jeffrey Altman <jaltman@auritor.com>
(cherry picked from commit 38c797e1ae9b9c8f99ae4aa2e73957679031fd2b)
| 0 |
PHP_FUNCTION(sqlite_rewind)
{
zval *zres;
struct php_sqlite_result *res;
zval *object = getThis();
if (object) {
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RES_FROM_OBJECT(res, object);
} else {
if (FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &zres)) {
return;
}
ZEND_FETCH_RESOURCE(res, struct php_sqlite_result *, &zres, -1, "sqlite result", le_sqlite_result);
}
if (!res->buffered) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot rewind an unbuffered result set");
RETURN_FALSE;
}
if (!res->nrows) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "no rows received");
RETURN_FALSE;
}
res->curr_row = 0;
RETURN_TRUE;
}
|
Safe
|
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
|
3.8481060707895034e+37
| 31 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
| 0 |
gdk_pixbuf__xbm_image_load_increment (gpointer data,
const guchar *buf,
guint size,
GError **error)
{
XBMData *context = (XBMData *) data;
g_return_val_if_fail (data != NULL, FALSE);
if (fwrite (buf, sizeof (guchar), size, context->file) != size) {
gint save_errno = errno;
context->all_okay = FALSE;
g_set_error_literal (error,
G_FILE_ERROR,
g_file_error_from_errno (save_errno),
_("Failed to write to temporary file when loading XBM image"));
return FALSE;
}
return TRUE;
}
|
Safe
|
[
"CWE-189"
] |
gdk-pixbuf
|
4f0f465f991cd454d03189497f923eb40c170c22
|
1.4213339433695698e+38
| 21 |
Avoid an integer overflow in the xbm loader
At the same time, reject some silly input, such as negative
width or height.
https://bugzilla.gnome.org/show_bug.cgi?id=672811
| 0 |
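The gdk-pixbuf message above combines two checks: reject non-positive width or height, and avoid the integer overflow when the dimensions are multiplied for an allocation. The sketch below shows that pair of checks in a hedged, generic form; it is not the actual loader patch.

```c
/* Hedged sketch of overflow-safe dimension validation; not the actual
 * gdk-pixbuf patch. */
#include <limits.h>
#include <stdbool.h>

static bool xbm_dims_ok(int width, int height)
{
    if (width <= 0 || height <= 0)
        return false;                       /* reject silly input */
    if ((unsigned int)width > UINT_MAX / (unsigned int)height)
        return false;                       /* width * height would overflow */
    return true;
}
```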
EncodeNormLookup(NormTable<E> * d) : data(d) {}
|
Safe
|
[
"CWE-125"
] |
aspell
|
de29341638833ba7717bd6b5e6850998454b044b
|
4.743342917794132e+37
| 1 |
Don't allow null-terminated UCS-2/4 strings using the original API.
Detect if the encoding is UCS-2/4 and the length is -1 in affected API
functions and refuse to convert the string. If the string ends up
being converted somehow, abort with an error message in DecodeDirect
and ConvDirect. To convert a null terminated string in
Decode/ConvDirect, a negative number corresponding to the width of the
underlying character type for the encoding is expected; for example,
if the encoding is "ucs-2" then the size is expected to be -2.
Also fix a 1-3 byte over-read in DecodeDirect when reading UCS-2/4
strings when a size is provided (found by OSS-Fuzz).
Also fix a bug in DecodeDirect that caused DocumentChecker to return
the wrong offsets when working with UCS-2/4 strings.
| 0 |
mysql_ssl_free(MYSQL *mysql __attribute__((unused)))
{
struct st_VioSSLFd *ssl_fd= (struct st_VioSSLFd*) mysql->connector_fd;
DBUG_ENTER("mysql_ssl_free");
my_free(mysql->options.ssl_key);
my_free(mysql->options.ssl_cert);
my_free(mysql->options.ssl_ca);
my_free(mysql->options.ssl_capath);
my_free(mysql->options.ssl_cipher);
if (ssl_fd)
SSL_CTX_free(ssl_fd->ssl_context);
my_free(mysql->connector_fd);
mysql->options.ssl_key = 0;
mysql->options.ssl_cert = 0;
mysql->options.ssl_ca = 0;
mysql->options.ssl_capath = 0;
mysql->options.ssl_cipher= 0;
mysql->options.use_ssl = FALSE;
mysql->connector_fd = 0;
DBUG_VOID_RETURN;
}
|
Safe
|
[
"CWE-254"
] |
server
|
f0d774d48416bb06063184380b684380ca005a41
|
1.2852730781942175e+38
| 22 |
MDEV-9212 ssl-validate-cert incorrect hostname check
Reimplement ssl_verify_server_cert() using the logic
from https://wiki.openssl.org/index.php/Hostname_validation
The bug was discovered by Alex Gaynor.
| 0 |
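The MDEV-9212 entry says ssl_verify_server_cert() was reimplemented following the hostname-validation logic documented on the OpenSSL wiki. With OpenSSL 1.0.2 or newer the same rules are available through X509_check_host(); the sketch below uses that helper only to illustrate the intended behaviour, whereas the MariaDB fix itself performs the CN/SAN matching by hand.

```c
/* Hedged sketch: express the wiki's hostname-matching rules via
 * X509_check_host(); not the MariaDB implementation. */
#include <openssl/ssl.h>
#include <openssl/x509v3.h>

static int cert_matches_host(SSL *ssl, const char *host)
{
    X509 *cert = SSL_get_peer_certificate(ssl);
    int ok;

    if (cert == NULL)
        return 0;                            /* no certificate presented */
    /* returns 1 on match, 0 on mismatch, negative on internal error */
    ok = (X509_check_host(cert, host, 0, 0, NULL) == 1);
    X509_free(cert);
    return ok;
}
```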
init_job_stats ()
{
js = zerojs;
}
|
Safe
|
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
|
1.5004844259356319e+37
| 4 |
bash-4.4-rc2 release
| 0 |
qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
virDomainObjPtr vm,
const qemuDomainJobObj *job,
virDomainState state,
int reason)
{
qemuDomainJobPrivatePtr jobPriv = job->privateData;
bool postcopy = (state == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED) ||
(state == VIR_DOMAIN_RUNNING &&
reason == VIR_DOMAIN_RUNNING_POSTCOPY);
switch ((qemuMigrationJobPhase) job->phase) {
case QEMU_MIGRATION_PHASE_NONE:
case QEMU_MIGRATION_PHASE_PERFORM2:
case QEMU_MIGRATION_PHASE_BEGIN3:
case QEMU_MIGRATION_PHASE_PERFORM3:
case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
case QEMU_MIGRATION_PHASE_CONFIRM3:
case QEMU_MIGRATION_PHASE_LAST:
/* N/A for incoming migration */
break;
case QEMU_MIGRATION_PHASE_PREPARE:
VIR_DEBUG("Killing unfinished incoming migration for domain %s",
vm->def->name);
return -1;
case QEMU_MIGRATION_PHASE_FINISH2:
/* source domain is already killed so let's just resume the domain
* and hope we are all set */
VIR_DEBUG("Incoming migration finished, resuming domain %s",
vm->def->name);
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATED,
QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
break;
case QEMU_MIGRATION_PHASE_FINISH3:
/* migration finished, we started resuming the domain but didn't
* confirm success or failure yet; killing it seems safest unless
* we already started guest CPUs or we were in post-copy mode */
if (postcopy) {
qemuMigrationAnyPostcopyFailed(driver, vm);
} else if (state != VIR_DOMAIN_RUNNING) {
VIR_DEBUG("Killing migrated domain %s", vm->def->name);
return -1;
}
break;
}
qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
jobPriv->migParams, job->apiFlags);
return 0;
}
|
Safe
|
[
"CWE-416"
] |
libvirt
|
1ac703a7d0789e46833f4013a3876c2e3af18ec7
|
8.125589273058237e+37
| 59 |
qemu: Add missing lock in qemuProcessHandleMonitorEOF
qemuMonitorUnregister will be called in multiple threads (e.g. threads
in rpc worker pool and the vm event thread). In some cases, it isn't
protected by the monitor lock, which may lead to call g_source_unref
more than one time and a use-after-free problem eventually.
Add the missing lock in qemuProcessHandleMonitorEOF (which is the only
position missing lock of monitor I found).
Suggested-by: Michal Privoznik <mprivozn@redhat.com>
Signed-off-by: Peng Liang <liangpeng10@huawei.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
| 0 |
static int parse_info(char *p)
{
struct SYMBOL *s;
char info_type = *p;
char *error_txt = NULL;
s = abc_new(ABC_T_INFO, p);
p += 2;
switch (info_type) {
case 'd':
case 's':
if (parse.abc_state == ABC_S_GLOBAL)
break;
if (!deco_start) {
error_txt = "Erroneous 'd:'/'s:'";
break;
}
error_txt = parse_decoline(p);
break;
case 'K':
if (parse.abc_state == ABC_S_GLOBAL)
break;
parse_key(p, s);
if (parse.abc_state == ABC_S_HEAD) {
int i;
parse.abc_state = ABC_S_TUNE;
if (ulen == 0)
ulen = BASE_LEN / 8;
for (i = MAXVOICE; --i >= 0; )
voice_tb[i].ulen = ulen;
lyric_started = 0;
}
break;
case 'L':
error_txt = get_len(p, s);
if (s->u.length.base_length > 0)
ulen = s->u.length.base_length;
break;
case 'M':
error_txt = parse_meter(p, s);
break;
case 'Q':
error_txt = parse_tempo(p, s);
break;
case 'U':
error_txt = get_user(p, s);
break;
case 'V':
if (parse.abc_state == ABC_S_GLOBAL)
break;
error_txt = parse_voice(p, s);
break;
case 'X':
memset(voice_tb, 0, sizeof voice_tb);
nvoice = 0;
curvoice = voice_tb;
parse.abc_state = ABC_S_HEAD;
lvlarena(1);
return 2;
}
if (error_txt)
syntax(error_txt, p);
return 0;
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
abcm2ps
|
3169ace6d63f6f517a64e8df0298f44a490c4a15
|
2.975394844379799e+38
| 67 |
fix: crash when accidental without a note at start of line after K:
Issue #84.
| 0 |
mbed_connect_step3(struct Curl_easy *data, struct connectdata *conn,
int sockindex)
{
CURLcode retcode = CURLE_OK;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
struct ssl_backend_data *backend = connssl->backend;
DEBUGASSERT(ssl_connect_3 == connssl->connecting_state);
if(SSL_SET_OPTION(primary.sessionid)) {
int ret;
mbedtls_ssl_session *our_ssl_sessionid;
void *old_ssl_sessionid = NULL;
bool isproxy = SSL_IS_PROXY() ? TRUE : FALSE;
our_ssl_sessionid = malloc(sizeof(mbedtls_ssl_session));
if(!our_ssl_sessionid)
return CURLE_OUT_OF_MEMORY;
mbedtls_ssl_session_init(our_ssl_sessionid);
ret = mbedtls_ssl_get_session(&backend->ssl, our_ssl_sessionid);
if(ret) {
if(ret != MBEDTLS_ERR_SSL_ALLOC_FAILED)
mbedtls_ssl_session_free(our_ssl_sessionid);
free(our_ssl_sessionid);
failf(data, "mbedtls_ssl_get_session returned -0x%x", -ret);
return CURLE_SSL_CONNECT_ERROR;
}
/* If there's already a matching session in the cache, delete it */
Curl_ssl_sessionid_lock(data);
if(!Curl_ssl_getsessionid(data, conn, isproxy, &old_ssl_sessionid, NULL,
sockindex))
Curl_ssl_delsessionid(data, old_ssl_sessionid);
retcode = Curl_ssl_addsessionid(data, conn, isproxy, our_ssl_sessionid,
0, sockindex);
Curl_ssl_sessionid_unlock(data);
if(retcode) {
mbedtls_ssl_session_free(our_ssl_sessionid);
free(our_ssl_sessionid);
failf(data, "failed to store ssl session");
return retcode;
}
}
connssl->connecting_state = ssl_connect_done;
return CURLE_OK;
}
|
Safe
|
[
"CWE-290"
] |
curl
|
b09c8ee15771c614c4bf3ddac893cdb12187c844
|
2.2287743054750503e+38
| 51 |
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890
| 0 |
smtp_transport_query_auth_types_sync (CamelService *service,
GCancellable *cancellable,
GError **error)
{
CamelSmtpTransport *transport = CAMEL_SMTP_TRANSPORT (service);
GList *sasl_types = NULL;
if (!connect_to_server (service, cancellable, error))
return NULL;
if (transport->authtypes) {
GHashTableIter iter;
gpointer key;
g_hash_table_iter_init (&iter, transport->authtypes);
while (g_hash_table_iter_next (&iter, &key, NULL)) {
CamelServiceAuthType *auth_type;
auth_type = camel_sasl_authtype (key);
if (auth_type)
sasl_types = g_list_prepend (sasl_types, auth_type);
}
}
smtp_transport_disconnect_sync (service, TRUE, cancellable, NULL);
return sasl_types;
}
|
Safe
|
[
"CWE-74"
] |
evolution-data-server
|
ba82be72cfd427b5d72ff21f929b3a6d8529c4df
|
1.7620652334231864e+38
| 28 |
I#226 - CVE-2020-14928: Response Injection via STARTTLS in SMTP and POP3
Closes https://gitlab.gnome.org/GNOME/evolution-data-server/-/issues/226
| 0 |
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
struct buffer_head *primary = NULL;
struct ext4_group_desc *gdp;
struct inode *inode = NULL;
handle_t *handle;
int gdb_off, gdb_num;
int num_grp_locked = 0;
int err, err2;
gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
ext4_warning(sb, __func__,
"Can't resize non-sparse filesystem further");
return -EPERM;
}
if (ext4_blocks_count(es) + input->blocks_count <
ext4_blocks_count(es)) {
ext4_warning(sb, __func__, "blocks_count overflow");
return -EINVAL;
}
if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
le32_to_cpu(es->s_inodes_count)) {
ext4_warning(sb, __func__, "inodes_count overflow");
return -EINVAL;
}
if (reserved_gdb || gdb_off == 0) {
if (!EXT4_HAS_COMPAT_FEATURE(sb,
EXT4_FEATURE_COMPAT_RESIZE_INODE)
|| !le16_to_cpu(es->s_reserved_gdt_blocks)) {
ext4_warning(sb, __func__,
"No reserved GDT blocks, can't resize");
return -EPERM;
}
inode = ext4_iget(sb, EXT4_RESIZE_INO);
if (IS_ERR(inode)) {
ext4_warning(sb, __func__,
"Error opening resize inode");
return PTR_ERR(inode);
}
}
if ((err = verify_group_input(sb, input)))
goto exit_put;
if ((err = setup_new_group_blocks(sb, input)))
goto exit_put;
/*
* We will always be modifying at least the superblock and a GDT
* block. If we are adding a group past the last current GDT block,
* we will also modify the inode and the dindirect block. If we
* are adding a group with superblock/GDT backups we will also
* modify each of the reserved GDT dindirect blocks.
*/
handle = ext4_journal_start_sb(sb,
ext4_bg_has_super(sb, input->group) ?
3 + reserved_gdb : 4);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto exit_put;
}
lock_super(sb);
if (input->group != sbi->s_groups_count) {
ext4_warning(sb, __func__,
"multiple resizers run on filesystem!");
err = -EBUSY;
goto exit_journal;
}
if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
goto exit_journal;
/*
* We will only either add reserved group blocks to a backup group
* or remove reserved blocks for the first group in a new group block.
* Doing both would be mean more complex code, and sane people don't
* use non-sparse filesystems anymore. This is already checked above.
*/
if (gdb_off) {
primary = sbi->s_group_desc[gdb_num];
if ((err = ext4_journal_get_write_access(handle, primary)))
goto exit_journal;
if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
(err = reserve_backup_gdb(handle, inode, input)))
goto exit_journal;
} else if ((err = add_new_gdb(handle, inode, input, &primary)))
goto exit_journal;
/*
* OK, now we've set up the new group. Time to make it active.
*
* Current kernels don't lock all allocations via lock_super(),
* so we have to be safe wrt. concurrent accesses the group
* data. So we need to be careful to set all of the relevant
* group descriptor data etc. *before* we enable the group.
*
* The key field here is sbi->s_groups_count: as long as
* that retains its old value, nobody is going to access the new
* group.
*
* So first we update all the descriptor metadata for the new
* group; then we update the total disk blocks count; then we
* update the groups count to enable the group; then finally we
* update the free space counts so that the system can start
* using the new disk blocks.
*/
num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group);
/* Update group descriptor block for new group */
gdp = (struct ext4_group_desc *)((char *)primary->b_data +
gdb_off * EXT4_DESC_SIZE(sb));
ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
ext4_free_blks_set(sb, gdp, input->free_blocks_count);
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
/*
* We can allocate memory for mb_alloc based on the new group
* descriptor
*/
err = ext4_mb_add_groupinfo(sb, input->group, gdp);
if (err) {
ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
goto exit_journal;
}
/*
* Make the new blocks and inodes valid next. We do this before
* increasing the group count so that once the group is enabled,
* all of its blocks and inodes are already valid.
*
* We always allocate group-by-group, then block-by-block or
* inode-by-inode within a group, so enabling these
* blocks/inodes before the group is live won't actually let us
* allocate the new space yet.
*/
ext4_blocks_count_set(es, ext4_blocks_count(es) +
input->blocks_count);
le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));
/*
* We need to protect s_groups_count against other CPUs seeing
* inconsistent state in the superblock.
*
* The precise rules we use are:
*
* * Writers of s_groups_count *must* hold lock_super
* AND
* * Writers must perform a smp_wmb() after updating all dependent
* data and before modifying the groups count
*
* * Readers must hold lock_super() over the access
* OR
* * Readers must perform an smp_rmb() after reading the groups count
* and before reading any dependent data.
*
* NB. These rules can be relaxed when checking the group count
* while freeing data, as we can only allocate from a block
* group after serialising against the group count, and we can
* only then free after serialising in turn against that
* allocation.
*/
smp_wmb();
/* Update the global fs size fields */
sbi->s_groups_count++;
ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
ext4_handle_dirty_metadata(handle, NULL, primary);
/* Update the reserved block counts only once the new group is
* active. */
ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
input->reserved_blocks);
/* Update the free space counts */
percpu_counter_add(&sbi->s_freeblocks_counter,
input->free_blocks_count);
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb));
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group);
sbi->s_flex_groups[flex_group].free_blocks +=
input->free_blocks_count;
sbi->s_flex_groups[flex_group].free_inodes +=
EXT4_INODES_PER_GROUP(sb);
}
ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
sb->s_dirt = 1;
exit_journal:
unlock_super(sb);
if ((err2 = ext4_journal_stop(handle)) && !err)
err = err2;
if (!err) {
update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
sizeof(struct ext4_super_block));
update_backups(sb, primary->b_blocknr, primary->b_data,
primary->b_size);
}
exit_put:
iput(inode);
return err;
} /* ext4_group_add */
|
Vulnerable
|
[
"CWE-20"
] |
linux-2.6
|
fdff73f094e7220602cc3f8959c7230517976412
|
2.8124556592973084e+38
| 225 |
ext4: Initialize the new group descriptor when resizing the filesystem
Make sure all of the fields of the group descriptor are properly
initialized. Previously, we allowed the bg_flags field to contain
random garbage, which could trigger non-deterministic behavior,
including a kernel OOPS.
http://bugzilla.kernel.org/show_bug.cgi?id=12433
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@kernel.org
| 1 |
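The fix above boils down to never leaving descriptor fields to whatever bytes happen to be on disk: start from a zeroed descriptor and set every field explicitly, including bg_flags. A minimal stand-alone sketch of that pattern follows; the struct and flag names are simplified stand-ins, not the real ext4 definitions.
#include <stdint.h>
#include <string.h>
/* Simplified stand-in for a block group descriptor; the real ext4
 * struct has more fields and explicit endianness handling. */
struct demo_group_desc {
    uint32_t bg_block_bitmap;
    uint32_t bg_inode_bitmap;
    uint32_t bg_inode_table;
    uint16_t bg_free_blocks_count;
    uint16_t bg_free_inodes_count;
    uint16_t bg_flags;          /* must not be left as on-disk garbage */
    uint16_t bg_checksum;
};
#define DEMO_BG_INODE_ZEROED 0x0004
static void demo_init_group_desc(struct demo_group_desc *gdp,
                                 uint32_t block_bitmap, uint32_t inode_bitmap,
                                 uint32_t inode_table, uint16_t free_blocks,
                                 uint16_t free_inodes)
{
    /* Start from a known state so no field keeps stale disk contents. */
    memset(gdp, 0, sizeof(*gdp));
    gdp->bg_block_bitmap = block_bitmap;
    gdp->bg_inode_bitmap = inode_bitmap;
    gdp->bg_inode_table = inode_table;
    gdp->bg_free_blocks_count = free_blocks;
    gdp->bg_free_inodes_count = free_inodes;
    gdp->bg_flags = DEMO_BG_INODE_ZEROED;   /* set, not OR-ed into garbage */
    /* the checksum would then be computed over fully initialized fields */
}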
__ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_mutable_offsets *offs,
bool is_template)
{
struct ieee80211_local *local = hw_to_local(hw);
struct beacon_data *beacon = NULL;
struct sk_buff *skb = NULL;
struct ieee80211_tx_info *info;
struct ieee80211_sub_if_data *sdata = NULL;
enum nl80211_band band;
struct ieee80211_tx_rate_control txrc;
struct ieee80211_chanctx_conf *chanctx_conf;
int csa_off_base = 0;
rcu_read_lock();
sdata = vif_to_sdata(vif);
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
goto out;
if (offs)
memset(offs, 0, sizeof(*offs));
if (sdata->vif.type == NL80211_IFTYPE_AP) {
struct ieee80211_if_ap *ap = &sdata->u.ap;
beacon = rcu_dereference(ap->beacon);
if (beacon) {
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
ieee80211_beacon_update_cntdwn(vif);
ieee80211_set_beacon_cntdwn(sdata, beacon);
}
/*
* headroom, head length,
* tail length and maximum TIM length
*/
skb = dev_alloc_skb(local->tx_headroom +
beacon->head_len +
beacon->tail_len + 256 +
local->hw.extra_beacon_tailroom);
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
is_template);
if (offs) {
offs->tim_offset = beacon->head_len;
offs->tim_length = skb->len - beacon->head_len;
/* for AP the csa offsets are from tail */
csa_off_base = skb->len;
}
if (beacon->tail)
skb_put_data(skb, beacon->tail,
beacon->tail_len);
if (ieee80211_beacon_protect(skb, local, sdata) < 0)
goto out;
} else
goto out;
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_hdr *hdr;
beacon = rcu_dereference(ifibss->presp);
if (!beacon)
goto out;
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
__ieee80211_beacon_update_cntdwn(beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon);
}
skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
local->hw.extra_beacon_tailroom);
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
hdr = (struct ieee80211_hdr *) skb->data;
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_BEACON);
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
beacon = rcu_dereference(ifmsh->beacon);
if (!beacon)
goto out;
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
/* TODO: For mesh csa_counter is in TU, so
* decrementing it by one isn't correct, but
* for now we leave it consistent with overall
* mac80211's behavior.
*/
__ieee80211_beacon_update_cntdwn(beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon);
}
if (ifmsh->sync_ops)
ifmsh->sync_ops->adjust_tsf(sdata, beacon);
skb = dev_alloc_skb(local->tx_headroom +
beacon->head_len +
256 + /* TIM IE */
beacon->tail_len +
local->hw.extra_beacon_tailroom);
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
if (offs) {
offs->tim_offset = beacon->head_len;
offs->tim_length = skb->len - beacon->head_len;
}
skb_put_data(skb, beacon->tail, beacon->tail_len);
} else {
WARN_ON(1);
goto out;
}
/* CSA offsets */
if (offs && beacon) {
int i;
for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) {
u16 csa_off = beacon->cntdwn_counter_offsets[i];
if (!csa_off)
continue;
offs->cntdwn_counter_offs[i] = csa_off_base + csa_off;
}
}
band = chanctx_conf->def.chan->band;
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
info->flags |= IEEE80211_TX_CTL_NO_ACK;
info->band = band;
memset(&txrc, 0, sizeof(txrc));
txrc.hw = hw;
txrc.sband = local->hw.wiphy->bands[band];
txrc.bss_conf = &sdata->vif.bss_conf;
txrc.skb = skb;
txrc.reported_rate.idx = -1;
if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band])
txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band];
else
txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
txrc.bss = true;
rate_control_get_rate(sdata, NULL, &txrc);
info->control.vif = vif;
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_ASSIGN_SEQ |
IEEE80211_TX_CTL_FIRST_FRAGMENT;
out:
rcu_read_unlock();
return skb;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
bddc0c411a45d3718ac535a070f349be8eca8d48
|
3.049121365310494e+38
| 185 |
mac80211: Fix NULL ptr deref for injected rate info
The commit cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx
queue") moved the code to validate the radiotap header from
ieee80211_monitor_start_xmit to ieee80211_parse_tx_radiotap. This made is
possible to share more code with the new Tx queue selection code for
injected frames. But at the same time, it now required the call of
ieee80211_parse_tx_radiotap at the beginning of functions which wanted to
handle the radiotap header. And this broke the rate parser for radiotap
header parser.
The radiotap parser for rates is operating most of the time only on the
data in the actual radiotap header. But for the 802.11a/b/g rates, it must
also know the selected band from the chandef information. But this
information is only written to the ieee80211_tx_info at the end of the
ieee80211_monitor_start_xmit - long after ieee80211_parse_tx_radiotap was
already called. The info->band information was therefore always 0
(NL80211_BAND_2GHZ) when the parser code tried to access it.
For a 5GHz only device, injecting a frame with 802.11a rates would cause a
NULL pointer dereference because local->hw.wiphy->bands[NL80211_BAND_2GHZ]
would most likely have been NULL when the radiotap parser searched for the
correct rate index of the driver.
Cc: stable@vger.kernel.org
Reported-by: Ben Greear <greearb@candelatech.com>
Fixes: cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx queue")
Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be>
[sven@narfation.org: added commit message]
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Link: https://lore.kernel.org/r/20210530133226.40587-1-sven@narfation.org
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
| 0 |
static void avifDecoderDataResetCodec(avifDecoderData * data)
{
for (unsigned int i = 0; i < data->tiles.count; ++i) {
avifTile * tile = &data->tiles.tile[i];
if (tile->image) {
avifImageFreePlanes(tile->image, AVIF_PLANES_ALL); // forget any pointers into codec image buffers
}
if (tile->codec) {
avifCodecDestroy(tile->codec);
tile->codec = NULL;
}
}
}
|
Safe
|
[
"CWE-703",
"CWE-787"
] |
libavif
|
0a8e7244d494ae98e9756355dfbfb6697ded2ff9
|
2.2052036146996506e+38
| 13 |
Set max image size to 16384 * 16384
Fix https://crbug.com/oss-fuzz/24728 and
https://crbug.com/oss-fuzz/24734.
| 0 |
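The libavif fix above is a dimension cap applied before any plane allocation. A hedged sketch of such a guard; the constant and function names are illustrative, not libavif's actual API.
#include <stdint.h>
#define DEMO_MAX_DIMENSION 16384u   /* the 16384 * 16384 cap mentioned above */
/* Returns 1 if the advertised dimensions are acceptable, 0 otherwise. */
static int demo_image_size_ok(uint32_t width, uint32_t height)
{
    if (width == 0 || height == 0)
        return 0;
    if (width > DEMO_MAX_DIMENSION || height > DEMO_MAX_DIMENSION)
        return 0;
    /* 16384 * 16384 fits in 32 bits, so the pixel count cannot overflow
     * a uint64_t even after multiplying by bytes per pixel. */
    return 1;
}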
u16 gf_mp3_sampling_rate(u32 hdr)
{
u16 res;
/* extract the necessary fields from the MP3 header */
u8 version = gf_mp3_version(hdr);
u8 sampleRateIndex = (hdr >> 10) & 0x3;
switch (sampleRateIndex) {
case 0:
res = 44100;
break;
case 1:
res = 48000;
break;
case 2:
res = 32000;
break;
default:
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] Samplerate index not valid\n"));
return 0;
}
/*reserved or MPEG-1*/
if (version & 1) return res;
/*MPEG-2*/
res /= 2;
/*MPEG-2.5*/
if (version == 0) res /= 2;
return res;
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
|
2.240371347978761e+38
| 30 |
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
| 0 |
int ldb_comparison_fold(struct ldb_context *ldb, void *mem_ctx,
const struct ldb_val *v1, const struct ldb_val *v2)
{
const char *s1=(const char *)v1->data, *s2=(const char *)v2->data;
size_t n1 = v1->length, n2 = v2->length;
char *b1, *b2;
const char *u1, *u2;
int ret;
while (n1 && *s1 == ' ') { s1++; n1--; };
while (n2 && *s2 == ' ') { s2++; n2--; };
while (n1 && n2 && *s1 && *s2) {
/* the first 127 (0x7F) chars are ascii and utf8 guarantees they
* never appear in multibyte sequences */
if (((unsigned char)s1[0]) & 0x80) goto utf8str;
if (((unsigned char)s2[0]) & 0x80) goto utf8str;
if (toupper((unsigned char)*s1) != toupper((unsigned char)*s2))
break;
if (*s1 == ' ') {
while (n1 && s1[0] == s1[1]) { s1++; n1--; }
while (n2 && s2[0] == s2[1]) { s2++; n2--; }
}
s1++; s2++;
n1--; n2--;
}
/* check for trailing spaces only if the other pointer has
* reached the end of the strings otherwise we can
* mistakenly match. ex. "domain users" <->
* "domainUpdates"
*/
if (n1 && *s1 == ' ' && (!n2 || !*s2)) {
while (n1 && *s1 == ' ') { s1++; n1--; }
}
if (n2 && *s2 == ' ' && (!n1 || !*s1)) {
while (n2 && *s2 == ' ') { s2++; n2--; }
}
if (n1 == 0 && n2 != 0) {
return -(int)toupper(*s2);
}
if (n2 == 0 && n1 != 0) {
return (int)toupper(*s1);
}
if (n1 == 0 && n2 == 0) {
return 0;
}
return (int)toupper(*s1) - (int)toupper(*s2);
utf8str:
/* no need to recheck from the start, just from the first utf8 char found */
b1 = ldb_casefold(ldb, mem_ctx, s1, n1);
b2 = ldb_casefold(ldb, mem_ctx, s2, n2);
if (!b1 || !b2) {
/* One of the strings was not UTF8, so we have no
* options but to do a binary compare */
talloc_free(b1);
talloc_free(b2);
ret = memcmp(s1, s2, MIN(n1, n2));
if (ret == 0) {
if (n1 == n2) return 0;
if (n1 > n2) {
return (int)toupper(s1[n2]);
} else {
return -(int)toupper(s2[n1]);
}
}
return ret;
}
u1 = b1;
u2 = b2;
while (*u1 & *u2) {
if (*u1 != *u2)
break;
if (*u1 == ' ') {
while (u1[0] == u1[1]) u1++;
while (u2[0] == u2[1]) u2++;
}
u1++; u2++;
}
if (! (*u1 && *u2)) {
while (*u1 == ' ') u1++;
while (*u2 == ' ') u2++;
}
ret = (int)(*u1 - *u2);
talloc_free(b1);
talloc_free(b2);
return ret;
}
|
Safe
|
[
"CWE-787"
] |
samba
|
fab6b79b7724f0b636963be528483e3e946884aa
|
1.3694290586209832e+38
| 93 |
CVE-2021-20277 ldb/attrib_handlers casefold: stay in bounds
For a string that had N spaces at the beginning, we would
try to move N bytes beyond the end of the string.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14655
Signed-off-by: Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
Reviewed-by: Andrew Bartlett <abartlet@samba.org>
(cherry-picked from commit for master)
| 0 |
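The samba bug above is a space-skipping loop over a length-counted buffer that forgets to keep the remaining length in sync with the pointer. A minimal stand-alone sketch of the bounded version of that loop (illustrative, not the actual ldb code):
#include <stddef.h>
/* Skip leading spaces without stepping past the end of a
 * length-counted (not NUL-terminated) buffer. */
static const char *demo_skip_leading_spaces(const char *s, size_t *len)
{
    size_t n = *len;
    while (n > 0 && *s == ' ') {   /* check the length before reading */
        s++;
        n--;                       /* keep the count in sync with the pointer */
    }
    *len = n;
    return s;
}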
static void init_explored_state(struct bpf_verifier_env *env, int idx)
{
env->insn_aux_data[idx].prune_point = true;
}
|
Safe
|
[
"CWE-119",
"CWE-681",
"CWE-787"
] |
linux
|
5b9fbeb75b6a98955f628e205ac26689bcb1383e
|
6.400326263764747e+37
| 4 |
bpf: Fix scalar32_min_max_or bounds tracking
Simon reported an issue with the current scalar32_min_max_or() implementation.
That is, compared to the other 32 bit subreg tracking functions, the code in
scalar32_min_max_or() stands out in that it's using the 64 bit registers instead
of 32 bit ones. This leads to bounds tracking issues, for example:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
The bound tests on the map value force the upper unsigned bound to be 25769803777
in 64 bit (0b11000000000000000000000000000000001) and then the lower one to be 1. By
using OR they are truncated and thus result in the range [1,1] for the 32 bit reg
tracker. This is incorrect given the only thing we know is that the value must be
positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the
subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes
sense, for example, for the case where we update dst_reg->s32_{min,max}_value in
the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as
we know that these are positive. Previously, in the else branch the 64 bit values
of umin_value=1 and umax_value=32212254719 were used and latter got truncated to
be 1 as upper bound there. After the fix the subreg range is now correct:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Reported-by: Simon Scannell <scannell.smn@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
| 0 |
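The bpf commit above is, in essence, about deriving OR bounds from the 32-bit sub-register limits rather than from truncated 64-bit ones. A small self-contained sketch of one sound way to bound x | y given only 32-bit per-operand maxima; this is a simplification for illustration, not the verifier's tnum-based code.
#include <stdint.h>
/* Smallest value of the form 2^k - 1 that is >= v; every bit that can be
 * set in v is set in the result, so it is a valid upper bound for OR. */
static uint32_t demo_fill_low_bits(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v;
}
/* Given x <= x_max and y <= y_max (32-bit maxima), bound x | y. Using
 * 64-bit maxima and truncating afterwards is exactly the bug described. */
static uint32_t demo_or_umax32(uint32_t x_max, uint32_t y_max)
{
    return demo_fill_low_bits(x_max | y_max);
}
/* The minimum is simpler: OR can only set bits, so the result is at
 * least as large as either operand's minimum. */
static uint32_t demo_or_umin32(uint32_t x_min, uint32_t y_min)
{
    return x_min > y_min ? x_min : y_min;
}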
show_entry_xml (ExifEntry *e, void *data)
{
unsigned char *ids = data;
char v[TAG_VALUE_BUF], t[TAG_VALUE_BUF];
if (*ids) {
fprintf (stdout, "<x%04x>", e->tag);
fprintf (stdout, "%s", escape_xml(exif_entry_get_value (e, v, sizeof (v))));
fprintf (stdout, "</x%04x>", e->tag);
} else {
strncpy (t, exif_tag_get_title_in_ifd(e->tag, exif_entry_get_ifd(e)), sizeof (t));
t[sizeof(t)-1] = 0;
/* Remove invalid characters from tag eg. (, ), space */
remove_bad_chars(t);
fprintf (stdout, "\t<%s>", t);
fprintf (stdout, "%s", escape_xml(exif_entry_get_value (e, v, sizeof (v))));
fprintf (stdout, "</%s>\n", t);
}
}
|
Safe
|
[
"CWE-476"
] |
exif
|
f6334d9d32437ef13dc902f0a88a2be0063d9d1c
|
1.8398780077702706e+37
| 21 |
added empty string check; an empty string would otherwise lead to NULL ptr deref/crash in exif XML display. fixes https://github.com/libexif/exif/issues/4
| 0 |
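The exif fix above is a guard against an empty (or missing) tag title before it is used as an XML element name. A hedged stand-alone sketch of that kind of check, with illustrative names:
#include <stdio.h>
#include <string.h>
static void demo_print_tag(const char *title)
{
    char t[64];
    /* Reject NULL or empty titles before using them as an XML tag name;
     * without this, the copy/printf path can dereference NULL or emit
     * an empty element name. */
    if (title == NULL || *title == '\0') {
        fprintf(stderr, "skipping entry with unknown tag title\n");
        return;
    }
    strncpy(t, title, sizeof(t) - 1);
    t[sizeof(t) - 1] = '\0';
    printf("\t<%s>...</%s>\n", t, t);
}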
print_html_status(pe_working_set_t * data_set, const char *filename, gboolean web_cgi)
{
FILE *stream;
GListPtr gIter = NULL;
node_t *dc = NULL;
static int updates = 0;
char *filename_tmp = NULL;
if (web_cgi) {
stream = stdout;
fprintf(stream, "Content-type: text/html\n\n");
} else {
filename_tmp = crm_concat(filename, "tmp", '.');
stream = fopen(filename_tmp, "w");
if (stream == NULL) {
crm_perror(LOG_ERR, "Cannot open %s for writing", filename_tmp);
free(filename_tmp);
return -1;
}
}
updates++;
dc = data_set->dc_node;
fprintf(stream, "<html>");
fprintf(stream, "<head>");
fprintf(stream, "<title>Cluster status</title>");
/* content="%d;url=http://webdesign.about.com" */
fprintf(stream, "<meta http-equiv=\"refresh\" content=\"%d\">", reconnect_msec / 1000);
fprintf(stream, "</head>");
/*** SUMMARY ***/
fprintf(stream, "<h2>Cluster summary</h2>");
{
char *now_str = NULL;
time_t now = time(NULL);
now_str = ctime(&now);
now_str[24] = EOS; /* replace the newline */
fprintf(stream, "Last updated: <b>%s</b><br/>\n", now_str);
}
if (dc == NULL) {
fprintf(stream, "Current DC: <font color=\"red\"><b>NONE</b></font><br/>");
} else {
fprintf(stream, "Current DC: %s (%s)<br/>", dc->details->uname, dc->details->id);
}
fprintf(stream, "%d Nodes configured.<br/>", g_list_length(data_set->nodes));
fprintf(stream, "%d Resources configured.<br/>", count_resources(data_set, NULL));
/*** CONFIG ***/
fprintf(stream, "<h3>Config Options</h3>\n");
fprintf(stream, "<table>\n");
fprintf(stream, "<tr><td>STONITH of failed nodes</td><td>:</td><td>%s</td></tr>\n",
is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
fprintf(stream, "<tr><td>Cluster is</td><td>:</td><td>%ssymmetric</td></tr>\n",
is_set(data_set->flags, pe_flag_symmetric_cluster) ? "" : "a-");
fprintf(stream, "<tr><td>No Quorum Policy</td><td>:</td><td>");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
fprintf(stream, "Freeze resources");
break;
case no_quorum_stop:
fprintf(stream, "Stop ALL resources");
break;
case no_quorum_ignore:
fprintf(stream, "Ignore");
break;
case no_quorum_suicide:
fprintf(stream, "Suicide");
break;
}
fprintf(stream, "\n</td></tr>\n</table>\n");
/*** NODE LIST ***/
fprintf(stream, "<h2>Node List</h2>\n");
fprintf(stream, "<ul>\n");
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
fprintf(stream, "<li>");
if (node->details->standby_onfail && node->details->online) {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"orange\">standby (on-fail)</font>\n");
} else if (node->details->standby && node->details->online) {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"orange\">standby</font>\n");
} else if (node->details->standby) {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"red\">OFFLINE (standby)</font>\n");
} else if (node->details->online) {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"green\">online</font>\n");
} else {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"red\">OFFLINE</font>\n");
}
if (group_by_node) {
GListPtr lpc2 = NULL;
fprintf(stream, "<ul>\n");
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
resource_t *rsc = (resource_t *) lpc2->data;
fprintf(stream, "<li>");
rsc->fns->print(rsc, NULL, pe_print_html | pe_print_rsconly, stream);
fprintf(stream, "</li>\n");
}
fprintf(stream, "</ul>\n");
}
fprintf(stream, "</li>\n");
}
fprintf(stream, "</ul>\n");
if (group_by_node && inactive_resources) {
fprintf(stream, "<h2>Inactive Resources</h2>\n");
} else if (group_by_node == FALSE) {
fprintf(stream, "<h2>Resource List</h2>\n");
}
if (group_by_node == FALSE || inactive_resources) {
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
gboolean is_active = rsc->fns->active(rsc, TRUE);
gboolean partially_active = rsc->fns->active(rsc, FALSE);
if (is_set(rsc->flags, pe_rsc_orphan) && is_active == FALSE) {
continue;
} else if (group_by_node == FALSE) {
if (partially_active || inactive_resources) {
rsc->fns->print(rsc, NULL, pe_print_html, stream);
}
} else if (is_active == FALSE && inactive_resources) {
rsc->fns->print(rsc, NULL, pe_print_html, stream);
}
}
}
fprintf(stream, "</html>");
fflush(stream);
fclose(stream);
if (!web_cgi) {
if (rename(filename_tmp, filename) != 0) {
crm_perror(LOG_ERR, "Unable to rename %s->%s", filename_tmp, filename);
}
free(filename_tmp);
}
return 0;
}
|
Safe
|
[
"CWE-399"
] |
pacemaker
|
564f7cc2a51dcd2f28ab12a13394f31be5aa3c93
|
2.1747650369709505e+38
| 160 |
High: core: Internal tls api improvements for reuse with future LRMD tls backend.
| 0 |
int cil_gen_boolif(struct cil_db *db, struct cil_tree_node *parse_current, struct cil_tree_node *ast_node, int tunableif)
{
enum cil_syntax syntax[] = {
CIL_SYN_STRING,
CIL_SYN_STRING | CIL_SYN_LIST,
CIL_SYN_LIST,
CIL_SYN_LIST | CIL_SYN_END,
CIL_SYN_END
};
int syntax_len = sizeof(syntax)/sizeof(*syntax);
struct cil_booleanif *bif = NULL;
struct cil_tree_node *next = NULL;
int rc = SEPOL_ERR;
if (db == NULL || parse_current == NULL || ast_node == NULL) {
goto exit;
}
rc = __cil_verify_syntax(parse_current, syntax, syntax_len);
if (rc != SEPOL_OK) {
goto exit;
}
cil_boolif_init(&bif);
bif->preserved_tunable = tunableif;
rc = cil_gen_expr(parse_current->next, CIL_BOOL, &bif->str_expr);
if (rc != SEPOL_OK) {
goto exit;
}
rc = cil_verify_conditional_blocks(parse_current->next->next);
if (rc != SEPOL_OK) {
goto exit;
}
/* Destroying expr tree */
next = parse_current->next->next;
cil_tree_subtree_destroy(parse_current->next);
parse_current->next = next;
ast_node->flavor = CIL_BOOLEANIF;
ast_node->data = bif;
return SEPOL_OK;
exit:
if (tunableif) {
cil_tree_log(parse_current, CIL_ERR, "Bad tunableif (treated as a booleanif due to preserve-tunables) declaration");
} else {
cil_tree_log(parse_current, CIL_ERR, "Bad booleanif declaration");
}
cil_destroy_boolif(bif);
return rc;
}
|
Safe
|
[
"CWE-125"
] |
selinux
|
340f0eb7f3673e8aacaf0a96cbfcd4d12a405521
|
2.4877026972253378e+38
| 55 |
libsepol/cil: Check for statements not allowed in optional blocks
While there are some checks for invalid statements in an optional
block when resolving the AST, there are no checks when building the
AST.
OSS-Fuzz found the following policy which caused a null dereference
in cil_tree_get_next_path().
(blockinherit b3)
(sid SID)
(sidorder(SID))
(optional o
(ibpkeycon :(1 0)s)
(block b3
(filecon""block())
(filecon""block())))
The problem is that the blockinherit copies block b3 before
the optional block is disabled. When the optional is disabled,
block b3 is deleted along with everything else in the optional.
Later, when filecon statements with the same path are found an
error message is produced and in trying to find out where the block
was copied from, the reference to the deleted block is used. The
error handling code assumes (rightly) that if something was copied
from a block then that block should still exist.
It is clear that in-statements, blocks, and macros cannot be in an
optional, because that allows nodes to be copied from the optional
block to somewhere outside even though the optional could be disabled
later. When optionals are disabled the AST is reset and the
resolution is restarted at the point of resolving macro calls, so
anything resolved before macro calls will never be re-resolved.
This includes tunableifs, in-statements, blockinherits,
blockabstracts, and macro definitions. Tunable declarations also
cannot be in an optional block because they are needed to resolve
tunableifs. It should be fine to allow blockinherit statements in
an optional, because that is copying nodes from outside the optional
to the optional and if the optional is later disabled, everything
will be deleted anyway.
Check and quit with an error if a tunable declaration, in-statement,
block, blockabstract, or macro definition is found within an
optional when either building or resolving the AST.
Signed-off-by: James Carter <jwcart2@gmail.com>
| 0 |
void Item_param::reset()
{
DBUG_ENTER("Item_param::reset");
/* Shrink string buffer if it's bigger than max possible CHAR column */
if (str_value.alloced_length() > MAX_CHAR_WIDTH)
str_value.free();
else
str_value.length(0);
str_value_ptr.length(0);
/*
We must prevent all charset conversions until data has been written
to the binary log.
*/
str_value.set_charset(&my_charset_bin);
collation.set(&my_charset_bin, DERIVATION_COERCIBLE);
state= NO_VALUE;
maybe_null= 1;
null_value= 0;
/*
Don't reset item_type to PARAM_ITEM: it's only needed to guard
us from item optimizations at prepare stage, when item doesn't yet
contain a literal of some kind.
In all other cases when this object is accessed its value is
set (this assumption is guarded by 'state' and
DBUG_ASSERTS(state != NO_VALUE) in all Item_param::get_*
methods).
*/
DBUG_VOID_RETURN;
}
|
Safe
|
[] |
server
|
b000e169562697aa072600695d4f0c0412f94f4f
|
2.143893984529401e+38
| 29 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <ajo.robert@oracle.com>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
| 0 |
//! Normalize multi-valued pixels of the image instance, with respect to their L2-norm \newinstance.
CImg<Tfloat> get_normalize() const {
return CImg<Tfloat>(*this,false).normalize();
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
3.2781845471091016e+38
| 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
void Monitor::do_admin_command(string command, cmdmap_t& cmdmap, string format,
ostream& ss)
{
Mutex::Locker l(lock);
boost::scoped_ptr<Formatter> f(Formatter::create(format));
string args;
for (cmdmap_t::iterator p = cmdmap.begin();
p != cmdmap.end(); ++p) {
if (p->first == "prefix")
continue;
if (!args.empty())
args += ", ";
args += cmd_vartype_stringify(p->second);
}
args = "[" + args + "]";
bool read_only = (command == "mon_status" ||
command == "mon metadata" ||
command == "quorum_status" ||
command == "ops" ||
command == "sessions");
(read_only ? audit_clog->debug() : audit_clog->info())
<< "from='admin socket' entity='admin socket' "
<< "cmd='" << command << "' args=" << args << ": dispatch";
if (command == "mon_status") {
get_mon_status(f.get(), ss);
if (f)
f->flush(ss);
} else if (command == "quorum_status") {
_quorum_status(f.get(), ss);
} else if (command == "sync_force") {
string validate;
if ((!cmd_getval(g_ceph_context, cmdmap, "validate", validate)) ||
(validate != "--yes-i-really-mean-it")) {
ss << "are you SURE? this will mean the monitor store will be erased "
"the next time the monitor is restarted. pass "
"'--yes-i-really-mean-it' if you really do.";
goto abort;
}
sync_force(f.get(), ss);
} else if (command.compare(0, 23, "add_bootstrap_peer_hint") == 0) {
if (!_add_bootstrap_peer_hint(command, cmdmap, ss))
goto abort;
} else if (command == "quorum enter") {
elector.start_participating();
start_election();
ss << "started responding to quorum, initiated new election";
} else if (command == "quorum exit") {
start_election();
elector.stop_participating();
ss << "stopped responding to quorum, initiated new election";
} else if (command == "ops") {
(void)op_tracker.dump_ops_in_flight(f.get());
if (f) {
f->flush(ss);
}
} else if (command == "sessions") {
if (f) {
f->open_array_section("sessions");
for (auto p : session_map.sessions) {
f->dump_stream("session") << *p;
}
f->close_section();
f->flush(ss);
}
} else {
assert(0 == "bad AdminSocket command binding");
}
(read_only ? audit_clog->debug() : audit_clog->info())
<< "from='admin socket' "
<< "entity='admin socket' "
<< "cmd=" << command << " "
<< "args=" << args << ": finished";
return;
abort:
(read_only ? audit_clog->debug() : audit_clog->info())
<< "from='admin socket' "
<< "entity='admin socket' "
<< "cmd=" << command << " "
<< "args=" << args << ": aborted";
}
|
Safe
|
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
|
2.6145467373290287e+38
| 88 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <sage@redhat.com>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
| 0 |
TEST(ExpressionTypeTest, WithMinKeyValue) {
assertExpectedResults("$type", {{{Value(MINKEY)}, Value("minKey"_sd)}});
}
|
Safe
|
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
|
1.7070714080322945e+38
| 3 |
SERVER-38070 fix infinite loop in agg expression
| 0 |
static void __ip_vs_del_service(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest, *nxt;
struct ip_vs_scheduler *old_sched;
struct ip_vs_pe *old_pe;
struct netns_ipvs *ipvs = net_ipvs(svc->net);
pr_info("%s: enter\n", __func__);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
ipvs->num_services--;
ip_vs_stop_estimator(svc->net, &svc->stats);
/* Unbind scheduler */
old_sched = svc->scheduler;
ip_vs_unbind_scheduler(svc);
ip_vs_scheduler_put(old_sched);
/* Unbind persistence engine */
old_pe = svc->pe;
ip_vs_unbind_pe(svc);
ip_vs_pe_put(old_pe);
/* Unbind app inc */
if (svc->inc) {
ip_vs_app_inc_put(svc->inc);
svc->inc = NULL;
}
/*
* Unlink the whole destination list
*/
list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
__ip_vs_unlink_dest(svc, dest, 0);
__ip_vs_del_dest(svc->net, dest);
}
/*
* Update the virtual service counters
*/
if (svc->port == FTPPORT)
atomic_dec(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
atomic_dec(&ipvs->nullsvc_counter);
/*
* Free the service if nobody refers to it
*/
if (atomic_read(&svc->refcnt) == 0) {
IP_VS_DBG_BUF(3, "Removing service %u/%s:%u usecnt=%d\n",
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
free_percpu(svc->stats.cpustats);
kfree(svc);
}
/* decrease the module use count */
ip_vs_use_count_dec();
}
|
Safe
|
[
"CWE-200"
] |
linux
|
2d8a041b7bfe1097af21441cb77d6af95f4f4680
|
1.831435881586548e+38
| 62 |
ipvs: fix info leak in getsockopt(IP_VS_SO_GET_TIMEOUT)
If at least one of CONFIG_IP_VS_PROTO_TCP or CONFIG_IP_VS_PROTO_UDP is
not set, __ip_vs_get_timeouts() does not fully initialize the structure
that gets copied to userland and thereby leaks up to 12 bytes of kernel
stack. Add an explicit memset(0) before passing the structure to
__ip_vs_get_timeouts() to avoid the info leak.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Cc: Wensong Zhang <wensong@linux-vs.org>
Cc: Simon Horman <horms@verge.net.au>
Cc: Julian Anastasov <ja@ssi.bg>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
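The ipvs fix above is the standard pattern for avoiding kernel stack info leaks: zero the whole structure before a conditionally-partial fill and before it is copied out. A stand-alone sketch with a stand-in struct, not the real ip_vs_timeout_user:
#include <string.h>
struct demo_timeouts {
    int tcp_timeout;
    int tcp_fin_timeout;
    int udp_timeout;
};
/* Only some fields may be filled in, depending on build options. */
static void demo_get_timeouts(struct demo_timeouts *t, int have_tcp, int have_udp)
{
    if (have_tcp) {
        t->tcp_timeout = 900;
        t->tcp_fin_timeout = 120;
    }
    if (have_udp)
        t->udp_timeout = 300;
}
static void demo_fill_for_userspace(struct demo_timeouts *out,
                                    int have_tcp, int have_udp)
{
    /* Zero first so fields skipped above never expose stale stack bytes
     * when the structure is later copied to user space. */
    memset(out, 0, sizeof(*out));
    demo_get_timeouts(out, have_tcp, have_udp);
}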
ins_compl_stop(int c, int prev_mode, int retval)
{
char_u *ptr;
int want_cindent;
// Get here when we have finished typing a sequence of ^N and
// ^P or other completion characters in CTRL-X mode. Free up
// memory that was used, and make sure we can redo the insert.
if (compl_curr_match != NULL || compl_leader != NULL || c == Ctrl_E)
{
// If any of the original typed text has been changed, eg when
// ignorecase is set, we must add back-spaces to the redo
// buffer. We add as few as necessary to delete just the part
// of the original text that has changed.
// When using the longest match, edited the match or used
// CTRL-E then don't use the current match.
if (compl_curr_match != NULL && compl_used_match && c != Ctrl_E)
ptr = compl_curr_match->cp_str;
else
ptr = NULL;
ins_compl_fixRedoBufForLeader(ptr);
}
want_cindent = (get_can_cindent() && cindent_on());
// When completing whole lines: fix indent for 'cindent'.
// Otherwise, break line if it's too long.
if (compl_cont_mode == CTRL_X_WHOLE_LINE)
{
// re-indent the current line
if (want_cindent)
{
do_c_expr_indent();
want_cindent = FALSE; // don't do it again
}
}
else
{
int prev_col = curwin->w_cursor.col;
// put the cursor on the last char, for 'tw' formatting
if (prev_col > 0)
dec_cursor();
// only format when something was inserted
if (!arrow_used && !ins_need_undo_get() && c != Ctrl_E)
insertchar(NUL, 0, -1);
if (prev_col > 0
&& ml_get_curline()[curwin->w_cursor.col] != NUL)
inc_cursor();
}
// If the popup menu is displayed pressing CTRL-Y means accepting
// the selection without inserting anything. When
// compl_enter_selects is set the Enter key does the same.
if ((c == Ctrl_Y || (compl_enter_selects
&& (c == CAR || c == K_KENTER || c == NL)))
&& pum_visible())
retval = TRUE;
// CTRL-E means completion is Ended, go back to the typed text.
// but only do this, if the Popup is still visible
if (c == Ctrl_E)
{
ins_compl_delete();
if (compl_leader != NULL)
ins_bytes(compl_leader + get_compl_len());
else if (compl_first_match != NULL)
ins_bytes(compl_orig_text + get_compl_len());
retval = TRUE;
}
auto_format(FALSE, TRUE);
// Trigger the CompleteDonePre event to give scripts a chance to
// act upon the completion before clearing the info, and restore
// ctrl_x_mode, so that complete_info() can be used.
ctrl_x_mode = prev_mode;
ins_apply_autocmds(EVENT_COMPLETEDONEPRE);
ins_compl_free();
compl_started = FALSE;
compl_matches = 0;
if (!shortmess(SHM_COMPLETIONMENU))
msg_clr_cmdline(); // necessary for "noshowmode"
ctrl_x_mode = CTRL_X_NORMAL;
compl_enter_selects = FALSE;
if (edit_submode != NULL)
{
edit_submode = NULL;
showmode();
}
#ifdef FEAT_CMDWIN
if (c == Ctrl_C && cmdwin_type != 0)
// Avoid the popup menu remains displayed when leaving the
// command line window.
update_screen(0);
#endif
// Indent now if a key was typed that is in 'cinkeys'.
if (want_cindent && in_cinkeys(KEY_COMPLETE, ' ', inindent(0)))
do_c_expr_indent();
// Trigger the CompleteDone event to give scripts a chance to act
// upon the end of completion.
ins_apply_autocmds(EVENT_COMPLETEDONE);
return retval;
}
|
Vulnerable
|
[
"CWE-125"
] |
vim
|
f12129f1714f7d2301935bb21d896609bdac221c
|
1.230777313640475e+37
| 107 |
patch 9.0.0020: with some completion reading past end of string
Problem: With some completion reading past end of string.
Solution: Check the length of the string.
| 1 |
utf8_str (const gchar *utf8,
gchar *buf)
{
gunichar c = g_utf8_get_char_validated (utf8, -1);
if (c == (gunichar) -1 || c == (gunichar) -2)
{
gchar *temp = g_strdup_printf ("\\x%02x", (guint)(guchar)*utf8);
memset (buf, 0, 8);
memcpy (buf, temp, strlen (temp));
g_free (temp);
}
else
char_str (c, buf);
return buf;
}
|
Safe
|
[
"CWE-476"
] |
glib
|
fccef3cc822af74699cca84cd202719ae61ca3b9
|
3.044372340383681e+37
| 15 |
gmarkup: Fix crash in error handling path for closing elements
If something which looks like a closing tag is left unfinished, but
isn’t paired to an opening tag in the document, the error handling code
would do a null pointer dereference. Avoid that, at the cost of
introducing a new translatable error message.
Includes a test case, courtesy of pdknsk.
Signed-off-by: Philip Withnall <withnall@endlessm.com>
https://gitlab.gnome.org/GNOME/glib/issues/1461
| 0 |
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
int i, e;
if (!is_minor_gc(gc)) {
gc->gray_list = NULL;
gc->atomic_gray_list = NULL;
}
mrb_gc_mark_gv(mrb);
/* mark arena */
for (i=0,e=gc->arena_idx; i<e; i++) {
mrb_gc_mark(mrb, gc->arena[i]);
}
/* mark class hierarchy */
mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
/* mark built-in classes */
mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->range_class);
#ifndef MRB_WITHOUT_FLOAT
mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
#endif
mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);
mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);
/* mark top_self */
mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
/* mark exception */
mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
/* mark pre-allocated exception */
mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif
mark_context(mrb, mrb->c);
if (mrb->root_c != mrb->c) {
mark_context(mrb, mrb->root_c);
}
}
|
Safe
|
[
"CWE-415"
] |
mruby
|
97319697c8f9f6ff27b32589947e1918e3015503
|
9.54979765353999e+37
| 55 |
Cancel 9cdf439
Should not free the pointer in `realloc` since it can cause
a use-after-free problem.
| 0 |
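The mruby revert above is about the classic realloc pitfall: freeing (or losing) the original pointer when realloc fails. A minimal sketch of the safe pattern:
#include <stdlib.h>
/* Grow a buffer without freeing the caller's pointer on failure.
 * Returns the (possibly moved) buffer, or NULL with *buf untouched. */
static char *demo_grow(char **buf, size_t new_size)
{
    char *tmp = realloc(*buf, new_size);
    if (tmp == NULL) {
        /* Do NOT free(*buf) here: the old block is still valid and the
         * caller may still need it; freeing it invites use-after-free. */
        return NULL;
    }
    *buf = tmp;
    return tmp;
}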
lock_seed_file (int fd, const char *fname, int for_write)
{
#ifdef __GCC__
#warning Check whether we can lock on Windows.
#endif
#if LOCK_SEED_FILE
struct flock lck;
struct timeval tv;
int backoff=0;
/* We take a lock on the entire file. */
memset (&lck, 0, sizeof lck);
lck.l_type = for_write? F_WRLCK : F_RDLCK;
lck.l_whence = SEEK_SET;
while (fcntl (fd, F_SETLK, &lck) == -1)
{
if (errno != EAGAIN && errno != EACCES)
{
log_info (_("can't lock `%s': %s\n"), fname, strerror (errno));
return -1;
}
if (backoff > 2) /* Show the first message after ~2.25 seconds. */
log_info( _("waiting for lock on `%s'...\n"), fname);
tv.tv_sec = backoff;
tv.tv_usec = 250000;
select (0, NULL, NULL, NULL, &tv);
if (backoff < 10)
backoff++ ;
}
#endif /*!LOCK_SEED_FILE*/
return 0;
}
|
Safe
|
[
"CWE-200"
] |
libgcrypt
|
8dd45ad957b54b939c288a68720137386c7f6501
|
9.50140024786902e+37
| 35 |
random: Hash continuous areas in the csprng pool.
* random/random-csprng.c (mix_pool): Store the first hash at the end
of the pool.
--
This fixes a long standing bug (since 1998) in Libgcrypt and GnuPG.
An attacker who obtains 580 bytes of the random number from the
standard RNG can trivially predict the next 20 bytes of output.
For use in GnuPG this bug does not affect the default generation of
keys because running gpg for key creation creates at most 2 keys from
the pool: For a single 4096 bit RSA key 512 byte of random are
required and thus for the second key (encryption subkey), 20 bytes
could be predicted from the first key. However, the security of
an OpenPGP key depends on the primary key (which was generated first)
and thus the 20 predictable bytes should not be a problem. For the
default key length of 2048 bit nothing will be predictable.
For the former default of DSA+Elgamal key it is complicated to give an
answer: For 2048 bit keys a pool of 30 non-secret candidate primes of
about 300 bits each are first created. This reads at least 1140 bytes
from the pool and thus parts could be predicted. At some point a 256
bit secret is read from the pool; which in the worst case might be
partly predictable.
The bug was found and reported by Felix Dörre and Vladimir Klebanov,
Karlsruhe Institute of Technology. A paper describing the problem in
detail will shortly be published.
CVE-id: CVE-2016-6313
Signed-off-by: Werner Koch <wk@gnupg.org>
| 0 |
int lcc_network_buffer_set_security_level (lcc_network_buffer_t *nb, /* {{{ */
lcc_security_level_t level,
const char *username, const char *password)
{
char *username_copy;
char *password_copy;
if (level == NONE)
{
free (nb->username);
free (nb->password);
nb->username = NULL;
nb->password = NULL;
nb->seclevel = NONE;
lcc_network_buffer_initialize (nb);
return (0);
}
if (!have_gcrypt ())
return (ENOTSUP);
username_copy = strdup (username);
password_copy = strdup (password);
if ((username_copy == NULL) || (password_copy == NULL))
{
free (username_copy);
free (password_copy);
return (ENOMEM);
}
free (nb->username);
free (nb->password);
nb->username = username_copy;
nb->password = password_copy;
nb->seclevel = level;
lcc_network_buffer_initialize (nb);
return (0);
} /* }}} int lcc_network_buffer_set_security_level */
|
Safe
|
[
"CWE-119"
] |
collectd
|
8b4fed9940e02138b7e273e56863df03d1a39ef7
|
1.0184593094740442e+38
| 39 |
network plugin, libcollectdclient: Check return value of gcry_control().
Fixes: #1665
| 0 |
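The collectd fix above simply checks the return value of gcry_control() during initialization instead of assuming success. A hedged fragment of that pattern; it assumes libgcrypt is available and simplifies the error handling:
#include <gcrypt.h>
#include <stdio.h>
static int demo_init_gcrypt(void)
{
    gcry_error_t err;
    if (!gcry_check_version(GCRYPT_VERSION)) {
        fprintf(stderr, "libgcrypt version mismatch\n");
        return -1;
    }
    err = gcry_control(GCRYCTL_INIT_SECMEM, 32768, 0);
    if (err) {
        /* previously this return value was ignored */
        fprintf(stderr, "gcry_control(INIT_SECMEM): %s\n", gcry_strerror(err));
        return -1;
    }
    err = gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
    if (err) {
        fprintf(stderr, "gcry_control(INITIALIZATION_FINISHED): %s\n",
                gcry_strerror(err));
        return -1;
    }
    return 0;
}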
sudoers_policy_main(int argc, char * const argv[], int pwflag, char *env_add[],
bool verbose, void *closure)
{
char *iolog_path = NULL;
mode_t cmnd_umask = ACCESSPERMS;
struct sudo_nss *nss;
int oldlocale, validated, ret = -1;
debug_decl(sudoers_policy_main, SUDOERS_DEBUG_PLUGIN);
sudo_warn_set_locale_func(sudoers_warn_setlocale);
unlimit_nproc();
/* Is root even allowed to run sudo? */
if (user_uid == 0 && !def_root_sudo) {
/* Not an audit event (should it be?). */
sudo_warnx("%s",
U_("sudoers specifies that root is not allowed to sudo"));
goto bad;
}
if (!set_perms(PERM_INITIAL))
goto bad;
/* Environment variables specified on the command line. */
if (env_add != NULL && env_add[0] != NULL)
sudo_user.env_vars = env_add;
/*
* Make a local copy of argc/argv, with special handling
* for pseudo-commands and the '-i' option.
*/
if (argc == 0) {
NewArgc = 1;
NewArgv = reallocarray(NULL, NewArgc + 1, sizeof(char *));
if (NewArgv == NULL) {
sudo_warnx(U_("%s: %s"), __func__, U_("unable to allocate memory"));
goto done;
}
sudoers_gc_add(GC_VECTOR, NewArgv);
NewArgv[0] = user_cmnd;
NewArgv[1] = NULL;
} else {
/* Must leave an extra slot before NewArgv for bash's --login */
NewArgc = argc;
NewArgv = reallocarray(NULL, NewArgc + 2, sizeof(char *));
if (NewArgv == NULL) {
sudo_warnx(U_("%s: %s"), __func__, U_("unable to allocate memory"));
goto done;
}
sudoers_gc_add(GC_VECTOR, NewArgv);
NewArgv++; /* reserve an extra slot for --login */
memcpy(NewArgv, argv, argc * sizeof(char *));
NewArgv[NewArgc] = NULL;
if (ISSET(sudo_mode, MODE_LOGIN_SHELL) && runas_pw != NULL) {
NewArgv[0] = strdup(runas_pw->pw_shell);
if (NewArgv[0] == NULL) {
sudo_warnx(U_("%s: %s"), __func__, U_("unable to allocate memory"));
goto done;
}
sudoers_gc_add(GC_PTR, NewArgv[0]);
}
}
/* If given the -P option, set the "preserve_groups" flag. */
if (ISSET(sudo_mode, MODE_PRESERVE_GROUPS))
def_preserve_groups = true;
/* Find command in path and apply per-command Defaults. */
cmnd_status = set_cmnd();
if (cmnd_status == NOT_FOUND_ERROR)
goto done;
/* Check for -C overriding def_closefrom. */
if (user_closefrom >= 0 && user_closefrom != def_closefrom) {
if (!def_closefrom_override) {
log_warningx(SLOG_NO_STDERR|SLOG_AUDIT,
N_("user not allowed to override closefrom limit"));
sudo_warnx("%s", U_("you are not permitted to use the -C option"));
goto bad;
}
def_closefrom = user_closefrom;
}
/*
* Check sudoers sources, using the locale specified in sudoers.
*/
sudoers_setlocale(SUDOERS_LOCALE_SUDOERS, &oldlocale);
validated = sudoers_lookup(snl, sudo_user.pw, &cmnd_status, pwflag);
if (ISSET(validated, VALIDATE_ERROR)) {
/* The lookup function should have printed an error. */
goto done;
}
/* Restore user's locale. */
sudoers_setlocale(oldlocale, NULL);
if (safe_cmnd == NULL) {
if ((safe_cmnd = strdup(user_cmnd)) == NULL) {
sudo_warnx(U_("%s: %s"), __func__, U_("unable to allocate memory"));
goto done;
}
}
/* Defer uid/gid checks until after defaults have been updated. */
if (unknown_runas_uid && !def_runas_allow_unknown_id) {
log_warningx(SLOG_AUDIT, N_("unknown user: %s"), runas_pw->pw_name);
goto done;
}
if (runas_gr != NULL) {
if (unknown_runas_gid && !def_runas_allow_unknown_id) {
log_warningx(SLOG_AUDIT, N_("unknown group: %s"),
runas_gr->gr_name);
goto done;
}
}
/*
* Look up the timestamp dir owner if one is specified.
*/
if (def_timestampowner) {
struct passwd *pw = NULL;
if (*def_timestampowner == '#') {
const char *errstr;
uid_t uid = sudo_strtoid(def_timestampowner + 1, &errstr);
if (errstr == NULL)
pw = sudo_getpwuid(uid);
}
if (pw == NULL)
pw = sudo_getpwnam(def_timestampowner);
if (pw != NULL) {
timestamp_uid = pw->pw_uid;
timestamp_gid = pw->pw_gid;
sudo_pw_delref(pw);
} else {
/* XXX - audit too? */
log_warningx(SLOG_SEND_MAIL,
N_("timestamp owner (%s): No such user"), def_timestampowner);
timestamp_uid = ROOT_UID;
timestamp_gid = ROOT_GID;
}
}
/* If no command line args and "shell_noargs" is not set, error out. */
if (ISSET(sudo_mode, MODE_IMPLIED_SHELL) && !def_shell_noargs) {
/* Not an audit event. */
ret = -2; /* usage error */
goto done;
}
/* Bail if a tty is required and we don't have one. */
if (def_requiretty && !tty_present()) {
log_warningx(SLOG_NO_STDERR|SLOG_AUDIT, N_("no tty"));
sudo_warnx("%s", U_("sorry, you must have a tty to run sudo"));
goto bad;
}
/* Check runas user's shell. */
if (!check_user_shell(runas_pw)) {
log_warningx(SLOG_RAW_MSG|SLOG_AUDIT,
N_("invalid shell for user %s: %s"),
runas_pw->pw_name, runas_pw->pw_shell);
goto bad;
}
/*
* We don't reset the environment for sudoedit or if the user
* specified the -E command line flag and they have setenv privs.
*/
if (ISSET(sudo_mode, MODE_EDIT) ||
(ISSET(sudo_mode, MODE_PRESERVE_ENV) && def_setenv))
def_env_reset = false;
/* Build a new environment that avoids any nasty bits. */
if (!rebuild_env())
goto bad;
/* Require a password if sudoers says so. */
switch (check_user(validated, sudo_mode)) {
case true:
/* user authenticated successfully. */
break;
case false:
/* Note: log_denial() calls audit for us. */
if (!ISSET(validated, VALIDATE_SUCCESS)) {
/* Only display a denial message if no password was read. */
if (!log_denial(validated, def_passwd_tries <= 0))
goto done;
}
goto bad;
default:
/* some other error, ret is -1. */
goto done;
}
/* Check whether user_runchroot is permitted (if specified). */
switch (check_user_runchroot()) {
case true:
break;
case false:
goto bad;
default:
goto done;
}
/* Check whether user_runcwd is permitted (if specified). */
switch (check_user_runcwd()) {
case true:
break;
case false:
goto bad;
default:
goto done;
}
/* If run as root with SUDO_USER set, set sudo_user.pw to that user. */
/* XXX - causes confusion when root is not listed in sudoers */
if (ISSET(sudo_mode, MODE_RUN|MODE_EDIT) && prev_user != NULL) {
if (user_uid == 0 && strcmp(prev_user, "root") != 0) {
struct passwd *pw;
if ((pw = sudo_getpwnam(prev_user)) != NULL) {
if (sudo_user.pw != NULL)
sudo_pw_delref(sudo_user.pw);
sudo_user.pw = pw;
}
}
}
/* If the user was not allowed to run the command we are done. */
if (!ISSET(validated, VALIDATE_SUCCESS)) {
/* Note: log_failure() calls audit for us. */
if (!log_failure(validated, cmnd_status))
goto done;
goto bad;
}
/* Create Ubuntu-style dot file to indicate sudo was successful. */
if (create_admin_success_flag() == -1)
goto done;
/* Finally tell the user if the command did not exist. */
if (cmnd_status == NOT_FOUND_DOT) {
audit_failure(NewArgv, N_("command in current directory"));
sudo_warnx(U_("ignoring \"%s\" found in '.'\nUse \"sudo ./%s\" if this is the \"%s\" you wish to run."), user_cmnd, user_cmnd, user_cmnd);
goto bad;
} else if (cmnd_status == NOT_FOUND) {
if (ISSET(sudo_mode, MODE_CHECK)) {
audit_failure(NewArgv, N_("%s: command not found"),
NewArgv[0]);
sudo_warnx(U_("%s: command not found"), NewArgv[0]);
} else {
audit_failure(NewArgv, N_("%s: command not found"),
user_cmnd);
sudo_warnx(U_("%s: command not found"), user_cmnd);
}
goto bad;
}
/* If user specified a timeout make sure sudoers allows it. */
if (!def_user_command_timeouts && user_timeout > 0) {
log_warningx(SLOG_NO_STDERR|SLOG_AUDIT,
N_("user not allowed to set a command timeout"));
sudo_warnx("%s",
U_("sorry, you are not allowed set a command timeout"));
goto bad;
}
/* If user specified env vars make sure sudoers allows it. */
if (ISSET(sudo_mode, MODE_RUN) && !def_setenv) {
if (ISSET(sudo_mode, MODE_PRESERVE_ENV)) {
log_warningx(SLOG_NO_STDERR|SLOG_AUDIT,
N_("user not allowed to preserve the environment"));
sudo_warnx("%s",
U_("sorry, you are not allowed to preserve the environment"));
goto bad;
} else {
if (!validate_env_vars(sudo_user.env_vars))
goto bad;
}
}
if (ISSET(sudo_mode, (MODE_RUN | MODE_EDIT)) && !remote_iologs) {
if ((def_log_input || def_log_output) && def_iolog_file && def_iolog_dir) {
if ((iolog_path = format_iolog_path()) == NULL) {
if (!def_ignore_iolog_errors)
goto done;
/* Unable to expand I/O log path, disable I/O logging. */
def_log_input = false;
def_log_output = false;
}
}
}
switch (sudo_mode & MODE_MASK) {
case MODE_CHECK:
ret = display_cmnd(snl, list_pw ? list_pw : sudo_user.pw);
break;
case MODE_LIST:
ret = display_privs(snl, list_pw ? list_pw : sudo_user.pw, verbose);
break;
case MODE_VALIDATE:
case MODE_RUN:
case MODE_EDIT:
/* ret may be overridden by "goto bad" later */
ret = true;
break;
default:
/* Should not happen. */
sudo_warnx("internal error, unexpected sudo mode 0x%x", sudo_mode);
goto done;
}
/* Cleanup sudoers sources */
TAILQ_FOREACH(nss, snl, entries) {
nss->close(nss);
}
if (def_group_plugin)
group_plugin_unload();
init_parser(NULL, false, false);
if (ISSET(sudo_mode, (MODE_VALIDATE|MODE_CHECK|MODE_LIST))) {
/* ret already set appropriately */
goto done;
}
/*
* Set umask based on sudoers.
* If user's umask is more restrictive, OR in those bits too
* unless umask_override is set.
*/
if (def_umask != ACCESSPERMS) {
cmnd_umask = def_umask;
if (!def_umask_override)
cmnd_umask |= user_umask;
}
if (ISSET(sudo_mode, MODE_LOGIN_SHELL)) {
char *p;
/* Convert /bin/sh -> -sh so shell knows it is a login shell */
if ((p = strrchr(NewArgv[0], '/')) == NULL)
p = NewArgv[0];
*p = '-';
NewArgv[0] = p;
/*
* Newer versions of bash require the --login option to be used
* in conjunction with the -c option even if the shell name starts
* with a '-'. Unfortunately, bash 1.x uses -login, not --login
* so this will cause an error for that.
*/
if (NewArgc > 1 && strcmp(NewArgv[0], "-bash") == 0 &&
strcmp(NewArgv[1], "-c") == 0) {
/* Use the extra slot before NewArgv so we can store --login. */
NewArgv--;
NewArgc++;
NewArgv[0] = NewArgv[1];
NewArgv[1] = "--login";
}
#if defined(_AIX) || (defined(__linux__) && !defined(HAVE_PAM))
/* Insert system-wide environment variables. */
if (!read_env_file(_PATH_ENVIRONMENT, true, false))
sudo_warn("%s", _PATH_ENVIRONMENT);
#endif
#ifdef HAVE_LOGIN_CAP_H
/* Set environment based on login class. */
if (login_class) {
login_cap_t *lc = login_getclass(login_class);
if (lc != NULL) {
setusercontext(lc, runas_pw, runas_pw->pw_uid, LOGIN_SETPATH|LOGIN_SETENV);
login_close(lc);
}
}
#endif /* HAVE_LOGIN_CAP_H */
}
/* Insert system-wide environment variables. */
if (def_restricted_env_file) {
if (!read_env_file(def_restricted_env_file, false, true))
sudo_warn("%s", def_restricted_env_file);
}
if (def_env_file) {
if (!read_env_file(def_env_file, false, false))
sudo_warn("%s", def_env_file);
}
/* Insert user-specified environment variables. */
if (!insert_env_vars(sudo_user.env_vars))
goto done;
/* Note: must call audit before uid change. */
if (ISSET(sudo_mode, MODE_EDIT)) {
char **edit_argv;
int edit_argc;
const char *env_editor;
free(safe_cmnd);
safe_cmnd = find_editor(NewArgc - 1, NewArgv + 1, &edit_argc,
&edit_argv, NULL, &env_editor, false);
if (safe_cmnd == NULL) {
if (errno != ENOENT)
goto done;
audit_failure(NewArgv, N_("%s: command not found"),
env_editor ? env_editor : def_editor);
sudo_warnx(U_("%s: command not found"),
env_editor ? env_editor : def_editor);
goto bad;
}
sudoers_gc_add(GC_VECTOR, edit_argv);
NewArgv = edit_argv;
NewArgc = edit_argc;
/* We want to run the editor with the unmodified environment. */
env_swap_old();
}
goto done;
bad:
ret = false;
done:
if (ret == -1) {
/* Free stashed copy of the environment. */
(void)env_init(NULL);
} else {
/* Store settings to pass back to front-end. */
if (!sudoers_policy_store_result(ret, NewArgv, env_get(), cmnd_umask,
iolog_path, closure))
ret = -1;
}
if (!rewind_perms())
ret = -1;
restore_nproc();
/* Destroy the password and group caches and free the contents. */
sudo_freepwcache();
sudo_freegrcache();
sudo_warn_set_locale_func(NULL);
debug_return_int(ret);
}
|
Safe
|
[
"CWE-193"
] |
sudo
|
1f8638577d0c80a4ff864a2aad80a0d95488e9a8
|
6.500395089307221e+37
| 448 |
Fix potential buffer overflow when unescaping backslashes in user_args.
Also, do not try to unescape backslashes unless in run mode *and*
we are running the command via a shell.
Found by Qualys, this fixes CVE-2021-3156.
| 0 |
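CVE-2021-3156 above is, at its core, an unescaping loop that consumes a trailing backslash and then keeps copying past the terminating NUL. A stand-alone sketch of a bounds-respecting variant of such a loop (illustrative only, not sudo's code):
#include <stddef.h>
/* Copy src into dst (of size dstsize), dropping one level of
 * backslash escaping, and never reading past the terminating NUL. */
static size_t demo_unescape(char *dst, size_t dstsize, const char *src)
{
    size_t o = 0;
    if (dstsize == 0)
        return 0;
    while (*src != '\0' && o + 1 < dstsize) {
        if (src[0] == '\\' && src[1] != '\0') {
            /* Only treat the backslash as an escape when a character
             * actually follows it; a lone trailing backslash is copied
             * as-is instead of walking past the end of the string. */
            src++;
        }
        dst[o++] = *src++;
    }
    dst[o] = '\0';
    return o;
}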
static uint8_t* my_malloc(size_t size) {
void* block = NULL;
int res = 0;
/* Do an alignment to 32 bytes because AVX2 is supported */
#if defined(_WIN32)
/* A (void *) cast needed for avoiding a warning with MINGW :-/ */
block = (void *)_aligned_malloc(size, 32);
#elif _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600
/* Platform does have an implementation of posix_memalign */
res = posix_memalign(&block, 32, size);
#else
block = malloc(size);
#endif /* _WIN32 */
if (block == NULL || res != 0) {
printf("Error allocating memory!");
return NULL;
}
return (uint8_t*)block;
}
|
Safe
|
[
"CWE-787"
] |
c-blosc2
|
c4c6470e88210afc95262c8b9fcc27e30ca043ee
|
9.616603533061181e+37
| 22 |
Fixed ASAN heap buffer overflow when there is not enough space to write the compressed block size.
| 0 |
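One easy mistake with an aligned allocator like my_malloc() above is releasing the block with plain free() on Windows; memory from _aligned_malloc must go back through _aligned_free. A hedged sketch of the matching deallocator:
#include <stdlib.h>
#if defined(_WIN32)
#include <malloc.h>
#endif
/* Counterpart to an aligned allocator like the one above: the release
 * path has to mirror the allocation path on each platform. */
static void my_free(void *block)
{
    if (block == NULL)
        return;
#if defined(_WIN32)
    _aligned_free(block);   /* _aligned_malloc memory cannot go to free() */
#else
    free(block);            /* posix_memalign/malloc memory uses free() */
#endif
}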
uint64_t HELPER(pacda)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnDA)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_addpac(env, x, y, &env->keys.apda, true);
}
|
Safe
|
[] |
qemu
|
de0b1bae6461f67243282555475f88b2384a1eb9
|
2.8588099796840003e+38
| 9 |
target/arm: Fix PAuth sbox functions
In the PAC computation, the sbox was applied over the wrong bits.
As this is a 4-bit sbox, the bit index should be incremented by 4 instead of 16.
Test vector from QARMA paper (https://eprint.iacr.org/2016/444.pdf) was
used to verify one computation of the pauth_computepac() function which
uses sbox2.
Launchpad: https://bugs.launchpad.net/bugs/1859713
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Vincent DEHORS <vincent.dehors@smile.fr>
Signed-off-by: Adrien GRASSEIN <adrien.grassein@smile.fr>
Message-id: 20200116230809.19078-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
| 0 |
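The QARMA fix above amounts to iterating a 4-bit S-box across a 64-bit word in steps of 4 bits rather than 16. A small stand-alone sketch of that substitution loop; the S-box table is illustrative, not the QARMA sigma tables:
#include <stdint.h>
/* Apply a 4-bit S-box to each of the sixteen nibbles of a 64-bit word.
 * The loop must advance 4 bits per nibble; stepping by 16 would only
 * substitute four nibbles and leave the rest untouched. */
static uint64_t demo_apply_sbox4(uint64_t x, const uint8_t sbox[16])
{
    uint64_t out = 0;
    int i;
    for (i = 0; i < 64; i += 4) {
        uint64_t nibble = (x >> i) & 0xf;
        out |= (uint64_t)sbox[nibble] << i;
    }
    return out;
}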
QPDF::calculateLinearizationData(std::map<int, int> const& object_stream_data)
{
// This function calculates the ordering of objects, divides them
// into the appropriate parts, and computes some values for the
// linearization parameter dictionary and hint tables. The file
// must be optimized (via calling optimize()) prior to calling
// this function. Note that actual offsets and lengths are not
// computed here, but anything related to object ordering is.
if (this->m->object_to_obj_users.empty())
{
// Note that we can't call optimize here because we don't know
// whether it should be called with or without allow changes.
throw std::logic_error(
"INTERNAL ERROR: QPDF::calculateLinearizationData "
"called before optimize()");
}
// Separate objects into the categories sufficient for us to
// determine which part of the linearized file should contain the
// object. This categorization is useful for other purposes as
// well. Part numbers refer to version 1.4 of the PDF spec.
// Parts 1, 3, 5, 10, and 11 don't contain any objects from the
// original file (except the trailer dictionary in part 11).
// Part 4 is the document catalog (root) and the following root
// keys: /ViewerPreferences, /PageMode, /Threads, /OpenAction,
// /AcroForm, /Encrypt. Note that Thread information dictionaries
// are supposed to appear in part 9, but we are disregarding that
// recommendation for now.
// Part 6 is the first page section. It includes all remaining
// objects referenced by the first page including shared objects
// but not including thumbnails. Additionally, if /PageMode is
// /Outlines, then information from /Outlines also appears here.
// Part 7 contains remaining objects private to pages other than
// the first page.
// Part 8 contains all remaining shared objects except those that
// are shared only within thumbnails.
// Part 9 contains all remaining objects.
// We sort objects into the following categories:
// * open_document: part 4
// * first_page_private: part 6
// * first_page_shared: part 6
// * other_page_private: part 7
// * other_page_shared: part 8
// * thumbnail_private: part 9
// * thumbnail_shared: part 9
// * other: part 9
// * outlines: part 6 or 9
this->m->part4.clear();
this->m->part6.clear();
this->m->part7.clear();
this->m->part8.clear();
this->m->part9.clear();
this->m->c_linp = LinParameters();
this->m->c_page_offset_data = CHPageOffset();
this->m->c_shared_object_data = CHSharedObject();
this->m->c_outline_data = HGeneric();
QPDFObjectHandle root = getRoot();
bool outlines_in_first_page = false;
QPDFObjectHandle pagemode = root.getKey("/PageMode");
QTC::TC("qpdf", "QPDF categorize pagemode present",
pagemode.isName() ? 1 : 0);
if (pagemode.isName())
{
if (pagemode.getName() == "/UseOutlines")
{
if (root.hasKey("/Outlines"))
{
outlines_in_first_page = true;
}
else
{
QTC::TC("qpdf", "QPDF UseOutlines but no Outlines");
}
}
QTC::TC("qpdf", "QPDF categorize pagemode outlines",
outlines_in_first_page ? 1 : 0);
}
std::set<std::string> open_document_keys;
open_document_keys.insert("/ViewerPreferences");
open_document_keys.insert("/PageMode");
open_document_keys.insert("/Threads");
open_document_keys.insert("/OpenAction");
open_document_keys.insert("/AcroForm");
std::set<QPDFObjGen> lc_open_document;
std::set<QPDFObjGen> lc_first_page_private;
std::set<QPDFObjGen> lc_first_page_shared;
std::set<QPDFObjGen> lc_other_page_private;
std::set<QPDFObjGen> lc_other_page_shared;
std::set<QPDFObjGen> lc_thumbnail_private;
std::set<QPDFObjGen> lc_thumbnail_shared;
std::set<QPDFObjGen> lc_other;
std::set<QPDFObjGen> lc_outlines;
std::set<QPDFObjGen> lc_root;
for (std::map<QPDFObjGen, std::set<ObjUser> >::iterator oiter =
this->m->object_to_obj_users.begin();
oiter != this->m->object_to_obj_users.end(); ++oiter)
{
QPDFObjGen const& og = (*oiter).first;
std::set<ObjUser>& ous = (*oiter).second;
bool in_open_document = false;
bool in_first_page = false;
int other_pages = 0;
int thumbs = 0;
int others = 0;
bool in_outlines = false;
bool is_root = false;
for (std::set<ObjUser>::iterator uiter = ous.begin();
uiter != ous.end(); ++uiter)
{
ObjUser const& ou = *uiter;
switch (ou.ou_type)
{
case ObjUser::ou_trailer_key:
if (ou.key == "/Encrypt")
{
in_open_document = true;
}
else
{
++others;
}
break;
case ObjUser::ou_thumb:
++thumbs;
break;
case ObjUser::ou_root_key:
if (open_document_keys.count(ou.key) > 0)
{
in_open_document = true;
}
else if (ou.key == "/Outlines")
{
in_outlines = true;
}
else
{
++others;
}
break;
case ObjUser::ou_page:
if (ou.pageno == 0)
{
in_first_page = true;
}
else
{
++other_pages;
}
break;
case ObjUser::ou_root:
is_root = true;
break;
case ObjUser::ou_bad:
throw std::logic_error(
"INTERNAL ERROR: QPDF::calculateLinearizationData: "
"invalid user type");
break;
}
}
if (is_root)
{
lc_root.insert(og);
}
else if (in_outlines)
{
lc_outlines.insert(og);
}
else if (in_open_document)
{
lc_open_document.insert(og);
}
else if ((in_first_page) &&
(others == 0) && (other_pages == 0) && (thumbs == 0))
{
lc_first_page_private.insert(og);
}
else if (in_first_page)
{
lc_first_page_shared.insert(og);
}
else if ((other_pages == 1) && (others == 0) && (thumbs == 0))
{
lc_other_page_private.insert(og);
}
else if (other_pages > 1)
{
lc_other_page_shared.insert(og);
}
else if ((thumbs == 1) && (others == 0))
{
lc_thumbnail_private.insert(og);
}
else if (thumbs > 1)
{
lc_thumbnail_shared.insert(og);
}
else
{
lc_other.insert(og);
}
}
// Generate ordering for objects in the output file. Sometimes we
// just dump right from a set into a vector. Rather than
// optimizing this by going straight into the vector, we'll leave
// these phases separate for now. That way, this section can be
// concerned only with ordering, and the above section can be
// considered only with categorization. Note that sets of
// QPDFObjGens are sorted by QPDFObjGen. In a linearized file,
// objects appear in sequence with the possible exception of hints
// tables which we won't see here anyway. That means that running
// calculateLinearizationData() on a linearized file should give
// results identical to the original file ordering.
// We seem to traverse the page tree a lot in this code, but we
// can address this for a future code optimization if necessary.
// Premature optimization is the root of all evil.
std::vector<QPDFObjectHandle> pages;
{ // local scope
// Map all page objects to the containing object stream. This
// should be a no-op in a properly linearized file.
std::vector<QPDFObjectHandle> t = getAllPages();
for (std::vector<QPDFObjectHandle>::iterator iter = t.begin();
iter != t.end(); ++iter)
{
pages.push_back(getUncompressedObject(*iter, object_stream_data));
}
}
int npages = toI(pages.size());
// We will be initializing some values of the computed hint
// tables. Specifically, we can initialize any items that deal
// with object numbers or counts but not any items that deal with
// lengths or offsets. The code that writes linearized files will
// have to fill in these values during the first pass. The
// validation code can compute them relatively easily given the
// rest of the information.
// npages is the size of the existing pages vector, which has been
// created by traversing the pages tree, and as such is a
// reasonable size.
this->m->c_linp.npages = npages;
this->m->c_page_offset_data.entries =
std::vector<CHPageOffsetEntry>(toS(npages));
// Part 4: open document objects. We don't care about the order.
if (lc_root.size() != 1)
{
stopOnError("found other than one root while"
" calculating linearization data");
}
this->m->part4.push_back(objGenToIndirect(*(lc_root.begin())));
for (std::set<QPDFObjGen>::iterator iter = lc_open_document.begin();
iter != lc_open_document.end(); ++iter)
{
this->m->part4.push_back(objGenToIndirect(*iter));
}
// Part 6: first page objects. Note: implementation note 124
// states that Acrobat always treats page 0 as the first page for
// linearization regardless of /OpenAction. pdlin doesn't provide
// any option to set this and also disregards /OpenAction. We
// will do the same.
// First, place the actual first page object itself.
QPDFObjGen first_page_og(pages.at(0).getObjGen());
if (! lc_first_page_private.count(first_page_og))
{
throw std::logic_error(
"INTERNAL ERROR: QPDF::calculateLinearizationData: first page "
"object not in lc_first_page_private");
}
lc_first_page_private.erase(first_page_og);
this->m->c_linp.first_page_object = pages.at(0).getObjectID();
this->m->part6.push_back(pages.at(0));
// The PDF spec "recommends" an order for the rest of the objects,
// but we are going to disregard it except to the extent that it
// groups private and shared objects contiguously for the sake of
// hint tables.
for (std::set<QPDFObjGen>::iterator iter = lc_first_page_private.begin();
iter != lc_first_page_private.end(); ++iter)
{
this->m->part6.push_back(objGenToIndirect(*iter));
}
for (std::set<QPDFObjGen>::iterator iter = lc_first_page_shared.begin();
iter != lc_first_page_shared.end(); ++iter)
{
this->m->part6.push_back(objGenToIndirect(*iter));
}
// Place the outline dictionary if it goes in the first page section.
if (outlines_in_first_page)
{
pushOutlinesToPart(this->m->part6, lc_outlines, object_stream_data);
}
// Fill in page offset hint table information for the first page.
// The PDF spec says that nshared_objects should be zero for the
// first page. pdlin does not appear to obey this, but it fills
// in garbage values for all the shared object identifiers on the
// first page.
this->m->c_page_offset_data.entries.at(0).nobjects =
toI(this->m->part6.size());
// Part 7: other pages' private objects
// For each page in order:
for (size_t i = 1; i < toS(npages); ++i)
{
// Place this page's page object
QPDFObjGen page_og(pages.at(i).getObjGen());
if (! lc_other_page_private.count(page_og))
{
throw std::logic_error(
"INTERNAL ERROR: "
"QPDF::calculateLinearizationData: page object for page " +
QUtil::uint_to_string(i) + " not in lc_other_page_private");
}
lc_other_page_private.erase(page_og);
this->m->part7.push_back(pages.at(i));
// Place all non-shared objects referenced by this page,
// updating the page object count for the hint table.
this->m->c_page_offset_data.entries.at(i).nobjects = 1;
ObjUser ou(ObjUser::ou_page, toI(i));
if (this->m->obj_user_to_objects.count(ou) == 0)
{
stopOnError("found unreferenced page while"
" calculating linearization data");
}
std::set<QPDFObjGen> ogs = this->m->obj_user_to_objects[ou];
for (std::set<QPDFObjGen>::iterator iter = ogs.begin();
iter != ogs.end(); ++iter)
{
QPDFObjGen const& og = (*iter);
if (lc_other_page_private.count(og))
{
lc_other_page_private.erase(og);
this->m->part7.push_back(objGenToIndirect(og));
++this->m->c_page_offset_data.entries.at(i).nobjects;
}
}
}
// That should have covered all part7 objects.
if (! lc_other_page_private.empty())
{
throw std::logic_error(
"INTERNAL ERROR:"
" QPDF::calculateLinearizationData: lc_other_page_private is "
"not empty after generation of part7");
}
// Part 8: other pages' shared objects
// Order is unimportant.
for (std::set<QPDFObjGen>::iterator iter = lc_other_page_shared.begin();
iter != lc_other_page_shared.end(); ++iter)
{
this->m->part8.push_back(objGenToIndirect(*iter));
}
// Part 9: other objects
// The PDF specification makes recommendations on ordering here.
// We follow them only to a limited extent. Specifically, we put
// the pages tree first, then private thumbnail objects in page
// order, then shared thumbnail objects, and then outlines (unless
// in part 6). After that, we throw all remaining objects in
// arbitrary order.
// Place the pages tree.
std::set<QPDFObjGen> pages_ogs =
this->m->obj_user_to_objects[ObjUser(ObjUser::ou_root_key, "/Pages")];
if (pages_ogs.empty())
{
stopOnError("found empty pages tree while"
" calculating linearization data");
}
for (std::set<QPDFObjGen>::iterator iter = pages_ogs.begin();
iter != pages_ogs.end(); ++iter)
{
QPDFObjGen const& og = *iter;
if (lc_other.count(og))
{
lc_other.erase(og);
this->m->part9.push_back(objGenToIndirect(og));
}
}
// Place private thumbnail images in page order. Slightly more
// information would be required if we were going to bother with
// thumbnail hint tables.
for (size_t i = 0; i < toS(npages); ++i)
{
QPDFObjectHandle thumb = pages.at(i).getKey("/Thumb");
thumb = getUncompressedObject(thumb, object_stream_data);
if (! thumb.isNull())
{
// Output the thumbnail itself
QPDFObjGen thumb_og(thumb.getObjGen());
if (lc_thumbnail_private.count(thumb_og))
{
lc_thumbnail_private.erase(thumb_og);
this->m->part9.push_back(thumb);
}
else
{
// No internal error this time...there's nothing to
// stop this object from having been referred to
// somewhere else outside of a page's /Thumb, and if
// it had been, there's nothing to prevent it from
// having been in some set other than
// lc_thumbnail_private.
}
std::set<QPDFObjGen>& ogs =
this->m->obj_user_to_objects[
ObjUser(ObjUser::ou_thumb, toI(i))];
for (std::set<QPDFObjGen>::iterator iter = ogs.begin();
iter != ogs.end(); ++iter)
{
QPDFObjGen const& og = *iter;
if (lc_thumbnail_private.count(og))
{
lc_thumbnail_private.erase(og);
this->m->part9.push_back(objGenToIndirect(og));
}
}
}
}
if (! lc_thumbnail_private.empty())
{
throw std::logic_error(
"INTERNAL ERROR: "
"QPDF::calculateLinearizationData: lc_thumbnail_private "
"not empty after placing thumbnails");
}
// Place shared thumbnail objects
for (std::set<QPDFObjGen>::iterator iter = lc_thumbnail_shared.begin();
iter != lc_thumbnail_shared.end(); ++iter)
{
this->m->part9.push_back(objGenToIndirect(*iter));
}
// Place outlines unless in first page
if (! outlines_in_first_page)
{
pushOutlinesToPart(this->m->part9, lc_outlines, object_stream_data);
}
// Place all remaining objects
for (std::set<QPDFObjGen>::iterator iter = lc_other.begin();
iter != lc_other.end(); ++iter)
{
this->m->part9.push_back(objGenToIndirect(*iter));
}
// Make sure we got everything exactly once.
size_t num_placed =
this->m->part4.size() + this->m->part6.size() + this->m->part7.size() +
this->m->part8.size() + this->m->part9.size();
size_t num_wanted = this->m->object_to_obj_users.size();
if (num_placed != num_wanted)
{
throw std::logic_error(
"INTERNAL ERROR: QPDF::calculateLinearizationData: wrong "
"number of objects placed (num_placed = " +
QUtil::uint_to_string(num_placed) +
"; number of objects: " +
QUtil::uint_to_string(num_wanted));
}
// Calculate shared object hint table information including
// references to shared objects from page offset hint data.
// The shared object hint table consists of all part 6 (whether
// shared or not) in order followed by all part 8 objects in
// order. Add the objects to shared object data keeping a map of
// object number to index. Then populate the shared object
// information for the pages.
// Note that two objects never have the same object number, so we
// can map from object number only without regards to generation.
std::map<int, int> obj_to_index;
this->m->c_shared_object_data.nshared_first_page =
toI(this->m->part6.size());
this->m->c_shared_object_data.nshared_total =
this->m->c_shared_object_data.nshared_first_page +
toI(this->m->part8.size());
std::vector<CHSharedObjectEntry>& shared =
this->m->c_shared_object_data.entries;
for (std::vector<QPDFObjectHandle>::iterator iter = this->m->part6.begin();
iter != this->m->part6.end(); ++iter)
{
QPDFObjectHandle& oh = *iter;
int obj = oh.getObjectID();
obj_to_index[obj] = toI(shared.size());
shared.push_back(CHSharedObjectEntry(obj));
}
QTC::TC("qpdf", "QPDF lin part 8 empty", this->m->part8.empty() ? 1 : 0);
if (! this->m->part8.empty())
{
this->m->c_shared_object_data.first_shared_obj =
this->m->part8.at(0).getObjectID();
for (std::vector<QPDFObjectHandle>::iterator iter =
this->m->part8.begin();
iter != this->m->part8.end(); ++iter)
{
QPDFObjectHandle& oh = *iter;
int obj = oh.getObjectID();
obj_to_index[obj] = toI(shared.size());
shared.push_back(CHSharedObjectEntry(obj));
}
}
if (static_cast<size_t>(this->m->c_shared_object_data.nshared_total) !=
this->m->c_shared_object_data.entries.size())
{
throw std::logic_error(
"shared object hint table has wrong number of entries");
}
// Now compute the list of shared objects for each page after the
// first page.
for (size_t i = 1; i < toS(npages); ++i)
{
CHPageOffsetEntry& pe = this->m->c_page_offset_data.entries.at(i);
ObjUser ou(ObjUser::ou_page, toI(i));
if (this->m->obj_user_to_objects.count(ou) == 0)
{
stopOnError("found unreferenced page while"
" calculating linearization data");
}
std::set<QPDFObjGen> const& ogs = this->m->obj_user_to_objects[ou];
for (std::set<QPDFObjGen>::const_iterator iter = ogs.begin();
iter != ogs.end(); ++iter)
{
QPDFObjGen const& og = *iter;
if ((this->m->object_to_obj_users[og].size() > 1) &&
(obj_to_index.count(og.getObj()) > 0))
{
int idx = obj_to_index[og.getObj()];
++pe.nshared_objects;
pe.shared_identifiers.push_back(idx);
}
}
}
}
|
Safe
|
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
|
9.390188725793133e+37
| 590 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
| 0 |
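The qpdf commit message above describes replacing implicit integer conversions with range-checked calls that throw on data loss. As a rough illustration of that technique only — checked_cast below is a hypothetical helper, not qpdf's actual QIntC API — a C++20 sketch might look like:
#include <utility>
#include <stdexcept>
// Hypothetical helper: convert between integer types, throwing instead of
// silently truncating or changing sign.
template <typename To, typename From>
To checked_cast(From value)
{
    if (!std::in_range<To>(value)) {   // C++20 range check across signedness
        throw std::range_error("integer value out of range for target type");
    }
    return static_cast<To>(value);
}
// Example use: size_t count = ...; int n = checked_cast<int>(count);
// end of checked_cast sketch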
int avrcp_set_volume(struct btd_device *dev, int8_t volume, bool notify)
{
struct avrcp_server *server;
struct avrcp *session;
uint8_t buf[AVRCP_HEADER_LENGTH + 1];
struct avrcp_header *pdu = (void *) buf;
if (volume < 0)
return -EINVAL;
server = find_server(servers, device_get_adapter(dev));
if (server == NULL)
return -EINVAL;
session = find_session(server->sessions, dev);
if (session == NULL)
return -ENOTCONN;
if (notify) {
if (!session->target)
return -ENOTSUP;
return avrcp_event(session, AVRCP_EVENT_VOLUME_CHANGED,
&volume);
}
if (!session->controller && !avrcp_event_registered(session,
AVRCP_EVENT_VOLUME_CHANGED))
return -ENOTSUP;
memset(buf, 0, sizeof(buf));
set_company_id(pdu->company_id, IEEEID_BTSIG);
pdu->pdu_id = AVRCP_SET_ABSOLUTE_VOLUME;
pdu->params[0] = volume;
pdu->params_len = htons(1);
return avctp_send_vendordep_req(session->conn,
AVC_CTYPE_CONTROL, AVC_SUBUNIT_PANEL,
buf, sizeof(buf),
avrcp_handle_set_volume, session);
}
|
Safe
|
[
"CWE-200"
] |
bluez
|
e2b0f0d8d63e1223bb714a9efb37e2257818268b
|
1.2706656928275026e+38
| 42 |
avrcp: Fix not checking if params_len matches the number of received bytes
This makes sure the number of bytes in params_len matches the
remaining bytes received, so the code doesn't end up accessing invalid
memory.
| 0 |
TEST(QueryProjectionTest, SortKeyMetaProjectionInInclusionProjection) {
auto proj = createProjection("{}", "{a: 1, foo: {$meta: 'sortKey'}}");
ASSERT_TRUE(proj.metadataDeps()[DocumentMetadataFields::kSortKey]);
ASSERT_FALSE(proj.requiresMatchDetails());
ASSERT_FALSE(proj.metadataDeps()[DocumentMetadataFields::kGeoNearDist]);
ASSERT_FALSE(proj.metadataDeps()[DocumentMetadataFields::kGeoNearPoint]);
ASSERT_FALSE(proj.requiresDocument());
}
|
Safe
|
[
"CWE-732"
] |
mongo
|
cd583b6c4d8aa2364f255992708b9bb54e110cf4
|
5.116881408012923e+36
| 9 |
SERVER-53929 Add stricter parser checks around positional projection
| 0 |
ofputil_put_ofp12_table_stats(const struct ofputil_table_stats *stats,
const struct ofputil_table_features *features,
struct ofpbuf *buf)
{
struct ofp12_table_stats *out;
out = ofpbuf_put_zeros(buf, sizeof *out);
out->table_id = features->table_id;
ovs_strlcpy_arrays(out->name, features->name);
out->match = oxm_bitmap_from_mf_bitmap(&features->match, OFP12_VERSION);
out->wildcards = oxm_bitmap_from_mf_bitmap(&features->wildcard,
OFP12_VERSION);
out->write_actions = ofpact_bitmap_to_openflow(
features->nonmiss.write.ofpacts, OFP12_VERSION);
out->apply_actions = ofpact_bitmap_to_openflow(
features->nonmiss.apply.ofpacts, OFP12_VERSION);
out->write_setfields = oxm_bitmap_from_mf_bitmap(
&features->nonmiss.write.set_fields, OFP12_VERSION);
out->apply_setfields = oxm_bitmap_from_mf_bitmap(
&features->nonmiss.apply.set_fields, OFP12_VERSION);
out->metadata_match = features->metadata_match;
out->metadata_write = features->metadata_write;
out->instructions = ovsinst_bitmap_to_openflow(
features->nonmiss.instructions, OFP12_VERSION);
out->config = ofputil_encode_table_config(features->miss_config,
OFPUTIL_TABLE_EVICTION_DEFAULT,
OFPUTIL_TABLE_VACANCY_DEFAULT,
OFP12_VERSION);
out->max_entries = htonl(features->max_entries);
out->active_count = htonl(stats->active_count);
out->lookup_count = htonll(stats->lookup_count);
out->matched_count = htonll(stats->matched_count);
}
|
Safe
|
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
|
3.1473661245417718e+38
| 33 |
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <bshastry@sec.t-labs.tu-berlin.de>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Justin Pettit <jpettit@ovn.org>
| 0 |
int dns_packet_append_key(DnsPacket *p, const DnsResourceKey *k, const DnsAnswerFlags flags, size_t *start) {
size_t saved_size;
uint16_t class;
int r;
assert(p);
assert(k);
saved_size = p->size;
r = dns_packet_append_name(p, dns_resource_key_name(k), true, true, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint16(p, k->type, NULL);
if (r < 0)
goto fail;
class = flags & DNS_ANSWER_CACHE_FLUSH ? k->class | MDNS_RR_CACHE_FLUSH : k->class;
r = dns_packet_append_uint16(p, class, NULL);
if (r < 0)
goto fail;
if (start)
*start = saved_size;
return 0;
fail:
dns_packet_truncate(p, saved_size);
return r;
}
|
Safe
|
[
"CWE-20",
"CWE-476"
] |
systemd
|
a924f43f30f9c4acaf70618dd2a055f8b0f166be
|
1.8708562249003177e+38
| 32 |
resolved: bugfix of null pointer p->question dereferencing (#6020)
See https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1621396
| 0 |
void SshIo::SshImpl::writeRemote(const byte* data, size_t size, long from, long to)
{
if (protocol_ == pSftp) throw Error(1, "not support SFTP write access.");
//printf("ssh update size=%ld from=%ld to=%ld\n", (long)size, from, to);
assert(isMalloced_);
std::string tempFile = hostInfo_.Path + ".exiv2tmp";
std::string response;
std::stringstream ss;
// copy the head (byte 0 to byte fromByte) of original file to filepath.exiv2tmp
ss << "head -c " << from
<< " " << hostInfo_.Path
<< " > " << tempFile;
std::string cmd = ss.str();
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to cope the head of file to temp");
}
// upload the data (the byte ranges which are different between the original
// file and the new file) to filepath.exiv2datatemp
if (ssh_->scp(hostInfo_.Path + ".exiv2datatemp", data, size) != 0) {
throw Error(1, "SSH: Unable to copy file");
}
// concatenate the filepath.exiv2datatemp to filepath.exiv2tmp
cmd = "cat " + hostInfo_.Path + ".exiv2datatemp >> " + tempFile;
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to copy the rest");
}
// copy the tail (from byte toByte to the end of file) of original file to filepath.exiv2tmp
ss.str("");
ss << "tail -c+" << (to + 1)
<< " " << hostInfo_.Path
<< " >> " << tempFile;
cmd = ss.str();
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to copy the rest");
}
// replace the original file with filepath.exiv2tmp
cmd = "mv " + tempFile + " " + hostInfo_.Path;
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to copy the rest");
}
// remove filepath.exiv2datatemp
cmd = "rm " + hostInfo_.Path + ".exiv2datatemp";
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to copy the rest");
}
}
|
Safe
|
[
"CWE-125"
] |
exiv2
|
6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97
|
1.3030437614202154e+38
| 53 |
Fix https://github.com/Exiv2/exiv2/issues/55
| 0 |
static PCRE2_SPTR SLJIT_FUNC do_extuni_no_utf(jit_arguments *args, PCRE2_SPTR cc)
{
PCRE2_SPTR start_subject = args->begin;
PCRE2_SPTR end_subject = args->end;
int lgb, rgb, ricount;
PCRE2_SPTR bptr;
uint32_t c;
c = *cc++;
lgb = UCD_GRAPHBREAK(c);
while (cc < end_subject)
{
c = *cc;
rgb = UCD_GRAPHBREAK(c);
if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;
/* Not breaking between Regional Indicators is allowed only if there
are an even number of preceding RIs. */
if (lgb == ucp_gbRegionalIndicator && rgb == ucp_gbRegionalIndicator)
{
ricount = 0;
bptr = cc - 1;
/* bptr is pointing to the left-hand character */
while (bptr > start_subject)
{
bptr--;
c = *bptr;
if (UCD_GRAPHBREAK(c) != ucp_gbRegionalIndicator) break;
ricount++;
}
if ((ricount & 1) != 0) break; /* Grapheme break required */
}
/* If Extend or ZWJ follows Extended_Pictographic, do not update lgb; this
allows any number of them before a following Extended_Pictographic. */
if ((rgb != ucp_gbExtend && rgb != ucp_gbZWJ) ||
lgb != ucp_gbExtended_Pictographic)
lgb = rgb;
cc++;
}
return cc;
}
|
Safe
|
[
"CWE-125"
] |
php-src
|
8947fd9e9fdce87cd6c59817b1db58e789538fe9
|
5.399294030577806e+37
| 52 |
Fix #78338: Array cross-border reading in PCRE
We backport r1092 from pcre2.
| 0 |
static int cdrom_get_track_info(struct cdrom_device_info *cdi,
__u16 track, __u8 type, track_information *ti)
{
const struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
int ret, buflen;
init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
cgc.cmd[1] = type & 3;
cgc.cmd[4] = (track & 0xff00) >> 8;
cgc.cmd[5] = track & 0xff;
cgc.cmd[8] = 8;
cgc.quiet = 1;
ret = cdo->generic_packet(cdi, &cgc);
if (ret)
return ret;
buflen = be16_to_cpu(ti->track_information_length) +
sizeof(ti->track_information_length);
if (buflen > sizeof(track_information))
buflen = sizeof(track_information);
cgc.cmd[8] = cgc.buflen = buflen;
ret = cdo->generic_packet(cdi, &cgc);
if (ret)
return ret;
/* return actual fill size */
return buflen;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
9de4ee40547fd315d4a0ed1dd15a2fa3559ad707
|
1.0777579901854254e+38
| 33 |
cdrom: information leak in cdrom_ioctl_media_changed()
This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned
long. The way the check is written now, if one of the high 32 bits is
set then we could read outside the info->slots[] array.
This bug is pretty old and it predates git.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: stable@vger.kernel.org
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
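The cdrom commit message above describes an int vs. unsigned long comparison where a cast lets large values slip past the bounds check. A minimal, hypothetical C++ sketch of that pitfall (not the actual kernel code), assuming an LP64 system:
static int slots[8];
static const int capacity = 8;
// Buggy check: casting the unsigned long index to int first means
// 0x100000000UL becomes 0, passes the check, and indexes out of bounds.
int bad_lookup(unsigned long arg)
{
    if (static_cast<int>(arg) >= capacity)
        return -1;
    return slots[arg];
}
// Safer check: compare in the wider unsigned type before indexing.
int good_lookup(unsigned long arg)
{
    if (arg >= static_cast<unsigned long>(capacity))
        return -1;
    return slots[arg];
}
// end of bounds-check sketch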
virtio_find_modern_cr(int offset) {
return virtio_find_cr(modern_config_regs,
sizeof(modern_config_regs) / sizeof(*modern_config_regs),
offset);
}
|
Safe
|
[
"CWE-476"
] |
acrn-hypervisor
|
154fe59531c12b82e26d1b24b5531f5066d224f5
|
3.316845787170236e+38
| 5 |
dm: validate inputs in vq_endchains
inputs shall be validated to avoid NULL pointer access.
Tracked-On: #6129
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
| 0 |
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
AnsiContext *s = avctx->priv_data;
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
const uint8_t *buf_end = buf+buf_size;
int ret, i, count;
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
return ret;
if (!avctx->frame_number) {
for (i=0; i<avctx->height; i++)
memset(s->frame->data[0]+ i*s->frame->linesize[0], 0, avctx->width);
memset(s->frame->data[1], 0, AVPALETTE_SIZE);
}
s->frame->pict_type = AV_PICTURE_TYPE_I;
s->frame->palette_has_changed = 1;
set_palette((uint32_t *)s->frame->data[1]);
if (!s->first_frame) {
erase_screen(avctx);
s->first_frame = 1;
}
while(buf < buf_end) {
switch(s->state) {
case STATE_NORMAL:
switch (buf[0]) {
case 0x00: //NUL
case 0x07: //BEL
case 0x1A: //SUB
/* ignore */
break;
case 0x08: //BS
s->x = FFMAX(s->x - 1, 0);
break;
case 0x09: //HT
i = s->x / FONT_WIDTH;
count = ((i + 8) & ~7) - i;
for (i = 0; i < count; i++)
draw_char(avctx, ' ');
break;
case 0x0A: //LF
hscroll(avctx);
case 0x0D: //CR
s->x = 0;
break;
case 0x0C: //FF
erase_screen(avctx);
break;
case 0x1B: //ESC
s->state = STATE_ESCAPE;
break;
default:
draw_char(avctx, buf[0]);
}
break;
case STATE_ESCAPE:
if (buf[0] == '[') {
s->state = STATE_CODE;
s->nb_args = 0;
s->args[0] = -1;
} else {
s->state = STATE_NORMAL;
draw_char(avctx, 0x1B);
continue;
}
break;
case STATE_CODE:
switch(buf[0]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] < 6553)
s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0';
break;
case ';':
s->nb_args++;
if (s->nb_args < MAX_NB_ARGS)
s->args[s->nb_args] = 0;
break;
case 'M':
s->state = STATE_MUSIC_PREAMBLE;
break;
case '=': case '?':
/* ignore */
break;
default:
if (s->nb_args > MAX_NB_ARGS)
av_log(avctx, AV_LOG_WARNING, "args overflow (%i)\n", s->nb_args);
if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] >= 0)
s->nb_args++;
if ((ret = execute_code(avctx, buf[0])) < 0)
return ret;
s->state = STATE_NORMAL;
}
break;
case STATE_MUSIC_PREAMBLE:
if (buf[0] == 0x0E || buf[0] == 0x1B)
s->state = STATE_NORMAL;
/* ignore music data */
break;
}
buf++;
}
*got_frame = 1;
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
return buf_size;
}
|
Safe
|
[
"CWE-190"
] |
FFmpeg
|
d42ec8433c687fcbccefa51a7716d81920218e4f
|
2.0480376061412508e+38
| 112 |
avcodec/ansi: fix integer overflow
Fixes out of array read
Fixes: 5f9698e86d92f19bb08d54ff0d57027f-signal_sigsegv_b30756_3795_cov_2693691257_ansi256.ans
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
| 0 |
int explodeConf(const std::string &filepath, std::vector<Proxy> &nodes)
{
return explodeConfContent(fileGet(filepath), nodes);
}
|
Safe
|
[
"CWE-434",
"CWE-94"
] |
subconverter
|
ce8d2bd0f13f05fcbd2ed90755d097f402393dd3
|
2.65839166535699e+38
| 4 |
Enhancements
Add authorization check before loading scripts.
Add detailed logs when loading preference settings.
| 0 |
static const URI_CHAR * URI_FUNC(ParseOwnPortUserInfo)(
URI_TYPE(ParserState) * state, const URI_CHAR * first,
const URI_CHAR * afterLast, UriMemoryManager * memory) {
if (first >= afterLast) {
if (!URI_FUNC(OnExitOwnPortUserInfo)(state, first, memory)) {
URI_FUNC(StopMalloc)(state, memory);
return NULL;
}
return afterLast;
}
switch (*first) {
/* begin sub-delims */
case _UT('!'):
case _UT('$'):
case _UT('&'):
case _UT('\''):
case _UT('('):
case _UT(')'):
case _UT('*'):
case _UT('+'):
case _UT(','):
case _UT(';'):
case _UT('='):
/* end sub-delims */
/* begin unreserved (except alpha and digit) */
case _UT('-'):
case _UT('.'):
case _UT('_'):
case _UT('~'):
/* end unreserved (except alpha and digit) */
case _UT(':'):
case URI_SET_ALPHA:
state->uri->hostText.afterLast = NULL; /* Not a host, reset */
state->uri->portText.first = NULL; /* Not a port, reset */
return URI_FUNC(ParseOwnUserInfo)(state, first + 1, afterLast, memory);
case URI_SET_DIGIT:
return URI_FUNC(ParseOwnPortUserInfo)(state, first + 1, afterLast, memory);
case _UT('%'):
state->uri->portText.first = NULL; /* Not a port, reset */
{
const URI_CHAR * const afterPct
= URI_FUNC(ParsePctEncoded)(state, first, afterLast, memory);
if (afterPct == NULL) {
return NULL;
}
return URI_FUNC(ParseOwnUserInfo)(state, afterPct, afterLast, memory);
}
case _UT('@'):
state->uri->hostText.afterLast = NULL; /* Not a host, reset */
state->uri->portText.first = NULL; /* Not a port, reset */
state->uri->userInfo.afterLast = first; /* USERINFO END */
state->uri->hostText.first = first + 1; /* HOST BEGIN */
return URI_FUNC(ParseOwnHost)(state, first + 1, afterLast, memory);
default:
if (!URI_FUNC(OnExitOwnPortUserInfo)(state, first, memory)) {
URI_FUNC(StopMalloc)(state, memory);
return NULL;
}
return first;
}
}
|
Safe
|
[
"CWE-125"
] |
uriparser
|
cef25028de5ff872c2e1f0a6c562eb3ea9ecbce4
|
1.8502773933430576e+38
| 66 |
Fix uriParse*Ex* out-of-bounds read
| 0 |
static gboolean encrypt_notify(GIOChannel *io, GIOCondition condition,
gpointer data)
{
struct input_device *idev = data;
int err;
DBG("");
if (idev->uhid)
err = uhid_connadd(idev, idev->req);
else
err = ioctl_connadd(idev->req);
if (err < 0) {
error("ioctl_connadd(): %s (%d)", strerror(-err), -err);
if (idev->ctrl_io) {
g_io_channel_shutdown(idev->ctrl_io, FALSE, NULL);
g_io_channel_unref(idev->ctrl_io);
idev->ctrl_io = NULL;
}
if (idev->intr_io) {
g_io_channel_shutdown(idev->intr_io, FALSE, NULL);
g_io_channel_unref(idev->intr_io);
idev->intr_io = NULL;
}
}
idev->sec_watch = 0;
g_free(idev->req->rd_data);
g_free(idev->req);
idev->req = NULL;
return FALSE;
}
|
Safe
|
[] |
bluez
|
3cccdbab2324086588df4ccf5f892fb3ce1f1787
|
2.5947821019762175e+38
| 37 |
HID accepts bonded device connections only.
This change adds a configuration for platforms to choose a more secure
posture for the HID profile. While some older mice are known to not
support pairing or encryption, some platform may choose a more secure
posture by requiring the device to be bonded and require the
connection to be encrypted when bonding is required.
Reference:
https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00352.html
| 0 |
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{
struct vxlan_dev *vxlan, *next;
LIST_HEAD(list_kill);
list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
struct vxlan_rdst *dst = &vxlan->default_dst;
/* In case we created vxlan device with carrier
* and we loose the carrier due to module unload
* we also need to remove vxlan device. In other
* cases, it's not necessary and remote_ifindex
* is 0 here, so no matches.
*/
if (dst->remote_ifindex == dev->ifindex)
vxlan_dellink(vxlan->dev, &list_kill);
}
unregister_netdevice_many(&list_kill);
}
|
Safe
|
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
|
2.8983034513655943e+38
| 21 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <xmu@redhat.com>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
const struct path *path)
{
char *p, *pathname;
if (prefix)
audit_log_format(ab, "%s", prefix);
/* We will allow 11 spaces for ' (deleted)' to be appended */
pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
if (!pathname) {
audit_log_string(ab, "<no_memory>");
return;
}
p = d_path(path, pathname, PATH_MAX+11);
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
audit_log_string(ab, "<too_long>");
} else
audit_log_untrustedstring(ab, p);
kfree(pathname);
}
|
Safe
|
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
|
4.194678326682833e+37
| 22 |
net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible, by passing a netlink socket to a more privileged
executable and then fooling that executable into writing to the socket
data that happens to be a valid netlink message, to make the privileged
executable do something it did not intend to do.
To keep this from happening, replace bare capable and ns_capable calls
with netlink_capable, netlink_net_capable and netlink_ns_capable calls,
which act the same as the previous calls except that they also verify
that the opener of the socket had the desired permissions.
Reported-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
free_job_history(cupsd_job_t *job) /* I - Job */
{
char *message; /* Current message */
if (!job->history)
return;
for (message = (char *)cupsArrayFirst(job->history);
message;
message = (char *)cupsArrayNext(job->history))
free(message);
cupsArrayDelete(job->history);
job->history = NULL;
}
|
Safe
|
[] |
cups
|
d47f6aec436e0e9df6554436e391471097686ecc
|
1.2700584335263287e+38
| 16 |
Fix local privilege escalation to root and sandbox bypasses in scheduler
(rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581)
| 0 |
int _cdk_pk_algo_usage(int algo)
{
int usage;
/* The ElGamal sign+encrypt algorithm is not supported any longer. */
switch (algo) {
case CDK_PK_RSA:
usage = CDK_KEY_USG_SIGN | CDK_KEY_USG_ENCR;
break;
case CDK_PK_RSA_E:
usage = CDK_KEY_USG_ENCR;
break;
case CDK_PK_RSA_S:
usage = CDK_KEY_USG_SIGN;
break;
case CDK_PK_ELG_E:
usage = CDK_KEY_USG_ENCR;
break;
case CDK_PK_DSA:
usage = CDK_KEY_USG_SIGN;
break;
default:
usage = 0;
}
return usage;
}
|
Safe
|
[
"CWE-119"
] |
gnutls
|
5140422e0d7319a8e2fe07f02cbcafc4d6538732
|
2.9895532999796103e+38
| 26 |
opencdk: cdk_pk_get_keyid: fix stack overflow
Issue found using oss-fuzz:
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=340
Signed-off-by: Nikos Mavrogiannopoulos <nmav@redhat.com>
| 0 |
TEST_F(HttpHealthCheckerImplTest, ServicePatternDoesNotMatchFail) {
setupServiceRegexPatternValidationHC();
EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));
EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100))
.WillOnce(Return(true));
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));
EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));
cluster_->prioritySet().getMockHostSet(0)->hosts_ = {
makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())};
cluster_->info_->stats().upstream_cx_total_.inc();
expectSessionCreate();
expectStreamCreate(0);
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));
health_checker_->start();
EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.max_interval", _));
EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.min_interval", _))
.WillOnce(Return(45000));
EXPECT_CALL(*test_sessions_[0]->interval_timer_,
enableTimer(std::chrono::milliseconds(45000), _));
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());
absl::optional<std::string> health_checked_cluster("api-production-iad");
respond(0, "200", false, false, true, false, health_checked_cluster);
EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(
Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_EQ(Host::Health::Unhealthy,
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());
}
|
Safe
|
[
"CWE-476"
] |
envoy
|
9b1c3962172a972bc0359398af6daa3790bb59db
|
1.8938243721720066e+38
| 30 |
healthcheck: fix grpc inline removal crashes (#749)
Signed-off-by: Matt Klein <mklein@lyft.com>
Signed-off-by: Pradeep Rao <pcrao@google.com>
| 0 |
static void set_cloexec_flag(const int fd)
{
fcntl(fd, F_SETFD, FD_CLOEXEC);
}
|
Safe
|
[
"CWE-434"
] |
pure-ftpd
|
37ad222868e52271905b94afea4fc780d83294b4
|
2.3101509787148162e+36
| 4 |
Initialize the max upload file size when quotas are enabled
Due to an unwanted check, files causing the quota to be exceeded
were deleted after the upload, but not during the upload.
The bug was introduced in 2009 in version 1.0.23
Spotted by @DroidTest, thanks!
| 0 |
zone_check_srv(dns_zone_t *zone, dns_db_t *db, dns_name_t *name,
dns_name_t *owner)
{
isc_result_t result;
char ownerbuf[DNS_NAME_FORMATSIZE];
char namebuf[DNS_NAME_FORMATSIZE];
char altbuf[DNS_NAME_FORMATSIZE];
dns_fixedname_t fixed;
dns_name_t *foundname;
int level;
/*
* "." means the services does not exist.
*/
if (dns_name_equal(name, dns_rootname))
return (true);
/*
* Outside of zone.
*/
if (!dns_name_issubdomain(name, &zone->origin)) {
if (zone->checksrv != NULL)
return ((zone->checksrv)(zone, name, owner));
return (true);
}
if (zone->type == dns_zone_master)
level = ISC_LOG_ERROR;
else
level = ISC_LOG_WARNING;
foundname = dns_fixedname_initname(&fixed);
result = dns_db_find(db, name, NULL, dns_rdatatype_a,
0, 0, NULL, foundname, NULL, NULL);
if (result == ISC_R_SUCCESS)
return (true);
if (result == DNS_R_NXRRSET) {
result = dns_db_find(db, name, NULL, dns_rdatatype_aaaa,
0, 0, NULL, foundname, NULL, NULL);
if (result == ISC_R_SUCCESS)
return (true);
}
dns_name_format(owner, ownerbuf, sizeof ownerbuf);
dns_name_format(name, namebuf, sizeof namebuf);
if (result == DNS_R_NXRRSET || result == DNS_R_NXDOMAIN ||
result == DNS_R_EMPTYNAME) {
dns_zone_log(zone, level,
"%s/SRV '%s' has no address records (A or AAAA)",
ownerbuf, namebuf);
/* XXX950 make fatal for 9.5.0. */
return (true);
}
if (result == DNS_R_CNAME) {
if (DNS_ZONE_OPTION(zone, DNS_ZONEOPT_WARNSRVCNAME) ||
DNS_ZONE_OPTION(zone, DNS_ZONEOPT_IGNORESRVCNAME))
level = ISC_LOG_WARNING;
if (!DNS_ZONE_OPTION(zone, DNS_ZONEOPT_IGNORESRVCNAME))
dns_zone_log(zone, level,
"%s/SRV '%s' is a CNAME (illegal)",
ownerbuf, namebuf);
return ((level == ISC_LOG_WARNING) ? true : false);
}
if (result == DNS_R_DNAME) {
if (DNS_ZONE_OPTION(zone, DNS_ZONEOPT_WARNSRVCNAME) ||
DNS_ZONE_OPTION(zone, DNS_ZONEOPT_IGNORESRVCNAME))
level = ISC_LOG_WARNING;
if (!DNS_ZONE_OPTION(zone, DNS_ZONEOPT_IGNORESRVCNAME)) {
dns_name_format(foundname, altbuf, sizeof altbuf);
dns_zone_log(zone, level, "%s/SRV '%s' is below a "
"DNAME '%s' (illegal)", ownerbuf, namebuf,
altbuf);
}
return ((level == ISC_LOG_WARNING) ? true : false);
}
if (zone->checksrv != NULL && result == DNS_R_DELEGATION)
return ((zone->checksrv)(zone, name, owner));
return (true);
}
|
Safe
|
[
"CWE-327"
] |
bind9
|
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
|
2.9926872315308467e+38
| 85 |
Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key.
| 0 |
static int handle_external_interrupt(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
++vcpu->stat.irq_exits;
KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
return 1;
}
|
Safe
|
[
"CWE-20"
] |
linux-2.6
|
16175a796d061833aacfbd9672235f2d2725df65
|
2.918011398059057e+38
| 7 |
KVM: VMX: Don't allow uninhibited access to EFER on i386
vmx_set_msr() does not allow i386 guests to touch EFER, but they can still
do so through the default: label in the switch. If they set EFER_LME, they
can oops the host.
Fix by having EFER access through the normal channel (which will check for
EFER_LME) even on i386.
Reported-and-tested-by: Benjamin Gilbert <bgilbert@cs.cmu.edu>
Cc: stable@kernel.org
Signed-off-by: Avi Kivity <avi@redhat.com>
| 0 |
**/
unsigned int normalization() const {
return _normalization;
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
9.0905962861916e+36
| 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
int what)
{
switch (i->type) {
case PACKET_MR_MULTICAST:
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
return dev_mc_add(dev, i->addr);
else
return dev_mc_del(dev, i->addr);
break;
case PACKET_MR_PROMISC:
return dev_set_promiscuity(dev, what);
break;
case PACKET_MR_ALLMULTI:
return dev_set_allmulti(dev, what);
break;
case PACKET_MR_UNICAST:
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
return dev_uc_add(dev, i->addr);
else
return dev_uc_del(dev, i->addr);
break;
default:
break;
}
return 0;
}
|
Safe
|
[
"CWE-909"
] |
linux-2.6
|
67286640f638f5ad41a946b9a3dc75327950248f
|
2.5002660356085154e+38
| 31 |
net: packet: fix information leak to userland
packet_getname_spkt() doesn't initialize all members of the sa_data field of
the sockaddr struct if strlen(dev->name) < 13. This structure is then copied
to userland, leaking the contents of kernel stack memory.
We have to fully fill sa_data with strncpy() instead of strlcpy().
The same with packet_getname(): it doesn't initialize sll_pkttype field of
sockaddr_ll. Set it to zero.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
diffopt_hiddenoff(void)
{
return (diff_flags & DIFF_HIDDEN_OFF) != 0;
}
|
Safe
|
[
"CWE-787"
] |
vim
|
c101abff4c6756db4f5e740fde289decb9452efa
|
3.073177862837675e+38
| 4 |
patch 8.2.5164: invalid memory access after diff buffer manipulations
Problem: Invalid memory access after diff buffer manipulations.
Solution: Use zero offset when change removes all lines in a diff block.
| 0 |
char *
string_list_pos_params (pchar, list, quoted)
int pchar;
WORD_LIST *list;
int quoted;
{
char *ret;
WORD_LIST *tlist;
if (pchar == '*' && (quoted & Q_DOUBLE_QUOTES))
{
tlist = quote_list (list);
word_list_remove_quoted_nulls (tlist);
ret = string_list_dollar_star (tlist);
}
else if (pchar == '*' && (quoted & Q_HERE_DOCUMENT))
{
tlist = quote_list (list);
word_list_remove_quoted_nulls (tlist);
ret = string_list (tlist);
}
else if (pchar == '*')
{
/* Even when unquoted, string_list_dollar_star does the right thing
making sure that the first character of $IFS is used as the
separator. */
ret = string_list_dollar_star (list);
}
else if (pchar == '@' && (quoted & (Q_HERE_DOCUMENT|Q_DOUBLE_QUOTES)))
/* We use string_list_dollar_at, but only if the string is quoted, since
that quotes the escapes if it's not, which we don't want. We could
use string_list (the old code did), but that doesn't do the right
thing if the first character of $IFS is not a space. We use
string_list_dollar_star if the string is unquoted so we make sure that
the elements of $@ are separated by the first character of $IFS for
later splitting. */
ret = string_list_dollar_at (list, quoted, 0);
else if (pchar == '@')
ret = string_list_dollar_star (list);
else
ret = string_list ((quoted & (Q_HERE_DOCUMENT|Q_DOUBLE_QUOTES)) ? quote_list (list) : list);
return ret;
|
Safe
|
[
"CWE-20"
] |
bash
|
4f747edc625815f449048579f6e65869914dd715
|
2.3069102296438636e+38
| 43 |
Bash-4.4 patch 7
| 0 |
void vnc_write(VncState *vs, const void *data, size_t len)
{
buffer_reserve(&vs->output, len);
if (vs->csock != -1 && buffer_empty(&vs->output)) {
qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, vnc_client_write, vs);
}
buffer_append(&vs->output, data, len);
}
|
Safe
|
[
"CWE-125"
] |
qemu
|
bea60dd7679364493a0d7f5b54316c767cf894ef
|
5.56811048964824e+37
| 10 |
ui/vnc: fix potential memory corruption issues
This patch makes the VNC server work correctly if the
server surface and the guest surface have different sizes.
Basically the server surface is adjusted to not exceed VNC_MAX_WIDTH
x VNC_MAX_HEIGHT and additionally the width is rounded up to a multiple of
VNC_DIRTY_PIXELS_PER_BIT.
If we have a resolution whose width is not divisible by VNC_DIRTY_PIXELS_PER_BIT
we now get a small black bar on the right of the screen.
If the surface is too big to fit the limits only the upper left area is shown.
On top of that this fixes 2 memory corruption issues:
The first was actually discovered while playing
around with a Windows 7 vServer. During resolution
change in Windows 7 it happens sometimes that Windows
changes to an intermediate resolution where
server_stride % cmp_bytes != 0 (in vnc_refresh_server_surface).
This happens only if width % VNC_DIRTY_PIXELS_PER_BIT != 0.
The second is a theoretical issue, but may be exploitable
by the guest. If for some reason the guest surface size is bigger
than VNC_MAX_WIDTH x VNC_MAX_HEIGHT we end up in severe corruption since
this limit is nowhere enforced.
Signed-off-by: Peter Lieven <pl@kamp.de>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
| 0 |
_rl_ttyflush ()
{
fflush (rl_outstream);
}
|
Safe
|
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
|
1.532818370800239e+38
| 4 |
bash-4.4-rc2 release
| 0 |
static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
struct sctp_paramhdr *param,
const struct sctp_chunk *chunk,
struct sctp_chunk **errp)
{
/* This is a fatal error. Any accumulated non-fatal errors are
* not reported.
*/
if (*errp)
sctp_chunk_free(*errp);
/* Create an error chunk and fill it in with our payload. */
*errp = sctp_make_violation_paramlen(asoc, chunk, param);
return 0;
}
|
Safe
|
[
"CWE-20"
] |
linux-2.6
|
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
|
1.470247733564912e+37
| 16 |
sctp: Fix kernel panic while processing a protocol violation parameter
The call to sctp_sf_abort_violation() needs an 'arg' parameter of type
'struct sctp_chunk'; it reads the chunk type and chunk length from
the chunk_hdr member of that chunk. But sctp_sf_violation_paramlen() is
always called with a parameter of type 'struct sctp_paramhdr', which is
then passed to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes the problem. It also fixes two places which called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com>
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t vmcs_gva;
struct x86_exception e;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &vmcs_gva))
return 1;
/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
(void *)&to_vmx(vcpu)->nested.current_vmptr,
sizeof(u64), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
nested_vmx_succeed(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
|
Safe
|
[
"CWE-284",
"CWE-264"
] |
linux
|
3ce424e45411cf5a13105e0386b6ecf6eeb4f66f
|
2.3300518162043336e+38
| 24 |
kvm:vmx: more complete state update on APICv on/off
The function to update APICv on/off state (in particular, to deactivate
it when enabling Hyper-V SynIC) is incomplete: it doesn't adjust
APICv-related fields among secondary processor-based VM-execution
controls. As a result, Windows 2012 guests get stuck when SynIC-based
auto-EOI interrupt intersected with e.g. an IPI in the guest.
In addition, the MSR intercept bitmap isn't updated every time "virtualize
x2APIC mode" is toggled. This path can only be triggered by a malicious
guest, because Windows didn't use x2APIC but rather their own synthetic
APIC access MSRs; however a guest running in a SynIC-enabled VM could
switch to x2APIC and thus obtain direct access to host APIC MSRs
(CVE-2016-4440).
The patch fixes those omissions.
Signed-off-by: Roman Kagan <rkagan@virtuozzo.com>
Reported-by: Steve Rutherford <srutherford@google.com>
Reported-by: Yang Zhang <yang.zhang.wz@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
TEST_F(RouterTest, HedgedPerTryTimeoutResetsOnBadHeaders) {
enableHedgeOnPerTryTimeout();
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder1 = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
.WillOnce(Invoke(
[&](Http::ResponseDecoder& decoder,
Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
response_decoder1 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
absl::optional<uint64_t>(absl::nullopt)))
.Times(2);
expectPerTryTimerCreate();
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_CALL(
cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
NiceMock<Http::MockRequestEncoder> encoder2;
Http::ResponseDecoder* response_decoder2 = nullptr;
router_.retry_state_->expectHedgedPerTryTimeoutRetry();
per_try_timeout_->invokeCallback();
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
.WillOnce(Invoke(
[&](Http::ResponseDecoder& decoder,
Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
response_decoder2 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
expectPerTryTimerCreate();
router_.retry_state_->callback_();
EXPECT_EQ(2U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// We should not have updated any stats yet because no requests have been
// canceled
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
// Now write a 5xx back on the 2nd request with no retries remaining. The 2nd request
// should be reset immediately.
Http::ResponseHeaderMapPtr bad_response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "500"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(500));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
EXPECT_CALL(encoder2.stream_, resetStream(_));
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _))
.WillOnce(Return(RetryStatus::NoOverflow));
// Not end_stream, otherwise we wouldn't need to reset.
response_decoder2->decodeHeaders(std::move(bad_response_headers), false);
// Now write a 200 back. We expect the 2nd stream to be reset and stats to be
// incremented properly.
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(200));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
.WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {
EXPECT_EQ(headers.Status()->value(), "200");
EXPECT_TRUE(end_stream);
}));
response_decoder1->decodeHeaders(std::move(response_headers), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
// TODO: Verify hedge stats here once they are implemented.
}
|
Safe
|
[
"CWE-703"
] |
envoy
|
18871dbfb168d3512a10c78dd267ff7c03f564c6
|
2.2590465653473237e+38
| 87 |
[1.18] CVE-2022-21655
Crash with direct_response
Signed-off-by: Otto van der Schaaf <ovanders@redhat.com>
| 0 |
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
union megasas_sgl *mfi_sgl)
{
int i;
int sge_count;
struct scatterlist *os_sgl;
sge_count = scsi_dma_map(scp);
BUG_ON(sge_count < 0);
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
}
}
return sge_count;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
bcf3b67d16a4c8ffae0aa79de5853435e683945c
|
1.6612365619688413e+38
| 18 |
scsi: megaraid_sas: return error when creating DMA pool fails
When creating the DMA pool for cmd frames fails, we should return -ENOMEM
instead of 0.
In some cases, in:
megasas_init_adapter_fusion()
-->megasas_alloc_cmds()
-->megasas_create_frame_pool
create DMA pool failed,
--> megasas_free_cmds() [1]
-->megasas_alloc_cmds_fusion()
failed, then goto fail_alloc_cmds.
-->megasas_free_cmds() [2]
we will call megasas_free_cmds twice: [1] will kfree cmd_list,
[2] will use cmd_list. It will cause a problem:
Unable to handle kernel NULL pointer dereference at virtual address
00000000
pgd = ffffffc000f70000
[00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003,
*pmd=0000001fbf894003, *pte=006000006d000707
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 18 PID: 1 Comm: swapper/0 Not tainted
task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000
PC is at megasas_free_cmds+0x30/0x70
LR is at megasas_free_cmds+0x24/0x70
...
Call trace:
[<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70
[<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8
[<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760
[<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8
[<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4
[<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c
[<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430
[<ffffffc00053a92c>] __driver_attach+0xa8/0xb0
[<ffffffc000538178>] bus_for_each_dev+0x74/0xc8
[<ffffffc000539e88>] driver_attach+0x28/0x34
[<ffffffc000539a18>] bus_add_driver+0x16c/0x248
[<ffffffc00053b234>] driver_register+0x6c/0x138
[<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c
[<ffffffc000ce3868>] megasas_init+0xc0/0x1a8
[<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec
[<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284
[<ffffffc0008d90b8>] kernel_init+0x1c/0xe4
Signed-off-by: Jason Yan <yanaijie@huawei.com>
Acked-by: Sumit Saxena <sumit.saxena@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
| 0 |
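The commit message in the record above describes a double free: the allocation path frees cmd_list itself on failure but reports success, so the caller's error path frees (and then touches) it again. Below is a hedged, self-contained C sketch of that pattern and of the fix (propagate the error, make the free idempotent); all names are illustrative stand-ins, not the megaraid_sas symbols.

```c
/* Hedged sketch: not the megaraid_sas code, just the failure pattern the
 * commit message describes.  alloc_cmds() frees its own resources on
 * failure; if it then reports success (0), the caller's cleanup path calls
 * free_cmds() a second time on an already-freed list. */
#include <stdio.h>
#include <stdlib.h>

struct instance { void **cmd_list; };

static void free_cmds(struct instance *inst)
{
    free(inst->cmd_list);
    inst->cmd_list = NULL;          /* makes a second call harmless */
}

static int alloc_cmds(struct instance *inst)
{
    inst->cmd_list = calloc(16, sizeof(void *));
    if (!inst->cmd_list)
        return -1;                  /* stand-in for -ENOMEM */
    /* pretend the DMA-pool creation step failed */
    free_cmds(inst);
    return -1;                      /* the fix: report the failure, not 0 */
}

int main(void)
{
    struct instance inst = { 0 };
    if (alloc_cmds(&inst) < 0)
        free_cmds(&inst);           /* caller cleanup; safe because of NULLing */
    puts("no double free");
    return 0;
}
```

NULL-ing the pointer inside the free helper is what keeps the (unavoidable) second cleanup call from dereferencing freed memory.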
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
{
struct dwc3_ep *dep;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
dep = dwc->eps[epnum];
if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
!dep->resource_index)
return;
/*
* NOTICE: We are violating what the Databook says about the
* EndTransfer command. Ideally we would _always_ wait for the
* EndTransfer Command Completion IRQ, but that's causing too
* much trouble synchronizing between us and gadget driver.
*
* We have discussed this with the IP Provider and it was
* suggested to giveback all requests here, but give HW some
* extra time to synchronize with the interconnect. We're using
* an arbitrary 100us delay for that.
*
* Note also that a similar handling was tested by Synopsys
* (thanks a lot Paul) and nothing bad has come out of it.
* In short, what we're doing is:
*
* - Issue EndTransfer WITH CMDIOC bit set
* - Wait 100us
*
* As of IP version 3.10a of the DWC_usb3 IP, the controller
* supports a mode to work around the above limitation. The
* software can poll the CMDACT bit in the DEPCMD register
* after issuing a EndTransfer command. This mode is enabled
* by writing GUCTL2[14]. This polling is already done in the
* dwc3_send_gadget_ep_cmd() function so if the mode is
* enabled, the EndTransfer command will have completed upon
* returning from this function and we don't need to delay for
* 100us.
*
* This mode is NOT available on the DWC_usb31 IP.
*/
cmd = DWC3_DEPCMD_ENDTRANSFER;
cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
WARN_ON_ONCE(ret);
dep->resource_index = 0;
dep->flags &= ~DWC3_EP_BUSY;
if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
udelay(100);
}
}
|
Safe
|
[
"CWE-703",
"CWE-667",
"CWE-189"
] |
linux
|
c91815b596245fd7da349ecc43c8def670d2269e
|
1.1258298393744397e+38
| 59 |
usb: dwc3: gadget: never call ->complete() from ->ep_queue()
This is a requirement which has always existed but, somehow, wasn't
reflected in the documentation and problems weren't found until now
when Tuba Yavuz found a possible deadlock happening between dwc3 and
f_hid. She described the situation as follows:
spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire
/* we our function has been disabled by host */
if (!hidg->req) {
free_ep_req(hidg->in_ep, hidg->req);
goto try_again;
}
[...]
status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
=>
[...]
=> usb_gadget_giveback_request
=>
f_hidg_req_complete
=>
spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire
Note that this happens because dwc3 would call ->complete() on a
failed usb_ep_queue() due to failed Start Transfer command. This is,
anyway, a theoretical situation because dwc3 currently uses "No
Response Update Transfer" command for Bulk and Interrupt endpoints.
It's still good to make this case impossible to happen even if the "No
Response Update Transfer" command is changed.
Reported-by: Tuba Yavuz <tuba@ece.ufl.edu>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
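The commit message in the record above describes why a queue function must never invoke the request's ->complete() callback synchronously: the caller may still hold a non-recursive lock that the callback re-acquires. Below is a hedged C sketch of that deadlock shape; the lock and function names are illustrative, not the real dwc3/f_hid symbols.

```c
/* Hedged sketch of the deadlock described above: ep_queue() reports a
 * failure by calling ->complete() while the caller still holds write_lock,
 * and the completion handler wants write_lock again. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;

static void complete_cb(void)
{
    /* f_hid-style completion: wants write_lock again */
    if (pthread_mutex_trylock(&write_lock) != 0) {
        printf("would deadlock: write_lock already held by the caller\n");
        return;
    }
    pthread_mutex_unlock(&write_lock);
}

static int ep_queue(int fail, void (*complete)(void))
{
    if (fail) {
        /* Buggy behaviour: completing the request from inside ep_queue().
         * The fix is to just return an error and never call ->complete()
         * from this context. */
        complete();
        return -1;
    }
    return 0;
}

int main(void)
{
    pthread_mutex_lock(&write_lock);      /* caller acquires its lock ... */
    ep_queue(1, complete_cb);             /* ... then queues a request     */
    pthread_mutex_unlock(&write_lock);
    return 0;
}
```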
static int tls_check_preauth (const gnutls_datum_t *certdata,
gnutls_certificate_status_t certstat,
const char *hostname, int chainidx, int* certerr,
int* savedcert)
{
gnutls_x509_crt_t cert;
*certerr = CERTERR_VALID;
*savedcert = 0;
if (gnutls_x509_crt_init (&cert) < 0)
{
mutt_error (_("Error initialising gnutls certificate data"));
mutt_sleep (2);
return -1;
}
if (gnutls_x509_crt_import (cert, certdata, GNUTLS_X509_FMT_DER) < 0)
{
mutt_error (_("Error processing certificate data"));
mutt_sleep (2);
gnutls_x509_crt_deinit (cert);
return -1;
}
/* Note: tls_negotiate() contains a call to
* gnutls_certificate_set_verify_flags() with a flag disabling
* GnuTLS checking of the dates. So certstat shouldn't have the
* GNUTLS_CERT_EXPIRED and GNUTLS_CERT_NOT_ACTIVATED bits set. */
if (option (OPTSSLVERIFYDATES) != MUTT_NO)
{
if (gnutls_x509_crt_get_expiration_time (cert) < time(NULL))
*certerr |= CERTERR_EXPIRED;
if (gnutls_x509_crt_get_activation_time (cert) > time(NULL))
*certerr |= CERTERR_NOTYETVALID;
}
if (chainidx == 0 && option (OPTSSLVERIFYHOST) != MUTT_NO
&& !gnutls_x509_crt_check_hostname (cert, hostname)
&& !tls_check_stored_hostname (certdata, hostname))
*certerr |= CERTERR_HOSTNAME;
if (certstat & GNUTLS_CERT_REVOKED)
{
*certerr |= CERTERR_REVOKED;
certstat ^= GNUTLS_CERT_REVOKED;
}
/* see whether certificate is in our cache (certificates file) */
if (tls_compare_certificates (certdata))
{
*savedcert = 1;
/* We check above for certs with bad dates or that are revoked.
* These must be accepted manually each time. Otherwise, we
* accept saved certificates as valid. */
if (*certerr == CERTERR_VALID)
{
gnutls_x509_crt_deinit (cert);
return 0;
}
}
if (certstat & GNUTLS_CERT_INVALID)
{
*certerr |= CERTERR_NOTTRUSTED;
certstat ^= GNUTLS_CERT_INVALID;
}
if (certstat & GNUTLS_CERT_SIGNER_NOT_FOUND)
{
/* NB: already cleared if cert in cache */
*certerr |= CERTERR_NOTTRUSTED;
certstat ^= GNUTLS_CERT_SIGNER_NOT_FOUND;
}
if (certstat & GNUTLS_CERT_SIGNER_NOT_CA)
{
/* NB: already cleared if cert in cache */
*certerr |= CERTERR_SIGNERNOTCA;
certstat ^= GNUTLS_CERT_SIGNER_NOT_CA;
}
if (certstat & GNUTLS_CERT_INSECURE_ALGORITHM)
{
/* NB: already cleared if cert in cache */
*certerr |= CERTERR_INSECUREALG;
certstat ^= GNUTLS_CERT_INSECURE_ALGORITHM;
}
/* we've been zeroing the interesting bits in certstat -
* don't return OK if there are any unhandled bits we don't
* understand */
if (certstat != 0)
*certerr |= CERTERR_OTHER;
gnutls_x509_crt_deinit (cert);
if (*certerr == CERTERR_VALID)
return 0;
return -1;
}
|
Safe
|
[
"CWE-74"
] |
mutt
|
c547433cdf2e79191b15c6932c57f1472bfb5ff4
|
5.240737671624663e+37
| 103 |
Fix STARTTLS response injection attack.
Thanks again to Damian Poddebniak and Fabian Ising from the Münster
University of Applied Sciences for reporting this issue. Their
summary in ticket 248 states the issue clearly:
We found another STARTTLS-related issue in Mutt. Unfortunately, it
affects SMTP, POP3 and IMAP.
When the server responds with its "let's do TLS now message", e.g. A
OK begin TLS\r\n in IMAP or +OK begin TLS\r\n in POP3, Mutt will
also read any data after the \r\n and save it into some internal
buffer for later processing. This is problematic, because a MITM
attacker can inject arbitrary responses.
There is a nice blogpost by Wietse Venema about a "command
injection" in postfix (http://www.postfix.org/CVE-2011-0411.html).
What we have here is the problem in reverse, i.e. not a command
injection, but a "response injection."
This commit fixes the issue by clearing the CONNECTION input buffer in
mutt_ssl_starttls().
To make backporting this fix easier, the new functions only clear the
top-level CONNECTION buffer; they don't handle nested buffering in
mutt_zstrm.c or mutt_sasl.c. However both of those wrap the
connection *after* STARTTLS, so this is currently okay. mutt_tunnel.c
occurs before connecting, but it does not perform any nesting.
| 0 |
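The commit message in the record above explains the STARTTLS response-injection fix: any plaintext bytes buffered after the server's "begin TLS" line must be discarded before the handshake, or a MITM can pre-inject responses. Below is a hedged C sketch of that mitigation; the CONNECTION layout and function names are hypothetical, not mutt's actual API.

```c
/* Hedged sketch of the STARTTLS mitigation described above: throw away any
 * bytes still sitting in the plaintext read buffer before starting TLS, so
 * they can never be parsed later as protocol responses. */
#include <stddef.h>
#include <string.h>

struct connection {
    char   inbuf[4096];   /* buffered plaintext read from the socket */
    size_t bufpos;        /* next unread byte */
    size_t available;     /* number of valid bytes in inbuf */
};

/* Drop everything still buffered at the plaintext layer. */
static void conn_clear_buffer(struct connection *conn)
{
    conn->bufpos = 0;
    conn->available = 0;
    memset(conn->inbuf, 0, sizeof(conn->inbuf));
}

static int starttls(struct connection *conn)
{
    /* ... send STARTTLS, read the "+OK begin TLS" status line ... */
    conn_clear_buffer(conn);      /* the fix: forget pre-handshake bytes */
    /* ... only now run the TLS handshake on the raw socket ... */
    return 0;
}

int main(void)
{
    struct connection conn = { {0}, 0, 0 };
    return starttls(&conn);
}
```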
static int do_directive(Token *tline, Token **output)
{
enum preproc_token op;
int j;
bool err;
enum nolist_flags nolist;
bool casesense;
int k, m;
int offset;
const char *p;
char *q, *qbuf;
const char *found_path;
const char *mname;
struct ppscan pps;
Include *inc;
Context *ctx;
Cond *cond;
MMacro *mmac, **mmhead;
Token *t = NULL, *tt, *macro_start, *last, *origline;
Line *l;
struct tokenval tokval;
expr *evalresult;
int64_t count;
size_t len;
errflags severity;
const char *dname; /* Name of directive, for messages */
*output = NULL; /* No output generated */
origline = tline;
if (tok_is(tline, '#')) {
/* cpp-style line directive */
if (!tok_white(tline->next))
return NO_DIRECTIVE_FOUND;
dname = tok_text(tline);
goto pp_line;
}
tline = skip_white(tline);
if (!tline || !tok_type(tline, TOK_PREPROC_ID))
return NO_DIRECTIVE_FOUND;
dname = tok_text(tline);
if (dname[1] == '%')
return NO_DIRECTIVE_FOUND;
op = pp_token_hash(dname);
casesense = true;
if (PP_HAS_CASE(op) & PP_INSENSITIVE(op)) {
casesense = false;
op--;
}
/*
* %line directives are always processed immediately and
* unconditionally, as they are intended to reflect position
* in externally preprocessed sources.
*/
if (op == PP_LINE) {
pp_line:
/*
* Syntax is `%line nnn[+mmm] [filename]'
*/
if (pp_noline || istk->mstk.mstk)
goto done;
tline = tline->next;
tline = skip_white(tline);
if (!tok_type(tline, TOK_NUMBER)) {
nasm_nonfatal("`%s' expects line number", dname);
goto done;
}
k = readnum(tok_text(tline), &err);
m = 1;
tline = tline->next;
if (tok_is(tline, '+') || tok_is(tline, '-')) {
bool minus = tok_is(tline, '-');
tline = tline->next;
if (!tok_type(tline, TOK_NUMBER)) {
nasm_nonfatal("`%s' expects line increment", dname);
goto done;
}
m = readnum(tok_text(tline), &err);
if (minus)
m = -m;
tline = tline->next;
}
tline = skip_white(tline);
if (tline) {
if (tline->type == TOK_STRING) {
if (dname[0] == '#') {
/* cpp version: treat double quotes like NASM backquotes */
char *txt = tok_text_buf(tline);
if (txt[0] == '"') {
txt[0] = '`';
txt[tline->len - 1] = '`';
}
}
src_set_fname(unquote_token(tline));
/*
* Anything after the string is ignored by design (for cpp
* compatibility and future extensions.)
*/
} else {
char *fname = detoken(tline, false);
src_set_fname(fname);
nasm_free(fname);
}
}
src_set_linnum(k);
istk->where = src_where();
istk->lineinc = m;
goto done;
}
/*
* If we're in a non-emitting branch of a condition construct,
* or walking to the end of an already terminated %rep block,
* we should ignore all directives except for condition
* directives.
*/
if (((istk->conds && !emitting(istk->conds->state)) ||
(istk->mstk.mstk && !istk->mstk.mstk->in_progress)) &&
!is_condition(op)) {
return NO_DIRECTIVE_FOUND;
}
/*
* If we're defining a macro or reading a %rep block, we should
* ignore all directives except for %macro/%imacro (which nest),
* %endm/%endmacro, %line and (only if we're in a %rep block) %endrep.
* If we're in a %rep block, another %rep nests, so should be let through.
*/
if (defining && op != PP_MACRO && op != PP_RMACRO &&
op != PP_ENDMACRO && op != PP_ENDM &&
(defining->name || (op != PP_ENDREP && op != PP_REP))) {
return NO_DIRECTIVE_FOUND;
}
if (defining) {
if (op == PP_MACRO || op == PP_RMACRO) {
nested_mac_count++;
return NO_DIRECTIVE_FOUND;
} else if (nested_mac_count > 0) {
if (op == PP_ENDMACRO) {
nested_mac_count--;
return NO_DIRECTIVE_FOUND;
}
}
if (!defining->name) {
if (op == PP_REP) {
nested_rep_count++;
return NO_DIRECTIVE_FOUND;
} else if (nested_rep_count > 0) {
if (op == PP_ENDREP) {
nested_rep_count--;
return NO_DIRECTIVE_FOUND;
}
}
}
}
switch (op) {
default:
nasm_nonfatal("unknown preprocessor directive `%s'", dname);
return NO_DIRECTIVE_FOUND; /* didn't get it */
case PP_PRAGMA:
/*
* %pragma namespace options...
*
* The namespace "preproc" is reserved for the preprocessor;
* all other namespaces generate a [pragma] assembly directive.
*
* Invalid %pragmas are ignored and may have different
* meaning in future versions of NASM.
*/
t = tline;
tline = tline->next;
t->next = NULL;
tline = zap_white(expand_smacro(tline));
if (tok_type(tline, TOK_ID)) {
if (!nasm_stricmp(tok_text(tline), "preproc")) {
/* Preprocessor pragma */
do_pragma_preproc(tline);
free_tlist(tline);
} else {
/* Build the assembler directive */
/* Append bracket to the end of the output */
for (t = tline; t->next; t = t->next)
;
t->next = make_tok_char(NULL, ']');
/* Prepend "[pragma " */
t = new_White(tline);
t = new_Token(t, TOK_ID, "pragma", 6);
t = make_tok_char(t, '[');
tline = t;
*output = tline;
}
}
break;
case PP_STACKSIZE:
{
const char *arg;
/* Directive to tell NASM what the default stack size is. The
* default is for a 16-bit stack, and this can be overriden with
* %stacksize large.
*/
tline = skip_white(tline->next);
if (!tline || tline->type != TOK_ID) {
nasm_nonfatal("`%s' missing size parameter", dname);
break;
}
arg = tok_text(tline);
if (nasm_stricmp(arg, "flat") == 0) {
/* All subsequent ARG directives are for a 32-bit stack */
StackSize = 4;
StackPointer = "ebp";
ArgOffset = 8;
LocalOffset = 0;
} else if (nasm_stricmp(arg, "flat64") == 0) {
/* All subsequent ARG directives are for a 64-bit stack */
StackSize = 8;
StackPointer = "rbp";
ArgOffset = 16;
LocalOffset = 0;
} else if (nasm_stricmp(arg, "large") == 0) {
/* All subsequent ARG directives are for a 16-bit stack,
* far function call.
*/
StackSize = 2;
StackPointer = "bp";
ArgOffset = 4;
LocalOffset = 0;
} else if (nasm_stricmp(arg, "small") == 0) {
/* All subsequent ARG directives are for a 16-bit stack,
* far function call. We don't support near functions.
*/
StackSize = 2;
StackPointer = "bp";
ArgOffset = 6;
LocalOffset = 0;
} else {
nasm_nonfatal("`%s' invalid size type", dname);
}
break;
}
case PP_ARG:
/* TASM like ARG directive to define arguments to functions, in
* the following form:
*
* ARG arg1:WORD, arg2:DWORD, arg4:QWORD
*/
offset = ArgOffset;
do {
const char *arg;
char directive[256];
int size = StackSize;
/* Find the argument name */
tline = skip_white(tline->next);
if (!tline || tline->type != TOK_ID) {
nasm_nonfatal("`%s' missing argument parameter", dname);
goto done;
}
arg = tok_text(tline);
/* Find the argument size type */
tline = tline->next;
if (!tok_is(tline, ':')) {
nasm_nonfatal("syntax error processing `%s' directive", dname);
goto done;
}
tline = tline->next;
if (!tok_type(tline, TOK_ID)) {
nasm_nonfatal("`%s' missing size type parameter", dname);
goto done;
}
/* Allow macro expansion of type parameter */
tt = tokenize(tok_text(tline));
tt = expand_smacro(tt);
size = parse_size(tok_text(tt));
if (!size) {
nasm_nonfatal("invalid size type for `%s' missing directive", dname);
free_tlist(tt);
goto done;
}
free_tlist(tt);
/* Round up to even stack slots */
size = ALIGN(size, StackSize);
/* Now define the macro for the argument */
snprintf(directive, sizeof(directive), "%%define %s (%s+%d)",
arg, StackPointer, offset);
do_directive(tokenize(directive), output);
offset += size;
/* Move to the next argument in the list */
tline = skip_white(tline->next);
} while (tok_is(tline, ','));
ArgOffset = offset;
break;
case PP_LOCAL:
/* TASM like LOCAL directive to define local variables for a
* function, in the following form:
*
* LOCAL local1:WORD, local2:DWORD, local4:QWORD = LocalSize
*
* The '= LocalSize' at the end is ignored by NASM, but is
* required by TASM to define the local parameter size (and used
* by the TASM macro package).
*/
offset = LocalOffset;
do {
const char *local;
char directive[256];
int size = StackSize;
/* Find the argument name */
tline = skip_white(tline->next);
if (!tline || tline->type != TOK_ID) {
nasm_nonfatal("`%s' missing argument parameter", dname);
goto done;
}
local = tok_text(tline);
/* Find the argument size type */
tline = tline->next;
if (!tok_is(tline, ':')) {
nasm_nonfatal("syntax error processing `%s' directive", dname);
goto done;
}
tline = tline->next;
if (!tok_type(tline, TOK_ID)) {
nasm_nonfatal("`%s' missing size type parameter", dname);
goto done;
}
/* Allow macro expansion of type parameter */
tt = tokenize(tok_text(tline));
tt = expand_smacro(tt);
size = parse_size(tok_text(tt));
if (!size) {
nasm_nonfatal("invalid size type for `%s' missing directive", dname);
free_tlist(tt);
goto done;
}
free_tlist(tt);
/* Round up to even stack slots */
size = ALIGN(size, StackSize);
offset += size; /* Negative offset, increment before */
/* Now define the macro for the argument */
snprintf(directive, sizeof(directive), "%%define %s (%s-%d)",
local, StackPointer, offset);
do_directive(tokenize(directive), output);
/* Now define the assign to setup the enter_c macro correctly */
snprintf(directive, sizeof(directive),
"%%assign %%$localsize %%$localsize+%d", size);
do_directive(tokenize(directive), output);
/* Move to the next argument in the list */
tline = skip_white(tline->next);
} while (tok_is(tline, ','));
LocalOffset = offset;
break;
case PP_CLEAR:
{
bool context = false;
t = tline->next = expand_smacro(tline->next);
t = skip_white(t);
if (!t) {
/* Emulate legacy behavior */
do_clear(CLEAR_DEFINE|CLEAR_MMACRO, false);
} else {
while ((t = skip_white(t)) && t->type == TOK_ID) {
const char *txt = tok_text(t);
if (!nasm_stricmp(txt, "all")) {
do_clear(CLEAR_ALL, context);
} else if (!nasm_stricmp(txt, "define") ||
!nasm_stricmp(txt, "def") ||
!nasm_stricmp(txt, "smacro")) {
do_clear(CLEAR_DEFINE, context);
} else if (!nasm_stricmp(txt, "defalias") ||
!nasm_stricmp(txt, "alias") ||
!nasm_stricmp(txt, "salias")) {
do_clear(CLEAR_DEFALIAS, context);
} else if (!nasm_stricmp(txt, "alldef") ||
!nasm_stricmp(txt, "alldefine")) {
do_clear(CLEAR_ALLDEFINE, context);
} else if (!nasm_stricmp(txt, "macro") ||
!nasm_stricmp(txt, "mmacro")) {
do_clear(CLEAR_MMACRO, context);
} else if (!nasm_stricmp(txt, "context") ||
!nasm_stricmp(txt, "ctx")) {
context = true;
} else if (!nasm_stricmp(txt, "global")) {
context = false;
} else if (!nasm_stricmp(txt, "nothing") ||
!nasm_stricmp(txt, "none") ||
!nasm_stricmp(txt, "ignore") ||
!nasm_stricmp(txt, "-") ||
!nasm_stricmp(txt, "--")) {
/* Do nothing */
} else {
nasm_nonfatal("invalid option to %s: %s", dname, txt);
t = NULL;
}
}
}
t = skip_white(t);
if (t)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored", dname);
break;
}
case PP_DEPEND:
t = tline->next = expand_smacro(tline->next);
t = skip_white(t);
if (!t || (t->type != TOK_STRING &&
t->type != TOK_INTERNAL_STRING)) {
nasm_nonfatal("`%s' expects a file name", dname);
goto done;
}
if (t->next)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored", dname);
strlist_add(deplist, unquote_token_cstr(t));
goto done;
case PP_INCLUDE:
t = tline->next = expand_smacro(tline->next);
t = skip_white(t);
if (!t || (t->type != TOK_STRING &&
t->type != TOK_INTERNAL_STRING)) {
nasm_nonfatal("`%s' expects a file name", dname);
goto done;
}
if (t->next)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored", dname);
p = unquote_token_cstr(t);
nasm_new(inc);
inc->next = istk;
found_path = NULL;
inc->fp = inc_fopen(p, deplist, &found_path,
(pp_mode == PP_DEPS)
? INC_OPTIONAL : INC_NEEDED, NF_TEXT);
if (!inc->fp) {
/* -MG given but file not found */
nasm_free(inc);
} else {
inc->where = src_where();
inc->lineinc = 1;
inc->nolist = istk->nolist;
inc->noline = istk->noline;
if (!inc->noline)
src_set(0, found_path ? found_path : p);
istk = inc;
lfmt->uplevel(LIST_INCLUDE, 0);
}
break;
case PP_USE:
{
const struct use_package *pkg;
const char *name;
pkg = get_use_pkg(tline->next, dname, &name);
if (!name)
goto done;
if (!pkg) {
nasm_nonfatal("unknown `%s' package: `%s'", dname, name);
} else if (!use_loaded[pkg->index]) {
/*
* Not already included, go ahead and include it.
* Treat it as an include file for the purpose of
* producing a listing.
*/
use_loaded[pkg->index] = true;
stdmacpos = pkg->macros;
nasm_new(inc);
inc->next = istk;
inc->nolist = istk->nolist + !list_option('b');
inc->noline = istk->noline;
if (!inc->noline)
src_set(0, NULL);
istk = inc;
lfmt->uplevel(LIST_INCLUDE, 0);
}
break;
}
case PP_PUSH:
case PP_REPL:
case PP_POP:
tline = tline->next;
tline = skip_white(tline);
tline = expand_id(tline);
if (tline) {
if (!tok_type(tline, TOK_ID)) {
nasm_nonfatal("`%s' expects a context identifier", dname);
goto done;
}
if (tline->next)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored",
dname);
p = tok_text(tline);
} else {
p = NULL; /* Anonymous */
}
if (op == PP_PUSH) {
nasm_new(ctx);
ctx->depth = cstk ? cstk->depth + 1 : 1;
ctx->next = cstk;
ctx->name = p ? nasm_strdup(p) : NULL;
ctx->number = unique++;
cstk = ctx;
} else {
/* %pop or %repl */
if (!cstk) {
nasm_nonfatal("`%s': context stack is empty", dname);
} else if (op == PP_POP) {
if (p && (!cstk->name || nasm_stricmp(p, cstk->name)))
nasm_nonfatal("`%s' in wrong context: %s, "
"expected %s",
dname, cstk->name ? cstk->name : "anonymous", p);
else
ctx_pop();
} else {
/* op == PP_REPL */
nasm_free((char *)cstk->name);
cstk->name = p ? nasm_strdup(p) : NULL;
p = NULL;
}
}
break;
case PP_FATAL:
severity = ERR_FATAL;
goto issue_error;
case PP_ERROR:
severity = ERR_NONFATAL|ERR_PASS2;
goto issue_error;
case PP_WARNING:
/*!
*!user [on] %warning directives
*! controls output of \c{%warning} directives (see \k{pperror}).
*/
severity = ERR_WARNING|WARN_USER|ERR_PASS2;
goto issue_error;
issue_error:
{
/* Only error out if this is the final pass */
tline->next = expand_smacro(tline->next);
tline = tline->next;
tline = skip_white(tline);
t = tline ? tline->next : NULL;
t = skip_white(t);
if (tok_type(tline, TOK_STRING) && !t) {
/* The line contains only a quoted string */
p = unquote_token(tline); /* Ignore NUL character truncation */
nasm_error(severity, "%s", p);
} else {
/* Not a quoted string, or more than a quoted string */
q = detoken(tline, false);
nasm_error(severity, "%s", q);
nasm_free(q);
}
break;
}
CASE_PP_IF:
if (istk->conds && !emitting(istk->conds->state))
j = COND_NEVER;
else {
j = if_condition(tline->next, op);
tline->next = NULL; /* it got freed */
}
cond = nasm_malloc(sizeof(Cond));
cond->next = istk->conds;
cond->state = j;
istk->conds = cond;
if(istk->mstk.mstk)
istk->mstk.mstk->condcnt++;
break;
CASE_PP_ELIF:
if (!istk->conds)
nasm_fatal("`%s': no matching `%%if'", dname);
switch(istk->conds->state) {
case COND_IF_TRUE:
istk->conds->state = COND_DONE;
break;
case COND_DONE:
case COND_NEVER:
break;
case COND_ELSE_TRUE:
case COND_ELSE_FALSE:
nasm_warn(WARN_OTHER|ERR_PP_PRECOND,
"`%%elif' after `%%else' ignored");
istk->conds->state = COND_NEVER;
break;
case COND_IF_FALSE:
/*
* IMPORTANT: In the case of %if, we will already have
* called expand_mmac_params(); however, if we're
* processing an %elif we must have been in a
* non-emitting mode, which would have inhibited
* the normal invocation of expand_mmac_params().
* Therefore, we have to do it explicitly here.
*/
j = if_condition(expand_mmac_params(tline->next), op);
tline->next = NULL; /* it got freed */
istk->conds->state = j;
break;
}
break;
case PP_ELSE:
if (tline->next)
nasm_warn(WARN_OTHER|ERR_PP_PRECOND,
"trailing garbage after `%%else' ignored");
if (!istk->conds)
nasm_fatal("`%%else: no matching `%%if'");
switch(istk->conds->state) {
case COND_IF_TRUE:
case COND_DONE:
istk->conds->state = COND_ELSE_FALSE;
break;
case COND_NEVER:
break;
case COND_IF_FALSE:
istk->conds->state = COND_ELSE_TRUE;
break;
case COND_ELSE_TRUE:
case COND_ELSE_FALSE:
nasm_warn(WARN_OTHER|ERR_PP_PRECOND,
"`%%else' after `%%else' ignored.");
istk->conds->state = COND_NEVER;
break;
}
break;
case PP_ENDIF:
if (tline->next)
nasm_warn(WARN_OTHER|ERR_PP_PRECOND,
"trailing garbage after `%%endif' ignored");
if (!istk->conds)
nasm_fatal("`%%endif': no matching `%%if'");
cond = istk->conds;
istk->conds = cond->next;
nasm_free(cond);
if(istk->mstk.mstk)
istk->mstk.mstk->condcnt--;
break;
case PP_RMACRO:
case PP_MACRO:
{
MMacro *def;
nasm_assert(!defining);
nasm_new(def);
def->casesense = casesense;
/*
* dstk.mstk points to the previous definition bracket,
* whereas dstk.mmac points to the topmost mmacro, which
* in this case is the one we are just starting to create.
*/
def->dstk.mstk = defining;
def->dstk.mmac = def;
if (op == PP_RMACRO)
def->max_depth = nasm_limit[LIMIT_MACRO_LEVELS];
if (!parse_mmacro_spec(tline, def, dname)) {
nasm_free(def);
goto done;
}
defining = def;
defining->where = istk->where;
mmac = (MMacro *) hash_findix(&mmacros, defining->name);
while (mmac) {
if (!strcmp(mmac->name, defining->name) &&
(mmac->nparam_min <= defining->nparam_max
|| defining->plus)
&& (defining->nparam_min <= mmac->nparam_max
|| mmac->plus)) {
nasm_warn(WARN_OTHER, "redefining multi-line macro `%s'",
defining->name);
break;
}
mmac = mmac->next;
}
break;
}
case PP_ENDM:
case PP_ENDMACRO:
if (!(defining && defining->name)) {
nasm_nonfatal("`%s': not defining a macro", tok_text(tline));
goto done;
}
mmhead = (MMacro **) hash_findi_add(&mmacros, defining->name);
defining->next = *mmhead;
*mmhead = defining;
defining = NULL;
break;
case PP_EXITMACRO:
/*
* We must search along istk->expansion until we hit a
* macro-end marker for a macro with a name. Then we
* bypass all lines between exitmacro and endmacro.
*/
list_for_each(l, istk->expansion)
if (l->finishes && l->finishes->name)
break;
if (l) {
/*
* Remove all conditional entries relative to this
* macro invocation. (safe to do in this context)
*/
for ( ; l->finishes->condcnt > 0; l->finishes->condcnt --) {
cond = istk->conds;
istk->conds = cond->next;
nasm_free(cond);
}
istk->expansion = l;
} else {
nasm_nonfatal("`%%exitmacro' not within `%%macro' block");
}
break;
case PP_UNIMACRO:
casesense = false;
/* fall through */
case PP_UNMACRO:
{
MMacro **mmac_p;
MMacro spec;
nasm_zero(spec);
spec.casesense = casesense;
if (!parse_mmacro_spec(tline, &spec, dname)) {
goto done;
}
mmac_p = (MMacro **) hash_findi(&mmacros, spec.name, NULL);
while (mmac_p && *mmac_p) {
mmac = *mmac_p;
if (mmac->casesense == spec.casesense &&
!mstrcmp(mmac->name, spec.name, spec.casesense) &&
mmac->nparam_min == spec.nparam_min &&
mmac->nparam_max == spec.nparam_max &&
mmac->plus == spec.plus) {
*mmac_p = mmac->next;
free_mmacro(mmac);
} else {
mmac_p = &mmac->next;
}
}
free_tlist(spec.dlist);
break;
}
case PP_ROTATE:
while (tok_white(tline->next))
tline = tline->next;
if (!tline->next) {
free_tlist(origline);
nasm_nonfatal("`%%rotate' missing rotate count");
return DIRECTIVE_FOUND;
}
t = expand_smacro(tline->next);
tline->next = NULL;
pps.tptr = tline = t;
pps.ntokens = -1;
tokval.t_type = TOKEN_INVALID;
evalresult =
evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
free_tlist(tline);
if (!evalresult)
return DIRECTIVE_FOUND;
if (tokval.t_type)
nasm_warn(WARN_OTHER, "trailing garbage after expression ignored");
if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%%rotate'");
return DIRECTIVE_FOUND;
}
mmac = istk->mstk.mmac;
if (!mmac) {
nasm_nonfatal("`%%rotate' invoked outside a macro call");
} else if (mmac->nparam == 0) {
nasm_nonfatal("`%%rotate' invoked within macro without parameters");
} else {
int rotate = mmac->rotate + reloc_value(evalresult);
rotate %= (int)mmac->nparam;
if (rotate < 0)
rotate += mmac->nparam;
mmac->rotate = rotate;
}
break;
case PP_REP:
{
MMacro *tmp_defining;
nolist = 0;
tline = skip_white(tline->next);
if (tok_type(tline, TOK_ID) && tline->len == 7 &&
!nasm_memicmp(tline->text.a, ".nolist", 7)) {
if (!list_option('f'))
nolist |= NL_LIST; /* ... but update line numbers */
tline = skip_white(tline->next);
}
if (tline) {
pps.tptr = expand_smacro(tline);
pps.ntokens = -1;
tokval.t_type = TOKEN_INVALID;
/* XXX: really critical?! */
evalresult =
evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
if (!evalresult)
goto done;
if (tokval.t_type)
nasm_warn(WARN_OTHER, "trailing garbage after expression ignored");
if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%%rep'");
goto done;
}
count = reloc_value(evalresult);
if (count > nasm_limit[LIMIT_REP]) {
nasm_nonfatal("`%%rep' count %"PRId64" exceeds limit (currently %"PRId64")",
count, nasm_limit[LIMIT_REP]);
count = 0;
} else if (count < 0) {
/*!
*!negative-rep [on] regative %rep count
*! warns about negative counts given to the \c{%rep}
*! preprocessor directive.
*/
nasm_warn(ERR_PASS2|WARN_NEGATIVE_REP,
"negative `%%rep' count: %"PRId64, count);
count = 0;
} else {
count++;
}
} else {
nasm_nonfatal("`%%rep' expects a repeat count");
count = 0;
}
tmp_defining = defining;
nasm_new(defining);
defining->nolist = nolist;
defining->in_progress = count;
defining->mstk = istk->mstk;
defining->dstk.mstk = tmp_defining;
defining->dstk.mmac = tmp_defining ? tmp_defining->dstk.mmac : NULL;
defining->where = istk->where;
break;
}
case PP_ENDREP:
if (!defining || defining->name) {
nasm_nonfatal("`%%endrep': no matching `%%rep'");
goto done;
}
/*
* Now we have a "macro" defined - although it has no name
* and we won't be entering it in the hash tables - we must
* push a macro-end marker for it on to istk->expansion.
* After that, it will take care of propagating itself (a
* macro-end marker line for a macro which is really a %rep
* block will cause the macro to be re-expanded, complete
* with another macro-end marker to ensure the process
* continues) until the whole expansion is forcibly removed
* from istk->expansion by a %exitrep.
*/
nasm_new(l);
l->next = istk->expansion;
l->finishes = defining;
l->first = NULL;
l->where = src_where();
istk->expansion = l;
istk->mstk.mstk = defining;
/* A loop does not change istk->noline */
istk->nolist += !!(defining->nolist & NL_LIST);
if (!istk->nolist)
lfmt->uplevel(LIST_MACRO, 0);
defining = defining->dstk.mstk;
break;
case PP_EXITREP:
/*
* We must search along istk->expansion until we hit a
* macro-end marker for a macro with no name. Then we set
* its `in_progress' flag to 0.
*/
list_for_each(l, istk->expansion)
if (l->finishes && !l->finishes->name)
break;
if (l)
l->finishes->in_progress = 0;
else
nasm_nonfatal("`%%exitrep' not within `%%rep' block");
break;
case PP_DEFINE:
case PP_XDEFINE:
case PP_DEFALIAS:
{
SMacro tmpl;
Token **lastp;
int nparam;
if (!(mname = get_id(&tline, dname)))
goto done;
nasm_zero(tmpl);
lastp = &tline->next;
nparam = parse_smacro_template(&lastp, &tmpl);
tline = *lastp;
*lastp = NULL;
if (unlikely(op == PP_DEFALIAS)) {
macro_start = tline;
if (!is_macro_id(macro_start)) {
nasm_nonfatal("`%s' expects a macro identifier to alias",
dname);
goto done;
}
tt = macro_start->next;
macro_start->next = NULL;
tline = tline->next;
tline = skip_white(tline);
if (tline && tline->type) {
nasm_warn(WARN_OTHER,
"trailing garbage after aliasing identifier ignored");
}
free_tlist(tt);
tmpl.alias = true;
} else {
if (op == PP_XDEFINE) {
/* Protect macro parameter tokens */
if (nparam)
mark_smac_params(tline, &tmpl, TOK_XDEF_PARAM);
tline = expand_smacro(tline);
}
/* NB: Does this still make sense? */
macro_start = reverse_tokens(tline);
}
/*
* Good. We now have a macro name, a parameter count, and a
* token list (in reverse order) for an expansion. We ought
* to be OK just to create an SMacro, store it, and let
* free_tlist have the rest of the line (which we have
* carefully re-terminated after chopping off the expansion
* from the end).
*/
define_smacro(mname, casesense, macro_start, &tmpl);
break;
}
case PP_UNDEF:
case PP_UNDEFALIAS:
if (!(mname = get_id(&tline, dname)))
goto done;
if (tline->next)
nasm_warn(WARN_OTHER, "trailing garbage after macro name ignored");
undef_smacro(mname, op == PP_UNDEFALIAS);
break;
case PP_DEFSTR:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
tline = zap_white(tline);
q = detoken(tline, false);
macro_start = make_tok_qstr(NULL, q);
nasm_free(q);
/*
* We now have a macro name, an implicit parameter count of
* zero, and a string token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
break;
case PP_DEFTOK:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
t = skip_white(tline);
/* t should now point to the string */
if (!tok_type(t, TOK_STRING)) {
nasm_nonfatal("`%s' requires string as second parameter", dname);
free_tlist(tline);
goto done;
}
/*
* Convert the string to a token stream. Note that smacros
* are stored with the token stream reversed, so we have to
* reverse the output of tokenize().
*/
macro_start = reverse_tokens(tokenize(unquote_token_cstr(t)));
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
break;
case PP_PATHSEARCH:
{
const char *found_path;
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
t = skip_white(tline);
if (!t || (t->type != TOK_STRING &&
t->type != TOK_INTERNAL_STRING)) {
nasm_nonfatal("`%s' expects a file name", dname);
free_tlist(tline);
goto done;
}
if (t->next)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored", dname);
p = unquote_token_cstr(t);
inc_fopen(p, NULL, &found_path, INC_PROBE, NF_BINARY);
if (!found_path)
found_path = p;
macro_start = make_tok_qstr(NULL, found_path);
/*
* We now have a macro name, an implicit parameter count of
* zero, and a string token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
break;
}
case PP_STRLEN:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
t = skip_white(tline);
/* t should now point to the string */
if (!tok_type(t, TOK_STRING)) {
nasm_nonfatal("`%s' requires string as second parameter", dname);
free_tlist(tline);
free_tlist(origline);
return DIRECTIVE_FOUND;
}
unquote_token(t);
macro_start = make_tok_num(NULL, t->len);
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
free_tlist(origline);
return DIRECTIVE_FOUND;
case PP_STRCAT:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
len = 0;
list_for_each(t, tline) {
switch (t->type) {
case TOK_WHITESPACE:
break;
case TOK_STRING:
unquote_token(t);
len += t->len;
break;
case TOK_OTHER:
if (tok_is(t, ',')) /* permit comma separators */
break;
/* else fall through */
default:
nasm_nonfatal("non-string passed to `%s': %s", dname,
tok_text(t));
free_tlist(tline);
goto done;
}
}
q = qbuf = nasm_malloc(len+1);
list_for_each(t, tline) {
if (t->type == TOK_INTERNAL_STRING)
q = mempcpy(q, tok_text(t), t->len);
}
*q = '\0';
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
macro_start = make_tok_qstr_len(NULL, qbuf, len);
nasm_free(qbuf);
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
break;
case PP_SUBSTR:
{
int64_t start, count;
const char *txt;
size_t len;
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
if (tline) /* skip expanded id */
t = tline->next;
t = skip_white(t);
/* t should now point to the string */
if (!tok_type(t, TOK_STRING)) {
nasm_nonfatal("`%s' requires string as second parameter", dname);
free_tlist(tline);
goto done;
}
pps.tptr = t->next;
pps.ntokens = -1;
tokval.t_type = TOKEN_INVALID;
evalresult = evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
if (!evalresult) {
free_tlist(tline);
goto done;
} else if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%s'", dname);
free_tlist(tline);
goto done;
}
start = evalresult->value - 1;
pps.tptr = skip_white(pps.tptr);
if (!pps.tptr) {
count = 1; /* Backwards compatibility: one character */
} else {
tokval.t_type = TOKEN_INVALID;
evalresult = evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
if (!evalresult) {
free_tlist(tline);
goto done;
} else if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%s'", dname);
free_tlist(tline);
goto done;
}
count = evalresult->value;
}
unquote_token(t);
len = t->len;
/* make start and count being in range */
if (start < 0)
start = 0;
if (count < 0)
count = len + count + 1 - start;
if (start + count > (int64_t)len)
count = len - start;
if (!len || count < 0 || start >=(int64_t)len)
start = -1, count = 0; /* empty string */
txt = (start < 0) ? "" : tok_text(t) + start;
len = count;
macro_start = make_tok_qstr_len(NULL, txt, len);
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
break;
}
case PP_ASSIGN:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
pps.tptr = tline;
pps.ntokens = -1;
tokval.t_type = TOKEN_INVALID;
evalresult = evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
free_tlist(tline);
if (!evalresult)
goto done;
if (tokval.t_type)
nasm_warn(WARN_OTHER, "trailing garbage after expression ignored");
if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%s'", dname);
free_tlist(origline);
return DIRECTIVE_FOUND;
}
macro_start = make_tok_num(NULL, reloc_value(evalresult));
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
break;
case PP_ALIASES:
tline = tline->next;
tline = expand_smacro(tline);
ppopt.noaliases = !pp_get_boolean_option(tline, !ppopt.noaliases);
break;
case PP_LINE:
nasm_panic("`%s' directive not preprocessed early", dname);
break;
case PP_NULL:
/* Goes nowhere, does nothing... */
break;
}
done:
free_tlist(origline);
return DIRECTIVE_FOUND;
}
|
Safe
|
[] |
nasm
|
6299a3114ce0f3acd55d07de201a8ca2f0a83059
|
2.120283249819343e+38
| 1,314 |
BR 3392708: fix NULL pointer reference for invalid %stacksize
After issuing an error message for a missing %stacksize argument, need
to quit rather than continuing to try to access the pointer.
Fold uses of tok_text() while we are at it.
Reported-by: Suhwan <prada960808@gmail.com>
Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
| 0 |
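The commit message in the record above boils down to one rule: after reporting a missing directive argument, bail out instead of dereferencing the NULL token pointer. A minimal hedged C sketch of that guard follows; the token type and error reporting are illustrative stand-ins, not NASM's real API.

```c
/* Hedged sketch of the guard this commit adds: report the missing argument,
 * then quit before touching the (NULL) token. */
#include <stdio.h>

struct token { const char *text; };

static const char *parse_size_arg(const struct token *t)
{
    if (!t) {
        fprintf(stderr, "%%stacksize: missing size parameter\n");
        return NULL;            /* the fix: stop here, never read t->text */
    }
    return t->text;
}

int main(void)
{
    const char *arg = parse_size_arg(NULL);   /* directive with no argument */
    printf("arg = %s\n", arg ? arg : "(none)");
    return 0;
}
```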
__do_block_io_op(struct xen_blkif_ring *ring)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
struct blkif_request req;
struct pending_req *pending_req;
RING_IDX rc, rp;
int more_to_do = 0;
rc = blk_rings->common.req_cons;
rp = blk_rings->common.sring->req_prod;
rmb(); /* Ensure we see queued requests up to 'rp'. */
if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
rc = blk_rings->common.rsp_prod_pvt;
pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
rp, rc, rp - rc, ring->blkif->vbd.pdevice);
return -EACCES;
}
while (rc != rp) {
if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
break;
if (kthread_should_stop()) {
more_to_do = 1;
break;
}
pending_req = alloc_req(ring);
if (NULL == pending_req) {
ring->st_oo_req++;
more_to_do = 1;
break;
}
switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
break;
case BLKIF_PROTOCOL_X86_32:
blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
break;
case BLKIF_PROTOCOL_X86_64:
blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
break;
default:
BUG();
}
blk_rings->common.req_cons = ++rc; /* before make_response() */
/* Apply all sanity checks to /private copy/ of request. */
barrier();
switch (req.operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_INDIRECT:
if (dispatch_rw_block_io(ring, &req, pending_req))
goto done;
break;
case BLKIF_OP_DISCARD:
free_req(ring, pending_req);
if (dispatch_discard_io(ring, &req))
goto done;
break;
default:
if (dispatch_other_io(ring, &req, pending_req))
goto done;
break;
}
/* Yield point for this unbounded loop. */
cond_resched();
}
done:
return more_to_do;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
089bc0143f489bd3a4578bdff5f4ca68fb26f341
|
1.3351413265676835e+38
| 79 |
xen-blkback: don't leak stack data via response ring
Rather than constructing a local structure instance on the stack, fill
the fields directly on the shared ring, just like other backends do.
Build on the fact that all response structure flavors are actually
identical (the old code did make this assumption too).
This is XSA-216.
Cc: stable@vger.kernel.org
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
| 0 |
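The commit message in the record above describes the leak class fixed by XSA-216: building a response in a partially initialized on-stack struct and copying the whole thing into a ring shared with the frontend also copies stale stack bytes in padding and untouched fields. Below is a hedged C sketch contrasting that shape with the fix of writing only the intended fields directly on the shared slot; the types are illustrative, not Xen's.

```c
/* Hedged sketch of the information-leak pattern the commit removes. */
#include <stdint.h>
#include <string.h>

struct response {
    uint64_t id;
    uint8_t  operation;
    /* 1 byte of implicit padding here */
    int16_t  status;
    uint32_t _pad;         /* never initialised by the buggy path */
};

/* Buggy shape: stack garbage in the padding and _pad field reaches the guest. */
static void respond_leaky(volatile struct response *slot, uint64_t id, int16_t st)
{
    struct response rsp;    /* uninitialised stack memory */
    rsp.id = id;
    rsp.operation = 0;
    rsp.status = st;
    memcpy((void *)slot, &rsp, sizeof(rsp));
}

/* Fixed shape: assign the fields directly on the shared slot, nothing else. */
static void respond_direct(volatile struct response *slot, uint64_t id, int16_t st)
{
    slot->id = id;
    slot->operation = 0;
    slot->status = st;
}

int main(void)
{
    static struct response ring[1];
    respond_leaky(ring, 1, 0);
    respond_direct(ring, 2, 0);
    return 0;
}
```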
gst_rtsp_connection_get_ip (const GstRTSPConnection * conn)
{
g_return_val_if_fail (conn != NULL, NULL);
return conn->remote_ip;
}
|
Safe
|
[] |
gst-plugins-base
|
f672277509705c4034bc92a141eefee4524d15aa
|
2.313649037147238e+38
| 6 |
gstrtspconnection: Security loophole making heap overflow
The former code allowed an attacker to create a heap overflow by
sending a longer than allowed session id in a response and including a
semicolon to change the maximum length. With this change, the parser
will never go beyond 512 bytes.
| 0 |
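The commit message in the record above describes the fix as a hard cap: never read more than 512 bytes of a session id, regardless of what the peer's response claims. Below is a hedged C sketch of such a bounded parse; the header format handling is simplified and the names are illustrative, not the GStreamer implementation.

```c
/* Hedged sketch of the bound described above: copy at most 512 bytes of a
 * session id, stopping at ';' (attributes) or NUL, never trusting the peer. */
#include <stdio.h>
#include <string.h>

#define SESSION_ID_MAX 512

static void parse_session_id(const char *header, char *out, size_t outlen)
{
    size_t i = 0;

    while (header[i] != '\0' && header[i] != ';' &&
           i < outlen - 1 && i < SESSION_ID_MAX) {
        out[i] = header[i];
        i++;
    }
    out[i] = '\0';
}

int main(void)
{
    char id[SESSION_ID_MAX + 1];
    parse_session_id("abcdef1234;timeout=60", id, sizeof(id));
    printf("session id: %s\n", id);
    return 0;
}
```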
int SSL_set_trust(SSL *s, int trust)
{
return X509_VERIFY_PARAM_set_trust(s->param, trust);
}
|
Safe
|
[
"CWE-310"
] |
openssl
|
56f1acf5ef8a432992497a04792ff4b3b2c6f286
|
2.732533464898985e+38
| 4 |
Disable SSLv2 default build, default negotiation and weak ciphers.
SSLv2 is by default disabled at build-time. Builds that are not
configured with "enable-ssl2" will not support SSLv2. Even if
"enable-ssl2" is used, users who want to negotiate SSLv2 via the
version-flexible SSLv23_method() will need to explicitly call either
of:
SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
or
SSL_clear_options(ssl, SSL_OP_NO_SSLv2);
as appropriate. Even if either of those is used, or the application
explicitly uses the version-specific SSLv2_method() or its client
or server variants, SSLv2 ciphers vulnerable to exhaustive search
key recovery have been removed. Specifically, the SSLv2 40-bit
EXPORT ciphers, and SSLv2 56-bit DES are no longer available.
Mitigation for CVE-2016-0800
Reviewed-by: Emilia Käsper <emilia@openssl.org>
| 0 |
get_compare_type(char_u *p, int *len, int *type_is)
{
exprtype_T type = EXPR_UNKNOWN;
int i;
switch (p[0])
{
case '=': if (p[1] == '=')
type = EXPR_EQUAL;
else if (p[1] == '~')
type = EXPR_MATCH;
break;
case '!': if (p[1] == '=')
type = EXPR_NEQUAL;
else if (p[1] == '~')
type = EXPR_NOMATCH;
break;
case '>': if (p[1] != '=')
{
type = EXPR_GREATER;
*len = 1;
}
else
type = EXPR_GEQUAL;
break;
case '<': if (p[1] != '=')
{
type = EXPR_SMALLER;
*len = 1;
}
else
type = EXPR_SEQUAL;
break;
case 'i': if (p[1] == 's')
{
// "is" and "isnot"; but not a prefix of a name
if (p[2] == 'n' && p[3] == 'o' && p[4] == 't')
*len = 5;
i = p[*len];
if (!isalnum(i) && i != '_')
{
type = *len == 2 ? EXPR_IS : EXPR_ISNOT;
*type_is = TRUE;
}
}
break;
}
return type;
}
|
Safe
|
[
"CWE-200",
"CWE-122"
] |
vim
|
5f25c3855071bd7e26255c68bf458b1b5cf92f39
|
3.2913645761104075e+38
| 49 |
patch 8.2.4049: Vim9: reading before the start of the line with "$"
Problem: Vim9: reading before the start of the line with "$" by itself.
Solution: Do not subtract one when reporting the error.
| 0 |
static void xhci_child_detach(USBPort *uport, USBDevice *child)
{
USBBus *bus = usb_bus_from_device(child);
XHCIState *xhci = container_of(bus, XHCIState, bus);
xhci_detach_slot(xhci, child->port);
}
|
Safe
|
[
"CWE-835"
] |
qemu
|
96d87bdda3919bb16f754b3d3fd1227e1f38f13c
|
1.4669350798230679e+38
| 7 |
xhci: guard xhci_kick_epctx against recursive calls
Track xhci_kick_epctx processing being active in a variable. Check the
variable before calling xhci_kick_epctx from xhci_kick_ep. Add an
assert to make sure we don't call recursively into xhci_kick_epctx.
Cc: 1653384@bugs.launchpad.net
Fixes: 94b037f2a451b3dc855f9f2c346e5049a361bd55
Reported-by: Fabian Lesniak <fabian@lesniak-it.de>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Message-id: 1486035372-3621-1-git-send-email-kraxel@redhat.com
Message-id: 1485790607-31399-5-git-send-email-kraxel@redhat.com
| 0 |
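The commit message in the record above describes the re-entrancy guard: track whether the kick routine is already running, make the outer call a no-op in that case, and assert against direct recursion. Below is a hedged C sketch of that guard; the names are illustrative, not the QEMU xhci symbols.

```c
/* Hedged sketch of the recursion guard described above. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct epctx { bool kick_active; int pending; };

static void kick_epctx(struct epctx *ctx)
{
    assert(!ctx->kick_active);          /* no recursive calls allowed */
    ctx->kick_active = true;
    while (ctx->pending > 0)            /* process queued transfers */
        ctx->pending--;
    ctx->kick_active = false;
}

static void kick_ep(struct epctx *ctx)
{
    if (ctx->kick_active)               /* already being processed: skip */
        return;
    kick_epctx(ctx);
}

int main(void)
{
    struct epctx ctx = { false, 3 };
    kick_ep(&ctx);
    printf("pending = %d\n", ctx.pending);
    return 0;
}
```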
static void phar_do_403(char *entry, int entry_len TSRMLS_DC) /* {{{ */
{
sapi_header_line ctr = {0};
ctr.response_code = 403;
ctr.line_len = sizeof("HTTP/1.0 403 Access Denied")-1;
ctr.line = "HTTP/1.0 403 Access Denied";
sapi_header_op(SAPI_HEADER_REPLACE, &ctr TSRMLS_CC);
sapi_send_headers(TSRMLS_C);
PHPWRITE("<html>\n <head>\n <title>Access Denied</title>\n </head>\n <body>\n <h1>403 - File ", sizeof("<html>\n <head>\n <title>Access Denied</title>\n </head>\n <body>\n <h1>403 - File ") - 1);
PHPWRITE(entry, entry_len);
PHPWRITE(" Access Denied</h1>\n </body>\n</html>", sizeof(" Access Denied</h1>\n </body>\n</html>") - 1);
}
|
Vulnerable
|
[
"CWE-79"
] |
php-src
|
6e64aba47f4e41d97c4d010024c68320c0855f45
|
1.9539886820155505e+38
| 13 |
Fix #76129 - remove more potential unfiltered outputs for phar
| 1 |
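The record above is the only one labeled Vulnerable (CWE-79): the 403 page writes the request-derived entry name into HTML unescaped. As a hedged illustration of the mitigation idea, the C sketch below HTML-escapes an attacker-controlled string before emitting it; this is a generic example in the document's prevailing language, not the phar/PHP implementation.

```c
/* Hedged sketch of the CWE-79 fix idea: escape HTML metacharacters in the
 * user-controlled file name before writing it into the error page. */
#include <stdio.h>

static void write_html_escaped(FILE *out, const char *s)
{
    for (; *s; s++) {
        switch (*s) {
        case '<':  fputs("&lt;", out);   break;
        case '>':  fputs("&gt;", out);   break;
        case '&':  fputs("&amp;", out);  break;
        case '"':  fputs("&quot;", out); break;
        case '\'': fputs("&#39;", out);  break;
        default:   fputc(*s, out);       break;
        }
    }
}

int main(void)
{
    const char *entry = "/<script>alert(1)</script>";   /* attacker-controlled */
    fputs("<h1>403 - File ", stdout);
    write_html_escaped(stdout, entry);                  /* escaped, not raw */
    fputs(" Access Denied</h1>\n", stdout);
    return 0;
}
```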
bool vmxnet_tx_pkt_add_raw_fragment(struct VmxnetTxPkt *pkt, hwaddr pa,
size_t len)
{
hwaddr mapped_len = 0;
struct iovec *ventry;
assert(pkt);
assert(pkt->max_raw_frags > pkt->raw_frags);
if (!len) {
return true;
}
ventry = &pkt->raw[pkt->raw_frags];
mapped_len = len;
ventry->iov_base = cpu_physical_memory_map(pa, &mapped_len, false);
ventry->iov_len = mapped_len;
pkt->raw_frags += !!ventry->iov_base;
if ((ventry->iov_base == NULL) || (len != mapped_len)) {
return false;
}
return true;
}
|
Safe
|
[
"CWE-20"
] |
qemu
|
a7278b36fcab9af469563bd7b9dadebe2ae25e48
|
2.9782748366777035e+38
| 25 |
net/vmxnet3: Refine l2 header validation
Validation of l2 header length assumed minimal packet size as
eth_header + 2 * vlan_header regardless of the actual protocol.
This caused crash for valid non-IP packets shorter than 22 bytes, as
'tx_pkt->packet_type' hasn't been assigned for such packets, and
'vmxnet3_on_tx_done_update_stats()' expects it to be properly set.
Refine header length validation in 'vmxnet_tx_pkt_parse_headers'.
Check its return value during packet processing flow.
As a side effect, in case IPv4 and IPv6 header validation failure,
corrupt packets will be dropped.
Signed-off-by: Dana Rubin <dana.rubin@ravellosystems.com>
Signed-off-by: Shmulik Ladkani <shmulik.ladkani@ravellosystems.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
| 0 |
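The commit message in the record above explains that the old check assumed every frame carried an Ethernet header plus two VLAN tags, crashing on valid shorter non-IP packets. Below is a hedged C sketch of deriving the l2 header length from the actual EtherType instead and rejecting frames that are too short; constants and names are illustrative, not the QEMU code.

```c
/* Hedged sketch of the header-length check described above: 0, 1 or 2 VLAN
 * tags depending on what the frame actually contains. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN    14
#define VLAN_HLEN    4
#define ETH_P_8021Q 0x8100

static uint16_t ethertype_at(const uint8_t *p, size_t off)
{
    return (uint16_t)(p[off] << 8 | p[off + 1]);
}

/* Returns the l2 header length, or 0 if the frame is too short to tell. */
static size_t l2_header_len(const uint8_t *pkt, size_t len)
{
    size_t hdr = ETH_HLEN;

    if (len < hdr)
        return 0;
    if (ethertype_at(pkt, 12) == ETH_P_8021Q) {          /* outer VLAN tag */
        hdr += VLAN_HLEN;
        if (len < hdr)
            return 0;
        if (ethertype_at(pkt, 16) == ETH_P_8021Q) {      /* QinQ */
            hdr += VLAN_HLEN;
            if (len < hdr)
                return 0;
        }
    }
    return hdr;
}

int main(void)
{
    uint8_t frame[18] = { 0 };          /* short frame, no VLAN tag */
    frame[12] = 0x08; frame[13] = 0x00; /* EtherType IPv4 */
    printf("l2 header = %zu bytes\n", l2_header_len(frame, sizeof(frame)));
    return 0;
}
```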
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
kfree(state->stack);
if (free_self)
kfree(state);
}
|
Safe
|
[
"CWE-20"
] |
linux
|
c131187db2d3fa2f8bf32fdf4e9a4ef805168467
|
5.709520770474973e+36
| 7 |
bpf: fix branch pruning logic
when the verifier detects that register contains a runtime constant
and it's compared with another constant it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
| 0 |
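The commit message in the record above describes the mitigation: track which instructions the verifier actually explored and rewrite unexplored (dead) instructions as NOPs so later stages such as the JIT never see them. Below is a hedged C sketch of that idea on a toy instruction set; the encoding is illustrative, not real eBPF.

```c
/* Hedged sketch of the dead-code sanitisation described above. */
#include <stdbool.h>
#include <stdio.h>

enum opcode { OP_NOP, OP_MOV, OP_JMP_ALWAYS, OP_ADD, OP_EXIT };

struct insn { enum opcode op; int arg; };

static void mark_reachable(const struct insn *prog, int len, bool *seen)
{
    int pc = 0;
    while (pc < len) {
        seen[pc] = true;
        if (prog[pc].op == OP_EXIT)
            return;
        if (prog[pc].op == OP_JMP_ALWAYS)   /* pruned branch: one target only */
            pc += 1 + prog[pc].arg;
        else
            pc += 1;
    }
}

static void sanitize_dead_code(struct insn *prog, int len, const bool *seen)
{
    for (int i = 0; i < len; i++)
        if (!seen[i])
            prog[i] = (struct insn){ OP_NOP, 0 };   /* never reaches the JIT */
}

int main(void)
{
    struct insn prog[] = {
        { OP_JMP_ALWAYS, 1 },   /* always skips the next instruction */
        { OP_ADD, 42 },         /* dead: becomes a NOP */
        { OP_EXIT, 0 },
    };
    bool seen[3] = { false };
    mark_reachable(prog, 3, seen);
    sanitize_dead_code(prog, 3, seen);
    printf("insn1 op = %d (0 == NOP)\n", prog[1].op);
    return 0;
}
```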