func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64) |
---|---|---|---|---|---|---|---|---|
int sr_is_xa(Scsi_CD *cd)
{
unsigned char *raw_sector;
int is_xa;
if (!xa_test)
return 0;
raw_sector = kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd));
if (!raw_sector)
return -ENOMEM;
if (0 == sr_read_sector(cd, cd->ms_offset + 16,
CD_FRAMESIZE_RAW1, raw_sector)) {
is_xa = (raw_sector[3] == 0x02) ? 1 : 0;
} else {
/* read a raw sector failed for some reason. */
is_xa = -1;
}
kfree(raw_sector);
#ifdef DEBUG
sr_printk(KERN_INFO, cd, "sr_is_xa: %d\n", is_xa);
#endif
return is_xa;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
f7068114d45ec55996b9040e98111afa56e010fe
|
3.3707218837960685e+37
| 24 |
sr: pass down correctly sized SCSI sense buffer
We're casting the CDROM layer request_sense to the SCSI sense
buffer, but the former is 64 bytes and the latter is 96 bytes.
As we generally allocate these on the stack, we end up blowing
up the stack.
Fix this by wrapping the scsi_execute() call with a properly
sized sense buffer, and copying back the bits for the CDROM
layer.
Cc: stable@vger.kernel.org
Reported-by: Piotr Gabriel Kosinski <pg.kosinski@gmail.com>
Reported-by: Daniel Shapira <daniel@twistlock.com>
Tested-by: Kees Cook <keescook@chromium.org>
Fixes: 82ed4db499b8 ("block: split scsi_request out of struct request")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
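The sr commit message above explains that a 64-byte CDROM request_sense structure was being used where the SCSI layer writes a 96-byte sense buffer, and that the fix wraps the call with a properly sized buffer and copies the result back. A hedged userspace sketch of that wrapping pattern follows; the sizes and helper names are invented stand-ins, not the kernel's real definitions.
```c
/* Sketch only: SMALL_LEN/LARGE_LEN and lower_layer_fill() are illustrative. */
#include <stdio.h>
#include <string.h>

#define SMALL_LEN 64   /* size of the legacy caller-provided structure */
#define LARGE_LEN 96   /* size the lower layer actually writes */

/* stand-in for the lower layer that always fills LARGE_LEN bytes */
static void lower_layer_fill(unsigned char *sense)
{
    memset(sense, 0xAB, LARGE_LEN);
}

/* safe pattern: call with a full-sized local buffer, then copy back only
 * the prefix that the smaller legacy structure can hold */
static void safe_call(unsigned char *small /* SMALL_LEN bytes */)
{
    unsigned char full[LARGE_LEN];

    lower_layer_fill(full);
    memcpy(small, full, SMALL_LEN);
}

int main(void)
{
    unsigned char legacy[SMALL_LEN];

    /* lower_layer_fill(legacy) would overflow the 64-byte buffer by 32
     * bytes; the wrapper above avoids that. */
    safe_call(legacy);
    printf("first byte copied back: 0x%02x\n", legacy[0]);
    return 0;
}
```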
void ptrace_notify(int exit_code)
{
siginfo_t info;
BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
memset(&info, 0, sizeof info);
info.si_signo = SIGTRAP;
info.si_code = exit_code;
info.si_pid = task_pid_vnr(current);
info.si_uid = current_uid();
/* Let the debugger run. */
spin_lock_irq(&current->sighand->siglock);
ptrace_stop(exit_code, 1, &info);
spin_unlock_irq(&current->sighand->siglock);
}
|
Safe
|
[] |
linux-2.6
|
0083fc2c50e6c5127c2802ad323adf8143ab7856
|
4.1854737981097245e+37
| 17 |
do_sigaltstack: avoid copying 'stack_t' as a structure to user space
Ulrich Drepper correctly points out that there is generally padding in
the structure on 64-bit hosts, and that copying the structure from
kernel to user space can leak information from the kernel stack in those
padding bytes.
Avoid the whole issue by just copying the three members one by one
instead, which also means that the function can avoid the need for
a stack frame. This also happens to match how we copy the new structure
from user space, so it all even makes sense.
[ The obvious solution of adding a memset() generates horrid code, gcc
does really stupid things. ]
Reported-by: Ulrich Drepper <drepper@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
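The do_sigaltstack message above hinges on struct padding: copying a whole structure to user space also copies uninitialized padding bytes, while copying the members one by one does not. A small userspace sketch with an invented struct shaped like stack_t illustrates the difference.
```c
/* Sketch only: 'struct stack_like' is an invented stand-in for stack_t.
 * On LP64 targets it carries 4 padding bytes after 'flags', so a
 * whole-struct copy ships 24 bytes while only 20 bytes are defined. */
#include <stdio.h>
#include <string.h>

struct stack_like {
    void  *sp;
    int    flags;     /* padding usually follows here on 64-bit */
    size_t size;
};

/* whole-struct copy: padding bytes travel along with the members */
static void copy_whole(unsigned char *dst, const struct stack_like *s)
{
    memcpy(dst, s, sizeof(*s));
}

/* member-by-member copy: only defined bytes are ever written out */
static size_t copy_members(unsigned char *dst, const struct stack_like *s)
{
    size_t off = 0;

    memcpy(dst + off, &s->sp, sizeof(s->sp));       off += sizeof(s->sp);
    memcpy(dst + off, &s->flags, sizeof(s->flags)); off += sizeof(s->flags);
    memcpy(dst + off, &s->size, sizeof(s->size));   off += sizeof(s->size);
    return off;                                     /* < sizeof(*s) if padded */
}

int main(void)
{
    struct stack_like s = { (void *)0x1000, 1, 8192 };
    unsigned char buf[sizeof(s)];

    copy_whole(buf, &s);
    printf("struct size %zu, defined bytes %zu\n",
           sizeof(s), copy_members(buf, &s));
    return 0;
}
```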
void ssl_cert_clear_certs(CERT *c)
{
int i;
if (c == NULL)
return;
for (i = 0; i<SSL_PKEY_NUM; i++)
{
CERT_PKEY *cpk = c->pkeys + i;
if (cpk->x509)
{
X509_free(cpk->x509);
cpk->x509 = NULL;
}
if (cpk->privatekey)
{
EVP_PKEY_free(cpk->privatekey);
cpk->privatekey = NULL;
}
if (cpk->chain)
{
sk_X509_pop_free(cpk->chain, X509_free);
cpk->chain = NULL;
}
#ifndef OPENSSL_NO_TLSEXT
if (cpk->authz != NULL)
OPENSSL_free(cpk->authz);
#endif
}
}
|
Safe
|
[] |
openssl
|
c70a1fee71119a9005b1f304a3bf47694b4a53ac
|
2.4737855905223298e+38
| 29 |
Reorganise supported signature algorithm extension processing.
Only store encoded versions of peer and configured signature algorithms.
Determine shared signature algorithms and cache the result along with NID
equivalents of each algorithm.
(backport from HEAD)
| 0 |
rename_callback (GObject *source_object,
GAsyncResult *res,
gpointer callback_data)
{
NautilusFileOperation *op;
GFile *new_file;
GError *error;
op = callback_data;
error = NULL;
new_file = g_file_set_display_name_finish (G_FILE (source_object),
res, &error);
if (new_file != NULL) {
g_file_query_info_async (new_file,
NAUTILUS_FILE_DEFAULT_ATTRIBUTES,
0,
G_PRIORITY_DEFAULT,
op->cancellable,
rename_get_info_callback, op);
} else {
nautilus_file_operation_complete (op, NULL, error);
g_error_free (error);
}
}
|
Safe
|
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
|
2.2420257278173513e+38
| 26 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <alexl@redhat.com>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
| 0 |
test_bson_new (void)
{
bson_t *b;
b = bson_new ();
ASSERT_CMPUINT32 (b->len, ==, (uint32_t) 5);
bson_destroy (b);
b = bson_sized_new (32);
ASSERT_CMPUINT32 (b->len, ==, (uint32_t) 5);
bson_destroy (b);
}
|
Safe
|
[
"CWE-125"
] |
mongo-c-driver
|
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
2.954465748652905e+38
| 12 |
Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due to a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example.
| 0 |
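The CDRIVER-2819 message above is about subtracting the already-consumed int32 (and subtype byte) before comparing a declared binary length against the remaining data. The sketch below uses an invented helper, not libbson's real parser, to show the corrected comparison.
```c
/* Sketch only: 'binary_fits' is illustrative, not libbson code. */
#include <stdint.h>
#include <stdio.h>

/* A binary field at offset 'o' consists of: an int32 length 'l', one
 * subtype byte, then 'l' payload bytes. 'len' is the document size. */
static int binary_fits(uint32_t l, size_t o, size_t len)
{
    /* wrong: "l <= len - o" lets 'l' also count the int32 and subtype
     * that were already consumed, allowing a heap over-read */

    /* right: only the bytes after the length and subtype may be payload */
    return (o + sizeof(uint32_t) + 1 <= len) &&
           (l <= len - o - sizeof(uint32_t) - 1);
}

int main(void)
{
    size_t len = 16;               /* pretend document size */
    size_t o = 4;                  /* field starts here */

    printf("l=7  fits: %d\n", binary_fits(7, o, len));    /* 1: 7 <= 7  */
    printf("l=20 fits: %d\n", binary_fits(20, o, len));   /* 0: too long */
    return 0;
}
```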
int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, int write)
{
struct mm_struct *mm;
int ret;
mm = get_task_mm(tsk);
if (!mm)
return 0;
ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
mmput(mm);
return ret;
}
|
Safe
|
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
|
1.9676662756027531e+37
| 15 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables from going away from under them; during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped; if the page fault runs first, it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: <stable@vger.kernel.org> [2.6.38+]
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
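The long explanation above comes down to reading *pmd once into a local variable, with a compiler barrier, so that both checks operate on the same snapshot even if the real entry changes concurrently. A generic userspace sketch of that pattern; the check names are invented and 'shared' merely stands in for the pmd entry.
```c
/* Sketch only: fake checks on a shared value read once into a local. */
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static volatile unsigned long shared;     /* may change under us */

static int checks_on_snapshot(void)
{
    unsigned long val = shared;  /* single read into the local stack */
    barrier();                   /* keep the compiler from re-reading 'shared' */

    int looks_huge  = (val & 0x1) != 0;   /* stand-in for pmd_trans_huge() */
    int looks_empty = (val == 0);         /* stand-in for pmd_none()       */

    /* both tests saw the same value, so they cannot contradict each other */
    return looks_huge || looks_empty;
}

int main(void)
{
    shared = 0x1;
    printf("%d\n", checks_on_snapshot());
    return 0;
}
```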
eval_map_expr(
char_u *str,
int c) /* NUL or typed character for abbreviation */
{
char_u *res;
char_u *p;
char_u *expr;
pos_T save_cursor;
int save_msg_col;
int save_msg_row;
/* Remove escaping of CSI, because "str" is in a format to be used as
* typeahead. */
expr = vim_strsave(str);
if (expr == NULL)
return NULL;
vim_unescape_csi(expr);
/* Forbid changing text or using ":normal" to avoid most of the bad side
* effects. Also restore the cursor position. */
++textlock;
++ex_normal_lock;
set_vim_var_char(c); /* set v:char to the typed character */
save_cursor = curwin->w_cursor;
save_msg_col = msg_col;
save_msg_row = msg_row;
p = eval_to_string(expr, NULL, FALSE);
--textlock;
--ex_normal_lock;
curwin->w_cursor = save_cursor;
msg_col = save_msg_col;
msg_row = save_msg_row;
vim_free(expr);
if (p == NULL)
return NULL;
/* Escape CSI in the result to be able to use the string as typeahead. */
res = vim_strsave_escape_csi(p);
vim_free(p);
return res;
}
|
Safe
|
[
"CWE-78"
] |
vim
|
53575521406739cf20bbe4e384d88e7dca11f040
|
5.951046880628355e+37
| 43 |
patch 8.1.1365: source command doesn't check for the sandbox
Problem: Source command doesn't check for the sandbox. (Armin Razmjou)
Solution: Check for the sandbox when sourcing a file.
| 0 |
static int tea6320_shift11(int val) { return val >> 11; }
|
Safe
|
[
"CWE-399"
] |
linux-2.6
|
01a1a3cc1e3fbe718bd06a2a5d4d1a2d0fb4d7d9
|
2.941590957160699e+38
| 1 |
V4L/DVB (9624): CVE-2008-5033: fix OOPS on tvaudio when controlling bass/treble
This bug was supposed to be fixed by 5ba2f67afb02c5302b2898949ed6fc3b3d37dcf1,
where a call to NULL happens.
Not all tvaudio chips allow controlling bass/treble. So, the driver
has a table with a flag to indicate if the chip does support it.
Unfortunately, the handling of this logic has been broken for a very long
time (probably since the first module version). Due to that, an OOPS
was generated for devices that don't support bass/treble.
This was the resulting OOPS message before the patch, with debug messages
enabled:
tvaudio' 1-005b: VIDIOC_S_CTRL
BUG: unable to handle kernel NULL pointer dereference at 00000000
IP: [<00000000>]
*pde = 22fda067 *pte = 00000000
Oops: 0000 [#1] SMP
Modules linked in: snd_hda_intel snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device
snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_hwdep snd soundcore tuner_simple tuner_types tea5767 tuner
tvaudio bttv bridge bnep rfcomm l2cap bluetooth it87 hwmon_vid hwmon fuse sunrpc ipt_REJECT
nf_conntrack_ipv4 iptable_filter ip_tables ip6t_REJECT xt_tcpudp nf_conntrack_ipv6 xt_state nf_conntrack
ip6table_filter ip6_tables x_tables ipv6 dm_mirror dm_multipath dm_mod configfs videodev v4l1_compat
ir_common 8139cp compat_ioctl32 v4l2_common 8139too videobuf_dma_sg videobuf_core mii btcx_risc tveeprom
i915 button snd_page_alloc serio_raw drm pcspkr i2c_algo_bit i2c_i801 i2c_core iTCO_wdt
iTCO_vendor_support sr_mod cdrom sg ata_generic pata_acpi ata_piix libata sd_mod scsi_mod ext3 jbd mbcache
uhci_hcd ohci_hcd ehci_hcd [last unloaded: soundcore]
Pid: 15413, comm: qv4l2 Not tainted (2.6.25.14-108.fc9.i686 #1)
EIP: 0060:[<00000000>] EFLAGS: 00210246 CPU: 0
EIP is at 0x0
EAX: 00008000 EBX: ebd21600 ECX: e2fd9ec4 EDX: 00200046
ESI: f8c0f0c4 EDI: f8c0f0c4 EBP: e2fd9d50 ESP: e2fd9d2c
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
Process qv4l2 (pid: 15413, ti=e2fd9000 task=ebe44000 task.ti=e2fd9000)
Stack: f8c0c6ae e2ff2a00 00000d00 e2fd9ec4 ebc4e000 e2fd9d5c f8c0c448 00000000
f899c12a e2fd9d5c f899c154 e2fd9d68 e2fd9d80 c0560185 e2fd9d88 f8f3e1d8
f8f3e1dc ebc4e034 f8f3e18c e2fd9ec4 00000000 e2fd9d90 f899c286 c008561c
Call Trace:
[<f8c0c6ae>] ? chip_command+0x266/0x4b6 [tvaudio]
[<f8c0c448>] ? chip_command+0x0/0x4b6 [tvaudio]
[<f899c12a>] ? i2c_cmd+0x0/0x2f [i2c_core]
[<f899c154>] ? i2c_cmd+0x2a/0x2f [i2c_core]
[<c0560185>] ? device_for_each_child+0x21/0x49
[<f899c286>] ? i2c_clients_command+0x1c/0x1e [i2c_core]
[<f8f283d8>] ? bttv_call_i2c_clients+0x14/0x16 [bttv]
[<f8f23601>] ? bttv_s_ctrl+0x1bc/0x313 [bttv]
[<f8f23445>] ? bttv_s_ctrl+0x0/0x313 [bttv]
[<f8b6096d>] ? __video_do_ioctl+0x1f84/0x3726 [videodev]
[<c05abb4e>] ? sock_aio_write+0x100/0x10d
[<c041b23e>] ? kmap_atomic_prot+0x1dd/0x1df
[<c043a0c9>] ? enqueue_hrtimer+0xc2/0xcd
[<c04f4fa4>] ? copy_from_user+0x39/0x121
[<f8b622b9>] ? __video_ioctl2+0x1aa/0x24a [videodev]
[<c04054fd>] ? do_notify_resume+0x768/0x795
[<c043c0f7>] ? getnstimeofday+0x34/0xd1
[<c0437b77>] ? autoremove_wake_function+0x0/0x33
[<f8b62368>] ? video_ioctl2+0xf/0x13 [videodev]
[<c048c6f0>] ? vfs_ioctl+0x50/0x69
[<c048c942>] ? do_vfs_ioctl+0x239/0x24c
[<c048c995>] ? sys_ioctl+0x40/0x5b
[<c0405bf2>] ? syscall_call+0x7/0xb
[<c0620000>] ? cpuid4_cache_sysfs_exit+0x3d/0x69
=======================
Code: Bad EIP value.
EIP: [<00000000>] 0x0 SS:ESP 0068:e2fd9d2c
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
| 0 |
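The tvaudio message above describes a driver table with a per-chip flag for bass/treble support whose mishandling led to calling a NULL function pointer. A sketch of the guard it implies, with invented structure and field names rather than the real chip descriptor table:
```c
/* Sketch only: invented types, not the tvaudio driver's tables. */
#include <stdio.h>

struct chip_desc {
    const char *name;
    int  has_bass_treble;            /* capability flag from the table */
    void (*set_bass)(int value);     /* NULL when the chip cannot do it */
};

static void real_set_bass(int value)
{
    printf("bass=%d\n", value);
}

static int set_bass_ctrl(const struct chip_desc *chip, int value)
{
    if (!chip->has_bass_treble || chip->set_bass == NULL)
        return -1;                   /* reject instead of calling NULL */
    chip->set_bass(value);
    return 0;
}

int main(void)
{
    struct chip_desc with    = { "chipA", 1, real_set_bass };
    struct chip_desc without = { "chipB", 0, NULL };

    printf("with: %d, without: %d\n",
           set_bass_ctrl(&with, 5), set_bass_ctrl(&without, 5));
    return 0;
}
```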
static inline void inc_snd_pages(int order)
{
snd_allocated_pages += 1 << order;
}
|
Safe
|
[] |
linux-2.6
|
ccec6e2c4a74adf76ed4e2478091a311b1806212
|
1.8958897352803154e+38
| 4 |
Convert snd-page-alloc proc file to use seq_file
Use seq_file for the proc file read/write of snd-page-alloc module.
This automatically fixes bugs in the old proc code.
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
is_cross_tgs_principal(krb5_const_principal principal)
{
if (!krb5_is_tgs_principal(principal))
return FALSE;
if (!data_eq(*krb5_princ_component(kdc_context, principal, 1),
*krb5_princ_realm(kdc_context, principal)))
return TRUE;
else
return FALSE;
}
|
Safe
|
[
"CWE-476"
] |
krb5
|
93b4a6306a0026cf1cc31ac4bd8a49ba5d034ba7
|
2.756595274780317e+38
| 10 |
Fix S4U2Self KDC crash when anon is restricted
In validate_as_request(), when enforcing restrict_anonymous_to_tgt,
use client.princ instead of request->client; the latter is NULL when
validating S4U2Self requests.
CVE-2016-3120:
In MIT krb5 1.9 and later, an authenticated attacker can cause krb5kdc
to dereference a null pointer if the restrict_anonymous_to_tgt option
is set to true, by making an S4U2Self request.
CVSSv2 Vector: AV:N/AC:H/Au:S/C:N/I:N/A:C/E:H/RL:OF/RC:C
ticket: 8458 (new)
target_version: 1.14-next
target_version: 1.13-next
| 0 |
ZEND_VM_HELPER(zend_sub_helper, ANY, ANY, zval *op_1, zval *op_2)
{
USE_OPLINE
SAVE_OPLINE();
if (UNEXPECTED(Z_TYPE_INFO_P(op_1) == IS_UNDEF)) {
op_1 = ZVAL_UNDEFINED_OP1();
}
if (UNEXPECTED(Z_TYPE_INFO_P(op_2) == IS_UNDEF)) {
op_2 = ZVAL_UNDEFINED_OP2();
}
sub_function(EX_VAR(opline->result.var), op_1, op_2);
if (OP1_TYPE & (IS_TMP_VAR|IS_VAR)) {
zval_ptr_dtor_nogc(op_1);
}
if (OP2_TYPE & (IS_TMP_VAR|IS_VAR)) {
zval_ptr_dtor_nogc(op_2);
}
ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
}
|
Safe
|
[
"CWE-787"
] |
php-src
|
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
|
2.823189381553948e+38
| 20 |
Fix #73122: Integer Overflow when concatenating strings
We must avoid integer overflows in memory allocations, so we introduce
an additional check in the VM, and bail out in the rare case of an
overflow. Since the recent fix for bug #74960 still doesn't catch all
possible overflows, we fix that right away.
| 0 |
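The bug #73122 message above is about rejecting integer overflow before allocating memory for a concatenation. A hedged sketch of such a check in plain C; the names are illustrative and PHP's actual fix lives in the Zend VM, not here.
```c
/* Sketch only: overflow check before allocating len1 + len2 + 1 bytes. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_concat(size_t len1, size_t len2)
{
    /* bail out if len1 + len2 (+1 for the NUL) would wrap around */
    if (len1 > SIZE_MAX - len2 || len1 + len2 > SIZE_MAX - 1)
        return NULL;
    return malloc(len1 + len2 + 1);
}

int main(void)
{
    void *ok  = alloc_concat(16, 32);              /* fine               */
    void *bad = alloc_concat(SIZE_MAX - 4, 16);    /* rejected: overflow */

    printf("ok=%s bad=%s\n",
           ok ? "allocated" : "NULL", bad ? "allocated" : "NULL");
    free(ok);
    return 0;
}
```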
GF_Err tpay_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
|
3.1258913503091024e+37
| 5 |
prevent dref memleak on invalid input (#1183)
| 0 |
inline float rol(const float a, const unsigned int n=1) {
return (float)rol((int)a,n);
}
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
1.5229652412552484e+38
| 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
static int update_write_order_info(rdpContext* context, wStream* s, ORDER_INFO* orderInfo,
size_t offset)
{
size_t position;
WINPR_UNUSED(context);
position = Stream_GetPosition(s);
Stream_SetPosition(s, offset);
Stream_Write_UINT8(s, orderInfo->controlFlags); /* controlFlags (1 byte) */
if (orderInfo->controlFlags & ORDER_TYPE_CHANGE)
Stream_Write_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */
update_write_field_flags(s, orderInfo->fieldFlags, orderInfo->controlFlags,
PRIMARY_DRAWING_ORDER_FIELD_BYTES[orderInfo->orderType]);
update_write_bounds(s, orderInfo);
Stream_SetPosition(s, position);
return 0;
}
|
Vulnerable
|
[
"CWE-20",
"CWE-125"
] |
FreeRDP
|
733ee3208306b1ea32697b356c0215180fc3f049
|
2.4365932045314647e+37
| 18 |
Fixed invalid access in update_recv_primary_order
CVE-2020-11095 thanks @antonio-morales for finding this.
| 1 |
HttpTransact::handle_server_connection_not_open(State* s)
{
bool serve_from_cache = false;
DebugTxn("http_trans", "[handle_server_connection_not_open] (hscno)");
DebugTxn("http_seq", "[HttpTransact::handle_server_connection_not_open] ");
ink_assert(s->current.state != CONNECTION_ALIVE);
ink_assert(s->current.server->had_connect_fail());
SET_VIA_STRING(VIA_SERVER_RESULT, VIA_SERVER_ERROR);
HTTP_INCREMENT_TRANS_STAT(http_broken_server_connections_stat);
// Fire off a hostdb update to mark the server as down
s->state_machine->do_hostdb_update_if_necessary();
switch (s->cache_info.action) {
case CACHE_DO_UPDATE:
serve_from_cache = is_stale_cache_response_returnable(s);
break;
case CACHE_PREPARE_TO_DELETE:
/* fall through */
case CACHE_PREPARE_TO_UPDATE:
/* fall through */
case CACHE_PREPARE_TO_WRITE:
ink_release_assert(!"Why still preparing for cache action - " "we skipped a step somehow.");
break;
case CACHE_DO_LOOKUP:
/* fall through */
case CACHE_DO_SERVE:
ink_assert(!("Why server response? Should have been a cache operation"));
break;
case CACHE_DO_DELETE:
// decisions, decisions. what should we do here?
// we could theoretically still delete the cached
// copy or serve it back with a warning, or easier
// just punt and biff the user. i say: biff the user.
/* fall through */
case CACHE_DO_UNDEFINED:
/* fall through */
case CACHE_DO_NO_ACTION:
/* fall through */
case CACHE_DO_WRITE:
/* fall through */
default:
serve_from_cache = false;
break;
}
if (serve_from_cache) {
ink_assert(s->cache_info.object_read != NULL);
ink_assert(s->cache_info.action == CACHE_DO_UPDATE);
ink_assert(s->internal_msg_buffer == NULL);
DebugTxn("http_trans", "[hscno] serving stale doc to client");
build_response_from_cache(s, HTTP_WARNING_CODE_REVALIDATION_FAILED);
} else {
handle_server_died(s);
s->next_action = SM_ACTION_SEND_ERROR_CACHE_NOOP;
}
return;
}
|
Safe
|
[
"CWE-119"
] |
trafficserver
|
8b5f0345dade6b2822d9b52c8ad12e63011a5c12
|
1.7809153746051905e+38
| 65 |
Fix the internal buffer sizing. Thanks to Sudheer for helping isolate this bug
| 0 |
rdpBitmapCache* bitmap_cache_new(rdpSettings* settings)
{
int i;
rdpBitmapCache* bitmapCache;
bitmapCache = (rdpBitmapCache*)calloc(1, sizeof(rdpBitmapCache));
if (!bitmapCache)
return NULL;
bitmapCache->settings = settings;
bitmapCache->update = ((freerdp*)settings->instance)->update;
bitmapCache->context = bitmapCache->update->context;
bitmapCache->cells =
(BITMAP_V2_CELL*)calloc(settings->BitmapCacheV2NumCells, sizeof(BITMAP_V2_CELL));
if (!bitmapCache->cells)
goto fail;
bitmapCache->maxCells = settings->BitmapCacheV2NumCells;
for (i = 0; i < (int)bitmapCache->maxCells; i++)
{
bitmapCache->cells[i].number = settings->BitmapCacheV2CellInfo[i].numEntries;
/* allocate an extra entry for BITMAP_CACHE_WAITING_LIST_INDEX */
bitmapCache->cells[i].entries =
(rdpBitmap**)calloc((bitmapCache->cells[i].number + 1), sizeof(rdpBitmap*));
if (!bitmapCache->cells[i].entries)
goto fail;
}
return bitmapCache;
fail:
if (bitmapCache->cells)
{
for (i = 0; i < (int)bitmapCache->maxCells; i++)
free(bitmapCache->cells[i].entries);
}
free(bitmapCache);
return NULL;
}
|
Vulnerable
|
[
"CWE-125"
] |
FreeRDP
|
0b6b92a25a77d533b8a92d6acc840a81e103684e
|
1.48732299475659e+38
| 42 |
Fixed CVE-2020-11525: Out of bounds read in bitmap_cache_new
Thanks to Sunglin and HuanGMz from Knownsec 404
| 1 |
save_fonteffect(struct html_feed_environ *h_env, struct readbuffer *obuf)
{
if (obuf->fontstat_sp < FONT_STACK_SIZE)
bcopy(obuf->fontstat, obuf->fontstat_stack[obuf->fontstat_sp],
FONTSTAT_SIZE);
obuf->fontstat_sp++;
if (obuf->in_bold)
push_tag(obuf, "</b>", HTML_N_B);
if (obuf->in_italic)
push_tag(obuf, "</i>", HTML_N_I);
if (obuf->in_under)
push_tag(obuf, "</u>", HTML_N_U);
if (obuf->in_strike)
push_tag(obuf, "</s>", HTML_N_S);
if (obuf->in_ins)
push_tag(obuf, "</ins>", HTML_N_INS);
bzero(obuf->fontstat, FONTSTAT_SIZE);
}
|
Safe
|
[
"CWE-476"
] |
w3m
|
59b91cd8e30c86f23476fa81ae005cabff49ebb6
|
3.237059141198073e+38
| 18 |
Prevent segfault with malformed input type
Bug-Debian: https://github.com/tats/w3m/issues/7
| 0 |
void reset_vc(struct vc_data *vc)
{
vc->vc_mode = KD_TEXT;
vt_reset_unicode(vc->vc_num);
vc->vt_mode.mode = VT_AUTO;
vc->vt_mode.waitv = 0;
vc->vt_mode.relsig = 0;
vc->vt_mode.acqsig = 0;
vc->vt_mode.frsig = 0;
put_pid(vc->vt_pid);
vc->vt_pid = NULL;
vc->vt_newvt = -1;
if (!in_interrupt()) /* Via keyboard.c:SAK() - akpm */
reset_palette(vc);
}
|
Safe
|
[
"CWE-662"
] |
linux
|
90bfdeef83f1d6c696039b6a917190dcbbad3220
|
8.156186577433282e+35
| 15 |
tty: make FONTX ioctl use the tty pointer they were actually passed
Some of the font tty ioctl's always used the current foreground VC for
their operations. Don't do that then.
This fixes a data race on fg_console.
Side note: both Michael Ellerman and Jiri Slaby point out that all these
ioctls are deprecated, and should probably have been removed long ago,
and everything seems to be using the KDFONTOP ioctl instead.
In fact, Michael points out that it looks like busybox's loadfont
program seems to have switched over to using KDFONTOP exactly _because_
of this bug (ahem.. 12 years ago ;-).
Reported-by: Minh Yuan <yuanmingbuaa@gmail.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Jiri Slaby <jirislaby@kernel.org>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
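The FONTX message above is about operating on the VC that the ioctl was actually issued against instead of the global foreground console, which is what races. A trivial userspace sketch of the difference, with invented types; the real change is in the kernel's vt driver.
```c
/* Sketch only: invented 'struct console' and globals. */
#include <stdio.h>

struct console { int dummy; };

static struct console consoles[4];
static int fg_console;                       /* global state, racy to consult */

/* old style: ignores the argument and acts on whatever is foreground now */
static struct console *pick_global(struct console *c)
{
    (void)c;
    return &consoles[fg_console];
}

/* fixed style: operate on the console the caller was actually given */
static struct console *pick_passed(struct console *c)
{
    return c;
}

int main(void)
{
    struct console *mine = &consoles[0];

    fg_console = 2;                          /* someone else switched consoles */
    printf("global picks #%d, passed picks #%d\n",
           (int)(pick_global(mine) - consoles),
           (int)(pick_passed(mine) - consoles));
    return 0;
}
```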
bool generateRequestId() override { return true; }
|
Safe
|
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
|
1.4147749292193728e+38
| 1 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <asraa@google.com>
| 0 |
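The Envoy note above describes caching a computed byte size and clearing the cache whenever a mutable reference to an entry is handed out, with refreshByteSize() recomputing it by iteration. The real change is C++ inside HeaderMap; the sketch below re-creates the idea in plain C with invented names.
```c
/* Sketch only: a cached size with explicit invalidation and refresh. */
#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 8

struct header_map {
    char   keys[MAX_ENTRIES][16];
    char   vals[MAX_ENTRIES][16];
    int    count;
    int    cache_valid;       /* 0 = byte size must be recomputed */
    size_t cached_bytes;
};

/* recompute the cached value by walking every entry */
static void refresh_byte_size(struct header_map *m)
{
    size_t total = 0;
    for (int i = 0; i < m->count; i++)
        total += strlen(m->keys[i]) + strlen(m->vals[i]);
    m->cached_bytes = total;
    m->cache_valid = 1;
}

/* any mutable access may change lengths, so drop the cached value */
static char *mutable_value(struct header_map *m, int i)
{
    m->cache_valid = 0;
    return m->vals[i];
}

int main(void)
{
    struct header_map m = { .count = 1, .cache_valid = 0 };

    strcpy(m.keys[0], "host");
    strcpy(m.vals[0], "a");
    refresh_byte_size(&m);
    printf("bytes=%zu valid=%d\n", m.cached_bytes, m.cache_valid);

    strcpy(mutable_value(&m, 0), "example.com");   /* invalidates the cache */
    printf("valid=%d\n", m.cache_valid);
    refresh_byte_size(&m);
    printf("bytes=%zu\n", m.cached_bytes);
    return 0;
}
```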
eval7t(
char_u **arg,
typval_T *rettv,
evalarg_T *evalarg,
int want_string) // after "." operator
{
type_T *want_type = NULL;
garray_T type_list; // list of pointers to allocated types
int res;
int evaluate = evalarg == NULL ? 0
: (evalarg->eval_flags & EVAL_EVALUATE);
// Recognize <type> in Vim9 script only.
if (in_vim9script() && **arg == '<' && eval_isnamec1((*arg)[1])
&& STRNCMP(*arg, "<SNR>", 5) != 0)
{
++*arg;
ga_init2(&type_list, sizeof(type_T *), 10);
want_type = parse_type(arg, &type_list, TRUE);
if (want_type == NULL && (evaluate || **arg != '>'))
{
clear_type_list(&type_list);
return FAIL;
}
if (**arg != '>')
{
if (*skipwhite(*arg) == '>')
semsg(_(e_no_white_space_allowed_before_str_str), ">", *arg);
else
emsg(_(e_missing_gt));
clear_type_list(&type_list);
return FAIL;
}
++*arg;
*arg = skipwhite_and_linebreak(*arg, evalarg);
}
res = eval7(arg, rettv, evalarg, want_string);
if (want_type != NULL && evaluate)
{
if (res == OK)
{
type_T *actual = typval2type(rettv, get_copyID(), &type_list,
TVTT_DO_MEMBER);
if (!equal_type(want_type, actual, 0))
{
if (want_type == &t_bool && actual != &t_bool
&& (actual->tt_flags & TTFLAG_BOOL_OK))
{
int n = tv2bool(rettv);
// can use "0" and "1" for boolean in some places
clear_tv(rettv);
rettv->v_type = VAR_BOOL;
rettv->vval.v_number = n ? VVAL_TRUE : VVAL_FALSE;
}
else
{
where_T where = WHERE_INIT;
where.wt_variable = TRUE;
res = check_type(want_type, actual, TRUE, where);
}
}
}
clear_type_list(&type_list);
}
return res;
}
|
Safe
|
[
"CWE-786",
"CWE-119",
"CWE-787"
] |
vim
|
fe6fb267e6ee5c5da2f41889e4e0e0ac5bf4b89d
|
6.038035689909774e+37
| 73 |
patch 8.2.4206: condition with many "(" causes a crash
Problem: Condition with many "(" causes a crash.
Solution: Limit recursion to 1000.
| 0 |
static inline boost::string_ref get_sanitized_hdrval(ceph::buffer::list& raw)
{
/* std::string and thus boost::string_ref ARE OBLIGED to carry multiple
* 0x00 and count them to the length of a string. We need to take that
* into consideration and sanitize the size of a ceph::buffer::list used
* to store metadata values (x-amz-meta-*, X-Container-Meta-*, etags).
* Otherwise we might send 0x00 to clients. */
const char* const data = raw.c_str();
size_t len = raw.length();
if (len && data[len - 1] == '\0') {
/* That's the case - the null byte has been included at the last position
* of the bufferlist. We need to restore the proper string length we'll
* pass to string_ref. */
len--;
}
return boost::string_ref(data, len);
}
|
Safe
|
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
|
2.424361264390023e+37
| 19 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <joao@suse.de>
Signed-off-by: Abhishek Lekshmanan <abhishek@suse.com>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
| 0 |
int fuse_fs_access(struct fuse_fs *fs, const char *path, int mask)
{
fuse_get_context()->private_data = fs->user_data;
if (fs->op.access)
return fs->op.access(path, mask);
else
return -ENOSYS;
}
|
Safe
|
[] |
ntfs-3g
|
fb28eef6f1c26170566187c1ab7dc913a13ea43c
|
2.340124892414408e+37
| 8 |
Hardened the checking of directory offset requested by a readdir
When asked for the next directory entries, make sure the chunk offset
is within valid values, otherwise return no more entries in chunk.
| 0 |
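The ntfs-3g message above says the readdir chunk offset is now validated, with out-of-range values simply yielding no more entries. A tiny sketch of that bounds check with invented names:
```c
/* Sketch only: a readdir-style iterator rejecting bogus offsets. */
#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES 4
static const char *entries[NUM_ENTRIES] = { ".", "..", "a.txt", "b.txt" };

/* returns the name at 'pos', or NULL meaning "no more entries in chunk" */
static const char *next_entry(int64_t pos)
{
    if (pos < 0 || pos >= NUM_ENTRIES)   /* reject offsets outside the chunk */
        return NULL;
    return entries[pos];
}

int main(void)
{
    const char *a = next_entry(2);
    const char *b = next_entry(1000);

    printf("%s\n", a ? a : "(end of chunk)");
    printf("%s\n", b ? b : "(end of chunk)");
    return 0;
}
```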
static VALUE cState_space_before_set(VALUE self, VALUE space_before)
{
unsigned long len;
GET_STATE(self);
Check_Type(space_before, T_STRING);
len = RSTRING_LEN(space_before);
if (len == 0) {
if (state->space_before) {
ruby_xfree(state->space_before);
state->space_before = NULL;
state->space_before_len = 0;
}
} else {
if (state->space_before) ruby_xfree(state->space_before);
state->space_before = strdup(RSTRING_PTR(space_before));
state->space_before_len = len;
}
return Qnil;
}
|
Vulnerable
|
[
"CWE-119",
"CWE-787"
] |
json
|
8f782fd8e181d9cfe9387ded43a5ca9692266b85
|
4.1181915777464785e+37
| 19 |
Fix arbitrary heap exposure problem
| 1 |
String_Obj Parser::parse_url_function_argument()
{
const char* p = position;
std::string uri("");
if (lex< real_uri_value >(false)) {
uri = lexed.to_string();
}
if (peek< exactly< hash_lbrace > >()) {
const char* pp = position;
// TODO: error checking for unclosed interpolants
while (pp && peek< exactly< hash_lbrace > >(pp)) {
pp = sequence< interpolant, real_uri_value >(pp);
}
position = pp;
return parse_interpolated_chunk(Token(p, position));
}
else if (uri != "") {
std::string res = Util::rtrim(uri);
return SASS_MEMORY_NEW(String_Constant, pstate, res);
}
return 0;
}
|
Vulnerable
|
[
"CWE-125"
] |
libsass
|
b3374e3fd1a0c3658644d2bad24e4a0ff2e0dcea
|
2.4611058965439525e+38
| 25 |
Fix handling of unclosed interpolant in url
Fixes #2661
| 1 |
local void write_thread(void *dummy)
{
long seq; /* next sequence number looking for */
struct job *job; /* job pulled and working on */
size_t len; /* input length */
int more; /* true if more chunks to write */
unsigned long head; /* header length */
unsigned long ulen; /* total uncompressed size (overflow ok) */
unsigned long clen; /* total compressed size (overflow ok) */
unsigned long check; /* check value of uncompressed data */
(void)dummy;
/* build and write header */
Trace(("-- write thread running"));
head = put_header();
/* process output of compress threads until end of input */
ulen = clen = 0;
check = CHECK(0L, Z_NULL, 0);
seq = 0;
do {
/* get next write job in order */
possess(write_first);
wait_for(write_first, TO_BE, seq);
job = write_head;
write_head = job->next;
twist(write_first, TO, write_head == NULL ? -1 : write_head->seq);
/* update lengths, save uncompressed length for COMB */
more = job->more;
len = job->in->len;
drop_space(job->in);
ulen += (unsigned long)len;
clen += (unsigned long)(job->out->len);
/* write the compressed data and drop the output buffer */
Trace(("-- writing #%ld", seq));
writen(g.outd, job->out->buf, job->out->len);
drop_space(job->out);
Trace(("-- wrote #%ld%s", seq, more ? "" : " (last)"));
/* wait for check calculation to complete, then combine, once
the compress thread is done with the input, release it */
possess(job->calc);
wait_for(job->calc, TO_BE, 1);
release(job->calc);
check = COMB(check, job->check, len);
/* free the job */
free_lock(job->calc);
FREE(job);
/* get the next buffer in sequence */
seq++;
} while (more);
/* write trailer */
put_trailer(ulen, clen, check, head);
/* verify no more jobs, prepare for next use */
possess(compress_have);
assert(compress_head == NULL && peek_lock(compress_have) == 0);
release(compress_have);
possess(write_first);
assert(write_head == NULL);
twist(write_first, TO, -1);
}
|
Safe
|
[
"CWE-703",
"CWE-22"
] |
pigz
|
fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
|
1.3587848819589164e+38
| 68 |
When decompressing with -N or -NT, strip any path from header name.
This uses the path of the compressed file combined with the name
from the header as the name of the decompressed output file. Any
path information in the header name is stripped. This avoids a
possible vulnerability where absolute or descending paths are put
in the gzip header.
| 0 |
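The pigz change above strips any directory components from the name stored in the gzip header before using it as an output filename, so absolute or descending paths in the header cannot redirect the write. A hedged sketch of such a basename-style strip; the helper name is illustrative.
```c
/* Sketch only: strip path components from an untrusted name. */
#include <stdio.h>
#include <string.h>

/* return a pointer to the last path component of 'name' */
static const char *justname(const char *name)
{
    const char *slash = strrchr(name, '/');
    return slash ? slash + 1 : name;
}

int main(void)
{
    printf("%s\n", justname("../../etc/passwd"));   /* -> "passwd"    */
    printf("%s\n", justname("/etc/shadow"));        /* -> "shadow"    */
    printf("%s\n", justname("plain.txt"));          /* -> "plain.txt" */
    return 0;
}
```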
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
const unsigned int frames,ExceptionInfo *exception)
{
char
component[MagickPathExtent],
magic[MagickPathExtent],
*q;
const MagicInfo
*magic_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
Image
*image;
MagickBooleanType
status;
register const char
*p;
ssize_t
count;
/*
Look for 'image.format' in filename.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
*component='\0';
GetPathComponent(image_info->filename,SubimagePath,component);
if (*component != '\0')
{
/*
Look for scene specification (e.g. img0001.pcd[4]).
*/
if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
{
if (IsGeometry(component) != MagickFalse)
(void) CloneString(&image_info->extract,component);
}
else
{
size_t
first,
last;
(void) CloneString(&image_info->scenes,component);
image_info->scene=StringToUnsignedLong(image_info->scenes);
image_info->number_scenes=image_info->scene;
p=image_info->scenes;
for (q=(char *) image_info->scenes; *q != '\0'; p++)
{
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
p++;
first=(size_t) strtol(p,&q,10);
last=first;
while (isspace((int) ((unsigned char) *q)) != 0)
q++;
if (*q == '-')
last=(size_t) strtol(q+1,&q,10);
if (first > last)
Swap(first,last);
if (first < image_info->scene)
image_info->scene=first;
if (last > image_info->number_scenes)
image_info->number_scenes=last;
p=q;
}
image_info->number_scenes-=image_info->scene-1;
}
}
*component='\0';
if (*image_info->magick == '\0')
GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (*component != '\0')
if ((LocaleCompare(component,"gz") == 0) ||
(LocaleCompare(component,"Z") == 0) ||
(LocaleCompare(component,"svgz") == 0) ||
(LocaleCompare(component,"wmz") == 0))
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
if (*component != '\0')
if (LocaleCompare(component,"bz2") == 0)
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
image_info->affirm=MagickFalse;
sans_exception=AcquireExceptionInfo();
if (*component != '\0')
{
MagickFormatType
format_type;
register ssize_t
i;
static const char
*format_type_formats[] =
{
"AUTOTRACE",
"BROWSE",
"DCRAW",
"EDIT",
"LAUNCH",
"MPEG:DECODE",
"MPEG:ENCODE",
"PRINT",
"PS:ALPHA",
"PS:CMYK",
"PS:COLOR",
"PS:GRAY",
"PS:MONO",
"SCAN",
"SHOW",
"WIN",
(char *) NULL
};
/*
User specified image format.
*/
(void) CopyMagickString(magic,component,MagickPathExtent);
LocaleUpper(magic);
/*
Look for explicit image formats.
*/
format_type=UndefinedFormatType;
magick_info=GetMagickInfo(magic,sans_exception);
if ((magick_info != (const MagickInfo *) NULL) &&
(magick_info->format_type != UndefinedFormatType))
format_type=magick_info->format_type;
i=0;
while ((format_type == UndefinedFormatType) &&
(format_type_formats[i] != (char *) NULL))
{
if ((*magic == *format_type_formats[i]) &&
(LocaleCompare(magic,format_type_formats[i]) == 0))
format_type=ExplicitFormatType;
i++;
}
if (format_type == UndefinedFormatType)
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
else
if (format_type == ExplicitFormatType)
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
}
if (LocaleCompare(magic,"RGB") == 0)
image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
}
/*
Look for explicit 'format:image' in filename.
*/
*magic='\0';
GetPathComponent(image_info->filename,MagickPath,magic);
if (*magic == '\0')
(void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
else
{
/*
User specified image format.
*/
LocaleUpper(magic);
if (IsMagickConflict(magic) == MagickFalse)
{
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
image_info->affirm=MagickTrue;
}
}
magick_info=GetMagickInfo(magic,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
GetPathComponent(image_info->filename,CanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,MagickPathExtent);
if ((image_info->adjoin != MagickFalse) && (frames > 1))
{
/*
Test for multiple image support (e.g. image%02d.png).
*/
(void) InterpretImageFilename(image_info,(Image *) NULL,
image_info->filename,(int) image_info->scene,component,exception);
if ((LocaleCompare(component,image_info->filename) != 0) &&
(strchr(component,'%') == (char *) NULL))
image_info->adjoin=MagickFalse;
}
if ((image_info->adjoin != MagickFalse) && (frames > 0))
{
/*
Some image formats do not support multiple frames per file.
*/
magick_info=GetMagickInfo(magic,exception);
if (magick_info != (const MagickInfo *) NULL)
if (GetMagickAdjoin(magick_info) == MagickFalse)
image_info->adjoin=MagickFalse;
}
if (image_info->affirm != MagickFalse)
return(MagickTrue);
if (frames == 0)
{
unsigned char
*magick;
size_t
magick_size;
/*
Determine the image format from the first few bytes of the file.
*/
magick_size=GetMagicPatternExtent(exception);
if (magick_size == 0)
return(MagickFalse);
image=AcquireImage(image_info,exception);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
if ((IsBlobSeekable(image) == MagickFalse) ||
(IsBlobExempt(image) != MagickFalse))
{
/*
Copy standard input or pipe to temporary file.
*/
*component='\0';
status=ImageToFile(image,component,exception);
(void) CloseBlob(image);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
SetImageInfoFile(image_info,(FILE *) NULL);
(void) CopyMagickString(image->filename,component,MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
image_info->temporary=MagickTrue;
}
magick=(unsigned char *) AcquireMagickMemory(magick_size);
if (magick == (unsigned char *) NULL)
{
(void) CloseBlob(image);
image=DestroyImage(image);
return(MagickFalse);
}
(void) ResetMagickMemory(magick,0,magick_size);
count=ReadBlob(image,magick_size,magick);
(void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Check magic.xml configuration file.
*/
sans_exception=AcquireExceptionInfo();
magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
magick=(unsigned char *) RelinquishMagickMemory(magick);
if ((magic_info != (const MagicInfo *) NULL) &&
(GetMagicName(magic_info) != (char *) NULL))
{
/*
Try to use magick_info that was determined earlier by the extension
*/
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickUseExtension(magick_info) != MagickFalse) &&
(LocaleCompare(magick_info->module,GetMagicName(
magic_info)) == 0))
(void) CopyMagickString(image_info->magick,magick_info->name,
MagickPathExtent);
else
{
(void) CopyMagickString(image_info->magick,GetMagicName(
magic_info),MagickPathExtent);
magick_info=GetMagickInfo(image_info->magick,sans_exception);
}
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
return(MagickTrue);
}
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
}
return(MagickTrue);
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
ImageMagick
|
aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
3.252983957970776e+38
| 323 |
Set pixel cache to undefined if any resource limit is exceeded
| 0 |
xmlXIncludeIncludeNode(xmlXIncludeCtxtPtr ctxt, int nr) {
xmlNodePtr cur, end, list, tmp;
if (ctxt == NULL)
return(-1);
if ((nr < 0) || (nr >= ctxt->incNr))
return(-1);
cur = ctxt->incTab[nr]->ref;
if ((cur == NULL) || (cur->type == XML_NAMESPACE_DECL))
return(-1);
list = ctxt->incTab[nr]->inc;
ctxt->incTab[nr]->inc = NULL;
ctxt->incTab[nr]->emptyFb = 0;
/*
* Check against the risk of generating a multi-rooted document
*/
if ((cur->parent != NULL) &&
(cur->parent->type != XML_ELEMENT_NODE)) {
int nb_elem = 0;
tmp = list;
while (tmp != NULL) {
if (tmp->type == XML_ELEMENT_NODE)
nb_elem++;
tmp = tmp->next;
}
if (nb_elem > 1) {
xmlXIncludeErr(ctxt, ctxt->incTab[nr]->ref,
XML_XINCLUDE_MULTIPLE_ROOT,
"XInclude error: would result in multiple root nodes\n",
NULL);
xmlFreeNodeList(list);
return(-1);
}
}
if (ctxt->parseFlags & XML_PARSE_NOXINCNODE) {
/*
* Add the list of nodes
*/
while (list != NULL) {
end = list;
list = list->next;
xmlAddPrevSibling(cur, end);
}
xmlUnlinkNode(cur);
xmlFreeNode(cur);
} else {
xmlNodePtr child, next;
/*
* Change the current node as an XInclude start one, and add an
* XInclude end one
*/
if (ctxt->incTab[nr]->fallback)
xmlUnsetProp(cur, BAD_CAST "href");
cur->type = XML_XINCLUDE_START;
/* Remove fallback children */
for (child = cur->children; child != NULL; child = next) {
next = child->next;
xmlUnlinkNode(child);
xmlFreeNode(child);
}
end = xmlNewDocNode(cur->doc, cur->ns, cur->name, NULL);
if (end == NULL) {
xmlXIncludeErr(ctxt, ctxt->incTab[nr]->ref,
XML_XINCLUDE_BUILD_FAILED,
"failed to build node\n", NULL);
xmlFreeNodeList(list);
return(-1);
}
end->type = XML_XINCLUDE_END;
xmlAddNextSibling(cur, end);
/*
* Add the list of nodes
*/
while (list != NULL) {
cur = list;
list = list->next;
xmlAddPrevSibling(end, cur);
}
}
return(0);
}
|
Safe
|
[
"CWE-416"
] |
libxml2
|
1098c30a040e72a4654968547f415be4e4c40fe7
|
3.358465274625817e+38
| 91 |
Fix use-after-free with `xmllint --xinclude --dropdtd`
The --dropdtd option can leave dangling pointers in entity reference
nodes. Make sure to skip these nodes when processing XIncludes.
This also avoids scanning entity declarations and even modifying
them inadvertently during XInclude processing.
Move from a block list to an allow list approach to avoid descending
into other node types that can't contain elements.
Fixes #237.
| 0 |
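The libxml2 note above moves from a block list to an allow list when deciding which node types XInclude processing may descend into, so dangling entity-reference children are never touched. A sketch of that filter style with invented enum values, not libxml2's real xmlElementType:
```c
/* Sketch only: block-list vs allow-list node filtering. */
#include <stdio.h>

typedef enum {
    NODE_ELEMENT,
    NODE_TEXT,
    NODE_ENTITY_REF,   /* may hold dangling children after --dropdtd */
    NODE_DTD,
    NODE_OTHER
} node_type;

/* block list: descends into anything not explicitly excluded, so risky or
 * forgotten kinds (like entity references) slip through */
static int descend_blocklist(node_type t)
{
    return t != NODE_DTD;
}

/* allow list: descends only into kinds known to be able to contain elements */
static int descend_allowlist(node_type t)
{
    return t == NODE_ELEMENT;
}

int main(void)
{
    printf("entity ref: blocklist=%d allowlist=%d\n",
           descend_blocklist(NODE_ENTITY_REF),
           descend_allowlist(NODE_ENTITY_REF));
    return 0;
}
```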
birthday_ber (EContact *contact)
{
EContactDate *dt;
struct berval ** result = NULL;
dt = e_contact_get (contact, E_CONTACT_BIRTH_DATE);
if (dt) {
gchar *birthday;
birthday = e_contact_date_to_string (dt);
result = g_new (struct berval *, 2);
result[0] = g_new (struct berval, 1);
result[0]->bv_val = birthday;
result[0]->bv_len = strlen (birthday);
result[1] = NULL;
e_contact_date_free (dt);
}
return result;
}
|
Safe
|
[] |
evolution-data-server
|
34bad61738e2127736947ac50e0c7969cc944972
|
1.8938385263823543e+38
| 23 |
Bug 796174 - strcat() considered unsafe for buffer overflow
| 0 |
cdio_is_device_quiet_generic(const char *source_name)
{
struct stat buf;
if (0 != stat(source_name, &buf)) {
return false;
}
return (S_ISBLK(buf.st_mode) || S_ISCHR(buf.st_mode));
}
|
Safe
|
[
"CWE-415"
] |
libcdio
|
dec2f876c2d7162da213429bce1a7140cdbdd734
|
2.9764610840678124e+38
| 8 |
Removed wrong line
| 0 |
int rtps_util_add_multichannel_locator_list(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb,
gint offset, const guint8 *label, const guint encoding) {
proto_tree *locator_tree;
guint32 num_locators;
num_locators = tvb_get_guint32(tvb, offset, encoding);
locator_tree = proto_tree_add_subtree_format(tree, tvb, offset, 4,
ett_rtps_locator_udp_v4, NULL, "%s: %d Locators", label, num_locators);
offset += 4;
if (num_locators > 0) {
guint32 i;
for (i = 0; i < num_locators; ++i) {
proto_tree *ti, *locator_item_tree;
guint32 kind;
gint32 port;
gchar *channel_address;
locator_item_tree = proto_tree_add_subtree(locator_tree, tvb, offset, 24, ett_rtps_locator,
NULL, label);
proto_tree_add_item_ret_uint(locator_item_tree, hf_rtps_locator_kind, tvb, offset, 4, encoding, &kind);
switch (kind) {
case LOCATOR_KIND_UDPV4:
case LOCATOR_KIND_TUDPV4: {
proto_tree_add_item(locator_item_tree, hf_rtps_locator_ipv4, tvb, offset + 16, 4,
ENC_BIG_ENDIAN);
channel_address = tvb_ip_to_str(tvb, offset + 16);
break;
}
case LOCATOR_KIND_UDPV6: {
proto_tree_add_item(locator_tree, hf_rtps_locator_ipv6, tvb, offset + 4, 16, ENC_NA);
channel_address = tvb_ip6_to_str(tvb, offset + 4);
proto_item_append_text(tree, " (%s, %s)",
val_to_str(kind, rtps_locator_kind_vals, "%02x"),
tvb_ip6_to_str(tvb, offset + 4));
break;
}
/* Default case, Multichannel locators only should be present in UDPv4 and UDPv6 transports
* Unknown address format.
* */
default:
offset += 24;
continue;
break;
}
ti = proto_tree_add_item_ret_int(locator_item_tree, hf_rtps_locator_port, tvb, offset + 20, 4, encoding, &port);
if (port == 0)
expert_add_info(pinfo, ti, &ei_rtps_locator_port);
proto_item_append_text(tree, " (%s, %s:%d)",
val_to_str(kind, rtps_locator_kind_vals, "%02x"),
channel_address, port);
offset += 24;
}
}
return offset;
}
|
Safe
|
[
"CWE-401"
] |
wireshark
|
33e63d19e5496c151bad69f65cdbc7cba2b4c211
|
3.2352155214363337e+38
| 56 |
RTPS: Fixup our coherent set map.
coherent_set_tracking.coherent_set_registry_map uses a struct as a key,
but the hash and comparison routines treat keys as a sequence of bytes.
Make sure every key byte is initialized. Fixes #16994.
Call wmem_strong_hash on our key in coherent_set_key_hash_by_key instead
of creating and leaking a GBytes struct.
| 0 |
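The Wireshark note above points out that when a struct is used as a hash key and hashed byte-wise, every byte of the key, padding included, must be initialized or two equal keys can hash differently. A sketch of the memset-the-key pattern; the key layout and hash are illustrative, not Wireshark's.
```c
/* Sketch only: zero a struct key before byte-wise hashing. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct map_key {
    uint16_t id;      /* 2 bytes, then padding before the 8-byte field */
    uint64_t seq;
};

static unsigned bytewise_hash(const void *p, size_t n)
{
    const unsigned char *b = p;
    unsigned h = 2166136261u;            /* FNV-1a */
    for (size_t i = 0; i < n; i++) { h ^= b[i]; h *= 16777619u; }
    return h;
}

int main(void)
{
    struct map_key k;

    memset(&k, 0, sizeof(k));            /* zero every byte, padding included */
    k.id = 7;
    k.seq = 42;
    printf("hash=%u\n", bytewise_hash(&k, sizeof(k)));
    return 0;
}
```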
add_separate_debug_file (const char * filename, void * handle)
{
separate_info * i = xmalloc (sizeof * i);
i->filename = filename;
i->handle = handle;
i->next = first_separate_info;
first_separate_info = i;
}
|
Safe
|
[
"CWE-703"
] |
binutils-gdb
|
695c6dfe7e85006b98c8b746f3fd5f913c94ebff
|
1.3197837770857237e+38
| 9 |
PR29370, infinite loop in display_debug_abbrev
The PR29370 testcase is a fuzzed object file with multiple
.trace_abbrev sections. Multiple .trace_abbrev or .debug_abbrev
sections are not a violation of the DWARF standard. The DWARF5
standard even gives an example of multiple .debug_abbrev sections
contained in groups. Caching and lookup of processed abbrevs thus
needs to be done by section and offset rather than base and offset.
(Why base anyway?) Or, since section contents are kept, by a pointer
into the contents.
PR 29370
* dwarf.c (struct abbrev_list): Replace abbrev_base and
abbrev_offset with raw field.
(find_abbrev_list_by_abbrev_offset): Delete.
(find_abbrev_list_by_raw_abbrev): New function.
(process_abbrev_set): Set list->raw and list->next.
(find_and_process_abbrev_set): Replace abbrev list lookup with
new function. Don't set list abbrev_base, abbrev_offset or next.
| 0 |
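The binutils note above replaces lookup by (abbrev_base, offset) with lookup keyed by a raw pointer into the section contents, which stays unique even when multiple abbrev sections are present. A sketch of that kind of pointer-keyed cache with invented structure names:
```c
/* Sketch only: cache entries keyed by a raw pointer into section contents. */
#include <stdio.h>
#include <stdlib.h>

struct abbrev_list {
    const unsigned char *raw;      /* pointer into the section's contents */
    struct abbrev_list  *next;
};

static struct abbrev_list *head;

static struct abbrev_list *find_by_raw(const unsigned char *raw)
{
    for (struct abbrev_list *l = head; l; l = l->next)
        if (l->raw == raw)
            return l;
    return NULL;
}

static struct abbrev_list *add(const unsigned char *raw)
{
    struct abbrev_list *l = calloc(1, sizeof(*l));
    if (!l)
        return NULL;
    l->raw = raw;
    l->next = head;
    head = l;
    return l;
}

int main(void)
{
    static const unsigned char section_a[4], section_b[4];

    add(section_a);
    add(section_b);
    printf("%d %d\n",
           find_by_raw(section_a) != NULL,       /* cached: 1   */
           find_by_raw(section_b + 1) != NULL);  /* unknown: 0  */
    return 0;
}
```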
unregister_object(gpointer key, gpointer value, __attribute__((unused)) gpointer user_data)
{
if (g_hash_table_remove(objects, key))
return g_dbus_connection_unregister_object(global_connection, GPOINTER_TO_UINT(value));
return false;
}
|
Safe
|
[
"CWE-59",
"CWE-61"
] |
keepalived
|
04f2d32871bb3b11d7dc024039952f2fe2750306
|
2.9620355749695443e+38
| 6 |
When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non-privileged user
created a symbolic link from /etc/keepalived.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
| 0 |
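The keepalived message above describes opening output files with O_NOFOLLOW so the final path component cannot be a symbolic link, rather than checking first and opening afterwards, which would be racy. A minimal POSIX sketch; the path is only an example.
```c
/* Sketch only: open() fails with ELOOP if the last component is a symlink. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/tmp/example.data";   /* illustrative path */

    /* O_NOFOLLOW avoids the racy lstat()-then-open() alternative */
    int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW, 0644);
    if (fd < 0) {
        fprintf(stderr, "open %s: %s\n", path, strerror(errno));
        return 1;
    }
    printf("opened %s without following a symlink\n", path);
    close(fd);
    return 0;
}
```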
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
mutex_lock(&kvm->arch.apic_map_lock);
if (!new)
goto out;
new->ldr_bits = 8;
/* flat mode is default */
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
u32 ldr;
if (!kvm_apic_present(vcpu))
continue;
/*
* All APICs have to be configured in the same mode by an OS.
* We take advatage of this while building logical id loockup
* table. After reset APICs are in xapic/flat mode, so if we
* find apic with different setting we assume this is the mode
* OS wants all apics to be in; build lookup table accordingly.
*/
if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
new->cid_shift = 4;
new->cid_mask = 0xf;
new->lid_mask = 0xf;
}
new->phys_map[kvm_apic_id(apic)] = apic;
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
lid = apic_logical_id(new, ldr);
if (lid)
new->logical_map[cid][ffs(lid) - 1] = apic;
}
out:
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
kfree_rcu(old, rcu);
kvm_vcpu_request_scan_ioapic(kvm);
}
|
Safe
|
[
"CWE-703",
"CWE-189"
] |
linux
|
17d68b763f09a9ce824ae23eb62c9efc57b69271
|
9.71572628374774e+37
| 67 |
KVM: x86: fix guest-initiated crash with x2apic (CVE-2013-6376)
A guest can cause a BUG_ON() leading to a host kernel crash.
When the guest writes to the ICR to request an IPI, while in x2apic
mode the following things happen: the destination is read from
ICR2, which is a register that the guest can control.
kvm_irq_delivery_to_apic_fast uses the high 16 bits of ICR2 as the
cluster id. A BUG_ON is triggered, which is a protection against
accessing map->logical_map with an out-of-bounds access and manages
to avoid that anything really unsafe occurs.
The logic in the code is correct from real HW point of view. The problem
is that KVM supports only one cluster with ID 0 in clustered mode, but
the code that has the bug does not take this into account.
Reported-by: Lars Bull <larsbull@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
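The CVE-2013-6376 message above boils down to a guest-controlled cluster id being used to index a map that only really has cluster 0, guarded by a BUG_ON instead of a graceful range check. A sketch of validating such an index before the array access; sizes and names are invented, not KVM's.
```c
/* Sketch only: reject an out-of-range, guest-derived index gracefully. */
#include <stdint.h>
#include <stdio.h>

#define MAX_CLUSTERS 1                        /* the map only has cluster 0 */

static const char *logical_map[MAX_CLUSTERS] = { "cluster0" };

static const char *lookup(uint32_t guest_dest)
{
    uint32_t cid = guest_dest >> 16;          /* attacker-influenced bits */

    if (cid >= MAX_CLUSTERS)                  /* range check, not BUG_ON */
        return NULL;
    return logical_map[cid];
}

int main(void)
{
    const char *a = lookup(0x00000000);       /* cid 0: valid         */
    const char *b = lookup(0xFFFF0000);       /* cid 0xffff: rejected */

    printf("%s / %s\n", a ? a : "(null)", b ? b : "rejected");
    return 0;
}
```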
static void ReadBlobFloatsMSB(Image * image, size_t len, float *data)
{
while (len >= 4)
{
*data++ = ReadBlobFloat(image);
len -= sizeof(float);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
|
Safe
|
[
"CWE-772"
] |
ImageMagick
|
cdafbc753cf794160eb53b320d4bc0b36ba6ed1d
|
2.8537797281111265e+38
| 10 |
Fix moment when quantum info is acquired to fix issue reported in #469.
| 0 |
static void sapi_globals_ctor(sapi_globals_struct *sapi_globals TSRMLS_DC)
{
memset(sapi_globals, 0, sizeof(*sapi_globals));
zend_hash_init_ex(&sapi_globals->known_post_content_types, 5, NULL, NULL, 1, 0);
php_setup_sapi_content_types(TSRMLS_C);
}
|
Safe
|
[
"CWE-190",
"CWE-79"
] |
php-src
|
996faf964bba1aec06b153b370a7f20d3dd2bb8b
|
3.072531487422636e+38
| 6 |
Update header handling to RFC 7230
| 0 |
void set_query_inner(const CSET_STRING &string_arg)
{
query_string= string_arg;
}
|
Safe
|
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
|
2.715772707009804e+37
| 4 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
| 0 |
void operator delete[](void* ptr, yaSSL::new_t nt)
{
::operator delete(ptr, nt);
}
|
Safe
|
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
|
1.803759250313862e+38
| 4 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
| 0 |
bool yang_str2bool(const char *value)
{
return strmatch(value, "true");
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
frr
|
ac3133450de12ba86c051265fc0f1b12bc57b40c
|
2.4946939159019922e+38
| 4 |
isisd: fix #10505 using base64 encoding
Using base64 instead of the raw string to encode
the binary data.
Signed-off-by: whichbug <whichbug@github.com>
| 0 |
mono_lookup_pinvoke_call (MonoMethod *method, const char **exc_class, const char **exc_arg)
{
MonoImage *image = method->klass->image;
MonoMethodPInvoke *piinfo = (MonoMethodPInvoke *)method;
MonoTableInfo *tables = image->tables;
MonoTableInfo *im = &tables [MONO_TABLE_IMPLMAP];
MonoTableInfo *mr = &tables [MONO_TABLE_MODULEREF];
guint32 im_cols [MONO_IMPLMAP_SIZE];
guint32 scope_token;
const char *import = NULL;
const char *orig_scope;
const char *new_scope;
char *error_msg;
char *full_name, *file_name;
int i;
MonoDl *module = NULL;
g_assert (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
if (piinfo->addr)
return piinfo->addr;
if (method->klass->image->dynamic) {
MonoReflectionMethodAux *method_aux =
g_hash_table_lookup (
((MonoDynamicImage*)method->klass->image)->method_aux_hash, method);
if (!method_aux)
return NULL;
import = method_aux->dllentry;
orig_scope = method_aux->dll;
}
else {
if (!piinfo->implmap_idx)
return NULL;
mono_metadata_decode_row (im, piinfo->implmap_idx - 1, im_cols, MONO_IMPLMAP_SIZE);
piinfo->piflags = im_cols [MONO_IMPLMAP_FLAGS];
import = mono_metadata_string_heap (image, im_cols [MONO_IMPLMAP_NAME]);
scope_token = mono_metadata_decode_row_col (mr, im_cols [MONO_IMPLMAP_SCOPE] - 1, MONO_MODULEREF_NAME);
orig_scope = mono_metadata_string_heap (image, scope_token);
}
mono_dllmap_lookup (image, orig_scope, import, &new_scope, &import);
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport attempting to load: '%s'.", new_scope);
if (exc_class) {
*exc_class = NULL;
*exc_arg = NULL;
}
/* we allow a special name to dlopen from the running process namespace */
if (strcmp (new_scope, "__Internal") == 0)
module = mono_dl_open (NULL, MONO_DL_LAZY, &error_msg);
/*
* Try loading the module using a variety of names
*/
for (i = 0; i < 4; ++i) {
switch (i) {
case 0:
/* Try the original name */
file_name = g_strdup (new_scope);
break;
case 1:
/* Try trimming the .dll extension */
if (strstr (new_scope, ".dll") == (new_scope + strlen (new_scope) - 4)) {
file_name = g_strdup (new_scope);
file_name [strlen (new_scope) - 4] = '\0';
}
else
continue;
break;
case 2:
if (strstr (new_scope, "lib") != new_scope) {
file_name = g_strdup_printf ("lib%s", new_scope);
}
else
continue;
break;
default:
#ifndef TARGET_WIN32
if (!g_ascii_strcasecmp ("user32.dll", new_scope) ||
!g_ascii_strcasecmp ("kernel32.dll", new_scope) ||
!g_ascii_strcasecmp ("user32", new_scope) ||
!g_ascii_strcasecmp ("kernel", new_scope)) {
file_name = g_strdup ("libMonoSupportW.so");
} else
#endif
continue;
#ifndef TARGET_WIN32
break;
#endif
}
if (!module) {
void *iter = NULL;
while ((full_name = mono_dl_build_path (NULL, file_name, &iter))) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport loading location: '%s'.", full_name);
module = cached_module_load (full_name, MONO_DL_LAZY, &error_msg);
if (!module) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport error loading library: '%s'.",
error_msg);
g_free (error_msg);
}
g_free (full_name);
if (module)
break;
}
}
if (!module) {
void *iter = NULL;
while ((full_name = mono_dl_build_path (".", file_name, &iter))) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport loading library: '%s'.", full_name);
module = cached_module_load (full_name, MONO_DL_LAZY, &error_msg);
if (!module) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport error loading library '%s'.",
error_msg);
g_free (error_msg);
}
g_free (full_name);
if (module)
break;
}
}
if (!module) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport loading: '%s'.", file_name);
module = cached_module_load (file_name, MONO_DL_LAZY, &error_msg);
if (!module) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport error loading library '%s'.",
error_msg);
}
}
g_free (file_name);
if (module)
break;
}
if (!module) {
mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_DLLIMPORT,
"DllImport unable to load library '%s'.",
error_msg);
g_free (error_msg);
if (exc_class) {
*exc_class = "DllNotFoundException";
*exc_arg = new_scope;
}
return NULL;
}
#ifdef TARGET_WIN32
if (import && import [0] == '#' && isdigit (import [1])) {
char *end;
long id;
id = strtol (import + 1, &end, 10);
if (id > 0 && *end == '\0')
import++;
}
#endif
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"Searching for '%s'.", import);
if (piinfo->piflags & PINVOKE_ATTRIBUTE_NO_MANGLE) {
error_msg = mono_dl_symbol (module, import, &piinfo->addr);
} else {
char *mangled_name = NULL, *mangled_name2 = NULL;
int mangle_charset;
int mangle_stdcall;
int mangle_param_count;
#ifdef TARGET_WIN32
int param_count;
#endif
/*
* Search using a variety of mangled names
*/
for (mangle_charset = 0; mangle_charset <= 1; mangle_charset ++) {
for (mangle_stdcall = 0; mangle_stdcall <= 1; mangle_stdcall ++) {
gboolean need_param_count = FALSE;
#ifdef TARGET_WIN32
if (mangle_stdcall > 0)
need_param_count = TRUE;
#endif
for (mangle_param_count = 0; mangle_param_count <= (need_param_count ? 256 : 0); mangle_param_count += 4) {
if (piinfo->addr)
continue;
mangled_name = (char*)import;
switch (piinfo->piflags & PINVOKE_ATTRIBUTE_CHAR_SET_MASK) {
case PINVOKE_ATTRIBUTE_CHAR_SET_UNICODE:
/* Try the mangled name first */
if (mangle_charset == 0)
mangled_name = g_strconcat (import, "W", NULL);
break;
case PINVOKE_ATTRIBUTE_CHAR_SET_AUTO:
#ifdef TARGET_WIN32
if (mangle_charset == 0)
mangled_name = g_strconcat (import, "W", NULL);
#else
/* Try the mangled name last */
if (mangle_charset == 1)
mangled_name = g_strconcat (import, "A", NULL);
#endif
break;
case PINVOKE_ATTRIBUTE_CHAR_SET_ANSI:
default:
/* Try the mangled name last */
if (mangle_charset == 1)
mangled_name = g_strconcat (import, "A", NULL);
break;
}
#ifdef TARGET_WIN32
if (mangle_param_count == 0)
param_count = mono_method_signature (method)->param_count * sizeof (gpointer);
else
/* Try brute force, since it would be very hard to compute the stack usage correctly */
param_count = mangle_param_count;
/* Try the stdcall mangled name */
/*
* gcc under windows creates mangled names without the underscore, but MS.NET
* doesn't support it, so we don't support it either.
*/
if (mangle_stdcall == 1)
mangled_name2 = g_strdup_printf ("_%s@%d", mangled_name, param_count);
else
mangled_name2 = mangled_name;
#else
mangled_name2 = mangled_name;
#endif
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"Probing '%s'.", mangled_name2);
error_msg = mono_dl_symbol (module, mangled_name2, &piinfo->addr);
if (piinfo->addr)
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"Found as '%s'.", mangled_name2);
if (mangled_name != mangled_name2)
g_free (mangled_name2);
if (mangled_name != import)
g_free (mangled_name);
}
}
}
}
if (!piinfo->addr) {
g_free (error_msg);
if (exc_class) {
*exc_class = "EntryPointNotFoundException";
*exc_arg = import;
}
return NULL;
}
return piinfo->addr;
}
|
Vulnerable
|
[] |
mono
|
8e890a3bf80a4620e417814dc14886b1bbd17625
|
3.207157867431878e+38
| 276 |
Search for dllimported shared libs in the base directory, not cwd.
* loader.c: we don't search the current directory anymore for shared
libraries referenced in DllImport attributes, as it has a slight
security risk. We search in the same directory where the referencing
image was loaded from, instead. Fixes bug# 641915.
| 1 |
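A hedged sketch of the behaviour the mono commit message describes: resolving a DllImport library relative to the directory of the referencing image instead of the current working directory. It uses only standard C and POSIX dirname(); the example paths and function name are assumptions for illustration, not mono's API.

```c
#include <libgen.h>   /* dirname */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "<dir of referencing image>/<libname>" rather than relying on cwd. */
char *build_candidate_path(const char *image_path, const char *libname)
{
    char *copy = strdup(image_path);      /* dirname may modify its argument */
    if (!copy)
        return NULL;
    const char *dir = dirname(copy);

    size_t len = strlen(dir) + 1 + strlen(libname) + 1;
    char *full = malloc(len);
    if (full)
        snprintf(full, len, "%s/%s", dir, libname);
    free(copy);
    return full;
}

int main(void)
{
    char *p = build_candidate_path("/opt/app/bin/App.exe", "libnative.so");
    printf("%s\n", p ? p : "(alloc failed)");   /* /opt/app/bin/libnative.so */
    free(p);
    return 0;
}
```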
ebb_ews_convert_dl_to_updatexml_cb (ESoapMessage *msg,
gpointer user_data,
GError **error)
{
ConvertData *cd = user_data;
EContact *old_contact = cd->old_contact;
EContact *new_contact = cd->new_contact;
gchar *change_key = NULL;
if (!cd->change_key) {
change_key = e_vcard_util_dup_x_attribute (E_VCARD (old_contact), X_EWS_CHANGEKEY);
if (!change_key)
change_key = e_contact_get (old_contact, E_CONTACT_REV);
}
e_ews_message_start_item_change (msg, E_EWS_ITEMCHANGE_TYPE_ITEM,
e_contact_get_const (old_contact, E_CONTACT_UID),
cd->change_key ? cd->change_key : change_key,
0);
e_ews_message_start_set_item_field (msg, "Members", "distributionlist", "DistributionList");
ebb_ews_write_dl_members (msg, new_contact);
e_ews_message_end_set_item_field (msg);
e_ews_message_end_item_change (msg);
g_free (change_key);
return TRUE;
}
|
Safe
|
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
|
1.4162741148116788e+37
| 28 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
| 0 |
__archive_read_filter_ahead(struct archive_read_filter *filter,
size_t min, ssize_t *avail)
{
ssize_t bytes_read;
size_t tocopy;
if (filter->fatal) {
if (avail)
*avail = ARCHIVE_FATAL;
return (NULL);
}
/*
* Keep pulling more data until we can satisfy the request.
*/
for (;;) {
/*
* If we can satisfy from the copy buffer (and the
* copy buffer isn't empty), we're done. In particular,
* note that min == 0 is a perfectly well-defined
* request.
*/
if (filter->avail >= min && filter->avail > 0) {
if (avail != NULL)
*avail = filter->avail;
return (filter->next);
}
/*
* We can satisfy directly from client buffer if everything
* currently in the copy buffer is still in the client buffer.
*/
if (filter->client_total >= filter->client_avail + filter->avail
&& filter->client_avail + filter->avail >= min) {
/* "Roll back" to client buffer. */
filter->client_avail += filter->avail;
filter->client_next -= filter->avail;
/* Copy buffer is now empty. */
filter->avail = 0;
filter->next = filter->buffer;
/* Return data from client buffer. */
if (avail != NULL)
*avail = filter->client_avail;
return (filter->client_next);
}
/* Move data forward in copy buffer if necessary. */
if (filter->next > filter->buffer &&
filter->next + min > filter->buffer + filter->buffer_size) {
if (filter->avail > 0)
memmove(filter->buffer, filter->next,
filter->avail);
filter->next = filter->buffer;
}
/* If we've used up the client data, get more. */
if (filter->client_avail <= 0) {
if (filter->end_of_file) {
if (avail != NULL)
*avail = 0;
return (NULL);
}
bytes_read = (filter->read)(filter,
&filter->client_buff);
if (bytes_read < 0) { /* Read error. */
filter->client_total = filter->client_avail = 0;
filter->client_next =
filter->client_buff = NULL;
filter->fatal = 1;
if (avail != NULL)
*avail = ARCHIVE_FATAL;
return (NULL);
}
if (bytes_read == 0) {
/* Check for another client object first */
if (filter->archive->client.cursor !=
filter->archive->client.nodes - 1) {
if (client_switch_proxy(filter,
filter->archive->client.cursor + 1)
== ARCHIVE_OK)
continue;
}
/* Premature end-of-file. */
filter->client_total = filter->client_avail = 0;
filter->client_next =
filter->client_buff = NULL;
filter->end_of_file = 1;
/* Return whatever we do have. */
if (avail != NULL)
*avail = filter->avail;
return (NULL);
}
filter->client_total = bytes_read;
filter->client_avail = filter->client_total;
filter->client_next = filter->client_buff;
} else {
/*
* We can't satisfy the request from the copy
* buffer or the existing client data, so we
* need to copy more client data over to the
* copy buffer.
*/
/* Ensure the buffer is big enough. */
if (min > filter->buffer_size) {
size_t s, t;
char *p;
/* Double the buffer; watch for overflow. */
s = t = filter->buffer_size;
if (s == 0)
s = min;
while (s < min) {
t *= 2;
if (t <= s) { /* Integer overflow! */
archive_set_error(
&filter->archive->archive,
ENOMEM,
"Unable to allocate copy"
" buffer");
filter->fatal = 1;
if (avail != NULL)
*avail = ARCHIVE_FATAL;
return (NULL);
}
s = t;
}
/* Now s >= min, so allocate a new buffer. */
p = (char *)malloc(s);
if (p == NULL) {
archive_set_error(
&filter->archive->archive,
ENOMEM,
"Unable to allocate copy buffer");
filter->fatal = 1;
if (avail != NULL)
*avail = ARCHIVE_FATAL;
return (NULL);
}
/* Move data into newly-enlarged buffer. */
if (filter->avail > 0)
memmove(p, filter->next, filter->avail);
free(filter->buffer);
filter->next = filter->buffer = p;
filter->buffer_size = s;
}
/* We can add client data to copy buffer. */
/* First estimate: copy to fill rest of buffer. */
tocopy = (filter->buffer + filter->buffer_size)
- (filter->next + filter->avail);
/* Don't waste time buffering more than we need to. */
if (tocopy + filter->avail > min)
tocopy = min - filter->avail;
/* Don't copy more than is available. */
if (tocopy > filter->client_avail)
tocopy = filter->client_avail;
memcpy(filter->next + filter->avail,
filter->client_next, tocopy);
/* Remove this data from client buffer. */
filter->client_next += tocopy;
filter->client_avail -= tocopy;
/* add it to copy buffer. */
filter->avail += tocopy;
}
}
}
|
Safe
|
[
"CWE-125"
] |
libarchive
|
e6c9668f3202215ddb71617b41c19b6f05acf008
|
1.4711819952001834e+38
| 169 |
Add a check to archive_read_filter_consume to reject any
attempts to move the file pointer by a negative amount.
Note: Either this or commit 3865cf2 provides a fix for
Issue 394.
| 0 |
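A tiny sketch of the guard the libarchive commit message describes, assuming a hypothetical wrapper around a consume routine; the wrapper name and function-pointer shape are illustrative only.

```c
#include <sys/types.h>   /* ssize_t */

#define ARCHIVE_FATAL (-30)   /* mirrors libarchive's error convention */

/* Refuse to move the read pointer backwards: a negative request is a
 * caller bug (or corrupted offset arithmetic) and must not reach the filter. */
ssize_t checked_consume(ssize_t (*consume)(void *filter, ssize_t request),
                        void *filter, ssize_t request)
{
    if (request < 0)
        return ARCHIVE_FATAL;
    return consume(filter, request);
}
```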
void CoreUserInputHandler::handleDelkey(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.isEmpty() && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
if (parms.isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname,
tr("[usage] /delkey <nick|channel> deletes the encryption key for nick or channel or just /delkey when in a channel or query."));
return;
}
QString target = parms.at(0);
if (network()->cipherKey(target).isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("No key has been set for %1.").arg(target));
return;
}
network()->setCipherKey(target, QByteArray());
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("The key for %1 has been deleted.").arg(target));
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA2) library. "
"Contact your distributor about a Quassel package with QCA2 "
"support, or rebuild Quassel with QCA2 present."));
#endif
}
|
Safe
|
[
"CWE-399"
] |
quassel
|
b5e38970ffd55e2dd9f706ce75af9a8d7730b1b8
|
7.419180833117458e+37
| 41 |
Improve the message-splitting algorithm for PRIVMSG and CTCP
This introduces a new message splitting algorithm based on
QTextBoundaryFinder. It works by first starting with the entire
message to be sent, encoding it, and checking to see if it is over
the maximum message length. If it is, it uses QTBF to find the
word boundary most immediately preceding the maximum length. If no
suitable boundary can be found, it falls back to searching for
grapheme boundaries. It repeats this process until the entire
message has been sent.
Unlike what it replaces, the new splitting code is not recursive
and cannot cause stack overflows. Additionally, if it is unable
to split a string, it will give up gracefully and not crash the
core or cause a thread to run away.
This patch fixes two bugs. The first is garbage characters caused
by accidentally splitting the string in the middle of a multibyte
character. Since the new code splits at a character level instead
of a byte level, this will no longer be an issue. The second is
the core crash caused by sending an overlength CTCP query ("/me")
containing only multibyte characters. This bug was caused by the
old CTCP splitter using the byte index from lastParamOverrun() as
a character index for a QString.
| 0 |
FILE *Sys_Mkfifo( const char *ospath )
{
return NULL;
}
|
Safe
|
[
"CWE-59"
] |
ioq3
|
b5acc31a4da72cc3a4a6d88facb15b6214d745c6
|
3.3580703006993663e+37
| 4 |
CVE-2012-3345
| 0 |
static inline struct kmem_cache_order_objects oo_make(unsigned int order,
unsigned int size)
{
struct kmem_cache_order_objects x = {
(order << OO_SHIFT) + order_objects(order, size)
};
return x;
}
|
Safe
|
[] |
linux
|
fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8
|
1.6880681973007764e+38
| 9 |
mm: slub: add missing TID bump in kmem_cache_alloc_bulk()
When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu
freelist of length M, and N > M > 0, it will first remove the M elements
from the percpu freelist, then call ___slab_alloc() to allocate the next
element and repopulate the percpu freelist. ___slab_alloc() can re-enable
IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc()
to properly commit the freelist head change.
Fix it by unconditionally bumping c->tid when entering the slowpath.
Cc: stable@vger.kernel.org
Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
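A simplified, user-space illustration of the invariant the slub fix restores, not the actual kernel code: the per-cpu transaction id must be bumped before any path that can re-enable interrupts, so a concurrent fast path that cached an older tid fails its check and retries with fresh state. All names here are assumptions.

```c
#include <stdatomic.h>
#include <stdbool.h>

struct percpu_slab {
    _Atomic unsigned long tid;   /* bumped whenever the freelist may change owner */
    void *freelist;
};

/* Before doing slow-path work that may enable IRQs or migrate CPUs,
 * bump tid so any fast path using a stale snapshot is forced to retry. */
void enter_slowpath(struct percpu_slab *c)
{
    atomic_fetch_add(&c->tid, 1);   /* stands in for next_tid(c->tid) */
    /* ... ___slab_alloc()-like work happens here ... */
}

bool fastpath_alloc(struct percpu_slab *c, unsigned long seen_tid, void **out)
{
    if (atomic_load(&c->tid) != seen_tid)
        return false;               /* state changed underneath us: retry */
    *out = c->freelist;
    return true;
}
```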
const char* oidc_util_hdr_in_x_forwarded_host_get(const request_rec *r) {
return oidc_util_hdr_in_get_left_most_only(r,
OIDC_HTTP_HDR_X_FORWARDED_HOST, OIDC_STR_COMMA OIDC_STR_SPACE);
}
|
Safe
|
[
"CWE-79"
] |
mod_auth_openidc
|
55ea0a085290cd2c8cdfdd960a230cbc38ba8b56
|
1.024708893873884e+38
| 4 |
Add a function to escape Javascript characters
| 0 |
resize_screen(void)
{
need_resize_screen = FALSE;
setlinescols();
setupscreen();
if (CurrentTab)
displayBuffer(Currentbuf, B_FORCE_REDRAW);
}
|
Safe
|
[
"CWE-59",
"CWE-241"
] |
w3m
|
18dcbadf2771cdb0c18509b14e4e73505b242753
|
3.6953186618291857e+37
| 8 |
Make temporary directory safely when ~/.w3m is unwritable
| 0 |
static void cmd_window_right(void)
{
int refnum;
refnum = window_refnum_right(active_win->refnum, TRUE);
if (refnum != -1)
window_set_active(window_find_refnum(refnum));
}
|
Safe
|
[
"CWE-476"
] |
irssi
|
5b5bfef03596d95079c728f65f523570dd7b03aa
|
3.076617991681733e+38
| 8 |
check the error condition of mainwindow_create
| 0 |
_copyVariableShowStmt(const VariableShowStmt *from)
{
VariableShowStmt *newnode = makeNode(VariableShowStmt);
COPY_STRING_FIELD(name);
return newnode;
}
|
Safe
|
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
|
2.603459073442544e+38
| 8 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
| 0 |
static void worker_enter_idle(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
WARN_ON_ONCE(!list_empty(&worker->entry) &&
(worker->hentry.next || worker->hentry.pprev)))
return;
/* can't use worker_set_flags(), also called from create_worker() */
worker->flags |= WORKER_IDLE;
pool->nr_idle++;
worker->last_active = jiffies;
/* idle_list is LIFO */
list_add(&worker->entry, &pool->idle_list);
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
/*
* Sanity check nr_running. Because wq_unbind_fn() releases
* pool->lock between setting %WORKER_UNBOUND and zapping
* nr_running, the warning may trigger spuriously. Check iff
* unbind is not in progress.
*/
WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
pool->nr_workers == pool->nr_idle &&
atomic_read(&pool->nr_running));
}
|
Safe
|
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
|
2.977214946145573e+38
| 30 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: linux-doc@vger.kernel.org
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Xing Gao <xgao01@email.wm.edu>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Jessica Frazelle <me@jessfraz.com>
Cc: kernel-hardening@lists.openwall.com
Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Michal Marek <mmarek@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Olof Johansson <olof@lixom.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-api@vger.kernel.org
Cc: Arjan van de Ven <arjan@linux.intel.com>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
| 0 |
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
m->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
2d6fbfe733f35c6b355c216644e08e149c61b271
|
1.4625266145045784e+38
| 37 |
caif: Fix missing msg_namelen update in caif_seqpkt_recvmsg()
The current code does not fill the msg_name member in case it is set.
It also does not set the msg_namelen member to 0 and therefore makes
net/socket.c leak the local, uninitialized sockaddr_storage variable
to userland -- 128 bytes of kernel stack memory.
Fix that by simply setting msg_namelen to 0 as obviously nobody cared
about caif_seqpkt_recvmsg() not filling the msg_name in case it was
set.
Cc: Sjur Braendeland <sjur.brandeland@stericsson.com>
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static void delayed_mntput(struct work_struct *unused)
{
struct llist_node *node = llist_del_all(&delayed_mntput_list);
struct llist_node *next;
for (; node; node = next) {
next = llist_next(node);
cleanup_mnt(llist_entry(node, struct mount, mnt_llist));
}
}
|
Safe
|
[
"CWE-703"
] |
linux
|
cd4a40174b71acd021877341684d8bb1dc8ea4ae
|
1.066311087554215e+38
| 10 |
mnt: Fail collect_mounts when applied to unmounted mounts
The only users of collect_mounts are in audit_tree.c
In audit_trim_trees and audit_add_tree_rule the path passed into
collect_mounts is generated from kern_path passed an audit_tree
pathname which is guaranteed to be an absolute path. In those cases
collect_mounts is obviously intended to work on mounted paths and
if a race results in paths that are unmounted when collect_mounts
runs, it is reasonable to fail early.
The paths passed into audit_tag_tree don't have the absolute path
check, but they are used to play with fsnotify and otherwise interact with
the audit_trees, so again operating only on mounted paths appears
reasonable.
Avoid having to worry about what happens when we try and audit
unmounted filesystems by restricting collect_mounts to mounts
that appear in the mount tree.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
| 0 |
evdev_read_fuzz_prop(struct evdev_device *device, unsigned int code)
{
const char *prop;
char name[32];
int rc;
int fuzz = 0;
const struct input_absinfo *abs;
rc = snprintf(name, sizeof(name), "LIBINPUT_FUZZ_%02x", code);
if (rc == -1)
return 0;
prop = udev_device_get_property_value(device->udev_device, name);
if (prop && (safe_atoi(prop, &fuzz) == false || fuzz < 0)) {
evdev_log_bug_libinput(device,
"invalid LIBINPUT_FUZZ property value: %s\n",
prop);
return 0;
}
/* The udev callout should have set the kernel fuzz to zero.
* If the kernel fuzz is nonzero, something has gone wrong there, so
* let's complain but still use a fuzz of zero for our view of the
* device. Otherwise, the kernel will use the nonzero fuzz, we then
* use the same fuzz on top of the pre-fuzzed data and that leads to
* unresponsive behaviur.
*/
abs = libevdev_get_abs_info(device->evdev, code);
if (!abs || abs->fuzz == 0)
return fuzz;
if (prop) {
evdev_log_bug_libinput(device,
"kernel fuzz of %d even with LIBINPUT_FUZZ_%02x present\n",
abs->fuzz,
code);
} else {
evdev_log_bug_libinput(device,
"kernel fuzz of %d but LIBINPUT_FUZZ_%02x is missing\n",
abs->fuzz,
code);
}
return 0;
}
|
Safe
|
[
"CWE-134"
] |
libinput
|
a423d7d3269dc32a87384f79e29bb5ac021c83d1
|
3.0303604596719326e+38
| 45 |
evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
| 0 |
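A hedged sketch of the sanitization approach the libinput commit describes: duplicating every '%' in an untrusted device name before it is ever embedded in a printf-style format string. The function name is illustrative, not libinput's.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of `name` with every '%' doubled, so the
 * result is safe to splice into a printf-style format string. */
char *strdup_escape_percent(const char *name)
{
    size_t extra = 0;
    for (const char *p = name; *p; p++)
        if (*p == '%')
            extra++;

    char *out = malloc(strlen(name) + extra + 1);
    if (!out)
        return NULL;

    char *o = out;
    for (const char *p = name; *p; p++) {
        *o++ = *p;
        if (*p == '%')
            *o++ = '%';
    }
    *o = '\0';
    return out;
}

int main(void)
{
    char *safe = strdup_escape_percent("Foo%sBar");
    /* Deliberately use the sanitized string as a format: it now prints
     * literally as "Foo%sBar" instead of consuming missing arguments. */
    if (safe)
        printf(safe);
    printf("\n");
    free(safe);
    return 0;
}
```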
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
int reset, int enable)
{
return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}
|
Safe
|
[
"CWE-703"
] |
linux
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
|
1.4416348860866273e+38
| 5 |
tracing: Fix possible NULL pointer dereferences
Currently set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However seq_open() is called only for FMODE_READ in
the fops->open() so that if an user tries to seek one of those file
when she open it for writing, it sees NULL seq_file and then panic.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(, "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/1365663302-2170-1-git-send-email-namhyung@kernel.org
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: stable@vger.kernel.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
| 0 |
static int avi_add_odml_index_entry_core(avi_t *AVI, int flags, u64 pos, unsigned int len, avistdindex_chunk *si)
{
u32 cur_chunk_idx;
// put new chunk into index
si->nEntriesInUse++;
cur_chunk_idx = si->nEntriesInUse-1;
// need to fetch more memory
if (cur_chunk_idx >= si->dwSize) {
si->dwSize += 4096;
si->aIndex = (avistdindex_entry *)gf_realloc ( si->aIndex, si->dwSize * sizeof (u32) * si->wLongsPerEntry);
}
if(len>AVI->max_len) AVI->max_len=len;
// if bit 31 is set, it is NOT a keyframe
if (flags != 0x10) {
len |= 0x80000000;
}
si->aIndex [ cur_chunk_idx ].dwSize = len;
si->aIndex [ cur_chunk_idx ].dwOffset = (u32) (pos - si->qwBaseOffset + 8);
//GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[avilib] ODML: POS: 0x%lX\n", si->aIndex [ cur_chunk_idx ].dwOffset));
return 0;
}
|
Safe
|
[
"CWE-835"
] |
gpac
|
7f060bbb72966cae80d6fee338d0b07fa3fc06e1
|
2.2475929171510703e+38
| 27 |
fixed #2159
| 0 |
AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
{
XLogCtlInsert *Insert = &XLogCtl->Insert;
int nextidx;
XLogRecPtr OldPageRqstPtr;
XLogwrtRqst WriteRqst;
XLogRecPtr NewPageEndPtr = InvalidXLogRecPtr;
XLogRecPtr NewPageBeginPtr;
XLogPageHeader NewPage;
int npages = 0;
LWLockAcquire(WALBufMappingLock, LW_EXCLUSIVE);
/*
* Now that we have the lock, check if someone initialized the page
* already.
*/
while (upto >= XLogCtl->InitializedUpTo || opportunistic)
{
nextidx = XLogRecPtrToBufIdx(XLogCtl->InitializedUpTo);
/*
* Get ending-offset of the buffer page we need to replace (this may
* be zero if the buffer hasn't been used yet). Fall through if it's
* already written out.
*/
OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
if (LogwrtResult.Write < OldPageRqstPtr)
{
/*
* Nope, got work to do. If we just want to pre-initialize as much
* as we can without flushing, give up now.
*/
if (opportunistic)
break;
/* Before waiting, get info_lck and update LogwrtResult */
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
if (xlogctl->LogwrtRqst.Write < OldPageRqstPtr)
xlogctl->LogwrtRqst.Write = OldPageRqstPtr;
LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease(&xlogctl->info_lck);
}
/*
* Now that we have an up-to-date LogwrtResult value, see if we
* still need to write it or if someone else already did.
*/
if (LogwrtResult.Write < OldPageRqstPtr)
{
/*
* Must acquire write lock. Release WALBufMappingLock first,
* to make sure that all insertions that we need to wait for
* can finish (up to this same position). Otherwise we risk
* deadlock.
*/
LWLockRelease(WALBufMappingLock);
WaitXLogInsertionsToFinish(OldPageRqstPtr);
LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
LogwrtResult = XLogCtl->LogwrtResult;
if (LogwrtResult.Write >= OldPageRqstPtr)
{
/* OK, someone wrote it already */
LWLockRelease(WALWriteLock);
}
else
{
/* Have to write it ourselves */
TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_START();
WriteRqst.Write = OldPageRqstPtr;
WriteRqst.Flush = 0;
XLogWrite(WriteRqst, false);
LWLockRelease(WALWriteLock);
TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE();
}
/* Re-acquire WALBufMappingLock and retry */
LWLockAcquire(WALBufMappingLock, LW_EXCLUSIVE);
continue;
}
}
/*
* Now the next buffer slot is free and we can set it up to be the next
* output page.
*/
NewPageBeginPtr = XLogCtl->InitializedUpTo;
NewPageEndPtr = NewPageBeginPtr + XLOG_BLCKSZ;
Assert(XLogRecPtrToBufIdx(NewPageBeginPtr) == nextidx);
NewPage = (XLogPageHeader) (XLogCtl->pages + nextidx * (Size) XLOG_BLCKSZ);
/*
* Be sure to re-zero the buffer so that bytes beyond what we've
* written will look like zeroes and not valid XLOG records...
*/
MemSet((char *) NewPage, 0, XLOG_BLCKSZ);
/*
* Fill the new page's header
*/
NewPage ->xlp_magic = XLOG_PAGE_MAGIC;
/* NewPage->xlp_info = 0; */ /* done by memset */
NewPage ->xlp_tli = ThisTimeLineID;
NewPage ->xlp_pageaddr = NewPageBeginPtr;
/* NewPage->xlp_rem_len = 0; */ /* done by memset */
/*
* If online backup is not in progress, mark the header to indicate
* that WAL records beginning in this page have removable backup
* blocks. This allows the WAL archiver to know whether it is safe to
* compress archived WAL data by transforming full-block records into
* the non-full-block format. It is sufficient to record this at the
* page level because we force a page switch (in fact a segment switch)
* when starting a backup, so the flag will be off before any records
* can be written during the backup. At the end of a backup, the last
* page will be marked as all unsafe when perhaps only part is unsafe,
* but at worst the archiver would miss the opportunity to compress a
* few records.
*/
if (!Insert->forcePageWrites)
NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
/*
* If first page of an XLOG segment file, make it a long header.
*/
if ((NewPage->xlp_pageaddr % XLogSegSize) == 0)
{
XLogLongPageHeader NewLongPage = (XLogLongPageHeader) NewPage;
NewLongPage->xlp_sysid = ControlFile->system_identifier;
NewLongPage->xlp_seg_size = XLogSegSize;
NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
NewPage ->xlp_info |= XLP_LONG_HEADER;
}
/*
* Make sure the initialization of the page becomes visible to others
* before the xlblocks update. GetXLogBuffer() reads xlblocks without
* holding a lock.
*/
pg_write_barrier();
*((volatile XLogRecPtr *) &XLogCtl->xlblocks[nextidx]) = NewPageEndPtr;
XLogCtl->InitializedUpTo = NewPageEndPtr;
npages++;
}
LWLockRelease(WALBufMappingLock);
#ifdef WAL_DEBUG
if (npages > 0)
{
elog(DEBUG1, "initialized %d pages, upto %X/%X",
npages, (uint32) (NewPageEndPtr >> 32), (uint32) NewPageEndPtr);
}
#endif
}
|
Safe
|
[
"CWE-119"
] |
postgres
|
01824385aead50e557ca1af28640460fa9877d51
|
3.4005064663729032e+38
| 167 |
Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()
| 0 |
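A small sketch of the pattern the PostgreSQL commit describes: replacing an unchecked copy into a fixed-size buffer with a bounded one. It uses snprintf rather than strlcpy since strlcpy is not universally available; the buffer size constant is an illustrative assumption.

```c
#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64   /* illustrative fixed buffer size */

/* Unsafe pattern: strcpy(dest, src) overruns dest if src is too long.
 * Bounded alternative: always NUL-terminates, never writes past dest. */
void copy_name(char dest[NAMEDATALEN], const char *src)
{
    snprintf(dest, NAMEDATALEN, "%s", src);
}

int main(void)
{
    char buf[NAMEDATALEN];
    copy_name(buf, "a_very_long_identifier_that_might_not_fit_in_the_fixed_buffer_at_all");
    printf("%zu bytes kept: %s\n", strlen(buf), buf);  /* truncated, not overrun */
    return 0;
}
```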
int VvcUnit::deserialize()
{
m_reader.setBuffer(m_nalBuffer, m_nalBuffer + m_nalBufferLen);
try
{
m_reader.skipBits(2); // forbidden_zero_bit, nuh_reserved_zero_bit
nuh_layer_id = m_reader.getBits(6);
nal_unit_type = m_reader.getBits(5);
nuh_temporal_id_plus1 = m_reader.getBits(3);
if (nuh_temporal_id_plus1 == 0 || (nuh_temporal_id_plus1 != 1 && ((nal_unit_type >= 7 && nal_unit_type <= 15) ||
nal_unit_type == 21 || nal_unit_type == 22)))
return 1;
return 0;
}
catch (BitStreamException& e)
{
return NOT_ENOUGH_BUFFER;
}
}
|
Safe
|
[
"CWE-22"
] |
tsMuxer
|
3763dd34755a8944d903aa19578fa22cd3734165
|
3.293310883242999e+38
| 19 |
Fix Buffer Overflow
Fixes issue #509.
| 0 |
gdImageScaleBicubicFixed(gdImagePtr src, const unsigned int width,
const unsigned int height)
{
const long new_width = MAX(1, width);
const long new_height = MAX(1, height);
const int src_w = gdImageSX(src);
const int src_h = gdImageSY(src);
const gdFixed f_dx = gd_ftofx((float)src_w / (float)new_width);
const gdFixed f_dy = gd_ftofx((float)src_h / (float)new_height);
const gdFixed f_1 = gd_itofx(1);
const gdFixed f_2 = gd_itofx(2);
const gdFixed f_4 = gd_itofx(4);
const gdFixed f_6 = gd_itofx(6);
const gdFixed f_gamma = gd_ftofx(1.04f);
gdImagePtr dst;
unsigned int dst_offset_x;
unsigned int dst_offset_y = 0;
long i;
/* impact perf a bit, but not that much. Implementation for palette
images can be done at a later point.
*/
if (src->trueColor == 0) {
gdImagePaletteToTrueColor(src);
}
dst = gdImageCreateTrueColor(new_width, new_height);
if (!dst) {
return NULL;
}
dst->saveAlphaFlag = 1;
for (i=0; i < new_height; i++) {
long j;
dst_offset_x = 0;
for (j=0; j < new_width; j++) {
const gdFixed f_a = gd_mulfx(gd_itofx(i), f_dy);
const gdFixed f_b = gd_mulfx(gd_itofx(j), f_dx);
const long m = gd_fxtoi(f_a);
const long n = gd_fxtoi(f_b);
const gdFixed f_f = f_a - gd_itofx(m);
const gdFixed f_g = f_b - gd_itofx(n);
unsigned int src_offset_x[16], src_offset_y[16];
long k;
register gdFixed f_red = 0, f_green = 0, f_blue = 0, f_alpha = 0;
unsigned char red, green, blue, alpha = 0;
int *dst_row = dst->tpixels[dst_offset_y];
if ((m < 1) || (n < 1)) {
src_offset_x[0] = n;
src_offset_y[0] = m;
} else {
src_offset_x[0] = n - 1;
src_offset_y[0] = m;
}
src_offset_x[1] = n;
src_offset_y[1] = m;
if ((m < 1) || (n >= src_w - 1)) {
src_offset_x[2] = n;
src_offset_y[2] = m;
} else {
src_offset_x[2] = n + 1;
src_offset_y[2] = m;
}
if ((m < 1) || (n >= src_w - 2)) {
src_offset_x[3] = n;
src_offset_y[3] = m;
} else {
src_offset_x[3] = n + 1 + 1;
src_offset_y[3] = m;
}
if (n < 1) {
src_offset_x[4] = n;
src_offset_y[4] = m;
} else {
src_offset_x[4] = n - 1;
src_offset_y[4] = m;
}
src_offset_x[5] = n;
src_offset_y[5] = m;
if (n >= src_w-1) {
src_offset_x[6] = n;
src_offset_y[6] = m;
} else {
src_offset_x[6] = n + 1;
src_offset_y[6] = m;
}
if (n >= src_w - 2) {
src_offset_x[7] = n;
src_offset_y[7] = m;
} else {
src_offset_x[7] = n + 1 + 1;
src_offset_y[7] = m;
}
if ((m >= src_h - 1) || (n < 1)) {
src_offset_x[8] = n;
src_offset_y[8] = m;
} else {
src_offset_x[8] = n - 1;
src_offset_y[8] = m;
}
src_offset_x[9] = n;
src_offset_y[9] = m;
if ((m >= src_h-1) || (n >= src_w-1)) {
src_offset_x[10] = n;
src_offset_y[10] = m;
} else {
src_offset_x[10] = n + 1;
src_offset_y[10] = m;
}
if ((m >= src_h - 1) || (n >= src_w - 2)) {
src_offset_x[11] = n;
src_offset_y[11] = m;
} else {
src_offset_x[11] = n + 1 + 1;
src_offset_y[11] = m;
}
if ((m >= src_h - 2) || (n < 1)) {
src_offset_x[12] = n;
src_offset_y[12] = m;
} else {
src_offset_x[12] = n - 1;
src_offset_y[12] = m;
}
if (!(m >= src_h - 2)) {
src_offset_x[13] = n;
src_offset_y[13] = m;
}
if ((m >= src_h - 2) || (n >= src_w - 1)) {
src_offset_x[14] = n;
src_offset_y[14] = m;
} else {
src_offset_x[14] = n + 1;
src_offset_y[14] = m;
}
if ((m >= src_h - 2) || (n >= src_w - 2)) {
src_offset_x[15] = n;
src_offset_y[15] = m;
} else {
src_offset_x[15] = n + 1 + 1;
src_offset_y[15] = m;
}
for (k = -1; k < 3; k++) {
const gdFixed f = gd_itofx(k)-f_f;
const gdFixed f_fm1 = f - f_1;
const gdFixed f_fp1 = f + f_1;
const gdFixed f_fp2 = f + f_2;
register gdFixed f_a = 0, f_b = 0, f_d = 0, f_c = 0;
register gdFixed f_RY;
int l;
if (f_fp2 > 0) f_a = gd_mulfx(f_fp2, gd_mulfx(f_fp2,f_fp2));
if (f_fp1 > 0) f_b = gd_mulfx(f_fp1, gd_mulfx(f_fp1,f_fp1));
if (f > 0) f_c = gd_mulfx(f, gd_mulfx(f,f));
if (f_fm1 > 0) f_d = gd_mulfx(f_fm1, gd_mulfx(f_fm1,f_fm1));
f_RY = gd_divfx((f_a - gd_mulfx(f_4,f_b) + gd_mulfx(f_6,f_c) - gd_mulfx(f_4,f_d)),f_6);
for (l = -1; l < 3; l++) {
const gdFixed f = gd_itofx(l) - f_g;
const gdFixed f_fm1 = f - f_1;
const gdFixed f_fp1 = f + f_1;
const gdFixed f_fp2 = f + f_2;
register gdFixed f_a = 0, f_b = 0, f_c = 0, f_d = 0;
register gdFixed f_RX, f_R, f_rs, f_gs, f_bs, f_ba;
register int c;
const int _k = ((k+1)*4) + (l+1);
if (f_fp2 > 0) f_a = gd_mulfx(f_fp2,gd_mulfx(f_fp2,f_fp2));
if (f_fp1 > 0) f_b = gd_mulfx(f_fp1,gd_mulfx(f_fp1,f_fp1));
if (f > 0) f_c = gd_mulfx(f,gd_mulfx(f,f));
if (f_fm1 > 0) f_d = gd_mulfx(f_fm1,gd_mulfx(f_fm1,f_fm1));
f_RX = gd_divfx((f_a-gd_mulfx(f_4,f_b)+gd_mulfx(f_6,f_c)-gd_mulfx(f_4,f_d)),f_6);
f_R = gd_mulfx(f_RY,f_RX);
c = src->tpixels[*(src_offset_y + _k)][*(src_offset_x + _k)];
f_rs = gd_itofx(gdTrueColorGetRed(c));
f_gs = gd_itofx(gdTrueColorGetGreen(c));
f_bs = gd_itofx(gdTrueColorGetBlue(c));
f_ba = gd_itofx(gdTrueColorGetAlpha(c));
f_red += gd_mulfx(f_rs,f_R);
f_green += gd_mulfx(f_gs,f_R);
f_blue += gd_mulfx(f_bs,f_R);
f_alpha += gd_mulfx(f_ba,f_R);
}
}
red = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_red, f_gamma)), 0, 255);
green = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_green, f_gamma)), 0, 255);
blue = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_blue, f_gamma)), 0, 255);
alpha = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_alpha, f_gamma)), 0, 127);
*(dst_row + dst_offset_x) = gdTrueColorAlpha(red, green, blue, alpha);
dst_offset_x++;
}
dst_offset_y++;
}
return dst;
}
|
Safe
|
[
"CWE-125",
"CWE-191"
] |
libgd
|
60bfb401ad5a4a8ae995dcd36372fe15c71e1a35
|
9.356976143949519e+37
| 223 |
Fix potential unsigned underflow
No need to decrease `u`, so we don't do it. While we're at it, we also factor
out the overflow check of the loop, which improves performance and readability.
This issue has been reported by Stefan Esser to security@libgd.org.
| 0 |
void jpc_mqdec_setctx(jpc_mqdec_t *mqdec, int ctxno, jpc_mqctx_t *ctx)
{
jpc_mqstate_t **ctxi;
ctxi = &mqdec->ctxs[ctxno];
*ctxi = &jpc_mqstates[2 * ctx->ind + ctx->mps];
}
|
Safe
|
[
"CWE-189"
] |
jasper
|
3c55b399c36ef46befcb21e4ebc4799367f89684
|
2.7706141768914383e+38
| 6 |
At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems.
| 0 |
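A hedged sketch of an overflow-checked two-factor allocator in the spirit of the jas_alloc2-style helpers the commit message mentions; this is not JasPer's implementation, just the general technique of validating the multiplication before it is handed to malloc.

```c
#include <stdint.h>
#include <stdlib.h>

/* malloc(n * size), but fail cleanly if the multiplication would wrap
 * and silently produce an under-sized buffer. */
void *alloc2_checked(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;            /* would overflow: refuse instead of under-allocating */
    return malloc(n * size);
}
```

Callers then treat a NULL return the same way they treat an ordinary allocation failure, so the overflow case never reaches code that indexes the buffer.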
static int opficom(RAsm *a, ut8 *data, const Opcode *op) {
int l = 0;
switch (op->operands_count) {
case 1:
if ( op->operands[0].type & OT_MEMORY ) {
if ( op->operands[0].type & OT_WORD ) {
data[l++] = 0xde;
data[l++] = 0x10 | op->operands[0].regs[0];
} else if ( op->operands[0].type & OT_DWORD ) {
data[l++] = 0xda;
data[l++] = 0x10 | op->operands[0].regs[0];
} else {
return -1;
}
} else {
return -1;
}
break;
default:
return -1;
}
return l;
}
|
Safe
|
[
"CWE-119",
"CWE-125",
"CWE-787"
] |
radare2
|
9b46d38dd3c4de6048a488b655c7319f845af185
|
2.556938631074313e+38
| 23 |
Fix #12372 and #12373 - Crash in x86 assembler (#12380)
0 ,0,[bP-bL-bP-bL-bL-r-bL-bP-bL-bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
leA ,0,[bP-bL-bL-bP-bL-bP-bL-60@bL-
leA ,0,[bP-bL-r-bP-bL-bP-bL-60@bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
| 0 |
static int test_configuration(bool setting_SaveFullCore, bool setting_CreateCoreBacktrace)
{
if (!setting_SaveFullCore && !setting_CreateCoreBacktrace)
{
fprintf(stderr, "Both SaveFullCore and CreateCoreBacktrace are disabled - "
"at least one of them is needed for useful report.\n");
return 1;
}
return 0;
}
|
Safe
|
[
"CWE-362"
] |
abrt
|
a6cdfd6a16251447264d203e145624a96fa811e3
|
1.3967374739103837e+38
| 11 |
ccpp: add support for containers
A process is considered to be containerized when:
- has the 'container' env variable set to some value
- or has the 'container_uuid' env variable set to some value
- or has remounted /
Signed-off-by: Jakub Filak <jfilak@redhat.com>
| 0 |
inline void StreamResource::EmitAfterShutdown(ShutdownWrap* w, int status) {
#ifdef DEBUG
v8::SealHandleScope handle_scope(v8::Isolate::GetCurrent());
#endif
listener_->OnStreamAfterShutdown(w, status);
}
|
Safe
|
[
"CWE-416"
] |
node
|
7f178663ebffc82c9f8a5a1b6bf2da0c263a30ed
|
2.9206543885124538e+38
| 6 |
src: use unique_ptr for WriteWrap
This commit attempts to avoid a use-after-free error by using unqiue_ptr
and passing a reference to it.
CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
PR-URL: https://github.com/nodejs-private/node-private/pull/238
Reviewed-By: Michael Dawson <midawson@redhat.com>
Reviewed-By: Tobias Nießen <tniessen@tnie.de>
Reviewed-By: Richard Lau <rlau@redhat.com>
| 0 |
xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
{
struct xfs_attr_leaf_entry *entries;
xfs_attr_leaf_name_local_t *name_loc;
xfs_attr_leaf_name_remote_t *name_rmt;
int size;
entries = xfs_attr3_leaf_entryp(leaf);
if (entries[index].flags & XFS_ATTR_LOCAL) {
name_loc = xfs_attr3_leaf_name_local(leaf, index);
size = xfs_attr_leaf_entsize_local(name_loc->namelen,
be16_to_cpu(name_loc->valuelen));
} else {
name_rmt = xfs_attr3_leaf_name_remote(leaf, index);
size = xfs_attr_leaf_entsize_remote(name_rmt->namelen);
}
return size;
}
|
Safe
|
[
"CWE-241",
"CWE-19"
] |
linux
|
8275cdd0e7ac550dcce2b3ef6d2fb3b808c1ae59
|
1.2500043240802298e+37
| 18 |
xfs: remote attribute overwrite causes transaction overrun
Commit e461fcb ("xfs: remote attribute lookups require the value
length") passes the remote attribute length in the xfs_da_args
structure on lookup so that CRC calculations and validity checking
can be performed correctly by related code. This, unfortunately has
the side effect of changing the args->valuelen parameter in cases
where it shouldn't.
That is, when we replace a remote attribute, the incoming
replacement stores the value and length in args->value and
args->valuelen, but then the lookup which finds the existing remote
attribute overwrites args->valuelen with the length of the remote
attribute being replaced. Hence when we go to create the new
attribute, we create it of the size of the existing remote
attribute, not the size it is supposed to be. When the new attribute
is much smaller than the old attribute, this results in a
transaction overrun and an ASSERT() failure on a debug kernel:
XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331
Fix this by keeping the remote attribute value length separate to
the attribute value length in the xfs_da_args structure. The enables
us to pass the length of the remote attribute to be removed without
overwriting the new attribute's length.
Also, ensure that when we save remote block contexts for a later
rename we zero the original state variables so that we don't confuse
the state of the attribute to be removes with the state of the new
attribute that we just added. [Spotted by Brain Foster.]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
| 0 |
static int robots_fsio_unlink(pr_fs_t *fs, const char *path) {
return 0;
}
|
Safe
|
[
"CWE-59",
"CWE-295"
] |
proftpd
|
349addc3be4fcdad9bd4ec01ad1ccd916c898ed8
|
2.6405126967025253e+38
| 3 |
Walk the entire DefaultRoot path, checking for symlinks of any component,
when AllowChrootSymlinks is disabled.
| 0 |
static inline Status ParseAndCheckBoxSizes(const Tensor& boxes,
const Tensor& box_index,
int* num_boxes) {
if (boxes.NumElements() == 0 && box_index.NumElements() == 0) {
*num_boxes = 0;
return Status::OK();
}
// The shape of 'boxes' is [num_boxes, 4].
if (boxes.dims() != 2) {
return errors::InvalidArgument("boxes must be 2-D",
boxes.shape().DebugString());
}
*num_boxes = boxes.dim_size(0);
if (boxes.dim_size(1) != 4) {
return errors::InvalidArgument("boxes must have 4 columns");
}
// The shape of 'box_index' is [num_boxes].
if (box_index.dims() != 1) {
return errors::InvalidArgument("box_index must be 1-D",
box_index.shape().DebugString());
}
if (box_index.dim_size(0) != *num_boxes) {
return errors::InvalidArgument("box_index has incompatible shape");
}
return Status::OK();
}
|
Vulnerable
|
[
"CWE-119",
"CWE-787"
] |
tensorflow
|
3ade2efec2e90c6237de32a19680caaa3ebc2845
|
3.2734885922217956e+38
| 26 |
Fix segmentation fault in tf.image.crop_and_resize when boxes is inf or nan
This fix tries to address the issue raised in 42129, where a segmentation fault
happened in tf.image.crop_and_resize when boxes is inf or nan.
This fix adds the check to make sure boxes is not inf or nan (isfinite)
This fix fixes 42129.
Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
| 1 |
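A minimal C sketch of the validation the commit message adds: rejecting box coordinates that are inf or NaN before they are used downstream. The real fix lives in TensorFlow's C++ kernel; this only illustrates the isfinite() gate, and the layout assumption (4 floats per box) follows the shape check in the record above.

```c
#include <math.h>
#include <stdbool.h>
#include <stddef.h>

/* Each box is 4 floats: y1, x1, y2, x2. Return false if any value is inf/NaN. */
bool boxes_are_finite(const float *boxes, size_t num_boxes)
{
    for (size_t i = 0; i < num_boxes * 4; i++)
        if (!isfinite(boxes[i]))
            return false;
    return true;
}
```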
g_tcp_last_error_would_block(int sck)
{
#if defined(_WIN32)
return WSAGetLastError() == WSAEWOULDBLOCK;
#else
return (errno == EWOULDBLOCK) || (errno == EAGAIN) || (errno == EINPROGRESS);
#endif
}
|
Safe
|
[] |
xrdp
|
d8f9e8310dac362bb9578763d1024178f94f4ecc
|
9.382018719011825e+37
| 8 |
move temp files from /tmp to /tmp/.xrdp
| 0 |
int samdb_msg_set_uint(struct ldb_context *sam_ldb, TALLOC_CTX *mem_ctx,
struct ldb_message *msg, const char *attr_name,
unsigned int v)
{
struct ldb_message_element *el;
el = ldb_msg_find_element(msg, attr_name);
if (el) {
el->num_values = 0;
}
return samdb_msg_add_uint(sam_ldb, mem_ctx, msg, attr_name, v);
}
|
Safe
|
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
|
2.029661648415802e+38
| 12 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
| 0 |
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (enable_pml)
vmx_destroy_pml_buffer(vmx);
free_vpid(vmx->vpid);
leave_guest_mode(vcpu);
vmx_free_vcpu_nested(vcpu);
free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
}
|
Safe
|
[
"CWE-284"
] |
linux
|
727ba748e110b4de50d142edca9d6a9b7e6111d8
|
7.54258396227036e+37
| 14 |
kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: stable@vger.kernel.org
Signed-off-by: Felix Wilhelm <fwilhelm@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
static inline void openOptimized(sqlite3*& db) {
sqlite3_open(":memory:", &db);
std::string settings;
for (const auto& setting : kMemoryDBSettings) {
settings += "PRAGMA " + setting.first + "=" + setting.second + "; ";
}
sqlite3_exec(db, settings.c_str(), nullptr, nullptr, nullptr);
// Register function extensions.
registerMathExtensions(db);
#if !defined(FREEBSD)
registerStringExtensions(db);
#endif
#if !defined(SKIP_CARVER)
registerOperationExtensions(db);
#endif
registerFilesystemExtensions(db);
registerHashingExtensions(db);
registerEncodingExtensions(db);
auto rc = sqlite3_set_authorizer(db, &sqliteAuthorizer, nullptr);
if (rc != SQLITE_OK) {
LOG(ERROR) << "Failed to set sqlite authorizer: " << sqlite3_errmsg(db);
requestShutdown(rc);
}
}
|
Safe
|
[
"CWE-77",
"CWE-295"
] |
osquery
|
c3f9a3dae22d43ed3b4f6a403cbf89da4cba7c3c
|
3.736095285995873e+37
| 27 |
Merge pull request from GHSA-4g56-2482-x7q8
* Proposed fix for attach tables vulnerability
* Add authorizer to ATC tables and cleanups
- Add unit test for authorizer function
| 0 |
conntrack_flush(struct conntrack *ct, const uint16_t *zone)
{
for (unsigned i = 0; i < CONNTRACK_BUCKETS; i++) {
struct conntrack_bucket *ctb = &ct->buckets[i];
ovs_mutex_lock(&ctb->cleanup_mutex);
ct_lock_lock(&ctb->lock);
for (unsigned j = 0; j < N_CT_TM; j++) {
struct conn *conn, *next;
LIST_FOR_EACH_SAFE (conn, next, exp_node, &ctb->exp_lists[j]) {
if (!zone || *zone == conn->key.zone) {
conn_clean(ct, conn, ctb);
}
}
}
ct_lock_unlock(&ctb->lock);
ovs_mutex_unlock(&ctb->cleanup_mutex);
}
return 0;
}
|
Safe
|
[
"CWE-400"
] |
ovs
|
abd7a457652e6734902720fe6a5dddb3fc0d1e3b
|
5.886067548959209e+37
| 20 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <joakim.hindersson@elastx.se>
Acked-by: Ilya Maximets <i.maximets@ovn.org>
Signed-off-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
| 0 |
ExprList_Finish(ExprList *l, PyArena *arena)
{
asdl_seq *seq;
ExprList_check_invariants(l);
/* Allocate the asdl_seq and copy the expressions in to it. */
seq = _Py_asdl_seq_new(l->size, arena);
if (seq) {
Py_ssize_t i;
for (i = 0; i < l->size; i++)
asdl_seq_SET(seq, i, l->p[i]);
}
ExprList_Dealloc(l);
return seq;
}
|
Safe
|
[
"CWE-125"
] |
cpython
|
a4d78362397fc3bced6ea80fbc7b5f4827aec55e
|
2.9635830653780834e+38
| 16 |
bpo-36495: Fix two out-of-bounds array reads (GH-12641)
Research and fix by @bradlarsen.
| 0 |
void Item_func_mul::result_precision()
{
/* Integer operations keep unsigned_flag if one of arguments is unsigned */
if (result_type() == INT_RESULT)
unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
else
unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
decimals= MY_MIN(args[0]->decimal_scale() + args[1]->decimal_scale(),
DECIMAL_MAX_SCALE);
uint est_prec = args[0]->decimal_precision() + args[1]->decimal_precision();
uint precision= MY_MIN(est_prec, DECIMAL_MAX_PRECISION);
max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
unsigned_flag);
}
|
Safe
|
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
|
2.4445241068530904e+38
| 14 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour, so it is problematic to fix.
| 0 |
void snd_seq_queue_client_termination(int client)
{
unsigned long flags;
int i;
struct snd_seq_queue *q;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
if ((q = queueptr(i)) == NULL)
continue;
spin_lock_irqsave(&q->owner_lock, flags);
if (q->owner == client)
q->klocked = 1;
spin_unlock_irqrestore(&q->owner_lock, flags);
if (q->owner == client) {
if (q->timer->running)
snd_seq_timer_stop(q->timer);
snd_seq_timer_reset(q->timer);
}
queuefree(q);
}
}
|
Safe
|
[
"CWE-362"
] |
linux
|
3567eb6af614dac436c4b16a8d426f9faed639b3
|
9.141902650936746e+37
| 21 |
ALSA: seq: Fix race at timer setup and close
ALSA sequencer code has an open race between the timer setup ioctl and
the close of the client. This was triggered by syzkaller fuzzer, and
a use-after-free was caught there as a result.
This patch papers over it by adding a proper queue->timer_mutex lock
around the timer-related calls in the relevant code path.
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Dmitry Vyukov <dvyukov@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
| 0 |
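A simplified sketch, using POSIX threads instead of kernel primitives, of the serialization the ALSA fix adds: timer setup and client-close teardown take the same per-queue mutex, so one side cannot reset state the other is still using. Structure and function names are illustrative, not the driver's.

```c
#include <pthread.h>
#include <stdbool.h>

struct seq_timer { bool running; };

struct seq_queue {
    pthread_mutex_t timer_mutex;   /* serializes timer setup vs. close paths */
    struct seq_timer timer;
};

void queue_timer_stop_and_reset(struct seq_queue *q)
{
    pthread_mutex_lock(&q->timer_mutex);
    if (q->timer.running)
        q->timer.running = false;   /* stand-in for snd_seq_timer_stop() */
    /* stand-in for snd_seq_timer_reset() */
    pthread_mutex_unlock(&q->timer_mutex);
}
```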
static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
u64 fixed = DR6_FIXED_1;
if (!guest_cpuid_has_rtm(vcpu))
fixed |= DR6_RTM;
return fixed;
}
|
Safe
|
[] |
kvm
|
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
4.51226462708679e+37
| 8 |
KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
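A small sketch of the canonicality test the WRMSR commit relies on: with 48-bit x86-64 virtual addresses, bits 63..47 must all equal bit 47. The helper sign-extends from bit 47 and compares, one common way to express the check; it is illustrative and not KVM's exact helper.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Canonical iff sign-extending the low 48 bits reproduces the value.
 * (Arithmetic right shift of a negative value is the usual behaviour on
 * mainstream compilers, which is what this sketch assumes.) */
bool is_canonical_48(uint64_t addr)
{
    int64_t s = (int64_t)(addr << 16) >> 16;
    return (uint64_t)s == addr;
}

int main(void)
{
    printf("%d %d %d\n",
           is_canonical_48(0x00007fffffffffffULL),   /* 1: top of lower half */
           is_canonical_48(0xffff800000000000ULL),   /* 1: bottom of upper half */
           is_canonical_48(0x0000800000000000ULL));  /* 0: non-canonical */
    return 0;
}
```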
archive_read_support_format_7zip_capabilities(struct archive_read * a)
{
(void)a; /* UNUSED */
return (ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA |
ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA);
}
|
Safe
|
[
"CWE-190",
"CWE-125"
] |
libarchive
|
e79ef306afe332faf22e9b442a2c6b59cb175573
|
1.2578031743370801e+38
| 6 |
Issue #718: Fix TALOS-CAN-152
If a 7-Zip archive declares a ridiculously large number of substreams,
it can overflow an internal counter, leading a subsequent memory
allocation to be too small for the substream data.
Thanks to the Open Source and Threat Intelligence project at Cisco
for reporting this issue.
| 0 |
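The libarchive issue above is an attacker-controlled count overflowing the arithmetic that sizes a later allocation. A minimal sketch of the defensive pattern — checking the multiplication before allocating — in generic C (this is the general idiom, not libarchive's internal fix):
#include <stdint.h>
#include <stdlib.h>

/* Allocate count * size bytes, refusing if the product would overflow. */
static void *alloc_checked(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL;                 /* would overflow: reject the input */
    return malloc(count * size);
}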
server_is_term(struct xrdp_mod* mod)
{
return g_is_term();
}
|
Safe
|
[] |
xrdp
|
d8f9e8310dac362bb9578763d1024178f94f4ecc
|
1.2030688114111002e+37
| 4 |
move temp files from /tmp to /tmp/.xrdp
| 0 |
end_lex_with_single_table(THD *thd, TABLE *table, LEX *old_lex)
{
LEX *lex= thd->lex;
table->map= 0;
table->get_fields_in_item_tree= FALSE;
lex_end(lex);
thd->lex= old_lex;
}
|
Safe
|
[] |
mysql-server
|
be901b60ae59c93848c829d1b0b2cb523ab8692e
|
2.174247380962139e+37
| 8 |
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT.
Analysis
========
CREATE TABLE of InnoDB table with a partition name
which exceeds the path limit can cause the server
to exit.
During the preparation of the partition name,
there was no check to identify whether the complete
path name for partition exceeds the max supported
path length, causing the server to exit during
subsequent processing.
Fix
===
During the preparation of partition name, check and report
an error if the partition path name exceeds the maximum path
name limit.
This is a 5.5 patch.
| 0 |
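The MySQL fix above validates the full partition path length before it is used. A hedged sketch of that kind of check, relying on snprintf's return value to report how long the string would have needed to be; the limit macro and the name format are illustrative assumptions, not MySQL's actual path handling:
#include <stdio.h>

#define MAX_PATH_LEN 512   /* illustrative limit, not MySQL's real constant */

/* Build "<datadir>/<table>#P#<partition>" and fail cleanly if the result
 * would not fit, instead of truncating or overrunning later. */
static int build_partition_path(char *out, size_t outlen,
                                const char *datadir, const char *table,
                                const char *partition)
{
    int n = snprintf(out, outlen, "%s/%s#P#%s", datadir, table, partition);
    if (n < 0 || (size_t)n >= outlen || n >= MAX_PATH_LEN)
        return -1;   /* report "partition path too long" to the caller */
    return 0;
}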
static void alarmtimer_dequeue(struct alarm_base *base, struct alarm *alarm)
{
if (!(alarm->state & ALARMTIMER_STATE_ENQUEUED))
return;
timerqueue_del(&base->timerqueue, &alarm->node);
alarm->state &= ~ALARMTIMER_STATE_ENQUEUED;
}
|
Safe
|
[
"CWE-190"
] |
linux
|
5f936e19cc0ef97dbe3a56e9498922ad5ba1edef
|
2.2750071343467904e+38
| 8 |
alarmtimer: Prevent overflow for relative nanosleep
Air Icy reported:
UBSAN: Undefined behaviour in kernel/time/alarmtimer.c:811:7
signed integer overflow:
1529859276030040771 + 9223372036854775807 cannot be represented in type 'long long int'
Call Trace:
alarm_timer_nsleep+0x44c/0x510 kernel/time/alarmtimer.c:811
__do_sys_clock_nanosleep kernel/time/posix-timers.c:1235 [inline]
__se_sys_clock_nanosleep kernel/time/posix-timers.c:1213 [inline]
__x64_sys_clock_nanosleep+0x326/0x4e0 kernel/time/posix-timers.c:1213
do_syscall_64+0xb8/0x3a0 arch/x86/entry/common.c:290
alarm_timer_nsleep() uses ktime_add() to add the current time and the
relative expiry value. ktime_add() has no sanity checks so the addition
can overflow when the relative timeout is large enough.
Use ktime_add_safe() which has the necessary sanity checks in place and
limits the result to the valid range.
Fixes: 9a7adcf5c6de ("timers: Posix interface for alarm-timers")
Reported-by: Team OWL337 <icytxw@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: John Stultz <john.stultz@linaro.org>
Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1807020926360.1595@nanos.tec.linutronix.de
| 0 |
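The alarmtimer patch above replaces a plain ktime addition with a saturating one so a huge relative timeout cannot wrap a signed 64-bit nanosecond value. A self-contained sketch of a saturating add in plain C (the idea behind it, not the kernel's ktime_add_safe itself):
#include <stdint.h>

#define KTIME_MAX INT64_MAX   /* assumption: clamp to the largest value */

/* Add two signed 64-bit nanosecond values, clamping instead of wrapping. */
static int64_t ktime_add_saturating(int64_t now, int64_t delta)
{
    if (delta > 0 && now > KTIME_MAX - delta)
        return KTIME_MAX;          /* would overflow: saturate */
    if (delta < 0 && now < INT64_MIN - delta)
        return INT64_MIN;          /* would underflow: saturate */
    return now + delta;
}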
TRI_action_result_t execute(TRI_vocbase_t* vocbase, GeneralRequest* request,
GeneralResponse* response, Mutex* dataLock,
void** data) override {
TRI_action_result_t result;
// allow use database execution in rest calls?
bool allowUseDatabase = _allowUseDatabase || _actionFeature.allowUseDatabase();
// get a V8 context
V8ContextGuard guard(vocbase, _isSystem ?
JavaScriptSecurityContext::createInternalContext() :
JavaScriptSecurityContext::createRestActionContext(allowUseDatabase));
// locate the callback
READ_LOCKER(readLocker, _callbacksLock);
{
auto it = _callbacks.find(guard.isolate());
if (it == _callbacks.end()) {
LOG_TOPIC("94556", WARN, arangodb::Logger::FIXME)
<< "no callback function for JavaScript action '" << _url << "'";
result.isValid = true;
response->setResponseCode(rest::ResponseCode::NOT_FOUND);
return result;
}
// and execute it
{
// cppcheck-suppress redundantPointerOp
MUTEX_LOCKER(mutexLocker, *dataLock);
if (*data != nullptr) {
result.canceled = true;
return result;
}
*data = (void*)guard.isolate();
}
v8::HandleScope scope(guard.isolate());
auto localFunction = v8::Local<v8::Function>::New(guard.isolate(), it->second);
// we can release the lock here already as no other threads will
// work in our isolate at this time
readLocker.unlock();
try {
result = ExecuteActionVocbase(vocbase, guard.isolate(), this,
localFunction, request, response);
} catch (...) {
result.isValid = false;
}
{
// cppcheck-suppress redundantPointerOp
MUTEX_LOCKER(mutexLocker, *dataLock);
*data = nullptr;
}
}
return result;
}
|
Safe
|
[
"CWE-918"
] |
arangodb
|
d9b7f019d2435f107b19a59190bf9cc27d5f34dd
|
9.280933620573999e+37
| 64 |
[APM-78] Disable installation from remote URL (#15292)
| 0 |
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
{
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
int i;
int num_planes = av_pix_fmt_count_planes(frame->format);
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
int flags = desc ? desc->flags : 0;
if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
num_planes = 2;
for (i = 0; i < num_planes; i++) {
av_assert0(frame->data[i]);
}
// For now do not enforce anything for palette of pseudopal formats
if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PSEUDOPAL))
num_planes = 2;
// For formats without data like hwaccel allow unused pointers to be non-NULL.
for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
if (frame->data[i])
av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
frame->data[i] = NULL;
}
}
}
|
Safe
|
[
"CWE-787"
] |
FFmpeg
|
2080bc33717955a0e4268e738acf8c1eeddbf8cb
|
2.230435763390492e+37
| 23 |
avcodec/utils: correct align value for interplay
Fixes out of array access
Fixes: 452/fuzz-1-ffmpeg_VIDEO_AV_CODEC_ID_INTERPLAY_VIDEO_fuzzer
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
| 0 |
TEST(QuantizedUInt8PoolingOpTest, MaxPoolPaddingValidStride1) {
// Choose the input ranges carefully so that the dequantized output matches
// the results of the float model above.
// Input Range[0, 15.9375] --> [Scale{0.0625}, zero_point{0}]
QuantizedPoolingOpModel m(
BuiltinOperator_MAX_POOL_2D,
/*input=*/{TensorType_UINT8, {1, 2, 4, 1}, 0, 15.9375},
/*filter_width=*/2, /*filter_height=*/2,
/*output=*/{TensorType_UINT8, {}, 0, 15.9375}, Padding_VALID, 1, 1);
m.SetInput({
0, 6, 2, 4, //
3, 2, 10, 7, //
});
m.Invoke();
EXPECT_THAT(m.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({6, 10, 10})));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({96, 160, 160}));
}
|
Safe
|
[
"CWE-369"
] |
tensorflow
|
5f7975d09eac0f10ed8a17dbb6f5964977725adc
|
8.666534249857422e+37
| 19 |
Prevent another div by 0 in optimized pooling implementations TFLite
PiperOrigin-RevId: 370800091
Change-Id: I2119352f57fb5ca4f2051e0e2d749403304a979b
| 0 |
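The TFLite change above is a divide-by-zero guard: in pooling the divisor is the number of elements under the filter window, and a degenerate shape can drive it to zero. A minimal sketch of the guard in C (illustrative only, not the TFLite kernel code):
/* Average over `count` accumulated values; a malformed model can make
 * count == 0, so guard the division instead of crashing. */
static float safe_average(float sum, int count)
{
    return (count > 0) ? sum / (float)count : 0.0f;
}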
static unsigned int input_to_handler(struct input_handle *handle,
struct input_value *vals, unsigned int count)
{
struct input_handler *handler = handle->handler;
struct input_value *end = vals;
struct input_value *v;
if (handler->filter) {
for (v = vals; v != vals + count; v++) {
if (handler->filter(handle, v->type, v->code, v->value))
continue;
if (end != v)
*end = *v;
end++;
}
count = end - vals;
}
if (!count)
return 0;
if (handler->events)
handler->events(handle, vals, count);
else if (handler->event)
for (v = vals; v != vals + count; v++)
handler->event(handle, v->type, v->code, v->value);
return count;
}
|
Safe
|
[
"CWE-703",
"CWE-787"
] |
linux
|
cb222aed03d798fc074be55e59d9a112338ee784
|
2.481759430633481e+38
| 29 |
Input: add safety guards to input_set_keycode()
If we happen to have garbage in an input device's keycode table with values
too big, we'll end up doing clear_bit() with an offset way outside of our
bitmaps, damaging other objects within an input device or even outside of
it. Let's add sanity checks to the returned old keycodes.
Reported-by: syzbot+c769968809f9359b07aa@syzkaller.appspotmail.com
Reported-by: syzbot+76f3a30e88d256644c78@syzkaller.appspotmail.com
Link: https://lore.kernel.org/r/20191207212757.GA245964@dtor-ws
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
| 0 |
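The input-core fix above adds bounds checks so a garbage keycode can no longer index past the key bitmap before the bit is cleared. A generic sketch of that guard with an illustrative bitmap size (not the real input_dev layout):
#include <stdbool.h>
#include <stddef.h>

#define KEY_CNT 768                       /* illustrative bitmap size */
static unsigned long keybit[KEY_CNT / (8 * sizeof(unsigned long)) + 1];

/* Clear an old keycode only after verifying it fits inside the bitmap. */
static bool clear_old_keycode(unsigned int old_keycode)
{
    if (old_keycode >= KEY_CNT)
        return false;                     /* reject out-of-range value */
    keybit[old_keycode / (8 * sizeof(unsigned long))] &=
        ~(1UL << (old_keycode % (8 * sizeof(unsigned long))));
    return true;
}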
TEST_F(QuicServerTransportTest, TestCloseConnectionWithNoErrorPendingStreams) {
auto streamId = server->createBidirectionalStream().value();
server->writeChain(streamId, IOBuf::copyBuffer("hello"), true);
loopForWrites();
AckBlocks acks;
auto start = getFirstOutstandingPacket(
server->getNonConstConn(), PacketNumberSpace::AppData)
->packet.header.getPacketSequenceNum();
auto end = getLastOutstandingPacket(
server->getNonConstConn(), PacketNumberSpace::AppData)
->packet.header.getPacketSequenceNum();
acks.insert(start, end);
deliverData(packetToBuf(createAckPacket(
server->getNonConstConn(),
++clientNextAppDataPacketNum,
acks,
PacketNumberSpace::AppData)));
server->close(std::make_pair(
QuicErrorCode(GenericApplicationErrorCode::UNKNOWN),
std::string("stopping")));
EXPECT_THROW(
recvEncryptedStream(streamId, *IOBuf::copyBuffer("hello")),
std::runtime_error);
EXPECT_TRUE(verifyFramePresent(
serverWrites,
*makeClientEncryptedCodec(),
QuicFrame::Type::ConnectionCloseFrame));
}
|
Safe
|
[
"CWE-617",
"CWE-703"
] |
mvfst
|
a67083ff4b8dcbb7ee2839da6338032030d712b0
|
2.047492630308925e+38
| 31 |
Close connection if we derive an extra 1-rtt write cipher
Summary: Fixes CVE-2021-24029
Reviewed By: mjoras, lnicco
Differential Revision: D26613890
fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945
| 0 |
static Image *ReadPICTImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
geometry[MagickPathExtent],
header_ole[4];
Image
*image;
int
c,
code;
MagickBooleanType
jpeg,
status;
PICTRectangle
frame;
PICTPixmap
pixmap;
Quantum
index;
register Quantum
*q;
register ssize_t
i,
x;
size_t
extent,
length;
ssize_t
count,
flags,
j,
version,
y;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read PICT header.
*/
pixmap.bits_per_pixel=0;
pixmap.component_count=0;
/*
Skip header : 512 for standard PICT and 4, ie "PICT" for OLE2.
*/
header_ole[0]=ReadBlobByte(image);
header_ole[1]=ReadBlobByte(image);
header_ole[2]=ReadBlobByte(image);
header_ole[3]=ReadBlobByte(image);
if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) &&
(header_ole[2] == 0x43) && (header_ole[3] == 0x54 )))
for (i=0; i < 508; i++)
if (ReadBlobByte(image) == EOF)
break;
(void) ReadBlobMSBShort(image); /* skip picture size */
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
while ((c=ReadBlobByte(image)) == 0) ;
if (c != 0x11)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
version=ReadBlobByte(image);
if (version == 2)
{
c=ReadBlobByte(image);
if (c != 0xff)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
else
if (version != 1)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) ||
(frame.bottom < 0) || (frame.left >= frame.right) ||
(frame.top >= frame.bottom))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Create black canvas.
*/
flags=0;
image->depth=8;
image->columns=1UL*(frame.right-frame.left);
image->rows=1UL*(frame.bottom-frame.top);
image->resolution.x=DefaultResolution;
image->resolution.y=DefaultResolution;
image->units=UndefinedResolution;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Interpret PICT opcodes.
*/
jpeg=MagickFalse;
for (code=0; EOFBlob(image) == MagickFalse; )
{
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((version == 1) || ((TellBlob(image) % 2) != 0))
code=ReadBlobByte(image);
if (version == 2)
code=ReadBlobMSBSignedShort(image);
if (code < 0)
break;
if (code > 0xa1)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code);
}
else
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" %04X %s: %s",code,codes[code].name,codes[code].description);
switch (code)
{
case 0x01:
{
/*
Clipping rectangle.
*/
length=ReadBlobMSBShort(image);
if (length != 0x000a)
{
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0))
break;
image->columns=1UL*(frame.right-frame.left);
image->rows=1UL*(frame.bottom-frame.top);
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
(void) SetImageBackgroundColor(image,exception);
break;
}
case 0x12:
case 0x13:
case 0x14:
{
ssize_t
pattern;
size_t
height,
width;
/*
Skip pattern definition.
*/
pattern=1L*ReadBlobMSBShort(image);
for (i=0; i < 8; i++)
if (ReadBlobByte(image) == EOF)
break;
if (pattern == 2)
{
for (i=0; i < 5; i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
if (pattern != 1)
ThrowReaderException(CorruptImageError,"UnknownPatternType");
length=ReadBlobMSBShort(image);
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
ReadPixmap(pixmap);
image->depth=1UL*pixmap.component_size;
image->resolution.x=1.0*pixmap.horizontal_resolution;
image->resolution.y=1.0*pixmap.vertical_resolution;
image->units=PixelsPerInchResolution;
(void) ReadBlobMSBLong(image);
flags=1L*ReadBlobMSBShort(image);
length=ReadBlobMSBShort(image);
for (i=0; i <= (ssize_t) length; i++)
(void) ReadBlobMSBLong(image);
width=1UL*(frame.bottom-frame.top);
height=1UL*(frame.right-frame.left);
if (pixmap.bits_per_pixel <= 8)
length&=0x7fff;
if (pixmap.bits_per_pixel == 16)
width<<=1;
if (length == 0)
length=width;
if (length < 8)
{
for (i=0; i < (ssize_t) (length*height); i++)
if (ReadBlobByte(image) == EOF)
break;
}
else
for (j=0; j < (int) height; j++)
if (length > 200)
{
for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++)
if (ReadBlobByte(image) == EOF)
break;
}
else
for (j=0; j < (ssize_t) ReadBlobByte(image); j++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
case 0x1b:
{
/*
Initialize image background color.
*/
image->background_color.red=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
image->background_color.green=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
image->background_color.blue=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
break;
}
case 0x70:
case 0x71:
case 0x72:
case 0x73:
case 0x74:
case 0x75:
case 0x76:
case 0x77:
{
/*
Skip polygon or region.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
case 0x90:
case 0x91:
case 0x98:
case 0x99:
case 0x9a:
case 0x9b:
{
Image
*tile_image;
PICTRectangle
source,
destination;
register unsigned char
*p;
size_t
j;
ssize_t
bytes_per_line;
unsigned char
*pixels;
/*
Pixmap clipped by a rectangle.
*/
bytes_per_line=0;
if ((code != 0x9a) && (code != 0x9b))
bytes_per_line=1L*ReadBlobMSBShort(image);
else
{
(void) ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
}
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize tile image.
*/
tile_image=CloneImage(image,1UL*(frame.right-frame.left),
1UL*(frame.bottom-frame.top),MagickTrue,exception);
if (tile_image == (Image *) NULL)
return((Image *) NULL);
if ((code == 0x9a) || (code == 0x9b) ||
((bytes_per_line & 0x8000) != 0))
{
ReadPixmap(pixmap);
tile_image->depth=1UL*pixmap.component_size;
tile_image->alpha_trait=pixmap.component_count == 4 ?
BlendPixelTrait : UndefinedPixelTrait;
tile_image->resolution.x=(double) pixmap.horizontal_resolution;
tile_image->resolution.y=(double) pixmap.vertical_resolution;
tile_image->units=PixelsPerInchResolution;
if (tile_image->alpha_trait != UndefinedPixelTrait)
image->alpha_trait=tile_image->alpha_trait;
}
if ((code != 0x9a) && (code != 0x9b))
{
/*
Initialize colormap.
*/
tile_image->colors=2;
if ((bytes_per_line & 0x8000) != 0)
{
(void) ReadBlobMSBLong(image);
flags=1L*ReadBlobMSBShort(image);
tile_image->colors=1UL*ReadBlobMSBShort(image)+1;
}
status=AcquireImageColormap(tile_image,tile_image->colors,
exception);
if (status == MagickFalse)
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
if ((bytes_per_line & 0x8000) != 0)
{
for (i=0; i < (ssize_t) tile_image->colors; i++)
{
j=ReadBlobMSBShort(image) % tile_image->colors;
if ((flags & 0x8000) != 0)
j=(size_t) i;
tile_image->colormap[j].red=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
tile_image->colormap[j].green=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
tile_image->colormap[j].blue=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
}
}
else
{
for (i=0; i < (ssize_t) tile_image->colors; i++)
{
tile_image->colormap[i].red=(Quantum) (QuantumRange-
tile_image->colormap[i].red);
tile_image->colormap[i].green=(Quantum) (QuantumRange-
tile_image->colormap[i].green);
tile_image->colormap[i].blue=(Quantum) (QuantumRange-
tile_image->colormap[i].blue);
}
}
}
if (ReadRectangle(image,&source) == MagickFalse)
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (ReadRectangle(image,&destination) == MagickFalse)
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
(void) ReadBlobMSBShort(image);
if ((code == 0x91) || (code == 0x99) || (code == 0x9b))
{
/*
Skip region.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
}
if ((code != 0x9a) && (code != 0x9b) &&
(bytes_per_line & 0x8000) == 0)
pixels=DecodeImage(image,tile_image,1UL*bytes_per_line,1,&extent,
exception);
else
pixels=DecodeImage(image,tile_image,1UL*bytes_per_line,1U*
pixmap.bits_per_pixel,&extent,exception);
if (pixels == (unsigned char *) NULL)
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
/*
Convert PICT tile image to pixel packets.
*/
p=pixels;
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
if (p > (pixels+extent+image->columns))
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(CorruptImageError,"NotEnoughPixelData");
}
q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
if (tile_image->storage_class == PseudoClass)
{
index=ConstrainColormapIndex(tile_image,*p,exception);
SetPixelIndex(tile_image,index,q);
SetPixelRed(tile_image,
tile_image->colormap[(ssize_t) index].red,q);
SetPixelGreen(tile_image,
tile_image->colormap[(ssize_t) index].green,q);
SetPixelBlue(tile_image,
tile_image->colormap[(ssize_t) index].blue,q);
}
else
{
if (pixmap.bits_per_pixel == 16)
{
i=(*p++);
j=(*p);
SetPixelRed(tile_image,ScaleCharToQuantum(
(unsigned char) ((i & 0x7c) << 1)),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
(unsigned char) (((i & 0x03) << 6) |
((j & 0xe0) >> 2))),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
(unsigned char) ((j & 0x1f) << 3)),q);
}
else
if (tile_image->alpha_trait == UndefinedPixelTrait)
{
if (p > (pixels+extent+2*image->columns))
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(CorruptImageError,
"NotEnoughPixelData");
}
SetPixelRed(tile_image,ScaleCharToQuantum(*p),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
*(p+tile_image->columns)),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
*(p+2*tile_image->columns)),q);
}
else
{
if (p > (pixels+extent+3*image->columns))
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(CorruptImageError,
"NotEnoughPixelData");
}
SetPixelAlpha(tile_image,ScaleCharToQuantum(*p),q);
SetPixelRed(tile_image,ScaleCharToQuantum(
*(p+tile_image->columns)),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
*(p+2*tile_image->columns)),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
*(p+3*tile_image->columns)),q);
}
}
p++;
q+=GetPixelChannels(tile_image);
}
if (SyncAuthenticPixels(tile_image,exception) == MagickFalse)
break;
if ((tile_image->storage_class == DirectClass) &&
(pixmap.bits_per_pixel != 16))
{
p+=(pixmap.component_count-1)*tile_image->columns;
if (p < pixels)
break;
}
status=SetImageProgress(image,LoadImageTag,y,tile_image->rows);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
if (jpeg == MagickFalse)
if ((code == 0x9a) || (code == 0x9b) ||
((bytes_per_line & 0x8000) != 0))
(void) CompositeImage(image,tile_image,CopyCompositeOp,
MagickTrue,destination.left,destination.top,exception);
tile_image=DestroyImage(tile_image);
break;
}
case 0xa1:
{
unsigned char
*info;
size_t
type;
/*
Comment.
*/
type=ReadBlobMSBShort(image);
length=ReadBlobMSBShort(image);
if (length == 0)
break;
(void) ReadBlobMSBLong(image);
length-=4;
if (length == 0)
break;
info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info));
if (info == (unsigned char *) NULL)
break;
count=ReadBlob(image,length,info);
if (count != (ssize_t) length)
ThrowReaderException(ResourceLimitError,"UnableToReadImageData");
switch (type)
{
case 0xe0:
{
profile=BlobToStringInfo((const void *) NULL,length);
SetStringInfoDatum(profile,info);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
info=(unsigned char *) RelinquishMagickMemory(info);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
break;
}
case 0x1f2:
{
profile=BlobToStringInfo((const void *) NULL,length);
SetStringInfoDatum(profile,info);
status=SetImageProfile(image,"iptc",profile,exception);
if (status == MagickFalse)
{
info=(unsigned char *) RelinquishMagickMemory(info);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
profile=DestroyStringInfo(profile);
break;
}
default:
break;
}
info=(unsigned char *) RelinquishMagickMemory(info);
break;
}
default:
{
/*
Skip to next op code.
*/
if (codes[code].length == -1)
(void) ReadBlobMSBShort(image);
else
for (i=0; i < (ssize_t) codes[code].length; i++)
if (ReadBlobByte(image) == EOF)
break;
}
}
}
if (code == 0xc00)
{
/*
Skip header.
*/
for (i=0; i < 24; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
if (((code >= 0xb0) && (code <= 0xcf)) ||
((code >= 0x8000) && (code <= 0x80ff)))
continue;
if (code == 0x8200)
{
FILE
*file;
Image
*tile_image;
ImageInfo
*read_info;
int
unique_file;
/*
Embedded JPEG.
*/
jpeg=MagickTrue;
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(read_info->filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
{
(void) RelinquishUniqueFileResource(read_info->filename);
(void) CopyMagickString(image->filename,read_info->filename,
MagickPathExtent);
ThrowFileException(exception,FileOpenError,
"UnableToCreateTemporaryFile",image->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=ReadBlobMSBLong(image);
if (length > 154)
{
for (i=0; i < 6; i++)
(void) ReadBlobMSBLong(image);
if (ReadRectangle(image,&frame) == MagickFalse)
{
(void) fclose(file);
(void) RelinquishUniqueFileResource(read_info->filename);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
for (i=0; i < 122; i++)
if (ReadBlobByte(image) == EOF)
break;
for (i=0; i < (ssize_t) (length-154); i++)
{
c=ReadBlobByte(image);
if (c == EOF)
break;
(void) fputc(c,file);
}
}
(void) fclose(file);
(void) close(unique_file);
tile_image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
read_info=DestroyImageInfo(read_info);
if (tile_image == (Image *) NULL)
continue;
(void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",
(double) MagickMax(image->columns,tile_image->columns),
(double) MagickMax(image->rows,tile_image->rows));
(void) SetImageExtent(image,
MagickMax(image->columns,tile_image->columns),
MagickMax(image->rows,tile_image->rows),exception);
(void) TransformImageColorspace(image,tile_image->colorspace,exception);
(void) CompositeImage(image,tile_image,CopyCompositeOp,MagickTrue,
frame.left,frame.right,exception);
image->compression=tile_image->compression;
tile_image=DestroyImage(tile_image);
continue;
}
if ((code == 0xff) || (code == 0xffff))
break;
if (((code >= 0xd0) && (code <= 0xfe)) ||
((code >= 0x8100) && (code <= 0xffff)))
{
/*
Skip reserved.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) length; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
if ((code >= 0x100) && (code <= 0x7fff))
{
/*
Skip reserved.
*/
length=(size_t) ((code >> 7) & 0xff);
for (i=0; i < (ssize_t) length; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
|
Safe
|
[
"CWE-772",
"CWE-401"
] |
ImageMagick
|
c1b09bbec148f6ae11d0b686fdb89ac6dc0ab14e
|
6.406664964385959e+37
| 706 |
https://github.com/ImageMagick/ImageMagick/issues/577
| 0 |
builtin_hash(PyObject *module, PyObject *obj)
/*[clinic end generated code: output=237668e9d7688db7 input=58c48be822bf9c54]*/
{
Py_hash_t x;
x = PyObject_Hash(obj);
if (x == -1)
return NULL;
return PyLong_FromSsize_t(x);
}
|
Safe
|
[
"CWE-125"
] |
cpython
|
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
|
8.027209711788339e+37
| 10 |
bpo-35766: Merge typed_ast back into CPython (GH-11645)
| 0 |
GF_Box *ccst_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_CodingConstraintsBox, GF_ISOM_BOX_TYPE_CCST);
return (GF_Box *) tmp;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
|
3.027214546400574e+37
| 5 |
fixed #1587
| 0 |
int tls1_cert_verify_mac(SSL *s, EVP_MD_CTX *in_ctx, unsigned char *out)
{
unsigned int ret;
EVP_MD_CTX ctx;
EVP_MD_CTX_init(&ctx);
EVP_MD_CTX_copy_ex(&ctx,in_ctx);
EVP_DigestFinal_ex(&ctx,out,&ret);
EVP_MD_CTX_cleanup(&ctx);
return((int)ret);
}
|
Safe
|
[
"CWE-310"
] |
openssl
|
35a65e814beb899fa1c69a7673a8956c6059dce7
|
3.253616194937755e+38
| 11 |
Make CBC decoding constant time.
This patch makes the decoding of SSLv3 and TLS CBC records constant
time. Without this, a timing side-channel can be used to build a padding
oracle and mount Vaudenay's attack.
This patch also disables the stitched AESNI+SHA mode pending a similar
fix to that code.
In order to be easy to backport, this change is implemented in ssl/,
rather than as a generic AEAD mode. In the future this should be changed
around so that HMAC isn't in ssl/, but crypto/ as FIPS expects.
(cherry picked from commit e130841bccfc0bb9da254dc84e23bc6a1c78a64e)
Conflicts:
crypto/evp/c_allc.c
ssl/ssl_algs.c
ssl/ssl_locl.h
ssl/t1_enc.c
(cherry picked from commit 3622239826698a0e534dcf0473204c724bb9b4b4)
Conflicts:
ssl/d1_enc.c
ssl/s3_enc.c
ssl/s3_pkt.c
ssl/ssl3.h
ssl/ssl_algs.c
ssl/t1_enc.c
| 0 |
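The CBC hardening described above removes data-dependent timing from record verification. The core idiom is a comparison whose running time does not depend on where the first mismatch occurs; a minimal constant-time compare in generic C (the idea, not OpenSSL's internal routine):
#include <stddef.h>
#include <stdint.h>

/* Returns 0 iff a and b are equal; examines every byte regardless of
 * where differences occur, so timing leaks no positional information. */
static int constant_time_memcmp(const uint8_t *a, const uint8_t *b, size_t n)
{
    uint8_t diff = 0;
    for (size_t i = 0; i < n; i++)
        diff |= a[i] ^ b[i];
    return diff;      /* nonzero means mismatch */
}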
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
QDict *options, int flags, BlockDriver *drv, Error **errp)
{
int ret, open_flags;
const char *filename;
const char *node_name = NULL;
Error *local_err = NULL;
assert(drv != NULL);
assert(bs->file == NULL);
assert(options != NULL && bs->options != options);
if (file != NULL) {
filename = file->filename;
} else {
filename = qdict_get_try_str(options, "filename");
}
if (drv->bdrv_needs_filename && !filename) {
error_setg(errp, "The '%s' block driver requires a file name",
drv->format_name);
return -EINVAL;
}
trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);
node_name = qdict_get_try_str(options, "node-name");
ret = bdrv_assign_node_name(bs, node_name, errp);
if (ret < 0) {
return ret;
}
qdict_del(options, "node-name");
/* bdrv_open() with directly using a protocol as drv. This layer is already
* opened, so assign it to bs (while file becomes a closed BlockDriverState)
* and return immediately. */
if (file != NULL && drv->bdrv_file_open) {
bdrv_swap(file, bs);
return 0;
}
bs->open_flags = flags;
bs->guest_block_size = 512;
bs->request_alignment = 512;
bs->zero_beyond_eof = true;
open_flags = bdrv_open_flags(bs, flags);
bs->read_only = !(open_flags & BDRV_O_RDWR);
if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
error_setg(errp,
!bs->read_only && bdrv_is_whitelisted(drv, true)
? "Driver '%s' can only be used for read-only devices"
: "Driver '%s' is not whitelisted",
drv->format_name);
return -ENOTSUP;
}
assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
if (flags & BDRV_O_COPY_ON_READ) {
if (!bs->read_only) {
bdrv_enable_copy_on_read(bs);
} else {
error_setg(errp, "Can't use copy-on-read on read-only device");
return -EINVAL;
}
}
if (filename != NULL) {
pstrcpy(bs->filename, sizeof(bs->filename), filename);
} else {
bs->filename[0] = '\0';
}
bs->drv = drv;
bs->opaque = g_malloc0(drv->instance_size);
bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
/* Open the image, either directly or using a protocol */
if (drv->bdrv_file_open) {
assert(file == NULL);
assert(!drv->bdrv_needs_filename || filename != NULL);
ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
} else {
if (file == NULL) {
error_setg(errp, "Can't use '%s' as a block driver for the "
"protocol level", drv->format_name);
ret = -EINVAL;
goto free_and_fail;
}
bs->file = file;
ret = drv->bdrv_open(bs, options, open_flags, &local_err);
}
if (ret < 0) {
if (local_err) {
error_propagate(errp, local_err);
} else if (bs->filename[0]) {
error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
} else {
error_setg_errno(errp, -ret, "Could not open image");
}
goto free_and_fail;
}
ret = refresh_total_sectors(bs, bs->total_sectors);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not refresh total sector count");
goto free_and_fail;
}
bdrv_refresh_limits(bs);
assert(bdrv_opt_mem_align(bs) != 0);
assert((bs->request_alignment != 0) || bs->sg);
#ifndef _WIN32
if (bs->is_temporary) {
assert(bs->filename[0] != '\0');
unlink(bs->filename);
}
#endif
return 0;
free_and_fail:
bs->file = NULL;
g_free(bs->opaque);
bs->opaque = NULL;
bs->drv = NULL;
return ret;
}
|
Safe
|
[
"CWE-190"
] |
qemu
|
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
|
1.2484725867723122e+38
| 130 |
block: Limit request size (CVE-2014-0143)
Limiting the size of a single request to INT_MAX not only fixes a
direct integer overflow in bdrv_check_request() (which would only
trigger bad behaviour with ridiculously huge images, as in close to
2^64 bytes), but can also prevent overflows in all block drivers.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
| 0 |
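The qemu change above caps a single block request at INT_MAX bytes so later sector arithmetic cannot overflow. A hedged sketch of that limit check in plain C (illustrative only, not the actual bdrv_check_request):
#include <limits.h>

#define BDRV_SECTOR_SIZE 512   /* standard 512-byte sectors */

/* Reject requests whose byte length would not fit in an int. */
static int check_request_size(int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE)
        return -1;   /* corresponds to returning -EINVAL in the real code */
    return 0;
}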
static inline unsigned char div255(int x) {
return (unsigned char)((x + (x >> 8) + 0x80) >> 8);
}
|
Safe
|
[
"CWE-369"
] |
poppler
|
b224e2f5739fe61de9fa69955d016725b2a4b78d
|
1.036992804144159e+38
| 3 |
SplashOutputDev::tilingPatternFill: Fix crash on broken file
Issue #802
| 0 |
void sqlite3ExprCodeGetColumnOfTable(
Vdbe *v, /* Parsing context */
Table *pTab, /* The table containing the value */
int iTabCur, /* The table cursor. Or the PK cursor for WITHOUT ROWID */
int iCol, /* Index of the column to extract */
int regOut /* Extract the value into this register */
){
Column *pCol;
assert( v!=0 );
if( pTab==0 ){
sqlite3VdbeAddOp3(v, OP_Column, iTabCur, iCol, regOut);
return;
}
if( iCol<0 || iCol==pTab->iPKey ){
sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut);
}else{
int op;
int x;
if( IsVirtual(pTab) ){
op = OP_VColumn;
x = iCol;
#ifndef SQLITE_OMIT_GENERATED_COLUMNS
}else if( (pCol = &pTab->aCol[iCol])->colFlags & COLFLAG_VIRTUAL ){
Parse *pParse = sqlite3VdbeParser(v);
if( pCol->colFlags & COLFLAG_BUSY ){
sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"", pCol->zName);
}else{
int savedSelfTab = pParse->iSelfTab;
pCol->colFlags |= COLFLAG_BUSY;
pParse->iSelfTab = iTabCur+1;
sqlite3ExprCodeGeneratedColumn(pParse, pCol, regOut);
pParse->iSelfTab = savedSelfTab;
pCol->colFlags &= ~COLFLAG_BUSY;
}
return;
#endif
}else if( !HasRowid(pTab) ){
testcase( iCol!=sqlite3TableColumnToStorage(pTab, iCol) );
x = sqlite3TableColumnToIndex(sqlite3PrimaryKeyIndex(pTab), iCol);
op = OP_Column;
}else{
x = sqlite3TableColumnToStorage(pTab,iCol);
testcase( x!=iCol );
op = OP_Column;
}
sqlite3VdbeAddOp3(v, op, iTabCur, x, regOut);
sqlite3ColumnDefault(v, pTab, iCol, regOut);
}
}
|
Safe
|
[
"CWE-476"
] |
sqlite
|
57f7ece78410a8aae86aa4625fb7556897db384c
|
9.417899069902006e+37
| 49 |
Fix a problem that comes up when using generated columns that evaluate to a
constant in an index and then making use of that index in a join.
FossilOrigin-Name: 8b12e95fec7ce6e0de82a04ca3dfcf1a8e62e233b7382aa28a8a9be6e862b1af
| 0 |
static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
int alen)
{
struct sock *sk = sock->sk;
struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
struct nfc_llcp_local *local;
struct nfc_dev *dev;
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
memset(&llcp_addr, 0, sizeof(llcp_addr));
len = min_t(unsigned int, sizeof(llcp_addr), alen);
memcpy(&llcp_addr, addr, len);
lock_sock(sk);
if (sk->sk_state != LLCP_CLOSED) {
ret = -EBADFD;
goto error;
}
dev = nfc_get_device(llcp_addr.dev_idx);
if (dev == NULL) {
ret = -ENODEV;
goto error;
}
local = nfc_llcp_find_local(dev);
if (local == NULL) {
ret = -ENODEV;
goto put_dev;
}
llcp_sock->dev = dev;
llcp_sock->local = nfc_llcp_local_get(local);
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
nfc_llcp_sock_link(&local->raw_sockets, sk);
sk->sk_state = LLCP_BOUND;
put_dev:
nfc_put_device(dev);
error:
release_sock(sk);
return ret;
}
|
Safe
|
[
"CWE-276"
] |
linux
|
3a359798b176183ef09efb7a3dc59abad1cc7104
|
2.5512413882650192e+38
| 54 |
nfc: enforce CAP_NET_RAW for raw sockets
When creating a raw AF_NFC socket, CAP_NET_RAW needs to be checked
first.
Signed-off-by: Ori Nimron <orinimron123@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
DEFUN (address_family_vpnv4,
address_family_vpnv4_cmd,
"address-family vpnv4",
"Enter Address Family command mode\n"
"Address family\n")
{
vty->node = BGP_VPNV4_NODE;
return CMD_SUCCESS;
}
|
Safe
|
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
|
1.6320275462588933e+38
| 9 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <paul.jakma@sun.com>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
| 0 |
static int dualshock4_get_calibration_data(struct sony_sc *sc)
{
u8 *buf;
int ret;
short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus;
short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus;
short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus;
short gyro_speed_plus, gyro_speed_minus;
short acc_x_plus, acc_x_minus;
short acc_y_plus, acc_y_minus;
short acc_z_plus, acc_z_minus;
int speed_2x;
int range_2g;
/* For Bluetooth we use a different request, which supports CRC.
* Note: in Bluetooth mode feature report 0x02 also changes the state
* of the controller, so that it sends input reports of type 0x11.
*/
if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE)) {
buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = hid_hw_raw_request(sc->hdev, 0x02, buf,
DS4_FEATURE_REPORT_0x02_SIZE,
HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret < 0)
goto err_stop;
} else {
u8 bthdr = 0xA3;
u32 crc;
u32 report_crc;
int retries;
buf = kmalloc(DS4_FEATURE_REPORT_0x05_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (retries = 0; retries < 3; retries++) {
ret = hid_hw_raw_request(sc->hdev, 0x05, buf,
DS4_FEATURE_REPORT_0x05_SIZE,
HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret < 0)
goto err_stop;
/* CRC check */
crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
crc = ~crc32_le(crc, buf, DS4_FEATURE_REPORT_0x05_SIZE-4);
report_crc = get_unaligned_le32(&buf[DS4_FEATURE_REPORT_0x05_SIZE-4]);
if (crc != report_crc) {
hid_warn(sc->hdev, "DualShock 4 calibration report's CRC check failed, received crc 0x%0x != 0x%0x\n",
report_crc, crc);
if (retries < 2) {
hid_warn(sc->hdev, "Retrying DualShock 4 get calibration report request\n");
continue;
} else {
ret = -EILSEQ;
goto err_stop;
}
} else {
break;
}
}
}
gyro_pitch_bias = get_unaligned_le16(&buf[1]);
gyro_yaw_bias = get_unaligned_le16(&buf[3]);
gyro_roll_bias = get_unaligned_le16(&buf[5]);
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
gyro_pitch_plus = get_unaligned_le16(&buf[7]);
gyro_pitch_minus = get_unaligned_le16(&buf[9]);
gyro_yaw_plus = get_unaligned_le16(&buf[11]);
gyro_yaw_minus = get_unaligned_le16(&buf[13]);
gyro_roll_plus = get_unaligned_le16(&buf[15]);
gyro_roll_minus = get_unaligned_le16(&buf[17]);
} else {
/* BT + Dongle */
gyro_pitch_plus = get_unaligned_le16(&buf[7]);
gyro_yaw_plus = get_unaligned_le16(&buf[9]);
gyro_roll_plus = get_unaligned_le16(&buf[11]);
gyro_pitch_minus = get_unaligned_le16(&buf[13]);
gyro_yaw_minus = get_unaligned_le16(&buf[15]);
gyro_roll_minus = get_unaligned_le16(&buf[17]);
}
gyro_speed_plus = get_unaligned_le16(&buf[19]);
gyro_speed_minus = get_unaligned_le16(&buf[21]);
acc_x_plus = get_unaligned_le16(&buf[23]);
acc_x_minus = get_unaligned_le16(&buf[25]);
acc_y_plus = get_unaligned_le16(&buf[27]);
acc_y_minus = get_unaligned_le16(&buf[29]);
acc_z_plus = get_unaligned_le16(&buf[31]);
acc_z_minus = get_unaligned_le16(&buf[33]);
/* Set gyroscope calibration and normalization parameters.
* Data values will be normalized to 1/DS4_GYRO_RES_PER_DEG_S degree/s.
*/
speed_2x = (gyro_speed_plus + gyro_speed_minus);
sc->ds4_calib_data[0].abs_code = ABS_RX;
sc->ds4_calib_data[0].bias = gyro_pitch_bias;
sc->ds4_calib_data[0].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
sc->ds4_calib_data[0].sens_denom = gyro_pitch_plus - gyro_pitch_minus;
sc->ds4_calib_data[1].abs_code = ABS_RY;
sc->ds4_calib_data[1].bias = gyro_yaw_bias;
sc->ds4_calib_data[1].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
sc->ds4_calib_data[1].sens_denom = gyro_yaw_plus - gyro_yaw_minus;
sc->ds4_calib_data[2].abs_code = ABS_RZ;
sc->ds4_calib_data[2].bias = gyro_roll_bias;
sc->ds4_calib_data[2].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
sc->ds4_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
/* Set accelerometer calibration and normalization parameters.
* Data values will be normalized to 1/DS4_ACC_RES_PER_G G.
*/
range_2g = acc_x_plus - acc_x_minus;
sc->ds4_calib_data[3].abs_code = ABS_X;
sc->ds4_calib_data[3].bias = acc_x_plus - range_2g / 2;
sc->ds4_calib_data[3].sens_numer = 2*DS4_ACC_RES_PER_G;
sc->ds4_calib_data[3].sens_denom = range_2g;
range_2g = acc_y_plus - acc_y_minus;
sc->ds4_calib_data[4].abs_code = ABS_Y;
sc->ds4_calib_data[4].bias = acc_y_plus - range_2g / 2;
sc->ds4_calib_data[4].sens_numer = 2*DS4_ACC_RES_PER_G;
sc->ds4_calib_data[4].sens_denom = range_2g;
range_2g = acc_z_plus - acc_z_minus;
sc->ds4_calib_data[5].abs_code = ABS_Z;
sc->ds4_calib_data[5].bias = acc_z_plus - range_2g / 2;
sc->ds4_calib_data[5].sens_numer = 2*DS4_ACC_RES_PER_G;
sc->ds4_calib_data[5].sens_denom = range_2g;
err_stop:
kfree(buf);
return ret;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
|
2.6342767676471777e+38
| 139 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: syzbot+403741a091bf41d4ae79@syzkaller.appspotmail.com
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@vger.kernel.org>
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
| 0 |
dns_zone_setdbtype(dns_zone_t *zone,
unsigned int dbargc, const char * const *dbargv)
{
isc_result_t result = ISC_R_SUCCESS;
char **argv = NULL;
unsigned int i;
REQUIRE(DNS_ZONE_VALID(zone));
REQUIRE(dbargc >= 1);
REQUIRE(dbargv != NULL);
LOCK_ZONE(zone);
/* Set up a new database argument list. */
argv = isc_mem_get(zone->mctx, dbargc * sizeof(*argv));
if (argv == NULL) {
goto nomem;
}
for (i = 0; i < dbargc; i++) {
argv[i] = NULL;
}
for (i = 0; i < dbargc; i++) {
argv[i] = isc_mem_strdup(zone->mctx, dbargv[i]);
if (argv[i] == NULL)
goto nomem;
}
/* Free the old list. */
zone_freedbargs(zone);
zone->db_argc = dbargc;
zone->db_argv = argv;
result = ISC_R_SUCCESS;
goto unlock;
nomem:
if (argv != NULL) {
for (i = 0; i < dbargc; i++) {
if (argv[i] != NULL) {
isc_mem_free(zone->mctx, argv[i]);
}
}
isc_mem_put(zone->mctx, argv, dbargc * sizeof(*argv));
}
result = ISC_R_NOMEMORY;
unlock:
UNLOCK_ZONE(zone);
return (result);
}
|
Safe
|
[
"CWE-327"
] |
bind9
|
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
|
7.9413001486681305e+37
| 50 |
Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key.
| 0 |
static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
struct domain_device *dev, *n;
list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
if (!dev_is_sata(dev))
continue;
sas_ata_wait_eh(dev);
/* if libata failed to power manage the device, tear it down */
if (ata_dev_disabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, func, -ENODEV);
}
}
|
Safe
|
[
"CWE-284"
] |
linux
|
0558f33c06bb910e2879e355192227a8e8f0219d
|
3.0219029277596285e+37
| 15 |
scsi: libsas: direct call probe and destruct
In commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery
competing with ata error handling") introduced disco mutex to prevent
rediscovery competing with ata error handling and put the whole
revalidation in the mutex. But the rphy add/remove needs to wait for the
error handling which also grabs the disco mutex. This may leads to dead
lock.So the probe and destruct event were introduce to do the rphy
add/remove asynchronously and out of the lock.
The asynchronously processed workers makes the whole discovery process
not atomic, the other events may interrupt the process. For example,
if a loss of signal event inserted before the probe event, the
sas_deform_port() is called and the port will be deleted.
And sas_port_delete() may run before the destruct event, but the
port-x:x is the top parent of end device or expander. This leads to
a kernel WARNING such as:
[ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22'
[ 82.042983] ------------[ cut here ]------------
[ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237
sysfs_remove_group+0x94/0xa0
[ 82.043059] Call trace:
[ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0
[ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70
[ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308
[ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60
[ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80
[ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0
[ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50
[ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0
[ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0
[ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490
[ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128
[ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50
Make probe and destruct a direct call in the disco and revalidate function,
but put them outside the lock. The whole discovery or revalidate won't
be interrupted by other events. And the DISCE_PROBE and DISCE_DESTRUCT
event are deleted as a result of the direct call.
Introduce a new list to destruct the sas_port and put the port delete after
the destruct. This makes sure the right order of destroying the sysfs
kobject and fix the warning above.
In sas_ex_revalidate_domain() have a loop to find all broadcasted
device, and sometimes we have a chance to find the same expander twice.
Because the sas_port will be deleted at the end of the whole revalidate
process, sas_port with the same name cannot be added before this.
Otherwise the sysfs will complain of creating duplicate filename. Since
the LLDD will send broadcast for every device change, we can only
process one expander's revalidation.
[mkp: kbuild test robot warning]
Signed-off-by: Jason Yan <yanaijie@huawei.com>
CC: John Garry <john.garry@huawei.com>
CC: Johannes Thumshirn <jthumshirn@suse.de>
CC: Ewan Milne <emilne@redhat.com>
CC: Christoph Hellwig <hch@lst.de>
CC: Tomas Henzl <thenzl@redhat.com>
CC: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
| 0 |
zfs_id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
char *buf, size_t len, boolean_t addok)
{
uint64_t fuid;
int domainid = 0;
if (domain && domain[0]) {
domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
if (domainid == -1)
return (SET_ERROR(ENOENT));
}
fuid = FUID_ENCODE(domainid, rid);
(void) snprintf(buf, len, "%llx", (longlong_t)fuid);
return (0);
}
|
Safe
|
[
"CWE-200",
"CWE-732"
] |
zfs
|
716b53d0a14c72bda16c0872565dd1909757e73f
|
1.5851394216156464e+37
| 15 |
FreeBSD: Fix UNIX permissions checking
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #10727
| 0 |
Create an initialization vector (IV) */
PHP_FUNCTION(mcrypt_create_iv)
{
char *iv;
long source = RANDOM;
long size;
int n = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &size, &source) == FAILURE) {
return;
}
if (size <= 0 || size >= INT_MAX) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot create an IV with a size of less than 1 or greater than %d", INT_MAX);
RETURN_FALSE;
}
iv = ecalloc(size + 1, 1);
if (source == RANDOM || source == URANDOM) {
#if PHP_WIN32
/* random/urandom equivalent on Windows */
BYTE *iv_b = (BYTE *) iv;
if (php_win32_get_random_bytes(iv_b, (size_t) size) == FAILURE){
efree(iv);
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Could not gather sufficient random data");
RETURN_FALSE;
}
n = size;
#else
int *fd = &MCG(fd[source]);
size_t read_bytes = 0;
if (*fd < 0) {
*fd = open(source == RANDOM ? "/dev/random" : "/dev/urandom", O_RDONLY);
if (*fd < 0) {
efree(iv);
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot open source device");
RETURN_FALSE;
}
}
while (read_bytes < size) {
n = read(*fd, iv + read_bytes, size - read_bytes);
if (n < 0) {
break;
}
read_bytes += n;
}
n = read_bytes;
if (n < size) {
efree(iv);
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Could not gather sufficient random data");
RETURN_FALSE;
}
#endif
} else {
n = size;
while (size) {
iv[--size] = (char) (255.0 * php_rand(TSRMLS_C) / RAND_MAX);
}
}
RETURN_STRINGL(iv, n, 0);
|
Vulnerable
|
[
"CWE-190"
] |
php-src
|
6c5211a0cef0cc2854eaa387e0eb036e012904d0
|
2.979420812918301e+38
| 64 |
Fix bug #72455: Heap Overflow due to integer overflows
| 1 |
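The row above is flagged as vulnerable to integer overflow in the IV size handling. The usual fix pattern is to validate the user-supplied size before it feeds an allocation such as size + 1; a minimal sketch in standard C (a generic illustration, not the actual PHP fix):
#include <limits.h>
#include <stdlib.h>

/* Validate a user-supplied IV size before allocating size + 1 bytes.
 * Rejecting values at or above INT_MAX keeps the "+ 1" from wrapping. */
static unsigned char *alloc_iv(long size)
{
    if (size <= 0 || size >= INT_MAX)
        return NULL;
    return calloc((size_t)size + 1, 1);
}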
void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout)
{
h2_beam_lock bl;
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
beam->timeout = timeout;
leave_yellow(beam, &bl);
}
}
|
Safe
|
[
"CWE-400"
] |
mod_h2
|
83a2e3866918ce6567a683eb4c660688d047ee81
|
2.3485511819273844e+38
| 9 |
* fixes a race condition where aborting streams triggers an unnecessary timeout.
| 0 |
static inline void memory_region_shift_read_access(uint64_t *value,
signed shift,
uint64_t mask,
uint64_t tmp)
{
if (shift >= 0) {
*value |= (tmp & mask) << shift;
} else {
*value |= (tmp & mask) >> -shift;
}
}
|
Safe
|
[
"CWE-476"
] |
unicorn
|
3d3deac5e6d38602b689c4fef5dac004f07a2e63
|
2.7949324608340962e+38
| 11 |
Fix crash when mapping a big memory and calling uc_close
| 0 |
void xt_proto_fini(int af)
{
#ifdef CONFIG_PROC_FS
char buf[XT_FUNCTION_MAXNAMELEN];
strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
proc_net_remove(buf);
strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
proc_net_remove(buf);
strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
proc_net_remove(buf);
#endif /*CONFIG_PROC_FS*/
}
|
Safe
|
[
"CWE-787"
] |
linux
|
9fa492cdc160cd27ce1046cb36f47d3b2b1efa21
|
1.9932906444758223e+38
| 18 |
[NETFILTER]: x_tables: simplify compat API
Split the xt_compat_match/xt_compat_target into smaller type-safe functions
performing just one operation. Handle all alignment and size-related
conversions centrally in these function instead of requiring each module to
implement a full-blown conversion function. Replace ->compat callback by
->compat_from_user and ->compat_to_user callbacks, responsible for
converting just a single private structure.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static OPJ_BOOL opj_j2k_write_first_tile_part(opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager)
{
OPJ_UINT32 l_nb_bytes_written = 0;
OPJ_UINT32 l_current_nb_bytes_written;
OPJ_BYTE * l_begin_data = 00;
opj_tcd_t * l_tcd = 00;
opj_cp_t * l_cp = 00;
l_tcd = p_j2k->m_tcd;
l_cp = &(p_j2k->m_cp);
l_tcd->cur_pino = 0;
/*Get number of tile parts*/
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = 0;
/* INDEX >> */
/* << INDEX */
l_current_nb_bytes_written = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k, p_data, total_data_size,
&l_current_nb_bytes_written, p_stream,
p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
total_data_size -= l_current_nb_bytes_written;
if (!OPJ_IS_CINEMA(l_cp->rsiz)) {
#if 0
for (compno = 1; compno < p_j2k->m_private_image->numcomps; compno++) {
l_current_nb_bytes_written = 0;
opj_j2k_write_coc_in_memory(p_j2k, compno, p_data, &l_current_nb_bytes_written,
p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
total_data_size -= l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
opj_j2k_write_qcc_in_memory(p_j2k, compno, p_data, &l_current_nb_bytes_written,
p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
total_data_size -= l_current_nb_bytes_written;
}
#endif
if (l_cp->tcps[p_j2k->m_current_tile_number].POC) {
l_current_nb_bytes_written = 0;
opj_j2k_write_poc_in_memory(p_j2k, p_data, &l_current_nb_bytes_written,
p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
total_data_size -= l_current_nb_bytes_written;
}
}
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k, l_tcd, p_data, &l_current_nb_bytes_written,
total_data_size, p_stream, p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
* p_data_written = l_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6, l_nb_bytes_written,
4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz) || OPJ_IS_IMF(l_cp->rsiz)) {
opj_j2k_update_tlm(p_j2k, l_nb_bytes_written);
}
return OPJ_TRUE;
}
|
Safe
|
[
"CWE-20"
] |
openjpeg
|
73fdf28342e4594019af26eb6a347a34eceb6296
|
1.6544999797222551e+38
| 84 |
opj_j2k_write_sod(): avoid potential heap buffer overflow (fixes #1299) (probably master only)
| 0 |