func
string | target
string | cwe
list | project
string | commit_id
string | hash
string | size
int64 | message
string | vul
int64 |
---|---|---|---|---|---|---|---|---|
int inet6_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk == NULL)
return -EINVAL;
/* Free mc lists */
ipv6_sock_mc_close(sk);
/* Free ac lists */
ipv6_sock_ac_close(sk);
return inet_release(sock);
}
|
Safe
|
[] |
net
|
5f81bd2e5d804ca93f3ec8873451b22d2f454721
|
1.3206880635666308e+38
| 15 |
ipv6: export a stub for IPv6 symbols used by vxlan
In case IPv6 is compiled as a module, introduce a stub
for ipv6_sock_mc_join and ipv6_sock_mc_drop etc.. It will be used
by vxlan module. Suggested by Ben.
This is an ugly but easy solution for now.
Cc: Ben Hutchings <bhutchings@solarflare.com>
Cc: Stephen Hemminger <stephen@networkplumber.org>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
int unit_add_default_target_dependency(Unit *u, Unit *target) {
assert(u);
assert(target);
if (target->type != UNIT_TARGET)
return 0;
/* Only add the dependency if both units are loaded, so that
* that loop check below is reliable */
if (u->load_state != UNIT_LOADED ||
target->load_state != UNIT_LOADED)
return 0;
/* If either side wants no automatic dependencies, then let's
* skip this */
if (!u->default_dependencies ||
!target->default_dependencies)
return 0;
/* Don't create loops */
if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
return 0;
return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}
|
Safe
|
[
"CWE-269"
] |
systemd
|
bf65b7e0c9fc215897b676ab9a7c9d1c688143ba
|
3.2533011573266257e+37
| 25 |
core: imply NNP and SUID/SGID restriction for DynamicUser=yes service
Let's be safe, rather than sorry. This way DynamicUser=yes services can
neither take benefit of, nor create SUID/SGID binaries.
Given that DynamicUser= is a recent addition only we should be able to
get away with turning this on, even though this is strictly speaking a
binary compatibility breakage.
| 0 |
QPDFObjectHandle::Members::~Members()
{
}
|
Safe
|
[
"CWE-399",
"CWE-674"
] |
qpdf
|
b4d6cf6836ce025ba1811b7bbec52680c7204223
|
5.505355478679771e+37
| 3 |
Limit depth of nesting in direct objects (fixes #202)
This fixes CVE-2018-9918.
| 0 |
\param referer Referer used, as a C-string.
\return Value of \c filename_local.
\note Use the \c libcurl library, or the external binaries \c wget or \c curl to perform the download.
**/
inline char *load_network(const char *const url, char *const filename_local,
const unsigned int timeout, const bool try_fallback,
const char *const referer) {
if (!url)
throw CImgArgumentException("cimg::load_network(): Specified URL is (null).");
if (!filename_local)
throw CImgArgumentException("cimg::load_network(): Specified destination string is (null).");
const char *const __ext = cimg::split_filename(url), *const _ext = (*__ext && __ext>url)?__ext - 1:__ext;
CImg<char> ext = CImg<char>::string(_ext);
std::FILE *file = 0;
*filename_local = 0;
if (ext._width>16 || !cimg::strncasecmp(ext,"cgi",3)) *ext = 0;
else cimg::strwindows_reserved(ext);
do {
cimg_snprintf(filename_local,256,"%s%c%s%s",
cimg::temporary_path(),cimg_file_separator,cimg::filenamerand(),ext._data);
if ((file=cimg::std_fopen(filename_local,"rb"))!=0) cimg::fclose(file);
} while (file);
#ifdef cimg_use_curl
const unsigned int omode = cimg::exception_mode();
cimg::exception_mode(0);
try {
CURL *curl = 0;
CURLcode res;
curl = curl_easy_init();
if (curl) {
file = cimg::fopen(filename_local,"wb");
curl_easy_setopt(curl,CURLOPT_URL,url);
curl_easy_setopt(curl,CURLOPT_WRITEFUNCTION,0);
curl_easy_setopt(curl,CURLOPT_WRITEDATA,file);
curl_easy_setopt(curl,CURLOPT_SSL_VERIFYPEER,0L);
curl_easy_setopt(curl,CURLOPT_SSL_VERIFYHOST,0L);
curl_easy_setopt(curl,CURLOPT_FOLLOWLOCATION,1L);
if (timeout) curl_easy_setopt(curl,CURLOPT_TIMEOUT,(long)timeout);
if (std::strchr(url,'?')) curl_easy_setopt(curl,CURLOPT_HTTPGET,1L);
if (referer) curl_easy_setopt(curl,CURLOPT_REFERER,referer);
res = curl_easy_perform(curl);
curl_easy_cleanup(curl);
cimg::fseek(file,0,SEEK_END); // Check if file size is 0
const cimg_ulong siz = cimg::ftell(file);
cimg::fclose(file);
if (siz>0 && res==CURLE_OK) {
cimg::exception_mode(omode);
return filename_local;
} else std::remove(filename_local);
}
} catch (...) { }
cimg::exception_mode(omode);
if (!try_fallback) throw CImgIOException("cimg::load_network(): Failed to load file '%s' with libcurl.",url);
#endif
CImg<char> command((unsigned int)std::strlen(url) + 64);
cimg::unused(try_fallback);
// Try with 'curl' first.
if (timeout) {
if (referer)
cimg_snprintf(command,command._width,"%s -e %s -m %u -f --silent --compressed -o \"%s\" \"%s\"",
cimg::curl_path(),referer,timeout,filename_local,
CImg<char>::string(url)._system_strescape().data());
else
cimg_snprintf(command,command._width,"%s -m %u -f --silent --compressed -o \"%s\" \"%s\"",
cimg::curl_path(),timeout,filename_local,
CImg<char>::string(url)._system_strescape().data());
} else {
if (referer)
cimg_snprintf(command,command._width,"%s -e %s -f --silent --compressed -o \"%s\" \"%s\"",
cimg::curl_path(),referer,filename_local,
CImg<char>::string(url)._system_strescape().data());
else
cimg_snprintf(command,command._width,"%s -f --silent --compressed -o \"%s\" \"%s\"",
cimg::curl_path(),filename_local,
CImg<char>::string(url)._system_strescape().data());
}
cimg::system(command);
if (!(file=cimg::std_fopen(filename_local,"rb"))) {
// Try with 'wget' otherwise.
if (timeout) {
if (referer)
cimg_snprintf(command,command._width,"%s --referer=%s -T %u -q -r -l 0 --no-cache -O \"%s\" \"%s\"",
cimg::wget_path(),referer,timeout,filename_local,
CImg<char>::string(url)._system_strescape().data());
else
cimg_snprintf(command,command._width,"%s -T %u -q -r -l 0 --no-cache -O \"%s\" \"%s\"",
cimg::wget_path(),timeout,filename_local,
CImg<char>::string(url)._system_strescape().data());
} else {
if (referer)
cimg_snprintf(command,command._width,"%s --referer=%s -q -r -l 0 --no-cache -O \"%s\" \"%s\"",
cimg::wget_path(),referer,filename_local,
CImg<char>::string(url)._system_strescape().data());
else
cimg_snprintf(command,command._width,"%s -q -r -l 0 --no-cache -O \"%s\" \"%s\"",
cimg::wget_path(),filename_local,
CImg<char>::string(url)._system_strescape().data());
}
cimg::system(command);
if (!(file=cimg::std_fopen(filename_local,"rb")))
throw CImgIOException("cimg::load_network(): Failed to load file '%s' with external commands "
"'wget' or 'curl'.",url);
cimg::fclose(file);
// Try gunzip it.
cimg_snprintf(command,command._width,"%s.gz",filename_local);
std::rename(filename_local,command);
cimg_snprintf(command,command._width,"%s --quiet \"%s.gz\"",
gunzip_path(),filename_local);
cimg::system(command);
file = cimg::std_fopen(filename_local,"rb");
if (!file) {
cimg_snprintf(command,command._width,"%s.gz",filename_local);
std::rename(command,filename_local);
file = cimg::std_fopen(filename_local,"rb");
}
}
cimg::fseek(file,0,SEEK_END); // Check if file size is 0
if (std::ftell(file)<=0)
throw CImgIOException("cimg::load_network(): Failed to load URL '%s' with external commands "
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
|
1.957564133596921e+38
| 127 |
.
| 0 |
CImg<Tlong> get_cumulate(const char *const axes) const {
return CImg<Tlong>(*this,false).cumulate(axes);
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
1.2796837751348589e+36
| 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
| 0 |
static GF_Node *lsr_read_data(GF_LASeRCodec *lsr, u32 node_tag)
{
GF_Node *elt = gf_node_new(lsr->sg, node_tag);
lsr_read_id(lsr, elt);
lsr_read_rare(lsr, elt);
lsr_read_any_attribute(lsr, elt, 1);
lsr_read_group_content(lsr, elt, 0);
return elt;
}
|
Safe
|
[
"CWE-190"
] |
gpac
|
faa75edde3dfeba1e2cf6ffa48e45a50f1042096
|
1.1166003593344495e+38
| 9 |
fixed #2213
| 0 |
void setListenerLimit(const uint32_t num_conns) {
config_helper_.addRuntimeOverride("envoy.resource_limits.listener.listener_0.connection_limit",
std::to_string(num_conns));
}
|
Safe
|
[
"CWE-400"
] |
envoy
|
dfddb529e914d794ac552e906b13d71233609bf7
|
3.0180739336150704e+38
| 4 |
listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <tony@allen.gg>
| 0 |
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
struct mem_cgroup *iter;
for_each_mem_cgroup_tree(iter, memcg)
mem_cgroup_oom_notify_cb(iter);
}
|
Safe
|
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
|
1.782616532483913e+38
| 7 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: <stable@vger.kernel.org> [2.6.38+]
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
TEST_F(QueryPlannerTest, OrInexactCoveredMultikey) {
// true means multikey
addIndex(BSON("names" << 1), true);
runQuery(fromjson("{$or: [{names: 'dave'}, {names: /joe/}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
"{fetch: {filter: {$or: [{names: 'dave'}, {names: /joe/}]}, "
"node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
}
|
Safe
|
[] |
mongo
|
ee97c0699fd55b498310996ee002328e533681a3
|
1.4780526184023164e+38
| 11 |
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
| 0 |
test_util_memarea(void)
{
memarea_t *area = memarea_new();
char *p1, *p2, *p3, *p1_orig;
void *malloced_ptr = NULL;
int i;
test_assert(area);
p1_orig = p1 = memarea_alloc(area,64);
p2 = memarea_alloc_zero(area,52);
p3 = memarea_alloc(area,11);
test_assert(memarea_owns_ptr(area, p1));
test_assert(memarea_owns_ptr(area, p2));
test_assert(memarea_owns_ptr(area, p3));
/* Make sure we left enough space. */
test_assert(p1+64 <= p2);
test_assert(p2+52 <= p3);
/* Make sure we aligned. */
test_eq(((uintptr_t)p1) % sizeof(void*), 0);
test_eq(((uintptr_t)p2) % sizeof(void*), 0);
test_eq(((uintptr_t)p3) % sizeof(void*), 0);
test_assert(!memarea_owns_ptr(area, p3+8192));
test_assert(!memarea_owns_ptr(area, p3+30));
test_assert(tor_mem_is_zero(p2, 52));
/* Make sure we don't overalign. */
p1 = memarea_alloc(area, 1);
p2 = memarea_alloc(area, 1);
test_eq(p1+sizeof(void*), p2);
{
malloced_ptr = tor_malloc(64);
test_assert(!memarea_owns_ptr(area, malloced_ptr));
tor_free(malloced_ptr);
}
/* memarea_memdup */
{
malloced_ptr = tor_malloc(64);
crypto_rand((char*)malloced_ptr, 64);
p1 = memarea_memdup(area, malloced_ptr, 64);
test_assert(p1 != malloced_ptr);
test_memeq(p1, malloced_ptr, 64);
tor_free(malloced_ptr);
}
/* memarea_strdup. */
p1 = memarea_strdup(area,"");
p2 = memarea_strdup(area, "abcd");
test_assert(p1);
test_assert(p2);
test_streq(p1, "");
test_streq(p2, "abcd");
/* memarea_strndup. */
{
const char *s = "Ad ogni porta batte la morte e grida: il nome!";
/* (From Turandot, act 3.) */
size_t len = strlen(s);
p1 = memarea_strndup(area, s, 1000);
p2 = memarea_strndup(area, s, 10);
test_streq(p1, s);
test_assert(p2 >= p1 + len + 1);
test_memeq(s, p2, 10);
test_eq(p2[10], '\0');
p3 = memarea_strndup(area, s, len);
test_streq(p3, s);
p3 = memarea_strndup(area, s, len-1);
test_memeq(s, p3, len-1);
test_eq(p3[len-1], '\0');
}
memarea_clear(area);
p1 = memarea_alloc(area, 1);
test_eq(p1, p1_orig);
memarea_clear(area);
/* Check for running over an area's size. */
for (i = 0; i < 512; ++i) {
p1 = memarea_alloc(area, crypto_rand_int(5)+1);
test_assert(memarea_owns_ptr(area, p1));
}
memarea_assert_ok(area);
/* Make sure we can allocate a too-big object. */
p1 = memarea_alloc_zero(area, 9000);
p2 = memarea_alloc_zero(area, 16);
test_assert(memarea_owns_ptr(area, p1));
test_assert(memarea_owns_ptr(area, p2));
done:
memarea_drop_all(area);
tor_free(malloced_ptr);
}
|
Safe
|
[] |
tor
|
973c18bf0e84d14d8006a9ae97fde7f7fb97e404
|
1.864571420166671e+38
| 93 |
Fix assertion failure in tor_timegm.
Fixes bug 6811.
| 0 |
position_check(NCURSES_SP_DCLx int expected_y, int expected_x, char *legend)
/* check to see if the real cursor position matches the virtual */
{
char buf[20];
char *s;
int y, x;
if (!_nc_tracing || (expected_y < 0 && expected_x < 0))
return;
NCURSES_SP_NAME(_nc_flush) (NCURSES_SP_ARG);
memset(buf, '\0', sizeof(buf));
NCURSES_PUTP2_FLUSH("cpr", "\033[6n"); /* only works on ANSI-compatibles */
*(s = buf) = 0;
do {
int ask = sizeof(buf) - 1 - (s - buf);
int got = read(0, s, ask);
if (got == 0)
break;
s += got;
} while (strchr(buf, 'R') == 0);
_tracef("probe returned %s", _nc_visbuf(buf));
/* try to interpret as a position report */
if (sscanf(buf, "\033[%d;%dR", &y, &x) != 2) {
_tracef("position probe failed in %s", legend);
} else {
if (expected_x < 0)
expected_x = x - 1;
if (expected_y < 0)
expected_y = y - 1;
if (y - 1 != expected_y || x - 1 != expected_x) {
NCURSES_SP_NAME(beep) (NCURSES_SP_ARG);
NCURSES_SP_NAME(tputs) (NCURSES_SP_ARGx
TIPARM_2("\033[%d;%dH",
expected_y + 1,
expected_x + 1),
1, NCURSES_SP_NAME(_nc_outch));
_tracef("position seen (%d, %d) doesn't match expected one (%d, %d) in %s",
y - 1, x - 1, expected_y, expected_x, legend);
} else {
_tracef("position matches OK in %s", legend);
}
}
}
|
Safe
|
[] |
ncurses
|
790a85dbd4a81d5f5d8dd02a44d84f01512ef443
|
3.0757903570382927e+38
| 45 |
ncurses 6.2 - patch 20200531
+ correct configure version-check/warnng for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor").
| 0 |
ins_horscroll(void)
{
pos_T tpos;
undisplay_dollar();
tpos = curwin->w_cursor;
if (gui_do_horiz_scroll(scrollbar_value, FALSE))
{
start_arrow(&tpos);
can_cindent = TRUE;
}
}
|
Safe
|
[
"CWE-122",
"CWE-787"
] |
vim
|
0971c7a4e537ea120a6bb2195960be8d0815e97b
|
3.804482043483558e+37
| 12 |
patch 8.2.5162: reading before the start of the line with BS in Replace mode
Problem: Reading before the start of the line with BS in Replace mode.
Solution: Check the cursor column is more than zero.
| 0 |
comics_document_render_pixbuf (EvDocument *document,
EvRenderContext *rc)
{
GdkPixbufLoader *loader;
GdkPixbuf *rotated_pixbuf, *tmp_pixbuf;
char **argv;
guchar buf[4096];
gboolean success;
gint outpipe = -1;
GPid child_pid;
gssize bytes;
gint width, height;
gchar *filename;
ComicsDocument *comics_document = COMICS_DOCUMENT (document);
if (!comics_document->decompress_tmp) {
argv = extract_argv (document, rc->page->index);
success = g_spawn_async_with_pipes (NULL, argv, NULL,
G_SPAWN_SEARCH_PATH |
G_SPAWN_STDERR_TO_DEV_NULL,
NULL, NULL,
&child_pid,
NULL, &outpipe, NULL, NULL);
g_strfreev (argv);
g_return_val_if_fail (success == TRUE, NULL);
loader = gdk_pixbuf_loader_new ();
g_signal_connect (loader, "size-prepared",
G_CALLBACK (render_pixbuf_size_prepared_cb),
&rc->scale);
while (outpipe >= 0) {
bytes = read (outpipe, buf, 4096);
if (bytes > 0) {
gdk_pixbuf_loader_write (loader, buf, bytes,
NULL);
} else if (bytes <= 0) {
close (outpipe);
gdk_pixbuf_loader_close (loader, NULL);
outpipe = -1;
}
}
tmp_pixbuf = gdk_pixbuf_loader_get_pixbuf (loader);
rotated_pixbuf =
gdk_pixbuf_rotate_simple (tmp_pixbuf,
360 - rc->rotation);
g_spawn_close_pid (child_pid);
g_object_unref (loader);
} else {
filename =
g_build_filename (comics_document->dir,
(char *) comics_document->page_names->pdata[rc->page->index],
NULL);
gdk_pixbuf_get_file_info (filename, &width, &height);
tmp_pixbuf =
gdk_pixbuf_new_from_file_at_size (
filename, width * (rc->scale) + 0.5,
height * (rc->scale) + 0.5, NULL);
rotated_pixbuf =
gdk_pixbuf_rotate_simple (tmp_pixbuf,
360 - rc->rotation);
g_free (filename);
g_object_unref (tmp_pixbuf);
}
return rotated_pixbuf;
}
|
Safe
|
[] |
atril
|
f4291fd62f7dfe6460d2406a979ccfac0c68dd59
|
7.854988323768476e+37
| 69 |
comics: make the files containing "--checkpoint-action=" unsupported
Fixes #257
| 0 |
void SFS_Expression(ScriptParser *parser)
{
u32 val = gf_bs_read_int(parser->bs, NUMBITS_EXPR_TYPE);
if (parser->codec->LastError) return;
switch(val) {
case ET_CURVED_EXPR:
SFS_AddString(parser, "(");
SFS_CompoundExpression(parser);
SFS_AddString(parser, ")");
break;
case ET_NEGATIVE:
SFS_AddString(parser, "-");
SFS_Expression(parser);
break;
case ET_NOT:
SFS_AddString(parser, "!");
SFS_Expression(parser);
break;
case ET_ONESCOMP:
SFS_AddString(parser, "~");
SFS_Expression(parser);
break;
case ET_INCREMENT:
SFS_AddString(parser, "++");
SFS_Expression(parser);
break;
case ET_DECREMENT:
SFS_AddString(parser, "--");
SFS_Expression(parser);
break;
case ET_POST_INCREMENT:
SFS_Expression(parser);
SFS_AddString(parser, "++");
break;
case ET_POST_DECREMENT:
SFS_Expression(parser);
SFS_AddString(parser, "--");
break;
case ET_CONDTEST:
SFS_Expression(parser);
SFS_AddString(parser, " ? ");
SFS_Expression(parser);
SFS_AddString(parser, " : ");
SFS_Expression(parser);
break;
case ET_STRING:
SFS_AddString(parser, "'");
SFS_GetString(parser);
SFS_AddString(parser, "'");
break;
case ET_NUMBER:
SFS_GetNumber(parser);
break;
case ET_IDENTIFIER:
SFS_Identifier(parser);
break;
case ET_FUNCTION_CALL:
SFS_FunctionCall(parser);
break;
case ET_NEW:
SFS_NewObject(parser);
break;
case ET_OBJECT_MEMBER_ACCESS:
SFS_ObjectMemberAccess(parser);
break;
case ET_OBJECT_METHOD_CALL:
SFS_ObjectMethodCall(parser);
break;
case ET_ARRAY_DEREFERENCE:
SFS_ArrayDeref(parser);
break;
case ET_MULTIPLY:
SFS_Expression(parser);
SFS_AddString(parser, "*");
SFS_Expression(parser);
break;
case ET_DIVIDE:
SFS_Expression(parser);
SFS_AddString(parser, "/");
SFS_Expression(parser);
break;
case ET_MOD:
SFS_Expression(parser);
SFS_AddString(parser, "%");
SFS_Expression(parser);
break;
case ET_PLUS:
SFS_Expression(parser);
SFS_AddString(parser, "+");
SFS_Expression(parser);
break;
case ET_MINUS:
SFS_Expression(parser);
SFS_AddString(parser, "-");
SFS_Expression(parser);
break;
case ET_LSHIFT:
SFS_Expression(parser);
SFS_AddString(parser, "<<");
SFS_Expression(parser);
break;
case ET_RSHIFT:
SFS_Expression(parser);
SFS_AddString(parser, ">>");
SFS_Expression(parser);
break;
case ET_RSHIFTFILL:
SFS_Expression(parser);
SFS_AddString(parser, ">>>");
SFS_Expression(parser);
break;
case ET_AND:
SFS_Expression(parser);
SFS_AddString(parser, "&");
SFS_Expression(parser);
break;
case ET_XOR:
SFS_Expression(parser);
SFS_AddString(parser, "^");
SFS_Expression(parser);
break;
case ET_OR:
SFS_Expression(parser);
SFS_AddString(parser, "|");
SFS_Expression(parser);
break;
case ET_LT:
SFS_Expression(parser);
SFS_AddString(parser, "<");
SFS_Expression(parser);
break;
case ET_LE:
SFS_Expression(parser);
SFS_AddString(parser, "<=");
SFS_Expression(parser);
break;
case ET_GT:
SFS_Expression(parser);
SFS_AddString(parser, ">");
SFS_Expression(parser);
break;
case ET_GE:
SFS_Expression(parser);
SFS_AddString(parser, ">=");
SFS_Expression(parser);
break;
case ET_EQ:
SFS_Expression(parser);
SFS_AddString(parser, "==");
SFS_Expression(parser);
break;
case ET_NE:
SFS_Expression(parser);
SFS_AddString(parser, "!=");
SFS_Expression(parser);
break;
case ET_LAND:
SFS_Expression(parser);
SFS_AddString(parser, "&&");
SFS_Expression(parser);
break;
case ET_LOR:
SFS_Expression(parser);
SFS_AddString(parser, "||");
SFS_Expression(parser);
break;
case ET_ASSIGN:
SFS_Expression(parser);
SFS_AddString(parser, "=");
SFS_Expression(parser);
break;
case ET_PLUSEQ:
SFS_Expression(parser);
SFS_AddString(parser, "+=");
SFS_Expression(parser);
break;
case ET_MINUSEQ:
SFS_Expression(parser);
SFS_AddString(parser, "-=");
SFS_Expression(parser);
break;
case ET_MULTIPLYEQ:
SFS_Expression(parser);
SFS_AddString(parser, "*=");
SFS_Expression(parser);
break;
case ET_DIVIDEEQ:
SFS_Expression(parser);
SFS_AddString(parser, "/=");
SFS_Expression(parser);
break;
case ET_MODEQ:
SFS_Expression(parser);
SFS_AddString(parser, "%=");
SFS_Expression(parser);
break;
case ET_LSHIFTEQ:
SFS_Expression(parser);
SFS_AddString(parser, "<<=");
SFS_Expression(parser);
break;
case ET_RSHIFTEQ:
SFS_Expression(parser);
SFS_AddString(parser, ">>=");
SFS_Expression(parser);
break;
case ET_RSHIFTFILLEQ:
SFS_Expression(parser);
SFS_AddString(parser, ">>>=");
SFS_Expression(parser);
break;
case ET_ANDEQ:
SFS_Expression(parser);
SFS_AddString(parser, "&=");
SFS_Expression(parser);
break;
case ET_XOREQ:
SFS_Expression(parser);
SFS_AddString(parser, "^=");
SFS_Expression(parser);
break;
case ET_OREQ:
SFS_Expression(parser);
SFS_AddString(parser, "|=");
SFS_Expression(parser);
break;
case ET_BOOLEAN:
SFS_GetBoolean(parser);
break;
case ET_VAR:
SFS_AddString(parser, "var ");
SFS_Arguments(parser, GF_TRUE);
break;
case ET_FUNCTION_ASSIGN:
SFS_AddString(parser, "function ");
SFS_Arguments(parser, GF_FALSE);
SFS_StatementBlock(parser, GF_TRUE);
break;
default:
assert(0);
break;
}
}
|
Safe
|
[
"CWE-94"
] |
gpac
|
b5741da08e88e8dcc8da0a7669b92405b9862850
|
4.753049156398897e+37
| 245 |
fix overflow on script_dec (#2052)
| 0 |
bool Item_param::convert_str_value(THD *thd)
{
bool rc= FALSE;
if (state == STRING_VALUE || state == LONG_DATA_VALUE)
{
rc= value.cs_info.convert_if_needed(thd, &str_value);
/* Here str_value is guaranteed to be in final_character_set_of_str_value */
/*
str_value_ptr is returned from val_str(). It must be not alloced
to prevent it's modification by val_str() invoker.
*/
str_value_ptr.set(str_value.ptr(), str_value.length(),
str_value.charset());
/* Synchronize item charset and length with value charset */
fix_charset_and_length_from_str_value(DERIVATION_COERCIBLE);
}
return rc;
}
|
Safe
|
[
"CWE-89"
] |
server
|
b5e16a6e0381b28b598da80b414168ce9a5016e5
|
2.2381260288889623e+38
| 19 |
MDEV-26061 MariaDB server crash at Field::set_default
* Item_default_value::fix_fields creates a copy of its argument's field.
* Field::default_value is changed when its expression is prepared in
unpack_vcol_info_from_frm()
This means we must unpack any vcol expression that includes DEFAULT(x)
strictly after unpacking x->default_value.
To avoid building and solving this dependency graph on every table open,
we update Item_default_value::field->default_value after all vcols
are unpacked and fixed.
| 0 |
static int bt_seq_show(struct seq_file *seq, void *v)
{
struct bt_seq_state *s = seq->private;
struct bt_sock_list *l = s->l;
if (v == SEQ_START_TOKEN) {
seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Src Dst Parent");
if (l->custom_seq_show) {
seq_putc(seq, ' ');
l->custom_seq_show(seq, v);
}
seq_putc(seq, '\n');
} else {
struct sock *sk = sk_entry(v);
struct bt_sock *bt = bt_sk(sk);
seq_printf(seq,
"%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu",
sk,
atomic_read(&sk->sk_refcnt),
sk_rmem_alloc_get(sk),
sk_wmem_alloc_get(sk),
from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk),
&bt->src,
&bt->dst,
bt->parent? sock_i_ino(bt->parent): 0LU);
if (l->custom_seq_show) {
seq_putc(seq, ' ');
l->custom_seq_show(seq, v);
}
seq_putc(seq, '\n');
}
return 0;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
4683f42fde3977bdb4e8a09622788cc8b5313778
|
1.858991589160663e+38
| 39 |
Bluetooth: fix possible info leak in bt_sock_recvmsg()
In case the socket is already shutting down, bt_sock_recvmsg() returns
with 0 without updating msg_namelen leading to net/socket.c leaking the
local, uninitialized sockaddr_storage variable to userland -- 128 bytes
of kernel stack memory.
Fix this by moving the msg_namelen assignment in front of the shutdown
test.
Cc: Marcel Holtmann <marcel@holtmann.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Johan Hedberg <johan.hedberg@gmail.com>
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
int blk_rq_unmap_user(struct bio *bio)
{
struct bio *mapped_bio;
int ret = 0, ret2;
while (bio) {
mapped_bio = bio;
if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
mapped_bio = bio->bi_private;
ret2 = __blk_rq_unmap_user(mapped_bio);
if (ret2 && !ret)
ret = ret2;
mapped_bio = bio;
bio = bio->bi_next;
bio_put(mapped_bio);
}
return ret;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
a0ac402cfcdc904f9772e1762b3fda112dcc56a0
|
1.84687435144988e+38
| 21 |
Don't feed anything but regular iovec's to blk_rq_map_user_iov
In theory we could map other things, but there's a reason that function
is called "user_iov". Using anything else (like splice can do) just
confuses it.
Reported-and-tested-by: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
/* QOM boilerplate: register the MacIO IDE device type with QEMU's type
 * system at module load time. */
static void macio_ide_register_types(void)
{
    type_register_static(&macio_ide_type_info);
}
|
Safe
|
[
"CWE-399"
] |
qemu
|
3251bdcf1c67427d964517053c3d185b46e618e8
|
1.7030318222755198e+38
| 4 |
ide: Correct handling of malformed/short PRDTs
This impacts both BMDMA and AHCI HBA interfaces for IDE.
Currently, we confuse the difference between a PRDT having
"0 bytes" and a PRDT having "0 complete sectors."
When we receive an incomplete sector, inconsistent error checking
leads to an infinite loop wherein the call succeeds, but it
didn't give us enough bytes -- leading us to re-call the
DMA chain over and over again. This leads to, in the BMDMA case,
leaked memory for short PRDTs, and infinite loops and resource
usage in the AHCI case.
The .prepare_buf() callback is reworked to return the number of
bytes that it successfully prepared. 0 is a valid, non-error
answer that means the table was empty and described no bytes.
-1 indicates an error.
Our current implementation uses the io_buffer in IDEState to
ultimately describe the size of a prepared scatter-gather list.
Even though the AHCI PRDT/SGList can be as large as 256GiB, the
AHCI command header limits transactions to just 4GiB. ATA8-ACS3,
however, defines the largest transaction to be an LBA48 command
that transfers 65,536 sectors. With a 512 byte sector size, this
is just 32MiB.
Since our current state structures use the int type to describe
the size of the buffer, and this state is migrated as int32, we
are limited to describing 2GiB buffer sizes unless we change the
migration protocol.
For this reason, this patch begins to unify the assertions in the
IDE pathways that the scatter-gather list provided by either the
AHCI PRDT or the PCI BMDMA PRDs can only describe, at a maximum,
2GiB. This should be resilient enough unless we need a sector
size that exceeds 32KiB.
Further, the likelihood of any guest operating system actually
attempting to transfer this much data in a single operation is
very slim.
To this end, the IDEState variables have been updated to more
explicitly clarify our maximum supported size. Callers to the
prepare_buf callback have been reworked to understand the new
return code, and all versions of the prepare_buf callback have
been adjusted accordingly.
Lastly, the ahci_populate_sglist helper, relied upon by the
AHCI implementation of .prepare_buf() as well as the PCI
implementation of the callback have had overflow assertions
added to help make clear the reasonings behind the various
type changes.
[Added %d -> %"PRId64" fix John sent because off_pos changed from int to
int64_t.
--Stefan]
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1414785819-26209-4-git-send-email-jsnow@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
| 0 |
/* Leave VGA compatibility mode: if the device is currently in
 * QXL_MODE_VGA, log the transition and destroy the primary surface
 * synchronously; otherwise do nothing. */
static void qxl_exit_vga_mode(PCIQXLDevice *d)
{
    if (d->mode == QXL_MODE_VGA) {
        dprint(d, 1, "%s\n", __FUNCTION__);
        qxl_destroy_primary(d, QXL_SYNC);
    }
}
|
Safe
|
[] |
qemu-kvm
|
5ff4e36c804157bd84af43c139f8cd3a59722db9
|
5.837868455922485e+37
| 8 |
qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processses the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on an added QXLInterface::async_complete
and added QXLWorker::*_async additions, in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Alon Levy <alevy@redhat.com>
| 0 |
*/
static void bfq_finish_requeue_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd;
/*
* Requeue and finish hooks are invoked in blk-mq without
* checking whether the involved request is actually still
* referenced in the scheduler. To handle this fact, the
* following two checks make this function exit in case of
* spurious invocations, for which there is nothing to do.
*
* First, check whether rq has nothing to do with an elevator.
*/
if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
return;
/*
* rq either is not associated with any icq, or is an already
* requeued request that has not (yet) been re-inserted into
* a bfq_queue.
*/
if (!rq->elv.icq || !bfqq)
return;
bfqd = bfqq->bfqd;
if (rq->rq_flags & RQF_STARTED)
bfqg_stats_update_completion(bfqq_group(bfqq),
rq->start_time_ns,
rq->io_start_time_ns,
rq->cmd_flags);
if (likely(rq->rq_flags & RQF_STARTED)) {
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
if (rq == bfqd->waited_rq)
bfq_update_inject_limit(bfqd, bfqq);
bfq_completed_request(bfqq, bfqd);
bfq_finish_requeue_request_body(bfqq);
spin_unlock_irqrestore(&bfqd->lock, flags);
} else {
/*
* Request rq may be still/already in the scheduler,
* in which case we need to remove it (this should
* never happen in case of requeue). And we cannot
* defer such a check and removal, to avoid
* inconsistencies in the time interval from the end
* of this function to the start of the deferred work.
* This situation seems to occur only in process
* context, as a consequence of a merge. In the
* current version of the code, this implies that the
* lock is held.
*/
if (!RB_EMPTY_NODE(&rq->rb_node)) {
bfq_remove_request(rq->q, rq);
bfqg_stats_update_io_remove(bfqq_group(bfqq),
rq->cmd_flags);
}
bfq_finish_requeue_request_body(bfqq);
}
/*
* Reset private fields. In case of a requeue, this allows
* this function to correctly do nothing if it is spuriously
* invoked again on this same request (see the check at the
* beginning of the function). Probably, a better general
* design would be to prevent blk-mq from invoking the requeue
* or finish hooks of an elevator, for a request that is not
* referred by that elevator.
*
* Resetting the following fields would break the
* request-insertion logic if rq is re-inserted into a bfq
* internal queue, without a re-preparation. Here we assume
* that re-insertions of requeued requests, without
* re-preparation, can happen only for pass_through or at_head
* requests (which are not re-inserted into bfq internal
* queues).
*/
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
|
Safe
|
[
"CWE-416"
] |
linux
|
2f95fa5c955d0a9987ffdc3a095e2f4e62c5f2a9
|
2.98684037738222e+38
| 87 |
block, bfq: fix use-after-free in bfq_idle_slice_timer_body
In bfq_idle_slice_timer func, bfqq = bfqd->in_service_queue is
not in bfqd-lock critical section. The bfqq, which is not
equal to NULL in bfq_idle_slice_timer, may be freed after passing
to bfq_idle_slice_timer_body. So we will access the freed memory.
In addition, considering the bfqq may be in race, we should
firstly check whether bfqq is in service before doing something
on it in bfq_idle_slice_timer_body func. If the bfqq in race is
not in service, it means the bfqq has been expired through
__bfq_bfqq_expire func, and wait_request flags has been cleared in
__bfq_bfqd_reset_in_service func. So we do not need to re-clear the
wait_request of bfqq which is not in service.
KASAN log is given as follows:
[13058.354613] ==================================================================
[13058.354640] BUG: KASAN: use-after-free in bfq_idle_slice_timer+0xac/0x290
[13058.354644] Read of size 8 at addr ffffa02cf3e63f78 by task fork13/19767
[13058.354646]
[13058.354655] CPU: 96 PID: 19767 Comm: fork13
[13058.354661] Call trace:
[13058.354667] dump_backtrace+0x0/0x310
[13058.354672] show_stack+0x28/0x38
[13058.354681] dump_stack+0xd8/0x108
[13058.354687] print_address_description+0x68/0x2d0
[13058.354690] kasan_report+0x124/0x2e0
[13058.354697] __asan_load8+0x88/0xb0
[13058.354702] bfq_idle_slice_timer+0xac/0x290
[13058.354707] __hrtimer_run_queues+0x298/0x8b8
[13058.354710] hrtimer_interrupt+0x1b8/0x678
[13058.354716] arch_timer_handler_phys+0x4c/0x78
[13058.354722] handle_percpu_devid_irq+0xf0/0x558
[13058.354731] generic_handle_irq+0x50/0x70
[13058.354735] __handle_domain_irq+0x94/0x110
[13058.354739] gic_handle_irq+0x8c/0x1b0
[13058.354742] el1_irq+0xb8/0x140
[13058.354748] do_wp_page+0x260/0xe28
[13058.354752] __handle_mm_fault+0x8ec/0x9b0
[13058.354756] handle_mm_fault+0x280/0x460
[13058.354762] do_page_fault+0x3ec/0x890
[13058.354765] do_mem_abort+0xc0/0x1b0
[13058.354768] el0_da+0x24/0x28
[13058.354770]
[13058.354773] Allocated by task 19731:
[13058.354780] kasan_kmalloc+0xe0/0x190
[13058.354784] kasan_slab_alloc+0x14/0x20
[13058.354788] kmem_cache_alloc_node+0x130/0x440
[13058.354793] bfq_get_queue+0x138/0x858
[13058.354797] bfq_get_bfqq_handle_split+0xd4/0x328
[13058.354801] bfq_init_rq+0x1f4/0x1180
[13058.354806] bfq_insert_requests+0x264/0x1c98
[13058.354811] blk_mq_sched_insert_requests+0x1c4/0x488
[13058.354818] blk_mq_flush_plug_list+0x2d4/0x6e0
[13058.354826] blk_flush_plug_list+0x230/0x548
[13058.354830] blk_finish_plug+0x60/0x80
[13058.354838] read_pages+0xec/0x2c0
[13058.354842] __do_page_cache_readahead+0x374/0x438
[13058.354846] ondemand_readahead+0x24c/0x6b0
[13058.354851] page_cache_sync_readahead+0x17c/0x2f8
[13058.354858] generic_file_buffered_read+0x588/0xc58
[13058.354862] generic_file_read_iter+0x1b4/0x278
[13058.354965] ext4_file_read_iter+0xa8/0x1d8 [ext4]
[13058.354972] __vfs_read+0x238/0x320
[13058.354976] vfs_read+0xbc/0x1c0
[13058.354980] ksys_read+0xdc/0x1b8
[13058.354984] __arm64_sys_read+0x50/0x60
[13058.354990] el0_svc_common+0xb4/0x1d8
[13058.354994] el0_svc_handler+0x50/0xa8
[13058.354998] el0_svc+0x8/0xc
[13058.354999]
[13058.355001] Freed by task 19731:
[13058.355007] __kasan_slab_free+0x120/0x228
[13058.355010] kasan_slab_free+0x10/0x18
[13058.355014] kmem_cache_free+0x288/0x3f0
[13058.355018] bfq_put_queue+0x134/0x208
[13058.355022] bfq_exit_icq_bfqq+0x164/0x348
[13058.355026] bfq_exit_icq+0x28/0x40
[13058.355030] ioc_exit_icq+0xa0/0x150
[13058.355035] put_io_context_active+0x250/0x438
[13058.355038] exit_io_context+0xd0/0x138
[13058.355045] do_exit+0x734/0xc58
[13058.355050] do_group_exit+0x78/0x220
[13058.355054] __wake_up_parent+0x0/0x50
[13058.355058] el0_svc_common+0xb4/0x1d8
[13058.355062] el0_svc_handler+0x50/0xa8
[13058.355066] el0_svc+0x8/0xc
[13058.355067]
[13058.355071] The buggy address belongs to the object at ffffa02cf3e63e70#012 which belongs to the cache bfq_queue of size 464
[13058.355075] The buggy address is located 264 bytes inside of#012 464-byte region [ffffa02cf3e63e70, ffffa02cf3e64040)
[13058.355077] The buggy address belongs to the page:
[13058.355083] page:ffff7e80b3cf9800 count:1 mapcount:0 mapping:ffff802db5c90780 index:0xffffa02cf3e606f0 compound_mapcount: 0
[13058.366175] flags: 0x2ffffe0000008100(slab|head)
[13058.370781] raw: 2ffffe0000008100 ffff7e80b53b1408 ffffa02d730c1c90 ffff802db5c90780
[13058.370787] raw: ffffa02cf3e606f0 0000000000370023 00000001ffffffff 0000000000000000
[13058.370789] page dumped because: kasan: bad access detected
[13058.370791]
[13058.370792] Memory state around the buggy address:
[13058.370797] ffffa02cf3e63e00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fb fb
[13058.370801] ffffa02cf3e63e80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370805] >ffffa02cf3e63f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370808] ^
[13058.370811] ffffa02cf3e63f80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370815] ffffa02cf3e64000: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
[13058.370817] ==================================================================
[13058.370820] Disabling lock debugging due to kernel taint
Here, we directly pass the bfqd to bfq_idle_slice_timer_body func.
--
V2->V3: rewrite the comment as suggested by Paolo Valente
V1->V2: add one comment, and add Fixes and Reported-by tag.
Fixes: aee69d78d ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Reported-by: Wang Wang <wangwang2@huawei.com>
Signed-off-by: Zhiqiang Liu <liuzhiqiang26@huawei.com>
Signed-off-by: Feilong Lin <linfeilong@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
/* Size in bytes of the externally-allocated state buffer expected by the
 * LZ4 *_extState() compression entry points. */
int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
|
Safe
|
[
"CWE-20"
] |
lz4
|
da5373197e84ee49d75b8334d4510689731d6e90
|
2.1589952328623145e+38
| 1 |
Fixed : issue 52 (reported by Ludwig Strigeus)
| 0 |
/**
 * htmlSetMetaEncoding:
 * @doc:  the HTML document
 * @encoding:  the encoding string, or NULL to remove the meta tag
 *
 * Set the current encoding of @doc by creating, updating or (when
 * @encoding is NULL) removing the <meta http-equiv="Content-Type">
 * element under <head>.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
htmlSetMetaEncoding(htmlDocPtr doc, const xmlChar *encoding) {
    htmlNodePtr cur, meta = NULL, head = NULL;
    const xmlChar *content = NULL;
    char newcontent[100];

    newcontent[0] = 0;
    if (doc == NULL)
        return(-1);

    /* html isn't a real encoding it's just libxml2 way to get entities */
    if (!xmlStrcasecmp(encoding, BAD_CAST "html"))
        return(-1);

    if (encoding != NULL) {
        /* Pre-build the "content" attribute value; snprintf bounds the
         * copy and the explicit terminator guards against truncation. */
        snprintf(newcontent, sizeof(newcontent), "text/html; charset=%s",
                (char *)encoding);
        newcontent[sizeof(newcontent) - 1] = 0;
    }

    cur = doc->children;

    /*
     * Search the html
     */
    while (cur != NULL) {
        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
            if (xmlStrcasecmp(cur->name, BAD_CAST"html") == 0)
                break;
            if (xmlStrcasecmp(cur->name, BAD_CAST"head") == 0)
                goto found_head;
            if (xmlStrcasecmp(cur->name, BAD_CAST"meta") == 0)
                goto found_meta;
        }
        cur = cur->next;
    }
    if (cur == NULL)
        return(-1);
    cur = cur->children;

    /*
     * Search the head
     */
    while (cur != NULL) {
        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
            if (xmlStrcasecmp(cur->name, BAD_CAST"head") == 0)
                break;
            if (xmlStrcasecmp(cur->name, BAD_CAST"meta") == 0) {
                /* Found a meta directly: remember its parent as head. */
                head = cur->parent;
                goto found_meta;
            }
        }
        cur = cur->next;
    }
    if (cur == NULL)
        return(-1);

found_head:
    head = cur;
    if (cur->children == NULL)
        goto create;
    cur = cur->children;

found_meta:
    /*
     * Search and update all the remaining the meta elements carrying
     * encoding information
     */
    while (cur != NULL) {
        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
            if (xmlStrcasecmp(cur->name, BAD_CAST"meta") == 0) {
                xmlAttrPtr attr = cur->properties;
                int http;
                const xmlChar *value;

                content = NULL;
                http = 0;
                /* A matching meta needs http-equiv="Content-Type" and a
                 * content attribute, each with a single text child. */
                while (attr != NULL) {
                    if ((attr->children != NULL) &&
                        (attr->children->type == XML_TEXT_NODE) &&
                        (attr->children->next == NULL)) {
                        value = attr->children->content;
                        if ((!xmlStrcasecmp(attr->name, BAD_CAST"http-equiv"))
                         && (!xmlStrcasecmp(value, BAD_CAST"Content-Type")))
                            http = 1;
                        else
                        {
                            if ((value != NULL) &&
                                (!xmlStrcasecmp(attr->name, BAD_CAST"content")))
                                content = value;
                        }
                        if ((http != 0) && (content != NULL))
                            break;
                    }
                    attr = attr->next;
                }
                if ((http != 0) && (content != NULL)) {
                    meta = cur;
                    break;
                }
            }
        }
        cur = cur->next;
    }

create:
    if (meta == NULL) {
        if ((encoding != NULL) && (head != NULL)) {
            /*
             * Create a new Meta element with the right attributes
             */
            meta = xmlNewDocNode(doc, NULL, BAD_CAST"meta", NULL);
            if (head->children == NULL)
                xmlAddChild(head, meta);
            else
                xmlAddPrevSibling(head->children, meta);
            xmlNewProp(meta, BAD_CAST"http-equiv", BAD_CAST"Content-Type");
            xmlNewProp(meta, BAD_CAST"content", BAD_CAST newcontent);
        }
    } else {
        /* remove the meta tag if NULL is passed */
        if (encoding == NULL) {
            xmlUnlinkNode(meta);
            xmlFreeNode(meta);
        }
        /* change the document only if there is a real encoding change */
        else if (xmlStrcasestr(content, encoding) == NULL) {
            xmlSetProp(meta, BAD_CAST"content", BAD_CAST newcontent);
        }
    }

    return(0);
}
|
Safe
|
[
"CWE-79"
] |
libxml2
|
c1ba6f54d32b707ca6d91cb3257ce9de82876b6f
|
3.9283895887339527e+37
| 134 |
Revert "Do not URI escape in server side includes"
This reverts commit 960f0e275616cadc29671a218d7fb9b69eb35588.
This commit introduced
- an infinite loop, found by OSS-Fuzz, which could be easily fixed.
- an algorithm with quadratic runtime
- a security issue, see
https://bugzilla.gnome.org/show_bug.cgi?id=769760
A better approach is to add an option not to escape URLs at all
which libxml2 should have possibly done in the first place.
| 0 |
/* Release every entry in the DNS resolve cache and the cache data
 * structures themselves; also frees the cached resolv.conf filename.
 * Intended for shutdown-time cleanup. */
dns_free_all(void)
{
  cached_resolve_t **ptr, **next, *item;
  assert_cache_ok();
  /* Entries in the DONE state live only in the priority queue, so free
   * them here; the remaining entries are reachable from the hash table
   * walked below. */
  if (cached_resolve_pqueue) {
    SMARTLIST_FOREACH(cached_resolve_pqueue, cached_resolve_t *, res,
      {
        if (res->state == CACHE_STATE_DONE)
          _free_cached_resolve(res);
      });
  }
  /* Remove-and-free each hash-table entry; HT_NEXT_RMV advances while
   * unlinking the current slot. */
  for (ptr = HT_START(cache_map, &cache_root); ptr != NULL; ptr = next) {
    item = *ptr;
    next = HT_NEXT_RMV(cache_map, &cache_root, ptr);
    _free_cached_resolve(item);
  }
  HT_CLEAR(cache_map, &cache_root);
  smartlist_free(cached_resolve_pqueue);
  cached_resolve_pqueue = NULL;
  tor_free(resolv_conf_fname);
}
|
Safe
|
[
"CWE-399"
] |
tor
|
62637fa22405278758febb1743da9af562524d4c
|
1.3063370413904372e+38
| 21 |
Avoid hard (impossible?)-to-trigger double-free in dns_resolve()
Fixes 6480; fix on 0.2.0.1-alpha; based on pseudonymous patch.
| 0 |
/*
 * Emulate the INVVPID instruction on behalf of a nested (L1) hypervisor.
 *
 * Injects #UD if the nested VMX controls do not advertise VPID/INVVPID,
 * checks VMX-operation permission, validates the invalidation type
 * against the capabilities exposed to L1, reads the memory operand, and
 * performs the flush.  Only the all-context type is implemented; other
 * types hit the BUG_ON below.  Always returns 1 so the caller resumes
 * the guest.
 */
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info;
	unsigned long type, types;
	gva_t gva;
	struct x86_exception e;
	int vpid;
	/* INVVPID is #UD unless the nested controls expose it. */
	if (!(vmx->nested.nested_vmx_secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_VPID) ||
			!(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	if (!nested_vmx_check_permission(vcpu))
		return 1;
	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
	/* Invalidation types we reported as supported to L1. */
	types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
	if (!(types & (1UL << type))) {
		nested_vmx_failValid(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		return 1;
	}
	/* according to the intel vmx instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmx_instruction_info, false, &gva))
		return 1;
	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
			sizeof(u32), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	switch (type) {
	case VMX_VPID_EXTENT_ALL_CONTEXT:
		if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
			nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			return 1;
		}
		/* Flush using the vpid L0 allocated for L2 (vpid02). */
		__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
		nested_vmx_succeed(vcpu);
		break;
	default:
		/* Trap single context invalidation invvpid calls */
		BUG_ON(1);
		break;
	}
	skip_emulated_instruction(vcpu);
	return 1;
}
|
Safe
|
[
"CWE-399"
] |
linux
|
54a20552e1eae07aa240fa370a0293e006b5faed
|
2.1593586912084045e+38
| 61 |
KVM: x86: work around infinite loop in microcode when #AC is delivered
It was found that a guest can DoS a host by triggering an infinite
stream of "alignment check" (#AC) exceptions. This causes the
microcode to enter an infinite loop where the core never receives
another interrupt. The host kernel panics pretty quickly due to the
effects (CVE-2015-5307).
Signed-off-by: Eric Northup <digitaleric@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/* Archive-level read-ahead: thin wrapper that forwards to
 * __archive_read_filter_ahead() on the top filter of the read pipeline,
 * requesting at least 'min' bytes and reporting the available count via
 * *avail. */
__archive_read_ahead(struct archive_read *a, size_t min, ssize_t *avail)
{
	return (__archive_read_filter_ahead(a->filter, min, avail));
}
|
Safe
|
[
"CWE-125"
] |
libarchive
|
e6c9668f3202215ddb71617b41c19b6f05acf008
|
6.397110360530657e+37
| 4 |
Add a check to archive_read_filter_consume to reject any
attempts to move the file pointer by a negative amount.
Note: Either this or commit 3865cf2 provides a fix for
Issue 394.
| 0 |
/*
  Build the gray-level histogram of one tile.

  The tile (tile_info->width x tile_info->height) is embedded in an
  image whose rows are clahe_info->width pixels wide, so after each tile
  row the read position advances by a full image row.  Each pixel is
  mapped through 'lut' into one of 'number_bins' histogram slots.
*/
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const size_t number_bins,
  const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
  const unsigned short
    *row;

  ssize_t
    bin,
    y;

  /*
    Clear the histogram, then classify each tile pixel.
  */
  for (bin=0; bin < (ssize_t) number_bins; bin++)
    histogram[bin]=0L;
  row=pixels;
  for (y=0; y < (ssize_t) tile_info->height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) tile_info->width; x++)
      histogram[lut[row[x]]]++;
    row+=clahe_info->width;  /* skip to the same column of the next row */
  }
}
|
Safe
|
[
"CWE-399",
"CWE-119",
"CWE-787"
] |
ImageMagick
|
d4fc44b58a14f76b1ac997517d742ee12c9dc5d3
|
2.8162904982725254e+37
| 28 |
https://github.com/ImageMagick/ImageMagick/issues/1611
| 0 |
// HTTP parser hook: fired once at the start of each new message.
// Clears the per-message header field/value counters and the cached
// url/status-message buffers, and records a start timestamp for header
// parsing.
int on_message_begin() {
  num_fields_ = 0;
  num_values_ = 0;
  url_.Reset();
  status_message_.Reset();
  header_parsing_start_time_ = uv_hrtime();
  return 0;
}
|
Vulnerable
|
[
"CWE-400"
] |
node
|
753f3b247ae2d24fee0b3f48b9ec3a5c308f0650
|
3.9821465886171334e+37
| 7 |
http: add requestTimeout
This commit introduces a new http.Server option called requestTimeout
with a default value in milliseconds of 0.
If requestTimeout is set to a positive value, the server will start a new
timer set to expire in requestTimeout milliseconds when a new connection
is established. The timer is also set again if new requests after the
first are received on the socket (this handles pipelining and keep-alive
cases).
The timer is cancelled when:
1. the request body is completely received by the server.
2. the response is completed. This handles the case where the
application responds to the client without consuming the request body.
3. the connection is upgraded, like in the WebSocket case.
If the timer expires, then the server responds with status code 408 and
closes the connection.
CVE-2020-8251
PR-URL: https://github.com/nodejs-private/node-private/pull/208
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Robert Nagy <ronagy@icloud.com>
Reviewed-By: Mary Marchini <oss@mmarchini.me>
Co-Authored-By: Paolo Insogna <paolo@cowtech.it>
Co-Authored-By: Robert Nagy <ronagy@icloud.com>
| 1 |
/* Determine how element 'ele' of 'size' bytes should be encoded.
 *
 * If the content parses as a 64-bit signed integer, the integer form is
 * written into 'intenc' (at most 9 bytes), '*enclen' is set to its
 * length and LP_ENCODING_INT is returned.  Otherwise the element must
 * be stored as a string: '*enclen' receives the total space required
 * (header plus payload) and LP_ENCODING_STRING is returned; 'intenc'
 * is left untouched in that case. */
int lpEncodeGetType(unsigned char *ele, uint32_t size, unsigned char *intenc, uint64_t *enclen) {
    int64_t v;
    if (lpStringToInt64((const char*)ele, size, &v)) {
        if (v >= 0 && v <= 127) {
            /* Single byte 0-127 integer. */
            intenc[0] = v;
            *enclen = 1;
        } else if (v >= -4096 && v <= 4095) {
            /* 13 bit integer. */
            if (v < 0) v = ((int64_t)1<<13)+v;
            intenc[0] = (v>>8)|LP_ENCODING_13BIT_INT;
            intenc[1] = v&0xff;
            *enclen = 2;
        } else if (v >= -32768 && v <= 32767) {
            /* 16 bit integer. */
            if (v < 0) v = ((int64_t)1<<16)+v;
            intenc[0] = LP_ENCODING_16BIT_INT;
            intenc[1] = v&0xff;
            intenc[2] = v>>8;
            *enclen = 3;
        } else if (v >= -8388608 && v <= 8388607) {
            /* 24 bit integer. */
            if (v < 0) v = ((int64_t)1<<24)+v;
            intenc[0] = LP_ENCODING_24BIT_INT;
            intenc[1] = v&0xff;
            intenc[2] = (v>>8)&0xff;
            intenc[3] = v>>16;
            *enclen = 4;
        } else if (v >= -2147483648 && v <= 2147483647) {
            /* 32 bit integer. */
            if (v < 0) v = ((int64_t)1<<32)+v;
            intenc[0] = LP_ENCODING_32BIT_INT;
            intenc[1] = v&0xff;
            intenc[2] = (v>>8)&0xff;
            intenc[3] = (v>>16)&0xff;
            intenc[4] = v>>24;
            *enclen = 5;
        } else {
            /* 64 bit integer. */
            uint64_t uv = v;
            intenc[0] = LP_ENCODING_64BIT_INT;
            intenc[1] = uv&0xff;
            intenc[2] = (uv>>8)&0xff;
            intenc[3] = (uv>>16)&0xff;
            intenc[4] = (uv>>24)&0xff;
            intenc[5] = (uv>>32)&0xff;
            intenc[6] = (uv>>40)&0xff;
            intenc[7] = (uv>>48)&0xff;
            intenc[8] = uv>>56;
            *enclen = 9;
        }
        return LP_ENCODING_INT;
    } else {
        /* String encoding: the header is 1, 2 or 5 bytes depending on
         * the payload length.  Widen 'size' to 64 bits *before* adding
         * the header length so the sum cannot wrap in 32-bit unsigned
         * arithmetic when size is close to UINT32_MAX (CWE-190). */
        if (size < 64) *enclen = 1+(uint64_t)size;
        else if (size < 4096) *enclen = 2+(uint64_t)size;
        else *enclen = 5+(uint64_t)size;
        return LP_ENCODING_STRING;
    }
}
|
Vulnerable
|
[
"CWE-190"
] |
redis
|
f6a40570fa63d5afdd596c78083d754081d80ae3
|
1.0568534504625596e+38
| 59 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting from trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
| 1 |
/**
 * pk_transaction_update_detail_cb:
 *
 * Backend callback invoked for each update-detail item.  Stores the
 * item in the transaction's results and re-emits it as the D-Bus
 * "UpdateDetail" signal on the transaction object path, substituting
 * empty arrays/strings for any NULL fields so the GVariant marshalling
 * never sees NULL.
 */
pk_transaction_update_detail_cb (PkBackend *backend,
				 PkUpdateDetail *item,
				 PkTransaction *transaction)
{
	const gchar *changelog;
	const gchar *issued;
	const gchar *package_id;
	const gchar *updated;
	const gchar *update_text;
	gchar **bugzilla_urls;
	gchar **cve_urls;
	gchar *empty[] = { NULL };
	gchar **obsoletes;
	gchar **updates;
	gchar **vendor_urls;
	g_return_if_fail (PK_IS_TRANSACTION (transaction));
	g_return_if_fail (transaction->priv->tid != NULL);
	/* add to results */
	pk_results_add_update_detail (transaction->priv->results, item);
	/* emit */
	package_id = pk_update_detail_get_package_id (item);
	updates = pk_update_detail_get_updates (item);
	obsoletes = pk_update_detail_get_obsoletes (item);
	vendor_urls = pk_update_detail_get_vendor_urls (item);
	bugzilla_urls = pk_update_detail_get_bugzilla_urls (item);
	cve_urls = pk_update_detail_get_cve_urls (item);
	update_text = pk_update_detail_get_update_text (item);
	changelog = pk_update_detail_get_changelog (item);
	issued = pk_update_detail_get_issued (item);
	updated = pk_update_detail_get_updated (item);
	g_debug ("emitting update-detail for %s", package_id);
	g_dbus_connection_emit_signal (transaction->priv->connection,
				       NULL,
				       transaction->priv->tid,
				       PK_DBUS_INTERFACE_TRANSACTION,
				       "UpdateDetail",
				       g_variant_new ("(s^as^as^as^as^asussuss)",
						      package_id,
						      updates != NULL ? updates : empty,
						      obsoletes != NULL ? obsoletes : empty,
						      vendor_urls != NULL ? vendor_urls : empty,
						      bugzilla_urls != NULL ? bugzilla_urls : empty,
						      cve_urls != NULL ? cve_urls : empty,
						      pk_update_detail_get_restart (item),
						      update_text != NULL ? update_text : "",
						      changelog != NULL ? changelog : "",
						      pk_update_detail_get_state (item),
						      issued != NULL ? issued : "",
						      updated != NULL ? updated : ""),
				       NULL);
}
|
Safe
|
[
"CWE-287"
] |
PackageKit
|
7e8a7905ea9abbd1f384f05f36a4458682cd4697
|
3.146836891639323e+38
| 54 |
Do not set JUST_REINSTALL on any kind of auth failure
If we try to continue the auth queue when it has been cancelled (or failed)
then we fall upon the obscure JUST_REINSTALL transaction flag which only the
DNF backend actually verifies.
Many thanks to Matthias Gerstner <mgerstner@suse.de> for spotting the problem.
| 0 |
//
// Return the y coordinate of the first scanline of the line buffer
// that contains scanline y.  Buffers cover linesInLineBuffer
// consecutive scanlines, with the first buffer starting at minY.
//

int
lineBufferMinY (int y, int minY, int linesInLineBuffer)
{
    int bufferIndex = (y - minY) / linesInLineBuffer;
    return bufferIndex * linesInLineBuffer + minY;
}
|
Safe
|
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
|
2.668432862856245e+37
| 4 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
| 0 |
/* Functional test: after connecting, the "/carbons on" command must
 * make the client send a carbons-enable IQ (urn:xmpp:carbons:2) to the
 * stub server. */
send_enable_carbons(void **state)
{
    prof_connect();
    prof_input("/carbons on");
    assert_true(stbbr_received(
        "<iq id='*' type='set'><enable xmlns='urn:xmpp:carbons:2'/></iq>"
    ));
}
|
Safe
|
[
"CWE-20",
"CWE-346"
] |
profanity
|
8e75437a7e43d4c55e861691f74892e666e29b0b
|
3.273381791915743e+38
| 10 |
Add carbons from check
| 0 |
/* Unserialize an object from 'buffer' into 'return_value'.
 *
 * Consumed layout: a 2-byte class-name length, the class name itself
 * (skipped when flag forces StdClass), then the property array whose
 * real element count is decoded from 'bucket_len'.  Properties are
 * copied onto a freshly constructed instance of the resolved class,
 * and __wakeup() is invoked if the class defines it.
 *
 * Returns the advanced buffer pointer, or NULL on error (zero class
 * name length or unresolvable class).
 *
 * NOTE(review): name_len and the array data come from untrusted input
 * and are not validated here against the remaining buffer length --
 * confirm that callers bound the reads (cf. "add buffer end check"). */
static void* swoole_unserialize_object(void *buffer, zval *return_value, zend_uchar bucket_len, zval *args, long flag)
{
    zval property;
    uint32_t arr_num = 0;
    size_t name_len = *((unsigned short*) buffer);
    CHECK_STEP;
    if (!name_len)
    {
        php_error_docref(NULL TSRMLS_CC, E_NOTICE, "illegal unserialize data");
        return NULL;
    }
    buffer += 2;
    zend_string *class_name;
    if (flag == UNSERIALIZE_OBJECT_TO_STDCLASS)
    {
        class_name = swoole_string_init(ZEND_STRL("StdClass"));
    }
    else
    {
        class_name = swoole_string_init((char*) buffer, name_len);
    }
    buffer += name_len;
    zend_class_entry *ce = swoole_try_get_ce(class_name);
    swoole_string_release(class_name);
    CHECK_STEP;
    if (!ce)
    {
        return NULL;
    }
    buffer = get_array_real_len(buffer, bucket_len, &arr_num);
    buffer = swoole_unserialize_arr(buffer, &property, arr_num, flag);
    object_init_ex(return_value, ce);
    zval *data, *d;
    zend_string *key;
    zend_ulong index;
    /* Copy each decoded property onto the object: declared properties
     * are overwritten in place, undeclared ones added via
     * zend_update_property after unmangling the name. */
    ZEND_HASH_FOREACH_KEY_VAL(Z_ARRVAL(property), index, key, data)
    {
        const char *prop_name, *tmp;
        size_t prop_len;
        if (key)
        {
            if ((d = zend_hash_find(Z_OBJPROP_P(return_value), key)) != NULL)
            {
                if (Z_TYPE_P(d) == IS_INDIRECT)
                {
                    d = Z_INDIRECT_P(d);
                }
                zval_dtor(d);
                ZVAL_COPY(d, data);
            }
            else
            {
                zend_unmangle_property_name_ex(key, &tmp, &prop_name, &prop_len);
                zend_update_property(ce, return_value, prop_name, prop_len, data);
            }
            // zend_hash_update(Z_OBJPROP_P(return_value),key,data);
            // zend_update_property(ce, return_value, ZSTR_VAL(key), ZSTR_LEN(key), data);
        }
        else
        {
            zend_hash_next_index_insert(Z_OBJPROP_P(return_value), data);
        }
    }
    ZEND_HASH_FOREACH_END();
    zval_dtor(&property);
    /* Constructor invocation is intentionally disabled (kept for
     * reference); unserialization restores state without running it. */
    if (ce->constructor)
    {
        // zend_fcall_info fci = {0};
        // zend_fcall_info_cache fcc = {0};
        // fci.size = sizeof (zend_fcall_info);
        // zval retval;
        // ZVAL_UNDEF(&fci.function_name);
        // fci.retval = &retval;
        // fci.param_count = 0;
        // fci.params = NULL;
        // fci.no_separation = 1;
        // fci.object = Z_OBJ_P(return_value);
        //
        // zend_fcall_info_args_ex(&fci, ce->constructor, args);
        //
        // fcc.initialized = 1;
        // fcc.function_handler = ce->constructor;
        // // fcc.calling_scope = EG(scope);
        // fcc.called_scope = Z_OBJCE_P(return_value);
        // fcc.object = Z_OBJ_P(return_value);
        //
        // if (zend_call_function(&fci, &fcc) == FAILURE)
        // {
        //     zend_throw_exception_ex(NULL, 0, "could not call class constructor");
        // }
        // zend_fcall_info_args_clear(&fci, 1);
    }
    //call object __wakeup
    if (zend_hash_str_exists(&ce->function_table, ZEND_STRL("__wakeup")))
    {
        zval ret, wakeup;
        zend_string *fname = swoole_string_init(ZEND_STRL("__wakeup"));
        Z_STR(wakeup) = fname;
        Z_TYPE_INFO(wakeup) = IS_STRING_EX;
        call_user_function_ex(CG(function_table), return_value, &wakeup, &ret, 0, NULL, 1, NULL);
        swoole_string_release(fname);
        zval_ptr_dtor(&ret);
    }
    CHECK_STEP;
    return buffer;
}
|
Safe
|
[
"CWE-200",
"CWE-502"
] |
swoole-src
|
4cdbce5d9bf2fe596bb6acd7d6611f9e8c253a76
|
8.472499960762676e+37
| 116 |
add buffer end check
| 0 |
/*
 * Walk the PMD entries covering [addr, end) within @vma and check the
 * mapped pages against the @nodes/@flags mempolicy constraints via
 * check_pte_range().  Huge PMDs are split first so the PTE-level walk
 * is valid.  Returns 0 if the range conforms, -EIO otherwise.
 *
 * NOTE(review): pmd_none_or_clear_bad() here can race with a concurrent
 * huge-page fault when callers hold mmap_sem only for read -- confirm
 * the locking expectations of all call sites.
 */
static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		split_huge_page_pmd(vma->vm_mm, pmd);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
Vulnerable
|
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
|
3.402204461985025e+38
| 20 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: <stable@vger.kernel.org> [2.6.38+]
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 1 |
/*
 * Probe the CPU's performance-monitoring capabilities via CPUID leaf
 * 0xa plus the family/model number, and fill in the global x86_pmu
 * descriptor: counter counts/widths, event constraint tables,
 * hw-cache event tables, LBR setup and model-specific quirks.
 * Returns 0 on success, or -ENODEV when no supported PMU is present.
 */
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* Pre-architectural PMUs: dispatch on the CPU family. */
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	/* Copy the CPUID-reported geometry into the chosen descriptor. */
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.cntval_bits = eax.split.bit_width;
	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl = ebx.full;
	x86_pmu.events_mask_len = eax.split.mask_length;

	x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;
		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
		/* fall through */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
	case 38: /* Lincroft */
	case 39: /* Penwell */
	case 53: /* Cloverview */
	case 54: /* Cedarview */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;

	case 58: /* IvyBridge */
	case 62: /* IvyBridge EP */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;

	default:
		/* Unknown model: fall back to the architected tables. */
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}

	/* Clamp CPUID-reported counter counts to the kernel's static limits. */
	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	/* Enable-mask covers both generic and fixed counters. */
	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}

	return 0;
}
|
Vulnerable
|
[
"CWE-20",
"CWE-401"
] |
linux
|
f1923820c447e986a9da0fc6bf60c1dccdf0408e
|
6.090618564422241e+37
| 259 |
perf/x86: Fix offcore_rsp valid mask for SNB/IVB
The valid mask for both offcore_response_0 and
offcore_response_1 was wrong for SNB/SNB-EP,
IVB/IVB-EP. It was possible to write to
reserved bit and cause a GP fault crashing
the kernel.
This patch fixes the problem by correctly marking the
reserved bits in the valid mask for all the processors
mentioned above.
A distinction between desktop and server parts is introduced
because bits 24-30 are only available on the server parts.
This version of the patch is just a rebase to perf/urgent tree
and should apply to older kernels as well.
Signed-off-by: Stephane Eranian <eranian@google.com>
Cc: peterz@infradead.org
Cc: jolsa@redhat.com
Cc: gregkh@linuxfoundation.org
Cc: security@kernel.org
Cc: ak@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| 1 |
/*
 * Parse a server Set Keyboard Indicators PDU and forward the LED flags
 * to the client's SetKeyboardIndicators callback.
 * Returns FALSE if the stream is too short, TRUE otherwise.
 */
static BOOL rdp_recv_server_set_keyboard_indicators_pdu(rdpRdp* rdp, wStream* s)
{
	UINT16 unitId = 0;
	UINT16 ledFlags = 0;
	rdpContext* context = rdp->instance->context;

	/* unitId (2 bytes) + ledFlags (2 bytes) */
	if (Stream_GetRemainingLength(s) < 4)
		return FALSE;

	Stream_Read_UINT16(s, unitId);
	Stream_Read_UINT16(s, ledFlags);
	IFCALL(context->update->SetKeyboardIndicators, context, ledFlags);
	return TRUE;
}
|
Safe
|
[
"CWE-125"
] |
FreeRDP
|
9301bfe730c66180263248b74353daa99f5a969b
|
7.96403068177093e+37
| 14 |
Fixed #6007: Boundary checks in rdp_read_flow_control_pdu
| 0 |
grub_ext2_iterate_dir (grub_fshelp_node_t dir,
		       grub_fshelp_iterate_dir_hook_t hook, void *hook_data)
{
  /* Iterate the directory entries of DIR, invoking HOOK for each named
     entry.  Returns 1 if the hook stopped the iteration, 0 otherwise
     (including on read errors, reported via grub_errno).  */
  unsigned int fpos = 0;
  struct grub_fshelp_node *diro = (struct grub_fshelp_node *) dir;

  /* Make sure the directory's inode is in memory before using its size.  */
  if (! diro->inode_read)
    {
      grub_ext2_read_inode (diro->data, diro->ino, &diro->inode);
      if (grub_errno)
	return 0;
    }

  /* Search the file. */
  while (fpos < grub_le_to_cpu32 (diro->inode.size))
    {
      struct ext2_dirent dirent;

      /* Read the raw on-disk directory entry header at FPOS.  */
      grub_ext2_read_file (diro, 0, 0, fpos, sizeof (struct ext2_dirent),
			   (char *) &dirent);
      if (grub_errno)
	return 0;

      /* A zero record length would make this loop spin forever.  */
      if (dirent.direntlen == 0)
	return 0;

      if (dirent.inode != 0 && dirent.namelen != 0)
	{
	  char filename[MAX_NAMELEN + 1];
	  struct grub_fshelp_node *fdiro;
	  enum grub_fshelp_filetype type = GRUB_FSHELP_UNKNOWN;

	  /* The entry name immediately follows the fixed header.  */
	  grub_ext2_read_file (diro, 0, 0, fpos + sizeof (struct ext2_dirent),
			       dirent.namelen, filename);
	  if (grub_errno)
	    return 0;

	  fdiro = grub_malloc (sizeof (struct grub_fshelp_node));
	  if (! fdiro)
	    return 0;

	  fdiro->data = diro->data;
	  fdiro->ino = grub_le_to_cpu32 (dirent.inode);

	  filename[dirent.namelen] = '\0';

	  if (dirent.filetype != FILETYPE_UNKNOWN)
	    {
	      /* The dirent carries the type; the inode can be read
		 lazily later if it is ever needed.  */
	      fdiro->inode_read = 0;

	      if (dirent.filetype == FILETYPE_DIRECTORY)
		type = GRUB_FSHELP_DIR;
	      else if (dirent.filetype == FILETYPE_SYMLINK)
		type = GRUB_FSHELP_SYMLINK;
	      else if (dirent.filetype == FILETYPE_REG)
		type = GRUB_FSHELP_REG;
	    }
	  else
	    {
	      /* The filetype can not be read from the dirent, read
		 the inode to get more information. */
	      grub_ext2_read_inode (diro->data,
				    grub_le_to_cpu32 (dirent.inode),
				    &fdiro->inode);
	      if (grub_errno)
		{
		  grub_free (fdiro);
		  return 0;
		}

	      fdiro->inode_read = 1;

	      if ((grub_le_to_cpu16 (fdiro->inode.mode)
		   & FILETYPE_INO_MASK) == FILETYPE_INO_DIRECTORY)
		type = GRUB_FSHELP_DIR;
	      else if ((grub_le_to_cpu16 (fdiro->inode.mode)
			& FILETYPE_INO_MASK) == FILETYPE_INO_SYMLINK)
		type = GRUB_FSHELP_SYMLINK;
	      else if ((grub_le_to_cpu16 (fdiro->inode.mode)
			& FILETYPE_INO_MASK) == FILETYPE_INO_REG)
		type = GRUB_FSHELP_REG;
	    }

	  /* A nonzero hook return stops the iteration.  FDIRO is not
	     freed here on this path — presumably the hook takes
	     ownership (fshelp convention); confirm at the call sites.  */
	  if (hook (filename, type, fdiro, hook_data))
	    return 1;
	}

      fpos += grub_le_to_cpu16 (dirent.direntlen);
    }

  return 0;
}
|
Safe
|
[
"CWE-119"
] |
grub
|
ac8cac1dac50daaf1c390d701cca3b55e16ee768
|
2.4081518464105656e+38
| 92 |
* grub-core/fs/ext2.c: Remove variable length arrays.
| 0 |
g_opendir(Char *str, glob_t *pglob)
{
	char buf[PATH_MAX];

	/* An empty pattern means the current directory. */
	if (*str == '\0') {
		buf[0] = '.';
		buf[1] = 0;
	} else if (g_Ctoc(str, buf, sizeof buf)) {
		/* Conversion overflowed the local buffer. */
		return NULL;
	}

	/* Honor a caller-supplied opendir replacement if requested. */
	return (pglob->gl_flags & GLOB_ALTDIRFUNC)
	    ? (*pglob->gl_opendir)(buf)
	    : opendir(buf);
}
|
Safe
|
[] |
pure-ftpd
|
0627004e23a24108785dc1506c5767392b90f807
|
2.0182695109892852e+38
| 19 |
BSD glob(): check max pattern length after having initialized pglob
| 0 |
/*
 * Parse one folder item from a GetFolderItems response payload.
 *
 * Expected layout (AVRCP folder item):
 *   0..7   UID (big-endian 64-bit)
 *   8      folder type
 *   9      playable flag
 *   10..11 character set id (not used here)
 *   12..13 displayable name length (big-endian 16-bit)
 *   14..   displayable name
 *
 * Returns the created media item, or NULL on malformed/short input.
 */
static struct media_item *parse_media_folder(struct avrcp *session,
					uint8_t *operands, uint16_t len)
{
	struct avrcp_player *player = session->controller->player;
	struct media_player *mp = player->user_data;
	struct media_item *item;
	uint16_t namelen;
	char name[255];
	uint64_t uid;
	uint8_t type;
	uint8_t playable;

	/* The fixed part of a folder item is 14 bytes: the previous
	 * "len < 12" check allowed reading the 2-byte name length at
	 * offset 12 past the end of the payload. */
	if (len < 14)
		return NULL;

	uid = get_be64(&operands[0]);
	type = operands[8];
	playable = operands[9];

	/* Clamp the advertised name length to both the local buffer and
	 * the bytes actually present in the payload, so a malicious peer
	 * cannot make us read past operands[len]. */
	namelen = MIN(get_be16(&operands[12]), sizeof(name) - 1);
	if (namelen > len - 14)
		namelen = len - 14;

	if (namelen > 0)
		memcpy(name, &operands[14], namelen);

	/* Always terminate: previously name was left uninitialized when
	 * namelen == 0 and then passed to media_player_create_folder(). */
	name[namelen] = '\0';

	item = media_player_create_folder(mp, name, type, uid);
	if (!item)
		return NULL;

	media_item_set_playable(item, playable & 0x01);

	return item;
}
|
Safe
|
[
"CWE-200"
] |
bluez
|
e2b0f0d8d63e1223bb714a9efb37e2257818268b
|
2.097543107955611e+38
| 33 |
avrcp: Fix not checking if params_len match number of received bytes
This makes sure the number of bytes in the params_len matches the
remaining bytes received so the code don't end up accessing invalid
memory.
| 0 |
/*
 * Hand @smi_msg to the interface: if no message is currently in flight
 * it becomes the current message and is returned to the caller for
 * immediate transmission; otherwise it is queued (high-priority or
 * normal list depending on @priority) and NULL is returned.
 */
static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (!intf->curr_msg) {
		intf->curr_msg = smi_msg;
		return smi_msg;
	}

	list_add_tail(&smi_msg->link,
		      priority > 0 ? &intf->hp_xmit_msgs : &intf->xmit_msgs);
	return NULL;
}
|
Safe
|
[
"CWE-416",
"CWE-284"
] |
linux
|
77f8269606bf95fcb232ee86f6da80886f1dfae8
|
3.1803281406617427e+38
| 16 |
ipmi: fix use-after-free of user->release_barrier.rda
When we do the following test, we got oops in ipmi_msghandler driver
while((1))
do
service ipmievd restart & service ipmievd restart
done
---------------------------------------------------------------
[ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008
[ 294.230188] Mem abort info:
[ 294.230190] ESR = 0x96000004
[ 294.230191] Exception class = DABT (current EL), IL = 32 bits
[ 294.230193] SET = 0, FnV = 0
[ 294.230194] EA = 0, S1PTW = 0
[ 294.230195] Data abort info:
[ 294.230196] ISV = 0, ISS = 0x00000004
[ 294.230197] CM = 0, WnR = 0
[ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a
[ 294.230201] [0000803fea6ea008] pgd=0000000000000000
[ 294.230204] Internal error: Oops: 96000004 [#1] SMP
[ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio
[ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113
[ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO)
[ 294.297695] pc : __srcu_read_lock+0x38/0x58
[ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.307853] sp : ffff00001001bc80
[ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000
[ 294.316594] x27: 0000000000000000 x26: dead000000000100
[ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800
[ 294.327366] x23: 0000000000000000 x22: 0000000000000000
[ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018
[ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000
[ 294.343523] x17: 0000000000000000 x16: 0000000000000000
[ 294.348908] x15: 0000000000000000 x14: 0000000000000002
[ 294.354293] x13: 0000000000000000 x12: 0000000000000000
[ 294.359679] x11: 0000000000000000 x10: 0000000000100000
[ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004
[ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678
[ 294.375836] x5 : 000000000000000c x4 : 0000000000000000
[ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000
[ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001
[ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293)
[ 294.398791] Call trace:
[ 294.401266] __srcu_read_lock+0x38/0x58
[ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler]
[ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler]
[ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler]
[ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler]
[ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler]
[ 294.451618] tasklet_action_common.isra.5+0x88/0x138
[ 294.460661] tasklet_action+0x2c/0x38
[ 294.468191] __do_softirq+0x120/0x2f8
[ 294.475561] irq_exit+0x134/0x140
[ 294.482445] __handle_domain_irq+0x6c/0xc0
[ 294.489954] gic_handle_irq+0xb8/0x178
[ 294.497037] el1_irq+0xb0/0x140
[ 294.503381] arch_cpu_idle+0x34/0x1a8
[ 294.510096] do_idle+0x1d4/0x290
[ 294.516322] cpu_startup_entry+0x28/0x30
[ 294.523230] secondary_start_kernel+0x184/0x1d0
[ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 (c85f7c25)
[ 294.539746] ---[ end trace 8a7a880dee570b29 ]---
[ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt
[ 294.556837] SMP: stopping secondary CPUs
[ 294.563996] Kernel Offset: disabled
[ 294.570515] CPU features: 0x002,21006008
[ 294.577638] Memory Limit: none
[ 294.587178] Starting crashdump kernel...
[ 294.594314] Bye!
Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but
the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda
in __srcu_read_lock(), it causes oops.
Fix this by calling cleanup_srcu_struct() when the refcount is zero.
Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove")
Cc: stable@vger.kernel.org # 4.18
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Corey Minyard <cminyard@mvista.com>
| 0 |
impl_permission_manager_test (EphyPermissionManager *manager,
			      const char *host,
			      const char *type)
{
	/* The permission type must be a non-empty string. */
	g_return_val_if_fail (type != NULL, EPHY_PERMISSION_DEFAULT);
	g_return_val_if_fail (type[0] != '\0', EPHY_PERMISSION_DEFAULT);

	/* This backend does not track permissions; report the zero value. */
	return (EphyPermission) 0;
}
|
Safe
|
[] |
epiphany
|
3e0f7dea754381c5ad11a06ccc62eb153382b498
|
1.0768366726305527e+38
| 8 |
Report broken certs through the padlock icon
This uses a new feature in libsoup that reports through a
SoupMessageFlag whether the message is talking to a server that has a
trusted server.
Bug #600663
| 0 |
/*
 * Move @skb from @list to dev->done, setting its new state, and kick
 * the bottom-half tasklet on the empty->non-empty transition of the
 * done queue.  Returns the state the skb had before the move.
 */
static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);

	/* defer_bh() is never called with list == &dev->done.
	 * spin_lock_nested() tells lockdep that it is OK to take
	 * dev->done.lock here with list->lock held.
	 */
	spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
	__skb_queue_tail(&dev->done, skb);
	/* Only schedule the tasklet when the queue was previously empty. */
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock(&dev->done.lock);
	spin_unlock_irqrestore(&list->lock, flags);
	return old_state;
}
|
Safe
|
[
"CWE-703"
] |
linux
|
1666984c8625b3db19a9abc298931d35ab7bc64b
|
3.2672556436703372e+38
| 25 |
usbnet: cleanup after bind() in probe()
In case bind() works but a later error forces probe() to bail out,
the error paths must still clean up, and a timer may already have been
scheduled. Any such timers must be killed.
the double free reported in
http://www.spinics.net/lists/netdev/msg367669.html
and needs to go on top of Linus' fix to cdc-ncm.
Signed-off-by: Oliver Neukum <ONeukum@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/*
 * Report whether the currently selected binary object carries
 * line-number debug info.  Returns a nonzero value when the
 * R_BIN_DBG_LINENUMS bit is set, false otherwise.
 */
R_API int r_bin_has_dbg_linenums(RBin *bin) {
	RBinObject *o = r_bin_cur_object (bin);
	/* o->info is not guaranteed non-NULL (e.g. malformed binaries);
	 * guard before dereferencing to avoid a NULL-pointer read. */
	return (o && o->info)? (R_BIN_DBG_LINENUMS & o->info->dbg_info): false;
}
|
Safe
|
[
"CWE-125"
] |
radare2
|
d31c4d3cbdbe01ea3ded16a584de94149ecd31d9
|
2.4160248489436615e+38
| 4 |
Fix #8748 - Fix oobread on string search
| 0 |
/*
 * Snapshot selected AVIO options (HTTP headers, user agent, cookies)
 * from the demuxer's IO context into the HLS context's dictionary so
 * they can be re-applied to segment requests later.
 * Returns 0 on success or a negative AVERROR code.
 */
static int save_avio_options(AVFormatContext *s)
{
    HLSContext *c = s->priv_data;
    static const char *const names[] = {
        "headers", "user_agent", "user-agent", "cookies", NULL
    };
    uint8_t *value;
    int err = 0;
    int i;

    for (i = 0; names[i]; i++) {
        if (av_opt_get(s->pb, names[i], AV_OPT_SEARCH_CHILDREN, &value) >= 0) {
            /* AV_DICT_DONT_STRDUP_VAL hands ownership of value to the dict. */
            err = av_dict_set(&c->avio_opts, names[i], value,
                              AV_DICT_DONT_STRDUP_VAL);
            if (err < 0)
                return err;
        }
    }

    return err;
}
|
Safe
|
[
"CWE-416"
] |
FFmpeg
|
6959358683c7533f586c07a766acc5fe9544d8b2
|
1.8155583768981452e+38
| 19 |
avformat/hls: check segment duration value of EXTINF
fix ticket: 8673
set the default EXTINF duration to 1ms if duration is smaller than 1ms
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
(cherry picked from commit 9dfb19baeb86a8bb02c53a441682c6e9a6e104cc)
| 0 |
/*
 * Completion callback for the raw SMB tcon exchange: pull the return
 * words out of the server reply into the request state and finish the
 * parent request (with an error if the server reported one).
 */
static void cli_raw_tcon_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_raw_tcon_state *state = tevent_req_data(
		req, struct cli_raw_tcon_state);
	NTSTATUS status;

	/* NOTE(review): the literal 2 presumably is the minimum vwv word
	 * count expected in the reply — confirm against cli_smb_recv(). */
	status = cli_smb_recv(subreq, state, NULL, 2, NULL, &state->ret_vwv,
			      NULL, NULL);
	/* The subrequest must be freed before completing the parent. */
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}
	tevent_req_done(req);
}
|
Safe
|
[
"CWE-94"
] |
samba
|
94295b7aa22d2544af5323bca70d3dcb97fd7c64
|
3.2203666185042828e+38
| 16 |
CVE-2016-2019: s3:libsmb: add comment regarding smbXcli_session_is_guest() with mandatory signing
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11860
Signed-off-by: Stefan Metzmacher <metze@samba.org>
| 0 |
/*
 * Resolve the session's default storage engine for temporary tables
 * and return its handlerton.  Both the plugin and its handlerton are
 * asserted to exist.
 */
handlerton *ha_default_tmp_handlerton(THD *thd)
{
  plugin_ref default_plugin= ha_default_tmp_plugin(thd);
  DBUG_ASSERT(default_plugin);

  handlerton *engine= plugin_hton(default_plugin);
  DBUG_ASSERT(engine);
  return engine;
}
|
Safe
|
[
"CWE-416"
] |
server
|
af810407f78b7f792a9bb8c47c8c532eb3b3a758
|
8.896423021322487e+37
| 8 |
MDEV-28098 incorrect key in "dup value" error after long unique
reset errkey after using it, so that it wouldn't affect
the next error message in the next statement
| 0 |
/*
 * Take a write reference on mount @m.  Returns 0 with the writer count
 * incremented, or -EROFS (with the count undone) if the mount is
 * read-only.  The barrier ordering below pairs with the remount-ro
 * slowpath that sets MNT_WRITE_HOLD — do not reorder these statements.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		/* Mount is (or just became) read-only: undo our count. */
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
427215d85e8d1476da1a86b8d67aceb485eb3631
|
2.0752416235977735e+38
| 29 |
ovl: prevent private clone if bind mount is not allowed
Add the following checks from __do_loopback() to clone_private_mount() as
well:
- verify that the mount is in the current namespace
- verify that there are no locked children
Reported-by: Alois Wohlschlager <alois1@gmx-topmail.de>
Fixes: c771d683a62e ("vfs: introduce clone_private_mount()")
Cc: <stable@vger.kernel.org> # v3.18
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
| 0 |
/* Read a text configuration box: the remaining box payload is a
 * NUL-terminated configuration string, duplicated into ptr->config.
 * NOTE(review): this excerpt appears truncated (no closing brace). */
GF_Err txtc_Read(GF_Box *s, GF_BitStream *bs)
{
	u32 size, i;
	char *str;
	GF_TextConfigBox *ptr = (GF_TextConfigBox*)s;
	size = (u32) ptr->size;
	/* NOTE(review): gf_malloc() is not checked for NULL, and if all
	 * `size` bytes are non-zero the buffer is never NUL-terminated
	 * before gf_strdup() below, which would read out of bounds —
	 * confirm whether the payload is guaranteed to contain a NUL. */
	str = (char *)gf_malloc(sizeof(char)*size);
	i=0;
	while (size) {
		str[i] = gf_bs_read_u8(bs);
		size--;
		/* Stop at the string terminator. */
		if (!str[i])
			break;
		i++;
	}
	/* Only a non-empty string is kept. */
	if (i) ptr->config = gf_strdup(str);
	gf_free(str);
	return GF_OK;
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
|
1.4264132210917136e+38
| 23 |
prevent dref memleak on invalid input (#1183)
| 0 |
/*
 * Scan a commit/tag header buffer for structural problems:
 *   - an embedded NUL byte anywhere in the header is an error;
 *   - the header must be terminated by a blank line ("\n\n"), or,
 *     when there is no body, end with a final newline.
 * Returns 0 on success, or the result of report() on failure.
 */
static int verify_headers(const void *data, unsigned long size,
			  struct object *obj, struct fsck_options *options)
{
	const char *buffer = (const char *)data;
	unsigned long i;

	for (i = 0; i < size; i++) {
		switch (buffer[i]) {
		case '\0':
			/* %lu: i is unsigned long — the previous %ld was a
			 * signedness mismatch in the format string. */
			return report(options, obj,
				FSCK_MSG_NUL_IN_HEADER,
				"unterminated header: NUL at offset %lu", i);
		case '\n':
			if (i + 1 < size && buffer[i + 1] == '\n')
				return 0;
		}
	}

	/*
	 * We did not find double-LF that separates the header
	 * and the body. Not having a body is not a crime but
	 * we do want to see the terminating LF for the last header
	 * line.
	 */
	if (size && buffer[size - 1] == '\n')
		return 0;

	return report(options, obj,
		FSCK_MSG_UNTERMINATED_HEADER, "unterminated header");
}
|
Safe
|
[
"CWE-20",
"CWE-88",
"CWE-522"
] |
git
|
a124133e1e6ab5c7a9fef6d0e6bcb084e3455b46
|
3.3130663530080083e+38
| 30 |
fsck: detect submodule urls starting with dash
Urls with leading dashes can cause mischief on older
versions of Git. We should detect them so that they can be
rejected by receive.fsckObjects, preventing modern versions
of git from being a vector by which attacks can spread.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
| 0 |
static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
struct io_submit_state *state)
{
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct io_kiocb *req;
if (!percpu_ref_tryget(&ctx->refs))
return NULL;
if (!state) {
req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req))
goto fallback;
} else if (!state->free_reqs) {
size_t sz;
int ret;
sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
/*
* Bulk alloc is all-or-nothing. If we fail to get a batch,
* retry single alloc to be on the safe side.
*/
if (unlikely(ret <= 0)) {
state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
if (!state->reqs[0])
goto fallback;
ret = 1;
}
state->free_reqs = ret - 1;
state->cur_req = 1;
req = state->reqs[0];
} else {
req = state->reqs[state->cur_req];
state->free_reqs--;
state->cur_req++;
}
got_it:
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
req->result = 0;
INIT_IO_WORK(&req->work, io_wq_submit_work);
return req;
fallback:
req = io_get_fallback_req(ctx);
if (req)
goto got_it;
percpu_ref_put(&ctx->refs);
return NULL;
}
|
Safe
|
[] |
linux
|
181e448d8709e517c9c7b523fcd209f24eb38ca7
|
7.359778334540746e+37
| 55 |
io_uring: async workers should inherit the user creds
If we don't inherit the original task creds, then we can confuse users
like fuse that pass creds in the request header. See link below on
identical aio issue.
Link: https://lore.kernel.org/linux-fsdevel/26f0d78e-99ca-2f1b-78b9-433088053a61@scylladb.com/T/#u
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
TEST_F(ExprMatchTest, OrMatchesCorrectly) {
createMatcher(fromjson("{$expr: {$or: [{$lte: ['$x', 3]}, {$gte: ['$y', 4]}]}}"));
ASSERT_TRUE(matches(BSON("x" << 3)));
ASSERT_TRUE(matches(BSON("y" << 5)));
ASSERT_FALSE(matches(BSON("x" << 10)));
}
|
Safe
|
[] |
mongo
|
ee97c0699fd55b498310996ee002328e533681a3
|
2.7729832507387083e+38
| 8 |
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
| 0 |
Status AuthorizationManagerImpl::_initializeUserFromPrivilegeDocument(User* user,
const BSONObj& privDoc) {
V2UserDocumentParser parser;
std::string userName = parser.extractUserNameFromUserDocument(privDoc);
if (userName != user->getName().getUser()) {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream() << "User name from privilege document \""
<< userName
<< "\" doesn't match name of provided User \""
<< user->getName().getUser()
<< "\"");
}
Status status = parser.initializeUserCredentialsFromUserDocument(user, privDoc);
if (!status.isOK()) {
return status;
}
status = parser.initializeUserRolesFromUserDocument(privDoc, user);
if (!status.isOK()) {
return status;
}
status = parser.initializeUserIndirectRolesFromUserDocument(privDoc, user);
if (!status.isOK()) {
return status;
}
status = parser.initializeUserPrivilegesFromUserDocument(privDoc, user);
if (!status.isOK()) {
return status;
}
status = parser.initializeAuthenticationRestrictionsFromUserDocument(privDoc, user);
if (!status.isOK()) {
return status;
}
return Status::OK();
}
|
Vulnerable
|
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
|
2.774501183815332e+38
| 36 |
SERVER-38984 Validate unique User ID on UserCache hit
| 1 |
static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
if (inode->i_flags & S_IMMUTABLE)
perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
else
perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
if (inode->i_flags & S_APPEND)
perms->rootflags |= HFSPLUS_FLG_APPEND;
else
perms->rootflags &= ~HFSPLUS_FLG_APPEND;
HFSPLUS_I(inode).rootflags = perms->rootflags;
HFSPLUS_I(inode).userflags = perms->userflags;
perms->mode = cpu_to_be16(inode->i_mode);
perms->owner = cpu_to_be32(inode->i_uid);
perms->group = cpu_to_be32(inode->i_gid);
}
|
Safe
|
[
"CWE-119"
] |
linux-2.6
|
efc7ffcb4237f8cb9938909041c4ed38f6e1bf40
|
1.5691215325369725e+38
| 16 |
hfsplus: fix Buffer overflow with a corrupted image
When an hfsplus image gets corrupted it might happen that the catalog
namelength field gets b0rked. If we mount such an image the memcpy() in
hfsplus_cat_build_key_uni() writes more than the 255 that fit in the name
field. Depending on the size of the overwritten data, we either only get
memory corruption or also trigger an oops like this:
[ 221.628020] BUG: unable to handle kernel paging request at c82b0000
[ 221.629066] IP: [<c022d4b1>] hfsplus_find_cat+0x10d/0x151
[ 221.629066] *pde = 0ea29163 *pte = 082b0160
[ 221.629066] Oops: 0002 [#1] PREEMPT DEBUG_PAGEALLOC
[ 221.629066] Modules linked in:
[ 221.629066]
[ 221.629066] Pid: 4845, comm: mount Not tainted (2.6.27-rc4-00123-gd3ee1b4-dirty #28)
[ 221.629066] EIP: 0060:[<c022d4b1>] EFLAGS: 00010206 CPU: 0
[ 221.629066] EIP is at hfsplus_find_cat+0x10d/0x151
[ 221.629066] EAX: 00000029 EBX: 00016210 ECX: 000042c2 EDX: 00000002
[ 221.629066] ESI: c82d70ca EDI: c82b0000 EBP: c82d1bcc ESP: c82d199c
[ 221.629066] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068
[ 221.629066] Process mount (pid: 4845, ti=c82d1000 task=c8224060 task.ti=c82d1000)
[ 221.629066] Stack: c080b3c4 c82aa8f8 c82d19c2 00016210 c080b3be c82d1bd4 c82aa8f0 00000300
[ 221.629066] 01000000 750008b1 74006e00 74006900 65006c00 c82d6400 c013bd35 c8224060
[ 221.629066] 00000036 00000046 c82d19f0 00000082 c8224548 c8224060 00000036 c0d653cc
[ 221.629066] Call Trace:
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c01302d2>] ? __kernel_text_address+0x1b/0x27
[ 221.629066] [<c010487a>] ? dump_trace+0xca/0xd6
[ 221.629066] [<c0109e32>] ? save_stack_address+0x0/0x2c
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c013b571>] ? save_trace+0x37/0x8d
[ 221.629066] [<c013b62e>] ? add_lock_to_list+0x67/0x8d
[ 221.629066] [<c013ea1c>] ? validate_chain+0x8a4/0x9f4
[ 221.629066] [<c013553d>] ? down+0xc/0x2f
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c013da5d>] ? mark_held_locks+0x43/0x5a
[ 221.629066] [<c013dc3a>] ? trace_hardirqs_on+0xb/0xd
[ 221.629066] [<c013dbf4>] ? trace_hardirqs_on_caller+0xf4/0x12f
[ 221.629066] [<c06abec8>] ? _spin_unlock_irqrestore+0x42/0x58
[ 221.629066] [<c013555c>] ? down+0x2b/0x2f
[ 221.629066] [<c022aa68>] ? hfsplus_iget+0xa0/0x154
[ 221.629066] [<c022b0b9>] ? hfsplus_fill_super+0x280/0x447
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c041c9e4>] ? string+0x2b/0x74
[ 221.629066] [<c041cd16>] ? vsnprintf+0x2e9/0x512
[ 221.629066] [<c010487a>] ? dump_trace+0xca/0xd6
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c013b571>] ? save_trace+0x37/0x8d
[ 221.629066] [<c013b62e>] ? add_lock_to_list+0x67/0x8d
[ 221.629066] [<c013ea1c>] ? validate_chain+0x8a4/0x9f4
[ 221.629066] [<c01354d3>] ? up+0xc/0x2f
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c041cfb7>] ? snprintf+0x1b/0x1d
[ 221.629066] [<c01ba466>] ? disk_name+0x25/0x67
[ 221.629066] [<c0183960>] ? get_sb_bdev+0xcd/0x10b
[ 221.629066] [<c016ad92>] ? kstrdup+0x2a/0x4c
[ 221.629066] [<c022a7b3>] ? hfsplus_get_sb+0x13/0x15
[ 221.629066] [<c022ae39>] ? hfsplus_fill_super+0x0/0x447
[ 221.629066] [<c0183583>] ? vfs_kern_mount+0x3b/0x76
[ 221.629066] [<c0183602>] ? do_kern_mount+0x32/0xba
[ 221.629066] [<c01960d4>] ? do_new_mount+0x46/0x74
[ 221.629066] [<c0196277>] ? do_mount+0x175/0x193
[ 221.629066] [<c013dbf4>] ? trace_hardirqs_on_caller+0xf4/0x12f
[ 221.629066] [<c01663b2>] ? __get_free_pages+0x1e/0x24
[ 221.629066] [<c06ac07b>] ? lock_kernel+0x19/0x8c
[ 221.629066] [<c01962e6>] ? sys_mount+0x51/0x9b
[ 221.629066] [<c01962f9>] ? sys_mount+0x64/0x9b
[ 221.629066] [<c01038bd>] ? sysenter_do_call+0x12/0x31
[ 221.629066] =======================
[ 221.629066] Code: 89 c2 c1 e2 08 c1 e8 08 09 c2 8b 85 e8 fd ff ff 66 89 50 06 89 c7 53 83 c7 08 56 57 68 c4 b3 80 c0 e8 8c 5c ef ff 89 d9 c1 e9 02 <f3> a5 89 d9 83 e1 03 74 02 f3 a4 83 c3 06 8b 95 e8 fd ff ff 0f
[ 221.629066] EIP: [<c022d4b1>] hfsplus_find_cat+0x10d/0x151 SS:ESP 0068:c82d199c
[ 221.629066] ---[ end trace e417a1d67f0d0066 ]---
Since hfsplus_cat_build_key_uni() returns void and only has one callsite,
the check is performed at the callsite.
Signed-off-by: Eric Sesterhenn <snakebyte@gmx.de>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
void Tensor::CheckTypeAndIsAligned(DataType expected_dtype) const {
CHECK_EQ(dtype(), expected_dtype)
<< " " << DataTypeString(expected_dtype) << " expected, got "
<< DataTypeString(dtype());
CHECK(IsAligned()) << "ptr = " << base<void>();
}
|
Safe
|
[
"CWE-345"
] |
tensorflow
|
abcced051cb1bd8fb05046ac3b6023a7ebcc4578
|
1.0241396976575473e+38
| 6 |
Prevent crashes when loading tensor slices with unsupported types.
Also fix the `Tensor(const TensorShape&)` constructor swapping the LOG(FATAL)
messages for the unset and unsupported types.
PiperOrigin-RevId: 392695027
Change-Id: I4beda7db950db951d273e3259a7c8534ece49354
| 0 |
static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
{
int retval;
struct inode *inode;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de;
handle_t *handle = NULL;
if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
return -EIO;
/* Initialize quotas before so that eventual writes go in
* separate transaction */
retval = dquot_initialize(dir);
if (retval)
return retval;
retval = dquot_initialize(d_inode(dentry));
if (retval)
return retval;
retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (IS_ERR(bh))
return PTR_ERR(bh);
if (!bh)
goto end_rmdir;
inode = d_inode(dentry);
retval = -EFSCORRUPTED;
if (le32_to_cpu(de->inode) != inode->i_ino)
goto end_rmdir;
retval = -ENOTEMPTY;
if (!ext4_empty_dir(inode))
goto end_rmdir;
handle = ext4_journal_start(dir, EXT4_HT_DIR,
EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
handle = NULL;
goto end_rmdir;
}
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_rmdir;
if (!EXT4_DIR_LINK_EMPTY(inode))
ext4_warning_inode(inode,
"empty directory '%.*s' has too many links (%u)",
dentry->d_name.len, dentry->d_name.name,
inode->i_nlink);
inode_inc_iversion(inode);
clear_nlink(inode);
/* There's no need to set i_disksize: the fact that i_nlink is
* zero will ensure that the right thing happens during any
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
retval = ext4_mark_inode_dirty(handle, inode);
if (retval)
goto end_rmdir;
ext4_dec_count(handle, dir);
ext4_update_dx_flag(dir);
retval = ext4_mark_inode_dirty(handle, dir);
#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want avoid
* invalidating the dentries here, alongside with returning the
* negative dentries at ext4_lookup(), when it is better
* supported by the VFS for the CI case.
*/
if (IS_CASEFOLDED(dir))
d_invalidate(dentry);
#endif
end_rmdir:
brelse(bh);
if (handle)
ext4_journal_stop(handle);
return retval;
}
|
Safe
|
[
"CWE-125"
] |
linux
|
5872331b3d91820e14716632ebb56b1399b34fe1
|
2.1289405404689936e+38
| 88 |
ext4: fix potential negative array index in do_split()
If for any reason a directory passed to do_split() does not have enough
active entries to exceed half the size of the block, we can end up
iterating over all "count" entries without finding a split point.
In this case, count == move, and split will be zero, and we will
attempt a negative index into map[].
Guard against this by detecting this case, and falling back to
split-to-half-of-count instead; in this case we will still have
plenty of space (> half blocksize) in each split block.
Fixes: ef2b02d3e617 ("ext34: ensure do_split leaves enough free space in both blocks")
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/f53e246b-647c-64bb-16ec-135383c70ad7@redhat.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
| 0 |
void rfbLogPerror(const char *str)
{
rfbErr("%s: %s\n", str, strerror(errno));
}
|
Safe
|
[] |
libvncserver
|
804335f9d296440bb708ca844f5d89b58b50b0c6
|
3.2153901393457156e+38
| 4 |
Thread safety for zrle, zlib, tight.
Proposed tight security type fix for debian bug 517422.
| 0 |
QPDF::calculateHPageOffset(
std::map<int, QPDFXRefEntry> const& xref,
std::map<int, qpdf_offset_t> const& lengths,
std::map<int, int> const& obj_renumber)
{
// Page Offset Hint Table
// We are purposely leaving some values set to their initial zero
// values.
std::vector<QPDFObjectHandle> const& pages = getAllPages();
size_t npages = pages.size();
CHPageOffset& cph = this->m->c_page_offset_data;
std::vector<CHPageOffsetEntry>& cphe = cph.entries;
// Calculate minimum and maximum values for number of objects per
// page and page length.
int min_nobjects = cphe.at(0).nobjects;
int max_nobjects = min_nobjects;
int min_length = outputLengthNextN(
pages.at(0).getObjectID(), min_nobjects, lengths, obj_renumber);
int max_length = min_length;
int max_shared = cphe.at(0).nshared_objects;
HPageOffset& ph = this->m->page_offset_hints;
std::vector<HPageOffsetEntry>& phe = ph.entries;
// npages is the size of the existing pages array.
phe = std::vector<HPageOffsetEntry>(npages);
for (unsigned int i = 0; i < npages; ++i)
{
// Calculate values for each page, assigning full values to
// the delta items. They will be adjusted later.
// Repeat calculations for page 0 so we can assign to phe[i]
// without duplicating those assignments.
int nobjects = cphe.at(i).nobjects;
int length = outputLengthNextN(
pages.at(i).getObjectID(), nobjects, lengths, obj_renumber);
int nshared = cphe.at(i).nshared_objects;
min_nobjects = std::min(min_nobjects, nobjects);
max_nobjects = std::max(max_nobjects, nobjects);
min_length = std::min(min_length, length);
max_length = std::max(max_length, length);
max_shared = std::max(max_shared, nshared);
phe.at(i).delta_nobjects = nobjects;
phe.at(i).delta_page_length = length;
phe.at(i).nshared_objects = nshared;
}
ph.min_nobjects = min_nobjects;
int in_page0_id = pages.at(0).getObjectID();
int out_page0_id = (*(obj_renumber.find(in_page0_id))).second;
ph.first_page_offset = (*(xref.find(out_page0_id))).second.getOffset();
ph.nbits_delta_nobjects = nbits(max_nobjects - min_nobjects);
ph.min_page_length = min_length;
ph.nbits_delta_page_length = nbits(max_length - min_length);
ph.nbits_nshared_objects = nbits(max_shared);
ph.nbits_shared_identifier =
nbits(this->m->c_shared_object_data.nshared_total);
ph.shared_denominator = 4; // doesn't matter
// It isn't clear how to compute content offset and content
// length. Since we are not interleaving page objects with the
// content stream, we'll use the same values for content length as
// page length. We will use 0 as content offset because this is
// what Adobe does (implementation note 127) and pdlin as well.
ph.nbits_delta_content_length = ph.nbits_delta_page_length;
ph.min_content_length = ph.min_page_length;
for (size_t i = 0; i < npages; ++i)
{
// Adjust delta entries
if ((phe.at(i).delta_nobjects < min_nobjects) ||
(phe.at(i).delta_page_length < min_length))
{
stopOnError("found too small delta nobjects or delta page length"
" while writing linearization data");
}
phe.at(i).delta_nobjects -= min_nobjects;
phe.at(i).delta_page_length -= min_length;
phe.at(i).delta_content_length = phe.at(i).delta_page_length;
for (size_t j = 0; j < toS(cphe.at(i).nshared_objects); ++j)
{
phe.at(i).shared_identifiers.push_back(
cphe.at(i).shared_identifiers.at(j));
phe.at(i).shared_numerators.push_back(0);
}
}
}
|
Safe
|
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
|
2.5599315385352426e+38
| 95 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
| 0 |
static int myisamchk(MI_CHECK *param, char * filename)
{
int error,lock_type,recreate;
int rep_quick= param->testflag & (T_QUICK | T_FORCE_UNIQUENESS);
MI_INFO *info;
File datafile;
char llbuff[22],llbuff2[22];
my_bool state_updated=0;
MYISAM_SHARE *share;
DBUG_ENTER("myisamchk");
param->out_flag=error=param->warning_printed=param->error_printed=
recreate=0;
datafile=0;
param->isam_file_name=filename; /* For error messages */
if (!(info=mi_open(filename,
(param->testflag & (T_DESCRIPT | T_READONLY)) ?
O_RDONLY : O_RDWR,
HA_OPEN_FOR_REPAIR |
((param->testflag & T_WAIT_FOREVER) ?
HA_OPEN_WAIT_IF_LOCKED :
(param->testflag & T_DESCRIPT) ?
HA_OPEN_IGNORE_IF_LOCKED : HA_OPEN_ABORT_IF_LOCKED))))
{
/* Avoid twice printing of isam file name */
param->error_printed=1;
switch (my_errno) {
case HA_ERR_CRASHED:
mi_check_print_error(param,"'%s' doesn't have a correct index definition. You need to recreate it before you can do a repair",filename);
break;
case HA_ERR_NOT_A_TABLE:
mi_check_print_error(param,"'%s' is not a MyISAM-table",filename);
break;
case HA_ERR_CRASHED_ON_USAGE:
mi_check_print_error(param,"'%s' is marked as crashed",filename);
break;
case HA_ERR_CRASHED_ON_REPAIR:
mi_check_print_error(param,"'%s' is marked as crashed after last repair",filename);
break;
case HA_ERR_OLD_FILE:
mi_check_print_error(param,"'%s' is an old type of MyISAM-table", filename);
break;
case HA_ERR_END_OF_FILE:
mi_check_print_error(param,"Couldn't read complete header from '%s'", filename);
break;
case EAGAIN:
mi_check_print_error(param,"'%s' is locked. Use -w to wait until unlocked",filename);
break;
case ENOENT:
mi_check_print_error(param,"File '%s' doesn't exist",filename);
break;
case EACCES:
mi_check_print_error(param,"You don't have permission to use '%s'",filename);
break;
default:
mi_check_print_error(param,"%d when opening MyISAM-table '%s'",
my_errno,filename);
break;
}
DBUG_RETURN(1);
}
share=info->s;
share->options&= ~HA_OPTION_READ_ONLY_DATA; /* We are modifing it */
share->tot_locks-= share->r_locks;
share->r_locks=0;
/*
Skip the checking of the file if:
We are using --fast and the table is closed properly
We are using --check-only-changed-tables and the table hasn't changed
*/
if (param->testflag & (T_FAST | T_CHECK_ONLY_CHANGED))
{
my_bool need_to_check= mi_is_crashed(info) || share->state.open_count != 0;
if ((param->testflag & (T_REP_ANY | T_SORT_RECORDS)) &&
((share->state.changed & (STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR) ||
!(param->testflag & T_CHECK_ONLY_CHANGED))))
need_to_check=1;
if (info->s->base.keys && info->state->records)
{
if ((param->testflag & T_STATISTICS) &&
(share->state.changed & STATE_NOT_ANALYZED))
need_to_check=1;
if ((param->testflag & T_SORT_INDEX) &&
(share->state.changed & STATE_NOT_SORTED_PAGES))
need_to_check=1;
if ((param->testflag & T_REP_BY_SORT) &&
(share->state.changed & STATE_NOT_OPTIMIZED_KEYS))
need_to_check=1;
}
if ((param->testflag & T_CHECK_ONLY_CHANGED) &&
(share->state.changed & (STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR)))
need_to_check=1;
if (!need_to_check)
{
if (!(param->testflag & T_SILENT) || param->testflag & T_INFO)
printf("MyISAM file: %s is already checked\n",filename);
if (mi_close(info))
{
mi_check_print_error(param,"%d when closing MyISAM-table '%s'",
my_errno,filename);
DBUG_RETURN(1);
}
DBUG_RETURN(0);
}
}
if ((param->testflag & (T_REP_ANY | T_STATISTICS |
T_SORT_RECORDS | T_SORT_INDEX)) &&
(((param->testflag & T_UNPACK) &&
share->data_file_type == COMPRESSED_RECORD) ||
mi_uint2korr(share->state.header.state_info_length) !=
MI_STATE_INFO_SIZE ||
mi_uint2korr(share->state.header.base_info_length) !=
MI_BASE_INFO_SIZE ||
mi_is_any_intersect_keys_active(param->keys_in_use, share->base.keys,
~share->state.key_map) ||
test_if_almost_full(info) ||
info->s->state.header.file_version[3] != myisam_file_magic[3] ||
(set_collation &&
set_collation->number != share->state.header.language) ||
myisam_block_size != MI_KEY_BLOCK_LENGTH))
{
if (set_collation)
param->language= set_collation->number;
if (recreate_table(param, &info,filename))
{
(void) fprintf(stderr,
"MyISAM-table '%s' is not fixed because of errors\n",
filename);
return(-1);
}
recreate=1;
if (!(param->testflag & T_REP_ANY))
{
param->testflag|=T_REP_BY_SORT; /* if only STATISTICS */
if (!(param->testflag & T_SILENT))
printf("- '%s' has old table-format. Recreating index\n",filename);
rep_quick|=T_QUICK;
}
share=info->s;
share->tot_locks-= share->r_locks;
share->r_locks=0;
}
if (param->testflag & T_DESCRIPT)
{
param->total_files++;
param->total_records+=info->state->records;
param->total_deleted+=info->state->del;
descript(param, info, filename);
}
else
{
if (!stopwords_inited++)
ft_init_stopwords();
if (!(param->testflag & T_READONLY))
lock_type = F_WRLCK; /* table is changed */
else
lock_type= F_RDLCK;
if (info->lock_type == F_RDLCK)
info->lock_type=F_UNLCK; /* Read only table */
if (_mi_readinfo(info,lock_type,0))
{
mi_check_print_error(param,"Can't lock indexfile of '%s', error: %d",
filename,my_errno);
param->error_printed=0;
goto end2;
}
/*
_mi_readinfo() has locked the table.
We mark the table as locked (without doing file locks) to be able to
use functions that only works on locked tables (like row caching).
*/
mi_lock_database(info, F_EXTRA_LCK);
datafile=info->dfile;
if (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX))
{
if (param->testflag & T_REP_ANY)
{
ulonglong tmp=share->state.key_map;
mi_copy_keys_active(share->state.key_map, share->base.keys,
param->keys_in_use);
if (tmp != share->state.key_map)
info->update|=HA_STATE_CHANGED;
}
if (rep_quick && chk_del(param, info, param->testflag & ~T_VERBOSE))
{
if (param->testflag & T_FORCE_CREATE)
{
rep_quick=0;
mi_check_print_info(param,"Creating new data file\n");
}
else
{
error=1;
mi_check_print_error(param,
"Quick-recover aborted; Run recovery without switch 'q'");
}
}
if (!error)
{
if ((param->testflag & (T_REP_BY_SORT | T_REP_PARALLEL)) &&
(mi_is_any_key_active(share->state.key_map) ||
(rep_quick && !param->keys_in_use && !recreate)) &&
mi_test_if_sort_rep(info, info->state->records,
info->s->state.key_map,
param->force_sort))
{
if (param->testflag & T_REP_BY_SORT)
error=mi_repair_by_sort(param,info,filename,rep_quick);
else
error=mi_repair_parallel(param,info,filename,rep_quick);
state_updated=1;
}
else if (param->testflag & T_REP_ANY)
error=mi_repair(param, info,filename,rep_quick);
}
if (!error && param->testflag & T_SORT_RECORDS)
{
/*
The data file is nowadays reopened in the repair code so we should
soon remove the following reopen-code
*/
#ifndef TO_BE_REMOVED
if (param->out_flag & O_NEW_DATA)
{ /* Change temp file to org file */
(void) my_close(info->dfile,MYF(MY_WME)); /* Close new file */
error|=change_to_newfile(filename, MI_NAME_DEXT, DATA_TMP_EXT, MYF(0));
if (mi_open_datafile(info,info->s, NULL, -1))
error=1;
param->out_flag&= ~O_NEW_DATA; /* We are using new datafile */
param->read_cache.file=info->dfile;
}
#endif
if (! error)
{
uint key;
/*
We can't update the index in mi_sort_records if we have a
prefix compressed or fulltext index
*/
my_bool update_index=1;
for (key=0 ; key < share->base.keys; key++)
if (share->keyinfo[key].flag & (HA_BINARY_PACK_KEY|HA_FULLTEXT))
update_index=0;
error=mi_sort_records(param,info,filename,param->opt_sort_key,
/* what is the following parameter for ? */
(my_bool) !(param->testflag & T_REP),
update_index);
datafile=info->dfile; /* This is now locked */
if (!error && !update_index)
{
if (param->verbose)
puts("Table had a compressed index; We must now recreate the index");
error=mi_repair_by_sort(param,info,filename,1);
}
}
}
if (!error && param->testflag & T_SORT_INDEX)
error=mi_sort_index(param,info,filename);
if (!error)
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR);
else
mi_mark_crashed(info);
}
else if ((param->testflag & T_CHECK) || !(param->testflag & T_AUTO_INC))
{
if (!(param->testflag & T_SILENT) || param->testflag & T_INFO)
printf("Checking MyISAM file: %s\n",filename);
if (!(param->testflag & T_SILENT))
printf("Data records: %7s Deleted blocks: %7s\n",
llstr(info->state->records,llbuff),
llstr(info->state->del,llbuff2));
error =chk_status(param,info);
mi_intersect_keys_active(share->state.key_map, param->keys_in_use);
error =chk_size(param,info);
if (!error || !(param->testflag & (T_FAST | T_FORCE_CREATE)))
error|=chk_del(param, info,param->testflag);
if ((!error || (!(param->testflag & (T_FAST | T_FORCE_CREATE)) &&
!param->start_check_pos)))
{
error|=chk_key(param, info);
if (!error && (param->testflag & (T_STATISTICS | T_AUTO_INC)))
error=update_state_info(param, info,
((param->testflag & T_STATISTICS) ?
UPDATE_STAT : 0) |
((param->testflag & T_AUTO_INC) ?
UPDATE_AUTO_INC : 0));
}
if ((!rep_quick && !error) ||
!(param->testflag & (T_FAST | T_FORCE_CREATE)))
{
if (param->testflag & (T_EXTEND | T_MEDIUM))
(void) init_key_cache(dflt_key_cache,opt_key_cache_block_size,
param->use_buffers, 0, 0);
(void) init_io_cache(¶m->read_cache,datafile,
(uint) param->read_buffer_length,
READ_CACHE,
(param->start_check_pos ?
param->start_check_pos :
share->pack.header_length),
1,
MYF(MY_WME));
lock_memory(param);
if ((info->s->options & (HA_OPTION_PACK_RECORD |
HA_OPTION_COMPRESS_RECORD)) ||
(param->testflag & (T_EXTEND | T_MEDIUM)))
error|=chk_data_link(param, info, param->testflag & T_EXTEND);
error|=flush_blocks(param, share->key_cache, share->kfile);
(void) end_io_cache(¶m->read_cache);
}
if (!error)
{
if ((share->state.changed & STATE_CHANGED) &&
(param->testflag & T_UPDATE_STATE))
info->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR);
}
else if (!mi_is_crashed(info) &&
(param->testflag & T_UPDATE_STATE))
{ /* Mark crashed */
mi_mark_crashed(info);
info->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
}
}
}
if ((param->testflag & T_AUTO_INC) ||
((param->testflag & T_REP_ANY) && info->s->base.auto_key))
update_auto_increment_key(param, info,
(my_bool) !test(param->testflag & T_AUTO_INC));
if (!(param->testflag & T_DESCRIPT))
{
if (info->update & HA_STATE_CHANGED && ! (param->testflag & T_READONLY))
error|=update_state_info(param, info,
UPDATE_OPEN_COUNT |
(((param->testflag & T_REP_ANY) ?
UPDATE_TIME : 0) |
(state_updated ? UPDATE_STAT : 0) |
((param->testflag & T_SORT_RECORDS) ?
UPDATE_SORT : 0)));
(void) lock_file(param, share->kfile,0L,F_UNLCK,"indexfile",filename);
info->update&= ~HA_STATE_CHANGED;
}
mi_lock_database(info, F_UNLCK);
end2:
if (mi_close(info))
{
mi_check_print_error(param,"%d when closing MyISAM-table '%s'",my_errno,filename);
DBUG_RETURN(1);
}
if (error == 0)
{
if (param->out_flag & O_NEW_DATA)
error|=change_to_newfile(filename,MI_NAME_DEXT,DATA_TMP_EXT,
((param->testflag & T_BACKUP_DATA) ?
MYF(MY_REDEL_MAKE_BACKUP) : MYF(0)));
if (param->out_flag & O_NEW_INDEX)
error|=change_to_newfile(filename, MI_NAME_IEXT, INDEX_TMP_EXT, MYF(0));
}
(void) fflush(stdout); (void) fflush(stderr);
if (param->error_printed)
{
if (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX))
{
(void) fprintf(stderr,
"MyISAM-table '%s' is not fixed because of errors\n",
filename);
if (param->testflag & T_REP_ANY)
(void) fprintf(stderr,
"Try fixing it by using the --safe-recover (-o), the --force (-f) option or by not using the --quick (-q) flag\n");
}
else if (!(param->error_printed & 2) &&
!(param->testflag & T_FORCE_CREATE))
(void) fprintf(stderr,
"MyISAM-table '%s' is corrupted\nFix it using switch \"-r\" or \"-o\"\n",
filename);
}
else if (param->warning_printed &&
! (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX |
T_FORCE_CREATE)))
(void) fprintf(stderr, "MyISAM-table '%s' is usable but should be fixed\n",
filename);
(void) fflush(stderr);
DBUG_RETURN(error);
} /* myisamchk */
|
Vulnerable
|
[
"CWE-362"
] |
mysql-server
|
4e5473862e6852b0f3802b0cd0c6fa10b5253291
|
8.974492442578604e+37
| 395 |
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations.
| 1 |
static void init_config_defines(apr_pool_t *pconf)
{
saved_server_config_defines = ap_server_config_defines;
/* Use apr_array_copy instead of apr_array_copy_hdr because it does not
* protect from the way unset_define removes entries.
*/
ap_server_config_defines = apr_array_copy(pconf, ap_server_config_defines);
}
|
Safe
|
[
"CWE-416",
"CWE-284"
] |
httpd
|
4cc27823899e070268b906ca677ee838d07cf67a
|
2.3515276219033043e+38
| 8 |
core: Disallow Methods' registration at run time (.htaccess), they may be
used only if registered at init time (httpd.conf).
Calling ap_method_register() in children processes is not the right scope
since it won't be shared for all requests.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68
| 0 |
u64 gf_isom_get_media_data_size(GF_ISOFile *movie, u32 trackNumber)
{
u32 i;
u64 size;
GF_SampleSizeBox *stsz;
GF_TrackBox *tk = gf_isom_get_track_from_file(movie, trackNumber);
if (!tk) return 0;
stsz = tk->Media->information->sampleTable->SampleSize;
if (!stsz) return 0;
if (stsz->sampleSize) return stsz->sampleSize*stsz->sampleCount;
size = 0;
for (i=0; i<stsz->sampleCount; i++) size += stsz->sizes[i];
return size;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
f0a41d178a2dc5ac185506d9fa0b0a58356b16f7
|
1.4080478797562022e+38
| 14 |
fixed #2120
| 0 |
/* Destructor for a text style ('styl') box: releases the style record
 * array, if present, and then the box structure itself. */
void styl_box_del(GF_Box *s)
{
	GF_TextStyleBox *box = (GF_TextStyleBox *)s;
	if (box->styles != NULL) {
		gf_free(box->styles);
	}
	gf_free(box);
}
|
Safe
|
[
"CWE-476"
] |
gpac
|
d527325a9b72218612455a534a508f9e1753f76e
|
2.7283604370589147e+38
| 6 |
fixed #1768
| 0 |
/*
 * gg_notify_ex()
 *
 * Sends the initial contact-list (notify) packets to the Gadu-Gadu server
 * so that it starts reporting status changes for the listed users.
 *
 *  - sess: active session; must be in GG_STATE_CONNECTED
 *  - userlist: array of user identifiers; may be NULL for an empty list
 *  - types: per-user contact type flags, parallel to userlist; may be NULL,
 *    in which case GG_USER_NORMAL is used for every entry
 *  - count: number of entries in userlist (and types, when non-NULL)
 *
 * Returns 0 on success, -1 on error (errno set).
 */
int gg_notify_ex(struct gg_session *sess, uin_t *userlist, char *types, int count)
{
	struct gg_notify *n;
	uin_t *u;
	char *t;
	int i, res = 0;

	gg_debug_session(sess, GG_DEBUG_FUNCTION, "** gg_notify_ex(%p, %p, %p, %d);\n", sess, userlist, types, count);

	if (!sess) {
		errno = EFAULT;
		return -1;
	}

	if (sess->state != GG_STATE_CONNECTED) {
		errno = ENOTCONN;
		return -1;
	}

	/* Protocol 11.0 and newer use a different wire format. */
	if (sess->protocol_version >= GG_PROTOCOL_110)
		return gg_notify105_ex(sess, userlist, types, count);

	/* No contacts at all: tell the server the list is empty. */
	if (!userlist || !count)
		return gg_send_packet(sess, GG_LIST_EMPTY, NULL);

	/* Send the list in chunks of at most 400 entries; every chunk but
	 * the last is marked GG_NOTIFY_FIRST, the final one GG_NOTIFY_LAST. */
	while (count > 0) {
		int part_count, packet_type;

		if (count > 400) {
			part_count = 400;
			packet_type = GG_NOTIFY_FIRST;
		} else {
			part_count = count;
			packet_type = GG_NOTIFY_LAST;
		}

		if (!(n = (struct gg_notify*) malloc(sizeof(*n) * part_count)))
			return -1;

		/* Build the wire records; UINs are byte-swapped to protocol
		 * endianness via gg_fix32().  NOTE(review): when types == NULL,
		 * t is still incremented from NULL — never dereferenced, but
		 * technically undefined behavior; confirm against upstream. */
		for (u = userlist, t = types, i = 0; i < part_count; u++, t++, i++) {
			n[i].uin = gg_fix32(*u);
			if (types == NULL)
				n[i].dunno1 = GG_USER_NORMAL;
			else
				n[i].dunno1 = *t;
		}

		if (gg_send_packet(sess, packet_type, n, sizeof(*n) * part_count, NULL) == -1) {
			free(n);
			res = -1;
			break;
		}

		count -= part_count;
		userlist += part_count;
		if (types != NULL)
			types += part_count;

		free(n);
	}

	return res;
}
|
Safe
|
[
"CWE-310"
] |
libgadu
|
23644f1fb8219031b3cac93289a588b05f90226b
|
2.9858373818152134e+38
| 63 |
Poprawka ograniczania długości opisu.
| 0 |
/* D-Bus property getter for TimerSlackNSec: reports the unit's configured
 * timer slack, or — when none was configured (NSEC_INFINITY) — the process'
 * current value as returned by prctl(PR_GET_TIMERSLACK). */
static int property_get_timer_slack_nsec(
                sd_bus *bus,
                const char *path,
                const char *interface,
                const char *property,
                sd_bus_message *reply,
                void *userdata,
                sd_bus_error *error) {

        ExecContext *c = userdata;
        uint64_t value;

        assert(bus);
        assert(reply);
        assert(c);

        value = (c->timer_slack_nsec == NSEC_INFINITY)
                ? (uint64_t) prctl(PR_GET_TIMERSLACK)
                : (uint64_t) c->timer_slack_nsec;

        return sd_bus_message_append(reply, "t", value);
}
|
Safe
|
[
"CWE-269"
] |
systemd
|
f69567cbe26d09eac9d387c0be0fc32c65a83ada
|
1.6139984510234558e+38
| 23 |
core: expose SUID/SGID restriction as new unit setting RestrictSUIDSGID=
| 0 |
/*
 * Parses an HTTP status line ("HTTP/1.x <code> <reason>") starting at <ptr>,
 * resuming in parser state <state>, never reading past <end>.  The offsets
 * and lengths of the version, status code and reason phrase are recorded in
 * msg->sl.st, all relative to the buffer start (msg->chn->buf->p).
 *
 * On success, returns a pointer to the CR or LF that ends the line and
 * stores the status-line length in msg->sl.st.l.  If the input runs out
 * before the line completes, returns NULL and saves the current state and
 * offset through <ret_state>/<ret_ptr> so parsing can resume later.
 * EAT_AND_JUMP_OR_RETURN consumes one character and either jumps to the
 * next state's label or branches to http_msg_ood when <end> is reached.
 */
const char *http_parse_stsline(struct http_msg *msg,
                               unsigned int state, const char *ptr, const char *end,
                               unsigned int *ret_ptr, unsigned int *ret_state)
{
	const char *msg_start = msg->chn->buf->p;

	switch (state) {
	case HTTP_MSG_RPVER:
	http_msg_rpver:
		if (likely(HTTP_IS_VER_TOKEN(*ptr)))
			EAT_AND_JUMP_OR_RETURN(http_msg_rpver, HTTP_MSG_RPVER);

		if (likely(HTTP_IS_SPHT(*ptr))) {
			/* version ends here; record its length */
			msg->sl.st.v_l = ptr - msg_start;
			EAT_AND_JUMP_OR_RETURN(http_msg_rpver_sp, HTTP_MSG_RPVER_SP);
		}
		state = HTTP_MSG_ERROR;
		break;

	case HTTP_MSG_RPVER_SP:
	http_msg_rpver_sp:
		if (likely(!HTTP_IS_LWS(*ptr))) {
			/* first character of the status code */
			msg->sl.st.c = ptr - msg_start;
			goto http_msg_rpcode;
		}
		if (likely(HTTP_IS_SPHT(*ptr)))
			EAT_AND_JUMP_OR_RETURN(http_msg_rpver_sp, HTTP_MSG_RPVER_SP);
		/* so it's a CR/LF, this is invalid */
		state = HTTP_MSG_ERROR;
		break;

	case HTTP_MSG_RPCODE:
	http_msg_rpcode:
		if (likely(!HTTP_IS_LWS(*ptr)))
			EAT_AND_JUMP_OR_RETURN(http_msg_rpcode, HTTP_MSG_RPCODE);

		if (likely(HTTP_IS_SPHT(*ptr))) {
			msg->sl.st.c_l = ptr - msg_start - msg->sl.st.c;
			EAT_AND_JUMP_OR_RETURN(http_msg_rpcode_sp, HTTP_MSG_RPCODE_SP);
		}

		/* so it's a CR/LF, so there is no reason phrase */
		msg->sl.st.c_l = ptr - msg_start - msg->sl.st.c;

	http_msg_rsp_reason:
		/* FIXME: should we support HTTP responses without any reason phrase ? */
		msg->sl.st.r = ptr - msg_start;
		msg->sl.st.r_l = 0;
		goto http_msg_rpline_eol;

	case HTTP_MSG_RPCODE_SP:
	http_msg_rpcode_sp:
		if (likely(!HTTP_IS_LWS(*ptr))) {
			msg->sl.st.r = ptr - msg_start;
			goto http_msg_rpreason;
		}
		if (likely(HTTP_IS_SPHT(*ptr)))
			EAT_AND_JUMP_OR_RETURN(http_msg_rpcode_sp, HTTP_MSG_RPCODE_SP);
		/* so it's a CR/LF, so there is no reason phrase */
		goto http_msg_rsp_reason;

	case HTTP_MSG_RPREASON:
	http_msg_rpreason:
		if (likely(!HTTP_IS_CRLF(*ptr)))
			EAT_AND_JUMP_OR_RETURN(http_msg_rpreason, HTTP_MSG_RPREASON);
		msg->sl.st.r_l = ptr - msg_start - msg->sl.st.r;
	http_msg_rpline_eol:
		/* We have seen the end of line. Note that we do not
		 * necessarily have the \n yet, but at least we know that we
		 * have EITHER \r OR \n, otherwise the response would not be
		 * complete. We can then record the response length and return
		 * to the caller which will be able to register it.
		 */
		msg->sl.st.l = ptr - msg_start - msg->sol;
		return ptr;

#ifdef DEBUG_FULL
	default:
		fprintf(stderr, "FIXME !!!! impossible state at %s:%d = %d\n", __FILE__, __LINE__, state);
		exit(1);
#endif
	}

 http_msg_ood:
	/* out of valid data */
	if (ret_state)
		*ret_state = state;
	if (ret_ptr)
		*ret_ptr = ptr - msg_start;
	return NULL;
}
|
Safe
|
[] |
haproxy
|
aae75e3279c6c9bd136413a72dafdcd4986bb89a
|
1.556440973923099e+38
| 90 |
BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
During normal HTTP request processing, request buffers are realigned if
there are less than global.maxrewrite bytes available after them, in
order to leave enough room for rewriting headers after the request. This
is done in http_wait_for_request().
However, if some HTTP inspection happens during a "tcp-request content"
rule, this realignment is not performed. In theory this is not a problem
because empty buffers are always aligned and TCP inspection happens at
the beginning of a connection. But with HTTP keep-alive, it also happens
at the beginning of each subsequent request. So if a second request was
pipelined by the client before the first one had a chance to be forwarded,
the second request will not be realigned. Then, http_wait_for_request()
will not perform such a realignment either because the request was
already parsed and marked as such. The consequence of this, is that the
rewrite of a sufficient number of such pipelined, unaligned requests may
leave less room past the request been processed than the configured
reserve, which can lead to a buffer overflow if request processing appends
some data past the end of the buffer.
A number of conditions are required for the bug to be triggered :
- HTTP keep-alive must be enabled ;
- HTTP inspection in TCP rules must be used ;
- some request appending rules are needed (reqadd, x-forwarded-for)
- since empty buffers are always realigned, the client must pipeline
enough requests so that the buffer always contains something till
the point where there is no more room for rewriting.
While such a configuration is quite unlikely to be met (which is
confirmed by the bug's lifetime), a few people do use these features
together for very specific usages. And more importantly, writing such
a configuration and the request to attack it is trivial.
A quick workaround consists in forcing keep-alive off by adding
"option httpclose" or "option forceclose" in the frontend. Alternatively,
disabling HTTP-based TCP inspection rules enough if the application
supports it.
At first glance, this bug does not look like it could lead to remote code
execution, as the overflowing part is controlled by the configuration and
not by the user. But some deeper analysis should be performed to confirm
this. And anyway, corrupting the process' memory and crashing it is quite
trivial.
Special thanks go to Yves Lafon from the W3C who reported this bug and
deployed significant efforts to collect the relevant data needed to
understand it in less than one week.
CVE-2013-1912 was assigned to this issue.
Note that 1.4 is also affected so the fix must be backported.
| 0 |
/*
 * Scans an ASCONF-ACK chunk for the response parameter matching the given
 * ASCONF parameter (by correlation id crr_id) and returns its error cause.
 *
 * no_err selects the fallback when no matching response is found:
 * SCTP_ERROR_NO_ERROR (implicit success) or SCTP_ERROR_REQ_REFUSED
 * (implicit refusal).
 */
static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
				       sctp_addip_param_t *asconf_param,
				       int no_err)
{
	sctp_addip_param_t *asconf_ack_param;
	sctp_errhdr_t *err_param;
	int length;
	int asconf_ack_len;
	__be16 err_code;

	if (no_err)
		err_code = SCTP_ERROR_NO_ERROR;
	else
		err_code = SCTP_ERROR_REQ_REFUSED;

	/* Payload remaining after the chunk header. */
	asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) -
			sizeof(sctp_chunkhdr_t);

	/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
	 * the first asconf_ack parameter.
	 */
	length = sizeof(sctp_addiphdr_t);
	asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data +
						  length);
	asconf_ack_len -= length;

	/* Walk the TLV parameter list looking for our correlation id.
	 * NOTE(review): a parameter with param_hdr.length == 0 would not
	 * advance the cursor; this assumes lengths were validated earlier
	 * during chunk processing — confirm against the callers. */
	while (asconf_ack_len > 0) {
		if (asconf_ack_param->crr_id == asconf_param->crr_id) {
			switch (asconf_ack_param->param_hdr.type) {
			case SCTP_PARAM_SUCCESS_REPORT:
				return SCTP_ERROR_NO_ERROR;
			case SCTP_PARAM_ERR_CAUSE:
				/* The error cause follows the addip param
				 * header, provided there is room for it. */
				length = sizeof(sctp_addip_param_t);
				err_param = (void *)asconf_ack_param + length;
				asconf_ack_len -= length;
				if (asconf_ack_len > 0)
					return err_param->cause;
				else
					return SCTP_ERROR_INV_PARAM;
				break;
			default:
				return SCTP_ERROR_INV_PARAM;
			}
		}

		length = ntohs(asconf_ack_param->param_hdr.length);
		asconf_ack_param = (void *)asconf_ack_param + length;
		asconf_ack_len -= length;
	}

	return err_code;
}
|
Safe
|
[] |
linux
|
196d67593439b03088913227093e374235596e33
|
2.4541098071616855e+38
| 52 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <michele@acksyn.org>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/* Report whether point (x,y) lies on the drawable page.  Terminals that
 * can clip for themselves (TERM_CAN_CLIP) accept any coordinate; otherwise
 * the point must fall strictly inside (0,0)-(xmax,ymax). */
on_page(int x, int y)
{
    if (term->flags & TERM_CAN_CLIP)
	return TRUE;
    return (x > 0 && x < term->xmax && y > 0 && y < term->ymax)
	? TRUE : FALSE;
}
|
Safe
|
[
"CWE-787"
] |
gnuplot
|
963c7df3e0c5266efff260d0dff757dfe03d3632
|
3.720156692942226e+37
| 10 |
Better error handling for faulty font syntax
A missing close-quote in an enhanced text font specification could
cause a segfault.
Bug #2303
| 0 |
WORD_DESC *
command_substitute (string, quoted)
char *string;
int quoted;
{
pid_t pid, old_pid, old_pipeline_pgrp, old_async_pid;
char *istring, *s;
int result, fildes[2], function_value, pflags, rc, tflag;
WORD_DESC *ret;
istring = (char *)NULL;
/* Don't fork () if there is no need to. In the case of no command to
run, just return NULL. */
#if 1
for (s = string; s && *s && (shellblank (*s) || *s == '\n'); s++)
;
if (s == 0 || *s == 0)
return ((WORD_DESC *)NULL);
#else
if (!string || !*string || (string[0] == '\n' && !string[1]))
return ((WORD_DESC *)NULL);
#endif
if (wordexp_only && read_but_dont_execute)
{
last_command_exit_value = EX_WEXPCOMSUB;
jump_to_top_level (EXITPROG);
}
/* We're making the assumption here that the command substitution will
eventually run a command from the file system. Since we'll run
maybe_make_export_env in this subshell before executing that command,
the parent shell and any other shells it starts will have to remake
the environment. If we make it before we fork, other shells won't
have to. Don't bother if we have any temporary variable assignments,
though, because the export environment will be remade after this
command completes anyway, but do it if all the words to be expanded
are variable assignments. */
if (subst_assign_varlist == 0 || garglist == 0)
maybe_make_export_env (); /* XXX */
/* Flags to pass to parse_and_execute() */
pflags = (interactive && sourcelevel == 0) ? SEVAL_RESETLINE : 0;
/* Pipe the output of executing STRING into the current shell. */
if (pipe (fildes) < 0)
{
sys_error ("%s", _("cannot make pipe for command substitution"));
goto error_exit;
}
old_pid = last_made_pid;
#if defined (JOB_CONTROL)
old_pipeline_pgrp = pipeline_pgrp;
/* Don't reset the pipeline pgrp if we're already a subshell in a pipeline. */
if ((subshell_environment & SUBSHELL_PIPE) == 0)
pipeline_pgrp = shell_pgrp;
cleanup_the_pipeline ();
#endif /* JOB_CONTROL */
old_async_pid = last_asynchronous_pid;
pid = make_child ((char *)NULL, subshell_environment&SUBSHELL_ASYNC);
last_asynchronous_pid = old_async_pid;
if (pid == 0)
{
/* Reset the signal handlers in the child, but don't free the
trap strings. Set a flag noting that we have to free the
trap strings if we run trap to change a signal disposition. */
reset_signal_handlers ();
if (ISINTERRUPT)
{
kill (getpid (), SIGINT);
CLRINTERRUPT; /* if we're ignoring SIGINT somehow */
}
QUIT; /* catch any interrupts we got post-fork */
subshell_environment |= SUBSHELL_RESETTRAP;
}
#if defined (JOB_CONTROL)
/* XXX DO THIS ONLY IN PARENT ? XXX */
set_sigchld_handler ();
stop_making_children ();
if (pid != 0)
pipeline_pgrp = old_pipeline_pgrp;
#else
stop_making_children ();
#endif /* JOB_CONTROL */
if (pid < 0)
{
sys_error (_("cannot make child for command substitution"));
error_exit:
last_made_pid = old_pid;
FREE (istring);
close (fildes[0]);
close (fildes[1]);
return ((WORD_DESC *)NULL);
}
if (pid == 0)
{
/* The currently executing shell is not interactive. */
interactive = 0;
set_sigint_handler (); /* XXX */
free_pushed_string_input ();
/* Discard buffered stdio output before replacing the underlying file
descriptor. */
fpurge (stdout);
if (dup2 (fildes[1], 1) < 0)
{
sys_error ("%s", _("command_substitute: cannot duplicate pipe as fd 1"));
exit (EXECUTION_FAILURE);
}
/* If standard output is closed in the parent shell
(such as after `exec >&-'), file descriptor 1 will be
the lowest available file descriptor, and end up in
fildes[0]. This can happen for stdin and stderr as well,
but stdout is more important -- it will cause no output
to be generated from this command. */
if ((fildes[1] != fileno (stdin)) &&
(fildes[1] != fileno (stdout)) &&
(fildes[1] != fileno (stderr)))
close (fildes[1]);
if ((fildes[0] != fileno (stdin)) &&
(fildes[0] != fileno (stdout)) &&
(fildes[0] != fileno (stderr)))
close (fildes[0]);
#ifdef __CYGWIN__
/* Let stdio know the fd may have changed from text to binary mode, and
make sure to preserve stdout line buffering. */
freopen (NULL, "w", stdout);
sh_setlinebuf (stdout);
#endif /* __CYGWIN__ */
/* This is a subshell environment. */
subshell_environment |= SUBSHELL_COMSUB;
/* Many shells do not appear to inherit the -v option for command
substitutions. */
change_flag ('v', FLAG_OFF);
/* When inherit_errexit option is not enabled, command substitution does
not inherit the -e flag. It is enabled when Posix mode is enabled */
if (inherit_errexit == 0)
{
builtin_ignoring_errexit = 0;
change_flag ('e', FLAG_OFF);
}
set_shellopts ();
/* If we are expanding a redirection, we can dispose of any temporary
environment we received, since redirections are not supposed to have
access to the temporary environment. We will have to see whether this
affects temporary environments supplied to `eval', but the temporary
environment gets copied to builtin_env at some point. */
if (expanding_redir)
{
flush_temporary_env ();
expanding_redir = 0;
}
remove_quoted_escapes (string);
startup_state = 2; /* see if we can avoid a fork */
/* Give command substitution a place to jump back to on failure,
so we don't go back up to main (). */
result = setjmp_nosigs (top_level);
/* If we're running a command substitution inside a shell function,
trap `return' so we don't return from the function in the subshell
and go off to never-never land. */
if (result == 0 && return_catch_flag)
function_value = setjmp_nosigs (return_catch);
else
function_value = 0;
if (result == ERREXIT)
rc = last_command_exit_value;
else if (result == EXITPROG)
rc = last_command_exit_value;
else if (result)
rc = EXECUTION_FAILURE;
else if (function_value)
rc = return_catch_value;
else
{
subshell_level++;
rc = parse_and_execute (string, "command substitution", pflags|SEVAL_NOHIST);
subshell_level--;
}
last_command_exit_value = rc;
rc = run_exit_trap ();
#if defined (PROCESS_SUBSTITUTION)
unlink_fifo_list ();
#endif
exit (rc);
}
else
{
#if defined (JOB_CONTROL) && defined (PGRP_PIPE)
close_pgrp_pipe ();
#endif /* JOB_CONTROL && PGRP_PIPE */
close (fildes[1]);
tflag = 0;
istring = read_comsub (fildes[0], quoted, &tflag);
close (fildes[0]);
current_command_subst_pid = pid;
last_command_exit_value = wait_for (pid);
last_command_subst_pid = pid;
last_made_pid = old_pid;
#if defined (JOB_CONTROL)
/* If last_command_exit_value > 128, then the substituted command
was terminated by a signal. If that signal was SIGINT, then send
SIGINT to ourselves. This will break out of loops, for instance. */
if (last_command_exit_value == (128 + SIGINT) && last_command_exit_signal == SIGINT)
kill (getpid (), SIGINT);
/* wait_for gives the terminal back to shell_pgrp. If some other
process group should have it, give it away to that group here.
pipeline_pgrp is non-zero only while we are constructing a
pipeline, so what we are concerned about is whether or not that
pipeline was started in the background. A pipeline started in
the background should never get the tty back here. We duplicate
the conditions that wait_for tests to make sure we only give
the terminal back to pipeline_pgrp under the conditions that wait_for
gave it to shell_pgrp. If wait_for doesn't mess with the terminal
pgrp, we should not either. */
if (interactive && pipeline_pgrp != (pid_t)0 && running_in_background == 0 &&
(subshell_environment & (SUBSHELL_ASYNC|SUBSHELL_PIPE)) == 0)
give_terminal_to (pipeline_pgrp, 0);
#endif /* JOB_CONTROL */
ret = alloc_word_desc ();
ret->word = istring;
ret->flags = tflag;
return ret;
}
|
Safe
|
[
"CWE-20"
] |
bash
|
4f747edc625815f449048579f6e65869914dd715
|
1.9395416092754758e+38
| 255 |
Bash-4.4 patch 7
| 0 |
// Resolves a constant-pool entry for an invokehandle bytecode (JSR 292
// method handle invocation) and fills in 'result'.  The CHECK macros
// return early, propagating any pending exception to the caller.
void LinkResolver::resolve_invokehandle(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
  assert(EnableInvokeDynamic, "");
  // This guy is reached from InterpreterRuntime::resolve_invokehandle.
  KlassHandle resolved_klass;
  Symbol* method_name = NULL;
  Symbol* method_signature = NULL;
  KlassHandle current_klass;
  // Decode the cp entry into klass / method name / signature.
  resolve_pool(resolved_klass, method_name, method_signature, current_klass, pool, index, CHECK);
  if (TraceMethodHandles) {
    ResourceMark rm(THREAD);
    tty->print_cr("resolve_invokehandle %s %s", method_name->as_C_string(), method_signature->as_C_string());
  }
  resolve_handle_call(result, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
}
|
Safe
|
[] |
jdk8u
|
f14e35d20e1a4d0f507f05838844152f2242c6d3
|
1.2277529880664623e+37
| 14 |
8281866: Enhance MethodHandle invocations
Reviewed-by: andrew
Backport-of: d974d9da365f787f67971d88c79371c8b0769f75
| 0 |
/*
 * One-time initialization of a freshly created TCP socket: sets up the
 * retransmission timers, RTT/cwnd state, congestion control, and default
 * send/receive buffer sizes taken from the per-netns sysctls.  The socket
 * starts in TCP_CLOSE.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	/* Out-of-order segments and the retransmit queue live in rbtrees,
	 * empty to start with. */
	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	/* Conservative initial RTO until real RTT samples arrive. */
	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	/* Default buffer sizes come from the per-namespace sysctls
	 * (tcp_wmem[1] / tcp_rmem[1]). */
	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];

	sk_sockets_allocated_inc(sk);
	sk->sk_route_forced_caps = NETIF_F_GSO;
}
|
Safe
|
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
|
1.1052834356476164e+38
| 51 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertized the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16bit witdh of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jonathan Looney <jtl@netflix.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Bruce Curtis <brucec@netflix.com>
Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/*
 * Sends the next queued channel query (MODE, WHO, or ban-list MODE b) for
 * up to max_query_chans channels in a single command, and installs server
 * redirections so the numeric replies are routed to the chanquery handlers.
 *
 * Servers flagged as unable to handle multiple targets per command
 * (no_multi_who / no_multi_mode) get exactly one channel per request.
 */
static void query_send(IRC_SERVER_REC *server, int query)
{
	SERVER_QUERY_REC *rec;
	IRC_CHANNEL_REC *chanrec;
	GSList *chans;
	char *cmd, *chanstr_commas, *chanstr;
	int onlyone, count;

	rec = server->chanqueries;

	/* get the list of channels to query */
	onlyone = (server->no_multi_who && query == CHANNEL_QUERY_WHO) ||
		(server->no_multi_mode && CHANNEL_IS_MODE_QUERY(query));

	if (onlyone) {
		/* Pop a single channel off this query's queue. */
		chans = rec->queries[query];
		rec->queries[query] =
			g_slist_remove_link(rec->queries[query], chans);

		chanrec = chans->data;
		chanstr_commas = g_strdup(chanrec->name);
		chanstr = g_strdup(chanrec->name);
		count = 1;
	} else {
		char *chanstr_spaces;

		chans = rec->queries[query];
		count = g_slist_length(chans);

		/* Take at most max_query_chans entries; the remainder
		 * stays queued for a later round. */
		if (count > server->max_query_chans) {
			GSList *lastchan;

			lastchan = g_slist_nth(rec->queries[query],
					       server->max_query_chans-1);
			count = server->max_query_chans;
			rec->queries[query] = lastchan->next;
			lastchan->next = NULL;
		} else {
			rec->queries[query] = NULL;
		}

		/* Comma-separated list is the command target; the combined
		 * "commas spaces" string is the redirection argument. */
		chanstr_commas = gslistptr_to_string(chans, G_STRUCT_OFFSET(IRC_CHANNEL_REC, name), ",");
		chanstr_spaces = gslistptr_to_string(chans, G_STRUCT_OFFSET(IRC_CHANNEL_REC, name), " ");
		chanstr = g_strconcat(chanstr_commas, " ", chanstr_spaces, NULL);
		g_free(chanstr_spaces);
	}

	rec->current_query_type = query;
	rec->current_queries = chans;

	switch (query) {
	case CHANNEL_QUERY_MODE:
		cmd = g_strdup_printf("MODE %s", chanstr_commas);

		/* the stop-event is received once for each channel,
		   and we want to print 329 event (channel created). */
		server_redirect_event(server, "mode channel", count,
				      chanstr, -1, "chanquery abort",
				      "event 324", "chanquery mode",
				      "event 329", "event 329",
				      "", "chanquery abort", NULL);
		break;

	case CHANNEL_QUERY_WHO:
		cmd = g_strdup_printf("WHO %s", chanstr_commas);

		server_redirect_event(server, "who",
				      server->one_endofwho ? 1 : count,
				      chanstr, -1,
				      "chanquery abort",
				      "event 315", "chanquery who end",
				      "event 352", "silent event who",
				      "", "chanquery abort", NULL);
		break;

	case CHANNEL_QUERY_BMODE:
		cmd = g_strdup_printf("MODE %s b", chanstr_commas);
		/* check all the multichannel problems with all
		   mode requests - if channels are joined manually
		   irssi could ask modes separately but afterwards
		   join the two b/e/I modes together */
		server_redirect_event(server, "mode b", count, chanstr, -1,
				      "chanquery abort",
				      "event 367", "chanquery ban",
				      "event 368", "chanquery ban end",
				      "", "chanquery abort", NULL);
		break;

	default:
		/* Unknown query type: nothing to send. */
		cmd = NULL;
	}

	irc_send_cmd(server, cmd);

	g_free(chanstr);
	g_free(chanstr_commas);
	g_free(cmd);
}
|
Safe
|
[
"CWE-416"
] |
irssi
|
43e44d553d44e313003cee87e6ea5e24d68b84a1
|
2.0162921028433562e+38
| 99 |
Merge branch 'security' into 'master'
Security
Closes GL#12, GL#13, GL#14, GL#15, GL#16
See merge request irssi/irssi!23
| 0 |
// Free every heap-allocated string owned by this Display and null the
// pointers so that a repeated Clear() is harmless.
void Chapters::Display::Clear() {
  delete[] m_country;
  m_country = NULL;

  delete[] m_language;
  m_language = NULL;

  delete[] m_string;
  m_string = NULL;
}
|
Safe
|
[
"CWE-20"
] |
libvpx
|
34d54b04e98dd0bac32e9aab0fbda0bf501bc742
|
1.8487153310650313e+38
| 10 |
update libwebm to libwebm-1.0.0.27-358-gdbf1d10
changelog:
https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10
Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3
| 0 |
/*
 * Final teardown of an mm_struct: frees the page-table directory, tears
 * down the architecture-specific context, runs MMU-notifier cleanup, and
 * returns the structure to its allocator.  Must never be called on the
 * kernel's init_mm (enforced by the BUG_ON).
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	free_mm(mm);
}
|
Safe
|
[
"CWE-264"
] |
linux-2.6
|
2d5516cbb9daf7d0e342a2e3b0fc6f8c39a81205
|
1.1815784455210242e+38
| 8 |
copy_process: fix CLONE_PARENT && parent_exec_id interaction
CLONE_PARENT can fool the ->self_exec_id/parent_exec_id logic. If we
re-use the old parent, we must also re-use ->parent_exec_id to make
sure exit_notify() sees the right ->xxx_exec_id's when the CLONE_PARENT'ed
task exits.
Also, move down the "p->parent_exec_id = p->self_exec_id" thing, to place
two different cases together.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Roland McGrath <roland@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Serge E. Hallyn <serge@hallyn.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
/* QDev teardown hook for USB devices: detach from the bus if still
 * attached, let the device model destroy its state, then release the
 * port if one is held.  Always reports success. */
static int usb_qdev_exit(DeviceState *qdev)
{
    USBDevice *udev = USB_DEVICE(qdev);

    if (udev->attached)
        usb_device_detach(udev);
    usb_device_handle_destroy(udev);
    if (udev->port)
        usb_release_port(udev);
    return 0;
}
|
Safe
|
[
"CWE-119"
] |
qemu
|
9f8e9895c504149d7048e9fc5eb5cbb34b16e49a
|
4.198323212547812e+37
| 13 |
usb: sanity check setup_index+setup_len in post_load
CVE-2013-4541
s->setup_len and s->setup_index are fed into usb_packet_copy as
size/offset into s->data_buf, it's possible for invalid state to exploit
this to load arbitrary data.
setup_len and setup_index should be checked to make sure
they are not negative.
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
| 0 |
// SHOW STATUS handler for the RSA public key status variable: exposes
// the server's RSA public key in PEM form as a character value.
int show_rsa_public_key(THD *thd, SHOW_VAR *var, char *buff)
{
  const char *pem= g_rsa_keys.get_public_key_as_pem();
  var->type= SHOW_CHAR;
  var->value= const_cast<char *>(pem);
  return 0;
}
|
Safe
|
[] |
mysql-server
|
25d1b7e03b9b375a243fabdf0556c063c7282361
|
2.2888902634458398e+38
| 7 |
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
| 0 |
/* Rotates the quicklist by one position: the tail entry is copied to the
 * head and then removed from the tail.  No-op for lists of 0 or 1 entries. */
void quicklistRotate(quicklist *quicklist) {
    if (quicklist->count <= 1)
        return;

    /* First, get the tail entry */
    unsigned char *p = ziplistIndex(quicklist->tail->zl, -1);
    unsigned char *value;
    long long longval;
    unsigned int sz;
    char longstr[32] = {0};
    ziplistGet(p, &value, &sz, &longval);

    /* If value found is NULL, then ziplistGet populated longval instead */
    if (!value) {
        /* Write the longval as a string so we can re-add it */
        sz = ll2string(longstr, sizeof(longstr), longval);
        value = (unsigned char *)longstr;
    }

    /* Add tail entry to head (must happen before tail is deleted). */
    quicklistPushHead(quicklist, value, sz);

    /* If quicklist has only one node, the head ziplist is also the
     * tail ziplist and PushHead() could have reallocated our single ziplist,
     * which would make our pre-existing 'p' unusable. */
    if (quicklist->len == 1) {
        p = ziplistIndex(quicklist->tail->zl, -1);
    }

    /* Remove tail entry. */
    quicklistDelIndex(quicklist, quicklist->tail, &p);
}
|
Safe
|
[
"CWE-190"
] |
redis
|
f6a40570fa63d5afdd596c78083d754081d80ae3
|
7.722489151759531e+37
| 32 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
| 0 |
// Handler for the --check-linearization option: turn the linearization
// check on and note that this invocation needs no output file.
ArgParser::argCheckLinearization()
{
    o.require_outfile = false;
    o.check_linearization = true;
}
|
Safe
|
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
|
1.7581448792894898e+38
| 5 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
| 0 |
// Context for IDENTITY_SUBST: configures the base Context with the
// long_blob type handler and binary collation.
Context_identity()
 :Context(IDENTITY_SUBST, &type_handler_long_blob, &my_charset_bin) { }
|
Safe
|
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
|
1.4482830264954913e+38
| 2 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <serg@mariadb.org>
| 0 |
// Parse a character string as a double and store it into this FLOAT
// field by delegating to the double overload.  Returns the conversion
// error code reported by get_double() (0 on success).
int Field_float::store(const char *from, uint len, CHARSET_INFO *cs)
{
  int conv_error;
  const double value= get_double(from, len, cs, &conv_error);
  Field_float::store(value);
  return conv_error;
}
|
Safe
|
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
|
1.2946718074578154e+38
| 6 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix.
| 0 |
/*
 * Compresses the response body in httpd->bodybuffer into
 * httpd->compbuffer with zlib's deflate.
 *
 * The output buffer is deliberately capped at the input length: if the
 * "compressed" stream would be as large as or larger than the input,
 * deflate() cannot reach Z_STREAM_END and ISC_R_FAILURE is returned so
 * the caller falls back to sending the uncompressed body.
 */
httpd_compress(isc_httpd_t *httpd) {
	z_stream zstr;
	isc_region_t r;
	int ret;
	int inputlen;

	inputlen = isc_buffer_usedlength(&httpd->bodybuffer);
	alloc_compspace(httpd, inputlen);
	isc_buffer_region(&httpd->compbuffer, &r);

	/*
	 * We're setting output buffer size to input size so it fails if the
	 * compressed data size would be bigger than the input size.
	 */
	memset(&zstr, 0, sizeof(zstr));
	/* avail_in/avail_out bound the transfer; the total_* fields are
	 * reset by deflateInit() anyway, so setting them here is harmless. */
	zstr.total_in = zstr.avail_in = zstr.total_out = zstr.avail_out =
		inputlen;

	zstr.next_in = isc_buffer_base(&httpd->bodybuffer);
	zstr.next_out = r.base;

	ret = deflateInit(&zstr, Z_DEFAULT_COMPRESSION);
	if (ret == Z_OK) {
		/* Z_FINISH: compress the whole input in one call. */
		ret = deflate(&zstr, Z_FINISH);
	}
	deflateEnd(&zstr);
	if (ret == Z_STREAM_END) {
		isc_buffer_add(&httpd->compbuffer, inputlen - zstr.avail_out);
		return (ISC_R_SUCCESS);
	} else {
		return (ISC_R_FAILURE);
	}
}
|
Safe
|
[] |
bind9
|
d4c5d1c650ae0e97a083b0ce7a705c20fc001f07
|
8.550413993352122e+37
| 33 |
Fix statistics channel multiple request processing with non-empty bodies
When the HTTP request has a body part after the HTTP headers, it is
not getting processed and is being prepended to the next request's data,
which results in an error when trying to parse it.
Improve the httpd.c:process_request() function with the following
additions:
1. Require that HTTP POST requests must have Content-Length header.
2. When Content-Length header is set, extract its value, and make sure
that it is valid and that the whole request's body is received before
processing the request.
3. Discard the request's body by consuming Content-Length worth of data
in the buffer.
(cherry picked from commit c2bbdc8a648c9630b2c9cea5227ad5c309c2ade5)
| 0 |
size_t OpenMP4Source(char *filename, uint32_t traktype, uint32_t traksubtype) //RAW or within MP4
{
mp4object *mp4 = (mp4object *)malloc(sizeof(mp4object));
if (mp4 == NULL) return 0;
memset(mp4, 0, sizeof(mp4object));
struct stat64 mp4stat;
stat64(filename, &mp4stat);
mp4->filesize = mp4stat.st_size;
if (mp4->filesize < 64) return 0;
#ifdef _WINDOWS
fopen_s(&mp4->mediafp, filename, "rb");
#else
mp4->mediafp = fopen(filename, "rb");
#endif
if (mp4->mediafp)
{
uint32_t qttag, qtsize32, skip, type = 0, subtype = 0, num;
size_t len;
int32_t nest = 0;
uint64_t nestsize[MAX_NEST_LEVEL] = { 0 };
uint64_t lastsize = 0, qtsize;
do
{
len = fread(&qtsize32, 1, 4, mp4->mediafp);
len += fread(&qttag, 1, 4, mp4->mediafp);
mp4->filepos += len;
if (len == 8 && mp4->filepos < mp4->filesize)
{
if (!VALID_FOURCC(qttag))
{
CloseSource((size_t)mp4);
mp4 = NULL;
break;
}
qtsize32 = BYTESWAP32(qtsize32);
if (qtsize32 == 1) // 64-bit Atom
{
len = fread(&qtsize, 1, 8, mp4->mediafp);
mp4->filepos += len;
qtsize = BYTESWAP64(qtsize) - 8;
}
else
qtsize = qtsize32;
nest++;
if (qtsize < 8) break;
if (nest >= MAX_NEST_LEVEL) break;
nestsize[nest] = qtsize;
lastsize = qtsize;
#if PRINT_MP4_STRUCTURE
for (int i = 1; i < nest; i++) printf(" ");
printf("%c%c%c%c (%lld)\n", (qttag & 0xff), ((qttag >> 8) & 0xff), ((qttag >> 16) & 0xff), ((qttag >> 24) & 0xff), qtsize);
if (qttag == MAKEID('m', 'd', 'a', 't') ||
qttag == MAKEID('f', 't', 'y', 'p') ||
qttag == MAKEID('u', 'd', 't', 'a') ||
qttag == MAKEID('f', 'r', 'e', 'e'))
{
LongSeek(mp4, qtsize - 8);
NESTSIZE(qtsize);
continue;
}
#else
if (qttag != MAKEID('m', 'o', 'o', 'v') && //skip over all but these atoms
qttag != MAKEID('m', 'v', 'h', 'd') &&
qttag != MAKEID('t', 'r', 'a', 'k') &&
qttag != MAKEID('m', 'd', 'i', 'a') &&
qttag != MAKEID('m', 'd', 'h', 'd') &&
qttag != MAKEID('m', 'i', 'n', 'f') &&
qttag != MAKEID('g', 'm', 'i', 'n') &&
qttag != MAKEID('d', 'i', 'n', 'f') &&
qttag != MAKEID('a', 'l', 'i', 's') &&
qttag != MAKEID('s', 't', 's', 'd') &&
qttag != MAKEID('s', 't', 'b', 'l') &&
qttag != MAKEID('s', 't', 't', 's') &&
qttag != MAKEID('s', 't', 's', 'c') &&
qttag != MAKEID('s', 't', 's', 'z') &&
qttag != MAKEID('s', 't', 'c', 'o') &&
qttag != MAKEID('c', 'o', '6', '4') &&
qttag != MAKEID('h', 'd', 'l', 'r'))
{
LongSeek(mp4, qtsize - 8);
NESTSIZE(qtsize);
}
else
#endif
if (qttag == MAKEID('m', 'v', 'h', 'd')) //mvhd movie header
{
len = fread(&skip, 1, 4, mp4->mediafp);
len += fread(&skip, 1, 4, mp4->mediafp);
len += fread(&skip, 1, 4, mp4->mediafp);
len += fread(&mp4->clockdemon, 1, 4, mp4->mediafp); mp4->clockdemon = BYTESWAP32(mp4->clockdemon);
len += fread(&mp4->clockcount, 1, 4, mp4->mediafp); mp4->clockcount = BYTESWAP32(mp4->clockcount);
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over mvhd
NESTSIZE(qtsize);
}
else if (qttag == MAKEID('m', 'd', 'h', 'd')) //mdhd media header
{
media_header md;
len = fread(&md, 1, sizeof(md), mp4->mediafp);
if (len == sizeof(md))
{
md.creation_time = BYTESWAP32(md.creation_time);
md.modification_time = BYTESWAP32(md.modification_time);
md.time_scale = BYTESWAP32(md.time_scale);
md.duration = BYTESWAP32(md.duration);
mp4->trak_clockdemon = md.time_scale;
mp4->trak_clockcount = md.duration;
if (mp4->videolength == 0.0) // Get the video length from the first track
{
mp4->videolength = (float)((double)mp4->trak_clockcount / (double)mp4->trak_clockdemon);
}
}
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over mvhd
NESTSIZE(qtsize);
}
else if (qttag == MAKEID('h', 'd', 'l', 'r')) //hldr
{
uint32_t temp;
len = fread(&skip, 1, 4, mp4->mediafp);
len += fread(&skip, 1, 4, mp4->mediafp);
len += fread(&temp, 1, 4, mp4->mediafp); // type will be 'meta' for the correct trak.
if (temp != MAKEID('a', 'l', 'i', 's') && temp != MAKEID('u', 'r', 'l', ' '))
type = temp;
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over hldr
NESTSIZE(qtsize);
}
else if (qttag == MAKEID('s', 't', 's', 'd')) //read the sample decription to determine the type of metadata
{
if (type == traktype) //like meta
{
len = fread(&skip, 1, 4, mp4->mediafp);
len += fread(&skip, 1, 4, mp4->mediafp);
len += fread(&skip, 1, 4, mp4->mediafp);
len += fread(&subtype, 1, 4, mp4->mediafp); // type will be 'meta' for the correct trak.
if (len == 16)
{
if (subtype != traksubtype) // MP4 metadata
{
type = 0; // MP4
}
}
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over stsd
}
else
LongSeek(mp4, qtsize - 8);
NESTSIZE(qtsize);
}
else if (qttag == MAKEID('s', 't', 's', 'c')) // metadata stsc - offset chunks
{
if (type == traktype) // meta
{
len = fread(&skip, 1, 4, mp4->mediafp);
len += fread(&num, 1, 4, mp4->mediafp);
num = BYTESWAP32(num);
if (num * 12 <= qtsize - 8 - len)
{
mp4->metastsc_count = num;
if (mp4->metastsc) free(mp4->metastsc);
if (num > 0)
{
mp4->metastsc = (SampleToChunk *)malloc(num * sizeof(SampleToChunk));
if (mp4->metastsc)
{
len += fread(mp4->metastsc, 1, num * sizeof(SampleToChunk), mp4->mediafp);
do
{
num--;
mp4->metastsc[num].chunk_num = BYTESWAP32(mp4->metastsc[num].chunk_num);
mp4->metastsc[num].samples = BYTESWAP32(mp4->metastsc[num].samples);
mp4->metastsc[num].id = BYTESWAP32(mp4->metastsc[num].id);
} while (num > 0);
}
}
else
{
//size of null
CloseSource((size_t)mp4);
mp4 = NULL;
break;
}
}
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over stsx
}
else
LongSeek(mp4, qtsize - 8);
NESTSIZE(qtsize);
}
else if (qttag == MAKEID('s', 't', 's', 'z')) // metadata stsz - sizes
{
if (type == traktype) // meta
{
uint32_t equalsamplesize;
len = fread(&skip, 1, 4, mp4->mediafp);
len += fread(&equalsamplesize, 1, 4, mp4->mediafp);
len += fread(&num, 1, 4, mp4->mediafp);
num = BYTESWAP32(num);
if (num * 4 <= qtsize - 8 - len)
{
mp4->metasize_count = num;
if (mp4->metasizes) free(mp4->metasizes);
if(num > 0)
{
mp4->metasizes = (uint32_t *)malloc(num * 4);
if (mp4->metasizes)
{
if (equalsamplesize == 0)
{
len += fread(mp4->metasizes, 1, num * 4, mp4->mediafp);
do
{
num--;
mp4->metasizes[num] = BYTESWAP32(mp4->metasizes[num]);
} while (num > 0);
}
else
{
equalsamplesize = BYTESWAP32(equalsamplesize);
do
{
num--;
mp4->metasizes[num] = equalsamplesize;
} while (num > 0);
}
}
}
else
{
//size of null
CloseSource((size_t)mp4);
mp4 = NULL;
break;
}
}
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over stsz
}
else
LongSeek(mp4, qtsize - 8);
NESTSIZE(qtsize);
}
else if (qttag == MAKEID('s', 't', 'c', 'o')) // metadata stco - offsets
{
if (type == traktype) // meta
{
len = fread(&skip, 1, 4, mp4->mediafp);
len += fread(&num, 1, 4, mp4->mediafp);
num = BYTESWAP32(num);
if (num * 4 <= qtsize - 8 - len)
{
uint32_t metastco_count = num;
if (mp4->metastsc_count > 0 && num != mp4->metasize_count)
{
mp4->indexcount = num;
if (mp4->metaoffsets) free(mp4->metaoffsets);
if(num > 0)
{
mp4->metaoffsets = (uint64_t *)malloc(num * 8);
if (mp4->metaoffsets)
{
uint32_t *metaoffsets32 = NULL;
metaoffsets32 = (uint32_t *)malloc(num * 4);
if (metaoffsets32)
{
uint64_t fileoffset = 0;
int stsc_pos = 0;
int stco_pos = 0;
int repeat = 1;
len += fread(metaoffsets32, 1, num * 4, mp4->mediafp);
do
{
num--;
metaoffsets32[num] = BYTESWAP32(metaoffsets32[num]);
} while (num > 0);
mp4->metaoffsets[0] = fileoffset = metaoffsets32[stco_pos];
num = 1;
while (num < mp4->indexcount)
{
if ((uint32_t)repeat == mp4->metastsc[stsc_pos].samples)
{
if ((uint32_t)stco_pos + 1 < metastco_count)
{
stco_pos++;
fileoffset = (uint64_t)metaoffsets32[stco_pos];
}
else
{
fileoffset += (uint64_t)mp4->metasizes[num - 1];
}
if ((uint32_t)stsc_pos + 1 < mp4->metastsc_count)
if (mp4->metastsc[stsc_pos + 1].chunk_num == (uint32_t)stco_pos + 1)
stsc_pos++;
repeat = 1;
}
else
{
fileoffset += (uint64_t)mp4->metasizes[num - 1];
repeat++;
}
mp4->metaoffsets[num] = fileoffset;
//int delta = metaoffsets[num] - metaoffsets[num - 1];
//printf("%3d:%08x, delta = %08x\n", num, (int)fileoffset, delta);
num++;
}
if (mp4->metastsc) free(mp4->metastsc);
mp4->metastsc = NULL;
mp4->metastsc_count = 0;
free(metaoffsets32);
}
}
}
else
{
//size of null
CloseSource((size_t)mp4);
mp4 = NULL;
break;
}
}
else
{
mp4->indexcount = num;
if (mp4->metaoffsets) free(mp4->metaoffsets);
if (num > 0)
{
mp4->metaoffsets = (uint64_t *)malloc(num * 8);
if (mp4->metaoffsets)
{
uint32_t *metaoffsets32 = NULL;
metaoffsets32 = (uint32_t *)malloc(num * 4);
if (metaoffsets32)
{
size_t readlen = fread(metaoffsets32, 1, num * 4, mp4->mediafp);
len += readlen;
do
{
num--;
mp4->metaoffsets[num] = BYTESWAP32(metaoffsets32[num]);
} while (num > 0);
free(metaoffsets32);
}
}
}
else
{
//size of null
CloseSource((size_t)mp4);
mp4 = NULL;
break;
}
}
}
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over stco
}
else
LongSeek(mp4, qtsize - 8);
NESTSIZE(qtsize);
}
else if (qttag == MAKEID('c', 'o', '6', '4')) // metadata stco - offsets
{
if (type == traktype) // meta
{
len = fread(&skip, 1, 4, mp4->mediafp);
len += fread(&num, 1, 4, mp4->mediafp);
num = BYTESWAP32(num);
if(num == 0)
{
//size of null
CloseSource((size_t)mp4);
mp4 = NULL;
break;
}
if (num * 8 <= qtsize - 8 - len)
{
if (mp4->metastsc_count > 0 && num != mp4->metasize_count)
{
mp4->indexcount = mp4->metasize_count;
if (mp4->metaoffsets) free(mp4->metaoffsets);
if (mp4->metasize_count)
{
mp4->metaoffsets = (uint64_t *)malloc(mp4->metasize_count * 8);
if (mp4->metaoffsets)
{
uint64_t *metaoffsets64 = NULL;
metaoffsets64 = (uint64_t *)malloc(num * 8);
if (metaoffsets64)
{
uint64_t fileoffset = 0;
int stsc_pos = 0;
int stco_pos = 0;
len += fread(metaoffsets64, 1, num * 8, mp4->mediafp);
do
{
num--;
metaoffsets64[num] = BYTESWAP64(metaoffsets64[num]);
} while (num > 0);
fileoffset = metaoffsets64[0];
mp4->metaoffsets[0] = fileoffset;
//printf("%3d:%08x, delta = %08x\n", 0, (int)fileoffset, 0);
num = 1;
while (num < mp4->metasize_count)
{
if (num != mp4->metastsc[stsc_pos].chunk_num - 1 && 0 == (num - (mp4->metastsc[stsc_pos].chunk_num - 1)) % mp4->metastsc[stsc_pos].samples)
{
stco_pos++;
fileoffset = (uint64_t)metaoffsets64[stco_pos];
}
else
{
fileoffset += (uint64_t)mp4->metasizes[num - 1];
}
mp4->metaoffsets[num] = fileoffset;
//int delta = metaoffsets[num] - metaoffsets[num - 1];
//printf("%3d:%08x, delta = %08x\n", num, (int)fileoffset, delta);
num++;
}
if (mp4->metastsc) free(mp4->metastsc);
mp4->metastsc = NULL;
mp4->metastsc_count = 0;
free(metaoffsets64);
}
}
}
else
{
//size of null
CloseSource((size_t)mp4);
mp4 = NULL;
break;
}
}
else
{
mp4->indexcount = num;
if (mp4->metaoffsets) free(mp4->metaoffsets);
mp4->metaoffsets = (uint64_t *)malloc(num * 8);
if (mp4->metaoffsets)
{
len += fread(mp4->metaoffsets, 1, num * 8, mp4->mediafp);
do
{
num--;
mp4->metaoffsets[num] = BYTESWAP64(mp4->metaoffsets[num]);
} while (num > 0);
}
}
}
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over stco
}
else
LongSeek(mp4, qtsize - 8);
NESTSIZE(qtsize);
}
else if (qttag == MAKEID('s', 't', 't', 's')) // time to samples
{
if (type == traktype) // meta
{
uint32_t totaldur = 0, samples = 0;
int32_t entries = 0;
len = fread(&skip, 1, 4, mp4->mediafp);
len += fread(&num, 1, 4, mp4->mediafp);
num = BYTESWAP32(num);
if (num * 8 <= qtsize - 8 - len)
{
entries = num;
mp4->meta_clockdemon = mp4->trak_clockdemon;
mp4->meta_clockcount = mp4->trak_clockcount;
while (entries > 0)
{
int32_t samplecount;
int32_t duration;
len += fread(&samplecount, 1, 4, mp4->mediafp);
samplecount = BYTESWAP32(samplecount);
len += fread(&duration, 1, 4, mp4->mediafp);
duration = BYTESWAP32(duration);
samples += samplecount;
entries--;
totaldur += duration;
mp4->metadatalength += (double)((double)samplecount * (double)duration / (double)mp4->meta_clockdemon);
}
mp4->basemetadataduration = mp4->metadatalength * (double)mp4->meta_clockdemon / (double)samples;
}
mp4->filepos += len;
LongSeek(mp4, qtsize - 8 - len); // skip over stco
}
else
LongSeek(mp4, qtsize - 8);
NESTSIZE(qtsize);
}
else
{
NESTSIZE(8);
}
}
else
{
break;
}
} while (len > 0);
if (mp4)
{
if (mp4->metasizes == NULL || mp4->metaoffsets == NULL)
{
CloseSource((size_t)mp4);
mp4 = NULL;
}
}
}
else
{
// printf("Could not open %s for input\n", filename);
// exit(1);
free(mp4);
mp4 = NULL;
}
return (size_t)mp4;
}
|
Safe
|
[
"CWE-125",
"CWE-369",
"CWE-787"
] |
gpmf-parser
|
341f12cd5b97ab419e53853ca00176457c9f1681
|
2.452300534386766e+37
| 584 |
fixed many security issues with the too crude mp4 reader
| 0 |
get_text_gray_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
/* This version is for reading text-format PGM files with any maxval and
converting to extended RGB */
{
ppm_source_ptr source = (ppm_source_ptr)sinfo;
FILE *infile = source->pub.input_file;
register JSAMPROW ptr;
register JSAMPLE *rescale = source->rescale;
JDIMENSION col;
unsigned int maxval = source->maxval;
register int rindex = rgb_red[cinfo->in_color_space];
register int gindex = rgb_green[cinfo->in_color_space];
register int bindex = rgb_blue[cinfo->in_color_space];
register int aindex = alpha_index[cinfo->in_color_space];
register int ps = rgb_pixelsize[cinfo->in_color_space];
ptr = source->pub.buffer[0];
if (maxval == MAXJSAMPLE) {
if (aindex >= 0)
GRAY_RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval),
ptr[aindex] = 0xFF;)
else
GRAY_RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval),)
} else {
if (aindex >= 0)
GRAY_RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)],
ptr[aindex] = 0xFF;)
else
GRAY_RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)],)
}
return 1;
}
|
Safe
|
[
"CWE-200",
"CWE-125"
] |
libjpeg-turbo
|
9c78a04df4e44ef6487eee99c4258397f4fdca55
|
2.884549343310117e+38
| 32 |
cjpeg: Fix OOB read caused by malformed 8-bit BMP
... in which one or more of the color indices is out of range for the
number of palette entries.
Fix partly borrowed from jpeg-9c. This commit also adopts Guido's
JERR_PPM_OUTOFRANGE enum value in lieu of our project-specific
JERR_PPM_TOOLARGE enum value.
Fixes #258
| 0 |
void run() {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
BSONObj specObj = BSON("" << spec());
BSONElement specElement = specObj.firstElement();
VariablesParseState vps = expCtx->variablesParseState;
intrusive_ptr<Expression> expression = Expression::parseOperand(expCtx, specElement, vps);
ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
}
|
Safe
|
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
|
1.133238137335204e+38
| 9 |
SERVER-38070 fix infinite loop in agg expression
| 0 |
static int verify_ee(const gnutls_datum_t *raw_crt, gnutls_certificate_type_t crt_type,
dane_cert_type_t ctype, dane_match_type_t match, gnutls_datum_t * data,
unsigned int *verify)
{
gnutls_datum_t pubkey = {NULL, 0};
int ret;
if (ctype == DANE_CERT_X509 && crt_type == GNUTLS_CRT_X509) {
if (!matches(raw_crt, data, match)) {
gnutls_assert();
*verify |= DANE_VERIFY_CERT_DIFFERS;
}
} else if (ctype == DANE_CERT_PK && crt_type == GNUTLS_CRT_X509) {
ret = crt_to_pubkey(raw_crt, &pubkey);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
if (!matches(&pubkey, data, match)) {
gnutls_assert();
*verify |= DANE_VERIFY_CERT_DIFFERS;
}
} else {
ret = gnutls_assert_val(DANE_E_UNKNOWN_DANE_DATA);
goto cleanup;
}
ret = 0;
cleanup:
free(pubkey.data);
return ret;
}
|
Safe
|
[
"CWE-119"
] |
gnutls
|
ed51e5e53cfbab3103d6b7b85b7ba4515e4f30c3
|
2.6185661534812822e+38
| 36 |
Adding dane_raw_tlsa to allow initialization of dane_query_t from DANE records based on external DNS resolutions. Also fixing a buffer overflow.
Signed-off-by: Nikos Mavrogiannopoulos <nmav@gnutls.org>
| 0 |
static int decode_delegation_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
stateid->type = NFS4_DELEGATION_STATEID_TYPE;
return decode_stateid(xdr, stateid);
}
|
Safe
|
[
"CWE-787"
] |
linux
|
b4487b93545214a9db8cbf32e86411677b0cca21
|
2.1110094195644708e+37
| 5 |
nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <jeffrey.mitchell@starlab.io>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
| 0 |
array_value (s, quoted, flags, rtype, indp)
char *s;
int quoted, flags, *rtype;
arrayind_t *indp;
{
return (array_value_internal (s, quoted, flags|AV_ALLOWALL, rtype, indp));
}
|
Safe
|
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
|
1.9885495566063353e+38
| 7 |
bash-4.4-rc2 release
| 0 |
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
int retry_count = 30;
writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
do {
if (arcmsr_hbaB_wait_msgint_ready(acb))
break;
else {
retry_count--;
printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
timeout,retry count down = %d \n", acb->host->host_no, retry_count);
}
} while (retry_count != 0);
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
|
2.2845553368730528e+38
| 15 |
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer()
We need to put an upper bound on "user_len" so the memcpy() doesn't
overflow.
Cc: <stable@vger.kernel.org>
Reported-by: Marco Grassi <marco.gra@gmail.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
| 0 |
internalEntityProcessor(XML_Parser parser,
const char *s,
const char *end,
const char **nextPtr)
{
ENTITY *entity;
const char *textStart, *textEnd;
const char *next;
enum XML_Error result;
OPEN_INTERNAL_ENTITY *openEntity = openInternalEntities;
if (!openEntity)
return XML_ERROR_UNEXPECTED_STATE;
entity = openEntity->entity;
textStart = ((char *)entity->textPtr) + entity->processed;
textEnd = (char *)(entity->textPtr + entity->textLen);
/* Set a safe default value in case 'next' does not get set */
next = textStart;
#ifdef XML_DTD
if (entity->is_param) {
int tok = XmlPrologTok(internalEncoding, textStart, textEnd, &next);
result = doProlog(parser, internalEncoding, textStart, textEnd, tok,
next, &next, XML_FALSE);
}
else
#endif /* XML_DTD */
result = doContent(parser, openEntity->startTagLevel, internalEncoding,
textStart, textEnd, &next, XML_FALSE);
if (result != XML_ERROR_NONE)
return result;
else if (textEnd != next && ps_parsing == XML_SUSPENDED) {
entity->processed = (int)(next - (char *)entity->textPtr);
return result;
}
else {
entity->open = XML_FALSE;
openInternalEntities = openEntity->next;
/* put openEntity back in list of free instances */
openEntity->next = freeInternalEntities;
freeInternalEntities = openEntity;
}
#ifdef XML_DTD
if (entity->is_param) {
int tok;
processor = prologProcessor;
tok = XmlPrologTok(encoding, s, end, &next);
return doProlog(parser, encoding, s, end, tok, next, nextPtr,
(XML_Bool)!ps_finalBuffer);
}
else
#endif /* XML_DTD */
{
processor = contentProcessor;
/* see externalEntityContentProcessor vs contentProcessor */
return doContent(parser, parentParser ? 1 : 0, encoding, s, end,
nextPtr, (XML_Bool)!ps_finalBuffer);
}
}
|
Safe
|
[
"CWE-611"
] |
libexpat
|
c4bf96bb51dd2a1b0e185374362ee136fe2c9d7f
|
2.8884318098465154e+38
| 61 |
xmlparse.c: Fix external entity infinite loop bug (CVE-2017-9233)
| 0 |
static sctp_disposition_t sctp_sf_violation_ctsn(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
|
Safe
|
[] |
linux
|
196d67593439b03088913227093e374235596e33
|
1.778489943097657e+38
| 13 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <michele@acksyn.org>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static void rtreeCheckReset(RtreeCheck *pCheck, sqlite3_stmt *pStmt){
int rc = sqlite3_reset(pStmt);
if( pCheck->rc==SQLITE_OK ) pCheck->rc = rc;
}
|
Safe
|
[
"CWE-125"
] |
sqlite
|
e41fd72acc7a06ce5a6a7d28154db1ffe8ba37a8
|
1.419819681263672e+38
| 4 |
Enhance the rtreenode() function of rtree (used for testing) so that it
uses the newer sqlite3_str object for better performance and improved
error reporting.
FossilOrigin-Name: 90acdbfce9c088582d5165589f7eac462b00062bbfffacdcc786eb9cf3ea5377
| 0 |
TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathBad) {
initialize();
Buffer::OwnedImpl buffer("GET * HTTP/1.1\r\nHost: bah\r\n\r\n");
expect400(Protocol::Http11, true, buffer, "http1.invalid_url");
}
|
Safe
|
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
|
2.418879869147941e+38
| 6 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <avd@google.com>
| 0 |
static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
const unsigned char *iv, int enc)
{
EVP_AES_GCM_CTX *gctx = ctx->cipher_data;
if (!iv && !key)
return 1;
if (key) {
aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
/*
* If we have an iv can set it directly, otherwise use saved IV.
*/
if (iv == NULL && gctx->iv_set)
iv = gctx->iv;
if (iv) {
CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
gctx->iv_set = 1;
}
gctx->key_set = 1;
} else {
/* If key set use IV, otherwise copy */
if (gctx->key_set)
CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
else
memcpy(gctx->iv, iv, gctx->ivlen);
gctx->iv_set = 1;
gctx->iv_gen = 0;
}
return 1;
}
|
Safe
|
[] |
openssl
|
1a3701f4fe0530a40ec073cd78d02cfcc26c0f8e
|
2.14592171959775e+38
| 31 |
Sanity check EVP_CTRL_AEAD_TLS_AAD
The various implementations of EVP_CTRL_AEAD_TLS_AAD expect a buffer of at
least 13 bytes long. Add sanity checks to ensure that the length is at
least that. Also add a new constant (EVP_AEAD_TLS1_AAD_LEN) to evp.h to
represent this length. Thanks to Kevin Wojtysiak (Int3 Solutions) and
Paramjot Oberoi (Int3 Solutions) for reporting this issue.
Reviewed-by: Andy Polyakov <appro@openssl.org>
(cherry picked from commit c8269881093324b881b81472be037055571f73f3)
Conflicts:
ssl/record/ssl3_record.c
| 0 |
CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
char *data_offset;
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
cFYI(1, "Set Times (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
count = sizeof(FILE_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find max SMB PDU from sess */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->Fid = fid;
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
else
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
|
Safe
|
[
"CWE-362",
"CWE-119",
"CWE-189"
] |
linux
|
9438fabb73eb48055b58b89fc51e0bc4db22fabd
|
1.478371723338584e+38
| 60 |
cifs: fix possible memory corruption in CIFSFindNext
The name_len variable in CIFSFindNext is a signed int that gets set to
the resume_name_len in the cifs_search_info. The resume_name_len however
is unsigned and for some infolevels is populated directly from a 32 bit
value sent by the server.
If the server sends a very large value for this, then that value could
look negative when converted to a signed int. That would make that
value pass the PATH_MAX check later in CIFSFindNext. The name_len would
then be used as a length value for a memcpy. It would then be treated
as unsigned again, and the memcpy scribbles over a ton of memory.
Fix this by making the name_len an unsigned value in CIFSFindNext.
Cc: <stable@kernel.org>
Reported-by: Darren Lavender <dcl@hppine99.gbr.hp.com>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
| 0 |
QPDFWriter::disableIncompatibleEncryption(int major, int minor,
int extension_level)
{
if (! this->m->encrypted)
{
return;
}
bool disable = false;
if (compareVersions(major, minor, 1, 3) < 0)
{
disable = true;
}
else
{
int V = atoi(this->m->encryption_dictionary["/V"].c_str());
int R = atoi(this->m->encryption_dictionary["/R"].c_str());
if (compareVersions(major, minor, 1, 4) < 0)
{
if ((V > 1) || (R > 2))
{
disable = true;
}
}
else if (compareVersions(major, minor, 1, 5) < 0)
{
if ((V > 2) || (R > 3))
{
disable = true;
}
}
else if (compareVersions(major, minor, 1, 6) < 0)
{
if (this->m->encrypt_use_aes)
{
disable = true;
}
}
else if ((compareVersions(major, minor, 1, 7) < 0) ||
((compareVersions(major, minor, 1, 7) == 0) &&
extension_level < 3))
{
if ((V >= 5) || (R >= 5))
{
disable = true;
}
}
}
if (disable)
{
QTC::TC("qpdf", "QPDFWriter forced version disabled encryption");
this->m->encrypted = false;
}
}
|
Vulnerable
|
[
"CWE-125"
] |
qpdf
|
1868a10f8b06631362618bfc85ca8646da4b4b71
|
1.4956781409912678e+38
| 54 |
Replace all atoi calls with QUtil::string_to_int
The latter catches underflow/overflow.
| 1 |
update_strings (SNDFILE * outfile, const METADATA_INFO * info)
{
if (info->title != NULL)
sf_set_string (outfile, SF_STR_TITLE, info->title) ;
if (info->copyright != NULL)
sf_set_string (outfile, SF_STR_COPYRIGHT, info->copyright) ;
if (info->artist != NULL)
sf_set_string (outfile, SF_STR_ARTIST, info->artist) ;
if (info->comment != NULL)
sf_set_string (outfile, SF_STR_COMMENT, info->comment) ;
if (info->date != NULL)
sf_set_string (outfile, SF_STR_DATE, info->date) ;
if (info->album != NULL)
sf_set_string (outfile, SF_STR_ALBUM, info->album) ;
if (info->license != NULL)
sf_set_string (outfile, SF_STR_LICENSE, info->license) ;
} /* update_strings */
|
Safe
|
[
"CWE-125"
] |
libsndfile
|
2d54514a4f6437b67829717c05472d2e3300a258
|
3.6694341401875817e+37
| 24 |
sfe_copy_data_fp: check value of "max" variable for being normal
and check elements of the data[] array for being finite.
Both checks use functions provided by the <math.h> header as declared
by the C99 standard.
Fixes #317
CVE-2017-14245
CVE-2017-14246
| 0 |
void AES::AsmDecrypt(const byte* inBlock, byte* outBlock, void* boxes) const
{
PROLOG()
#ifdef OLD_GCC_OFFSET
AS2( mov edx, DWORD PTR [ecx + 60] ) // rounds
AS2( lea edi, [ecx + 64] ) // rk
#else
AS2( mov edx, DWORD PTR [ecx + 56] ) // rounds
AS2( lea edi, [ecx + 60] ) // rk
#endif
AS1( dec edx )
AS2( movd mm6, edi ) // save rk
AS2( movd mm5, edx ) // save rounds
AS2( mov eax, DWORD PTR [esi] )
AS2( mov ebx, DWORD PTR [esi + 4] )
AS2( mov ecx, DWORD PTR [esi + 8] )
AS2( mov edx, DWORD PTR [esi + 12] )
AS1( bswap eax )
AS1( bswap ebx )
AS1( bswap ecx )
AS1( bswap edx )
AS2( xor eax, DWORD PTR [edi] ) // s0
AS2( xor ebx, DWORD PTR [edi + 4] ) // s1
AS2( xor ecx, DWORD PTR [edi + 8] ) // s2
AS2( xor edx, DWORD PTR [edi + 12] ) // s3
#ifdef _MSC_VER
AS1( loop2: ) // loop2
#else
AS1(2: ) // loop2
#endif
/* Put0 (mm0) =
Td0[GETBYTE(get0, rs24)] ^
Td1[GETBYTE(get3, rs16)] ^
Td2[GETBYTE(get2, rs 8)] ^
Td3[GETBYTE(tet1, )]
*/
AS2( mov esi, eax )
AS2( shr esi, 24 )
AS2( mov esi, DWORD PTR [ebp + esi*4] )
AS2( mov edi, edx )
AS2( shr edi, 16 )
AS2( and edi, 255 )
AS2( xor esi, DWORD PTR [ebp + 1024 + edi*4] )
AS2( movzx edi, ch )
AS2( xor esi, DWORD PTR [ebp + 2048 + edi*4] )
AS2( movzx edi, bl )
AS2( xor esi, DWORD PTR [ebp + 3072 + edi*4] )
AS2( movd mm0, esi )
/* Put1 (mm1) =
Td0[GETBYTE(get1, rs24)] ^
Td1[GETBYTE(get0, rs16)] ^
Td2[GETBYTE(get3, rs 8)] ^
Td3[GETBYTE(tet2, )]
*/
AS2( mov esi, ebx )
AS2( shr esi, 24 )
AS2( mov esi, DWORD PTR [ebp + esi*4] )
AS2( mov edi, eax )
AS2( shr edi, 16 )
AS2( and edi, 255 )
AS2( xor esi, DWORD PTR [ebp + 1024 + edi*4] )
AS2( movzx edi, dh )
AS2( xor esi, DWORD PTR [ebp + 2048 + edi*4] )
AS2( movzx edi, cl )
AS2( xor esi, DWORD PTR [ebp + 3072 + edi*4] )
AS2( movd mm1, esi )
/* Put2 (mm2) =
Td0[GETBYTE(get2, rs24)] ^
Td1[GETBYTE(get1, rs16)] ^
Td2[GETBYTE(get0, rs 8)] ^
Td3[GETBYTE(tet3, )]
*/
AS2( mov esi, ecx )
AS2( shr esi, 24 )
AS2( mov esi, DWORD PTR [ebp + esi*4] )
AS2( mov edi, ebx )
AS2( shr edi, 16 )
AS2( and edi, 255 )
AS2( xor esi, DWORD PTR [ebp + 1024 + edi*4] )
AS2( movzx edi, ah )
AS2( xor esi, DWORD PTR [ebp + 2048 + edi*4] )
AS2( movzx edi, dl )
AS2( xor esi, DWORD PTR [ebp + 3072 + edi*4] )
AS2( movd mm2, esi )
/* Put3 (edx) =
Td0[GETBYTE(get3, rs24)] ^
Td1[GETBYTE(get2, rs16)] ^
Td2[GETBYTE(get1, rs 8)] ^
Td3[GETBYTE(tet0, )]
*/
AS2( mov esi, edx )
AS2( shr esi, 24 )
AS2( mov edx, DWORD PTR [ebp + esi*4] )
AS2( mov edi, ecx )
AS2( shr edi, 16 )
AS2( and edi, 255 )
AS2( xor edx, DWORD PTR [ebp + 1024 + edi*4] )
AS2( movzx esi, bh )
AS2( xor edx, DWORD PTR [ebp + 2048 + esi*4] )
AS2( movzx edi, al )
AS2( xor edx, DWORD PTR [ebp + 3072 + edi*4] )
// xOr
AS2( movd esi, mm6 ) // rk
AS2( add esi, 16 )
AS2( movd mm6, esi ) // save back
AS2( movd eax, mm0 )
AS2( movd ebx, mm1 )
AS2( movd ecx, mm2 )
AS2( xor eax, DWORD PTR [esi] )
AS2( xor ebx, DWORD PTR [esi + 4] )
AS2( xor ecx, DWORD PTR [esi + 8] )
AS2( xor edx, DWORD PTR [esi + 12] )
AS2( movd edi, mm5 )
AS1( dec edi )
AS2( movd mm5, edi )
#ifdef _MSC_VER
AS1( jnz loop2) // loop2
#else
AS1( jnz 2b ) // loop2
#endif
// last round
/*
Put0 (mm0) =
(Td4[get0, rs24] & 0xff000000) ^ h = 4278190080
(Td4[get3, rs16] & 0x00ff0000) ^ h = 16711680
(Td4[get2, rs 8] & 0x0000ff00) ^ h = 65280
(Td4[get1, rs 0] & 0x000000ff) h = 255
*/
AS2( mov esi, eax )
AS2( shr esi, 24 )
AS2( mov esi, DWORD PTR [ebp + 4096 + esi*4] )
AS2( and esi, 4278190080 )
AS2( mov edi, edx )
AS2( shr edi, 16 )
AS2( and edi, 255 )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 16711680 )
AS2( xor esi, edi )
AS2( movzx edi, ch )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 65280 )
AS2( xor esi, edi )
AS2( movzx edi, bl )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 255 )
AS2( xor esi, edi )
AS2( movd mm0, esi )
/*
Put1 (mm1) =
(Td4[get1, rs24] & 0xff000000) ^ h = 4278190080
(Td4[get0, rs16] & 0x00ff0000) ^ h = 16711680
(Td4[get3, rs 8] & 0x0000ff00) ^ h = 65280
(Td4[get2, rs 0] & 0x000000ff) h = 255
*/
AS2( mov esi, ebx )
AS2( shr esi, 24 )
AS2( mov esi, DWORD PTR [ebp + 4096 + esi*4] )
AS2( and esi, 4278190080 )
AS2( mov edi, eax )
AS2( shr edi, 16 )
AS2( and edi, 255 )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 16711680 )
AS2( xor esi, edi )
AS2( movzx edi, dh )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 65280 )
AS2( xor esi, edi )
AS2( movzx edi, cl )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 255 )
AS2( xor esi, edi )
AS2( movd mm1, esi )
/*
Put2 (mm2) =
(Td4[get2, rs24] & 0xff000000) ^ h = 4278190080
(Td4[get1, rs16] & 0x00ff0000) ^ h = 16711680
(Td4[get0, rs 8] & 0x0000ff00) ^ h = 65280
(Td4[get3, rs 0] & 0x000000ff) h = 255
*/
AS2( mov esi, ecx )
AS2( shr esi, 24 )
AS2( mov esi, DWORD PTR [ebp + 4096 + esi*4] )
AS2( and esi, 4278190080 )
AS2( mov edi, ebx )
AS2( shr edi, 16 )
AS2( and edi, 255 )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 16711680 )
AS2( xor esi, edi )
AS2( movzx edi, ah )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 65280 )
AS2( xor esi, edi )
AS2( movzx edi, dl )
AS2( mov edi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and edi, 255 )
AS2( xor esi, edi )
AS2( movd mm2, esi )
/*
Put3 (edx) =
(Td4[get3, rs24] & 0xff000000) ^ h = 4278190080
(Td4[get2, rs16] & 0x00ff0000) ^ h = 16711680
(Td4[get1, rs 8] & 0x0000ff00) ^ h = 65280
(Td4[get0, rs 0] & 0x000000ff) h = 255
*/
AS2( mov esi, edx )
AS2( shr esi, 24 )
AS2( mov edx, DWORD PTR [ebp + 4096 + esi*4] )
AS2( and edx, 4278190080 )
AS2( mov edi, ecx )
AS2( shr edi, 16 )
AS2( and edi, 255 )
AS2( mov esi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and esi, 16711680 )
AS2( xor edx, esi )
AS2( movzx esi, bh )
AS2( mov edi, DWORD PTR [ebp + 4096 + esi*4] )
AS2( and edi, 65280 )
AS2( xor edx, edi )
AS2( movzx edi, al )
AS2( mov esi, DWORD PTR [ebp + 4096 + edi*4] )
AS2( and esi, 255 )
AS2( xor edx, esi )
// xOr
AS2( movd esi, mm6 ) // rk
AS2( add esi, 16 )
AS2( movd eax, mm0 )
AS2( movd ebx, mm1 )
AS2( movd ecx, mm2 )
AS2( xor eax, DWORD PTR [esi] )
AS2( xor ebx, DWORD PTR [esi + 4] )
AS2( xor ecx, DWORD PTR [esi + 8] )
AS2( xor edx, DWORD PTR [esi + 12] )
// end
AS2( movd ebp, mm7 )
// swap
AS1( bswap eax )
AS1( bswap ebx )
AS1( bswap ecx )
AS1( bswap edx )
// store
#ifdef __GNUC__
AS2( movd esi, mm4 ) // outBlock
#else
AS2( mov esi, DWORD PTR [ebp + 12] ) // outBlock
#endif
AS2( mov DWORD PTR [esi], eax )
AS2( mov DWORD PTR [esi + 4], ebx )
AS2( mov DWORD PTR [esi + 8], ecx )
AS2( mov DWORD PTR [esi + 12], edx )
EPILOG()
}
|
Safe
|
[] |
mysql-server
|
5c6169fb309981b564a17bee31b367a18866d674
|
1.681495671996252e+38
| 314 |
Bug #24740291: YASSL UPDATE TO 2.4.2
| 0 |
bool IsInitialBlockDownload()
{
if (pindexBest == NULL || nBestHeight < Checkpoints::GetTotalBlocksEstimate())
return true;
static int64 nLastUpdate;
static CBlockIndex* pindexLastBest;
if (pindexBest != pindexLastBest)
{
pindexLastBest = pindexBest;
nLastUpdate = GetTime();
}
return (GetTime() - nLastUpdate < 10 &&
pindexBest->GetBlockTime() < GetTime() - 24 * 60 * 60);
}
|
Safe
|
[
"CWE-16",
"CWE-787"
] |
bitcoin
|
a206b0ea12eb4606b93323268fc81a4f1f952531
|
3.3076015534041507e+38
| 14 |
Do not allow overwriting unspent transactions (BIP 30)
Introduce the following network rule:
* a block is not valid if it contains a transaction whose hash
already exists in the block chain, unless all that transaction's
outputs were already spent before said block.
Warning: this is effectively a network rule change, with potential
risk for forking the block chain. Leaving this unfixed carries the
same risk however, for attackers that can cause a reorganisation
in part of the network.
Thanks to Russell O'Connor and Ben Reeves.
| 0 |
bool fulltest(const unsigned char *hash, const unsigned char *target)
{
uint32_t *hash32 = (uint32_t *)hash;
uint32_t *target32 = (uint32_t *)target;
bool rc = true;
int i;
for (i = 28 / 4; i >= 0; i--) {
uint32_t h32tmp = le32toh(hash32[i]);
uint32_t t32tmp = le32toh(target32[i]);
if (h32tmp > t32tmp) {
rc = false;
break;
}
if (h32tmp < t32tmp) {
rc = true;
break;
}
}
if (opt_debug) {
unsigned char hash_swap[32], target_swap[32];
char *hash_str, *target_str;
swab256(hash_swap, hash);
swab256(target_swap, target);
hash_str = bin2hex(hash_swap, 32);
target_str = bin2hex(target_swap, 32);
applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s",
hash_str,
target_str,
rc ? "YES (hash <= target)" :
"no (false positive; hash > target)");
free(hash_str);
free(target_str);
}
return rc;
}
|
Safe
|
[
"CWE-20",
"CWE-703"
] |
sgminer
|
910c36089940e81fb85c65b8e63dcd2fac71470c
|
1.0827603287570154e+38
| 42 |
stratum: parse_notify(): Don't die on malformed bbversion/prev_hash/nbit/ntime.
Might have introduced a memory leak, don't have time to check. :(
Should the other hex2bin()'s be checked?
Thanks to Mick Ayzenberg <mick.dejavusecurity.com> for finding this.
| 0 |
static void pwq_unbound_release_workfn(struct work_struct *work)
{
struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
unbound_release_work);
struct workqueue_struct *wq = pwq->wq;
struct worker_pool *pool = pwq->pool;
bool is_last;
if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
return;
mutex_lock(&wq->mutex);
list_del_rcu(&pwq->pwqs_node);
is_last = list_empty(&wq->pwqs);
mutex_unlock(&wq->mutex);
mutex_lock(&wq_pool_mutex);
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
call_rcu_sched(&pwq->rcu, rcu_free_pwq);
/*
* If we're the last pwq going away, @wq is already dead and no one
* is gonna access it anymore. Schedule RCU free.
*/
if (is_last)
call_rcu_sched(&wq->rcu, rcu_free_wq);
}
|
Safe
|
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
|
1.723341150227521e+38
| 29 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: linux-doc@vger.kernel.org
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Xing Gao <xgao01@email.wm.edu>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Jessica Frazelle <me@jessfraz.com>
Cc: kernel-hardening@lists.openwall.com
Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Michal Marek <mmarek@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Olof Johansson <olof@lixom.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-api@vger.kernel.org
Cc: Arjan van de Ven <arjan@linux.intel.com>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
| 0 |
int TS_REQ_add_ext(TS_REQ *a, X509_EXTENSION *ex, int loc)
{
return X509v3_add_ext(&a->extensions,ex,loc) != NULL;
}
|
Safe
|
[] |
openssl
|
c7235be6e36c4bef84594aa3b2f0561db84b63d8
|
1.2249591462589573e+38
| 4 |
RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <zglozik@opentsa.org>
Reviewed by: Ulf Moeller
| 0 |
static struct sock *unix_find_other(struct net *net,
struct sockaddr_un *sunname, int len,
int type, unsigned hash, int *error)
{
struct sock *u;
struct path path;
int err = 0;
if (sunname->sun_path[0]) {
struct inode *inode;
err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
inode = path.dentry->d_inode;
err = inode_permission(inode, MAY_WRITE);
if (err)
goto put_fail;
err = -ECONNREFUSED;
if (!S_ISSOCK(inode->i_mode))
goto put_fail;
u = unix_find_socket_byinode(inode);
if (!u)
goto put_fail;
if (u->sk_type == type)
touch_atime(path.mnt, path.dentry);
path_put(&path);
err = -EPROTOTYPE;
if (u->sk_type != type) {
sock_put(u);
goto fail;
}
} else {
err = -ECONNREFUSED;
u = unix_find_socket_byname(net, sunname, len, type, hash);
if (u) {
struct dentry *dentry;
dentry = unix_sk(u)->dentry;
if (dentry)
touch_atime(unix_sk(u)->mnt, dentry);
} else
goto fail;
}
return u;
put_fail:
path_put(&path);
fail:
*error = err;
return NULL;
}
|
Safe
|
[] |
linux-2.6
|
16e5726269611b71c930054ffe9b858c1cea88eb
|
2.8729369844376496e+38
| 54 |
af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k]
__mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in a af_unix message/skb
only if requested by the sender, [man 7 unix for details how to include
ancillary data using sendmsg() system call]
Note: This might break buggy applications that expected SCM_CREDENTIAL
from an unaware write() system call, and receiver not using SO_PASSCRED
socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
const struct btrfs_balance_args *bargs,
u64 allowed, const char *type)
{
if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
return true;
if (fs_info->sectorsize < PAGE_SIZE &&
bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) {
btrfs_err(fs_info,
"RAID56 is not yet supported for sectorsize %u with page size %lu",
fs_info->sectorsize, PAGE_SIZE);
return false;
}
/* Profile is valid and does not have bits outside of the allowed set */
if (alloc_profile_is_valid(bargs->target, 1) &&
(bargs->target & ~allowed) == 0)
return true;
btrfs_err(fs_info, "balance: invalid convert %s profile %s",
type, btrfs_bg_type_to_raid_name(bargs->target));
return false;
}
|
Safe
|
[
"CWE-476",
"CWE-703"
] |
linux
|
e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091
|
1.1628103481529111e+38
| 23 |
btrfs: fix NULL pointer dereference when deleting device by invalid id
[BUG]
It's easy to trigger NULL pointer dereference, just by removing a
non-existing device id:
# mkfs.btrfs -f -m single -d single /dev/test/scratch1 \
/dev/test/scratch2
# mount /dev/test/scratch1 /mnt/btrfs
# btrfs device remove 3 /mnt/btrfs
Then we have the following kernel NULL pointer dereference:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP NOPTI
CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs]
btrfs_ioctl+0x18bb/0x3190 [btrfs]
? lock_is_held_type+0xa5/0x120
? find_held_lock.constprop.0+0x2b/0x80
? do_user_addr_fault+0x201/0x6a0
? lock_release+0xd2/0x2d0
? __x64_sys_ioctl+0x83/0xb0
__x64_sys_ioctl+0x83/0xb0
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
[CAUSE]
Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return
btrfs_device directly") moves the "missing" device path check into
btrfs_rm_device().
But btrfs_rm_device() itself can have case where it only receives
@devid, with NULL as @device_path.
In that case, calling strcmp() on NULL will trigger the NULL pointer
dereference.
Before that commit, we handle the "missing" case inside
btrfs_find_device_by_devspec(), which will not check @device_path at all
if @devid is provided, thus no way to trigger the bug.
[FIX]
Before calling strcmp(), also make sure @device_path is not NULL.
Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly")
CC: stable@vger.kernel.org # 5.4+
Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.