func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64)
---|---|---|---|---|---|---|---|---
rpc_C_VerifyRecoverInit (CK_X_FUNCTION_LIST *self,
p11_rpc_message *msg)
{
CK_SESSION_HANDLE session;
CK_MECHANISM mechanism;
CK_OBJECT_HANDLE key;
BEGIN_CALL (VerifyRecoverInit);
IN_ULONG (session);
IN_MECHANISM (mechanism);
IN_ULONG (key);
PROCESS_CALL ((self, session, &mechanism, key));
END_CALL;
}
|
Safe
|
[
"CWE-190"
] |
p11-kit
|
5307a1d21a50cacd06f471a873a018d23ba4b963
|
1.7545303753605985e+38
| 14 |
Check for arithmetic overflows before allocating
| 0 |
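The commit title above is terse; as a minimal user-space sketch of the CWE-190 guard it describes (illustrative names, not the p11-kit code), an allocation size derived from untrusted counts can be validated before it wraps:

```c
#include <stdint.h>
#include <stdlib.h>

/* Minimal sketch: reject a count * size product that would wrap
 * around SIZE_MAX before it ever reaches the allocator. */
static void *alloc_array_checked(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL; /* multiplication would overflow */
    return malloc(count * size);
}
```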
static int vgacon_font_set(struct vc_data *c, struct console_font *font,
unsigned int flags)
{
unsigned charcount = font->charcount;
int rc;
if (vga_video_type < VIDEO_TYPE_EGAM)
return -EINVAL;
if (font->width != VGA_FONTWIDTH ||
(charcount != 256 && charcount != 512))
return -EINVAL;
rc = vgacon_do_font_op(&vgastate, font->data, 1, charcount == 512);
if (rc)
return rc;
if (!(flags & KD_FONT_FLAG_DONT_RECALC))
rc = vgacon_adjust_height(c, font->height);
return rc;
}
|
Safe
|
[
"CWE-125"
] |
linux
|
973c096f6a85e5b5f2a295126ba6928d9a6afd45
|
3.316369352662856e+38
| 21 |
vgacon: remove software scrollback support
Yunhai Zhang recently fixed a VGA software scrollback bug in commit
ebfdfeeae8c0 ("vgacon: Fix for missing check in scrollback handling"),
but that then made people look more closely at some of this code, and
there were more problems on the vgacon side, but also the fbcon software
scrollback.
We don't really have anybody who maintains this code - probably because
nobody actually _uses_ it any more. Sure, people still use both VGA and
the framebuffer consoles, but they are no longer the main user
interfaces to the kernel, and haven't been for decades, so these kinds
of extra features end up bitrotting and not really being used.
So rather than try to maintain a likely unused set of code, I'll just
aggressively remove it, and see if anybody even notices. Maybe there
are people who haven't jumped on the whole GUI bandwagon yet, and think
it's just a fad. And maybe those people use the scrollback code.
If that turns out to be the case, we can resurrect this again, once
we've found the sucker^Wmaintainer for it who actually uses it.
Reported-by: NopNop Nop <nopitydays@gmail.com>
Tested-by: Willy Tarreau <w@1wt.eu>
Cc: 张云海 <zhangyunhai@nsfocus.com>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Acked-by: Willy Tarreau <w@1wt.eu>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
CImgList<T> get_split(const char axis, const int nb=-1) const {
CImgList<T> res;
if (is_empty()) return res;
const char _axis = cimg::lowercase(axis);
if (nb<0) { // Split by block size
const unsigned int dp = (unsigned int)(nb?-nb:1);
switch (_axis) {
case 'x': {
if (_width>dp) {
res.assign(_width/dp + (_width%dp?1:0),1,1);
const unsigned int pe = _width - dp;
cimg_pragma_openmp(parallel for cimg_openmp_if(res._width>=(cimg_openmp_sizefactor)*128 &&
_height*_depth*_spectrum>=128))
for (int p = 0; p<(int)pe; p+=dp)
get_crop(p,0,0,0,p + dp - 1,_height - 1,_depth - 1,_spectrum - 1).move_to(res[p/dp]);
get_crop((res._width - 1)*dp,0,0,0,_width - 1,_height - 1,_depth - 1,_spectrum - 1).move_to(res.back());
} else res.assign(*this);
} break;
case 'y': {
if (_height>dp) {
res.assign(_height/dp + (_height%dp?1:0),1,1);
const unsigned int pe = _height - dp;
cimg_pragma_openmp(parallel for cimg_openmp_if(res._width>=(cimg_openmp_sizefactor)*128 &&
_width*_depth*_spectrum>=128))
for (int p = 0; p<(int)pe; p+=dp)
get_crop(0,p,0,0,_width - 1,p + dp - 1,_depth - 1,_spectrum - 1).move_to(res[p/dp]);
get_crop(0,(res._width - 1)*dp,0,0,_width - 1,_height - 1,_depth - 1,_spectrum - 1).move_to(res.back());
} else res.assign(*this);
} break;
case 'z': {
if (_depth>dp) {
res.assign(_depth/dp + (_depth%dp?1:0),1,1);
const unsigned int pe = _depth - dp;
cimg_pragma_openmp(parallel for cimg_openmp_if(res._width>=(cimg_openmp_sizefactor)*128 &&
_width*_height*_spectrum>=128))
for (int p = 0; p<(int)pe; p+=dp)
get_crop(0,0,p,0,_width - 1,_height - 1,p + dp - 1,_spectrum - 1).move_to(res[p/dp]);
get_crop(0,0,(res._width - 1)*dp,0,_width - 1,_height - 1,_depth - 1,_spectrum - 1).move_to(res.back());
} else res.assign(*this);
} break;
case 'c' : {
if (_spectrum>dp) {
res.assign(_spectrum/dp + (_spectrum%dp?1:0),1,1);
const unsigned int pe = _spectrum - dp;
cimg_pragma_openmp(parallel for cimg_openmp_if(res._width>=(cimg_openmp_sizefactor)*128 &&
_width*_height*_depth>=128))
for (int p = 0; p<(int)pe; p+=dp)
get_crop(0,0,0,p,_width - 1,_height - 1,_depth - 1,p + dp - 1).move_to(res[p/dp]);
get_crop(0,0,0,(res._width - 1)*dp,_width - 1,_height - 1,_depth - 1,_spectrum - 1).move_to(res.back());
} else res.assign(*this);
}
}
} else if (nb>0) { // Split by number of (non-homogeneous) blocks
const unsigned int siz = _axis=='x'?_width:_axis=='y'?_height:_axis=='z'?_depth:_axis=='c'?_spectrum:0;
if ((unsigned int)nb>siz)
throw CImgArgumentException(_cimg_instance
"get_split(): Instance cannot be split along %c-axis into %u blocs.",
cimg_instance,
axis,nb);
if (nb==1) res.assign(*this);
else {
int err = (int)siz;
unsigned int _p = 0;
switch (_axis) {
case 'x' : {
cimg_forX(*this,p) if ((err-=nb)<=0) {
get_crop(_p,0,0,0,p,_height - 1,_depth - 1,_spectrum - 1).move_to(res);
err+=(int)siz;
_p = p + 1U;
}
} break;
case 'y' : {
cimg_forY(*this,p) if ((err-=nb)<=0) {
get_crop(0,_p,0,0,_width - 1,p,_depth - 1,_spectrum - 1).move_to(res);
err+=(int)siz;
_p = p + 1U;
}
} break;
case 'z' : {
cimg_forZ(*this,p) if ((err-=nb)<=0) {
get_crop(0,0,_p,0,_width - 1,_height - 1,p,_spectrum - 1).move_to(res);
err+=(int)siz;
_p = p + 1U;
}
} break;
case 'c' : {
cimg_forC(*this,p) if ((err-=nb)<=0) {
get_crop(0,0,0,_p,_width - 1,_height - 1,_depth - 1,p).move_to(res);
err+=(int)siz;
_p = p + 1U;
}
}
}
}
} else { // Split by equal values along the specified axis
T current = *_data;
switch (_axis) {
case 'x' : {
int i0 = 0;
cimg_forX(*this,i)
if ((*this)(i)!=current) { get_columns(i0,i - 1).move_to(res); i0 = i; current = (*this)(i); }
get_columns(i0,width() - 1).move_to(res);
} break;
case 'y' : {
int i0 = 0;
cimg_forY(*this,i)
if ((*this)(0,i)!=current) { get_rows(i0,i - 1).move_to(res); i0 = i; current = (*this)(0,i); }
get_rows(i0,height() - 1).move_to(res);
} break;
case 'z' : {
int i0 = 0;
cimg_forZ(*this,i)
if ((*this)(0,0,i)!=current) { get_slices(i0,i - 1).move_to(res); i0 = i; current = (*this)(0,0,i); }
get_slices(i0,depth() - 1).move_to(res);
} break;
case 'c' : {
int i0 = 0;
cimg_forC(*this,i)
if ((*this)(0,0,0,i)!=current) { get_channels(i0,i - 1).move_to(res); i0 = i; current = (*this)(0,0,0,i); }
get_channels(i0,spectrum() - 1).move_to(res);
} break;
default : {
longT i0 = 0;
cimg_foroff(*this,i)
if ((*this)[i]!=current) {
CImg<T>(_data + i0,1,(unsigned int)(i - i0)).move_to(res);
i0 = (longT)i; current = (*this)[i];
}
CImg<T>(_data + i0,1,(unsigned int)(size() - i0)).move_to(res);
}
}
}
return res;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
|
1.2905004293017928e+38
| 135 |
.
| 0 |
void format_create_dest(TEXT_DEST_REC *dest,
void *server, const char *target,
int level, WINDOW_REC *window)
{
format_create_dest_tag(dest, server, NULL, target, level, window);
}
|
Safe
|
[
"CWE-476"
] |
irssi
|
6c6c42e3d1b49d90aacc0b67f8540471cae02a1d
|
2.5830069604907002e+38
| 6 |
Merge branch 'security' into 'master'
See merge request !7
| 0 |
WebSession(const QString& id)
: id(id)
{
updateTimestamp();
}
|
Safe
|
[
"CWE-20"
] |
qBittorrent
|
f5ad04766f4abaa78374ff03704316f8ce04627d
|
3.027066741251098e+38
| 5 |
[WebUI] Avoid clickjacking attacks
| 0 |
EXPORTED void mboxlist_open(const char *fname)
{
int ret, flags;
char *tofree = NULL;
if (!fname)
fname = config_getstring(IMAPOPT_MBOXLIST_DB_PATH);
/* create db file name */
if (!fname) {
tofree = strconcat(config_dir, FNAME_MBOXLIST, (char *)NULL);
fname = tofree;
}
flags = CYRUSDB_CREATE;
if (config_getswitch(IMAPOPT_IMPROVED_MBOXLIST_SORT)) {
flags |= CYRUSDB_MBOXSORT;
}
ret = cyrusdb_open(DB, fname, flags, &mbdb);
if (ret != 0) {
syslog(LOG_ERR, "DBERROR: opening %s: %s", fname,
cyrusdb_strerror(ret));
/* Exiting TEMPFAIL because Sendmail thinks this
EC_OSFILE == permanent failure. */
fatal("can't read mailboxes file", EC_TEMPFAIL);
}
free(tofree);
mboxlist_dbopen = 1;
}
|
Safe
|
[
"CWE-20"
] |
cyrus-imapd
|
6bd33275368edfa71ae117de895488584678ac79
|
2.7876856472538644e+38
| 32 |
mboxlist: fix uninitialised memory use where pattern is "Other Users"
| 0 |
int udp6_output(struct socket *so, struct mbuf *m, struct sockaddr_in6 *saddr,
struct sockaddr_in6 *daddr)
{
Slirp *slirp = m->slirp;
M_DUP_DEBUG(slirp, m, 0, sizeof(struct ip6) + sizeof(struct udphdr));
struct ip6 *ip;
struct udphdr *uh;
DEBUG_CALL("udp6_output");
DEBUG_ARG("so = %p", so);
DEBUG_ARG("m = %p", m);
/* adjust for header */
m->m_data -= sizeof(struct udphdr);
m->m_len += sizeof(struct udphdr);
uh = mtod(m, struct udphdr *);
m->m_data -= sizeof(struct ip6);
m->m_len += sizeof(struct ip6);
ip = mtod(m, struct ip6 *);
/* Build IP header */
ip->ip_pl = htons(m->m_len - sizeof(struct ip6));
ip->ip_nh = IPPROTO_UDP;
ip->ip_src = saddr->sin6_addr;
ip->ip_dst = daddr->sin6_addr;
/* Build UDP header */
uh->uh_sport = saddr->sin6_port;
uh->uh_dport = daddr->sin6_port;
uh->uh_ulen = ip->ip_pl;
uh->uh_sum = 0;
uh->uh_sum = ip6_cksum(m);
if (uh->uh_sum == 0) {
uh->uh_sum = 0xffff;
}
return ip6_output(so, m, 0);
}
|
Safe
|
[] |
libslirp
|
de71c15de66ba9350bf62c45b05f8fbff166517b
|
1.7100910702157063e+38
| 39 |
udp6: check udp6_input buffer size
Fixes: CVE-2021-3593
Fixes: https://gitlab.freedesktop.org/slirp/libslirp/-/issues/45
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
| 0 |
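The fix title says udp6_input must validate its buffer size before parsing. A hedged, self-contained sketch of that kind of length guard (header sizes are fixed-width stand-ins, not libslirp's types):

```c
#include <stddef.h>

enum { IP6_HDR_LEN = 40, UDP_HDR_LEN = 8 }; /* fixed header sizes */

/* Sketch: drop any datagram too short to contain both the IPv6 and
 * UDP headers, so later field reads cannot run past the buffer. */
static int udp6_len_ok(size_t pkt_len)
{
    return pkt_len >= (size_t)(IP6_HDR_LEN + UDP_HDR_LEN);
}
```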
static bool hid_match_one_id(struct hid_device *hdev,
const struct hid_device_id *id)
{
return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
(id->product == HID_ANY_ID || id->product == hdev->product);
}
|
Safe
|
[
"CWE-125"
] |
linux
|
50220dead1650609206efe91f0cc116132d59b3f
|
7.742104186137572e+37
| 8 |
HID: core: prevent out-of-bound readings
Plugging a Logitech DJ receiver with KASAN activated raises a bunch of
out-of-bound readings.
The fields are allocated up to MAX_USAGE, meaning that potentially, we do
not have enough fields to fit the incoming values.
Add checks and silence KASAN.
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
| 0 |
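The message explains the mechanism: fields are allocated only up to MAX_USAGE, so an incoming index can exceed the allocation. A generic sketch of the added bound (hypothetical names, not the HID core code):

```c
/* Sketch: bound an incoming usage index by the number of fields that
 * were actually allocated before reading through it. */
static int read_field(const int *fields, unsigned int nfields,
                      unsigned int idx, int *out)
{
    if (idx >= nfields)
        return -1; /* index past the allocation: reject */
    *out = fields[idx];
    return 0;
}
```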
ofputil_encode_table_desc_request(enum ofp_version ofp_version)
{
struct ofpbuf *request = NULL;
if (ofp_version >= OFP14_VERSION) {
request = ofpraw_alloc(OFPRAW_OFPST14_TABLE_DESC_REQUEST,
ofp_version, 0);
} else {
ovs_fatal(0, "dump-table-desc needs OpenFlow 1.4 or later "
"(\'-O OpenFlow14\')");
}
return request;
}
|
Safe
|
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
|
1.7314539122657195e+38
| 14 |
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <bshastry@sec.t-labs.tu-berlin.de>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Justin Pettit <jpettit@ovn.org>
| 0 |
static int verify_userspi_info(struct xfrm_userspi_info *p)
{
switch (p->info.id.proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
break;
case IPPROTO_COMP:
/* IPCOMP spi is 16-bits. */
if (p->max >= 0x10000)
return -EINVAL;
break;
default:
return -EINVAL;
}
if (p->min > p->max)
return -EINVAL;
return 0;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
1f86840f897717f86d523a13e99a447e6a5d2fa5
|
8.397115031662567e+37
| 22 |
xfrm_user: fix info leak in copy_to_user_tmpl()
The memory used for the template copy is a local stack variable. As
struct xfrm_user_tmpl contains multiple holes added by the compiler for
alignment, not initializing the memory will lead to leaking stack bytes
to userland. Add an explicit memset(0) to avoid the info leak.
Initial version of the patch by Brad Spengler.
Cc: Brad Spengler <spender@grsecurity.net>
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
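The message pinpoints the mechanism: compiler-inserted padding holes in a stack struct carry stale bytes to userland unless the object is zeroed first. A small user-space sketch of the memset-before-fill pattern (the struct is a stand-in for xfrm_user_tmpl, not its real layout):

```c
#include <string.h>

struct tmpl_sketch {      /* stand-in with an alignment hole */
    unsigned char family; /* compiler pads after this member */
    unsigned long id;
};

/* Sketch: zero the whole object before filling it, so the padding
 * never leaks stack contents when the struct is copied out whole. */
static void fill_tmpl(struct tmpl_sketch *t)
{
    memset(t, 0, sizeof(*t));
    t->family = 2;
    t->id = 42;
}
```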
static int dnxhd_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DNXHDContext *ctx = avctx->priv_data;
ThreadFrame frame = { .f = data };
AVFrame *picture = data;
int first_field = 1;
int ret, i;
ff_dlog(avctx, "frame size %d\n", buf_size);
for (i = 0; i < avctx->thread_count; i++)
ctx->rows[i].format = -1;
decode_coding_unit:
if ((ret = dnxhd_decode_header(ctx, picture, buf, buf_size, first_field)) < 0)
return ret;
if ((avctx->width || avctx->height) &&
(ctx->width != avctx->width || ctx->height != avctx->height)) {
av_log(avctx, AV_LOG_WARNING, "frame size changed: %dx%d -> %ux%u\n",
avctx->width, avctx->height, ctx->width, ctx->height);
first_field = 1;
}
if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) {
av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n",
av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt));
first_field = 1;
}
avctx->pix_fmt = ctx->pix_fmt;
ret = ff_set_dimensions(avctx, ctx->width, ctx->height);
if (ret < 0)
return ret;
if (first_field) {
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
return ret;
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
}
ctx->buf_size = buf_size - ctx->data_offset;
ctx->buf = buf + ctx->data_offset;
avctx->execute2(avctx, dnxhd_decode_row, picture, NULL, ctx->mb_height);
if (first_field && picture->interlaced_frame) {
buf += ctx->cid_table->coding_unit_size;
buf_size -= ctx->cid_table->coding_unit_size;
first_field = 0;
goto decode_coding_unit;
}
ret = 0;
for (i = 0; i < avctx->thread_count; i++) {
ret += ctx->rows[i].errors;
ctx->rows[i].errors = 0;
}
if (ctx->act) {
static int act_warned;
int format = ctx->rows[0].format;
for (i = 1; i < avctx->thread_count; i++) {
if (ctx->rows[i].format != format &&
ctx->rows[i].format != -1 /* not run */) {
format = 2;
break;
}
}
switch (format) {
case -1:
case 2:
if (!act_warned) {
act_warned = 1;
av_log(ctx->avctx, AV_LOG_ERROR,
"Unsupported: variable ACT flag.\n");
}
break;
case 0:
ctx->pix_fmt = ctx->bit_depth==10
? AV_PIX_FMT_GBRP10 : AV_PIX_FMT_GBRP12;
break;
case 1:
ctx->pix_fmt = ctx->bit_depth==10
? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV444P12;
break;
}
}
avctx->pix_fmt = ctx->pix_fmt;
if (ret) {
av_log(ctx->avctx, AV_LOG_ERROR, "%d lines with errors\n", ret);
return AVERROR_INVALIDDATA;
}
*got_frame = 1;
return avpkt->size;
}
|
Safe
|
[
"CWE-125",
"CWE-252"
] |
FFmpeg
|
7150f9575671f898382c370acae35f9087a30ba1
|
1.3659680876880487e+38
| 99 |
avcodec/dnxhddec: check and propagate function return value
Similar to CVE-2013-0868, here return value check for 'init_vlc' is needed.
crafted DNxHD data can cause unspecified impact.
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
| 0 |
struct fuse_context *fuse_get_context(void)
{
return &fuse_get_context_internal()->ctx;
}
|
Safe
|
[] |
ntfs-3g
|
fb28eef6f1c26170566187c1ab7dc913a13ea43c
|
2.4338727891375796e+38
| 4 |
Hardened the checking of directory offset requested by a readdir
When asked for the next directory entries, make sure the chunk offset
is within valid values, otherwise return no more entries in chunk.
| 0 |
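The message describes the hardening: an out-of-range directory offset must yield "no more entries" rather than being used as an index. A hedged sketch of that guard (chunk layout and names are assumptions, not ntfs-3g internals):

```c
#include <stddef.h>

/* Sketch: an offset outside the chunk ends the listing instead of
 * being used to address an entry. */
static int have_more_entries(size_t requested_offset, size_t chunk_size)
{
    if (requested_offset >= chunk_size)
        return 0; /* invalid offset: report end of chunk */
    return 1;     /* safe to decode the entry at this offset */
}
```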
asmlinkage long sys_setuid(uid_t uid)
{
int old_euid = current->euid;
int old_ruid, old_suid, new_suid;
int retval;
retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
if (retval)
return retval;
old_ruid = current->uid;
old_suid = current->suid;
new_suid = old_suid;
if (capable(CAP_SETUID)) {
if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
return -EAGAIN;
new_suid = uid;
} else if ((uid != current->uid) && (uid != new_suid))
return -EPERM;
if (old_euid != uid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
current->fsuid = current->euid = uid;
current->suid = new_suid;
key_fsuid_changed(current);
proc_id_connector(current, PROC_EVENT_UID);
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
|
Safe
|
[
"CWE-20"
] |
linux-2.6
|
9926e4c74300c4b31dee007298c6475d33369df0
|
6.1150799898324865e+37
| 33 |
CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix
As discovered here today, the change in kernel 2.6.17, intended to inhibit
users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by
"cheating" and setting it to 1 in such a case, does not make a difference,
as the check is done in the wrong place (too late), and only applies to the
profiling code.
On all systems I checked running kernels above 2.6.17, no matter what the
hard and soft CPU time limits were before, a user could escape them by
issuing in the shell (sh/bash/zsh) "ulimit -t 0", and then the user's
process was not ever killed.
Attached is a trivial patch to fix that. Simply moving the check to a
slightly earlier location (specifically, before the line that actually
assigns the limit - *old_rlim = new_rlim), does the trick.
Do note that at least the zsh (but not ash, dash, or bash) shell has the
problem of "caching" the limits set by the ulimit command, so when running
zsh the fix will not immediately be evident - after entering "ulimit -t 0",
"ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual
limit as returned by getrlimit(...) will be 1. It can be verified by
opening a subshell (which will not have the values of the parent shell in
cache) and checking in it, or just by running a CPU intensive command like
"echo '65536^1048576' | bc" and verifying that it dumps core after one
second.
Regardless of whether that is a misfeature in the shell, perhaps it would
be better to return -EINVAL from setrlimit in such a case instead of
cheating and setting to 1, as that does not really reflect the actual state
of the process anymore. I do not however know what the ground for that
decision was in the original 2.6.17 change, and whether there would be any
"backward" compatibility issues, so I preferred not to touch that right
now.
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
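The patch described above is purely about ordering: the 0-to-1 clamp must run before the assignment that publishes the limit (*old_rlim = new_rlim), not after it. A compact sketch of that ordering (types are stand-ins for the kernel's rlimit structures):

```c
struct rlim_sketch { unsigned long rlim_cur; }; /* stand-in type */

/* Sketch: clamp the incoming value first, then assign, so the stored
 * limit can never hold the unchecked 0 ("unlimited") value. */
static void set_cpu_rlimit(struct rlim_sketch *old_rlim,
                           struct rlim_sketch new_rlim)
{
    if (new_rlim.rlim_cur == 0)
        new_rlim.rlim_cur = 1; /* the check now precedes the assignment */
    *old_rlim = new_rlim;      /* only here is the limit published */
}
```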
TEST_P(ProtocolIntegrationTest, ReqRespSizeStats) {
config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {
RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() == 1, "");
auto& cluster = *bootstrap.mutable_static_resources()->mutable_clusters(0);
cluster.mutable_track_cluster_stats()->set_request_response_sizes(true);
});
initialize();
codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http"))));
Http::TestRequestHeaderMapImpl request_headers{
{":method", "GET"}, {":path", "/found"}, {":scheme", "http"}, {":authority", "foo.com"}};
Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 0, 0,
TestUtility::DefaultTimeout);
EXPECT_TRUE(upstream_request_->complete());
ASSERT_TRUE(response->complete());
EXPECT_EQ("200", response->headers().getStatusValue());
test_server_->waitUntilHistogramHasSamples("cluster.cluster_0.upstream_rq_headers_size");
test_server_->waitUntilHistogramHasSamples("cluster.cluster_0.upstream_rs_headers_size");
}
|
Safe
|
[
"CWE-416"
] |
envoy
|
148de954ed3585d8b4298b424aa24916d0de6136
|
2.4874662156407817e+38
| 21 |
CVE-2021-43825
Response filter manager crash
Signed-off-by: Yan Avlasov <yavlasov@google.com>
| 0 |
REDIS_STATIC quicklistNode *_quicklistSplitNode(quicklistNode *node, int offset,
int after) {
size_t zl_sz = node->sz;
quicklistNode *new_node = quicklistCreateNode();
new_node->zl = zmalloc(zl_sz);
/* Copy original ziplist so we can split it */
memcpy(new_node->zl, node->zl, zl_sz);
/* -1 here means "continue deleting until the list ends" */
int orig_start = after ? offset + 1 : 0;
int orig_extent = after ? -1 : offset;
int new_start = after ? 0 : offset;
int new_extent = after ? offset + 1 : -1;
D("After %d (%d); ranges: [%d, %d], [%d, %d]", after, offset, orig_start,
orig_extent, new_start, new_extent);
node->zl = ziplistDeleteRange(node->zl, orig_start, orig_extent);
node->count = ziplistLen(node->zl);
quicklistNodeUpdateSz(node);
new_node->zl = ziplistDeleteRange(new_node->zl, new_start, new_extent);
new_node->count = ziplistLen(new_node->zl);
quicklistNodeUpdateSz(new_node);
D("After split lengths: orig (%d), new (%d)", node->count, new_node->count);
return new_node;
}
|
Safe
|
[
"CWE-190"
] |
redis
|
f6a40570fa63d5afdd596c78083d754081d80ae3
|
2.7919878959244408e+38
| 30 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting from trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching a size above 1GB; it will be
converted to HT encoding instead, as that's not a useful ziplist size.
- prevent listpack (stream) from reaching a size above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB.
- List type (ziplist in quicklist) was truncating strings that were over 4GB;
now it'll respond with an error.
| 0 |
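The message names concrete caps: allocations must stay under 4GB and inline encodings under 1GB. A hedged sketch of the pre-growth check (the constant mirrors the described 1GB threshold, not Redis's actual macro):

```c
#include <stddef.h>

#define SAFETY_LIMIT ((size_t)1 << 30) /* ~1GB cap from the message */

/* Sketch: refuse growth that would push an inline-encoded structure
 * past the cap, phrased so the check itself cannot overflow. */
static int growth_ok(size_t current, size_t extra)
{
    if (current > SAFETY_LIMIT)
        return 0;
    return extra <= SAFETY_LIMIT - current;
}
```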
MagickExport void XHighlightRectangle(Display *display,Window window,
GC annotate_context,const RectangleInfo *highlight_info)
{
assert(display != (Display *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(window != (Window) NULL);
assert(annotate_context != (GC) NULL);
assert(highlight_info != (RectangleInfo *) NULL);
if ((highlight_info->width < 4) || (highlight_info->height < 4))
return;
(void) XDrawRectangle(display,window,annotate_context,(int) highlight_info->x,
(int) highlight_info->y,(unsigned int) highlight_info->width-1,
(unsigned int) highlight_info->height-1);
(void) XDrawRectangle(display,window,annotate_context,(int) highlight_info->x+
1,(int) highlight_info->y+1,(unsigned int) highlight_info->width-3,
(unsigned int) highlight_info->height-3);
}
|
Safe
|
[
"CWE-401"
] |
ImageMagick6
|
13801f5d0bd7a6fdb119682d34946636afdb2629
|
1.7289964342311366e+38
| 17 |
https://github.com/ImageMagick/ImageMagick/issues/1531
| 0 |
ruby_brace_glob_with_enc(const char *str, int flags, ruby_glob_func *func, VALUE arg, rb_encoding *enc)
{
return ruby_brace_glob0(str, flags & ~GLOB_VERBOSE, func, arg, enc);
}
|
Safe
|
[
"CWE-22"
] |
ruby
|
143eb22f1877815dd802f7928959c5f93d4c7bb3
|
1.4982696067395802e+38
| 4 |
merge revision(s) 62989:
dir.c: check NUL bytes
* dir.c (GlobPathValue): should be used in rb_push_glob only.
other methods should use FilePathValue.
https://hackerone.com/reports/302338
* dir.c (rb_push_glob): expand GlobPathValue
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_2@63015 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
| 0 |
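The merged revision's point is that glob patterns must be rejected if they contain embedded NUL bytes, which would silently truncate the path at the C layer. A minimal sketch of such a check (not Ruby's FilePathValue itself):

```c
#include <string.h>

/* Sketch: a buffer whose declared length covers an embedded NUL is
 * rejected, since C string APIs would see a truncated pattern. */
static int path_is_clean(const char *buf, size_t declared_len)
{
    return memchr(buf, '\0', declared_len) == NULL;
}
```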
static void cil_reset_user(struct cil_user *user)
{
/* reset the bounds to NULL during a re-resolve */
user->bounds = NULL;
user->dftlevel = NULL;
user->range = NULL;
}
|
Safe
|
[
"CWE-416"
] |
selinux
|
f34d3d30c8325e4847a6b696fe7a3936a8a361f3
|
1.1789426256371007e+38
| 7 |
libsepol/cil: Destroy classperms list when resetting classpermission
Nicolas Iooss reports:
A few months ago, OSS-Fuzz found a crash in the CIL compiler, which
got reported as
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28648 (the title
is misleading, or is caused by another issue that conflicts with the
one I report in this message). Here is a minimized CIL policy which
reproduces the issue:
(class CLASS (PERM))
(classorder (CLASS))
(sid SID)
(sidorder (SID))
(user USER)
(role ROLE)
(type TYPE)
(category CAT)
(categoryorder (CAT))
(sensitivity SENS)
(sensitivityorder (SENS))
(sensitivitycategory SENS (CAT))
(allow TYPE self (CLASS (PERM)))
(roletype ROLE TYPE)
(userrole USER ROLE)
(userlevel USER (SENS))
(userrange USER ((SENS)(SENS (CAT))))
(sidcontext SID (USER ROLE TYPE ((SENS)(SENS))))
(classpermission CLAPERM)
(optional OPT
(roletype nonexistingrole nonexistingtype)
(classpermissionset CLAPERM (CLASS (PERM)))
)
The CIL policy fuzzer (which mimics secilc built with clang Address
Sanitizer) reports:
==36541==ERROR: AddressSanitizer: heap-use-after-free on address
0x603000004f98 at pc 0x56445134c842 bp 0x7ffe2a256590 sp
0x7ffe2a256588
READ of size 8 at 0x603000004f98 thread T0
#0 0x56445134c841 in __cil_verify_classperms
/selinux/libsepol/src/../cil/src/cil_verify.c:1620:8
#1 0x56445134a43e in __cil_verify_classpermission
/selinux/libsepol/src/../cil/src/cil_verify.c:1650:9
#2 0x56445134a43e in __cil_pre_verify_helper
/selinux/libsepol/src/../cil/src/cil_verify.c:1715:8
#3 0x5644513225ac in cil_tree_walk_core
/selinux/libsepol/src/../cil/src/cil_tree.c:272:9
#4 0x564451322ab1 in cil_tree_walk
/selinux/libsepol/src/../cil/src/cil_tree.c:316:7
#5 0x5644513226af in cil_tree_walk_core
/selinux/libsepol/src/../cil/src/cil_tree.c:284:9
#6 0x564451322ab1 in cil_tree_walk
/selinux/libsepol/src/../cil/src/cil_tree.c:316:7
#7 0x5644512b88fd in cil_pre_verify
/selinux/libsepol/src/../cil/src/cil_post.c:2510:7
#8 0x5644512b88fd in cil_post_process
/selinux/libsepol/src/../cil/src/cil_post.c:2524:7
#9 0x5644511856ff in cil_compile
/selinux/libsepol/src/../cil/src/cil.c:564:7
The classperms list of a classpermission rule is created and filled
in when classpermissionset rules are processed, so it doesn't own any
part of the list and shouldn't retain any of it when it is reset.
Destroy the classperms list (without destroying the data in it) when
resetting a classpermission rule.
Reported-by: Nicolas Iooss <nicolas.iooss@m4x.org>
Signed-off-by: James Carter <jwcart2@gmail.com>
| 0 |
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct ip6t_entry *de;
unsigned int origsize;
int h;
struct xt_entry_match *ematch;
origsize = *size;
de = *dstptr;
memcpy(de, e, sizeof(struct ip6t_entry));
memcpy(&de->counters, &e->counters, sizeof(e->counters));
*dstptr += sizeof(struct ip6t_entry);
*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
xt_ematch_foreach(ematch, e)
xt_compat_match_from_user(ematch, dstptr, size);
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ip6t_get_target(e);
xt_compat_target_from_user(t, dstptr, size);
de->next_offset = e->next_offset - (origsize - *size);
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)de - base < newinfo->hook_entry[h])
newinfo->hook_entry[h] -= origsize - *size;
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
}
|
Safe
|
[
"CWE-476"
] |
linux
|
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
|
8.335814990378964e+37
| 33 |
netfilter: add back stackpointer size checks
The rationale for removing the check is only correct for rulesets
generated by ip(6)tables.
In iptables, a jump can only occur to a user-defined chain, i.e.
because we size the stack based on number of user-defined chains we
cannot exceed stack size.
However, the underlying binary format has no such restriction,
and the validation step only ensures that the jump target is a
valid rule start point.
IOW, it's possible to build a rule blob that has no user-defined
chains but does contain a jump.
If this happens, no jump stack gets allocated, and the traversal
crashes when the jump is taken.
Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset")
Reported-by: syzbot+e783f671527912cd9403@syzkaller.appspotmail.com
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
| 0 |
Expression_Obj Parser::parse_relation()
{
NESTING_GUARD(nestings);
advanceToNextToken();
ParserState state(pstate);
// parse the left hand side expression
Expression_Obj lhs = parse_expression();
std::vector<Expression_Obj> operands;
std::vector<Operand> operators;
// if it's a singleton, return it (don't wrap it)
while (peek< alternatives <
kwd_eq,
kwd_neq,
kwd_gte,
kwd_gt,
kwd_lte,
kwd_lt
> >(position))
{
// is directly adjacent to expression?
bool left_ws = peek < css_comments >() != NULL;
// parse the operator
enum Sass_OP op
= lex<kwd_eq>() ? Sass_OP::EQ
: lex<kwd_neq>() ? Sass_OP::NEQ
: lex<kwd_gte>() ? Sass_OP::GTE
: lex<kwd_lte>() ? Sass_OP::LTE
: lex<kwd_gt>() ? Sass_OP::GT
: lex<kwd_lt>() ? Sass_OP::LT
// we checked the possibilities on top of fn
: Sass_OP::EQ;
// is directly adjacent to expression?
bool right_ws = peek < css_comments >() != NULL;
operators.push_back({ op, left_ws, right_ws });
operands.push_back(parse_expression());
}
// we are called recursively for list, so we first
// fold inner binary expression which has delayed
// correctly set to zero. After folding we also unwrap
// single nested items. So we cannot set delay on the
// returned result here, as we have lost nestings ...
Expression_Obj ex = fold_operands(lhs, operands, operators);
state.offset = pstate - state + pstate.offset;
ex->pstate(state);
return ex;
}
|
Safe
|
[
"CWE-125"
] |
libsass
|
eb15533b07773c30dc03c9d742865604f47120ef
|
3.290605407266615e+38
| 46 |
Fix memory leak in `parse_ie_keyword_arg`
`kwd_arg` would never get freed when there was a parse error in
`parse_ie_keyword_arg`.
Closes #2656
| 0 |
static void n_tty_check_throttle(struct tty_struct *tty)
{
if (tty->driver->type == TTY_DRIVER_TYPE_PTY)
return;
/*
* Check the remaining room for the input canonicalization
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
while (1) {
int throttled;
tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
if (receive_room(tty) >= TTY_THRESHOLD_THROTTLE)
break;
throttled = tty_throttle_safe(tty);
if (!throttled)
break;
}
__tty_set_flow_change(tty, 0);
}
|
Safe
|
[
"CWE-362"
] |
tty
|
4291086b1f081b869c6d79e5b7441633dc3ace00
|
2.7043982203603533e+37
| 20 |
n_tty: Fix n_tty_write crash when echoing in raw mode
The tty atomic_write_lock does not provide an exclusion guarantee for
the tty driver if the termios settings are LECHO & !OPOST. And since
it is unexpected and not allowed to call TTY buffer helpers like
tty_insert_flip_string concurrently, this may lead to crashes when
concurrent writers call pty_write. In that case the following two
writers:
* the ECHOing from a workqueue and
* pty_write from the process
race and can overflow the corresponding TTY buffer like follows.
If we look into tty_insert_flip_string_fixed_flag, there is:
int space = __tty_buffer_request_room(port, goal, flags);
struct tty_buffer *tb = port->buf.tail;
...
memcpy(char_buf_ptr(tb, tb->used), chars, space);
...
tb->used += space;
so the race of the two can result in something like this:
A B
__tty_buffer_request_room
__tty_buffer_request_room
memcpy(buf(tb->used), ...)
tb->used += space;
memcpy(buf(tb->used), ...) ->BOOM
B's memcpy is past the tty_buffer due to the previous A's tb->used
increment.
Since the N_TTY line discipline input processing can output
concurrently with a tty write, obtain the N_TTY ldisc output_lock to
serialize echo output with normal tty writes. This ensures the tty
buffer helper tty_insert_flip_string is not called concurrently and
everything is fine.
Note that this is nicely reproducible by an ordinary user using
forkpty and some setup around that (raw termios + ECHO). And it is
present in kernels at least after commit
d945cb9cce20ac7143c2de8d88b187f62db99bdc (pty: Rework the pty layer to
use the normal buffering logic) in 2.6.31-rc3.
js: add more info to the commit log
js: switch to bool
js: lock unconditionally
js: lock only the tty->ops->write call
References: CVE-2014-0196
Reported-and-tested-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Peter Hurley <peter@hurleysoftware.com>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
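The message walks through the exact interleaving: both writers call __tty_buffer_request_room and then memcpy and advance tb->used, so the second copy lands past the buffer. A user-space sketch of the serialization the fix adds, with one lock making the reserve/copy/advance sequence atomic (pthread stands in for the ldisc output_lock):

```c
#include <pthread.h>
#include <string.h>

static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch: both writers take the same lock around the whole sequence,
 * so "used" cannot move between the room check and the memcpy that
 * depends on it. */
static void buffer_append(char *buf, size_t cap, size_t *used,
                          const char *src, size_t n)
{
    pthread_mutex_lock(&output_lock);
    if (cap - *used >= n) {          /* room check... */
        memcpy(buf + *used, src, n); /* ...copy... */
        *used += n;                  /* ...advance, all under one lock */
    }
    pthread_mutex_unlock(&output_lock);
}
```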
virtual void resizeEvent(QResizeEvent * e) {
QMessageBox::resizeEvent(e);
setMaximumSize(QWIDGETSIZE_MAX, QWIDGETSIZE_MAX);
if (QWidget *textEdit = findChild<QTextEdit *>()) {
textEdit->setMaximumHeight(QWIDGETSIZE_MAX);
}
}
|
Safe
|
[
"CWE-22"
] |
Sigil
|
0979ba8d10c96ebca330715bfd4494ea0e019a8f
|
3.03166463271026e+38
| 7 |
harden plugin unzipping against zip-slip attacks
| 0 |
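A zip-slip attack hides entry names like ../../etc/x in an archive so extraction escapes the target directory. A hedged sketch of the usual hardening, comparing the canonicalized destination against the extraction root (illustrative names, not Sigil's code):

```c
#include <string.h>

/* Sketch: after canonicalizing root + entry name, accept the result
 * only if it is the root itself or a path strictly inside it. */
static int stays_inside_root(const char *canon_path, const char *canon_root)
{
    size_t n = strlen(canon_root);
    return strncmp(canon_path, canon_root, n) == 0 &&
           (canon_path[n] == '/' || canon_path[n] == '\0');
}
```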
R_API ut64 r_bin_java_float_cp_calc_size(RBinJavaCPTypeObj *obj) {
ut64 size = 0;
// tag
size += 1;
// obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1);
size += 4;
return size;
}
|
Safe
|
[
"CWE-119",
"CWE-788"
] |
radare2
|
6c4428f018d385fc80a33ecddcb37becea685dd5
|
7.806946846567369e+37
| 8 |
Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class
| 0 |
Method* LinkResolver::lookup_method_in_interfaces(const LinkInfo& cp_info) {
InstanceKlass *ik = InstanceKlass::cast(cp_info.resolved_klass());
// Specify 'true' in order to skip default methods when searching the
// interfaces. Function lookup_method_in_klasses() already looked for
// the method in the default methods table.
return ik->lookup_method_in_all_interfaces(cp_info.name(), cp_info.signature(), Klass::skip_defaults);
}
|
Safe
|
[] |
jdk11u
|
132745902a4601dc64b2c8ca112ca30292feccb4
|
3.998743720487592e+37
| 8 |
8281866: Enhance MethodHandle invocations
Reviewed-by: mbaesken
Backport-of: d974d9da365f787f67971d88c79371c8b0769f75
| 0 |
_hb_ot_layout_get_glyph_property (hb_face_t *face,
hb_codepoint_t glyph)
{
hb_ot_layout_class_t klass;
const GDEF &gdef = _get_gdef (face);
klass = gdef.get_glyph_class (glyph);
if (!klass && glyph < face->ot_layout.new_gdef.len)
klass = face->ot_layout.new_gdef.klasses[glyph];
switch (klass) {
default:
case GDEF::UnclassifiedGlyph: return HB_OT_LAYOUT_GLYPH_CLASS_UNCLASSIFIED;
case GDEF::BaseGlyph: return HB_OT_LAYOUT_GLYPH_CLASS_BASE_GLYPH;
case GDEF::LigatureGlyph: return HB_OT_LAYOUT_GLYPH_CLASS_LIGATURE;
case GDEF::ComponentGlyph: return HB_OT_LAYOUT_GLYPH_CLASS_COMPONENT;
case GDEF::MarkGlyph:
klass = gdef.get_mark_attachment_type (glyph);
return HB_OT_LAYOUT_GLYPH_CLASS_MARK + (klass << 8);
}
}
|
Safe
|
[
"CWE-119"
] |
pango
|
797d46714d27f147277fdd5346648d838c68fb8c
|
2.8916604226094086e+38
| 22 |
[HB/GDEF] Fix bug in building synthetic GDEF table
| 0 |
void rtnl_af_register(struct rtnl_af_ops *ops)
{
rtnl_lock();
list_add_tail_rcu(&ops->list, &rtnl_af_ops);
rtnl_unlock();
}
|
Safe
|
[
"CWE-476"
] |
linux
|
f428fe4a04cc339166c8bbd489789760de3a0cee
|
2.821335832547825e+37
| 6 |
rtnetlink: give a user socket to get_target_net()
This function is used from two places: rtnl_dump_ifinfo and
rtnl_getlink. In rtnl_getlink(), we give a request skb into
get_target_net(), but in rtnl_dump_ifinfo, we give a response skb
into get_target_net().
The problem here is that NETLINK_CB() isn't initialized for the response
skb. In both cases we can get a user socket and give it instead of skb
into get_target_net().
This bug was found by syzkaller with this call-trace:
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 1 PID: 3149 Comm: syzkaller140561 Not tainted 4.15.0-rc4-mm1+ #47
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
Google 01/01/2011
RIP: 0010:__netlink_ns_capable+0x8b/0x120 net/netlink/af_netlink.c:868
RSP: 0018:ffff8801c880f348 EFLAGS: 00010206
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff8443f900
RDX: 000000000000007b RSI: ffffffff86510f40 RDI: 00000000000003d8
RBP: ffff8801c880f360 R08: 0000000000000000 R09: 1ffff10039101e4f
R10: 0000000000000000 R11: 0000000000000001 R12: ffffffff86510f40
R13: 000000000000000c R14: 0000000000000004 R15: 0000000000000011
FS: 0000000001a1a880(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020151000 CR3: 00000001c9511005 CR4: 00000000001606e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
netlink_ns_capable+0x26/0x30 net/netlink/af_netlink.c:886
get_target_net+0x9d/0x120 net/core/rtnetlink.c:1765
rtnl_dump_ifinfo+0x2e5/0xee0 net/core/rtnetlink.c:1806
netlink_dump+0x48c/0xce0 net/netlink/af_netlink.c:2222
__netlink_dump_start+0x4f0/0x6d0 net/netlink/af_netlink.c:2319
netlink_dump_start include/linux/netlink.h:214 [inline]
rtnetlink_rcv_msg+0x7f0/0xb10 net/core/rtnetlink.c:4485
netlink_rcv_skb+0x21e/0x460 net/netlink/af_netlink.c:2441
rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:4540
netlink_unicast_kernel net/netlink/af_netlink.c:1308 [inline]
netlink_unicast+0x4be/0x6a0 net/netlink/af_netlink.c:1334
netlink_sendmsg+0xa4a/0xe60 net/netlink/af_netlink.c:1897
Cc: Jiri Benc <jbenc@redhat.com>
Fixes: 79e1ad148c84 ("rtnetlink: use netnsid to query interface")
Signed-off-by: Andrei Vagin <avagin@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
int dhcp6_option_append_ia(uint8_t **buf, size_t *buflen, const DHCP6IA *ia) {
uint16_t len;
uint8_t *ia_hdr;
size_t iaid_offset, ia_buflen, ia_addrlen = 0;
DHCP6Address *addr;
int r;
assert_return(buf, -EINVAL);
assert_return(*buf, -EINVAL);
assert_return(buflen, -EINVAL);
assert_return(ia, -EINVAL);
switch (ia->type) {
case SD_DHCP6_OPTION_IA_NA:
len = DHCP6_OPTION_IA_NA_LEN;
iaid_offset = offsetof(DHCP6IA, ia_na);
break;
case SD_DHCP6_OPTION_IA_TA:
len = DHCP6_OPTION_IA_TA_LEN;
iaid_offset = offsetof(DHCP6IA, ia_ta);
break;
default:
return -EINVAL;
}
if (*buflen < offsetof(DHCP6Option, data) + len)
return -ENOBUFS;
ia_hdr = *buf;
ia_buflen = *buflen;
*buf += sizeof(DHCP6Option);
*buflen -= sizeof(DHCP6Option);
memcpy(*buf, (char*) ia + iaid_offset, len);
*buf += len;
*buflen -= len;
LIST_FOREACH(addresses, addr, ia->addresses) {
r = option_append_hdr(buf, buflen, SD_DHCP6_OPTION_IAADDR,
sizeof(addr->iaaddr));
if (r < 0)
return r;
memcpy(*buf, &addr->iaaddr, sizeof(addr->iaaddr));
*buf += sizeof(addr->iaaddr);
*buflen -= sizeof(addr->iaaddr);
ia_addrlen += sizeof(DHCP6Option) + sizeof(addr->iaaddr);
}
r = option_append_hdr(&ia_hdr, &ia_buflen, ia->type, len + ia_addrlen);
if (r < 0)
return r;
return 0;
}
|
Safe
|
[
"CWE-120"
] |
systemd
|
4dac5eaba4e419b29c97da38a8b1f82336c2c892
|
3.331487640671103e+38
| 61 |
dhcp6: make sure we have enough space for the DHCP6 option header
Fixes a vulnerability originally discovered by Felix Wilhelm from
Google.
CVE-2018-15688
LP: #1795921
https://bugzilla.redhat.com/show_bug.cgi?id=1639067
| 0 |
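The fix title is specific: space must be reserved for the option header itself, not just the payload, and the function above shows exactly that check (*buflen < offsetof(DHCP6Option, data) + len). A stripped-down sketch of the same idea with a stand-in header type:

```c
#include <stddef.h>
#include <stdint.h>

struct opt_sketch {   /* stand-in for the DHCP6 option header */
    uint16_t code;
    uint16_t len;
    uint8_t  data[];
};

/* Sketch: the remaining buffer must cover the header bytes plus the
 * payload before any write pointer is advanced. */
static int option_fits(size_t buflen, size_t payload_len)
{
    return buflen >= offsetof(struct opt_sketch, data) + payload_len;
}
```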
static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
/* FIXME:
* The LWT state is currently rebuilt for delete requests which
* results in a new bpf_prog instance. Comparing names for now.
*/
if (!a->name && !b->name)
return 0;
if (!a->name || !b->name)
return 1;
return strcmp(a->name, b->name);
}
|
Safe
|
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
|
2.120063552675384e+38
| 14 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <xmu@redhat.com>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
void SSL::set_sessionID(const opaque* sessionID)
{
memcpy(secure_.use_connection().sessionID_, sessionID, ID_LEN);
secure_.use_connection().sessionID_Set_ = true;
}
|
Safe
|
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
|
2.9541443746178032e+38
| 5 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
| 0 |
static PHP_FUNCTION(libxml_use_internal_errors)
{
xmlStructuredErrorFunc current_handler;
zend_bool use_errors=0, retval;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|b", &use_errors) == FAILURE) {
return;
}
current_handler = xmlStructuredError;
if (current_handler && current_handler == php_libxml_structured_error_handler) {
retval = 1;
} else {
retval = 0;
}
if (ZEND_NUM_ARGS() == 0) {
RETURN_BOOL(retval);
}
if (use_errors == 0) {
xmlSetStructuredErrorFunc(NULL, NULL);
if (LIBXML(error_list)) {
zend_llist_destroy(LIBXML(error_list));
efree(LIBXML(error_list));
LIBXML(error_list) = NULL;
}
} else {
xmlSetStructuredErrorFunc(NULL, php_libxml_structured_error_handler);
if (LIBXML(error_list) == NULL) {
LIBXML(error_list) = (zend_llist *) emalloc(sizeof(zend_llist));
zend_llist_init(LIBXML(error_list), sizeof(xmlError), (llist_dtor_func_t) _php_libxml_free_error, 0);
}
}
RETURN_BOOL(retval);
}
|
Safe
|
[
"CWE-200"
] |
php-src
|
8e76d0404b7f664ee6719fd98f0483f0ac4669d6
|
5.042007031672325e+36
| 36 |
Fixed external entity loading
| 0 |
static const char *GetMagickPropertyLetter(ImageInfo *image_info,
Image *image,const char letter,ExceptionInfo *exception)
{
#define WarnNoImageReturn(format,arg) \
if (image == (Image *) NULL ) { \
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, \
"NoImageForProperty",format,arg); \
return((const char *) NULL); \
}
#define WarnNoImageInfoReturn(format,arg) \
if (image_info == (ImageInfo *) NULL ) { \
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, \
"NoImageInfoForProperty",format,arg); \
return((const char *) NULL); \
}
char
value[MagickPathExtent]; /* formatted string to store as an artifact */
const char
*string; /* return a string already stored somewhere */
if ((image != (Image *) NULL) && (image->debug != MagickFalse))
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
else
if ((image_info != (ImageInfo *) NULL) &&
(image_info->debug != MagickFalse))
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s","no-images");
*value='\0'; /* formatted string */
string=(char *) NULL; /* constant string reference */
/*
Get properties that are directly defined by images.
*/
switch (letter)
{
case 'b': /* image size read in - in bytes */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatMagickSize(image->extent,MagickFalse,"B",MagickPathExtent,
value);
if (image->extent == 0)
(void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",
MagickPathExtent,value);
break;
}
case 'c': /* image comment property - empty string by default */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=GetImageProperty(image,"comment",exception);
if ( string == (const char *) NULL )
string="";
break;
}
case 'd': /* Directory component of filename */
{
WarnNoImageReturn("\"%%%c\"",letter);
GetPathComponent(image->magick_filename,HeadPath,value);
if (*value == '\0')
string="";
break;
}
case 'e': /* Filename extension (suffix) of image file */
{
WarnNoImageReturn("\"%%%c\"",letter);
GetPathComponent(image->magick_filename,ExtensionPath,value);
if (*value == '\0')
string="";
break;
}
case 'f': /* Filename without directory component */
{
WarnNoImageReturn("\"%%%c\"",letter);
GetPathComponent(image->magick_filename,TailPath,value);
if (*value == '\0')
string="";
break;
}
case 'g': /* Image geometry, canvas and offset %Wx%H+%X+%Y */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) image->page.width,(double)
image->page.height,(double) image->page.x,(double) image->page.y);
break;
}
case 'h': /* Image height (current) */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
(image->rows != 0 ? image->rows : image->magick_rows));
break;
}
case 'i': /* Filename last used for an image (read or write) */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=image->filename;
break;
}
case 'k': /* Number of unique colors */
{
/*
FUTURE: ensure this does not generate the formatted comment!
*/
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
GetNumberColors(image,(FILE *) NULL,exception));
break;
}
case 'l': /* Image label property - empty string by default */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=GetImageProperty(image,"label",exception);
if (string == (const char *) NULL)
string="";
break;
}
case 'm': /* Image format (file magick) */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=image->magick;
break;
}
case 'n': /* Number of images in the list. */
{
if ( image != (Image *) NULL )
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
GetImageListLength(image));
else
string="0"; /* no images or scenes */
break;
}
case 'o': /* Output Filename - for delegate use only */
WarnNoImageInfoReturn("\"%%%c\"",letter);
string=image_info->filename;
break;
case 'p': /* Image index in current image list */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
GetImageIndexInList(image));
break;
}
case 'q': /* Quantum depth of image in memory */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
MAGICKCORE_QUANTUM_DEPTH);
break;
}
case 'r': /* Image storage class, colorspace, and alpha enabled. */
{
ColorspaceType
colorspace;
WarnNoImageReturn("\"%%%c\"",letter);
colorspace=image->colorspace;
if (SetImageGray(image,exception) != MagickFalse)
colorspace=GRAYColorspace; /* FUTURE: this is IMv6 not IMv7 */
(void) FormatLocaleString(value,MagickPathExtent,"%s %s %s",
CommandOptionToMnemonic(MagickClassOptions,(ssize_t)
image->storage_class),CommandOptionToMnemonic(MagickColorspaceOptions,
(ssize_t) colorspace),image->alpha_trait != UndefinedPixelTrait ?
"Alpha" : "");
break;
}
case 's': /* Image scene number */
{
#if 0 /* this seems nonsensical -- simplifying */
if (image_info->number_scenes != 0)
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image_info->scene);
else if (image != (Image *) NULL)
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->scene);
else
string="0";
#else
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->scene);
#endif
break;
}
case 't': /* Base filename without directory or extension */
{
WarnNoImageReturn("\"%%%c\"",letter);
GetPathComponent(image->magick_filename,BasePath,value);
if (*value == '\0')
string="";
break;
}
case 'u': /* Unique filename */
{
WarnNoImageInfoReturn("\"%%%c\"",letter);
string=image_info->unique;
break;
}
case 'w': /* Image width (current) */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
(image->columns != 0 ? image->columns : image->magick_columns));
break;
}
case 'x': /* Image horizontal resolution (with units) */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",
fabs(image->resolution.x) > MagickEpsilon ? image->resolution.x : 72.0);
break;
}
case 'y': /* Image vertical resolution (with units) */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",
fabs(image->resolution.y) > MagickEpsilon ? image->resolution.y : 72.0);
break;
}
case 'z': /* Image depth as read in */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->depth);
break;
}
case 'A': /* Image alpha channel */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=CommandOptionToMnemonic(MagickPixelTraitOptions,(ssize_t)
image->alpha_trait);
break;
}
case 'C': /* Image compression method. */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=CommandOptionToMnemonic(MagickCompressOptions,(ssize_t)
image->compression);
break;
}
case 'D': /* Image dispose method. */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=CommandOptionToMnemonic(MagickDisposeOptions,(ssize_t)
image->dispose);
break;
}
case 'G': /* Image size as geometry = "%wx%h" */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20gx%.20g",(double)
image->magick_columns,(double) image->magick_rows);
break;
}
case 'H': /* layer canvas height */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->page.height);
break;
}
case 'M': /* Magick filename - filename given incl. coder & read mods */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=image->magick_filename;
break;
}
case 'O': /* layer canvas offset with sign = "+%X+%Y" */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%+ld%+ld",(long)
image->page.x,(long) image->page.y);
break;
}
case 'P': /* layer canvas page size = "%Wx%H" */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20gx%.20g",(double)
image->page.width,(double) image->page.height);
break;
}
case 'Q': /* image compression quality */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
(image->quality == 0 ? 92 : image->quality));
break;
}
case 'S': /* Number of scenes in image list. */
{
WarnNoImageInfoReturn("\"%%%c\"",letter);
#if 0 /* What is this number? -- it makes no sense - simplifying */
if (image_info->number_scenes == 0)
string="2147483647";
else if ( image != (Image *) NULL )
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image_info->scene+image_info->number_scenes);
else
string="0";
#else
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
(image_info->number_scenes == 0 ? 2147483647 :
image_info->number_scenes));
#endif
break;
}
case 'T': /* image time delay for animations */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->delay);
break;
}
case 'U': /* Image resolution units. */
{
WarnNoImageReturn("\"%%%c\"",letter);
string=CommandOptionToMnemonic(MagickResolutionOptions,(ssize_t)
image->units);
break;
}
case 'W': /* layer canvas width */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->page.width);
break;
}
case 'X': /* layer canvas X offset */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%+.20g",(double)
image->page.x);
break;
}
case 'Y': /* layer canvas Y offset */
{
WarnNoImageReturn("\"%%%c\"",letter);
(void) FormatLocaleString(value,MagickPathExtent,"%+.20g",(double)
image->page.y);
break;
}
case '%': /* percent escaped */
{
string="%";
break;
}
case '@': /* Trim bounding box, without actually Trimming! */
{
RectangleInfo
page;
WarnNoImageReturn("\"%%%c\"",letter);
page=GetImageBoundingBox(image,exception);
(void) FormatLocaleString(value,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) page.width,(double) page.height,
(double) page.x,(double)page.y);
break;
}
case '#':
{
/*
Image signature.
*/
WarnNoImageReturn("\"%%%c\"",letter);
(void) SignatureImage(image,exception);
string=GetImageProperty(image,"signature",exception);
break;
}
}
if (string != (char *) NULL)
return(string);
if (*value != '\0')
{
/*
Create a cloned copy of result.
*/
if (image != (Image *) NULL)
{
(void) SetImageArtifact(image,"get-property",value);
return(GetImageArtifact(image,"get-property"));
}
else
{
(void) SetImageOption(image_info,"get-property",value);
return(GetImageOption(image_info,"get-property"));
}
}
return((char *) NULL);
}
|
Safe
|
[
"CWE-125"
] |
ImageMagick
|
dd84447b63a71fa8c3f47071b09454efc667767b
|
3.1524397084073503e+38
| 388 |
Prevent buffer overflow (bug report from Ibrahim el-sayed)
| 0 |
static int compare_TrustDomainInfoInfoEx(struct lsa_TrustDomainInfoInfoEx *e1, struct lsa_TrustDomainInfoInfoEx *e2)
{
return strcasecmp_m(e1->netbios_name.string, e2->netbios_name.string);
}
|
Safe
|
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
|
1.2780348783411295e+38
| 4 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
| 0 |
int tls12_get_sigandhash(unsigned char *p, const EVP_PKEY *pk, const EVP_MD *md)
{
int sig_id, md_id;
if (!md)
return 0;
md_id = tls12_find_id(EVP_MD_type(md), tls12_md,
sizeof(tls12_md)/sizeof(tls12_lookup));
if (md_id == -1)
return 0;
sig_id = tls12_get_sigid(pk);
if (sig_id == -1)
return 0;
p[0] = (unsigned char)md_id;
p[1] = (unsigned char)sig_id;
return 1;
}
|
Safe
|
[] |
openssl
|
96db9023b881d7cd9f379b0c154650d6c108e9a3
|
1.809299464881051e+38
| 16 |
Add heartbeat extension bounds check.
A missing bounds check in the handling of the TLS heartbeat extension
can be used to reveal up to 64k of memory to a connected client or
server.
Thanks to Neel Mehta of Google Security for discovering this bug and to
Adam Langley <agl@chromium.org> and Bodo Moeller <bmoeller@acm.org> for
preparing the fix (CVE-2014-0160)
| 0 |
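The missing check (CVE-2014-0160, Heartbleed) lets an attacker-claimed payload length exceed the bytes actually received, so the echo reply copies adjacent heap memory back to the peer. A hedged sketch of the bound that was added (sizes are schematic, not OpenSSL's exact record layout):

```c
#include <stddef.h>

/* Sketch: the payload length claimed inside the heartbeat message
 * must fit within the received record after subtracting the
 * message's own header and minimum padding. */
static int heartbeat_payload_ok(size_t claimed, size_t record_len,
                                size_t overhead)
{
    if (record_len < overhead)
        return 0;                            /* record too short overall */
    return claimed <= record_len - overhead; /* no read past the record */
}
```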
static inline void prepare_key(unsigned flags, char *old_key, size_t old_len, char **new_key, size_t *new_len TSRMLS_DC)
{
zval zv;
INIT_PZVAL(&zv);
ZVAL_STRINGL(&zv, old_key, old_len, 1);
if (flags & PHP_HTTP_PARAMS_URLENCODED) {
prepare_urlencoded(&zv TSRMLS_CC);
}
if (flags & PHP_HTTP_PARAMS_ESCAPED) {
if (flags & PHP_HTTP_PARAMS_RFC5988) {
prepare_rfc5988(&zv TSRMLS_CC);
} else {
prepare_escaped(&zv TSRMLS_CC);
}
}
*new_key = Z_STRVAL(zv);
*new_len = Z_STRLEN(zv);
}
|
Safe
|
[
"CWE-399",
"CWE-704"
] |
ext-http
|
17137d4ab1ce81a2cee0fae842340a344ef3da83
|
3.354385047225857e+38
| 22 |
fix bug #73055
| 0 |
int test_gf2m_mod_inv(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d;
int i, j, ret = 0;
int p0[] = {163,7,6,3,0,-1};
int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
b[1]=BN_new();
c=BN_new();
d=BN_new();
BN_GF2m_arr2poly(p0, b[0]);
BN_GF2m_arr2poly(p1, b[1]);
for (i=0; i<num0; i++)
{
BN_bntest_rand(a, 512, 0, 0);
for (j=0; j < 2; j++)
{
BN_GF2m_mod_inv(c, a, b[j], ctx);
BN_GF2m_mod_mul(d, a, c, b[j], ctx);
#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
if (bp != NULL)
{
if (!results)
{
BN_print(bp,a);
BIO_puts(bp, " * ");
BN_print(bp,c);
BIO_puts(bp," - 1 % ");
BN_print(bp,b[j]);
BIO_puts(bp,"\n");
}
}
#endif
/* Test that ((1/a)*a) = 1. */
if(!BN_is_one(d))
{
fprintf(stderr,"GF(2^m) modular inversion test failed!\n");
goto err;
}
}
}
ret = 1;
err:
BN_free(a);
BN_free(b[0]);
BN_free(b[1]);
BN_free(c);
BN_free(d);
return ret;
}
|
Safe
|
[
"CWE-310"
] |
openssl
|
a7a44ba55cb4f884c6bc9ceac90072dea38e66d0
|
2.5009173352934015e+38
| 54 |
Fix for CVE-2014-3570 (with minor bn_asm.c revamp).
Reviewed-by: Emilia Kasper <emilia@openssl.org>
| 0 |
static int cmd_mv(void *data, const char *input) {
return r_syscmd_mv (input)? 1: 0;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
radare2
|
65000a7fd9eea62359e6d6714f17b94a99a82edd
|
3.432269948209233e+36
| 3 |
Fix #7723 - crash in ext2 GRUB code because of variable size array in stack
| 0 |
bool UTFstring::operator==(const UTFstring& _aStr) const
{
if ((_Data == NULL) && (_aStr._Data == NULL))
return true;
if ((_Data == NULL) || (_aStr._Data == NULL))
return false;
return wcscmp_internal(_Data, _aStr._Data);
}
|
Safe
|
[
"CWE-200",
"CWE-703"
] |
libebml
|
ababb64e0c792ad2a314245233db0833ba12036b
|
3.1799937258049283e+38
| 8 |
EbmlUnicodeString: don't read beyond end of string
The conversion from a UTF-8 encoded string into a wchar_t one was
reading from beyond the end of the source buffer if the length indicated
by a UTF-8 character's first byte exceeds the number of bytes actually
present afterwards.
Fixes the issue reported as Cisco TALOS-CAN-0036.
| 0 |
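The description is precise: the byte count implied by a UTF-8 lead byte must not exceed the bytes remaining in the source buffer. A self-contained sketch of that validation (independent of libebml's UTFstring internals):

```c
#include <stddef.h>

/* Sketch: derive the sequence length from the lead byte, then check
 * that many bytes actually remain before decoding the code point. */
static int utf8_sequence_fits(unsigned char lead, size_t remaining)
{
    size_t need = lead < 0x80           ? 1  /* ASCII */
                : (lead & 0xE0) == 0xC0 ? 2
                : (lead & 0xF0) == 0xE0 ? 3
                : (lead & 0xF8) == 0xF0 ? 4
                : 0;                         /* invalid or continuation byte */
    return need != 0 && need <= remaining;
}
```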
bool Chapters::Atom::ExpandDisplaysArray() {
if (m_displays_size > m_displays_count)
return true; // nothing else to do
const int size = (m_displays_size == 0) ? 1 : 2 * m_displays_size;
Display* const displays = new (std::nothrow) Display[size];
if (displays == NULL)
return false;
for (int idx = 0; idx < m_displays_count; ++idx) {
m_displays[idx].ShallowCopy(displays[idx]);
}
delete[] m_displays;
m_displays = displays;
m_displays_size = size;
return true;
}
|
Safe
|
[
"CWE-20"
] |
libvpx
|
34d54b04e98dd0bac32e9aab0fbda0bf501bc742
|
1.732962994086885e+37
| 21 |
update libwebm to libwebm-1.0.0.27-358-gdbf1d10
changelog:
https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10
Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3
| 0 |
ipcp_bits_lattice::meet_with_1 (widest_int value, widest_int mask,
unsigned precision)
{
gcc_assert (constant_p ());
widest_int old_mask = m_mask;
m_mask = (m_mask | mask) | (m_value ^ value);
if (wi::sext (m_mask, precision) == -1)
return set_to_bottom ();
return m_mask != old_mask;
}
|
Safe
|
[
"CWE-20"
] |
gcc
|
a09ccc22459c565814f79f96586fe4ad083fe4eb
|
3.079482904238646e+38
| 13 |
Avoid segfault when doing IPA-VRP but not IPA-CP (PR 93015)
2019-12-21 Martin Jambor <mjambor@suse.cz>
PR ipa/93015
* ipa-cp.c (ipcp_store_vr_results): Check that info exists
testsuite/
* gcc.dg/lto/pr93015_0.c: New test.
From-SVN: r279695
| 0 |
sftp_dir sftp_opendir(sftp_session sftp, const char *path){
sftp_message msg = NULL;
sftp_file file = NULL;
sftp_dir dir = NULL;
sftp_status_message status;
ssh_string path_s;
ssh_buffer payload;
uint32_t id;
payload = ssh_buffer_new();
if (payload == NULL) {
ssh_set_error_oom(sftp->session);
return NULL;
}
path_s = ssh_string_from_char(path);
if (path_s == NULL) {
ssh_set_error_oom(sftp->session);
ssh_buffer_free(payload);
return NULL;
}
id = sftp_get_new_id(sftp);
if (buffer_add_u32(payload, id) < 0 ||
buffer_add_ssh_string(payload, path_s) < 0) {
ssh_set_error_oom(sftp->session);
ssh_buffer_free(payload);
ssh_string_free(path_s);
return NULL;
}
ssh_string_free(path_s);
if (sftp_packet_write(sftp, SSH_FXP_OPENDIR, payload) < 0) {
ssh_buffer_free(payload);
return NULL;
}
ssh_buffer_free(payload);
while (msg == NULL) {
if (sftp_read_and_dispatch(sftp) < 0) {
/* something nasty has happened */
return NULL;
}
msg = sftp_dequeue(sftp, id);
}
switch (msg->packet_type) {
case SSH_FXP_STATUS:
status = parse_status_msg(msg);
sftp_message_free(msg);
if (status == NULL) {
return NULL;
}
sftp_set_error(sftp, status->status);
ssh_set_error(sftp->session, SSH_REQUEST_DENIED,
"SFTP server: %s", status->errormsg);
status_msg_free(status);
return NULL;
case SSH_FXP_HANDLE:
file = parse_handle_msg(msg);
sftp_message_free(msg);
if (file != NULL) {
dir = malloc(sizeof(struct sftp_dir_struct));
if (dir == NULL) {
ssh_set_error_oom(sftp->session);
return NULL;
}
ZERO_STRUCTP(dir);
dir->sftp = sftp;
dir->name = strdup(path);
if (dir->name == NULL) {
SAFE_FREE(dir);
SAFE_FREE(file);
return NULL;
}
dir->handle = file->handle;
SAFE_FREE(file);
}
return dir;
default:
ssh_set_error(sftp->session, SSH_FATAL,
"Received message %d during opendir!", msg->packet_type);
sftp_message_free(msg);
}
return NULL;
}
|
Safe
|
[] |
libssh
|
4d8420f3282ed07fc99fc5e930c17df27ef1e9b2
|
8.68501502260181e+37
| 88 |
sftp: Fix bug in sftp_mkdir not returning on error.
resolves: #84
(cherry picked from commit a92c97b2e17715c1b3cdd693d14af6c3311d8e44)
| 0 |
static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
struct io_sq_data *sq = NULL;
bool has_lock;
int i;
/*
* Avoid ABBA deadlock between the seq lock and the io_uring mutex,
* since fdinfo case grabs it in the opposite direction of normal use
* cases. If we fail to get the lock, we just don't iterate any
* structures that could be going away outside the io_uring mutex.
*/
has_lock = mutex_trylock(&ctx->uring_lock);
if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
sq = ctx->sq_data;
if (!sq->thread)
sq = NULL;
}
seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct file *f = *io_fixed_file_slot(ctx->file_data, i);
if (f)
seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
else
seq_printf(m, "%5u: <none>\n", i);
}
seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
(unsigned int) buf->len);
}
if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
seq_printf(m, "Personalities:\n");
idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
}
seq_printf(m, "PollList:\n");
spin_lock_irq(&ctx->completion_lock);
for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
struct hlist_head *list = &ctx->cancel_hash[i];
struct io_kiocb *req;
hlist_for_each_entry(req, list, hash_node)
seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
req->task->task_works != NULL);
}
spin_unlock_irq(&ctx->completion_lock);
if (has_lock)
mutex_unlock(&ctx->uring_lock);
}
|
Safe
|
[
"CWE-667"
] |
linux
|
3ebba796fa251d042be42b929a2d916ee5c34a49
|
1.3079030687414813e+38
| 56 |
io_uring: ensure that SQPOLL thread is started for exit
If we create it in a disabled state because IORING_SETUP_R_DISABLED is
set on ring creation, we need to ensure that we've kicked the thread if
we're exiting before it's been explicitly disabled. Otherwise we can run
into a deadlock where exit is waiting to park the SQPOLL thread, but the
SQPOLL thread itself is waiting to get a signal to start.
That results in the below trace of both tasks hung, waiting on each other:
INFO: task syz-executor458:8401 blocked for more than 143 seconds.
Not tainted 5.11.0-next-20210226-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread_park fs/io_uring.c:7115 [inline]
io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103
io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745
__io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840
io_uring_files_cancel include/linux/io_uring.h:47 [inline]
do_exit+0x299/0x2a60 kernel/exit.c:780
do_group_exit+0x125/0x310 kernel/exit.c:922
__do_sys_exit_group kernel/exit.c:933 [inline]
__se_sys_exit_group kernel/exit.c:931 [inline]
__x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x43e899
RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7
RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899
RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000
RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000
R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0
R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001
INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds.
task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294
INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds.
Reported-by: syzbot+fb5458330b4442f2090d@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
int Http2Session::OnNghttpError(nghttp2_session* handle,
const char* message,
size_t len,
void* user_data) {
// Unfortunately, this is currently the only way for us to know if
// the session errored because the peer is not an http2 peer.
Http2Session* session = static_cast<Http2Session*>(user_data);
Debug(session, "Error '%.*s'", len, message);
if (strncmp(message, BAD_PEER_MESSAGE, len) == 0) {
Environment* env = session->env();
Isolate* isolate = env->isolate();
HandleScope scope(isolate);
Local<Context> context = env->context();
Context::Scope context_scope(context);
Local<Value> arg = Integer::New(isolate, NGHTTP2_ERR_PROTO);
session->MakeCallback(env->error_string(), 1, &arg);
}
return 0;
}
|
Safe
|
[
"CWE-416"
] |
node
|
7f178663ebffc82c9f8a5a1b6bf2da0c263a30ed
|
3.1744509424146992e+38
| 19 |
src: use unique_ptr for WriteWrap
This commit attempts to avoid a use-after-free error by using unique_ptr
and passing a reference to it.
CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
PR-URL: https://github.com/nodejs-private/node-private/pull/238
Reviewed-By: Michael Dawson <midawson@redhat.com>
Reviewed-By: Tobias Nießen <tniessen@tnie.de>
Reviewed-By: Richard Lau <rlau@redhat.com>
| 0 |
static int __init early_coherent_pool(char *p)
{
atomic_pool_size = memparse(p, &p);
return 0;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
6829e274a623187c24f7cfc0e3d35f25d087fcc5
|
9.318828501641325e+37
| 5 |
arm64: dma-mapping: always clear allocated buffers
Buffers allocated by dma_alloc_coherent() are always zeroed on Alpha,
ARM (32bit), MIPS, PowerPC, x86/x86_64 and probably other architectures.
It turned out that some drivers rely on this 'feature'. Allocated buffer
might be also exposed to userspace with dma_mmap() call, so clearing it
is desired from security point of view to avoid exposing random memory
to userspace. This patch unifies dma_alloc_coherent() behavior on ARM64
architecture with other implementations by unconditionally zeroing
allocated buffer.
Cc: <stable@vger.kernel.org> # v3.14+
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
| 0 |
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
const struct timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 new_tp;
if (!kc || !kc->clock_set)
return -EINVAL;
if (get_timespec64(&new_tp, tp))
return -EFAULT;
return kc->clock_set(which_clock, &new_tp);
}
|
Safe
|
[
"CWE-125"
] |
linux
|
cef31d9af908243421258f1df35a4a644604efbe
|
1.2955106141012578e+38
| 14 |
posix-timer: Properly check sigevent->sigev_notify
timer_create() specifies via sigevent->sigev_notify the signal delivery for
the new timer. The valid modes are SIGEV_NONE, SIGEV_SIGNAL, SIGEV_THREAD
and (SIGEV_SIGNAL | SIGEV_THREAD_ID).
The sanity check in good_sigevent() is only checking the valid combination
for the SIGEV_THREAD_ID bit, i.e. SIGEV_SIGNAL, but if SIGEV_THREAD_ID is
not set it accepts any random value.
This has no real effects on the posix timer and signal delivery code, but
it affects show_timer() which handles the output of /proc/$PID/timers. That
function uses a string array to pretty print sigev_notify. The access to
that array has no bound checks, so random sigev_notify cause access beyond
the array bounds.
Add proper checks for the valid notify modes and remove the SIGEV_THREAD_ID
masking from various code pathes as SIGEV_NONE can never be set in
combination with SIGEV_THREAD_ID.
Reported-by: Eric Biggers <ebiggers3@gmail.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Reported-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: John Stultz <john.stultz@linaro.org>
Cc: stable@vger.kernel.org
| 0 |
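A sketch of the stricter validation this message describes, in the shape of the kernel's good_sigevent() check; lookup_task() is a hypothetical stand-in for the pid/thread-group lookup:
switch (event->sigev_notify) {
case SIGEV_NONE:
    break;
case SIGEV_SIGNAL | SIGEV_THREAD_ID:
    if (!lookup_task(event->sigev_notify_thread_id))
        return NULL;            /* thread id must resolve */
    /* fall through: the signal number must be valid too */
case SIGEV_SIGNAL:
case SIGEV_THREAD:
    if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
        return NULL;
    break;
default:
    return NULL;                /* random sigev_notify values rejected */
}
With only the documented modes accepted, show_timer()'s string-array lookup can no longer be indexed out of bounds.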
ptaGetArrays(PTA *pta,
NUMA **pnax,
NUMA **pnay)
{
l_int32 i, n;
NUMA *nax, *nay;
PROCNAME("ptaGetArrays");
if (!pnax && !pnay)
return ERROR_INT("no output requested", procName, 1);
if (pnax) *pnax = NULL;
if (pnay) *pnay = NULL;
if (!pta)
return ERROR_INT("pta not defined", procName, 1);
if ((n = ptaGetCount(pta)) == 0)
return ERROR_INT("pta is empty", procName, 1);
if (pnax) {
if ((nax = numaCreate(n)) == NULL)
return ERROR_INT("nax not made", procName, 1);
*pnax = nax;
for (i = 0; i < n; i++)
nax->array[i] = pta->x[i];
nax->n = n;
}
if (pnay) {
if ((nay = numaCreate(n)) == NULL)
return ERROR_INT("nay not made", procName, 1);
*pnay = nay;
for (i = 0; i < n; i++)
nay->array[i] = pta->y[i];
nay->n = n;
}
return 0;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
leptonica
|
ee301cb2029db8a6289c5295daa42bba7715e99a
|
5.43861596674902e+37
| 36 |
Security fixes: expect final changes for release 1.75.3.
* Fixed a debian security issue with fscanf() reading a string with
possible buffer overflow.
* There were also a few similar situations with sscanf().
| 0 |
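The fscanf()/sscanf() fixes mentioned amount to bounding every %s conversion with an explicit width; a self-contained sketch:
#include <stdio.h>

int read_token(FILE *fp)
{
    char buf[64];
    /* "%63s" leaves room for the terminating NUL; a bare "%s" writes
     * as many bytes as the input supplies and can overflow buf. */
    if (fscanf(fp, "%63s", buf) != 1)
        return -1;
    printf("token: %s\n", buf);
    return 0;
}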
static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
{
struct mlx5_fw_reporter_ctx fw_reporter_ctx;
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
health = container_of(work, struct mlx5_core_health, fatal_report_work);
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
return;
}
fw_reporter_ctx.err_synd = health->synd;
fw_reporter_ctx.miss_counter = health->miss_counter;
devlink_health_report(health->fw_fatal_reporter,
"FW fatal error reported", &fw_reporter_ctx);
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
c7ed6d0183d5ea9bc31bcaeeba4070bd62546471
|
2.0480288491950175e+38
| 22 |
net/mlx5: fix memory leak in mlx5_fw_fatal_reporter_dump
In mlx5_fw_fatal_reporter_dump, if mlx5_crdump_collect fails, the
allocated memory for cr_data must be released; otherwise there will be
a memory leak. To fix this, this commit changes the return instruction
into goto-based error handling.
Fixes: 9b1f29823605 ("net/mlx5: Add support for FW fatal reporter dump")
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
| 0 |
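The leak pattern this message describes, sketched generically (names are illustrative, not the mlx5 sources): an early return that skips the cleanup label leaks the buffer, so the fix routes the error through goto.
#include <stdlib.h>

int collect_dump(struct device_ctx *dev)   /* hypothetical context type */
{
    int err;
    char *cr_data = malloc(CR_DUMP_SIZE);  /* hypothetical size constant */
    if (!cr_data)
        return -1;
    err = crdump_collect(dev, cr_data);    /* hypothetical collector */
    if (err)
        goto free_data;    /* was "return err;" -- leaked cr_data */
    err = publish_dump(cr_data);
free_data:
    free(cr_data);
    return err;
}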
rs_filter_init(RSFilter *self)
{
RS_DEBUG(FILTERS, "rs_filter_init(%p)", self);
self->previous = NULL;
self->next_filters = NULL;
self->enabled = TRUE;
}
|
Safe
|
[
"CWE-59"
] |
rawstudio
|
9c2cd3c93c05d009a91d84eedbb85873b0cb505d
|
7.205871628059658e+37
| 7 |
Fixes insecure use of temporary file (CVE-2014-4978).
| 0 |
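CVE-2014-4978 is a predictable temporary-file race; a minimal sketch of the standard remedy (the path template is illustrative):
#include <stdlib.h>
#include <unistd.h>

char path[] = "/tmp/rawstudio-XXXXXX";
int fd = mkstemp(path);       /* atomic O_CREAT|O_EXCL-style creation */
if (fd < 0)
    return -1;                /* no window for symlink or pre-creation attacks */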
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#ifdef CONFIG_PPC_BOOK3S_64
if (!radix_enabled())
preload_new_slb_context(start, sp);
#endif
#endif
/*
* If we exec out of a kernel thread then thread.regs will not be
* set. Do it now.
*/
if (!current->thread.regs) {
struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
current->thread.regs = regs - 1;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Clear any transactional state, we're exec()ing. The cause is
* not important as there will never be a recheckpoint so it's not
* user visible.
*/
if (MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(0);
#endif
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;
regs->xer = 0;
regs->ccr = 0;
regs->gpr[1] = sp;
/*
* We have just cleared all the nonvolatile GPRs, so make
* FULL_REGS(regs) return true. This is necessary to allow
* ptrace to examine the thread immediately after exec.
*/
regs->trap &= ~1UL;
#ifdef CONFIG_PPC32
regs->mq = 0;
regs->nip = start;
regs->msr = MSR_USER;
#else
if (!is_32bit_task()) {
unsigned long entry;
if (is_elf2_task()) {
/* Look ma, no function descriptors! */
entry = start;
/*
* Ulrich says:
* The latest iteration of the ABI requires that when
* calling a function (at its global entry point),
* the caller must ensure r12 holds the entry point
* address (so that the function can quickly
* establish addressability).
*/
regs->gpr[12] = start;
/* Make sure that's restored on entry to userspace. */
set_thread_flag(TIF_RESTOREALL);
} else {
unsigned long toc;
/* start is a relocated pointer to the function
* descriptor for the elf _start routine. The first
* entry in the function descriptor is the entry
* address of _start and the second entry is the TOC
* value we need to use.
*/
__get_user(entry, (unsigned long __user *)start);
__get_user(toc, (unsigned long __user *)start+1);
/* Check whether the e_entry function descriptor entries
* need to be relocated before we can use them.
*/
if (load_addr != 0) {
entry += load_addr;
toc += load_addr;
}
regs->gpr[2] = toc;
}
regs->nip = entry;
regs->msr = MSR_USER64;
} else {
regs->nip = start;
regs->gpr[2] = 0;
regs->msr = MSR_USER32;
}
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
current->thread.load_slb = 0;
current->thread.load_fp = 0;
memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
current->thread.vr_save_area = NULL;
current->thread.vrsave = 0;
current->thread.used_vr = 0;
current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
memset(current->thread.evr, 0, sizeof(current->thread.evr));
current->thread.acc = 0;
current->thread.spefscr = 0;
current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
current->thread.tm_tfhar = 0;
current->thread.tm_texasr = 0;
current->thread.tm_tfiar = 0;
current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
thread_pkey_regs_init(&current->thread);
}
|
Safe
|
[
"CWE-862"
] |
linux
|
8205d5d98ef7f155de211f5e2eb6ca03d95a5a60
|
2.0478864535420798e+38
| 126 |
powerpc/tm: Fix FP/VMX unavailable exceptions inside a transaction
When we take an FP unavailable exception in a transaction we have to
account for the hardware FP TM checkpointed registers being
incorrect. In this case for this process we know the current and
checkpointed FP registers must be the same (since FP wasn't used
inside the transaction) hence in the thread_struct we copy the current
FP registers to the checkpointed ones.
This copy is done in tm_reclaim_thread(). We use thread->ckpt_regs.msr
to determine if FP was on when in userspace. thread->ckpt_regs.msr
represents the state of the MSR when exiting userspace. This is setup
by check_if_tm_restore_required().
Unfortunately there is an optimisation in giveup_all() which returns
early if tsk->thread.regs->msr (via local variable `usermsr`) has
FP=VEC=VSX=SPE=0. This optimisation means that
check_if_tm_restore_required() is not called and hence
thread->ckpt_regs.msr is not updated and will contain an old value.
This can happen if due to load_fp=255 we start a userspace process
with MSR FP=1 and then we are context switched out. In this case
thread->ckpt_regs.msr will contain FP=1. If that same process is then
context switched in and load_fp overflows, MSR will have FP=0. If that
process now enters a transaction and does an FP instruction, the FP
unavailable will not update thread->ckpt_regs.msr (the bug) and MSR
FP=1 will be retained in thread->ckpt_regs.msr. tm_reclaim_thread()
will then not perform the required memcpy and the checkpointed FP regs
in the thread struct will contain the wrong values.
The code path for this happening is:
Userspace: Kernel
Start userspace
with MSR FP/VEC/VSX/SPE=0 TM=1
< -----
...
tbegin
bne
fp instruction
FP unavailable
---- >
fp_unavailable_tm()
tm_reclaim_current()
tm_reclaim_thread()
giveup_all()
return early since FP/VMX/VSX=0
/* ckpt MSR not updated (Incorrect) */
tm_reclaim()
/* thread_struct ckpt FP regs contain junk (OK) */
/* Sees ckpt MSR FP=1 (Incorrect) */
no memcpy() performed
/* thread_struct ckpt FP regs not fixed (Incorrect) */
tm_recheckpoint()
/* Put junk in hardware checkpoint FP regs */
....
< -----
Return to userspace
with MSR TM=1 FP=1
with junk in the FP TM checkpoint
TM rollback
reads FP junk
This is a data integrity problem for the current process as the FP
registers are corrupted. It's also a security problem as the FP
registers from one process may be leaked to another.
This patch moves up check_if_tm_restore_required() in giveup_all() to
ensure thread->ckpt_regs.msr is updated correctly.
A simple testcase to replicate this will be posted to
tools/testing/selftests/powerpc/tm/tm-poison.c
Similarly for VMX.
This fixes CVE-2019-15030.
Fixes: f48e91e87e67 ("powerpc/tm: Fix FP and VMX register corruption")
Cc: stable@vger.kernel.org # 4.12+
Signed-off-by: Gustavo Romero <gromero@linux.vnet.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190904045529.23002-1-gromero@linux.vnet.ibm.com
| 0 |
Field_time_with_dec(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg,
const LEX_CSTRING *field_name_arg,
uint dec_arg)
:Field_time(ptr_arg, MIN_TIME_WIDTH + dec_arg + MY_TEST(dec_arg),
null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg),
dec(dec_arg)
{
DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
}
|
Safe
|
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
|
4.4199688137664265e+36
| 10 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter a prematurely freed item.
When a table is reopened from the cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <serg@mariadb.org>
| 0 |
print_ipcp_config_options(netdissect_options *ndo,
const u_char *p, int length)
{
int len, opt;
u_int compproto, ipcomp_subopttotallen, ipcomp_subopt, ipcomp_suboptlen;
if (length < 2)
return 0;
ND_TCHECK2(*p, 2);
len = p[1];
opt = p[0];
if (length < len)
return 0;
if (len < 2) {
ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)",
tok2str(ipcpopt_values,"unknown",opt),
opt,
len));
return 0;
}
ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u",
tok2str(ipcpopt_values,"unknown",opt),
opt,
len));
switch (opt) {
case IPCPOPT_2ADDR: /* deprecated */
if (len != 10) {
ND_PRINT((ndo, " (length bogus, should be = 10)"));
return len;
}
ND_TCHECK2(*(p + 6), 4);
ND_PRINT((ndo, ": src %s, dst %s",
ipaddr_string(ndo, p + 2),
ipaddr_string(ndo, p + 6)));
break;
case IPCPOPT_IPCOMP:
if (len < 4) {
ND_PRINT((ndo, " (length bogus, should be >= 4)"));
return 0;
}
ND_TCHECK_16BITS(p+2);
compproto = EXTRACT_16BITS(p+2);
ND_PRINT((ndo, ": %s (0x%02x):",
tok2str(ipcpopt_compproto_values, "Unknown", compproto),
compproto));
switch (compproto) {
case PPP_VJC:
/* XXX: VJ-Comp parameters should be decoded */
break;
case IPCPOPT_IPCOMP_HDRCOMP:
if (len < IPCPOPT_IPCOMP_MINLEN) {
ND_PRINT((ndo, " (length bogus, should be >= %u)",
IPCPOPT_IPCOMP_MINLEN));
return 0;
}
ND_TCHECK2(*(p + 2), IPCPOPT_IPCOMP_MINLEN);
ND_PRINT((ndo, "\n\t TCP Space %u, non-TCP Space %u" \
", maxPeriod %u, maxTime %u, maxHdr %u",
EXTRACT_16BITS(p+4),
EXTRACT_16BITS(p+6),
EXTRACT_16BITS(p+8),
EXTRACT_16BITS(p+10),
EXTRACT_16BITS(p+12)));
/* suboptions present ? */
if (len > IPCPOPT_IPCOMP_MINLEN) {
ipcomp_subopttotallen = len - IPCPOPT_IPCOMP_MINLEN;
p += IPCPOPT_IPCOMP_MINLEN;
ND_PRINT((ndo, "\n\t Suboptions, length %u", ipcomp_subopttotallen));
while (ipcomp_subopttotallen >= 2) {
ND_TCHECK2(*p, 2);
ipcomp_subopt = *p;
ipcomp_suboptlen = *(p+1);
/* sanity check */
if (ipcomp_subopt == 0 ||
ipcomp_suboptlen == 0 )
break;
/* XXX: just display the suboptions for now */
ND_PRINT((ndo, "\n\t\t%s Suboption #%u, length %u",
tok2str(ipcpopt_compproto_subopt_values,
"Unknown",
ipcomp_subopt),
ipcomp_subopt,
ipcomp_suboptlen));
ipcomp_subopttotallen -= ipcomp_suboptlen;
p += ipcomp_suboptlen;
}
}
break;
default:
break;
}
break;
case IPCPOPT_ADDR: /* those options share the same format - fall through */
case IPCPOPT_MOBILE4:
case IPCPOPT_PRIDNS:
case IPCPOPT_PRINBNS:
case IPCPOPT_SECDNS:
case IPCPOPT_SECNBNS:
if (len != 6) {
ND_PRINT((ndo, " (length bogus, should be = 6)"));
return 0;
}
ND_TCHECK2(*(p + 2), 4);
ND_PRINT((ndo, ": %s", ipaddr_string(ndo, p + 2)));
break;
default:
/*
* Unknown option; dump it as raw bytes now if we're
* not going to do so below.
*/
if (ndo->ndo_vflag < 2)
print_unknown_data(ndo, &p[2], "\n\t ", len - 2);
break;
}
if (ndo->ndo_vflag > 1)
print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */
return len;
trunc:
ND_PRINT((ndo, "[|ipcp]"));
return 0;
}
|
Safe
|
[
"CWE-703",
"CWE-770"
] |
tcpdump
|
32027e199368dad9508965aae8cd8de5b6ab5231
|
2.9554712950062275e+38
| 134 |
PPP: When un-escaping, don't allocate a too-large buffer.
The buffer should be big enough to hold the captured data, but it
doesn't need to be big enough to hold the entire on-the-network packet,
if we haven't captured all of it.
(backported from commit e4add0b010ed6f2180dcb05a13026242ed935334)
| 0 |
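A sketch of the sizing rule the backport describes, with assumed tcpdump-style names (length = on-wire length, caplen = bytes actually captured):
u_int alloc_len = (caplen < length) ? caplen : length;
u_char *buf = (u_char *)malloc(alloc_len);   /* sized by captured data only */
if (buf == NULL)
    return;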
static void tcp_mark_lost_retrans(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int cnt = 0;
u32 new_low_seq = tp->snd_nxt;
u32 received_upto = tcp_highest_sack_seq(tp);
if (!tcp_is_fack(tp) || !tp->retrans_out ||
!after(received_upto, tp->lost_retrans_low) ||
icsk->icsk_ca_state != TCP_CA_Recovery)
return;
tcp_for_write_queue(skb, sk) {
u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
if (skb == tcp_send_head(sk))
break;
if (cnt == tp->retrans_out)
break;
if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
continue;
if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
continue;
/* TODO: We would like to get rid of tcp_is_fack(tp) only
* constraint here (see above) but figuring out that at
* least tp->reordering SACK blocks reside between ack_seq
* and received_upto is not an easy task to do cheaply with
* the available datastructures.
*
* Whether FACK should check here for tp->reordering segs
* in-between one could argue for either way (it would be
* rather simple to implement as we could count fack_count
* during the walk and do tp->fackets_out - fack_count).
*/
if (after(received_upto, ack_seq)) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
tcp_skb_mark_lost_uncond_verify(tp, skb);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
} else {
if (before(ack_seq, new_low_seq))
new_low_seq = ack_seq;
cnt += tcp_skb_pcount(skb);
}
}
if (tp->retrans_out)
tp->lost_retrans_low = new_low_seq;
}
|
Safe
|
[] |
net-next
|
fdf5af0daf8019cec2396cdef8fb042d80fe71fa
|
1.7842250790244198e+38
| 54 |
tcp: drop SYN+FIN messages
Denys Fedoryshchenko reported that SYN+FIN attacks were bringing his
linux machines to their limits.
Don't call conn_request() if the TCP flags include both the SYN and FIN flags
Reported-by: Denys Fedoryshchenko <denys@visp.net.lb>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
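A sketch of where the described check lands: in the LISTEN-state SYN handling, a segment carrying both SYN and FIN is discarded before conn_request() ever runs (control flow is illustrative, not the verbatim patch):
case TCP_LISTEN:
    if (th->syn) {
        if (th->fin)
            goto discard;     /* SYN+FIN is invalid; drop it here */
        /* ... otherwise proceed to conn_request() ... */
    }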
parse_wbxml_tag_defined (proto_tree *tree, tvbuff_t *tvb, guint32 offset,
guint32 str_tbl, guint8 *level, guint8 *codepage_stag, guint8 *codepage_attr,
const wbxml_decoding *map)
{
guint32 tvb_len = tvb_reported_length (tvb);
guint32 off = offset;
guint32 len;
guint str_len;
guint32 ent;
guint32 idx;
guint8 peek;
guint32 tag_len; /* Length of the index (uintvar) from a LITERAL tag */
guint8 tag_save_known = 0; /* Will contain peek & 0x3F (tag identity) */
guint8 tag_new_known = 0; /* Will contain peek & 0x3F (tag identity) */
const char *tag_save_literal; /* Will contain the LITERAL tag identity */
const char *tag_new_literal; /* Will contain the LITERAL tag identity */
guint8 parsing_tag_content = FALSE; /* Are we parsing content from a
tag with content: <x>Content</x>
The initial state is FALSE.
This state will trigger recursion. */
tag_save_literal = NULL; /* Prevents compiler warning */
DebugLog(("parse_wbxml_tag_defined (level = %u, offset = %u)\n", *level, offset));
while (off < tvb_len) {
peek = tvb_get_guint8 (tvb, off);
DebugLog(("STAG: (top of while) level = %3u, peek = 0x%02X, off = %u, tvb_len = %u\n", *level, peek, off, tvb_len));
if ((peek & 0x3F) < 4) switch (peek) { /* Global tokens in state = STAG
but not the LITERAL tokens */
case 0x00: /* SWITCH_PAGE */
*codepage_stag = tvb_get_guint8 (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 2,
" | Tag | T -->%3d "
"| SWITCH_PAGE (Tag code page) "
"|",
*codepage_stag);
off += 2;
break;
case 0x01: /* END: only possible for Tag with Content */
if (tag_save_known) { /* Known TAG */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| END (Known Tag 0x%02X) "
"| %s</%s>",
*level, *codepage_stag,
tag_save_known, Indent (*level),
tag_save_literal); /* We already looked it up! */
} else { /* Literal TAG */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| END (Literal Tag) "
"| %s</%s>",
*level, *codepage_stag, Indent (*level),
tag_save_literal ? tag_save_literal : "");
}
(*level)--;
off++;
/* Reset code page: not needed as return from recursion */
DebugLog(("STAG: level = %u, Return: len = %u\n", *level, off - offset));
return (off - offset);
case 0x02: /* ENTITY */
ent = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| ENTITY "
"| %s'&#%u;'",
*level, *codepage_stag, Indent (*level), ent);
off += 1+len;
break;
case 0x03: /* STR_I */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| STR_I (Inline string) "
"| %s\'%s\'",
*level, *codepage_stag, Indent(*level),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
case 0x40: /* EXT_I_0 */
case 0x41: /* EXT_I_1 */
case 0x42: /* EXT_I_2 */
/* Extension tokens */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| EXT_I_%1x (Extension Token) "
"| %s(%s: \'%s\')",
*level, *codepage_stag,
peek & 0x0f, Indent (*level),
map_token (map->global, 0, peek),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
case 0x43: /* PI */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| PI (XML Processing Instruction) "
"| %s<?xml",
*level, *codepage_stag, Indent (*level));
len = parse_wbxml_attribute_list_defined (tree, tvb, off,
str_tbl, *level, codepage_attr, map);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n", *level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (PI) "
"| %s?>",
*level, *codepage_stag, Indent (*level));
break;
case 0x80: /* EXT_T_0 */
case 0x81: /* EXT_T_1 */
case 0x82: /* EXT_T_2 */
/* Extension tokens */
idx = tvb_get_guintvar (tvb, off+1, &len);
{ char *s;
if (map->ext_t[peek & 0x03])
s = (map->ext_t[peek & 0x03])(tvb, idx, str_tbl);
else
s = wmem_strdup_printf(wmem_packet_scope(), "EXT_T_%1x (%s)", peek & 0x03,
map_token (map->global, 0, peek));
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| EXT_T_%1x (Extension Token) "
"| %s%s",
*level, *codepage_stag, peek & 0x0f, Indent (*level),
s);
}
off += 1+len;
break;
case 0x83: /* STR_T */
idx = tvb_get_guintvar (tvb, off+1, &len);
str_len = tvb_strsize (tvb, str_tbl+idx);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| STR_T (Tableref string) "
"| %s\'%s\'",
*level, *codepage_stag, Indent (*level),
tvb_format_text (tvb, str_tbl+idx, str_len-1));
off += 1+len;
break;
case 0xC0: /* EXT_0 */
case 0xC1: /* EXT_1 */
case 0xC2: /* EXT_2 */
/* Extension tokens */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| EXT_%1x (Extension Token) "
"| %s(%s)",
*level, *codepage_stag, peek & 0x0f, Indent (*level),
map_token (map->global, 0, peek));
off++;
break;
case 0xC3: /* OPAQUE - WBXML 1.1 and newer */
if (tvb_get_guint8 (tvb, 0)) { /* WBXML 1.x (x > 0) */
char *str;
if (tag_save_known) { /* Known tag */
if (map->opaque_binary_tag) {
str = map->opaque_binary_tag(tvb, off + 1,
tag_save_known, *codepage_stag, &len);
} else {
str = default_opaque_binary_tag(tvb, off + 1,
tag_save_known, *codepage_stag, &len);
}
} else { /* LITERAL tag */
if (map->opaque_literal_tag) {
str = map->opaque_literal_tag(tvb, off + 1,
tag_save_literal, *codepage_stag, &len);
} else {
str = default_opaque_literal_tag(tvb, off + 1,
tag_save_literal, *codepage_stag, &len);
}
}
proto_tree_add_text (tree, tvb, off, 1 + len,
" %3d | Tag | T %3d "
"| OPAQUE (Opaque data) "
"| %s%s",
*level, *codepage_stag, Indent (*level), str);
off += 1 + len;
} else { /* WBXML 1.0 - RESERVED_2 token (invalid) */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| RESERVED_2 (Invalid Token!) "
"| WBXML 1.0 parsing stops here.",
*level, *codepage_stag);
/* Stop processing as it is impossible to parse now */
off = tvb_len;
DebugLog(("STAG: level = %u, Return: len = %u\n", *level, off - offset));
return (off - offset);
}
break;
/* No default clause, as all cases have been treated */
} else { /* LITERAL or Known TAG */
/* We must store the initial tag, and also retrieve the new tag.
* For efficiency reasons, we store the literal tag representation
* for known tags too, so we can easily close the tag without the
* need of a new lookup and avoiding storage of token codepage.
*
* There are 4 possibilities:
*
* 1. Known tag followed by a known tag
* 2. Known tag followed by a LITERAL tag
* 3. LITERAL tag followed by Known tag
* 4. LITERAL tag followed by LITERAL tag
*/
/* Store the new tag */
tag_len = 0;
if ((peek & 0x3F) == 4) { /* LITERAL */
DebugLog(("STAG: LITERAL tag (peek = 0x%02X, off = %u) - TableRef follows!\n", peek, off));
idx = tvb_get_guintvar (tvb, off+1, &tag_len);
str_len = tvb_strsize (tvb, str_tbl+idx);
tag_new_literal = (const gchar*)tvb_get_ptr (tvb, str_tbl+idx, str_len);
tag_new_known = 0; /* invalidate known tag_new */
} else { /* Known tag */
tag_new_known = peek & 0x3F;
tag_new_literal = map_token (map->tags, *codepage_stag,
tag_new_known);
/* Stored looked up tag name string */
}
/* Parsing of TAG starts HERE */
if (peek & 0x40) { /* Content present */
/* Content follows
* [!] An explicit END token is expected in these cases!
* ==> Recursion possible if we encounter a tag with content;
* recursion will return at the explicit END token.
*/
if (parsing_tag_content) { /* Recurse */
DebugLog(("STAG: Tag in Tag - RECURSE! (off = %u)\n", off));
/* Do not process the attribute list:
* recursion will take care of it */
(*level)++;
len = parse_wbxml_tag_defined (tree, tvb, off, str_tbl,
level, codepage_stag, codepage_attr, map);
off += len;
} else { /* Now we will have content to parse */
/* Save the start tag so we can properly close it later. */
if ((peek & 0x3F) == 4) { /* Literal tag */
tag_save_literal = tag_new_literal;
tag_save_known = 0;
} else { /* Known tag */
tag_save_known = tag_new_known;
tag_save_literal = tag_new_literal;
/* The last statement avoids needless lookups */
}
/* Process the attribute list if present */
if (peek & 0x80) { /* Content and Attribute list present */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (AC) "
"| %s<%s",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_AC (Literal tag) (AC) "
"| %s<%s",
*level, *codepage_stag, Indent (*level), tag_new_literal);
off += 1 + tag_len;
}
len = parse_wbxml_attribute_list_defined (tree, tvb,
off, str_tbl, *level, codepage_attr, map);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n",
*level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (attribute list) "
"| %s>",
*level, *codepage_stag, Indent (*level));
} else { /* Content, no Attribute list */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (.C) "
"| %s<%s>",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_C (Literal Tag) (.C) "
"| %s<%s>",
*level, *codepage_stag, Indent (*level),
tag_new_literal);
off += 1 + tag_len;
}
}
/* The data that follows in the parsing process
* represents content for the opening tag
* we've just processed in the lines above.
* Next time we encounter a tag with content: recurse
*/
parsing_tag_content = TRUE;
DebugLog(("Tag in Tag - No recursion this time! (off = %u)\n", off));
}
} else { /* No Content */
DebugLog(("<Tag/> in Tag - No recursion! (off = %u)\n", off));
(*level)++;
if (peek & 0x80) { /* No Content, Attribute list present */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (A.) "
"| %s<%s",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
len = parse_wbxml_attribute_list_defined (tree, tvb,
off, str_tbl, *level, codepage_attr, map);
/* Check that there is still room in packet */
off += len;
if (off > tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n", *level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (Known Tag) "
"| %s/>",
*level, *codepage_stag, Indent (*level));
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_A (Literal Tag) (A.) "
"| %s<%s",
*level, *codepage_stag, Indent (*level), tag_new_literal);
off += 1 + tag_len;
len = parse_wbxml_attribute_list_defined (tree, tvb,
off, str_tbl, *level, codepage_attr, map);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n", *level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (Literal Tag) "
"| %s/>",
*level, *codepage_stag, Indent (*level));
}
} else { /* No Content, No Attribute list */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02x (..) "
"| %s<%s />",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL (Literal Tag) (..) "
"| %s<%s />",
*level, *codepage_stag, Indent (*level),
tag_new_literal);
off += 1 + tag_len;
}
}
(*level)--;
/* TODO: Do I have to reset code page here? */
}
} /* if (tag & 0x3F) >= 5 */
} /* while */
DebugLog(("STAG: level = %u, Return: len = %u (end of function body)\n", *level, off - offset));
return (off - offset);
}
|
Vulnerable
|
[
"CWE-399",
"CWE-119",
"CWE-787"
] |
wireshark
|
b8e0d416898bb975a02c1b55883342edc5b4c9c0
|
1.7601264850477227e+36
| 399 |
WBXML: add a basic sanity check for offset overflow
This is a naive approach that allows detecting that something went wrong,
without the need to replace all proto_tree_add_text() calls as was
done in the master-2.0 branch.
Bug: 12408
Change-Id: Ia14905005e17ae322c2fc639ad5e491fa08b0108
Reviewed-on: https://code.wireshark.org/review/15310
Reviewed-by: Michael Mann <mmann78@netscape.net>
Reviewed-by: Pascal Quantin <pascal.quantin@gmail.com>
| 1 |
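The "basic sanity check" this message refers to can be as small as verifying that the running offset keeps advancing and stays inside the buffer after each sub-parse; a sketch in the style of the function above (not the committed patch):
guint32 prev_off = off;
len = parse_wbxml_attribute_list_defined (tree, tvb, off, str_tbl,
                                          *level, codepage_attr, map);
off += len;
if (off <= prev_off || off > tvb_len)    /* wrapped or ran past the packet */
    THROW(ReportedBoundsError);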
TfLiteRegistration* Register_SUM() { return Register_SUM_REF(); }
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
|
4.779714171105605e+37
| 1 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
| 0 |
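The refactoring pattern described, sketched for a single kernel. GetInput/GetOutput and TF_LITE_ENSURE are the public TFLite helpers named in the message; the surrounding usage is illustrative.
const TfLiteTensor* input = GetInput(context, node, 0);
TF_LITE_ENSURE(context, input != NULL);    /* previously assumed non-null */
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, output != NULL);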
snmp_reset_var_buffers(netsnmp_variable_list * var)
{
while (var) {
if (var->name != var->name_loc) {
if(NULL != var->name)
free(var->name);
var->name = var->name_loc;
var->name_length = 0;
}
if (var->val.string != var->buf) {
if (NULL != var->val.string)
free(var->val.string);
var->val.string = var->buf;
var->val_len = 0;
}
var = var->next_variable;
}
}
|
Safe
|
[
"CWE-415"
] |
net-snmp
|
5f881d3bf24599b90d67a45cae7a3eb099cd71c9
|
7.772717878841087e+37
| 18 |
libsnmp, USM: Introduce a reference count in struct usmStateReference
This patch fixes https://sourceforge.net/p/net-snmp/bugs/2956/.
| 0 |
irc_server_get_prefix_char_for_mode (struct t_irc_server *server, char mode)
{
const char *prefix_chars;
int index;
if (server)
{
prefix_chars = irc_server_get_prefix_chars (server);
index = irc_server_get_prefix_mode_index (server, mode);
if (index >= 0)
return prefix_chars[index];
}
return ' ';
}
|
Safe
|
[
"CWE-120",
"CWE-787"
] |
weechat
|
40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f
|
1.2066789552597557e+38
| 15 |
irc: fix crash when a new message 005 is received with longer nick prefixes
Thanks to Stuart Nevans Locke for reporting the issue.
| 0 |
SQLiteDBManager::SQLiteDBManager() : db_(nullptr) {
sqlite3_soft_heap_limit64(1);
setDisabledTables(Flag::getValue("disable_tables"));
setEnabledTables(Flag::getValue("enable_tables"));
}
|
Safe
|
[
"CWE-77",
"CWE-295"
] |
osquery
|
c3f9a3dae22d43ed3b4f6a403cbf89da4cba7c3c
|
2.6060461096872007e+38
| 5 |
Merge pull request from GHSA-4g56-2482-x7q8
* Proposed fix for attach tables vulnerability
* Add authorizer to ATC tables and cleanups
- Add unit test for authorizer function
| 0 |
int run_diff_files(struct rev_info *revs, unsigned int option)
{
int entries, i;
int diff_unmerged_stage = revs->max_count;
int silent_on_removed = option & DIFF_SILENT_ON_REMOVED;
unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
? CE_MATCH_RACY_IS_DIRTY : 0);
char symcache[PATH_MAX];
if (diff_unmerged_stage < 0)
diff_unmerged_stage = 2;
entries = active_nr;
symcache[0] = '\0';
for (i = 0; i < entries; i++) {
struct stat st;
unsigned int oldmode, newmode;
struct cache_entry *ce = active_cache[i];
int changed;
if (DIFF_OPT_TST(&revs->diffopt, QUIET) &&
DIFF_OPT_TST(&revs->diffopt, HAS_CHANGES))
break;
if (!ce_path_match(ce, revs->prune_data))
continue;
if (ce_stage(ce)) {
struct combine_diff_path *dpath;
int num_compare_stages = 0;
size_t path_len;
path_len = ce_namelen(ce);
dpath = xmalloc(combine_diff_path_size(5, path_len));
dpath->path = (char *) &(dpath->parent[5]);
dpath->next = NULL;
dpath->len = path_len;
memcpy(dpath->path, ce->name, path_len);
dpath->path[path_len] = '\0';
hashclr(dpath->sha1);
memset(&(dpath->parent[0]), 0,
sizeof(struct combine_diff_parent)*5);
changed = check_removed(ce, &st);
if (!changed)
dpath->mode = ce_mode_from_stat(ce, st.st_mode);
else {
if (changed < 0) {
perror(ce->name);
continue;
}
if (silent_on_removed)
continue;
}
while (i < entries) {
struct cache_entry *nce = active_cache[i];
int stage;
if (strcmp(ce->name, nce->name))
break;
/* Stage #2 (ours) is the first parent,
* stage #3 (theirs) is the second.
*/
stage = ce_stage(nce);
if (2 <= stage) {
int mode = nce->ce_mode;
num_compare_stages++;
hashcpy(dpath->parent[stage-2].sha1, nce->sha1);
dpath->parent[stage-2].mode = ce_mode_from_stat(nce, mode);
dpath->parent[stage-2].status =
DIFF_STATUS_MODIFIED;
}
/* diff against the proper unmerged stage */
if (stage == diff_unmerged_stage)
ce = nce;
i++;
}
/*
* Compensate for loop update
*/
i--;
if (revs->combine_merges && num_compare_stages == 2) {
show_combined_diff(dpath, 2,
revs->dense_combined_merges,
revs);
free(dpath);
continue;
}
free(dpath);
dpath = NULL;
/*
* Show the diff for the 'ce' if we found the one
* from the desired stage.
*/
diff_unmerge(&revs->diffopt, ce->name, 0, null_sha1);
if (ce_stage(ce) != diff_unmerged_stage)
continue;
}
if (ce_uptodate(ce))
continue;
changed = check_removed(ce, &st);
if (changed) {
if (changed < 0) {
perror(ce->name);
continue;
}
if (silent_on_removed)
continue;
diff_addremove(&revs->diffopt, '-', ce->ce_mode,
ce->sha1, ce->name, NULL);
continue;
}
changed = ce_match_stat(ce, &st, ce_option);
if (!changed) {
ce_mark_uptodate(ce);
if (!DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER))
continue;
}
oldmode = ce->ce_mode;
newmode = ce_mode_from_stat(ce, st.st_mode);
diff_change(&revs->diffopt, oldmode, newmode,
ce->sha1, (changed ? null_sha1 : ce->sha1),
ce->name, NULL);
}
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
return 0;
}
|
Vulnerable
|
[
"CWE-119"
] |
git
|
fd55a19eb1d49ae54008d932a65f79cd6fda45c9
|
2.0922369214443064e+38
| 137 |
Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a stored path, it may cause
a buffer overflow and stack corruption in the diff_addremove() and
diff_change() functions when running git-diff.
Signed-off-by: Dmitry Potapov <dpotapov@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
| 1 |
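The overflow stems from the fixed char symcache[PATH_MAX] above meeting a longer stored path; a generic sketch of the remedy, sizing from the entry itself rather than a PATH_MAX guess (illustrative, not the committed patch):
size_t len = ce_namelen(ce);
char *buf = xmalloc(len + 1);   /* git's checked allocator */
memcpy(buf, ce->name, len);
buf[len] = '\0';
/* ... use buf ... */
free(buf);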
evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
int error)
{
bool shortcut_for_distinct= join_tab->shortcut_for_distinct;
ha_rows found_records=join->found_records;
COND *select_cond= join_tab->select_cond;
bool select_cond_result= TRUE;
DBUG_ENTER("evaluate_join_record");
DBUG_PRINT("enter",
("evaluate_join_record join: %p join_tab: %p"
" cond: %p error: %d alias %s",
join, join_tab, select_cond, error,
join_tab->table->alias.ptr()));
if (error > 0 || (join->thd->is_error())) // Fatal error
DBUG_RETURN(NESTED_LOOP_ERROR);
if (error < 0)
DBUG_RETURN(NESTED_LOOP_NO_MORE_ROWS);
if (join->thd->check_killed()) // Aborted by user
{
join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
}
join_tab->tracker->r_rows++;
if (select_cond)
{
select_cond_result= MY_TEST(select_cond->val_int());
/* check for errors evaluating the condition */
if (join->thd->is_error())
DBUG_RETURN(NESTED_LOOP_ERROR);
}
if (!select_cond || select_cond_result)
{
/*
There is no select condition or the attached pushed down
condition is true => a match is found.
*/
join_tab->tracker->r_rows_after_where++;
bool found= 1;
while (join_tab->first_unmatched && found)
{
/*
The while condition is always false if join_tab is not
the last inner join table of an outer join operation.
*/
JOIN_TAB *first_unmatched= join_tab->first_unmatched;
/*
Mark that a match for current outer table is found.
This activates push down conditional predicates attached
to the all inner tables of the outer join.
*/
first_unmatched->found= 1;
for (JOIN_TAB *tab= first_unmatched; tab <= join_tab; tab++)
{
/*
Check whether 'not exists' optimization can be used here.
If tab->table->reginfo.not_exists_optimize is set to true
then WHERE contains a conjunctive predicate IS NULL over
a non-nullable field of tab. When activated this predicate
will filter out all records with matches for the left part
of the outer join whose inner tables start from the
first_unmatched table and include table tab. To safely use
'not exists' optimization we have to check that the
IS NULL predicate is really activated, i.e. all guards
that wrap it are in the 'open' state.
*/
bool not_exists_opt_is_applicable=
tab->table->reginfo.not_exists_optimize;
for (JOIN_TAB *first_upper= first_unmatched->first_upper;
not_exists_opt_is_applicable && first_upper;
first_upper= first_upper->first_upper)
{
if (!first_upper->found)
not_exists_opt_is_applicable= false;
}
/* Check all predicates that has just been activated. */
/*
Actually all predicates non-guarded by first_unmatched->found
will be re-evaluated again. It could be fixed, but, probably,
it's not worth doing now.
*/
if (tab->select_cond)
{
const longlong res= tab->select_cond->val_int();
if (join->thd->is_error())
DBUG_RETURN(NESTED_LOOP_ERROR);
if (!res)
{
/* The condition attached to table tab is false */
if (tab == join_tab)
{
found= 0;
if (not_exists_opt_is_applicable)
DBUG_RETURN(NESTED_LOOP_NO_MORE_ROWS);
}
else
{
/*
Set a return point if rejected predicate is attached
not to the last table of the current nest level.
*/
join->return_tab= tab;
if (not_exists_opt_is_applicable)
DBUG_RETURN(NESTED_LOOP_NO_MORE_ROWS);
else
DBUG_RETURN(NESTED_LOOP_OK);
}
}
}
}
/*
Check whether join_tab is not the last inner table
for another embedding outer join.
*/
if ((first_unmatched= first_unmatched->first_upper) &&
first_unmatched->last_inner != join_tab)
first_unmatched= 0;
join_tab->first_unmatched= first_unmatched;
}
JOIN_TAB *return_tab= join->return_tab;
join_tab->found_match= TRUE;
if (join_tab->check_weed_out_table && found)
{
int res= join_tab->check_weed_out_table->sj_weedout_check_row(join->thd);
DBUG_PRINT("info", ("weedout_check: %d", res));
if (res == -1)
DBUG_RETURN(NESTED_LOOP_ERROR);
else if (res == 1)
found= FALSE;
}
else if (join_tab->do_firstmatch)
{
/*
We should return to the join_tab->do_firstmatch after we have
enumerated all the suffixes for current prefix row combination
*/
return_tab= join_tab->do_firstmatch;
}
/*
It was not just a return to lower loop level when one
of the newly activated predicates is evaluated as false
(See above join->return_tab= tab).
*/
join->join_examined_rows++;
DBUG_PRINT("counts", ("join->examined_rows++: %lu found: %d",
(ulong) join->join_examined_rows, (int) found));
if (found)
{
enum enum_nested_loop_state rc;
/* A match from join_tab is found for the current partial join. */
rc= (*join_tab->next_select)(join, join_tab+1, 0);
join->thd->get_stmt_da()->inc_current_row_for_warning();
if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
DBUG_RETURN(rc);
if (return_tab < join->return_tab)
join->return_tab= return_tab;
/* check for errors evaluating the condition */
if (join->thd->is_error())
DBUG_RETURN(NESTED_LOOP_ERROR);
if (join->return_tab < join_tab)
DBUG_RETURN(NESTED_LOOP_OK);
/*
Test if this was a SELECT DISTINCT query on a table that
was not in the field list; In this case we can abort if
we found a row, as no new rows can be added to the result.
*/
if (shortcut_for_distinct && found_records != join->found_records)
DBUG_RETURN(NESTED_LOOP_NO_MORE_ROWS);
DBUG_RETURN(NESTED_LOOP_OK);
}
}
else
{
/*
The condition pushed down to the table join_tab rejects all rows
with the beginning coinciding with the current partial join.
*/
join->join_examined_rows++;
}
join->thd->get_stmt_da()->inc_current_row_for_warning();
join_tab->read_record.unlock_row(join_tab);
DBUG_RETURN(NESTED_LOOP_OK);
}
|
Safe
|
[
"CWE-89"
] |
server
|
5ba77222e9fe7af8ff403816b5338b18b342053c
|
2.972057019327028e+38
| 198 |
MDEV-21028 Server crashes in Query_arena::set_query_arena upon SELECT from view
if the view has algorithm=temptable it is not updatable,
so DEFAULT() for its fields is meaningless,
and thus it's NULL or 0/'' for NOT NULL columns.
| 0 |
sys_var *find_sys_var_ex(THD *thd, const char *str, size_t length,
bool throw_error, bool locked)
{
sys_var *var;
sys_var_pluginvar *pi= NULL;
plugin_ref plugin;
DBUG_ENTER("find_sys_var_ex");
DBUG_PRINT("enter", ("var '%.*s'", (int)length, str));
if (!locked)
mysql_mutex_lock(&LOCK_plugin);
mysql_prlock_rdlock(&LOCK_system_variables_hash);
if ((var= intern_find_sys_var(str, length)) &&
(pi= var->cast_pluginvar()))
{
mysql_prlock_unlock(&LOCK_system_variables_hash);
LEX *lex= thd ? thd->lex : 0;
if (!(plugin= intern_plugin_lock(lex, plugin_int_to_ref(pi->plugin))))
var= NULL; /* failed to lock it, it must be uninstalling */
else
if (!(plugin_state(plugin) & PLUGIN_IS_READY))
{
/* initialization not completed */
var= NULL;
intern_plugin_unlock(lex, plugin);
}
}
else
mysql_prlock_unlock(&LOCK_system_variables_hash);
if (!locked)
mysql_mutex_unlock(&LOCK_plugin);
if (!throw_error && !var)
my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (int)length, (char*) str);
DBUG_RETURN(var);
}
|
Safe
|
[
"CWE-416"
] |
server
|
c05fd700970ad45735caed3a6f9930d4ce19a3bd
|
2.1533641136635583e+38
| 36 |
MDEV-26323 use-after-poison issue of MariaDB server
| 0 |
static void se_io_cb(void *context, u8 *apdu, size_t apdu_len, int err)
{
struct se_io_ctx *ctx = context;
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
kfree(ctx);
return;
}
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
NFC_CMD_SE_IO);
if (!hdr)
goto free_msg;
if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, ctx->dev_idx) ||
nla_put_u32(msg, NFC_ATTR_SE_INDEX, ctx->se_idx) ||
nla_put(msg, NFC_ATTR_SE_APDU, apdu_len, apdu))
goto nla_put_failure;
genlmsg_end(msg, hdr);
genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
kfree(ctx);
return;
nla_put_failure:
free_msg:
nlmsg_free(msg);
kfree(ctx);
return;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
385097a3675749cbc9e97c085c0e5dfe4269ca51
|
3.4349227106903056e+37
| 37 |
nfc: Ensure presence of required attributes in the deactivate_target handler
Check that the NFC_ATTR_TARGET_INDEX attributes (in addition to
NFC_ATTR_DEVICE_INDEX) are provided by the netlink client prior to
accessing them. This prevents potential unhandled NULL pointer dereference
exceptions which can be triggered by malicious user-mode programs,
if they omit one or both of these attributes.
Signed-off-by: Young Xiao <92siuyang@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
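A sketch of the presence check the message describes, at the top of the netlink handler (the attribute names come from the message; the handler shape is illustrative):
static int nfc_genl_deactivate_target(struct sk_buff *skb,
                                      struct genl_info *info)
{
    if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
        !info->attrs[NFC_ATTR_TARGET_INDEX])
        return -EINVAL;    /* reject before any nla_get_u32() dereference */
    /* ... */
}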
TEST_F(QueryPlannerTest, MaxWithoutIndex) {
runInvalidQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
}
|
Safe
|
[] |
mongo
|
ee97c0699fd55b498310996ee002328e533681a3
|
4.841566675002282e+37
| 3 |
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
| 0 |
static int sctp_setsockopt_auth_key(struct sock *sk,
char __user *optval,
int optlen)
{
struct sctp_authkey *authkey;
struct sctp_association *asoc;
int ret;
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
authkey = kmalloc(optlen, GFP_KERNEL);
if (!authkey)
return -ENOMEM;
if (copy_from_user(authkey, optval, optlen)) {
ret = -EFAULT;
goto out;
}
asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
ret = -EINVAL;
goto out;
}
ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
out:
kfree(authkey);
return ret;
}
|
Vulnerable
|
[] |
linux-2.6
|
5e739d1752aca4e8f3e794d431503bfca3162df4
|
1.138039425234339e+37
| 31 |
sctp: fix potential panics in the SCTP-AUTH API.
All of the SCTP-AUTH socket options could cause a panic
if the extension is disabled and the API is invoked.
Additionally, there were some assumptions that
certain pointers would always be valid which may not
always be the case.
This patch hardens the API and address all of the crash
scenarios.
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 1 |
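The version above validates optlen against the struct header but never checks the key length embedded in the request; a sketch of the additional bound the hardening patch describes (field names from the SCTP-AUTH API):
if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
    ret = -EINVAL;         /* declared key longer than the data copied in */
    goto out;
}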
virDomainActualNetDefValidate(const virDomainNetDef *net)
{
/* Unlike virDomainNetDefValidate(), which is a static function
* called internally to this file, virDomainActualNetDefValidate()
* is a public function that can be called from a hypervisor after
* it has completely setup the NetDef for use by a domain,
* including possibly allocating a port from the network driver
* (which could change the effective/"actual" type of the NetDef,
* thus changing what should/shouldn't be allowed by validation).
*
* This function should contain validations not specific to a
* particular hypervisor (e.g. whether or not specifying bandwidth
* is allowed for a type of interface), but *not*
* hypervisor-specific things.
*/
char macstr[VIR_MAC_STRING_BUFLEN];
virDomainNetType actualType = virDomainNetGetActualType(net);
const virNetDevVPortProfile *vport = virDomainNetGetActualVirtPortProfile(net);
const virNetDevBandwidth *bandwidth = virDomainNetGetActualBandwidth(net);
virMacAddrFormat(&net->mac, macstr);
if (virDomainNetGetActualVlan(net)) {
/* vlan configuration via libvirt is only supported for PCI
* Passthrough SR-IOV devices (hostdev or macvtap passthru
* mode) and openvswitch bridges. Otherwise log an error and
* fail
*/
if (!(actualType == VIR_DOMAIN_NET_TYPE_HOSTDEV ||
(actualType == VIR_DOMAIN_NET_TYPE_DIRECT &&
virDomainNetGetActualDirectMode(net) == VIR_NETDEV_MACVLAN_MODE_PASSTHRU) ||
(actualType == VIR_DOMAIN_NET_TYPE_BRIDGE &&
vport && vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH))) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("interface %s - vlan tag not supported for this connection type"),
macstr);
return -1;
}
}
/* bandwidth configuration via libvirt is not supported for
* hostdev network devices
*/
if (bandwidth && actualType == VIR_DOMAIN_NET_TYPE_HOSTDEV) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("interface %s - bandwidth settings are not supported "
"for hostdev interfaces"),
macstr);
return -1;
}
if (virDomainNetDefValidatePortOptions(macstr, actualType, vport,
virDomainNetGetActualPortOptionsIsolated(net)) < 0) {
return -1;
}
return 0;
}
|
Safe
|
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
|
2.852479054954224e+38
| 58 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support HTTP cookies. Since they may contain
somewhat sensitive information, we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <hhan@redhat.com>
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
| 0 |
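The cookie-formatting change itself is not part of the sample above. A toy model of the rule the message states, with hypothetical names and flag values rather than libvirt's, looks like:

```c
#include <stdio.h>

/* Hypothetical flag value; libvirt's real constant lives elsewhere. */
#define FORMAT_SECURE_SKETCH 0x1

/* Only emit sensitive fields (here, HTTP cookies) when the caller
 * passed the secure-format flag. */
static void format_disk_source_sketch(unsigned flags, const char *cookies)
{
    if ((flags & FORMAT_SECURE_SKETCH) && cookies)
        printf("  <cookies>%s</cookies>\n", cookies);
    /* otherwise the cookie element is simply omitted from the XML */
}

int main(void)
{
    format_disk_source_sketch(0, "session=abc");  /* prints nothing */
    format_disk_source_sketch(FORMAT_SECURE_SKETCH, "session=abc");
    return 0;
}
```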
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
struct numa_maps_private *numa_priv = m->private;
struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
struct vm_area_struct *vma = v;
struct numa_maps *md = &numa_priv->md;
struct file *file = vma->vm_file;
struct mm_struct *mm = vma->vm_mm;
struct mm_walk walk = {
.hugetlb_entry = gather_hugetlb_stats,
.pmd_entry = gather_pte_stats,
.private = md,
.mm = mm,
};
struct mempolicy *pol;
char buffer[64];
int nid;
if (!mm)
return 0;
/* Ensure we start with an empty set of numa_maps statistics. */
memset(md, 0, sizeof(*md));
pol = __get_vma_policy(vma, vma->vm_start);
if (pol) {
mpol_to_str(buffer, sizeof(buffer), pol);
mpol_cond_put(pol);
} else {
mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
}
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
if (file) {
seq_puts(m, " file=");
seq_path(m, &file->f_path, "\n\t= ");
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_puts(m, " heap");
} else {
pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
if (tid != 0) {
/*
* Thread stack in /proc/PID/task/TID/maps or
* the main process stack.
*/
if (!is_pid || (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack))
seq_puts(m, " stack");
else
seq_printf(m, " stack:%d", tid);
}
}
if (is_vm_hugetlb_page(vma))
seq_puts(m, " huge");
/* mmap_sem is held by m_start */
walk_page_vma(vma, &walk);
if (!md->pages)
goto out;
if (md->anon)
seq_printf(m, " anon=%lu", md->anon);
if (md->dirty)
seq_printf(m, " dirty=%lu", md->dirty);
if (md->pages != md->anon && md->pages != md->dirty)
seq_printf(m, " mapped=%lu", md->pages);
if (md->mapcount_max > 1)
seq_printf(m, " mapmax=%lu", md->mapcount_max);
if (md->swapcache)
seq_printf(m, " swapcache=%lu", md->swapcache);
if (md->active < md->pages && !is_vm_hugetlb_page(vma))
seq_printf(m, " active=%lu", md->active);
if (md->writeback)
seq_printf(m, " writeback=%lu", md->writeback);
for_each_node_state(nid, N_MEMORY)
if (md->node[nid])
seq_printf(m, " N%d=%lu", nid, md->node[nid]);
seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
seq_putc(m, '\n');
m_cache_vma(m, vma);
return 0;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
ab676b7d6fbf4b294bf198fb27ade5b0e865c7ce
|
2.3444968110382104e+38
| 94 |
pagemap: do not leak physical addresses to non-privileged userspace
As pointed out by a recent post [1] on exploiting DRAM physical imperfections,
/proc/PID/pagemap exposes sensitive information which can be used to mount
attacks.
This disallows anybody without CAP_SYS_ADMIN from reading the pagemap.
[1] http://googleprojectzero.blogspot.com/2015/03/exploiting-dram-rowhammer-bug-to-gain.html
[ Eventually we might want to do something more fine-grained, but for now
this is the simple model. - Linus ]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Mark Seaborn <mseaborn@chromium.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
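As a toy model of the access-control change (capable() is stubbed here; the real check lives in the kernel's pagemap open path):

```c
#include <stdio.h>

#define CAP_SYS_ADMIN 21

/* Stand-in for the kernel's capable(); pretend the caller lacks the cap. */
static int capable(int cap) { (void)cap; return 0; }

/* Gate the pagemap file's open() on a privilege check so unprivileged
 * readers can no longer learn physical addresses. */
static int pagemap_open_sketch(void)
{
    if (!capable(CAP_SYS_ADMIN))
        return -1;                  /* -EPERM for non-privileged users */
    return 0;
}

int main(void)
{
    printf("%d\n", pagemap_open_sketch());  /* -1 */
    return 0;
}
```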
is_mbc_ambiguous(OnigCaseFoldType flag,
const UChar** pp, const UChar* end)
{
return onigenc_mbn_is_mbc_ambiguous(ONIG_ENCODING_SJIS, flag, pp, end);
}
|
Safe
|
[
"CWE-125"
] |
oniguruma
|
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
|
2.36204912572096e+38
| 6 |
onig-5.9.2
| 0 |
check_called_node_in_look_behind(Node* node, int not)
{
int r;
r = 0;
switch (NODE_TYPE(node)) {
case NODE_LIST:
case NODE_ALT:
do {
r = check_called_node_in_look_behind(NODE_CAR(node), not);
} while (r == 0 && IS_NOT_NULL(node = NODE_CDR(node)));
break;
case NODE_QUANT:
r = check_called_node_in_look_behind(NODE_BODY(node), not);
break;
case NODE_BAG:
{
BagNode* en = BAG_(node);
if (en->type == BAG_MEMORY) {
if (NODE_IS_MARK1(node))
return 0;
else {
NODE_STATUS_ADD(node, MARK1);
r = check_called_node_in_look_behind(NODE_BODY(node), not);
NODE_STATUS_REMOVE(node, MARK1);
}
}
else {
r = check_called_node_in_look_behind(NODE_BODY(node), not);
if (r == 0 && en->type == BAG_IF_ELSE) {
if (IS_NOT_NULL(en->te.Then)) {
r = check_called_node_in_look_behind(en->te.Then, not);
if (r != 0) break;
}
if (IS_NOT_NULL(en->te.Else)) {
r = check_called_node_in_look_behind(en->te.Else, not);
}
}
}
}
break;
case NODE_ANCHOR:
if (IS_NOT_NULL(NODE_BODY(node)))
r = check_called_node_in_look_behind(NODE_BODY(node), not);
break;
case NODE_GIMMICK:
if (NODE_IS_ABSENT_WITH_SIDE_EFFECTS(node) != 0)
return 1;
break;
default:
break;
}
return r;
}
|
Safe
|
[
"CWE-787"
] |
oniguruma
|
cbe9f8bd9cfc6c3c87a60fbae58fa1a85db59df0
|
2.4489894538583636e+38
| 62 |
#207: Out-of-bounds write
| 0 |
static GList *completion_nicks_nonstrict(CHANNEL_REC *channel,
const char *nick,
const char *suffix,
const int match_case)
{
GSList *nicks, *tmp;
GList *list;
char *tnick, *str, *in, *out;
int len, str_len, tmplen;
g_return_val_if_fail(channel != NULL, NULL);
list = NULL;
/* get all nicks from current channel, strip non alnum chars,
compare again and add to completion list on matching */
len = strlen(nick);
nicks = nicklist_getnicks(channel);
str_len = 80; str = g_malloc(str_len+1);
for (tmp = nicks; tmp != NULL; tmp = tmp->next) {
NICK_REC *rec = tmp->data;
tmplen = strlen(rec->nick);
if (tmplen > str_len) {
str_len = tmplen*2;
str = g_realloc(str, str_len+1);
}
/* remove non alnum chars from nick */
in = rec->nick; out = str;
while (*in != '\0') {
if (i_isalnum(*in))
*out++ = *in;
in++;
}
*out = '\0';
/* add to list if 'cleaned' nick matches */
if ((match_case? strncmp(str, nick, len)
: g_ascii_strncasecmp(str, nick, len)) == 0) {
tnick = g_strconcat(rec->nick, suffix, NULL);
if (completion_lowercase)
ascii_strdown(tnick);
if (glist_find_icase_string(list, tnick) == NULL)
list = g_list_append(list, tnick);
else
g_free(tnick);
}
}
g_free(str);
g_slist_free(nicks);
return list;
}
|
Safe
|
[
"CWE-416"
] |
irssi
|
36564717c9f701e3a339da362ab46d220d27e0c1
|
3.2072625915581183e+38
| 57 |
Merge branch 'security' into 'master'
Security
See merge request irssi/irssi!34
(cherry picked from commit b0d9cb33cd9ef9da7c331409e8b7c57a6f3aef3f)
| 0 |
TPM2B_PUBLIC_Unmarshal(TPM2B_PUBLIC *target, BYTE **buffer, INT32 *size, BOOL allowNull)
{
TPM_RC rc = TPM_RC_SUCCESS;
INT32 startSize;
if (rc == TPM_RC_SUCCESS) {
rc = UINT16_Unmarshal(&target->size, buffer, size);
}
if (rc == TPM_RC_SUCCESS) {
if (target->size == 0) {
rc = TPM_RC_SIZE;
}
}
if (rc == TPM_RC_SUCCESS) {
startSize = *size;
}
if (rc == TPM_RC_SUCCESS) {
rc = TPMT_PUBLIC_Unmarshal(&target->publicArea, buffer, size, allowNull);
}
if (rc == TPM_RC_SUCCESS) {
if (target->size != startSize - *size) {
rc = TPM_RC_SIZE;
}
}
return rc;
}
|
Vulnerable
|
[
"CWE-787"
] |
libtpms
|
f16250b35aff6995e540143a9858c9cf0d1f9573
|
5.768582498047901e+37
| 26 |
tpm2: Reset TPM2B buffer sizes after test fails for valid buffer size
Reset the buffer size indicator in a TPM2B type of buffer after it failed
the test for the maximum buffer size it allows. This prevents having bad
buffer sizes in memory that can come to haunt us when writing the volatile
state for example.
Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
| 1 |
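The function above is the pre-fix version: when a later check fails, target->size keeps its unmarshalled value. A minimal sketch of the described fix, using illustrative types rather than libtpms', resets the size on failure:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { uint16_t size; uint8_t buffer[64]; } TPM2B_SKETCH;

/* When the unmarshalled size fails validation, zero it before
 * returning, so a stale out-of-range size can never be consumed later,
 * e.g. when volatile state is written out. */
static int unmarshal_tpm2b_sketch(TPM2B_SKETCH *t, uint16_t wire_size)
{
    t->size = wire_size;
    if (t->size > sizeof(t->buffer)) {
        t->size = 0;          /* reset on failure instead of leaving garbage */
        return -1;            /* TPM_RC_SIZE-like error */
    }
    return 0;
}

int main(void)
{
    TPM2B_SKETCH t;
    unmarshal_tpm2b_sketch(&t, 5000);
    printf("%u\n", t.size);   /* prints 0, not 5000 */
    return 0;
}
```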
void ContentLine_Analyzer::CheckNUL()
{
// If this is the first byte seen on this connection,
// and if the connection's state is PARTIAL, then we've
// intercepted a keep-alive, and shouldn't complain
// about it. Note that for PARTIAL connections, the
// starting sequence number is adjusted as though there
// had been an initial SYN, so we check for whether
// the connection has at most two bytes so far.
TCP_Analyzer* tcp =
static_cast<TCP_ApplicationAnalyzer*>(Parent())->TCP();
if ( tcp )
{
TCP_Endpoint* endp = IsOrig() ? tcp->Orig() : tcp->Resp();
if ( endp->state == TCP_ENDPOINT_PARTIAL &&
endp->LastSeq() - endp->StartSeq() <= 2 )
; // Ignore it.
else
{
if ( ! suppress_weirds && Conn()->FlagEvent(NUL_IN_LINE) )
Conn()->Weird("NUL_in_line");
flag_NULs = 0;
}
}
}
|
Safe
|
[
"CWE-787"
] |
bro
|
6c0f101a62489b1c5927b4ed63b0e1d37db40282
|
2.800006725492052e+38
| 27 |
Patch OOB write in content-line analyzer.
A combination of packets can trigger an out-of-bounds write of a '0' byte
in the content-line analyzer.
This bug was found by Frank Meier.
Addresses BIT-1856.
| 0 |
static int assess_user(
const struct security_assessor *a,
const struct security_info *info,
const void *data,
uint64_t *ret_badness,
char **ret_description) {
_cleanup_free_ char *d = NULL;
uint64_t b;
assert(ret_badness);
assert(ret_description);
if (streq_ptr(info->user, NOBODY_USER_NAME)) {
d = strdup("Service runs under as '" NOBODY_USER_NAME "' user, which should not be used for services");
b = 9;
} else if (info->dynamic_user && !STR_IN_SET(info->user, "0", "root")) {
d = strdup("Service runs under a transient non-root user identity");
b = 0;
} else if (info->user && !STR_IN_SET(info->user, "0", "root", "")) {
d = strdup("Service runs under a static non-root user identity");
b = 0;
} else {
*ret_badness = 10;
*ret_description = NULL;
return 0;
}
if (!d)
return log_oom();
*ret_badness = b;
*ret_description = TAKE_PTR(d);
return 0;
}
|
Safe
|
[
"CWE-269"
] |
systemd
|
9d880b70ba5c6ca83c82952f4c90e86e56c7b70c
|
1.924825105721605e+38
| 36 |
analyze: check for RestrictSUIDSGID= in "systemd-analyze security"
And let's give it a high weight, since it pretty much can be used for
bad things only.
| 0 |
nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_statfsres *resp)
{
struct kstatfs *stat = &resp->stats;
*p++ = htonl(NFSSVC_MAXBLKSIZE_V2); /* max transfer size */
*p++ = htonl(stat->f_bsize);
*p++ = htonl(stat->f_blocks);
*p++ = htonl(stat->f_bfree);
*p++ = htonl(stat->f_bavail);
return xdr_ressize_check(rqstp, p);
}
|
Safe
|
[
"CWE-119",
"CWE-703"
] |
linux
|
13bf9fbff0e5e099e2b6f003a0ab8ae145436309
|
1.4453735687794777e+38
| 12 |
nfsd: stricter decoding of write-like NFSv2/v3 ops
The NFSv2/v3 code does not systematically check whether we decode past
the end of the buffer. This generally appears to be harmless, but there
are a few places where we do arithmetic on the pointers involved and
don't account for the possibility that a length could be negative. Add
checks to catch these.
Reported-by: Tuomas Haanpää <thaan@synopsys.com>
Reported-by: Ari Kauppi <ari@synopsys.com>
Reviewed-by: NeilBrown <neilb@suse.com>
Cc: stable@vger.kernel.org
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
| 0 |
virSecuritySELinuxRestoreHostdevLabel(virSecurityManager *mgr,
virDomainDef *def,
virDomainHostdevDef *dev,
const char *vroot)
{
virSecurityLabelDef *secdef;
secdef = virDomainDefGetSecurityLabelDef(def, SECURITY_SELINUX_NAME);
if (!secdef || !secdef->relabel)
return 0;
switch (dev->mode) {
case VIR_DOMAIN_HOSTDEV_MODE_SUBSYS:
return virSecuritySELinuxRestoreHostdevSubsysLabel(mgr, dev, vroot);
case VIR_DOMAIN_HOSTDEV_MODE_CAPABILITIES:
return virSecuritySELinuxRestoreHostdevCapsLabel(mgr, dev, vroot);
default:
return 0;
}
}
|
Safe
|
[
"CWE-732"
] |
libvirt
|
15073504dbb624d3f6c911e85557019d3620fdb2
|
2.8845498125074887e+38
| 23 |
security: fix SELinux label generation logic
A process can access a file if the set of MCS categories
for the file is equal to *or* a subset of the set of
MCS categories for the process.
If there are two VMs:
a) svirt_t:s0:c117
b) svirt_t:s0:c117,c720
then VM (b) is able to access files labelled for VM (a).
IOW, we must discard the case where the categories are equal,
because that is a subset of many other valid category pairs.
Fixes: https://gitlab.com/libvirt/libvirt/-/issues/153
CVE-2021-3631
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
| 0 |
empe_mp_signed_parse (EMailParserExtension *extension,
EMailParser *parser,
CamelMimePart *part,
GString *part_id,
GCancellable *cancellable,
GQueue *out_mail_parts)
{
CamelMimePart *cpart = NULL;
CamelMultipart *multipart;
CamelCipherContext *cipher = NULL;
CamelContentType *content_type;
CamelSession *session;
guint32 validity_type;
CamelCipherValidity *valid;
const gchar *protocol = NULL;
GError *local_error = NULL;
gint i, nparts, len;
gboolean secured;
/* If the part is application/pgp-signature sub-part then skip it. */
if (!CAMEL_IS_MULTIPART (part)) {
content_type = camel_mime_part_get_content_type (part);
if (camel_content_type_is (
content_type, "application", "pgp-signature")) {
return TRUE;
}
}
multipart = (CamelMultipart *) camel_medium_get_content ((CamelMedium *) part);
if (CAMEL_IS_MULTIPART_SIGNED (multipart)) {
cpart = camel_multipart_get_part (
multipart, CAMEL_MULTIPART_SIGNED_CONTENT);
}
if (cpart == NULL) {
e_mail_parser_error (
parser, out_mail_parts,
_("Could not parse MIME message. "
"Displaying as source."));
e_mail_parser_parse_part_as (
parser, part, part_id,
"application/vnd.evolution.source",
cancellable, out_mail_parts);
return TRUE;
}
content_type = camel_data_wrapper_get_mime_type_field (
CAMEL_DATA_WRAPPER (multipart));
if (content_type != NULL)
protocol = camel_content_type_param (content_type, "protocol");
session = e_mail_parser_get_session (parser);
/* FIXME: Should be done via a plugin interface */
/* FIXME: duplicated in em-format-html-display.c */
if (protocol != NULL) {
#ifdef ENABLE_SMIME
if (g_ascii_strcasecmp ("application/x-pkcs7-signature", protocol) == 0
|| g_ascii_strcasecmp ("application/pkcs7-signature", protocol) == 0) {
cipher = camel_smime_context_new (session);
validity_type = E_MAIL_PART_VALIDITY_SMIME;
} else {
#endif
if (g_ascii_strcasecmp ("application/pgp-signature", protocol) == 0) {
cipher = camel_gpg_context_new (session);
validity_type = E_MAIL_PART_VALIDITY_PGP;
}
#ifdef ENABLE_SMIME
}
#endif
}
if (cipher == NULL) {
e_mail_parser_error (
parser, out_mail_parts,
_("Unsupported signature format"));
e_mail_parser_parse_part_as (
parser, part, part_id, "multipart/mixed",
cancellable, out_mail_parts);
return TRUE;
}
valid = camel_cipher_context_verify_sync (
cipher, part, cancellable, &local_error);
if (local_error != NULL) {
e_mail_parser_error (
parser, out_mail_parts,
_("Error verifying signature: %s"),
local_error->message);
e_mail_parser_parse_part_as (
parser, part, part_id, "multipart/mixed",
cancellable, out_mail_parts);
g_object_unref (cipher);
g_error_free (local_error);
return TRUE;
}
nparts = camel_multipart_get_number (multipart);
secured = FALSE;
len = part_id->len;
for (i = 0; i < nparts; i++) {
GQueue work_queue = G_QUEUE_INIT;
GList *head, *link;
CamelMimePart *subpart;
subpart = camel_multipart_get_part (multipart, i);
g_string_append_printf (part_id, ".signed.%d", i);
g_warn_if_fail (e_mail_parser_parse_part (
parser, subpart, part_id, cancellable, &work_queue));
g_string_truncate (part_id, len);
if (!secured)
secured = e_mail_part_is_secured (subpart);
head = g_queue_peek_head_link (&work_queue);
for (link = head; link != NULL; link = g_list_next (link)) {
EMailPart *mail_part = link->data;
e_mail_part_update_validity (
mail_part, valid,
validity_type | E_MAIL_PART_VALIDITY_SIGNED);
/* Do not traverse sub-messages */
if (g_str_has_suffix (e_mail_part_get_id (mail_part), ".rfc822"))
link = e_mail_formatter_find_rfc822_end_iter (link);
}
e_queue_transfer (&work_queue, out_mail_parts);
}
/* Add a widget with details about the encryption, but only when
* the encrypted isn't itself secured, in that case it has created
* the button itself. */
if (!secured) {
GQueue work_queue = G_QUEUE_INIT;
EMailPart *mail_part;
g_string_append (part_id, ".signed.button");
e_mail_parser_parse_part_as (
parser, part, part_id,
"application/vnd.evolution.secure-button",
cancellable, &work_queue);
mail_part = g_queue_peek_head (&work_queue);
if (mail_part != NULL)
e_mail_part_update_validity (
mail_part, valid,
validity_type | E_MAIL_PART_VALIDITY_SIGNED);
e_queue_transfer (&work_queue, out_mail_parts);
g_string_truncate (part_id, len);
}
camel_cipher_validity_free (valid);
g_object_unref (cipher);
return TRUE;
}
|
Safe
|
[
"CWE-347"
] |
evolution
|
f66cd3e1db301d264563b4222a3574e2e58e2b85
|
3.254325762202445e+38
| 170 |
eds-I#3 - [GPG] Mails that are not encrypted look encrypted
Related to https://gitlab.gnome.org/GNOME/evolution-data-server/issues/3
| 0 |
int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
int err = 0;
hw_atl_utils_hw_chip_features_init(self,
&self->chip_features);
self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, self->fw_ver_actual)) {
*fw_ops = &aq_fw_1x_ops;
} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, self->fw_ver_actual)) {
*fw_ops = &aq_fw_2x_ops;
} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, self->fw_ver_actual)) {
*fw_ops = &aq_fw_2x_ops;
} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, self->fw_ver_actual)) {
*fw_ops = &aq_fw_2x_ops;
} else {
aq_pr_err("Bad FW version detected: %x\n",
self->fw_ver_actual);
return -EOPNOTSUPP;
}
self->aq_fw_ops = *fw_ops;
err = self->aq_fw_ops->init(self);
return err;
}
|
Safe
|
[
"CWE-787"
] |
net
|
b922f622592af76b57cbc566eaeccda0b31a3496
|
4.683474994330679e+37
| 27 |
atlantic: Fix OOB read and write in hw_atl_utils_fw_rpc_wait
This bug report shows up when running our research tools. The
report is an SOOB read, but it seems an SOOB write is also possible
a few lines below.
In detail, fw.len and sw.len are inputs coming from I/O. A len
over the size of self->rpc triggers the SOOB. The patch fixes the
bugs by adding sanity checks.
The bugs are triggerable with compromised/malfunctioning devices.
They are potentially exploitable given they first leak up to
0xffff bytes and are able to overwrite the region later.
The patch is tested with the QEMU emulator.
This is NOT tested with a real device.
Attached is the log we found by fuzzing.
BUG: KASAN: slab-out-of-bounds in
hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
Read of size 4 at addr ffff888016260b08 by task modprobe/213
CPU: 0 PID: 213 Comm: modprobe Not tainted 5.6.0 #1
Call Trace:
dump_stack+0x76/0xa0
print_address_description.constprop.0+0x16/0x200
? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
__kasan_report.cold+0x37/0x7c
? aq_hw_read_reg_bit+0x60/0x70 [atlantic]
? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
kasan_report+0xe/0x20
hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
hw_atl_utils_fw_rpc_call+0x95/0x130 [atlantic]
hw_atl_utils_fw_rpc_wait+0x176/0x210 [atlantic]
hw_atl_utils_mpi_create+0x229/0x2e0 [atlantic]
? hw_atl_utils_fw_rpc_wait+0x210/0x210 [atlantic]
? hw_atl_utils_initfw+0x9f/0x1c8 [atlantic]
hw_atl_utils_initfw+0x12a/0x1c8 [atlantic]
aq_nic_ndev_register+0x88/0x650 [atlantic]
? aq_nic_ndev_init+0x235/0x3c0 [atlantic]
aq_pci_probe+0x731/0x9b0 [atlantic]
? aq_pci_func_init+0xc0/0xc0 [atlantic]
local_pci_probe+0xd3/0x160
pci_device_probe+0x23f/0x3e0
Reported-by: Brendan Dolan-Gavitt <brendandg@nyu.edu>
Signed-off-by: Zekun Shen <bruceshenzk@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
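The patched helper is not shown above; a rough sketch of the added sanity check, with RPC_BUF_SIZE standing in for sizeof(self->rpc) and illustrative names throughout:

```c
#include <stdint.h>
#include <stdio.h>

#define RPC_BUF_SIZE 2048   /* stand-in for sizeof(self->rpc) */

/* fw.len and sw.len arrive from device I/O, so reject them against the
 * driver's RPC buffer before using them as a copy length. */
static int rpc_wait_sketch(uint16_t fw_len)
{
    if (fw_len > RPC_BUF_SIZE)
        return -1;                  /* compromised device: bail out */
    /* ... safe to read/write fw_len bytes of the RPC buffer ... */
    return 0;
}

int main(void)
{
    printf("%d\n", rpc_wait_sketch(0xffff));  /* -1: rejected */
    return 0;
}
```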
~RequestNote() {
delete errorReport;
}
|
Safe
|
[
"CWE-59"
] |
passenger
|
9dda49f4a3ebe9bafc48da1bd45799f30ce19566
|
4.956508608897744e+37
| 3 |
Fixed a problem with graceful web server restarts.
This problem was introduced in 4.0.6 during the attempt to fix issue #910.
| 0 |
static int advance (char const **str, gboolean utf8)
{
if (utf8) {
gunichar c;
c = g_utf8_get_char(*str);
*str = g_utf8_next_char(*str);
return unichar_isprint(c) ? mk_wcwidth(c) : 1;
} else {
*str += 1;
return 1;
}
}
|
Safe
|
[
"CWE-476"
] |
irssi
|
6c6c42e3d1b49d90aacc0b67f8540471cae02a1d
|
2.8527314324414385e+37
| 15 |
Merge branch 'security' into 'master'
See merge request !7
| 0 |
isdn_ppp_cleanup(void)
{
int i;
for (i = 0; i < ISDN_MAX_CHANNELS; i++)
kfree(ippp_table[i]);
#ifdef CONFIG_ISDN_MPP
kfree(isdn_ppp_bundle_arr);
#endif /* CONFIG_ISDN_MPP */
}
|
Safe
|
[] |
linux
|
4ab42d78e37a294ac7bc56901d563c642e03c4ae
|
1.668502932802223e+38
| 12 |
ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <guoyonggang@360.cn>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
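The cleanup function above is only tangentially related; the described fix lands in slhc_init(). A sketch of that range check, with an assumed slot limit of 255:

```c
#include <stdio.h>

#define MAX_SLOTS 255   /* illustrative bound on VJ compression slots */

/* Range-check both slot counts at the top of slhc_init()-like code and
 * return an error the caller can distinguish, instead of silently
 * treating bad values as 0 (or, worse, dereferencing a NULL table when
 * tslots is too large). */
static int slhc_init_sketch(int rslots, int tslots)
{
    if (rslots < 0 || rslots > MAX_SLOTS ||
        tslots < 0 || tslots > MAX_SLOTS)
        return -1;                  /* ERR_PTR(-EINVAL) in the real fix */
    /* ... allocate rstate/tstate tables of the validated sizes ... */
    return 0;
}

int main(void)
{
    printf("%d\n", slhc_init_sketch(16, 100000));  /* -1 */
    return 0;
}
```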
static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct nftables_pernet *nft_net = nft_pernet(info->net);
struct netlink_ext_ack *extack = info->extack;
unsigned int size, i, n, ulen = 0, usize = 0;
u8 genmask = nft_genmask_next(info->net);
struct nft_rule *rule, *old_rule = NULL;
struct nft_expr_info *expr_info = NULL;
u8 family = info->nfmsg->nfgen_family;
struct nft_flow_rule *flow = NULL;
struct net *net = info->net;
struct nft_userdata *udata;
struct nft_table *table;
struct nft_chain *chain;
struct nft_trans *trans;
u64 handle, pos_handle;
struct nft_expr *expr;
struct nft_ctx ctx;
struct nlattr *tmp;
int err, rem;
lockdep_assert_held(&nft_net->commit_mutex);
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]);
return PTR_ERR(table);
}
if (nla[NFTA_RULE_CHAIN]) {
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
if (nft_chain_is_bound(chain))
return -EOPNOTSUPP;
} else if (nla[NFTA_RULE_CHAIN_ID]) {
chain = nft_chain_lookup_byid(net, nla[NFTA_RULE_CHAIN_ID]);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]);
return PTR_ERR(chain);
}
} else {
return -EINVAL;
}
if (nla[NFTA_RULE_HANDLE]) {
handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
rule = __nft_rule_lookup(chain, handle);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return PTR_ERR(rule);
}
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return -EEXIST;
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
old_rule = rule;
else
return -EOPNOTSUPP;
} else {
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE) ||
info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EINVAL;
handle = nf_tables_alloc_handle(table);
if (chain->use == UINT_MAX)
return -EOVERFLOW;
if (nla[NFTA_RULE_POSITION]) {
pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
old_rule = __nft_rule_lookup(chain, pos_handle);
if (IS_ERR(old_rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
return PTR_ERR(old_rule);
}
} else if (nla[NFTA_RULE_POSITION_ID]) {
old_rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_POSITION_ID]);
if (IS_ERR(old_rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]);
return PTR_ERR(old_rule);
}
}
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
n = 0;
size = 0;
if (nla[NFTA_RULE_EXPRESSIONS]) {
expr_info = kvmalloc_array(NFT_RULE_MAXEXPRS,
sizeof(struct nft_expr_info),
GFP_KERNEL);
if (!expr_info)
return -ENOMEM;
nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
err = -EINVAL;
if (nla_type(tmp) != NFTA_LIST_ELEM)
goto err_release_expr;
if (n == NFT_RULE_MAXEXPRS)
goto err_release_expr;
err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
if (err < 0) {
NL_SET_BAD_ATTR(extack, tmp);
goto err_release_expr;
}
size += expr_info[n].ops->size;
n++;
}
}
/* Check for overflow of dlen field */
err = -EFBIG;
if (size >= 1 << 12)
goto err_release_expr;
if (nla[NFTA_RULE_USERDATA]) {
ulen = nla_len(nla[NFTA_RULE_USERDATA]);
if (ulen > 0)
usize = sizeof(struct nft_userdata) + ulen;
}
err = -ENOMEM;
rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL_ACCOUNT);
if (rule == NULL)
goto err_release_expr;
nft_activate_next(net, rule);
rule->handle = handle;
rule->dlen = size;
rule->udata = ulen ? 1 : 0;
if (ulen) {
udata = nft_userdata(rule);
udata->len = ulen - 1;
nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen);
}
expr = nft_expr_first(rule);
for (i = 0; i < n; i++) {
err = nf_tables_newexpr(&ctx, &expr_info[i], expr);
if (err < 0) {
NL_SET_BAD_ATTR(extack, expr_info[i].attr);
goto err_release_rule;
}
if (expr_info[i].ops->validate)
nft_validate_state_update(net, NFT_VALIDATE_NEED);
expr_info[i].ops = NULL;
expr = nft_expr_next(expr);
}
if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
flow = nft_flow_rule_create(net, rule);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto err_release_rule;
}
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
err = nft_delrule(&ctx, old_rule);
if (err < 0)
goto err_destroy_flow_rule;
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (trans == NULL) {
err = -ENOMEM;
goto err_destroy_flow_rule;
}
list_add_tail_rcu(&rule->list, &old_rule->list);
} else {
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (!trans) {
err = -ENOMEM;
goto err_destroy_flow_rule;
}
if (info->nlh->nlmsg_flags & NLM_F_APPEND) {
if (old_rule)
list_add_rcu(&rule->list, &old_rule->list);
else
list_add_tail_rcu(&rule->list, &chain->rules);
} else {
if (old_rule)
list_add_tail_rcu(&rule->list, &old_rule->list);
else
list_add_rcu(&rule->list, &chain->rules);
}
}
kvfree(expr_info);
chain->use++;
if (flow)
nft_trans_flow_rule(trans) = flow;
if (nft_net->validate_state == NFT_VALIDATE_DO)
return nft_table_validate(net, table);
return 0;
err_destroy_flow_rule:
if (flow)
nft_flow_rule_destroy(flow);
err_release_rule:
nf_tables_rule_release(&ctx, rule);
err_release_expr:
for (i = 0; i < n; i++) {
if (expr_info[i].ops) {
module_put(expr_info[i].ops->type->owner);
if (expr_info[i].ops->type->release_ops)
expr_info[i].ops->type->release_ops(expr_info[i].ops);
}
}
kvfree(expr_info);
return err;
}
|
Safe
|
[] |
net
|
520778042ccca019f3ffa136dd0ca565c486cedd
|
3.8184105950881243e+37
| 227 |
netfilter: nf_tables: disallow non-stateful expression in sets earlier
Since 3e135cd499bf ("netfilter: nft_dynset: dynamic stateful expression
instantiation"), it is possible to attach stateful expressions to set
elements.
cd5125d8f518 ("netfilter: nf_tables: split set destruction in deactivate
and destroy phase") introduces conditional destruction on the object to
accommodate transaction semantics.
nft_expr_init() calls expr->ops->init() first, then checks for
NFT_STATEFUL_EXPR; this still allows initializing a non-stateful
lookup expression which points to a set, which might lead to a UAF since
the set is not properly detached from set->binding in this case.
Anyway, this combination is nonsense from the nf_tables perspective.
This patch fixes this problem by checking for NFT_STATEFUL_EXPR before
expr->ops->init() is called.
The reporter provides a KASAN splat and a poc reproducer (similar to
those autogenerated by syzbot to report use-after-free errors). It is
unknown to me if they are using syzbot or if they use a similar automated
tool to locate the bug that they are reporting.
For the record, this is the KASAN splat.
[ 85.431824] ==================================================================
[ 85.432901] BUG: KASAN: use-after-free in nf_tables_bind_set+0x81b/0xa20
[ 85.433825] Write of size 8 at addr ffff8880286f0e98 by task poc/776
[ 85.434756]
[ 85.434999] CPU: 1 PID: 776 Comm: poc Tainted: G W 5.18.0+ #2
[ 85.436023] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
Fixes: 0b2d8a7b638b ("netfilter: nf_tables: add helper functions for expression handling")
Reported-and-tested-by: Aaron Adams <edg-e@nccgroup.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
| 0 |
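A toy model of the reordering the message describes (types and flag values are stand-ins for the kernel's struct nft_expr_ops): validate the NFT_STATEFUL_EXPR flag *before* running ops->init(), so a non-stateful expression such as a lookup that binds to a set is never initialized in a stateful context, and no binding is left behind to be used after free.

```c
#include <stdio.h>

#define NFT_STATEFUL_EXPR 0x1

struct expr_ops {
    unsigned flags;
    int (*init)(void);
};

/* Reject before init(), not after. */
static int expr_init_stateful_sketch(const struct expr_ops *ops)
{
    if (!(ops->flags & NFT_STATEFUL_EXPR))
        return -1;
    return ops->init();
}

static int dummy_init(void) { return 0; }

int main(void)
{
    struct expr_ops lookup = { .flags = 0, .init = dummy_init };
    printf("%d\n", expr_init_stateful_sketch(&lookup));  /* -1 */
    return 0;
}
```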
GF_Err wide_box_dump(GF_Box *a, FILE * trace)
{
gf_isom_box_dump_start(a, "WideBox", trace);
gf_fprintf(trace, ">\n");
gf_isom_box_dump_done("WideBox", a, trace);
return GF_OK;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
|
7.46562702005175e+36
| 7 |
fixed #2138
| 0 |
void BCast(const CPUDevice& dev,
typename TTypes<typename Functor::out_type, NDIMS>::Tensor out,
typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in0,
typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast0,
typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in1,
typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast1,
bool* error) {
typename Functor::func func(error);
auto lhs = in0.broadcast(bcast0);
auto rhs = in1.broadcast(bcast1);
Assign(dev, out, lhs.binaryExpr(rhs, func));
}
|
Safe
|
[
"CWE-476",
"CWE-787"
] |
tensorflow
|
93f428fd1768df147171ed674fee1fc5ab8309ec
|
1.3513654895253732e+38
| 12 |
Fix nullptr deref and heap OOB access in binary cwise ops.
PiperOrigin-RevId: 387936777
Change-Id: I608b8074cec36a982cca622b7144cb2c43e6e19f
| 0 |
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* shape,
TfLiteTensor* output) {
const int shape_rank = SizeOfDimension(shape, 0);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape_rank);
const auto* shape_data = GetTensorData<IndicesT>(shape);
for (int i = 0; i < shape_rank; i++) {
output_shape->data[i] = shape_data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
|
7.1008589468334345e+37
| 12 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
| 0 |
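The real code is C++ using tflite::GetInput and TF_LITE_ENSURE macros; a C toy model of the pattern the message describes, with illustrative names, looks like:

```c
#include <stdio.h>

struct tensor { int rank; };

/* A getter that may now return NULL after the refactoring. */
static struct tensor *get_input_sketch(int exists)
{
    static struct tensor t = { 1 };
    return exists ? &t : NULL;
}

/* Each call site gains a nullptr check before dereferencing. */
static int prepare_sketch(int exists)
{
    struct tensor *in = get_input_sketch(exists);
    if (in == NULL)                 /* the inserted nullptr check */
        return -1;                  /* kTfLiteError-like status */
    return in->rank;
}

int main(void)
{
    printf("%d %d\n", prepare_sketch(1), prepare_sketch(0)); /* 1 -1 */
    return 0;
}
```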
ScanLineInputFile::rawPixelData (int firstScanLine,
const char *&pixelData,
int &pixelDataSize)
{
try
{
Lock lock (*_streamData);
if (firstScanLine < _data->minY || firstScanLine > _data->maxY)
{
throw IEX_NAMESPACE::ArgExc ("Tried to read scan line outside "
"the image file's data window.");
}
int minY = lineBufferMinY
(firstScanLine, _data->minY, _data->linesInBuffer);
readPixelData
(_streamData, _data, minY, _data->lineBuffers[0]->buffer, pixelDataSize);
pixelData = _data->lineBuffers[0]->buffer;
}
catch (IEX_NAMESPACE::BaseExc &e)
{
REPLACE_EXC (e, "Error reading pixel data from image "
"file \"" << fileName() << "\". " << e.what());
throw;
}
}
|
Safe
|
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
|
7.909099218488576e+37
| 29 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
| 0 |
const char *gf_hevc_get_profile_name(u8 video_prof)
{
switch (video_prof) {
case 0x01:
return "Main";
case 0x02:
return "Main 10";
case 0x03:
return "Main Still Picture";
default:
return "Unknown";
}
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
|
2.0683756462261719e+37
| 13 |
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
| 0 |
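The profile-name helper above is safe by construction; the referenced fix concerns parameter-set ID handling. A sketch of such a bounds check, with an assumed table size (the names and limit are hypothetical, not gpac's):

```c
#include <stdio.h>

#define MAX_SPS_SKETCH 16   /* illustrative cap on SPS IDs */

/* Validate a parsed parameter-set ID against the array bound before
 * indexing, so a corrupted bitstream cannot write outside the table. */
static int store_sps_sketch(int sps_id)
{
    if (sps_id < 0 || sps_id >= MAX_SPS_SKETCH)
        return -1;      /* reject instead of sps[sps_id] = ... OOB */
    /* ... safe to use sps_id as an index ... */
    return 0;
}

int main(void)
{
    printf("%d\n", store_sps_sketch(42));  /* -1 */
    return 0;
}
```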
finish_display (const char *id,
GdmDisplay *display,
GdmManager *manager)
{
if (gdm_display_get_status (display) == GDM_DISPLAY_MANAGED)
gdm_display_unmanage (display);
gdm_display_finish (display);
}
|
Safe
|
[] |
gdm
|
ff98b2817014684ae1acec78ff06f0f461a56a9f
|
1.8853415644161915e+38
| 8 |
manager: if falling back to X11 retry autologin
Right now, we get one shot at autologin. If it fails, we fall back to
the greeter. We should give it another go if the reason for the failure
was the Wayland fallback to X.
https://bugzilla.gnome.org/show_bug.cgi?id=780520
| 0 |
bool __fastcall TCommandSet::GetChangesDirectory(TFSCommand Cmd)
{
CHECK_CMD;
return CommandSet[Cmd].ChangesDirectory;
}
|
Safe
|
[
"CWE-20"
] |
winscp
|
49d876f2c5fc00bcedaa986a7cf6dedd6bf16f54
|
3.109994060172e+38
| 5 |
Bug 1675: Prevent SCP server sending files that were not requested
https://winscp.net/tracker/1675
Source commit: 4aa587620973bf793fb6e783052277c0f7be4b55
| 0 |
cmsStage* CMSEXPORT cmsPipelineGetPtrToLastStage(const cmsPipeline* lut)
{
cmsStage *mpe, *Anterior = NULL;
for (mpe = lut ->Elements; mpe != NULL; mpe = mpe ->Next)
Anterior = mpe;
return Anterior;
}
|
Safe
|
[] |
Little-CMS
|
b0d5ffd4ad91cf8683ee106f13742db3dc66599a
|
2.7534835022865908e+38
| 9 |
Memory Squeezing: LCMS2: CLUTElemDup
Check for allocation failures and tidy up if found.
| 0 |
void mutt_merge_envelopes(ENVELOPE* base, ENVELOPE** extra)
{
/* copies each existing element if necessary, and sets the element
* to NULL in the source so that mutt_free_envelope doesn't leave us
* with dangling pointers. */
#define MOVE_ELEM(h) if (!base->h) { base->h = (*extra)->h; (*extra)->h = NULL; }
MOVE_ELEM(return_path);
MOVE_ELEM(from);
MOVE_ELEM(to);
MOVE_ELEM(cc);
MOVE_ELEM(bcc);
MOVE_ELEM(sender);
MOVE_ELEM(reply_to);
MOVE_ELEM(mail_followup_to);
MOVE_ELEM(list_post);
MOVE_ELEM(message_id);
MOVE_ELEM(supersedes);
MOVE_ELEM(date);
MOVE_ELEM(x_label);
if (!base->refs_changed)
{
MOVE_ELEM(references);
}
if (!base->irt_changed)
{
MOVE_ELEM(in_reply_to);
}
/* real_subj is subordinate to subject */
if (!base->subject)
{
base->subject = (*extra)->subject;
base->real_subj = (*extra)->real_subj;
(*extra)->subject = NULL;
(*extra)->real_subj = NULL;
}
/* spam and user headers should never be hashed, and the new envelope may
* have better values. Use new versions regardless. */
mutt_buffer_free (&base->spam);
mutt_free_list (&base->userhdrs);
MOVE_ELEM(spam);
MOVE_ELEM(userhdrs);
#undef MOVE_ELEM
mutt_free_envelope(extra);
}
|
Safe
|
[
"CWE-668"
] |
mutt
|
6d0624411a979e2e1d76af4dd97d03f47679ea4a
|
1.2970180220669599e+38
| 46 |
use a 64-bit random value in temporary filenames.
closes #3158
| 0 |
ncp_get_volume_root(struct ncp_server *server,
const char *volname, __u32* volume, __le32* dirent, __le32* dosdirent)
{
int result;
ncp_dbg(1, "looking up vol %s\n", volname);
ncp_init_request(server);
ncp_add_byte(server, 22); /* Subfunction: Generate dir handle */
ncp_add_byte(server, 0); /* DOS namespace */
ncp_add_byte(server, 0); /* reserved */
ncp_add_byte(server, 0); /* reserved */
ncp_add_byte(server, 0); /* reserved */
ncp_add_byte(server, 0); /* faked volume number */
ncp_add_dword(server, 0); /* faked dir_base */
ncp_add_byte(server, 0xff); /* Don't have a dir_base */
ncp_add_byte(server, 1); /* 1 path component */
ncp_add_pstring(server, volname);
if ((result = ncp_request(server, 87)) != 0) {
ncp_unlock_server(server);
return result;
}
*dirent = *dosdirent = ncp_reply_dword(server, 4);
*volume = ncp_reply_byte(server, 8);
ncp_unlock_server(server);
return 0;
}
|
Safe
|
[
"CWE-119"
] |
staging
|
4c41aa24baa4ed338241d05494f2c595c885af8f
|
2.2447561427388495e+38
| 29 |
staging: ncpfs: memory corruption in ncp_read_kernel()
If the server is malicious then *bytes_read could be larger than the
size of the "target" buffer. It would lead to memory corruption when we
do the memcpy().
Reported-by: Dr Silvio Cesare of InfoSect <silvio.cesare@gmail.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
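A minimal sketch of the described ncp_read_kernel() fix, with an illustrative signature: cap the server-reported byte count at the size of the caller's buffer before the memcpy(), since a malicious server controls *bytes_read.

```c
#include <stdio.h>
#include <string.h>

static int read_reply_sketch(char *target, size_t target_len,
                             const char *reply, size_t bytes_read)
{
    if (bytes_read > target_len)
        return -1;                  /* server lied: refuse to copy */
    memcpy(target, reply, bytes_read);
    return 0;
}

int main(void)
{
    char buf[8], reply[64] = { 0 };
    printf("%d\n", read_reply_sketch(buf, sizeof(buf), reply, 64)); /* -1 */
    return 0;
}
```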
get_validity_string (PKT_public_key *pk, PKT_user_id *uid)
{
int trustlevel;
if (!pk)
return "err"; /* Just in case a NULL PK is passed. */
trustlevel = get_validity (pk, uid);
if ((trustlevel & TRUST_FLAG_REVOKED))
return _("revoked");
return trust_value_to_string (trustlevel);
}
|
Safe
|
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
|
7.732419208640811e+37
| 12 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign-extension-on-shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all, almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <wk@gnupg.org>
| 0 |
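A sketch of a buf32_to_u32-style helper in the spirit of host2net.h (the exact signatures there may differ): widen each byte to an unsigned type *before* shifting, so "b[0] << 24" can never sign-extend through int and produce an out-of-range value.

```c
#include <stdint.h>
#include <stdio.h>

static inline uint32_t buf32_to_u32_sketch(const unsigned char *b)
{
    /* Each byte is cast to uint32_t before the shift, avoiding the
     * implementation-defined sign extension of (int)b[0] << 24. */
    return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
           ((uint32_t)b[2] <<  8) |  (uint32_t)b[3];
}

int main(void)
{
    const unsigned char b[4] = { 0xff, 0x00, 0x00, 0x01 };
    printf("%u\n", buf32_to_u32_sketch(b));   /* 4278190081, no UB */
    return 0;
}
```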
void Display::slotAuthInfo(const QString &message, Auth::Info info) {
// TODO: presentable to the user, eventually
Q_UNUSED(info);
qWarning() << "Authentication information:" << message;
}
|
Safe
|
[
"CWE-613",
"CWE-287",
"CWE-284"
] |
sddm
|
147cec383892d143b5e02daa70f1e7def50f5d98
|
1.3662604957638194e+38
| 5 |
Fix authentication when reusing an existing session
- Check the success value before unlocking the session
- Don't attempt to use the nonexistent "sddm-check" PAM service
| 0 |
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
unsigned long offset = 0;
unsigned long journal_devnum = 0;
unsigned long def_mount_opts;
struct inode *root;
const char *descr;
int ret = -ENOMEM;
int blocksize, clustersize;
unsigned int db_count;
unsigned int i;
int needs_recovery, has_huge_files, has_bigalloc;
__u64 blocks_count;
int err = 0;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
if ((data && !orig_data) || !sbi)
goto out_free_base;
sbi->s_daxdev = dax_dev;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock)
goto out_free_base;
sb->s_fs_info = sbi;
sbi->s_sb = sb;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
sbi->s_sb_block = sb_block;
if (sb->s_bdev->bd_part)
sbi->s_sectors_written_start =
part_stat_read(sb->s_bdev->bd_part, sectors[1]);
/* Cleanup superblock name */
strreplace(sb->s_id, '/', '!');
/* -EINVAL is default */
ret = -EINVAL;
blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
if (!blocksize) {
ext4_msg(sb, KERN_ERR, "unable to set blocksize");
goto out_fail;
}
/*
* The ext4 superblock will not be buffer aligned for other than 1kB
* block sizes. We need to calculate the offset from buffer start.
*/
if (blocksize != EXT4_MIN_BLOCK_SIZE) {
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
} else {
logical_sb_block = sb_block;
}
if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
ext4_msg(sb, KERN_ERR, "unable to read superblock");
goto out_fail;
}
/*
* Note: s_es must be initialized as soon as possible because
* some ext4 macro-instructions depend on its value
*/
es = (struct ext4_super_block *) (bh->b_data + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT4_SUPER_MAGIC)
goto cantfind_ext4;
sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
/* Warn if metadata_csum and gdt_csum are both set. */
if (ext4_has_feature_metadata_csum(sb) &&
ext4_has_feature_gdt_csum(sb))
ext4_warning(sb, "metadata_csum and uninit_bg are "
"redundant flags; please run fsck.");
/* Check for a known checksum algorithm */
if (!ext4_verify_csum_type(sb, es)) {
ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
"unknown checksum algorithm.");
silent = 1;
goto cantfind_ext4;
}
/* Load the checksum driver */
if (ext4_has_feature_metadata_csum(sb) ||
ext4_has_feature_ea_inode(sb)) {
sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
if (IS_ERR(sbi->s_chksum_driver)) {
ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
ret = PTR_ERR(sbi->s_chksum_driver);
sbi->s_chksum_driver = NULL;
goto failed_mount;
}
}
/* Check superblock checksum */
if (!ext4_superblock_csum_verify(sb, es)) {
ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
"invalid superblock checksum. Run e2fsck?");
silent = 1;
ret = -EFSBADCRC;
goto cantfind_ext4;
}
/* Precompute checksum seed for all metadata */
if (ext4_has_feature_csum_seed(sb))
sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
sizeof(es->s_uuid));
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
set_opt(sb, INIT_INODE_TABLE);
if (def_mount_opts & EXT4_DEFM_DEBUG)
set_opt(sb, DEBUG);
if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
set_opt(sb, GRPID);
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sb, NO_UID32);
/* xattr user namespace & acls are now defaulted on */
set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
set_opt(sb, POSIX_ACL);
#endif
/* don't forget to enable journal_csum when metadata_csum is enabled. */
if (ext4_has_metadata_csum(sb))
set_opt(sb, JOURNAL_CHECKSUM);
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
set_opt(sb, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
set_opt(sb, ORDERED_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
set_opt(sb, WRITEBACK_DATA);
if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
set_opt(sb, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
set_opt(sb, ERRORS_CONT);
else
set_opt(sb, ERRORS_RO);
/* block_validity enabled by default; disable with noblock_validity */
set_opt(sb, BLOCK_VALIDITY);
if (def_mount_opts & EXT4_DEFM_DISCARD)
set_opt(sb, DISCARD);
sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
set_opt(sb, BARRIER);
/*
* enable delayed allocation by default
* Use -o nodelalloc to turn it off
*/
if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
set_opt(sb, DELALLOC);
/*
* set default s_li_wait_mult for lazyinit, for the case there is
* no mount option specified.
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
if (sbi->s_es->s_mount_opts[0]) {
char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
sizeof(sbi->s_es->s_mount_opts),
GFP_KERNEL);
if (!s_mount_opts)
goto failed_mount;
if (!parse_options(s_mount_opts, sb, &journal_devnum,
&journal_ioprio, 0)) {
ext4_msg(sb, KERN_WARNING,
"failed to parse options in superblock: %s",
s_mount_opts);
}
kfree(s_mount_opts);
}
sbi->s_def_mount_opt = sbi->s_mount_opt;
if (!parse_options((char *) data, sb, &journal_devnum,
&journal_ioprio, 0))
goto failed_mount;
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
"with data=journal disables delayed "
"allocation and O_DIRECT support!\n");
if (test_opt2(sb, EXPLICIT_DELALLOC)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and delalloc");
goto failed_mount;
}
if (test_opt(sb, DIOREAD_NOLOCK)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and dioread_nolock");
goto failed_mount;
}
if (test_opt(sb, DAX)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and dax");
goto failed_mount;
}
if (ext4_has_feature_encrypt(sb)) {
ext4_msg(sb, KERN_WARNING,
"encrypted files will use data=ordered "
"instead of data journaling mode");
}
if (test_opt(sb, DELALLOC))
clear_opt(sb, DELALLOC);
} else {
sb->s_iflags |= SB_I_CGROUPWB;
}
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(ext4_has_compat_features(sb) ||
ext4_has_ro_compat_features(sb) ||
ext4_has_incompat_features(sb)))
ext4_msg(sb, KERN_WARNING,
"feature flags set on rev 0 fs, "
"running e2fsck is recommended");
if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
set_opt2(sb, HURD_COMPAT);
if (ext4_has_feature_64bit(sb)) {
ext4_msg(sb, KERN_ERR,
"The Hurd can't support 64-bit file systems");
goto failed_mount;
}
/*
* ea_inode feature uses l_i_version field which is not
* available in HURD_COMPAT mode.
*/
if (ext4_has_feature_ea_inode(sb)) {
ext4_msg(sb, KERN_ERR,
"ea_inode feature is not supported for Hurd");
goto failed_mount;
}
}
if (IS_EXT2_SB(sb)) {
if (ext2_feature_set_ok(sb))
ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
"using the ext4 subsystem");
else {
/*
* If we're probing be silent, if this looks like
* it's actually an ext[34] filesystem.
*/
if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
goto failed_mount;
ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
"to feature incompatibilities");
goto failed_mount;
}
}
if (IS_EXT3_SB(sb)) {
if (ext3_feature_set_ok(sb))
ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
"using the ext4 subsystem");
else {
/*
* If we're probing be silent, if this looks like
* it's actually an ext4 filesystem.
*/
if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
goto failed_mount;
ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
"to feature incompatibilities");
goto failed_mount;
}
}
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
* so there is a chance incompat flags are set on a rev 0 filesystem.
*/
if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
goto failed_mount;
blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
"Unsupported filesystem blocksize %d (%d log_block_size)",
blocksize, le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
if (le32_to_cpu(es->s_log_block_size) >
(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
ext4_msg(sb, KERN_ERR,
"Invalid log block size: %u",
le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
ext4_msg(sb, KERN_ERR,
"Number of reserved GDT blocks insanely large: %d",
le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
goto failed_mount;
}
if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
if (ext4_has_feature_inline_data(sb)) {
ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
" that may contain inline data");
sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
}
err = bdev_dax_supported(sb, blocksize);
if (err) {
ext4_msg(sb, KERN_ERR,
"DAX unsupported by block device. Turning off DAX.");
sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
}
}
if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
es->s_encryption_level);
goto failed_mount;
}
if (sb->s_blocksize != blocksize) {
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
ext4_msg(sb, KERN_ERR, "bad block size %d",
blocksize);
goto failed_mount;
}
brelse(bh);
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
bh = sb_bread_unmovable(sb, logical_sb_block);
if (!bh) {
ext4_msg(sb, KERN_ERR,
"Can't read superblock on 2nd try");
goto failed_mount;
}
es = (struct ext4_super_block *)(bh->b_data + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
ext4_msg(sb, KERN_ERR,
"Magic mismatch, very weird!");
goto failed_mount;
}
}
has_huge_files = ext4_has_feature_huge_file(sb);
sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
has_huge_files);
sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
ext4_msg(sb, KERN_ERR,
"unsupported inode size: %d",
sbi->s_inode_size);
goto failed_mount;
}
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
}
sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
if (ext4_has_feature_64bit(sb)) {
if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
!is_power_of_2(sbi->s_desc_size)) {
ext4_msg(sb, KERN_ERR,
"unsupported descriptor size %lu",
sbi->s_desc_size);
goto failed_mount;
}
} else
sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
sbi->s_inodes_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
sbi->s_inodes_per_group);
goto failed_mount;
}
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
for (i = 0; i < 4; i++)
sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
sbi->s_def_hash_version = es->s_def_hash_version;
if (ext4_has_feature_dir_index(sb)) {
i = le32_to_cpu(es->s_flags);
if (i & EXT2_FLAGS_UNSIGNED_HASH)
sbi->s_hash_unsigned = 3;
else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
if (!sb_rdonly(sb))
es->s_flags |=
cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
sbi->s_hash_unsigned = 3;
#else
if (!sb_rdonly(sb))
es->s_flags |=
cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
}
}
/* Handle clustersize */
clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
has_bigalloc = ext4_has_feature_bigalloc(sb);
if (has_bigalloc) {
if (clustersize < blocksize) {
ext4_msg(sb, KERN_ERR,
"cluster size (%d) smaller than "
"block size (%d)", clustersize, blocksize);
goto failed_mount;
}
if (le32_to_cpu(es->s_log_cluster_size) >
(EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
ext4_msg(sb, KERN_ERR,
"Invalid log cluster size: %u",
le32_to_cpu(es->s_log_cluster_size));
goto failed_mount;
}
sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
le32_to_cpu(es->s_log_block_size);
sbi->s_clusters_per_group =
le32_to_cpu(es->s_clusters_per_group);
if (sbi->s_clusters_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR,
"#clusters per group too big: %lu",
sbi->s_clusters_per_group);
goto failed_mount;
}
if (sbi->s_blocks_per_group !=
(sbi->s_clusters_per_group * (clustersize / blocksize))) {
ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
"clusters per group (%lu) inconsistent",
sbi->s_blocks_per_group,
sbi->s_clusters_per_group);
goto failed_mount;
}
} else {
if (clustersize != blocksize) {
ext4_warning(sb, "fragment/cluster size (%d) != "
"block size (%d)", clustersize,
blocksize);
clustersize = blocksize;
}
if (sbi->s_blocks_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR,
"#blocks per group too big: %lu",
sbi->s_blocks_per_group);
goto failed_mount;
}
sbi->s_clusters_per_group = sbi->s_blocks_per_group;
sbi->s_cluster_bits = 0;
}
sbi->s_cluster_ratio = clustersize / blocksize;
/* Do we have standard group size of clustersize * 8 blocks ? */
if (sbi->s_blocks_per_group == clustersize << 3)
set_opt2(sb, STD_GROUP_SIZE);
/*
* Test whether we have more sectors than will fit in sector_t,
* and whether the max offset is addressable by the page cache.
*/
err = generic_check_addressable(sb->s_blocksize_bits,
ext4_blocks_count(es));
if (err) {
ext4_msg(sb, KERN_ERR, "filesystem"
" too large to mount safely on this system");
if (sizeof(sector_t) < 8)
ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
goto failed_mount;
}
if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext4;
/* check blocks count against device size */
blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
if (blocks_count && ext4_blocks_count(es) > blocks_count) {
ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
"exceeds size of device (%llu blocks)",
ext4_blocks_count(es), blocks_count);
goto failed_mount;
}
/*
* It makes no sense for the first data block to be beyond the end
* of the filesystem.
*/
if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
"block %u is beyond end of filesystem (%llu)",
le32_to_cpu(es->s_first_data_block),
ext4_blocks_count(es));
goto failed_mount;
}
blocks_count = (ext4_blocks_count(es) -
le32_to_cpu(es->s_first_data_block) +
EXT4_BLOCKS_PER_GROUP(sb) - 1);
do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
"(block count %llu, first data block %u, "
"blocks per group %lu)", sbi->s_groups_count,
ext4_blocks_count(es),
le32_to_cpu(es->s_first_data_block),
EXT4_BLOCKS_PER_GROUP(sb));
goto failed_mount;
}
sbi->s_groups_count = blocks_count;
sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
ext4_msg(sb, KERN_WARNING,
"first meta block group too large: %u "
"(group descriptor block count %u)",
le32_to_cpu(es->s_first_meta_bg), db_count);
goto failed_mount;
}
}
sbi->s_group_desc = kvmalloc(db_count *
sizeof(struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory");
ret = -ENOMEM;
goto failed_mount;
}
bgl_lock_init(sbi->s_blockgroup_lock);
/* Pre-read the descriptors into the buffer cache */
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logical_sb_block, i);
sb_breadahead(sb, block);
}
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logical_sb_block, i);
sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
if (!sbi->s_group_desc[i]) {
ext4_msg(sb, KERN_ERR,
"can't read group descriptor %d", i);
db_count = i;
goto failed_mount2;
}
}
if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
ret = -EFSCORRUPTED;
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
/* Register extent status tree shrinker */
if (ext4_es_register_shrinker(sbi))
goto failed_mount3;
sbi->s_stripe = ext4_get_stripe_size(sbi);
sbi->s_extent_max_zeroout_kb = 32;
/*
* set up enough so that it can read an inode
*/
sb->s_op = &ext4_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA
sb->dq_op = &ext4_quota_operations;
if (ext4_has_feature_quota(sb))
sb->s_qcop = &dquot_quotactl_sysfile_ops;
else
sb->s_qcop = &ext4_qctl_operations;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
sb->s_root = NULL;
needs_recovery = (es->s_last_orphan != 0 ||
ext4_has_feature_journal_needs_recovery(sb));
if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
goto failed_mount3a;
/*
* The first inode we look at is the journal inode. Don't try
* root first: it may be modified in the journal!
*/
if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
err = ext4_load_journal(sb, es, journal_devnum);
if (err)
goto failed_mount3a;
} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
ext4_has_feature_journal_needs_recovery(sb)) {
ext4_msg(sb, KERN_ERR, "required journal recovery "
"suppressed and not mounted read-only");
goto failed_mount_wq;
} else {
/* Nojournal mode, all journal mount options are illegal */
if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"journal_checksum, fs mounted w/o journal");
goto failed_mount_wq;
}
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"journal_async_commit, fs mounted w/o journal");
goto failed_mount_wq;
}
if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"commit=%lu, fs mounted w/o journal",
sbi->s_commit_interval / HZ);
goto failed_mount_wq;
}
if (EXT4_MOUNT_DATA_FLAGS &
(sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"data=, fs mounted w/o journal");
goto failed_mount_wq;
}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
clear_opt(sb, JOURNAL_CHECKSUM);
clear_opt(sb, DATA_FLAGS);
sbi->s_journal = NULL;
needs_recovery = 0;
goto no_journal;
}
if (ext4_has_feature_64bit(sb) &&
!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_64BIT)) {
ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
goto failed_mount_wq;
}
if (!set_journal_csum_feature_set(sb)) {
ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
"feature set");
goto failed_mount_wq;
}
/* We have now updated the journal if required, so we can
* validate the data journaling mode. */
switch (test_opt(sb, DATA_FLAGS)) {
case 0:
/* No mode set, assume a default based on the journal
* capabilities: ORDERED_DATA if the journal can
* cope, else JOURNAL_DATA
*/
if (jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
set_opt(sb, ORDERED_DATA);
else
set_opt(sb, JOURNAL_DATA);
break;
case EXT4_MOUNT_ORDERED_DATA:
case EXT4_MOUNT_WRITEBACK_DATA:
if (!jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
ext4_msg(sb, KERN_ERR, "Journal does not support "
"requested data journaling mode");
goto failed_mount_wq;
}
default:
break;
}
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"journal_async_commit in data=ordered mode");
goto failed_mount_wq;
}
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
no_journal:
if (!test_opt(sb, NO_MBCACHE)) {
sbi->s_ea_block_cache = ext4_xattr_create_cache();
if (!sbi->s_ea_block_cache) {
ext4_msg(sb, KERN_ERR,
"Failed to create ea_block_cache");
goto failed_mount_wq;
}
if (ext4_has_feature_ea_inode(sb)) {
sbi->s_ea_inode_cache = ext4_xattr_create_cache();
if (!sbi->s_ea_inode_cache) {
ext4_msg(sb, KERN_ERR,
"Failed to create ea_inode_cache");
goto failed_mount_wq;
}
}
}
if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
(blocksize != PAGE_SIZE)) {
ext4_msg(sb, KERN_ERR,
"Unsupported blocksize for fs encryption");
goto failed_mount_wq;
}
if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
!ext4_has_feature_encrypt(sb)) {
ext4_set_feature_encrypt(sb);
ext4_commit_super(sb, 1);
}
/*
* Get the # of file system overhead blocks from the
* superblock if present.
*/
if (es->s_overhead_clusters)
sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
else {
err = ext4_calculate_overhead(sb);
if (err)
goto failed_mount_wq;
}
/*
* The maximum number of concurrent works can be high and
* concurrency isn't really necessary. Limit it to 1.
*/
EXT4_SB(sb)->rsv_conversion_wq =
alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!EXT4_SB(sb)->rsv_conversion_wq) {
printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
ret = -ENOMEM;
goto failed_mount4;
}
/*
* The jbd2_journal_load will have done any necessary log recovery,
* so we can safely mount the rest of the filesystem now.
*/
root = ext4_iget(sb, EXT4_ROOT_INO);
if (IS_ERR(root)) {
ext4_msg(sb, KERN_ERR, "get root inode failed");
ret = PTR_ERR(root);
root = NULL;
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
iput(root);
goto failed_mount4;
}
sb->s_root = d_make_root(root);
if (!sb->s_root) {
ext4_msg(sb, KERN_ERR, "get root dentry failed");
ret = -ENOMEM;
goto failed_mount4;
}
if (ext4_setup_super(sb, es, sb_rdonly(sb)))
sb->s_flags |= SB_RDONLY;
/* determine the minimum size of new large inodes, if present */
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
sbi->s_want_extra_isize == 0) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
if (ext4_has_feature_extra_isize(sb)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_want_extra_isize);
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_min_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_min_extra_isize);
}
}
/* Check if enough inode space is available */
if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
sbi->s_inode_size) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO, "required extra inode space not "
			 "available");
}
ext4_set_resv_clusters(sb);
err = ext4_setup_system_zone(sb);
if (err) {
ext4_msg(sb, KERN_ERR, "failed to initialize system "
"zone (%d)", err);
goto failed_mount4a;
}
ext4_ext_init(sb);
err = ext4_mb_init(sb);
if (err) {
ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
err);
goto failed_mount5;
}
block = ext4_count_free_clusters(sb);
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, block));
err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
GFP_KERNEL);
if (!err) {
unsigned long freei = ext4_count_free_inodes(sb);
sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
GFP_KERNEL);
}
if (!err)
err = percpu_counter_init(&sbi->s_dirs_counter,
ext4_count_dirs(sb), GFP_KERNEL);
if (!err)
err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
GFP_KERNEL);
if (!err)
err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount6;
}
if (ext4_has_feature_flex_bg(sb))
if (!ext4_fill_flex_info(sb)) {
ext4_msg(sb, KERN_ERR,
"unable to initialize "
"flex_bg meta info!");
goto failed_mount6;
}
err = ext4_register_li_request(sb, first_not_zeroed);
if (err)
goto failed_mount6;
err = ext4_register_sysfs(sb);
if (err)
goto failed_mount7;
#ifdef CONFIG_QUOTA
/* Enable quota usage during mount. */
if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
err = ext4_enable_quotas(sb);
if (err)
goto failed_mount8;
}
#endif /* CONFIG_QUOTA */
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
if (needs_recovery) {
ext4_msg(sb, KERN_INFO, "recovery complete");
ext4_mark_recovery_complete(sb, es);
}
if (EXT4_SB(sb)->s_journal) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
descr = " journalled data mode";
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
descr = " ordered data mode";
else
descr = " writeback data mode";
} else
descr = "out journal";
if (test_opt(sb, DISCARD)) {
struct request_queue *q = bdev_get_queue(sb->s_bdev);
if (!blk_queue_discard(q))
ext4_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but "
"the device does not support discard");
}
if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
"Opts: %.*s%s%s", descr,
(int) sizeof(sbi->s_es->s_mount_opts),
sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
kfree(orig_data);
return 0;
cantfind_ext4:
if (!silent)
ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
goto failed_mount;
#ifdef CONFIG_QUOTA
failed_mount8:
ext4_unregister_sysfs(sb);
#endif
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
ext4_mb_release(sb);
if (sbi->s_flex_groups)
kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
ext4_ext_release(sb);
ext4_release_system_zone(sb);
failed_mount4a:
dput(sb->s_root);
sb->s_root = NULL;
failed_mount4:
ext4_msg(sb, KERN_ERR, "mount failed");
if (EXT4_SB(sb)->rsv_conversion_wq)
destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
if (sbi->s_ea_inode_cache) {
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
sbi->s_ea_inode_cache = NULL;
}
if (sbi->s_ea_block_cache) {
ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
sbi->s_ea_block_cache = NULL;
}
if (sbi->s_journal) {
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
failed_mount3a:
ext4_es_unregister_shrinker(sbi);
failed_mount3:
del_timer_sync(&sbi->s_err_report);
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
kvfree(sbi->s_group_desc);
failed_mount:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
ext4_blkdev_remove(sbi);
brelse(bh);
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
out_free_base:
kfree(sbi);
kfree(orig_data);
fs_put_dax(dax_dev);
return err ? err : ret;
}
|
Vulnerable
|
[] |
linux
|
a45403b51582a87872927a3e0fc0a389c26867f1
|
6.9659727464699835e+37
| 1027 |
ext4: always initialize the crc32c checksum driver
The extended attribute code now uses the crc32c checksum for hashing
purposes, so we should just always initialize it. We also want
to prevent NULL pointer dereferences if one of the metadata checksum
features is enabled after the file system is originally mounted.
This issue has been assigned CVE-2018-1094.
https://bugzilla.kernel.org/show_bug.cgi?id=199183
https://bugzilla.redhat.com/show_bug.cgi?id=1560788
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@vger.kernel.org
| 1 |
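A minimal sketch of the fix pattern this commit message describes, assuming the kernel crypto API (crypto_alloc_shash) and a simplified ext4_sb_info; the helper name is hypothetical. The point is to allocate the crc32c driver unconditionally at mount time rather than only when a checksum feature bit happens to be set, so enabling metadata checksums later cannot dereference a NULL driver:

/* Hypothetical helper: allocate the crc32c shash unconditionally
 * during mount so a feature bit flipped later never finds a NULL
 * sbi->s_chksum_driver. */
static int ext4_load_crc32c_driver(struct ext4_sb_info *sbi)
{
	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		int err = PTR_ERR(sbi->s_chksum_driver);

		sbi->s_chksum_driver = NULL;
		return err;	/* typically -ENOMEM or -ENOENT */
	}
	return 0;
}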
int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct in6_addr *daddr, *final_p = NULL, final;
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi fl;
struct dst_entry *dst;
int addr_len = msg->msg_namelen;
int ulen = len;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
/* destination address check */
if (sin6) {
if (addr_len < offsetof(struct sockaddr, sa_data))
return -EINVAL;
switch (sin6->sin6_family) {
case AF_INET6:
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
break;
case AF_INET:
goto do_udp_sendmsg;
case AF_UNSPEC:
msg->msg_name = sin6 = NULL;
msg->msg_namelen = addr_len = 0;
daddr = NULL;
break;
default:
return -EINVAL;
}
} else if (!up->pending) {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &np->daddr;
} else
daddr = NULL;
if (daddr) {
if (ipv6_addr_v4mapped(daddr)) {
struct sockaddr_in sin;
sin.sin_family = AF_INET;
sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
sin.sin_addr.s_addr = daddr->s6_addr32[3];
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
return udp_sendmsg(iocb, sk, msg, len);
}
}
if (up->pending == AF_INET)
return udp_sendmsg(iocb, sk, msg, len);
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX - sizeof(struct udphdr))
return -EMSGSIZE;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET6)) {
release_sock(sk);
return -EAFNOSUPPORT;
}
dst = NULL;
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
memset(&fl, 0, sizeof(fl));
if (sin6) {
if (sin6->sin6_port == 0)
return -EINVAL;
fl.fl_ip_dport = sin6->sin6_port;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
if (flowlabel == NULL)
return -EINVAL;
daddr = &flowlabel->dst;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &np->daddr))
daddr = &np->daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
fl.oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
fl.fl_ip_dport = inet->inet_dport;
daddr = &np->daddr;
fl.fl6_flowlabel = np->flow_label;
connected = 1;
}
if (!fl.oif)
fl.oif = sk->sk_bound_dev_if;
if (!fl.oif)
fl.oif = np->sticky_pktinfo.ipi6_ifindex;
fl.mark = sk->sk_mark;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
&tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
if (flowlabel == NULL)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
connected = 0;
}
if (opt == NULL)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl.proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
ipv6_addr_copy(&fl.fl6_dst, daddr);
else
fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
fl.fl_ip_sport = inet->inet_sport;
/* merge ip6_build_xmit from ip6_output */
if (opt && opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
ipv6_addr_copy(&final, &fl.fl6_dst);
ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
final_p = &final;
connected = 0;
}
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
fl.oif = np->mcast_oif;
connected = 0;
}
security_sk_classify_flow(sk, &fl);
err = ip6_sk_dst_lookup(sk, &dst, &fl);
if (err)
goto out;
if (final_p)
ipv6_addr_copy(&fl.fl6_dst, final_p);
err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
if (err < 0) {
if (err == -EREMOTE)
err = ip6_dst_blackhole(sk, &dst, &fl);
if (err < 0)
goto out;
}
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl.fl6_dst))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
}
if (tclass < 0)
tclass = np->tclass;
if (dontfrag < 0)
dontfrag = np->dontfrag;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
up->pending = AF_INET6;
do_append_data:
up->len += ulen;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl,
(struct rt6_info*)dst,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
err = udp_v6_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
if (dst) {
if (connected) {
ip6_dst_store(sk, dst,
ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
&np->daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl.fl6_src, &np->saddr) ?
&np->saddr :
#endif
NULL);
} else {
dst_release(dst);
}
dst = NULL;
}
if (err > 0)
err = np->recverr ? net_xmit_errno(err) : 0;
release_sock(sk);
out:
dst_release(dst);
fl6_sock_release(flowlabel);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
|
Safe
|
[
"CWE-400"
] |
linux-2.6
|
c377411f2494a931ff7facdbb3a6839b1266bcf6
|
3.863200852385317e+37
| 298 |
net: sk_add_backlog() take rmem_alloc into account
The current socket backlog limit is not enough to really stop DDoS attacks,
because the user thread spends a long time processing a full backlog each
round, and may end up spinning on the socket lock.
We should add the backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let the user run without being slowed down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
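A hedged sketch of the sk_rcvqueues_full() helper the commit message introduces, using struct sock field names from that era; treat it as an illustration rather than the exact upstream definition. Backlog bytes and already-queued receive memory are budgeted together, so a flood is rejected before the receiver ever takes the socket lock:

/* Sketch: drop early when backlog plus rmem_alloc already exceeds
 * the receive buffer, instead of letting the backlog grow unbounded
 * while the user thread holds the socket lock. */
static inline int sk_rcvqueues_full(const struct sock *sk,
				    const struct sk_buff *skb)
{
	unsigned int qsize = sk->sk_backlog.len +
			     atomic_read(&sk->sk_rmem_alloc);

	return qsize + skb->truesize > (unsigned int)sk->sk_rcvbuf;
}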
static void AuthPAMUserPwdRspFunc(rfbClientPtr cl)
{
CARD32 userLen;
CARD32 pwdLen;
char userBuf[MAX_USER_LEN + 1];
char pwdBuf[MAX_PWD_LEN + 1];
int n;
const char *emsg;
n = ReadExact(cl, (char *)&userLen, sizeof(userLen));
if (n <= 0) {
if (n != 0)
rfbLogPerror("AuthPAMUserPwdRspFunc: read error");
rfbCloseClient(cl);
return;
}
userLen = Swap32IfLE(userLen);
n = ReadExact(cl, (char *)&pwdLen, sizeof(pwdLen));
if (n <= 0) {
if (n != 0)
rfbLogPerror("AuthPAMUserPwdRspFunc: read error");
rfbCloseClient(cl);
return;
}
pwdLen = Swap32IfLE(pwdLen);
if ((userLen > MAX_USER_LEN) || (pwdLen > MAX_PWD_LEN)) {
rfbLogPerror("AuthPAMUserPwdRspFunc: excessively large user name or password in response");
rfbCloseClient(cl);
return;
}
n = ReadExact(cl, userBuf, userLen);
if (n <= 0) {
if (n != 0)
rfbLogPerror("AuthPAMUserPwdRspFunc: error reading user name");
rfbCloseClient(cl);
return;
}
userBuf[userLen] = '\0';
n = ReadExact(cl, pwdBuf, pwdLen);
if (n <= 0) {
if (n != 0)
rfbLogPerror("AuthPAMUserPwdRspFunc: error reading password");
rfbCloseClient(cl);
return;
}
pwdBuf[pwdLen] = '\0';
if (rfbAuthUserACL) {
UserList *p = userACL;
if (p == NULL)
rfbLog("WARNING: User ACL is empty. No users will be allowed to log in with Unix Login authentication.\n");
while (p != NULL) {
if (!strcmp(p->name, userBuf))
break;
p = p->next;
}
if (p == NULL) {
rfbLog("User '%s' is not in the ACL and has been denied access\n",
userBuf);
rfbClientAuthFailed(cl, "User denied access");
return;
}
cl->viewOnly = p->viewOnly;
} else {
struct passwd pbuf;
struct passwd *pw;
char buf[256];
if (getpwuid_r(getuid(), &pbuf, buf, sizeof(buf), &pw) != 0)
FatalError("AuthPAMUserPwdRspFunc: getpwuid_r failed: %s",
strerror(errno));
if (strcmp(pbuf.pw_name, userBuf)) {
rfbLog("User '%s' denied access (not the session owner)\n", userBuf);
rfbLog(" Enable user ACL to grant access to other users.\n");
rfbClientAuthFailed(cl, "User denied access");
return;
}
}
if (rfbPAMAuthenticate(cl, pamServiceName, userBuf, pwdBuf, &emsg))
rfbClientAuthSucceeded(cl, rfbAuthUnixLogin);
else
rfbClientAuthFailed(cl, (char *)emsg);
}
|
Safe
|
[
"CWE-787"
] |
turbovnc
|
cea98166008301e614e0d36776bf9435a536136e
|
2.7242784691459128e+38
| 93 |
Server: Fix two issues identified by ASan
1. If the TLSPlain and X509Plain security types were both disabled, then
rfbOptPamAuth() would overflow the name field in the secTypes
structure when testing the "none" security type, since the name of
that security type has less than five characters. This issue was
innocuous, since the overflow was fully contained within the secTypes
structure, but the ASan error caused Xvnc to abort, which made it
difficult to detect other errors.
2. If an ill-behaved RFB client sent the TurboVNC Server a fence
message with more than 64 bytes, then the TurboVNC Server would
try to read that message and subsequently overflow the stack before
it detected that the payload was too large. This could never have
occurred with any of the VNC viewers that currently support the RFB
flow control extensions (TigerVNC and TurboVNC, namely.) This issue
was also innocuous, since the stack overflow affected two variables
(newScreens and errMsg) that were never accessed before the function
returned.
| 0 |
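The second ASan issue in this commit message is the classic read-then-check ordering bug; the function above gets the order right by validating userLen and pwdLen before calling ReadExact(). A hedged sketch of the same guard applied to the fence payload, with a hypothetical helper name:

/* Hypothetical helper: check the client-declared length against the
 * fixed on-stack buffer *before* reading, never after. */
#define MAX_FENCE_PAYLOAD 64

static int ReadFencePayload(rfbClientPtr cl, CARD32 len,
                            char payload[MAX_FENCE_PAYLOAD])
{
    if (len > MAX_FENCE_PAYLOAD) {
        rfbLog("Fence payload too large (%u bytes)\n", (unsigned)len);
        rfbCloseClient(cl);
        return 0;
    }
    return ReadExact(cl, payload, len) > 0;
}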
HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
Handle<Object> value) {
if (!pointer->is_set()) {
HConstant* constant = new(zone()) HConstant(value,
Representation::Tagged());
constant->InsertAfter(GetConstantUndefined());
pointer->set(constant);
}
return pointer->get();
}
|
Safe
|
[] |
node
|
fd80a31e0697d6317ce8c2d289575399f4e06d21
|
5.357258305540863e+37
| 10 |
deps: backport 5f836c from v8 upstream
Original commit message:
Fix Hydrogen bounds check elimination
When combining bounds checks, they must all be moved before the first load/store
that they are guarding.
BUG=chromium:344186
LOG=y
R=svenpanne@chromium.org
Review URL: https://codereview.chromium.org/172093002
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
fix #8070
| 0 |
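The commit message states the scheduling rule Hydrogen violated: when several bounds checks are merged into one wider check, the merged check must be placed before the first load or store it guards. A small C analogue of the rule, independent of V8 internals:

/* After merging the checks for a[i] and a[i + 1] into one test, that
 * test must dominate the *first* access -- leaving it at the position
 * of the second check would let a[i] run unguarded. */
int sum_pair(const int *a, int len, int i)
{
    if (i < 0 || i + 1 >= len)   /* merged check, hoisted up front */
        return -1;
    int s = a[i];
    s += a[i + 1];
    return s;
}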
char* PE_(r_bin_pe_get_subsystem)(struct PE_(r_bin_pe_obj_t)* bin) {
char* subsystem = NULL;
if (bin && bin->nt_headers) {
switch (bin->nt_headers->optional_header.Subsystem) {
case PE_IMAGE_SUBSYSTEM_NATIVE:
subsystem = "Native"; break;
case PE_IMAGE_SUBSYSTEM_WINDOWS_GUI:
subsystem = "Windows GUI"; break;
case PE_IMAGE_SUBSYSTEM_WINDOWS_CUI:
subsystem = "Windows CUI"; break;
case PE_IMAGE_SUBSYSTEM_POSIX_CUI:
subsystem = "POSIX CUI"; break;
case PE_IMAGE_SUBSYSTEM_WINDOWS_CE_GUI:
subsystem = "Windows CE GUI"; break;
case PE_IMAGE_SUBSYSTEM_EFI_APPLICATION:
subsystem = "EFI Application"; break;
case PE_IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER:
subsystem = "EFI Boot Service Driver"; break;
case PE_IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER:
subsystem = "EFI Runtime Driver"; break;
case PE_IMAGE_SUBSYSTEM_EFI_ROM:
subsystem = "EFI ROM"; break;
case PE_IMAGE_SUBSYSTEM_XBOX:
subsystem = "XBOX"; break;
default:
subsystem = "Unknown"; break;
}
}
return subsystem? strdup (subsystem): NULL;
}
|
Safe
|
[
"CWE-125"
] |
radare2
|
4e1cf0d3e6f6fe2552a269def0af1cd2403e266c
|
3.357086371703978e+38
| 30 |
Fix crash in pe
| 0 |
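The function above already shows the crash-avoidance pattern: nothing under bin->nt_headers is touched until both pointers are known valid. A hedged sketch of a sibling accessor following the same rule; the field layout is assumed from radare2's PE structs and the helper itself is illustrative:

/* Illustrative accessor: a truncated or malformed PE can leave
 * nt_headers NULL, so guard before every dereference. */
static ut16 pe_get_machine(struct PE_(r_bin_pe_obj_t)* bin)
{
	if (!bin || !bin->nt_headers) {
		return 0; /* treat as unknown machine */
	}
	return bin->nt_headers->file_header.Machine;
}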
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, 0);
TfLiteTensor* hits = GetOutput(context, node, 1);
const TfLiteTensor* lookup = GetInput(context, node, 0);
const TfLiteTensor* key = GetInput(context, node, 1);
const TfLiteTensor* value = GetInput(context, node, 2);
const int num_rows = SizeOfDimension(value, 0);
const int row_bytes = value->bytes / num_rows;
void* pointer = nullptr;
DynamicBuffer buf;
for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
int idx = -1;
pointer = bsearch(&(lookup->data.i32[i]), key->data.i32, num_rows,
sizeof(int32_t), greater);
if (pointer != nullptr) {
idx = (reinterpret_cast<char*>(pointer) - (key->data.raw)) /
sizeof(int32_t);
}
if (idx >= num_rows || idx < 0) {
if (output->type == kTfLiteString) {
buf.AddString(nullptr, 0);
} else {
memset(output->data.raw + i * row_bytes, 0, row_bytes);
}
hits->data.uint8[i] = 0;
} else {
if (output->type == kTfLiteString) {
buf.AddString(GetString(value, idx));
} else {
memcpy(output->data.raw + i * row_bytes,
value->data.raw + idx * row_bytes, row_bytes);
}
hits->data.uint8[i] = 1;
}
}
if (output->type == kTfLiteString) {
buf.WriteToTensorAsVector(output);
}
return kTfLiteOk;
}
|
Vulnerable
|
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
|
2.8685349449324763e+38
| 44 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
| 1 |
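A hedged sketch of the pattern this commit message describes, written against the C-style TfLite types used above; the upstream patch series also routed these checks through *Safe helper variants, so treat this as the shape of the check rather than the literal patch. Each tensor pointer is verified before its first dereference:

/* Sketch: fail with kTfLiteError instead of dereferencing a missing
 * tensor; TF_LITE_ENSURE returns early when the condition is false. */
TfLiteStatus EvalChecked(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != NULL);
  TfLiteTensor* hits = GetOutput(context, node, 1);
  TF_LITE_ENSURE(context, hits != NULL);
  const TfLiteTensor* lookup = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, lookup != NULL);
  /* ...same check for key and value before any use... */
  return kTfLiteOk;
}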
ofpacts_check(struct ofpact ofpacts[], size_t ofpacts_len,
struct flow *flow, ofp_port_t max_ports,
uint8_t table_id, uint8_t n_tables,
enum ofputil_protocol *usable_protocols)
{
struct ofpact *a;
ovs_be16 dl_type = flow->dl_type;
ovs_be16 vlan_tci = flow->vlan_tci;
uint8_t nw_proto = flow->nw_proto;
enum ofperr error = 0;
OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
error = ofpact_check__(usable_protocols, a, flow,
max_ports, table_id, n_tables);
if (error) {
break;
}
}
/* Restore fields that may have been modified. */
flow->dl_type = dl_type;
flow->vlan_tci = vlan_tci;
flow->nw_proto = nw_proto;
return error;
}
|
Safe
|
[
"CWE-125"
] |
ovs
|
9237a63c47bd314b807cda0bd2216264e82edbe8
|
2.6519284232102677e+38
| 24 |
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Justin Pettit <jpettit@ovn.org>
| 0 |
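The referenced fix guards BUNDLE action decoding against reading past the received buffer. A hedged sketch of the length check, independent of the real ofp-actions structs; the 16-bit member size is illustrative of the wire format rather than copied from it:

#include <stddef.h>
#include <stdint.h>

/* Sketch: verify the member list promised by n_slaves actually fits
 * inside the bytes that were received before iterating over it. */
static int bundle_members_fit(size_t msg_len, size_t header_len,
                              uint16_t n_slaves)
{
    size_t need = header_len + (size_t)n_slaves * sizeof(uint16_t);

    return need >= header_len   /* paranoia against wrap-around */
        && need <= msg_len;
}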
static int ext4_write_dquot(struct dquot *dquot)
{
int ret, err;
handle_t *handle;
struct inode *inode;
inode = dquot_to_inode(dquot);
handle = ext4_journal_start(inode,
EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit(dquot);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
|
Safe
|
[
"CWE-20"
] |
linux-2.6
|
4ec110281379826c5cf6ed14735e47027c3c5765
|
2.5622501483566117e+38
| 17 |
ext4: Add sanity checks for the superblock before mounting the filesystem
This avoids insane superblock configurations that could lead to kernel
oops due to null pointer dereferences.
http://bugzilla.kernel.org/show_bug.cgi?id=12371
Thanks to David Maciejak at Fortinet's FortiGuard Global Security
Research Team who discovered this bug independently (but at
approximately the same time) as Thiemo Nagel, who submitted the patch.
Signed-off-by: Thiemo Nagel <thiemo.nagel@ph.tum.de>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@kernel.org
| 0 |
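A hedged sketch of the kind of superblock sanity check this commit message refers to; the thresholds are illustrative, not the exact values the patch used. The idea is to reject geometry whose per-group counts would later feed a division or array-size computation:

/* Sketch: refuse geometry that would oops later, e.g. a zero
 * s_blocks_per_group reaching block-group arithmetic in the mount
 * path. Limits here are illustrative (8 * max blocksize). */
static int ext4_sb_geometry_sane(const struct ext4_super_block *es)
{
	__u32 bpg = le32_to_cpu(es->s_blocks_per_group);
	__u32 ipg = le32_to_cpu(es->s_inodes_per_group);

	if (bpg == 0 || bpg > 8 * 65536)
		return 0;
	if (ipg == 0)
		return 0;
	return 1;
}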
mbfl_buffer_converter_reset(mbfl_buffer_converter *convd)
{
mbfl_memory_device_reset(&convd->device);
}
|
Safe
|
[
"CWE-119"
] |
php-src
|
64f42c73efc58e88671ad76b6b6bc8e2b62713e1
|
2.763573799352209e+38
| 4 |
Fixed bug #71906: AddressSanitizer: negative-size-param (-1) in mbfl_strcut
| 0 |
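The referenced bug (#71906) was a negative length reaching a memcpy-style copy inside mbfl_strcut. A hedged sketch of the clamping fix pattern with illustrative names, not the real mbfilter API:

#include <stddef.h>
#include <string.h>

/* Sketch: clamp offset and length into [0, src_len] so the copy size
 * can never be negative (and thus never wraps when cast to size_t). */
static size_t safe_cut(char *dst, const char *src, size_t src_len,
                       long from, long len)
{
    if (from < 0 || (size_t)from > src_len || len <= 0)
        return 0;
    if ((size_t)len > src_len - (size_t)from)
        len = (long)(src_len - (size_t)from);
    memcpy(dst, src + from, (size_t)len);
    return (size_t)len;
}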
CImg<floatT> get_elevation3d(CImgList<tf>& primitives, CImgList<tc>& colors, const CImg<te>& elevation) const {
if (!is_sameXY(elevation) || elevation._depth>1 || elevation._spectrum>1)
throw CImgArgumentException(_cimg_instance
"get_elevation3d(): Instance and specified elevation (%u,%u,%u,%u,%p) "
"have incompatible dimensions.",
cimg_instance,
elevation._width,elevation._height,elevation._depth,
elevation._spectrum,elevation._data);
if (is_empty()) return *this;
float m, M = (float)max_min(m);
if (M==m) ++M;
colors.assign();
const unsigned int size_x1 = _width - 1, size_y1 = _height - 1;
for (unsigned int y = 0; y<size_y1; ++y)
for (unsigned int x = 0; x<size_x1; ++x) {
const unsigned char
r = (unsigned char)(((*this)(x,y,0) - m)*255/(M-m)),
g = (unsigned char)(_spectrum>1?((*this)(x,y,1) - m)*255/(M-m):r),
b = (unsigned char)(_spectrum>2?((*this)(x,y,2) - m)*255/(M-m):_spectrum>1?0:r);
CImg<tc>::vector((tc)r,(tc)g,(tc)b).move_to(colors);
}
const typename CImg<te>::_functor2d_int func(elevation);
return elevation3d(primitives,func,0,0,_width - 1.f,_height - 1.f,_width,_height);
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
5.478860598132887e+37
| 24 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
| 0 |
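A hedged sketch of the loader check this commit message describes, reduced to plain C; the names and exact arithmetic are illustrative. Header-declared dimensions are validated against what the file can physically contain, with the multiplication overflow checked first:

#include <stdint.h>

/* Sketch: trust header dimensions only if w*h*bpp both avoids 64-bit
 * overflow and fits within the remaining bytes of the file. */
static int dims_fit_file(uint32_t w, uint32_t h, uint32_t bytes_per_pixel,
                         uint64_t remaining_file_bytes)
{
    uint64_t pixels = (uint64_t)w * h;

    if (bytes_per_pixel == 0 ||
        (pixels != 0 && pixels > UINT64_MAX / bytes_per_pixel))
        return 0;                    /* multiplication would overflow */
    return pixels * bytes_per_pixel <= remaining_file_bytes;
}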