func (string) | target (string) | cwe (list) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64)
---|---|---|---|---|---|---|---|---
ngx_http_lua_ngx_location_capture_multi(lua_State *L)
{
ngx_http_request_t *r;
ngx_http_request_t *sr = NULL; /* subrequest object */
ngx_http_post_subrequest_t *psr;
ngx_http_lua_ctx_t *sr_ctx;
ngx_http_lua_ctx_t *ctx;
ngx_array_t *extra_vars;
ngx_str_t uri;
ngx_str_t args;
ngx_str_t extra_args;
ngx_uint_t flags;
u_char *p;
u_char *q;
size_t len;
size_t nargs;
int rc;
int n;
int always_forward_body = 0;
ngx_uint_t method;
ngx_http_request_body_t *body;
int type;
ngx_buf_t *b;
unsigned vars_action;
ngx_uint_t nsubreqs;
ngx_uint_t index;
size_t sr_statuses_len;
size_t sr_headers_len;
size_t sr_bodies_len;
size_t sr_flags_len;
size_t ofs1, ofs2;
unsigned custom_ctx;
ngx_http_lua_co_ctx_t *coctx;
ngx_http_lua_post_subrequest_data_t *psr_data;
n = lua_gettop(L);
if (n != 1) {
return luaL_error(L, "only one argument is expected, but got %d", n);
}
luaL_checktype(L, 1, LUA_TTABLE);
nsubreqs = lua_objlen(L, 1);
if (nsubreqs == 0) {
return luaL_error(L, "at least one subrequest should be specified");
}
r = ngx_http_lua_get_req(L);
if (r == NULL) {
return luaL_error(L, "no request object found");
}
#if (NGX_HTTP_V2)
if (r->main->stream) {
return luaL_error(L, "http2 requests not supported yet");
}
#endif
ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module);
if (ctx == NULL) {
return luaL_error(L, "no ctx found");
}
ngx_http_lua_check_context(L, ctx, NGX_HTTP_LUA_CONTEXT_REWRITE
| NGX_HTTP_LUA_CONTEXT_ACCESS
| NGX_HTTP_LUA_CONTEXT_CONTENT);
coctx = ctx->cur_co_ctx;
if (coctx == NULL) {
return luaL_error(L, "no co ctx found");
}
ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"lua location capture, uri:\"%V\" c:%ud", &r->uri,
r->main->count);
sr_statuses_len = nsubreqs * sizeof(ngx_int_t);
sr_headers_len = nsubreqs * sizeof(ngx_http_headers_out_t *);
sr_bodies_len = nsubreqs * sizeof(ngx_str_t);
sr_flags_len = nsubreqs * sizeof(uint8_t);
p = ngx_pcalloc(r->pool, sr_statuses_len + sr_headers_len +
sr_bodies_len + sr_flags_len);
if (p == NULL) {
return luaL_error(L, "no memory");
}
coctx->sr_statuses = (void *) p;
p += sr_statuses_len;
coctx->sr_headers = (void *) p;
p += sr_headers_len;
coctx->sr_bodies = (void *) p;
p += sr_bodies_len;
coctx->sr_flags = (void *) p;
coctx->nsubreqs = nsubreqs;
coctx->pending_subreqs = 0;
extra_vars = NULL;
for (index = 0; index < nsubreqs; index++) {
coctx->pending_subreqs++;
lua_rawgeti(L, 1, index + 1);
if (lua_isnil(L, -1)) {
return luaL_error(L, "only array-like tables are allowed");
}
dd("queries query: top %d", lua_gettop(L));
if (lua_type(L, -1) != LUA_TTABLE) {
return luaL_error(L, "the query argument %d is not a table, "
"but a %s",
index, lua_typename(L, lua_type(L, -1)));
}
nargs = lua_objlen(L, -1);
if (nargs != 1 && nargs != 2) {
return luaL_error(L, "query argument %d expecting one or "
"two arguments", index);
}
lua_rawgeti(L, 2, 1); /* queries query uri */
dd("queries query uri: %d", lua_gettop(L));
dd("first arg in first query: %s", lua_typename(L, lua_type(L, -1)));
body = NULL;
ngx_str_null(&extra_args);
if (extra_vars != NULL) {
/* flush out existing elements in the array */
extra_vars->nelts = 0;
}
vars_action = 0;
custom_ctx = 0;
if (nargs == 2) {
/* check out the options table */
lua_rawgeti(L, 2, 2); /* queries query uri opts */
dd("queries query uri opts: %d", lua_gettop(L));
if (lua_type(L, 4) != LUA_TTABLE) {
return luaL_error(L, "expecting table as the 2nd argument for "
"subrequest %d, but got %s", index,
luaL_typename(L, 4));
}
dd("queries query uri opts: %d", lua_gettop(L));
/* check the args option */
lua_getfield(L, 4, "args");
type = lua_type(L, -1);
switch (type) {
case LUA_TTABLE:
ngx_http_lua_process_args_option(r, L, -1, &extra_args);
break;
case LUA_TNIL:
/* do nothing */
break;
case LUA_TNUMBER:
case LUA_TSTRING:
extra_args.data = (u_char *) lua_tolstring(L, -1, &len);
extra_args.len = len;
break;
default:
return luaL_error(L, "Bad args option value");
}
lua_pop(L, 1);
dd("queries query uri opts: %d", lua_gettop(L));
/* check the vars option */
lua_getfield(L, 4, "vars");
switch (lua_type(L, -1)) {
case LUA_TTABLE:
ngx_http_lua_process_vars_option(r, L, -1, &extra_vars);
dd("post process vars top: %d", lua_gettop(L));
break;
case LUA_TNIL:
/* do nothing */
break;
default:
return luaL_error(L, "Bad vars option value");
}
lua_pop(L, 1);
dd("queries query uri opts: %d", lua_gettop(L));
/* check the share_all_vars option */
lua_getfield(L, 4, "share_all_vars");
switch (lua_type(L, -1)) {
case LUA_TNIL:
/* do nothing */
break;
case LUA_TBOOLEAN:
if (lua_toboolean(L, -1)) {
vars_action |= NGX_HTTP_LUA_SHARE_ALL_VARS;
}
break;
default:
return luaL_error(L, "Bad share_all_vars option value");
}
lua_pop(L, 1);
dd("queries query uri opts: %d", lua_gettop(L));
/* check the copy_all_vars option */
lua_getfield(L, 4, "copy_all_vars");
switch (lua_type(L, -1)) {
case LUA_TNIL:
/* do nothing */
break;
case LUA_TBOOLEAN:
if (lua_toboolean(L, -1)) {
vars_action |= NGX_HTTP_LUA_COPY_ALL_VARS;
}
break;
default:
return luaL_error(L, "Bad copy_all_vars option value");
}
lua_pop(L, 1);
dd("queries query uri opts: %d", lua_gettop(L));
/* check the "forward_body" option */
lua_getfield(L, 4, "always_forward_body");
always_forward_body = lua_toboolean(L, -1);
lua_pop(L, 1);
dd("always forward body: %d", always_forward_body);
/* check the "method" option */
lua_getfield(L, 4, "method");
type = lua_type(L, -1);
if (type == LUA_TNIL) {
method = NGX_HTTP_GET;
} else {
if (type != LUA_TNUMBER) {
return luaL_error(L, "Bad http request method");
}
method = (ngx_uint_t) lua_tonumber(L, -1);
}
lua_pop(L, 1);
dd("queries query uri opts: %d", lua_gettop(L));
/* check the "ctx" option */
lua_getfield(L, 4, "ctx");
type = lua_type(L, -1);
if (type != LUA_TNIL) {
if (type != LUA_TTABLE) {
return luaL_error(L, "Bad ctx option value type %s, "
"expected a Lua table",
lua_typename(L, type));
}
custom_ctx = 1;
} else {
lua_pop(L, 1);
}
dd("queries query uri opts ctx?: %d", lua_gettop(L));
/* check the "body" option */
lua_getfield(L, 4, "body");
type = lua_type(L, -1);
if (type != LUA_TNIL) {
if (type != LUA_TSTRING && type != LUA_TNUMBER) {
return luaL_error(L, "Bad http request body");
}
body = ngx_pcalloc(r->pool, sizeof(ngx_http_request_body_t));
if (body == NULL) {
return luaL_error(L, "no memory");
}
q = (u_char *) lua_tolstring(L, -1, &len);
dd("request body: [%.*s]", (int) len, q);
if (len) {
b = ngx_create_temp_buf(r->pool, len);
if (b == NULL) {
return luaL_error(L, "no memory");
}
b->last = ngx_copy(b->last, q, len);
body->bufs = ngx_alloc_chain_link(r->pool);
if (body->bufs == NULL) {
return luaL_error(L, "no memory");
}
body->bufs->buf = b;
body->bufs->next = NULL;
body->buf = b;
}
}
lua_pop(L, 1); /* pop the body */
/* stack: queries query uri opts ctx? */
lua_remove(L, 4);
/* stack: queries query uri ctx? */
dd("queries query uri ctx?: %d", lua_gettop(L));
} else {
method = NGX_HTTP_GET;
}
/* stack: queries query uri ctx? */
p = (u_char *) luaL_checklstring(L, 3, &len);
uri.data = ngx_palloc(r->pool, len);
if (uri.data == NULL) {
return luaL_error(L, "memory allocation error");
}
ngx_memcpy(uri.data, p, len);
uri.len = len;
ngx_str_null(&args);
flags = 0;
rc = ngx_http_parse_unsafe_uri(r, &uri, &args, &flags);
if (rc != NGX_OK) {
dd("rc = %d", (int) rc);
return luaL_error(L, "unsafe uri in argument #1: %s", p);
}
if (args.len == 0) {
if (extra_args.len) {
p = ngx_palloc(r->pool, extra_args.len);
if (p == NULL) {
return luaL_error(L, "no memory");
}
ngx_memcpy(p, extra_args.data, extra_args.len);
args.data = p;
args.len = extra_args.len;
}
} else if (extra_args.len) {
/* concatenate the two parts of args together */
len = args.len + (sizeof("&") - 1) + extra_args.len;
p = ngx_palloc(r->pool, len);
if (p == NULL) {
return luaL_error(L, "no memory");
}
q = ngx_copy(p, args.data, args.len);
*q++ = '&';
ngx_memcpy(q, extra_args.data, extra_args.len);
args.data = p;
args.len = len;
}
ofs1 = ngx_align(sizeof(ngx_http_post_subrequest_t), sizeof(void *));
ofs2 = ngx_align(sizeof(ngx_http_lua_ctx_t), sizeof(void *));
p = ngx_palloc(r->pool, ofs1 + ofs2
+ sizeof(ngx_http_lua_post_subrequest_data_t));
if (p == NULL) {
return luaL_error(L, "no memory");
}
psr = (ngx_http_post_subrequest_t *) p;
p += ofs1;
sr_ctx = (ngx_http_lua_ctx_t *) p;
ngx_http_lua_assert((void *) sr_ctx == ngx_align_ptr(sr_ctx,
sizeof(void *)));
p += ofs2;
psr_data = (ngx_http_lua_post_subrequest_data_t *) p;
ngx_http_lua_assert((void *) psr_data == ngx_align_ptr(psr_data,
sizeof(void *)));
ngx_memzero(sr_ctx, sizeof(ngx_http_lua_ctx_t));
/* set by ngx_memzero:
* sr_ctx->run_post_subrequest = 0
* sr_ctx->free = NULL
* sr_ctx->body = NULL
*/
psr_data->ctx = sr_ctx;
psr_data->pr_co_ctx = coctx;
psr->handler = ngx_http_lua_post_subrequest;
psr->data = psr_data;
rc = ngx_http_lua_subrequest(r, &uri, &args, &sr, psr, 0);
if (rc != NGX_OK) {
return luaL_error(L, "failed to issue subrequest: %d", (int) rc);
}
ngx_http_lua_init_ctx(sr, sr_ctx);
sr_ctx->capture = 1;
sr_ctx->index = index;
sr_ctx->last_body = &sr_ctx->body;
sr_ctx->vm_state = ctx->vm_state;
ngx_http_set_ctx(sr, sr_ctx, ngx_http_lua_module);
rc = ngx_http_lua_adjust_subrequest(sr, method, always_forward_body,
body, vars_action, extra_vars);
if (rc != NGX_OK) {
ngx_http_lua_cancel_subreq(sr);
return luaL_error(L, "failed to adjust the subrequest: %d",
(int) rc);
}
dd("queries query uri opts ctx? %d", lua_gettop(L));
/* stack: queries query uri ctx? */
if (custom_ctx) {
ngx_http_lua_ngx_set_ctx_helper(L, sr, sr_ctx, -1);
lua_pop(L, 3);
} else {
lua_pop(L, 2);
}
/* stack: queries */
}
if (extra_vars) {
ngx_array_destroy(extra_vars);
}
ctx->no_abort = 1;
return lua_yield(L, 0);
}
|
Safe
|
[
"CWE-444"
] |
lua-nginx-module
|
9ab38e8ee35fc08a57636b1b6190dca70b0076fa
|
1.7922138449341912e+38
| 507 |
bugfix: prevented request smuggling in the ngx.location.capture API.
Signed-off-by: Yichun Zhang (agentzh) <yichun@openresty.com>
| 0 |
static int LogCompare(const void *x,const void *y)
{
register const char
**p,
**q;
p=(const char **) x;
q=(const char **) y;
return(LocaleCompare(*p,*q));
}
|
Safe
|
[
"CWE-476"
] |
ImageMagick
|
107ce8577e818cf4801e5a59641cb769d645cc95
|
1.2618549198803832e+36
| 10 |
https://github.com/ImageMagick/ImageMagick/issues/1224
| 0 |
int findDirUplevelToDirContainingFile(J9StringBuffer **result, char *pathEnvar, char pathSeparator, char *fileInPath, int upLevels, int elementsToSkip) {
char *paths;
int rc;
/* Get the list of paths */
paths = getenv(pathEnvar);
if (!paths) {
return FALSE;
}
/* find the directory */
rc = findDirContainingFile(result, paths, pathSeparator, fileInPath, elementsToSkip);
/* Now move upLevel to it - this may not work for directories of form
/aaa/bbb/.. ... and so on.
If that is a problem, could always use /.. to move up.
*/
if (rc) {
for (; upLevels > 0; upLevels--) {
truncatePath(jvmBufferData(*result));
}
}
return rc;
}
|
Safe
|
[
"CWE-119"
] |
openj9
|
0971f22d88f42cf7332364ad7430e9bd8681c970
|
8.663418745168608e+37
| 24 |
Clean up jio_snprintf and jio_vfprintf
Fixes https://bugs.eclipse.org/bugs/show_bug.cgi?id=543659
Signed-off-by: Peter Bain <peter_bain@ca.ibm.com>
| 0 |
void HttpsUpstream::attach_downstream(std::unique_ptr<Downstream> downstream) {
assert(!downstream_);
downstream_ = std::move(downstream);
}
|
Safe
|
[] |
nghttp2
|
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
|
3.120158970835905e+38
| 4 |
nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full.
| 0 |
static int b_size (lua_State *L) {
Header h;
const char *fmt = luaL_checkstring(L, 1);
size_t pos = 0;
defaultoptions(&h);
while (*fmt) {
int opt = *fmt++;
size_t size = optsize(L, opt, &fmt);
pos += gettoalign(pos, &h, opt, size);
if (opt == 's')
luaL_argerror(L, 1, "option 's' has no fixed size");
else if (opt == 'c' && size == 0)
luaL_argerror(L, 1, "option 'c0' has no fixed size");
if (!isalnum(opt))
controloptions(L, opt, &fmt, &h);
pos += size;
}
lua_pushinteger(L, pos);
return 1;
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
redis
|
1eb08bcd4634ae42ec45e8284923ac048beaa4c3
|
1.0390387159624438e+38
| 20 |
Security: update Lua struct package for security.
During an audit, Apple found that the "struct" Lua package
we ship with Redis (http://www.inf.puc-rio.br/~roberto/struct/) contains
a security problem. A bound-checking statement fails because of integer
overflow. The bug exists since we initially integrated this package with
Lua, when scripting was introduced, so every version of Redis with
EVAL/EVALSHA capabilities exposed is affected.
Instead of just fixing the bug, the library was updated to the latest
version shipped by the author.
| 0 |
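The commit message above attributes the bug to a bound-checking statement defeated by integer overflow. A minimal, hypothetical C sketch of the difference between a wrapping and a non-wrapping check (illustrative only, not the actual struct-package code):

```c
#include <stddef.h>

/* Broken: if pos + size wraps around SIZE_MAX, the sum is small and the
 * check passes even though the access is far out of bounds. */
static int within_bounds_unsafe(size_t pos, size_t size, size_t len)
{
    return pos + size <= len;
}

/* Safer: rearranged so no intermediate result can overflow. */
static int within_bounds_safe(size_t pos, size_t size, size_t len)
{
    return size <= len && pos <= len - size;
}
```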
pq_init(void)
{
PqSendBufferSize = PQ_SEND_BUFFER_SIZE;
PqSendBuffer = MemoryContextAlloc(TopMemoryContext, PqSendBufferSize);
PqSendPointer = PqSendStart = PqRecvPointer = PqRecvLength = 0;
PqCommBusy = false;
PqCommReadingMsg = false;
DoingCopyOut = false;
on_proc_exit(socket_close, 0);
}
|
Safe
|
[
"CWE-89"
] |
postgres
|
2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b
|
2.7291004338016074e+38
| 10 |
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used to hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
| 0 |
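As a sketch of the guard the message describes: PqCommReadingMsg is the variable named in the commit, but the helper names and shapes below are simplified stand-ins, not PostgreSQL's actual pqcomm.c API.

```c
#include <stdbool.h>

static bool PqCommReadingMsg = false;

/* Mark the start of reading a protocol message; an ERROR raised while this
 * flag is set cannot safely resume, because only part of a message may have
 * been consumed and protocol sync would be lost. */
static void start_msg_read(void)
{
    PqCommReadingMsg = true;
}

/* Mark the message as fully consumed; ordinary error recovery is safe again. */
static void end_msg_read(void)
{
    PqCommReadingMsg = false;
}

/* Error path: if we were mid-message, force connection termination, much as
 * a critical section promotes ERROR to PANIC. */
static void on_error(void (*terminate_connection)(void))
{
    if (PqCommReadingMsg)
        terminate_connection();
}
```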
TEE_Result syscall_authenc_init(unsigned long state, const void *nonce,
size_t nonce_len, size_t tag_len,
size_t aad_len, size_t payload_len)
{
TEE_Result res;
struct tee_cryp_state *cs;
struct tee_ta_session *sess;
struct tee_obj *o;
struct tee_cryp_obj_secret *key;
res = tee_ta_get_current_session(&sess);
if (res != TEE_SUCCESS)
return res;
res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
TEE_MEMORY_ACCESS_READ |
TEE_MEMORY_ACCESS_ANY_OWNER,
(uaddr_t)nonce, nonce_len);
if (res != TEE_SUCCESS)
return res;
res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
if (res != TEE_SUCCESS)
return res;
res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o);
if (res != TEE_SUCCESS)
return res;
if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0)
return TEE_ERROR_BAD_PARAMETERS;
key = o->attr;
res = crypto_authenc_init(cs->ctx, cs->algo, cs->mode,
(uint8_t *)(key + 1), key->key_size,
nonce, nonce_len, tag_len, aad_len,
payload_len);
if (res != TEE_SUCCESS)
return res;
cs->ctx_finalize = (tee_cryp_ctx_finalize_func_t)crypto_authenc_final;
return TEE_SUCCESS;
}
|
Vulnerable
|
[
"CWE-327"
] |
optee_os
|
34a08bec755670ea0490cb53bbc68058cafc69b6
|
2.6062019562480178e+38
| 42 |
cryp: prevent direct calls to update and final functions
With inconsistent or malformed data it has been possible to call
"update" and "final" crypto functions directly. Using a fuzzer tool [1]
we have seen that this results in asserts, i.e., a crash that
potentially could leak sensitive information.
By setting the state (initialized) in the crypto context (i.e., the
tee_cryp_state) at the end of all syscall_*_init functions and then add
a check of the state at the beginning of all update and final functions,
we prevent direct entrance to the "update" and "final" functions.
[1] https://github.com/MartijnB/optee_fuzzer
Fixes: OP-TEE-2019-0021
Signed-off-by: Joakim Bech <joakim.bech@linaro.org>
Reported-by: Martijn Bogaard <bogaard@riscure.com>
Acked-by: Jerome Forissier <jerome.forissier@linaro.org>
Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org>
| 1 |
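A hedged sketch of the state-guard fix the message describes: mark the context initialized only at the very end of init, and check that mark at the top of every update/final path. Types and field names here are illustrative, not OP-TEE's exact internals (TEE_ERROR_BAD_STATE is the standard GlobalPlatform code).

```c
typedef unsigned int TEE_Result;
#define TEE_SUCCESS         0x00000000
#define TEE_ERROR_BAD_STATE 0xFFFF0007

enum cryp_state { CRYP_STATE_UNINITIALIZED = 0, CRYP_STATE_INITIALIZED };

struct cryp_ctx_sketch {
    enum cryp_state state;
    /* ... algo, mode, key material ... */
};

TEE_Result authenc_init_sketch(struct cryp_ctx_sketch *cs)
{
    /* ... existing init work; return early on any failure ... */
    cs->state = CRYP_STATE_INITIALIZED;  /* set only once init fully succeeds */
    return TEE_SUCCESS;
}

TEE_Result authenc_update_sketch(struct cryp_ctx_sketch *cs)
{
    if (cs->state != CRYP_STATE_INITIALIZED)
        return TEE_ERROR_BAD_STATE;      /* blocks direct calls to update/final */
    /* ... */
    return TEE_SUCCESS;
}
```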
static apr_byte_t oidc_validate_post_logout_url(request_rec *r, const char *url,
char **err_str, char **err_desc) {
apr_uri_t uri;
const char *c_host = NULL;
if (apr_uri_parse(r->pool, url, &uri) != APR_SUCCESS) {
*err_str = apr_pstrdup(r->pool, "Malformed URL");
*err_desc = apr_psprintf(r->pool, "Logout URL malformed: %s", url);
oidc_error(r, "%s: %s", *err_str, *err_desc);
return FALSE;
}
c_host = oidc_get_current_url_host(r);
if ((uri.hostname != NULL)
&& ((strstr(c_host, uri.hostname) == NULL)
|| (strstr(uri.hostname, c_host) == NULL))) {
*err_str = apr_pstrdup(r->pool, "Invalid Request");
*err_desc =
apr_psprintf(r->pool,
"logout value \"%s\" does not match the hostname of the current request \"%s\"",
apr_uri_unparse(r->pool, &uri, 0), c_host);
oidc_error(r, "%s: %s", *err_str, *err_desc);
return FALSE;
} else if ((uri.hostname == NULL) && (strstr(url, "/") != url)) {
*err_str = apr_pstrdup(r->pool, "Malformed URL");
*err_desc =
apr_psprintf(r->pool,
"No hostname was parsed and it does not seem to be relative, i.e starting with '/': %s",
url);
oidc_error(r, "%s: %s", *err_str, *err_desc);
return FALSE;
}
/* validate the URL to prevent HTTP header splitting */
if (((strstr(url, "\n") != NULL) || strstr(url, "\r") != NULL)) {
*err_str = apr_pstrdup(r->pool, "Invalid Request");
*err_desc =
apr_psprintf(r->pool,
"logout value \"%s\" contains illegal \"\n\" or \"\r\" character(s)",
url);
oidc_error(r, "%s: %s", *err_str, *err_desc);
return FALSE;
}
return TRUE;
}
|
Safe
|
[
"CWE-601"
] |
mod_auth_openidc
|
ce37080c6aea30aabae8b4a9b4eea7808445cc8e
|
1.6804805248777223e+38
| 46 |
2.4.0.2 oops
Signed-off-by: Hans Zandbelt <hans.zandbelt@zmartzone.eu>
| 0 |
sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len)
{ SF_PRIVATE *psf ;
sf_count_t count, extra ;
VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;
if (psf->file.mode == SFM_WRITE)
{ psf->error = SFE_NOT_READMODE ;
return 0 ;
} ;
if (len % psf->sf.channels)
{ psf->error = SFE_BAD_READ_ALIGN ;
return 0 ;
} ;
if (len <= 0 || psf->read_current >= psf->sf.frames)
{ psf_memset (ptr, 0, len * sizeof (float)) ;
return 0 ;
} ;
if (psf->read_float == NULL || psf->seek == NULL)
{ psf->error = SFE_UNIMPLEMENTED ;
return 0 ;
} ;
if (psf->last_op != SFM_READ)
if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
return 0 ;
count = psf->read_float (psf, ptr, len) ;
if (psf->read_current + count / psf->sf.channels <= psf->sf.frames)
psf->read_current += count / psf->sf.channels ;
else
{ count = (psf->sf.frames - psf->read_current) * psf->sf.channels ;
extra = len - count ;
psf_memset (ptr + count, 0, extra * sizeof (float)) ;
psf->read_current = psf->sf.frames ;
} ;
psf->last_op = SFM_READ ;
return count ;
} /* sf_read_float */
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
libsndfile
|
708e996c87c5fae77b104ccfeb8f6db784c32074
|
3.6097577580360706e+37
| 45 |
src/ : Move to a variable length header buffer
Previously, the `psf->header` buffer was a fixed length specified by
`SF_HEADER_LEN` which was set to `12292`. This was problematic for
two reasons; this value was un-necessarily large for the majority
of files and too small for some others.
Now the size of the header buffer starts at 256 bytes and grows as
necessary up to a maximum of 100k.
| 0 |
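A sketch of the growth policy the message describes (start at 256 bytes, grow on demand, cap at 100k). The structure and function names are hypothetical, not libsndfile's internals.

```c
#include <stdlib.h>

#define HEADER_INITIAL 256
#define HEADER_MAX     (100 * 1024)

typedef struct {
    unsigned char *ptr;
    size_t        len;
} header_buf;

/* Ensure the buffer can hold `needed` bytes, doubling up to the cap. */
static int header_reserve(header_buf *h, size_t needed)
{
    size_t newlen;
    unsigned char *p;

    if (needed <= h->len)
        return 0;
    if (needed > HEADER_MAX)
        return -1;                 /* refuse pathological headers */

    newlen = h->len ? h->len : HEADER_INITIAL;
    while (newlen < needed)
        newlen *= 2;
    if (newlen > HEADER_MAX)
        newlen = HEADER_MAX;

    p = realloc(h->ptr, newlen);
    if (p == NULL)
        return -1;
    h->ptr = p;
    h->len = newlen;
    return 0;
}
```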
set_tmx(VALUE self, struct tmx *tmx)
{
get_d1(self);
tmx->dat = (void *)dat;
tmx->funcs = &tmx_funcs;
}
|
Safe
|
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
|
3.9028670077923257e+37
| 6 |
Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
| 0 |
static inline int may_create(struct inode *dir, struct dentry *child)
{
struct user_namespace *s_user_ns;
audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
s_user_ns = dir->i_sb->s_user_ns;
if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
!kgid_has_mapping(s_user_ns, current_fsgid()))
return -EOVERFLOW;
return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
|
Safe
|
[
"CWE-362",
"CWE-399"
] |
linux
|
49d31c2f389acfe83417083e1208422b4091cd9e
|
2.1453777288328676e+38
| 14 |
dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
| 0 |
comparetup_cluster(const SortTuple *a, const SortTuple *b,
Tuplesortstate *state)
{
SortSupport sortKey = state->sortKeys;
HeapTuple ltup;
HeapTuple rtup;
TupleDesc tupDesc;
int nkey;
int32 compare;
Datum datum1,
datum2;
bool isnull1,
isnull2;
AttrNumber leading = state->indexInfo->ii_KeyAttrNumbers[0];
/* Be prepared to compare additional sort keys */
ltup = (HeapTuple) a->tuple;
rtup = (HeapTuple) b->tuple;
tupDesc = state->tupDesc;
/* Compare the leading sort key, if it's simple */
if (leading != 0)
{
compare = ApplySortComparator(a->datum1, a->isnull1,
b->datum1, b->isnull1,
sortKey);
if (compare != 0)
return compare;
if (sortKey->abbrev_converter)
{
datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
compare = ApplySortAbbrevFullComparator(datum1, isnull1,
datum2, isnull2,
sortKey);
}
if (compare != 0 || state->nKeys == 1)
return compare;
/* Compare additional columns the hard way */
sortKey++;
nkey = 1;
}
else
{
/* Must compare all keys the hard way */
nkey = 0;
}
if (state->indexInfo->ii_Expressions == NULL)
{
/* If not expression index, just compare the proper heap attrs */
for (; nkey < state->nKeys; nkey++, sortKey++)
{
AttrNumber attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
compare = ApplySortComparator(datum1, isnull1,
datum2, isnull2,
sortKey);
if (compare != 0)
return compare;
}
}
else
{
/*
* In the expression index case, compute the whole index tuple and
* then compare values. It would perhaps be faster to compute only as
* many columns as we need to compare, but that would require
* duplicating all the logic in FormIndexDatum.
*/
Datum l_index_values[INDEX_MAX_KEYS];
bool l_index_isnull[INDEX_MAX_KEYS];
Datum r_index_values[INDEX_MAX_KEYS];
bool r_index_isnull[INDEX_MAX_KEYS];
TupleTableSlot *ecxt_scantuple;
/* Reset context each time to prevent memory leakage */
ResetPerTupleExprContext(state->estate);
ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
l_index_values, l_index_isnull);
ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
r_index_values, r_index_isnull);
for (; nkey < state->nKeys; nkey++, sortKey++)
{
compare = ApplySortComparator(l_index_values[nkey],
l_index_isnull[nkey],
r_index_values[nkey],
r_index_isnull[nkey],
sortKey);
if (compare != 0)
return compare;
}
}
return 0;
}
|
Safe
|
[
"CWE-209"
] |
postgres
|
804b6b6db4dcfc590a468e7be390738f9f7755fb
|
1.1544849277596637e+38
| 109 |
Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit.
| 0 |
test_bson_clear (void)
{
bson_t *doc = NULL;
bson_clear (&doc);
BSON_ASSERT (doc == NULL);
doc = bson_new ();
BSON_ASSERT (doc != NULL);
bson_clear (&doc);
BSON_ASSERT (doc == NULL);
}
|
Safe
|
[
"CWE-125"
] |
libbson
|
42900956dc461dfe7fb91d93361d10737c1602b3
|
3.2686031338631117e+38
| 12 |
CDRIVER-2269 Check for zero string length in codewscope
| 0 |
CImg<T>& draw_text(const int x0, const int y0,
const char *const text,
const int, const tc *const background_color,
const float opacity, const CImgList<t>& font, ...) {
if (!font) return *this;
CImg<charT> tmp(2048);
std::va_list ap; va_start(ap,font);
cimg_vsnprintf(tmp,tmp._width,text,ap); va_end(ap);
return _draw_text(x0,y0,tmp,(tc*)0,background_color,opacity,font,false);
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
4.96641925124956e+37
| 10 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
| 0 |
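A hedged sketch of such a plausibility guard (illustrative only, not CImg's actual check):

```c
/* Reject header dimensions that could not possibly fit in the file;
 * division avoids overflow in width * height * bpp. */
static int plausible_dims(long file_size, long width, long height, long bpp)
{
    if (file_size <= 0 || width <= 0 || height <= 0 || bpp <= 0)
        return 0;
    if (width > file_size / bpp)
        return 0;
    if (height > file_size / (width * bpp))
        return 0;
    return 1;
}
```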
static unsigned int read_le32(const unsigned char *p)
{
return ((unsigned int) p[0])
| ((unsigned int) p[1] << 8)
| ((unsigned int) p[2] << 16)
| ((unsigned int) p[3] << 24);
}
|
Safe
|
[
"CWE-787"
] |
fluent-bit
|
cadff53c093210404aed01c4cf586adb8caa07af
|
1.3045644637482011e+38
| 7 |
gzip: fix compression size calculation (oss-fuzz 27261)
Signed-off-by: davkor <david@adalogics.com>
| 0 |
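The row above shows the little-endian reader paired with a gzip trailer-size fix. As a complementary hypothetical sketch (not fluent-bit's actual patch), the matching writer a gzip trailer needs for its CRC32 and ISIZE fields:

```c
#include <stdint.h>

/* Mirror of read_le32: encode a 32-bit value in little-endian byte order. */
static void write_le32(unsigned char *p, uint32_t v)
{
    p[0] = (unsigned char) (v & 0xff);
    p[1] = (unsigned char) ((v >> 8) & 0xff);
    p[2] = (unsigned char) ((v >> 16) & 0xff);
    p[3] = (unsigned char) ((v >> 24) & 0xff);
}
```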
process_close(u_int32_t id)
{
int r, handle, ret, status = SSH2_FX_FAILURE;
if ((r = get_handle(iqueue, &handle)) != 0)
fatal("%s: buffer error: %s", __func__, ssh_err(r));
debug3("request %u: close handle %u", id, handle);
handle_log_close(handle, NULL);
ret = handle_close(handle);
status = (ret == -1) ? errno_to_portable(errno) : SSH2_FX_OK;
send_status(id, status);
}
|
Safe
|
[
"CWE-732",
"CWE-703",
"CWE-269"
] |
src
|
a6981567e8e215acc1ef690c8dbb30f2d9b00a19
|
2.329238594777156e+38
| 13 |
disallow creation (of empty files) in read-only mode; reported by
Michal Zalewski, feedback & ok deraadt@
| 0 |
DEFUN (no_neighbor_capability_dynamic,
no_neighbor_capability_dynamic_cmd,
NO_NEIGHBOR_CMD2 "capability dynamic",
NO_STR
NEIGHBOR_STR
NEIGHBOR_ADDR_STR2
"Advertise capability to the peer\n"
"Advertise dynamic capability to this neighbor\n")
{
return peer_flag_unset_vty (vty, argv[0], PEER_FLAG_DYNAMIC_CAPABILITY);
}
|
Safe
|
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
|
2.8644130186056127e+38
| 11 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <paul.jakma@sun.com>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap-specific
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontiguous number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
| 0 |
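A minimal sketch of the rule the changelog stresses, validating a capability TLV's declared length against the remaining buffer *before* any memcpy(). The structures are illustrative, not bgpd's actual ones.

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct cap_tlv_hdr {
    uint8_t code;
    uint8_t length;
};

/* Returns payload length on success, -1 on a truncated or oversized TLV. */
static int parse_capability(const uint8_t *buf, size_t remaining,
                            uint8_t *out, size_t out_size)
{
    struct cap_tlv_hdr hdr;

    if (remaining < sizeof(hdr))
        return -1;                              /* truncated header */
    memcpy(&hdr, buf, sizeof(hdr));
    if (hdr.length > remaining - sizeof(hdr) || hdr.length > out_size)
        return -1;                              /* length check BEFORE memcpy */
    memcpy(out, buf + sizeof(hdr), hdr.length);
    return hdr.length;
}
```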
TRIO_PUBLIC_STRING int trio_string_length TRIO_ARGS1((self), trio_string_t* self)
{
assert(self);
if (self->length == 0)
{
self->length = trio_length(self->content);
}
return self->length;
}
|
Safe
|
[
"CWE-190",
"CWE-125"
] |
FreeRDP
|
05cd9ea2290d23931f615c1b004d4b2e69074e27
|
1.6628806666576737e+38
| 10 |
Fixed TrioParse and trio_length limits.
CVE-2020-4030 thanks to @antonio-morales for finding this.
| 0 |
void dm_destroy_immediate(struct mapped_device *md)
{
__dm_destroy(md, false);
}
|
Safe
|
[
"CWE-362"
] |
linux
|
b9a41d21dceadf8104812626ef85dc56ee8a60ed
|
1.832165201179498e+38
| 4 |
dm: fix race between dm_get_from_kobject() and __dm_destroy()
The following BUG_ON was hit when testing repeat creation and removal of
DM devices:
kernel BUG at drivers/md/dm.c:2919!
CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44
Call Trace:
[<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a
[<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e
[<ffffffff817b46d1>] ? mutex_lock+0x26/0x44
[<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf
[<ffffffff811de257>] kernfs_seq_show+0x23/0x25
[<ffffffff81199118>] seq_read+0x16f/0x325
[<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f
[<ffffffff8117b625>] __vfs_read+0x26/0x9d
[<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44
[<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9
[<ffffffff8117be9d>] vfs_read+0x8f/0xcf
[<ffffffff81193e34>] ? __fdget_pos+0x12/0x41
[<ffffffff8117c686>] SyS_read+0x4b/0x76
[<ffffffff817b606e>] system_call_fastpath+0x12/0x71
The bug can be easily triggered, if an extra delay (e.g. 10ms) is added
between the test of DMF_FREEING & DMF_DELETING and dm_get() in
dm_get_from_kobject().
To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and
dm_get() are done in an atomic way, so _minor_lock is used.
The other callers of dm_get() have also been checked to be OK: some
callers invoke dm_get() under _minor_lock, some callers invoke it under
_hash_lock, and dm_start_request() invoke it after increasing
md->open_count.
Cc: stable@vger.kernel.org
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
| 0 |
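A userspace-flavoured sketch of the fix the message describes: the flag test and the reference bump become one atomic step under _minor_lock. pthread primitives stand in for the kernel's, and all names are illustrative.

```c
#include <pthread.h>
#include <stddef.h>

#define DMF_FREEING  (1UL << 0)
#define DMF_DELETING (1UL << 1)

struct md_sketch {
    unsigned long flags;
    int           holders;
};

static pthread_mutex_t minor_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the device with a reference held, or NULL if it is being torn
 * down. Testing the flags and taking the reference under the same lock
 * closes the window that __dm_destroy() could otherwise race through. */
static struct md_sketch *get_from_kobject_sketch(struct md_sketch *md)
{
    pthread_mutex_lock(&minor_lock);
    if (md->flags & (DMF_FREEING | DMF_DELETING)) {
        pthread_mutex_unlock(&minor_lock);
        return NULL;
    }
    md->holders++;                 /* dm_get() equivalent */
    pthread_mutex_unlock(&minor_lock);
    return md;
}
```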
correct_total (int *weights,
int n_x,
int n_y,
int total,
double overall_alpha)
{
int correction = (int)(0.5 + 65536 * overall_alpha) - total;
int remaining, c, d, i;
if (correction != 0)
{
remaining = correction;
for (d = 1, c = correction; c != 0 && remaining != 0; d++, c = correction / d)
for (i = n_x * n_y - 1; i >= 0 && c != 0 && remaining != 0; i--)
if (*(weights + i) + c >= 0)
{
*(weights + i) += c;
remaining -= c;
if ((0 < remaining && remaining < c) ||
(0 > remaining && remaining > c))
c = remaining;
}
}
}
|
Safe
|
[] |
gdk-pixbuf
|
ffec86ed5010c5a2be14f47b33bcf4ed3169a199
|
1.740224447478345e+38
| 24 |
pixops: Be more careful about integer overflow
Our loader code is supposed to handle out-of-memory and overflow
situations gracefully, reporting errors instead of aborting. But
if you load an image at a specific size, we also execute our
scaling code, which was not careful enough about overflow in some
places.
This commit makes the scaling code silently return if it fails to
allocate filter tables. This is the best we can do, since
gdk_pixbuf_scale() is not taking a GError.
https://bugzilla.gnome.org/show_bug.cgi?id=752297
| 0 |
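A sketch of the overflow-aware allocation the message implies for the filter tables, returning NULL (so the caller can silently bail) rather than computing a wrapped byte count. Hypothetical, not the actual pixops code.

```c
#include <stdlib.h>
#include <stdint.h>

/* Allocate an n_x * n_y table of ints, or NULL if the multiplication
 * would overflow or memory is exhausted. */
static int *alloc_filter_table(size_t n_x, size_t n_y)
{
    if (n_x != 0 && n_y > SIZE_MAX / n_x)
        return NULL;                        /* n_x * n_y would overflow */
    if (n_x * n_y > SIZE_MAX / sizeof(int))
        return NULL;                        /* byte count would overflow */
    return malloc(n_x * n_y * sizeof(int));
}
```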
uninit_util(void)
{
#if defined(_MSC_VER) && defined (_DEBUG)
_CrtCheckMemory();
#endif
if (stats_drift_file) {
free(stats_drift_file);
free(stats_temp_file);
stats_drift_file = NULL;
stats_temp_file = NULL;
}
if (key_file_name) {
free(key_file_name);
key_file_name = NULL;
}
filegen_unregister("peerstats");
filegen_unregister("loopstats");
filegen_unregister("clockstats");
filegen_unregister("rawstats");
filegen_unregister("sysstats");
filegen_unregister("protostats");
#ifdef AUTOKEY
filegen_unregister("cryptostats");
#endif /* AUTOKEY */
#ifdef DEBUG_TIMING
filegen_unregister("timingstats");
#endif /* DEBUG_TIMING */
#if defined(_MSC_VER) && defined (_DEBUG)
_CrtCheckMemory();
#endif
}
|
Safe
|
[
"CWE-20"
] |
ntp
|
52e977d79a0c4ace997e5c74af429844da2f27be
|
2.241883598018502e+37
| 32 |
[Bug 1773] openssl not detected during ./configure.
[Bug 1774] Segfaults if cryptostats enabled and built without OpenSSL.
| 0 |
int format_get_length(const char *str)
{
GString *tmp;
int len;
gboolean utf8;
int adv = 0;
g_return_val_if_fail(str != NULL, 0);
utf8 = is_utf8() && g_utf8_validate(str, -1, NULL);
tmp = g_string_new(NULL);
len = 0;
while (*str != '\0') {
if (*str == '%' && str[1] != '\0') {
str++;
if (*str != '%') {
adv = format_expand_styles(tmp, &str, NULL);
str += adv;
if (adv)
continue;
}
/* %% or unknown %code, written as-is */
if (*str != '%')
len++;
}
len += advance(&str, utf8);
}
g_string_free(tmp, TRUE);
return len;
}
|
Safe
|
[
"CWE-476"
] |
irssi
|
6c6c42e3d1b49d90aacc0b67f8540471cae02a1d
|
3.1129104061054036e+38
| 34 |
Merge branch 'security' into 'master'
See merge request !7
| 0 |
TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) {
stream_error_on_invalid_http_messaging_ = true;
initialize();
MockStreamCallbacks request_callbacks;
request_encoder_->getStream().addCallbacks(request_callbacks);
TestRequestHeaderMapImpl request_headers;
HttpTestUtility::addDefaultHeaders(request_headers);
EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));
request_encoder_->encodeHeaders(request_headers, true);
// Buffer client data to avoid mock recursion causing lifetime issues.
ON_CALL(server_connection_, write(_, _))
.WillByDefault(
Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); }));
TestResponseHeaderMapImpl continue_headers{{":status", "100"}};
response_encoder_->encodeHeaders(continue_headers, true);
// Flush pending data.
EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _));
setupDefaultConnectionMocks();
auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_);
EXPECT_TRUE(status.ok());
EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value());
expectDetailsRequest("http2.violation.of.messaging.rule");
}
|
Vulnerable
|
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
|
2.8397875002823365e+38
| 29 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open the flow-control window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <mklein@lyft.com>
| 1 |
void OSDService::send_pg_created(pg_t pgid)
{
dout(20) << __func__ << dendl;
if (osdmap->require_osd_release >= CEPH_RELEASE_LUMINOUS) {
monc->send_mon_message(new MOSDPGCreated(pgid));
}
}
|
Safe
|
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
|
2.6221522217957024e+38
| 7 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <sage@redhat.com>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
| 0 |
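In outline, the challenge flow described above looks roughly like the following. The mixing step is a stand-in (the real cephx code encrypts with the shared key), and every name here is hypothetical, not Ceph's CephxProtocol.

```c
#include <stdint.h>

/* Server side: issue a fresh random challenge per connection attempt. */
struct authorizer_challenge {
    uint64_t server_challenge;
};

/* Client side: bind the reply to both the server's challenge and the
 * authorizer's own nonce, proving the client can decrypt the challenge
 * for this specific connection instance. */
static uint64_t challenge_reply_sketch(uint64_t server_challenge,
                                       uint64_t client_nonce,
                                       uint64_t (*encrypt_with_shared_key)(uint64_t))
{
    return encrypt_with_shared_key(server_challenge + client_nonce);
}
```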
connection_ap_handshake_rewrite_and_attach(entry_connection_t *conn,
origin_circuit_t *circ,
crypt_path_t *cpath)
{
socks_request_t *socks = conn->socks_request;
const or_options_t *options = get_options();
connection_t *base_conn = ENTRY_TO_CONN(conn);
time_t now = time(NULL);
rewrite_result_t rr;
/* First we'll do the rewrite part. Let's see if we get a reasonable
* answer.
*/
memset(&rr, 0, sizeof(rr));
connection_ap_handshake_rewrite(conn,&rr);
if (rr.should_close) {
/* connection_ap_handshake_rewrite told us to close the connection:
* either because it sent back an answer, or because it sent back an
* error */
connection_mark_unattached_ap(conn, rr.end_reason);
if (END_STREAM_REASON_DONE == (rr.end_reason & END_STREAM_REASON_MASK))
return 0;
else
return -1;
}
const time_t map_expires = rr.map_expires;
const int automap = rr.automap;
const addressmap_entry_source_t exit_source = rr.exit_source;
/* Now see whether the hostname is bogus. This could happen because of an
* onion hostname whose format we don't recognize. */
hostname_type_t addresstype;
if (!parse_extended_hostname(socks->address, &addresstype)) {
control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
escaped(socks->address));
if (addresstype == BAD_HOSTNAME) {
conn->socks_request->socks_extended_error_code = SOCKS5_HS_BAD_ADDRESS;
}
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
return -1;
}
/* If this is a .exit hostname, strip off the .name.exit part, and
* see whether we're willing to connect there, and and otherwise handle the
* .exit address.
*
* We'll set chosen_exit_name and/or close the connection as appropriate.
*/
if (addresstype == EXIT_HOSTNAME) {
/* If StrictNodes is not set, then .exit overrides ExcludeNodes but
* not ExcludeExitNodes. */
routerset_t *excludeset = options->StrictNodes ?
options->ExcludeExitNodesUnion_ : options->ExcludeExitNodes;
const node_t *node = NULL;
/* If this .exit was added by an AUTOMAP, then it came straight from
* a user. That's not safe. */
if (exit_source == ADDRMAPSRC_AUTOMAP) {
/* Whoops; this one is stale. It must have gotten added earlier?
* (Probably this is not possible, since AllowDotExit no longer
* exists.) */
log_warn(LD_APP,"Stale automapped address for '%s.exit'. Refusing.",
safe_str_client(socks->address));
control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
escaped(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
tor_assert_nonfatal_unreached();
return -1;
}
/* Double-check to make sure there are no .exits coming from
* impossible/weird sources. */
if (exit_source == ADDRMAPSRC_DNS || exit_source == ADDRMAPSRC_NONE) {
/* It shouldn't be possible to get a .exit address from any of these
* sources. */
log_warn(LD_BUG,"Address '%s.exit', with impossible source for the "
".exit part. Refusing.",
safe_str_client(socks->address));
control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
escaped(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
return -1;
}
tor_assert(!automap);
/* Now, find the character before the .(name) part.
* (The ".exit" part got stripped off by "parse_extended_hostname").
*
* We're going to put the exit name into conn->chosen_exit_name, and
* look up a node correspondingly. */
char *s = strrchr(socks->address,'.');
if (s) {
/* The address was of the form "(stuff).(name).exit */
if (s[1] != '\0') {
/* Looks like a real .exit one. */
conn->chosen_exit_name = tor_strdup(s+1);
node = node_get_by_nickname(conn->chosen_exit_name, 0);
if (exit_source == ADDRMAPSRC_TRACKEXIT) {
/* We 5 tries before it expires the addressmap */
conn->chosen_exit_retries = TRACKHOSTEXITS_RETRIES;
}
*s = 0;
} else {
/* Oops, the address was (stuff)..exit. That's not okay. */
log_warn(LD_APP,"Malformed exit address '%s.exit'. Refusing.",
safe_str_client(socks->address));
control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
escaped(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
return -1;
}
} else {
/* It looks like they just asked for "foo.exit". That's a special
* form that means (foo's address).foo.exit. */
conn->chosen_exit_name = tor_strdup(socks->address);
node = node_get_by_nickname(conn->chosen_exit_name, 0);
if (node) {
*socks->address = 0;
node_get_address_string(node, socks->address, sizeof(socks->address));
}
}
/* Now make sure that the chosen exit exists... */
if (!node) {
log_warn(LD_APP,
"Unrecognized relay in exit address '%s.exit'. Refusing.",
safe_str_client(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
return -1;
}
/* ...and make sure that it isn't excluded. */
if (routerset_contains_node(excludeset, node)) {
log_warn(LD_APP,
"Excluded relay in exit address '%s.exit'. Refusing.",
safe_str_client(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
return -1;
}
/* XXXX-1090 Should we also allow foo.bar.exit if ExitNodes is set and
Bar is not listed in it? I say yes, but our revised manpage branch
implies no. */
}
/* Now, we handle everything that isn't a .onion address. */
if (addresstype != ONION_V2_HOSTNAME && addresstype != ONION_V3_HOSTNAME) {
/* Not a hidden-service request. It's either a hostname or an IP,
* possibly with a .exit that we stripped off. We're going to check
* if we're allowed to connect/resolve there, and then launch the
* appropriate request. */
/* Check for funny characters in the address. */
if (address_is_invalid_destination(socks->address, 1)) {
control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
escaped(socks->address));
log_warn(LD_APP,
"Destination '%s' seems to be an invalid hostname. Failing.",
safe_str_client(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
return -1;
}
/* socks->address is a non-onion hostname or IP address.
* If we can't do any non-onion requests, refuse the connection.
* If we have a hostname but can't do DNS, refuse the connection.
* If we have an IP address, but we can't use that address family,
* refuse the connection.
*
* If we can do DNS requests, and we can use at least one address family,
* then we have to resolve the address first. Then we'll know if it
* resolves to a usable address family. */
/* First, check if all non-onion traffic is disabled */
if (!conn->entry_cfg.dns_request && !conn->entry_cfg.ipv4_traffic
&& !conn->entry_cfg.ipv6_traffic) {
log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname "
"or IP address %s because Port has OnionTrafficOnly set (or "
"NoDNSRequest, NoIPv4Traffic, and NoIPv6Traffic).",
safe_str_client(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
return -1;
}
/* Then check if we have a hostname or IP address, and whether DNS or
* the IP address family are permitted. Reject if not. */
tor_addr_t dummy_addr;
int socks_family = tor_addr_parse(&dummy_addr, socks->address);
/* family will be -1 for a non-onion hostname that's not an IP */
if (socks_family == -1) {
if (!conn->entry_cfg.dns_request) {
log_warn(LD_APP, "Refusing to connect to hostname %s "
"because Port has NoDNSRequest set.",
safe_str_client(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
return -1;
}
} else if (socks_family == AF_INET) {
if (!conn->entry_cfg.ipv4_traffic) {
log_warn(LD_APP, "Refusing to connect to IPv4 address %s because "
"Port has NoIPv4Traffic set.",
safe_str_client(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
return -1;
}
} else if (socks_family == AF_INET6) {
if (!conn->entry_cfg.ipv6_traffic) {
log_warn(LD_APP, "Refusing to connect to IPv6 address %s because "
"Port has NoIPv6Traffic set.",
safe_str_client(socks->address));
connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
return -1;
}
} else {
tor_assert_nonfatal_unreached_once();
}
/* See if this is a hostname lookup that we can answer immediately.
* (For example, an attempt to look up the IP address for an IP address.)
*/
if (socks->command == SOCKS_COMMAND_RESOLVE) {
tor_addr_t answer;
/* Reply to resolves immediately if we can. */
if (tor_addr_parse(&answer, socks->address) >= 0) {/* is it an IP? */
/* remember _what_ is supposed to have been resolved. */
strlcpy(socks->address, rr.orig_address, sizeof(socks->address));
connection_ap_handshake_socks_resolved_addr(conn, &answer, -1,
map_expires);
connection_mark_unattached_ap(conn,
END_STREAM_REASON_DONE |
END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
return 0;
}
tor_assert(!automap);
rep_hist_note_used_resolve(now); /* help predict this next time */
} else if (socks->command == SOCKS_COMMAND_CONNECT) {
/* Now see if this is a connect request that we can reject immediately */
tor_assert(!automap);
/* Don't allow connections to port 0. */
if (socks->port == 0) {
log_notice(LD_APP,"Application asked to connect to port 0. Refusing.");
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
return -1;
}
/* You can't make connections to internal addresses, by default.
* Exceptions are begindir requests (where the address is meaningless),
* or cases where you've hand-configured a particular exit, thereby
* making the local address meaningful. */
if (options->ClientRejectInternalAddresses &&
!conn->use_begindir && !conn->chosen_exit_name && !circ) {
/* If we reach this point then we don't want to allow internal
* addresses. Check if we got one. */
tor_addr_t addr;
if (tor_addr_hostname_is_local(socks->address) ||
(tor_addr_parse(&addr, socks->address) >= 0 &&
tor_addr_is_internal(&addr, 0))) {
/* If this is an explicit private address with no chosen exit node,
* then we really don't want to try to connect to it. That's
* probably an error. */
if (conn->is_transparent_ap) {
#define WARN_INTRVL_LOOP 300
static ratelim_t loop_warn_limit = RATELIM_INIT(WARN_INTRVL_LOOP);
char *m;
if ((m = rate_limit_log(&loop_warn_limit, approx_time()))) {
log_warn(LD_NET,
"Rejecting request for anonymous connection to private "
"address %s on a TransPort or NATDPort. Possible loop "
"in your NAT rules?%s", safe_str_client(socks->address),
m);
tor_free(m);
}
} else {
#define WARN_INTRVL_PRIV 300
static ratelim_t priv_warn_limit = RATELIM_INIT(WARN_INTRVL_PRIV);
char *m;
if ((m = rate_limit_log(&priv_warn_limit, approx_time()))) {
log_warn(LD_NET,
"Rejecting SOCKS request for anonymous connection to "
"private address %s.%s",
safe_str_client(socks->address),m);
tor_free(m);
}
}
connection_mark_unattached_ap(conn, END_STREAM_REASON_PRIVATE_ADDR);
return -1;
}
} /* end "if we should check for internal addresses" */
/* Okay. We're still doing a CONNECT, and it wasn't a private
* address. Here we do special handling for literal IP addresses,
* to see if we should reject this preemptively, and to set up
* fields in conn->entry_cfg to tell the exit what AF we want. */
{
tor_addr_t addr;
/* XXX Duplicate call to tor_addr_parse. */
if (tor_addr_parse(&addr, socks->address) >= 0) {
/* If we reach this point, it's an IPv4 or an IPv6 address. */
sa_family_t family = tor_addr_family(&addr);
if ((family == AF_INET && ! conn->entry_cfg.ipv4_traffic) ||
(family == AF_INET6 && ! conn->entry_cfg.ipv6_traffic)) {
/* You can't do an IPv4 address on a v6-only socks listener,
* or vice versa. */
log_warn(LD_NET, "Rejecting SOCKS request for an IP address "
"family that this listener does not support.");
connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
return -1;
} else if (family == AF_INET6 && socks->socks_version == 4) {
/* You can't make a socks4 request to an IPv6 address. Socks4
* doesn't support that. */
log_warn(LD_NET, "Rejecting SOCKS4 request for an IPv6 address.");
connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
return -1;
} else if (socks->socks_version == 4 &&
!conn->entry_cfg.ipv4_traffic) {
/* You can't do any kind of Socks4 request when IPv4 is forbidden.
*
* XXX raise this check outside the enclosing block? */
log_warn(LD_NET, "Rejecting SOCKS4 request on a listener with "
"no IPv4 traffic supported.");
connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
return -1;
} else if (family == AF_INET6) {
/* Tell the exit: we won't accept any ipv4 connection to an IPv6
* address. */
conn->entry_cfg.ipv4_traffic = 0;
} else if (family == AF_INET) {
/* Tell the exit: we won't accept any ipv6 connection to an IPv4
* address. */
conn->entry_cfg.ipv6_traffic = 0;
}
}
}
/* we never allow IPv6 answers on socks4. (TODO: Is this smart?) */
if (socks->socks_version == 4)
conn->entry_cfg.ipv6_traffic = 0;
/* Still handling CONNECT. Now, check for exit enclaves. (Which we
* don't do on BEGIN_DIR, or when there is a chosen exit.)
*
* TODO: Should we remove this? Exit enclaves are nutty and don't
* work very well
*/
if (!conn->use_begindir && !conn->chosen_exit_name && !circ) {
/* see if we can find a suitable enclave exit */
const node_t *r =
router_find_exact_exit_enclave(socks->address, socks->port);
if (r) {
log_info(LD_APP,
"Redirecting address %s to exit at enclave router %s",
safe_str_client(socks->address), node_describe(r));
/* use the hex digest, not nickname, in case there are two
routers with this nickname */
conn->chosen_exit_name =
tor_strdup(hex_str(r->identity, DIGEST_LEN));
conn->chosen_exit_optional = 1;
}
}
/* Still handling CONNECT: warn or reject if it's using a dangerous
* port. */
if (!conn->use_begindir && !conn->chosen_exit_name && !circ)
if (consider_plaintext_ports(conn, socks->port) < 0)
return -1;
/* Remember the port so that we will predict that more requests
there will happen in the future. */
if (!conn->use_begindir) {
/* help predict this next time */
rep_hist_note_used_port(now, socks->port);
}
} else if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) {
rep_hist_note_used_resolve(now); /* help predict this next time */
/* no extra processing needed */
} else {
/* We should only be doing CONNECT, RESOLVE, or RESOLVE_PTR! */
tor_fragile_assert();
}
/* Okay. At this point we've set chosen_exit_name if needed, rewritten the
* address, and decided not to reject it for any number of reasons. Now
* mark the connection as waiting for a circuit, and try to attach it!
*/
base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT;
/* If we were given a circuit to attach to, try to attach. Otherwise,
* try to find a good one and attach to that. */
int rv;
if (circ) {
rv = connection_ap_handshake_attach_chosen_circuit(conn, circ, cpath);
} else {
/* We'll try to attach it at the next event loop, or whenever
* we call connection_ap_attach_pending() */
connection_ap_mark_as_pending_circuit(conn);
rv = 0;
}
/* If the above function returned 0 then we're waiting for a circuit.
* if it returned 1, we're attached. Both are okay. But if it returned
* -1, there was an error, so make sure the connection is marked, and
* return -1. */
if (rv < 0) {
if (!base_conn->marked_for_close)
connection_mark_unattached_ap(conn, END_STREAM_REASON_CANT_ATTACH);
return -1;
}
return 0;
} else {
/* If we get here, it's a request for a .onion address! */
tor_assert(addresstype == ONION_V2_HOSTNAME ||
addresstype == ONION_V3_HOSTNAME);
tor_assert(!automap);
if (addresstype == ONION_V2_HOSTNAME) {
log_warn(LD_PROTOCOL,
"Warning! You've just connected to a v2 onion address. These "
"addresses are deprecated for security reasons, and are no "
"longer supported in Tor. Please encourage the site operator "
"to upgrade. For more information see "
"https://blog.torproject.org/v2-deprecation-timeline");
}
return connection_ap_handle_onion(conn, socks, circ, addresstype);
}
return 0; /* unreached but keeps the compiler happy */
}
|
Safe
|
[
"CWE-532"
] |
tor
|
80c404c4b79f3bcba3fc4585d4c62a62a04f3ed9
|
4.86214959283785e+37
| 433 |
Log warning when connecting to soon-to-be-deprecated v2 onions.
| 0 |
blkid_partition blkid_partlist_get_partition_by_partno(blkid_partlist ls, int n)
{
int i, nparts;
blkid_partition par;
nparts = blkid_partlist_numof_partitions(ls);
for (i = 0; i < nparts; i++) {
par = blkid_partlist_get_partition(ls, i);
if (n == blkid_partition_get_partno(par))
return par;
}
return NULL;
}
|
Safe
|
[] |
util-linux
|
50d1594c2e6142a3b51d2143c74027480df082e0
|
1.6790777207589204e+38
| 13 |
libblkid: avoid non-empty recursion in EBR
This is extension to the patch 7164a1c34d18831ac61c6744ad14ce916d389b3f.
We also need to detect non-empty recursion in the EBR chain. It's
possible to create standard valid logical partitions where the last one
points back to the EBR chain. In this case all offsets will be non-empty.
Unfortunately, it's valid to create logical partitions that are not in
the "disk order" (sorted by start offset). So a link somewhere back is
valid, but such a link cannot point to an already existing partition
(otherwise we will see recursion).
This patch forces libblkid to ignore duplicate logical partitions, the
duplicate chain segment is interpreted as non-data segment, after 100
iterations with non-data segments it will break the loop -- no memory
is allocated in this case by the loop.
Addresses: https://bugzilla.redhat.com/show_bug.cgi?id=1349536
References: http://seclists.org/oss-sec/2016/q3/40
Signed-off-by: Karel Zak <kzak@redhat.com>
| 0 |
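A self-contained sketch of the loop guard described in the libblkid message above (hypothetical helper names; not the actual libblkid code): a link that points at an already-seen EBR offset is treated as a non-data segment, and after 100 such segments the walk stops without having allocated anything for them.
/* Sketch only: seen-offset tracking plus the 100-iteration guard. */
#include <stdint.h>
#include <stddef.h>
#define MAX_SEEN    128   /* assumed table size for this sketch */
#define MAX_NONDATA 100   /* give up after this many non-data segments */
/* hypothetical: returns the next EBR offset in the chain, 0 at the end */
extern uint64_t read_next_ebr_offset(uint64_t cur);
/* hypothetical: records one logical partition */
extern void add_logical_partition(uint64_t offset);
int walk_ebr_chain(uint64_t start)
{
    uint64_t seen[MAX_SEEN];
    size_t nseen = 0;
    int nondata = 0;
    uint64_t cur = start;
    while (cur) {
        int duplicate = 0;
        for (size_t i = 0; i < nseen; i++)
            if (seen[i] == cur)
                duplicate = 1;
        if (duplicate) {
            /* duplicate chain segment: treat as non-data, allocate nothing */
            if (++nondata > MAX_NONDATA)
                return -1;              /* looping chain, stop the walk */
        } else {
            if (nseen < MAX_SEEN)
                seen[nseen++] = cur;
            add_logical_partition(cur); /* normal data segment */
        }
        cur = read_next_ebr_offset(cur);
    }
    return 0;
}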
static void dummy_draw_rule(DviContext *dvi, int x, int y, Uint w, Uint h, int f)
{
}
|
Safe
|
[
"CWE-20"
] |
evince
|
d4139205b010ed06310d14284e63114e88ec6de2
|
6.853641129052861e+37
| 3 |
backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643.
| 0 |
static int userfaultfd_release(struct inode *inode, struct file *file)
{
struct userfaultfd_ctx *ctx = file->private_data;
struct mm_struct *mm = ctx->mm;
struct vm_area_struct *vma, *prev;
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
WRITE_ONCE(ctx->released, true);
if (!mmget_not_zero(mm))
goto wakeup;
/*
* Flush page faults out of all CPUs. NOTE: all page faults
* must be retried without returning VM_FAULT_SIGBUS if
* userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
* changes while handle_userfault released the mmap_sem. So
* it's critical that released is set to true (above), before
* taking the mmap_sem for writing.
*/
down_write(&mm->mmap_sem);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched();
BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
!!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
if (vma->vm_userfaultfd_ctx.ctx != ctx) {
prev = vma;
continue;
}
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
new_flags, vma->anon_vma,
vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
NULL_VM_UFFD_CTX);
if (prev)
vma = prev;
else
prev = vma;
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
up_write(&mm->mmap_sem);
mmput(mm);
wakeup:
/*
* After no new page faults can wait on this fault_*wqh, flush
* the last page faults that may have been already waiting on
* the fault_*wqh.
*/
spin_lock(&ctx->fault_pending_wqh.lock);
__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
spin_unlock(&ctx->fault_pending_wqh.lock);
/* Flush pending events that may still wait on event_wqh */
wake_up_all(&ctx->event_wqh);
wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
userfaultfd_ctx_put(ctx);
return 0;
}
|
Vulnerable
|
[
"CWE-362",
"CWE-703",
"CWE-667"
] |
linux
|
04f5866e41fb70690e28397487d8bd8eea7d712a
|
1.9551824729983098e+37
| 65 |
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, even though that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/alpine.LSU.2.11.1707191716030.2055@eggly.anvils
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/20190325224949.11068-1-aarcange@redhat.com
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Jann Horn <jannh@google.com>
Acked-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 1 |
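A standalone paraphrase of the helper this commit adds (stand-in struct definitions for illustration only; the real helper lives in include/linux/sched/mm.h): an mm is only safe to mutate if no core dump is in flight, which the kernel tracks through mm->core_state.
/* Stand-in definitions, not the kernel's: just enough to show the check. */
#include <stdbool.h>
#include <stddef.h>
struct core_state { int dumping; };     /* set up while dumping core */
struct mm_struct {
    struct core_state *core_state;      /* non-NULL during a core dump */
};
/* Paraphrase of mmget_still_valid(): callers that did
 * mmget_not_zero()/get_task_mm() and then took the mmap_sem must skip
 * all vma modifications when this returns false, so the dumper keeps
 * seeing a stable vma layout (cf. the check added to
 * userfaultfd_release() right after down_write(&mm->mmap_sem)). */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
    return mm->core_state == NULL;
}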
do_bottom_half_rx(struct fst_card_info *card)
{
struct fst_port_info *port;
int pi;
int rx_count = 0;
/* Check for rx completions on all ports on this card */
dbg(DBG_RX, "do_bottom_half_rx\n");
for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
if (!port->run)
continue;
while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
& DMA_OWN) && !(card->dmarx_in_progress)) {
if (rx_count > fst_max_reads) {
/*
* Don't spend forever in receive processing
* Schedule another event
*/
fst_q_work_item(&fst_work_intq, card->card_no);
tasklet_schedule(&fst_int_task);
break; /* Leave the loop */
}
fst_intr_rx(card, port);
rx_count++;
}
}
}
|
Safe
|
[
"CWE-399"
] |
linux
|
96b340406724d87e4621284ebac5e059d67b2194
|
5.256937256587248e+37
| 28 |
farsync: fix info leak in ioctl
The fst_get_iface() code fails to initialize the two padding bytes of
struct sync_serial_settings after the ->loopback member. Add an explicit
memset(0) before filling the structure to avoid the info leak.
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
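The bug class in the farsync fix above is easy to reproduce in isolation. A userspace sketch under an assumed struct layout (the real sync_serial_settings is a kernel UAPI type): without the leading memset(), the compiler padding after loopback is never written and would be copied out verbatim.
#include <string.h>
/* Assumed layout mirroring the commit message: two padding bytes follow
 * `loopback` on common ABIs. */
struct sync_serial_settings_like {
    unsigned int   clock_rate;
    unsigned int   clock_type;
    unsigned short loopback;
    /* <- 2 bytes of compiler padding live here */
};
void fill_settings(struct sync_serial_settings_like *s)
{
    memset(s, 0, sizeof(*s));   /* the fix: clears the padding bytes too */
    s->clock_rate = 9600;
    s->clock_type = 0;
    s->loopback   = 0;
}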
void add(const Buffer::Instance& data) override {
const StringBuffer& src = dynamic_cast<const StringBuffer&>(data);
add(src.start(), src.size_);
}
|
Safe
|
[
"CWE-401"
] |
envoy
|
5eba69a1f375413fb93fab4173f9c393ac8c2818
|
1.6795222198933883e+38
| 4 |
[buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <avd@google.com>
| 0 |
lt_dlopenadvise (const char *filename, lt_dladvise advise)
{
lt_dlhandle handle = 0;
int errors = 0;
const char * saved_error = 0;
LT__GETERROR (saved_error);
/* Can't have symbols hidden and visible at the same time! */
if (advise && advise->is_symlocal && advise->is_symglobal)
{
LT__SETERROR (CONFLICTING_FLAGS);
return 0;
}
if (!filename
|| !advise
|| !advise->try_ext
|| has_library_ext (filename))
{
/* Just in case we missed a code path in try_dlopen() that reports
an error, but forgot to reset handle... */
if (try_dlopen (&handle, filename, NULL, advise) != 0)
return 0;
return handle;
}
else if (filename && *filename)
{
/* First try appending ARCHIVE_EXT. */
errors += try_dlopen (&handle, filename, archive_ext, advise);
/* If we found FILENAME, stop searching -- whether we were able to
load the file as a module or not. If the file exists but loading
failed, it is better to return an error message here than to
report FILE_NOT_FOUND when the alternatives (foo.so etc) are not
in the module search path. */
if (handle || ((errors > 0) && !file_not_found ()))
return handle;
#if defined(LT_MODULE_EXT)
/* Try appending SHLIB_EXT. */
LT__SETERRORSTR (saved_error);
errors = try_dlopen (&handle, filename, shlib_ext, advise);
/* As before, if the file was found but loading failed, return now
with the current error message. */
if (handle || ((errors > 0) && !file_not_found ()))
return handle;
#endif
}
/* Still here? Then we really did fail to locate any of the file
names we tried. */
LT__SETERROR (FILE_NOT_FOUND);
return 0;
}
|
Safe
|
[] |
libtool
|
e91f7b960032074a55fc91273c1917e3082b5338
|
3.5351440739254206e+37
| 58 |
Don't load module.la from current directory by default.
* libltdl/ltdl.c (try_dlopen): Do not attempt to load an
unqualified module.la file from the current directory (by
default) since doing so is insecure and is not compliant with
the documentation.
* tests/testsuite.at: Qualify access to module.la file in
current directory so that test passes.
| 0 |
static Value performFormatDouble(ExpressionContext* const expCtx, Value inputValue) {
double doubleValue = inputValue.getDouble();
if (std::isinf(doubleValue)) {
return Value(std::signbit(doubleValue) ? "-Infinity"_sd : "Infinity"_sd);
} else if (std::isnan(doubleValue)) {
return Value("NaN"_sd);
} else if (doubleValue == 0.0 && std::signbit(doubleValue)) {
return Value("-0"_sd);
} else {
return Value(static_cast<std::string>(str::stream() << doubleValue));
}
}
|
Safe
|
[] |
mongo
|
1772b9a0393b55e6a280a35e8f0a1f75c014f301
|
2.4351155250018874e+37
| 13 |
SERVER-49404 Enforce additional checks in $arrayToObject
| 0 |
static int cdrom_print_info(const char *header, int val, char *info,
int *pos, enum cdrom_print_option option)
{
const int max_size = sizeof(cdrom_sysctl_settings.info);
struct cdrom_device_info *cdi;
int ret;
ret = scnprintf(info + *pos, max_size - *pos, header);
if (!ret)
return 1;
*pos += ret;
list_for_each_entry(cdi, &cdrom_list, list) {
switch (option) {
case CTL_NAME:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%s", cdi->name);
break;
case CTL_SPEED:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", cdi->speed);
break;
case CTL_SLOTS:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", cdi->capacity);
break;
case CTL_CAPABILITY:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", CDROM_CAN(val) != 0);
break;
default:
pr_info("invalid option%d\n", option);
return 1;
}
if (!ret)
return 1;
*pos += ret;
}
return 0;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
9de4ee40547fd315d4a0ed1dd15a2fa3559ad707
|
5.131954460780448e+36
| 42 |
cdrom: information leak in cdrom_ioctl_media_changed()
This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned
long. The way the check is written now, if one of the high 32 bits is
set then we could read outside the info->slots[] array.
This bug is pretty old and it predates git.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: stable@vger.kernel.org
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
void CZNC::DestroyInstance() {
delete s_pZNC;
s_pZNC = nullptr;
}
|
Safe
|
[
"CWE-20"
] |
znc
|
64613bc8b6b4adf1e32231f9844d99cd512b8973
|
3.2254521543100845e+38
| 4 |
Don't crash if user specified invalid encoding.
This is CVE-2019-9917
| 0 |
static bool list_has_sctp_addr(const struct list_head *list,
union sctp_addr *ipaddr)
{
struct sctp_transport *addr;
list_for_each_entry(addr, list, transports) {
if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
return true;
}
return false;
}
|
Safe
|
[] |
linux
|
196d67593439b03088913227093e374235596e33
|
1.0613928806460642e+37
| 12 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack lacks a mechanism for per-association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <michele@acksyn.org>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
unpack_gz_stream(transformer_state_t *xstate)
{
uint32_t v32;
IF_DESKTOP(long long) int total, n;
DECLARE_STATE;
#if !ENABLE_FEATURE_SEAMLESS_Z
if (check_signature16(xstate, GZIP_MAGIC))
return -1;
#else
if (xstate->check_signature) {
uint16_t magic2;
if (full_read(xstate->src_fd, &magic2, 2) != 2) {
bad_magic:
bb_error_msg("invalid magic");
return -1;
}
if (magic2 == COMPRESS_MAGIC) {
xstate->check_signature = 0;
return unpack_Z_stream(xstate);
}
if (magic2 != GZIP_MAGIC)
goto bad_magic;
}
#endif
total = 0;
ALLOC_STATE;
to_read = -1;
// bytebuffer_max = 0x8000;
bytebuffer = xmalloc(bytebuffer_max);
gunzip_src_fd = xstate->src_fd;
again:
if (!check_header_gzip(PASS_STATE xstate)) {
bb_error_msg("corrupted data");
total = -1;
goto ret;
}
n = inflate_unzip_internal(PASS_STATE xstate);
if (n < 0) {
total = -1;
goto ret;
}
total += n;
if (!top_up(PASS_STATE 8)) {
bb_error_msg("corrupted data");
total = -1;
goto ret;
}
/* Validate decompression - crc */
v32 = buffer_read_le_u32(PASS_STATE_ONLY);
if ((~gunzip_crc) != v32) {
bb_error_msg("crc error");
total = -1;
goto ret;
}
/* Validate decompression - size */
v32 = buffer_read_le_u32(PASS_STATE_ONLY);
if ((uint32_t)gunzip_bytes_out != v32) {
bb_error_msg("incorrect length");
total = -1;
}
if (!top_up(PASS_STATE 2))
goto ret; /* EOF */
if (bytebuffer[bytebuffer_offset] == 0x1f
&& bytebuffer[bytebuffer_offset + 1] == 0x8b
) {
bytebuffer_offset += 2;
goto again;
}
/* GNU gzip says: */
/*bb_error_msg("decompression OK, trailing garbage ignored");*/
ret:
free(bytebuffer);
DEALLOC_STATE;
return total;
}
|
Safe
|
[
"CWE-476"
] |
busybox
|
1de25a6e87e0e627aa34298105a3d17c60a1f44e
|
2.3747767079347127e+38
| 87 |
unzip: test for bad archive SEGVing
function                                             old     new   delta
huft_build                                          1296    1300      +4
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
| 0 |
int security_getprocattr(struct task_struct *p, char *name, char **value)
{
return security_ops->getprocattr(p, name, value);
}
|
Safe
|
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
|
1.905009449504816e+38
| 4 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
| 0 |
void mpol_fix_fork_child_flag(struct task_struct *p)
{
if (p->mempolicy)
p->flags |= PF_MEMPOLICY;
else
p->flags &= ~PF_MEMPOLICY;
}
|
Safe
|
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
|
1.0613184191801687e+38
| 7 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables from going away from under them; during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: <stable@vger.kernel.org> [2.6.38+]
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
static void execute_ncq_command(NCQTransferState *ncq_tfs)
{
AHCIDevice *ad = ncq_tfs->drive;
IDEState *ide_state = &ad->port.ifs[0];
int port = ad->port_no;
g_assert(is_ncq(ncq_tfs->cmd));
ncq_tfs->halt = false;
switch (ncq_tfs->cmd) {
case READ_FPDMA_QUEUED:
DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", tag %d\n",
ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag);
DPRINTF(port, "tag %d aio read %"PRId64"\n",
ncq_tfs->tag, ncq_tfs->lba);
dma_acct_start(ide_state->blk, &ncq_tfs->acct,
&ncq_tfs->sglist, BLOCK_ACCT_READ);
ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
ncq_tfs->lba << BDRV_SECTOR_BITS,
BDRV_SECTOR_SIZE,
ncq_cb, ncq_tfs);
break;
case WRITE_FPDMA_QUEUED:
DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n",
ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag);
DPRINTF(port, "tag %d aio write %"PRId64"\n",
ncq_tfs->tag, ncq_tfs->lba);
dma_acct_start(ide_state->blk, &ncq_tfs->acct,
&ncq_tfs->sglist, BLOCK_ACCT_WRITE);
ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
ncq_tfs->lba << BDRV_SECTOR_BITS,
BDRV_SECTOR_SIZE,
ncq_cb, ncq_tfs);
break;
default:
DPRINTF(port, "error: unsupported NCQ command (0x%02x) received\n",
ncq_tfs->cmd);
ncq_err(ncq_tfs);
}
}
|
Safe
|
[
"CWE-772",
"CWE-401"
] |
qemu
|
d68f0f778e7f4fbd674627274267f269e40f0b04
|
2.4199128723603454e+38
| 44 |
ide: ahci: call cleanup function in ahci unit
This avoids a memory leak when hot-unplugging the AHCI device.
Signed-off-by: Li Qiang <liqiang6-s@360.cn>
Message-id: 1488449293-80280-4-git-send-email-liqiang6-s@360.cn
Signed-off-by: John Snow <jsnow@redhat.com>
| 0 |
CallResult<Handle<BigStorage>> getForInPropertyNames(
Runtime *runtime,
Handle<JSObject> obj,
uint32_t &beginIndex,
uint32_t &endIndex) {
Handle<HiddenClass> clazz(runtime, obj->getClass(runtime));
// Fast case: Check the cache.
MutableHandle<BigStorage> arr(runtime, clazz->getForInCache(runtime));
if (arr) {
beginIndex = matchesProtoClasses(runtime, obj, arr);
if (beginIndex) {
// Cache is valid for this object, so use it.
endIndex = arr->size();
return arr;
}
// Invalid for this object. We choose to clear the cache since the
// changes to the prototype chain probably affect other objects too.
clazz->clearForInCache(runtime);
// Clear arr to slightly reduce risk of OOM from allocation below.
arr = nullptr;
}
// Slow case: Build the array of properties.
auto ownPropEstimate = clazz->getNumProperties();
auto arrRes = obj->shouldCacheForIn(runtime)
? BigStorage::createLongLived(runtime, ownPropEstimate)
: BigStorage::create(runtime, ownPropEstimate);
if (LLVM_UNLIKELY(arrRes == ExecutionStatus::EXCEPTION)) {
return ExecutionStatus::EXCEPTION;
}
arr = std::move(*arrRes);
if (setProtoClasses(runtime, obj, arr) == ExecutionStatus::EXCEPTION) {
return ExecutionStatus::EXCEPTION;
}
beginIndex = arr->size();
// If obj or any of its prototypes are unsuitable for caching, then
// beginIndex is 0 and we return an array with only the property names.
bool canCache = beginIndex;
auto end = appendAllPropertyNames(obj, runtime, arr, beginIndex);
if (end == ExecutionStatus::EXCEPTION) {
return ExecutionStatus::EXCEPTION;
}
endIndex = *end;
// Avoid degenerate memory explosion: if > 75% of the array is properties
// or classes from prototypes, then don't cache it.
const bool tooMuchProto = *end / 4 > ownPropEstimate;
if (canCache && !tooMuchProto) {
assert(beginIndex > 0 && "cached array must start with proto classes");
#ifdef HERMES_SLOW_DEBUG
assert(beginIndex == matchesProtoClasses(runtime, obj, arr) && "matches");
#endif
clazz->setForInCache(*arr, runtime);
}
return arr;
}
|
Safe
|
[
"CWE-843",
"CWE-125"
] |
hermes
|
fe52854cdf6725c2eaa9e125995da76e6ceb27da
|
1.2681947394694483e+38
| 56 |
[CVE-2020-1911] Look up HostObject computed properties on the right object in the prototype chain.
Summary:
The change in the hermes repository fixes the security vulnerability
CVE-2020-1911. This vulnerability only affects applications which
allow evaluation of uncontrolled, untrusted JavaScript code not
shipped with the app, so React Native apps will generally not be affected.
This revision includes a test for the bug. The test is generic JSI
code, so it is included in the hermes and react-native repositories.
Changelog: [Internal]
Reviewed By: tmikov
Differential Revision: D23322992
fbshipit-source-id: 4e88c974afe1ad33a263f9cac03e9dc98d33649a
| 0 |
void Magick::Image::gamma(const double gammaRed_,const double gammaGreen_,
const double gammaBlue_)
{
modifyImage();
GetPPException;
GetAndSetPPChannelMask(RedChannel);
(void) GammaImage(image(),gammaRed_,exceptionInfo);
SetPPChannelMask(GreenChannel);
(void) GammaImage(image(),gammaGreen_,exceptionInfo);
SetPPChannelMask(BlueChannel);
(void) GammaImage(image(),gammaBlue_,exceptionInfo);
RestorePPChannelMask;
ThrowImageException;
}
|
Safe
|
[
"CWE-416"
] |
ImageMagick
|
8c35502217c1879cb8257c617007282eee3fe1cc
|
1.0222603881862272e+37
| 14 |
Added missing return to avoid use after free.
| 0 |
AlgorithmEnum* ZRtp::findBestPubkey(ZrtpPacketHello *hello) {
AlgorithmEnum* peerIntersect[ZrtpConfigure::maxNoOfAlgos+1];
AlgorithmEnum* ownIntersect[ZrtpConfigure::maxNoOfAlgos+1];
// Build list of own pubkey algorithm names, must follow the order
// defined in RFC 6189, chapter 4.1.2.
const char *orderedAlgos[] = {dh2k, ec25, dh3k, ec38};
int numOrderedAlgos = sizeof(orderedAlgos) / sizeof(const char*);
int numAlgosPeer = hello->getNumPubKeys();
if (numAlgosPeer == 0) {
hash = &zrtpHashes.getByName(mandatoryHash); // set mandatory hash
return &zrtpPubKeys.getByName(mandatoryPubKey);
}
// Build own list of intersecting algos, keep own order of algorithms
// The list must include real public key algorithms only, so skip multi-stream mode, preshared and alike.
int numAlgosOwn = configureAlgos.getNumConfiguredAlgos(PubKeyAlgorithm);
int numOwnIntersect = 0;
for (int i = 0; i < numAlgosOwn; i++) {
ownIntersect[numOwnIntersect] = &configureAlgos.getAlgoAt(PubKeyAlgorithm, i);
if (*(int32_t*)(ownIntersect[numOwnIntersect]->getName()) == *(int32_t*)mult) {
continue; // skip multi-stream mode
}
for (int ii = 0; ii < numAlgosPeer; ii++) {
if (*(int32_t*)(ownIntersect[numOwnIntersect]->getName()) == *(int32_t*)(zrtpPubKeys.getByName((const char*)hello->getPubKeyType(ii)).getName())) {
numOwnIntersect++;
break;
}
}
}
// Build list of peer's intersecting algos: take own list as input, order according to sequence in hello packet (peer's order)
int numPeerIntersect = 0;
for (int i = 0; i < numAlgosPeer; i++) {
peerIntersect[numPeerIntersect] = &zrtpPubKeys.getByName((const char*)hello->getPubKeyType(i));
for (int ii = 0; ii < numOwnIntersect; ii++) {
if (*(int32_t*)(ownIntersect[ii]->getName()) == *(int32_t*)(peerIntersect[numPeerIntersect]->getName())) {
numPeerIntersect++;
break;
}
}
}
if (numPeerIntersect == 0) {
// If we don't find a common algorithm - use the mandatory algorithms
hash = &zrtpHashes.getByName(mandatoryHash);
return &zrtpPubKeys.getByName(mandatoryPubKey);
}
AlgorithmEnum* useAlgo;
if (numPeerIntersect > 1 && *(int32_t*)(ownIntersect[0]->getName()) != *(int32_t*)(peerIntersect[0]->getName())) {
int own, peer;
const int32_t *name = (int32_t*)ownIntersect[0]->getName();
for (own = 0; own < numOrderedAlgos; own++) {
if (*name == *(int32_t*)orderedAlgos[own])
break;
}
name = (int32_t*)peerIntersect[0]->getName();
for (peer = 0; peer < numOrderedAlgos; peer++) {
if (*name == *(int32_t*)orderedAlgos[peer])
break;
}
if (own < peer) {
useAlgo = ownIntersect[0];
}
else {
useAlgo = peerIntersect[0];
}
// find fastest of conf vs intersecting
}
else {
useAlgo = peerIntersect[0];
}
// select a corresponding strong hash if necessary.
if (*(int32_t*)(useAlgo->getName()) == *(int32_t*)ec38) {
hash = getStrongHashOffered(hello);
cipher = getStrongCipherOffered(hello);
}
else {
hash = findBestHash(hello);
}
return useAlgo;
}
|
Safe
|
[
"CWE-119"
] |
ZRTPCPP
|
c8617100f359b217a974938c5539a1dd8a120b0e
|
7.2996964985811055e+37
| 82 |
Fix vulnerabilities found and reported by Mark Dowd
- limit length of memcpy
- limit number of offered algorithms in Hello packet
- length check in PING packet
- fix a small coding error
| 0 |
int handler::read_first_row(uchar * buf, uint primary_key)
{
int error;
DBUG_ENTER("handler::read_first_row");
/*
If there are very few deleted rows in the table, find the first row by
scanning the table.
TODO remove the test for HA_READ_ORDER
*/
if (stats.deleted < 10 || primary_key >= MAX_KEY ||
!(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
{
if (likely(!(error= ha_rnd_init(1))))
{
error= ha_rnd_next(buf);
const int end_error= ha_rnd_end();
if (likely(!error))
error= end_error;
}
}
else
{
/* Find the first row through the primary key */
if (likely(!(error= ha_index_init(primary_key, 0))))
{
error= ha_index_first(buf);
const int end_error= ha_index_end();
if (likely(!error))
error= end_error;
}
}
DBUG_RETURN(error);
}
|
Safe
|
[
"CWE-416"
] |
server
|
af810407f78b7f792a9bb8c47c8c532eb3b3a758
|
2.9757226699079187e+38
| 34 |
MDEV-28098 incorrect key in "dup value" error after long unique
reset errkey after using it, so that it wouldn't affect
the next error message in the next statement
| 0 |
static my_bool list_open_tables_callback(TDC_element *element,
list_open_tables_arg *arg)
{
char *db= (char*) element->m_key;
char *table_name= (char*) element->m_key + strlen((char*) element->m_key) + 1;
if (arg->db && my_strcasecmp(system_charset_info, arg->db, db))
return FALSE;
if (arg->wild && wild_compare(table_name, arg->wild, 0))
return FALSE;
/* Check if user has SELECT privilege for any column in the table */
arg->table_list.db= db;
arg->table_list.table_name= table_name;
arg->table_list.grant.privilege= 0;
if (check_table_access(arg->thd, SELECT_ACL, &arg->table_list, TRUE, 1, TRUE))
return FALSE;
if (!(*arg->start_list= (OPEN_TABLE_LIST *) arg->thd->alloc(
sizeof(**arg->start_list) + element->m_key_length)))
return TRUE;
strmov((*arg->start_list)->table=
strmov(((*arg->start_list)->db= (char*) ((*arg->start_list) + 1)),
db) + 1, table_name);
(*arg->start_list)->in_use= 0;
mysql_mutex_lock(&element->LOCK_table_share);
All_share_tables_list::Iterator it(element->all_tables);
TABLE *table;
while ((table= it++))
if (table->in_use)
++(*arg->start_list)->in_use;
mysql_mutex_unlock(&element->LOCK_table_share);
(*arg->start_list)->locked= 0; /* Obsolete. */
arg->start_list= &(*arg->start_list)->next;
*arg->start_list= 0;
return FALSE;
}
|
Safe
|
[] |
server
|
0168d1eda30dad4b517659422e347175eb89e923
|
3.333254147441842e+38
| 40 |
MDEV-25766 Unused CTE lead to a crash in find_field_in_tables/find_order_in_list
Do not assume that the subquery Item is always present.
| 0 |
IsTypeInUINT64 (
IN EFI_HII_VALUE *Value
)
{
switch (Value->Type) {
case EFI_IFR_TYPE_NUM_SIZE_8:
case EFI_IFR_TYPE_NUM_SIZE_16:
case EFI_IFR_TYPE_NUM_SIZE_32:
case EFI_IFR_TYPE_NUM_SIZE_64:
case EFI_IFR_TYPE_BOOLEAN:
return TRUE;
default:
return FALSE;
}
}
|
Safe
|
[] |
edk2
|
f1d78c489a39971b5aac5d2fc8a39bfa925c3c5d
|
9.472106839544743e+37
| 16 |
MdeModulePkg/DisplayEngine: Zero memory before free (CVE-2019-14558)
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=1611
Cc: Liming Gao <liming.gao@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Cc: Jian J Wang <jian.j.wang@intel.com>
Signed-off-by: Dandan Bi <dandan.bi@intel.com>
Reviewed-by: Eric Dong <eric.dong@intel.com>
Reviewed-by: Jian J Wang <jian.j.wang@intel.com>
| 0 |
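The pattern the commit title names, sketched with standard EDK2 library calls (ZeroMem from BaseMemoryLib, FreePool from MemoryAllocationLib); this is the general idiom, not the exact DisplayEngine diff:
//
// Scrub a buffer that held sensitive data before returning it to the
// pool, so stale heap contents cannot be read back later.
//
ZeroMem (Buffer, BufferSize);
FreePool (Buffer);
Buffer = NULL;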
u32 gf_media_avc_reformat_sei(u8 *buffer, u32 nal_size, Bool isobmf_rewrite, AVCState *avc)
{
u32 ptype, psize, hdr, var;
u32 start;
GF_BitStream *bs;
GF_BitStream *bs_dest = NULL;
u8 nhdr;
Bool sei_removed = GF_FALSE;
char store;
hdr = buffer[0];
if ((hdr & 0x1F) != GF_AVC_NALU_SEI) return 0;
if (isobmf_rewrite) bs_dest = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ);
gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
nhdr = gf_bs_read_int(bs, 8);
if (bs_dest) gf_bs_write_int(bs_dest, nhdr, 8);
/*parse SEI*/
while (gf_bs_available(bs)) {
Bool do_copy;
ptype = 0;
while (1) {
u8 v = gf_bs_read_int(bs, 8);
ptype += v;
if (v != 0xFF) break;
}
psize = 0;
while (1) {
u8 v = gf_bs_read_int(bs, 8);
psize += v;
if (v != 0xFF) break;
}
start = (u32)gf_bs_get_position(bs);
do_copy = 1;
if (start + psize >= nal_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message type %d size error (%d but %d remain), keeping full SEI untouched\n", ptype, psize, nal_size - start));
if (bs_dest) gf_bs_del(bs_dest);
return nal_size;
}
switch (ptype) {
/*remove SEI messages forbidden in MP4*/
case 3: /*filler data*/
case 10: /*sub_seq info*/
case 11: /*sub_seq_layer char*/
case 12: /*sub_seq char*/
do_copy = 0;
sei_removed = GF_TRUE;
break;
case 5: /*user unregistered */
store = buffer[start + psize];
buffer[start + psize] = 0;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[avc-h264] SEI user message %s\n", buffer + start + 16));
buffer[start + psize] = store;
break;
case 6: /*recovery point*/
avc_parse_recovery_point_sei(bs, avc);
break;
case 1: /*pic_timing*/
avc_parse_pic_timing_sei(bs, avc);
break;
case 0: /*buffering period*/
case 2: /*pan scan rect*/
case 4: /*user registered ITU t35*/
case 7: /*def_rec_pic_marking_repetition*/
case 8: /*spare_pic*/
case 9: /*scene info*/
case 13: /*full frame freeze*/
case 14: /*full frame freeze release*/
case 15: /*full frame snapshot*/
case 16: /*progressive refinement segment start*/
case 17: /*progressive refinement segment end*/
case 18: /*motion constrained slice group*/
default: /*add all unknown SEIs*/
break;
}
if (do_copy && bs_dest) {
var = ptype;
while (var >= 255) {
gf_bs_write_int(bs_dest, 0xFF, 8);
var -= 255;
}
gf_bs_write_int(bs_dest, var, 8);
var = psize;
while (var >= 255) {
gf_bs_write_int(bs_dest, 0xFF, 8);
var -= 255;
}
gf_bs_write_int(bs_dest, var, 8);
gf_bs_seek(bs, start);
//bs_read_data does not skip EPB, read byte per byte
var = psize;
while (var) {
gf_bs_write_u8(bs_dest, gf_bs_read_u8(bs));
var--;
}
}
else {
gf_bs_seek(bs, start);
//bs_skip_bytes does not skip EPB, skip byte per byte
while (psize) {
gf_bs_read_u8(bs);
psize--;
}
}
if (gf_bs_available(bs) <= 2) {
var = gf_bs_read_int(bs, 8);
if (var != 0x80) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message has less than 2 bytes remaining but no end of sei found\n"));
}
if (bs_dest) gf_bs_write_int(bs_dest, 0x80, 8);
break;
}
}
gf_bs_del(bs);
//we cannot compare final size and original size since original may have EPB and final does not yet have them
if (bs_dest && sei_removed) {
u8 *dst_no_epb = NULL;
u32 dst_no_epb_size = 0;
gf_bs_get_content(bs_dest, &dst_no_epb, &dst_no_epb_size);
nal_size = gf_media_nalu_add_emulation_bytes(buffer, dst_no_epb, dst_no_epb_size);
}
if (bs_dest) gf_bs_del(bs_dest);
return nal_size;
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
|
8.717691963391623e+37
| 140 |
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
| 0 |
static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
int ctb_addr_ts)
{
HEVCLocalContext *lc = s->HEVClc;
int ctb_size = 1 << s->ps.sps->log2_ctb_size;
int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
if (s->ps.pps->entropy_coding_sync_enabled_flag) {
if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
lc->first_qp_group = 1;
lc->end_of_tiles_x = s->ps.sps->width;
} else if (s->ps.pps->tiles_enabled_flag) {
if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
lc->first_qp_group = 1;
}
} else {
lc->end_of_tiles_x = s->ps.sps->width;
}
lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
lc->boundary_flags = 0;
if (s->ps.pps->tiles_enabled_flag) {
if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
lc->boundary_flags |= BOUNDARY_LEFT_TILE;
if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
lc->boundary_flags |= BOUNDARY_UPPER_TILE;
if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
} else {
if (ctb_addr_in_slice <= 0)
lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
if (ctb_addr_in_slice < s->ps.sps->ctb_width)
lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
}
lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
}
|
Safe
|
[
"CWE-476"
] |
FFmpeg
|
9ccc633068c6fe76989f487c8932bd11886ad65b
|
3.554410814522193e+37
| 48 |
avcodec/hevcdec: Avoid only partly skiping duplicate first slices
Fixes: NULL pointer dereference and out of array access
Fixes: 13871/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5746167087890432
Fixes: 13845/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5650370728034304
This also fixes the return code for explode mode
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Reviewed-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 54655623a82632e7624714d7b2a3e039dc5faa7e)
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
| 0 |
void append_unescaped(String *res, const char *pos, uint length)
{
const char *end= pos+length;
res->append('\'');
for (; pos != end ; pos++)
{
#if defined(USE_MB) && MYSQL_VERSION_ID < 40100
uint mblen;
if (use_mb(default_charset_info) &&
(mblen= my_ismbchar(default_charset_info, pos, end)))
{
res->append(pos, mblen);
pos+= mblen;
continue;
}
#endif
switch (*pos) {
case 0: /* Must be escaped for 'mysql' */
res->append('\\');
res->append('0');
break;
case '\n': /* Must be escaped for logs */
res->append('\\');
res->append('n');
break;
case '\r':
res->append('\\'); /* This gives better readability */
res->append('r');
break;
case '\\':
res->append('\\'); /* Because of the sql syntax */
res->append('\\');
break;
case '\'':
res->append('\''); /* Because of the sql syntax */
res->append('\'');
break;
default:
res->append(*pos);
break;
}
}
res->append('\'');
}
|
Safe
|
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
|
5.999288615926603e+36
| 46 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
| 0 |
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
#ifdef CONFIG_X86_64
unsigned long a;
#endif
int i;
/* I/O */
vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
if (enable_shadow_vmcs) {
vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
}
if (cpu_has_vmx_msr_bitmap())
vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
/* Control */
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
if (cpu_has_secondary_exec_ctrls())
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
vmx_secondary_exec_control(vmx));
if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
vmcs_write64(EOI_EXIT_BITMAP0, 0);
vmcs_write64(EOI_EXIT_BITMAP1, 0);
vmcs_write64(EOI_EXIT_BITMAP2, 0);
vmcs_write64(EOI_EXIT_BITMAP3, 0);
vmcs_write16(GUEST_INTR_STATUS, 0);
vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
}
if (ple_gap) {
vmcs_write32(PLE_GAP, ple_gap);
vmx->ple_window = ple_window;
vmx->ple_window_dirty = true;
}
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
vmx_set_constant_host_state(vmx);
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
rdmsrl(MSR_GS_BASE, a);
vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
u32 index = vmx_msr_index[i];
u32 data_low, data_high;
int j = vmx->nmsrs;
if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
if (wrmsr_safe(index, data_low, data_high) < 0)
continue;
vmx->guest_msrs[j].index = i;
vmx->guest_msrs[j].data = 0;
vmx->guest_msrs[j].mask = -1ull;
++vmx->nmsrs;
}
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
/* 22.2.1, 20.8.1 */
vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
set_cr4_guest_host_mask(vmx);
if (vmx_xsaves_supported())
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
return 0;
}
|
Safe
|
[
"CWE-399"
] |
linux
|
54a20552e1eae07aa240fa370a0293e006b5faed
|
2.8581230516014086e+38
| 102 |
KVM: x86: work around infinite loop in microcode when #AC is delivered
It was found that a guest can DoS a host by triggering an infinite
stream of "alignment check" (#AC) exceptions. This causes the
microcode to enter an infinite loop where the core never receives
another interrupt. The host kernel panics pretty quickly due to the
effects (CVE-2015-5307).
Signed-off-by: Eric Northup <digitaleric@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
mm_start_pam(Authctxt *authctxt)
{
Buffer m;
debug3("%s entering", __func__);
if (!options.use_pam)
fatal("UsePAM=no, but ended up in %s anyway", __func__);
buffer_init(&m);
mm_request_send(pmonitor->m_recvfd, MONITOR_REQ_PAM_START, &m);
buffer_free(&m);
}
|
Safe
|
[
"CWE-20",
"CWE-200"
] |
openssh-portable
|
d4697fe9a28dab7255c60433e4dd23cf7fce8a8b
|
1.8967724080347497e+38
| 13 |
Don't resend username to PAM; it already has it.
Pointed out by Moritz Jodeit; ok dtucker@
| 0 |
_gnutls_proc_openpgp_server_certificate (gnutls_session_t session,
uint8_t * data, size_t data_size)
{
int size, ret, len;
uint8_t *p = data;
cert_auth_info_t info;
gnutls_certificate_credentials_t cred;
ssize_t dsize = data_size;
int x, key_type;
gnutls_pcert_st *peer_certificate_list = NULL;
int peer_certificate_list_size = 0;
gnutls_datum_t tmp, akey = { NULL, 0 };
uint8_t subkey_id[GNUTLS_OPENPGP_KEYID_SIZE];
unsigned int subkey_id_set = 0;
cred = (gnutls_certificate_credentials_t)
_gnutls_get_cred (session->key, GNUTLS_CRD_CERTIFICATE, NULL);
if (cred == NULL)
{
gnutls_assert ();
return GNUTLS_E_INSUFFICIENT_CREDENTIALS;
}
if ((ret =
_gnutls_auth_info_set (session, GNUTLS_CRD_CERTIFICATE,
sizeof (cert_auth_info_st), 1)) < 0)
{
gnutls_assert ();
return ret;
}
info = _gnutls_get_auth_info (session);
if (data == NULL || data_size == 0)
{
gnutls_assert ();
return GNUTLS_E_NO_CERTIFICATE_FOUND;
}
DECR_LEN (dsize, 3);
size = _gnutls_read_uint24 (p);
p += 3;
if (size == 0)
{
gnutls_assert ();
/* no certificate was sent */
return GNUTLS_E_NO_CERTIFICATE_FOUND;
}
/* Read PGPKeyDescriptor */
DECR_LEN (dsize, 1);
key_type = *p;
p++;
/* Try to read the keyid if present */
if (key_type == PGP_KEY_FINGERPRINT_SUBKEY || key_type == PGP_KEY_SUBKEY)
{
/* check size */
if (*p != GNUTLS_OPENPGP_KEYID_SIZE)
{
gnutls_assert ();
return GNUTLS_E_UNSUPPORTED_CERTIFICATE_TYPE;
}
DECR_LEN (dsize, 1);
p++;
DECR_LEN (dsize, GNUTLS_OPENPGP_KEYID_SIZE);
memcpy (subkey_id, p, GNUTLS_OPENPGP_KEYID_SIZE);
p += GNUTLS_OPENPGP_KEYID_SIZE;
subkey_id_set = 1;
}
/* read the actual key or fingerprint */
if (key_type == PGP_KEY_FINGERPRINT
|| key_type == PGP_KEY_FINGERPRINT_SUBKEY)
{ /* the fingerprint */
DECR_LEN (dsize, 1);
len = (uint8_t) * p;
p++;
if (len != 20)
{
gnutls_assert ();
return GNUTLS_E_OPENPGP_FINGERPRINT_UNSUPPORTED;
}
DECR_LEN (dsize, 20);
/* request the actual key from our database, or
* a key server or anything.
*/
if ((ret =
_gnutls_openpgp_request_key (session, &akey, cred, p, 20)) < 0)
{
gnutls_assert ();
return ret;
}
tmp = akey;
peer_certificate_list_size++;
}
else if (key_type == PGP_KEY || key_type == PGP_KEY_SUBKEY)
{ /* the whole key */
/* Read the actual certificate */
DECR_LEN (dsize, 3);
len = _gnutls_read_uint24 (p);
p += 3;
if (len == 0)
{
gnutls_assert ();
/* no certificate was sent */
return GNUTLS_E_NO_CERTIFICATE_FOUND;
}
DECR_LEN (dsize, len);
peer_certificate_list_size++;
tmp.size = len;
tmp.data = p;
}
else
{
gnutls_assert ();
return GNUTLS_E_UNSUPPORTED_CERTIFICATE_TYPE;
}
/* ok we now have the peer's key in tmp datum
*/
if (peer_certificate_list_size == 0)
{
gnutls_assert ();
return GNUTLS_E_UNEXPECTED_PACKET_LENGTH;
}
peer_certificate_list =
gnutls_calloc (1,
sizeof (gnutls_pcert_st) * (peer_certificate_list_size));
if (peer_certificate_list == NULL)
{
gnutls_assert ();
ret = GNUTLS_E_MEMORY_ERROR;
goto cleanup;
}
ret =
gnutls_pcert_import_openpgp_raw (&peer_certificate_list[0],
&tmp,
GNUTLS_OPENPGP_FMT_RAW,
(subkey_id_set != 0) ? subkey_id : NULL,
0);
if (ret < 0)
{
gnutls_assert ();
goto cleanup;
}
ret =
_gnutls_copy_certificate_auth_info (info,
peer_certificate_list,
peer_certificate_list_size,
subkey_id_set,
(subkey_id_set !=
0) ? subkey_id : NULL);
if (ret < 0)
{
gnutls_assert ();
goto cleanup;
}
if ((ret =
_gnutls_check_key_usage (&peer_certificate_list[0],
gnutls_kx_get (session))) < 0)
{
gnutls_assert ();
goto cleanup;
}
ret = 0;
cleanup:
_gnutls_free_datum (&akey);
CLEAR_CERTS;
gnutls_free (peer_certificate_list);
return ret;
}
|
Safe
|
[
"CWE-399"
] |
gnutls
|
9c62f4feb2bdd6fbbb06eb0c60bfdea80d21bbb8
|
6.842281246769861e+37
| 196 |
Deinitialize the correct number of certificates. Reported by Remi Gacogne.
| 0 |
CImg<Tfloat> get_resize_object3d() const {
return CImg<Tfloat>(*this,false).resize_object3d();
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
2.700775411600837e+38
| 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
| 0 |
void jslGetTokenString(char *str, size_t len) {
if (lex->tk == LEX_ID) {
strncpy(str, "ID:", len);
strncat(str, jslGetTokenValueAsString(), len);
} else if (lex->tk == LEX_STR) {
strncpy(str, "String:'", len);
strncat(str, jslGetTokenValueAsString(), len);
strncat(str, "'", len);
} else
jslTokenAsString(lex->tk, str, len);
}
|
Vulnerable
|
[
"CWE-119",
"CWE-787"
] |
Espruino
|
0a7619875bf79877907205f6bee08465b89ff10b
|
2.8305252878394236e+38
| 11 |
Fix strncat/cpy bounding issues (fix #1425)
| 1 |
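For contrast with the Vulnerable sample above, a corrected sketch (a generic fix, not necessarily the exact Espruino patch): strncat's size argument bounds what may be appended, so it must be the space remaining in the buffer, not the buffer's total length, and the buffer must stay NUL-terminated.
#include <string.h>
static void safe_prefix_concat(char *str, size_t len,
                               const char *prefix, const char *value)
{
    if (len == 0)
        return;
    str[0] = '\0';
    /* len - 1 leaves room for the terminating NUL */
    strncat(str, prefix, len - 1);
    /* only the space still left after the prefix may be appended */
    strncat(str, value, len - strlen(str) - 1);
}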
static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
int timeout)
{
irda_start_timer(&self->watchdog_timer, timeout, self,
iriap_watchdog_timer_expired);
}
|
Safe
|
[] |
linux-2.6
|
d370af0ef7951188daeb15bae75db7ba57c67846
|
2.482268571148017e+38
| 6 |
irda: validate peer name and attribute lengths
Length fields provided by a peer for names and attributes may be longer
than the destination array sizes. Validate lengths to prevent stack
buffer overflows.
Signed-off-by: Dan Rosenberg <drosenberg@vsecurity.com>
Cc: stable@kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
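The fix pattern the irda message describes, as a self-contained sketch with hypothetical names (the real patch validates the name and attribute lengths in net/irda/iriap.c):
#include <string.h>
#define NAME_MAX_LEN 64   /* assumed destination size for this sketch */
/* Clamp an attacker-controlled length to the destination buffer before
 * copying, instead of trusting the length field from the peer. */
static void copy_peer_name(char *dst, const unsigned char *src,
                           unsigned int len)
{
    if (len >= NAME_MAX_LEN)
        len = NAME_MAX_LEN - 1;   /* truncate oversized input */
    memcpy(dst, src, len);
    dst[len] = '\0';
}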
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
const struct sock *other)
{
if (UNIXCB(skb).cred)
return;
if (test_bit(SOCK_PASSCRED, &sock->flags) ||
!other->sk_socket ||
test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
UNIXCB(skb).pid = get_pid(task_tgid(current));
UNIXCB(skb).cred = get_current_cred();
}
}
|
Safe
|
[] |
linux-2.6
|
16e5726269611b71c930054ffe9b858c1cea88eb
|
3.5959048635339524e+37
| 12 |
af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k] __mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in a af_unix message/skb
only if requested by the sender, [man 7 unix for details how to include
ancillary data using sendmsg() system call]
Note: This might break buggy applications that expected SCM_CREDENTIALS
from an unaware write() system call, and a receiver not using the
SO_PASSCRED socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
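For readers unfamiliar with the opt-in mechanism the commit refers to: a sender attaches credentials explicitly as SCM_CREDENTIALS ancillary data. A userspace sketch along the lines of man 7 unix, with error handling omitted:

#define _GNU_SOURCE
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>
#include <unistd.h>

/* Send buf over a connected AF_UNIX socket with the caller's credentials
 * attached as SCM_CREDENTIALS ancillary data. */
static ssize_t send_with_creds(int fd, const void *buf, size_t len)
{
    struct msghdr msg = {0};
    struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
    union { struct cmsghdr align; char buf[CMSG_SPACE(sizeof(struct ucred))]; } u;
    struct cmsghdr *cmsg;
    struct ucred creds = { .pid = getpid(), .uid = getuid(), .gid = getgid() };

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = u.buf;
    msg.msg_controllen = sizeof(u.buf);

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_CREDENTIALS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(creds));
    memcpy(CMSG_DATA(cmsg), &creds, sizeof(creds));

    return sendmsg(fd, &msg, 0);
}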
MYSQL *rpl_connect_master(MYSQL *mysql)
{
THD *thd= current_thd;
char password[MAX_PASSWORD_LENGTH + 1];
int password_size= sizeof(password);
Master_info *mi= my_pthread_getspecific_ptr(Master_info*, RPL_MASTER_INFO);
if (!mi)
{
sql_print_error("'rpl_connect_master' must be called in slave I/O thread context.");
return NULL;
}
bool allocated= false;
if (!mysql)
{
if(!(mysql= mysql_init(NULL)))
{
sql_print_error("rpl_connect_master: failed in mysql_init()");
return NULL;
}
allocated= true;
}
/*
XXX: copied from connect_to_master, this function should not
change the slave status, so we cannot use connect_to_master
directly
TODO: make this part a separate function to eliminate duplication
*/
mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout);
mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout);
if (mi->bind_addr[0])
{
DBUG_PRINT("info",("bind_addr: %s", mi->bind_addr));
mysql_options(mysql, MYSQL_OPT_BIND, mi->bind_addr);
}
#ifdef HAVE_OPENSSL
if (mi->ssl)
{
mysql_ssl_set(mysql,
mi->ssl_key[0]?mi->ssl_key:0,
mi->ssl_cert[0]?mi->ssl_cert:0,
mi->ssl_ca[0]?mi->ssl_ca:0,
mi->ssl_capath[0]?mi->ssl_capath:0,
mi->ssl_cipher[0]?mi->ssl_cipher:0);
mysql_options(mysql, MYSQL_OPT_SSL_CRL,
mi->ssl_crl[0] ? mi->ssl_crl : 0);
mysql_options(mysql, MYSQL_OPT_SSL_CRLPATH,
mi->ssl_crlpath[0] ? mi->ssl_crlpath : 0);
mysql_options(mysql, MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
&mi->ssl_verify_server_cert);
}
#endif
mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset_info->csname);
/* This one is not strictly needed but we have it here for completeness */
mysql_options(mysql, MYSQL_SET_CHARSET_DIR, (char *) charsets_dir);
if (mi->is_start_plugin_auth_configured())
{
DBUG_PRINT("info", ("Slaving is using MYSQL_DEFAULT_AUTH %s",
mi->get_start_plugin_auth()));
mysql_options(mysql, MYSQL_DEFAULT_AUTH, mi->get_start_plugin_auth());
}
if (mi->is_start_plugin_dir_configured())
{
DBUG_PRINT("info", ("Slaving is using MYSQL_PLUGIN_DIR %s",
mi->get_start_plugin_dir()));
mysql_options(mysql, MYSQL_PLUGIN_DIR, mi->get_start_plugin_dir());
}
/* Set MYSQL_PLUGIN_DIR in case master asks for an external authentication plugin */
else if (opt_plugin_dir_ptr && *opt_plugin_dir_ptr)
mysql_options(mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir_ptr);
if (!mi->is_start_user_configured())
sql_print_warning("%s", ER(ER_INSECURE_CHANGE_MASTER));
const char *user= mi->get_user();
if (user == NULL
|| user[0] == 0
|| mi->get_password(password, &password_size)
|| io_slave_killed(thd, mi)
|| !mysql_real_connect(mysql, mi->host, user,
password, 0, mi->port, 0, 0))
{
if (!io_slave_killed(thd, mi))
sql_print_error("rpl_connect_master: error connecting to master: %s (server_error: %d)",
mysql_error(mysql), mysql_errno(mysql));
if (allocated)
mysql_close(mysql); // this will free the object
return NULL;
}
return mysql;
}
|
Safe
|
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
|
3.1538622424418436e+38
| 100 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
| 0 |
pixFewColorsOctcubeQuant2(PIX *pixs,
l_int32 level,
NUMA *na,
l_int32 ncolors,
l_int32 *pnerrors)
{
l_int32 w, h, wpls, wpld, i, j, nerrors;
l_int32 ncubes, depth, cindex, oval;
l_int32 rval, gval, bval;
l_int32 *octarray;
l_uint32 octindex;
l_uint32 *rtab, *gtab, *btab;
l_uint32 *lines, *lined, *datas, *datad, *ppixel;
l_uint32 *colorarray;
PIX *pixd;
PIXCMAP *cmap;
PROCNAME("pixFewColorsOctcubeQuant2");
if (!pixs)
return (PIX *)ERROR_PTR("pixs not defined", procName, NULL);
if (pixGetDepth(pixs) != 32)
return (PIX *)ERROR_PTR("pixs not 32 bpp", procName, NULL);
if (level < 3 || level > 6)
return (PIX *)ERROR_PTR("level not in {4, 5, 6}", procName, NULL);
if (ncolors > 256)
return (PIX *)ERROR_PTR("ncolors > 256", procName, NULL);
if (pnerrors)
*pnerrors = UNDEF;
pixd = NULL;
/* Represent the image with a set of leaf octcubes
* at 'level', one for each color. */
rtab = gtab = btab = NULL;
makeRGBToIndexTables(level, &rtab, &gtab, &btab);
/* The octarray will give a ptr from the octcube to the colorarray */
ncubes = numaGetCount(na);
octarray = (l_int32 *)LEPT_CALLOC(ncubes, sizeof(l_int32));
/* The colorarray will hold the colors of the first pixel
* that lands in the leaf octcube. After filling, it is
* used to generate the colormap. */
colorarray = (l_uint32 *)LEPT_CALLOC(ncolors + 1, sizeof(l_uint32));
if (!octarray || !colorarray) {
L_ERROR("octarray or colorarray not made\n", procName);
goto cleanup_arrays;
}
/* Determine the output depth from the number of colors */
pixGetDimensions(pixs, &w, &h, NULL);
datas = pixGetData(pixs);
wpls = pixGetWpl(pixs);
if (ncolors <= 4)
depth = 2;
else if (ncolors <= 16)
depth = 4;
else /* ncolors <= 256 */
depth = 8;
if ((pixd = pixCreate(w, h, depth)) == NULL) {
L_ERROR("pixd not made\n", procName);
goto cleanup_arrays;
}
pixCopyResolution(pixd, pixs);
pixCopyInputFormat(pixd, pixs);
datad = pixGetData(pixd);
wpld = pixGetWpl(pixd);
/* For each pixel, get the octree index for its leaf octcube.
* Check if a pixel has already been found in this octcube.
* ~ If not yet found, save that color in the colorarray
* and save the cindex in the octarray.
* ~ If already found, compare the pixel color with the
* color in the colorarray, and note if it differs.
* Then set the dest pixel value to the cindex - 1, which
* will be the cmap index for this color. */
cindex = 1; /* start with 1 */
nerrors = 0;
for (i = 0; i < h; i++) {
lines = datas + i * wpls;
lined = datad + i * wpld;
for (j = 0; j < w; j++) {
ppixel = lines + j;
extractRGBValues(*ppixel, &rval, &gval, &bval);
octindex = rtab[rval] | gtab[gval] | btab[bval];
oval = octarray[octindex];
if (oval == 0) {
octarray[octindex] = cindex;
colorarray[cindex] = *ppixel;
setPixelLow(lined, j, depth, cindex - 1);
cindex++;
} else { /* already have seen this color; is it unique? */
setPixelLow(lined, j, depth, oval - 1);
if (colorarray[oval] != *ppixel)
nerrors++;
}
}
}
if (pnerrors)
*pnerrors = nerrors;
#if DEBUG_FEW_COLORS
lept_stderr("ncubes = %d, ncolors = %d\n", ncubes, ncolors);
for (i = 0; i < ncolors; i++)
lept_stderr("color[%d] = %x\n", i, colorarray[i + 1]);
#endif /* DEBUG_FEW_COLORS */
/* Make the colormap. */
cmap = pixcmapCreate(depth);
for (i = 0; i < ncolors; i++) {
ppixel = colorarray + i + 1;
extractRGBValues(*ppixel, &rval, &gval, &bval);
pixcmapAddColor(cmap, rval, gval, bval);
}
pixSetColormap(pixd, cmap);
cleanup_arrays:
LEPT_FREE(octarray);
LEPT_FREE(colorarray);
LEPT_FREE(rtab);
LEPT_FREE(gtab);
LEPT_FREE(btab);
return pixd;
}
|
Safe
|
[
"CWE-125"
] |
leptonica
|
5ee24b398bb67666f6d173763eaaedd9c36fb1e5
|
7.073374215036865e+37
| 127 |
Fixed issue 22140 in oss-fuzz: Heap-buffer-overflow
* color quantized pix must be 8 bpp before extra colors are added.
| 0 |
__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
{
struct net_device *dev = rx->sdata->dev;
struct sk_buff *skb = rx->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
struct sk_buff_head frame_list;
struct ethhdr ethhdr;
const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
check_da = NULL;
check_sa = NULL;
} else switch (rx->sdata->vif.type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
check_da = NULL;
break;
case NL80211_IFTYPE_STATION:
if (!rx->sta ||
!test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
check_sa = NULL;
break;
case NL80211_IFTYPE_MESH_POINT:
check_sa = NULL;
break;
default:
break;
}
skb->dev = dev;
__skb_queue_head_init(&frame_list);
if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
rx->sdata->vif.addr,
rx->sdata->vif.type,
data_offset))
return RX_DROP_UNUSABLE;
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
rx->local->hw.extra_tx_headroom,
check_da, check_sa);
while (!skb_queue_empty(&frame_list)) {
rx->skb = __skb_dequeue(&frame_list);
if (!ieee80211_frame_allowed(rx, fc)) {
dev_kfree_skb(rx->skb);
continue;
}
ieee80211_deliver_skb(rx);
}
return RX_QUEUED;
}
|
Safe
|
[] |
linux
|
588f7d39b3592a36fb7702ae3b8bdd9be4621e2f
|
5.712468390160195e+37
| 57 |
mac80211: drop robust management frames from unknown TA
When receiving a robust management frame, drop it if we don't have
rx->sta since then we don't have a security association and thus
couldn't possibly validate the frame.
Cc: stable@vger.kernel.org
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
| 0 |
void tcp_rcv_space_adjust(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int time;
int copied;
time = tcp_time_stamp - tp->rcvq_space.time;
if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
return;
/* Number of bytes copied to user in last RTT */
copied = tp->copied_seq - tp->rcvq_space.seq;
if (copied <= tp->rcvq_space.space)
goto new_measure;
/* A bit of theory :
* copied = bytes received in previous RTT, our base window
* To cope with packet losses, we need a 2x factor
* To cope with slow start, and sender growing its cwin by 100 %
* every RTT, we need a 4x factor, because the ACK we are sending
* now is for the next RTT, not the current one :
* <prev RTT . ><current RTT .. ><next RTT .... >
*/
if (sysctl_tcp_moderate_rcvbuf &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
int rcvwin, rcvmem, rcvbuf;
/* minimal window to cope with packet losses, assuming
* steady state. Add some cushion because of small variations.
*/
rcvwin = (copied << 1) + 16 * tp->advmss;
/* If rate increased by 25%,
* assume slow start, rcvwin = 3 * copied
* If rate increased by 50%,
* assume sender can use 2x growth, rcvwin = 4 * copied
*/
if (copied >=
tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
if (copied >=
tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
rcvwin <<= 1;
else
rcvwin += (rcvwin >> 1);
}
rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
if (rcvbuf > sk->sk_rcvbuf) {
sk->sk_rcvbuf = rcvbuf;
/* Make the window clamp follow along. */
tp->window_clamp = rcvwin;
}
}
tp->rcvq_space.space = copied;
new_measure:
tp->rcvq_space.seq = tp->copied_seq;
tp->rcvq_space.time = tcp_time_stamp;
}
|
Safe
|
[
"CWE-703",
"CWE-189"
] |
linux
|
8b8a321ff72c785ed5e8b4cf6eda20b35d427390
|
1.26521628098823e+38
| 65 |
tcp: fix zero cwnd in tcp_cwnd_reduction
Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode
conditionally") introduced a bug that cwnd may become 0 when both
inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead
to a div-by-zero if the connection starts another cwnd reduction
phase by setting tp->prior_cwnd to the current cwnd (0) in
tcp_init_cwnd_reduction().
To prevent this we skip PRR operation when nothing is acked or
sacked. Then cwnd must be positive in all cases as long as ssthresh
is positive:
1) The proportional reduction mode
inflight > ssthresh > 0
2) The reduction bound mode
a) inflight == ssthresh > 0
b) inflight < ssthresh
sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh
Therefore in all cases inflight and sndcnt can not both be 0.
We check invalid tp->prior_cwnd to avoid potential div0 bugs.
In reality this bug is triggered only with a sequence of less common
events. For example, the connection is terminating an ECN-triggered
cwnd reduction with an inflight 0, then it receives reordered/old
ACKs or DSACKs from prior transmission (which acks nothing). Or the
connection is in fast recovery stage that marks everything lost,
but fails to retransmit due to local issues, then receives data
packets from other end which acks nothing.
Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally")
Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
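The guard the tcp commit describes is small: skip the PRR bookkeeping entirely when the ACK neither acks nor sacks anything, so cwnd = inflight + sndcnt can never collapse to zero. A simplified sketch; field names follow the commit text, not any exact kernel tree:

/* Hedged sketch of the PRR guard: a no-op ACK must not drive cwnd to 0. */
static void prr_update(unsigned int *cwnd, unsigned int inflight,
                       unsigned int sndcnt, int newly_acked_sacked)
{
    if (newly_acked_sacked <= 0)
        return;                       /* nothing acked or sacked: no-op */
    *cwnd = inflight + sndcnt;        /* stays positive while ssthresh > 0 */
}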
char **lxc_normalize_path(const char *path)
{
char **components;
char **p;
size_t components_len = 0;
size_t pos = 0;
components = lxc_string_split(path, '/');
if (!components)
return NULL;
for (p = components; *p; p++)
components_len++;
/* resolve '.' and '..' */
for (pos = 0; pos < components_len; ) {
if (!strcmp(components[pos], ".") || (!strcmp(components[pos], "..") && pos == 0)) {
/* eat this element */
free(components[pos]);
memmove(&components[pos], &components[pos+1], sizeof(char *) * (components_len - pos));
components_len--;
} else if (!strcmp(components[pos], "..")) {
/* eat this and the previous element */
free(components[pos - 1]);
free(components[pos]);
memmove(&components[pos-1], &components[pos+1], sizeof(char *) * (components_len - pos));
components_len -= 2;
pos--;
} else {
pos++;
}
}
return components;
}
|
Safe
|
[
"CWE-59",
"CWE-61"
] |
lxc
|
592fd47a6245508b79fe6ac819fe6d3b2c1289be
|
2.2368453732995725e+38
| 34 |
CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's initial fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Acked-by: Stéphane Graber <stgraber@ubuntu.com>
| 0 |
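The traversal safe_mount() is described as doing can be sketched in a few lines: open each path component relative to its parent with O_NOFOLLOW, so a symlink planted by the container aborts the walk. Illustrative only, Linux-specific (O_PATH), and not lxc's actual implementation:

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Walk relpath component by component under rootfd, refusing symlinks.
 * On success returns an fd for the final component, suitable for use as
 * a mount target via /proc/self/fd/<fd>. relpath is modified in place. */
static int open_without_symlinks(int rootfd, char *relpath)
{
    int fd = dup(rootfd), next;
    char *comp, *save = NULL;

    for (comp = strtok_r(relpath, "/", &save); comp;
         comp = strtok_r(NULL, "/", &save)) {
        next = openat(fd, comp, O_PATH | O_NOFOLLOW);
        close(fd);
        if (next < 0)
            return -1;          /* symlink (ELOOP) or missing component */
        fd = next;
    }
    return fd;
}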
void MemoryInfo(MemoryTracker* tracker) const override {
tracker->TrackField("parser_buffer", parser_buffer);
}
|
Safe
|
[
"CWE-400"
] |
node
|
753f3b247ae2d24fee0b3f48b9ec3a5c308f0650
|
2.6403704720790608e+38
| 3 |
http: add requestTimeout
This commits introduces a new http.Server option called requestTimeout
with a default value in milliseconds of 0.
If requestTimeout is set to a positive value, the server will start a new
timer set to expire in requestTimeout milliseconds when a new connection
is established. The timer is also set again if new requests after the
first are received on the socket (this handles pipelining and keep-alive
cases).
The timer is cancelled when:
1. the request body is completely received by the server.
2. the response is completed. This handles the case where the
application responds to the client without consuming the request body.
3. the connection is upgraded, like in the WebSocket case.
If the timer expires, then the server responds with status code 408 and
closes the connection.
CVE-2020-8251
PR-URL: https://github.com/nodejs-private/node-private/pull/208
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Robert Nagy <ronagy@icloud.com>
Reviewed-By: Mary Marchini <oss@mmarchini.me>
Co-Authored-By: Paolo Insogna <paolo@cowtech.it>
Co-Authored-By: Robert Nagy <ronagy@icloud.com>
| 0 |
static char *get_len(char *p,
struct SYMBOL *s)
{
int l1, l2, d;
char *error_txt = NULL;
if (strcmp(p, "auto") == 0) { /* L:auto */
ulen = 15120; // 2*2*2*2*3*3*3*5*7
s->u.length.base_length = -1;
return error_txt;
}
l1 = 0;
l2 = 1;
if (sscanf(p, "%d /%d ", &l1, &l2) != 2
|| l1 == 0) {
s->u.length.base_length = ulen ? ulen : BASE_LEN / 8;
return "Bad unit note length: unchanged";
}
if (l2 == 0) {
error_txt = "Bad length divisor, set to 4";
l2 = 4;
}
d = BASE_LEN / l2;
if (d * l2 != BASE_LEN) {
error_txt = "Length incompatible with BASE, using 1/8";
d = BASE_LEN / 8;
} else {
d *= l1;
if (l1 != 1
|| (l2 & (l2 - 1))) {
error_txt = "Incorrect unit note length, using 1/8";
d = BASE_LEN / 8;
}
}
s->u.length.base_length = d;
return error_txt;
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
abcm2ps
|
3169ace6d63f6f517a64e8df0298f44a490c4a15
|
9.823369092746666e+37
| 38 |
fix: crash when accidental without a note at start of line after K:
Issue #84.
| 0 |
bit_write_3BLL (Bit_Chain *dat, BITCODE_BLL value)
{
// 64bit into how many bytes?
int i;
int len = 0;
BITCODE_BLL umax = 0xf000000000000000ULL;
for (i = 16; i; i--, umax >>= 8)
{
if (value & umax)
{
len = i;
break;
}
}
bit_write_3B (dat, len);
for (i = 0; i < len; i++)
{
// least significant byte first
bit_write_RC (dat, value & 0xFF);
value >>= 8;
}
}
|
Safe
|
[
"CWE-703",
"CWE-125"
] |
libredwg
|
95cc9300430d35feb05b06a9badf678419463dbe
|
1.4715830818807283e+38
| 22 |
encode: protect from stack under-flow
From GH #178 fuzzing
| 0 |
check_top_offset(void)
{
lineoff_T loff;
int n;
long so = get_scrolloff_value();
if (curwin->w_cursor.lnum < curwin->w_topline + so
#ifdef FEAT_FOLDING
|| hasAnyFolding(curwin)
#endif
)
{
loff.lnum = curwin->w_cursor.lnum;
#ifdef FEAT_DIFF
loff.fill = 0;
n = curwin->w_topfill; // always have this context
#else
n = 0;
#endif
// Count the visible screen lines above the cursor line.
while (n < so)
{
topline_back(&loff);
// Stop when included a line above the window.
if (loff.lnum < curwin->w_topline
#ifdef FEAT_DIFF
|| (loff.lnum == curwin->w_topline && loff.fill > 0)
#endif
)
break;
n += loff.height;
}
if (n < so)
return TRUE;
}
return FALSE;
}
|
Safe
|
[
"CWE-122"
] |
vim
|
777e7c21b7627be80961848ac560cb0a9978ff43
|
3.104671561485424e+38
| 37 |
patch 8.2.3564: invalid memory access when scrolling without valid screen
Problem: Invalid memory access when scrolling without a valid screen.
Solution: Do not set VALID_BOTLINE in w_valid.
| 0 |
FLAC__bool format_input(FLAC__int32 *dest[], unsigned wide_samples, FLAC__bool is_big_endian, FLAC__bool is_unsigned_samples, unsigned channels, unsigned bps, unsigned shift, size_t *channel_map)
{
unsigned wide_sample, sample, channel;
FLAC__int32 *out[FLAC__MAX_CHANNELS];
if(0 == channel_map) {
for(channel = 0; channel < channels; channel++)
out[channel] = dest[channel];
}
else {
for(channel = 0; channel < channels; channel++)
out[channel] = dest[channel_map[channel]];
}
if(bps == 8) {
if(is_unsigned_samples) {
for(sample = wide_sample = 0; wide_sample < wide_samples; wide_sample++)
for(channel = 0; channel < channels; channel++, sample++)
out[channel][wide_sample] = (FLAC__int32)ubuffer.u8[sample] - 0x80;
}
else {
for(sample = wide_sample = 0; wide_sample < wide_samples; wide_sample++)
for(channel = 0; channel < channels; channel++, sample++)
out[channel][wide_sample] = (FLAC__int32)ubuffer.s8[sample];
}
}
else if(bps == 16) {
if(is_big_endian != is_big_endian_host_) {
unsigned char tmp;
const unsigned bytes = wide_samples * channels * (bps >> 3);
unsigned b;
for(b = 0; b < bytes; b += 2) {
tmp = ubuffer.u8[b];
ubuffer.u8[b] = ubuffer.u8[b+1];
ubuffer.u8[b+1] = tmp;
}
}
if(is_unsigned_samples) {
for(sample = wide_sample = 0; wide_sample < wide_samples; wide_sample++)
for(channel = 0; channel < channels; channel++, sample++)
out[channel][wide_sample] = ubuffer.u16[sample] - 0x8000;
}
else {
for(sample = wide_sample = 0; wide_sample < wide_samples; wide_sample++)
for(channel = 0; channel < channels; channel++, sample++)
out[channel][wide_sample] = ubuffer.s16[sample];
}
}
else if(bps == 24) {
if(!is_big_endian) {
unsigned char tmp;
const unsigned bytes = wide_samples * channels * (bps >> 3);
unsigned b;
for(b = 0; b < bytes; b += 3) {
tmp = ubuffer.u8[b];
ubuffer.u8[b] = ubuffer.u8[b+2];
ubuffer.u8[b+2] = tmp;
}
}
if(is_unsigned_samples) {
unsigned b;
for(b = sample = wide_sample = 0; wide_sample < wide_samples; wide_sample++)
for(channel = 0; channel < channels; channel++, sample++) {
FLAC__int32 t;
t = ubuffer.u8[b++]; t <<= 8;
t |= ubuffer.u8[b++]; t <<= 8;
t |= ubuffer.u8[b++];
t -= 0x800000;
out[channel][wide_sample] = t;
}
}
else {
unsigned b;
for(b = sample = wide_sample = 0; wide_sample < wide_samples; wide_sample++)
for(channel = 0; channel < channels; channel++, sample++) {
FLAC__int32 t;
t = ubuffer.s8[b++]; t <<= 8;
t |= ubuffer.u8[b++]; t <<= 8;
t |= ubuffer.u8[b++];
out[channel][wide_sample] = t;
}
}
}
else {
FLAC__ASSERT(0);
}
if(shift > 0) {
FLAC__int32 mask = (1<<shift)-1;
for(wide_sample = 0; wide_sample < wide_samples; wide_sample++)
for(channel = 0; channel < channels; channel++) {
if(out[channel][wide_sample] & mask) {
flac__utils_printf(stderr, 1, "ERROR during read, sample data (channel#%u sample#%u = %d) has non-zero least-significant bits\n WAVE/AIFF header said the last %u bits are not significant and should be zero.\n", channel, wide_sample, out[channel][wide_sample], shift);
return false;
}
out[channel][wide_sample] >>= shift;
}
}
return true;
}
|
Safe
|
[] |
flac
|
c06a44969c1145242a22f75fc8fb2e8b54c55303
|
3.2573687490160186e+38
| 99 |
flac : Fix for https://sourceforge.net/p/flac/bugs/425/
* flac/encode.c : Validate num_tracks field of cuesheet.
* libFLAC/stream_encoder.c : Add check for a NULL pointer.
* flac/encode.c : Improve bounds checking.
Closes: https://sourceforge.net/p/flac/bugs/425/
| 0 |
static void DestroyQuantumPixels(QuantumInfo *quantum_info)
{
register ssize_t
i;
ssize_t
extent;
assert(quantum_info != (QuantumInfo *) NULL);
assert(quantum_info->signature == MagickCoreSignature);
assert(quantum_info->pixels != (MemoryInfo **) NULL);
extent=(ssize_t) quantum_info->extent;
for (i=0; i < (ssize_t) quantum_info->number_threads; i++)
if (quantum_info->pixels[i] != (MemoryInfo *) NULL)
{
unsigned char
*pixels;
/*
Did we overrun our quantum buffer?
*/
pixels=(unsigned char *) GetVirtualMemoryBlob(quantum_info->pixels[i]);
assert(pixels[extent] == QuantumSignature);
quantum_info->pixels[i]=RelinquishVirtualMemory(
quantum_info->pixels[i]);
}
quantum_info->pixels=(MemoryInfo **) RelinquishMagickMemory(
quantum_info->pixels);
}
|
Safe
|
[
"CWE-190"
] |
ImageMagick
|
f60d59cc3a7e3402d403361e0985ffa56f746a82
|
2.6644145472832363e+38
| 29 |
https://github.com/ImageMagick/ImageMagick/issues/1727
| 0 |
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
txsc_stats->stats.OutOctetsEncrypted += skb->len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
txsc_stats->stats.OutOctetsProtected += skb->len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
u64_stats_update_end(&txsc_stats->syncp);
}
|
Safe
|
[
"CWE-119"
] |
net
|
5294b83086cc1c35b4efeca03644cf9d12282e5b
|
3.035165870354626e+38
| 17 |
macsec: dynamically allocate space for sglist
We call skb_cow_data, which is good anyway to ensure we can actually
modify the skb as such (another error from prior). Now that we have the
number of fragments required, we can safely allocate exactly that amount
of memory.
Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
struct hci_conn *hcon;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
if (ev->status)
return;
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
if (hcon) {
hcon->state = BT_CLOSED;
hci_conn_del(hcon);
}
hci_dev_unlock(hdev);
}
|
Safe
|
[
"CWE-290"
] |
linux
|
3ca44c16b0dcc764b641ee4ac226909f5c421aa3
|
1.166131818700412e+38
| 21 |
Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm call hci_connect_cfm in case the connection
state is BT_CONFIG so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
| 0 |
void smtp_server_connection_reply_lines(struct smtp_server_connection *conn,
unsigned int status,
const char *enh_code,
const char *const *text_lines)
{
struct smtp_reply reply;
i_zero(&reply);
reply.status = status;
reply.text_lines = text_lines;
if (!smtp_reply_parse_enhanced_code(
enh_code, &reply.enhanced_code, NULL))
reply.enhanced_code = SMTP_REPLY_ENH_CODE(status / 100, 0, 0);
T_BEGIN {
string_t *str;
e_debug(conn->event, "Sent: %s", smtp_reply_log(&reply));
str = t_str_new(256);
smtp_reply_write(str, &reply);
o_stream_nsend(conn->conn.output, str_data(str), str_len(str));
} T_END;
}
|
Safe
|
[
"CWE-77"
] |
core
|
321c339756f9b2b98fb7326359d1333adebb5295
|
2.010346797487016e+38
| 25 |
lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability.
The input handler kept reading more commands even though the input was locked by
the STARTTLS command, thereby causing it to read the command pipelined beyond
STARTTLS. This causes a STARTTLS command injection vulnerability.
| 0 |
dfaexec (struct dfa *d, char const *begin, char *end,
int allow_nl, int *count, int *backref)
{
int s, s1; /* Current state. */
unsigned char const *p; /* Current input character. */
int **trans, *t; /* Copy of d->trans so it can be optimized
into a register. */
unsigned char eol = eolbyte; /* Likewise for eolbyte. */
unsigned char saved_end;
if (! d->tralloc)
build_state_zero(d);
s = s1 = 0;
p = (unsigned char const *) begin;
trans = d->trans;
saved_end = *(unsigned char *) end;
*end = eol;
if (d->mb_cur_max > 1)
{
MALLOC(mblen_buf, end - begin + 2);
MALLOC(inputwcs, end - begin + 2);
memset(&mbs, 0, sizeof(mbstate_t));
prepare_wc_buf ((const char *) p, end);
}
for (;;)
{
if (d->mb_cur_max > 1)
while ((t = trans[s]) != NULL)
{
if (p > buf_end)
break;
s1 = s;
SKIP_REMAINS_MB_IF_INITIAL_STATE(s, p);
if (d->states[s].mbps.nelem == 0)
{
s = t[*p++];
continue;
}
/* Falling back to the glibc matcher in this case gives
better performance (up to 25% better on [a-z], for
example) and enables support for collating symbols and
equivalence classes. */
if (backref)
{
*backref = 1;
free(mblen_buf);
free(inputwcs);
*end = saved_end;
return (char *) p;
}
/* Can match with a multibyte character (and multi character
collating element). Transition table might be updated. */
s = transit_state(d, s, &p);
trans = d->trans;
}
else
{
while ((t = trans[s]) != NULL)
{
s1 = t[*p++];
if ((t = trans[s1]) == NULL)
{
int tmp = s; s = s1; s1 = tmp; /* swap */
break;
}
s = t[*p++];
}
}
if (s >= 0 && (char *) p <= end && d->fails[s])
{
if (d->success[s] & sbit[*p])
{
if (backref)
*backref = (d->states[s].backref != 0);
if (d->mb_cur_max > 1)
{
free(mblen_buf);
free(inputwcs);
}
*end = saved_end;
return (char *) p;
}
s1 = s;
if (d->mb_cur_max > 1)
{
/* Can match with a multibyte character (and multicharacter
collating element). Transition table might be updated. */
s = transit_state(d, s, &p);
trans = d->trans;
}
else
s = d->fails[s][*p++];
continue;
}
/* If the previous character was a newline, count it. */
if ((char *) p <= end && p[-1] == eol)
{
if (count)
++*count;
if (d->mb_cur_max > 1)
prepare_wc_buf ((const char *) p, end);
}
/* Check if we've run off the end of the buffer. */
if ((char *) p > end)
{
if (d->mb_cur_max > 1)
{
free(mblen_buf);
free(inputwcs);
}
*end = saved_end;
return NULL;
}
if (s >= 0)
{
build_state(s, d);
trans = d->trans;
continue;
}
if (p[-1] == eol && allow_nl)
{
s = d->newlines[s1];
continue;
}
s = 0;
}
}
|
Vulnerable
|
[
"CWE-189"
] |
grep
|
cbbc1a45b9f843c811905c97c90a5d31f8e6c189
|
2.6398573386516015e+38
| 141 |
grep: fix some core dumps with long lines etc.
These problems mostly occur because the code attempts to stuff
sizes into int or into unsigned int; this doesn't work on most
64-bit hosts and the errors can lead to core dumps.
* NEWS: Document this.
* src/dfa.c (token): Typedef to ptrdiff_t, since the enum's
range could be as small as -128 .. 127 on practical hosts.
(position.index): Now size_t, not unsigned int.
(leaf_set.elems): Now size_t *, not unsigned int *.
(dfa_state.hash, struct mb_char_classes.nchars, .nch_classes)
(.nranges, .nequivs, .ncoll_elems, struct dfa.cindex, .calloc, .tindex)
(.talloc, .depth, .nleaves, .nregexps, .nmultibyte_prop, .nmbcsets):
(.mbcsets_alloc): Now size_t, not int.
(dfa_state.first_end): Now token, not int.
(state_num): New type.
(struct mb_char_classes.cset): Now ptrdiff_t, not int.
(struct dfa.utf8_anychar_classes): Now token[5], not int[5].
(struct dfa.sindex, .salloc, .tralloc): Now state_num, not int.
(struct dfa.trans, .realtrans, .fails): Now state_num **, not int **.
(struct dfa.newlines): Now state_num *, not int *.
(prtok): Don't assume 'token' is no wider than int.
(lexleft, parens, depth): Now size_t, not int.
(charclass_index, nsubtoks)
(parse_bracket_exp, addtok, copytoks, closure, insert, merge, delete)
(state_index, epsclosure, state_separate_contexts)
(dfaanalyze, dfastate, build_state, realloc_trans_if_necessary)
(transit_state_singlebyte, match_anychar, match_mb_charset)
(check_matching_with_multibyte_ops, transit_state_consume_1char)
(transit_state, dfaexec, free_mbdata, dfaoptimize, dfafree)
(freelist, enlist, addlists, inboth, dfamust):
Don't assume indexes fit in 'int'.
(lex): Avoid overflow in string-to-{hi,lo} conversions.
(dfaanalyze): Redo indexing so that it works with size_t values,
which cannot go negative.
* src/dfa.h (dfaexec): Count argument is now size_t *, not int *.
(dfastate): State numbers are now ptrdiff_t, not int.
* src/dfasearch.c: Include "intprops.h", for TYPE_MAXIMUM.
(kwset_exact_matches): Now size_t, not int.
(EGexecute): Don't assume indexes fit in 'int'.
Check for overflow before converting a ptrdiff_t to a regoff_t,
as regoff_t is narrower than ptrdiff_t in 64-bit glibc (contra POSIX).
Check for memory exhaustion in re_search rather than treating
it merely as failure to match; use xalloc_die () to report any error.
* src/kwset.c (struct trie.accepting): Now size_t, not unsigned int.
(struct kwset.words): Now ptrdiff_t, not int.
* src/kwset.h (struct kwsmatch.index): Now size_t, not int.
| 1 |
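The core failure mode behind most of those grep changes is narrow index types. A tiny self-contained demonstration of why stuffing a size into int breaks on LP64 hosts (not taken from grep itself):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
    size_t len = (size_t)3 << 30;   /* ~3 GiB line length, fits in size_t */
    int narrow = (int)len;          /* exceeds INT_MAX: value is
                                       implementation-defined, typically
                                       wraps negative on LP64 */
    printf("size_t: %zu, int: %d\n", len, narrow);
    return 0;
}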
static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
{
memcpy(a1, a2, sizeof(struct in6_addr));
}
|
Safe
|
[
"CWE-703"
] |
linux
|
87c48fa3b4630905f98268dde838ee43626a060c
|
1.4692110210642867e+38
| 4 |
ipv6: make fragment identifications less predictable
IPv6 fragment identification generation is way beyond what we use for
IPv4 : It uses a single generator. It's not scalable and allows DOS
attacks.
Now inetpeer is IPv6 aware, we can use it to provide a more secure and
scalable frag ident generator (per destination, instead of system wide)
This patch :
1) defines a new secure_ipv6_id() helper
2) extends inet_getid() to provide 32bit results
3) extends ipv6_select_ident() with a new dest parameter
Reported-by: Fernando Gont <fernando@gont.com.ar>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
void __fastcall TSCPFileSystem::DetectReturnVar()
{
// This supposes that something was already executed (probably SkipStartupMessage())
// or return code variable is already set on start up.
try
{
// #60 17.10.01: "status" and "?" switched
UnicodeString ReturnVars[2] = { L"status", L"?" };
UnicodeString NewReturnVar = L"";
FTerminal->LogEvent(L"Detecting variable containing return code of last command.");
for (int Index = 0; Index < 2; Index++)
{
bool Success = true;
try
{
FTerminal->LogEvent(FORMAT(L"Trying \"$%s\".", (ReturnVars[Index])));
ExecCommand(fsVarValue, ARRAYOFCONST((ReturnVars[Index])));
if ((Output->Count != 1) || (StrToIntDef(Output->Strings[0], 256) > 255))
{
FTerminal->LogEvent(L"The response is not numerical exit code");
Abort();
}
}
catch (EFatal &E)
{
// if fatal error occurs, we need to exit ...
throw;
}
catch (Exception &E)
{
// ...otherwise, we will try next variable (if any)
Success = false;
}
if (Success)
{
NewReturnVar = ReturnVars[Index];
break;
}
}
if (NewReturnVar.IsEmpty())
{
EXCEPTION;
}
else
{
FCommandSet->ReturnVar = NewReturnVar;
FTerminal->LogEvent(FORMAT(L"Return code variable \"%s\" selected.",
(FCommandSet->ReturnVar)));
}
}
catch (Exception &E)
{
FTerminal->CommandError(&E, LoadStr(DETECT_RETURNVAR_ERROR));
}
}
|
Safe
|
[
"CWE-20"
] |
winscp
|
49d876f2c5fc00bcedaa986a7cf6dedd6bf16f54
|
3.2413292559565953e+38
| 59 |
Bug 1675: Prevent SCP server sending files that were not requested
https://winscp.net/tracker/1675
Source commit: 4aa587620973bf793fb6e783052277c0f7be4b55
| 0 |
static void error(char *err) {
printf("%s\n", err);
exit(1);
}
|
Safe
|
[
"CWE-190"
] |
redis
|
789f10156009b404950ad717642a9496ed887083
|
1.1671297025381057e+38
| 4 |
Fix integer overflow in intset (CVE-2021-29478)
An integer overflow bug in Redis 6.2 could be exploited to corrupt the heap and
potentially result with remote code execution.
The vulnerability involves changing the default set-max-intset-entries
configuration value, creating a large set key that consists of integer values
and using the COPY command to duplicate it.
The integer overflow bug exists in all versions of Redis starting with 2.6,
where it could result with a corrupted RDB or DUMP payload, but not exploited
through COPY (which did not exist before 6.2).
(cherry picked from commit 29900d4e6bccdf3691bedf0ea9a5d84863fa3592)
| 0 |
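The class of fix in that Redis commit is a checked size computation before allocation. A hedged sketch with made-up names, not Redis' real intset layout:

#include <stdint.h>
#include <stdlib.h>

/* Compute element-count * element-size in 64 bits and verify the total
 * allocation fits in size_t before calling malloc, instead of letting a
 * 32-bit multiply wrap. Header size here is illustrative. */
static void *alloc_intset(uint32_t nelems, uint32_t elem_size)
{
    uint64_t bytes = (uint64_t)nelems * elem_size;
    if (bytes > SIZE_MAX - sizeof(uint32_t) * 2)   /* header + payload */
        return NULL;
    return malloc(sizeof(uint32_t) * 2 + (size_t)bytes);
}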
Section* Binary::section_from_offset(uint64_t offset) {
return const_cast<Section*>(static_cast<const Binary*>(this)->section_from_offset(offset));
}
|
Safe
|
[
"CWE-703"
] |
LIEF
|
7acf0bc4224081d4f425fcc8b2e361b95291d878
|
2.9964282380766296e+38
| 3 |
Resolve #764
| 0 |
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
cred_t *cr, zfs_fuid_type_t type)
{
/*
* The Linux port only supports POSIX IDs, use the passed id.
*/
return (fuid);
}
|
Safe
|
[
"CWE-200",
"CWE-732"
] |
zfs
|
716b53d0a14c72bda16c0872565dd1909757e73f
|
1.4509181073436942e+38
| 8 |
FreeBSD: Fix UNIX permissions checking
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #10727
| 0 |
rb_locale_str_new_cstr(const char *ptr)
{
return rb_external_str_new_with_enc(ptr, strlen(ptr), rb_locale_encoding());
}
|
Safe
|
[
"CWE-119"
] |
ruby
|
1c2ef610358af33f9ded3086aa2d70aac03dcac5
|
1.0867399829785854e+38
| 4 |
* string.c (rb_str_justify): CVE-2009-4124.
Fixes a bug reported by
Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London;
Patch by nobu.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
| 0 |
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
struct user_namespace *user_ns)
{
mm->mmap = NULL;
mm->mm_rb = RB_ROOT;
mm->vmacache_seqnum = 0;
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
mmap_init_lock(mm);
INIT_LIST_HEAD(&mm->mmlist);
mm->core_state = NULL;
mm_pgtables_bytes_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
atomic_set(&mm->has_pinned, 0);
atomic64_set(&mm->pinned_vm, 0);
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
spin_lock_init(&mm->arg_lock);
mm_init_cpumask(mm);
mm_init_aio(mm);
mm_init_owner(mm, p);
RCU_INIT_POINTER(mm->exe_file, NULL);
mmu_notifier_subscriptions_init(mm);
init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
mm->pmd_huge_pte = NULL;
#endif
mm_init_uprobes_state(mm);
if (current->mm) {
mm->flags = current->mm->flags & MMF_INIT_MASK;
mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
} else {
mm->flags = default_dump_filter;
mm->def_flags = 0;
}
if (mm_alloc_pgd(mm))
goto fail_nopgd;
if (init_new_context(p, mm))
goto fail_nocontext;
mm->user_ns = get_user_ns(user_ns);
return mm;
fail_nocontext:
mm_free_pgd(mm);
fail_nopgd:
free_mm(mm);
return NULL;
}
|
Safe
|
[
"CWE-665",
"CWE-362"
] |
linux
|
b4e00444cab4c3f3fec876dc0cccc8cbb0d1a948
|
2.3603710046588658e+38
| 53 |
fork: fix copy_process(CLONE_PARENT) race with the exiting ->real_parent
current->group_leader->exit_signal may change during copy_process() if
current->real_parent exits.
Move the assignment inside tasklist_lock to avoid the race.
Signed-off-by: Eddy Wu <eddy_wu@trendmicro.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
aspath_snmp_pathseg (struct aspath *as, size_t *varlen)
{
#define SNMP_PATHSEG_MAX 1024
if (!snmp_stream)
snmp_stream = stream_new (SNMP_PATHSEG_MAX);
else
stream_reset (snmp_stream);
if (!as)
{
*varlen = 0;
return NULL;
}
aspath_put (snmp_stream, as, 0); /* use 16 bit for now here */
*varlen = stream_get_endp (snmp_stream);
return stream_pnt(snmp_stream);
}
|
Safe
|
[
"CWE-20"
] |
quagga
|
7a42b78be9a4108d98833069a88e6fddb9285008
|
1.483855267873481e+38
| 19 |
bgpd: Fix AS_PATH size calculation for long paths
If you have an AS_PATH with more entries than
can be written into a single AS_SEGMENT (AS_SEGMENT_MAX),
it needs to be broken up. The code that noticed
that the AS_PATH needs to be broken up was not
correctly calculating the size of the resulting
message. This patch addresses this issue.
| 0 |
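The corrected arithmetic the bgpd commit alludes to: once an AS_PATH is split, every extra segment contributes its own two-byte (type, length) header. A sketch with illustrative constants, not quagga's exact code:

#include <stddef.h>

#define AS_SEGMENT_MAX   255   /* max ASNs per segment */
#define AS_HEADER_SIZE     2   /* segment type + length octets */

static size_t aspath_wire_size(size_t asn_count, size_t asn_size)
{
    size_t segments;

    if (asn_count == 0)
        return 0;
    /* round up: 256 ASNs need two segments, each with its own header */
    segments = (asn_count + AS_SEGMENT_MAX - 1) / AS_SEGMENT_MAX;
    return segments * AS_HEADER_SIZE + asn_count * asn_size;
}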
**/
CImg<T>& assign(const unsigned int size_x, const unsigned int size_y,
const unsigned int size_z, const unsigned int size_c,
const char *const values, const bool repeat_values) {
return assign(size_x,size_y,size_z,size_c).fill(values,repeat_values);
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
1.3197543308425297e+38
| 5 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
XML_SetExternalEntityRefHandler(XML_Parser parser,
XML_ExternalEntityRefHandler handler)
{
if (parser != NULL)
externalEntityRefHandler = handler;
}
|
Safe
|
[
"CWE-611"
] |
libexpat
|
c4bf96bb51dd2a1b0e185374362ee136fe2c9d7f
|
2.0958339546125745e+38
| 6 |
xmlparse.c: Fix external entity infinite loop bug (CVE-2017-9233)
| 0 |
static struct server_data *find_server(int index,
const char *server,
int protocol)
{
GSList *list;
debug("index %d server %s proto %d", index, server, protocol);
for (list = server_list; list; list = list->next) {
struct server_data *data = list->data;
if (index < 0 && data->index < 0 &&
g_str_equal(data->server, server) &&
data->protocol == protocol)
return data;
if (index < 0 ||
data->index < 0 || !data->server)
continue;
if (data->index == index &&
g_str_equal(data->server, server) &&
data->protocol == protocol)
return data;
}
return NULL;
}
|
Safe
|
[
"CWE-119"
] |
connman
|
5c281d182ecdd0a424b64f7698f32467f8f67b71
|
3.101853425207145e+38
| 28 |
dnsproxy: Fix crash on malformed DNS response
If the response query string is malformed, we might access memory
past the end of the "name" variable in parse_response().
| 0 |
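The bounds check that commit calls for amounts to never reading past the received packet while walking DNS labels. A minimal sketch (compression pointers and other details omitted; illustrative only, not connman's code):

#include <stddef.h>

/* Return the encoded length of a DNS name starting at p, or -1 if the
 * name is malformed or runs past end. */
static int dns_name_length(const unsigned char *p, const unsigned char *end)
{
    const unsigned char *start = p;

    while (p < end && *p != 0) {
        unsigned int label = *p;
        if (label > 63 || p + 1 + label >= end)
            return -1;            /* bad label or truncated name */
        p += 1 + label;
    }
    if (p >= end)
        return -1;                /* missing terminating zero byte */
    return (int)(p - start) + 1;
}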
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
struct sta_info *sta = tx->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_local *local = tx->local;
if (unlikely(!sta))
return TX_CONTINUE;
if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
test_sta_flag(sta, WLAN_STA_PS_DELIVER)) &&
!(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
int ac = skb_get_queue_mapping(tx->skb);
if (ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
return TX_CONTINUE;
}
ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
sta->sta.addr, sta->sta.aid, ac);
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
/* sync with ieee80211_sta_ps_deliver_wakeup */
spin_lock(&sta->ps_lock);
/*
* STA woke up in the meantime and all the frames on ps_tx_buf have
* been queued to pending queue. No reordering can happen, go
* ahead and Tx the packet.
*/
if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
!test_sta_flag(sta, WLAN_STA_PS_DRIVER) &&
!test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
spin_unlock(&sta->ps_lock);
return TX_CONTINUE;
}
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
ps_dbg(tx->sdata,
"STA %pM TX buffer for AC %d full - dropping oldest frame\n",
sta->sta.addr, ac);
ieee80211_free_txskb(&local->hw, old);
} else
tx->local->total_ps_buffered++;
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
spin_unlock(&sta->ps_lock);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
round_jiffies(jiffies +
STA_INFO_CLEANUP_INTERVAL));
/*
* We queued up some frames, so the TIM bit might
* need to be set, recalculate it.
*/
sta_info_recalc_tim(sta);
return TX_QUEUED;
} else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
ps_dbg(tx->sdata,
"STA %pM in PS mode, but polling/in SP -> send frame\n",
sta->sta.addr);
}
return TX_CONTINUE;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
bddc0c411a45d3718ac535a070f349be8eca8d48
|
7.905524242092648e+37
| 77 |
mac80211: Fix NULL ptr deref for injected rate info
The commit cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx
queue") moved the code to validate the radiotap header from
ieee80211_monitor_start_xmit to ieee80211_parse_tx_radiotap. This made is
possible to share more code with the new Tx queue selection code for
injected frames. But at the same time, it now required the call of
ieee80211_parse_tx_radiotap at the beginning of functions which wanted to
handle the radiotap header. And this broke the rate parser for radiotap
header parser.
The radiotap parser for rates is operating most of the time only on the
data in the actual radiotap header. But for the 802.11a/b/g rates, it must
also know the selected band from the chandef information. But this
information is only written to the ieee80211_tx_info at the end of the
ieee80211_monitor_start_xmit - long after ieee80211_parse_tx_radiotap was
already called. The info->band information was therefore always 0
(NL80211_BAND_2GHZ) when the parser code tried to access it.
For a 5GHz only device, injecting a frame with 802.11a rates would cause a
NULL pointer dereference because local->hw.wiphy->bands[NL80211_BAND_2GHZ]
would most likely have been NULL when the radiotap parser searched for the
correct rate index of the driver.
Cc: stable@vger.kernel.org
Reported-by: Ben Greear <greearb@candelatech.com>
Fixes: cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx queue")
Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be>
[sven@narfation.org: added commit message]
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Link: https://lore.kernel.org/r/20210530133226.40587-1-sven@narfation.org
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
| 0 |
static void d_lru_del(struct dentry *dentry)
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
|
Safe
|
[
"CWE-362",
"CWE-399"
] |
linux
|
49d31c2f389acfe83417083e1208422b4091cd9e
|
2.9053499978878734e+38
| 7 |
dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
| 0 |
static SLJIT_INLINE void return_with_partial_match(compiler_common *common, struct sljit_label *quit)
{
DEFINE_COMPILER;
sljit_s32 mov_opcode;
SLJIT_COMPILE_ASSERT(STR_END == SLJIT_S0, str_end_must_be_saved_reg0);
SLJIT_ASSERT(common->start_used_ptr != 0 && common->start_ptr != 0
&& (common->mode == PCRE2_JIT_PARTIAL_SOFT ? common->hit_start != 0 : common->hit_start == 0));
OP1(SLJIT_MOV, SLJIT_R1, 0, ARGUMENTS, 0);
OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP),
common->mode == PCRE2_JIT_PARTIAL_SOFT ? common->hit_start : common->start_ptr);
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE2_ERROR_PARTIAL);
/* Store match begin and end. */
OP1(SLJIT_MOV, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, begin));
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, startchar_ptr), SLJIT_R2, 0);
OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, match_data));
mov_opcode = (sizeof(PCRE2_SIZE) == 4) ? SLJIT_MOV_U32 : SLJIT_MOV;
OP2(SLJIT_SUB, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_S1, 0);
#if PCRE2_CODE_UNIT_WIDTH == 16 || PCRE2_CODE_UNIT_WIDTH == 32
OP2(SLJIT_ASHR, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
OP1(mov_opcode, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(pcre2_match_data, ovector), SLJIT_R2, 0);
OP2(SLJIT_SUB, STR_END, 0, STR_END, 0, SLJIT_S1, 0);
#if PCRE2_CODE_UNIT_WIDTH == 16 || PCRE2_CODE_UNIT_WIDTH == 32
OP2(SLJIT_ASHR, STR_END, 0, STR_END, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
OP1(mov_opcode, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(pcre2_match_data, ovector) + sizeof(PCRE2_SIZE), STR_END, 0);
JUMPTO(SLJIT_JUMP, quit);
}
|
Safe
|
[
"CWE-125"
] |
php-src
|
8947fd9e9fdce87cd6c59817b1db58e789538fe9
|
2.5691859285886606e+38
| 35 |
Fix #78338: Array cross-border reading in PCRE
We backport r1092 from pcre2.
| 0 |
collapse_uids( KBNODE *keyblock )
{
KBNODE uid1;
int any=0;
for(uid1=*keyblock;uid1;uid1=uid1->next)
{
KBNODE uid2;
if(is_deleted_kbnode(uid1))
continue;
if(uid1->pkt->pkttype!=PKT_USER_ID)
continue;
for(uid2=uid1->next;uid2;uid2=uid2->next)
{
if(is_deleted_kbnode(uid2))
continue;
if(uid2->pkt->pkttype!=PKT_USER_ID)
continue;
if(cmp_user_ids(uid1->pkt->pkt.user_id,
uid2->pkt->pkt.user_id)==0)
{
/* We have a duplicated uid */
KBNODE sig1,last;
any=1;
/* Now take uid2's signatures, and attach them to
uid1 */
for(last=uid2;last->next;last=last->next)
{
if(is_deleted_kbnode(last))
continue;
if(last->next->pkt->pkttype==PKT_USER_ID
|| last->next->pkt->pkttype==PKT_PUBLIC_SUBKEY
|| last->next->pkt->pkttype==PKT_SECRET_SUBKEY)
break;
}
/* Snip out uid2 */
(find_prev_kbnode(*keyblock,uid2,0))->next=last->next;
/* Now put uid2 in place as part of uid1 */
last->next=uid1->next;
uid1->next=uid2;
delete_kbnode(uid2);
/* Now dedupe uid1 */
for(sig1=uid1->next;sig1;sig1=sig1->next)
{
KBNODE sig2;
if(is_deleted_kbnode(sig1))
continue;
if(sig1->pkt->pkttype==PKT_USER_ID
|| sig1->pkt->pkttype==PKT_PUBLIC_SUBKEY
|| sig1->pkt->pkttype==PKT_SECRET_SUBKEY)
break;
if(sig1->pkt->pkttype!=PKT_SIGNATURE)
continue;
for(sig2=sig1->next,last=sig1;sig2;last=sig2,sig2=sig2->next)
{
if(is_deleted_kbnode(sig2))
continue;
if(sig2->pkt->pkttype==PKT_USER_ID
|| sig2->pkt->pkttype==PKT_PUBLIC_SUBKEY
|| sig2->pkt->pkttype==PKT_SECRET_SUBKEY)
break;
if(sig2->pkt->pkttype!=PKT_SIGNATURE)
continue;
if(cmp_signatures(sig1->pkt->pkt.signature,
sig2->pkt->pkt.signature)==0)
{
/* We have a match, so delete the second
signature */
delete_kbnode(sig2);
sig2=last;
}
}
}
}
}
}
commit_kbnode(keyblock);
if(any && !opt.quiet)
{
const char *key="???";
if ((uid1 = find_kbnode (*keyblock, PKT_PUBLIC_KEY)) )
key = keystr_from_pk (uid1->pkt->pkt.public_key);
else if ((uid1 = find_kbnode( *keyblock, PKT_SECRET_KEY)) )
key = keystr_from_pk (uid1->pkt->pkt.public_key);
log_info (_("key %s: duplicated user ID detected - merged\n"), key);
}
return any;
}
|
Safe
|
[
"CWE-20"
] |
gnupg
|
f0b33b6fb8e0586e9584a7a409dcc31263776a67
|
2.1124555637301538e+38
| 111 |
gpg: Import only packets which are allowed in a keyblock.
* g10/import.c (valid_keyblock_packet): New.
(read_block): Store only valid packets.
--
A corrupted key, which for example included a mangled public key
encrypted packet, used to corrupt the keyring. This change skips all
packets which are not allowed in a keyblock.
GnuPG-bug-id: 1455
(cherry-picked from commit f795a0d59e197455f8723c300eebf59e09853efa)
| 0 |
void CLASS stretch()
{
ushort newdim, (*img)[4], *pix0, *pix1;
int row, col, c;
double rc, frac;
if (pixel_aspect == 1) return;
dcraw_message (DCRAW_VERBOSE,_("Stretching the image...\n"));
if (pixel_aspect < 1) {
newdim = height / pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (width*newdim, sizeof *img);
merror (img, "stretch()");
for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c*width];
if (c+1 < height) pix1 += width*4;
for (col=0; col < width; col++, pix0+=4, pix1+=4)
FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
height = newdim;
} else {
newdim = width * pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (height*newdim, sizeof *img);
merror (img, "stretch()");
for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c];
if (c+1 < width) pix1 += 4;
for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
width = newdim;
}
free (image);
image = img;
}
|
Safe
|
[
"CWE-189"
] |
rawstudio
|
983bda1f0fa5fa86884381208274198a620f006e
|
3.1628626612770374e+38
| 36 |
Avoid overflow in ljpeg_start().
| 0 |
static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
gen_op_mov_reg_v(size, reg, cpu_tmp0);
}
|
Safe
|
[
"CWE-94"
] |
qemu
|
30663fd26c0307e414622c7a8607fbc04f92ec14
|
1.4077306260485192e+38
| 5 |
tcg/i386: Check the size of instruction being translated
This fixes the bug: 'user-to-root privesc inside VM via bad translation
caching' reported by Jann Horn here:
https://bugs.chromium.org/p/project-zero/issues/detail?id=1122
Reviewed-by: Richard Henderson <rth@twiddle.net>
CC: Peter Maydell <peter.maydell@linaro.org>
CC: Paolo Bonzini <pbonzini@redhat.com>
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Pranith Kumar <bobby.prani@gmail.com>
Message-Id: <20170323175851.14342-1-bobby.prani@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
static void test_fetch_double()
{
int rc;
myheader("test_fetch_double");
rc= mysql_query(mysql, "DROP TABLE IF EXISTS test_bind_fetch");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE test_bind_fetch(c1 double(5, 2), "
"c2 double unsigned, c3 double unsigned, "
"c4 double unsigned, c5 double unsigned, "
"c6 double unsigned, c7 double unsigned)");
myquery(rc);
bind_fetch(3);
}
|
Safe
|
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
|
1.8807251832929702e+38
| 18 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
| 0 |
static void carray(JF, js_Ast *list)
{
while (list) {
emitline(J, F, list->a);
cexp(J, F, list->a);
emit(J, F, OP_INITARRAY);
list = list->b;
}
}
|
Safe
|
[
"CWE-703",
"CWE-787"
] |
mujs
|
df8559e7bdbc6065276e786217eeee70f28fce66
|
2.351570276750707e+38
| 9 |
Bug 704749: Clear jump list after patching jump addresses.
Since we can emit a statement multiple times when compiling try/finally
we have to use a new patch list for each instance.
| 0 |
static int mb86a20s_get_blk_error(struct dvb_frontend *fe,
                                  unsigned layer,
                                  u32 *error, u32 *count)
{
    struct mb86a20s_state *state = fe->demodulator_priv;
    int rc, val;
    u32 collect_rate;

    dev_dbg(&state->i2c->dev, "%s called.\n", __func__);

    if (layer >= NUM_LAYERS)
        return -EINVAL;

    /* Check if the PER measures are already available */
    rc = mb86a20s_writereg(state, 0x50, 0xb8);
    if (rc < 0)
        return rc;
    rc = mb86a20s_readreg(state, 0x51);
    if (rc < 0)
        return rc;

    /* Check if data is available for that layer */
    if (!(rc & (1 << layer))) {
        dev_dbg(&state->i2c->dev,
                "%s: block counts for layer %c aren't available yet.\n",
                __func__, 'A' + layer);
        return -EBUSY;
    }

    /* Read Packet error Count */
    rc = mb86a20s_writereg(state, 0x50, 0xb9 + layer * 2);
    if (rc < 0)
        return rc;
    rc = mb86a20s_readreg(state, 0x51);
    if (rc < 0)
        return rc;
    *error = rc << 8;
    rc = mb86a20s_writereg(state, 0x50, 0xba + layer * 2);
    if (rc < 0)
        return rc;
    rc = mb86a20s_readreg(state, 0x51);
    if (rc < 0)
        return rc;
    *error |= rc;

    dev_dbg(&state->i2c->dev, "%s: block error for layer %c: %d.\n",
            __func__, 'A' + layer, *error);

    /* Read Bit Count */
    rc = mb86a20s_writereg(state, 0x50, 0xb2 + layer * 2);
    if (rc < 0)
        return rc;
    rc = mb86a20s_readreg(state, 0x51);
    if (rc < 0)
        return rc;
    *count = rc << 8;
    rc = mb86a20s_writereg(state, 0x50, 0xb3 + layer * 2);
    if (rc < 0)
        return rc;
    rc = mb86a20s_readreg(state, 0x51);
    if (rc < 0)
        return rc;
    *count |= rc;

    dev_dbg(&state->i2c->dev,
            "%s: block count for layer %c: %d.\n",
            __func__, 'A' + layer, *count);

    /*
     * As we get TMCC data from the frontend, we can better estimate the
     * BER bit counters, in order to do the BER measure during a longer
     * time. Use those data, if available, to update the bit count
     * measure.
     */
    if (!state->estimated_rate[layer])
        goto reset_measurement;

    collect_rate = state->estimated_rate[layer] / 204 / 8;
    if (collect_rate < 32)
        collect_rate = 32;
    if (collect_rate > 65535)
        collect_rate = 65535;
    if (collect_rate != *count) {
        dev_dbg(&state->i2c->dev,
                "%s: updating PER counter on layer %c to %d.\n",
                __func__, 'A' + layer, collect_rate);

        /* Stop PER measurement */
        rc = mb86a20s_writereg(state, 0x50, 0xb0);
        if (rc < 0)
            return rc;
        rc = mb86a20s_writereg(state, 0x51, 0x00);
        if (rc < 0)
            return rc;

        /* Update this layer's counter */
        rc = mb86a20s_writereg(state, 0x50, 0xb2 + layer * 2);
        if (rc < 0)
            return rc;
        rc = mb86a20s_writereg(state, 0x51, collect_rate >> 8);
        if (rc < 0)
            return rc;
        rc = mb86a20s_writereg(state, 0x50, 0xb3 + layer * 2);
        if (rc < 0)
            return rc;
        rc = mb86a20s_writereg(state, 0x51, collect_rate & 0xff);
        if (rc < 0)
            return rc;

        /* start PER measurement */
        rc = mb86a20s_writereg(state, 0x50, 0xb0);
        if (rc < 0)
            return rc;
        rc = mb86a20s_writereg(state, 0x51, 0x07);
        if (rc < 0)
            return rc;

        /* Reset all counters to collect new data */
        rc = mb86a20s_writereg(state, 0x50, 0xb1);
        if (rc < 0)
            return rc;
        rc = mb86a20s_writereg(state, 0x51, 0x07);
        if (rc < 0)
            return rc;
        rc = mb86a20s_writereg(state, 0x51, 0x00);

        return rc;
    }

reset_measurement:
    /* Reset counter to collect new data */
    rc = mb86a20s_writereg(state, 0x50, 0xb1);
    if (rc < 0)
        return rc;
    rc = mb86a20s_readreg(state, 0x51);
    if (rc < 0)
        return rc;
    val = rc;
    rc = mb86a20s_writereg(state, 0x51, val | (1 << layer));
    if (rc < 0)
        return rc;
    rc = mb86a20s_writereg(state, 0x51, val & ~(1 << layer));

    return rc;
}
|
Safe
|
[
"CWE-119"
] |
media_tree
|
eca2d34b9d2ce70165a50510659838e28ca22742
|
1.960113351510944e+38
| 146 |
[media] mb86a20s: apply mask to val after checking for read failure
Applying the mask 0x0f to the immediate return of the call to
mb86a20s_readreg will always result in a positive value, meaning that the
check of ret < 0 will never work. Instead, check for a negative return
value first, and then mask val with 0x0f.
Kudos to Mauro Carvalho Chehab for spotting the mistake in my original fix.
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
| 0 |
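The mistake the message describes is worth seeing in isolation: masking before the error check erases the negative error code. A standalone sketch with a stub readreg() standing in for mb86a20s_readreg():

```c
#include <stdio.h>

/* Stub standing in for mb86a20s_readreg(): returns 0..255 on success,
 * a negative errno on failure. Hard-coded to fail for the demo. */
static int readreg(void) { return -5; }

static int broken(void)
{
    int rc = readreg() & 0x0f;  /* mask first: result is always >= 0 ... */
    if (rc < 0)                 /* ... so the error check never fires */
        return rc;
    return rc;
}

static int fixed(void)
{
    int rc = readreg();
    if (rc < 0)                 /* check the raw return value first */
        return rc;
    return rc & 0x0f;           /* then mask out the payload bits */
}

int main(void)
{
    /* broken() turns the -5 error into the bogus value 11. */
    printf("broken: %d, fixed: %d\n", broken(), fixed());
    return 0;
}
```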
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                                      void *buffer, size_t *lenp, loff_t *ppos)
{
    int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

    if (ret || !write)
        return ret;

    if (sysctl_perf_cpu_time_max_percent == 100 ||
        sysctl_perf_cpu_time_max_percent == 0) {
        printk(KERN_WARNING
               "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
        WRITE_ONCE(perf_sample_allowed_ns, 0);
    } else {
        update_perf_cpu_limits();
    }

    return 0;
}
|
Safe
|
[
"CWE-401"
] |
tip
|
7bdb157cdebbf95a1cd94ed2e01b338714075d00
|
1.8117857444439657e+38
| 19 |
perf/core: Fix a memory leak in perf_event_parse_addr_filter()
As shown through runtime testing, the "filename" allocation is not
always freed in perf_event_parse_addr_filter().
There are three possible ways that this could happen:
- It could be allocated twice on subsequent iterations through the loop,
- or leaked on the success path,
- or on the failure path.
Clean up the code flow to make it obvious that 'filename' is always
freed in the reallocation path and in the two return paths as well.
We rely on the fact that kfree(NULL) is NOP and filename is initialized
with NULL.
This fixes the leak. No other side effects expected.
[ Dan Carpenter: cleaned up the code flow & added a changelog. ]
[ Ingo Molnar: updated the changelog some more. ]
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Signed-off-by: "kiyin(尹亮)" <kiyin@tencent.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: "Srivatsa S. Bhat" <srivatsa@csail.mit.edu>
Cc: Anthony Liguori <aliguori@amazon.com>
--
kernel/events/core.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
| 0 |
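The cleanup pattern the message relies on — kfree(NULL) is a no-op, so initialize the pointer to NULL, free before every reassignment, and free once on the way out — translates directly to userspace. A hedged sketch with free() standing in for kfree() and an invented parse() in place of the real filter parser:

```c
#include <stdlib.h>
#include <string.h>

/* Parse a list of tokens; 'filename' may be (re)allocated on several
 * iterations. Returns 0 on success, -1 on error. */
static int parse(const char **tokens, int ntokens)
{
    char *filename = NULL;      /* NULL so free(filename) is always safe */
    int ret = -1;

    for (int i = 0; i < ntokens; i++) {
        free(filename);         /* no-op first pass, frees stale copy after */
        filename = strdup(tokens[i]);
        if (!filename)
            goto out;
        /* ... use filename ... */
    }
    ret = 0;
out:
    free(filename);             /* one exit covers success and failure paths */
    return ret;
}

int main(void)
{
    const char *toks[] = { "a.c", "b.c" };
    return parse(toks, 2);
}
```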
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{
    lockdep_assert_held(&hugetlb_lock);
    h->nr_huge_pages++;
    h->nr_huge_pages_node[nid]++;
}
|
Safe
|
[] |
linux
|
a4a118f2eead1d6c49e00765de89878288d4b890
|
3.221093774733018e+38
| 6 |
hugetlbfs: flush TLBs correctly after huge_pmd_unshare
When __unmap_hugepage_range() calls to huge_pmd_unshare() succeed, a TLB
flush is missing. This TLB flush must be performed before releasing the
i_mmap_rwsem, in order to prevent an unshared PMDs page from being
released and reused before the TLB flush took place.
Arguably, a comprehensive solution would use mmu_gather interface to
batch the TLB flushes and the PMDs page release, however it is not an
easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call
huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2)
deferring the release of the page reference for the PMDs page until
after i_mmap_rwsem is dropped can confuse huge_pmd_unshare() into
thinking PMDs are shared when they are not.
Fix __unmap_hugepage_range() by adding the missing TLB flush, and
forcing a flush when unshare is successful.
Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages") # 3.6
Signed-off-by: Nadav Amit <namit@vmware.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
static int mov_get_rawvideo_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->par->codec_tag;
    int i;
    enum AVPixelFormat pix_fmt;

    for (i = 0; i < FF_ARRAY_ELEMS(mov_pix_fmt_tags); i++) {
        if (track->par->format == mov_pix_fmt_tags[i].pix_fmt) {
            tag = mov_pix_fmt_tags[i].tag;
            track->par->bits_per_coded_sample = mov_pix_fmt_tags[i].bps;
            if (track->par->codec_tag == mov_pix_fmt_tags[i].tag)
                break;
        }
    }

    pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov,
                                  track->par->bits_per_coded_sample);
    if (tag == MKTAG('r','a','w',' ') &&
        track->par->format != pix_fmt &&
        track->par->format != AV_PIX_FMT_GRAY8 &&
        track->par->format != AV_PIX_FMT_NONE)
        av_log(s, AV_LOG_ERROR, "%s rawvideo cannot be written to mov, output file will be unreadable\n",
               av_get_pix_fmt_name(track->par->format));

    return tag;
}
|
Safe
|
[
"CWE-125"
] |
FFmpeg
|
95556e27e2c1d56d9e18f5db34d6f756f3011148
|
3.273086766631211e+38
| 25 |
avformat/movenc: Do not pass AVCodecParameters in avpriv_request_sample
Fixes: out of array read
Fixes: ffmpeg_crash_8.avi
Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
| 0 |
nat_select_range_tuple(struct conntrack *ct, const struct conn *conn,
                       struct conn *nat_conn)
{
    enum { MIN_NAT_EPHEMERAL_PORT = 1024,
           MAX_NAT_EPHEMERAL_PORT = 65535 };

    uint16_t min_port;
    uint16_t max_port;
    uint16_t first_port;
    uint32_t hash = nat_range_hash(conn, ct->hash_basis);

    if ((conn->nat_info->nat_action & NAT_ACTION_SRC) &&
        (!(conn->nat_info->nat_action & NAT_ACTION_SRC_PORT))) {
        min_port = ntohs(conn->key.src.port);
        max_port = ntohs(conn->key.src.port);
        first_port = min_port;
    } else if ((conn->nat_info->nat_action & NAT_ACTION_DST) &&
               (!(conn->nat_info->nat_action & NAT_ACTION_DST_PORT))) {
        min_port = ntohs(conn->key.dst.port);
        max_port = ntohs(conn->key.dst.port);
        first_port = min_port;
    } else {
        uint16_t deltap = conn->nat_info->max_port - conn->nat_info->min_port;
        uint32_t port_index = hash % (deltap + 1);
        first_port = conn->nat_info->min_port + port_index;
        min_port = conn->nat_info->min_port;
        max_port = conn->nat_info->max_port;
    }

    uint32_t deltaa = 0;
    uint32_t address_index;
    struct ct_addr ct_addr;
    memset(&ct_addr, 0, sizeof ct_addr);
    struct ct_addr max_ct_addr;
    memset(&max_ct_addr, 0, sizeof max_ct_addr);
    max_ct_addr = conn->nat_info->max_addr;

    if (conn->key.dl_type == htons(ETH_TYPE_IP)) {
        deltaa = ntohl(conn->nat_info->max_addr.ipv4_aligned) -
                 ntohl(conn->nat_info->min_addr.ipv4_aligned);
        address_index = hash % (deltaa + 1);
        ct_addr.ipv4_aligned = htonl(
            ntohl(conn->nat_info->min_addr.ipv4_aligned) + address_index);
    } else {
        deltaa = nat_ipv6_addrs_delta(&conn->nat_info->min_addr.ipv6_aligned,
                                      &conn->nat_info->max_addr.ipv6_aligned);
        /* deltaa must be within 32 bits for full hash coverage. A 64 or
         * 128 bit hash is unnecessary and hence not used here. Most code
         * is kept common with V4; nat_ipv6_addrs_delta() will do the
         * enforcement via max_ct_addr. */
        max_ct_addr = conn->nat_info->min_addr;
        nat_ipv6_addr_increment(&max_ct_addr.ipv6_aligned, deltaa);
        address_index = hash % (deltaa + 1);
        ct_addr.ipv6_aligned = conn->nat_info->min_addr.ipv6_aligned;
        nat_ipv6_addr_increment(&ct_addr.ipv6_aligned, address_index);
    }

    uint16_t port = first_port;
    bool all_ports_tried = false;
    /* For DNAT, we don't use ephemeral ports. */
    bool ephemeral_ports_tried = conn->nat_info->nat_action & NAT_ACTION_DST
                                 ? true : false;
    struct ct_addr first_addr = ct_addr;

    while (true) {
        if (conn->nat_info->nat_action & NAT_ACTION_SRC) {
            nat_conn->rev_key.dst.addr = ct_addr;
        } else {
            nat_conn->rev_key.src.addr = ct_addr;
        }

        if ((conn->key.nw_proto == IPPROTO_ICMP) ||
            (conn->key.nw_proto == IPPROTO_ICMPV6)) {
            all_ports_tried = true;
        } else if (conn->nat_info->nat_action & NAT_ACTION_SRC) {
            nat_conn->rev_key.dst.port = htons(port);
        } else {
            nat_conn->rev_key.src.port = htons(port);
        }

        bool new_insert = nat_conn_keys_insert(&ct->nat_conn_keys, nat_conn,
                                               ct->hash_basis);
        if (new_insert) {
            return true;
        } else if (!all_ports_tried) {
            if (min_port == max_port) {
                all_ports_tried = true;
            } else if (port == max_port) {
                port = min_port;
            } else {
                port++;
            }
            if (port == first_port) {
                all_ports_tried = true;
            }
        } else {
            if (memcmp(&ct_addr, &max_ct_addr, sizeof ct_addr)) {
                if (conn->key.dl_type == htons(ETH_TYPE_IP)) {
                    ct_addr.ipv4_aligned = htonl(
                        ntohl(ct_addr.ipv4_aligned) + 1);
                } else {
                    nat_ipv6_addr_increment(&ct_addr.ipv6_aligned, 1);
                }
            } else {
                ct_addr = conn->nat_info->min_addr;
            }
            if (!memcmp(&ct_addr, &first_addr, sizeof ct_addr)) {
                if (!ephemeral_ports_tried) {
                    ephemeral_ports_tried = true;
                    ct_addr = conn->nat_info->min_addr;
                    first_addr = ct_addr;
                    min_port = MIN_NAT_EPHEMERAL_PORT;
                    max_port = MAX_NAT_EPHEMERAL_PORT;
                } else {
                    break;
                }
            }
            first_port = min_port;
            port = first_port;
            all_ports_tried = false;
        }
    }
    return false;
}
|
Safe
|
[
"CWE-400"
] |
ovs
|
abd7a457652e6734902720fe6a5dddb3fc0d1e3b
|
2.3403100136970926e+38
| 124 |
flow: Support extra padding length.
Although not required, padding can optionally be added until
the packet length reaches the MTU. A packet with extra padding
currently fails the sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <joakim.hindersson@elastx.se>
Acked-by: Ilya Maximets <i.maximets@ovn.org>
Signed-off-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
| 0 |
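The fix is conceptually a one-character change to a length sanity check: a frame may legally carry link-layer padding beyond the IP datagram, so the check must be "at least" rather than "exactly". A toy sketch with invented names, not OVS's miniflow code:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Accept a frame if it carries at least the bytes the IP header claims;
 * anything beyond ip_tot_len is treated as harmless link-layer padding. */
static bool ip_len_sane(size_t l3_bytes_present, uint16_t ip_tot_len)
{
    return l3_bytes_present >= ip_tot_len;   /* '==' here was the bug */
}

int main(void)
{
    /* A minimum-size Ethernet payload carrying a 28-byte IP packet: padded. */
    printf("padded frame ok:    %d\n", ip_len_sane(46, 28));
    printf("truncated frame ok: %d\n", ip_len_sane(20, 28));
    return 0;
}
```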
bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
                 bool any_combination_will_do, uint number, bool no_errors)
{
  TABLE_LIST *tl;
  TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table();
  Security_context *sctx= thd->security_ctx;
  uint i;
  ulong orig_want_access= want_access;
  DBUG_ENTER("check_grant");
  DBUG_ASSERT(number > 0);

  /*
    Walk through the list of tables that belong to the query and save the
    requested access (orig_want_privilege) to be able to use it when
    checking access rights to the underlying tables of a view. Our grant
    system gradually eliminates checked bits from want_privilege and thus
    after all checks are done we can no longer use it.
    The check that first_not_own_table is not reached is for the case when
    the given table list refers to the list for prelocking (contains tables
    of other queries). For simple queries first_not_own_table is 0.
  */
  for (i= 0, tl= tables;
       i < number && tl != first_not_own_table;
       tl= tl->next_global, i++)
  {
    /*
      Save a copy of the privileges without the SHOW_VIEW_ACL attribute.
      It will be checked during making view.
    */
    tl->grant.orig_want_privilege= (want_access & ~SHOW_VIEW_ACL);
  }

  mysql_rwlock_rdlock(&LOCK_grant);
  for (tl= tables;
       tl && number-- && tl != first_not_own_table;
       tl= tl->next_global)
  {
    TABLE_LIST *const t_ref=
      tl->correspondent_table ? tl->correspondent_table : tl;
    sctx = MY_TEST(t_ref->security_ctx) ? t_ref->security_ctx :
                                          thd->security_ctx;

    const ACL_internal_table_access *access=
      get_cached_table_access(&t_ref->grant.m_internal,
                              t_ref->get_db_name(),
                              t_ref->get_table_name());

    if (access)
    {
      switch(access->check(orig_want_access, &t_ref->grant.privilege))
      {
      case ACL_INTERNAL_ACCESS_GRANTED:
        /*
          Grant all access to the table to skip column checks.
          Depend on the controls in the P_S table itself.
        */
        t_ref->grant.privilege|= TMP_TABLE_ACLS;
        t_ref->grant.want_privilege= 0;
        continue;
      case ACL_INTERNAL_ACCESS_DENIED:
        goto err;
      case ACL_INTERNAL_ACCESS_CHECK_GRANT:
        break;
      }
    }

    want_access= orig_want_access;
    want_access&= ~sctx->master_access;
    if (!want_access)
      continue;                                 // ok

    if (!(~t_ref->grant.privilege & want_access) ||
        t_ref->is_anonymous_derived_table() || t_ref->schema_table)
    {
      /*
        It is subquery in the FROM clause. VIEW set t_ref->derived after
        table opening, but this function always called before table opening.
      */
      if (!t_ref->referencing_view)
      {
        /*
          If it's a temporary table created for a subquery in the FROM
          clause, or an INFORMATION_SCHEMA table, drop the request for
          a privilege.
        */
        t_ref->grant.want_privilege= 0;
      }
      continue;
    }

    if (is_temporary_table(t_ref))
    {
      /*
        If this table list element corresponds to a pre-opened temporary
        table skip checking of all relevant table-level privileges for it.
        Note that during creation of temporary table we still need to check
        if user has CREATE_TMP_ACL.
      */
      t_ref->grant.privilege|= TMP_TABLE_ACLS;
      t_ref->grant.want_privilege= 0;
      continue;
    }

    GRANT_TABLE *grant_table= table_hash_search(sctx->get_host()->ptr(),
                                                sctx->get_ip()->ptr(),
                                                t_ref->get_db_name(),
                                                sctx->priv_user,
                                                t_ref->get_table_name(),
                                                FALSE);

    if (!grant_table)
    {
      want_access &= ~t_ref->grant.privilege;
      goto err;                                 // No grants
    }

    /*
      For SHOW COLUMNS, SHOW INDEX it is enough to have some
      privileges on any column combination on the table.
    */
    if (any_combination_will_do)
      continue;

    t_ref->grant.grant_table= grant_table;      // Remember for column test
    t_ref->grant.version= grant_version;
    t_ref->grant.privilege|= grant_table->privs;
    t_ref->grant.want_privilege= ((want_access & COL_ACLS) & ~t_ref->grant.privilege);

    if (!(~t_ref->grant.privilege & want_access))
      continue;

    if (want_access & ~(grant_table->cols | t_ref->grant.privilege))
    {
      want_access &= ~(grant_table->cols | t_ref->grant.privilege);
      goto err;                                 // impossible
    }
  }
  mysql_rwlock_unlock(&LOCK_grant);
  DBUG_RETURN(FALSE);

err:
  mysql_rwlock_unlock(&LOCK_grant);
  if (!no_errors)                               // Not a silent skip of table
  {
    char command[128];
    get_privilege_desc(command, sizeof(command), want_access);
    my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0),
             command,
             sctx->priv_user,
             sctx->host_or_ip,
             tl ? tl->get_table_name() : "unknown");
  }
  DBUG_RETURN(TRUE);
}
|
Safe
|
[] |
mysql-server
|
25d1b7e03b9b375a243fabdf0556c063c7282361
|
1.8188321549148626e+38
| 154 |
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
| 0 |
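get_56_lenc_string() parses the protocol's length-encoded strings; the bug class here is adding an untrusted length to a cursor so the sum wraps before the bounds test sees it. A simplified, self-contained sketch of the overflow-safe shape (single-byte length prefix only; the real encoding has multi-byte forms):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Read a 1-byte length-prefixed string from buf. Returns a pointer to the
 * string bytes and sets *len, or NULL if the claimed length overruns buf.
 * Written so no arithmetic on the untrusted length can wrap. */
static const uint8_t *get_lenc_string(const uint8_t *buf, size_t buf_len,
                                      size_t *len)
{
    if (buf_len < 1)
        return NULL;
    size_t claimed = buf[0];
    /* Compare against the space that remains, never 'pos + claimed',
     * whose sum could overflow for a wider length type. */
    if (claimed > buf_len - 1)
        return NULL;
    *len = claimed;
    return buf + 1;
}

int main(void)
{
    uint8_t pkt[] = { 3, 'a', 'b', 'c' };
    size_t len;
    const uint8_t *s = get_lenc_string(pkt, sizeof pkt, &len);
    if (s)
        printf("got %zu bytes: %.*s\n", len, (int) len, (const char *) s);
    return 0;
}
```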
internalSubsetSplit(void *ctx, const xmlChar *name,
                    const xmlChar *ExternalID, const xmlChar *SystemID)
{
    xmlSchemaSAXPlugPtr ctxt = (xmlSchemaSAXPlugPtr) ctx;
    if ((ctxt != NULL) && (ctxt->user_sax != NULL) &&
        (ctxt->user_sax->internalSubset != NULL))
        ctxt->user_sax->internalSubset(ctxt->user_data, name, ExternalID,
                                       SystemID);
}
|
Safe
|
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
|
4.88853933628803e+37
| 9 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
| 0 |
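On GCC and Clang, LIBXML_ATTR_FORMAT boils down to the standard format-function attribute, which is what turns a latent format-string bug into a compile-time warning. A minimal standalone example of the mechanism, not libxml2's actual macro definition:

```c
#include <stdarg.h>
#include <stdio.h>

/* Tell the compiler argument 1 is a printf-style format string and the
 * variadic arguments start at position 2, enabling -Wformat checking. */
__attribute__((format(printf, 1, 2)))
static void log_msg(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    log_msg("parsed %d elements\n", 42);      /* OK */
    /* log_msg("parsed %s elements\n", 42);      <- would warn at build time */
    return 0;
}
```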
TEST_F(PlaintextRecordTest, TestDataRemaining) {
  addToQueue("16030100050123456789160301");
  auto msg = read_.read(queue_);
  EXPECT_EQ(msg->type, ContentType::handshake);
  expectSame(msg->fragment, "0123456789");
  EXPECT_EQ(queue_.chainLength(), 3);
  expectSame(queue_.move(), "160301");
}
|
Safe
|
[
"CWE-119",
"CWE-835",
"CWE-787"
] |
fizz
|
40bbb161e72fb609608d53b9d64c56bb961a6ee2
|
7.518876459466766e+36
| 8 |
Avoid arithmetic operation on uint16 read from the wire.
Summary:
This could overflow previously.
CVE-2019-3560
Reviewed By: yfeldblum
Differential Revision: D14152362
fbshipit-source-id: c0ebb3fc59b49c7c23e6bcb90458c19cd891be65
| 0 |
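The hazard named in the advisory is easy to reproduce: a length field read from the wire as 16 bits, plus a small header constant, truncated back to 16 bits before the bounds check. A standalone sketch — widen before adding:

```c
#include <stdint.h>
#include <stdio.h>

/* A record claims a 16-bit payload length; the full record also carries a
 * 5-byte header. Computing the total in 16 bits can wrap to a tiny value. */
int main(void)
{
    uint16_t wire_len = 65533;                      /* attacker-controlled */

    uint16_t bad_total  = (uint16_t)(wire_len + 5); /* wraps to 2 */
    uint32_t good_total = (uint32_t) wire_len + 5;  /* 65538, as intended */

    printf("truncated total: %u\n", (unsigned) bad_total);
    printf("widened total:   %u\n", (unsigned) good_total);

    /* A bounds check against bad_total would accept a buffer far smaller
     * than the record actually needs; check good_total instead. */
    return 0;
}
```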
void migrate_page_copy(struct page *newpage, struct page *page)
{
    int cpupid;

    if (PageHuge(page) || PageTransHuge(page))
        copy_huge_page(newpage, page);
    else
        copy_highpage(newpage, page);

    if (PageError(page))
        SetPageError(newpage);
    if (PageReferenced(page))
        SetPageReferenced(newpage);
    if (PageUptodate(page))
        SetPageUptodate(newpage);
    if (TestClearPageActive(page)) {
        VM_BUG_ON_PAGE(PageUnevictable(page), page);
        SetPageActive(newpage);
    } else if (TestClearPageUnevictable(page))
        SetPageUnevictable(newpage);
    if (PageChecked(page))
        SetPageChecked(newpage);
    if (PageMappedToDisk(page))
        SetPageMappedToDisk(newpage);

    if (PageDirty(page)) {
        clear_page_dirty_for_io(page);
        /*
         * Want to mark the page and the radix tree as dirty, and
         * redo the accounting that clear_page_dirty_for_io undid,
         * but we can't use set_page_dirty because that function
         * is actually a signal that all of the page has become dirty.
         * Whereas only part of our page may be dirty.
         */
        if (PageSwapBacked(page))
            SetPageDirty(newpage);
        else
            __set_page_dirty_nobuffers(newpage);
    }

    if (page_is_young(page))
        set_page_young(newpage);
    if (page_is_idle(page))
        set_page_idle(newpage);

    /*
     * Copy NUMA information to the new page, to prevent over-eager
     * future migrations of this same page.
     */
    cpupid = page_cpupid_xchg_last(page, -1);
    page_cpupid_xchg_last(newpage, cpupid);

    ksm_migrate_page(newpage, page);
    /*
     * Please do not reorder this without considering how mm/ksm.c's
     * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
     */
    if (PageSwapCache(page))
        ClearPageSwapCache(page);
    ClearPagePrivate(page);
    set_page_private(page, 0);

    /*
     * If any waiters have accumulated on the new page then
     * wake them up.
     */
    if (PageWriteback(newpage))
        end_page_writeback(newpage);
}
|
Vulnerable
|
[
"CWE-476"
] |
linux
|
42cb14b110a5698ccf26ce59c4441722605a3743
|
1.3539036144701031e+38
| 69 |
mm: migrate dirty page without clear_page_dirty_for_io etc
clear_page_dirty_for_io() has accumulated writeback and memcg subtleties
since v2.6.16 first introduced page migration; and the set_page_dirty()
which completed its migration of PageDirty, later had to be moderated to
__set_page_dirty_nobuffers(); then PageSwapBacked had to skip that too.
No actual problems seen with this procedure recently, but if you look into
what the clear_page_dirty_for_io(page)+set_page_dirty(newpage) is actually
achieving, it turns out to be nothing more than moving the PageDirty flag,
and its NR_FILE_DIRTY stat from one zone to another.
It would be good to avoid a pile of irrelevant decrementations and
incrementations, and improper event counting, and unnecessary descent of
the radix_tree under tree_lock (to set the PAGECACHE_TAG_DIRTY which
radix_tree_replace_slot() left in place anyway).
Do the NR_FILE_DIRTY movement, like the other stats movements, while
interrupts still disabled in migrate_page_move_mapping(); and don't even
bother if the zone is the same. Do the PageDirty movement there under
tree_lock too, where old page is frozen and newpage not yet visible:
bearing in mind that as soon as newpage becomes visible in radix_tree, an
un-page-locked set_page_dirty() might interfere (or perhaps that's just
not possible: anything doing so should already hold an additional
reference to the old page, preventing its migration; but play safe).
But we do still need to transfer PageDirty in migrate_page_copy(), for
those who don't go the mapping route through migrate_page_move_mapping().
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 1 |
static MagickBooleanType IsPDF(const unsigned char *magick,const size_t offset)
{
  if (offset < 5)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"%PDF-",5) == 0)
    return(MagickTrue);
  return(MagickFalse);
}
|
Safe
|
[
"CWE-617"
] |
ImageMagick
|
2001aabeab8a921a4fa0b1932f565aef6ae69f84
|
3.17532043299013e+38
| 8 |
https://github.com/ImageMagick/ImageMagick/issues/674
| 0 |
static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
{
    struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1];
    struct nlattr *cqm;
    int err;

    cqm = info->attrs[NL80211_ATTR_CQM];
    if (!cqm)
        return -EINVAL;

    err = nla_parse_nested_deprecated(attrs, NL80211_ATTR_CQM_MAX, cqm,
                                      nl80211_attr_cqm_policy,
                                      info->extack);
    if (err)
        return err;

    if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
        attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
        const s32 *thresholds =
            nla_data(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
        int len = nla_len(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
        u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);

        if (len % 4)
            return -EINVAL;

        return nl80211_set_cqm_rssi(info, thresholds, len / 4,
                                    hysteresis);
    }

    if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
        attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
        attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
        u32 rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
        u32 pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
        u32 intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);

        return nl80211_set_cqm_txe(info, rate, pkts, intvl);
    }

    return -EINVAL;
}
|
Safe
|
[
"CWE-120"
] |
linux
|
f88eb7c0d002a67ef31aeb7850b42ff69abc46dc
|
1.3252100214281032e+38
| 42 |
nl80211: validate beacon head
We currently don't validate the beacon head, i.e. the header,
fixed part and elements that are to go in front of the TIM
element. This means that the variable elements there can be
malformed, e.g. have a length exceeding the buffer size, but
most downstream code from this assumes that this has already
been checked.
Add the necessary checks to the netlink policy.
Cc: stable@vger.kernel.org
Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings")
Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
| 0 |
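Validating a beacon head means walking its information elements — (id, length, value) triples — and rejecting any element whose declared length runs past the buffer. A generic sketch of that walk; it makes no claim to match nl80211's actual policy code:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Validate a buffer of TLV elements: 1 byte id, 1 byte length, then
 * 'length' bytes of value. Returns false if any element overruns. */
static bool elements_valid(const uint8_t *buf, size_t len)
{
    size_t pos = 0;
    while (pos < len) {
        if (len - pos < 2)          /* room for id + length bytes? */
            return false;
        uint8_t elen = buf[pos + 1];
        if (elen > len - pos - 2)   /* declared length past the buffer? */
            return false;
        pos += 2 + (size_t) elen;
    }
    return true;
}

int main(void)
{
    uint8_t ok[]  = { 0x00, 0x03, 'f', 'o', 'o' };
    uint8_t bad[] = { 0x00, 0x7f, 'x' };    /* claims 127 bytes, has 1 */
    printf("ok:  %d\n", elements_valid(ok, sizeof ok));
    printf("bad: %d\n", elements_valid(bad, sizeof bad));
    return 0;
}
```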