func (string)
target (string)
cwe (list)
project (string)
commit_id (string)
hash (string)
size (int64)
message (string)
vul (int64)
void UrlParser::parse(char ch) { switch(_state) { case state_0: if (ch == '=') _state = state_value; else if (ch == '&') ; else if (ch == '%') _state = state_keyesc; else { _key = ch; _state = state_key; } break; case state_key: if (ch == '=') _state = state_value; else if (ch == '&') { _q.add(_key); _key.clear(); _state = state_0; } else if (ch == '%') _state = state_keyesc; else _key += ch; break; case state_value: if (ch == '%') _state = state_valueesc; else if (ch == '&') { _q.add(_key, _value); _key.clear(); _value.clear(); _state = state_0; } else if (ch == '+') _value += ' '; else _value += ch; break; case state_keyesc: case state_valueesc: if (ch >= '0' && ch <= '9') { ++_cnt; _v = (_v << 4) + (ch - '0'); } else if (ch >= 'a' && ch <= 'f') { ++_cnt; _v = (_v << 4) + (ch - 'a' + 10); } else if (ch >= 'A' && ch <= 'F') { ++_cnt; _v = (_v << 4) + (ch - 'A' + 10); } else { if (_cnt == 0) { if (_state == state_keyesc) { _key += '%'; _state = state_key; } else { _value += '%'; _state = state_value; } } else { if (_state == state_keyesc) { _key += static_cast<char>(_v); _state = state_key; } else { _value += static_cast<char>(_v); _state = state_value; } _cnt = 0; _v = 0; } parse(ch); break; } if (_cnt >= 2) { if (_state == state_keyesc) { _key += static_cast<char>(_v); _state = state_key; } else { _value += static_cast<char>(_v); _state = state_value; } _cnt = 0; _v = 0; } break; } }
Safe
[ "CWE-399" ]
cxxtools
142bb2589dc184709857c08c1e10570947c444e3
1.3322126312792012e+38
123
fix parsing double % in query parameters
0
mark_trusted_task_done (GObject *source_object, GAsyncResult *res, gpointer user_data) { MarkTrustedJob *job = user_data; g_object_unref (job->file); if (job->done_callback) { job->done_callback (!job_aborted ((CommonJob *) job), job->done_callback_data); } finalize_common ((CommonJob *) job); }
Vulnerable
[ "CWE-20" ]
nautilus
1630f53481f445ada0a455e9979236d31a8d3bb0
1.3829108675870285e+38
16
mime-actions: use file metadata for trusting desktop files Currently we only trust desktop files that have the executable bit set, and don't replace the displayed icon or the displayed name until it's trusted, which prevents for running random programs by a malicious desktop file. However, the executable permission is preserved if the desktop file comes from a compressed file. To prevent this, add a metadata::trusted metadata to the file once the user acknowledges the file as trusted. This adds metadata to the file, which cannot be added unless it has access to the computer. Also remove the SHEBANG "trusted" content we were putting inside the desktop file, since that doesn't add more security since it can come with the file itself. https://bugzilla.gnome.org/show_bug.cgi?id=777991
1
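The commit message above describes recording the user's trust decision as GIO file metadata instead of relying on the executable bit. A minimal hedged sketch of that idea follows; the attribute value is an assumption, not necessarily the exact string Nautilus writes.

#include <gio/gio.h>

/* Hedged sketch: persist the user's "trusted" acknowledgement as GIO file
 * metadata, as the commit message describes. The "true" value here is an
 * assumption; only the metadata::trusted attribute name comes from the
 * message. Not Nautilus' actual code. */
static gboolean
mark_desktop_file_trusted (GFile *file, GError **error)
{
    return g_file_set_attribute_string (file,
                                        "metadata::trusted", "true",
                                        G_FILE_QUERY_INFO_NONE,
                                        NULL /* cancellable */,
                                        error);
}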
static struct server_data *create_server(int index, const char *domain, const char *server, int protocol) { struct server_data *data; struct addrinfo hints, *rp; int ret; DBG("index %d server %s", index, server); data = g_try_new0(struct server_data, 1); if (!data) { connman_error("Failed to allocate server %s data", server); return NULL; } data->index = index; if (domain) data->domains = g_list_append(data->domains, g_strdup(domain)); data->server = g_strdup(server); data->protocol = protocol; memset(&hints, 0, sizeof(hints)); switch (protocol) { case IPPROTO_UDP: hints.ai_socktype = SOCK_DGRAM; break; case IPPROTO_TCP: hints.ai_socktype = SOCK_STREAM; break; default: destroy_server(data); return NULL; } hints.ai_family = AF_UNSPEC; hints.ai_flags = AI_NUMERICSERV | AI_NUMERICHOST; ret = getaddrinfo(data->server, "53", &hints, &rp); if (ret) { connman_error("Failed to parse server %s address: %s\n", data->server, gai_strerror(ret)); destroy_server(data); return NULL; } /* Do not blindly copy this code elsewhere; it doesn't loop over the results using ->ai_next as it should. That's OK in *this* case because it was a numeric lookup; we *know* there's only one. */ data->server_addr_len = rp->ai_addrlen; switch (rp->ai_family) { case AF_INET: data->server_addr = (struct sockaddr *) g_try_new0(struct sockaddr_in, 1); break; case AF_INET6: data->server_addr = (struct sockaddr *) g_try_new0(struct sockaddr_in6, 1); break; default: connman_error("Wrong address family %d", rp->ai_family); break; } if (!data->server_addr) { freeaddrinfo(rp); destroy_server(data); return NULL; } memcpy(data->server_addr, rp->ai_addr, rp->ai_addrlen); freeaddrinfo(rp); if (server_create_socket(data) != 0) { destroy_server(data); return NULL; } if (protocol == IPPROTO_UDP) { if (__connman_service_index_is_default(data->index) || __connman_service_index_is_split_routing( data->index)) { data->enabled = true; DBG("Adding DNS server %s", data->server); enable_fallback(false); } server_list = g_slist_append(server_list, data); } return data; }
Safe
[ "CWE-119" ]
connman
5c281d182ecdd0a424b64f7698f32467f8f67b71
2.285691498857453e+37
95
dnsproxy: Fix crash on malformed DNS response If the response query string is malformed, we might access memory pass the end of "name" variable in parse_response().
0
void georadiusGeneric(client *c, int flags) { robj *key = c->argv[1]; robj *storekey = NULL; int storedist = 0; /* 0 for STORE, 1 for STOREDIST. */ /* Look up the requested zset */ robj *zobj = NULL; if ((zobj = lookupKeyReadOrReply(c, key, shared.emptyarray)) == NULL || checkType(c, zobj, OBJ_ZSET)) { return; } /* Find long/lat to use for radius search based on inquiry type */ int base_args; double xy[2] = { 0 }; if (flags & RADIUS_COORDS) { base_args = 6; if (extractLongLatOrReply(c, c->argv + 2, xy) == C_ERR) return; } else if (flags & RADIUS_MEMBER) { base_args = 5; robj *member = c->argv[2]; if (longLatFromMember(zobj, member, xy) == C_ERR) { addReplyError(c, "could not decode requested zset member"); return; } } else { addReplyError(c, "Unknown georadius search type"); return; } /* Extract radius and units from arguments */ double radius_meters = 0, conversion = 1; if ((radius_meters = extractDistanceOrReply(c, c->argv + base_args - 2, &conversion)) < 0) { return; } /* Discover and populate all optional parameters. */ int withdist = 0, withhash = 0, withcoords = 0; int sort = SORT_NONE; long long count = 0; if (c->argc > base_args) { int remaining = c->argc - base_args; for (int i = 0; i < remaining; i++) { char *arg = c->argv[base_args + i]->ptr; if (!strcasecmp(arg, "withdist")) { withdist = 1; } else if (!strcasecmp(arg, "withhash")) { withhash = 1; } else if (!strcasecmp(arg, "withcoord")) { withcoords = 1; } else if (!strcasecmp(arg, "asc")) { sort = SORT_ASC; } else if (!strcasecmp(arg, "desc")) { sort = SORT_DESC; } else if (!strcasecmp(arg, "count") && (i+1) < remaining) { if (getLongLongFromObjectOrReply(c, c->argv[base_args+i+1], &count, NULL) != C_OK) return; if (count <= 0) { addReplyError(c,"COUNT must be > 0"); return; } i++; } else if (!strcasecmp(arg, "store") && (i+1) < remaining && !(flags & RADIUS_NOSTORE)) { storekey = c->argv[base_args+i+1]; storedist = 0; i++; } else if (!strcasecmp(arg, "storedist") && (i+1) < remaining && !(flags & RADIUS_NOSTORE)) { storekey = c->argv[base_args+i+1]; storedist = 1; i++; } else { addReply(c, shared.syntaxerr); return; } } } /* Trap options not compatible with STORE and STOREDIST. */ if (storekey && (withdist || withhash || withcoords)) { addReplyError(c, "STORE option in GEORADIUS is not compatible with " "WITHDIST, WITHHASH and WITHCOORDS options"); return; } /* COUNT without ordering does not make much sense, force ASC * ordering if COUNT was specified but no sorting was requested. */ if (count != 0 && sort == SORT_NONE) sort = SORT_ASC; /* Get all neighbor geohash boxes for our radius search */ GeoHashRadius georadius = geohashGetAreasByRadiusWGS84(xy[0], xy[1], radius_meters); /* Search the zset for all matching points */ geoArray *ga = geoArrayCreate(); membersOfAllNeighbors(zobj, georadius, xy[0], xy[1], radius_meters, ga); /* If no matching results, the user gets an empty reply. */ if (ga->used == 0 && storekey == NULL) { addReply(c,shared.emptyarray); geoArrayFree(ga); return; } long result_length = ga->used; long returned_items = (count == 0 || result_length < count) ? result_length : count; long option_length = 0; /* Process [optional] requested sorting */ if (sort == SORT_ASC) { qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_asc); } else if (sort == SORT_DESC) { qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_desc); } if (storekey == NULL) { /* No target key, return results to user. 
*/ /* Our options are self-contained nested multibulk replies, so we * only need to track how many of those nested replies we return. */ if (withdist) option_length++; if (withcoords) option_length++; if (withhash) option_length++; /* The array len we send is exactly result_length. The result is * either all strings of just zset members *or* a nested multi-bulk * reply containing the zset member string _and_ all the additional * options the user enabled for this request. */ addReplyArrayLen(c, returned_items); /* Finally send results back to the caller */ int i; for (i = 0; i < returned_items; i++) { geoPoint *gp = ga->array+i; gp->dist /= conversion; /* Fix according to unit. */ /* If we have options in option_length, return each sub-result * as a nested multi-bulk. Add 1 to account for result value * itself. */ if (option_length) addReplyArrayLen(c, option_length + 1); addReplyBulkSds(c,gp->member); gp->member = NULL; if (withdist) addReplyDoubleDistance(c, gp->dist); if (withhash) addReplyLongLong(c, gp->score); if (withcoords) { addReplyArrayLen(c, 2); addReplyHumanLongDouble(c, gp->longitude); addReplyHumanLongDouble(c, gp->latitude); } } } else { /* Target key, create a sorted set with the results. */ robj *zobj; zset *zs; int i; size_t maxelelen = 0; if (returned_items) { zobj = createZsetObject(); zs = zobj->ptr; } for (i = 0; i < returned_items; i++) { zskiplistNode *znode; geoPoint *gp = ga->array+i; gp->dist /= conversion; /* Fix according to unit. */ double score = storedist ? gp->dist : gp->score; size_t elelen = sdslen(gp->member); if (maxelelen < elelen) maxelelen = elelen; znode = zslInsert(zs->zsl,score,gp->member); serverAssert(dictAdd(zs->dict,gp->member,&znode->score) == DICT_OK); gp->member = NULL; } if (returned_items) { zsetConvertToZiplistIfNeeded(zobj,maxelelen); setKey(c,c->db,storekey,zobj); decrRefCount(zobj); notifyKeyspaceEvent(NOTIFY_ZSET,"georadiusstore",storekey, c->db->id); server.dirty += returned_items; } else if (dbDelete(c->db,storekey)) { signalModifiedKey(c,c->db,storekey); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",storekey,c->db->id); server.dirty++; } addReplyLongLong(c, returned_items); } geoArrayFree(ga); }
Vulnerable
[ "CWE-190" ]
redis
f6a40570fa63d5afdd596c78083d754081d80ae3
1.8784928554385607e+38
212
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628) - fix possible heap corruption in ziplist and listpack resulting by trying to allocate more than the maximum size of 4GB. - prevent ziplist (hash and zset) from reaching size of above 1GB, will be converted to HT encoding, that's not a useful size. - prevent listpack (stream) from reaching size of above 1GB. - XADD will start a new listpack if the new record may cause the previous listpack to grow over 1GB. - XADD will respond with an error if a single stream record is over 1GB - List type (ziplist in quicklist) was truncating strings that were over 4GB, now it'll respond with an error.
1
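The commit message above describes capping ziplist/listpack growth well below the 4GB size field so allocation sizes cannot wrap. A generic hedged sketch of such a guard follows; the 1GB cap comes from the message, while the names are illustrative rather than Redis' actual internals.

#include <stddef.h>

/* Illustrative overflow guard in the spirit of the commit message: refuse to
 * grow a 32-bit-sized buffer past a hard 1GB cap, checked without letting the
 * addition itself wrap. Hypothetical names, not Redis' actual code. */
#define SIZE_SAFETY_LIMIT ((size_t)1 << 30)   /* 1GB cap from the message */

static int grow_would_exceed_limit(size_t cur_len, size_t add_len)
{
    return add_len > SIZE_SAFETY_LIMIT ||
           cur_len > SIZE_SAFETY_LIMIT - add_len;
}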
static void xsltFixImportedCompSteps(xsltStylesheetPtr master, xsltStylesheetPtr style) { xsltStylesheetPtr res; xmlHashScan(style->templatesHash, xsltNormalizeCompSteps, master); master->extrasNr += style->extrasNr; for (res = style->imports; res != NULL; res = res->next) { xsltFixImportedCompSteps(master, res); } }
Safe
[]
libxslt
e03553605b45c88f0b4b2980adfbbb8f6fca2fd6
3.099848626645288e+38
9
Fix security framework bypass xsltCheckRead and xsltCheckWrite return -1 in case of error but callers don't check for this condition and allow access. With a specially crafted URL, xsltCheckRead could be tricked into returning an error because of a supposedly invalid URL that would still be loaded succesfully later on. Fixes #12. Thanks to Felix Wilhelm for the report.
0
static int nbd_negotiate_send_rep_list(QIOChannel *ioc, NBDExport *exp) { size_t name_len, desc_len; uint32_t len; const char *name = exp->name ? exp->name : ""; const char *desc = exp->description ? exp->description : ""; int rc; TRACE("Advertising export name '%s' description '%s'", name, desc); name_len = strlen(name); desc_len = strlen(desc); len = name_len + desc_len + sizeof(len); rc = nbd_negotiate_send_rep_len(ioc, NBD_REP_SERVER, NBD_OPT_LIST, len); if (rc < 0) { return rc; } len = cpu_to_be32(name_len); if (nbd_write(ioc, &len, sizeof(len), NULL) < 0) { LOG("write failed (name length)"); return -EINVAL; } if (nbd_write(ioc, name, name_len, NULL) < 0) { LOG("write failed (name buffer)"); return -EINVAL; } if (nbd_write(ioc, desc, desc_len, NULL) < 0) { LOG("write failed (description buffer)"); return -EINVAL; } return 0; }
Safe
[ "CWE-20" ]
qemu
2b0bbc4f8809c972bad134bc1a2570dbb01dea0b
3.313862106202685e+38
32
nbd/server: get rid of nbd_negotiate_read and friends Functions nbd_negotiate_{read,write,drop_sync} were introduced in 1a6245a5b, when nbd_rwv (was nbd_wr_sync) was working through qemu_co_sendv_recvv (the path is nbd_wr_sync -> qemu_co_{recv/send} -> qemu_co_send_recv -> qemu_co_sendv_recvv), which just yields, without setting any handlers. But starting from ff82911cd nbd_rwv (was nbd_wr_syncv) works through qio_channel_yield() which sets handlers, so watchers are redundant in nbd_negotiate_{read,write,drop_sync}, then, let's just use nbd_{read,write,drop} functions. Functions nbd_{read,write,drop} has errp parameter, which is unused in this patch. This will be fixed later. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Reviewed-by: Eric Blake <eblake@redhat.com> Message-Id: <20170602150150.258222-4-vsementsov@virtuozzo.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
static int __init setup_fail_page_alloc(char *str) { return setup_fault_attr(&fail_page_alloc.attr, str); }
Safe
[]
linux
400e22499dd92613821374c8c6c88c7225359980
1.9015943545838014e+38
4
mm: don't warn about allocations which stall for too long Commit 63f53dea0c98 ("mm: warn about allocations which stall for too long") was a great step for reducing possibility of silent hang up problem caused by memory allocation stalls. But this commit reverts it, for it is possible to trigger OOM lockup and/or soft lockups when many threads concurrently called warn_alloc() (in order to warn about memory allocation stalls) due to current implementation of printk(), and it is difficult to obtain useful information due to limitation of synchronous warning approach. Current printk() implementation flushes all pending logs using the context of a thread which called console_unlock(). printk() should be able to flush all pending logs eventually unless somebody continues appending to printk() buffer. Since warn_alloc() started appending to printk() buffer while waiting for oom_kill_process() to make forward progress when oom_kill_process() is processing pending logs, it became possible for warn_alloc() to force oom_kill_process() loop inside printk(). As a result, warn_alloc() significantly increased possibility of preventing oom_kill_process() from making forward progress. ---------- Pseudo code start ---------- Before warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } goto retry; After warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else if (waited_for_10seconds()) { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- Although waited_for_10seconds() becomes true once per 10 seconds, unbounded number of threads can call waited_for_10seconds() at the same time. Also, since threads doing waited_for_10seconds() keep doing almost busy loop, the thread doing print_one_log() can use little CPU resource. Therefore, this situation can be simplified like ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- when printk() is called faster than print_one_log() can process a log. One of possible mitigation would be to introduce a new lock in order to make sure that no other series of printk() (either oom_kill_process() or warn_alloc()) can append to printk() buffer when one series of printk() (either oom_kill_process() or warn_alloc()) is already in progress. Such serialization will also help obtaining kernel messages in readable form. ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { mutex_lock(&oom_printk_lock); while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. 
mutex_unlock(&oom_printk_lock); mutex_unlock(&oom_lock) } else { if (mutex_trylock(&oom_printk_lock)) { atomic_inc(&printk_pending_logs); mutex_unlock(&oom_printk_lock); } } goto retry; ---------- Pseudo code end ---------- But this commit does not go that direction, for we don't want to introduce a new lock dependency, and we unlikely be able to obtain useful information even if we serialized oom_kill_process() and warn_alloc(). Synchronous approach is prone to unexpected results (e.g. too late [1], too frequent [2], overlooked [3]). As far as I know, warn_alloc() never helped with providing information other than "something is going wrong". I want to consider asynchronous approach which can obtain information during stalls with possibly relevant threads (e.g. the owner of oom_lock and kswapd-like threads) and serve as a trigger for actions (e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump of stalling KVM guest for diagnostic purpose). This commit temporarily loses ability to report e.g. OOM lockup due to unable to invoke the OOM killer due to !__GFP_FS allocation request. But asynchronous approach will be able to detect such situation and emit warning. Thus, let's remove warn_alloc(). [1] https://bugzilla.kernel.org/show_bug.cgi?id=192981 [2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com [3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever")) Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Reported-by: Cong Wang <xiyou.wangcong@gmail.com> Reported-by: yuwang.yuwang <yuwang.yuwang@alibaba-inc.com> Reported-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Mel Gorman <mgorman@suse.de> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
static int tableAndColumnIndex( SrcList *pSrc, /* Array of tables to search */ int N, /* Number of tables in pSrc->a[] to search */ const char *zCol, /* Name of the column we are looking for */ int *piTab, /* Write index of pSrc->a[] here */ int *piCol /* Write index of pSrc->a[*piTab].pTab->aCol[] here */ ){ int i; /* For looping over tables in pSrc */ int iCol; /* Index of column matching zCol */ assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */ for(i=0; i<N; i++){ iCol = columnIndex(pSrc->a[i].pTab, zCol); if( iCol>=0 ){ if( piTab ){ *piTab = i; *piCol = iCol; } return 1; } } return 0; }
Safe
[ "CWE-20" ]
sqlite
e59c562b3f6894f84c715772c4b116d7b5c01348
2.9542028842196735e+38
23
Fix a crash that could occur if a sub-select that uses both DISTINCT and window functions also used an ORDER BY that is the same as its select list. FossilOrigin-Name: bcdd66c1691955c697f3d756c2b035acfe98f6aad72e90b0021bab6e9023b3ba
0
static int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(sqe->off || sqe->addr || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)) return -EINVAL; req->shutdown.how = READ_ONCE(sqe->len); return 0; }
Safe
[ "CWE-416" ]
linux
9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7
1.2791393129932985e+38
10
io_uring: reinstate the inflight tracking After some debugging, it was realized that we really do still need the old inflight tracking for any file type that has io_uring_fops assigned. If we don't, then trivial circular references will mean that we never get the ctx cleaned up and hence it'll leak. Just bring back the inflight tracking, which then also means we can eliminate the conditional dropping of the file when task_work is queued. Fixes: d5361233e9ab ("io_uring: drop the old style inflight file tracking") Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
void lzw_context_destroy(struct lzw_ctx *ctx) { free(ctx); }
Safe
[ "CWE-125", "CWE-787" ]
chafa
e6ce3746cdcf0836b9dae659a5aed15d73a080d8
1.797259772607244e+38
4
libnsgif: fix oob in lzw_decode
0
static Upvaldesc *allocupvalue (FuncState *fs) { Proto *f = fs->f; int oldsize = f->sizeupvalues; checklimit(fs, fs->nups + 1, MAXUPVAL, "upvalues"); luaM_growvector(fs->ls->L, f->upvalues, fs->nups, f->sizeupvalues, Upvaldesc, MAXUPVAL, "upvalues"); while (oldsize < f->sizeupvalues) f->upvalues[oldsize++].name = NULL; return &f->upvalues[fs->nups++]; }
Safe
[ "CWE-125" ]
lua
1f3c6f4534c6411313361697d98d1145a1f030fa
2.4804023664327644e+38
10
Bug: Lua can generate wrong code when _ENV is <const>
0
static int lo_read_transfer(struct loop_device *lo, struct request *rq, loff_t pos) { struct bio_vec bvec, b; struct req_iterator iter; struct iov_iter i; struct page *page; ssize_t len; int ret = 0; page = alloc_page(GFP_NOIO); if (unlikely(!page)) return -ENOMEM; rq_for_each_segment(bvec, rq, iter) { loff_t offset = pos; b.bv_page = page; b.bv_offset = 0; b.bv_len = bvec.bv_len; iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); if (len < 0) { ret = len; goto out_free_page; } ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page, bvec.bv_offset, len, offset >> 9); if (ret) goto out_free_page; flush_dcache_page(bvec.bv_page); if (len != bvec.bv_len) { struct bio *bio; __rq_for_each_bio(bio, rq) zero_fill_bio(bio); break; } } ret = 0; out_free_page: __free_page(page); return ret; }
Safe
[ "CWE-416", "CWE-362" ]
linux
ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5
1.087440915276054e+38
49
loop: fix concurrent lo_open/lo_release 范龙飞 reports that KASAN can report a use-after-free in __lock_acquire. The reason is due to insufficient serialization in lo_release(), which will continue to use the loop device even after it has decremented the lo_refcnt to zero. In the meantime, another process can come in, open the loop device again as it is being shut down. Confusion ensues. Reported-by: 范龙飞 <long7573@126.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
void assertContents(const intrusive_ptr<Testable>& expr, const BSONArray& expectedContents) { ASSERT_BSONOBJ_EQ(constify(BSON("$testable" << expectedContents)), expressionToBson(expr)); }
Safe
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
3.239666802918738e+38
3
SERVER-38070 fix infinite loop in agg expression
0
host_ntoa(int type, const void *arg, uschar *buffer, int *portptr) { uschar *yield; /* The new world. It is annoying that we have to fish out the address from different places in the block, depending on what kind of address it is. It is also a pain that inet_ntop() returns a const uschar *, whereas the IPv4 function inet_ntoa() returns just uschar *, and some picky compilers insist on warning if one assigns a const uschar * to a uschar *. Hence the casts. */ #if HAVE_IPV6 uschar addr_buffer[46]; if (type < 0) { int family = ((struct sockaddr *)arg)->sa_family; if (family == AF_INET6) { struct sockaddr_in6 *sk = (struct sockaddr_in6 *)arg; yield = US inet_ntop(family, &(sk->sin6_addr), CS addr_buffer, sizeof(addr_buffer)); if (portptr != NULL) *portptr = ntohs(sk->sin6_port); } else { struct sockaddr_in *sk = (struct sockaddr_in *)arg; yield = US inet_ntop(family, &(sk->sin_addr), CS addr_buffer, sizeof(addr_buffer)); if (portptr != NULL) *portptr = ntohs(sk->sin_port); } } else { yield = US inet_ntop(type, arg, CS addr_buffer, sizeof(addr_buffer)); } /* If the result is a mapped IPv4 address, show it in V4 format. */ if (Ustrncmp(yield, "::ffff:", 7) == 0) yield += 7; #else /* HAVE_IPV6 */ /* The old world */ if (type < 0) { yield = US inet_ntoa(((struct sockaddr_in *)arg)->sin_addr); if (portptr != NULL) *portptr = ntohs(((struct sockaddr_in *)arg)->sin_port); } else yield = US inet_ntoa(*((struct in_addr *)arg)); #endif /* If there is no buffer, put the string into some new store. */ if (!buffer) buffer = store_get(46, FALSE); /* Callers of this function with a non-NULL buffer must ensure that it is large enough to hold an IPv6 address, namely, at least 46 bytes. That's what makes this use of strcpy() OK. If the library returned apparently an apparently tainted string, clean it; we trust IP addresses. */ string_format_nt(buffer, 46, "%s", yield); return buffer; }
Safe
[ "CWE-787" ]
exim
d4bc023436e4cce7c23c5f8bb5199e178b4cc743
1.4105447195987704e+38
65
Fix host_name_lookup (Close 2747) Thanks to Nico R for providing a reproducing configuration. host_lookup = * message_size_limit = ${if def:sender_host_name {32M}{32M}} acl_smtp_connect = acl_smtp_connect acl_smtp_rcpt = acl_smtp_rcpt begin acl acl_smtp_connect: warn ratelimit = 256 / 1m / per_conn accept acl_smtp_rcpt: accept hosts = 127.0.0.* begin routers null: driver = accept transport = null begin transports null: driver = appendfile file = /dev/null Tested with swaks -f mailbox@example.org -t mailbox@example.org --pipe 'exim -bh 127.0.0.1 -C /opt/exim/etc/exim-bug.conf' The IP must have a PTR to "localhost." to reproduce it. (cherry picked from commit 20812729e3e47a193a21d326ecd036d67a8b2724)
0
Network::FilterStatus onData(Buffer::Instance& data, bool) override { onDataInternal(data); return Network::FilterStatus::StopIteration; }
Safe
[ "CWE-400" ]
envoy
542f84c66e9f6479bc31c6f53157c60472b25240
1.5662942882149233e+38
4
overload: Runtime configurable global connection limits (#147) Signed-off-by: Tony Allen <tony@allen.gg>
0
static double _mp_vargkth(CImg<doubleT>& vec) { const double val = (+vec).get_shared_points(1,vec.width() - 1). kth_smallest((ulongT)cimg::cut((longT)*vec - 1,(longT)0,(longT)vec.width() - 2)); cimg_for_inX(vec,1,vec.width()-1,ind) if (vec[ind]==val) return ind - 1.; return 1.; }
Safe
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
6.807702111054783e+37
6
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
0
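The one-line commit message above refers to validating header dimensions against the file size. A hedged sketch of that kind of sanity check follows; it is a hypothetical helper, not CImg's actual implementation.

#include <stdint.h>

/* Hedged sketch of the check named in the commit message: verify that the
 * pixel data implied by the header fits inside the file before reading it. */
static int bmp_dimensions_fit(int64_t dx, int64_t dy, int64_t bpp,
                              int64_t file_size)
{
    if (dx <= 0 || dy == 0 || bpp <= 0 || file_size <= 0)
        return 0;
    int64_t rows = dy < 0 ? -dy : dy;        /* BMP height may be negative */
    int64_t row_bytes = (dx * bpp + 7) / 8;  /* unpadded bytes per scan line */
    return row_bytes <= file_size / rows;    /* division form avoids overflow */
}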
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_block_rsv *block_rsv; int ret; if (atomic_read(&root->orphan_inodes) || root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) return; spin_lock(&root->orphan_lock); if (atomic_read(&root->orphan_inodes)) { spin_unlock(&root->orphan_lock); return; } if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { spin_unlock(&root->orphan_lock); return; } block_rsv = root->orphan_block_rsv; root->orphan_block_rsv = NULL; spin_unlock(&root->orphan_lock); if (root->orphan_item_inserted && btrfs_root_refs(&root->root_item) > 0) { ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, root->root_key.objectid); BUG_ON(ret); root->orphan_item_inserted = 0; } if (block_rsv) { WARN_ON(block_rsv->size > 0); btrfs_free_block_rsv(root, block_rsv); } }
Safe
[ "CWE-310" ]
linux-2.6
9c52057c698fb96f8f07e7a4bcf4801a092bda89
2.1291344477056448e+38
38
Btrfs: fix hash overflow handling The handling for directory crc hash overflows was fairly obscure, split_leaf returns EOVERFLOW when we try to extend the item and that is supposed to bubble up to userland. For a while it did so, but along the way we added better handling of errors and forced the FS readonly if we hit IO errors during the directory insertion. Along the way, we started testing only for EEXIST and the EOVERFLOW case was dropped. The end result is that we may force the FS readonly if we catch a directory hash bucket overflow. This fixes a few problem spots. First I add tests for EOVERFLOW in the places where we can safely just return the error up the chain. btrfs_rename is harder though, because it tries to insert the new directory item only after it has already unlinked anything the rename was going to overwrite. Rather than adding very complex logic, I added a helper to test for the hash overflow case early while it is still safe to bail out. Snapshot and subvolume creation had a similar problem, so they are using the new helper now too. Signed-off-by: Chris Mason <chris.mason@fusionio.com> Reported-by: Pascal Junod <pascal@junod.info>
0
static void convert_blocked_entry(GKeyFile *key_file, void *value) { g_key_file_set_boolean(key_file, "General", "Blocked", TRUE); }
Safe
[ "CWE-862", "CWE-863" ]
bluez
b497b5942a8beb8f89ca1c359c54ad67ec843055
1.4240949206366185e+38
4
adapter: Fix storing discoverable setting discoverable setting shall only be store when changed via Discoverable property and not when discovery client set it as that be considered temporary just for the lifetime of the discovery.
0
CImg<T>& _load_bmp(std::FILE *const file, const char *const filename) { if (!file && !filename) throw CImgArgumentException(_cimg_instance "load_bmp(): Specified filename is (null).", cimg_instance); std::FILE *const nfile = file?file:cimg::fopen(filename,"rb"); CImg<ucharT> header(54); cimg::fread(header._data,54,nfile); if (*header!='B' || header[1]!='M') { if (!file) cimg::fclose(nfile); throw CImgIOException(_cimg_instance "load_bmp(): Invalid BMP file '%s'.", cimg_instance, filename?filename:"(FILE*)"); } // Read header and pixel buffer int file_size = header[0x02] + (header[0x03]<<8) + (header[0x04]<<16) + (header[0x05]<<24), offset = header[0x0A] + (header[0x0B]<<8) + (header[0x0C]<<16) + (header[0x0D]<<24), header_size = header[0x0E] + (header[0x0F]<<8) + (header[0x10]<<16) + (header[0x11]<<24), dx = header[0x12] + (header[0x13]<<8) + (header[0x14]<<16) + (header[0x15]<<24), dy = header[0x16] + (header[0x17]<<8) + (header[0x18]<<16) + (header[0x19]<<24), compression = header[0x1E] + (header[0x1F]<<8) + (header[0x20]<<16) + (header[0x21]<<24), nb_colors = header[0x2E] + (header[0x2F]<<8) + (header[0x30]<<16) + (header[0x31]<<24), bpp = header[0x1C] + (header[0x1D]<<8); if (!file_size || file_size==offset) { cimg::fseek(nfile,0,SEEK_END); file_size = (int)cimg::ftell(nfile); cimg::fseek(nfile,54,SEEK_SET); } if (header_size>40) cimg::fseek(nfile,header_size - 40,SEEK_CUR); const int dx_bytes = (bpp==1)?(dx/8 + (dx%8?1:0)):((bpp==4)?(dx/2 + (dx%2)):(int)((longT)dx*bpp/8)), align_bytes = (4 - dx_bytes%4)%4; const ulongT cimg_iobuffer = (ulongT)24*1024*1024, buf_size = std::min((ulongT)cimg::abs(dy)*(dx_bytes + align_bytes),(ulongT)file_size - offset); CImg<intT> colormap; if (bpp<16) { if (!nb_colors) nb_colors = 1<<bpp; } else nb_colors = 0; if (nb_colors) { colormap.assign(nb_colors); cimg::fread(colormap._data,nb_colors,nfile); } const int xoffset = offset - 14 - header_size - 4*nb_colors; if (xoffset>0) cimg::fseek(nfile,xoffset,SEEK_CUR); CImg<ucharT> buffer; if (buf_size<cimg_iobuffer) { // buffer.assign(cimg::abs(dy)*(dx_bytes + align_bytes),1,1,1,0); buffer.assign(buf_size,1,1,1,0); cimg::fread(buffer._data,buf_size,nfile); } else buffer.assign(dx_bytes + align_bytes); unsigned char *ptrs = buffer; // Decompress buffer (if necessary) if (compression) { if (file) throw CImgIOException(_cimg_instance "load_bmp(): Unable to load compressed data from '(*FILE)' inputs.", cimg_instance); else { if (!file) cimg::fclose(nfile); return load_other(filename); } } // Read pixel data assign(dx,cimg::abs(dy),1,3,0); switch (bpp) { case 1 : { // Monochrome if (colormap._width>=2) for (int y = height() - 1; y>=0; --y) { if (buf_size>=cimg_iobuffer) { if (!cimg::fread(ptrs=buffer._data,dx_bytes,nfile)) break; cimg::fseek(nfile,align_bytes,SEEK_CUR); } unsigned char mask = 0x80, val = 0; cimg_forX(*this,x) { if (mask==0x80) val = *(ptrs++); const unsigned char *col = (unsigned char*)(colormap._data + (val&mask?1:0)); (*this)(x,y,2) = (T)*(col++); (*this)(x,y,1) = (T)*(col++); (*this)(x,y,0) = (T)*(col++); mask = cimg::ror(mask); } ptrs+=align_bytes; } } break; case 4 : { // 16 colors if (colormap._width>=16) for (int y = height() - 1; y>=0; --y) { if (buf_size>=cimg_iobuffer) { if (!cimg::fread(ptrs=buffer._data,dx_bytes,nfile)) break; cimg::fseek(nfile,align_bytes,SEEK_CUR); } unsigned char mask = 0xF0, val = 0; cimg_forX(*this,x) { if (mask==0xF0) val = *(ptrs++); const unsigned char color = (unsigned char)((mask<16)?(val&mask):((val&mask)>>4)); const unsigned char *col = (unsigned 
char*)(colormap._data + color); (*this)(x,y,2) = (T)*(col++); (*this)(x,y,1) = (T)*(col++); (*this)(x,y,0) = (T)*(col++); mask = cimg::ror(mask,4); } ptrs+=align_bytes; } } break; case 8 : { // 256 colors if (colormap._width>=256) for (int y = height() - 1; y>=0; --y) { if (buf_size>=cimg_iobuffer) { if (!cimg::fread(ptrs=buffer._data,dx_bytes,nfile)) break; cimg::fseek(nfile,align_bytes,SEEK_CUR); } cimg_forX(*this,x) { const unsigned char *col = (unsigned char*)(colormap._data + *(ptrs++)); (*this)(x,y,2) = (T)*(col++); (*this)(x,y,1) = (T)*(col++); (*this)(x,y,0) = (T)*(col++); } ptrs+=align_bytes; } } break; case 16 : { // 16 bits colors for (int y = height() - 1; y>=0; --y) { if (buf_size>=cimg_iobuffer) { if (!cimg::fread(ptrs=buffer._data,dx_bytes,nfile)) break; cimg::fseek(nfile,align_bytes,SEEK_CUR); } cimg_forX(*this,x) { const unsigned char c1 = *(ptrs++), c2 = *(ptrs++); const unsigned short col = (unsigned short)(c1|(c2<<8)); (*this)(x,y,2) = (T)(col&0x1F); (*this)(x,y,1) = (T)((col>>5)&0x1F); (*this)(x,y,0) = (T)((col>>10)&0x1F); } ptrs+=align_bytes; } } break; case 24 : { // 24 bits colors for (int y = height() - 1; y>=0; --y) { if (buf_size>=cimg_iobuffer) { if (!cimg::fread(ptrs=buffer._data,dx_bytes,nfile)) break; cimg::fseek(nfile,align_bytes,SEEK_CUR); } cimg_forX(*this,x) { (*this)(x,y,2) = (T)*(ptrs++); (*this)(x,y,1) = (T)*(ptrs++); (*this)(x,y,0) = (T)*(ptrs++); } ptrs+=align_bytes; } } break; case 32 : { // 32 bits colors for (int y = height() - 1; y>=0; --y) { if (buf_size>=cimg_iobuffer) { if (!cimg::fread(ptrs=buffer._data,dx_bytes,nfile)) break; cimg::fseek(nfile,align_bytes,SEEK_CUR); } cimg_forX(*this,x) { (*this)(x,y,2) = (T)*(ptrs++); (*this)(x,y,1) = (T)*(ptrs++); (*this)(x,y,0) = (T)*(ptrs++); ++ptrs; } ptrs+=align_bytes; } } break; } if (dy<0) mirror('y'); if (!file) cimg::fclose(nfile); return *this;
Safe
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
2.497058171224485e+38
173
.
0
GF_Box *gitn_box_new() { ISOM_DECL_BOX_ALLOC(GroupIdToNameBox, GF_ISOM_BOX_TYPE_GITN); return (GF_Box *)tmp; }
Safe
[ "CWE-787" ]
gpac
77510778516803b7f7402d7423c6d6bef50254c3
4.6473553099535685e+37
5
fixed #2255
0
brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data) { struct brcmf_cfg80211_info *cfg = ifp->drvr->config; s32 status; struct brcmf_escan_result_le *escan_result_le; struct brcmf_bss_info_le *bss_info_le; struct brcmf_bss_info_le *bss = NULL; u32 bi_length; struct brcmf_scan_results *list; u32 i; bool aborted; status = e->status; if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { brcmf_err("scan not ready, bsscfgidx=%d\n", ifp->bsscfgidx); return -EPERM; } if (status == BRCMF_E_STATUS_PARTIAL) { brcmf_dbg(SCAN, "ESCAN Partial result\n"); escan_result_le = (struct brcmf_escan_result_le *) data; if (!escan_result_le) { brcmf_err("Invalid escan result (NULL pointer)\n"); goto exit; } if (le16_to_cpu(escan_result_le->bss_count) != 1) { brcmf_err("Invalid bss_count %d: ignoring\n", escan_result_le->bss_count); goto exit; } bss_info_le = &escan_result_le->bss_info_le; if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le)) goto exit; if (!cfg->scan_request) { brcmf_dbg(SCAN, "result without cfg80211 request\n"); goto exit; } bi_length = le32_to_cpu(bss_info_le->length); if (bi_length != (le32_to_cpu(escan_result_le->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) { brcmf_err("Invalid bss_info length %d: ignoring\n", bi_length); goto exit; } if (!(cfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) { if (le16_to_cpu(bss_info_le->capability) & WLAN_CAPABILITY_IBSS) { brcmf_err("Ignoring IBSS result\n"); goto exit; } } list = (struct brcmf_scan_results *) cfg->escan_info.escan_buf; if (bi_length > BRCMF_ESCAN_BUF_SIZE - list->buflen) { brcmf_err("Buffer is too small: ignoring\n"); goto exit; } for (i = 0; i < list->count; i++) { bss = bss ? (struct brcmf_bss_info_le *) ((unsigned char *)bss + le32_to_cpu(bss->length)) : list->bss_info_le; if (brcmf_compare_update_same_bss(cfg, bss, bss_info_le)) goto exit; } memcpy(&cfg->escan_info.escan_buf[list->buflen], bss_info_le, bi_length); list->version = le32_to_cpu(bss_info_le->version); list->buflen += bi_length; list->count++; } else { cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; if (brcmf_p2p_scan_finding_common_channel(cfg, NULL)) goto exit; if (cfg->scan_request) { brcmf_inform_bss(cfg); aborted = status != BRCMF_E_STATUS_SUCCESS; brcmf_notify_escan_complete(cfg, ifp, aborted, false); } else brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n", status); } exit: return 0; }
Safe
[ "CWE-119", "CWE-703" ]
linux
ded89912156b1a47d940a0c954c43afbabd0c42c
6.822396151680378e+37
94
brcmfmac: avoid potential stack overflow in brcmf_cfg80211_start_ap() User-space can choose to omit NL80211_ATTR_SSID and only provide raw IE TLV data. When doing so it can provide SSID IE with length exceeding the allowed size. The driver further processes this IE copying it into a local variable without checking the length. Hence stack can be corrupted and used as exploit. Cc: stable@vger.kernel.org # v4.7 Reported-by: Daxing Guo <freener.gdx@gmail.com> Reviewed-by: Hante Meuleman <hante.meuleman@broadcom.com> Reviewed-by: Pieter-Paul Giesberts <pieter-paul.giesberts@broadcom.com> Reviewed-by: Franky Lin <franky.lin@broadcom.com> Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
0
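The commit message above points at an SSID information element being copied into a fixed stack buffer without a length check. A minimal hedged sketch of the missing bound follows; it is a hypothetical helper, not the driver's actual code.

#include <stdint.h>
#include <string.h>

/* Minimal sketch of the bound check the commit message calls for: clamp the
 * IE-supplied length before copying into a fixed 32-byte SSID buffer. */
#define MAX_SSID_LEN 32

static size_t copy_ssid_from_ie(uint8_t dst[MAX_SSID_LEN], const uint8_t *ie)
{
    size_t len = ie[1];          /* IE layout: id, length, payload */
    if (len > MAX_SSID_LEN)
        len = MAX_SSID_LEN;      /* never overflow the destination buffer */
    memcpy(dst, &ie[2], len);
    return len;
}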
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req) { int err = req->rsk_ops->rtx_syn_ack(parent, req); if (!err) req->num_retrans++; return err; }
Safe
[ "CWE-200", "CWE-415" ]
linux
657831ffc38e30092a2d5f03d385d710eb88b09a
1.4942869153872264e+38
8
dccp/tcp: do not inherit mc_list from parent syzkaller found a way to trigger double frees from ip_mc_drop_socket() It turns out that leave a copy of parent mc_list at accept() time, which is very bad. Very similar to commit 8b485ce69876 ("tcp: do not inherit fastopen_req from parent") Initial report from Pray3r, completed by Andrey one. Thanks a lot to them ! Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Pray3r <pray3r.z@gmail.com> Reported-by: Andrey Konovalov <andreyknvl@google.com> Tested-by: Andrey Konovalov <andreyknvl@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
request_new (const char *method, char *arg) { struct request *req = xnew0 (struct request); req->hcapacity = 8; req->headers = xnew_array (struct request_header, req->hcapacity); req->method = method; req->arg = arg; return req; }
Safe
[ "CWE-119" ]
wget
d892291fb8ace4c3b734ea5125770989c215df3f
1.7555977745740067e+38
9
Fix stack overflow in HTTP protocol handling (CVE-2017-13089) * src/http.c (skip_short_body): Return error on negative chunk size Reported-by: Antti Levomäki, Christian Jalio, Joonas Pihlaja from Forcepoint Reported-by: Juhani Eronen from Finnish National Cyber Security Centre
0
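The commit message above says skip_short_body now returns an error on a negative chunk size. A hedged sketch of parsing and rejecting such a value follows; it is a hypothetical helper, not wget's actual function.

#include <stdlib.h>

/* Hedged sketch of the guard described in the commit message: parse an HTTP
 * chunk-size line (hexadecimal) and treat unparsable or negative values as a
 * protocol error instead of continuing. */
static int parse_chunk_size(const char *line, long *remaining)
{
    char *end;
    long value = strtol(line, &end, 16);
    if (end == line || value < 0)
        return -1;               /* malformed or negative: abort the transfer */
    *remaining = value;
    return 0;
}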
long keyctl_revoke_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); if (ret != -EACCES) goto error; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } } key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_revoke(key); key_ref_put(key_ref); error: return ret; }
Safe
[ "CWE-347" ]
linux
ee8f844e3c5a73b999edf733df1c529d6503ec2f
1.90076431300709e+38
29
KEYS: Disallow keyrings beginning with '.' to be joined as session keyrings This fixes CVE-2016-9604. Keyrings whose name begin with a '.' are special internal keyrings and so userspace isn't allowed to create keyrings by this name to prevent shadowing. However, the patch that added the guard didn't fix KEYCTL_JOIN_SESSION_KEYRING. Not only can that create dot-named keyrings, it can also subscribe to them as a session keyring if they grant SEARCH permission to the user. This, for example, allows a root process to set .builtin_trusted_keys as its session keyring, at which point it has full access because now the possessor permissions are added. This permits root to add extra public keys, thereby bypassing module verification. This also affects kexec and IMA. This can be tested by (as root): keyctl session .builtin_trusted_keys keyctl add user a a @s keyctl list @s which on my test box gives me: 2 keys in keyring: 180010936: ---lswrv 0 0 asymmetric: Build time autogenerated kernel key: ae3d4a31b82daa8e1a75b49dc2bba949fd992a05 801382539: --alswrv 0 0 user: a Fix this by rejecting names beginning with a '.' in the keyctl. Signed-off-by: David Howells <dhowells@redhat.com> Acked-by: Mimi Zohar <zohar@linux.vnet.ibm.com> cc: linux-ima-devel@lists.sourceforge.net cc: stable@vger.kernel.org
0
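The fix is summarized in the last line of the commit message above: reject keyring names beginning with '.' in the keyctl path. A minimal sketch of that guard follows; it is a hypothetical helper, not the kernel's actual keyctl code.

#include <errno.h>

/* Minimal sketch of the guard from the commit message: names starting with
 * '.' are reserved for internal keyrings, so refuse to create or join them
 * as a session keyring. */
static long check_session_keyring_name(const char *name)
{
    if (name && name[0] == '.')
        return -EPERM;
    return 0;
}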
global_init_keywords(void) { /* global definitions mapping */ init_global_keywords(true); #ifdef _WITH_VRRP_ init_vrrp_keywords(false); #endif #ifdef _WITH_LVS_ init_check_keywords(false); #endif #ifdef _WITH_BFD_ init_bfd_keywords(false); #endif return keywords; }
Safe
[ "CWE-59", "CWE-61" ]
keepalived
04f2d32871bb3b11d7dc024039952f2fe2750306
2.97778707854305e+38
17
When opening files for write, ensure they aren't symbolic links Issue #1048 identified that if, for example, a non privileged user created a symbolic link from /etc/keepalvied.data to /etc/passwd, writing to /etc/keepalived.data (which could be invoked via DBus) would cause /etc/passwd to be overwritten. This commit stops keepalived writing to pathnames where the ultimate component is a symbolic link, by setting O_NOFOLLOW whenever opening a file for writing. This might break some setups, where, for example, /etc/keepalived.data was a symbolic link to /home/fred/keepalived.data. If this was the case, instead create a symbolic link from /home/fred/keepalived.data to /tmp/keepalived.data, so that the file is still accessible via /home/fred/keepalived.data. There doesn't appear to be a way around this backward incompatibility, since even checking if the pathname is a symbolic link prior to opening for writing would create a race condition. Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
0
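The commit message above describes setting O_NOFOLLOW whenever a file is opened for writing so the final path component cannot be a symbolic link. A minimal sketch using plain POSIX open() follows; it is a hypothetical helper, not keepalived's actual code.

#include <fcntl.h>
#include <stdio.h>

/* Minimal sketch of the mitigation in the commit message: O_NOFOLLOW makes
 * open() fail with ELOOP when the last path component is a symbolic link, so
 * a link planted at the data-file path cannot redirect the write. */
static int open_for_write_nofollow(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW, 0644);
    if (fd < 0)
        perror("open");          /* ELOOP => refused to follow a symlink */
    return fd;
}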
void ga_command_state_init(GAState *s, GACommandState *cs) { if (!vss_initialized()) { ga_command_state_add(cs, NULL, guest_fsfreeze_cleanup); } }
Safe
[ "CWE-190" ]
qemu
141b197408ab398c4f474ac1a728ab316e921f2b
3.3419872922316444e+38
6
qga: check bytes count read by guest-file-read While reading file content via 'guest-file-read' command, 'qmp_guest_file_read' routine allocates buffer of count+1 bytes. It could overflow for large values of 'count'. Add check to avoid it. Reported-by: Fakhri Zulkifli <mohdfakhrizulkifli@gmail.com> Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> Cc: qemu-stable@nongnu.org Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
0
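The commit message above says the routine allocates count + 1 bytes and that large counts could overflow, so a bound check was added. A hedged sketch of such a check follows; the 48MB cap is an assumption for illustration, not necessarily QEMU's exact constant.

#include <stdbool.h>
#include <stdint.h>

/* Hedged sketch of the check described in the commit message: reject a
 * guest-supplied read count before allocating count + 1 bytes, so the
 * addition cannot overflow. The limit value is assumed, not QEMU's. */
#define GUEST_FILE_READ_COUNT_MAX (48LL * 1024 * 1024)

static bool guest_file_read_count_valid(int64_t count)
{
    return count >= 0 && count <= GUEST_FILE_READ_COUNT_MAX;
}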
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev) { struct device_domain_info *info; struct context_entry *context; struct dmar_domain *domain; unsigned long flags; u64 ctx_lo; int ret; domain = get_valid_domain_for_dev(sdev->dev); if (!domain) return -EINVAL; spin_lock_irqsave(&device_domain_lock, flags); spin_lock(&iommu->lock); ret = -EINVAL; info = sdev->dev->archdata.iommu; if (!info || !info->pasid_supported) goto out; context = iommu_context_addr(iommu, info->bus, info->devfn, 0); if (WARN_ON(!context)) goto out; ctx_lo = context[0].lo; sdev->did = domain->iommu_did[iommu->seq_id]; sdev->sid = PCI_DEVID(info->bus, info->devfn); if (!(ctx_lo & CONTEXT_PASIDE)) { if (iommu->pasid_state_table) context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table); context[1].lo = (u64)virt_to_phys(info->pasid_table->table) | intel_iommu_get_pts(sdev->dev); wmb(); /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both * extended to permit requests-with-PASID if the PASIDE bit * is set. which makes sense. For CONTEXT_TT_PASS_THROUGH, * however, the PASIDE bit is ignored and requests-with-PASID * are unconditionally blocked. Which makes less sense. * So convert from CONTEXT_TT_PASS_THROUGH to one of the new * "guest mode" translation types depending on whether ATS * is available or not. Annoyingly, we can't use the new * modes *unless* PASIDE is set. */ if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) { ctx_lo &= ~CONTEXT_TT_MASK; if (info->ats_supported) ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2; else ctx_lo |= CONTEXT_TT_PT_PASID << 2; } ctx_lo |= CONTEXT_PASIDE; if (iommu->pasid_state_table) ctx_lo |= CONTEXT_DINVE; if (info->pri_supported) ctx_lo |= CONTEXT_PRS; context[0].lo = ctx_lo; wmb(); iommu->flush.flush_context(iommu, sdev->did, sdev->sid, DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); } /* Enable PASID support in the device, if it wasn't already */ if (!info->pasid_enabled) iommu_enable_dev_iotlb(info); if (info->ats_enabled) { sdev->dev_iotlb = 1; sdev->qdep = info->ats_qdep; if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS) sdev->qdep = 0; } ret = 0; out: spin_unlock(&iommu->lock); spin_unlock_irqrestore(&device_domain_lock, flags); return ret; }
Safe
[]
linux
fb58fdcd295b914ece1d829b24df00a17a9624bc
6.979070639573532e+37
83
iommu/vt-d: Do not enable ATS for untrusted devices Currently Linux automatically enables ATS (Address Translation Service) for any device that supports it (and IOMMU is turned on). ATS is used to accelerate DMA access as the device can cache translations locally so there is no need to do full translation on IOMMU side. However, as pointed out in [1] ATS can be used to bypass IOMMU based security completely by simply sending PCIe read/write transaction with AT (Address Translation) field set to "translated". To mitigate this modify the Intel IOMMU code so that it does not enable ATS for any device that is marked as being untrusted. In case this turns out to cause performance issues we may selectively allow ATS based on user decision but currently use big hammer and disable it completely to be on the safe side. [1] https://www.repository.cam.ac.uk/handle/1810/274352 Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com> Reviewed-by: Ashok Raj <ashok.raj@intel.com> Reviewed-by: Joerg Roedel <jroedel@suse.de> Acked-by: Joerg Roedel <jroedel@suse.de>
0
static int StreamTcpTest10 (void) { Packet *p = SCMalloc(SIZE_OF_PACKET); FAIL_IF(unlikely(p == NULL)); Flow f; ThreadVars tv; StreamTcpThread stt; TCPHdr tcph; uint8_t payload[4]; memset(p, 0, SIZE_OF_PACKET); PacketQueue pq; memset(&pq,0,sizeof(PacketQueue)); memset (&f, 0, sizeof(Flow)); memset(&tv, 0, sizeof (ThreadVars)); memset(&stt, 0, sizeof (StreamTcpThread)); memset(&tcph, 0, sizeof (TCPHdr)); FLOW_INITIALIZE(&f); p->flow = &f; StreamTcpUTInit(&stt.ra_ctx); stream_config.async_oneside = TRUE; tcph.th_win = htons(5480); tcph.th_seq = htonl(10); tcph.th_ack = 0; tcph.th_flags = TH_SYN; p->tcph = &tcph; FAIL_IF(StreamTcpPacket(&tv, p, &stt, &pq) == -1); p->tcph->th_seq = htonl(11); p->tcph->th_ack = htonl(11); p->tcph->th_flags = TH_ACK; p->flowflags = FLOW_PKT_TOSERVER; FAIL_IF(StreamTcpPacket(&tv, p, &stt, &pq) == -1); p->tcph->th_seq = htonl(11); p->tcph->th_ack = htonl(11); p->tcph->th_flags = TH_ACK|TH_PUSH; p->flowflags = FLOW_PKT_TOSERVER; StreamTcpCreateTestPacket(payload, 0x42, 3, 4); /*BBB*/ p->payload = payload; p->payload_len = 3; FAIL_IF(StreamTcpPacket(&tv, p, &stt, &pq) == -1); p->tcph->th_seq = htonl(6); p->tcph->th_ack = htonl(11); p->tcph->th_flags = TH_ACK|TH_PUSH; p->flowflags = FLOW_PKT_TOSERVER; StreamTcpCreateTestPacket(payload, 0x42, 3, 4); /*BBB*/ p->payload = payload; p->payload_len = 3; FAIL_IF(StreamTcpPacket(&tv, p, &stt, &pq) == -1); FAIL_IF(((TcpSession *)(p->flow->protoctx))->state != TCP_ESTABLISHED); FAIL_IF(! (((TcpSession *)(p->flow->protoctx))->flags & STREAMTCP_FLAG_ASYNC)); FAIL_IF(((TcpSession *)(p->flow->protoctx))->client.last_ack != 6 && ((TcpSession *)(p->flow->protoctx))->server.next_seq != 11); StreamTcpSessionClear(p->flow->protoctx); SCFree(p); FLOW_DESTROY(&f); StreamTcpUTDeinit(stt.ra_ctx); PASS; }
Safe
[ "CWE-94" ]
suricata
1c63d3905852f746ccde7e2585600b2199cefb4b
2.1594795907578993e+38
73
stream: reject broken ACK packets Fix evasion posibility by rejecting packets with a broken ACK field. These packets have a non-0 ACK field, but do not have a ACK flag set. Bug #3324. Reported-by: Nicolas Adba (cherry picked from commit fa692df37a796c3330c81988d15ef1a219afc006)
0
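The commit message above defines a broken ACK packet as one with a non-zero ACK field but no ACK flag set. A minimal sketch of that classification follows; it is a hypothetical helper, not Suricata's actual code.

#include <stdbool.h>
#include <stdint.h>

/* Minimal sketch of the check described in the commit message: a segment
 * carrying a non-zero acknowledgement number without the ACK flag set is
 * considered broken and rejected. */
#define TH_ACK 0x10   /* same bit value as in <netinet/tcp.h> */

static bool tcp_ack_is_broken(uint8_t th_flags, uint32_t th_ack)
{
    return th_ack != 0 && !(th_flags & TH_ACK);
}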
static int kvm_ioctl_create_device(struct kvm *kvm, struct kvm_create_device *cd) { const struct kvm_device_ops *ops = NULL; struct kvm_device *dev; bool test = cd->flags & KVM_CREATE_DEVICE_TEST; int type; int ret; if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) return -ENODEV; type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); ops = kvm_device_ops_table[type]; if (ops == NULL) return -ENODEV; if (test) return 0; dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); if (!dev) return -ENOMEM; dev->ops = ops; dev->kvm = kvm; mutex_lock(&kvm->lock); ret = ops->create(dev, type); if (ret < 0) { mutex_unlock(&kvm->lock); kfree(dev); return ret; } list_add(&dev->vm_node, &kvm->devices); mutex_unlock(&kvm->lock); if (ops->init) ops->init(dev); kvm_get_kvm(kvm); ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); if (ret < 0) { kvm_put_kvm_no_destroy(kvm); mutex_lock(&kvm->lock); list_del(&dev->vm_node); mutex_unlock(&kvm->lock); ops->destroy(dev); return ret; } cd->fd = ret; return 0;
Safe
[ "CWE-459" ]
linux
683412ccf61294d727ead4a73d97397396e69a6b
1.657842929329252e+38
54
KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines, generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines was released by VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remain pinned until the end of VM lifetime. Therefore, KVM does not flush cache at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicous userspace can crash the host kernel: creating a malicious VM and continuously allocates/releases unpinned confidential memory pages when the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM get flushed. In particular, hook mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush cache accordingly. The hook after releasing the mmu lock to avoid contention with other vCPUs. Cc: stable@vger.kernel.org Suggested-by: Sean Christpherson <seanjc@google.com> Reported-by: Mingwei Zhang <mizhang@google.com> Signed-off-by: Mingwei Zhang <mizhang@google.com> Message-Id: <20220421031407.2516575-4-mizhang@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
path_table_last_entry(struct path_table *pathtbl) { if (pathtbl->first == NULL) return (NULL); return (((struct isoent *)(void *) ((char *)(pathtbl->last) - offsetof(struct isoent, ptnext)))); }
Safe
[ "CWE-190" ]
libarchive
3014e19820ea53c15c90f9d447ca3e668a0b76c6
7.769341206591883e+37
7
Issue 711: Be more careful about verifying filename lengths when writing ISO9660 archives * Don't cast size_t to int, since this can lead to overflow on machines where sizeof(int) < sizeof(size_t) * Check a + b > limit by writing it as a > limit || b > limit || a + b > limit to avoid problems when a + b wraps around.
0
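The commit message above spells out the overflow-safe comparison: write a + b > limit as a > limit || b > limit || a + b > limit. The same check as a tiny helper follows; it is hypothetical, not libarchive's actual code.

#include <stddef.h>

/* The overflow-aware comparison quoted in the commit message, as a helper:
 * checking each operand against the limit first keeps a wrapped a + b from
 * slipping under the bound. */
static int exceeds_limit(size_t a, size_t b, size_t limit)
{
    return a > limit || b > limit || a + b > limit;
}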
void HGraphBuilder::GenerateIsSmi(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value); return ast_context()->ReturnControl(result, call->id()); }
Safe
[]
node
fd80a31e0697d6317ce8c2d289575399f4e06d21
2.8305120635508265e+37
7
deps: backport 5f836c from v8 upstream Original commit message: Fix Hydrogen bounds check elimination When combining bounds checks, they must all be moved before the first load/store that they are guarding. BUG=chromium:344186 LOG=y R=svenpanne@chromium.org Review URL: https://codereview.chromium.org/172093002 git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 fix #8070
0
mailimf_resent_to_parse(const char * message, size_t length, size_t * indx, struct mailimf_to ** result) { struct mailimf_address_list * addr_list; struct mailimf_to * to; size_t cur_token; int r; int res; cur_token = * indx; r = mailimf_token_case_insensitive_parse(message, length, &cur_token, "Resent-To"); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_colon_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_address_list_parse(message, length, &cur_token, &addr_list); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_unstrict_crlf_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto free_addr_list; } to = mailimf_to_new(addr_list); if (to == NULL) { res = MAILIMF_ERROR_MEMORY; goto free_addr_list; } * result = to; * indx = cur_token; return MAILIMF_NO_ERROR; free_addr_list: mailimf_address_list_free(addr_list); err: return res; }
Safe
[ "CWE-476" ]
libetpan
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
2.1180004187951624e+37
52
Fixed crash #274
0
static int io_send(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { #if defined(CONFIG_NET) struct socket *sock; int ret; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; sock = sock_from_file(req->file, &ret); if (sock) { struct io_sr_msg *sr = &req->sr_msg; struct msghdr msg; struct iovec iov; unsigned flags; ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter); if (ret) return ret; msg.msg_name = NULL; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; flags = req->sr_msg.msg_flags; if (flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; else if (force_nonblock) flags |= MSG_DONTWAIT; msg.msg_flags = flags; ret = sock_sendmsg(sock, &msg); if (force_nonblock && ret == -EAGAIN) return -EAGAIN; if (ret == -ERESTARTSYS) ret = -EINTR; } io_cqring_add_event(req, ret); if (ret < 0) req_set_fail_links(req); io_put_req_find_next(req, nxt); return 0; #else return -EOPNOTSUPP; #endif }
Safe
[]
linux
ff002b30181d30cdfbca316dadd099c3ca0d739c
3.422907860733679e+37
50
io_uring: grab ->fs as part of async preparation This passes it in to io-wq, so it assumes the right fs_struct when executing async work that may need to do lookups. Cc: stable@vger.kernel.org # 5.3+ Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
static unsigned long vmcs_readl(unsigned long field) { unsigned long value; asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX) : "=a"(value) : "d"(field) : "cc"); return value; }
Safe
[ "CWE-20" ]
linux-2.6
16175a796d061833aacfbd9672235f2d2725df65
2.6546212259727405e+37
8
KVM: VMX: Don't allow uninhibited access to EFER on i386 vmx_set_msr() does not allow i386 guests to touch EFER, but they can still do so through the default: label in the switch. If they set EFER_LME, they can oops the host. Fix by having EFER access through the normal channel (which will check for EFER_LME) even on i386. Reported-and-tested-by: Benjamin Gilbert <bgilbert@cs.cmu.edu> Cc: stable@kernel.org Signed-off-by: Avi Kivity <avi@redhat.com>
0
static bool cut(const double val) { return val<(double)min()?min():val>(double)max()?max():(bool)val; }
Safe
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
1.2903007045128728e+38
1
Fix other issues in 'CImg<T>::load_bmp()'.
0
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; struct ccp_dm_workarea src, dst; struct ccp_op op; int ret; u8 *save; if (!ecc->u.pm.point_1.x || (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.point_1.y || (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { if (!ecc->u.pm.point_2.x || (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.point_2.y || (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; } else { if (!ecc->u.pm.domain_a || (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) if (!ecc->u.pm.scalar || (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; } if (!ecc->u.pm.result.x || (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.result.y || (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted and placed in a * fixed length buffer. */ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, DMA_TO_DEVICE); if (ret) return ret; /* Save the workarea address since it is updated in order to perform * the concatenation */ save = src.address; /* Copy the ECC modulus */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first point X and Y coordinate */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0, ecc->u.pm.point_1.x_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0, ecc->u.pm.point_1.y_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Set the first point Z coordinate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { /* Copy the second point X and Y coordinate */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0, ecc->u.pm.point_2.x_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0, ecc->u.pm.point_2.y_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Set the second point Z coordinate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; } else { /* Copy the Domain "a" parameter */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0, ecc->u.pm.domain_a_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { /* Copy the scalar value */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.scalar, 0, ecc->u.pm.scalar_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; } } /* Restore the workarea address */ src.address = save; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = src.length; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = dst.length; op.u.ecc.function = cmd->u.ecc.function; ret = cmd_q->ccp->vdata->perform->ecc(&op); if (ret) { cmd->engine_error = 
cmd_q->cmd_error; goto e_dst; } ecc->ecc_result = le16_to_cpup( (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { ret = -EIO; goto e_dst; } /* Save the workarea address since it is updated as we walk through * to copy the point math result */ save = dst.address; /* Save the ECC result X and Y coordinates */ ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0, CCP_ECC_MODULUS_BYTES); dst.address += CCP_ECC_OUTPUT_SIZE; ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0, CCP_ECC_MODULUS_BYTES); /* Restore the workarea address */ dst.address = save; e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); return ret; }
Safe
[ "CWE-703", "CWE-401" ]
linux
505d9dcb0f7ddf9d075e729523a33d38642ae680
1.807653374954786e+38
168
crypto: ccp - fix resource leaks in ccp_run_aes_gcm_cmd() There are three bugs in this code: 1) If we ccp_init_data() fails for &src then we need to free aad. Use goto e_aad instead of goto e_ctx. 2) The label to free the &final_wa was named incorrectly as "e_tag" but it should have been "e_final_wa". One error path leaked &final_wa. 3) The &tag was leaked on one error path. In that case, I added a free before the goto because the resource was local to that block. Fixes: 36cf515b9bbe ("crypto: ccp - Enable support for AES GCM on v5 CCPs") Reported-by: "minihanshen(沈明航)" <minihanshen@tencent.com> Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Reviewed-by: John Allen <john.allen@amd.com> Tested-by: John Allen <john.allen@amd.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
0
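The fix described above is about pairing every allocation with the right goto label so that earlier resources are still released when a later step fails. The following is a generic sketch of that C cleanup idiom; the allocator and the label names are placeholders, not the ccp driver's API.
```
#include <stdlib.h>

/* Placeholder allocators standing in for ccp_init_dm_workarea() etc. */
static void *acquire(void) { return malloc(16); }
static void release(void *p) { free(p); }

static int run_cmd(void)
{
    int ret = -1;
    void *aad = NULL, *src = NULL, *final_wa = NULL;

    aad = acquire();
    if (!aad)
        return -1;

    src = acquire();
    if (!src)
        goto e_aad;       /* aad is already live, so free it here */

    final_wa = acquire();
    if (!final_wa)
        goto e_src;       /* unwind in reverse order of acquisition */

    ret = 0;              /* ... real work would go here ... */

    release(final_wa);    /* success path falls through the cleanup */
e_src:
    release(src);
e_aad:
    release(aad);
    return ret;
}

int main(void) { return run_cmd(); }
```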
struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol, __u32 fwmark, __u32 flags) { struct ip_vs_dest *dest; struct ip_vs_service *svc; __be16 port = dport; svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport); if (!svc) return NULL; if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) port = 0; dest = ip_vs_lookup_dest(svc, daddr, port); if (!dest) dest = ip_vs_lookup_dest(svc, daddr, port ^ dport); if (dest) atomic_inc(&dest->refcnt); ip_vs_service_put(svc); return dest; }
Safe
[ "CWE-200" ]
linux
2d8a041b7bfe1097af21441cb77d6af95f4f4680
2.2968492216755386e+38
24
ipvs: fix info leak in getsockopt(IP_VS_SO_GET_TIMEOUT) If at least one of CONFIG_IP_VS_PROTO_TCP or CONFIG_IP_VS_PROTO_UDP is not set, __ip_vs_get_timeouts() does not fully initialize the structure that gets copied to userland and that for leaks up to 12 bytes of kernel stack. Add an explicit memset(0) before passing the structure to __ip_vs_get_timeouts() to avoid the info leak. Signed-off-by: Mathias Krause <minipli@googlemail.com> Cc: Wensong Zhang <wensong@linux-vs.org> Cc: Simon Horman <horms@verge.net.au> Cc: Julian Anastasov <ja@ssi.bg> Signed-off-by: David S. Miller <davem@davemloft.net>
0
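The leak described above comes from copying a partially initialized structure to user space. Here is a small user-space sketch of the mitigation (zeroing the whole struct before the conditional fill); the field names are made up rather than the real ip_vs timeout layout.
```
#include <stdio.h>
#include <string.h>

/* Stand-in for the structure handed back to userland. */
struct timeouts {
    int tcp;
    int tcp_fin;
    int udp;        /* may be left untouched when UDP support is off */
};

static void get_timeouts(struct timeouts *t, int udp_enabled)
{
    /* Without this memset, any field the #ifdef'd code skips would
     * still hold whatever was on the stack: an information leak once
     * the struct is copied out. */
    memset(t, 0, sizeof(*t));

    t->tcp = 900;
    t->tcp_fin = 120;
    if (udp_enabled)
        t->udp = 300;
}

int main(void)
{
    struct timeouts t;
    get_timeouts(&t, 0);
    printf("tcp=%d fin=%d udp=%d\n", t.tcp, t.tcp_fin, t.udp);
    return 0;
}
```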
QPDF_Array::releaseResolved() { for (std::vector<QPDFObjectHandle>::iterator iter = this->items.begin(); iter != this->items.end(); ++iter) { QPDFObjectHandle::ReleaseResolver::releaseResolved(*iter); } }
Safe
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
2.140689104337751e+38
8
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
0
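The qpdf change above replaces silent integer conversions with range-checked ones that raise an error. A rough C sketch of such a checked narrowing helper follows; qpdf's real helpers are C++ and differ in shape, so this only illustrates the idea.
```
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Convert a size_t to int, failing loudly instead of truncating. */
static int checked_size_to_int(size_t value, int *out)
{
    if (value > (size_t)INT_MAX) {
        errno = ERANGE;
        return -1;          /* caller must handle the error */
    }
    *out = (int)value;
    return 0;
}

int main(void)
{
    int n;
    if (checked_size_to_int((size_t)-1, &n) != 0)
        fprintf(stderr, "value out of range for int\n");
    if (checked_size_to_int(42, &n) == 0)
        printf("converted: %d\n", n);
    return 0;
}
```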
static void _slurm_rpc_step_layout(slurm_msg_t *msg) { int error_code = SLURM_SUCCESS; slurm_msg_t response_msg; DEF_TIMERS; job_step_id_msg_t *req = (job_step_id_msg_t *)msg->data; slurm_step_layout_t *step_layout = NULL; /* Locks: Read config job, write node */ slurmctld_lock_t job_read_lock = { READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK, NO_LOCK }; uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurmctld_config.auth_info); struct job_record *job_ptr = NULL; struct step_record *step_ptr = NULL; START_TIMER; debug2("Processing RPC: REQUEST_STEP_LAYOUT, from uid=%d", uid); lock_slurmctld(job_read_lock); error_code = job_alloc_info(uid, req->job_id, &job_ptr); END_TIMER2("_slurm_rpc_step_layout"); /* return result */ if (error_code || (job_ptr == NULL)) { unlock_slurmctld(job_read_lock); if (error_code == ESLURM_ACCESS_DENIED) { error("Security vioation, REQUEST_STEP_LAYOUT for " "JobId=%u from uid=%u", req->job_id, uid); } else { if (slurmctld_conf.debug_flags & DEBUG_FLAG_STEPS) info("%s: JobId=%u, uid=%u: %s", __func__, req->job_id, uid, slurm_strerror(error_code)); } slurm_send_rc_msg(msg, error_code); return; } step_ptr = find_step_record(job_ptr, req->step_id); if (!step_ptr) { unlock_slurmctld(job_read_lock); if (slurmctld_conf.debug_flags & DEBUG_FLAG_STEPS) info("%s: JobId=%u.%u Not Found", __func__, req->job_id, req->step_id); slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); return; } step_layout = slurm_step_layout_copy(step_ptr->step_layout); #ifdef HAVE_FRONT_END if (job_ptr->batch_host) step_layout->front_end = xstrdup(job_ptr->batch_host); #endif unlock_slurmctld(job_read_lock); slurm_msg_t_init(&response_msg); response_msg.flags = msg->flags; response_msg.protocol_version = msg->protocol_version; response_msg.msg_type = RESPONSE_STEP_LAYOUT; response_msg.data = step_layout; slurm_send_node_msg(msg->conn_fd, &response_msg); slurm_step_layout_destroy(step_layout); }
Safe
[ "CWE-20" ]
slurm
033dc0d1d28b8d2ba1a5187f564a01c15187eb4e
2.816139281605239e+38
62
Fix insecure handling of job requested gid. Only trust MUNGE signed values, unless the RPC was signed by SlurmUser or root. CVE-2018-10995.
0
TEST_F(ConnectionHandlerTest, TcpListenerInplaceUpdate) { InSequence s; uint64_t old_listener_tag = 1; uint64_t new_listener_tag = 2; Network::ListenerCallbacks* old_listener_callbacks; auto old_listener = new NiceMock<Network::MockListener>(); TestListener* old_test_listener = addListener(old_listener_tag, true, false, "test_listener", old_listener, &old_listener_callbacks); EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *old_test_listener); ASSERT_NE(old_test_listener, nullptr); Network::ListenerCallbacks* new_listener_callbacks = nullptr; auto overridden_filter_chain_manager = std::make_shared<NiceMock<Network::MockFilterChainManager>>(); TestListener* new_test_listener = addListener(new_listener_tag, true, false, "test_listener", /* Network::Listener */ nullptr, &new_listener_callbacks, nullptr, nullptr, Network::Socket::Type::Stream, std::chrono::milliseconds(15000), false, overridden_filter_chain_manager); handler_->addListener(old_listener_tag, *new_test_listener); ASSERT_EQ(new_listener_callbacks, nullptr) << "new listener should be inplace added and callback should not change"; Network::MockConnectionSocket* connection = new NiceMock<Network::MockConnectionSocket>(); EXPECT_CALL(manager_, findFilterChain(_)).Times(0); EXPECT_CALL(*overridden_filter_chain_manager, findFilterChain(_)).WillOnce(Return(nullptr)); old_listener_callbacks->onAccept(Network::ConnectionSocketPtr{connection}); EXPECT_CALL(*old_listener, onDestroy()); }
Safe
[ "CWE-400" ]
envoy
dfddb529e914d794ac552e906b13d71233609bf7
1.3591570311254545e+38
30
listener: Add configurable accepted connection limits (#153) Add support for per-listener limits on accepted connections. Signed-off-by: Tony Allen <tony@allen.gg>
0
ecma_op_container_has (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) if ((map_object_p->u.class_prop.extra_info & ECMA_CONTAINER_FLAGS_WEAK) != 0 && !ecma_is_value_object (key_arg)) { return ECMA_VALUE_FALSE; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ if (ECMA_CONTAINER_GET_SIZE (container_p) == 0) { return ECMA_VALUE_FALSE; } ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); return ecma_make_boolean_value (entry_p != NULL); } /* ecma_op_container_has */
Safe
[ "CWE-119", "CWE-125", "CWE-703" ]
jerryscript
c2b662170245a16f46ce02eae68815c325d99821
2.0686520151254245e+38
31
Fix adding entries to the internal buffer of a Map object (#3805) When appending the key/value pair separately, garbage collection could be triggered before the value is added, which could cause problems during marking. This patch changes insertion to add both values at the same time, which prevents partial entries from being present in the internal buffer. Fixes #3804. JerryScript-DCO-1.0-Signed-off-by: Dániel Bátyai dbatyai@inf.u-szeged.hu
0
include_class_new(mrb_state *mrb, struct RClass *m, struct RClass *super) { struct RClass *ic = (struct RClass*)mrb_obj_alloc(mrb, MRB_TT_ICLASS, mrb->class_class); if (m->tt == MRB_TT_ICLASS) { m = m->c; } MRB_CLASS_ORIGIN(m); ic->iv = m->iv; ic->mt = m->mt; ic->super = super; if (m->tt == MRB_TT_ICLASS) { ic->c = m->c; } else { ic->c = m; } return ic; }
Safe
[ "CWE-476", "CWE-415" ]
mruby
faa4eaf6803bd11669bc324b4c34e7162286bfa3
7.917973027240316e+37
18
`mrb_class_real()` did not work for `BasicObject`; fix #4037
0
static int cqspi_remove(struct platform_device *pdev) { struct cqspi_st *cqspi = platform_get_drvdata(pdev); int i; for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) if (cqspi->f_pdata[i].registered) mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); cqspi_controller_enable(cqspi, 0); clk_disable_unprepare(cqspi->clk); return 0; }
Safe
[ "CWE-119", "CWE-787" ]
linux
193e87143c290ec16838f5368adc0e0bc94eb931
2.2941206413362724e+38
15
mtd: spi-nor: Off by one in cqspi_setup_flash() There are CQSPI_MAX_CHIPSELECT elements in the ->f_pdata array so the > should be >=. Fixes: 140623410536 ('mtd: spi-nor: Add driver for Cadence Quad SPI Flash Controller') Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Reviewed-by: Marek Vasut <marex@denx.de> Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
0
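The off-by-one described above (`>` where `>=` was needed) is the classic array-bound mistake. A tiny standalone illustration follows, using a made-up array rather than the cqspi driver's structures.
```
#include <stdio.h>

#define MAX_CHIPSELECT 4

static int pdata[MAX_CHIPSELECT];   /* valid indices are 0..3 */

/* Correct bound check: an index equal to the array size is already
 * one past the end, so the test must be ">=", not ">". */
static int setup_flash(unsigned int cs)
{
    if (cs >= MAX_CHIPSELECT)
        return -1;              /* reject out-of-range chip select */
    pdata[cs] = 1;
    return 0;
}

int main(void)
{
    printf("%d\n", setup_flash(3));  /* ok: last valid slot */
    printf("%d\n", setup_flash(4));  /* rejected: would overflow */
    return 0;
}
```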
hf_try_val_to_str(guint32 value, const header_field_info *hfinfo) { if (hfinfo->display & BASE_RANGE_STRING) return try_rval_to_str(value, (const range_string *) hfinfo->strings); if (hfinfo->display & BASE_EXT_STRING) { if (hfinfo->display & BASE_VAL64_STRING) return try_val64_to_str_ext(value, (val64_string_ext *) hfinfo->strings); else return try_val_to_str_ext(value, (value_string_ext *) hfinfo->strings); } if (hfinfo->display & BASE_VAL64_STRING) return try_val64_to_str(value, (const val64_string *) hfinfo->strings); if (hfinfo->display & BASE_UNIT_STRING) return unit_name_string_get_value(value, (const struct unit_name_string*) hfinfo->strings); return try_val_to_str(value, (const value_string *) hfinfo->strings); }
Safe
[ "CWE-401" ]
wireshark
a9fc769d7bb4b491efb61c699d57c9f35269d871
1.357369696977932e+38
20
epan: Fix a memory leak. Make sure _proto_tree_add_bits_ret_val allocates a bits array using the packet scope, otherwise we leak memory. Fixes #17032.
0
initConfigSettings(void) { cs.bEmitMsgOnClose = 0; cs.bEmitMsgOnOpen = 0; cs.wrkrMax = DFLT_wrkrMax; cs.bSuppOctetFram = 1; cs.iAddtlFrameDelim = TCPSRV_NO_ADDTL_DELIMITER; cs.maxFrameSize = 200000; cs.pszInputName = NULL; cs.pszBindRuleset = NULL; cs.pszInputName = NULL; cs.lstnIP = NULL; }
Safe
[ "CWE-787" ]
rsyslog
89955b0bcb1ff105e1374aad7e0e993faa6a038f
1.3167829098176666e+38
13
net bugfix: potential buffer overrun
0
static int sync_regs(struct kvm_vcpu *vcpu) { if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { __set_regs(vcpu, &vcpu->run->s.regs.regs); vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; } if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) return -EINVAL; vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; } if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { if (kvm_vcpu_ioctl_x86_set_vcpu_events( vcpu, &vcpu->run->s.regs.events)) return -EINVAL; vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; } return 0; }
Safe
[ "CWE-476" ]
linux
55749769fe608fa3f4a075e42e89d237c8e37637
2.250669425360143e+38
20
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty When dirty ring logging is enabled, any dirty logging without an active vCPU context will cause a kernel oops. But we've already declared that the shared_info page doesn't get dirty tracking anyway, since it would be kind of insane to mark it dirty every time we deliver an event channel interrupt. Userspace is supposed to just assume it's always dirty any time a vCPU can run or event channels are routed. So stop using the generic kvm_write_wall_clock() and just write directly through the gfn_to_pfn_cache that we already have set up. We can make kvm_write_wall_clock() static in x86.c again now, but let's not remove the 'sec_hi_ofs' argument even though it's not used yet. At some point we *will* want to use that for KVM guests too. Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region") Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Message-Id: <20211210163625.2886-6-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
static void xudc_set_clear_feature(struct xusb_udc *udc) { struct xusb_ep *ep0 = &udc->ep[0]; struct xusb_req *req = udc->req; struct xusb_ep *target_ep; u8 endpoint; u8 outinbit; u32 epcfgreg; int flag = (udc->setup.bRequest == USB_REQ_SET_FEATURE ? 1 : 0); int ret; switch (udc->setup.bRequestType) { case USB_RECIP_DEVICE: switch (udc->setup.wValue) { case USB_DEVICE_TEST_MODE: /* * The Test Mode will be executed * after the status phase. */ break; case USB_DEVICE_REMOTE_WAKEUP: if (flag) udc->remote_wkp = 1; else udc->remote_wkp = 0; break; default: xudc_ep0_stall(udc); break; } break; case USB_RECIP_ENDPOINT: if (!udc->setup.wValue) { endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; if (endpoint >= XUSB_MAX_ENDPOINTS) { xudc_ep0_stall(udc); return; } target_ep = &udc->ep[endpoint]; outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; outinbit = outinbit >> 7; /* Make sure direction matches.*/ if (outinbit != target_ep->is_in) { xudc_ep0_stall(udc); return; } epcfgreg = udc->read_fn(udc->addr + target_ep->offset); if (!endpoint) { /* Clear the stall.*/ epcfgreg &= ~XUSB_EP_CFG_STALL_MASK; udc->write_fn(udc->addr, target_ep->offset, epcfgreg); } else { if (flag) { epcfgreg |= XUSB_EP_CFG_STALL_MASK; udc->write_fn(udc->addr, target_ep->offset, epcfgreg); } else { /* Unstall the endpoint.*/ epcfgreg &= ~(XUSB_EP_CFG_STALL_MASK | XUSB_EP_CFG_DATA_TOGGLE_MASK); udc->write_fn(udc->addr, target_ep->offset, epcfgreg); } } } break; default: xudc_ep0_stall(udc); return; } req->usb_req.length = 0; ret = __xudc_ep0_queue(ep0, req); if (ret == 0) return; dev_err(udc->dev, "Can't respond to SET/CLEAR FEATURE\n"); xudc_ep0_stall(udc); }
Safe
[ "CWE-20", "CWE-129" ]
linux
7f14c7227f342d9932f9b918893c8814f86d2a0d
2.157166562117127e+38
83
USB: gadget: validate endpoint index for xilinx udc Assure that host may not manipulate the index to point past endpoint array. Signed-off-by: Szymon Heidrich <szymon.heidrich@gmail.com> Cc: stable <stable@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
0
ews_store_unset_oof_settings_state (CamelSession *session, GCancellable *cancellable, gpointer user_data, GError **error) { CamelEwsStore *ews_store = user_data; EEwsConnection *connection; EEwsOofSettings *oof_settings; EEwsOofState state; GError *local_error = NULL; camel_operation_push_message (cancellable, _("Unsetting the “Out of Office” status")); connection = camel_ews_store_ref_connection (ews_store); oof_settings = e_ews_oof_settings_new_sync (connection, cancellable, &local_error); g_object_unref (connection); if (local_error != NULL) { g_propagate_error (error, local_error); camel_operation_pop_message (cancellable); return; } state = e_ews_oof_settings_get_state (oof_settings); if (state == E_EWS_OOF_STATE_DISABLED) { g_object_unref (oof_settings); camel_operation_pop_message (cancellable); return; } e_ews_oof_settings_set_state (oof_settings, E_EWS_OOF_STATE_DISABLED); e_ews_oof_settings_submit_sync (oof_settings, cancellable, error); g_object_unref (oof_settings); camel_operation_pop_message (cancellable); }
Safe
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
2.7769684329395e+38
36
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
0
static Bool parse_short_term_ref_pic_set(GF_BitStream *bs, HEVC_SPS *sps, u32 idx_rps) { u32 i; Bool inter_ref_pic_set_prediction_flag = 0; if (idx_rps != 0) inter_ref_pic_set_prediction_flag = gf_bs_read_int(bs, 1); if (inter_ref_pic_set_prediction_flag ) { HEVC_ReferencePictureSets *ref_ps, *rps; u32 delta_idx_minus1 = 0; u32 ref_idx; u32 delta_rps_sign; u32 abs_delta_rps_minus1, nb_ref_pics; s32 deltaRPS; u32 k = 0, k0 = 0, k1 = 0; if (idx_rps == sps->num_short_term_ref_pic_sets) delta_idx_minus1 = bs_get_ue(bs); assert(delta_idx_minus1 <= idx_rps - 1); ref_idx = idx_rps - 1 - delta_idx_minus1; delta_rps_sign = gf_bs_read_int(bs, 1); abs_delta_rps_minus1 = bs_get_ue(bs); deltaRPS = (1 - (delta_rps_sign<<1)) * (abs_delta_rps_minus1 + 1); rps = &sps->rps[idx_rps]; ref_ps = &sps->rps[ref_idx]; nb_ref_pics = ref_ps->num_negative_pics + ref_ps->num_positive_pics; for (i=0; i<=nb_ref_pics; i++) { s32 ref_idc; s32 used_by_curr_pic_flag = gf_bs_read_int(bs, 1); ref_idc = used_by_curr_pic_flag ? 1 : 0; if ( !used_by_curr_pic_flag ) { used_by_curr_pic_flag = gf_bs_read_int(bs, 1); ref_idc = used_by_curr_pic_flag << 1; } if ((ref_idc==1) || (ref_idc== 2)) { s32 deltaPOC = deltaRPS; if (i < nb_ref_pics) deltaPOC += ref_ps->delta_poc[i]; rps->delta_poc[k] = deltaPOC; if (deltaPOC < 0) k0++; else k1++; k++; } } rps->num_negative_pics = k0; rps->num_positive_pics = k1; } else { s32 prev = 0, poc = 0; sps->rps[idx_rps].num_negative_pics = bs_get_ue(bs); sps->rps[idx_rps].num_positive_pics = bs_get_ue(bs); if (sps->rps[idx_rps].num_negative_pics>16) return GF_FALSE; if (sps->rps[idx_rps].num_positive_pics>16) return GF_FALSE; for (i=0; i<sps->rps[idx_rps].num_negative_pics; i++) { u32 delta_poc_s0_minus1 = bs_get_ue(bs); poc = prev - delta_poc_s0_minus1 - 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; /*used_by_curr_pic_s1_flag[ i ] = */gf_bs_read_int(bs, 1); } for (i=0; i<sps->rps[idx_rps].num_positive_pics; i++) { u32 delta_poc_s1_minus1 = bs_get_ue(bs); poc = prev + delta_poc_s1_minus1 + 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; /*used_by_curr_pic_s1_flag[ i ] = */gf_bs_read_int(bs, 1); } } return GF_TRUE; }
Safe
[ "CWE-119", "CWE-787" ]
gpac
90dc7f853d31b0a4e9441cba97feccf36d8b69a4
2.3471217064746026e+38
75
fix some exploitable overflows (#994, #997)
0
GF_Err fiin_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; FDItemInformationBox *ptr = (FDItemInformationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, gf_list_count(ptr->partition_entries) ); e = gf_isom_box_array_write(s, ptr->partition_entries, bs); if (e) return e; if (ptr->session_info) gf_isom_box_write((GF_Box*)ptr->session_info, bs); if (ptr->group_id_to_name) gf_isom_box_write((GF_Box*)ptr->group_id_to_name, bs); return GF_OK; }
Safe
[ "CWE-125" ]
gpac
bceb03fd2be95097a7b409ea59914f332fb6bc86
1.8229930617601655e+38
15
fixed 2 possible heap overflows (inc. #1088)
0
loc_offsets_compar (const void *ap, const void *bp) { dwarf_vma a = loc_offsets[*(const unsigned int *) ap]; dwarf_vma b = loc_offsets[*(const unsigned int *) bp]; int ret = (a > b) - (b > a); if (ret) return ret; a = loc_views[*(const unsigned int *) ap]; b = loc_views[*(const unsigned int *) bp]; ret = (a > b) - (b > a); return ret; }
Safe
[ "CWE-703" ]
binutils-gdb
695c6dfe7e85006b98c8b746f3fd5f913c94ebff
3.0734419943587815e+38
16
PR29370, infinite loop in display_debug_abbrev The PR29370 testcase is a fuzzed object file with multiple .trace_abbrev sections. Multiple .trace_abbrev or .debug_abbrev sections are not a violation of the DWARF standard. The DWARF5 standard even gives an example of multiple .debug_abbrev sections contained in groups. Caching and lookup of processed abbrevs thus needs to be done by section and offset rather than base and offset. (Why base anyway?) Or, since section contents are kept, by a pointer into the contents. PR 29370 * dwarf.c (struct abbrev_list): Replace abbrev_base and abbrev_offset with raw field. (find_abbrev_list_by_abbrev_offset): Delete. (find_abbrev_list_by_raw_abbrev): New function. (process_abbrev_set): Set list->raw and list->next. (find_and_process_abbrev_set): Replace abbrev list lookup with new function. Don't set list abbrev_base, abbrev_offset or next.
0
static int mov_read_elst(MOVContext *c, AVIOContext *pb, MOVAtom atom) { MOVStreamContext *sc; int i, edit_count, version; int64_t elst_entry_size; if (c->fc->nb_streams < 1 || c->ignore_editlist) return 0; sc = c->fc->streams[c->fc->nb_streams-1]->priv_data; version = avio_r8(pb); /* version */ avio_rb24(pb); /* flags */ edit_count = avio_rb32(pb); /* entries */ atom.size -= 8; elst_entry_size = version == 1 ? 20 : 12; if (atom.size != edit_count * elst_entry_size) { if (c->fc->strict_std_compliance >= FF_COMPLIANCE_STRICT) { av_log(c->fc, AV_LOG_ERROR, "Invalid edit list entry_count: %d for elst atom of size: %"PRId64" bytes.\n", edit_count, atom.size + 8); return AVERROR_INVALIDDATA; } else { edit_count = atom.size / elst_entry_size; if (edit_count * elst_entry_size != atom.size) { av_log(c->fc, AV_LOG_WARNING, "ELST atom of %"PRId64" bytes, bigger than %d entries.\n", atom.size, edit_count); } } } if (!edit_count) return 0; if (sc->elst_data) av_log(c->fc, AV_LOG_WARNING, "Duplicated ELST atom\n"); av_free(sc->elst_data); sc->elst_count = 0; sc->elst_data = av_malloc_array(edit_count, sizeof(*sc->elst_data)); if (!sc->elst_data) return AVERROR(ENOMEM); av_log(c->fc, AV_LOG_TRACE, "track[%u].edit_count = %i\n", c->fc->nb_streams - 1, edit_count); for (i = 0; i < edit_count && atom.size > 0 && !pb->eof_reached; i++) { MOVElst *e = &sc->elst_data[i]; if (version == 1) { e->duration = avio_rb64(pb); e->time = avio_rb64(pb); atom.size -= 16; } else { e->duration = avio_rb32(pb); /* segment duration */ e->time = (int32_t)avio_rb32(pb); /* media time */ atom.size -= 8; } e->rate = avio_rb32(pb) / 65536.0; atom.size -= 4; av_log(c->fc, AV_LOG_TRACE, "duration=%"PRId64" time=%"PRId64" rate=%f\n", e->duration, e->time, e->rate); if (e->time < 0 && e->time != -1 && c->fc->strict_std_compliance >= FF_COMPLIANCE_STRICT) { av_log(c->fc, AV_LOG_ERROR, "Track %d, edit %d: Invalid edit list media time=%"PRId64"\n", c->fc->nb_streams-1, i, e->time); return AVERROR_INVALIDDATA; } } sc->elst_count = i; return 0; }
Safe
[ "CWE-703" ]
FFmpeg
c953baa084607dd1d84c3bfcce3cf6a87c3e6e05
2.7723576307275284e+37
68
avformat/mov: Check count sums in build_open_gop_key_points() Fixes: ffmpeg.md Fixes: Out of array access Fixes: CVE-2022-2566 Found-by: Andy Nguyen <theflow@google.com> Found-by: 3pvd <3pvd@google.com> Reviewed-by: Andy Nguyen <theflow@google.com> Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
0
static uint32_t hpet_in_legacy_mode(HPETState *s) { return s->config & HPET_CFG_LEGACY; }
Safe
[ "CWE-119" ]
qemu
3f1c49e2136fa08ab1ef3183fd55def308829584
7.625305524118037e+37
4
hpet: fix buffer overrun on invalid state load CVE-2013-4527 hw/timer/hpet.c buffer overrun hpet is a VARRAY with a uint8 size but static array of 32 To fix, make sure num_timers is valid using VMSTATE_VALID hook. Reported-by: Anthony Liguori <anthony@codemonkey.ws> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: Juan Quintela <quintela@redhat.com>
0
static inline u32 nfsd4_only_status_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size) * sizeof(__be32); }
Safe
[ "CWE-20", "CWE-129" ]
linux
b550a32e60a4941994b437a8d662432a486235a5
2.5224903995047818e+38
4
nfsd: fix undefined behavior in nfsd4_layout_verify UBSAN: Undefined behaviour in fs/nfsd/nfs4proc.c:1262:34 shift exponent 128 is too large for 32-bit type 'int' Depending on compiler+architecture, this may cause the check for layout_type to succeed for overly large values (which seems to be the case with amd64). The large value will be later used in de-referencing nfsd4_layout_ops for function pointers. Reported-by: Jani Tuovila <tuovila@synopsys.com> Signed-off-by: Ari Kauppi <ari@synopsys.com> [colin.king@canonical.com: use LAYOUT_TYPE_MAX instead of 32] Cc: stable@vger.kernel.org Reviewed-by: Dan Carpenter <dan.carpenter@oracle.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
0
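The UBSAN report quoted above is about shifting by an exponent that is at least the width of the type, which is undefined behaviour in C. Below is a small sketch of the guard the commit message implies; LAYOUT_TYPE_MAX here is a placeholder value, not nfsd's real constant.
```
#include <stdio.h>
#include <stdint.h>

#define LAYOUT_TYPE_MAX 32   /* placeholder: number of usable bits */

/* Shifting a 32-bit value by 32 or more is undefined behaviour, so the
 * exponent must be validated before it is used to build a bitmask. */
static int layout_supported(uint32_t supported_mask, unsigned int layout_type)
{
    if (layout_type >= LAYOUT_TYPE_MAX)
        return 0;                       /* reject instead of shifting */
    return (supported_mask & (UINT32_C(1) << layout_type)) != 0;
}

int main(void)
{
    uint32_t mask = UINT32_C(1) << 3;
    printf("%d\n", layout_supported(mask, 3));    /* 1 */
    printf("%d\n", layout_supported(mask, 128));  /* 0, no UB */
    return 0;
}
```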
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { struct file *file; struct socket *sock; *err = -EBADF; file = fget_light(fd, fput_needed); if (file) { sock = sock_from_file(file, err); if (sock) return sock; fput_light(file, *fput_needed); } return NULL; }
Safe
[]
linux-2.6
644595f89620ba8446cc555be336d24a34464950
4.0154247587118725e+37
15
compat: Handle COMPAT_USE_64BIT_TIME in net/socket.c Use helper functions aware of COMPAT_USE_64BIT_TIME to write struct timeval and struct timespec to userspace in net/socket.c. Signed-off-by: H. Peter Anvin <hpa@zytor.com>
0
static void jp2_write_url(opj_cio_t *cio, char *Idx_file) { OPJ_UINT32 i; opj_jp2_box_t box; box.init_pos = cio_tell(cio); cio_skip(cio, 4); cio_write(cio, JP2_URL, 4); /* DBTL */ cio_write(cio, 0, 1); /* VERS */ cio_write(cio, 0, 3); /* FLAG */ if (Idx_file) { for (i = 0; i < strlen(Idx_file); i++) { cio_write(cio, Idx_file[i], 1); } } box.length = cio_tell(cio) - box.init_pos; cio_seek(cio, box.init_pos); cio_write(cio, box.length, 4); /* L */ cio_seek(cio, box.init_pos + box.length); }
Safe
[ "CWE-20" ]
openjpeg
4edb8c83374f52cd6a8f2c7c875e8ffacccb5fa5
1.2298374524172329e+38
22
Add support for generation of PLT markers in encoder * -PLT switch added to opj_compress * Add a opj_encoder_set_extra_options() function that accepts a PLT=YES option, and could be expanded later for other uses. ------- Testing with a Sentinel2 10m band, T36JTT_20160914T074612_B02.jp2, coming from S2A_MSIL1C_20160914T074612_N0204_R135_T36JTT_20160914T081456.SAFE Decompress it to TIFF: ``` opj_uncompress -i T36JTT_20160914T074612_B02.jp2 -o T36JTT_20160914T074612_B02.tif ``` Recompress it with similar parameters as original: ``` opj_compress -n 5 -c [256,256],[256,256],[256,256],[256,256],[256,256] -t 1024,1024 -PLT -i T36JTT_20160914T074612_B02.tif -o T36JTT_20160914T074612_B02_PLT.jp2 ``` Dump codestream detail with GDAL dump_jp2.py utility (https://github.com/OSGeo/gdal/blob/master/gdal/swig/python/samples/dump_jp2.py) ``` python dump_jp2.py T36JTT_20160914T074612_B02.jp2 > /tmp/dump_sentinel2_ori.txt python dump_jp2.py T36JTT_20160914T074612_B02_PLT.jp2 > /tmp/dump_sentinel2_openjpeg_plt.txt ``` The diff between both show very similar structure, and identical number of packets in PLT markers Now testing with Kakadu (KDU803_Demo_Apps_for_Linux-x86-64_200210) Full file decompression: ``` kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp.tif Consumed 121 tile-part(s) from a total of 121 tile(s). Consumed 80,318,806 codestream bytes (excluding any file format) = 5.329697 bits/pel. Processed using the multi-threaded environment, with 8 parallel threads of execution ``` Partial decompresson (presumably using PLT markers): ``` kdu_expand -i T36JTT_20160914T074612_B02.jp2 -o tmp.pgm -region "{0.5,0.5},{0.01,0.01}" kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp2.pgm -region "{0.5,0.5},{0.01,0.01}" diff tmp.pgm tmp2.pgm && echo "same !" ``` ------- Funded by ESA for S2-MPC project
0
void set_personality_ia32(bool x32) { /* inherit personality from parent */ /* Make sure to be in 32bit mode */ set_thread_flag(TIF_ADDR32); /* Mark the associated mm as containing 32-bit tasks. */ if (x32) { clear_thread_flag(TIF_IA32); set_thread_flag(TIF_X32); if (current->mm) current->mm->context.ia32_compat = TIF_X32; current->personality &= ~READ_IMPLIES_EXEC; /* is_compat_task() uses the presence of the x32 syscall bit flag to determine compat status */ current_thread_info()->status &= ~TS_COMPAT; } else { set_thread_flag(TIF_IA32); clear_thread_flag(TIF_X32); if (current->mm) current->mm->context.ia32_compat = TIF_IA32; current->personality |= force_personality32; /* Prepare the first "return" to user space */ current_thread_info()->status |= TS_COMPAT; } }
Safe
[ "CWE-200", "CWE-401" ]
linux
f647d7c155f069c1a068030255c300663516420e
1.7922881536120377e+38
27
x86_64, switch_to(): Load TLS descriptors before switching DS and ES Otherwise, if buggy user code points DS or ES into the TLS array, they would be corrupted after a context switch. This also significantly improves the comments and documents some gotchas in the code. Before this patch, the both tests below failed. With this patch, the es test passes, although the gsbase test still fails. ----- begin es test ----- /* * Copyright (c) 2014 Andy Lutomirski * GPL v2 */ static unsigned short GDT3(int idx) { return (idx << 3) | 3; } static int create_tls(int idx, unsigned int base) { struct user_desc desc = { .entry_number = idx, .base_addr = base, .limit = 0xfffff, .seg_32bit = 1, .contents = 0, /* Data, grow-up */ .read_exec_only = 0, .limit_in_pages = 1, .seg_not_present = 0, .useable = 0, }; if (syscall(SYS_set_thread_area, &desc) != 0) err(1, "set_thread_area"); return desc.entry_number; } int main() { int idx = create_tls(-1, 0); printf("Allocated GDT index %d\n", idx); unsigned short orig_es; asm volatile ("mov %%es,%0" : "=rm" (orig_es)); int errors = 0; int total = 1000; for (int i = 0; i < total; i++) { asm volatile ("mov %0,%%es" : : "rm" (GDT3(idx))); usleep(100); unsigned short es; asm volatile ("mov %%es,%0" : "=rm" (es)); asm volatile ("mov %0,%%es" : : "rm" (orig_es)); if (es != GDT3(idx)) { if (errors == 0) printf("[FAIL]\tES changed from 0x%hx to 0x%hx\n", GDT3(idx), es); errors++; } } if (errors) { printf("[FAIL]\tES was corrupted %d/%d times\n", errors, total); return 1; } else { printf("[OK]\tES was preserved\n"); return 0; } } ----- end es test ----- ----- begin gsbase test ----- /* * gsbase.c, a gsbase test * Copyright (c) 2014 Andy Lutomirski * GPL v2 */ static unsigned char *testptr, *testptr2; static unsigned char read_gs_testvals(void) { unsigned char ret; asm volatile ("movb %%gs:%1, %0" : "=r" (ret) : "m" (*testptr)); return ret; } int main() { int errors = 0; testptr = mmap((void *)0x200000000UL, 1, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0); if (testptr == MAP_FAILED) err(1, "mmap"); testptr2 = mmap((void *)0x300000000UL, 1, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0); if (testptr2 == MAP_FAILED) err(1, "mmap"); *testptr = 0; *testptr2 = 1; if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)testptr2 - (unsigned long)testptr) != 0) err(1, "ARCH_SET_GS"); usleep(100); if (read_gs_testvals() == 1) { printf("[OK]\tARCH_SET_GS worked\n"); } else { printf("[FAIL]\tARCH_SET_GS failed\n"); errors++; } asm volatile ("mov %0,%%gs" : : "r" (0)); if (read_gs_testvals() == 0) { printf("[OK]\tWriting 0 to gs worked\n"); } else { printf("[FAIL]\tWriting 0 to gs failed\n"); errors++; } usleep(100); if (read_gs_testvals() == 0) { printf("[OK]\tgsbase is still zero\n"); } else { printf("[FAIL]\tgsbase was corrupted\n"); errors++; } return errors == 0 ? 0 : 1; } ----- end gsbase test ----- Signed-off-by: Andy Lutomirski <luto@amacapital.net> Cc: <stable@vger.kernel.org> Cc: Andi Kleen <andi@firstfloor.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/509d27c9fec78217691c3dad91cec87e1006b34a.1418075657.git.luto@amacapital.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
0
void generate_field(std::ofstream& out, const t_field* field) { generate_field_name(out, field); generate_field_value(out, field); }
Safe
[ "CWE-20" ]
thrift
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
2.901023422639499e+38
4
THRIFT-3231 CPP: Limit recursion depth to 64 Client: cpp Patch: Ben Craig <bencraig@apache.org>
0
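The Thrift fix above caps recursion depth while decoding nested structures. Here is a generic C sketch of that defence; the limit of 64 matches the commit title, but the toy parser itself is invented for illustration.
```
#include <stdio.h>

#define MAX_RECURSION_DEPTH 64

/* Walk a nested value, refusing to recurse past a fixed depth so that
 * maliciously deep input cannot exhaust the stack. */
static int parse_nested(const char *p, int depth)
{
    if (depth > MAX_RECURSION_DEPTH)
        return -1;                      /* reject overly deep input */
    if (*p == '(')
        return parse_nested(p + 1, depth + 1);
    return 0;                           /* leaf reached */
}

int main(void)
{
    printf("%d\n", parse_nested("((()))", 0));  /* accepted */
    /* a string of hundreds of '(' would be rejected with -1 */
    return 0;
}
```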
static void ssl3_take_mac(SSL *s) { const char *sender; int slen; /* * If no new cipher setup return immediately: other functions will set * the appropriate error. */ if (s->s3->tmp.new_cipher == NULL) return; if (s->state & SSL_ST_CONNECT) { sender = s->method->ssl3_enc->server_finished_label; slen = s->method->ssl3_enc->server_finished_label_len; } else { sender = s->method->ssl3_enc->client_finished_label; slen = s->method->ssl3_enc->client_finished_label_len; } s->s3->tmp.peer_finish_md_len = s->method->ssl3_enc->final_finish_mac(s, sender, slen, s->s3->tmp.peer_finish_md); }
Safe
[ "CWE-125" ]
openssl
bb1a4866034255749ac578adb06a76335fc117b1
6.647140270980275e+37
23
Make message buffer slightly larger than message. Grow TLS/DTLS 16 bytes more than strictly necessary as a precaution against OOB reads. In most cases this will have no effect because the message buffer will be large enough already. Reviewed-by: Matt Caswell <matt@openssl.org> (cherry picked from commit 006a788c84e541c8920dd2ad85fb62b52185c519)
0
void file_checksum(const char *fname, const STRUCT_STAT *st_p, char *sum) { struct map_struct *buf; OFF_T i, len = st_p->st_size; md_context m; int32 remainder; int fd; memset(sum, 0, MAX_DIGEST_LEN); fd = do_open(fname, O_RDONLY, 0); if (fd == -1) return; buf = map_file(fd, len, MAX_MAP_SIZE, CSUM_CHUNK); switch (checksum_type) { case CSUM_MD5: md5_begin(&m); for (i = 0; i + CSUM_CHUNK <= len; i += CSUM_CHUNK) { md5_update(&m, (uchar *)map_ptr(buf, i, CSUM_CHUNK), CSUM_CHUNK); } remainder = (int32)(len - i); if (remainder > 0) md5_update(&m, (uchar *)map_ptr(buf, i, remainder), remainder); md5_result(&m, (uchar *)sum); break; case CSUM_MD4: case CSUM_MD4_OLD: case CSUM_MD4_BUSTED: case CSUM_MD4_ARCHAIC: mdfour_begin(&m); for (i = 0; i + CSUM_CHUNK <= len; i += CSUM_CHUNK) { mdfour_update(&m, (uchar *)map_ptr(buf, i, CSUM_CHUNK), CSUM_CHUNK); } /* Prior to version 27 an incorrect MD4 checksum was computed * by failing to call mdfour_tail() for block sizes that * are multiples of 64. This is fixed by calling mdfour_update() * even when there are no more bytes. */ remainder = (int32)(len - i); if (remainder > 0 || checksum_type > CSUM_MD4_BUSTED) mdfour_update(&m, (uchar *)map_ptr(buf, i, remainder), remainder); mdfour_result(&m, (uchar *)sum); break; default: rprintf(FERROR, "invalid checksum-choice for the --checksum option (%d)\n", checksum_type); exit_cleanup(RERR_UNSUPPORTED); } close(fd); unmap_file(buf); }
Safe
[ "CWE-354" ]
rsync
7b8a4ecd6ff9cdf4e5d3850ebf822f1e989255b3
2.9460110041619102e+38
60
Handle archaic checksums properly.
0
need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) { int hint; if (!intel_engine_has_timeslices(engine)) return false; if (list_is_last(&rq->sched.link, &engine->active.requests)) return false; hint = max(rq_prio(list_next_entry(rq, sched.link)), engine->execlists.queue_priority_hint); return hint >= effective_prio(rq); }
Safe
[]
linux
bc8a76a152c5f9ef3b48104154a65a68a8b76946
2.8332260239915367e+37
15
drm/i915/gen9: Clear residual context state on context switch Intel ID: PSIRT-TA-201910-001 CVEID: CVE-2019-14615 Intel GPU Hardware prior to Gen11 does not clear EU state during a context switch. This can result in information leakage between contexts. For Gen8 and Gen9, hardware provides a mechanism for fast cleardown of the EU state, by issuing a PIPE_CONTROL with bit 27 set. We can use this in a context batch buffer to explicitly cleardown the state on every context switch. As this workaround is already in place for gen8, we can borrow the code verbatim for Gen9. Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> Signed-off-by: Akeem G Abodunrin <akeem.g.abodunrin@intel.com> Cc: Kumar Valsan Prathap <prathap.kumar.valsan@intel.com> Cc: Chris Wilson <chris.p.wilson@intel.com> Cc: Balestrieri Francesco <francesco.balestrieri@intel.com> Cc: Bloomfield Jon <jon.bloomfield@intel.com> Cc: Dutt Sudeep <sudeep.dutt@intel.com>
0
int MAIN(int argc, char **argv) { int off=0, clr = 0; SSL *con=NULL,*con2=NULL; X509_STORE *store = NULL; int s,k,width,state=0; char *cbuf=NULL,*sbuf=NULL,*mbuf=NULL; int cbuf_len,cbuf_off; int sbuf_len,sbuf_off; fd_set readfds,writefds; short port=PORT; int full_log=1; char *host=SSL_HOST_NAME; char *cert_file=NULL,*key_file=NULL; int cert_format = FORMAT_PEM, key_format = FORMAT_PEM; char *passarg = NULL, *pass = NULL; X509 *cert = NULL; EVP_PKEY *key = NULL; char *CApath=NULL,*CAfile=NULL,*cipher=NULL; int reconnect=0,badop=0,verify=SSL_VERIFY_NONE,bugs=0; int crlf=0; int write_tty,read_tty,write_ssl,read_ssl,tty_on,ssl_pending; SSL_CTX *ctx=NULL; int ret=1,in_init=1,i,nbio_test=0; int starttls_proto = PROTO_OFF; int prexit = 0, vflags = 0; SSL_METHOD *meth=NULL; #ifdef sock_type #undef sock_type #endif int sock_type=SOCK_STREAM; BIO *sbio; char *inrand=NULL; int mbuf_len=0; struct timeval timeout, *timeoutp; #ifndef OPENSSL_NO_ENGINE char *engine_id=NULL; char *ssl_client_engine_id=NULL; ENGINE *ssl_client_engine=NULL; #endif ENGINE *e=NULL; #if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS) || defined(OPENSSL_SYS_NETWARE) struct timeval tv; #endif #ifndef OPENSSL_NO_TLSEXT char *servername = NULL; tlsextctx tlsextcbp = {NULL,0}; #endif char *sess_in = NULL; char *sess_out = NULL; struct sockaddr peer; int peerlen = sizeof(peer); int fallback_scsv = 0; int enable_timeouts = 0 ; long socket_mtu = 0; #ifndef OPENSSL_NO_JPAKE char *jpake_secret = NULL; #endif meth=SSLv23_client_method(); apps_startup(); c_Pause=0; c_quiet=0; c_ign_eof=0; c_debug=0; c_msg=0; c_showcerts=0; if (bio_err == NULL) bio_err=BIO_new_fp(stderr,BIO_NOCLOSE); if (!load_config(bio_err, NULL)) goto end; if ( ((cbuf=OPENSSL_malloc(BUFSIZZ)) == NULL) || ((sbuf=OPENSSL_malloc(BUFSIZZ)) == NULL) || ((mbuf=OPENSSL_malloc(BUFSIZZ)) == NULL)) { BIO_printf(bio_err,"out of memory\n"); goto end; } verify_depth=0; verify_error=X509_V_OK; #ifdef FIONBIO c_nbio=0; #endif argc--; argv++; while (argc >= 1) { if (strcmp(*argv,"-host") == 0) { if (--argc < 1) goto bad; host= *(++argv); } else if (strcmp(*argv,"-port") == 0) { if (--argc < 1) goto bad; port=atoi(*(++argv)); if (port == 0) goto bad; } else if (strcmp(*argv,"-connect") == 0) { if (--argc < 1) goto bad; if (!extract_host_port(*(++argv),&host,NULL,&port)) goto bad; } else if (strcmp(*argv,"-verify") == 0) { verify=SSL_VERIFY_PEER; if (--argc < 1) goto bad; verify_depth=atoi(*(++argv)); BIO_printf(bio_err,"verify depth is %d\n",verify_depth); } else if (strcmp(*argv,"-cert") == 0) { if (--argc < 1) goto bad; cert_file= *(++argv); } else if (strcmp(*argv,"-sess_out") == 0) { if (--argc < 1) goto bad; sess_out = *(++argv); } else if (strcmp(*argv,"-sess_in") == 0) { if (--argc < 1) goto bad; sess_in = *(++argv); } else if (strcmp(*argv,"-certform") == 0) { if (--argc < 1) goto bad; cert_format = str2fmt(*(++argv)); } else if (strcmp(*argv,"-crl_check") == 0) vflags |= X509_V_FLAG_CRL_CHECK; else if (strcmp(*argv,"-crl_check_all") == 0) vflags |= X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL; else if (strcmp(*argv,"-prexit") == 0) prexit=1; else if (strcmp(*argv,"-crlf") == 0) crlf=1; else if (strcmp(*argv,"-quiet") == 0) { c_quiet=1; c_ign_eof=1; } else if (strcmp(*argv,"-ign_eof") == 0) c_ign_eof=1; else if (strcmp(*argv,"-no_ign_eof") == 0) c_ign_eof=0; else if (strcmp(*argv,"-pause") == 0) c_Pause=1; else if (strcmp(*argv,"-debug") == 0) c_debug=1; #ifndef OPENSSL_NO_TLSEXT else if (strcmp(*argv,"-tlsextdebug") == 0) c_tlsextdebug=1; else if 
(strcmp(*argv,"-status") == 0) c_status_req=1; #endif #ifdef WATT32 else if (strcmp(*argv,"-wdebug") == 0) dbug_init(); #endif else if (strcmp(*argv,"-msg") == 0) c_msg=1; else if (strcmp(*argv,"-showcerts") == 0) c_showcerts=1; else if (strcmp(*argv,"-nbio_test") == 0) nbio_test=1; else if (strcmp(*argv,"-state") == 0) state=1; #ifndef OPENSSL_NO_SSL2 else if (strcmp(*argv,"-ssl2") == 0) meth=SSLv2_client_method(); #endif #ifndef OPENSSL_NO_SSL3 else if (strcmp(*argv,"-ssl3") == 0) meth=SSLv3_client_method(); #endif #ifndef OPENSSL_NO_TLS1 else if (strcmp(*argv,"-tls1") == 0) meth=TLSv1_client_method(); #endif #ifndef OPENSSL_NO_DTLS1 else if (strcmp(*argv,"-dtls1") == 0) { meth=DTLSv1_client_method(); sock_type=SOCK_DGRAM; } else if (strcmp(*argv,"-fallback_scsv") == 0) { fallback_scsv = 1; } else if (strcmp(*argv,"-timeout") == 0) enable_timeouts=1; else if (strcmp(*argv,"-mtu") == 0) { if (--argc < 1) goto bad; socket_mtu = atol(*(++argv)); } #endif else if (strcmp(*argv,"-bugs") == 0) bugs=1; else if (strcmp(*argv,"-keyform") == 0) { if (--argc < 1) goto bad; key_format = str2fmt(*(++argv)); } else if (strcmp(*argv,"-pass") == 0) { if (--argc < 1) goto bad; passarg = *(++argv); } else if (strcmp(*argv,"-key") == 0) { if (--argc < 1) goto bad; key_file= *(++argv); } else if (strcmp(*argv,"-reconnect") == 0) { reconnect=5; } else if (strcmp(*argv,"-CApath") == 0) { if (--argc < 1) goto bad; CApath= *(++argv); } else if (strcmp(*argv,"-CAfile") == 0) { if (--argc < 1) goto bad; CAfile= *(++argv); } else if (strcmp(*argv,"-no_tls1") == 0) off|=SSL_OP_NO_TLSv1; else if (strcmp(*argv,"-no_ssl3") == 0) off|=SSL_OP_NO_SSLv3; else if (strcmp(*argv,"-no_ssl2") == 0) off|=SSL_OP_NO_SSLv2; #ifndef OPENSSL_NO_TLSEXT else if (strcmp(*argv,"-no_ticket") == 0) { off|=SSL_OP_NO_TICKET; } #endif else if (strcmp(*argv,"-serverpref") == 0) off|=SSL_OP_CIPHER_SERVER_PREFERENCE; else if (strcmp(*argv,"-legacy_renegotiation") == 0) off|=SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION; else if (strcmp(*argv,"-legacy_server_connect") == 0) { off|=SSL_OP_LEGACY_SERVER_CONNECT; } else if (strcmp(*argv,"-no_legacy_server_connect") == 0) { clr|=SSL_OP_LEGACY_SERVER_CONNECT; } else if (strcmp(*argv,"-cipher") == 0) { if (--argc < 1) goto bad; cipher= *(++argv); } #ifdef FIONBIO else if (strcmp(*argv,"-nbio") == 0) { c_nbio=1; } #endif else if (strcmp(*argv,"-starttls") == 0) { if (--argc < 1) goto bad; ++argv; if (strcmp(*argv,"smtp") == 0) starttls_proto = PROTO_SMTP; else if (strcmp(*argv,"pop3") == 0) starttls_proto = PROTO_POP3; else if (strcmp(*argv,"imap") == 0) starttls_proto = PROTO_IMAP; else if (strcmp(*argv,"ftp") == 0) starttls_proto = PROTO_FTP; else if (strcmp(*argv, "xmpp") == 0) starttls_proto = PROTO_XMPP; else goto bad; } #ifndef OPENSSL_NO_ENGINE else if (strcmp(*argv,"-engine") == 0) { if (--argc < 1) goto bad; engine_id = *(++argv); } else if (strcmp(*argv,"-ssl_client_engine") == 0) { if (--argc < 1) goto bad; ssl_client_engine_id = *(++argv); } #endif else if (strcmp(*argv,"-rand") == 0) { if (--argc < 1) goto bad; inrand= *(++argv); } #ifndef OPENSSL_NO_TLSEXT else if (strcmp(*argv,"-servername") == 0) { if (--argc < 1) goto bad; servername= *(++argv); /* meth=TLSv1_client_method(); */ } #endif #ifndef OPENSSL_NO_JPAKE else if (strcmp(*argv,"-jpake") == 0) { if (--argc < 1) goto bad; jpake_secret = *++argv; } #endif else { BIO_printf(bio_err,"unknown option %s\n",*argv); badop=1; break; } argc--; argv++; } if (badop) { bad: sc_usage(); goto end; } OpenSSL_add_ssl_algorithms(); 
SSL_load_error_strings(); #ifndef OPENSSL_NO_ENGINE e = setup_engine(bio_err, engine_id, 1); if (ssl_client_engine_id) { ssl_client_engine = ENGINE_by_id(ssl_client_engine_id); if (!ssl_client_engine) { BIO_printf(bio_err, "Error getting client auth engine\n"); goto end; } } #endif if (!app_passwd(bio_err, passarg, NULL, &pass, NULL)) { BIO_printf(bio_err, "Error getting password\n"); goto end; } if (key_file == NULL) key_file = cert_file; if (key_file) { key = load_key(bio_err, key_file, key_format, 0, pass, e, "client certificate private key file"); if (!key) { ERR_print_errors(bio_err); goto end; } } if (cert_file) { cert = load_cert(bio_err,cert_file,cert_format, NULL, e, "client certificate file"); if (!cert) { ERR_print_errors(bio_err); goto end; } } if (!app_RAND_load_file(NULL, bio_err, 1) && inrand == NULL && !RAND_status()) { BIO_printf(bio_err,"warning, not much extra random data, consider using the -rand option\n"); } if (inrand != NULL) BIO_printf(bio_err,"%ld semi-random bytes loaded\n", app_RAND_load_files(inrand)); if (bio_c_out == NULL) { if (c_quiet && !c_debug && !c_msg) { bio_c_out=BIO_new(BIO_s_null()); } else { if (bio_c_out == NULL) bio_c_out=BIO_new_fp(stdout,BIO_NOCLOSE); } } ctx=SSL_CTX_new(meth); if (ctx == NULL) { ERR_print_errors(bio_err); goto end; } #ifndef OPENSSL_NO_ENGINE if (ssl_client_engine) { if (!SSL_CTX_set_client_cert_engine(ctx, ssl_client_engine)) { BIO_puts(bio_err, "Error setting client auth engine\n"); ERR_print_errors(bio_err); ENGINE_free(ssl_client_engine); goto end; } ENGINE_free(ssl_client_engine); } #endif if (bugs) SSL_CTX_set_options(ctx,SSL_OP_ALL|off); else SSL_CTX_set_options(ctx,off); if (clr) SSL_CTX_clear_options(ctx, clr); /* DTLS: partial reads end up discarding unread UDP bytes :-( * Setting read ahead solves this problem. 
*/ if (sock_type == SOCK_DGRAM) SSL_CTX_set_read_ahead(ctx, 1); if (state) SSL_CTX_set_info_callback(ctx,apps_ssl_info_callback); if (cipher != NULL) if(!SSL_CTX_set_cipher_list(ctx,cipher)) { BIO_printf(bio_err,"error setting cipher list\n"); ERR_print_errors(bio_err); goto end; } #if 0 else SSL_CTX_set_cipher_list(ctx,getenv("SSL_CIPHER")); #endif SSL_CTX_set_verify(ctx,verify,verify_callback); if (!set_cert_key_stuff(ctx,cert,key)) goto end; if ((!SSL_CTX_load_verify_locations(ctx,CAfile,CApath)) || (!SSL_CTX_set_default_verify_paths(ctx))) { /* BIO_printf(bio_err,"error setting default verify locations\n"); */ ERR_print_errors(bio_err); /* goto end; */ } store = SSL_CTX_get_cert_store(ctx); X509_STORE_set_flags(store, vflags); #ifndef OPENSSL_NO_TLSEXT if (servername != NULL) { tlsextcbp.biodebug = bio_err; SSL_CTX_set_tlsext_servername_callback(ctx, ssl_servername_cb); SSL_CTX_set_tlsext_servername_arg(ctx, &tlsextcbp); } #endif con=SSL_new(ctx); if (sess_in) { SSL_SESSION *sess; BIO *stmp = BIO_new_file(sess_in, "r"); if (!stmp) { BIO_printf(bio_err, "Can't open session file %s\n", sess_in); ERR_print_errors(bio_err); goto end; } sess = PEM_read_bio_SSL_SESSION(stmp, NULL, 0, NULL); BIO_free(stmp); if (!sess) { BIO_printf(bio_err, "Can't open session file %s\n", sess_in); ERR_print_errors(bio_err); goto end; } SSL_set_session(con, sess); SSL_SESSION_free(sess); } if (fallback_scsv) SSL_set_mode(con, SSL_MODE_SEND_FALLBACK_SCSV); #ifndef OPENSSL_NO_TLSEXT if (servername != NULL) { if (!SSL_set_tlsext_host_name(con,servername)) { BIO_printf(bio_err,"Unable to set TLS servername extension.\n"); ERR_print_errors(bio_err); goto end; } } #endif #ifndef OPENSSL_NO_KRB5 if (con && (con->kssl_ctx = kssl_ctx_new()) != NULL) { kssl_ctx_setstring(con->kssl_ctx, KSSL_SERVER, host); } #endif /* OPENSSL_NO_KRB5 */ /* SSL_set_cipher_list(con,"RC4-MD5"); */ re_start: if (init_client(&s,host,port,sock_type) == 0) { BIO_printf(bio_err,"connect:errno=%d\n",get_last_socket_error()); SHUTDOWN(s); goto end; } BIO_printf(bio_c_out,"CONNECTED(%08X)\n",s); #ifdef FIONBIO if (c_nbio) { unsigned long l=1; BIO_printf(bio_c_out,"turning on non blocking io\n"); if (BIO_socket_ioctl(s,FIONBIO,&l) < 0) { ERR_print_errors(bio_err); goto end; } } #endif if (c_Pause & 0x01) con->debug=1; if ( SSL_version(con) == DTLS1_VERSION) { sbio=BIO_new_dgram(s,BIO_NOCLOSE); if (getsockname(s, &peer, (void *)&peerlen) < 0) { BIO_printf(bio_err, "getsockname:errno=%d\n", get_last_socket_error()); SHUTDOWN(s); goto end; } (void)BIO_ctrl_set_connected(sbio, 1, &peer); if ( enable_timeouts) { timeout.tv_sec = 0; timeout.tv_usec = DGRAM_RCV_TIMEOUT; BIO_ctrl(sbio, BIO_CTRL_DGRAM_SET_RECV_TIMEOUT, 0, &timeout); timeout.tv_sec = 0; timeout.tv_usec = DGRAM_SND_TIMEOUT; BIO_ctrl(sbio, BIO_CTRL_DGRAM_SET_SEND_TIMEOUT, 0, &timeout); } if (socket_mtu > 28) { SSL_set_options(con, SSL_OP_NO_QUERY_MTU); SSL_set_mtu(con, socket_mtu - 28); } else /* want to do MTU discovery */ BIO_ctrl(sbio, BIO_CTRL_DGRAM_MTU_DISCOVER, 0, NULL); } else sbio=BIO_new_socket(s,BIO_NOCLOSE); if (nbio_test) { BIO *test; test=BIO_new(BIO_f_nbio_test()); sbio=BIO_push(test,sbio); } if (c_debug) { con->debug=1; BIO_set_callback(sbio,bio_dump_callback); BIO_set_callback_arg(sbio,(char *)bio_c_out); } if (c_msg) { SSL_set_msg_callback(con, msg_cb); SSL_set_msg_callback_arg(con, bio_c_out); } #ifndef OPENSSL_NO_TLSEXT if (c_tlsextdebug) { SSL_set_tlsext_debug_callback(con, tlsext_cb); SSL_set_tlsext_debug_arg(con, bio_c_out); } if (c_status_req) { 
SSL_set_tlsext_status_type(con, TLSEXT_STATUSTYPE_ocsp); SSL_CTX_set_tlsext_status_cb(ctx, ocsp_resp_cb); SSL_CTX_set_tlsext_status_arg(ctx, bio_c_out); #if 0 { STACK_OF(OCSP_RESPID) *ids = sk_OCSP_RESPID_new_null(); OCSP_RESPID *id = OCSP_RESPID_new(); id->value.byKey = ASN1_OCTET_STRING_new(); id->type = V_OCSP_RESPID_KEY; ASN1_STRING_set(id->value.byKey, "Hello World", -1); sk_OCSP_RESPID_push(ids, id); SSL_set_tlsext_status_ids(con, ids); } #endif } #endif #ifndef OPENSSL_NO_JPAKE if (jpake_secret) jpake_client_auth(bio_c_out, sbio, jpake_secret); #endif SSL_set_bio(con,sbio,sbio); SSL_set_connect_state(con); /* ok, lets connect */ width=SSL_get_fd(con)+1; read_tty=1; write_tty=0; tty_on=0; read_ssl=1; write_ssl=1; cbuf_len=0; cbuf_off=0; sbuf_len=0; sbuf_off=0; /* This is an ugly hack that does a lot of assumptions */ /* We do have to handle multi-line responses which may come in a single packet or not. We therefore have to use BIO_gets() which does need a buffering BIO. So during the initial chitchat we do push a buffering BIO into the chain that is removed again later on to not disturb the rest of the s_client operation. */ if (starttls_proto == PROTO_SMTP) { int foundit=0; BIO *fbio = BIO_new(BIO_f_buffer()); BIO_push(fbio, sbio); /* wait for multi-line response to end from SMTP */ do { mbuf_len = BIO_gets(fbio,mbuf,BUFSIZZ); } while (mbuf_len>3 && mbuf[3]=='-'); /* STARTTLS command requires EHLO... */ BIO_printf(fbio,"EHLO openssl.client.net\r\n"); (void)BIO_flush(fbio); /* wait for multi-line response to end EHLO SMTP response */ do { mbuf_len = BIO_gets(fbio,mbuf,BUFSIZZ); if (strstr(mbuf,"STARTTLS")) foundit=1; } while (mbuf_len>3 && mbuf[3]=='-'); (void)BIO_flush(fbio); BIO_pop(fbio); BIO_free(fbio); if (!foundit) BIO_printf(bio_err, "didn't found starttls in server response," " try anyway...\n"); BIO_printf(sbio,"STARTTLS\r\n"); BIO_read(sbio,sbuf,BUFSIZZ); } else if (starttls_proto == PROTO_POP3) { BIO_read(sbio,mbuf,BUFSIZZ); BIO_printf(sbio,"STLS\r\n"); BIO_read(sbio,sbuf,BUFSIZZ); } else if (starttls_proto == PROTO_IMAP) { int foundit=0; BIO *fbio = BIO_new(BIO_f_buffer()); BIO_push(fbio, sbio); BIO_gets(fbio,mbuf,BUFSIZZ); /* STARTTLS command requires CAPABILITY... */ BIO_printf(fbio,". CAPABILITY\r\n"); (void)BIO_flush(fbio); /* wait for multi-line CAPABILITY response */ do { mbuf_len = BIO_gets(fbio,mbuf,BUFSIZZ); if (strstr(mbuf,"STARTTLS")) foundit=1; } while (mbuf_len>3 && mbuf[0]!='.'); (void)BIO_flush(fbio); BIO_pop(fbio); BIO_free(fbio); if (!foundit) BIO_printf(bio_err, "didn't found STARTTLS in server response," " try anyway...\n"); BIO_printf(sbio,". 
STARTTLS\r\n"); BIO_read(sbio,sbuf,BUFSIZZ); } else if (starttls_proto == PROTO_FTP) { BIO *fbio = BIO_new(BIO_f_buffer()); BIO_push(fbio, sbio); /* wait for multi-line response to end from FTP */ do { mbuf_len = BIO_gets(fbio,mbuf,BUFSIZZ); } while (mbuf_len>3 && mbuf[3]=='-'); (void)BIO_flush(fbio); BIO_pop(fbio); BIO_free(fbio); BIO_printf(sbio,"AUTH TLS\r\n"); BIO_read(sbio,sbuf,BUFSIZZ); } if (starttls_proto == PROTO_XMPP) { int seen = 0; BIO_printf(sbio,"<stream:stream " "xmlns:stream='http://etherx.jabber.org/streams' " "xmlns='jabber:client' to='%s' version='1.0'>", host); seen = BIO_read(sbio,mbuf,BUFSIZZ); mbuf[seen] = 0; while (!strstr(mbuf, "<starttls xmlns='urn:ietf:params:xml:ns:xmpp-tls'")) { if (strstr(mbuf, "/stream:features>")) goto shut; seen = BIO_read(sbio,mbuf,BUFSIZZ); mbuf[seen] = 0; } BIO_printf(sbio, "<starttls xmlns='urn:ietf:params:xml:ns:xmpp-tls'/>"); seen = BIO_read(sbio,sbuf,BUFSIZZ); sbuf[seen] = 0; if (!strstr(sbuf, "<proceed")) goto shut; mbuf[0] = 0; } for (;;) { FD_ZERO(&readfds); FD_ZERO(&writefds); if ((SSL_version(con) == DTLS1_VERSION) && DTLSv1_get_timeout(con, &timeout)) timeoutp = &timeout; else timeoutp = NULL; if (SSL_in_init(con) && !SSL_total_renegotiations(con)) { in_init=1; tty_on=0; } else { tty_on=1; if (in_init) { in_init=0; if (sess_out) { BIO *stmp = BIO_new_file(sess_out, "w"); if (stmp) { PEM_write_bio_SSL_SESSION(stmp, SSL_get_session(con)); BIO_free(stmp); } else BIO_printf(bio_err, "Error writing session file %s\n", sess_out); } print_stuff(bio_c_out,con,full_log); if (full_log > 0) full_log--; if (starttls_proto) { BIO_printf(bio_err,"%s",mbuf); /* We don't need to know any more */ starttls_proto = PROTO_OFF; } if (reconnect) { reconnect--; BIO_printf(bio_c_out,"drop connection and then reconnect\n"); SSL_shutdown(con); SSL_set_connect_state(con); SHUTDOWN(SSL_get_fd(con)); goto re_start; } } } ssl_pending = read_ssl && SSL_pending(con); if (!ssl_pending) { #if !defined(OPENSSL_SYS_WINDOWS) && !defined(OPENSSL_SYS_MSDOS) && !defined(OPENSSL_SYS_NETWARE) if (tty_on) { if (read_tty) FD_SET(fileno(stdin),&readfds); if (write_tty) FD_SET(fileno(stdout),&writefds); } if (read_ssl) FD_SET(SSL_get_fd(con),&readfds); if (write_ssl) FD_SET(SSL_get_fd(con),&writefds); #else if(!tty_on || !write_tty) { if (read_ssl) FD_SET(SSL_get_fd(con),&readfds); if (write_ssl) FD_SET(SSL_get_fd(con),&writefds); } #endif /* printf("mode tty(%d %d%d) ssl(%d%d)\n", tty_on,read_tty,write_tty,read_ssl,write_ssl);*/ /* Note: under VMS with SOCKETSHR the second parameter * is currently of type (int *) whereas under other * systems it is (void *) if you don't have a cast it * will choke the compiler: if you do have a cast then * you can either go for (int *) or (void *). */ #if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS) /* Under Windows/DOS we make the assumption that we can * always write to the tty: therefore if we need to * write to the tty we just fall through. Otherwise * we timeout the select every second and see if there * are any keypresses. Note: this is a hack, in a proper * Windows application we wouldn't do this. 
*/ i=0; if(!write_tty) { if(read_tty) { tv.tv_sec = 1; tv.tv_usec = 0; i=select(width,(void *)&readfds,(void *)&writefds, NULL,&tv); #if defined(OPENSSL_SYS_WINCE) || defined(OPENSSL_SYS_MSDOS) if(!i && (!_kbhit() || !read_tty) ) continue; #else if(!i && (!((_kbhit()) || (WAIT_OBJECT_0 == WaitForSingleObject(GetStdHandle(STD_INPUT_HANDLE), 0))) || !read_tty) ) continue; #endif } else i=select(width,(void *)&readfds,(void *)&writefds, NULL,timeoutp); } #elif defined(OPENSSL_SYS_NETWARE) if(!write_tty) { if(read_tty) { tv.tv_sec = 1; tv.tv_usec = 0; i=select(width,(void *)&readfds,(void *)&writefds, NULL,&tv); } else i=select(width,(void *)&readfds,(void *)&writefds, NULL,timeoutp); } #else i=select(width,(void *)&readfds,(void *)&writefds, NULL,timeoutp); #endif if ( i < 0) { BIO_printf(bio_err,"bad select %d\n", get_last_socket_error()); goto shut; /* goto end; */ } } if ((SSL_version(con) == DTLS1_VERSION) && DTLSv1_handle_timeout(con) > 0) { BIO_printf(bio_err,"TIMEOUT occured\n"); } if (!ssl_pending && FD_ISSET(SSL_get_fd(con),&writefds)) { k=SSL_write(con,&(cbuf[cbuf_off]), (unsigned int)cbuf_len); switch (SSL_get_error(con,k)) { case SSL_ERROR_NONE: cbuf_off+=k; cbuf_len-=k; if (k <= 0) goto end; /* we have done a write(con,NULL,0); */ if (cbuf_len <= 0) { read_tty=1; write_ssl=0; } else /* if (cbuf_len > 0) */ { read_tty=0; write_ssl=1; } break; case SSL_ERROR_WANT_WRITE: BIO_printf(bio_c_out,"write W BLOCK\n"); write_ssl=1; read_tty=0; break; case SSL_ERROR_WANT_READ: BIO_printf(bio_c_out,"write R BLOCK\n"); write_tty=0; read_ssl=1; write_ssl=0; break; case SSL_ERROR_WANT_X509_LOOKUP: BIO_printf(bio_c_out,"write X BLOCK\n"); break; case SSL_ERROR_ZERO_RETURN: if (cbuf_len != 0) { BIO_printf(bio_c_out,"shutdown\n"); goto shut; } else { read_tty=1; write_ssl=0; break; } case SSL_ERROR_SYSCALL: if ((k != 0) || (cbuf_len != 0)) { BIO_printf(bio_err,"write:errno=%d\n", get_last_socket_error()); goto shut; } else { read_tty=1; write_ssl=0; } break; case SSL_ERROR_SSL: ERR_print_errors(bio_err); goto shut; } } #if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS) || defined(OPENSSL_SYS_NETWARE) /* Assume Windows/DOS can always write */ else if (!ssl_pending && write_tty) #else else if (!ssl_pending && FD_ISSET(fileno(stdout),&writefds)) #endif { #ifdef CHARSET_EBCDIC ascii2ebcdic(&(sbuf[sbuf_off]),&(sbuf[sbuf_off]),sbuf_len); #endif i=write(fileno(stdout),&(sbuf[sbuf_off]),sbuf_len); if (i <= 0) { BIO_printf(bio_c_out,"DONE\n"); goto shut; /* goto end; */ } sbuf_len-=i;; sbuf_off+=i; if (sbuf_len <= 0) { read_ssl=1; write_tty=0; } } else if (ssl_pending || FD_ISSET(SSL_get_fd(con),&readfds)) { #ifdef RENEG { static int iiii; if (++iiii == 52) { SSL_renegotiate(con); iiii=0; } } #endif #if 1 k=SSL_read(con,sbuf,1024 /* BUFSIZZ */ ); #else /* Demo for pending and peek :-) */ k=SSL_read(con,sbuf,16); { char zbuf[10240]; printf("read=%d pending=%d peek=%d\n",k,SSL_pending(con),SSL_peek(con,zbuf,10240)); } #endif switch (SSL_get_error(con,k)) { case SSL_ERROR_NONE: if (k <= 0) goto end; sbuf_off=0; sbuf_len=k; read_ssl=0; write_tty=1; break; case SSL_ERROR_WANT_WRITE: BIO_printf(bio_c_out,"read W BLOCK\n"); write_ssl=1; read_tty=0; break; case SSL_ERROR_WANT_READ: BIO_printf(bio_c_out,"read R BLOCK\n"); write_tty=0; read_ssl=1; if ((read_tty == 0) && (write_ssl == 0)) write_ssl=1; break; case SSL_ERROR_WANT_X509_LOOKUP: BIO_printf(bio_c_out,"read X BLOCK\n"); break; case SSL_ERROR_SYSCALL: BIO_printf(bio_err,"read:errno=%d\n",get_last_socket_error()); goto shut; case 
SSL_ERROR_ZERO_RETURN: BIO_printf(bio_c_out,"closed\n"); goto shut; case SSL_ERROR_SSL: ERR_print_errors(bio_err); goto shut; /* break; */ } } #if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS) #if defined(OPENSSL_SYS_WINCE) || defined(OPENSSL_SYS_MSDOS) else if (_kbhit()) #else else if ((_kbhit()) || (WAIT_OBJECT_0 == WaitForSingleObject(GetStdHandle(STD_INPUT_HANDLE), 0))) #endif #elif defined (OPENSSL_SYS_NETWARE) else if (_kbhit()) #else else if (FD_ISSET(fileno(stdin),&readfds)) #endif { if (crlf) { int j, lf_num; i=read(fileno(stdin),cbuf,BUFSIZZ/2); lf_num = 0; /* both loops are skipped when i <= 0 */ for (j = 0; j < i; j++) if (cbuf[j] == '\n') lf_num++; for (j = i-1; j >= 0; j--) { cbuf[j+lf_num] = cbuf[j]; if (cbuf[j] == '\n') { lf_num--; i++; cbuf[j+lf_num] = '\r'; } } assert(lf_num == 0); } else i=read(fileno(stdin),cbuf,BUFSIZZ); if ((!c_ign_eof) && ((i <= 0) || (cbuf[0] == 'Q'))) { BIO_printf(bio_err,"DONE\n"); goto shut; } if ((!c_ign_eof) && (cbuf[0] == 'R')) { BIO_printf(bio_err,"RENEGOTIATING\n"); SSL_renegotiate(con); cbuf_len=0; } else { cbuf_len=i; cbuf_off=0; #ifdef CHARSET_EBCDIC ebcdic2ascii(cbuf, cbuf, i); #endif } write_ssl=1; read_tty=0; } } shut: SSL_shutdown(con); SHUTDOWN(SSL_get_fd(con)); ret=0; end: if(prexit) print_stuff(bio_c_out,con,1); if (con != NULL) SSL_free(con); if (con2 != NULL) SSL_free(con2); if (ctx != NULL) SSL_CTX_free(ctx); if (cert) X509_free(cert); if (key) EVP_PKEY_free(key); if (pass) OPENSSL_free(pass); if (cbuf != NULL) { OPENSSL_cleanse(cbuf,BUFSIZZ); OPENSSL_free(cbuf); } if (sbuf != NULL) { OPENSSL_cleanse(sbuf,BUFSIZZ); OPENSSL_free(sbuf); } if (mbuf != NULL) { OPENSSL_cleanse(mbuf,BUFSIZZ); OPENSSL_free(mbuf); } if (bio_c_out != NULL) { BIO_free(bio_c_out); bio_c_out=NULL; } apps_shutdown(); OPENSSL_EXIT(ret); }
Safe
[ "CWE-310" ]
openssl
c6a876473cbff0fd323c8abcaace98ee2d21863d
2.3462041075635662e+38
1132
Support TLS_FALLBACK_SCSV. Reviewed-by: Stephen Henson <steve@openssl.org>
0
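The crlf branch in the s_client record above expands bare LF to CRLF in place by first counting newlines and then shifting bytes from the tail of the buffer backwards. A minimal standalone sketch of that backward-shift technique, assuming the caller reserves room for the extra carriage returns (buffer name and sizes are illustrative, not taken from s_client):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Expand every '\n' in buf[0..len) to "\r\n" in place.
 * The caller must guarantee buf can hold len + (number of '\n') bytes.
 * Returns the new length. */
static int expand_lf_to_crlf(char *buf, int len)
{
    int j, lf_num = 0;

    for (j = 0; j < len; j++)
        if (buf[j] == '\n')
            lf_num++;

    /* Walk backwards so already-shifted bytes are never overwritten. */
    for (j = len - 1; j >= 0; j--) {
        buf[j + lf_num] = buf[j];
        if (buf[j] == '\n') {
            lf_num--;
            len++;
            buf[j + lf_num] = '\r';
        }
    }
    assert(lf_num == 0);
    return len;
}

int main(void)
{
    char buf[64] = "GET /\nHost: example\n";
    int len = expand_lf_to_crlf(buf, (int)strlen(buf));

    fwrite(buf, 1, len, stdout);
    return 0;
}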
Item_string(THD *thd, const char *name_par, const char *str, uint length, CHARSET_INFO *cs, Derivation dv, uint repertoire): Item_basic_constant(thd) { str_value.set_or_copy_aligned(str, length, cs); fix_from_value(dv, Metadata(&str_value, repertoire)); set_name(thd, name_par, 0, system_charset_info); }
Safe
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
7.6617826355763435e+37
8
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <sergey.petrunya@mariadb.com>
0
PHPAPI void php_add_session_var(zend_string *name) /* {{{ */ { IF_SESSION_VARS() { zval *sess_var = Z_REFVAL(PS(http_session_vars)); SEPARATE_ARRAY(sess_var); if (!zend_hash_exists(Z_ARRVAL_P(sess_var), name)) { zval empty_var; ZVAL_NULL(&empty_var); zend_hash_update(Z_ARRVAL_P(sess_var), name, &empty_var); } } }
Safe
[ "CWE-476" ]
php-src
d76f7c6c636b8240e06a1fa29eebb98ad005008a
1.992364882874259e+38
12
Fix bug #79221 - Null Pointer Dereference in PHP Session Upload Progress
0
static MonoMethodSignature* method_builder_to_signature (MonoImage *image, MonoReflectionMethodBuilder *method) { MonoMethodSignature *sig; sig = parameters_to_signature (image, method->parameters); sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1; sig->ret = method->rtype? mono_reflection_type_get_handle ((MonoReflectionType*)method->rtype): &mono_defaults.void_class->byval_arg; sig->generic_param_count = method->generic_params ? mono_array_length (method->generic_params) : 0; return sig;
Safe
[ "CWE-20" ]
mono
4905ef1130feb26c3150b28b97e4a96752e0d399
8.974250733407605e+37
9
Handle invalid instantiation of generic methods. * verify.c: Add new function to internal verifier API to check method instantiations. * reflection.c (mono_reflection_bind_generic_method_parameters): Check the instantiation before returning it. Fixes #655847
0
dummy_symbol_get (location loc) { /* Incremented for each generated symbol. */ static int dummy_count = 0; char buf[32]; int len = snprintf (buf, sizeof buf, "$@%d", ++dummy_count); assure (len < sizeof buf); symbol *sym = symbol_get (buf, loc); sym->content->class = nterm_sym; sym->content->number = nnterms++; return sym; }
Vulnerable
[]
bison
b7aab2dbad43aaf14eebe78d54aafa245a000988
2.678319775850913e+38
12
fix: crash when redefining the EOF token Reported by Agency for Defense Development. https://lists.gnu.org/r/bug-bison/2020-08/msg00008.html On an empty such as %token FOO BAR FOO 0 %% input: %empty we crash because when we find FOO 0, we decrement ntokens (since FOO was discovered to be EOF, which is already known to be a token, so we increment ntokens for it, and need to cancel this). This "works well" when EOF is properly defined in one go, but here it is first defined and later only assign token code 0. In the meanwhile BAR was given the token number that we just decremented. To fix this, assign symbol numbers after parsing, not during parsing, so that we also saw all the explicit token codes. To maintain the current numbers (I'd like to keep no difference in the output, not just equivalence), we need to make sure the symbols are numbered in the same order: that of appearance in the source file. So we need the locations to be correct, which was almost the case, except for nterms that appeared several times as LHS (i.e., several times as "foo: ..."). Fixing the use of location_of_lhs sufficed (it appears it was intended for this use, but its implementation was unfinished: it was always set to "false" only). * src/symtab.c (symbol_location_as_lhs_set): Update location_of_lhs. (symbol_code_set): Remove broken hack that decremented ntokens. (symbol_class_set, dummy_symbol_get): Don't set number, ntokens and nnterms. (symbol_check_defined): Do it. (symbols): Don't count nsyms here. Actually, don't count nsyms at all: let it be done in... * src/reader.c (check_and_convert_grammar): here. Define nsyms from ntokens and nnterms after parsing. * tests/input.at (EOF redeclared): New. * examples/c/bistromathic/bistromathic.test: Adjust the traces: in "%nterm <double> exp %% input: ...", exp used to be numbered before input.
1
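dummy_symbol_get above formats a generated symbol name with snprintf into a small fixed buffer and asserts the result fits, so truncation can never produce a colliding name. A minimal standalone sketch of that bounded-name pattern (function and buffer names are illustrative, not bison's):

#include <assert.h>
#include <stdio.h>

/* Generate names "$@1", "$@2", ... the way a parser generator might name
 * generated midrule symbols.  snprintf returns the length the formatted
 * string *would* have had, so checking it against the buffer size catches
 * truncation instead of silently emitting a bad name. */
static const char *next_dummy_name(void)
{
    static int dummy_count = 0;
    static char buf[32];
    int len = snprintf(buf, sizeof buf, "$@%d", ++dummy_count);

    assert(len > 0 && (size_t)len < sizeof buf);
    return buf;
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("%s\n", next_dummy_name());
    return 0;
}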
static void ProcessRadioTxDone( void ) { GetPhyParams_t getPhy; PhyParam_t phyParam; SetBandTxDoneParams_t txDone; if( MacCtx.NvmCtx->DeviceClass != CLASS_C ) { Radio.Sleep( ); } // Setup timers TimerSetValue( &MacCtx.RxWindowTimer1, MacCtx.RxWindow1Delay ); TimerStart( &MacCtx.RxWindowTimer1 ); TimerSetValue( &MacCtx.RxWindowTimer2, MacCtx.RxWindow2Delay ); TimerStart( &MacCtx.RxWindowTimer2 ); if( ( MacCtx.NvmCtx->DeviceClass == CLASS_C ) || ( MacCtx.NodeAckRequested == true ) ) { getPhy.Attribute = PHY_ACK_TIMEOUT; phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy ); TimerSetValue( &MacCtx.AckTimeoutTimer, MacCtx.RxWindow2Delay + phyParam.Value ); TimerStart( &MacCtx.AckTimeoutTimer ); } // Store last Tx channel MacCtx.NvmCtx->LastTxChannel = MacCtx.Channel; // Update last tx done time for the current channel txDone.Channel = MacCtx.Channel; if( MacCtx.NvmCtx->NetworkActivation == ACTIVATION_TYPE_NONE ) { txDone.Joined = false; } else { txDone.Joined = true; } txDone.LastTxDoneTime = TxDoneParams.CurTime; RegionSetBandTxDone( MacCtx.NvmCtx->Region, &txDone ); // Update Aggregated last tx done time MacCtx.NvmCtx->LastTxDoneTime = TxDoneParams.CurTime; if( MacCtx.NodeAckRequested == false ) { MacCtx.McpsConfirm.Status = LORAMAC_EVENT_INFO_STATUS_OK; } }
Safe
[ "CWE-120", "CWE-787" ]
LoRaMac-node
e3063a91daa7ad8a687223efa63079f0c24568e4
1.7515602232545126e+38
46
Added received buffer size checks.
0
/* {{{ PHP_MINFO_FUNCTION */ PHP_MINFO_FUNCTION(date) { const timelib_tzdb *tzdb = DATE_TIMEZONEDB; php_info_print_table_start(); php_info_print_table_row(2, "date/time support", "enabled"); php_info_print_table_row(2, "\"Olson\" Timezone Database Version", tzdb->version); php_info_print_table_row(2, "Timezone Database", php_date_global_timezone_db_enabled ? "external" : "internal"); php_info_print_table_row(2, "Default timezone", guess_timezone(tzdb TSRMLS_CC)); php_info_print_table_end(); DISPLAY_INI_ENTRIES();
Safe
[]
php-src
c377f1a715476934133f3254d1e0d4bf3743e2d2
5.189698096524784e+37
13
Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone)
0
static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,double *red, double *green,double *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=0.5*(0.01*percent_hue-1.0); blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); }
Safe
[ "CWE-835" ]
ImageMagick
a80ee0ee1a083b4991d12ed4c07b7c7c5890f329
3.087234365779839e+38
18
https://www.imagemagick.org/discourse-server/viewtopic.php?f=3&t=31506
0
static inline MemoryRegion *address_space_translate_cached( MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, hwaddr *plen, bool is_write, MemTxAttrs attrs) { MemoryRegionSection section; MemoryRegion *mr; IOMMUMemoryRegion *iommu_mr; AddressSpace *target_as; assert(!cache->ptr); *xlat = addr + cache->xlat; mr = cache->mrs.mr; iommu_mr = memory_region_get_iommu(mr); if (!iommu_mr) { /* MMIO region. */ return mr; } section = address_space_translate_iommu(iommu_mr, xlat, plen, NULL, is_write, true, &target_as, attrs); return section.mr; }
Safe
[ "CWE-787" ]
qemu
4bfb024bc76973d40a359476dc0291f46e435442
3.108322365832021e+38
24
memory: clamp cached translation in case it points to an MMIO region In using the address_space_translate_internal API, address_space_cache_init forgot one piece of advice that can be found in the code for address_space_translate_internal: /* MMIO registers can be expected to perform full-width accesses based only * on their address, without considering adjacent registers that could * decode to completely different MemoryRegions. When such registers * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO * regions overlap wildly. For this reason we cannot clamp the accesses * here. * * If the length is small (as is the case for address_space_ldl/stl), * everything works fine. If the incoming length is large, however, * the caller really has to do the clamping through memory_access_size. */ address_space_cache_init is exactly one such case where "the incoming length is large", therefore we need to clamp the resulting length---not to memory_access_size though, since we are not doing an access yet, but to the size of the resulting section. This ensures that subsequent accesses to the cached MemoryRegionSection will be in range. With this patch, the enclosed testcase notices that the used ring does not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used" error. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
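The commit message for the record above is about clamping a cached translation so later accesses cannot run past the MemoryRegionSection that was resolved. Stripped of the QEMU machinery, the idea is simply clamping a requested length to what remains of a region; a minimal sketch under that assumption (names are illustrative, not QEMU's):

#include <stdint.h>
#include <stdio.h>

/* Clamp a requested access of 'len' bytes at 'offset' so it never extends
 * past a region of 'region_size' bytes.  Returns the usable length. */
static uint64_t clamp_access_len(uint64_t offset, uint64_t len,
                                 uint64_t region_size)
{
    if (offset >= region_size)
        return 0;                       /* nothing usable at all */
    if (len > region_size - offset)
        len = region_size - offset;     /* shrink to what is left */
    return len;
}

int main(void)
{
    /* A 4 KiB region: a 64-byte access at offset 4090 is clamped to 6 bytes. */
    printf("%llu\n", (unsigned long long)clamp_access_len(4090, 64, 4096));
    return 0;
}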
const Tracing::CustomTagMap* ConnectionManagerImpl::ActiveStream::customTags() const { return tracing_custom_tags_.get(); }
Safe
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
2.60824053154758e+37
3
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <mklein@lyft.com>
0
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, struct list_head *bf_q, struct ath_buf *bf_first, struct sk_buff_head *tid_q, int *aggr_len) { #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) struct ath_buf *bf = bf_first, *bf_prev = NULL; int nframes = 0, ndelim; u16 aggr_limit = 0, al = 0, bpad = 0, al_delta, h_baw = tid->baw_size / 2; struct ieee80211_tx_info *tx_info; struct ath_frame_info *fi; struct sk_buff *skb; bool closed = false; bf = bf_first; aggr_limit = ath_lookup_rate(sc, bf, tid); do { skb = bf->bf_mpdu; fi = get_frame_info(skb); /* do not exceed aggregation limit */ al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; if (nframes) { if (aggr_limit < al + bpad + al_delta || ath_lookup_legacy(bf) || nframes >= h_baw) break; tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || !(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) break; } /* add padding for previous frame to aggregation length */ al += bpad + al_delta; /* * Get the delimiters needed to meet the MPDU * density for this node. */ ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen, !nframes); bpad = PADBYTES(al_delta) + (ndelim << 2); nframes++; bf->bf_next = NULL; /* link buffers of this frame to the aggregate */ if (!fi->baw_tracked) ath_tx_addto_baw(sc, tid, bf); bf->bf_state.ndelim = ndelim; __skb_unlink(skb, tid_q); list_add_tail(&bf->list, bf_q); if (bf_prev) bf_prev->bf_next = bf; bf_prev = bf; bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); if (!bf) { closed = true; break; } } while (ath_tid_has_buffered(tid)); bf = bf_first; bf->bf_lastbf = bf_prev; if (bf == bf_prev) { al = get_frame_info(bf->bf_mpdu)->framelen; bf->bf_state.bf_type = BUF_AMPDU; } else { TX_STAT_INC(txq->axq_qnum, a_aggr); } *aggr_len = al; return closed; #undef PADBYTES }
Safe
[ "CWE-362", "CWE-241" ]
linux
21f8aaee0c62708654988ce092838aa7df4d25d8
3.1148372481566332e+38
83
ath9k: protect tid->sched check We check tid->sched without a lock taken on ath_tx_aggr_sleep(). That is race condition which can result of doing list_del(&tid->list) twice (second time with poisoned list node) and cause crash like shown below: [424271.637220] BUG: unable to handle kernel paging request at 00100104 [424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k] ... [424271.639953] Call Trace: [424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k] [424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k] [424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211] [424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40 [424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211] [424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0 [424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90 [424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40 [424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90 [424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90 [424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211] [424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211] [424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211] [424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0 [424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211] [424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k] [424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211] [424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k] Bug report: https://bugzilla.kernel.org/show_bug.cgi?id=70551 Reported-and-tested-by: Max Sydorenko <maxim.stargazer@gmail.com> Cc: stable@vger.kernel.org Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
0
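The ath9k message describes a classic check-then-act race: tid->sched was tested without the lock that protects it, so two CPUs could both see it set and both unlink the same list node. A minimal pthread sketch of the locked version of that pattern, where the flag test and the removal happen under one lock (types and names are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

struct tid_state {
    pthread_mutex_t lock;
    int scheduled;            /* is this tid currently on the scheduler list? */
    int on_list_count;        /* stands in for actual list membership here */
};

/* Remove the tid from the scheduler exactly once, even if called
 * concurrently: test and removal are a single critical section. */
static void tid_unschedule(struct tid_state *tid)
{
    pthread_mutex_lock(&tid->lock);
    if (tid->scheduled) {
        tid->scheduled = 0;
        tid->on_list_count--;   /* would be list_del(&tid->list) in the driver */
    }
    pthread_mutex_unlock(&tid->lock);
}

static void *worker(void *arg)
{
    tid_unschedule(arg);
    return NULL;
}

int main(void)
{
    struct tid_state tid = { PTHREAD_MUTEX_INITIALIZER, 1, 1 };
    pthread_t a, b;

    pthread_create(&a, NULL, worker, &tid);
    pthread_create(&b, NULL, worker, &tid);
    pthread_join(a, NULL);
    pthread_join(b, NULL);

    printf("on_list_count = %d\n", tid.on_list_count);  /* always 0, never -1 */
    return 0;
}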
static int seed_from_timestamp_and_pid(uint32_t *seed) { #ifdef HAVE_GETTIMEOFDAY /* XOR of seconds and microseconds */ struct timeval tv; gettimeofday(&tv, NULL); *seed = (uint32_t)tv.tv_sec ^ (uint32_t)tv.tv_usec; #else /* Seconds only */ *seed = (uint32_t)time(NULL); #endif /* XOR with PID for more randomness */ #if defined(_WIN32) *seed ^= (uint32_t)_getpid(); #elif defined(HAVE_GETPID) *seed ^= (uint32_t)getpid(); #endif return 0; }
Safe
[ "CWE-310" ]
jansson
8f80c2d83808150724d31793e6ade92749b1faa4
2.2424559478255432e+38
20
CVE-2013-6401: Change hash function, randomize hashes Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing and testing.
0
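seed_from_timestamp_and_pid above mixes wall-clock seconds, microseconds and the process id with XOR to get a fallback seed when no better entropy source is available. A minimal POSIX usage sketch feeding such a seed into the C library PRNG; this illustrates the mixing only, not how jansson consumes the seed:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

/* Fallback seed: XOR of seconds, microseconds and the process id.
 * Not cryptographically strong, just hard to predict across runs. */
static uint32_t fallback_seed(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (uint32_t)tv.tv_sec ^ (uint32_t)tv.tv_usec ^ (uint32_t)getpid();
}

int main(void)
{
    srand(fallback_seed());
    printf("first value: %d\n", rand());
    return 0;
}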
static int equal_email(const unsigned char *a, size_t a_len, const unsigned char *b, size_t b_len, unsigned int unused_flags) { size_t i = a_len; if (a_len != b_len) return 0; /* * We search backwards for the '@' character, so that we do not have to * deal with quoted local-parts. The domain part is compared in a * case-insensitive manner. */ while (i > 0) { --i; if (a[i] == '@' || b[i] == '@') { if (!equal_nocase(a + i, a_len - i, b + i, a_len - i, 0)) return 0; break; } } if (i == 0) i = a_len; return equal_case(a, i, b, i, 0); }
Safe
[ "CWE-125" ]
openssl
bb4d2ed4091408404e18b3326e3df67848ef63d0
1.1576833750550065e+38
24
Fix append_ia5 function to not assume NUL terminated strings ASN.1 strings may not be NUL terminated. Don't assume they are. CVE-2021-3712 Reviewed-by: Viktor Dukhovni <viktor@openssl.org> Reviewed-by: Paul Dale <pauli@openssl.org>
0
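equal_email compares the local part byte-for-byte and only the domain (everything after the last '@') case-insensitively, scanning backwards so quoted local parts need no special handling. A small standalone sketch of the same split, using NUL-terminated strings for brevity (the OpenSSL version works on length-delimited buffers):

#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strcasecmp */

/* Local part compared case-sensitively, domain case-insensitively. */
static int email_equal(const char *a, const char *b)
{
    const char *at_a = strrchr(a, '@');
    const char *at_b = strrchr(b, '@');

    if (!at_a || !at_b)
        return strcmp(a, b) == 0;           /* no domain part: exact match */
    if (at_a - a != at_b - b)
        return 0;                           /* local parts differ in length */
    return memcmp(a, b, (size_t)(at_a - a)) == 0 &&
           strcasecmp(at_a + 1, at_b + 1) == 0;
}

int main(void)
{
    printf("%d\n", email_equal("User@Example.ORG", "User@example.org")); /* 1 */
    printf("%d\n", email_equal("user@example.org", "User@example.org")); /* 0 */
    return 0;
}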
composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_request *req = cdev->req; int value = -EOPNOTSUPP; int status = 0; u16 w_index = le16_to_cpu(ctrl->wIndex); u8 intf = w_index & 0xFF; u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); struct usb_function *f = NULL; u8 endp; if (w_length > USB_COMP_EP0_BUFSIZ) { if (ctrl->bRequestType & USB_DIR_IN) { /* Cast away the const, we are going to overwrite on purpose. */ __le16 *temp = (__le16 *)&ctrl->wLength; *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ); w_length = USB_COMP_EP0_BUFSIZ; } else { goto done; } } /* partial re-init of the response message; the function or the * gadget might need to intercept e.g. a control-OUT completion * when we delegate to it. */ req->zero = 0; req->context = cdev; req->complete = composite_setup_complete; req->length = 0; gadget->ep0->driver_data = cdev; /* * Don't let non-standard requests match any of the cases below * by accident. */ if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) goto unknown; switch (ctrl->bRequest) { /* we handle all standard USB descriptors */ case USB_REQ_GET_DESCRIPTOR: if (ctrl->bRequestType != USB_DIR_IN) goto unknown; switch (w_value >> 8) { case USB_DT_DEVICE: cdev->desc.bNumConfigurations = count_configs(cdev, USB_DT_DEVICE); cdev->desc.bMaxPacketSize0 = cdev->gadget->ep0->maxpacket; if (gadget_is_superspeed(gadget)) { if (gadget->speed >= USB_SPEED_SUPER) { cdev->desc.bcdUSB = cpu_to_le16(0x0320); cdev->desc.bMaxPacketSize0 = 9; } else { cdev->desc.bcdUSB = cpu_to_le16(0x0210); } } else { if (gadget->lpm_capable) cdev->desc.bcdUSB = cpu_to_le16(0x0201); else cdev->desc.bcdUSB = cpu_to_le16(0x0200); } value = min(w_length, (u16) sizeof cdev->desc); memcpy(req->buf, &cdev->desc, value); break; case USB_DT_DEVICE_QUALIFIER: if (!gadget_is_dualspeed(gadget) || gadget->speed >= USB_SPEED_SUPER) break; device_qual(cdev); value = min_t(int, w_length, sizeof(struct usb_qualifier_descriptor)); break; case USB_DT_OTHER_SPEED_CONFIG: if (!gadget_is_dualspeed(gadget) || gadget->speed >= USB_SPEED_SUPER) break; fallthrough; case USB_DT_CONFIG: value = config_desc(cdev, w_value); if (value >= 0) value = min(w_length, (u16) value); break; case USB_DT_STRING: value = get_string(cdev, req->buf, w_index, w_value & 0xff); if (value >= 0) value = min(w_length, (u16) value); break; case USB_DT_BOS: if (gadget_is_superspeed(gadget) || gadget->lpm_capable) { value = bos_desc(cdev); value = min(w_length, (u16) value); } break; case USB_DT_OTG: if (gadget_is_otg(gadget)) { struct usb_configuration *config; int otg_desc_len = 0; if (cdev->config) config = cdev->config; else config = list_first_entry( &cdev->configs, struct usb_configuration, list); if (!config) goto done; if (gadget->otg_caps && (gadget->otg_caps->otg_rev >= 0x0200)) otg_desc_len += sizeof( struct usb_otg20_descriptor); else otg_desc_len += sizeof( struct usb_otg_descriptor); value = min_t(int, w_length, otg_desc_len); memcpy(req->buf, config->descriptors[0], value); } break; } break; /* any number of configs can work */ case USB_REQ_SET_CONFIGURATION: if (ctrl->bRequestType != 0) goto unknown; if (gadget_is_otg(gadget)) { if (gadget->a_hnp_support) DBG(cdev, "HNP available\n"); else if (gadget->a_alt_hnp_support) DBG(cdev, "HNP on another port\n"); else VDBG(cdev, "HNP inactive\n"); } spin_lock(&cdev->lock); value = set_config(cdev, ctrl, w_value); spin_unlock(&cdev->lock); break; case 
USB_REQ_GET_CONFIGURATION: if (ctrl->bRequestType != USB_DIR_IN) goto unknown; if (cdev->config) *(u8 *)req->buf = cdev->config->bConfigurationValue; else *(u8 *)req->buf = 0; value = min(w_length, (u16) 1); break; /* function drivers must handle get/set altsetting */ case USB_REQ_SET_INTERFACE: if (ctrl->bRequestType != USB_RECIP_INTERFACE) goto unknown; if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; /* * If there's no get_alt() method, we know only altsetting zero * works. There is no need to check if set_alt() is not NULL * as we check this in usb_add_function(). */ if (w_value && !f->get_alt) break; spin_lock(&cdev->lock); value = f->set_alt(f, w_index, w_value); if (value == USB_GADGET_DELAYED_STATUS) { DBG(cdev, "%s: interface %d (%s) requested delayed status\n", __func__, intf, f->name); cdev->delayed_status++; DBG(cdev, "delayed_status count %d\n", cdev->delayed_status); } spin_unlock(&cdev->lock); break; case USB_REQ_GET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) goto unknown; if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; /* lots of interfaces only need altsetting zero... */ value = f->get_alt ? f->get_alt(f, w_index) : 0; if (value < 0) break; *((u8 *)req->buf) = value; value = min(w_length, (u16) 1); break; case USB_REQ_GET_STATUS: if (gadget_is_otg(gadget) && gadget->hnp_polling_support && (w_index == OTG_STS_SELECTOR)) { if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_DEVICE)) goto unknown; *((u8 *)req->buf) = gadget->host_request_flag; value = 1; break; } /* * USB 3.0 additions: * Function driver should handle get_status request. If such cb * wasn't supplied we respond with default value = 0 * Note: function driver should supply such cb only for the * first interface of the function */ if (!gadget_is_superspeed(gadget)) goto unknown; if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE)) goto unknown; value = 2; /* This is the length of the get_status reply */ put_unaligned_le16(0, req->buf); if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; status = f->get_status ? f->get_status(f) : 0; if (status < 0) break; put_unaligned_le16(status & 0x0000ffff, req->buf); break; /* * Function drivers should handle SetFeature/ClearFeature * (FUNCTION_SUSPEND) request. 
function_suspend cb should be supplied * only for the first interface of the function */ case USB_REQ_CLEAR_FEATURE: case USB_REQ_SET_FEATURE: if (!gadget_is_superspeed(gadget)) goto unknown; if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE)) goto unknown; switch (w_value) { case USB_INTRF_FUNC_SUSPEND: if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; value = 0; if (f->func_suspend) value = f->func_suspend(f, w_index >> 8); if (value < 0) { ERROR(cdev, "func_suspend() returned error %d\n", value); value = 0; } break; } break; default: unknown: /* * OS descriptors handling */ if (cdev->use_os_string && cdev->os_desc_config && (ctrl->bRequestType & USB_TYPE_VENDOR) && ctrl->bRequest == cdev->b_vendor_code) { struct usb_configuration *os_desc_cfg; u8 *buf; int interface; int count = 0; req = cdev->os_desc_req; req->context = cdev; req->complete = composite_setup_complete; buf = req->buf; os_desc_cfg = cdev->os_desc_config; w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ); memset(buf, 0, w_length); buf[5] = 0x01; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: if (w_index != 0x4 || (w_value >> 8)) break; buf[6] = w_index; /* Number of ext compat interfaces */ count = count_ext_compat(os_desc_cfg); buf[8] = count; count *= 24; /* 24 B/ext compat desc */ count += 16; /* header */ put_unaligned_le32(count, buf); value = w_length; if (w_length > 0x10) { value = fill_ext_compat(os_desc_cfg, buf); value = min_t(u16, w_length, value); } break; case USB_RECIP_INTERFACE: if (w_index != 0x5 || (w_value >> 8)) break; interface = w_value & 0xFF; if (interface >= MAX_CONFIG_INTERFACES || !os_desc_cfg->interface[interface]) break; buf[6] = w_index; count = count_ext_prop(os_desc_cfg, interface); put_unaligned_le16(count, buf + 8); count = len_ext_prop(os_desc_cfg, interface); put_unaligned_le32(count, buf); value = w_length; if (w_length > 0x0A) { value = fill_ext_prop(os_desc_cfg, interface, buf); if (value >= 0) value = min_t(u16, w_length, value); } break; } goto check_value; } VDBG(cdev, "non-core control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* functions always handle their interfaces and endpoints... * punt other recipients (other, WUSB, ...) to the current * configuration code. 
*/ if (cdev->config) { list_for_each_entry(f, &cdev->config->functions, list) if (f->req_match && f->req_match(f, ctrl, false)) goto try_fun_setup; } else { struct usb_configuration *c; list_for_each_entry(c, &cdev->configs, list) list_for_each_entry(f, &c->functions, list) if (f->req_match && f->req_match(f, ctrl, true)) goto try_fun_setup; } f = NULL; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; break; case USB_RECIP_ENDPOINT: if (!cdev->config) break; endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); list_for_each_entry(f, &cdev->config->functions, list) { if (test_bit(endp, f->endpoints)) break; } if (&f->list == &cdev->config->functions) f = NULL; break; } try_fun_setup: if (f && f->setup) value = f->setup(f, ctrl); else { struct usb_configuration *c; c = cdev->config; if (!c) goto done; /* try current config's setup */ if (c->setup) { value = c->setup(c, ctrl); goto done; } /* try the only function in the current config */ if (!list_is_singular(&c->functions)) goto done; f = list_first_entry(&c->functions, struct usb_function, list); if (f->setup) value = f->setup(f, ctrl); } goto done; } check_value: /* respond with data transfer before status phase? */ if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) { req->length = value; req->context = cdev; req->zero = value < w_length; value = composite_ep0_queue(cdev, req, GFP_ATOMIC); if (value < 0) { DBG(cdev, "ep_queue --> %d\n", value); req->status = 0; composite_setup_complete(gadget->ep0, req); } } else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) { WARN(cdev, "%s: Delayed status not supported for w_length != 0", __func__); } done: /* device either stalls (value < 0) or reports success */ return value; }
Safe
[ "CWE-476" ]
linux
75e5b4849b81e19e9efe1654b30d7f3151c33c2c
2.16734993810273e+38
425
USB: gadget: validate interface OS descriptor requests Stall the control endpoint in case provided index exceeds array size of MAX_CONFIG_INTERFACES or when the retrieved function pointer is null. Signed-off-by: Szymon Heidrich <szymon.heidrich@gmail.com> Cc: stable@kernel.org Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
0
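The fix described in the composite_setup message boils down to validating a host-supplied interface index against the array bound and checking the looked-up pointer before use, instead of trusting the request. A minimal sketch of that validation (structure names are illustrative, only the MAX_CONFIG_INTERFACES limit is taken from the record):

#include <stdio.h>

#define MAX_CONFIG_INTERFACES 16

struct function {
    const char *name;
};

struct config {
    struct function *interface[MAX_CONFIG_INTERFACES];
};

/* Return the function for a host-supplied interface number, or NULL if the
 * index is out of range or the slot is empty; the caller then stalls the
 * request instead of dereferencing garbage. */
static struct function *lookup_interface(struct config *cfg, unsigned int intf)
{
    if (!cfg || intf >= MAX_CONFIG_INTERFACES)
        return NULL;
    return cfg->interface[intf];
}

int main(void)
{
    static struct function f0 = { "acm0" };
    static struct config cfg = { .interface = { &f0 } };

    struct function *f = lookup_interface(&cfg, 200);  /* hostile index */
    printf("%s\n", f ? f->name : "rejected");
    return 0;
}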
static void test_store_result2() { MYSQL_STMT *stmt; int rc; int nData; ulong length; MYSQL_BIND my_bind[1]; char query[MAX_TEST_QUERY_LENGTH]; myheader("test_store_result2"); rc= mysql_query(mysql, "DROP TABLE IF EXISTS test_store_result"); myquery(rc); rc= mysql_query(mysql, "CREATE TABLE test_store_result(col1 int , col2 varchar(50))"); myquery(rc); rc= mysql_query(mysql, "INSERT INTO test_store_result VALUES(10, 'venu'), (20, 'mysql')"); myquery(rc); rc= mysql_query(mysql, "INSERT INTO test_store_result(col2) VALUES('monty')"); myquery(rc); rc= mysql_commit(mysql); myquery(rc); /* We need to memset bind structure because mysql_stmt_bind_param checks all its members. */ memset(my_bind, 0, sizeof(my_bind)); my_bind[0].buffer_type= MYSQL_TYPE_LONG; my_bind[0].buffer= (void *) &nData; /* integer data */ my_bind[0].length= &length; my_bind[0].is_null= 0; strmov((char *)query , "SELECT col1 FROM test_store_result where col1= ?"); stmt= mysql_simple_prepare(mysql, query); check_stmt(stmt); rc= mysql_stmt_bind_param(stmt, my_bind); check_execute(stmt, rc); rc= mysql_stmt_bind_result(stmt, my_bind); check_execute(stmt, rc); nData= 10; length= 0; rc= mysql_stmt_execute(stmt); check_execute(stmt, rc); nData= 0; rc= mysql_stmt_store_result(stmt); check_execute(stmt, rc); rc= mysql_stmt_fetch(stmt); check_execute(stmt, rc); if (!opt_silent) fprintf(stdout, "\n row 1: %d", nData); DIE_UNLESS(nData == 10); rc= mysql_stmt_fetch(stmt); DIE_UNLESS(rc == MYSQL_NO_DATA); nData= 20; rc= mysql_stmt_execute(stmt); check_execute(stmt, rc); nData= 0; rc= mysql_stmt_store_result(stmt); check_execute(stmt, rc); rc= mysql_stmt_fetch(stmt); check_execute(stmt, rc); if (!opt_silent) fprintf(stdout, "\n row 1: %d", nData); DIE_UNLESS(nData == 20); rc= mysql_stmt_fetch(stmt); DIE_UNLESS(rc == MYSQL_NO_DATA); mysql_stmt_close(stmt); }
Safe
[ "CWE-416" ]
mysql-server
4797ea0b772d5f4c5889bc552424132806f46e93
2.419407573341557e+38
84
BUG#17512527: LIST HANDLING INCORRECT IN MYSQL_PRUNE_STMT_LIST() Analysis: --------- Invalid memory access maybe observed when using prepared statements if: a) The mysql client connection is lost after statement preparation is complete and b) There is at least one statement which is in initialized state but not prepared yet. When the client detects a closed connection, it calls end_server() to shutdown the connection. As part of the clean up, the mysql_prune_stmt_list() removes the statements which has transitioned beyond the initialized state and retains only the statements which are in a initialized state. During this processing, the initialized statements are moved from 'mysql->stmts' to a temporary 'pruned_list'. When moving the first 'INIT_DONE' element to the pruned_list, 'element->next' is set to NULL. Hence the rest of the list is never traversed and the statements which have transitioned beyond the initialized state are never invalidated. When the mysql_stmt_close() is called for the statement which is not invalidated; the statements list is updated in order to remove the statement. This would end up accessing freed memory(freed by the mysql_stmt_close() for a previous statement in the list). Fix: --- mysql_prune_stmt_list() called list_add() incorrectly to create a temporary list. The use case of list_add() is to add a single element to the front of the doubly linked list. mysql_prune_stmt_list() called list_add() by passing an entire list as the 'element'. mysql_prune_stmt_list() now uses list_delete() to remove the statement which has transitioned beyond the initialized phase. Thus the statement list would contain only elements where the the state of the statement is initialized. Note: Run the test with valgrind-mysqltest and leak-check=full option to see the invalid memory access.
0
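The message explains that mysql_prune_stmt_list broke a doubly linked list by passing a whole sublist to list_add, which only links a single node; deleting unwanted nodes one at a time keeps every link intact. A small generic sketch of pruning a doubly linked list with a predicate (list type and names are illustrative, not the client library's):

#include <stdio.h>
#include <stdlib.h>

struct node {
    int state;                 /* 0 = still initializing, 1 = prepared */
    struct node *prev, *next;
};

/* Unlink every node for which keep() is false, freeing it.  Each removal
 * touches only that node's neighbours, so the rest of the list stays valid. */
static struct node *prune(struct node *head, int (*keep)(const struct node *))
{
    struct node *n = head;

    while (n) {
        struct node *next = n->next;
        if (!keep(n)) {
            if (n->prev) n->prev->next = n->next; else head = n->next;
            if (n->next) n->next->prev = n->prev;
            free(n);
        }
        n = next;
    }
    return head;
}

static int is_initializing(const struct node *n) { return n->state == 0; }

int main(void)
{
    /* Build 0 <-> 1 <-> 0 and keep only the still-initializing statements. */
    struct node *a = calloc(1, sizeof *a), *b = calloc(1, sizeof *b),
                *c = calloc(1, sizeof *c);
    a->state = 0; b->state = 1; c->state = 0;
    a->next = b; b->prev = a; b->next = c; c->prev = b;

    struct node *head = prune(a, is_initializing);
    for (struct node *n = head; n; n = n->next)
        printf("state %d\n", n->state);
    return 0;
}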
long ssl2_ctx_callback_ctrl(SSL_CTX *ctx, int cmd, void (*fp) (void)) { return (0); }
Safe
[ "CWE-20" ]
openssl
86f8fb0e344d62454f8daf3e15236b2b59210756
6.970154383206869e+37
4
Fix reachable assert in SSLv2 servers. This assert is reachable for servers that support SSLv2 and export ciphers. Therefore, such servers can be DoSed by sending a specially crafted SSLv2 CLIENT-MASTER-KEY. Also fix s2_srvr.c to error out early if the key lengths are malformed. These lengths are sent unencrypted, so this does not introduce an oracle. CVE-2015-0293 This issue was discovered by Sean Burford (Google) and Emilia Käsper of the OpenSSL development team. Reviewed-by: Richard Levitte <levitte@openssl.org> Reviewed-by: Tim Hudson <tjh@openssl.org>
0
static int set_delay_drop(struct mlx5_ib_dev *dev) { int err = 0; mutex_lock(&dev->delay_drop.lock); if (dev->delay_drop.activate) goto out; err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout); if (err) goto out; dev->delay_drop.activate = true; out: mutex_unlock(&dev->delay_drop.lock); if (!err) atomic_inc(&dev->delay_drop.rqs_cnt); return err; }
Safe
[ "CWE-119", "CWE-787" ]
linux
0625b4ba1a5d4703c7fb01c497bd6c156908af00
1.17895191414221e+37
20
IB/mlx5: Fix leaking stack memory to userspace mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes were written. Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp") Cc: <stable@vger.kernel.org> Acked-by: Leon Romanovsky <leonro@mellanox.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
0
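The leak in the mlx5 message came from copying a partially written stack structure to userspace: only the first 4 bytes were set, the rest carried stale stack bytes. The usual defensive pattern is to zero the whole response structure before filling it; a minimal sketch (structure and field names are made up for illustration):

#include <stdio.h>
#include <string.h>

struct create_qp_resp {
    unsigned int bfreg_index;
    unsigned int reserved[7];   /* padding/future fields also reach userspace */
};

static void fill_resp(struct create_qp_resp *resp, unsigned int bfreg)
{
    /* Zero everything first so padding and unset fields cannot leak
     * whatever happened to be on the stack. */
    memset(resp, 0, sizeof(*resp));
    resp->bfreg_index = bfreg;
}

int main(void)
{
    struct create_qp_resp resp;
    fill_resp(&resp, 3);
    printf("%u %u\n", resp.bfreg_index, resp.reserved[6]);   /* 3 0 */
    return 0;
}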
*/ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, bool *fragstolen, int *delta_truesize) { int i, delta, len = from->len; *fragstolen = false; if (skb_cloned(to)) return false; if (len <= skb_tailroom(to)) { if (len) BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); *delta_truesize = 0; return true; } if (skb_has_frag_list(to) || skb_has_frag_list(from)) return false; if (skb_headlen(from) != 0) { struct page *page; unsigned int offset; if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) return false; if (skb_head_is_locked(from)) return false; delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); page = virt_to_head_page(from->head); offset = from->data - (unsigned char *)page_address(page); skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, skb_headlen(from)); *fragstolen = true; } else { if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) return false; delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); } WARN_ON_ONCE(delta < len); memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, skb_shinfo(from)->frags, skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; if (!skb_cloned(from)) skb_shinfo(from)->nr_frags = 0; /* if the skb is not cloned this does nothing * since we set nr_frags to 0. */ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) skb_frag_ref(from, i); to->truesize += delta; to->len += len; to->data_len += len; *delta_truesize = delta; return true;
Safe
[ "CWE-703", "CWE-125" ]
linux
8605330aac5a5785630aec8f64378a54891937cc
1.301446217129872e+38
70
tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs __sock_recv_timestamp can be called for both normal skbs (for receive timestamps) and for skbs on the error queue (for transmit timestamps). Commit 1c885808e456 (tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING) assumes any skb passed to __sock_recv_timestamp are from the error queue, containing OPT_STATS in the content of the skb. This results in accessing invalid memory or generating junk data. To fix this, set skb->pkt_type to PACKET_OUTGOING for packets on the error queue. This is safe because on the receive path on local sockets skb->pkt_type is never set to PACKET_OUTGOING. With that, copy OPT_STATS from a packet, only if its pkt_type is PACKET_OUTGOING. Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING") Reported-by: JongHwan Kim <zzoru007@gmail.com> Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com> Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: Willem de Bruijn <willemb@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
int managesieve_parser_finish_line (struct managesieve_parser *parser, unsigned int count, enum managesieve_parser_flags flags, const struct managesieve_arg **args_r) { const unsigned char *data; size_t data_size; int ret; ret = managesieve_parser_read_args(parser, count, flags, args_r); if (ret == -2) { /* we should have noticed end of everything except atom */ if (parser->cur_type == ARG_PARSE_ATOM) { data = i_stream_get_data(parser->input, &data_size); managesieve_parser_save_arg(parser, data, data_size); } } return finish_line(parser, count, args_r); }
Safe
[ "CWE-787" ]
pigeonhole
7ce9990a5e6ba59e89b7fe1c07f574279aed922c
6.661729742255613e+37
18
lib-managesieve: Don't accept strings with NULs ManageSieve doesn't allow NULs in strings. This fixes a bug with unescaping a string with NULs: str_unescape() could have been called for memory that points outside the allocated string, causing heap corruption. This could cause crashes or theoretically even result in remote code execution exploit. Found by Nick Roessler and Rafi Rubin
0
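The pigeonhole message says ManageSieve strings must not contain NULs, and that unescaping a string with embedded NULs walked outside the allocation. Rejecting NUL bytes up front in a length-delimited buffer is a one-liner with memchr; a minimal sketch (function name is illustrative):

#include <stdio.h>
#include <string.h>

/* Accept the buffer only if it contains no NUL byte; the data is
 * length-delimited, so a NUL is never a legitimate terminator here. */
static int string_arg_valid(const unsigned char *data, size_t size)
{
    return memchr(data, '\0', size) == NULL;
}

int main(void)
{
    const unsigned char good[] = { 'h', 'i' };
    const unsigned char bad[]  = { 'h', '\0', 'i' };

    printf("%d %d\n", string_arg_valid(good, sizeof good),
                      string_arg_valid(bad, sizeof bad));   /* 1 0 */
    return 0;
}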
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : rng\n"); seq_printf(m, "seedsize : %u\n", alg->cra_rng.seedsize); }
Safe
[ "CWE-310" ]
linux
9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
2.357965552854577e+37
5
crypto: user - fix info leaks in report API Three errors resulting in kernel memory disclosure: 1/ The structures used for the netlink based crypto algorithm report API are located on the stack. As snprintf() does not fill the remainder of the buffer with null bytes, those stack bytes will be disclosed to users of the API. Switch to strncpy() to fix this. 2/ crypto_report_one() does not initialize all field of struct crypto_user_alg. Fix this to fix the heap info leak. 3/ For the module name we should copy only as many bytes as module_name() returns -- not as much as the destination buffer could hold. But the current code does not and therefore copies random data from behind the end of the module name, as the module name is always shorter than CRYPTO_MAX_ALG_NAME. Also switch to use strncpy() to copy the algorithm's name and driver_name. They are strings, after all. Signed-off-by: Mathias Krause <minipli@googlemail.com> Cc: Steffen Klassert <steffen.klassert@secunet.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
0
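The three leaks listed in the crypto_user message share one root cause: fixed-size report fields were filled with calls that leave the tail of the buffer untouched, so stale stack or heap bytes ride along to userspace. Zero-filling the field and bounding the copy fixes it; a minimal sketch (field sizes are illustrative, not the crypto_user ABI):

#include <stdio.h>
#include <string.h>

#define NAME_LEN 64

struct report {
    char type[NAME_LEN];
};

/* Copy a string into a fixed-size report field without leaking the bytes
 * after the terminator: clear the whole field, then copy at most LEN-1. */
static void report_set_type(struct report *r, const char *type)
{
    memset(r->type, 0, sizeof(r->type));
    strncpy(r->type, type, sizeof(r->type) - 1);
}

int main(void)
{
    struct report r;
    report_set_type(&r, "rng");
    printf("%s %d\n", r.type, r.type[NAME_LEN - 1]);   /* "rng 0" */
    return 0;
}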
static void ev_stream_submitted(h2_proxy_session *session, int stream_id, const char *msg) { switch (session->state) { case H2_PROXYS_ST_IDLE: case H2_PROXYS_ST_WAIT: transit(session, "stream submitted", H2_PROXYS_ST_BUSY); break; default: /* nop */ break; } }
Safe
[ "CWE-770" ]
mod_h2
dd05d49abe0f67512ce9ed5ba422d7711effecfb
2.8394941640693685e+38
13
* fixes Timeout vs. KeepAliveTimeout behaviour, see PR 63534 (for trunk now, mpm event backport to 2.4.x up for vote). * Fixes stream cleanup when connection throttling is in place. * Counts stream resets by client on streams initiated by client as cause for connection throttling. * Header length checks are now logged similar to HTTP/1.1 protocol handler (thanks @mkaufmann) * Header length is checked also on the merged value from several header instances and results in a 431 response.
0
static int on_invalid_frame_recv_cb(nghttp2_session *ngh2, const nghttp2_frame *frame, int error, void *userp) { h2_session *session = (h2_session *)userp; (void)ngh2; if (APLOGcdebug(session->c)) { char buffer[256]; h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, H2_SSSN_LOG(APLOGNO(03063), session, "recv invalid FRAME[%s], frames=%ld/%ld (r/s)"), buffer, (long)session->frames_received, (long)session->frames_sent); } return 0; }
Safe
[]
mod_h2
5e75e5685dd043fe93a5a08a15edd087a43f6968
9.086646061537201e+37
19
v1.11.0 -------------------------------------------------------------------------------- * connection IO event handling reworked. Instead of reacting on incoming bytes, the state machine now acts on incoming frames that are affecting it. This reduces state transitions. * pytest suite now covers some basic tests on h2 selection, GET and POST * started to add pytest suite from existing bash tests
0
TEST_F(Http1ClientConnectionImplTest, SimpleGet) { initialize(); MockResponseDecoder response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}}; request_encoder.encodeHeaders(headers, true); EXPECT_EQ("GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n", output); }
Safe
[ "CWE-770" ]
envoy
7ca28ff7d46454ae930e193d97b7d08156b1ba59
1.210061564939194e+37
13
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145) Signed-off-by: antonio <avd@google.com>
0
ldns_rr_list_rr_count(const ldns_rr_list *rr_list) { if (rr_list) { return rr_list->_rr_count; } else { return 0; } }
Safe
[ "CWE-415" ]
ldns
070b4595981f48a21cc6b4f5047fdc2d09d3da91
1.8802411189598215e+38
8
CAA and URI
0
static int brcmf_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev, const struct cfg80211_pmk_conf *conf) { struct brcmf_if *ifp; brcmf_dbg(TRACE, "enter\n"); /* expect using firmware supplicant for 1X */ ifp = netdev_priv(dev); if (WARN_ON(ifp->vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_1X)) return -EINVAL; return brcmf_set_pmk(ifp, conf->pmk, conf->pmk_len); }
Safe
[ "CWE-119", "CWE-787" ]
linux
8f44c9a41386729fea410e688959ddaa9d51be7c
2.713839117161889e+38
14
brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx() The lower level nl80211 code in cfg80211 ensures that "len" is between 25 and NL80211_ATTR_FRAME (2304). We subtract DOT11_MGMT_HDR_LEN (24) from "len" so thats's max of 2280. However, the action_frame->data[] buffer is only BRCMF_FIL_ACTION_FRAME_SIZE (1800) bytes long so this memcpy() can overflow. memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], le16_to_cpu(action_frame->len)); Cc: stable@vger.kernel.org # 3.9.x Fixes: 18e2f61db3b70 ("brcmfmac: P2P action frame tx.") Reported-by: "freenerguo(郭大兴)" <freenerguo@tencent.com> Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
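The overflow in the brcmfmac message is the textbook case of copying a remote-controlled length into a fixed buffer; checking the length against the destination size before memcpy is the fix. A minimal sketch with illustrative types (the real limits in the record are DOT11_MGMT_HDR_LEN and BRCMF_FIL_ACTION_FRAME_SIZE):

#include <stdio.h>
#include <string.h>

#define ACTION_FRAME_SIZE 1800   /* capacity of the destination buffer */

struct action_frame {
    unsigned short len;
    unsigned char data[ACTION_FRAME_SIZE];
};

/* Copy a caller-supplied payload only if it fits the fixed-size buffer. */
static int action_frame_fill(struct action_frame *af,
                             const unsigned char *payload, size_t len)
{
    if (len > sizeof(af->data))
        return -1;                    /* reject instead of overflowing */
    af->len = (unsigned short)len;
    memcpy(af->data, payload, len);
    return 0;
}

int main(void)
{
    static struct action_frame af;
    static unsigned char big[2280];   /* larger than the buffer, like the bug */

    printf("%d\n", action_frame_fill(&af, big, sizeof big));   /* -1 */
    return 0;
}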
int ssl3_get_key_exchange(SSL *s) { #ifndef OPENSSL_NO_RSA unsigned char *q,md_buf[EVP_MAX_MD_SIZE*2]; #endif EVP_MD_CTX md_ctx; unsigned char *param,*p; int al,i,j,param_len,ok; long n,alg_k,alg_a; EVP_PKEY *pkey=NULL; #ifndef OPENSSL_NO_RSA RSA *rsa=NULL; #endif #ifndef OPENSSL_NO_DH DH *dh=NULL; #endif #ifndef OPENSSL_NO_ECDH EC_KEY *ecdh = NULL; BN_CTX *bn_ctx = NULL; EC_POINT *srvr_ecpoint = NULL; int curve_nid = 0; int encoded_pt_len = 0; #endif /* use same message size as in ssl3_get_certificate_request() * as ServerKeyExchange message may be skipped */ n=s->method->ssl_get_message(s, SSL3_ST_CR_KEY_EXCH_A, SSL3_ST_CR_KEY_EXCH_B, -1, s->max_cert_list, &ok); if (!ok) return((int)n); if (s->s3->tmp.message_type != SSL3_MT_SERVER_KEY_EXCHANGE) { #ifndef OPENSSL_NO_PSK /* In plain PSK ciphersuite, ServerKeyExchange can be omitted if no identity hint is sent. Set session->sess_cert anyway to avoid problems later.*/ if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK) { s->session->sess_cert=ssl_sess_cert_new(); if (s->ctx->psk_identity_hint) OPENSSL_free(s->ctx->psk_identity_hint); s->ctx->psk_identity_hint = NULL; } #endif s->s3->tmp.reuse_message=1; return(1); } param=p=(unsigned char *)s->init_msg; if (s->session->sess_cert != NULL) { #ifndef OPENSSL_NO_RSA if (s->session->sess_cert->peer_rsa_tmp != NULL) { RSA_free(s->session->sess_cert->peer_rsa_tmp); s->session->sess_cert->peer_rsa_tmp=NULL; } #endif #ifndef OPENSSL_NO_DH if (s->session->sess_cert->peer_dh_tmp) { DH_free(s->session->sess_cert->peer_dh_tmp); s->session->sess_cert->peer_dh_tmp=NULL; } #endif #ifndef OPENSSL_NO_ECDH if (s->session->sess_cert->peer_ecdh_tmp) { EC_KEY_free(s->session->sess_cert->peer_ecdh_tmp); s->session->sess_cert->peer_ecdh_tmp=NULL; } #endif } else { s->session->sess_cert=ssl_sess_cert_new(); } param_len=0; alg_k=s->s3->tmp.new_cipher->algorithm_mkey; alg_a=s->s3->tmp.new_cipher->algorithm_auth; EVP_MD_CTX_init(&md_ctx); #ifndef OPENSSL_NO_PSK if (alg_k & SSL_kPSK) { char tmp_id_hint[PSK_MAX_IDENTITY_LEN+1]; al=SSL_AD_HANDSHAKE_FAILURE; n2s(p,i); param_len=i+2; /* Store PSK identity hint for later use, hint is used * in ssl3_send_client_key_exchange. Assume that the * maximum length of a PSK identity hint can be as * long as the maximum length of a PSK identity. */ if (i > PSK_MAX_IDENTITY_LEN) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_DATA_LENGTH_TOO_LONG); goto f_err; } if (param_len > n) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_PSK_IDENTITY_HINT_LENGTH); goto f_err; } /* If received PSK identity hint contains NULL * characters, the hint is truncated from the first * NULL. p may not be ending with NULL, so create a * NULL-terminated string. 
*/ memcpy(tmp_id_hint, p, i); memset(tmp_id_hint+i, 0, PSK_MAX_IDENTITY_LEN+1-i); if (s->ctx->psk_identity_hint != NULL) OPENSSL_free(s->ctx->psk_identity_hint); s->ctx->psk_identity_hint = BUF_strdup(tmp_id_hint); if (s->ctx->psk_identity_hint == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto f_err; } p+=i; n-=param_len; } else #endif /* !OPENSSL_NO_PSK */ #ifndef OPENSSL_NO_RSA if (alg_k & SSL_kRSA) { if ((rsa=RSA_new()) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE); goto err; } n2s(p,i); param_len=i+2; if (param_len > n) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_MODULUS_LENGTH); goto f_err; } if (!(rsa->n=BN_bin2bn(p,i,rsa->n))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); goto err; } p+=i; n2s(p,i); param_len+=i+2; if (param_len > n) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_E_LENGTH); goto f_err; } if (!(rsa->e=BN_bin2bn(p,i,rsa->e))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); goto err; } p+=i; n-=param_len; /* this should be because we are using an export cipher */ if (alg_a & SSL_aRSA) pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509); else { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR); goto err; } s->session->sess_cert->peer_rsa_tmp=rsa; rsa=NULL; } #else /* OPENSSL_NO_RSA */ if (0) ; #endif #ifndef OPENSSL_NO_DH else if (alg_k & SSL_kEDH) { if ((dh=DH_new()) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_DH_LIB); goto err; } n2s(p,i); param_len=i+2; if (param_len > n) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_P_LENGTH); goto f_err; } if (!(dh->p=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); goto err; } p+=i; n2s(p,i); param_len+=i+2; if (param_len > n) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_G_LENGTH); goto f_err; } if (!(dh->g=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); goto err; } p+=i; n2s(p,i); param_len+=i+2; if (param_len > n) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_PUB_KEY_LENGTH); goto f_err; } if (!(dh->pub_key=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); goto err; } p+=i; n-=param_len; #ifndef OPENSSL_NO_RSA if (alg_a & SSL_aRSA) pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509); #else if (0) ; #endif #ifndef OPENSSL_NO_DSA else if (alg_a & SSL_aDSS) pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_DSA_SIGN].x509); #endif /* else anonymous DH, so no certificate or pkey. */ s->session->sess_cert->peer_dh_tmp=dh; dh=NULL; } else if ((alg_k & SSL_kDHr) || (alg_k & SSL_kDHd)) { al=SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_TRIED_TO_USE_UNSUPPORTED_CIPHER); goto f_err; } #endif /* !OPENSSL_NO_DH */ #ifndef OPENSSL_NO_ECDH else if (alg_k & SSL_kEECDH) { EC_GROUP *ngroup; const EC_GROUP *group; if ((ecdh=EC_KEY_new()) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE); goto err; } /* Extract elliptic curve parameters and the * server's ephemeral ECDH public key. * Keep accumulating lengths of various components in * param_len and make sure it never exceeds n. */ /* XXX: For now we only support named (not generic) curves * and the ECParameters in this case is just three bytes. 
*/ param_len=3; if ((param_len > n) || (*p != NAMED_CURVE_TYPE) || ((curve_nid = tls1_ec_curve_id2nid(*(p + 2))) == 0)) { al=SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS); goto f_err; } ngroup = EC_GROUP_new_by_curve_name(curve_nid); if (ngroup == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_EC_LIB); goto err; } if (EC_KEY_set_group(ecdh, ngroup) == 0) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_EC_LIB); goto err; } EC_GROUP_free(ngroup); group = EC_KEY_get0_group(ecdh); if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) && (EC_GROUP_get_degree(group) > 163)) { al=SSL_AD_EXPORT_RESTRICTION; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER); goto f_err; } p+=3; /* Next, get the encoded ECPoint */ if (((srvr_ecpoint = EC_POINT_new(group)) == NULL) || ((bn_ctx = BN_CTX_new()) == NULL)) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE); goto err; } encoded_pt_len = *p; /* length of encoded point */ p+=1; param_len += (1 + encoded_pt_len); if ((param_len > n) || (EC_POINT_oct2point(group, srvr_ecpoint, p, encoded_pt_len, bn_ctx) == 0)) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_ECPOINT); goto f_err; } n-=param_len; p+=encoded_pt_len; /* The ECC/TLS specification does not mention * the use of DSA to sign ECParameters in the server * key exchange message. We do support RSA and ECDSA. */ if (0) ; #ifndef OPENSSL_NO_RSA else if (alg_a & SSL_aRSA) pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509); #endif #ifndef OPENSSL_NO_ECDSA else if (alg_a & SSL_aECDSA) pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_ECC].x509); #endif /* else anonymous ECDH, so no certificate or pkey. */ EC_KEY_set_public_key(ecdh, srvr_ecpoint); s->session->sess_cert->peer_ecdh_tmp=ecdh; ecdh=NULL; BN_CTX_free(bn_ctx); bn_ctx = NULL; EC_POINT_free(srvr_ecpoint); srvr_ecpoint = NULL; } else if (alg_k) { al=SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE); goto f_err; } #endif /* !OPENSSL_NO_ECDH */ /* p points to the next byte, there are 'n' bytes left */ /* if it was signed, check the signature */ if (pkey != NULL) { n2s(p,i); n-=2; j=EVP_PKEY_size(pkey); if ((i != n) || (n > j) || (n <= 0)) { /* wrong packet length */ al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_WRONG_SIGNATURE_LENGTH); goto f_err; } #ifndef OPENSSL_NO_RSA if (pkey->type == EVP_PKEY_RSA) { int num; j=0; q=md_buf; for (num=2; num > 0; num--) { EVP_DigestInit_ex(&md_ctx,(num == 2) ?s->ctx->md5:s->ctx->sha1, NULL); EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE); EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE); EVP_DigestUpdate(&md_ctx,param,param_len); EVP_DigestFinal_ex(&md_ctx,q,(unsigned int *)&i); q+=i; j+=i; } i=RSA_verify(NID_md5_sha1, md_buf, j, p, n, pkey->pkey.rsa); if (i < 0) { al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_DECRYPT); goto f_err; } if (i == 0) { /* bad signature */ al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE); goto f_err; } } else #endif #ifndef OPENSSL_NO_DSA if (pkey->type == EVP_PKEY_DSA) { /* lets do DSS */ EVP_VerifyInit_ex(&md_ctx,EVP_dss1(), NULL); EVP_VerifyUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE); EVP_VerifyUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE); EVP_VerifyUpdate(&md_ctx,param,param_len); if (EVP_VerifyFinal(&md_ctx,p,(int)n,pkey) <= 0) { /* bad signature */ 
al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE); goto f_err; } } else #endif #ifndef OPENSSL_NO_ECDSA if (pkey->type == EVP_PKEY_EC) { /* let's do ECDSA */ EVP_VerifyInit_ex(&md_ctx,EVP_ecdsa(), NULL); EVP_VerifyUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE); EVP_VerifyUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE); EVP_VerifyUpdate(&md_ctx,param,param_len); if (EVP_VerifyFinal(&md_ctx,p,(int)n,pkey) <= 0) { /* bad signature */ al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE); goto f_err; } } else #endif { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR); goto err; } } else { if (!(alg_a & SSL_aNULL) && !(alg_k & SSL_kPSK)) /* aNULL or kPSK do not need public keys */ { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR); goto err; } /* still data left over */ if (n != 0) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_EXTRA_DATA_IN_MESSAGE); goto f_err; } } EVP_PKEY_free(pkey); EVP_MD_CTX_cleanup(&md_ctx); return(1); f_err: ssl3_send_alert(s,SSL3_AL_FATAL,al); err: EVP_PKEY_free(pkey); #ifndef OPENSSL_NO_RSA if (rsa != NULL) RSA_free(rsa); #endif #ifndef OPENSSL_NO_DH if (dh != NULL) DH_free(dh); #endif #ifndef OPENSSL_NO_ECDH BN_CTX_free(bn_ctx); EC_POINT_free(srvr_ecpoint); if (ecdh != NULL) EC_KEY_free(ecdh); #endif EVP_MD_CTX_cleanup(&md_ctx); return(-1); }
Vulnerable
[]
openssl
edc032b5e3f3ebb1006a9c89e0ae00504f47966f
2.315731255756958e+38
509
Add SRP support.
1
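The ssl3_get_key_exchange fragment above leans on one discipline throughout: accumulate param_len, compare it against the remaining byte count n before touching the data, and finally reject any message that has bytes left over (n != 0) or a signature whose declared length disagrees with what remains. Below is a hedged, self-contained C++ sketch of that same bounds-checked wire-parsing pattern; WireReader and parse_server_ecdh are illustrative names, not OpenSSL API, and the field layout is simplified.

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Minimal cursor over a received handshake body.  Every read checks the
// remaining length first, mirroring the "param_len > n" guards above.
struct WireReader {
    const uint8_t* p;
    size_t n;                          // bytes still unread

    std::optional<uint8_t> u8() {
        if (n < 1) return std::nullopt;
        uint8_t v = p[0]; p += 1; n -= 1; return v;
    }
    std::optional<uint16_t> u16() {    // network byte order, like n2s()
        if (n < 2) return std::nullopt;
        uint16_t v = (uint16_t(p[0]) << 8) | p[1]; p += 2; n -= 2; return v;
    }
    std::optional<std::vector<uint8_t>> bytes(size_t len) {
        if (n < len) return std::nullopt;
        std::vector<uint8_t> out(p, p + len); p += len; n -= len; return out;
    }
};

// Parse a ServerKeyExchange-like blob (simplified layout):
// curve_type(1) curve_id(2) point_len(1) point(point_len) sig_len(2) sig(sig_len)
bool parse_server_ecdh(const uint8_t* buf, size_t len) {
    WireReader r{buf, len};
    auto curve_type = r.u8();
    auto curve_id   = r.u16();
    auto point_len  = r.u8();
    if (!curve_type || !curve_id || !point_len) return false;
    auto point = r.bytes(*point_len);
    if (!point) return false;          // encoded point truncated
    auto sig_len = r.u16();
    if (!sig_len) return false;
    auto sig = r.bytes(*sig_len);
    if (!sig) return false;            // signature truncated
    return r.n == 0;                   // reject trailing garbage, like the n != 0 check
}

The design point is that every read either succeeds with exactly the bytes it asked for or fails before any out-of-bounds access, which is what the param_len > n guards above are doing by hand.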
runChecks(T& source,bool reduceMemory,bool reduceTime) { // // multipart test: also grab the type of the first part to // check which other tests are expected to fail // string firstPartType; bool threw = false; { try { MultiPartInputFile multi(source); firstPartType = multi.header(0).type(); threw = readMultiPart(multi , reduceMemory , reduceTime); } catch(...) { threw = true; } } { bool gotThrow = false; resetInput(source); try { RgbaInputFile rgba(source); gotThrow = readRgba( rgba, reduceMemory , reduceTime ); } catch(...) { gotThrow = true; } if (gotThrow && firstPartType != DEEPTILE) { threw = true; } } { bool gotThrow = false; resetInput(source); try { InputFile rgba(source); gotThrow = readScanline( rgba, reduceMemory , reduceTime ); } catch(...) { gotThrow = true; } if (gotThrow && firstPartType != DEEPTILE) { threw = true; } } { bool gotThrow = false; resetInput(source); try { TiledInputFile rgba(source); gotThrow = readTile( rgba, reduceMemory , reduceTime ); } catch(...) { gotThrow = true; } if (gotThrow && firstPartType == TILEDIMAGE) { threw = true; } } { bool gotThrow = false; resetInput(source); try { DeepScanLineInputFile rgba(source); gotThrow = readDeepScanLine( rgba, reduceMemory , reduceTime ); } catch(...) { gotThrow = true; } if (gotThrow && firstPartType == DEEPSCANLINE) { threw = true; } } { bool gotThrow = false; resetInput(source); try { DeepTiledInputFile rgba(source); gotThrow = readDeepTile( rgba, reduceMemory , reduceTime ); } catch(...) { gotThrow = true; } if (gotThrow && firstPartType == DEEPTILE) { threw = true; } } return threw; }
Safe
[ "CWE-787" ]
openexr
ae6d203892cc9311917a7f4f05354ef792b3e58e
2.8030878906053563e+38
111
Handle xsampling and bad seekg() calls in exrcheck (#872) * fix exrcheck xsampling!=1 Signed-off-by: Peter Hillman <peterh@wetafx.co.nz> * fix handling bad seekg() calls in exrcheck Signed-off-by: Peter Hillman <peterh@wetafx.co.nz> * fix deeptile detection in multipart files Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
0
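The runChecks routine above re-reads the same source with several reader types, calling a resetInput() helper between attempts, and the commit message also mentions handling bad seekg() calls. That helper is not shown, so the following is only an assumed sketch of what safely rewinding a standard C++ stream involves: clear the error state first (seekg on a stream that is already in a failed state is a no-op) and check that the seek itself succeeded.

#include <fstream>
#include <iostream>

// Hypothetical stand-in for a resetInput()-style helper: rewind a stream
// between read attempts.  A previous failed read usually leaves failbit and
// eofbit set, so the state must be cleared before seeking, and the result of
// the seek must be checked rather than assumed.
bool rewind_stream(std::istream& in) {
    in.clear();                       // drop failbit/eofbit from the last attempt
    in.seekg(0, std::ios::beg);       // back to the start of the file
    return static_cast<bool>(in);     // false if the seek itself failed
}

int main(int argc, char** argv) {
    if (argc < 2) return 1;
    std::ifstream f(argv[1], std::ios::binary);
    char c;
    while (f.get(c)) {}               // read to EOF: eofbit and failbit are now set
    if (!rewind_stream(f)) {
        std::cerr << "could not rewind input\n";
        return 1;
    }
    return f.get(c) ? 0 : 1;          // second pass works because state was cleared
}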
flatpak_proxy_set_log_messages (FlatpakProxy *proxy, gboolean log) { proxy->log_messages = log; }
Safe
[ "CWE-284", "CWE-436" ]
flatpak
52346bf187b5a7f1c0fe9075b328b7ad6abe78f6
3.3826531675416623e+38
5
Fix vulnerability in dbus proxy During the authentication all client data is directly forwarded to the dbus daemon as is, until we detect the BEGIN command after which we start filtering the binary dbus protocol. Unfortunately the detection of the BEGIN command in the proxy did not exactly match the detection in the dbus daemon. A BEGIN followed by a space or tab was considered ok in the daemon but not by the proxy. This could be exploited to send arbitrary dbus messages to the host, which can be used to break out of the sandbox. This was noticed by Gabriel Campana of The Google Security Team. This fix makes the detection of the authentication phase end match the dbus code. In addition we duplicate the authentication line validation from dbus, which includes ensuring all data is ASCII, and limiting the size of a line to 16k. In fact, we add some extra stringent checks, disallowing ASCII control chars and requiring that auth lines start with a capital letter.
0
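The flatpak commit message above spells out the stricter authentication-line rules: lines are ASCII with no control characters, at most 16k long, must start with a capital letter, and the authentication phase ends at BEGIN followed by end-of-line, space, or tab. The sketch below restates those rules as standalone C++ checks; auth_line_ok and is_begin_line are illustrative names, not flatpak's internal functions, and details of the real proxy (line splitting, CRLF handling) are omitted.

#include <cstddef>
#include <string>

// Auth-line rules as described in the commit message: ASCII only, no control
// characters, at most 16 KiB, and the line must start with a capital letter.
constexpr size_t kMaxAuthLine = 16 * 1024;

bool auth_line_ok(const std::string& line) {
    if (line.empty() || line.size() > kMaxAuthLine) return false;
    if (!(line[0] >= 'A' && line[0] <= 'Z')) return false;   // must start with a capital
    for (unsigned char c : line) {
        if (c > 0x7f) return false;                           // non-ASCII
        if (c < 0x20) return false;                           // control characters
    }
    return true;
}

// End of the auth phase: "BEGIN" alone, or "BEGIN" followed by a space or
// tab -- matching the dbus daemon rather than requiring "BEGIN" exactly.
bool is_begin_line(const std::string& line) {
    if (line.compare(0, 5, "BEGIN") != 0) return false;
    return line.size() == 5 || line[5] == ' ' || line[5] == '\t';
}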
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); int buffer_size; int ret = 0; struct sk_buff *skb = NULL; int noblock; long timeo; caif_assert(cf_sk); ret = sock_error(sk); if (ret) goto err; ret = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto err; ret = -EOPNOTSUPP; if (msg->msg_namelen) goto err; ret = -EINVAL; if (unlikely(msg->msg_iov->iov_base == NULL)) goto err; noblock = msg->msg_flags & MSG_DONTWAIT; timeo = sock_sndtimeo(sk, noblock); timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk), 1, timeo, &ret); if (ret) goto err; ret = -EPIPE; if (cf_sk->sk.sk_state != CAIF_CONNECTED || sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) goto err; /* Error if trying to write more than maximum frame size. */ ret = -EMSGSIZE; if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM) goto err; buffer_size = len + cf_sk->headroom + cf_sk->tailroom; ret = -ENOMEM; skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret); if (!skb || skb_tailroom(skb) < buffer_size) goto err; skb_reserve(skb, cf_sk->headroom); ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (ret) goto err; ret = transmit_skb(skb, cf_sk, noblock, timeo); if (ret < 0) /* skb is already freed */ return ret; return len; err: kfree_skb(skb); return ret; }
Safe
[ "CWE-200" ]
linux
2d6fbfe733f35c6b355c216644e08e149c61b271
9.324009705205202e+37
69
caif: Fix missing msg_namelen update in caif_seqpkt_recvmsg() The current code does not fill the msg_name member in case it is set. It also does not set the msg_namelen member to 0 and therefore makes net/socket.c leak the local, uninitialized sockaddr_storage variable to userland -- 128 bytes of kernel stack memory. Fix that by simply setting msg_namelen to 0 as obviously nobody cared about caif_seqpkt_recvmsg() not filling the msg_name in case it was set. Cc: Sjur Braendeland <sjur.brandeland@stericsson.com> Signed-off-by: Mathias Krause <minipli@googlemail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
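The caif commit message above describes the general recvmsg contract: a handler that does not fill msg_name must set msg_namelen to 0, otherwise the generic socket layer copies an uninitialized sockaddr_storage back to userspace. The userspace-style sketch below illustrates that contract in miniature; recv_like and fake_packet are made-up names and this is not kernel code.

#include <cstring>
#include <sys/socket.h>   // sockaddr_storage, socklen_t (POSIX)
#include <sys/types.h>    // ssize_t

// A receive path gets a caller-provided address buffer plus a length
// out-parameter.  If the protocol has no address to report, it must set
// *addr_len = 0; leaving it untouched lets the caller copy however many
// bytes it originally asked for -- i.e. stale memory.
struct fake_packet { const char* data; size_t len; };

ssize_t recv_like(const fake_packet& pkt,
                  void* buf, size_t buflen,
                  sockaddr_storage* /*addr*/, socklen_t* addr_len) {
    // This protocol is connection-oriented and has no per-packet source
    // address, so say so explicitly instead of leaving *addr_len as-is.
    if (addr_len)
        *addr_len = 0;

    size_t n = pkt.len < buflen ? pkt.len : buflen;
    std::memcpy(buf, pkt.data, n);
    return static_cast<ssize_t>(n);
}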
cmsHPROFILE CMSEXPORT cmsOpenProfileFromFile(const char *ICCProfile, const char *sAccess) { return cmsOpenProfileFromFileTHR(NULL, ICCProfile, sAccess); }
Safe
[]
Little-CMS
d2d902b9a03583ae482c782b2f243f7e5268a47d
1.827548892858446e+38
4
Changes from Richard Hughes
0
untrusted_launcher_response_callback (GtkDialog *dialog, int response_id, ActivateParametersDesktop *parameters) { GdkScreen *screen; char *uri; GFile *file; switch (response_id) { case RESPONSE_RUN: screen = gtk_widget_get_screen (GTK_WIDGET (parameters->parent_window)); uri = nautilus_file_get_uri (parameters->file); nautilus_debug_log (FALSE, NAUTILUS_DEBUG_LOG_DOMAIN_USER, "directory view activate_callback launch_desktop_file window=%p: %s", parameters->parent_window, uri); nautilus_launch_desktop_file (screen, uri, NULL, parameters->parent_window); g_free (uri); break; case RESPONSE_MARK_TRUSTED: file = nautilus_file_get_location (parameters->file); nautilus_file_mark_desktop_file_trusted (file, parameters->parent_window, NULL, NULL); g_object_unref (file); break; default: /* Just destroy dialog */ break; } gtk_widget_destroy (GTK_WIDGET (dialog)); activate_parameters_desktop_free (parameters); }
Vulnerable
[]
nautilus
1e1c916f5537eb5e4144950f291f4a3962fc2395
2.8053566011079443e+38
34
Add "interactive" argument to nautilus_file_mark_desktop_file_trusted. 2009-02-24 Alexander Larsson <alexl@redhat.com> * libnautilus-private/nautilus-file-operations.c: * libnautilus-private/nautilus-file-operations.h: * libnautilus-private/nautilus-mime-actions.c: Add "interactive" argument to nautilus_file_mark_desktop_file_trusted. * src/nautilus-application.c: Mark all desktopfiles on the desktop trusted on first run. svn path=/trunk/; revision=15009
1
const file_tree_checksum& data_tree_checksum(bool reset) { static file_tree_checksum checksum; if (reset) checksum.reset(); if(checksum.nfiles == 0) { get_file_tree_checksum_internal("data/",checksum); get_file_tree_checksum_internal(get_user_data_dir() + "/data/",checksum); LOG_FS << "calculated data tree checksum: " << checksum.nfiles << " files; " << checksum.sum_size << " bytes\n"; } return checksum; }
Safe
[ "CWE-200" ]
wesnoth
af61f9fdd15cd439da9e2fe5fa39d174c923eaae
1.3963389879930036e+38
15
fs: Use game data path to resolve ./ in the absence of a current_dir Fixes a file content disclosure bug (#22042) affecting functionality relying on the get_wml_location() function and not passing a non-empty value for the current_dir parameter. See <https://gna.org/bugs/?22042> for details. This is a candidate for the 1.10 and 1.12 branches. (Backported from master, commit 314425ab0e57b32909d324f7d4bf213d62cbd3b5.)
0
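The wesnoth commit message above describes the policy fix: a "./"-relative WML path must be resolved against the game data directory when no current_dir is supplied, rather than against whatever directory the process happens to be in. A possible shape of that policy, sketched with std::filesystem under assumed names (resolve_wml_path, game_data_dir), might look like this:

#include <filesystem>
#include <string>

namespace fs = std::filesystem;

// When a path starts with "./" and no current directory is given, resolve it
// against the game data directory instead of the process CWD; otherwise treat
// it as data-relative.  This is an illustration, not Wesnoth's real API.
fs::path resolve_wml_path(const std::string& file,
                          const fs::path& game_data_dir,
                          const fs::path& current_dir = {}) {
    if (file.rfind("./", 0) == 0) {                      // "./"-relative reference
        const fs::path base =
            current_dir.empty() ? game_data_dir : current_dir;
        return fs::weakly_canonical(base / file.substr(2));
    }
    return fs::weakly_canonical(game_data_dir / file);   // plain data-relative path
}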
static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func, unsigned dpl, unsigned ist, unsigned seg) { gate->offset_low = PTR_LOW(func); gate->segment = __KERNEL_CS; gate->ist = ist; gate->p = 1; gate->dpl = dpl; gate->zero0 = 0; gate->zero1 = 0; gate->type = type; gate->offset_middle = PTR_MIDDLE(func); gate->offset_high = PTR_HIGH(func); }
Safe
[ "CWE-119" ]
linux-2.6
5ac37f87ff18843aabab84cf75b2f8504c2d81fe
6.438200334983473e+36
14
x86: fix ldt limit for 64 bit Fix size of LDT entries. On x86-64, ldt_desc is a double-sized descriptor. Signed-off-by: Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
0
inline void StreamResource::EmitRead(ssize_t nread, const uv_buf_t& buf) { #ifdef DEBUG v8::SealHandleScope handle_scope(v8::Isolate::GetCurrent()); #endif if (nread > 0) bytes_read_ += static_cast<uint64_t>(nread); listener_->OnStreamRead(nread, buf); }
Safe
[ "CWE-416" ]
node
7f178663ebffc82c9f8a5a1b6bf2da0c263a30ed
1.9166618359088537e+38
8
src: use unique_ptr for WriteWrap This commit attempts to avoid a use-after-free error by using unique_ptr and passing a reference to it. CVE-ID: CVE-2020-8265 Fixes: https://github.com/nodejs-private/node-private/issues/227 PR-URL: https://github.com/nodejs-private/node-private/pull/238 Reviewed-By: Michael Dawson <midawson@redhat.com> Reviewed-By: Tobias Nießen <tniessen@tnie.de> Reviewed-By: Richard Lau <rlau@redhat.com>
0
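The node commit message above is about ownership, not about any one call site: holding the write request in a std::unique_ptr and passing a reference to that smart pointer makes it unambiguous whether the callee consumed the object, which is what prevents the use-after-free. The generic C++ sketch below shows the pattern with invented names (WriteRequest, submit_write); it is not Node's StreamBase/WriteWrap code.

#include <iostream>
#include <memory>
#include <string>

struct WriteRequest {
    std::string data;
    ~WriteRequest() { std::cout << "request destroyed\n"; }
};

// Instead of handing out a raw pointer that may or may not be deleted by the
// callee, pass a reference to the unique_ptr.  The callee can consume the
// object exactly once (reset()), and the caller can see afterwards whether it
// still owns anything -- no double delete, no dangling raw pointer.
// Returns true if the write completed synchronously (request consumed here).
bool submit_write(std::unique_ptr<WriteRequest>& req) {
    if (req->data.size() < 16) {      // pretend small writes finish immediately
        req.reset();                  // ownership ends here, visibly
        return true;
    }
    return false;                     // caller still owns req and must keep it alive
}

int main() {
    auto req = std::make_unique<WriteRequest>();
    req->data = "hello";
    bool done = submit_write(req);
    if (!done && req) {
        // asynchronous path: req is still alive and owned by this scope
    }
    std::cout << (done ? "sync" : "async") << "\n";
}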
int DCTStream::getChar() { if (current == limit) if (!readLine()) return EOF; return *current++; }
Safe
[ "CWE-787" ]
poppler
ae614bf8ab42c9d0c7ac57ecdfdcbcfc4ff6c639
1.7979855973277728e+38
8
Fix DCTStream::getChars we're reading past the buffer check I wonder how this had never crashed before :S Fixes #1011
0
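getChar above refills its buffer whenever current reaches limit; the companion getChars mentioned in the commit message has to apply the same check per chunk, recomputing how much data is really available after every refill so a bulk copy can never run past limit. The following is a self-contained sketch of that pattern with an in-memory refill; it is not Poppler's DCTStream implementation.

#include <algorithm>
#include <cstring>

// Chunked getChars() over the same current/limit buffering scheme as
// getChar() above.  The refill just copies from an in-memory source so the
// example is self-contained.
struct BufferedStream {
    const unsigned char* src;
    size_t srcLen;
    size_t srcPos = 0;

    unsigned char buf[64];
    unsigned char* current = buf;
    unsigned char* limit = buf;

    bool readLine() {                          // refill the internal buffer
        if (srcPos >= srcLen) return false;    // EOF
        size_t n = std::min(sizeof(buf), srcLen - srcPos);
        std::memcpy(buf, src + srcPos, n);
        srcPos += n;
        current = buf;
        limit = buf + n;
        return true;
    }

    int getChars(int nChars, unsigned char* out) {
        int copied = 0;
        while (copied < nChars) {
            if (current == limit && !readLine())
                break;                         // EOF: return what we have so far
            // Re-check how much really is in the buffer after every refill so
            // the memcpy below can never read past limit.
            int avail = static_cast<int>(limit - current);
            int n = std::min(avail, nChars - copied);
            std::memcpy(out + copied, current, static_cast<size_t>(n));
            current += n;
            copied += n;
        }
        return copied;
    }
};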
event_name2nr(char_u *start, char_u **end) { char_u *p; int i; int len; /* the event name ends with end of line, '|', a blank or a comma */ for (p = start; *p && !VIM_ISWHITE(*p) && *p != ',' && *p != '|'; ++p) ; for (i = 0; event_names[i].name != NULL; ++i) { len = (int)STRLEN(event_names[i].name); if (len == p - start && STRNICMP(event_names[i].name, start, len) == 0) break; } if (*p == ',') ++p; *end = p; if (event_names[i].name == NULL) return NUM_EVENTS; return event_names[i].event; }
Safe
[ "CWE-200", "CWE-668" ]
vim
5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8
2.2013092309580277e+38
22
patch 8.0.1263: others can read the swap file if a user is careless Problem: Others can read the swap file if a user is careless with his primary group. Solution: If the group permission allows for reading but the world permissions doesn't, make sure the group is right.
0
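The vim commit message above reduces to a small permission invariant on the swap file: if the group bit grants read while the world bit does not, the swap file's group must match the edited file's group, or the group bits have to be dropped. A POSIX-flavoured sketch of that check, with an assumed helper name fix_swapfile_group (not Vim's code), could look like this:

#include <sys/stat.h>
#include <unistd.h>

// If the swap file is group-readable but not world-readable, make sure its
// group matches the group of the file being edited; otherwise members of an
// unrelated primary group could read the swap file.
bool fix_swapfile_group(const char* orig_path, const char* swap_path) {
    struct stat orig{}, swp{};
    if (stat(orig_path, &orig) != 0 || stat(swap_path, &swp) != 0)
        return false;

    bool group_can_read = (swp.st_mode & S_IRGRP) != 0;
    bool world_can_read = (swp.st_mode & S_IROTH) != 0;

    if (group_can_read && !world_can_read && swp.st_gid != orig.st_gid) {
        // Try to give the swap file the same group as the edited file; if
        // that fails, drop the group permission bits entirely.
        if (chown(swap_path, static_cast<uid_t>(-1), orig.st_gid) != 0)
            return chmod(swap_path, (swp.st_mode & 0777) & ~S_IRWXG) == 0;
    }
    return true;
}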
write_direct_sig (KBNODE root, PKT_public_key *psk, struct revocation_key *revkey, u32 timestamp, const char *cache_nonce) { gpg_error_t err; PACKET *pkt; PKT_signature *sig; KBNODE node; PKT_public_key *pk; if (opt.verbose) log_info (_("writing direct signature\n")); /* Get the pk packet from the pub_tree. */ node = find_kbnode (root, PKT_PUBLIC_KEY); if (!node) BUG (); pk = node->pkt->pkt.public_key; /* We have to cache the key, so that the verification of the signature creation is able to retrieve the public key. */ cache_public_key (pk); /* Make the signature. */ err = make_keysig_packet (&sig, pk, NULL,NULL, psk, 0x1F, 0, timestamp, 0, keygen_add_revkey, revkey, cache_nonce); if (err) { log_error ("make_keysig_packet failed: %s\n", gpg_strerror (err) ); return err; } pkt = xmalloc_clear (sizeof *pkt); pkt->pkttype = PKT_SIGNATURE; pkt->pkt.signature = sig; add_kbnode (root, new_kbnode (pkt)); return err; }
Safe
[ "CWE-20" ]
gnupg
2183683bd633818dd031b090b5530951de76f392
1.9379766270834853e+38
39
Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <wk@gnupg.org>
0
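The gnupg commit message above is about a subtle C pitfall: building a 32-bit value with buf[0] << 24 promotes the byte to a signed int, so shifting into the sign bit is undefined in C (implementation-defined at best in older C++), and widening the now-negative int to a 64-bit unsigned type sign-extends. That is exactly what the buf16_/buf32_ helpers avoid by converting through unsigned types. The sketch below shows the problem and the shape of the fix; the body of buf32_to_u32 is an illustration, not GnuPG's host2net.h.

#include <cstdint>
#include <cstdio>

// Safe big-endian conversion: every byte goes through uint32_t before the
// shift, so no signed overflow and no sign extension can occur.
static inline uint32_t buf32_to_u32(const void* buffer) {
    const unsigned char* p = static_cast<const unsigned char*>(buffer);
    return (static_cast<uint32_t>(p[0]) << 24)
         | (static_cast<uint32_t>(p[1]) << 16)
         | (static_cast<uint32_t>(p[2]) << 8)
         |  static_cast<uint32_t>(p[3]);
}

int main() {
    const unsigned char buf[4] = {0xff, 0x00, 0x00, 0x01};

    // Risky pattern: buf[0] is promoted to a signed int before the shift.
    unsigned long bad = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];

    unsigned long good = buf32_to_u32(buf);

    // On a typical 64-bit platform 'bad' prints ffffffffff000001 because of
    // sign extension, while 'good' prints ff000001.
    std::printf("%lx\n%lx\n", bad, good);
}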
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_setattr *setattr = &u->setattr; __be32 status = nfs_ok; int err; if (setattr->sa_iattr.ia_valid & ATTR_SIZE) { status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, &setattr->sa_stateid, WR_STATE, NULL, NULL); if (status) { dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n"); return status; } } err = fh_want_write(&cstate->current_fh); if (err) return nfserrno(err); status = nfs_ok; status = check_attr_support(rqstp, cstate, setattr->sa_bmval, nfsd_attrmask); if (status) goto out; if (setattr->sa_acl != NULL) status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh, setattr->sa_acl); if (status) goto out; if (setattr->sa_label.len) status = nfsd4_set_nfs4_label(rqstp, &cstate->current_fh, &setattr->sa_label); if (status) goto out; status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr, 0, (time_t)0); out: fh_drop_write(&cstate->current_fh); return status; }
Safe
[ "CWE-476" ]
linux
01310bb7c9c98752cc763b36532fab028e0f8f81
7.06383245248059e+36
42
nfsd: COPY and CLONE operations require the saved filehandle to be set Make sure we have a saved filehandle, otherwise we'll oops with a null pointer dereference in nfs4_preprocess_stateid_op(). Signed-off-by: Scott Mayhew <smayhew@redhat.com> Cc: stable@vger.kernel.org Signed-off-by: J. Bruce Fields <bfields@redhat.com>
0
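The nfsd commit message above boils down to a precondition check: COPY and CLONE consume both the current and the saved filehandle, so the handler must verify the saved slot was populated (by a prior SAVEFH-style step) before handing it to code that dereferences it. The generic sketch below illustrates that guard with invented names (CompoundState, do_copy); it is not the kernel's nfsd code and the error value is arbitrary.

#include <cerrno>
#include <optional>
#include <string>

struct Handle { std::string path; };

struct CompoundState {
    std::optional<Handle> current;
    std::optional<Handle> saved;
};

// An operation that needs both handles must check that both slots are set,
// failing cleanly instead of dereferencing an empty one.
int do_copy(CompoundState& st) {
    if (!st.current || !st.saved)
        return -EINVAL;               // the missing guard: report an error up front
    // ... proceed using st.saved->path as the source and st.current->path as
    // the destination ...
    return 0;
}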
nautilus_file_get_date_as_string (NautilusFile *file, NautilusDateType date_type) { return nautilus_file_fit_date_as_string (file, date_type, 0, NULL, NULL, NULL); }
Safe
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
1.2149552363290464e+37
5
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <alexl@redhat.com> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
0