func
string | target
string | cwe
list | project
string | commit_id
string | hash
string | size
int64 | message
string | vul
int64 |
---|---|---|---|---|---|---|---|---|
/*
 * _cmsReadInputLUT
 *
 * Build the device -> PCS ("input direction") pipeline for the given
 * rendering intent.  Returns a newly allocated pipeline owned by the
 * caller, or NULL on error.
 */
cmsPipeline* _cmsReadInputLUT(cmsHPROFILE hProfile, int Intent)
{
    cmsTagTypeSignature OriginalType;
    cmsTagSignature tag16 = Device2PCS16[Intent];
    cmsTagSignature tagFloat = Device2PCSFloat[Intent];
    cmsContext ContextID = cmsGetProfileContextID(hProfile);

    // On named color, take the appropriate tag
    if (cmsGetDeviceClass(hProfile) == cmsSigNamedColorClass) {
        cmsPipeline* Lut;
        cmsNAMEDCOLORLIST* nc = (cmsNAMEDCOLORLIST*) cmsReadTag(hProfile, cmsSigNamedColor2Tag);

        if (nc == NULL) return NULL;

        Lut = cmsPipelineAlloc(ContextID, 0, 0);
        if (Lut == NULL) {
            cmsFreeNamedColorList(nc);
            return NULL;
        }

        // NOTE(review): _cmsStageAllocNamedColor()/_cmsStageAllocLabV2ToV4()
        // may return NULL on allocation failure; this relies on
        // cmsPipelineInsertStage() rejecting a NULL stage and returning
        // FALSE -- confirm against the lcms version in use.
        if (!cmsPipelineInsertStage(Lut, cmsAT_BEGIN, _cmsStageAllocNamedColor(nc, TRUE)) ||
            !cmsPipelineInsertStage(Lut, cmsAT_END, _cmsStageAllocLabV2ToV4(ContextID))) {
            cmsPipelineFree(Lut);
            return NULL;
        }
        return Lut;
    }

    if (cmsIsTag(hProfile, tagFloat)) { // Float tag takes precedence
        // Floating point LUT are always V4, but the encoding range is no
        // longer 0..1.0, so we need to add an stage depending on the color space
        return _cmsReadFloatInputTag(hProfile, tagFloat);
    }

    // Revert to perceptual if no tag is found
    if (!cmsIsTag(hProfile, tag16)) {
        tag16 = Device2PCS16[0];
    }

    if (cmsIsTag(hProfile, tag16)) { // Is there any LUT-Based table?
        // Check profile version and LUT type. Do the necessary adjustments if needed
        // First read the tag
        cmsPipeline* Lut = (cmsPipeline*) cmsReadTag(hProfile, tag16);
        if (Lut == NULL) return NULL;

        // After reading it, we have now info about the original type
        OriginalType = _cmsGetTagTrueType(hProfile, tag16);

        // The profile owns the Lut, so we need to copy it
        // NOTE(review): cmsPipelineDup() may return NULL; callers appear to
        // tolerate a NULL return from this function, so that is propagated.
        Lut = cmsPipelineDup(Lut);

        // We need to adjust data only for Lab16 on output
        if (OriginalType != cmsSigLut16Type || cmsGetPCS(hProfile) != cmsSigLabData)
            return Lut;

        // If the input is Lab, add also a conversion at the begin
        if (cmsGetColorSpace(hProfile) == cmsSigLabData &&
            !cmsPipelineInsertStage(Lut, cmsAT_BEGIN, _cmsStageAllocLabV4ToV2(ContextID)))
            goto Error;

        // Add a matrix for conversion V2 to V4 Lab PCS
        if (!cmsPipelineInsertStage(Lut, cmsAT_END, _cmsStageAllocLabV2ToV4(ContextID)))
            goto Error;

        return Lut;

    Error:
        cmsPipelineFree(Lut);
        return NULL;
    }

    // Lut was not found, try to create a matrix-shaper
    // Check if this is a grayscale profile.
    if (cmsGetColorSpace(hProfile) == cmsSigGrayData) {
        // if so, build appropriate conversion tables.
        // The tables are the PCS illuminant, scaled across GrayTRC
        return BuildGrayInputMatrixPipeline(hProfile);
    }

    // Not gray, create a normal matrix-shaper
    return BuildRGBInputMatrixShaper(hProfile);
}
|
Safe
|
[] |
Little-CMS
|
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
|
2.6078647034730237e+38
| 87 |
Memory squeezing fix: lcms2 cmsPipeline construction
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope.
| 0 |
/* Create a zend object for an "incomplete class" placeholder: a plain
 * object whose handler table routes member access through the
 * incomplete-object handlers. */
static zend_object_value php_create_incomplete_object(zend_class_entry *class_type TSRMLS_DC)
{
    zend_object *obj;
    zend_object_value retval;

    /* Allocate the underlying zend object for this class. */
    retval = zend_objects_new(&obj, class_type TSRMLS_CC);

    /* Initialize declared properties before handing the object out. */
    object_properties_init(obj, class_type);

    /* All accesses go through the incomplete-object handler table. */
    retval.handlers = &php_incomplete_object_handlers;

    return retval;
}
|
Safe
|
[] |
php-src
|
fb83c76deec58f1fab17c350f04c9f042e5977d1
|
2.429555700943264e+38
| 12 |
Check that the type is correct
| 0 |
// Integration test: a decoder filter that returns StopAllIterationAndBuffer
// from decodeHeaders() must still deliver the complete request (headers,
// buffered data, and added data) to the upstream once iteration resumes.
TEST_P(DownstreamProtocolIntegrationTest, TestDecodeHeadersReturnsStopAll) {
  // Filter chain (applied in reverse of addFilter order): passthrough ->
  // stop-all-on-headers -> call-decodedata-once.
  config_helper_.addFilter(R"EOF(
name: call-decodedata-once-filter
)EOF");
  config_helper_.addFilter(R"EOF(
name: decode-headers-return-stop-all-filter
)EOF");
  config_helper_.addFilter(R"EOF(
name: passthrough-filter
)EOF");

  initialize();
  codec_client_ = makeHttpConnection(lookupPort("http"));

  // Sends a request with headers and data.
  changeHeadersForStopAllTests(default_request_headers_, false);
  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);
  request_encoder_ = &encoder_decoder.first;
  auto response = std::move(encoder_decoder.second);
  // Send count_ - 1 chunks, then one final chunk with end_stream after the
  // sleep below so buffering while stopped is exercised.
  for (int i = 0; i < count_ - 1; i++) {
    codec_client_->sendData(*request_encoder_, size_, false);
  }
  // Sleeps for 1s in order to be consistent with testDecodeHeadersReturnsStopAllWatermark.
  absl::SleepFor(absl::Seconds(1));
  codec_client_->sendData(*request_encoder_, size_, true);
  waitForNextUpstreamRequest();
  upstream_request_->encodeHeaders(default_response_headers_, true);
  response->waitForEndStream();
  ASSERT_TRUE(response->complete());
  // Upstream must see every sent byte plus whatever the filter injected.
  EXPECT_EQ(count_ * size_ + added_decoded_data_size_, upstream_request_->bodyLength());
  EXPECT_EQ(true, upstream_request_->complete());

  // Sends a request with headers, data, and trailers.
  auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_);
  request_encoder_ = &encoder_decoder_2.first;
  response = std::move(encoder_decoder_2.second);
  for (int i = 0; i < count_; i++) {
    codec_client_->sendData(*request_encoder_, size_, false);
  }
  Http::TestRequestTrailerMapImpl request_trailers{{"trailer", "trailer"}};
  codec_client_->sendTrailers(*request_encoder_, request_trailers);
  waitForNextUpstreamRequest();
  upstream_request_->encodeHeaders(default_response_headers_, true);
  response->waitForEndStream();
  verifyUpStreamRequestAfterStopAllFilter();
}
|
Safe
|
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
|
2.080265980260209e+38
| 48 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <avd@google.com>
| 0 |
/*
 * SFTP server handler for SSH2_FXP_READLINK: read the path from the
 * input queue, resolve the symlink target with readlink(2), and reply
 * with either a single name entry or a portable error status.
 */
process_readlink(u_int32_t id)
{
	int r, len;
	char buf[PATH_MAX];
	char *path;

	/* Pull the request path off the input buffer; fatal on a malformed packet. */
	if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0)
		fatal("%s: buffer error: %s", __func__, ssh_err(r));
	debug3("request %u: readlink", id);
	verbose("readlink \"%s\"", path);
	/* Reserve one byte so the result can always be NUL-terminated. */
	if ((len = readlink(path, buf, sizeof(buf) - 1)) == -1)
		send_status(id, errno_to_portable(errno));
	else {
		Stat s;

		/* readlink() does not terminate the buffer; do it here. */
		buf[len] = '\0';
		attrib_clear(&s.attrib);
		s.name = s.long_name = buf;
		send_names(id, 1, &s);
	}
	free(path);
}
|
Safe
|
[
"CWE-732",
"CWE-703",
"CWE-269"
] |
src
|
a6981567e8e215acc1ef690c8dbb30f2d9b00a19
|
9.56048225792069e+37
| 23 |
disallow creation (of empty files) in read-only mode; reported by
Michal Zalewski, feedback & ok deraadt@
| 0 |
/**
 * xmlSchemaSetParserErrors:
 * @ctxt:  a schema parser context (may be NULL; then this is a no-op)
 * @err:   the error callback
 * @warn:  the warning callback
 * @ctx:   the user data passed to the callbacks
 *
 * Install error/warning callbacks on the parser context and mirror
 * them onto its embedded validation context, if one exists.
 */
xmlSchemaSetParserErrors(xmlSchemaParserCtxtPtr ctxt,
                         xmlSchemaValidityErrorFunc err,
                         xmlSchemaValidityWarningFunc warn, void *ctx)
{
    if (ctxt != NULL) {
        ctxt->error = err;
        ctxt->warning = warn;
        ctxt->errCtxt = ctx;
        /* Keep an already-created validation context in sync. */
        if (ctxt->vctxt != NULL)
            xmlSchemaSetValidErrors(ctxt->vctxt, err, warn, ctx);
    }
}
|
Safe
|
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
|
1.693701445839571e+38
| 12 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
| 0 |
/* Look up a loaded binary file by its file name.
 * Returns the matching RBinFile, or NULL if bin/name is NULL or no
 * entry with that exact name exists. */
R_API RBinFile *r_bin_file_find_by_name(RBin *bin, const char *name) {
	RListIter *it;
	RBinFile *cur;

	if (!bin || !name) {
		return NULL;
	}
	r_list_foreach (bin->binfiles, it, cur) {
		/* Skip entries without a file name; compare exactly. */
		if (cur && cur->file && strcmp (cur->file, name) == 0) {
			return cur;
		}
	}
	return NULL;
}
|
Safe
|
[
"CWE-125"
] |
radare2
|
3fcf41ed96ffa25b38029449520c8d0a198745f3
|
8.739191424685022e+37
| 14 |
Fix #9902 - Fix oobread in RBin.string_scan_range
| 0 |
/// V8 binding: rawRequestBody(req) -> Buffer | undefined.
/// Extracts the raw request payload from the native request object hidden
/// behind the "internals" external property, copying it into a V8 buffer.
/// VPack-encoded HTTP bodies are converted to their JSON text first.
static void JS_RawRequestBody(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_V8_TRY_CATCH_BEGIN(isolate);
  v8::HandleScope scope(isolate);
  auto context = TRI_IGETC;

  if (args.Length() != 1) {
    TRI_V8_THROW_EXCEPTION_USAGE("rawRequestBody(req)");
  }

  v8::Handle<v8::Value> current = args[0];
  if (current->IsObject()) {
    v8::Handle<v8::Object> obj = v8::Handle<v8::Object>::Cast(current);
    // The native request is stashed as a v8::External under "internals".
    v8::Handle<v8::Value> property =
        obj->Get(context, TRI_V8_ASCII_STRING(isolate, "internals")).FromMaybe(v8::Local<v8::Value>());
    if (property->IsExternal()) {
      v8::Handle<v8::External> e = v8::Handle<v8::External>::Cast(property);
      GeneralRequest* request = static_cast<GeneralRequest*>(e->Value());

      switch (request->transportType()) {
        case Endpoint::TransportType::HTTP: {
          auto httpRequest = static_cast<arangodb::HttpRequest*>(e->Value());
          if (httpRequest != nullptr) {
            V8Buffer* buffer;
            if (rest::ContentType::VPACK == request->contentType()) {
              // VPack body: serialize to JSON text before exposing to JS.
              // The buffer copies bodyStr, so the temporary is safe.
              VPackSlice slice = request->payload();
              std::string bodyStr = slice.toJson();
              buffer = V8Buffer::New(isolate, bodyStr.c_str(), bodyStr.size());
            } else {
              // Any other content type: hand out the raw bytes as-is.
              std::string_view raw = httpRequest->rawPayload();
              buffer = V8Buffer::New(isolate, raw.data(), raw.size());
            }
            v8::Local<v8::Object> bufferObject =
                v8::Local<v8::Object>::New(isolate, buffer->_handle);
            TRI_V8_RETURN(bufferObject);
          }
        } break;

        case Endpoint::TransportType::VST: {
          // VST transport: the payload is always exposed raw.
          if (request != nullptr) {
            std::string_view raw = request->rawPayload();
            V8Buffer* buffer = V8Buffer::New(isolate, raw.data(), raw.size());
            v8::Local<v8::Object> bufferObject =
                v8::Local<v8::Object>::New(isolate, buffer->_handle);
            TRI_V8_RETURN(bufferObject);
          }
        } break;
      }
    }
  }

  // VPackSlice slice(data);
  // v8::Handle<v8::Value> result = TRI_VPackToV8(isolate, slice);

  // Not an object / no usable internals: return undefined.
  TRI_V8_RETURN_UNDEFINED();
  TRI_V8_TRY_CATCH_END
}
|
Safe
|
[
"CWE-918"
] |
arangodb
|
d9b7f019d2435f107b19a59190bf9cc27d5f34dd
|
1.950070077231796e+37
| 57 |
[APM-78] Disable installation from remote URL (#15292)
| 0 |
/*
 * Render a connected socket pair as a human-readable description,
 * e.g. "tcp:1.2.3.4:80,server <-> 5.6.7.8:1234" or "unix:/path".
 * Returns a newly allocated string the caller must g_free().
 *
 * @ss/@ss_len: local (self) address; @ps/@ps_len: peer address.
 */
static char *sockaddr_to_str(struct sockaddr_storage *ss, socklen_t ss_len,
                             struct sockaddr_storage *ps, socklen_t ps_len,
                             bool is_listen, bool is_telnet)
{
    char shost[NI_MAXHOST], sserv[NI_MAXSERV];
    char phost[NI_MAXHOST], pserv[NI_MAXSERV];
    /* Brackets wrap IPv6 literals only; empty for IPv4. */
    const char *left = "", *right = "";

    switch (ss->ss_family) {
#ifndef _WIN32
    case AF_UNIX:
        return g_strdup_printf("unix:%s%s",
                               ((struct sockaddr_un *)(ss))->sun_path,
                               is_listen ? ",server" : "");
#endif
    case AF_INET6:
        left  = "[";
        right = "]";
        /* fall through */
    case AF_INET:
        /* Numeric-only lookup: never blocks on DNS.  Return values are
         * ignored; on failure the buffers' contents are unspecified. */
        getnameinfo((struct sockaddr *) ss, ss_len, shost, sizeof(shost),
                    sserv, sizeof(sserv), NI_NUMERICHOST | NI_NUMERICSERV);
        getnameinfo((struct sockaddr *) ps, ps_len, phost, sizeof(phost),
                    pserv, sizeof(pserv), NI_NUMERICHOST | NI_NUMERICSERV);
        return g_strdup_printf("%s:%s%s%s:%s%s <-> %s%s%s:%s",
                               is_telnet ? "telnet" : "tcp",
                               left, shost, right, sserv,
                               is_listen ? ",server" : "",
                               left, phost, right, pserv);
    default:
        return g_strdup_printf("unknown");
    }
}
|
Safe
|
[
"CWE-416"
] |
qemu
|
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
|
3.4997400496619267e+37
| 34 |
char: move front end handlers in CharBackend
Since the hanlders are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for ex through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <20161022095318.17775-22-marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/*
 * Extract the contents of a single PDF object into a temporary file and
 * hand that file to the generic scanner.
 *
 * Handles two object kinds:
 *  - stream objects: optional ASCIIHex/ASCII85 decoding, then either
 *    flate decompression or a raw copy of the bytes;
 *  - /JavaScript objects: the script text (literal "(...)" string or
 *    hex "<...>" string) is written out verbatim.
 *
 * Returns a CL_* status; CL_CLEAN for objects that are not flagged for
 * dumping.  The temp file is unlinked unless engine->keeptmp is set.
 */
static int pdf_extract_obj(struct pdf_struct *pdf, struct pdf_obj *obj)
{
    char fullname[NAME_MAX + 1];
    int fout;
    off_t sum = 0;
    int rc = CL_SUCCESS;
    char *ascii_decoded = NULL;

    if (!(obj->flags & DUMP_MASK)) {
        /* don't dump all streams */
        return CL_CLEAN;
    }

    /* Unique temp file per extracted object: <dir>/pdfNN */
    snprintf(fullname, sizeof(fullname), "%s"PATHSEP"pdf%02u", pdf->dir, pdf->files++);
    fout = open(fullname,O_RDWR|O_CREAT|O_EXCL|O_TRUNC|O_BINARY, 0600);
    if (fout < 0) {
        char err[128];
        cli_errmsg("cli_pdf: can't create temporary file %s: %s\n", fullname, cli_strerror(errno, err, sizeof(err)));
        free(ascii_decoded);
        return CL_ETMPFILE;
    }

    /* do/while(0): "break" below jumps to the common cleanup path. */
    do {
        if (obj->flags & (1 << OBJ_STREAM)) {
            const char *start = pdf->map + obj->start;
            off_t p_stream = 0, p_endstream = 0;
            off_t length;

            /* Locate the stream ... endstream keywords within the object. */
            find_stream_bounds(start, pdf->size - obj->start,
                               pdf->size - obj->start,
                               &p_stream, &p_endstream);
            if (p_stream && p_endstream) {
                int rc2;
                const char *flate_in;
                long ascii_decoded_size = 0;
                size_t size = p_endstream - p_stream;

                length = find_length(pdf, obj, start, p_stream);
                if (!(obj->flags & (1 << OBJ_FILTER_FLATE)) && !length) {
                    /* No /Length given: infer it from the endstream
                     * position, trimming a trailing EOL if present. */
                    const char *q = start + p_endstream;
                    length = size;
                    q--;
                    if (*q == '\n') {
                        q--;
                        length--;
                        if (*q == '\r')
                            length--;
                    } else if (*q == '\r') {
                        length--;
                    }
                    cli_dbgmsg("cli_pdf: calculated length %ld\n", length);
                }
                if (!length)
                    length = size;

                if (obj->flags & (1 << OBJ_FILTER_AH)) {
                    /* ASCIIHex halves the size; +1 for rounding/odd input. */
                    ascii_decoded = cli_malloc(length/2 + 1);
                    if (!ascii_decoded) {
                        cli_errmsg("Cannot allocate memory for asciidecode\n");
                        rc = CL_EMEM;
                        break;
                    }
                    ascii_decoded_size = asciihexdecode(start + p_stream,
                                                       length,
                                                       ascii_decoded);
                } else if (obj->flags & (1 << OBJ_FILTER_A85)) {
                    /* ASCII85 expands at most 4 bytes per input byte;
                     * length*5 is a conservative upper bound. */
                    ascii_decoded = cli_malloc(length*5);
                    if (!ascii_decoded) {
                        cli_errmsg("Cannot allocate memory for asciidecode\n");
                        rc = CL_EMEM;
                        break;
                    }
                    ascii_decoded_size = ascii85decode(start+p_stream,
                                                       length,
                                                       (unsigned char*)ascii_decoded);
                }
                if (ascii_decoded_size < 0) {
                    /* Decoder signalled corrupt input: flag and skip. */
                    pdf->flags |= 1 << BAD_ASCIIDECODE;
                    cli_dbgmsg("cli_pdf: failed to asciidecode in %u %u obj\n", obj->id>>8,obj->id&0xff);
                    rc = CL_CLEAN;
                    break;
                }
                /* either direct or ascii-decoded input */
                if (!ascii_decoded)
                    ascii_decoded_size = length;
                flate_in = ascii_decoded ? ascii_decoded : start+p_stream;

                if (obj->flags & (1 << OBJ_FILTER_FLATE)) {
                    rc = filter_flatedecode(pdf, obj, flate_in, ascii_decoded_size, fout, &sum);
                } else {
                    if (filter_writen(pdf, obj, fout, flate_in, ascii_decoded_size, &sum) != ascii_decoded_size)
                        rc = CL_EWRITE;
                }
                cli_updatelimits(pdf->ctx, sum);
                /* TODO: invoke bytecode on this pdf obj with metainformation associated
                 * */
                cli_dbgmsg("cli_pdf: extracted %ld bytes %u %u obj to %s\n", sum, obj->id>>8, obj->id&0xff, fullname);
                /* Rewind and scan the extracted content recursively. */
                lseek(fout, 0, SEEK_SET);
                rc2 = cli_magic_scandesc(fout, pdf->ctx);
                if (rc2 == CL_VIRUS || rc == CL_SUCCESS)
                    rc = rc2;
            }
        } else if (obj->flags & (1 << OBJ_JAVASCRIPT)) {
            const char *q2;
            const char *q = pdf->map+obj->start;
            /* TODO: get obj-endobj size */
            off_t bytesleft = obj_size(pdf, obj);
            if (bytesleft < 0)
                break;

            /* Find the /JavaScript key, then the value that follows it. */
            q2 = cli_memstr(q, bytesleft, "/JavaScript", 11);
            if (!q2)
                break;
            q2++;
            bytesleft -= q2 - q;
            q = pdf_nextobject(q2, bytesleft);
            if (!q)
                break;
            bytesleft -= q - q2;
            if (*q == '(') {
                /* Literal string: dump everything after the '('.
                 * NOTE(review): this does not strip the closing ')' or
                 * handle escapes -- presumably accepted for scanning
                 * purposes; verify against upstream behavior. */
                if (filter_writen(pdf, obj, fout, q+1, bytesleft-1, &sum) != (bytesleft-1)) {
                    rc = CL_EWRITE;
                    break;
                }
            } else if (*q == '<') {
                /* Hex string: decode <...> into bytes before writing. */
                char *decoded;
                q2 = memchr(q+1, '>', bytesleft);
                if (!q2) q2 = q + bytesleft;
                decoded = cli_malloc(q2 - q);
                if (!decoded) {
                    rc = CL_EMEM;
                    break;
                }
                cli_hex2str_to(q2, decoded, q2-q-1);
                decoded[q2-q-1] = '\0';
                cli_dbgmsg("cli_pdf: found hexadecimal encoded javascript in %u %u obj\n",
                           obj->id>>8, obj->id&0xff);
                pdf->flags |= 1 << HEX_JAVASCRIPT;
                filter_writen(pdf, obj, fout, decoded, q2-q-1, &sum);
                free(decoded);
            }
        }
    } while (0);

    /* Common cleanup: close and (normally) remove the temp file. */
    close(fout);
    free(ascii_decoded);
    if (!pdf->ctx->engine->keeptmp)
        if (cli_unlink(fullname) && rc != CL_VIRUS)
            rc = CL_EUNLINK;
    return rc;
}
|
Safe
|
[] |
clamav-devel
|
f0eb394501ec21b9fe67f36cbf5db788711d4236
|
1.4160482882685532e+38
| 148 |
bb #2016.
| 0 |
/*
 * Process the hardware tx status for an (aggregate) frame chain.
 *
 * Walks the buffer chain starting at @bf, classifying each subframe as
 * acked / failed / pending-retry based on the BlockAck bitmap in @ts,
 * completes acked or permanently-failed subframes, and re-queues the
 * rest for software retry.  May schedule a BAR frame or a chip reset.
 *
 * Called with the txq lock held (the BAR path drops and retakes it).
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	/* Total retry count: long retries plus all lower-rate attempts. */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		/* Station is gone: complete every buffer as failed. */
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_state.stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ath_get_skb_tid(sc, an, skb);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tid->tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			/* Snapshot the BlockAck bitmap for per-subframe checks. */
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		/* Classify this subframe: fail, acked, or pending retry. */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* Retry budget exhausted: give up and remember the
			 * highest failed index for a possible BAR. */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_state.stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				/* Feed rate control once per aggregate. */
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* Sending the BAR requires dropping the txq lock. */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
|
Safe
|
[
"CWE-362",
"CWE-241"
] |
linux
|
21f8aaee0c62708654988ce092838aa7df4d25d8
|
3.1680855311434044e+38
| 225 |
ath9k: protect tid->sched check
We check tid->sched without a lock taken on ath_tx_aggr_sleep(). That
is race condition which can result of doing list_del(&tid->list) twice
(second time with poisoned list node) and cause crash like shown below:
[424271.637220] BUG: unable to handle kernel paging request at 00100104
[424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k]
...
[424271.639953] Call Trace:
[424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k]
[424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k]
[424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211]
[424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40
[424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211]
[424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0
[424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40
[424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211]
[424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211]
[424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211]
[424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0
[424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211]
[424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k]
[424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211]
[424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k]
Bug report:
https://bugzilla.kernel.org/show_bug.cgi?id=70551
Reported-and-tested-by: Max Sydorenko <maxim.stargazer@gmail.com>
Cc: stable@vger.kernel.org
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
| 0 |
/*
 * Parse a USB HID report descriptor (short items only) into per-report
 * field arrays (rdesc->fields_in / fields_out).
 *
 * Returns TRUE on success.  On failure all per-field usage arrays and
 * the field arrays themselves are freed and FALSE is returned.
 * Memory is allocated in wmem file scope.
 */
parse_report_descriptor(report_descriptor_t *rdesc)
{
    hid_field_t field;
    guint8 *data = rdesc->desc_body;
    unsigned int tag, type, size;
    guint8 prefix;
    /* "defined" tracks which global/local items have been seen so far. */
    guint32 defined = 0, usage = 0, usage_min = 0, usage_max = 0;
    wmem_allocator_t *scope = wmem_file_scope();
    gboolean first_item = TRUE;

    memset(&field, 0, sizeof(field));
    field.usages = wmem_array_new(scope, sizeof(guint32));
    rdesc->fields_in = wmem_array_new(scope, sizeof(hid_field_t));
    rdesc->fields_out = wmem_array_new(scope, sizeof(hid_field_t));

    int i = 0;
    while (i < rdesc->desc_length)
    {
        /* Short item prefix byte: tag (4 bits), type (2), size (2). */
        prefix = data[i];
        tag = (prefix & 0b11110000) >> 4;
        type = (prefix & 0b00001100) >> 2;
        size = prefix & 0b00000011;

        if (size == 3) /* HID spec: 6.2.2.2 - Short Items */
            size = 4;

        switch (type)
        {
            case USBHID_ITEMTYPE_MAIN:
                switch (tag)
                {
                    case USBHID_MAINITEM_TAG_INPUT:
                        field.properties = hid_unpack_value(data, i, size);

                        /* A field is only valid once all required global
                         * items have appeared. */
                        if ((defined & HID_REQUIRED_MASK) != HID_REQUIRED_MASK)
                            goto err;

                        /* new field */
                        wmem_array_append_one(rdesc->fields_in, field);

                        field.usages = wmem_array_new(scope, sizeof(guint32));
                        first_item = FALSE;

                        /* only keep the global items */
                        defined &= HID_GLOBAL_MASK;
                        break;

                    case USBHID_MAINITEM_TAG_OUTPUT:
                        field.properties = hid_unpack_value(data, i, size);

                        if ((defined & HID_REQUIRED_MASK) != HID_REQUIRED_MASK)
                            goto err;

                        /* new field */
                        wmem_array_append_one(rdesc->fields_out, field);

                        field.usages = wmem_array_new(scope, sizeof(guint32));
                        first_item = FALSE;

                        defined &= HID_GLOBAL_MASK;
                        break;

                    case USBHID_MAINITEM_TAG_FEATURE:
                        /*
                        field.properties = hid_unpack_value(data, i, size);
                        TODO
                        */
                        break;

                    case USBHID_MAINITEM_TAG_COLLECTION:
                        /* clear usages */
                        wmem_free(scope, field.usages);
                        field.usages = wmem_array_new(scope, sizeof(guint32));
                        break;

                    default:
                        break;
                }
                break;

            case USBHID_ITEMTYPE_GLOBAL:
                switch (tag)
                {
                    case USBHID_GLOBALITEM_TAG_USAGE_PAGE:
                        field.usage_page = hid_unpack_value(data, i, size);
                        defined |= HID_USAGE_PAGE;
                        break;

                    case USBHID_GLOBALITEM_TAG_LOG_MIN:
                        if (hid_unpack_signed(data, i, size, &field.logical_min))
                            goto err;
                        defined |= HID_LOGICAL_MIN;
                        break;

                    case USBHID_GLOBALITEM_TAG_LOG_MAX:
                        if (hid_unpack_signed(data, i, size, &field.logical_max))
                            goto err;
                        defined |= HID_LOGICAL_MAX;
                        break;

                    case USBHID_GLOBALITEM_TAG_REPORT_SIZE:
                        field.report_size = hid_unpack_value(data, i, size);
                        defined |= HID_REPORT_SIZE;
                        break;

                    case USBHID_GLOBALITEM_TAG_REPORT_ID:
                        /* A report ID appearing after fields were emitted
                         * without IDs makes the descriptor inconsistent. */
                        if (!first_item && !rdesc->uses_report_id)
                            goto err;

                        rdesc->uses_report_id = TRUE;

                        field.report_id = hid_unpack_value(data, i, size);
                        defined |= HID_REPORT_ID;
                        break;

                    case USBHID_GLOBALITEM_TAG_REPORT_COUNT:
                        field.report_count = hid_unpack_value(data, i, size);
                        defined |= HID_REPORT_COUNT;
                        break;

                    case USBHID_GLOBALITEM_TAG_PUSH:
                    case USBHID_GLOBALITEM_TAG_POP:
                        /* TODO */
                        goto err;

                    default:
                        break;
                }
                break;

            case USBHID_ITEMTYPE_LOCAL:
                switch (tag)
                {
                    case USBHID_LOCALITEM_TAG_USAGE:
                        /* NOTE(review): this path returns FALSE directly
                         * instead of "goto err", skipping the usage-array
                         * cleanup done below -- wmem file scope reclaims
                         * it eventually, but the inconsistency looks
                         * unintentional; verify against upstream. */
                        if (!(defined & HID_USAGE_PAGE))
                            return FALSE;

                        usage = hid_unpack_value(data, i, size);
                        wmem_array_append_one(field.usages, usage);
                        break;

                    case USBHID_LOCALITEM_TAG_USAGE_MIN:
                        usage_min = hid_unpack_value(data, i, size);
                        defined |= HID_USAGE_MIN;
                        break;

                    case USBHID_LOCALITEM_TAG_USAGE_MAX:
                        /* Usage maximum requires a preceding minimum. */
                        if (!(defined & HID_USAGE_MIN))
                            return FALSE;

                        usage_max = hid_unpack_value(data, i, size);

                        /* Reject empty or inverted usage ranges. */
                        if (usage_min >= usage_max) {
                            goto err;
                        }

                        /* Expand the [min, max) range into explicit usages. */
                        wmem_array_grow(field.usages, usage_max - usage_min);
                        for (guint32 j = usage_min; j < usage_max; j++)
                            wmem_array_append_one(field.usages, j);

                        /* Clear HID_USAGE_MIN (known set, so XOR clears it). */
                        defined ^= HID_USAGE_MIN;
                        break;

                    default: /* TODO */
                        goto err;
                }
                break;

            default: /* reserved */
                goto err;
        }

        i += size + 1;
    }

    return TRUE;

err:
    /* Free every usage array hanging off emitted fields, then the
     * field arrays themselves. */
    for (unsigned int j = 0; j < wmem_array_get_count(rdesc->fields_in); j++)
        wmem_free(scope, ((hid_field_t*) wmem_array_index(rdesc->fields_in, j))->usages);

    for (unsigned int j = 0; j < wmem_array_get_count(rdesc->fields_out); j++)
        wmem_free(scope, ((hid_field_t*) wmem_array_index(rdesc->fields_out, j))->usages);

    wmem_free(scope, rdesc->fields_in);
    wmem_free(scope, rdesc->fields_out);
    return FALSE;
}
|
Safe
|
[] |
wireshark
|
0ceb46e1c28d1094a56aefa0ebf7d7c0e00f8849
|
3.005446787003677e+38
| 188 |
proto: add support for FT_BYTES in proto_tree_add_bits
Change-Id: I5030d550bd760953ac84c2700bb0e03cc7a831a1
Signed-off-by: Filipe Laíns <lains@archlinux.org>
| 0 |
/*
 * Parse "[user@]host[:port]" (or "[user@][host]:port") into its parts.
 * On success returns 0 and fills in any of *userp/*hostp/*portp the
 * caller asked for (strings are freshly allocated, port is -1 when
 * absent).  On failure returns -1 and leaves the outputs NULL / -1.
 */
parse_user_host_port(const char *s, char **userp, char **hostp, int *portp)
{
	char *copy, *cursor, *sep;
	char *user = NULL, *host = NULL;
	int port = -1, ret = -1;

	/* Initialize every requested output to "not present". */
	if (userp != NULL)
		*userp = NULL;
	if (hostp != NULL)
		*hostp = NULL;
	if (portp != NULL)
		*portp = -1;

	if ((copy = strdup(s)) == NULL)
		return -1;
	cursor = copy;

	/* Optional "user@" prefix: split at the *last* '@'. */
	sep = strrchr(cursor, '@');
	if (sep != NULL) {
		*sep = '\0';
		if (*cursor == '\0')
			goto out;
		if ((user = strdup(cursor)) == NULL)
			goto out;
		cursor = sep + 1;
	}

	/* Mandatory hostname. */
	sep = hpdelim(&cursor);
	if (sep == NULL || *sep == '\0')
		goto out;
	host = xstrdup(cleanhostname(sep));

	/* Optional port; a2port() rejects anything non-positive. */
	if (cursor != NULL && *cursor != '\0' &&
	    (port = a2port(cursor)) <= 0)
		goto out;

	/* Success: transfer ownership of the strings to the caller. */
	if (userp != NULL) {
		*userp = user;
		user = NULL;
	}
	if (hostp != NULL) {
		*hostp = host;
		host = NULL;
	}
	if (portp != NULL)
		*portp = port;
	ret = 0;
 out:
	free(copy);
	free(user);
	free(host);
	return ret;
}
|
Safe
|
[] |
openssh-portable
|
f3cbe43e28fe71427d41cfe3a17125b972710455
|
2.2500754496989057e+38
| 51 |
upstream: need initgroups() before setresgid(); reported by anton@,
ok deraadt@
OpenBSD-Commit-ID: 6aa003ee658b316960d94078f2a16edbc25087ce
| 0 |
/*
 * Normalize an X.509 certificate value for exact matching.
 *
 * For assertion-syntax values, delegates to the serialNumberAndIssuer
 * normalizer.  For stored attribute values (DER certificates), parses
 * the serial number and issuer DN out of the certificate and produces
 * the canonical form:
 *   { serialNumber <hex>, issuer rdnSequence:"<normalized DN>" }
 * Returns an LDAP result code; LDAP_INVALID_SYNTAX on parse failure.
 */
certificateExactNormalize(
	slap_mask_t usage,
	Syntax *syntax,
	MatchingRule *mr,
	struct berval *val,
	struct berval *normalized,
	void *ctx )
{
	BerElementBuffer berbuf;
	BerElement *ber = (BerElement *)&berbuf;
	ber_tag_t tag;
	ber_len_t len;
	ber_int_t i;
	char serialbuf2[SLAP_SN_BUFLEN];
	struct berval sn, sn2 = BER_BVNULL;
	struct berval issuer_dn = BER_BVNULL, bvdn;
	char *p;
	int rc = LDAP_INVALID_SYNTAX;

	assert( val != NULL );

	Debug( LDAP_DEBUG_TRACE, ">>> certificateExactNormalize: <%p, %lu>\n",
		val->bv_val, val->bv_len );

	if ( BER_BVISEMPTY( val ) ) goto done;

	if ( SLAP_MR_IS_VALUE_OF_ASSERTION_SYNTAX(usage) ) {
		return serialNumberAndIssuerNormalize( 0, NULL, NULL, val, normalized, ctx );
	}

	assert( SLAP_MR_IS_VALUE_OF_ATTRIBUTE_SYNTAX(usage) != 0 );

	/* Walk the DER structure: Certificate ::= SEQUENCE {
	 * tbsCertificate, signatureAlgorithm, signature } */
	ber_init2( ber, val, LBER_USE_DER );
	tag = ber_skip_tag( ber, &len );	/* Signed Sequence */
	tag = ber_skip_tag( ber, &len );	/* Sequence */
	tag = ber_peek_tag( ber, &len );	/* Optional version? */
	if ( tag == SLAP_X509_OPT_C_VERSION ) {
		tag = ber_skip_tag( ber, &len );
		tag = ber_get_int( ber, &i );	/* version */
	}

	/* NOTE: move the test here from certificateValidate,
	 * so that we can validate certs with serial longer
	 * than sizeof(ber_int_t) */
	tag = ber_skip_tag( ber, &len );	/* serial */
	sn.bv_len = len;
	sn.bv_val = (char *)ber->ber_ptr;
	sn2.bv_val = serialbuf2;
	sn2.bv_len = sizeof(serialbuf2);
	/* Hex-encode the serial (may allocate if it overflows serialbuf2). */
	if ( slap_bin2hex( &sn, &sn2, ctx ) ) {
		rc = LDAP_INVALID_SYNTAX;
		goto done;
	}
	ber_skip_data( ber, len );

	tag = ber_skip_tag( ber, &len );	/* SignatureAlg */
	ber_skip_data( ber, len );
	tag = ber_peek_tag( ber, &len );	/* IssuerDN */
	if ( len ) {
		/* Point bvdn at the raw issuer DN bytes inside val. */
		len = ber_ptrlen( ber );
		bvdn.bv_val = val->bv_val + len;
		bvdn.bv_len = val->bv_len - len;

		rc = dnX509normalize( &bvdn, &issuer_dn );
		if ( rc != LDAP_SUCCESS ) {
			rc = LDAP_INVALID_SYNTAX;
			goto done;
		}
	}

	/* Assemble the canonical string form. */
	normalized->bv_len = STRLENOF( "{ serialNumber , issuer rdnSequence:\"\" }" )
		+ sn2.bv_len + issuer_dn.bv_len;
	normalized->bv_val = ch_malloc( normalized->bv_len + 1 );

	p = normalized->bv_val;

	p = lutil_strcopy( p, "{ serialNumber " /*}*/ );
	p = lutil_strbvcopy( p, &sn2 );
	p = lutil_strcopy( p, ", issuer rdnSequence:\"" );
	p = lutil_strbvcopy( p, &issuer_dn );
	p = lutil_strcopy( p, /*{*/ "\" }" );

	rc = LDAP_SUCCESS;

done:
	Debug( LDAP_DEBUG_TRACE, "<<< certificateExactNormalize: <%p, %lu> => <%s>\n",
		val->bv_val, val->bv_len, rc == LDAP_SUCCESS ? normalized->bv_val : "(err)" );

	/* Free only what we allocated (not the stack buffer). */
	if ( issuer_dn.bv_val ) ber_memfree( issuer_dn.bv_val );
	if ( sn2.bv_val != serialbuf2 ) ber_memfree_x( sn2.bv_val, ctx );

	return rc;
}
|
Safe
|
[
"CWE-617"
] |
openldap
|
3539fc33212b528c56b716584f2c2994af7c30b0
|
9.927973050746254e+37
| 93 |
ITS#9454 fix issuerAndThisUpdateCheck
| 0 |
/**
 * xmlFreeValidCtxt:
 * @cur:  the validation context to free
 *
 * Free a validation context structure, including its state and node
 * tables.  Safe to call with NULL, matching the free()-like convention
 * of the other libxml2 deallocators.
 */
xmlFreeValidCtxt(xmlValidCtxtPtr cur) {
    /* Bug fix: the original dereferenced cur->vstateTab without a NULL
     * check, crashing on xmlFreeValidCtxt(NULL). */
    if (cur == NULL)
        return;
    if (cur->vstateTab != NULL)
        xmlFree(cur->vstateTab);
    if (cur->nodeTab != NULL)
        xmlFree(cur->nodeTab);
    xmlFree(cur);
}
|
Safe
|
[] |
libxml2
|
932cc9896ab41475d4aa429c27d9afd175959d74
|
9.912196752292461e+37
| 7 |
Fix buffer size checks in xmlSnprintfElementContent
xmlSnprintfElementContent failed to correctly check the available
buffer space in two locations.
Fixes bug 781333 (CVE-2017-9047) and bug 781701 (CVE-2017-9048).
Thanks to Marcel Böhme and Thuan Pham for the report.
| 0 |
//! Convert a C string to lowercase in place using the single-character
//! \c lowercase(char) overload; a null pointer is a no-op.
inline void lowercase(char *const str) {
  if (!str) return;
  for (char *p = str; *p; ++p) *p = lowercase(*p);
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
1.9831871867591874e+38
| 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
| 0 |
/*
 * Grisu digit generation: emit the shortest decimal digit string for
 * the scaled value Mp (with error bound delta) into buffer.
 *
 * Mp.f is split at the binary point given by -Mp.e into an integral
 * part p1 (at most 4 decimal digits, hence kappa = 3 / div = TEN2*10?
 * -- div starts at TEN2) and a fractional part p2.  Digits are emitted
 * from p1 first, then from p2, stopping as soon as the remaining value
 * is within delta.  *len receives the digit count and *K is adjusted
 * by the final decimal exponent offset kappa.
 */
static void digit_gen(diy_fp_t Mp, diy_fp_t delta, char* buffer, int* len, int* K)
{
	uint32_t div, p1;
	uint64_t p2;
	int d,kappa;
	diy_fp_t one;

	/* one == 1.0 at Mp's binary exponent; one.f-1 masks the fraction. */
	one.f = ((uint64_t) 1) << -Mp.e; one.e = Mp.e;
	p1 = Mp.f >> -one.e;
	p2 = Mp.f & (one.f - 1);
	*len = 0; kappa = 3; div = TEN2;

	/* Integral digits, most significant first. */
	while (kappa > 0) {
		d = p1 / div;
		/* Suppress leading zeros (emit only once a digit has appeared). */
		if (d || *len) buffer[(*len)++] = '0' + d;
		p1 %= div; kappa--; div /= 10;
		/* Stop early once the remainder is within the error bound. */
		if ((((uint64_t)p1)<<-one.e)+p2 <= delta.f) {
			*K += kappa; return;
		}
	}

	/* Fractional digits: multiply by 10 and peel off the integer part,
	 * scaling delta alongside so the termination test stays valid. */
	do {
		p2 *= 10;
		d = p2 >> -one.e;
		if (d || *len) buffer[(*len)++] = '0' + d;
		p2 &= one.f - 1; kappa--; delta.f *= 10;
	} while (p2 > delta.f);
	*K += kappa;
}
|
Safe
|
[
"CWE-190"
] |
mujs
|
25821e6d74fab5fcc200fe5e818362e03e114428
|
2.1190047584712576e+38
| 26 |
Fix 698920: Guard jsdtoa from integer overflow wreaking havoc.
| 0 |
njs_promise_value_constructor(njs_vm_t *vm, njs_value_t *value,
njs_value_t *dst)
{
njs_int_t ret;
static const njs_value_t string_constructor = njs_string("constructor");
if (njs_is_function(value)) {
*dst = *value;
return NJS_OK;
}
ret = njs_value_property(vm, value, njs_value_arg(&string_constructor),
dst);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
if (!njs_is_function(dst)) {
njs_type_error(vm, "the object does not contain a constructor");
return NJS_ERROR;
}
return NJS_OK;
}
|
Safe
|
[
"CWE-416",
"CWE-703"
] |
njs
|
31ed93a5623f24ca94e6d47e895ba735d9d97d46
|
3.479066701941458e+37
| 25 |
Fixed aggregation methods of Promise ctor with array-like object.
Previously, while iterating over an array-like object the methods may be
resolved with INVALID values. INVALID value is a special internal type which
should never be visible by ordinary functions.
The fix is to ensure that absent elements are represented by undefined value.
The following methods were fixed Promise.all(), Promise.allSettled(),
Promise.any(), Promise.race().
This closes #483 issue on Github.
| 0 |
ssize_t Http2Stream::Provider::Stream::OnRead(nghttp2_session* handle,
int32_t id,
uint8_t* buf,
size_t length,
uint32_t* flags,
nghttp2_data_source* source,
void* user_data) {
Http2Session* session = static_cast<Http2Session*>(user_data);
DEBUG_HTTP2SESSION2(session, "reading outbound data for stream %d", id);
Http2Stream* stream = GetStream(session, id, source);
CHECK_EQ(id, stream->id());
size_t amount = 0; // amount of data being sent in this data frame.
if (!stream->queue_.empty()) {
DEBUG_HTTP2SESSION2(session, "stream %d has pending outbound data", id);
amount = std::min(stream->available_outbound_length_, length);
DEBUG_HTTP2SESSION2(session, "sending %d bytes for data frame on stream %d",
amount, id);
if (amount > 0) {
// Just return the length, let Http2Session::OnSendData take care of
// actually taking the buffers out of the queue.
*flags |= NGHTTP2_DATA_FLAG_NO_COPY;
stream->available_outbound_length_ -= amount;
}
}
if (amount == 0 && stream->IsWritable() && stream->queue_.empty()) {
DEBUG_HTTP2SESSION2(session, "deferring stream %d", id);
return NGHTTP2_ERR_DEFERRED;
}
if (stream->queue_.empty() && !stream->IsWritable()) {
DEBUG_HTTP2SESSION2(session, "no more data for stream %d", id);
*flags |= NGHTTP2_DATA_FLAG_EOF;
session->GetTrailers(stream, flags);
// If the stream or session gets destroyed during the GetTrailers
// callback, check that here and close down the stream
if (stream->IsDestroyed())
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
if (session->IsDestroyed())
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
return amount;
}
|
Safe
|
[] |
node
|
ce22d6f9178507c7a41b04ac4097b9ea902049e3
|
1.9595792709818626e+38
| 45 |
http2: add altsvc support
Add support for sending and receiving ALTSVC frames.
PR-URL: https://github.com/nodejs/node/pull/17917
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
| 0 |
static size_t consume_s7_r(RBuffer *b, ut64 bound, st8 *out) {
size_t n = 0;
ut32 tmp = consume_r (b, bound, &n, (ConsumeFcn)read_i32_leb128);
if (out) {
*out = (st8) (((tmp & 0x10000000) << 7) | (tmp & 0x7f));
}
return n;
}
|
Safe
|
[
"CWE-787"
] |
radare2
|
b4ca66f5d4363d68a6379e5706353b3bde5104a4
|
1.883411639651484e+38
| 8 |
Fix #20336 - wasm bin parser ##crash
| 0 |
void CMSEXPORT cmsSetHeaderAttributes(cmsHPROFILE hProfile, cmsUInt64Number Flags)
{
_cmsICCPROFILE* Icc = (_cmsICCPROFILE*) hProfile;
memmove(&Icc -> attributes, &Flags, sizeof(cmsUInt64Number));
}
|
Safe
|
[] |
Little-CMS
|
d2d902b9a03583ae482c782b2f243f7e5268a47d
|
6.862005896804699e+37
| 5 |
>Changes from Richard Hughes
| 0 |
R_API char *r_bin_java_get_item_name_from_cp_item_list(RList *cp_list, RBinJavaCPTypeObj *obj, int depth) {
/*
Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef
return the actual descriptor string.
@param cp_list: RList of RBinJavaCPTypeObj *
@param obj object to look up the name for
@rvalue ut8* (user frees) or NULL
*/
if (obj == NULL || cp_list == NULL || depth < 0) {
return NULL;
}
switch (obj->tag) {
case R_BIN_JAVA_CP_NAMEANDTYPE:
return r_bin_java_get_utf8_from_cp_item_list (
cp_list, obj->info.cp_name_and_type.name_idx);
case R_BIN_JAVA_CP_CLASS:
return r_bin_java_get_utf8_from_cp_item_list (
cp_list, obj->info.cp_class.name_idx);
// XXX - Probably not good form, but they are the same memory structure
case R_BIN_JAVA_CP_FIELDREF:
case R_BIN_JAVA_CP_INTERFACEMETHOD_REF:
case R_BIN_JAVA_CP_METHODREF:
obj = r_bin_java_get_item_from_cp_item_list (
cp_list, obj->info.cp_method.name_and_type_idx);
return r_bin_java_get_item_name_from_cp_item_list (
cp_list, obj, depth - 1);
default:
return NULL;
case 0:
IFDBG eprintf("Invalid 0 tag in the constant pool\n");
return NULL;
}
return NULL;
}
|
Safe
|
[
"CWE-787"
] |
radare2
|
9650e3c352f675687bf6c6f65ff2c4a3d0e288fa
|
5.276716301419129e+37
| 34 |
Fix oobread segfault in java arith8.class ##crash
* Reported by Cen Zhang via huntr.dev
| 0 |
HandleFlushNeighborsMessage(flush_neighbors_message_t *msg)
{
if (msg->family == AF_INET)
{
return FlushIpNetTable(msg->iface.index);
}
return FlushIpNetTable2(msg->family, msg->iface.index);
}
|
Safe
|
[
"CWE-415"
] |
openvpn
|
1394192b210cb3c6624a7419bcf3ff966742e79b
|
7.505047949245817e+37
| 9 |
Fix potential double-free() in Interactive Service (CVE-2018-9336)
Malformed input data on the service pipe towards the OpenVPN interactive
service (normally used by the OpenVPN GUI to request openvpn instances
from the service) can result in a double free() in the error handling code.
This usually only leads to a process crash (DoS by an unprivileged local
account) but since it could possibly lead to memory corruption if
happening while multiple other threads are active at the same time,
CVE-2018-9336 has been assigned to acknowledge this risk.
Fix by ensuring that sud->directory is set to NULL in GetStartUpData()
for all error cases (thus not being free()ed in FreeStartupData()).
Rewrite control flow to use explicit error label for error exit.
Discovered and reported by Jacob Baines <jbaines@tenable.com>.
CVE: 2018-9336
Signed-off-by: Gert Doering <gert@greenie.muc.de>
Acked-by: Selva Nair <selva.nair@gmail.com>
Message-Id: <20180414072617.25075-1-gert@greenie.muc.de>
URL: https://www.mail-archive.com/search?l=mid&q=20180414072617.25075-1-gert@greenie.muc.de
Signed-off-by: Gert Doering <gert@greenie.muc.de>
| 0 |
bool checkRuntimeOverride(bool config_value, const char* override_key) {
return Runtime::runtimeFeatureEnabled(override_key) ? true : config_value;
}
|
Safe
|
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
|
7.77163881636572e+37
| 3 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <asraa@google.com>
| 0 |
CryptoCert crypto_cert_read(BYTE* data, UINT32 length)
{
CryptoCert cert = malloc(sizeof(*cert));
if (!cert)
return NULL;
/* this will move the data pointer but we don't care, we don't use it again */
cert->px509 = d2i_X509(NULL, (D2I_X509_CONST BYTE**)&data, length);
return cert;
}
|
Safe
|
[
"CWE-787"
] |
FreeRDP
|
8305349a943c68b1bc8c158f431dc607655aadea
|
1.727846468534835e+38
| 11 |
Fixed GHSL-2020-102 heap overflow
(cherry picked from commit 197b16cc15a12813c2e4fa2d6ae9cd9c4a57e581)
| 0 |
png_get_int_32)(png_const_bytep buf)
{
png_uint_32 uval = png_get_uint_32(buf);
if ((uval & 0x80000000) == 0) /* non-negative */
return uval;
uval = (uval ^ 0xffffffff) + 1; /* 2's complement: -x = ~x+1 */
if ((uval & 0x80000000) == 0) /* no overflow */
return -(png_int_32)uval;
/* The following has to be safe; this function only gets called on PNG data
* and if we get here that data is invalid. 0 is the most safe value and
* if not then an attacker would surely just generate a PNG with 0 instead.
*/
return 0;
}
|
Safe
|
[
"CWE-120"
] |
libpng
|
a901eb3ce6087e0afeef988247f1a1aa208cb54d
|
3.087579992074917e+38
| 15 |
[libpng16] Prevent reading over-length PLTE chunk (Cosmin Truta).
| 0 |
static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int len = 0;
if (ops->ndo_fcoe_ddp_done)
len = ops->ndo_fcoe_ddp_done(real_dev, xid);
return len;
}
|
Safe
|
[
"CWE-703",
"CWE-264"
] |
linux
|
550fd08c2cebad61c548def135f67aba284c6162
|
1.6155451376946965e+37
| 11 |
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Karsten Keil <isdn@linux-pingi.de>
CC: "David S. Miller" <davem@davemloft.net>
CC: Jay Vosburgh <fubar@us.ibm.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Krzysztof Halasa <khc@pm.waw.pl>
CC: "John W. Linville" <linville@tuxdriver.com>
CC: Greg Kroah-Hartman <gregkh@suse.de>
CC: Marcel Holtmann <marcel@holtmann.org>
CC: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
block_t total_count, user_block_count, start_count, ovp_count;
u64 avail_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
user_block_count = sbi->user_block_count;
start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
buf->f_type = F2FS_SUPER_MAGIC;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
sbi->reserved_blocks;
avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
if (avail_node_count > user_block_count) {
buf->f_files = user_block_count;
buf->f_ffree = buf->f_bavail;
} else {
buf->f_files = avail_node_count;
buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
buf->f_bavail);
}
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
#ifdef CONFIG_QUOTA
if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
}
#endif
return 0;
}
|
Safe
|
[
"CWE-20"
] |
linux
|
638164a2718f337ea224b747cf5977ef143166a4
|
5.581603257893972e+37
| 43 |
f2fs: fix potential panic during fstrim
As Ju Hyung Park reported:
"When 'fstrim' is called for manual trim, a BUG() can be triggered
randomly with this patch.
I'm seeing this issue on both x86 Desktop and arm64 Android phone.
On x86 Desktop, this was caused during Ubuntu boot-up. I have a
cronjob installed which calls 'fstrim -v /' during boot. On arm64
Android, this was caused during GC looping with 1ms gc_min_sleep_time
& gc_max_sleep_time."
Root cause of this issue is that f2fs_wait_discard_bios can only be
used by f2fs_put_super, because during put_super there must be no
other referrers, so it can ignore discard entry's reference count
when removing the entry, otherwise in other caller we will hit bug_on
in __remove_discard_cmd as there may be other issuer added reference
count in discard entry.
Thread A Thread B
- issue_discard_thread
- f2fs_ioc_fitrim
- f2fs_trim_fs
- f2fs_wait_discard_bios
- __issue_discard_cmd
- __submit_discard_cmd
- __wait_discard_cmd
- dc->ref++
- __wait_one_discard_bio
- __wait_discard_cmd
- __remove_discard_cmd
- f2fs_bug_on(sbi, dc->ref)
Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de
Reported-by: Ju Hyung Park <qkrwngud825@gmail.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
| 0 |
static u32 ieee80211_idle_on(struct ieee80211_local *local)
{
if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
return 0;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
wiphy_debug(local->hw.wiphy, "device now idle\n");
#endif
drv_flush(local, false);
local->hw.conf.flags |= IEEE80211_CONF_IDLE;
return IEEE80211_CONF_CHANGE_IDLE;
}
|
Safe
|
[
"CWE-703",
"CWE-264"
] |
linux
|
550fd08c2cebad61c548def135f67aba284c6162
|
2.5290499396430337e+37
| 14 |
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Karsten Keil <isdn@linux-pingi.de>
CC: "David S. Miller" <davem@davemloft.net>
CC: Jay Vosburgh <fubar@us.ibm.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Krzysztof Halasa <khc@pm.waw.pl>
CC: "John W. Linville" <linville@tuxdriver.com>
CC: Greg Kroah-Hartman <gregkh@suse.de>
CC: Marcel Holtmann <marcel@holtmann.org>
CC: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static BOOL rdp_write_input_capability_set(wStream* s, const rdpSettings* settings)
{
size_t header;
UINT16 inputFlags;
if (!Stream_EnsureRemainingCapacity(s, 128))
return FALSE;
header = rdp_capability_set_start(s);
if (header > UINT16_MAX)
return FALSE;
inputFlags = INPUT_FLAG_SCANCODES;
if (settings->FastPathInput)
{
inputFlags |= INPUT_FLAG_FASTPATH_INPUT;
inputFlags |= INPUT_FLAG_FASTPATH_INPUT2;
}
if (settings->HasHorizontalWheel)
inputFlags |= TS_INPUT_FLAG_MOUSE_HWHEEL;
if (settings->UnicodeInput)
inputFlags |= INPUT_FLAG_UNICODE;
if (settings->HasExtendedMouseEvent)
inputFlags |= INPUT_FLAG_MOUSEX;
Stream_Write_UINT16(s, inputFlags); /* inputFlags (2 bytes) */
Stream_Write_UINT16(s, 0); /* pad2OctetsA (2 bytes) */
Stream_Write_UINT32(s, settings->KeyboardLayout); /* keyboardLayout (4 bytes) */
Stream_Write_UINT32(s, settings->KeyboardType); /* keyboardType (4 bytes) */
Stream_Write_UINT32(s, settings->KeyboardSubType); /* keyboardSubType (4 bytes) */
Stream_Write_UINT32(s, settings->KeyboardFunctionKey); /* keyboardFunctionKeys (4 bytes) */
Stream_Zero(s, 64); /* imeFileName (64 bytes) */
rdp_capability_set_finish(s, (UINT16)header, CAPSET_TYPE_INPUT);
return TRUE;
}
|
Safe
|
[
"CWE-119",
"CWE-125"
] |
FreeRDP
|
3627aaf7d289315b614a584afb388f04abfb5bbf
|
3.3887255033532735e+38
| 38 |
Fixed #6011: Bounds check in rdp_read_font_capability_set
| 0 |
iperf_reset_test(struct iperf_test *test)
{
struct iperf_stream *sp;
/* Free streams */
while (!SLIST_EMPTY(&test->streams)) {
sp = SLIST_FIRST(&test->streams);
SLIST_REMOVE_HEAD(&test->streams, streams);
iperf_free_stream(sp);
}
if (test->omit_timer != NULL) {
tmr_cancel(test->omit_timer);
test->omit_timer = NULL;
}
if (test->timer != NULL) {
tmr_cancel(test->timer);
test->timer = NULL;
}
if (test->stats_timer != NULL) {
tmr_cancel(test->stats_timer);
test->stats_timer = NULL;
}
if (test->reporter_timer != NULL) {
tmr_cancel(test->reporter_timer);
test->reporter_timer = NULL;
}
test->done = 0;
SLIST_INIT(&test->streams);
test->role = 's';
test->sender = 0;
test->sender_has_retransmits = 0;
set_protocol(test, Ptcp);
test->omit = OMIT;
test->duration = DURATION;
test->server_affinity = -1;
test->state = 0;
test->ctrl_sck = -1;
test->prot_listener = -1;
test->bytes_sent = 0;
test->blocks_sent = 0;
test->reverse = 0;
test->no_delay = 0;
FD_ZERO(&test->read_set);
FD_ZERO(&test->write_set);
test->num_streams = 1;
test->settings->socket_bufsize = 0;
test->settings->blksize = DEFAULT_TCP_BLKSIZE;
test->settings->rate = 0;
test->settings->burst = 0;
test->settings->mss = 0;
memset(test->cookie, 0, COOKIE_SIZE);
test->multisend = 10; /* arbitrary */
/* Free output line buffers, if any (on the server only) */
struct iperf_textline *t;
while (!TAILQ_EMPTY(&test->server_output_list)) {
t = TAILQ_FIRST(&test->server_output_list);
TAILQ_REMOVE(&test->server_output_list, t, textlineentries);
free(t->line);
free(t);
}
}
|
Safe
|
[
"CWE-120",
"CWE-119",
"CWE-787"
] |
iperf
|
91f2fa59e8ed80dfbf400add0164ee0e508e412a
|
6.062781318207055e+37
| 69 |
Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <bmah@es.net>
| 0 |
S_dump_trie(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap,
AV *revcharmap, U32 depth)
{
U32 state;
SV *sv=sv_newmortal();
int colwidth= widecharmap ? 6 : 4;
U16 word;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMP_TRIE;
Perl_re_indentf( aTHX_ "Char : %-6s%-6s%-4s ",
depth+1, "Match","Base","Ofs" );
for( state = 0 ; state < trie->uniquecharcount ; state++ ) {
SV ** const tmp = av_fetch( revcharmap, state, 0);
if ( tmp ) {
Perl_re_printf( aTHX_ "%*s",
colwidth,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) |
PERL_PV_ESCAPE_FIRSTCHAR
)
);
}
}
Perl_re_printf( aTHX_ "\n");
Perl_re_indentf( aTHX_ "State|-----------------------", depth+1);
for( state = 0 ; state < trie->uniquecharcount ; state++ )
Perl_re_printf( aTHX_ "%.*s", colwidth, "--------");
Perl_re_printf( aTHX_ "\n");
for( state = 1 ; state < trie->statecount ; state++ ) {
const U32 base = trie->states[ state ].trans.base;
Perl_re_indentf( aTHX_ "#%4" UVXf "|", depth+1, (UV)state);
if ( trie->states[ state ].wordnum ) {
Perl_re_printf( aTHX_ " W%4X", trie->states[ state ].wordnum );
} else {
Perl_re_printf( aTHX_ "%6s", "" );
}
Perl_re_printf( aTHX_ " @%4" UVXf " ", (UV)base );
if ( base ) {
U32 ofs = 0;
while( ( base + ofs < trie->uniquecharcount ) ||
( base + ofs - trie->uniquecharcount < trie->lasttrans
&& trie->trans[ base + ofs - trie->uniquecharcount ].check
!= state))
ofs++;
Perl_re_printf( aTHX_ "+%2" UVXf "[ ", (UV)ofs);
for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) {
if ( ( base + ofs >= trie->uniquecharcount )
&& ( base + ofs - trie->uniquecharcount
< trie->lasttrans )
&& trie->trans[ base + ofs
- trie->uniquecharcount ].check == state )
{
Perl_re_printf( aTHX_ "%*" UVXf, colwidth,
(UV)trie->trans[ base + ofs - trie->uniquecharcount ].next
);
} else {
Perl_re_printf( aTHX_ "%*s",colwidth," ." );
}
}
Perl_re_printf( aTHX_ "]");
}
Perl_re_printf( aTHX_ "\n" );
}
Perl_re_indentf( aTHX_ "word_info N:(prev,len)=",
depth);
for (word=1; word <= trie->wordcount; word++) {
Perl_re_printf( aTHX_ " %d:(%d,%d)",
(int)word, (int)(trie->wordinfo[word].prev),
(int)(trie->wordinfo[word].len));
}
Perl_re_printf( aTHX_ "\n" );
}
|
Safe
|
[
"CWE-125"
] |
perl5
|
43b2f4ef399e2fd7240b4eeb0658686ad95f8e62
|
1.6478423981328047e+36
| 87 |
regcomp.c: Convert some strchr to memchr
This allows things to work properly in the face of embedded NULs.
See the branch merge message for more information.
| 0 |
static void test_wl4435()
{
MYSQL_STMT *stmt;
int rc;
char query[MAX_TEST_QUERY_LENGTH];
char str_data[20][WL4435_STRING_SIZE];
double dbl_data[20];
char dec_data[20][WL4435_STRING_SIZE];
int int_data[20];
ulong str_length= WL4435_STRING_SIZE;
my_bool is_null;
MYSQL_BIND ps_params[WL4435_NUM_PARAMS];
int exec_counter;
myheader("test_wl4435");
mct_start_logging("test_wl4435");
rc= mysql_query(mysql, "DROP PROCEDURE IF EXISTS p1");
myquery(rc);
rc= mysql_query(mysql, "DROP PROCEDURE IF EXISTS p2");
myquery(rc);
rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1");
myquery(rc);
rc= mysql_query(mysql, "DROP TABLE IF EXISTS t2");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE t1(a1 INT, a2 CHAR(32), "
" a3 DOUBLE(4, 2), a4 DECIMAL(3, 1))");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE t2(b0 INT, b1 INT, b2 CHAR(32), "
" b3 DOUBLE(4, 2), b4 DECIMAL(3, 1))");
myquery(rc);
rc= mysql_query(mysql, "INSERT INTO t1 VALUES"
"(1, '11', 12.34, 56.7), "
"(2, '12', 56.78, 90.1), "
"(3, '13', 23.45, 67.8)");
myquery(rc);
rc= mysql_query(mysql, "INSERT INTO t2 VALUES"
"(100, 10, '110', 70.70, 10.1), "
"(200, 20, '120', 80.80, 20.2), "
"(300, 30, '130', 90.90, 30.3)");
myquery(rc);
rc= mysql_query(mysql,
"CREATE PROCEDURE p1("
" IN v0 INT, "
" OUT v_str_1 CHAR(32), "
" OUT v_dbl_1 DOUBLE(4, 2), "
" OUT v_dec_1 DECIMAL(6, 3), "
" OUT v_int_1 INT, "
" IN v1 INT, "
" INOUT v_str_2 CHAR(64), "
" INOUT v_dbl_2 DOUBLE(5, 3), "
" INOUT v_dec_2 DECIMAL(7, 4), "
" INOUT v_int_2 INT)"
"BEGIN "
" SET v0 = -1; "
" SET v1 = -1; "
" SET v_str_1 = 'test_1'; "
" SET v_dbl_1 = 12.34; "
" SET v_dec_1 = 567.891; "
" SET v_int_1 = 2345; "
" SET v_str_2 = 'test_2'; "
" SET v_dbl_2 = 67.891; "
" SET v_dec_2 = 234.6789; "
" SET v_int_2 = 6789; "
" SELECT * FROM t1; "
" SELECT * FROM t2; "
"END");
myquery(rc);
rc= mysql_query(mysql,
"CREATE PROCEDURE p2("
" IN i1 VARCHAR(255) CHARACTER SET koi8r, "
" OUT o1 VARCHAR(255) CHARACTER SET cp1251, "
" OUT o2 VARBINARY(255)) "
"BEGIN "
" SET o1 = i1; "
" SET o2 = i1; "
"END");
myquery(rc);
strmov(query, "CALL p1(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
stmt= mysql_simple_prepare(mysql, query);
check_stmt(stmt);
/* Init PS-parameters. */
bzero((char *) ps_params, sizeof (ps_params));
/* - v0 -- INT */
ps_params[0].buffer_type= MYSQL_TYPE_LONG;
ps_params[0].buffer= (char *) &int_data[0];
ps_params[0].length= 0;
ps_params[0].is_null= 0;
/* - v_str_1 -- CHAR(32) */
ps_params[1].buffer_type= MYSQL_TYPE_STRING;
ps_params[1].buffer= (char *) str_data[0];
ps_params[1].buffer_length= WL4435_STRING_SIZE;
ps_params[1].length= &str_length;
ps_params[1].is_null= 0;
/* - v_dbl_1 -- DOUBLE */
ps_params[2].buffer_type= MYSQL_TYPE_DOUBLE;
ps_params[2].buffer= (char *) &dbl_data[0];
ps_params[2].length= 0;
ps_params[2].is_null= 0;
/* - v_dec_1 -- DECIMAL */
ps_params[3].buffer_type= MYSQL_TYPE_NEWDECIMAL;
ps_params[3].buffer= (char *) dec_data[0];
ps_params[3].buffer_length= WL4435_STRING_SIZE;
ps_params[3].length= 0;
ps_params[3].is_null= 0;
/* - v_int_1 -- INT */
ps_params[4].buffer_type= MYSQL_TYPE_LONG;
ps_params[4].buffer= (char *) &int_data[0];
ps_params[4].length= 0;
ps_params[4].is_null= 0;
/* - v1 -- INT */
ps_params[5].buffer_type= MYSQL_TYPE_LONG;
ps_params[5].buffer= (char *) &int_data[0];
ps_params[5].length= 0;
ps_params[5].is_null= 0;
/* - v_str_2 -- CHAR(32) */
ps_params[6].buffer_type= MYSQL_TYPE_STRING;
ps_params[6].buffer= (char *) str_data[0];
ps_params[6].buffer_length= WL4435_STRING_SIZE;
ps_params[6].length= &str_length;
ps_params[6].is_null= 0;
/* - v_dbl_2 -- DOUBLE */
ps_params[7].buffer_type= MYSQL_TYPE_DOUBLE;
ps_params[7].buffer= (char *) &dbl_data[0];
ps_params[7].length= 0;
ps_params[7].is_null= 0;
/* - v_dec_2 -- DECIMAL */
ps_params[8].buffer_type= MYSQL_TYPE_DECIMAL;
ps_params[8].buffer= (char *) dec_data[0];
ps_params[8].buffer_length= WL4435_STRING_SIZE;
ps_params[8].length= 0;
ps_params[8].is_null= 0;
/* - v_int_2 -- INT */
ps_params[9].buffer_type= MYSQL_TYPE_LONG;
ps_params[9].buffer= (char *) &int_data[0];
ps_params[9].length= 0;
ps_params[9].is_null= 0;
/* Bind parameters. */
rc= mysql_stmt_bind_param(stmt, ps_params);
/* Execute! */
for (exec_counter= 0; exec_counter < 3; ++exec_counter)
{
int i;
int num_fields;
MYSQL_BIND *rs_bind;
mct_log("\nexec_counter: %d\n", (int) exec_counter);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
while (1)
{
MYSQL_FIELD *fields;
MYSQL_RES *rs_metadata= mysql_stmt_result_metadata(stmt);
num_fields= mysql_stmt_field_count(stmt);
fields= mysql_fetch_fields(rs_metadata);
rs_bind= (MYSQL_BIND *) malloc(sizeof (MYSQL_BIND) * num_fields);
bzero(rs_bind, sizeof (MYSQL_BIND) * num_fields);
mct_log("num_fields: %d\n", (int) num_fields);
for (i = 0; i < num_fields; ++i)
{
mct_log(" - %d: name: '%s'/'%s'; table: '%s'/'%s'; "
"db: '%s'; catalog: '%s'; length: %d; max_length: %d; "
"type: %d; decimals: %d\n",
(int) i,
(const char *) fields[i].name,
(const char *) fields[i].org_name,
(const char *) fields[i].table,
(const char *) fields[i].org_table,
(const char *) fields[i].db,
(const char *) fields[i].catalog,
(int) fields[i].length,
(int) fields[i].max_length,
(int) fields[i].type,
(int) fields[i].decimals);
rs_bind[i].buffer_type= fields[i].type;
rs_bind[i].is_null= &is_null;
switch (fields[i].type)
{
case MYSQL_TYPE_LONG:
rs_bind[i].buffer= (char *) &(int_data[i]);
rs_bind[i].buffer_length= sizeof (int_data);
break;
case MYSQL_TYPE_STRING:
rs_bind[i].buffer= (char *) str_data[i];
rs_bind[i].buffer_length= WL4435_STRING_SIZE;
rs_bind[i].length= &str_length;
break;
case MYSQL_TYPE_DOUBLE:
rs_bind[i].buffer= (char *) &dbl_data[i];
rs_bind[i].buffer_length= sizeof (dbl_data);
break;
case MYSQL_TYPE_NEWDECIMAL:
rs_bind[i].buffer= (char *) dec_data[i];
rs_bind[i].buffer_length= WL4435_STRING_SIZE;
rs_bind[i].length= &str_length;
break;
default:
fprintf(stderr, "ERROR: unexpected type: %d.\n", fields[i].type);
exit(1);
}
}
rc= mysql_stmt_bind_result(stmt, rs_bind);
check_execute(stmt, rc);
mct_log("Data:\n");
while (1)
{
int rc= mysql_stmt_fetch(stmt);
if (rc == 1 || rc == MYSQL_NO_DATA)
break;
mct_log(" ");
for (i = 0; i < num_fields; ++i)
{
switch (rs_bind[i].buffer_type)
{
case MYSQL_TYPE_LONG:
mct_log(" int: %ld;",
(long) *((int *) rs_bind[i].buffer));
break;
case MYSQL_TYPE_STRING:
mct_log(" str: '%s';",
(char *) rs_bind[i].buffer);
break;
case MYSQL_TYPE_DOUBLE:
mct_log(" dbl: %lf;",
(double) *((double *) rs_bind[i].buffer));
break;
case MYSQL_TYPE_NEWDECIMAL:
mct_log(" dec: '%s';",
(char *) rs_bind[i].buffer);
break;
default:
printf(" unexpected type (%d)\n",
rs_bind[i].buffer_type);
}
}
mct_log("\n");
}
mct_log("EOF\n");
rc= mysql_stmt_next_result(stmt);
mct_log("mysql_stmt_next_result(): %d; field_count: %d\n",
(int) rc, (int) mysql->field_count);
free(rs_bind);
mysql_free_result(rs_metadata);
if (rc > 0)
{
printf("Error: %s (errno: %d)\n",
mysql_stmt_error(stmt), mysql_stmt_errno(stmt));
DIE(rc > 0);
}
if (rc)
break;
if (!mysql->field_count)
{
/* This is the last OK-packet. No more resultsets. */
break;
}
}
}
mysql_stmt_close(stmt);
mct_close_log();
rc= mysql_commit(mysql);
myquery(rc);
/* i18n part of test case. */
{
const char *str_koi8r= "\xee\xd5\x2c\x20\xda\xc1\x20\xd2\xd9\xc2\xc1\xcc\xcb\xd5";
const char *str_cp1251= "\xcd\xf3\x2c\x20\xe7\xe0\x20\xf0\xfb\xe1\xe0\xeb\xea\xf3";
char o1_buffer[255];
ulong o1_length;
char o2_buffer[255];
ulong o2_length;
MYSQL_BIND rs_bind[2];
strmov(query, "CALL p2(?, ?, ?)");
stmt= mysql_simple_prepare(mysql, query);
check_stmt(stmt);
/* Init PS-parameters. */
bzero((char *) ps_params, sizeof (ps_params));
ps_params[0].buffer_type= MYSQL_TYPE_STRING;
ps_params[0].buffer= (char *) str_koi8r;
ps_params[0].buffer_length= strlen(str_koi8r);
ps_params[1].buffer_type= MYSQL_TYPE_STRING;
ps_params[1].buffer= o1_buffer;
ps_params[1].buffer_length= 0;
ps_params[2].buffer_type= MYSQL_TYPE_STRING;
ps_params[2].buffer= o2_buffer;
ps_params[2].buffer_length= 0;
/* Bind parameters. */
rc= mysql_stmt_bind_param(stmt, ps_params);
check_execute(stmt, rc);
/* Prevent converting to character_set_results. */
rc= mysql_query(mysql, "SET NAMES binary");
myquery(rc);
/* Execute statement. */
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
/* Bind result. */
bzero(rs_bind, sizeof (rs_bind));
rs_bind[0].buffer_type= MYSQL_TYPE_STRING;
rs_bind[0].buffer= o1_buffer;
rs_bind[0].buffer_length= sizeof (o1_buffer);
rs_bind[0].length= &o1_length;
rs_bind[1].buffer_type= MYSQL_TYPE_BLOB;
rs_bind[1].buffer= o2_buffer;
rs_bind[1].buffer_length= sizeof (o2_buffer);
rs_bind[1].length= &o2_length;
rc= mysql_stmt_bind_result(stmt, rs_bind);
check_execute(stmt, rc);
/* Fetch result. */
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
/* Check result. */
DIE_UNLESS(o1_length == strlen(str_cp1251));
DIE_UNLESS(o2_length == strlen(str_koi8r));
DIE_UNLESS(!memcmp(o1_buffer, str_cp1251, o1_length));
DIE_UNLESS(!memcmp(o2_buffer, str_koi8r, o2_length));
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == MYSQL_NO_DATA);
rc= mysql_stmt_next_result(stmt);
DIE_UNLESS(rc == 0 && mysql->field_count == 0);
mysql_stmt_close(stmt);
rc= mysql_commit(mysql);
myquery(rc);
}
}
|
Safe
|
[
"CWE-416"
] |
server
|
eef21014898d61e77890359d6546d4985d829ef6
|
2.96739668768188e+38
| 422 |
MDEV-11933 Wrong usage of linked list in mysql_prune_stmt_list
mysql_prune_stmt_list() was walking the list following
element->next pointers, but inside the loop it was invoking
list_add(element) that modified element->next. So, mysql_prune_stmt_list()
failed to visit and reset all elements, and some of them were left
with pointers to invalid MYSQL.
| 0 |
try_create_tested_existence (SoupSession *session, SoupMessage *msg,
gpointer user_data)
{
GVfsJob *job = G_VFS_JOB (user_data);
GOutputStream *stream;
SoupMessage *put_msg;
SoupURI *uri;
if (SOUP_STATUS_IS_SUCCESSFUL (msg->status_code))
{
g_vfs_job_failed (job,
G_IO_ERROR,
G_IO_ERROR_EXISTS,
_("Target file already exists"));
return;
}
/* TODO: other errors */
uri = soup_message_get_uri (msg);
put_msg = soup_message_new_from_uri (SOUP_METHOD_PUT, uri);
/*
* Doesn't work with apache > 2.2.9
* soup_message_headers_append (put_msg->request_headers, "If-None-Match", "*");
*/
stream = g_memory_output_stream_new (NULL, 0, g_try_realloc, g_free);
g_object_set_data_full (G_OBJECT (stream), "-gvfs-stream-msg", put_msg, g_object_unref);
g_vfs_job_open_for_write_set_handle (G_VFS_JOB_OPEN_FOR_WRITE (job), stream);
g_vfs_job_open_for_write_set_can_seek (G_VFS_JOB_OPEN_FOR_WRITE (job),
g_seekable_can_seek (G_SEEKABLE (stream)));
g_vfs_job_open_for_write_set_can_truncate (G_VFS_JOB_OPEN_FOR_WRITE (job),
g_seekable_can_truncate (G_SEEKABLE (stream)));
g_vfs_job_succeeded (job);
}
|
Safe
|
[] |
gvfs
|
f81ff2108ab3b6e370f20dcadd8708d23f499184
|
1.138065317052318e+38
| 35 |
dav: don't unescape the uri twice
path_equal tries to unescape path before comparing. Unfortunately
this function is used also for already unescaped paths. Therefore
unescaping can fail. This commit reverts changes which was done in
commit 50af53d and unescape just uris, which aren't unescaped yet.
https://bugzilla.gnome.org/show_bug.cgi?id=743298
| 0 |
cmsBool isidchar(int c)
{
return isalnum(c) || ismiddle(c);
}
|
Safe
|
[] |
Little-CMS
|
65e2f1df3495edc984f7e0d7b7b24e29d851e240
|
6.318655014732625e+37
| 4 |
Fix some warnings from static analysis
| 0 |
static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sock *sk = sock->sk;
char name[sizeof(uaddr->sa_data) + 1];
/*
* Check legality
*/
if (addr_len != sizeof(struct sockaddr))
return -EINVAL;
/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
* zero-terminated.
*/
memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
name[sizeof(uaddr->sa_data)] = 0;
return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
}
|
Safe
|
[
"CWE-119"
] |
linux
|
2b6867c2ce76c596676bec7d2d525af525fdc6e2
|
2.2878233593177616e+38
| 20 |
net/packet: fix overflow in check for priv area size
Subtracting tp_sizeof_priv from tp_block_size and casting to int
to check whether one is less then the other doesn't always work
(both of them are unsigned ints).
Compare them as is instead.
Also cast tp_sizeof_priv to u64 before using BLK_PLUS_PRIV, as
it can overflow inside BLK_PLUS_PRIV otherwise.
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
GF_Err paen_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs, fiin_AddBox);
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
|
2.7401759136389135e+38
| 4 |
prevent dref memleak on invalid input (#1183)
| 0 |
static int mongo_cmd_get_error_helper( mongo *conn, const char *db,
bson *realout, const char *cmdtype ) {
bson out = {NULL,0};
bson_bool_t haserror = 0;
/* Reset last error codes. */
mongo_clear_errors( conn );
/* If there's an error, store its code and string in the connection object. */
if( mongo_simple_int_command( conn, db, cmdtype, 1, &out ) == MONGO_OK ) {
bson_iterator it;
haserror = ( bson_find( &it, &out, "err" ) != BSON_NULL );
if( haserror ) mongo_set_last_error( conn, &it, &out );
}
if( realout )
*realout = out; /* transfer of ownership */
else
bson_destroy( &out );
if( haserror )
return MONGO_ERROR;
else
return MONGO_OK;
}
|
Safe
|
[
"CWE-190"
] |
mongo-c-driver-legacy
|
1a1f5e26a4309480d88598913f9eebf9e9cba8ca
|
6.168948481160564e+37
| 26 |
don't mix up int and size_t (first pass to fix that)
| 0 |
TEST_F(RenameCollectionTest, RenameCollectionByUUIDRatherThanNsForApplyOps) {
auto realRenameFromNss = NamespaceString("test.bar2");
auto dbName = realRenameFromNss.db().toString();
auto uuid = _createCollectionWithUUID(_opCtx.get(), realRenameFromNss);
auto uuidDoc = BSON("ui" << uuid);
auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _targetNss.ns() << "dropTarget"
<< true);
ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), dbName, uuidDoc["ui"], cmd, {}));
ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss));
}
|
Safe
|
[
"CWE-20"
] |
mongo
|
35c1b1f588f04926a958ad2fe4d9c59d79f81e8b
|
4.1328841966351315e+37
| 10 |
SERVER-35636 renameCollectionForApplyOps checks for complete namespace
| 0 |
TEST_F(ZNCTest, Modpython) {
if (QProcessEnvironment::systemEnvironment().value(
"DISABLED_ZNC_PERL_PYTHON_TEST") == "1") {
return;
}
auto znc = Run();
znc->CanLeak();
auto ircd = ConnectIRCd();
auto client = LoginClient();
client.Write("znc loadmod modpython");
client.Write("znc loadmod pyeval");
client.Write("PRIVMSG *pyeval :2+2");
client.ReadUntil(":*pyeval!znc@znc.in PRIVMSG nick :4");
client.Write("PRIVMSG *pyeval :module.GetUser().GetUserName()");
client.ReadUntil("nick :'user'");
ircd.Write(":server 001 nick :Hello");
ircd.Write(":n!u@h PRIVMSG nick :Hi\xF0, github issue #1229");
// "replacement character"
client.ReadUntil("Hi\xEF\xBF\xBD, github issue");
}
|
Vulnerable
|
[
"CWE-20"
] |
znc
|
64613bc8b6b4adf1e32231f9844d99cd512b8973
|
1.5292521034282495e+38
| 20 |
Don't crash if user specified invalid encoding.
This is CVE-2019-9917
| 1 |
match_checkcompoundpattern(
char_u *ptr,
int wlen,
garray_T *gap) // &sl_comppat
{
int i;
char_u *p;
int len;
for (i = 0; i + 1 < gap->ga_len; i += 2)
{
p = ((char_u **)gap->ga_data)[i + 1];
if (STRNCMP(ptr + wlen, p, STRLEN(p)) == 0)
{
// Second part matches at start of following compound word, now
// check if first part matches at end of previous word.
p = ((char_u **)gap->ga_data)[i];
len = (int)STRLEN(p);
if (len <= wlen && STRNCMP(ptr + wlen - len, p, len) == 0)
return TRUE;
}
}
return FALSE;
}
|
Safe
|
[
"CWE-416"
] |
vim
|
2813f38e021c6e6581c0c88fcf107e41788bc835
|
2.6817168628913866e+38
| 24 |
patch 8.2.5072: using uninitialized value and freed memory in spell command
Problem: Using uninitialized value and freed memory in spell command.
Solution: Initialize "attr". Check for empty line early.
| 0 |
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
unsigned int flags)
{
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
if (shhwtstamps &&
(flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
return TP_STATUS_TS_RAW_HARDWARE;
if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
ktime_to_timespec64_cond(skb->tstamp, ts))
return TP_STATUS_TS_SOFTWARE;
return 0;
}
|
Safe
|
[
"CWE-415"
] |
linux
|
ec6af094ea28f0f2dda1a6a33b14cd57e36a9755
|
1.6424641275022938e+38
| 16 |
net/packet: rx_owner_map depends on pg_vec
Packet sockets may switch ring versions. Avoid misinterpreting state
between versions, whose fields share a union. rx_owner_map is only
allocated with a packet ring (pg_vec) and both are swapped together.
If pg_vec is NULL, meaning no packet ring was allocated, then neither
was rx_owner_map. And the field may be old state from a tpacket_v3.
Fixes: 61fad6816fc1 ("net/packet: tpacket_rcv: avoid a producer race condition")
Reported-by: Syzbot <syzbot+1ac0994a0a0c55151121@syzkaller.appspotmail.com>
Signed-off-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20211215143937.106178-1-willemdebruijn.kernel@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
| 0 |
static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
int retval;
struct inode *inode;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de;
handle_t *handle = NULL;
if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
return -EIO;
trace_ext4_unlink_enter(dir, dentry);
/* Initialize quotas before so that eventual writes go
* in separate transaction */
retval = dquot_initialize(dir);
if (retval)
return retval;
retval = dquot_initialize(d_inode(dentry));
if (retval)
return retval;
retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (IS_ERR(bh))
return PTR_ERR(bh);
if (!bh)
goto end_unlink;
inode = d_inode(dentry);
retval = -EFSCORRUPTED;
if (le32_to_cpu(de->inode) != inode->i_ino)
goto end_unlink;
handle = ext4_journal_start(dir, EXT4_HT_DIR,
EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
handle = NULL;
goto end_unlink;
}
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
dir->i_ctime = dir->i_mtime = current_time(dir);
ext4_update_dx_flag(dir);
retval = ext4_mark_inode_dirty(handle, dir);
if (retval)
goto end_unlink;
if (inode->i_nlink == 0)
ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
dentry->d_name.len, dentry->d_name.name);
else
drop_nlink(inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
inode->i_ctime = current_time(inode);
retval = ext4_mark_inode_dirty(handle, inode);
#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want avoid
* invalidating the dentries here, alongside with returning the
* negative dentries at ext4_lookup(), when it is better
* supported by the VFS for the CI case.
*/
if (IS_CASEFOLDED(dir))
d_invalidate(dentry);
#endif
end_unlink:
brelse(bh);
if (handle)
ext4_journal_stop(handle);
trace_ext4_unlink_exit(dentry, retval);
return retval;
}
|
Safe
|
[
"CWE-125"
] |
linux
|
5872331b3d91820e14716632ebb56b1399b34fe1
|
2.9520792717796483e+38
| 81 |
ext4: fix potential negative array index in do_split()
If for any reason a directory passed to do_split() does not have enough
active entries to exceed half the size of the block, we can end up
iterating over all "count" entries without finding a split point.
In this case, count == move, and split will be zero, and we will
attempt a negative index into map[].
Guard against this by detecting this case, and falling back to
split-to-half-of-count instead; in this case we will still have
plenty of space (> half blocksize) in each split block.
Fixes: ef2b02d3e617 ("ext34: ensure do_split leaves enough free space in both blocks")
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/f53e246b-647c-64bb-16ec-135383c70ad7@redhat.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
| 0 |
static bool exec_command_on_flag(RFlagItem *flg, void *u) {
struct exec_command_t *user = (struct exec_command_t *)u;
r_core_block_size (user->core, flg->size);
r_core_seek (user->core, flg->offset, 1);
r_core_cmd0 (user->core, user->cmd);
return true;
}
|
Safe
|
[
"CWE-78"
] |
radare2
|
dd739f5a45b3af3d1f65f00fe19af1dbfec7aea7
|
2.4919881717262927e+37
| 7 |
Fix #14990 - multiple quoted command parsing issue ##core
> "?e hello""?e world"
hello
world"
> "?e hello";"?e world"
hello
world
| 0 |
void __sk_mem_reclaim(struct sock *sk, int amount)
{
amount >>= SK_MEM_QUANTUM_SHIFT;
sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
__sk_mem_reduce_allocated(sk, amount);
}
|
Safe
|
[
"CWE-704"
] |
linux
|
9d538fa60bad4f7b23193c89e843797a1cf71ef3
|
2.920965183347273e+38
| 6 |
net: Set sk_prot_creator when cloning sockets to the right proto
sk->sk_prot and sk->sk_prot_creator can differ when the app uses
IPV6_ADDRFORM (transforming an IPv6-socket to an IPv4-one).
Which is why sk_prot_creator is there to make sure that sk_prot_free()
does the kmem_cache_free() on the right kmem_cache slab.
Now, if such a socket gets transformed back to a listening socket (using
connect() with AF_UNSPEC) we will allocate an IPv4 tcp_sock through
sk_clone_lock() when a new connection comes in. But sk_prot_creator will
still point to the IPv6 kmem_cache (as everything got copied in
sk_clone_lock()). When freeing, we will thus put this
memory back into the IPv6 kmem_cache although it was allocated in the
IPv4 cache. I have seen memory corruption happening because of this.
With slub-debugging and MEMCG_KMEM enabled this gives the warning
"cache_from_obj: Wrong slab cache. TCPv6 but object is from TCP"
A C-program to trigger this:
void main(void)
{
int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
int new_fd, newest_fd, client_fd;
struct sockaddr_in6 bind_addr;
struct sockaddr_in bind_addr4, client_addr1, client_addr2;
struct sockaddr unsp;
int val;
memset(&bind_addr, 0, sizeof(bind_addr));
bind_addr.sin6_family = AF_INET6;
bind_addr.sin6_port = ntohs(42424);
memset(&client_addr1, 0, sizeof(client_addr1));
client_addr1.sin_family = AF_INET;
client_addr1.sin_port = ntohs(42424);
client_addr1.sin_addr.s_addr = inet_addr("127.0.0.1");
memset(&client_addr2, 0, sizeof(client_addr2));
client_addr2.sin_family = AF_INET;
client_addr2.sin_port = ntohs(42421);
client_addr2.sin_addr.s_addr = inet_addr("127.0.0.1");
memset(&unsp, 0, sizeof(unsp));
unsp.sa_family = AF_UNSPEC;
bind(fd, (struct sockaddr *)&bind_addr, sizeof(bind_addr));
listen(fd, 5);
client_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
connect(client_fd, (struct sockaddr *)&client_addr1, sizeof(client_addr1));
new_fd = accept(fd, NULL, NULL);
close(fd);
val = AF_INET;
setsockopt(new_fd, SOL_IPV6, IPV6_ADDRFORM, &val, sizeof(val));
connect(new_fd, &unsp, sizeof(unsp));
memset(&bind_addr4, 0, sizeof(bind_addr4));
bind_addr4.sin_family = AF_INET;
bind_addr4.sin_port = ntohs(42421);
bind(new_fd, (struct sockaddr *)&bind_addr4, sizeof(bind_addr4));
listen(new_fd, 5);
client_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
connect(client_fd, (struct sockaddr *)&client_addr2, sizeof(client_addr2));
newest_fd = accept(new_fd, NULL, NULL);
close(new_fd);
close(client_fd);
close(new_fd);
}
As far as I can see, this bug has been there since the beginning of the
git-days.
Signed-off-by: Christoph Paasch <cpaasch@apple.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
vte_sequence_handler_scroll_down (VteTerminal *terminal, GValueArray *params)
{
long val = 1;
GValue *value;
if ((params != NULL) && (params->n_values > 0)) {
value = g_value_array_get_nth(params, 0);
if (G_VALUE_HOLDS_LONG(value)) {
val = g_value_get_long(value);
val = MAX(val, 1);
}
}
_vte_terminal_scroll_text (terminal, val);
}
|
Safe
|
[] |
vte
|
58bc3a942f198a1a8788553ca72c19d7c1702b74
|
1.8480370731767545e+38
| 15 |
fix bug #548272
svn path=/trunk/; revision=2365
| 0 |
dump_header_blob (const byte *buffer, size_t length, FILE *fp)
{
unsigned long n;
if (length < 32)
{
fprintf (fp, "[blob too short]\n");
return -1;
}
fprintf (fp, "Version: %d\n", buffer[5]);
n = get16 (buffer + 6);
fprintf( fp, "Flags: %04lX", n);
if (n)
{
int any = 0;
fputs (" (", fp);
if ((n & 2))
{
if (any)
putc (',', fp);
fputs ("openpgp", fp);
any++;
}
putc (')', fp);
}
putc ('\n', fp);
if ( memcmp (buffer+8, "KBXf", 4))
fprintf (fp, "[Error: invalid magic number]\n");
n = get32 (buffer+16);
fprintf( fp, "created-at: %lu\n", n );
n = get32 (buffer+20);
fprintf( fp, "last-maint: %lu\n", n );
return 0;
}
|
Safe
|
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
|
9.890073269784735e+37
| 39 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <wk@gnupg.org>
| 0 |
static void fm93c56a_select(struct ql3_adapter *qdev)
{
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}
|
Safe
|
[
"CWE-401"
] |
linux
|
1acb8f2a7a9f10543868ddd737e37424d5c36cf4
|
2.4216438031815285e+38
| 9 |
net: qlogic: Fix memory leak in ql_alloc_large_buffers
In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb.
This skb should be released if pci_dma_mapping_error fails.
Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()")
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
DEFUN(movU1, MOVE_UP1, "Cursor up. With edge touched, slide")
{
_movU(1);
}
|
Safe
|
[
"CWE-59",
"CWE-241"
] |
w3m
|
18dcbadf2771cdb0c18509b14e4e73505b242753
|
1.8787428415596856e+36
| 4 |
Make temporary directory safely when ~/.w3m is unwritable
| 0 |
void ReportError(TfLiteContext* context, TfLiteType input_type,
TfLiteType output_type) {
context->ReportError(
context, "Input type %s with Output type %s is not currently supported.",
TfLiteTypeGetName(input_type), TfLiteTypeGetName(output_type));
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
|
3.654511566203845e+37
| 6 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
| 0 |
void CLASS imacon_full_load_raw()
{
int row, col;
#ifndef LIBRAW_LIBRARY_BUILD
for (row=0; row < height; row++)
for (col=0; col < width; col++)
{
read_shorts (image[row*width+col], 3);
}
#else
for (row=0; row < height; row++)
for (col=0; col < width; col++)
{
read_shorts (color_image[(row+top_margin)*raw_width+col+left_margin], 3);
}
#endif
}
|
Safe
|
[] |
LibRaw
|
c4e374ea6c979a7d1d968f5082b7d0ea8cd27202
|
2.1885143463701773e+37
| 18 |
additional data checks backported from 0.15.4
| 0 |
R_API int r_snprintf(char *string, int len, const char *fmt, ...) {
va_list ap;
va_start (ap, fmt);
int ret = vsnprintf (string, len, fmt, ap);
string[len - 1] = 0;
va_end (ap);
return ret;
}
|
Safe
|
[
"CWE-78"
] |
radare2
|
04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9
|
1.5207032297978773e+37
| 8 |
Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments
| 0 |
vte_sequence_handler_window_manipulation (VteTerminal *terminal, GValueArray *params)
{
GdkScreen *gscreen;
VteScreen *screen;
GValue *value;
GtkWidget *widget;
char buf[128];
long param, arg1, arg2;
gint width, height;
guint i;
GtkAllocation allocation;
widget = &terminal->widget;
screen = terminal->pvt->screen;
for (i = 0; ((params != NULL) && (i < params->n_values)); i++) {
arg1 = arg2 = -1;
if (i + 1 < params->n_values) {
value = g_value_array_get_nth(params, i + 1);
if (G_VALUE_HOLDS_LONG(value)) {
arg1 = g_value_get_long(value);
}
}
if (i + 2 < params->n_values) {
value = g_value_array_get_nth(params, i + 2);
if (G_VALUE_HOLDS_LONG(value)) {
arg2 = g_value_get_long(value);
}
}
value = g_value_array_get_nth(params, i);
if (!G_VALUE_HOLDS_LONG(value)) {
continue;
}
param = g_value_get_long(value);
switch (param) {
case 1:
_vte_debug_print(VTE_DEBUG_PARSE,
"Deiconifying window.\n");
vte_terminal_emit_deiconify_window(terminal);
break;
case 2:
_vte_debug_print(VTE_DEBUG_PARSE,
"Iconifying window.\n");
vte_terminal_emit_iconify_window(terminal);
break;
case 3:
if ((arg1 != -1) && (arg2 != -2)) {
_vte_debug_print(VTE_DEBUG_PARSE,
"Moving window to "
"%ld,%ld.\n", arg1, arg2);
vte_terminal_emit_move_window(terminal,
arg1, arg2);
i += 2;
}
break;
case 4:
if ((arg1 != -1) && (arg2 != -1)) {
_vte_debug_print(VTE_DEBUG_PARSE,
"Resizing window "
"(to %ldx%ld pixels).\n",
arg2, arg1);
vte_terminal_emit_resize_window(terminal,
arg2 +
terminal->pvt->inner_border.left +
terminal->pvt->inner_border.right,
arg1 +
terminal->pvt->inner_border.top +
terminal->pvt->inner_border.bottom);
i += 2;
}
break;
case 5:
_vte_debug_print(VTE_DEBUG_PARSE, "Raising window.\n");
vte_terminal_emit_raise_window(terminal);
break;
case 6:
_vte_debug_print(VTE_DEBUG_PARSE, "Lowering window.\n");
vte_terminal_emit_lower_window(terminal);
break;
case 7:
_vte_debug_print(VTE_DEBUG_PARSE,
"Refreshing window.\n");
_vte_invalidate_all(terminal);
vte_terminal_emit_refresh_window(terminal);
break;
case 8:
if ((arg1 != -1) && (arg2 != -1)) {
_vte_debug_print(VTE_DEBUG_PARSE,
"Resizing window "
"(to %ld columns, %ld rows).\n",
arg2, arg1);
vte_terminal_emit_resize_window(terminal,
arg2 * terminal->char_width +
terminal->pvt->inner_border.left +
terminal->pvt->inner_border.right,
arg1 * terminal->char_height +
terminal->pvt->inner_border.top +
terminal->pvt->inner_border.bottom);
i += 2;
}
break;
case 9:
switch (arg1) {
case 0:
_vte_debug_print(VTE_DEBUG_PARSE,
"Restoring window.\n");
vte_terminal_emit_restore_window(terminal);
break;
case 1:
_vte_debug_print(VTE_DEBUG_PARSE,
"Maximizing window.\n");
vte_terminal_emit_maximize_window(terminal);
break;
default:
break;
}
i++;
break;
case 11:
/* If we're unmapped, then we're iconified. */
g_snprintf(buf, sizeof(buf),
_VTE_CAP_CSI "%dt",
1 + !gtk_widget_get_mapped(widget));
_vte_debug_print(VTE_DEBUG_PARSE,
"Reporting window state %s.\n",
gtk_widget_get_mapped(widget) ?
"non-iconified" : "iconified");
vte_terminal_feed_child(terminal, buf, -1);
break;
case 13:
/* Send window location, in pixels. */
gdk_window_get_origin(gtk_widget_get_window(widget),
&width, &height);
g_snprintf(buf, sizeof(buf),
_VTE_CAP_CSI "3;%d;%dt",
width + terminal->pvt->inner_border.left,
height + terminal->pvt->inner_border.top);
_vte_debug_print(VTE_DEBUG_PARSE,
"Reporting window location"
"(%d++,%d++).\n",
width, height);
vte_terminal_feed_child(terminal, buf, -1);
break;
case 14:
/* Send window size, in pixels. */
gtk_widget_get_allocation(widget, &allocation);
g_snprintf(buf, sizeof(buf),
_VTE_CAP_CSI "4;%d;%dt",
allocation.height -
(terminal->pvt->inner_border.top +
terminal->pvt->inner_border.bottom),
allocation.width -
(terminal->pvt->inner_border.left +
terminal->pvt->inner_border.right));
_vte_debug_print(VTE_DEBUG_PARSE,
"Reporting window size "
"(%dx%dn",
width - (terminal->pvt->inner_border.left + terminal->pvt->inner_border.right),
height - (terminal->pvt->inner_border.top + terminal->pvt->inner_border.bottom));
vte_terminal_feed_child(terminal, buf, -1);
break;
case 18:
/* Send widget size, in cells. */
_vte_debug_print(VTE_DEBUG_PARSE,
"Reporting widget size.\n");
g_snprintf(buf, sizeof(buf),
_VTE_CAP_CSI "8;%ld;%ldt",
terminal->row_count,
terminal->column_count);
vte_terminal_feed_child(terminal, buf, -1);
break;
case 19:
_vte_debug_print(VTE_DEBUG_PARSE,
"Reporting screen size.\n");
gscreen = gtk_widget_get_screen(widget);
height = gdk_screen_get_height(gscreen);
width = gdk_screen_get_width(gscreen);
g_snprintf(buf, sizeof(buf),
_VTE_CAP_CSI "9;%ld;%ldt",
height / terminal->char_height,
width / terminal->char_width);
vte_terminal_feed_child(terminal, buf, -1);
break;
case 20:
/* Report the icon title. */
_vte_debug_print(VTE_DEBUG_PARSE,
"Reporting icon title.\n");
g_snprintf (buf, sizeof (buf),
_VTE_CAP_OSC "L%s" _VTE_CAP_ST,
terminal->icon_title);
vte_terminal_feed_child(terminal, buf, -1);
break;
case 21:
/* Report the window title. */
_vte_debug_print(VTE_DEBUG_PARSE,
"Reporting window title.\n");
g_snprintf (buf, sizeof (buf),
_VTE_CAP_OSC "l%s" _VTE_CAP_ST,
terminal->window_title);
vte_terminal_feed_child(terminal, buf, -1);
break;
default:
if (param >= 24) {
_vte_debug_print(VTE_DEBUG_PARSE,
"Resizing to %ld rows.\n",
param);
/* Resize to the specified number of
* rows. */
vte_terminal_emit_resize_window(terminal,
terminal->column_count * terminal->char_width +
terminal->pvt->inner_border.left +
terminal->pvt->inner_border.right,
param * terminal->char_height +
terminal->pvt->inner_border.top +
terminal->pvt->inner_border.bottom);
}
break;
}
}
}
|
Vulnerable
|
[] |
vte
|
8b971a7b2c59902914ecbbc3915c45dd21530a91
|
1.125690286212088e+38
| 220 |
Fix terminal title reporting
Fixed CVE-2003-0070 again.
See also http://marc.info/?l=bugtraq&m=104612710031920&w=2 .
(cherry picked from commit 6042c75b5a6daa0e499e61c8e07242d890d38ff1)
| 1 |
Status operator()(OpKernelContext* context,
typename TTypes<Tindex>::ConstVec reverse_index_map,
typename TTypes<T>::ConstVec grad_values,
typename TTypes<T>::Vec d_values,
typename TTypes<T>::Scalar d_default_value) {
const CPUDevice& device = context->eigen_device<CPUDevice>();
const Tindex N = reverse_index_map.dimension(0);
const Tindex N_full = grad_values.dimension(0);
T& d_default_value_scalar = d_default_value();
d_default_value_scalar = T();
Tensor visited_t;
TF_RETURN_IF_ERROR(
context->allocate_temp(DT_BOOL, TensorShape({N_full}), &visited_t));
auto visited = visited_t.vec<bool>();
visited.device(device) = visited.constant(false);
for (int i = 0; i < N; ++i) {
// Locate the index of the output of the forward prop associated
// with this location in the input of the forward prop. Copy
// the gradient into it. Mark it as visited.
int64 reverse_index = reverse_index_map(i);
if (reverse_index < 0 || reverse_index >= N_full) {
return errors::InvalidArgument(
"Elements in reverse index must be in [0, ", N_full, ") but got ",
reverse_index);
}
d_values(i) = grad_values(reverse_index);
visited(reverse_index) = true;
}
for (int j = 0; j < N_full; ++j) {
// The default value gradient gets the accumulated remainder of
// the backprop values (since the default value was used to fill
// in these slots in the forward calculation).
if (!visited(j)) {
d_default_value_scalar += grad_values(j);
}
}
return Status::OK();
}
|
Safe
|
[
"CWE-476",
"CWE-703"
] |
tensorflow
|
faa76f39014ed3b5e2c158593b1335522e573c7f
|
1.714553732151129e+38
| 41 |
Fix heap-buffer-overflow issue with `tf.raw_ops.SparseFillEmptyRows`.
PiperOrigin-RevId: 372009178
Change-Id: Ia1a9e9691ecaa072f32fb39a0887b2aabd399210
| 0 |
static int add_sdb_include_dir(Sdb *s, const char *incl, int idx) {
if (!s || !incl)
return false;
return sdb_array_set (s, "includedirs", idx, incl, 0);
}
|
Safe
|
[
"CWE-119",
"CWE-125"
] |
radare2
|
d37d2b858ac47f2f108034be0bcecadaddfbc8b3
|
2.546833529813174e+38
| 5 |
Fix #10465 - Avoid string on low addresses (workaround) for corrupted dwarf (#10478)
| 0 |
static void check_alt_basis_dirs(void)
{
STRUCT_STAT st;
char *slash = strrchr(curr_dir, '/');
int j;
for (j = 0; j < basis_dir_cnt; j++) {
char *bdir = basis_dir[j];
int bd_len = strlen(bdir);
if (bd_len > 1 && bdir[bd_len-1] == '/')
bdir[--bd_len] = '\0';
if (dry_run > 1 && *bdir != '/') {
int len = curr_dir_len + 1 + bd_len + 1;
char *new = new_array(char, len);
if (!new)
out_of_memory("check_alt_basis_dirs");
if (slash && strncmp(bdir, "../", 3) == 0) {
/* We want to remove only one leading "../" prefix for
* the directory we couldn't create in dry-run mode:
* this ensures that any other ".." references get
* evaluated the same as they would for a live copy. */
*slash = '\0';
pathjoin(new, len, curr_dir, bdir + 3);
*slash = '/';
} else
pathjoin(new, len, curr_dir, bdir);
basis_dir[j] = bdir = new;
}
if (do_stat(bdir, &st) < 0)
rprintf(FWARNING, "%s arg does not exist: %s\n", dest_option, bdir);
else if (!S_ISDIR(st.st_mode))
rprintf(FWARNING, "%s arg is not a dir: %s\n", dest_option, bdir);
}
}
|
Safe
|
[
"CWE-59"
] |
rsync
|
962f8b90045ab331fc04c9e65f80f1a53e68243b
|
2.4890374047992243e+38
| 34 |
Complain if an inc-recursive path is not right for its dir.
This ensures that a malicious sender can't use a just-sent
symlink as a trasnfer path.
| 0 |
GF_Err ctxload_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove)
{
CTXLoadPriv *priv = gf_filter_get_udta(filter);
const GF_PropertyValue *prop;
if (is_remove) {
priv->in_pid = NULL;
if (priv->out_pid) {
gf_filter_pid_remove(priv->out_pid);
priv->out_pid = NULL;
}
return GF_OK;
}
if (! gf_filter_pid_check_caps(pid))
return GF_NOT_SUPPORTED;
//we must have a file path
prop = gf_filter_pid_get_property(pid, GF_PROP_PID_FILEPATH);
if (!prop || ! prop->value.string) {
return GF_NOT_SUPPORTED;
}
if (!priv->in_pid) {
GF_FilterEvent fevt;
priv->in_pid = pid;
//we work with full file only, send a play event on source to indicate that
GF_FEVT_INIT(fevt, GF_FEVT_PLAY, pid);
fevt.play.start_range = 0;
fevt.base.on_pid = priv->in_pid;
fevt.play.full_file_only = GF_TRUE;
gf_filter_pid_send_event(priv->in_pid, &fevt);
} else {
if (pid != priv->in_pid) {
return GF_REQUIRES_NEW_INSTANCE;
}
//update of PID filename
if (!prop->value.string || !priv->file_name || strcmp(prop->value.string, priv->file_name))
return GF_NOT_SUPPORTED;
return GF_OK;
}
priv->file_name = prop->value.string;
priv->nb_streams = 1;
//declare a new output PID of type scene, codecid RAW
priv->out_pid = gf_filter_pid_new(filter);
gf_filter_pid_copy_properties(priv->out_pid, pid);
gf_filter_pid_set_property(priv->out_pid, GF_PROP_PID_STREAM_TYPE, &PROP_UINT(GF_STREAM_SCENE) );
gf_filter_pid_set_property(priv->out_pid, GF_PROP_PID_CODECID, &PROP_UINT(GF_CODECID_RAW) );
gf_filter_pid_set_property(priv->out_pid, GF_PROP_PID_IN_IOD, &PROP_BOOL(GF_TRUE) );
gf_filter_pid_set_udta(pid, priv->out_pid);
priv->file_size = 0;
priv->load_flags = 0;
prop = gf_filter_pid_get_property(pid, GF_PROP_PID_ESID);
priv->base_stream_id = prop ? prop->value.uint : -1;
priv->pck_time = -1;
switch (priv->load.type) {
case GF_SM_LOAD_BT:
gf_filter_set_name(filter, "Load:BT");
break;
case GF_SM_LOAD_VRML:
gf_filter_set_name(filter, "Load:VRML97");
break;
case GF_SM_LOAD_X3DV:
gf_filter_set_name(filter, "Load:X3D+vrml");
break;
case GF_SM_LOAD_XMTA:
gf_filter_set_name(filter, "Load:XMTA");
break;
case GF_SM_LOAD_X3D:
gf_filter_set_name(filter, "Load:X3D+XML Syntax");
break;
case GF_SM_LOAD_SWF:
gf_filter_set_name(filter, "Load:SWF");
break;
case GF_SM_LOAD_XSR:
gf_filter_set_name(filter, "Load:LASeRML");
break;
case GF_SM_LOAD_MP4:
gf_filter_set_name(filter, "Load:MP4BIFSMemory");
break;
default:
break;
}
return GF_OK;
}
|
Safe
|
[
"CWE-276"
] |
gpac
|
96699aabae042f8f55cf8a85fa5758e3db752bae
|
1.5218527530292079e+37
| 97 |
fixed #2061
| 0 |
static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
{
int ret;
struct k90_led *led = container_of(led_cdev, struct k90_led, cdev);
struct device *dev = led->cdev.dev->parent;
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int brightness;
char *data;
data = kmalloc(8, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
ret = -EIO;
goto out;
}
brightness = data[4];
if (brightness < 0 || brightness > 3) {
dev_warn(dev,
"Read invalid backlight brightness: %02hhx.\n",
data[4]);
ret = -EIO;
goto out;
}
ret = brightness;
out:
kfree(data);
return ret;
}
|
Safe
|
[
"CWE-399",
"CWE-119"
] |
linux
|
6d104af38b570d37aa32a5803b04c354f8ed513d
|
1.869610093753757e+38
| 39 |
HID: corsair: fix DMA buffers on stack
Not all platforms support DMA to the stack, and specifically since v4.9
this is no longer supported on x86 with VMAP_STACK either.
Note that the macro-mode buffer was larger than necessary.
Fixes: 6f78193ee9ea ("HID: corsair: Add Corsair Vengeance K90 driver")
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Johan Hovold <johan@kernel.org>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
| 0 |
ippTimeToDate(time_t t) /* I - Time in seconds */
{
struct tm *unixdate; /* UNIX unixdate/time info */
ipp_uchar_t *date = _cupsGlobals()->ipp_date;
/* RFC-2579 date/time data */
/*
* RFC-2579 date/time format is:
*
* Byte(s) Description
* ------- -----------
* 0-1 Year (0 to 65535)
* 2 Month (1 to 12)
* 3 Day (1 to 31)
* 4 Hours (0 to 23)
* 5 Minutes (0 to 59)
* 6 Seconds (0 to 60, 60 = "leap second")
* 7 Deciseconds (0 to 9)
* 8 +/- UTC
* 9 UTC hours (0 to 11)
* 10 UTC minutes (0 to 59)
*/
unixdate = gmtime(&t);
unixdate->tm_year += 1900;
date[0] = (ipp_uchar_t)(unixdate->tm_year >> 8);
date[1] = (ipp_uchar_t)(unixdate->tm_year);
date[2] = (ipp_uchar_t)(unixdate->tm_mon + 1);
date[3] = (ipp_uchar_t)unixdate->tm_mday;
date[4] = (ipp_uchar_t)unixdate->tm_hour;
date[5] = (ipp_uchar_t)unixdate->tm_min;
date[6] = (ipp_uchar_t)unixdate->tm_sec;
date[7] = 0;
date[8] = '+';
date[9] = 0;
date[10] = 0;
return (date);
}
|
Safe
|
[
"CWE-120"
] |
cups
|
f24e6cf6a39300ad0c3726a41a4aab51ad54c109
|
1.0067883646582118e+38
| 41 |
Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929)
| 0 |
ax25_cb *ax25_create_cb(void)
{
ax25_cb *ax25;
if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
atomic_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
skb_queue_head_init(&ax25->frag_queue);
skb_queue_head_init(&ax25->ack_queue);
skb_queue_head_init(&ax25->reseq_queue);
ax25_setup_timers(ax25);
ax25_fillin_cb(ax25, NULL);
ax25->state = AX25_STATE_0;
return ax25;
}
|
Safe
|
[] |
net
|
79462ad02e861803b3840cc782248c7359451cd9
|
3.2612612821029527e+38
| 22 |
net: add validation for the socket syscall protocol argument
郭永刚 reported that one could simply crash the kernel as root by
using a simple program:
int socket_fd;
struct sockaddr_in addr;
addr.sin_port = 0;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_family = 10;
socket_fd = socket(10,3,0x40000000);
connect(socket_fd , &addr,16);
AF_INET, AF_INET6 sockets actually only support 8-bit protocol
identifiers. inet_sock's skc_protocol field thus is sized accordingly,
thus larger protocol identifiers simply cut off the higher bits and
store a zero in the protocol fields.
This could lead to e.g. NULL function pointer because as a result of
the cut off inet_num is zero and we call down to inet_autobind, which
is NULL for raw sockets.
kernel: Call Trace:
kernel: [<ffffffff816db90e>] ? inet_autobind+0x2e/0x70
kernel: [<ffffffff816db9a4>] inet_dgram_connect+0x54/0x80
kernel: [<ffffffff81645069>] SYSC_connect+0xd9/0x110
kernel: [<ffffffff810ac51b>] ? ptrace_notify+0x5b/0x80
kernel: [<ffffffff810236d8>] ? syscall_trace_enter_phase2+0x108/0x200
kernel: [<ffffffff81645e0e>] SyS_connect+0xe/0x10
kernel: [<ffffffff81779515>] tracesys_phase2+0x84/0x89
I found no particular commit which introduced this problem.
CVE: CVE-2015-8543
Cc: Cong Wang <cwang@twopensource.com>
Reported-by: 郭永刚 <guoyonggang@360.cn>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
unsigned long flags;
/*
* We can't change the weight of the root cgroup.
*/
if (!tg->se[0])
return -EINVAL;
if (shares < MIN_SHARES)
shares = MIN_SHARES;
else if (shares > MAX_SHARES)
shares = MAX_SHARES;
mutex_lock(&shares_mutex);
if (tg->shares == shares)
goto done;
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
unregister_fair_sched_group(tg, i);
list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for any ongoing reference to this group to finish */
synchronize_sched();
/*
* Now we are free to modify the group's share on each cpu
* w/o tripping rebalance_share or load_balance_fair.
*/
tg->shares = shares;
for_each_possible_cpu(i) {
/*
* force a rebalance
*/
cfs_rq_set_shares(tg->cfs_rq[i], 0);
set_se_shares(tg->se[i], shares);
}
/*
* Enable load balance activity on this group, by inserting it back on
* each cpu's rq->leaf_cfs_rq_list.
*/
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
register_fair_sched_group(tg, i);
list_add_rcu(&tg->siblings, &tg->parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
done:
mutex_unlock(&shares_mutex);
return 0;
}
|
Safe
|
[
"CWE-703",
"CWE-835"
] |
linux
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
2.4082713566378342e+38
| 55 |
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it sucessfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reported-by: Bjoern B. Brandenburg <bbb.lst@gmail.com>
Tested-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: stable@kernel.org
LKML-Reference: <1291802742.1417.9.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| 0 |
GF_Err gf_isom_lhvc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg, GF_ISOMLHEVCTrackType track_type)
{
if (cfg) cfg->is_lhvc = GF_TRUE;
switch (track_type) {
case GF_ISOM_LEHVC_ONLY:
return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC, GF_FALSE);
case GF_ISOM_LEHVC_WITH_BASE:
return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC_WITH_BASE, GF_FALSE);
case GF_ISOM_LEHVC_WITH_BASE_BACKWARD:
return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD, GF_FALSE);
case GF_ISOM_HEVC_TILE_BASE:
return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_HEVC_TILE_BASE, GF_FALSE);
default:
return GF_BAD_PARAM;
}
}
|
Safe
|
[
"CWE-401"
] |
gpac
|
0a85029d694f992f3631e2f249e4999daee15cbf
|
4.807309455278762e+37
| 16 |
fixed #1785 (fuzz)
| 0 |
ecma_date_to_string_format (ecma_number_t datetime_number, /**< datetime */
const char *format_p) /**< format buffer */
{
const uint32_t date_buffer_length = 37;
JERRY_VLA (lit_utf8_byte_t, date_buffer, date_buffer_length);
lit_utf8_byte_t *dest_p = date_buffer;
while (*format_p != LIT_CHAR_NULL)
{
if (*format_p != LIT_CHAR_DOLLAR_SIGN)
{
*dest_p++ = (lit_utf8_byte_t) *format_p++;
continue;
}
format_p++;
const char *str_p = NULL;
int32_t number = 0;
int32_t number_length = 0;
switch (*format_p)
{
case LIT_CHAR_UPPERCASE_Y: /* Year. */
{
number = ecma_date_year_from_time (datetime_number);
if (number >= 100000 || number <= -100000)
{
number_length = 6;
}
else if (number >= 10000 || number <= -10000)
{
number_length = 5;
}
else
{
number_length = 4;
}
break;
}
case LIT_CHAR_LOWERCASE_Y: /* ISO Year: -000001, 0000, 0001, 9999, +012345 */
{
number = ecma_date_year_from_time (datetime_number);
if (0 <= number && number <= 9999)
{
number_length = 4;
}
else
{
number_length = 6;
}
break;
}
case LIT_CHAR_UPPERCASE_M: /* Month. */
{
int32_t month = ecma_date_month_from_time (datetime_number);
JERRY_ASSERT (month >= 0 && month <= 11);
str_p = month_names_p[month];
break;
}
case LIT_CHAR_UPPERCASE_O: /* Month as number. */
{
/* The 'ecma_date_month_from_time' (ECMA 262 v5, 15.9.1.4) returns a
* number from 0 to 11, but we have to print the month from 1 to 12
* for ISO 8601 standard (ECMA 262 v5, 15.9.1.15). */
number = ecma_date_month_from_time (datetime_number) + 1;
number_length = 2;
break;
}
case LIT_CHAR_UPPERCASE_D: /* Day. */
{
number = ecma_date_date_from_time (datetime_number);
number_length = 2;
break;
}
case LIT_CHAR_UPPERCASE_W: /* Day of week. */
{
int32_t day = ecma_date_week_day (datetime_number);
JERRY_ASSERT (day >= 0 && day <= 6);
str_p = day_names_p[day];
break;
}
case LIT_CHAR_LOWERCASE_H: /* Hour. */
{
number = ecma_date_hour_from_time (datetime_number);
number_length = 2;
break;
}
case LIT_CHAR_LOWERCASE_M: /* Minutes. */
{
number = ecma_date_min_from_time (datetime_number);
number_length = 2;
break;
}
case LIT_CHAR_LOWERCASE_S: /* Seconds. */
{
number = ecma_date_sec_from_time (datetime_number);
number_length = 2;
break;
}
case LIT_CHAR_LOWERCASE_I: /* Milliseconds. */
{
number = ecma_date_ms_from_time (datetime_number);
number_length = 3;
break;
}
case LIT_CHAR_LOWERCASE_Z: /* Time zone hours part. */
{
int32_t time_zone = (int32_t) ecma_date_local_time_zone_adjustment (datetime_number);
if (time_zone >= 0)
{
*dest_p++ = LIT_CHAR_PLUS;
}
else
{
*dest_p++ = LIT_CHAR_MINUS;
time_zone = -time_zone;
}
number = time_zone / ECMA_DATE_MS_PER_HOUR;
number_length = 2;
break;
}
default:
{
JERRY_ASSERT (*format_p == LIT_CHAR_UPPERCASE_Z); /* Time zone minutes part. */
int32_t time_zone = (int32_t) ecma_date_local_time_zone_adjustment (datetime_number);
if (time_zone < 0)
{
time_zone = -time_zone;
}
number = (time_zone % ECMA_DATE_MS_PER_HOUR) / ECMA_DATE_MS_PER_MINUTE;
number_length = 2;
break;
}
}
format_p++;
if (str_p != NULL)
{
/* Print string values: month or day name which is always 3 characters */
memcpy (dest_p, str_p, 3);
dest_p += 3;
continue;
}
/* Print right aligned number values. */
JERRY_ASSERT (number_length > 0);
if (number < 0)
{
number = -number;
*dest_p++ = '-';
}
else if (*(format_p - 1) == LIT_CHAR_LOWERCASE_Y && number_length == 6)
{
/* positive sign is compulsory for extended years */
*dest_p++ = '+';
}
dest_p += number_length;
lit_utf8_byte_t *buffer_p = dest_p;
do
{
buffer_p--;
*buffer_p = (lit_utf8_byte_t) ((number % 10) + (int32_t) LIT_CHAR_0);
number /= 10;
}
while (--number_length);
}
JERRY_ASSERT (dest_p <= date_buffer + date_buffer_length);
return ecma_make_string_value (ecma_new_ecma_string_from_ascii (date_buffer,
(lit_utf8_size_t) (dest_p - date_buffer)));
} /* ecma_date_to_string_format */
|
Safe
|
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
|
3.0727330022892867e+38
| 188 |
Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz batizjob@gmail.com
| 0 |
static int __init hugetlb_init(void)
{
/* Some platform decide whether they support huge pages at boot
* time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
* there is no such support
*/
if (HPAGE_SHIFT == 0)
return 0;
if (!size_to_hstate(default_hstate_size)) {
default_hstate_size = HPAGE_SIZE;
if (!size_to_hstate(default_hstate_size))
hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
}
default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
if (default_hstate_max_huge_pages)
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
hugetlb_init_hstates();
gather_bootmem_prealloc();
report_hugepages();
hugetlb_sysfs_init();
hugetlb_register_all_nodes();
return 0;
}
|
Safe
|
[
"CWE-399"
] |
linux
|
90481622d75715bfcb68501280a917dbfe516029
|
2.895196961050959e+38
| 30 |
hugepages: fix use after free bug in "quota" handling
hugetlbfs_{get,put}_quota() are badly named. They don't interact with the
general quota handling code, and they don't much resemble its behaviour.
Rather than being about maintaining limits on on-disk block usage by
particular users, they are instead about maintaining limits on in-memory
page usage (including anonymous MAP_PRIVATE copied-on-write pages)
associated with a particular hugetlbfs filesystem instance.
Worse, they work by having callbacks to the hugetlbfs filesystem code from
the low-level page handling code, in particular from free_huge_page().
This is a layering violation of itself, but more importantly, if the
kernel does a get_user_pages() on hugepages (which can happen from KVM
amongst others), then the free_huge_page() can be delayed until after the
associated inode has already been freed. If an unmount occurs at the
wrong time, even the hugetlbfs superblock where the "quota" limits are
stored may have been freed.
Andrew Barry proposed a patch to fix this by having hugepages, instead of
storing a pointer to their address_space and reaching the superblock from
there, had the hugepages store pointers directly to the superblock,
bumping the reference count as appropriate to avoid it being freed.
Andrew Morton rejected that version, however, on the grounds that it made
the existing layering violation worse.
This is a reworked version of Andrew's patch, which removes the extra, and
some of the existing, layering violation. It works by introducing the
concept of a hugepage "subpool" at the lower hugepage mm layer - that is a
finite logical pool of hugepages to allocate from. hugetlbfs now creates
a subpool for each filesystem instance with a page limit set, and a
pointer to the subpool gets added to each allocated hugepage, instead of
the address_space pointer used now. The subpool has its own lifetime and
is only freed once all pages in it _and_ all other references to it (i.e.
superblocks) are gone.
subpools are optional - a NULL subpool pointer is taken by the code to
mean that no subpool limits are in effect.
Previous discussion of this bug found in: "Fix refcounting in hugetlbfs
quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or
http://marc.info/?l=linux-mm&m=126928970510627&w=1
v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to
alloc_huge_page() - since it already takes the vma, it is not necessary.
Signed-off-by: Andrew Barry <abarry@cray.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
static avifBool avifDecoderDataFillImageGrid(avifDecoderData * data,
avifImageGrid * grid,
avifImage * dstImage,
unsigned int firstTileIndex,
unsigned int tileCount,
avifBool alpha)
{
if (tileCount == 0) {
return AVIF_FALSE;
}
avifTile * firstTile = &data->tiles.tile[firstTileIndex];
avifBool firstTileUVPresent = (firstTile->image->yuvPlanes[AVIF_CHAN_U] && firstTile->image->yuvPlanes[AVIF_CHAN_V]);
// Check for tile consistency: All tiles in a grid image should match in the properties checked below.
for (unsigned int i = 1; i < tileCount; ++i) {
avifTile * tile = &data->tiles.tile[firstTileIndex + i];
avifBool uvPresent = (tile->image->yuvPlanes[AVIF_CHAN_U] && tile->image->yuvPlanes[AVIF_CHAN_V]);
if ((tile->image->width != firstTile->image->width) || (tile->image->height != firstTile->image->height) ||
(tile->image->depth != firstTile->image->depth) || (tile->image->yuvFormat != firstTile->image->yuvFormat) ||
(tile->image->yuvRange != firstTile->image->yuvRange) || (uvPresent != firstTileUVPresent) ||
((tile->image->colorPrimaries != firstTile->image->colorPrimaries) ||
(tile->image->transferCharacteristics != firstTile->image->transferCharacteristics) ||
(tile->image->matrixCoefficients != firstTile->image->matrixCoefficients))) {
return AVIF_FALSE;
}
}
// Lazily populate dstImage with the new frame's properties. If we're decoding alpha,
// these values must already match.
if ((dstImage->width != grid->outputWidth) || (dstImage->height != grid->outputHeight) ||
(dstImage->depth != firstTile->image->depth) || (dstImage->yuvFormat != firstTile->image->yuvFormat)) {
if (alpha) {
// Alpha doesn't match size, just bail out
return AVIF_FALSE;
}
avifImageFreePlanes(dstImage, AVIF_PLANES_ALL);
dstImage->width = grid->outputWidth;
dstImage->height = grid->outputHeight;
dstImage->depth = firstTile->image->depth;
dstImage->yuvFormat = firstTile->image->yuvFormat;
dstImage->yuvRange = firstTile->image->yuvRange;
if (!data->cicpSet) {
data->cicpSet = AVIF_TRUE;
dstImage->colorPrimaries = firstTile->image->colorPrimaries;
dstImage->transferCharacteristics = firstTile->image->transferCharacteristics;
dstImage->matrixCoefficients = firstTile->image->matrixCoefficients;
}
}
if (alpha) {
dstImage->alphaRange = firstTile->image->alphaRange;
}
avifImageAllocatePlanes(dstImage, alpha ? AVIF_PLANES_A : AVIF_PLANES_YUV);
avifPixelFormatInfo formatInfo;
avifGetPixelFormatInfo(firstTile->image->yuvFormat, &formatInfo);
unsigned int tileIndex = firstTileIndex;
size_t pixelBytes = avifImageUsesU16(dstImage) ? 2 : 1;
for (unsigned int rowIndex = 0; rowIndex < grid->rows; ++rowIndex) {
for (unsigned int colIndex = 0; colIndex < grid->columns; ++colIndex, ++tileIndex) {
avifTile * tile = &data->tiles.tile[tileIndex];
unsigned int widthToCopy = firstTile->image->width;
unsigned int maxX = firstTile->image->width * (colIndex + 1);
if (maxX > grid->outputWidth) {
widthToCopy -= maxX - grid->outputWidth;
}
unsigned int heightToCopy = firstTile->image->height;
unsigned int maxY = firstTile->image->height * (rowIndex + 1);
if (maxY > grid->outputHeight) {
heightToCopy -= maxY - grid->outputHeight;
}
// Y and A channels
size_t yaColOffset = colIndex * firstTile->image->width;
size_t yaRowOffset = rowIndex * firstTile->image->height;
size_t yaRowBytes = widthToCopy * pixelBytes;
if (alpha) {
// A
for (unsigned int j = 0; j < heightToCopy; ++j) {
uint8_t * src = &tile->image->alphaPlane[j * tile->image->alphaRowBytes];
uint8_t * dst = &dstImage->alphaPlane[(yaColOffset * pixelBytes) + ((yaRowOffset + j) * dstImage->alphaRowBytes)];
memcpy(dst, src, yaRowBytes);
}
} else {
// Y
for (unsigned int j = 0; j < heightToCopy; ++j) {
uint8_t * src = &tile->image->yuvPlanes[AVIF_CHAN_Y][j * tile->image->yuvRowBytes[AVIF_CHAN_Y]];
uint8_t * dst =
&dstImage->yuvPlanes[AVIF_CHAN_Y][(yaColOffset * pixelBytes) + ((yaRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_Y])];
memcpy(dst, src, yaRowBytes);
}
if (!firstTileUVPresent) {
continue;
}
// UV
heightToCopy >>= formatInfo.chromaShiftY;
size_t uvColOffset = yaColOffset >> formatInfo.chromaShiftX;
size_t uvRowOffset = yaRowOffset >> formatInfo.chromaShiftY;
size_t uvRowBytes = yaRowBytes >> formatInfo.chromaShiftX;
for (unsigned int j = 0; j < heightToCopy; ++j) {
uint8_t * srcU = &tile->image->yuvPlanes[AVIF_CHAN_U][j * tile->image->yuvRowBytes[AVIF_CHAN_U]];
uint8_t * dstU =
&dstImage->yuvPlanes[AVIF_CHAN_U][(uvColOffset * pixelBytes) + ((uvRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_U])];
memcpy(dstU, srcU, uvRowBytes);
uint8_t * srcV = &tile->image->yuvPlanes[AVIF_CHAN_V][j * tile->image->yuvRowBytes[AVIF_CHAN_V]];
uint8_t * dstV =
&dstImage->yuvPlanes[AVIF_CHAN_V][(uvColOffset * pixelBytes) + ((uvRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_V])];
memcpy(dstV, srcV, uvRowBytes);
}
}
}
}
return AVIF_TRUE;
}
|
Safe
|
[
"CWE-703",
"CWE-787"
] |
libavif
|
0a8e7244d494ae98e9756355dfbfb6697ded2ff9
|
1.646486048598438e+38
| 124 |
Set max image size to 16384 * 16384
Fix https://crbug.com/oss-fuzz/24728 and
https://crbug.com/oss-fuzz/24734.
| 0 |
proto_tree_add_eui64_format(proto_tree *tree, int hfindex, tvbuff_t *tvb,
gint start, gint length, const guint64 value,
const char *format, ...)
{
proto_item *pi;
va_list ap;
pi = proto_tree_add_eui64(tree, hfindex, tvb, start, length, value);
if (pi != tree) {
TRY_TO_FAKE_THIS_REPR(pi);
va_start(ap, format);
proto_tree_set_representation(pi, format, ap);
va_end(ap);
}
return pi;
}
|
Safe
|
[
"CWE-401"
] |
wireshark
|
a9fc769d7bb4b491efb61c699d57c9f35269d871
|
3.2237329944607446e+37
| 18 |
epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032.
| 0 |
void item_stats_sizes_enable(ADD_STAT add_stats, void *c) {
mutex_lock(&stats_sizes_lock);
if (!settings.use_cas) {
APPEND_STAT("sizes_status", "error", "");
APPEND_STAT("sizes_error", "cas_support_disabled", "");
} else if (stats_sizes_hist == NULL) {
item_stats_sizes_init();
if (stats_sizes_hist != NULL) {
APPEND_STAT("sizes_status", "enabled", "");
} else {
APPEND_STAT("sizes_status", "error", "");
APPEND_STAT("sizes_error", "no_memory", "");
}
} else {
APPEND_STAT("sizes_status", "enabled", "");
}
mutex_unlock(&stats_sizes_lock);
}
|
Safe
|
[
"CWE-190"
] |
memcached
|
bd578fc34b96abe0f8d99c1409814a09f51ee71c
|
2.966678452951768e+38
| 18 |
CVE reported by cisco talos
| 0 |
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
struct hns_nic_ring_data *rd;
int i;
int ret;
int cpu;
for (i = 0; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
break;
snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
"%s-%s%d", priv->netdev->name,
(is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
ret = request_irq(rd->ring->irq,
hns_irq_handle, 0, rd->ring->ring_name, rd);
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
rd->ring->irq);
return ret;
}
disable_irq(rd->ring->irq);
cpu = hns_nic_init_affinity_mask(h->q_num, i,
rd->ring, &rd->mask);
if (cpu_online(cpu))
irq_set_affinity_hint(rd->ring->irq,
&rd->mask);
rd->ring->irq_init_flag = RCB_IRQ_INITED;
}
return 0;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
|
1.0965600787241361e+38
| 41 |
net: hns: Fix a skb used after free bug
skb maybe freed in hns_nic_net_xmit_hw() and return NETDEV_TX_OK,
which cause hns_nic_net_xmit to use a freed skb.
BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940...
[17659.112635] alloc_debug_processing+0x18c/0x1a0
[17659.117208] __slab_alloc+0x52c/0x560
[17659.120909] kmem_cache_alloc_node+0xac/0x2c0
[17659.125309] __alloc_skb+0x6c/0x260
[17659.128837] tcp_send_ack+0x8c/0x280
[17659.132449] __tcp_ack_snd_check+0x9c/0xf0
[17659.136587] tcp_rcv_established+0x5a4/0xa70
[17659.140899] tcp_v4_do_rcv+0x27c/0x620
[17659.144687] tcp_prequeue_process+0x108/0x170
[17659.149085] tcp_recvmsg+0x940/0x1020
[17659.152787] inet_recvmsg+0x124/0x180
[17659.156488] sock_recvmsg+0x64/0x80
[17659.160012] SyS_recvfrom+0xd8/0x180
[17659.163626] __sys_trace_return+0x0/0x4
[17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13
[17659.174000] free_debug_processing+0x1d4/0x2c0
[17659.178486] __slab_free+0x240/0x390
[17659.182100] kmem_cache_free+0x24c/0x270
[17659.186062] kfree_skbmem+0xa0/0xb0
[17659.189587] __kfree_skb+0x28/0x40
[17659.193025] napi_gro_receive+0x168/0x1c0
[17659.197074] hns_nic_rx_up_pro+0x58/0x90
[17659.201038] hns_nic_rx_poll_one+0x518/0xbc0
[17659.205352] hns_nic_common_poll+0x94/0x140
[17659.209576] net_rx_action+0x458/0x5e0
[17659.213363] __do_softirq+0x1b8/0x480
[17659.217062] run_ksoftirqd+0x64/0x80
[17659.220679] smpboot_thread_fn+0x224/0x310
[17659.224821] kthread+0x150/0x170
[17659.228084] ret_from_fork+0x10/0x40
BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0...
[17751.080490] __slab_alloc+0x52c/0x560
[17751.084188] kmem_cache_alloc+0x244/0x280
[17751.088238] __build_skb+0x40/0x150
[17751.091764] build_skb+0x28/0x100
[17751.095115] __alloc_rx_skb+0x94/0x150
[17751.098900] __napi_alloc_skb+0x34/0x90
[17751.102776] hns_nic_rx_poll_one+0x180/0xbc0
[17751.107097] hns_nic_common_poll+0x94/0x140
[17751.111333] net_rx_action+0x458/0x5e0
[17751.115123] __do_softirq+0x1b8/0x480
[17751.118823] run_ksoftirqd+0x64/0x80
[17751.122437] smpboot_thread_fn+0x224/0x310
[17751.126575] kthread+0x150/0x170
[17751.129838] ret_from_fork+0x10/0x40
[17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43
[17751.139951] free_debug_processing+0x1d4/0x2c0
[17751.144436] __slab_free+0x240/0x390
[17751.148051] kmem_cache_free+0x24c/0x270
[17751.152014] kfree_skbmem+0xa0/0xb0
[17751.155543] __kfree_skb+0x28/0x40
[17751.159022] napi_gro_receive+0x168/0x1c0
[17751.163074] hns_nic_rx_up_pro+0x58/0x90
[17751.167041] hns_nic_rx_poll_one+0x518/0xbc0
[17751.171358] hns_nic_common_poll+0x94/0x140
[17751.175585] net_rx_action+0x458/0x5e0
[17751.179373] __do_softirq+0x1b8/0x480
[17751.183076] run_ksoftirqd+0x64/0x80
[17751.186691] smpboot_thread_fn+0x224/0x310
[17751.190826] kthread+0x150/0x170
[17751.194093] ret_from_fork+0x10/0x40
Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: lipeng <lipeng321@huawei.com>
Reported-by: Jun He <hjat2005@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
static int ldapsrv_load_limits(struct ldapsrv_connection *conn)
{
TALLOC_CTX *tmp_ctx;
const char *attrs[] = { "configurationNamingContext", NULL };
const char *attrs2[] = { "lDAPAdminLimits", NULL };
struct ldb_message_element *el;
struct ldb_result *res = NULL;
struct ldb_dn *basedn;
struct ldb_dn *conf_dn;
struct ldb_dn *policy_dn;
unsigned int i;
int ret;
/* set defaults limits in case of failure */
conn->limits.initial_timeout = 120;
conn->limits.conn_idle_time = 900;
conn->limits.max_page_size = 1000;
conn->limits.max_notifications = 5;
conn->limits.search_timeout = 120;
conn->limits.expire_time = (struct timeval) {
.tv_sec = get_time_t_max(),
};
tmp_ctx = talloc_new(conn);
if (tmp_ctx == NULL) {
return -1;
}
basedn = ldb_dn_new(tmp_ctx, conn->ldb, NULL);
if (basedn == NULL) {
goto failed;
}
ret = ldb_search(conn->ldb, tmp_ctx, &res, basedn, LDB_SCOPE_BASE, attrs, NULL);
if (ret != LDB_SUCCESS) {
goto failed;
}
if (res->count != 1) {
goto failed;
}
conf_dn = ldb_msg_find_attr_as_dn(conn->ldb, tmp_ctx, res->msgs[0], "configurationNamingContext");
if (conf_dn == NULL) {
goto failed;
}
policy_dn = ldb_dn_copy(tmp_ctx, conf_dn);
ldb_dn_add_child_fmt(policy_dn, "CN=Default Query Policy,CN=Query-Policies,CN=Directory Service,CN=Windows NT,CN=Services");
if (policy_dn == NULL) {
goto failed;
}
ret = ldb_search(conn->ldb, tmp_ctx, &res, policy_dn, LDB_SCOPE_BASE, attrs2, NULL);
if (ret != LDB_SUCCESS) {
goto failed;
}
if (res->count != 1) {
goto failed;
}
el = ldb_msg_find_element(res->msgs[0], "lDAPAdminLimits");
if (el == NULL) {
goto failed;
}
for (i = 0; i < el->num_values; i++) {
char policy_name[256];
int policy_value, s;
s = sscanf((const char *)el->values[i].data, "%255[^=]=%d", policy_name, &policy_value);
if (s != 2 || policy_value == 0)
continue;
if (strcasecmp("InitRecvTimeout", policy_name) == 0) {
conn->limits.initial_timeout = policy_value;
continue;
}
if (strcasecmp("MaxConnIdleTime", policy_name) == 0) {
conn->limits.conn_idle_time = policy_value;
continue;
}
if (strcasecmp("MaxPageSize", policy_name) == 0) {
conn->limits.max_page_size = policy_value;
continue;
}
if (strcasecmp("MaxNotificationPerConn", policy_name) == 0) {
conn->limits.max_notifications = policy_value;
continue;
}
if (strcasecmp("MaxQueryDuration", policy_name) == 0) {
if (policy_value > 0) {
conn->limits.search_timeout = policy_value;
}
continue;
}
}
return 0;
failed:
DBG_ERR("Failed to load ldap server query policies\n");
talloc_free(tmp_ctx);
return -1;
}
|
Safe
|
[
"CWE-703"
] |
samba
|
f9b2267c6eb8138fc94df7a138ad5d87526f1d79
|
2.0233092815787754e+37
| 106 |
CVE-2021-3670 ldap_server: Ensure value of MaxQueryDuration is greater than zero
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14694
Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
Reviewed-by: Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
(cherry picked from commit e1ab0c43629686d1d2c0b0b2bcdc90057a792049)
| 0 |
int imap_parse_path (const char* path, IMAP_MBOX* mx)
{
static unsigned short ImapPort = 0;
static unsigned short ImapsPort = 0;
struct servent* service;
char tmp[128];
ciss_url_t url;
char *c;
int n;
if (!ImapPort)
{
service = getservbyname ("imap", "tcp");
if (service)
ImapPort = ntohs (service->s_port);
else
ImapPort = IMAP_PORT;
dprint (3, (debugfile, "Using default IMAP port %d\n", ImapPort));
}
if (!ImapsPort)
{
service = getservbyname ("imaps", "tcp");
if (service)
ImapsPort = ntohs (service->s_port);
else
ImapsPort = IMAP_SSL_PORT;
dprint (3, (debugfile, "Using default IMAPS port %d\n", ImapsPort));
}
/* Defaults */
memset(&mx->account, 0, sizeof(mx->account));
mx->account.port = ImapPort;
mx->account.type = MUTT_ACCT_TYPE_IMAP;
c = safe_strdup (path);
url_parse_ciss (&url, c);
if (url.scheme == U_IMAP || url.scheme == U_IMAPS)
{
if (mutt_account_fromurl (&mx->account, &url) < 0 || !*mx->account.host)
{
FREE (&c);
return -1;
}
mx->mbox = safe_strdup (url.path);
if (url.scheme == U_IMAPS)
mx->account.flags |= MUTT_ACCT_SSL;
FREE (&c);
}
/* old PINE-compatibility code */
else
{
FREE (&c);
if (sscanf (path, "{%127[^}]}", tmp) != 1)
return -1;
c = strchr (path, '}');
if (!c)
return -1;
else
/* walk past closing '}' */
mx->mbox = safe_strdup (c+1);
if ((c = strrchr (tmp, '@')))
{
*c = '\0';
strfcpy (mx->account.user, tmp, sizeof (mx->account.user));
strfcpy (tmp, c+1, sizeof (tmp));
mx->account.flags |= MUTT_ACCT_USER;
}
if ((n = sscanf (tmp, "%127[^:/]%127s", mx->account.host, tmp)) < 1)
{
dprint (1, (debugfile, "imap_parse_path: NULL host in %s\n", path));
FREE (&mx->mbox);
return -1;
}
if (n > 1) {
if (sscanf (tmp, ":%hu%127s", &(mx->account.port), tmp) >= 1)
mx->account.flags |= MUTT_ACCT_PORT;
if (sscanf (tmp, "/%s", tmp) == 1)
{
if (!ascii_strncmp (tmp, "ssl", 3))
mx->account.flags |= MUTT_ACCT_SSL;
else
{
dprint (1, (debugfile, "imap_parse_path: Unknown connection type in %s\n", path));
FREE (&mx->mbox);
return -1;
}
}
}
}
if ((mx->account.flags & MUTT_ACCT_SSL) && !(mx->account.flags & MUTT_ACCT_PORT))
mx->account.port = ImapsPort;
return 0;
}
|
Safe
|
[
"CWE-78"
] |
mutt
|
185152818541f5cdc059cbff3f3e8b654fc27c1d
|
1.9549314143691217e+38
| 102 |
Properly quote IMAP mailbox names when (un)subscribing.
When handling automatic subscription (via $imap_check_subscribed), or
manual subscribe/unsubscribe commands, mutt generating a "mailboxes"
command but failed to properly escape backquotes.
Thanks to Jeriko One for the detailed bug report and patch, which this
commit is based upon.
| 0 |
gx_dc_pattern2_save_dc(
const gx_device_color * pdevc,
gx_device_color_saved * psdc )
{
gs_pattern2_instance_t * pinst = (gs_pattern2_instance_t *)pdevc->ccolor.pattern;
psdc->type = pdevc->type;
psdc->colors.pattern2.id = pinst->pattern_id;
psdc->colors.pattern2.shfill = pinst->shfill;
}
|
Safe
|
[
"CWE-704"
] |
ghostpdl
|
693baf02152119af6e6afd30bb8ec76d14f84bbf
|
5.128776390415134e+37
| 10 |
PS interpreter - check the Implementation of a Pattern before use
Bug #700141 "Type confusion in setpattern"
As the bug thread says, we were not checking that the Implementation
of a pattern dictionary was a structure type, leading to a crash when
we tried to treat it as one.
Here we make the st_pattern1_instance and st_pattern2_instance
structures public definitions and in zsetcolor we check the object
stored under the Implementation key in the supplied dictionary to see if
its a t_struct or t_astruct type, and if it is that its a
st_pattern1_instance or st_pattern2_instance structure.
If either check fails we throw a typecheck error.
We need to make the st_pattern1_instance and st_pattern2_instance
definitions public as they are defined in the graphics library and we
need to check in the interpreter.
| 0 |
void fx_TypedArray_prototype_join(txMachine* the)
{
	/* Implements %TypedArray%.prototype.join: concatenates the elements,
	 * converted to strings, separated by the given separator (default ","). */
	mxTypedArrayDeclarations;
	txInteger delta = dispatch->value.typedArray.dispatch->size;	/* byte stride per element */
	txInteger offset = view->value.dataView.offset;			/* first byte of the view in the buffer */
	txInteger limit = offset + (length << dispatch->value.typedArray.dispatch->shift);
	txString string;
	txSlot* list = fxNewInstance(the);	/* linked list of string chunks to concatenate */
	txSlot* slot = list;
	txBoolean comma = 0;			/* set once the first element has been emitted */
	txInteger size = 0;			/* running byte size of the result */
	/* Determine the separator: argument 0 when defined, else ",".
	 * The slot is turned into a "key" slot so its byte length is cached in
	 * value.key.sum for the size computation below. */
	if ((mxArgc > 0) && (mxArgv(0)->kind != XS_UNDEFINED_KIND)) {
		mxPushSlot(mxArgv(0));
		string = fxToString(the, the->stack);
		the->stack->kind += XS_KEY_KIND - XS_STRING_KIND;
		the->stack->value.key.sum = mxStringLength(the->stack->value.string);
	}
	else {
		mxPushStringX(",");
		the->stack->kind += XS_KEY_KIND - XS_STRING_KIND;
		the->stack->value.key.sum = 1;
	}
	/* Re-read the view size here: the fxToString() above can run script
	 * (e.g. a toString on the separator argument) which may detach or
	 * resize the underlying buffer. Elements past the refreshed 'length'
	 * contribute only separators below. */
	length = offset + fxGetDataViewSize(the, view, buffer);
	while (offset < limit) {
		if (comma) {
			/* Append a separator chunk before every element but the first. */
			slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG);
			size = fxAddChunkSizes(the, size, slot->value.key.sum);
		}
		else
			comma = 1;
		if (offset < length) {
			/* Read the element via the typed-array getter, convert it to a
			 * string chunk and accumulate its byte length. */
			mxPushUndefined();
			(*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, offset, the->stack, EndianNative);
			slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG);
			string = fxToString(the, slot);
			slot->kind += XS_KEY_KIND - XS_STRING_KIND;
			slot->value.key.sum = mxStringLength(string);
			size = fxAddChunkSizes(the, size, slot->value.key.sum);
			mxPop();
		}
		offset += delta;
	}
	mxPop();
	/* Allocate the result chunk (+1 byte for the NUL terminator) and copy
	 * every recorded chunk into it in order. */
	string = mxResult->value.string = fxNewChunk(the, fxAddChunkSizes(the, size, 1));
	slot = list->next;
	while (slot) {
		c_memcpy(string, slot->value.key.string, slot->value.key.sum);
		string += slot->value.key.sum;
		slot = slot->next;
	}
	*string = 0;
	mxResult->kind = XS_STRING_KIND;
	mxPop();
}
|
Safe
|
[
"CWE-125"
] |
moddable
|
135aa9a4a6a9b49b60aa730ebc3bcc6247d75c45
|
2.3955173946498607e+38
| 54 |
XS: #896
| 0 |
/*
 * Validate and complete a DECnet next hop: resolve its device and scope.
 * On success nh->nh_dev holds a device reference (dev_hold) and
 * nh->nh_scope is set; returns 0, or a negative errno on failure.
 * Note: @fi is not used by the checks visible here.
 */
static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct dn_fib_nh *nh)
{
	int err;

	if (nh->nh_gw) {
		struct flowidn fld;
		struct dn_fib_res res;

		if (nh->nh_flags&RTNH_F_ONLINK) {
			/* Gateway declared directly reachable on nh_oif: it
			 * must be a unicast address and the device must exist
			 * and be up; the route scope must be wider than link. */
			struct net_device *dev;

			if (r->rtm_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (dnet_addr_type(nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			if ((dev = __dev_get_by_index(&init_net, nh->nh_oif)) == NULL)
				return -ENODEV;
			if (!(dev->flags&IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);	/* reference kept in nh->nh_dev */
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}

		/* Otherwise resolve the gateway through a FIB lookup, at a
		 * scope strictly narrower than the route's own scope (but at
		 * least link scope). */
		memset(&fld, 0, sizeof(fld));
		fld.daddr = nh->nh_gw;
		fld.flowidn_oif = nh->nh_oif;
		fld.flowidn_scope = r->rtm_scope + 1;

		if (fld.flowidn_scope < RT_SCOPE_LINK)
			fld.flowidn_scope = RT_SCOPE_LINK;

		if ((err = dn_fib_lookup(&fld, &res)) != 0)
			return err;

		/* The gateway must resolve to a unicast or local route with a
		 * usable, up device. */
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = DN_FIB_RES_OIF(res);
		nh->nh_dev = DN_FIB_RES_DEV(res);
		if (nh->nh_dev == NULL)
			goto out;
		dev_hold(nh->nh_dev);
		err = -ENETDOWN;
		if (!(nh->nh_dev->flags & IFF_UP))
			goto out;
		err = 0;
out:
		dn_fib_res_put(&res);	/* drop the lookup result in all paths */
		return err;
	} else {
		/* No gateway: direct route. The device must exist, have DECnet
		 * state (dn_ptr) and be up; ONLINK/PERVASIVE make no sense here. */
		struct net_device *dev;

		if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK))
			return -EINVAL;

		dev = __dev_get_by_index(&init_net, nh->nh_oif);
		if (dev == NULL || dev->dn_ptr == NULL)
			return -ENODEV;
		if (!(dev->flags&IFF_UP))
			return -ENETDOWN;
		nh->nh_dev = dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
	}

	return 0;
}
|
Safe
|
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
|
2.7425186512013534e+38
| 70 |
net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/*
 * Walk @cgrp and its live descendants (pre-order) and, for every subsystem
 * enabled in each cgroup's subsystem mask, make sure its css exists
 * (creating it if needed) and that its interface files are populated when
 * the css is visible.  Returns 0 on success or a negative errno from
 * css_create()/css_populate_dir().
 */
static int cgroup_apply_control_enable(struct cgroup *cgrp)
{
	struct cgroup *dsct;
	struct cgroup_subsys_state *d_css;
	struct cgroup_subsys *ss;
	int ssid, ret;

	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
		for_each_subsys(ss, ssid) {
			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);

			/* skip subsystems not enabled for this cgroup */
			if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
				continue;

			/* create the css on first enable */
			if (!css) {
				css = css_create(dsct, ss);
				if (IS_ERR(css))
					return PTR_ERR(css);
			}

			/* a css we are enabling must not be on its way out */
			WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));

			/* expose the interface files where the css is visible */
			if (css_visible(css)) {
				ret = css_populate_dir(css);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
a06247c6804f1a7c86a2e5398a4c1f1db1471848
|
1.5858711936654075e+38
| 32 |
psi: Fix uaf issue when psi trigger is destroyed while being polled
With write operation on psi files replacing old trigger with a new one,
the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed and pending poll()
will stumble on trigger->event_wait which was destroyed.
Fix this by disallowing to redefine an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: syzbot+cdb5dd11c97cc532efad@syzkaller.appspotmail.com
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Analyzed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20220111232309.1786347-1-surenb@google.com
| 0 |
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	/*
	 * Return the root tree block as it was at sequence @time_seq, by
	 * replaying tree mod log entries on top of a private clone.  The
	 * returned extent buffer is read-locked; the caller must unlock and
	 * free it.  Returns the current (locked) root when there is nothing
	 * to rewind, or NULL on allocation failure.
	 */
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	u64 eb_root_owner = 0;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	int level;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		/* The root was replaced after @time_seq: rewind the old root. */
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
		level = old_root->level;
	} else {
		logical = eb_root->start;
		level = btrfs_header_level(eb_root);
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, root->root_key.objectid,
				      0, level, NULL);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			/*
			 * The old root's block may have been freed and reused
			 * as a node of another tree, in which case a concurrent
			 * task holding a write lock can be modifying it while
			 * we clone it (read_tree_block() gave us a reference
			 * but no lock).  Take a read lock around the clone so
			 * the copy is consistent with the mod log operations
			 * we replay later, avoiding a race where e.g. nritems
			 * is read before a concurrent insert updates it.
			 */
			btrfs_tree_read_lock(old);
			eb = btrfs_clone_extent_buffer(old);
			btrfs_tree_read_unlock(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		eb_root_owner = btrfs_header_owner(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;

	if (old_root) {
		/* Rebuild the clone's header so it describes the old root. */
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, eb_root_owner);
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
				       btrfs_header_level(eb));
	btrfs_tree_read_lock(eb);
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}
|
Vulnerable
|
[
"CWE-362"
] |
linux
|
dbcc7d57bffc0c8cac9dac11bec548597d59a6a5
|
2.2433650562490753e+38
| 75 |
btrfs: fix race when cloning extent buffer during rewind of an old root
While resolving backreferences, as part of a logical ino ioctl call or
fiemap, we can end up hitting a BUG_ON() when replaying tree mod log
operations of a root, triggering a stack trace like the following:
------------[ cut here ]------------
kernel BUG at fs/btrfs/ctree.c:1210!
invalid opcode: 0000 [#1] SMP KASAN PTI
CPU: 1 PID: 19054 Comm: crawl_335 Tainted: G W 5.11.0-2d11c0084b02-misc-next+ #89
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
RIP: 0010:__tree_mod_log_rewind+0x3b1/0x3c0
Code: 05 48 8d 74 10 (...)
RSP: 0018:ffffc90001eb70b8 EFLAGS: 00010297
RAX: 0000000000000000 RBX: ffff88812344e400 RCX: ffffffffb28933b6
RDX: 0000000000000007 RSI: dffffc0000000000 RDI: ffff88812344e42c
RBP: ffffc90001eb7108 R08: 1ffff11020b60a20 R09: ffffed1020b60a20
R10: ffff888105b050f9 R11: ffffed1020b60a1f R12: 00000000000000ee
R13: ffff8880195520c0 R14: ffff8881bc958500 R15: ffff88812344e42c
FS: 00007fd1955e8700(0000) GS:ffff8881f5600000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007efdb7928718 CR3: 000000010103a006 CR4: 0000000000170ee0
Call Trace:
btrfs_search_old_slot+0x265/0x10d0
? lock_acquired+0xbb/0x600
? btrfs_search_slot+0x1090/0x1090
? free_extent_buffer.part.61+0xd7/0x140
? free_extent_buffer+0x13/0x20
resolve_indirect_refs+0x3e9/0xfc0
? lock_downgrade+0x3d0/0x3d0
? __kasan_check_read+0x11/0x20
? add_prelim_ref.part.11+0x150/0x150
? lock_downgrade+0x3d0/0x3d0
? __kasan_check_read+0x11/0x20
? lock_acquired+0xbb/0x600
? __kasan_check_write+0x14/0x20
? do_raw_spin_unlock+0xa8/0x140
? rb_insert_color+0x30/0x360
? prelim_ref_insert+0x12d/0x430
find_parent_nodes+0x5c3/0x1830
? resolve_indirect_refs+0xfc0/0xfc0
? lock_release+0xc8/0x620
? fs_reclaim_acquire+0x67/0xf0
? lock_acquire+0xc7/0x510
? lock_downgrade+0x3d0/0x3d0
? lockdep_hardirqs_on_prepare+0x160/0x210
? lock_release+0xc8/0x620
? fs_reclaim_acquire+0x67/0xf0
? lock_acquire+0xc7/0x510
? poison_range+0x38/0x40
? unpoison_range+0x14/0x40
? trace_hardirqs_on+0x55/0x120
btrfs_find_all_roots_safe+0x142/0x1e0
? find_parent_nodes+0x1830/0x1830
? btrfs_inode_flags_to_xflags+0x50/0x50
iterate_extent_inodes+0x20e/0x580
? tree_backref_for_extent+0x230/0x230
? lock_downgrade+0x3d0/0x3d0
? read_extent_buffer+0xdd/0x110
? lock_downgrade+0x3d0/0x3d0
? __kasan_check_read+0x11/0x20
? lock_acquired+0xbb/0x600
? __kasan_check_write+0x14/0x20
? _raw_spin_unlock+0x22/0x30
? __kasan_check_write+0x14/0x20
iterate_inodes_from_logical+0x129/0x170
? iterate_inodes_from_logical+0x129/0x170
? btrfs_inode_flags_to_xflags+0x50/0x50
? iterate_extent_inodes+0x580/0x580
? __vmalloc_node+0x92/0xb0
? init_data_container+0x34/0xb0
? init_data_container+0x34/0xb0
? kvmalloc_node+0x60/0x80
btrfs_ioctl_logical_to_ino+0x158/0x230
btrfs_ioctl+0x205e/0x4040
? __might_sleep+0x71/0xe0
? btrfs_ioctl_get_supported_features+0x30/0x30
? getrusage+0x4b6/0x9c0
? __kasan_check_read+0x11/0x20
? lock_release+0xc8/0x620
? __might_fault+0x64/0xd0
? lock_acquire+0xc7/0x510
? lock_downgrade+0x3d0/0x3d0
? lockdep_hardirqs_on_prepare+0x210/0x210
? lockdep_hardirqs_on_prepare+0x210/0x210
? __kasan_check_read+0x11/0x20
? do_vfs_ioctl+0xfc/0x9d0
? ioctl_file_clone+0xe0/0xe0
? lock_downgrade+0x3d0/0x3d0
? lockdep_hardirqs_on_prepare+0x210/0x210
? __kasan_check_read+0x11/0x20
? lock_release+0xc8/0x620
? __task_pid_nr_ns+0xd3/0x250
? lock_acquire+0xc7/0x510
? __fget_files+0x160/0x230
? __fget_light+0xf2/0x110
__x64_sys_ioctl+0xc3/0x100
do_syscall_64+0x37/0x80
entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7fd1976e2427
Code: 00 00 90 48 8b 05 (...)
RSP: 002b:00007fd1955e5cf8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 00007fd1955e5f40 RCX: 00007fd1976e2427
RDX: 00007fd1955e5f48 RSI: 00000000c038943b RDI: 0000000000000004
RBP: 0000000001000000 R08: 0000000000000000 R09: 00007fd1955e6120
R10: 0000557835366b00 R11: 0000000000000246 R12: 0000000000000004
R13: 00007fd1955e5f48 R14: 00007fd1955e5f40 R15: 00007fd1955e5ef8
Modules linked in:
---[ end trace ec8931a1c36e57be ]---
(gdb) l *(__tree_mod_log_rewind+0x3b1)
0xffffffff81893521 is in __tree_mod_log_rewind (fs/btrfs/ctree.c:1210).
1205 * the modification. as we're going backwards, we do the
1206 * opposite of each operation here.
1207 */
1208 switch (tm->op) {
1209 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1210 BUG_ON(tm->slot < n);
1211 fallthrough;
1212 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1213 case MOD_LOG_KEY_REMOVE:
1214 btrfs_set_node_key(eb, &tm->key, tm->slot);
Here's what happens to hit that BUG_ON():
1) We have one tree mod log user (through fiemap or the logical ino ioctl),
with a sequence number of 1, so we have fs_info->tree_mod_seq == 1;
2) Another task is at ctree.c:balance_level() and we have eb X currently as
the root of the tree, and we promote its single child, eb Y, as the new
root.
Then, at ctree.c:balance_level(), we call:
tree_mod_log_insert_root(eb X, eb Y, 1);
3) At tree_mod_log_insert_root() we create tree mod log elements for each
slot of eb X, of operation type MOD_LOG_KEY_REMOVE_WHILE_FREEING each
with a ->logical pointing to ebX->start. These are placed in an array
named tm_list.
Lets assume there are N elements (N pointers in eb X);
4) Then, still at tree_mod_log_insert_root(), we create a tree mod log
element of operation type MOD_LOG_ROOT_REPLACE, ->logical set to
ebY->start, ->old_root.logical set to ebX->start, ->old_root.level set
to the level of eb X and ->generation set to the generation of eb X;
5) Then tree_mod_log_insert_root() calls tree_mod_log_free_eb() with
tm_list as argument. After that, tree_mod_log_free_eb() calls
__tree_mod_log_insert() for each member of tm_list in reverse order,
from highest slot in eb X, slot N - 1, to slot 0 of eb X;
6) __tree_mod_log_insert() sets the sequence number of each given tree mod
log operation - it increments fs_info->tree_mod_seq and sets
fs_info->tree_mod_seq as the sequence number of the given tree mod log
operation.
This means that for the tm_list created at tree_mod_log_insert_root(),
the element corresponding to slot 0 of eb X has the highest sequence
number (1 + N), and the element corresponding to the last slot has the
lowest sequence number (2);
7) Then, after inserting tm_list's elements into the tree mod log rbtree,
the MOD_LOG_ROOT_REPLACE element is inserted, which gets the highest
sequence number, which is N + 2;
8) Back to ctree.c:balance_level(), we free eb X by calling
btrfs_free_tree_block() on it. Because eb X was created in the current
transaction, has no other references and writeback did not happen for
it, we add it back to the free space cache/tree;
9) Later some other task T allocates the metadata extent from eb X, since
it is marked as free space in the space cache/tree, and uses it as a
node for some other btree;
10) The tree mod log user task calls btrfs_search_old_slot(), which calls
get_old_root(), and finally that calls __tree_mod_log_oldest_root()
with time_seq == 1 and eb_root == eb Y;
11) First iteration of the while loop finds the tree mod log element with
sequence number N + 2, for the logical address of eb Y and of type
MOD_LOG_ROOT_REPLACE;
12) Because the operation type is MOD_LOG_ROOT_REPLACE, we don't break out
of the loop, and set root_logical to point to tm->old_root.logical
which corresponds to the logical address of eb X;
13) On the next iteration of the while loop, the call to
tree_mod_log_search_oldest() returns the smallest tree mod log element
for the logical address of eb X, which has a sequence number of 2, an
operation type of MOD_LOG_KEY_REMOVE_WHILE_FREEING and corresponds to
the old slot N - 1 of eb X (eb X had N items in it before being freed);
14) We then break out of the while loop and return the tree mod log operation
of type MOD_LOG_ROOT_REPLACE (eb Y), and not the one for slot N - 1 of
eb X, to get_old_root();
15) At get_old_root(), we process the MOD_LOG_ROOT_REPLACE operation
and set "logical" to the logical address of eb X, which was the old
root. We then call tree_mod_log_search() passing it the logical
address of eb X and time_seq == 1;
16) Then before calling tree_mod_log_search(), task T adds a key to eb X,
which results in adding a tree mod log operation of type
MOD_LOG_KEY_ADD to the tree mod log - this is done at
ctree.c:insert_ptr() - but after adding the tree mod log operation
and before updating the number of items in eb X from 0 to 1...
17) The task at get_old_root() calls tree_mod_log_search() and gets the
tree mod log operation of type MOD_LOG_KEY_ADD just added by task T.
Then it enters the following if branch:
if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
(...)
} (...)
Calls read_tree_block() for eb X, which gets a reference on eb X but
does not lock it - task T has it locked.
Then it clones eb X while it has nritems set to 0 in its header, before
task T sets nritems to 1 in eb X's header. From hereupon we use the
clone of eb X which no other task has access to;
18) Then we call __tree_mod_log_rewind(), passing it the MOD_LOG_KEY_ADD
mod log operation we just got from tree_mod_log_search() in the
previous step and the cloned version of eb X;
19) At __tree_mod_log_rewind(), we set the local variable "n" to the number
of items set in eb X's clone, which is 0. Then we enter the while loop,
and in its first iteration we process the MOD_LOG_KEY_ADD operation,
which just decrements "n" from 0 to (u32)-1, since "n" is declared with
a type of u32. At the end of this iteration we call rb_next() to find the
next tree mod log operation for eb X, that gives us the mod log operation
of type MOD_LOG_KEY_REMOVE_WHILE_FREEING, for slot 0, with a sequence
number of N + 1 (steps 3 to 6);
20) Then we go back to the top of the while loop and trigger the following
BUG_ON():
(...)
switch (tm->op) {
case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
BUG_ON(tm->slot < n);
fallthrough;
(...)
Because "n" has a value of (u32)-1 (4294967295) and tm->slot is 0.
Fix this by taking a read lock on the extent buffer before cloning it at
ctree.c:get_old_root(). This should be done regardless of the extent
buffer having been freed and reused, as a concurrent task might be
modifying it (while holding a write lock on it).
Reported-by: Zygo Blaxell <ce3g8jdj@umail.furryterror.org>
Link: https://lore.kernel.org/linux-btrfs/20210227155037.GN28049@hungrycats.org/
Fixes: 834328a8493079 ("Btrfs: tree mod log's old roots could still be part of the tree")
CC: stable@vger.kernel.org # 4.4+
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
| 1 |
/*
 * Handle one TLS handshake block, dispatching on the block type byte at
 * payload[0].  Returns 0 for a recognized block (hello/certificate) and
 * -1 for any other block type.
 */
static int processTLSBlock(struct ndpi_detection_module_struct *ndpi_struct,
			   struct ndpi_flow_struct *flow) {
  struct ndpi_packet_struct *packet = &flow->packet;
  int ret;

  if((packet->payload[0] == 0x01) || (packet->payload[0] == 0x02)) {
    /* Client Hello (0x01) or Server Hello (0x02) */
    processClientServerHello(ndpi_struct, flow, 0);
    flow->l4.tcp.tls.hello_processed = 1;
    ndpi_int_tls_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_TLS);
#ifdef DEBUG_TLS
    printf("*** TLS [version: %02X][%s Hello]\n",
	   flow->protos.tls_quic_stun.tls_quic.ssl_version,
	   (packet->payload[0] == 0x01) ? "Client" : "Server");
#endif
    if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0304 /* TLS 1.3 */)
       && (packet->payload[0] == 0x02 /* Server Hello */)) {
      flow->l4.tcp.tls.certificate_processed = 1; /* No Certificate with TLS 1.3+ */
    }
    checkTLSSubprotocol(ndpi_struct, flow);
    return(0);
  }

  if(packet->payload[0] == 0x0b) {
    /* Certificate block.  Important: populate the tls union fields only
     * after ndpi_int_tls_add_connection has been called. */
    if(flow->l4.tcp.tls.hello_processed) {
      ret = processCertificate(ndpi_struct, flow);
      if(ret != 1) {
#ifdef DEBUG_TLS
	printf("[TLS] Error processing certificate: %d\n", ret);
#endif
      }
      flow->l4.tcp.tls.certificate_processed = 1;
    }
    return(0);
  }

  /* Unknown block type */
  return(-1);
}
|
Safe
|
[
"CWE-787"
] |
nDPI
|
1ec621c85b9411cc611652fd57a892cfef478af3
|
1.7068243389890486e+38
| 46 |
Added further checks
| 0 |
// Computes a single requantization range (scalar min/max outputs) for a
// per-channel-quantized qint32 NHWC input, by scaling each channel's
// absolute maximum with that channel's float range and max-reducing over
// channels.  The result is clipped to clip_value_max_ and negated for the
// min output unless all values are non-negative.
void Compute(OpKernelContext* ctx) override {
  const Tensor& input = ctx->input(kInputTensorIndex);
  const Tensor& input_min = ctx->input(kInputMinIndex);
  const Tensor& input_max = ctx->input(kInputMaxIndex);
  const size_t depth = input_max.NumElements();
  // Validate that min/max have one element per channel and that the input
  // is a 4-D NHWC tensor whose channel dimension matches.
  OP_REQUIRES(
      ctx, input_min.dim_size(0) == depth,
      errors::InvalidArgument("input_min has incorrect size, expected ",
                              depth, " was ", input_min.dim_size(0)));
  OP_REQUIRES(
      ctx, input_max.dim_size(0) == depth,
      errors::InvalidArgument("input_max has incorrect size, expected ",
                              depth, " was ", input_max.dim_size(0)));
  OP_REQUIRES(
      ctx, input_min.NumElements() == depth,
      errors::InvalidArgument("input_min must have the same number of "
                              "elements as input_max, got ",
                              input_min.NumElements(), " and ", depth));
  OP_REQUIRES(ctx, input.NumElements() > 0,
              errors::InvalidArgument("input must not be empty"));
  OP_REQUIRES(ctx, input.dims() == 4,
              errors::InvalidArgument("input must be in NHWC format"));
  OP_REQUIRES(
      ctx, input.dim_size(3) == depth,
      errors::InvalidArgument(
          "input must have same number of channels as length of input_min: ",
          input.dim_size(3), " vs ", depth));
  const float* input_min_data = input_min.flat<float>().data();
  const float* input_max_data = input_max.flat<float>().data();
  std::vector<float> ranges(depth);  // per-channel scaled abs-max range
  bool is_non_negative = true;
  // Transpose so each channel becomes a row that can be reduced in one chip.
  Eigen::array<int, 2> shuffling({1, 0});
  auto input_matrix = input.flat_inner_dims<qint32>();
  // TODO: verify performance of not transposing and finding the min max
  // directly from input_matrix vs the one presented below of transposing and
  // using the transposed matrix as the transposing operation in itself might
  // be more costly.
  // Note that this operation is a calibration step for quantization and will
  // cease to exist in the final inference graph(will exist as a const node).
  auto transposed_input = input_matrix.shuffle(shuffling);
  // Find the ranges of each channel in parallel.
  float out_min_max = std::numeric_limits<float>::min();
#ifdef ENABLE_ONEDNN_OPENMP
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for reduction(max : out_min_max)
#endif
#endif  // ENABLE_ONEDNN_OPENMP
  // TODO: Add eigen parallel_for
  for (int64_t i = 0; i < depth; ++i) {
    // Per-channel min/max of the qint32 values.
    Eigen::Tensor<qint32, 0, Eigen::RowMajor> min =
        transposed_input.chip<0>(i).minimum();
    Eigen::Tensor<qint32, 0, Eigen::RowMajor> max =
        transposed_input.chip<0>(i).maximum();
    const int32_t min_per_channel = min();
    const int32_t max_per_channel = max();
    const int32_t abs_max =
        std::max(std::abs(min_per_channel), std::abs(max_per_channel));
    // Scale by the channel's float range; divide by 2^31 to map the qint32
    // magnitude back into float range.
    float scale =
        std::max(std::abs(input_min_data[i]), std::abs(input_max_data[i]));
    ranges[i] =
        scale * static_cast<float>(abs_max) / static_cast<float>(1L << 31);
    if (min_per_channel < 0) is_non_negative = false;
    // Thread-local out_min_max.
    out_min_max = std::max(out_min_max, ranges[i]);
  }
  // All local out_min_max gets max-reduced into one global out_min_max at
  // the end of the loop by specifying reduction(max:out_min_max) along with
  // omp parallel for.
  // Fixing max to clip_value_max_ (example 6.0 to support relu6)
  if (out_min_max > clip_value_max_) out_min_max = clip_value_max_;
  // Emit scalar outputs: [-out_min_max, out_min_max], or [0, out_min_max]
  // when every channel was non-negative.
  Tensor* output_min = nullptr;
  Tensor* output_max = nullptr;
  OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMinIndex, {}, &output_min));
  OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMaxIndex, {}, &output_max));
  output_min->flat<float>()(0) = is_non_negative ? 0.0f : -out_min_max;
  output_max->flat<float>()(0) = out_min_max;
}
|
Safe
|
[
"CWE-20",
"CWE-703"
] |
tensorflow
|
9e62869465573cb2d9b5053f1fa02a81fce21d69
|
2.4622016194110145e+38
| 88 |
Add more validation to `RequantizationRangePerChannel`.
PiperOrigin-RevId: 387693946
Change-Id: Ife8dcbdb021bec4787eef6a4361dd08f17c14bd6
| 0 |
static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
u32 *mask)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return;
if ((algt->type & CRYPTO_ALG_INTERNAL))
*type |= CRYPTO_ALG_INTERNAL;
if ((algt->mask & CRYPTO_ALG_INTERNAL))
*mask |= CRYPTO_ALG_INTERNAL;
}
|
Vulnerable
|
[
"CWE-476",
"CWE-284"
] |
linux
|
48a992727d82cb7db076fa15d372178743b1f4cd
|
2.4294473171144895e+38
| 13 |
crypto: mcryptd - Check mcryptd algorithm compatibility
Algorithms not compatible with mcryptd could be spawned by mcryptd
with a direct crypto_alloc_tfm invocation using a "mcryptd(alg)" name
construct. This causes mcryptd to crash the kernel if an arbitrary
"alg" is incompatible and not intended to be used with mcryptd. It is
an issue if AF_ALG tries to spawn mcryptd(alg) to expose it externally.
But such algorithms must be used internally and not be exposed.
We added a check to enforce that only internal algorithms are allowed
with mcryptd at the time mcryptd is spawning an algorithm.
Link: http://marc.info/?l=linux-crypto-vger&m=148063683310477&w=2
Cc: stable@vger.kernel.org
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
| 1 |
// Parses an einsum equation into integer labels for each operand and the
// output, per-label counts, a DimensionType per label, and flags telling
// whether each input / the output contains an ellipsis ("...").
//
// Bug fix: input_has_ellipsis and output_has_ellipsis are now explicitly
// initialized to false.  Previously they were only ever set to true, so when
// no ellipsis was present callers read uninitialized memory (CWE-824).
static Status ParseEquation(const string& equation,
                            OperandLabels* input_labels,
                            Labels* output_labels,
                            std::vector<DimensionType>* label_types,
                            OperandLabelCounts* input_label_counts,
                            LabelCounts* output_label_counts,
                            gtl::InlinedVector<bool, 2>* input_has_ellipsis,
                            bool* output_has_ellipsis) {
  gtl::InlinedVector<string, 2> input_str;
  string output_str;
  TF_RETURN_IF_ERROR(ParseEinsumEquation(equation, &input_str, &output_str));
  // Temporary map from single character labels to (consecutive) integer
  // labels.
  absl::flat_hash_map<char, int> label_mapping;
  int num_inputs = input_str.size();
  input_labels->resize(num_inputs);
  // Map from single characters to integer labels.
  for (int i = 0; i < num_inputs; ++i) {
    MapToLabels(input_str[i], &input_labels->at(i), &label_mapping);
  }
  MapToLabels(output_str, output_labels, &label_mapping);
  // Compute counts for input and output labels.
  int num_labels = label_mapping.size();
  input_label_counts->resize(num_inputs);
  input_has_ellipsis->resize(num_inputs);
  for (int i = 0; i < num_inputs; ++i) {
    input_label_counts->at(i).resize(num_labels);
    // Explicitly assign false: resize() value-initializes only elements it
    // adds, so a pre-sized vector would otherwise carry stale values when
    // this input has no ellipsis.
    input_has_ellipsis->at(i) = false;
    for (const int label : input_labels->at(i)) {
      if (label != kEllipsisLabel)
        input_label_counts->at(i)[label] += 1;
      else
        input_has_ellipsis->at(i) = true;
    }
  }
  output_label_counts->resize(num_labels);
  // Initialize unconditionally so callers never see an unwritten value when
  // the output has no ellipsis.
  *output_has_ellipsis = false;
  for (const int label : *output_labels) {
    if (label != kEllipsisLabel)
      output_label_counts->at(label) += 1;
    else
      *output_has_ellipsis = true;
  }
  // Map each label to a unique DimensionType.
  label_types->resize(num_labels);
  for (int label = 0; label < num_labels; ++label) {
    if (label == kEllipsisLabel) continue;
    // A label is "removed" when absent from the output, and "unique" when it
    // appears in at most one input operand.
    bool removed = (*output_label_counts)[label] == 0;
    bool unique = num_inputs == 1 || (*input_label_counts)[0][label] == 0 ||
                  (*input_label_counts)[1][label] == 0;
    (*label_types)[label] = GetDimensionType(removed, unique);
  }
  return Status::OK();
}
|
Vulnerable
|
[
"CWE-703",
"CWE-824"
] |
tensorflow
|
f09caa532b6e1ac8d2aa61b7832c78c5b79300c6
|
8.961966230157414e+37
| 56 |
Fix EinsumHelper::ParseEquation to avoid uninitialized accesses.
EinsumHelper::ParseEquation is supposed to return true or false in
input_has_ellipsis and output_has_ellipsis to indicate whether there is
ellipsis in the inputs and output. Previously, when there is no ellipsis in the
inputs or output, the routine doesn't assign false to the variables. This
change initializes the two variables with false to fix the problem.
PiperOrigin-RevId: 391772004
Change-Id: I17b6c88aadef4131470378e48cced054bf252e86
| 1 |
/*
 * Flush the TLB for the current task's mm on this CPU and, when the mm is
 * active on other CPUs, send flush IPIs to them as well.
 */
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	/* Keep the CPU stable while we read mm_cpumask and flush. */
	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* NOTE(review): the flush must be ordered against concurrent
	 * switch_mm() updating mm_cpumask; local_flush_tlb() is presumably
	 * relied on as a full barrier here — confirm against the
	 * switch_mm()-vs-flush synchronization documentation. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	/* Shoot down any other CPU currently running this mm. */
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}
|
Vulnerable
|
[
"CWE-362"
] |
linux
|
71b3c126e61177eb693423f2e18a1914205b165e
|
3.3849667274721777e+38
| 13 |
x86/mm: Add barriers and document switch_mm()-vs-flush synchronization
When switch_mm() activates a new PGD, it also sets a bit that
tells other CPUs that the PGD is in use so that TLB flush IPIs
will be sent. In order for that to work correctly, the bit
needs to be visible prior to loading the PGD and therefore
starting to fill the local TLB.
Document all the barriers that make this work correctly and add
a couple that were missing.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Cc: stable@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| 1 |
/* Set the subject alternative names of a Cert from a DER-encoded
 * certificate buffer.  Decodes and caches the DER data unless it is
 * already cached on the Cert.  Returns BAD_FUNC_ARG for a NULL cert,
 * otherwise the (possibly negative) result of decoding / name copying. */
int wc_SetAltNamesBuffer(Cert* cert, const byte* der, int derSz)
{
    int ret = 0;

    if (cert == NULL)
        return BAD_FUNC_ARG;

    /* Decode the cert into the cache only when this buffer is not the
     * one already cached on the Cert. */
    if (cert->der != der)
        ret = wc_SetCert_LoadDer(cert, der, derSz);

    if (ret >= 0) {
        ret = SetAltNamesFromDcert(cert, (DecodedCert*)cert->decodedCert);
#ifndef WOLFSSL_CERT_GEN_CACHE
        /* Without the cache option, free the decoded cert immediately. */
        wc_SetCert_Free(cert);
#endif
    }

    return ret;
}
|
Safe
|
[
"CWE-125",
"CWE-345"
] |
wolfssl
|
f93083be72a3b3d956b52a7ec13f307a27b6e093
|
1.6797826122673281e+38
| 24 |
OCSP: improve handling of OCSP no check extension
| 0 |
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
enum bpf_reg_type which)
{
/* The register can already have a range from prior markings.
* This is fine as long as it hasn't been advanced from its
* origin.
*/
return reg->type == which &&
reg->id == 0 &&
reg->off == 0 &&
tnum_equals_const(reg->var_off, 0);
}
|
Safe
|
[
"CWE-20"
] |
linux
|
c131187db2d3fa2f8bf32fdf4e9a4ef805168467
|
1.836604299269813e+38
| 12 |
bpf: fix branch pruning logic
when the verifier detects that register contains a runtime constant
and it's compared with another constant it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
| 0 |
/*
 * Restrict decompression to a horizontal region [*xoffset, *xoffset + *width)
 * of the output image.  On return, *xoffset and *width are adjusted outward
 * to the nearest iMCU column boundaries, so callers must re-read both values
 * (and cinfo->output_width) before allocating output buffers.
 * Errors are reported through the libjpeg error manager (ERREXIT does not
 * return).
 */
jpeg_crop_scanline(j_decompress_ptr cinfo, JDIMENSION *xoffset,
JDIMENSION *width)
{
int ci, align, orig_downsampled_width;
JDIMENSION input_xoffset;
boolean reinit_upsampler = FALSE;
jpeg_component_info *compptr;
/* Only legal while scanning and before the first scanline is emitted. */
if (cinfo->global_state != DSTATE_SCANNING || cinfo->output_scanline != 0)
ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);
if (!xoffset || !width)
ERREXIT(cinfo, JERR_BAD_CROP_SPEC);
/* xoffset and width must fall within the output image dimensions. */
if (*width == 0 || *xoffset + *width > cinfo->output_width)
ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);
/* No need to do anything if the caller wants the entire width. */
if (*width == cinfo->output_width)
return;
/* Ensuring the proper alignment of xoffset is tricky. At minimum, it
* must align with an MCU boundary, because:
*
* (1) The IDCT is performed in blocks, and it is not feasible to modify
* the algorithm so that it can transform partial blocks.
* (2) Because of the SIMD extensions, any input buffer passed to the
* upsampling and color conversion routines must be aligned to the
* SIMD word size (for instance, 128-bit in the case of SSE2.) The
* easiest way to accomplish this without copying data is to ensure
* that upsampling and color conversion begin at the start of the
* first MCU column that will be inverse transformed.
*
* In practice, we actually impose a stricter alignment requirement. We
* require that xoffset be a multiple of the maximum MCU column width of all
* of the components (the "iMCU column width.") This is to simplify the
* single-pass decompression case, allowing us to use the same MCU column
* width for all of the components.
*/
/* Single-component scans use the unscaled DCT block size as the iMCU
* column width; otherwise scale by the maximum horizontal sampling factor.
*/
if (cinfo->comps_in_scan == 1 && cinfo->num_components == 1)
align = cinfo->_min_DCT_scaled_size;
else
align = cinfo->_min_DCT_scaled_size * cinfo->max_h_samp_factor;
/* Adjust xoffset to the nearest iMCU boundary <= the requested value */
input_xoffset = *xoffset;
*xoffset = (input_xoffset / align) * align;
/* Adjust the width so that the right edge of the output image is as
* requested (only the left edge is altered.) It is important that calling
* programs check this value after this function returns, so that they can
* allocate an output buffer with the appropriate size.
*/
*width = *width + input_xoffset - *xoffset;
cinfo->output_width = *width;
/* Set the first and last iMCU columns that we must decompress. These values
* will be used in single-scan decompressions.
*/
cinfo->master->first_iMCU_col = (JDIMENSION)(long)(*xoffset) / (long)align;
cinfo->master->last_iMCU_col =
(JDIMENSION)jdiv_round_up((long)(*xoffset + cinfo->output_width),
(long)align) - 1;
/* Per-component bookkeeping: recompute each component's downsampled width
* for the cropped region and its MCU column range.
*/
for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components;
ci++, compptr++) {
int hsf = (cinfo->comps_in_scan == 1 && cinfo->num_components == 1) ?
1 : compptr->h_samp_factor;
/* Set downsampled_width to the new output width. */
orig_downsampled_width = compptr->downsampled_width;
compptr->downsampled_width =
(JDIMENSION)jdiv_round_up((long)(cinfo->output_width *
compptr->h_samp_factor),
(long)cinfo->max_h_samp_factor);
/* If cropping shrank this component below 2 samples, the fancy
* upsampler can no longer be used and the upsampler must be
* reinitialized (see below).
*/
if (compptr->downsampled_width < 2 && orig_downsampled_width >= 2)
reinit_upsampler = TRUE;
/* Set the first and last iMCU columns that we must decompress. These
* values will be used in multi-scan decompressions.
*/
cinfo->master->first_MCU_col[ci] =
(JDIMENSION)(long)(*xoffset * hsf) / (long)align;
cinfo->master->last_MCU_col[ci] =
(JDIMENSION)jdiv_round_up((long)((*xoffset + cinfo->output_width) * hsf),
(long)align) - 1;
}
if (reinit_upsampler) {
/* Re-run upsampler setup without reallocating its buffers. */
cinfo->master->jinit_upsampler_no_alloc = TRUE;
jinit_upsampler(cinfo);
cinfo->master->jinit_upsampler_no_alloc = FALSE;
}
}
|
Safe
|
[] |
libjpeg-turbo
|
6d2e8837b440ce4d8befd805a5abc0d351028d70
|
1.4988219888874895e+38
| 95 |
jpeg_skip_scanlines(): Avoid NULL + 0 UBSan error
This error occurs at the call to (*cinfo->cconvert->color_convert)() in
sep_upsample() whenever cinfo->upsample->need_context_rows == TRUE
(i.e. whenever h2v2 or h1v2 fancy upsampling is used.) The error is
innocuous, since (*cinfo->cconvert->color_convert)() points to a dummy
function (noop_convert()) in that case.
Fixes #470
| 0 |
rgb2la(UINT8 *out, const UINT8 *in, int xsize) {
    int i;
    /* Walk xsize pixels; both buffers advance 4 bytes per pixel. */
    for (i = 0; i < xsize; i++) {
        /* ITU-R Recommendation 601-2 (assuming nonlinear RGB) */
        UINT8 luma = (UINT8)(L24(in) >> 16);
        /* Replicate the luma into the first three bytes; last byte is
         * fully-opaque alpha. */
        out[0] = luma;
        out[1] = luma;
        out[2] = luma;
        out[3] = 255;
        in += 4;
        out += 4;
    }
}
|
Safe
|
[
"CWE-120"
] |
Pillow
|
518ee3722a99d7f7d890db82a20bd81c1c0327fb
|
1.6575345107392815e+38
| 8 |
Use snprintf instead of sprintf
| 0 |
// Read up to rgw_max_chunk_size bytes of the current form part into @bl.
// Sets @again when more data remains before the part boundary.
// Returns the number of bytes read, or a negative error code.
int RGWFormPost::get_data(ceph::bufferlist& bl, bool& again)
{
  bool reached_boundary;

  const int ret = read_data(bl, s->cct->_conf->rgw_max_chunk_size,
                            reached_boundary, stream_done);
  if (ret < 0) {
    return ret;
  }

  /* Tell RGWPostObj::execute() that it has some data to put. */
  again = !reached_boundary;

  return bl.length();
}
|
Safe
|
[
"CWE-617"
] |
ceph
|
f44a8ae8aa27ecef69528db9aec220f12492810e
|
2.7892268447281832e+38
| 15 |
rgw: RGWSwiftWebsiteHandler::is_web_dir checks empty subdir_name
checking for empty name avoids later assertion in RGWObjectCtx::set_atomic
Fixes: CVE-2021-3531
Reviewed-by: Casey Bodley <cbodley@redhat.com>
Signed-off-by: Casey Bodley <cbodley@redhat.com>
(cherry picked from commit 7196a469b4470f3c8628489df9a41ec8b00a5610)
| 0 |
/*
 * Handle the "plot" command: record the command token, reset per-plot
 * state, clear the mouse-readout user variables, and hand off to
 * plotrequest().  Also re-enables any plots previously toggled invisible.
 */
plot_command()
{
/* Remember where the plot command started so it can be replayed/refreshed. */
plot_token = c_token++;
plotted_data_from_stdin = FALSE;
refresh_nplots = 0;
SET_CURSOR_WAIT;
#ifdef USE_MOUSE
plot_mode(MODE_PLOT);
/* Invalidate mouse coordinate variables until the mouse reports again. */
add_udv_by_name("MOUSE_X")->udv_value.type = NOTDEFINED;
add_udv_by_name("MOUSE_Y")->udv_value.type = NOTDEFINED;
add_udv_by_name("MOUSE_X2")->udv_value.type = NOTDEFINED;
add_udv_by_name("MOUSE_Y2")->udv_value.type = NOTDEFINED;
add_udv_by_name("MOUSE_BUTTON")->udv_value.type = NOTDEFINED;
add_udv_by_name("MOUSE_SHIFT")->udv_value.type = NOTDEFINED;
add_udv_by_name("MOUSE_ALT")->udv_value.type = NOTDEFINED;
add_udv_by_name("MOUSE_CTRL")->udv_value.type = NOTDEFINED;
#endif
plotrequest();
/* Clear "hidden" flag for any plots that may have been toggled off */
if (term->modify_plots)
term->modify_plots(MODPLOTS_SET_VISIBLE, -1);
SET_CURSOR_ARROW;
}
|
Safe
|
[
"CWE-415"
] |
gnuplot
|
052cbd17c3cbbc602ee080b2617d32a8417d7563
|
1.2061831521939072e+38
| 23 |
successive failures of "set print <foo>" could cause double-free
Bug #2312
| 0 |
/*
 * vty command: "show ip bgp ipv4 (unicast|multicast) rsclient summary".
 * argv[0] is the address-family modifier; anything starting with 'm'
 * selects SAFI_MULTICAST, otherwise SAFI_UNICAST is used.
 */
DEFUN (show_ip_bgp_ipv4_rsclient_summary,
show_ip_bgp_ipv4_rsclient_summary_cmd,
"show ip bgp ipv4 (unicast|multicast) rsclient summary",
SHOW_STR
IP_STR
BGP_STR
"Address family\n"
"Address Family modifier\n"
"Address Family modifier\n"
"Information about Route Server Clients\n"
"Summary of all Route Server Clients\n")
{
/* Only the first character distinguishes "multicast" from "unicast". */
if (strncmp (argv[0], "m", 1) == 0)
return bgp_show_rsclient_summary_vty (vty, NULL, AFI_IP, SAFI_MULTICAST);
return bgp_show_rsclient_summary_vty (vty, NULL, AFI_IP, SAFI_UNICAST);
}
|
Safe
|
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
|
1.2439857645860853e+38
| 17 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <paul.jakma@sun.com>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
| 0 |
/*
 * Map the HID++ 1.0 battery-mileage status field to a POWER_SUPPLY_STATUS_*
 * value.  The charging state is encoded in the two most significant bits of
 * @param; the unlisted value 0x03 (charging error) maps to NOT_CHARGING.
 */
static int hidpp10_battery_mileage_map_status(u8 param)
{
	u8 state = param >> 6;

	if (state == 0x00)
		/* discharging (in use) */
		return POWER_SUPPLY_STATUS_DISCHARGING;

	if (state == 0x01)
		/* charging */
		return POWER_SUPPLY_STATUS_CHARGING;

	if (state == 0x02)
		/* charge complete */
		return POWER_SUPPLY_STATUS_FULL;

	/* 0x03 = charging error */
	return POWER_SUPPLY_STATUS_NOT_CHARGING;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
|
1.8874337742746702e+38
| 25 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: syzbot+403741a091bf41d4ae79@syzkaller.appspotmail.com
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@vger.kernel.org>
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
| 0 |
/*
 * Unfreeze callback for ext4: restore the journal needs_recovery feature
 * flag (cleared while frozen) and write the superblock back out.  A
 * read-only mount has nothing to do.  Always returns 0.
 */
static int ext4_unfreeze(struct super_block *sb)
{
if (sb->s_flags & MS_RDONLY)
return 0;
if (EXT4_SB(sb)->s_journal) {
/* Reset the needs_recovery flag before the fs is unlocked. */
ext4_set_feature_journal_needs_recovery(sb);
}
/* Synchronously commit the superblock so the flag hits disk. */
ext4_commit_super(sb, 1);
return 0;
}
|
Safe
|
[
"CWE-362"
] |
linux
|
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
|
4.681274383919747e+37
| 13 |
ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <jack@suse.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
| 0 |
/*
 * Capture (or read from a pcap file) 802.11 packets until one passes the
 * configured filter and, unless opt.fast is set, is accepted interactively
 * by the user.  The chosen packet is left in the global h80211 buffer and
 * its length stored in *caplen.  If just_grab is zero, the packet is also
 * saved to a timestamped replay_src-*.cap file.
 * Returns 0 on success, 1 on error or end of input.
 */
int capture_ask_packet( int *caplen, int just_grab )
{
time_t tr;
struct timeval tv;
struct tm *lt;
fd_set rfds;
long nb_pkt_read;
int i, j, n, mi_b=0, mi_s=0, mi_d=0, mi_t=0, mi_r=0, is_wds=0, key_index_offset;
int ret, z;
FILE *f_cap_out;
struct pcap_file_header pfh_out;
struct pcap_pkthdr pkh;
/* Fill in default filter values for anything the user left unset. */
if( opt.f_minlen < 0 ) opt.f_minlen = 40;
if( opt.f_maxlen < 0 ) opt.f_maxlen = 1500;
if( opt.f_type < 0 ) opt.f_type = 2;
if( opt.f_subtype < 0 ) opt.f_subtype = 0;
if( opt.f_iswep < 0 ) opt.f_iswep = 1;
tr = time( NULL );
nb_pkt_read = 0;
/* Restore default Ctrl-C behavior while waiting on user input. */
signal( SIGINT, SIG_DFL );
/* Main loop: read packets until one is filtered in and accepted. */
while( 1 )
{
/* Print a progress line at most once per second. */
if( time( NULL ) - tr > 0 )
{
tr = time( NULL );
printf( "\rRead %ld packets...\r", nb_pkt_read );
fflush( stdout );
}
if( opt.s_file == NULL )
{
/* Live capture path: wait (with 1s timeout) for the interface fd. */
FD_ZERO( &rfds );
FD_SET( dev.fd_in, &rfds );
tv.tv_sec = 1;
tv.tv_usec = 0;
if( select( dev.fd_in + 1, &rfds, NULL, NULL, &tv ) < 0 )
{
if( errno == EINTR ) continue;
perror( "select failed" );
return( 1 );
}
if( ! FD_ISSET( dev.fd_in, &rfds ) )
continue;
gettimeofday( &tv, NULL );
*caplen = read_packet( h80211, sizeof( h80211 ), NULL );
if( *caplen < 0 ) return( 1 );
if( *caplen == 0 ) continue;
}
else
{
/* Offline path: read the next record from the input pcap file. */
/* there are no hidden backdoors in this source code */
n = sizeof( pkh );
if( fread( &pkh, n, 1, dev.f_cap_in ) != 1 )
{
printf( "\r\33[KEnd of file.\n" );
return( 1 );
}
/* Byte-swap the record header if the file has opposite endianness. */
if( dev.pfh_in.magic == TCPDUMP_CIGAM ) {
SWAP32( pkh.caplen );
SWAP32( pkh.len );
}
tv.tv_sec = pkh.tv_sec;
tv.tv_usec = pkh.tv_usec;
n = *caplen = pkh.caplen;
/* Reject lengths that would overflow either working buffer. */
if( n <= 0 || n > (int) sizeof( h80211 ) || n > (int) sizeof( tmpbuf ) )
{
printf( "\r\33[KInvalid packet length %d.\n", n );
return( 1 );
}
if( fread( h80211, n, 1, dev.f_cap_in ) != 1 )
{
printf( "\r\33[KEnd of file.\n" );
return( 1 );
}
if( dev.pfh_in.linktype == LINKTYPE_PRISM_HEADER )
{
/* remove the prism header */
if( h80211[7] == 0x40 )
n = 64;
else
n = *(int *)( h80211 + 4 );
if( n < 8 || n >= (int) *caplen )
continue;
memcpy( tmpbuf, h80211, *caplen );
*caplen -= n;
memcpy( h80211, tmpbuf + n, *caplen );
}
if( dev.pfh_in.linktype == LINKTYPE_RADIOTAP_HDR )
{
/* remove the radiotap header */
n = *(unsigned short *)( h80211 + 2 );
if( n <= 0 || n >= (int) *caplen )
continue;
memcpy( tmpbuf, h80211, *caplen );
*caplen -= n;
memcpy( h80211, tmpbuf + n, *caplen );
}
if( dev.pfh_in.linktype == LINKTYPE_PPI_HDR )
{
/* remove the PPI header */
n = le16_to_cpu(*(unsigned short *)( h80211 + 2));
if( n <= 0 || n>= (int) *caplen )
continue;
/* for a while Kismet logged broken PPI headers */
if ( n == 24 && le16_to_cpu(*(unsigned short *)(h80211 + 8)) == 2 )
n = 32;
if( n <= 0 || n>= (int) *caplen )
continue;
memcpy( tmpbuf, h80211, *caplen );
*caplen -= n;
memcpy( h80211, tmpbuf + n, *caplen );
}
}
nb_pkt_read++;
/* Skip packets that do not match the configured filter. */
if( filter_packet( h80211, *caplen ) != 0 )
continue;
/* In fast mode, accept the first filtered packet without asking. */
if(opt.fast)
break;
/* 802.11 header length: 24 bytes, 30 for WDS, +2 when QoS bit is set. */
z = ( ( h80211[1] & 3 ) != 3 ) ? 24 : 30;
if ( ( h80211[0] & 0x80 ) == 0x80 ) /* QoS */
z+=2;
/* Locate the MAC address fields from the ToDS/FromDS bits. */
switch( h80211[1] & 3 )
{
case 0: mi_b = 16; mi_s = 10; mi_d = 4; is_wds = 0; break;
case 1: mi_b = 4; mi_s = 10; mi_d = 16; is_wds = 0; break;
case 2: mi_b = 10; mi_s = 16; mi_d = 4; is_wds = 0; break;
case 3: mi_t = 10; mi_r = 4; mi_d = 16; mi_s = 24; is_wds = 1; break; // WDS packet
}
printf( "\n\n Size: %d, FromDS: %d, ToDS: %d",
*caplen, ( h80211[1] & 2 ) >> 1, ( h80211[1] & 1 ) );
/* For protected data frames, report WEP vs WPA from the key-index byte. */
if( ( h80211[0] & 0x0C ) == 8 && ( h80211[1] & 0x40 ) != 0 )
{
// if (is_wds) key_index_offset = 33; // WDS packets have an additional MAC, so the key index is at byte 33
// else key_index_offset = 27;
key_index_offset = z+3;
if( ( h80211[key_index_offset] & 0x20 ) == 0 )
printf( " (WEP)" );
else
printf( " (WPA)" );
}
printf( "\n\n" );
if (is_wds) {
printf( " Transmitter = %02X:%02X:%02X:%02X:%02X:%02X\n",
h80211[mi_t ], h80211[mi_t + 1],
h80211[mi_t + 2], h80211[mi_t + 3],
h80211[mi_t + 4], h80211[mi_t + 5] );
printf( " Receiver = %02X:%02X:%02X:%02X:%02X:%02X\n",
h80211[mi_r ], h80211[mi_r + 1],
h80211[mi_r + 2], h80211[mi_r + 3],
h80211[mi_r + 4], h80211[mi_r + 5] );
} else {
printf( " BSSID = %02X:%02X:%02X:%02X:%02X:%02X\n",
h80211[mi_b ], h80211[mi_b + 1],
h80211[mi_b + 2], h80211[mi_b + 3],
h80211[mi_b + 4], h80211[mi_b + 5] );
}
printf( " Dest. MAC = %02X:%02X:%02X:%02X:%02X:%02X\n",
h80211[mi_d ], h80211[mi_d + 1],
h80211[mi_d + 2], h80211[mi_d + 3],
h80211[mi_d + 4], h80211[mi_d + 5] );
printf( " Source MAC = %02X:%02X:%02X:%02X:%02X:%02X\n",
h80211[mi_s ], h80211[mi_s + 1],
h80211[mi_s + 2], h80211[mi_s + 3],
h80211[mi_s + 4], h80211[mi_s + 5] );
/* print a hex dump of the packet */
for( i = 0; i < *caplen; i++ )
{
if( ( i & 15 ) == 0 )
{
/* Truncate the dump after 224 bytes. */
if( i == 224 )
{
printf( "\n --- CUT ---" );
break;
}
printf( "\n 0x%04x: ", i );
}
printf( "%02x", h80211[i] );
if( ( i & 1 ) != 0 )
printf( " " );
/* Pad the final partial line, then print its ASCII column. */
if( i == *caplen - 1 && ( ( i + 1 ) & 15 ) != 0 )
{
for( j = ( ( i + 1 ) & 15 ); j < 16; j++ )
{
printf( " " );
if( ( j & 1 ) != 0 )
printf( " " );
}
printf( " " );
for( j = 16 - ( ( i + 1 ) & 15 ); j < 16; j++ )
printf( "%c", ( h80211[i - 15 + j] < 32 ||
h80211[i - 15 + j] > 126 )
? '.' : h80211[i - 15 + j] );
}
/* ASCII column for each completed 16-byte line. */
if( i > 0 && ( ( i + 1 ) & 15 ) == 0 )
{
printf( " " );
for( j = 0; j < 16; j++ )
printf( "%c", ( h80211[i - 15 + j] < 32 ||
h80211[i - 15 + j] > 127 )
? '.' : h80211[i - 15 + j] );
}
}
/* Ask the user whether to keep this packet. */
printf( "\n\nUse this packet ? " );
fflush( stdout );
ret=0;
while(!ret) ret = scanf( "%s", tmpbuf );
printf( "\n" );
if( tmpbuf[0] == 'y' || tmpbuf[0] == 'Y' )
break;
}
if(!just_grab)
{
/* Save the chosen packet into a fresh single-packet pcap file. */
pfh_out.magic = TCPDUMP_MAGIC;
pfh_out.version_major = PCAP_VERSION_MAJOR;
pfh_out.version_minor = PCAP_VERSION_MINOR;
pfh_out.thiszone = 0;
pfh_out.sigfigs = 0;
pfh_out.snaplen = 65535;
pfh_out.linktype = LINKTYPE_IEEE802_11;
lt = localtime( (const time_t *) &tv.tv_sec );
memset( strbuf, 0, sizeof( strbuf ) );
snprintf( strbuf, sizeof( strbuf ) - 1,
"replay_src-%02d%02d-%02d%02d%02d.cap",
lt->tm_mon + 1, lt->tm_mday,
lt->tm_hour, lt->tm_min, lt->tm_sec );
printf( "Saving chosen packet in %s\n", strbuf );
if( ( f_cap_out = fopen( strbuf, "wb+" ) ) == NULL )
{
perror( "fopen failed" );
return( 1 );
}
n = sizeof( struct pcap_file_header );
if( fwrite( &pfh_out, n, 1, f_cap_out ) != 1 )
{
fclose(f_cap_out);
perror( "fwrite failed\n" );
return( 1 );
}
pkh.tv_sec = tv.tv_sec;
pkh.tv_usec = tv.tv_usec;
pkh.caplen = *caplen;
pkh.len = *caplen;
n = sizeof( pkh );
if( fwrite( &pkh, n, 1, f_cap_out ) != 1 )
{
fclose(f_cap_out);
perror( "fwrite failed" );
return( 1 );
}
n = pkh.caplen;
if( fwrite( h80211, n, 1, f_cap_out ) != 1 )
{
fclose(f_cap_out);
perror( "fwrite failed" );
return( 1 );
}
fclose( f_cap_out );
}
return( 0 );
}
|
Safe
|
[
"CWE-787"
] |
aircrack-ng
|
091b153f294b9b695b0b2831e65936438b550d7b
|
2.351125858004448e+38
| 333 |
Aireplay-ng: Fixed tcp_test stack overflow (Closes #14 on GitHub).
git-svn-id: http://svn.aircrack-ng.org/trunk@2417 28c6078b-6c39-48e3-add9-af49d547ecab
| 0 |
/*
 * Sequencer-device free callback: drop the rawmidi instance's back-pointer
 * to the sequencer device being destroyed.
 */
static void snd_rawmidi_dev_seq_free(struct snd_seq_device *device)
{
	((struct snd_rawmidi *) device->private_data)->seq_dev = NULL;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
c1f6e3c818dd734c30f6a7eeebf232ba2cf3181d
|
3.425374085177134e+37
| 6 |
ALSA: rawmidi: Fix racy buffer resize under concurrent accesses
The rawmidi core allows user to resize the runtime buffer via ioctl,
and this may lead to UAF when performed during concurrent reads or
writes: the read/write functions unlock the runtime lock temporarily
during copying form/to user-space, and that's the race window.
This patch fixes the hole by introducing a reference counter for the
runtime buffer read/write access and returns -EBUSY error when the
resize is performed concurrently against read/write.
Note that the ref count field is a simple integer instead of
refcount_t here, since the all contexts accessing the buffer is
basically protected with a spinlock, hence we need no expensive atomic
ops. Also, note that this busy check is needed only against read /
write functions, and not in receive/transmit callbacks; the race can
happen only at the spinlock hole mentioned in the above, while the
whole function is protected for receive / transmit callbacks.
Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com>
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/CAFcO6XMWpUVK_yzzCpp8_XP7+=oUpQvuBeCbMffEDkpe8jWrfg@mail.gmail.com
Link: https://lore.kernel.org/r/s5heerw3r5z.wl-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
| 0 |
/*
 * Tear down the netsplit frontend: cancel the pending timeout source (if
 * one was scheduled), then unregister all signal handlers and the
 * /netsplit command bound at init time.
 */
void fe_netsplit_deinit(void)
{
/* split_tag == -1 means no timeout source is currently active. */
if (split_tag != -1) {
g_source_remove(split_tag);
signal_remove("print starting", (SIGNAL_FUNC) sig_print_starting);
}
signal_remove("netsplit new", (SIGNAL_FUNC) sig_netsplit_servers);
signal_remove("setup changed", (SIGNAL_FUNC) read_settings);
command_unbind("netsplit", (SIGNAL_FUNC) cmd_netsplit);
}
|
Safe
|
[
"CWE-416"
] |
irssi
|
a6cae91cecba2e8cf11ed779c5da5a229472575c
|
5.202020340983146e+37
| 11 |
Merge pull request #812 from ailin-nemui/tape-netsplit
revert netsplit print optimisation
(cherry picked from commit 7de1378dab8081932d9096e19ae3d0921e560230)
| 0 |
/*
 * Report whether the last SDP answer in this negotiation came from the
 * remote party.  Returns PJ_FALSE (via assertion) when @neg is NULL.
 */
PJ_DEF(pj_bool_t) pjmedia_sdp_neg_was_answer_remote(pjmedia_sdp_neg *neg)
{
    pj_bool_t was_remote;

    PJ_ASSERT_RETURN(neg, PJ_FALSE);

    was_remote = neg->answer_was_remote;
    return was_remote;
}
|
Safe
|
[
"CWE-400",
"CWE-200",
"CWE-754"
] |
pjproject
|
97b3d7addbaa720b7ddb0af9bf6f3e443e664365
|
4.379612287444084e+37
| 6 |
Merge pull request from GHSA-hvq6-f89p-frvp
| 0 |
/*
 * Exercise the nettle RSA decryption APIs (rsa_decrypt, rsa_decrypt_tr,
 * rsa_sec_decrypt) against a fixed key: round-trip a short message, then
 * verify that short buffers, invalid lengths, malformed ciphertext inputs
 * (zero, larger than n), and a corrupted private key are all rejected
 * without writing past the supplied buffer.
 */
test_main(void)
{
struct rsa_public_key pub;
struct rsa_private_key key;
struct knuth_lfib_ctx lfib;
/* FIXME: How is this spelled? */
const unsigned char msg[] = "Squemish ossifrage";
size_t msg_length = LLENGTH(msg);
uint8_t *decrypted;
size_t decrypted_length;
uint8_t after;
mpz_t gibberish;
mpz_t bad_input;
rsa_private_key_init(&key);
rsa_public_key_init(&pub);
mpz_init(gibberish);
mpz_init(bad_input);
/* Deterministic PRNG so the test is reproducible. */
knuth_lfib_init(&lfib, 17);
test_rsa_set_key_1(&pub, &key);
if (verbose)
fprintf(stderr, "msg: `%s', length = %d\n", msg, (int) msg_length);
ASSERT(msg_length <= key.size);
/* Encrypt the message; the ciphertext lands in gibberish. */
ASSERT(rsa_encrypt(&pub,
&lfib, (nettle_random_func *) knuth_lfib_random,
msg_length, msg,
gibberish));
if (verbose)
{
fprintf(stderr, "encrypted: ");
mpz_out_str(stderr, 10, gibberish);
}
decrypted = xalloc(key.size + 1);
/* Fill the buffer with random bytes and remember the byte just past the
 * message so overwrites can be detected. */
knuth_lfib_random (&lfib, msg_length + 1, decrypted);
after = decrypted[msg_length];
/* A buffer one byte too small must be rejected. */
decrypted_length = msg_length - 1;
ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, gibberish));
/* Exact-size buffer decrypts correctly and does not touch the sentinel. */
decrypted_length = msg_length;
ASSERT(rsa_decrypt(&key, &decrypted_length, decrypted, gibberish));
ASSERT(decrypted_length == msg_length);
ASSERT(MEMEQ(msg_length, msg, decrypted));
ASSERT(decrypted[msg_length] == after);
/* Oversized buffer: length is trimmed to the message, sentinel intact. */
knuth_lfib_random (&lfib, key.size + 1, decrypted);
after = decrypted[key.size];
decrypted_length = key.size;
ASSERT(rsa_decrypt(&key, &decrypted_length, decrypted, gibberish));
ASSERT(decrypted_length == msg_length);
ASSERT(MEMEQ(msg_length, msg, decrypted));
ASSERT(decrypted[key.size] == after);
/* Same round-trip through the RSA-blinded (timing-resistant) variant. */
knuth_lfib_random (&lfib, msg_length + 1, decrypted);
after = decrypted[msg_length];
decrypted_length = msg_length;
ASSERT(rsa_decrypt_tr(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
&decrypted_length, decrypted, gibberish));
ASSERT(decrypted_length == msg_length);
ASSERT(MEMEQ(msg_length, msg, decrypted));
ASSERT(decrypted[msg_length] == after);
/* test side channel resistant variant */
knuth_lfib_random (&lfib, msg_length + 1, decrypted);
after = decrypted[msg_length];
decrypted_length = msg_length;
ASSERT(rsa_sec_decrypt(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
decrypted_length, decrypted, gibberish));
ASSERT(MEMEQ(msg_length, msg, decrypted));
ASSERT(decrypted[msg_length] == after);
/* test invalid length to rsa_sec_decrypt */
knuth_lfib_random (&lfib, msg_length + 1, decrypted);
decrypted_length = msg_length - 1;
after = decrypted[decrypted_length] = 'X';
decrypted[0] = 'A';
ASSERT(!rsa_sec_decrypt(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
decrypted_length, decrypted, gibberish));
/* On failure the buffer contents must be left untouched. */
ASSERT(decrypted[decrypted_length] == after);
ASSERT(decrypted[0] == 'A');
/* Test zero input. */
mpz_set_ui (bad_input, 0);
decrypted_length = msg_length;
ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
ASSERT(!rsa_decrypt_tr(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
&decrypted_length, decrypted, bad_input));
ASSERT(!rsa_sec_decrypt(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
decrypted_length, decrypted, bad_input));
ASSERT(decrypted_length == msg_length);
/* Test input that is slightly larger than n */
mpz_add(bad_input, gibberish, pub.n);
decrypted_length = msg_length;
ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
ASSERT(!rsa_decrypt_tr(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
&decrypted_length, decrypted, bad_input));
ASSERT(!rsa_sec_decrypt(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
decrypted_length, decrypted, bad_input));
ASSERT(decrypted_length == msg_length);
/* Test input that is considerably larger than n */
mpz_mul_2exp (bad_input, pub.n, 100);
mpz_add (bad_input, bad_input, gibberish);
decrypted_length = msg_length;
ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
ASSERT(!rsa_decrypt_tr(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
&decrypted_length, decrypted, bad_input));
ASSERT(!rsa_sec_decrypt(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
decrypted_length, decrypted, bad_input));
ASSERT(decrypted_length == msg_length);
/* Test invalid key. */
mpz_add_ui (key.q, key.q, 2);
decrypted_length = key.size;
ASSERT(!rsa_decrypt_tr(&pub, &key,
&lfib, (nettle_random_func *) knuth_lfib_random,
&decrypted_length, decrypted, gibberish));
rsa_private_key_clear(&key);
rsa_public_key_clear(&pub);
mpz_clear(gibberish);
mpz_clear(bad_input);
free(decrypted);
}
|
Safe
|
[
"CWE-20"
] |
nettle
|
0ad0b5df315665250dfdaa4a1e087f4799edaefe
|
3.5315244902333724e+37
| 149 |
Add input check to rsa_decrypt family of functions.
| 0 |
/*
 * Adjust the debug-only subscriber count on a credential set by @n.
 * Compiles to a no-op unless CONFIG_DEBUG_CREDENTIALS is enabled.
 */
static inline void alter_cred_subscribers(const struct cred *_cred, int n)
{
#ifdef CONFIG_DEBUG_CREDENTIALS
	/* Cast away const: subscribers is debug bookkeeping, not real state. */
	atomic_add(n, &((struct cred *) _cred)->subscribers);
#endif
}
|
Safe
|
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
|
4.291041162285126e+37
| 8 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
| 0 |
/*
 * Property-unset handler for the incomplete-class placeholder object:
 * instead of unsetting anything, raise the standard E_NOTICE explaining
 * that the object's class is not available.  @member and @key are unused.
 */
static void incomplete_class_unset_property(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */
{
incomplete_class_message(object, E_NOTICE TSRMLS_CC);
}
|
Safe
|
[] |
php-src
|
fb83c76deec58f1fab17c350f04c9f042e5977d1
|
1.1743281854267966e+37
| 4 |
Check that the type is correct
| 0 |
/*
 * Completion callback for a device-activation request.  @user_data is the
 * NMApplet; @path is unused here.  Logs a warning on failure and refreshes
 * the applet icon in every case.
 */
activate_device_cb (gpointer user_data, const char *path, GError *error)
{
	if (error != NULL)
		nm_warning ("Device Activation failed: %s", error->message);

	applet_schedule_update_icon (NM_APPLET (user_data));
}
|
Safe
|
[
"CWE-310"
] |
network-manager-applet
|
4020594dfbf566f1852f0acb36ad631a9e73a82b
|
2.845205080004798e+38
| 6 |
core: fix CA cert mishandling after cert file deletion (deb #560067) (rh #546793)
If a connection was created with a CA certificate, but the user later
moved or deleted that CA certificate, the applet would simply provide the
connection to NetworkManager without any CA certificate. This could cause
NM to connect to the original network (or a network spoofing the original
network) without verifying the identity of the network as the user
expects.
In the future we can/should do better here by (1) alerting the user that
some connection is now no longer complete by flagging it in the connection
editor or notifying the user somehow, and (2) by using a freaking' cert
store already (not that Linux has one yet).
| 0 |
/*
 * oom_kill_process - kill the OOM-selected task (or one of its children)
 * to reclaim memory.
 * @oc:      OOM context; oc->chosen carries a task reference chosen by the
 *           OOM killer and oc->chosen_points its badness score.
 * @message: prefix for the "Kill process ..." report line.
 *
 * Consumes the reference held on oc->chosen: every exit path either drops
 * it with put_task_struct() or transfers it to the final victim, whose
 * reference is dropped before returning.
 */
static void oom_kill_process(struct oom_control *oc, const char *message)
{
struct task_struct *p = oc->chosen;
unsigned int points = oc->chosen_points;
struct task_struct *victim = p;
struct task_struct *child;
struct task_struct *t;
struct mm_struct *mm;
unsigned int victim_points = 0;
static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
bool can_oom_reap = true;
/*
* If the task is already exiting, don't alarm the sysadmin or kill
* its children or threads, just give it access to memory reserves
* so it can die quickly
*/
task_lock(p);
if (task_will_free_mem(p)) {
mark_oom_victim(p);
wake_oom_reaper(p);
task_unlock(p);
put_task_struct(p);
return;
}
task_unlock(p);
/* Rate-limit the (expensive) diagnostic dump, not the kill itself. */
if (__ratelimit(&oom_rs))
dump_header(oc, p);
pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
message, task_pid_nr(p), p->comm, points);
/*
* If any of p's children has a different mm and is eligible for kill,
* the one with the highest oom_badness() score is sacrificed for its
* parent. This attempts to lose the minimal amount of work done while
* still freeing memory.
*/
read_lock(&tasklist_lock);
for_each_thread(p, t) {
list_for_each_entry(child, &t->children, sibling) {
unsigned int child_points;
/* Killing a child that shares p's mm frees nothing extra. */
if (process_shares_mm(child, p->mm))
continue;
/*
* oom_badness() returns 0 if the thread is unkillable
*/
child_points = oom_badness(child,
oc->memcg, oc->nodemask, oc->totalpages);
if (child_points > victim_points) {
/* Move our task reference from the old victim to the new one. */
put_task_struct(victim);
victim = child;
victim_points = child_points;
get_task_struct(victim);
}
}
}
read_unlock(&tasklist_lock);
/*
* Find a live thread of the victim that still owns an mm and lock it;
* if there is none, the victim already released its mm and there is
* nothing left to kill.
*/
p = find_lock_task_mm(victim);
if (!p) {
put_task_struct(victim);
return;
} else if (victim != p) {
/* Retarget the kill at the thread that actually holds the mm. */
get_task_struct(p);
put_task_struct(victim);
victim = p;
}
/* Get a reference to safely compare mm after task_unlock(victim) */
mm = victim->mm;
mmgrab(mm);
/* Raise event before sending signal: task reaper must see this */
count_vm_event(OOM_KILL);
count_memcg_event_mm(mm, OOM_KILL);
/*
* We should send SIGKILL before granting access to memory reserves
* in order to prevent the OOM victim from depleting the memory
* reserves from the user space under its control.
*/
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
mark_oom_victim(victim);
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
K(get_mm_counter(victim->mm, MM_ANONPAGES)),
K(get_mm_counter(victim->mm, MM_FILEPAGES)),
K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
task_unlock(victim);
/*
* Kill all user processes sharing victim->mm in other thread groups, if
* any. They don't get access to memory reserves, though, to avoid
* depletion of all memory. This prevents mm->mmap_sem livelock when an
* oom killed thread cannot exit because it requires the semaphore and
* its contended by another thread trying to allocate memory itself.
* That thread will now get access to memory reserves since it has a
* pending fatal signal.
*/
rcu_read_lock();
for_each_process(p) {
if (!process_shares_mm(p, mm))
continue;
if (same_thread_group(p, victim))
continue;
if (is_global_init(p)) {
/*
* Never kill init; and since init pins this mm, the reaper
* must skip it (MMF_OOM_SKIP) rather than tear it down.
*/
can_oom_reap = false;
set_bit(MMF_OOM_SKIP, &mm->flags);
pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
task_pid_nr(victim), victim->comm,
task_pid_nr(p), p->comm);
continue;
}
/*
* No use_mm() user needs to read from the userspace so we are
* ok to reap it.
*/
if (unlikely(p->flags & PF_KTHREAD))
continue;
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
}
rcu_read_unlock();
if (can_oom_reap)
wake_oom_reaper(victim);
/* Drop our temporary mm pin and the victim task reference. */
mmdrop(mm);
put_task_struct(victim);
}
|
Safe
|
[
"CWE-416"
] |
linux
|
687cb0884a714ff484d038e9190edc874edcf146
|
2.042405960560627e+38
| 133 |
mm, oom_reaper: gather each vma to prevent leaking TLB entry
tlb_gather_mmu(&tlb, mm, 0, -1) means gathering the whole virtual memory
space. In this case, tlb->fullmm is true. Some archs like arm64
doesn't flush TLB when tlb->fullmm is true:
commit 5a7862e83000 ("arm64: tlbflush: avoid flushing when fullmm == 1").
Which causes leaking of tlb entries.
Will clarifies his patch:
"Basically, we tag each address space with an ASID (PCID on x86) which
is resident in the TLB. This means we can elide TLB invalidation when
pulling down a full mm because we won't ever assign that ASID to
another mm without doing TLB invalidation elsewhere (which actually
just nukes the whole TLB).
I think that means that we could potentially not fault on a kernel
uaccess, because we could hit in the TLB"
There could be a window between complete_signal() sending IPI to other
cores and all threads sharing this mm are really kicked off from cores.
In this window, the oom reaper may calls tlb_flush_mmu_tlbonly() to
flush TLB then frees pages. However, due to the above problem, the TLB
entries are not really flushed on arm64. Other threads are possible to
access these pages through TLB entries. Moreover, a copy_to_user() can
also write to these pages without generating page fault, causes
use-after-free bugs.
This patch gathers each vma instead of gathering full vm space. In this
case tlb->fullmm is not true. The behavior of oom reaper become similar
to munmapping before do_exit, which should be safe for all archs.
Link: http://lkml.kernel.org/r/20171107095453.179940-1-wangnan0@huawei.com
Fixes: aac453635549 ("mm, oom: introduce oom reaper")
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Bob Liu <liubo95@huawei.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
void OSD::create_recoverystate_perf()
{
dout(10) << "create_recoverystate_perf" << dendl;
PerfCountersBuilder rs_perf(cct, "recoverystate_perf", rs_first, rs_last);
rs_perf.add_time_avg(rs_initial_latency, "initial_latency", "Initial recovery state latency");
rs_perf.add_time_avg(rs_started_latency, "started_latency", "Started recovery state latency");
rs_perf.add_time_avg(rs_reset_latency, "reset_latency", "Reset recovery state latency");
rs_perf.add_time_avg(rs_start_latency, "start_latency", "Start recovery state latency");
rs_perf.add_time_avg(rs_primary_latency, "primary_latency", "Primary recovery state latency");
rs_perf.add_time_avg(rs_peering_latency, "peering_latency", "Peering recovery state latency");
rs_perf.add_time_avg(rs_backfilling_latency, "backfilling_latency", "Backfilling recovery state latency");
rs_perf.add_time_avg(rs_waitremotebackfillreserved_latency, "waitremotebackfillreserved_latency", "Wait remote backfill reserved recovery state latency");
rs_perf.add_time_avg(rs_waitlocalbackfillreserved_latency, "waitlocalbackfillreserved_latency", "Wait local backfill reserved recovery state latency");
rs_perf.add_time_avg(rs_notbackfilling_latency, "notbackfilling_latency", "Notbackfilling recovery state latency");
rs_perf.add_time_avg(rs_repnotrecovering_latency, "repnotrecovering_latency", "Repnotrecovering recovery state latency");
rs_perf.add_time_avg(rs_repwaitrecoveryreserved_latency, "repwaitrecoveryreserved_latency", "Rep wait recovery reserved recovery state latency");
rs_perf.add_time_avg(rs_repwaitbackfillreserved_latency, "repwaitbackfillreserved_latency", "Rep wait backfill reserved recovery state latency");
rs_perf.add_time_avg(rs_reprecovering_latency, "reprecovering_latency", "RepRecovering recovery state latency");
rs_perf.add_time_avg(rs_activating_latency, "activating_latency", "Activating recovery state latency");
rs_perf.add_time_avg(rs_waitlocalrecoveryreserved_latency, "waitlocalrecoveryreserved_latency", "Wait local recovery reserved recovery state latency");
rs_perf.add_time_avg(rs_waitremoterecoveryreserved_latency, "waitremoterecoveryreserved_latency", "Wait remote recovery reserved recovery state latency");
rs_perf.add_time_avg(rs_recovering_latency, "recovering_latency", "Recovering recovery state latency");
rs_perf.add_time_avg(rs_recovered_latency, "recovered_latency", "Recovered recovery state latency");
rs_perf.add_time_avg(rs_clean_latency, "clean_latency", "Clean recovery state latency");
rs_perf.add_time_avg(rs_active_latency, "active_latency", "Active recovery state latency");
rs_perf.add_time_avg(rs_replicaactive_latency, "replicaactive_latency", "Replicaactive recovery state latency");
rs_perf.add_time_avg(rs_stray_latency, "stray_latency", "Stray recovery state latency");
rs_perf.add_time_avg(rs_getinfo_latency, "getinfo_latency", "Getinfo recovery state latency");
rs_perf.add_time_avg(rs_getlog_latency, "getlog_latency", "Getlog recovery state latency");
rs_perf.add_time_avg(rs_waitactingchange_latency, "waitactingchange_latency", "Waitactingchange recovery state latency");
rs_perf.add_time_avg(rs_incomplete_latency, "incomplete_latency", "Incomplete recovery state latency");
rs_perf.add_time_avg(rs_down_latency, "down_latency", "Down recovery state latency");
rs_perf.add_time_avg(rs_getmissing_latency, "getmissing_latency", "Getmissing recovery state latency");
rs_perf.add_time_avg(rs_waitupthru_latency, "waitupthru_latency", "Waitupthru recovery state latency");
rs_perf.add_time_avg(rs_notrecovering_latency, "notrecovering_latency", "Notrecovering recovery state latency");
recoverystate_perf = rs_perf.create_perf_counters();
cct->get_perfcounters_collection()->add(recoverystate_perf);
}
|
Safe
|
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
|
3.853706505227127e+37
| 41 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <sage@redhat.com>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
| 0 |
void TensorFlowMinimum(const T* input1_data, const Dims<4>& input1_dims,
                       const T* input2_data, T* output_data,
                       const Dims<4>& output_dims) {
  // Legacy Dims<4>-based entry point: convert the dimension descriptors to
  // shape objects and delegate to the shape-based Minimum() implementation.
  // NOTE(review): input2 appears to share input1's dims (only one input dims
  // parameter is taken) — confirm against the callers.
  const auto input_shape = DimsToShape(input1_dims);
  const auto result_shape = DimsToShape(output_dims);
  Minimum(input_shape, input1_data, input2_data, result_shape, output_data);
}
|
Safe
|
[
"CWE-703",
"CWE-835"
] |
tensorflow
|
dfa22b348b70bb89d6d6ec0ff53973bacb4f4695
|
2.590984404437593e+38
| 6 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
| 0 |
/*
 * nfc_deactivate_target - deactivate the device's currently active target.
 * @dev:        nfc device
 * @target_idx: index of the target the caller believes is active
 * @mode:       deactivation mode passed through to the driver
 *
 * Returns 0 on success, -ENODEV if the device is shutting down, or
 * -ENOTCONN if no target is active or the active target's index does not
 * match @target_idx. All state checks and the driver callback run under
 * the device lock.
 */
int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode)
{
	int rc = 0;

	pr_debug("dev_name=%s target_idx=%u\n",
		 dev_name(&dev->dev), target_idx);

	device_lock(&dev->dev);

	if (dev->shutting_down) {
		rc = -ENODEV;
	} else if (!dev->active_target ||
		   dev->active_target->idx != target_idx) {
		rc = -ENOTCONN;
	} else {
		/* Stop the presence-check timer before tearing the target down. */
		if (dev->ops->check_presence)
			del_timer_sync(&dev->check_pres_timer);
		dev->ops->deactivate_target(dev, dev->active_target, mode);
		dev->active_target = NULL;
	}

	device_unlock(&dev->dev);
	return rc;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
da5c0f119203ad9728920456a0f52a6d850c01cd
|
7.4221797917725e+36
| 34 |
nfc: replace improper check device_is_registered() in netlink related functions
The device_is_registered() in nfc core is used to check whether
nfc device is registered in netlink related functions such as
nfc_fw_download(), nfc_dev_up() and so on. Although device_is_registered()
is protected by device_lock, there is still a race condition between
device_del() and device_is_registered(). The root cause is that
kobject_del() in device_del() is not protected by device_lock.
(cleanup task) | (netlink task)
|
nfc_unregister_device | nfc_fw_download
device_del | device_lock
... | if (!device_is_registered)//(1)
kobject_del//(2) | ...
... | device_unlock
The device_is_registered() returns the value of state_in_sysfs and
the state_in_sysfs is set to zero in kobject_del(). If we pass check in
position (1), then set zero in position (2). As a result, the check
in position (1) is useless.
This patch uses bool variable instead of device_is_registered() to judge
whether the nfc device is registered, which is well synchronized.
Fixes: 3e256b8f8dfa ("NFC: add nfc subsystem core")
Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.