func
string | target
string | cwe
list | project
string | commit_id
string | hash
string | size
int64 | message
string | vul
int64 |
---|---|---|---|---|---|---|---|---|
void kvm_disable_steal_time(void)
{
if (!has_steal_clock)
return;
wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
|
Safe
|
[] |
kvm
|
29fa6825463c97e5157284db80107d1bfac5d77b
|
3.0543266539777965e+38
| 7 |
x86, kvm: Clear paravirt_enabled on KVM guests for espfix32's benefit
paravirt_enabled has the following effects:
- Disables the F00F bug workaround warning. There is no F00F bug
workaround any more because Linux's standard IDT handling already
works around the F00F bug, but the warning still exists. This
is only cosmetic, and, in any event, there is no such thing as
KVM on a CPU with the F00F bug.
- Disables 32-bit APM BIOS detection. On a KVM paravirt system,
there should be no APM BIOS anyway.
- Disables tboot. I think that the tboot code should check the
CPUID hypervisor bit directly if it matters.
- paravirt_enabled disables espfix32. espfix32 should *not* be
disabled under KVM paravirt.
The last point is the purpose of this patch. It fixes a leak of the
high 16 bits of the kernel stack address on 32-bit KVM paravirt
guests. Fixes CVE-2014-8134.
Cc: stable@vger.kernel.org
Suggested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
void buildDummyLexicalParent(
IRBuilder &builder,
Function *parent,
Function *child) {
// FunctionScopeAnalysis works through CreateFunctionInsts, so we have to add
// that even though these functions are never invoked.
auto *block = builder.createBasicBlock(parent);
builder.setInsertionBlock(block);
builder.createUnreachableInst();
auto *inst = builder.createCreateFunctionInst(child);
builder.createReturnInst(inst);
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
hermes
|
091835377369c8fd5917d9b87acffa721ad2a168
|
2.2509085210701594e+38
| 12 |
Correctly restore whether or not a function is an inner generator
Summary:
If a generator was large enough to be lazily compiled, we would lose
that information when reconstituting the function's context. This meant
the function was generated as a regular function instead of a generator.
#utd-hermes-ignore-android
Reviewed By: tmikov
Differential Revision: D23580247
fbshipit-source-id: af5628bf322cbdc7c7cdfbb5f8d0756328518ea1
| 0 |
zsetstrokecolor(i_ctx_t * i_ctx_p)
{
int code;
code = zswapcolors(i_ctx_p);
if (code < 0)
return code;
/* Set up for the continuation procedure which will finish by restoring the fill colour space */
/* Make sure the exec stack has enough space */
check_estack(1);
/* Now, the actual continuation routine */
push_op_estack(setstrokecolor_cont);
code = zsetcolor(i_ctx_p);
if (code >= 0)
return o_push_estack;
return code;
}
|
Vulnerable
|
[] |
ghostpdl
|
ea735ba37dc0fd5f5622d031830b9a559dec1cc9
|
7.282248001131977e+37
| 21 |
Fix error condition for SC and CS
The SC and CS PDF operators correctly checked the return code from the
underlying setcolor and setcolorspace code, but we had already
set up the exec stack for handling a non-error return.
We have to do this before calling the underlying code, as that also
uses a state machine, and alters the exec stack. We must push our
own execution context first.
Ordinarily this isn't a problem, but if we have a custom error handler
which doesn't stop the interpreter, then we would continue on to try
and use what we'd pushed onto the exec stack, with predictably dire
results.
Here we avoid this by saving the exec stack pointer on entry, and if
an error occurs, restoring back to that point before returning control
to the PostScript interpreter.
A minor point, but we now also reset the space/color on an error as
well, previously it would have been left with the wrong space set.
| 1 |
static json_t * generate_new_credential(struct config_module * config, json_t * j_params, const char * username) {
json_t * j_query, * j_return;
char * username_escaped, * mod_name_escaped, * username_clause, * challenge_hash;
int res;
size_t challenge_b64_len, challenge_len = (size_t)json_integer_value(json_object_get(j_params, "challenge-length"));
unsigned char challenge_b64[challenge_len*2], challenge[challenge_len+1];
char session[SESSION_LENGTH+1] = {0}, * session_hash;
gnutls_rnd(GNUTLS_RND_NONCE, challenge, challenge_len);
if (o_base64_encode(challenge, challenge_len, challenge_b64, &challenge_b64_len)) {
challenge_b64[challenge_b64_len] = '\0';
if ((challenge_hash = generate_hash(config->hash_algorithm, (const char *)challenge_b64)) != NULL) {
rand_string(session, SESSION_LENGTH);
if ((session_hash = generate_hash(config->hash_algorithm, session)) != NULL) {
username_escaped = h_escape_string_with_quotes(config->conn, username);
mod_name_escaped = h_escape_string_with_quotes(config->conn, json_string_value(json_object_get(j_params, "mod_name")));
username_clause = msprintf(" (SELECT gswu_id FROM "G_TABLE_WEBAUTHN_USER" WHERE UPPER(gswu_username) = UPPER(%s) AND gswu_mod_name = %s)", username_escaped, mod_name_escaped);
// Disable all credential with status 0 (new) of the same user
j_query = json_pack("{sss{si}s{s{ssss+}si}}",
"table",
G_TABLE_WEBAUTHN_CREDENTIAL,
"set",
"gswc_status",
2,
"where",
"gswu_id",
"operator",
"raw",
"value",
" =",
username_clause,
"gswc_status",
0);
res = h_update(config->conn, j_query, NULL);
json_decref(j_query);
if (res == H_OK) {
// Insert new credential
j_query = json_pack("{sss{s{ss}sssssi}}",
"table",
G_TABLE_WEBAUTHN_CREDENTIAL,
"values",
"gswu_id",
"raw",
username_clause,
"gswc_session_hash",
session_hash,
"gswc_challenge_hash",
challenge_hash,
"gswc_status",
0);
res = h_insert(config->conn, j_query, NULL);
json_decref(j_query);
if (res == H_OK) {
j_return = json_pack("{sis{ssss}}", "result", G_OK, "credential", "session", session, "challenge", challenge_b64);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error executing j_query insert");
config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
j_return = json_pack("{si}", "result", G_ERROR_DB);
}
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error executing j_query update");
config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
j_return = json_pack("{si}", "result", G_ERROR_DB);
}
o_free(username_clause);
o_free(username_escaped);
o_free(mod_name_escaped);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error generate_hash session");
j_return = json_pack("{si}", "result", G_ERROR);
}
o_free(session_hash);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error generate_hash challenge");
j_return = json_pack("{si}", "result", G_ERROR);
}
o_free(challenge_hash);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error o_base64_encode challenge");
j_return = json_pack("{si}", "result", G_ERROR);
}
return j_return;
}
|
Safe
|
[
"CWE-120",
"CWE-787"
] |
glewlwyd
|
0efd112bb62f566877750ad62ee828bff579b4e2
|
1.763183891055672e+38
| 83 |
Fix fido2 signature validation bug
| 0 |
Supports_Condition_Obj Parser::parse_supports_operator(bool top_level)
{
Supports_Condition_Obj cond = parse_supports_condition_in_parens(/*parens_required=*/top_level);
if (cond.isNull()) return {};
while (true) {
Supports_Operator::Operand op = Supports_Operator::OR;
if (lex < kwd_and >()) { op = Supports_Operator::AND; }
else if(!lex < kwd_or >()) { break; }
lex < css_whitespace >();
Supports_Condition_Obj right = parse_supports_condition_in_parens(/*parens_required=*/true);
// Supports_Condition* cc = SASS_MEMORY_NEW(Supports_Condition, *static_cast<Supports_Condition*>(cond));
cond = SASS_MEMORY_NEW(Supports_Operator, pstate, cond, right, op);
}
return cond;
}
|
Safe
|
[
"CWE-674"
] |
libsass
|
f2db04883e5fff4e03777dcc1eb60d4373c45be1
|
1.584962088000678e+38
| 18 |
Make `parse_css_variable_value` non-recursive
Fixes #2658 stack overflow
| 0 |
static void php_wddx_process_data(void *user_data, const XML_Char *s, int len)
{
st_entry *ent;
wddx_stack *stack = (wddx_stack *)user_data;
TSRMLS_FETCH();
if (!wddx_stack_is_empty(stack) && !stack->done) {
wddx_stack_top(stack, (void**)&ent);
switch (Z_TYPE_P(ent)) {
case ST_STRING:
if (Z_STRLEN_P(ent->data) == 0) {
STR_FREE(Z_STRVAL_P(ent->data));
Z_STRVAL_P(ent->data) = estrndup(s, len);
Z_STRLEN_P(ent->data) = len;
} else {
Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1);
memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len);
Z_STRLEN_P(ent->data) += len;
Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0';
}
break;
case ST_BINARY:
if (Z_STRLEN_P(ent->data) == 0) {
STR_FREE(Z_STRVAL_P(ent->data));
Z_STRVAL_P(ent->data) = estrndup(s, len + 1);
} else {
Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1);
memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len);
}
Z_STRLEN_P(ent->data) += len;
Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0';
break;
case ST_NUMBER:
Z_TYPE_P(ent->data) = IS_STRING;
Z_STRLEN_P(ent->data) = len;
Z_STRVAL_P(ent->data) = estrndup(s, len);
convert_scalar_to_number(ent->data TSRMLS_CC);
break;
case ST_BOOLEAN:
if (!strcmp(s, "true")) {
Z_LVAL_P(ent->data) = 1;
} else if (!strcmp(s, "false")) {
Z_LVAL_P(ent->data) = 0;
} else {
stack->top--;
zval_ptr_dtor(&ent->data);
if (ent->varname)
efree(ent->varname);
efree(ent);
}
break;
case ST_DATETIME: {
char *tmp;
tmp = emalloc(len + 1);
memcpy(tmp, s, len);
tmp[len] = '\0';
Z_LVAL_P(ent->data) = php_parse_date(tmp, NULL);
/* date out of range < 1969 or > 2038 */
if (Z_LVAL_P(ent->data) == -1) {
Z_TYPE_P(ent->data) = IS_STRING;
Z_STRLEN_P(ent->data) = len;
Z_STRVAL_P(ent->data) = estrndup(s, len);
}
efree(tmp);
}
break;
default:
break;
}
}
}
|
Vulnerable
|
[] |
php-src
|
1785d2b805f64eaaacf98c14c9e13107bf085ab1
|
2.868964865710925e+38
| 78 |
Fixed bug #70741: Session WDDX Packet Deserialization Type Confusion Vulnerability
| 1 |
S3BootScriptLibDeinitialize (
IN EFI_HANDLE ImageHandle,
IN EFI_SYSTEM_TABLE *SystemTable
)
{
EFI_STATUS Status;
if (!mAcpiS3Enable) {
return RETURN_SUCCESS;
}
DEBUG ((EFI_D_INFO, "%a() in %a module\n", __FUNCTION__, gEfiCallerBaseName));
if (mEventDxeSmmReadyToLock != NULL) {
//
// Close the DxeSmmReadyToLock event.
//
Status = gBS->CloseEvent (mEventDxeSmmReadyToLock);
ASSERT_EFI_ERROR (Status);
}
if (mBootScriptSmst != NULL) {
if (mRegistrationSmmExitBootServices != NULL) {
//
// Unregister SmmExitBootServices notification.
//
Status = mBootScriptSmst->SmmRegisterProtocolNotify (
&gEdkiiSmmExitBootServicesProtocolGuid,
NULL,
&mRegistrationSmmExitBootServices
);
ASSERT_EFI_ERROR (Status);
}
if (mRegistrationSmmLegacyBoot != NULL) {
//
// Unregister SmmLegacyBoot notification.
//
Status = mBootScriptSmst->SmmRegisterProtocolNotify (
&gEdkiiSmmLegacyBootProtocolGuid,
NULL,
&mRegistrationSmmLegacyBoot
);
ASSERT_EFI_ERROR (Status);
}
if (mRegistrationSmmReadyToLock != NULL) {
//
// Unregister SmmReadyToLock notification.
//
Status = mBootScriptSmst->SmmRegisterProtocolNotify (
&gEfiSmmReadyToLockProtocolGuid,
NULL,
&mRegistrationSmmReadyToLock
);
ASSERT_EFI_ERROR (Status);
}
}
//
// Free the resources allocated and set PCDs to 0.
//
if (mS3BootScriptTableAllocated) {
Status = gBS->FreePages ((EFI_PHYSICAL_ADDRESS) (UINTN) mS3BootScriptTablePtr, EFI_SIZE_TO_PAGES(sizeof(SCRIPT_TABLE_PRIVATE_DATA)));
ASSERT_EFI_ERROR (Status);
Status = PcdSet64S (PcdS3BootScriptTablePrivateDataPtr, 0);
ASSERT_EFI_ERROR (Status);
}
if ((mBootScriptSmst != NULL) && mS3BootScriptTableSmmAllocated) {
Status = mBootScriptSmst->SmmFreePool (mS3BootScriptTableSmmPtr);
ASSERT_EFI_ERROR (Status);
Status = PcdSet64S (PcdS3BootScriptTablePrivateSmmDataPtr, 0);
ASSERT_EFI_ERROR (Status);
}
return RETURN_SUCCESS;
}
|
Safe
|
[
"CWE-787"
] |
edk2
|
322ac05f8bbc1bce066af1dabd1b70ccdbe28891
|
2.5367466195252136e+38
| 75 |
MdeModulePkg/PiDxeS3BootScriptLib: Fix potential numeric truncation (CVE-2019-14563)
REF:https://bugzilla.tianocore.org/show_bug.cgi?id=2001
For S3BootScriptLib APIs:
S3BootScriptSaveIoWrite
S3BootScriptSaveMemWrite
S3BootScriptSavePciCfgWrite
S3BootScriptSavePciCfg2Write
S3BootScriptSaveSmbusExecute
S3BootScriptSaveInformation
S3BootScriptSaveInformationAsciiString
S3BootScriptLabel (happen in S3BootScriptLabelInternal())
possible numeric truncations will happen that may lead to S3 boot script
entry with improper size being returned to store the boot script data.
This commit will add checks to prevent this kind of issue.
Please note that the remaining S3BootScriptLib APIs:
S3BootScriptSaveIoReadWrite
S3BootScriptSaveMemReadWrite
S3BootScriptSavePciCfgReadWrite
S3BootScriptSavePciCfg2ReadWrite
S3BootScriptSaveStall
S3BootScriptSaveDispatch2
S3BootScriptSaveDispatch
S3BootScriptSaveMemPoll
S3BootScriptSaveIoPoll
S3BootScriptSavePciPoll
S3BootScriptSavePci2Poll
S3BootScriptCloseTable
S3BootScriptExecute
S3BootScriptMoveLastOpcode
S3BootScriptCompare
are not affected by such numeric truncation.
Signed-off-by: Hao A Wu <hao.a.wu@intel.com>
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
Reviewed-by: Eric Dong <eric.dong@intel.com>
Acked-by: Jian J Wang <jian.j.wang@intel.com>
| 0 |
GF_Err elng_box_dump(GF_Box *a, FILE * trace)
{
GF_ExtendedLanguageBox *p = (GF_ExtendedLanguageBox *)a;
gf_isom_box_dump_start(a, "ExtendedLanguageBox", trace);
gf_fprintf(trace, "LanguageCode=\"%s\">\n", p->extended_language);
gf_isom_box_dump_done("ExtendedLanguageBox", a, trace);
return GF_OK;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
|
1.1500923712890702e+38
| 8 |
fixed #2138
| 0 |
e_ews_connection_delete_attachments_finish (EEwsConnection *cnc,
GAsyncResult *result,
gchar **new_change_key,
GError **error)
{
GSimpleAsyncResult *simple;
EwsAsyncData *async_data;
g_return_val_if_fail (cnc != NULL, FALSE);
g_return_val_if_fail (
g_simple_async_result_is_valid (
result, G_OBJECT (cnc), e_ews_connection_delete_attachments),
FALSE);
simple = G_SIMPLE_ASYNC_RESULT (result);
async_data = g_simple_async_result_get_op_res_gpointer (simple);
if (g_simple_async_result_propagate_error (simple, error))
return FALSE;
if (new_change_key)
*new_change_key = async_data->sync_state;
else
g_free (async_data->sync_state);
return TRUE;
}
|
Safe
|
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
|
2.329555246975242e+38
| 27 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
| 0 |
request_is_satisfied (NautilusDirectory *directory,
NautilusFile *file,
Request request)
{
if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_LIST) &&
!(directory->details->directory_loaded &&
directory->details->directory_loaded_sent_notification))
{
return FALSE;
}
if (REQUEST_WANTS_TYPE (request, REQUEST_DIRECTORY_COUNT))
{
if (has_problem (directory, file, lacks_directory_count))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_INFO))
{
if (has_problem (directory, file, lacks_info))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILESYSTEM_INFO))
{
if (has_problem (directory, file, lacks_filesystem_info))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_DEEP_COUNT))
{
if (has_problem (directory, file, lacks_deep_count))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_THUMBNAIL))
{
if (has_problem (directory, file, lacks_thumbnail))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MOUNT))
{
if (has_problem (directory, file, lacks_mount))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MIME_LIST))
{
if (has_problem (directory, file, lacks_mime_list))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_LINK_INFO))
{
if (has_problem (directory, file, lacks_link_info))
{
return FALSE;
}
}
return TRUE;
}
|
Safe
|
[
"CWE-20"
] |
nautilus
|
1630f53481f445ada0a455e9979236d31a8d3bb0
|
1.5280309581033537e+38
| 77 |
mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents for running random programs by a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added unless it has access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991
| 0 |
static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr);
if (SUCCESS != write_state->retval) {
return;
}
if (!fwrite(data, length, 1, write_state->outfile)) {
write_state->retval = CANT_WRITE_ERROR;
}
write_state->bytes_written += length;
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
pngquant
|
b7c217680cda02dddced245d237ebe8c383be285
|
2.259405938188041e+38
| 14 |
Fix integer overflow in rwpng.h (CVE-2016-5735)
Reported by Choi Jaeseung
Found with Sparrow (http://ropas.snu.ac.kr/sparrow)
| 0 |
ReadData(mat_t *mat, matvar_t *matvar)
{
if ( mat == NULL || matvar == NULL || mat->fp == NULL )
return 1;
else if ( mat->version == MAT_FT_MAT5 )
return Mat_VarRead5(mat,matvar);
#if defined(MAT73) && MAT73
else if ( mat->version == MAT_FT_MAT73 )
return Mat_VarRead73(mat,matvar);
#endif
else if ( mat->version == MAT_FT_MAT4 )
return Mat_VarRead4(mat,matvar);
return 1;
}
|
Safe
|
[
"CWE-401"
] |
matio
|
a47b7cd3aca70e9a0bddf8146eb4ab0cbd19c2c3
|
7.308132392139066e+37
| 14 |
Fix memory leak
As reported by https://github.com/tbeu/matio/issues/131
| 0 |
TfLiteStatus copy_ledger(const TfLiteSparsity* sparsity, TfLiteTensor* ledger) {
if (sparsity == nullptr) {
return kTfLiteOk;
}
const auto* array_segments = sparsity->dim_metadata[1].array_segments;
const auto* array_indices = sparsity->dim_metadata[1].array_indices;
uint8_t* output_data = GetTensorData<uint8_t>(ledger);
int output_data_ptr = 0;
for (int i = 0; i < array_segments->size - 1; i++) {
int row_start = array_segments->data[i];
int row_end = array_segments->data[i + 1];
if (row_end - row_start > UINT8_MAX) {
return kTfLiteError;
}
// Copy num of non-zero blocks in row i.
output_data[output_data_ptr] = static_cast<uint8_t>(row_end - row_start);
output_data_ptr++;
for (int j = row_start; j < row_end; j++) {
if (array_indices->data[j] > UINT8_MAX) {
return kTfLiteError;
}
// Copy indices of non-zero blocks in row i.
output_data[output_data_ptr] =
static_cast<uint8_t>(array_indices->data[j]);
output_data_ptr++;
}
}
return kTfLiteOk;
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
|
5.563464556004234e+36
| 32 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
| 0 |
static int f_midi_register_card(struct f_midi *midi)
{
struct snd_card *card;
struct snd_rawmidi *rmidi;
int err;
static struct snd_device_ops ops = {
.dev_free = f_midi_snd_free,
};
err = snd_card_new(&midi->gadget->dev, midi->index, midi->id,
THIS_MODULE, 0, &card);
if (err < 0) {
ERROR(midi, "snd_card_new() failed\n");
goto fail;
}
midi->card = card;
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, midi, &ops);
if (err < 0) {
ERROR(midi, "snd_device_new() failed: error %d\n", err);
goto fail;
}
strcpy(card->driver, f_midi_longname);
strcpy(card->longname, f_midi_longname);
strcpy(card->shortname, f_midi_shortname);
/* Set up rawmidi */
snd_component_add(card, "MIDI");
err = snd_rawmidi_new(card, card->longname, 0,
midi->out_ports, midi->in_ports, &rmidi);
if (err < 0) {
ERROR(midi, "snd_rawmidi_new() failed: error %d\n", err);
goto fail;
}
midi->rmidi = rmidi;
midi->in_last_port = 0;
strcpy(rmidi->name, card->shortname);
rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
SNDRV_RAWMIDI_INFO_INPUT |
SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = midi;
rmidi->private_free = f_midi_rmidi_free;
midi->free_ref++;
/*
* Yes, rawmidi OUTPUT = USB IN, and rawmidi INPUT = USB OUT.
* It's an upside-down world being a gadget.
*/
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops);
/* register it - we're ready to go */
err = snd_card_register(card);
if (err < 0) {
ERROR(midi, "snd_card_register() failed\n");
goto fail;
}
VDBG(midi, "%s() finished ok\n", __func__);
return 0;
fail:
f_midi_unregister_card(midi);
return err;
}
|
Safe
|
[
"CWE-415"
] |
linux
|
7fafcfdf6377b18b2a726ea554d6e593ba44349f
|
2.6057421564614657e+38
| 66 |
USB: gadget: f_midi: fixing a possible double-free in f_midi
It looks like there is a possibility of a double-free vulnerability on an
error path of the f_midi_set_alt function in the f_midi driver. If the
path is feasible then free_ep_req gets called twice:
req->complete = f_midi_complete;
err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
=> ...
usb_gadget_giveback_request
=>
f_midi_complete (CALLBACK)
(inside f_midi_complete, for various cases of status)
free_ep_req(ep, req); // first kfree
if (err) {
ERROR(midi, "%s: couldn't enqueue request: %d\n",
midi->out_ep->name, err);
free_ep_req(midi->out_ep, req); // second kfree
return err;
}
The double-free possibility was introduced with commit ad0d1a058eac
("usb: gadget: f_midi: fix leak on failed to enqueue out requests").
Found by MOXCAFE tool.
Signed-off-by: Tuba Yavuz <tuba@ece.ufl.edu>
Fixes: ad0d1a058eac ("usb: gadget: f_midi: fix leak on failed to enqueue out requests")
Acked-by: Felipe Balbi <felipe.balbi@linux.intel.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
static CURLcode bearssl_connect_step1(struct Curl_easy *data,
struct connectdata *conn, int sockindex)
{
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
struct ssl_backend_data *backend = connssl->backend;
const char * const ssl_cafile = SSL_CONN_CONFIG(CAfile);
#ifndef CURL_DISABLE_PROXY
const char *hostname = SSL_IS_PROXY() ? conn->http_proxy.host.name :
conn->host.name;
#else
const char *hostname = conn->host.name;
#endif
const bool verifypeer = SSL_CONN_CONFIG(verifypeer);
const bool verifyhost = SSL_CONN_CONFIG(verifyhost);
CURLcode ret;
unsigned version_min, version_max;
#ifdef ENABLE_IPV6
struct in6_addr addr;
#else
struct in_addr addr;
#endif
switch(SSL_CONN_CONFIG(version)) {
case CURL_SSLVERSION_SSLv2:
failf(data, "BearSSL does not support SSLv2");
return CURLE_SSL_CONNECT_ERROR;
case CURL_SSLVERSION_SSLv3:
failf(data, "BearSSL does not support SSLv3");
return CURLE_SSL_CONNECT_ERROR;
case CURL_SSLVERSION_TLSv1_0:
version_min = BR_TLS10;
version_max = BR_TLS10;
break;
case CURL_SSLVERSION_TLSv1_1:
version_min = BR_TLS11;
version_max = BR_TLS11;
break;
case CURL_SSLVERSION_TLSv1_2:
version_min = BR_TLS12;
version_max = BR_TLS12;
break;
case CURL_SSLVERSION_DEFAULT:
case CURL_SSLVERSION_TLSv1:
version_min = BR_TLS10;
version_max = BR_TLS12;
break;
default:
failf(data, "BearSSL: unknown CURLOPT_SSLVERSION");
return CURLE_SSL_CONNECT_ERROR;
}
if(ssl_cafile) {
ret = load_cafile(ssl_cafile, &backend->anchors, &backend->anchors_len);
if(ret != CURLE_OK) {
if(verifypeer) {
failf(data, "error setting certificate verify locations."
" CAfile: %s", ssl_cafile);
return ret;
}
infof(data, "error setting certificate verify locations,"
" continuing anyway:\n");
}
}
/* initialize SSL context */
br_ssl_client_init_full(&backend->ctx, &backend->x509.minimal,
backend->anchors, backend->anchors_len);
br_ssl_engine_set_versions(&backend->ctx.eng, version_min, version_max);
br_ssl_engine_set_buffer(&backend->ctx.eng, backend->buf,
sizeof(backend->buf), 1);
/* initialize X.509 context */
backend->x509.vtable = &x509_vtable;
backend->x509.verifypeer = verifypeer;
backend->x509.verifyhost = verifyhost;
br_ssl_engine_set_x509(&backend->ctx.eng, &backend->x509.vtable);
if(SSL_SET_OPTION(primary.sessionid)) {
void *session;
Curl_ssl_sessionid_lock(data);
if(!Curl_ssl_getsessionid(data, conn, &session, NULL, sockindex)) {
br_ssl_engine_set_session_parameters(&backend->ctx.eng, session);
infof(data, "BearSSL: re-using session ID\n");
}
Curl_ssl_sessionid_unlock(data);
}
if(conn->bits.tls_enable_alpn) {
int cur = 0;
/* NOTE: when adding more protocols here, increase the size of the
* protocols array in `struct ssl_backend_data`.
*/
#ifdef USE_NGHTTP2
if(data->state.httpversion >= CURL_HTTP_VERSION_2
#ifndef CURL_DISABLE_PROXY
&& (!SSL_IS_PROXY() || !conn->bits.tunnel_proxy)
#endif
) {
backend->protocols[cur++] = NGHTTP2_PROTO_VERSION_ID;
infof(data, "ALPN, offering %s\n", NGHTTP2_PROTO_VERSION_ID);
}
#endif
backend->protocols[cur++] = ALPN_HTTP_1_1;
infof(data, "ALPN, offering %s\n", ALPN_HTTP_1_1);
br_ssl_engine_set_protocol_names(&backend->ctx.eng,
backend->protocols, cur);
}
if((1 == Curl_inet_pton(AF_INET, hostname, &addr))
#ifdef ENABLE_IPV6
|| (1 == Curl_inet_pton(AF_INET6, hostname, &addr))
#endif
) {
if(verifyhost) {
failf(data, "BearSSL: "
"host verification of IP address is not supported");
return CURLE_PEER_FAILED_VERIFICATION;
}
hostname = NULL;
}
if(!br_ssl_client_reset(&backend->ctx, hostname, 0))
return CURLE_FAILED_INIT;
backend->active = TRUE;
connssl->connecting_state = ssl_connect_2;
return CURLE_OK;
}
|
Vulnerable
|
[
"CWE-290"
] |
curl
|
b09c8ee15771c614c4bf3ddac893cdb12187c844
|
2.0890441618805092e+38
| 134 |
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890
| 1 |
static int __ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
ext4_fsblk_t index = from >> PAGE_SHIFT;
unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize, pos;
ext4_lblk_t iblock;
struct inode *inode = mapping->host;
struct buffer_head *bh;
struct page *page;
int err = 0;
page = find_or_create_page(mapping, from >> PAGE_SHIFT,
mapping_gfp_constraint(mapping, ~__GFP_FS));
if (!page)
return -ENOMEM;
blocksize = inode->i_sb->s_blocksize;
iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
/* Find the buffer that contains "offset" */
bh = page_buffers(page);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
iblock++;
pos += blocksize;
}
if (buffer_freed(bh)) {
BUFFER_TRACE(bh, "freed: skip");
goto unlock;
}
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "unmapped");
ext4_get_block(inode, iblock, bh, 0);
/* unmapped? It's a hole - nothing to do */
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "still unmapped");
goto unlock;
}
}
/* Ok, it's mapped. Make sure it's up-to-date */
if (PageUptodate(page))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
err = -EIO;
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
bh_offset(bh));
if (err) {
clear_buffer_uptodate(bh);
goto unlock;
}
}
}
if (ext4_should_journal_data(inode)) {
BUFFER_TRACE(bh, "get write access");
err = ext4_journal_get_write_access(handle, bh);
if (err)
goto unlock;
}
zero_user(page, offset, length);
BUFFER_TRACE(bh, "zeroed end of block");
if (ext4_should_journal_data(inode)) {
err = ext4_handle_dirty_metadata(handle, inode, bh);
} else {
err = 0;
mark_buffer_dirty(bh);
if (ext4_should_order_data(inode))
err = ext4_jbd2_inode_add_write(handle, inode, from,
length);
}
unlock:
unlock_page(page);
put_page(page);
return err;
}
|
Safe
|
[
"CWE-703"
] |
linux
|
ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1
|
1.6328149104517294e+38
| 92 |
ext4: check journal inode extents more carefully
Currently, system zones just track ranges of block, that are "important"
fs metadata (bitmaps, group descriptors, journal blocks, etc.). This
however complicates how extent tree (or indirect blocks) can be checked
for inodes that actually track such metadata - currently the journal
inode but arguably we should be treating quota files or resize inode
similarly. We cannot run __ext4_ext_check() on such metadata inodes when
loading their extents as that would immediately trigger the validity
checks and so we just hack around that and special-case the journal
inode. This however leads to a situation that a journal inode which has
extent tree of depth at least one can have invalid extent tree that gets
unnoticed until ext4_cache_extents() crashes.
To overcome this limitation, track inode number each system zone belongs
to (0 is used for zones not belonging to any inode). We can then verify
inode number matches the expected one when verifying extent tree and
thus avoid the false errors. With this there's no need to to
special-case journal inode during extent tree checking anymore so remove
it.
Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode")
Reported-by: Wolfgang Frisch <wolfgang.frisch@suse.com>
Reviewed-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20200728130437.7804-4-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
| 0 |
static u8 service_to_ulpipe(u16 service_id)
{
switch (service_id) {
case WMI_CONTROL_SVC:
return 4;
case WMI_BEACON_SVC:
case WMI_CAB_SVC:
case WMI_UAPSD_SVC:
case WMI_MGMT_SVC:
case WMI_DATA_VO_SVC:
case WMI_DATA_VI_SVC:
case WMI_DATA_BE_SVC:
case WMI_DATA_BK_SVC:
return 1;
default:
return 0;
}
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
853acf7caf10b828102d92d05b5c101666a6142b
|
2.2881577310051825e+38
| 18 |
ath9k_htc: release allocated buffer if timed out
In htc_config_pipe_credits, htc_setup_complete, and htc_connect_service
if time out happens, the allocated buffer needs to be released.
Otherwise there will be memory leak.
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
| 0 |
GF_Err kind_box_read(GF_Box *s,GF_BitStream *bs)
{
GF_KindBox *ptr = (GF_KindBox *)s;
if (ptr->size) {
u32 bytesToRead = (u32) ptr->size;
char *data;
u32 schemeURIlen;
data = (char*)gf_malloc(bytesToRead * sizeof(char));
if (!data) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, data, bytesToRead);
/*safety check in case the string is not null-terminated*/
if (data[bytesToRead-1]) {
data = (char*)gf_realloc(data, sizeof(char)*(bytesToRead + 1));
if (!data) return GF_OUT_OF_MEM;
data[bytesToRead] = 0;
bytesToRead++;
}
ptr->schemeURI = gf_strdup(data);
if (!ptr->schemeURI) return GF_OUT_OF_MEM;
schemeURIlen = (u32) strlen(data);
if (bytesToRead > schemeURIlen+1) {
/* read the value */
char *data_value = data + schemeURIlen +1;
ptr->value = gf_strdup(data_value);
if (!ptr->value) return GF_OUT_OF_MEM;
}
gf_free(data);
}
return GF_OK;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
|
7.763204377913852e+36
| 31 |
fixed #1587
| 0 |
/*
 * Allocate and minimally initialize a new socket for the given
 * network namespace, address family and protocol.
 * Returns the new sock, or NULL if the protocol allocator failed.
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);

	if (!sk)
		return NULL;

	sk->sk_family = family;
	/*
	 * See comment in struct sock definition to understand
	 * why we need sk_prot_creator -acme
	 */
	sk->sk_prot = sk->sk_prot_creator = prot;
	sock_lock_init(sk);
	/* Take a reference on the namespace and bind the socket to it. */
	sock_net_set(sk, get_net(net));

	return sk;
}
|
Safe
|
[
"CWE-264"
] |
linux-2.6
|
df0bca049d01c0ee94afb7cd5dfd959541e6c8da
|
2.3032710976155386e+38
| 19 |
net: 4 bytes kernel memory disclosure in SO_BSDCOMPAT gsopt try #2
In function sock_getsockopt() located in net/core/sock.c, optval v.val
is not correctly initialized and directly returned in userland in case
we have SO_BSDCOMPAT option set.
This dummy code should trigger the bug:
int main(void)
{
unsigned char buf[4] = { 0, 0, 0, 0 };
int len;
int sock;
sock = socket(33, 2, 2);
getsockopt(sock, 1, SO_BSDCOMPAT, &buf, &len);
printf("%x%x%x%x\n", buf[0], buf[1], buf[2], buf[3]);
close(sock);
}
Here is a patch that fix this bug by initalizing v.val just after its
declaration.
Signed-off-by: Clément Lecigne <clement.lecigne@netasq.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/*
 * Mark the given logical partition ID as claimed.
 * NOTE(review): lpid_inuse appears to be the global bitmap of allocated
 * LPIDs — confirm against the matching allocate/free routines.
 */
void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
|
Safe
|
[
"CWE-476"
] |
linux
|
ac64115a66c18c01745bbd3c47a36b124e5fd8c0
|
2.193387719533816e+38
| 4 |
KVM: PPC: Fix oops when checking KVM_CAP_PPC_HTM
The following program causes a kernel oops:
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
main()
{
int fd = open("/dev/kvm", O_RDWR);
ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM);
}
This happens because when using the global KVM fd with
KVM_CHECK_EXTENSION, kvm_vm_ioctl_check_extension() gets
called with a NULL kvm argument, which gets dereferenced
in is_kvmppc_hv_enabled(). Spotted while reading the code.
Let's use the hv_enabled fallback variable, like everywhere
else in this function.
Fixes: 23528bb21ee2 ("KVM: PPC: Introduce KVM_CAP_PPC_HTM")
Cc: stable@vger.kernel.org # v4.7+
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
| 0 |
/*
 * Parse an HTML end tag ("</name>") at the current input position.
 * Emits SAX endElement when the tag matches the top of the name stack,
 * handles auto-closing of elements, and tolerates mismatched tags
 * (with recovery-mode resynchronization on a missing '>').
 * Returns 1 if the top-of-stack element was closed, 0 otherwise.
 */
htmlParseEndTag(htmlParserCtxtPtr ctxt)
{
    const xmlChar *name;
    const xmlChar *oldname;
    int i, ret;

    /* Must be looking at "</" or this is not an end tag at all. */
    if ((CUR != '<') || (NXT(1) != '/')) {
	htmlParseErr(ctxt, XML_ERR_LTSLASH_REQUIRED,
	             "htmlParseEndTag: '</' not found\n", NULL, NULL);
	return (0);
    }
    SKIP(2);

    name = htmlParseHTMLName(ctxt);
    if (name == NULL)
        return (0);
    /*
     * We should definitely be at the ending "S? '>'" part
     */
    SKIP_BLANKS;
    if ((!IS_CHAR_CH(CUR)) || (CUR != '>')) {
        htmlParseErr(ctxt, XML_ERR_GT_REQUIRED,
	             "End tag : expected '>'\n", NULL, NULL);
	if (ctxt->recovery) {
	    /*
	     * We're not at the ending > !!
	     * Error, unless in recover mode where we search forwards
	     * until we find a >
	     */
	    while (CUR != '\0' && CUR != '>') NEXT;
	    NEXT;
	}
    } else
	NEXT;

    /*
     * if we ignored misplaced tags in htmlParseStartTag don't pop them
     * out now.
     */
    if ((ctxt->depth > 0) &&
        (xmlStrEqual(name, BAD_CAST "html") ||
         xmlStrEqual(name, BAD_CAST "body") ||
	 xmlStrEqual(name, BAD_CAST "head"))) {
	ctxt->depth--;
	return (0);
    }

    /*
     * If the name read is not one of the element in the parsing stack
     * then return, it's just an error.
     */
    for (i = (ctxt->nameNr - 1); i >= 0; i--)
        /* linear scan of the open-element stack for a matching name */
        if (xmlStrEqual(name, ctxt->nameTab[i]))
            break;
    if (i < 0) {
        htmlParseErr(ctxt, XML_ERR_TAG_NAME_MISMATCH,
	             "Unexpected end tag : %s\n", name, NULL);
        return (0);
    }


    /*
     * Check for auto-closure of HTML elements.
     */

    htmlAutoCloseOnClose(ctxt, name);

    /*
     * Well formedness constraints, opening and closing must match.
     * With the exception that the autoclose may have popped stuff out
     * of the stack.
     */
    if (!xmlStrEqual(name, ctxt->name)) {
        if ((ctxt->name != NULL) && (!xmlStrEqual(ctxt->name, name))) {
            htmlParseErr(ctxt, XML_ERR_TAG_NAME_MISMATCH,
	                 "Opening and ending tag mismatch: %s and %s\n",
			 name, ctxt->name);
        }
    }

    /*
     * SAX: End of Tag
     */
    oldname = ctxt->name;
    if ((oldname != NULL) && (xmlStrEqual(oldname, name))) {
        /* Exact match with top of stack: report it and pop. */
        if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL))
            ctxt->sax->endElement(ctxt->userData, name);
	htmlNodeInfoPop(ctxt);
        htmlnamePop(ctxt);
        ret = 1;
    } else {
        ret = 0;
    }

    return (ret);
}
|
Safe
|
[
"CWE-399"
] |
libxml2
|
de0cc20c29cb3f056062925395e0f68d2250a46f
|
2.1718971769730678e+38
| 97 |
Fix some buffer conversion issues
https://bugzilla.gnome.org/show_bug.cgi?id=690202
Buffer overflow errors originating from xmlBufGetInputBase in 2.9.0
The pointers from the context input were not properly reset after
that call which can do reallocations.
| 0 |
/*
 * Set the global default for line-number tracking and return the
 * previous setting so callers can restore it later.
 */
xmlLineNumbersDefault(int val) {
    int previous = xmlLineNumbersDefaultValue;

    xmlLineNumbersDefaultValue = val;
    return previous;
}
|
Safe
|
[
"CWE-119"
] |
libxml2
|
23f05e0c33987d6605387b300c4be5da2120a7ab
|
2.0539584723362973e+37
| 6 |
Detect excessive entities expansion upon replacement
If entities expansion in the XML parser is asked for,
it is possble to craft relatively small input document leading
to excessive on-the-fly content generation.
This patch accounts for those replacement and stop parsing
after a given threshold. it can be bypassed as usual with the
HUGE parser option.
| 0 |
/*
 * Version lookup for the SSLv3 server method table: only SSL3_VERSION
 * resolves; any other version yields NULL.
 */
static const SSL_METHOD *ssl3_get_server_method(int ver)
{
    return (ver == SSL3_VERSION) ? SSLv3_server_method() : NULL;
}
|
Safe
|
[
"CWE-20"
] |
openssl
|
b19d8143212ae5fbc9cebfd51c01f802fabccd33
|
2.67928881085522e+37
| 7 |
Fix DHE Null CKE vulnerability
If client auth is used then a server can seg fault in the event of a DHE
cipher being used and a zero length ClientKeyExchange message being sent
by the client. This could be exploited in a DoS attack.
CVE-2015-1787
Reviewed-by: Richard Levitte <levitte@openssl.org>
| 0 |
/*
 * TIOCMSET handler: translate the TIOCM_* set/clear requests into MCR
 * bit changes, update the shadow copy under the port lock, then push
 * the new value to the hardware. Always returns 0.
 */
static int edge_tiocmset(struct tty_struct *tty,
					unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int enable = 0, disable = 0;
	unsigned int mcr;
	unsigned long flags;

	/* Precompute the MCR bit deltas outside the lock. */
	if (set & TIOCM_RTS)
		enable |= MCR_RTS;
	if (set & TIOCM_DTR)
		enable |= MCR_DTR;
	if (set & TIOCM_LOOP)
		enable |= MCR_LOOPBACK;
	if (clear & TIOCM_RTS)
		disable |= MCR_RTS;
	if (clear & TIOCM_DTR)
		disable |= MCR_DTR;
	if (clear & TIOCM_LOOP)
		disable |= MCR_LOOPBACK;

	/* Apply set first, then clear — clear wins on overlap, as before. */
	spin_lock_irqsave(&edge_port->ep_lock, flags);
	mcr = (edge_port->shadow_mcr | enable) & ~disable;
	edge_port->shadow_mcr = mcr;
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	/* Write the shadow value out to the device (outside the lock). */
	restore_mcr(edge_port, mcr);
	return 0;
}
|
Safe
|
[
"CWE-191"
] |
linux
|
654b404f2a222f918af9b0cd18ad469d0c941a8e
|
2.726653977458978e+38
| 30 |
USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <stable@vger.kernel.org> # 2.6.30
Signed-off-by: Johan Hovold <johan@kernel.org>
| 0 |
/*
 * Generated Zend VM specialized handler (op1=UNUSED, op2=CV) for the
 * compound assignment opcodes on object properties/dimensions
 * (e.g. $obj->prop += $x, $obj[$k] .= $x). Consumes TWO opcodes: the
 * main op plus the following OP_DATA carrying the RHS value.
 * NOTE: machine-generated from zend_vm_def.h — do not hand-edit logic.
 */
static int ZEND_FASTCALL zend_binary_assign_op_obj_helper_SPEC_UNUSED_CV(int (*binary_op)(zval *result, zval *op1, zval *op2 TSRMLS_DC), ZEND_OPCODE_HANDLER_ARGS)
{
	zend_op *opline = EX(opline);
	zend_op *op_data = opline+1; /* the companion ZEND_OP_DATA opcode */
	zend_free_op free_op_data1;
	zval **object_ptr = _get_obj_zval_ptr_ptr_unused(TSRMLS_C);
	zval *object;
	zval *property = _get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC);
	zval *value = get_zval_ptr(&op_data->op1, EX(Ts), &free_op_data1, BP_VAR_R);
	znode *result = &opline->result;
	int have_get_ptr = 0;

	/* Dead for this specialization (op1 is UNUSED, not VAR). */
	if (IS_UNUSED == IS_VAR && !object_ptr) {
		zend_error_noreturn(E_ERROR, "Cannot use string offset as an object");
	}

	EX_T(result->u.var).var.ptr_ptr = NULL;
	/* Auto-vivify: convert null/false/"" into an object if needed. */
	make_real_object(object_ptr TSRMLS_CC);
	object = *object_ptr;

	if (Z_TYPE_P(object) != IS_OBJECT) {
		zend_error(E_WARNING, "Attempt to assign property of non-object");
		FREE_OP(free_op_data1);

		if (!RETURN_VALUE_UNUSED(result)) {
			EX_T(result->u.var).var.ptr = EG(uninitialized_zval_ptr);
			EX_T(result->u.var).var.ptr_ptr = NULL;
			PZVAL_LOCK(EG(uninitialized_zval_ptr));
		}
	} else {
		/* here we are sure we are dealing with an object */
		if (0) {
			MAKE_REAL_ZVAL_PTR(property);
		}

		/* here property is a string */
		if (opline->extended_value == ZEND_ASSIGN_OBJ
			&& Z_OBJ_HT_P(object)->get_property_ptr_ptr) {
			/* Fast path: operate in place through a property pointer. */
			zval **zptr = Z_OBJ_HT_P(object)->get_property_ptr_ptr(object, property TSRMLS_CC);
			if (zptr != NULL) { 			/* NULL means no success in getting PTR */
				SEPARATE_ZVAL_IF_NOT_REF(zptr);

				have_get_ptr = 1;
				binary_op(*zptr, *zptr, value TSRMLS_CC);
				if (!RETURN_VALUE_UNUSED(result)) {
					EX_T(result->u.var).var.ptr = *zptr;
					EX_T(result->u.var).var.ptr_ptr = NULL;
					PZVAL_LOCK(*zptr);
				}
			}
		}

		if (!have_get_ptr) {
			/* Slow path: read, compute on a separated copy, write back. */
			zval *z = NULL;

			if (opline->extended_value == ZEND_ASSIGN_OBJ) {
				if (Z_OBJ_HT_P(object)->read_property) {
					z = Z_OBJ_HT_P(object)->read_property(object, property, BP_VAR_R TSRMLS_CC);
				}
			} else /* if (opline->extended_value == ZEND_ASSIGN_DIM) */ {
				if (Z_OBJ_HT_P(object)->read_dimension) {
					z = Z_OBJ_HT_P(object)->read_dimension(object, property, BP_VAR_R TSRMLS_CC);
				}
			}
			if (z) {
				if (Z_TYPE_P(z) == IS_OBJECT && Z_OBJ_HT_P(z)->get) {
					/* Unwrap proxy objects via their get() handler. */
					zval *value = Z_OBJ_HT_P(z)->get(z TSRMLS_CC);

					if (Z_REFCOUNT_P(z) == 0) {
						GC_REMOVE_ZVAL_FROM_BUFFER(z);
						zval_dtor(z);
						FREE_ZVAL(z);
					}
					z = value;
				}
				Z_ADDREF_P(z);
				SEPARATE_ZVAL_IF_NOT_REF(&z);
				binary_op(z, z, value TSRMLS_CC);
				if (opline->extended_value == ZEND_ASSIGN_OBJ) {
					Z_OBJ_HT_P(object)->write_property(object, property, z TSRMLS_CC);
				} else /* if (opline->extended_value == ZEND_ASSIGN_DIM) */ {
					Z_OBJ_HT_P(object)->write_dimension(object, property, z TSRMLS_CC);
				}
				if (!RETURN_VALUE_UNUSED(result)) {
					EX_T(result->u.var).var.ptr = z;
					EX_T(result->u.var).var.ptr_ptr = NULL;
					PZVAL_LOCK(z);
				}
				zval_ptr_dtor(&z);
			} else {
				zend_error(E_WARNING, "Attempt to assign property of non-object");
				if (!RETURN_VALUE_UNUSED(result)) {
					EX_T(result->u.var).var.ptr = EG(uninitialized_zval_ptr);
					EX_T(result->u.var).var.ptr_ptr = NULL;
					PZVAL_LOCK(EG(uninitialized_zval_ptr));
				}
			}
		}

		if (0) {
			zval_ptr_dtor(&property);
		} else {
		}
		FREE_OP(free_op_data1);
	}

	/* assign_obj has two opcodes! */
	ZEND_VM_INC_OPCODE();
	ZEND_VM_NEXT_OPCODE();
}
|
Safe
|
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
|
2.0703555320261387e+38
| 112 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
| 0 |
/*
  SQL function MASTER_POS_WAIT(log_name, pos [, timeout [, connection_name]]).
  Blocks until the slave SQL thread for the chosen connection has executed
  past the given binlog position, or the timeout expires.
  Returns the number of events processed, 0/NULL on bad input or when
  called from a slave thread, and NULL (with event_count 0) on timeout.
*/
longlong Item_master_pos_wait::val_int()
{
  DBUG_ASSERT(fixed == 1);
  THD* thd = current_thd;
  String *log_name = args[0]->val_str(&value);
  int event_count= 0;

  null_value=0;
  /* Never wait inside a slave thread, and require a non-empty log name. */
  if (thd->slave_thread || !log_name || !log_name->length())
  {
    null_value = 1;
    return 0;
  }
#ifdef HAVE_REPLICATION
  longlong pos = (ulong)args[1]->val_int();
  longlong timeout = (arg_count>=3) ? args[2]->val_int() : 0 ;
  String connection_name_buff;
  LEX_STRING connection_name;
  Master_info *mi= NULL;
  if (arg_count >= 4)
  {
    /* Explicit multi-source connection name given as 4th argument. */
    String *con;
    if (!(con= args[3]->val_str(&connection_name_buff)))
      goto err;

    connection_name.str= (char*) con->ptr();
    connection_name.length= con->length();
    if (check_master_connection_name(&connection_name))
    {
      my_error(ER_WRONG_ARGUMENTS, MYF(ME_JUST_WARNING),
               "MASTER_CONNECTION_NAME");
      goto err;
    }
  }
  else
    connection_name= thd->variables.default_master_connection;

  if (!(mi= get_master_info(&connection_name, Sql_condition::WARN_LEVEL_WARN)))
    goto err;

  /* wait_for_pos() returns -2 on timeout/improper wait -> SQL NULL. */
  if ((event_count = mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2)
  {
    null_value = 1;
    event_count=0;
  }
  mi->release();
#endif
  return event_count;

#ifdef HAVE_REPLICATION
err:
  {
    null_value = 1;
    return 0;
  }
#endif
}
|
Safe
|
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
|
3.571254984984254e+37
| 57 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix.
| 0 |
INST_HANDLER (sbrx) {	// SBRC Rr, b
			// SBRS Rr, b
	int b = buf[0] & 0x7;
	int r = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x01) << 4);
	// Zero-initialize so r_strbuf_fini() below is always safe: the
	// previous code freed next_op.esil even when the recursive analysis
	// never initialized it, causing an invalid free on uninitialized
	// stack memory.
	RAnalOp next_op = {0};

	// calculate next instruction size (call recursively avr_op_analyze)
	// and free next_op's esil string (we dont need it now);
	// only recurse when there are actually bytes left after this insn
	if (len > op->size) {
		avr_op_analyze (anal,
				&next_op,
				op->addr + op->size, buf + op->size, len - op->size,
				cpu);
	}
	r_strbuf_fini (&next_op.esil);
	op->jump = op->addr + next_op.size + 2;

	// cycles
	op->cycles = 1;	// XXX: This is a bug, because depends on eval state,
			// so it cannot be really be known until this
			// instruction is executed by the ESIL interpreter!!!
			// In case of evaluating to false, this instruction
			// needs 2/3 cycles, elsewhere it needs only 1 cycle.
	ESIL_A ("%d,1,<<,r%d,&,", b, r);		// Rr(b)
	ESIL_A ((buf[1] & 0xe) == 0xc
			? "!,"		// SBRC => branch if cleared
			: "!,!,");	// SBRS => branch if set
	ESIL_A ("?{,%"PFMT64d",pc,=,},", op->jump);	// ?true => jmp
}
|
Vulnerable
|
[
"CWE-125"
] |
radare2
|
25a3703ef2e015bbe1d1f16f6b2f63bb10dd34f4
|
2.703798272325798e+37
| 27 |
Fix invalid free in RAnal.avr
| 1 |
/*
 * Report whether this vCPU has any pending event that should make it
 * runnable: completed async page faults, APIC events, a PV unhalt
 * request, a pending exception, an injectable NMI or SMI, a deliverable
 * interrupt, a pending Hyper-V synthetic timer, or (in guest mode) a
 * pending nested hypervisor timer.
 */
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	/* Async page-fault completions waiting to be delivered. */
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;

	if (kvm_apic_has_events(vcpu))
		return true;

	if (vcpu->arch.pv.pv_unhalted)
		return true;

	if (vcpu->arch.exception.pending)
		return true;

	/* NMI: either explicitly requested, or pending and injectable now. */
	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
	    (vcpu->arch.nmi_pending &&
	     static_call(kvm_x86_nmi_allowed)(vcpu, false)))
		return true;

	/* SMI: same pattern as NMI above. */
	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    (vcpu->arch.smi_pending &&
	     static_call(kvm_x86_smi_allowed)(vcpu, false)))
		return true;

	/* Interrupts count only if the vCPU can currently accept them. */
	if (kvm_arch_interrupt_allowed(vcpu) &&
	    (kvm_cpu_has_interrupt(vcpu) ||
	    kvm_guest_apic_has_interrupt(vcpu)))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

	/* Nested virtualization: vendor-specific hv timer, if implemented. */
	if (is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->hv_timer_pending &&
	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
		return true;

	return false;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
|
3.3101232452931852e+38
| 39 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20211210163625.2886-6-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/*
 * Handle a DAV:principal-search-property-set REPORT.
 * The request element must be empty; the response enumerates every
 * supported principal search property with its namespace.
 * Returns HTTP_OK, or HTTP_BAD_REQUEST if the request had child elements.
 */
static int report_prin_search_prop_set(struct transaction_t *txn,
                                       struct meth_params *rparams __attribute__((unused)),
                                       xmlNodePtr inroot,
                                       struct propfind_ctx *fctx)
{
    xmlNodePtr child, node;
    const struct prop_entry *prop;

    /* Reject any element content in the request body. */
    for (child = inroot->children; child; child = child->next) {
        if (child->type != XML_ELEMENT_NODE) continue;

        txn->error.desc =
            "DAV:principal-search-property-set XML element MUST be empty";
        return HTTP_BAD_REQUEST;
    }

    /* Emit one <principal-search-property><prop>...</prop></...> per entry. */
    for (prop = prin_search_props; prop->name; prop++) {
        node = xmlNewChild(fctx->root, NULL,
                           BAD_CAST "principal-search-property", NULL);
        node = xmlNewChild(node, NULL, BAD_CAST "prop", NULL);
        ensure_ns(fctx->ns, prop->ns, fctx->root,
                  known_namespaces[prop->ns].href,
                  known_namespaces[prop->ns].prefix);
        xmlNewChild(node, fctx->ns[prop->ns], BAD_CAST prop->name, NULL);
    }

    return HTTP_OK;
}
|
Safe
|
[] |
cyrus-imapd
|
6703ff881b6056e0c045a7b795ce8ba1bbb87027
|
2.1130243597757984e+38
| 29 |
http_dav.c: add 'private' Cache-Control directive for cacheable responses that require authentication
| 0 |
/*
 * Construct the mount namespace for a snap application.
 * Chooses between "normal" mode (pivot into the base snap's rootfs with
 * a fixed set of host bind mounts) and "legacy" mode (stay on the host
 * root with only a couple of shared mounts), then sets up the private
 * /tmp and /dev/pts, applies security-backend bind mounts, and finally
 * tries to return to the caller's original working directory.
 * Dies (via die()) on any unrecoverable error.
 */
void sc_populate_mount_ns(struct sc_apparmor *apparmor, int snap_update_ns_fd,
			  const char *base_snap_name, const char *snap_name)
{
	// Get the current working directory before we start fiddling with
	// mounts and possibly pivot_root.  At the end of the whole process, we
	// will try to re-locate to the same directory (if possible).
	char *vanilla_cwd SC_CLEANUP(sc_cleanup_string) = NULL;
	vanilla_cwd = get_current_dir_name();
	if (vanilla_cwd == NULL) {
		die("cannot get the current working directory");
	}
	// Classify the current distribution, as claimed by /etc/os-release.
	sc_distro distro = sc_classify_distro();

	// Check which mode we should run in, normal or legacy.
	if (sc_should_use_normal_mode(distro, base_snap_name)) {
		// In normal mode we use the base snap as / and set up several bind mounts.
		const struct sc_mount mounts[] = {
			{"/dev"},	// because it contains devices on host OS
			{"/etc"},	// because that's where /etc/resolv.conf lives, perhaps a bad idea
			{"/home"},	// to support /home/*/snap and home interface
			{"/root"},	// because that is $HOME for services
			{"/proc"},	// fundamental filesystem
			{"/sys"},	// fundamental filesystem
			{"/tmp"},	// to get writable tmp
			{"/var/snap"},	// to get access to global snap data
			{"/var/lib/snapd"},	// to get access to snapd state and seccomp profiles
			{"/var/tmp"},	// to get access to the other temporary directory
			{"/run"},	// to get /run with sockets and what not
			{"/lib/modules",.is_optional = true},	// access to the modules of the running kernel
			{"/usr/src"},	// FIXME: move to SecurityMounts in system-trace interface
			{"/var/log"},	// FIXME: move to SecurityMounts in log-observe interface
#ifdef MERGED_USR
			{"/run/media", true, "/media"},	// access to the users removable devices
#else
			{"/media", true},	// access to the users removable devices
#endif				// MERGED_USR
			{"/run/netns", true},	// access to the 'ip netns' network namespaces
			// The /mnt directory is optional in base snaps to ensure backwards
			// compatibility with the first version of base snaps that was
			// released.
			{"/mnt",.is_optional = true},	// to support the removable-media interface
			{"/var/lib/extrausers",.is_optional = true},	// access to UID/GID of extrausers (if available)
			{},
		};
		char rootfs_dir[PATH_MAX] = { 0 };
		sc_must_snprintf(rootfs_dir, sizeof rootfs_dir,
				 "%s/%s/current/", SNAP_MOUNT_DIR,
				 base_snap_name);
		if (access(rootfs_dir, F_OK) != 0) {
			if (sc_streq(base_snap_name, "core")) {
				// As a special fallback, allow the
				// base snap to degrade from "core" to
				// "ubuntu-core". This is needed for
				// the migration tests.
				base_snap_name = "ubuntu-core";
				sc_must_snprintf(rootfs_dir, sizeof rootfs_dir,
						 "%s/%s/current/",
						 SNAP_MOUNT_DIR,
						 base_snap_name);
				if (access(rootfs_dir, F_OK) != 0) {
					die("cannot locate the core or legacy core snap (current symlink missing?)");
				}
			}
			// If after the special case handling above we are
			// still not ok, die
			if (access(rootfs_dir, F_OK) != 0)
				die("cannot locate the base snap: %s", base_snap_name);
		}
		struct sc_mount_config normal_config = {
			.rootfs_dir = rootfs_dir,
			.mounts = mounts,
			.distro = distro,
			.normal_mode = true,
			.base_snap_name = base_snap_name,
		};
		sc_bootstrap_mount_namespace(&normal_config);
	} else {
		// In legacy mode we don't pivot and instead just arrange bi-
		// directional mount propagation for two directories.
		const struct sc_mount mounts[] = {
			{"/media", true},
			{"/run/netns", true},
			{},
		};
		struct sc_mount_config legacy_config = {
			.rootfs_dir = "/",
			.mounts = mounts,
			.distro = distro,
			.normal_mode = false,
			.base_snap_name = base_snap_name,
		};
		sc_bootstrap_mount_namespace(&legacy_config);
	}

	// set up private mounts
	// TODO: rename this and fold it into bootstrap
	setup_private_mount(snap_name);

	// set up private /dev/pts
	// TODO: fold this into bootstrap
	setup_private_pts();

	// setup the security backend bind mounts
	sc_call_snap_update_ns(snap_update_ns_fd, snap_name, apparmor);

	// Try to re-locate back to vanilla working directory. This can fail
	// because that directory is no longer present.
	if (chdir(vanilla_cwd) != 0) {
		debug("cannot remain in %s, moving to the void directory",
		      vanilla_cwd);
		if (chdir(SC_VOID_DIR) != 0) {
			die("cannot change directory to %s", SC_VOID_DIR);
		}
		debug("successfully moved to %s", SC_VOID_DIR);
	}
}
|
Safe
|
[
"CWE-59",
"CWE-703"
] |
snapd
|
bdbfeebef03245176ae0dc323392bb0522a339b1
|
3.280583958951084e+38
| 116 |
cmd/snap-confine: chown private /tmp parent to root.root
When snap-confine creates a private /tmp directory for a given snap it
first creates a temporary directory in /tmp/ named after the snap, along
with a random name. Inside that directory it creates a /tmp directory
with permissions appropriate for a future /tmp, namely 1777.
Up until recently the that directory was owned by the user who first
invoked snap-confine. Since the directory is reused by all the users on
the system this logic makes no sense.
This patch changes the related logic so that the private /tmp directory
is owned by root, just like the real one.
Signed-off-by: Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
| 0 |
/*
 * SAX debug callback for isStandalone(): count the invocation, log it
 * unless running quietly, and always report "not standalone" (0).
 */
isStandaloneDebug(void *ctx ATTRIBUTE_UNUSED)
{
    callbacks++;
    if (!quiet)
        fprintf(SAXdebug, "SAX.isStandalone()\n");
    return(0);
}
|
Safe
|
[
"CWE-125"
] |
libxml2
|
a820dbeac29d330bae4be05d9ecd939ad6b4aa33
|
1.7176936937083424e+38
| 8 |
Bug 758605: Heap-based buffer overread in xmlDictAddString <https://bugzilla.gnome.org/show_bug.cgi?id=758605>
Reviewed by David Kilzer.
* HTMLparser.c:
(htmlParseName): Add bounds check.
(htmlParseNameComplex): Ditto.
* result/HTML/758605.html: Added.
* result/HTML/758605.html.err: Added.
* result/HTML/758605.html.sax: Added.
* runtest.c:
(pushParseTest): The input for the new test case was so small
(4 bytes) that htmlParseChunk() was never called after
htmlCreatePushParserCtxt(), thereby creating a false positive
test failure. Fixed by using a do-while loop so we always call
htmlParseChunk() at least once.
* test/HTML/758605.html: Added.
| 0 |
Get a specific body section's MIME headers */
PHP_FUNCTION(imap_fetchmime)
{
zval *streamind;
zend_long msgno, flags = 0;
pils *imap_le_struct;
char *body;
zend_string *sec;
unsigned long len;
int argc = ZEND_NUM_ARGS();
if (zend_parse_parameters(argc, "rlS|l", &streamind, &msgno, &sec, &flags) == FAILURE) {
return;
}
if (flags && ((flags & ~(FT_UID|FT_PEEK|FT_INTERNAL)) != 0)) {
php_error_docref(NULL, E_WARNING, "invalid value for the options parameter");
RETURN_FALSE;
}
if ((imap_le_struct = (pils *)zend_fetch_resource(Z_RES_P(streamind), "imap", le_imap)) == NULL) {
RETURN_FALSE;
}
if (argc < 4 || !(flags & FT_UID)) {
/* only perform the check if the msgno is a message number and not a UID */
PHP_IMAP_CHECK_MSGNO(msgno);
}
body = mail_fetch_mime(imap_le_struct->imap_stream, msgno, ZSTR_VAL(sec), &len, (argc == 4 ? flags : NIL));
if (!body) {
php_error_docref(NULL, E_WARNING, "No body MIME information available");
RETURN_FALSE;
}
RETVAL_STRINGL(body, len);
|
Safe
|
[
"CWE-88"
] |
php-src
|
336d2086a9189006909ae06c7e95902d7d5ff77e
|
2.2627417534777696e+38
| 36 |
Disable rsh/ssh functionality in imap by default (bug #77153)
| 0 |
/*
 * Look up the cached-index file path for a previously registered MIB
 * directory.  Returns "<persistent_dir>/mib_indexes/<i>" when 'dirname'
 * matches entry i of the _mibindexes table, or NULL if unregistered.
 *
 * NOTE(review): returns a pointer to a static buffer, so the value is
 * overwritten by the next call and the function is not reentrant.
 * NOTE(review): the path points into the persistent directory; if that
 * location is attacker-influenced, callers opening this path may be
 * exposed to symlink tricks -- confirm how callers open/create it.
 */
netsnmp_mibindex_lookup( const char *dirname )
{
    int i;
    static char tmpbuf[300];

    for (i=0; i<_mibindex; i++) {
        if ( _mibindexes[i] &&
             strcmp( _mibindexes[i], dirname ) == 0) {
            snprintf(tmpbuf, sizeof(tmpbuf), "%s/mib_indexes/%d",
                     get_persistent_directory(), i);
            /* Defensive termination; snprintf already NUL-terminates. */
            tmpbuf[sizeof(tmpbuf)-1] = 0;
            DEBUGMSGTL(("mibindex", "lookup: %s (%d) %s\n", dirname, i, tmpbuf ));
            return tmpbuf;
        }
    }
    DEBUGMSGTL(("mibindex", "lookup: (none)\n"));
    return NULL;
}
|
Vulnerable
|
[
"CWE-59",
"CWE-61"
] |
net-snmp
|
4fd9a450444a434a993bc72f7c3486ccce41f602
|
3.0738863631924123e+38
| 18 |
CHANGES: snmpd: Stop reading and writing the mib_indexes/* files
Caching directory contents is something the operating system should do
and is not something Net-SNMP should do. Instead of storing a copy of
the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a
MIB directory.
| 1 |
/*
 * Encode a CT_CLEAR action into 'out' as the Nicira NXAST_CT_CLEAR
 * extension.  The action carries no payload, so the ofpact and the
 * OpenFlow version are unused.
 */
encode_CT_CLEAR(const struct ofpact_null *null OVS_UNUSED,
                enum ofp_version ofp_version OVS_UNUSED,
                struct ofpbuf *out)
{
    put_NXAST_CT_CLEAR(out);
}
|
Safe
|
[
"CWE-125"
] |
ovs
|
9237a63c47bd314b807cda0bd2216264e82edbe8
|
1.4635517610479666e+38
| 6 |
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Justin Pettit <jpettit@ovn.org>
| 0 |
/*
  Default constructor: binary charset with coercible derivation,
  no decimals, zero max length, and signed (unsigned_flag=false).
*/
Type_std_attributes()
 :collation(&my_charset_bin, DERIVATION_COERCIBLE),
  decimals(0), max_length(0), unsigned_flag(false)
{ }
|
Safe
|
[
"CWE-617"
] |
server
|
2e7891080667c59ac80f788eef4d59d447595772
|
2.588888691369546e+38
| 4 |
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <sergey.petrunya@mariadb.com>
| 0 |
/*
 * Tear down the printer gadget's data interface: disable both endpoints
 * (if they were configured) and mark the device as unconfigured.
 * No-op when the interface is already down (interface < 0).
 */
static void printer_reset_interface(struct printer_dev *dev)
{
	unsigned long	flags;

	if (dev->interface < 0)
		return;

	DBG(dev, "%s\n", __func__);

	/* A non-NULL desc indicates the endpoint was enabled. */
	if (dev->in_ep->desc)
		usb_ep_disable(dev->in_ep);

	if (dev->out_ep->desc)
		usb_ep_disable(dev->out_ep);

	/* Clear all state under the lock so other paths see a consistent
	 * "interface down" view. */
	spin_lock_irqsave(&dev->lock, flags);
	dev->in_ep->desc = NULL;
	dev->out_ep->desc = NULL;
	dev->interface = -1;
	spin_unlock_irqrestore(&dev->lock, flags);
}
|
Safe
|
[
"CWE-416"
] |
linux
|
e8d5f92b8d30bb4ade76494490c3c065e12411b1
|
1.612497771381591e+38
| 21 |
usb: gadget: function: printer: fix use-after-free in __lock_acquire
Fix this by increase object reference count.
BUG: KASAN: use-after-free in __lock_acquire+0x3fd4/0x4180
kernel/locking/lockdep.c:3831
Read of size 8 at addr ffff8880683b0018 by task syz-executor.0/3377
CPU: 1 PID: 3377 Comm: syz-executor.0 Not tainted 5.6.11 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xce/0x128 lib/dump_stack.c:118
print_address_description.constprop.4+0x21/0x3c0 mm/kasan/report.c:374
__kasan_report+0x131/0x1b0 mm/kasan/report.c:506
kasan_report+0x12/0x20 mm/kasan/common.c:641
__asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135
__lock_acquire+0x3fd4/0x4180 kernel/locking/lockdep.c:3831
lock_acquire+0x127/0x350 kernel/locking/lockdep.c:4488
__raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
_raw_spin_lock_irqsave+0x35/0x50 kernel/locking/spinlock.c:159
printer_ioctl+0x4a/0x110 drivers/usb/gadget/function/f_printer.c:723
vfs_ioctl fs/ioctl.c:47 [inline]
ksys_ioctl+0xfb/0x130 fs/ioctl.c:763
__do_sys_ioctl fs/ioctl.c:772 [inline]
__se_sys_ioctl fs/ioctl.c:770 [inline]
__x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:770
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x4531a9
Code: ed 60 fc ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48
89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d
01 f0 ff ff 0f 83 bb 60 fc ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007fd14ad72c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 000000000073bfa8 RCX: 00000000004531a9
RDX: fffffffffffffff9 RSI: 000000000000009e RDI: 0000000000000003
RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00000000004bbd61
R13: 00000000004d0a98 R14: 00007fd14ad736d4 R15: 00000000ffffffff
Allocated by task 2393:
save_stack+0x21/0x90 mm/kasan/common.c:72
set_track mm/kasan/common.c:80 [inline]
__kasan_kmalloc.constprop.3+0xa7/0xd0 mm/kasan/common.c:515
kasan_kmalloc+0x9/0x10 mm/kasan/common.c:529
kmem_cache_alloc_trace+0xfa/0x2d0 mm/slub.c:2813
kmalloc include/linux/slab.h:555 [inline]
kzalloc include/linux/slab.h:669 [inline]
gprinter_alloc+0xa1/0x870 drivers/usb/gadget/function/f_printer.c:1416
usb_get_function+0x58/0xc0 drivers/usb/gadget/functions.c:61
config_usb_cfg_link+0x1ed/0x3e0 drivers/usb/gadget/configfs.c:444
configfs_symlink+0x527/0x11d0 fs/configfs/symlink.c:202
vfs_symlink+0x33d/0x5b0 fs/namei.c:4201
do_symlinkat+0x11b/0x1d0 fs/namei.c:4228
__do_sys_symlinkat fs/namei.c:4242 [inline]
__se_sys_symlinkat fs/namei.c:4239 [inline]
__x64_sys_symlinkat+0x73/0xb0 fs/namei.c:4239
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 3368:
save_stack+0x21/0x90 mm/kasan/common.c:72
set_track mm/kasan/common.c:80 [inline]
kasan_set_free_info mm/kasan/common.c:337 [inline]
__kasan_slab_free+0x135/0x190 mm/kasan/common.c:476
kasan_slab_free+0xe/0x10 mm/kasan/common.c:485
slab_free_hook mm/slub.c:1444 [inline]
slab_free_freelist_hook mm/slub.c:1477 [inline]
slab_free mm/slub.c:3034 [inline]
kfree+0xf7/0x410 mm/slub.c:3995
gprinter_free+0x49/0xd0 drivers/usb/gadget/function/f_printer.c:1353
usb_put_function+0x38/0x50 drivers/usb/gadget/functions.c:87
config_usb_cfg_unlink+0x2db/0x3b0 drivers/usb/gadget/configfs.c:485
configfs_unlink+0x3b9/0x7f0 fs/configfs/symlink.c:250
vfs_unlink+0x287/0x570 fs/namei.c:4073
do_unlinkat+0x4f9/0x620 fs/namei.c:4137
__do_sys_unlink fs/namei.c:4184 [inline]
__se_sys_unlink fs/namei.c:4182 [inline]
__x64_sys_unlink+0x42/0x50 fs/namei.c:4182
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
The buggy address belongs to the object at ffff8880683b0000
which belongs to the cache kmalloc-1k of size 1024
The buggy address is located 24 bytes inside of
1024-byte region [ffff8880683b0000, ffff8880683b0400)
The buggy address belongs to the page:
page:ffffea0001a0ec00 refcount:1 mapcount:0 mapping:ffff88806c00e300
index:0xffff8880683b1800 compound_mapcount: 0
flags: 0x100000000010200(slab|head)
raw: 0100000000010200 0000000000000000 0000000600000001 ffff88806c00e300
raw: ffff8880683b1800 000000008010000a 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Reported-by: Kyungtae Kim <kt0755@gmail.com>
Signed-off-by: Zqiang <qiang.zhang@windriver.com>
Signed-off-by: Felipe Balbi <balbi@kernel.org>
| 0 |
/*
 * Report whether the library is operating in FIPS 140 mode.
 * Returns 1 when built with FIPS support and the internal check says
 * mode 1 is active; 0 in every other case (including non-FIPS builds).
 */
int gnutls_fips140_mode_enabled(void)
{
#ifdef ENABLE_FIPS140
	if (_gnutls_fips_mode_enabled() == 1) {
		return 1;
	}
#endif
	return 0;
}
|
Safe
|
[
"CWE-20"
] |
gnutls
|
b0a3048e56611a2deee4976aeba3b8c0740655a6
|
1.0573047248114693e+38
| 10 |
env: use secure_getenv when reading environment variables
| 0 |
/*
 * Reconcile the port's command-list (DMA) and FIS-receive engines with
 * the requested state in PORT_CMD: start an engine that is requested
 * but not running (mapping its buffer), stop one that is running but no
 * longer requested.  Returns 0 on success, -1 if a buffer address was
 * invalid (the corresponding request bit is cleared and an error logged).
 */
static int ahci_cond_start_engines(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    bool want_cmd = pr->cmd & PORT_CMD_START;
    bool have_cmd = pr->cmd & PORT_CMD_LIST_ON;
    bool want_fis = pr->cmd & PORT_CMD_FIS_RX;
    bool have_fis = pr->cmd & PORT_CMD_FIS_ON;

    /* Command-list engine. */
    if (want_cmd && !have_cmd) {
        if (!ahci_map_clb_address(ad)) {
            pr->cmd &= ~PORT_CMD_START;
            error_report("AHCI: Failed to start DMA engine: "
                         "bad command list buffer address");
            return -1;
        }
    } else if (have_cmd && !want_cmd) {
        ahci_unmap_clb_address(ad);
    }

    /* FIS-receive engine. */
    if (want_fis && !have_fis) {
        if (!ahci_map_fis_address(ad)) {
            pr->cmd &= ~PORT_CMD_FIS_RX;
            error_report("AHCI: Failed to start FIS receive engine: "
                         "bad FIS receive buffer address");
            return -1;
        }
    } else if (have_fis && !want_fis) {
        ahci_unmap_fis_address(ad);
    }

    return 0;
}
|
Safe
|
[
"CWE-772",
"CWE-401"
] |
qemu
|
d68f0f778e7f4fbd674627274267f269e40f0b04
|
2.8981568959712158e+38
| 32 |
ide: ahci: call cleanup function in ahci unit
This can avoid memory leak when hotunplug the ahci device.
Signed-off-by: Li Qiang <liqiang6-s@360.cn>
Message-id: 1488449293-80280-4-git-send-email-liqiang6-s@360.cn
Signed-off-by: John Snow <jsnow@redhat.com>
| 0 |
/* Print a human-readable diagnostic for ERR on stderr.
 * Unknown codes are formatted as a hex number into a local buffer.
 *
 * Fix: the "case 0" arm was missing its `break;`, so a successful
 * status (err == 0) fell through and was reported as "out of core".
 * Also use snprintf to keep the default-arm formatting bounded. */
print_error (int err)
{
  const char *p;
  char buf[50];

  switch (err)
    {
    case 0: p = "success"; break;
    case CCID_DRIVER_ERR_OUT_OF_CORE: p = "out of core"; break;
    case CCID_DRIVER_ERR_INV_VALUE: p = "invalid value"; break;
    case CCID_DRIVER_ERR_NO_DRIVER: p = "no driver"; break;
    case CCID_DRIVER_ERR_NOT_SUPPORTED: p = "not supported"; break;
    case CCID_DRIVER_ERR_LOCKING_FAILED: p = "locking failed"; break;
    case CCID_DRIVER_ERR_BUSY: p = "busy"; break;
    case CCID_DRIVER_ERR_NO_CARD: p = "no card"; break;
    case CCID_DRIVER_ERR_CARD_INACTIVE: p = "card inactive"; break;
    case CCID_DRIVER_ERR_CARD_IO_ERROR: p = "card I/O error"; break;
    case CCID_DRIVER_ERR_GENERAL_ERROR: p = "general error"; break;
    case CCID_DRIVER_ERR_NO_READER: p = "no reader"; break;
    case CCID_DRIVER_ERR_ABORTED: p = "aborted"; break;
    default: snprintf (buf, sizeof buf, "0x%05x", err); p = buf; break;
    }
  fprintf (stderr, "operation failed: %s\n", p);
}
|
Safe
|
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
|
1.3053588792081947e+38
| 24 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <wk@gnupg.org>
| 0 |
/* Resolve a relative (descendant) schema node id NODEID against the data
 * tree rooted at START, walking schema and data in lock-step.
 *
 * NODEID is a '/'-separated path of node names; an absolute id (leading
 * '/') is rejected.  For each path token the matching schema node is
 * looked up, then the data children of the current position are scanned
 * for an instance of that schema node.  Choice/case schema nodes are
 * transparent (no data instance exists for them, so the walk continues
 * without moving in the data tree).
 *
 * Returns the matching data node, or NULL when the path is absolute,
 * the schema lookup fails, or no data instance exists. */
resolve_data_descendant_schema_nodeid(const char *nodeid, struct lyd_node *start)
{
    char *str, *token, *p;
    struct lyd_node *result = NULL, *iter;
    const struct lys_node *schema = NULL;

    assert(nodeid && start);

    /* Only relative (descendant) node ids are accepted here. */
    if (nodeid[0] == '/') {
        return NULL;
    }

    /* Work on a private copy so tokens can be NUL-terminated in place. */
    str = p = strdup(nodeid);
    LY_CHECK_ERR_RETURN(!str, LOGMEM(start->schema->module->ctx), NULL);

    while (p) {
        token = p;
        p = strchr(p, '/');
        if (p) {
            *p = '\0';
            p++;
        }

        if (p) {
            /* inner node: containers/choices/cases/leaves allowed */
            if (resolve_descendant_schema_nodeid(token, schema ? schema->child : start->schema,
                                                 LYS_CONTAINER | LYS_CHOICE | LYS_CASE | LYS_LEAF, 0, &schema)
                    || !schema) {
                result = NULL;
                break;
            }

            /* choice/case have no data instance — skip the data step */
            if (schema->nodetype & (LYS_CHOICE | LYS_CASE)) {
                continue;
            }
        } else {
            /* final node: must resolve to a leaf */
            if (resolve_descendant_schema_nodeid(token, schema ? schema->child : start->schema, LYS_LEAF, 0, &schema)
                    || !schema) {
                result = NULL;
                break;
            }
        }

        /* move in the data tree to the instance of the resolved schema node */
        LY_TREE_FOR(result ? result->child : start, iter) {
            if (iter->schema == schema) {
                /* move in data tree according to returned schema */
                result = iter;
                break;
            }
        }
        if (!iter) {
            /* instance not found */
            result = NULL;
            break;
        }
    }
    free(str);

    return result;
}
|
Safe
|
[
"CWE-119"
] |
libyang
|
32fb4993bc8bb49e93e84016af3c10ea53964be5
|
2.686872180860071e+38
| 60 |
schema tree BUGFIX do not check features while still resolving schema
Fixes #723
| 0 |
/* Look up the MAC/VLAN filter entry matching @macaddr and @vlan in the
 * VSI's filter hash table.  Returns the entry, or NULL when either
 * argument is missing or no matching filter exists.  Caller is expected
 * to hold the appropriate filter-list protection (not taken here). */
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *filter;
	u64 hkey;

	if (!vsi || !macaddr)
		return NULL;

	/* Bucket selection uses a hash of the MAC address only; the VLAN
	 * is matched while walking the bucket. */
	hkey = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, filter, hlist, hkey) {
		if (vlan == filter->vlan &&
		    ether_addr_equal(macaddr, filter->macaddr))
			return filter;
	}

	return NULL;
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
27d461333459d282ffa4a2bdb6b215a59d493a8f
|
4.064695539607751e+37
| 17 |
i40e: prevent memory leak in i40e_setup_macvlans
In i40e_setup_macvlans if i40e_setup_channel fails the allocated memory
for ch should be released.
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
| 0 |
/* Release a block obtained from the miniz allocator.
 * Thin wrapper so callers never touch the MZ_FREE macro directly. */
void mz_free(void *p)
{
    MZ_FREE(p);
}
|
Safe
|
[
"CWE-20",
"CWE-190"
] |
tinyexr
|
a685e3332f61cd4e59324bf3f669d36973d64270
|
1.1244420914261504e+38
| 1 |
Make line_no with too large value(2**20) invalid. Fixes #124
| 0 |
/* Boot-time helper: replace the temporary (statically allocated) per-node
 * list structure LIST of CACHEP with a properly kmalloc'ed copy for node
 * NODEID.  Runs during slab bootstrap, hence GFP_NOWAIT and BUG_ON on
 * allocation failure (no recovery is possible this early). */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));

	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	/* Re-point the three slab lists at the new structure's own heads. */
	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}
|
Safe
|
[
"CWE-703"
] |
linux
|
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
|
2.1278093651032316e+38
| 17 |
mm/slab.c: fix SLAB freelist randomization duplicate entries
This patch fixes a bug in the freelist randomization code. When a high
random number is used, the freelist will contain duplicate entries. It
will result in different allocations sharing the same chunk.
It will result in odd behaviours and crashes. It should be uncommon but
it depends on the machines. We saw it happening more often on some
machines (every few hours of running tests).
Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization")
Link: http://lkml.kernel.org/r/20170103181908.143178-1-thgarnie@google.com
Signed-off-by: John Sperbeck <jsperbeck@google.com>
Signed-off-by: Thomas Garnier <thgarnie@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
/* Close an SCTP socket: purge undelivered data, abort or gracefully shut
 * down each association on the endpoint, then release the socket.
 *
 * Locking is order-sensitive: the socket lock is dropped before taking
 * net->sctp.addr_wq_lock, because the addr_wq timeout handler acquires
 * them in that order (addr_wq_lock first, then the socket lock) — see
 * the comment mid-function.  Do not reorder these sections. */
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  The count of
	 * unread data decides below between ABORT and graceful SHUTDOWN. */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint. */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_unhash_established(asoc);
				sctp_association_free(asoc);
				continue;
			}
		}

		/* Unread data, pending reassembly/ordering, or a zero-time
		 * linger all mean we cannot shut down gracefully: ABORT. */
		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			if (chunk)
				sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue. */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}
|
Safe
|
[
"CWE-362",
"CWE-703"
] |
linux
|
2d45a02d0166caf2627fe91897c6ffc3b19514c4
|
1.8527774481089918e+38
| 77 |
sctp: fix ASCONF list handling
->auto_asconf_splist is per namespace and mangled by functions like
sctp_setsockopt_auto_asconf() which doesn't guarantee any serialization.
Also, the call to inet_sk_copy_descendant() was backuping
->auto_asconf_list through the copy but was not honoring
->do_auto_asconf, which could lead to list corruption if it was
different between both sockets.
This commit thus fixes the list handling by using ->addr_wq_lock
spinlock to protect the list. A special handling is done upon socket
creation and destruction for that. Error handlig on sctp_init_sock()
will never return an error after having initialized asconf, so
sctp_destroy_sock() can be called without addrq_wq_lock. The lock now
will be take on sctp_close_sock(), before locking the socket, so we
don't do it in inverse order compared to sctp_addr_wq_timeout_handler().
Instead of taking the lock on sctp_sock_migrate() for copying and
restoring the list values, it's preferred to avoid rewritting it by
implementing sctp_copy_descendant().
Issue was found with a test application that kept flipping sysctl
default_auto_asconf on and off, but one could trigger it by issuing
simultaneous setsockopt() calls on multiple sockets or by
creating/destroying sockets fast enough. This is only triggerable
locally.
Fixes: 9f7d653b67ae ("sctp: Add Auto-ASCONF support (core).")
Reported-by: Ji Jianwen <jiji@redhat.com>
Suggested-by: Neil Horman <nhorman@tuxdriver.com>
Suggested-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/* Map an llhttp_method_t value to its canonical method-name string.
 * The switch arms are generated from HTTP_ALL_METHOD_MAP, so every
 * known method is covered; an unknown value aborts the process rather
 * than returning a bogus name. */
const char* llhttp_method_name(llhttp_method_t method) {
#define HTTP_METHOD_GEN(NUM, NAME, STRING) case HTTP_##NAME: return #STRING;
  switch (method) {
    HTTP_ALL_METHOD_MAP(HTTP_METHOD_GEN)
    default: abort();
  }
#undef HTTP_METHOD_GEN
}
|
Safe
|
[
"CWE-444"
] |
node
|
af488f8dc82d69847992ea1cd2f53dc8082b3b91
|
1.4686608555939656e+38
| 8 |
deps: update llhttp to 6.0.4
Refs: https://hackerone.com/reports/1238099
Refs: https://hackerone.com/reports/1238709
Refs: https://github.com/nodejs-private/llhttp-private/pull/6
Refs: https://github.com/nodejs-private/llhttp-private/pull/5
CVE-ID: CVE-2021-22959
CVE-ID: CVE-2021-22960
PR-URL: https://github.com/nodejs-private/node-private/pull/284
Reviewed-By: Akshay K <iit.akshay@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Robert Nagy <ronagy@icloud.com>
| 0 |
/* If the currently running transaction is blocked, sleep until it is
 * either unblocked or aborted.  A reference is taken on the transaction
 * before dropping trans_lock so it cannot be freed while we wait; the
 * reference is dropped once the wait completes.  No-op when there is no
 * running transaction or it is not blocked. */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		/* Pin the transaction before releasing trans_lock. */
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		/* Also wake on abort so waiters are not stranded. */
		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}
|
Safe
|
[
"CWE-703",
"CWE-667"
] |
linux
|
1cb3db1cf383a3c7dbda1aa0ce748b0958759947
|
3.252448755664099e+38
| 18 |
btrfs: fix deadlock with concurrent chunk allocations involving system chunks
When a task attempting to allocate a new chunk verifies that there is not
currently enough free space in the system space_info and there is another
task that allocated a new system chunk but it did not finish yet the
creation of the respective block group, it waits for that other task to
finish creating the block group. This is to avoid exhaustion of the system
chunk array in the superblock, which is limited, when we have a thundering
herd of tasks allocating new chunks. This problem was described and fixed
by commit eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array
due to concurrent allocations").
However there are two very similar scenarios where this can lead to a
deadlock:
1) Task B allocated a new system chunk and task A is waiting on task B
to finish creation of the respective system block group. However before
task B ends its transaction handle and finishes the creation of the
system block group, it attempts to allocate another chunk (like a data
chunk for an fallocate operation for a very large range). Task B will
be unable to progress and allocate the new chunk, because task A set
space_info->chunk_alloc to 1 and therefore it loops at
btrfs_chunk_alloc() waiting for task A to finish its chunk allocation
and set space_info->chunk_alloc to 0, but task A is waiting on task B
to finish creation of the new system block group, therefore resulting
in a deadlock;
2) Task B allocated a new system chunk and task A is waiting on task B to
finish creation of the respective system block group. By the time that
task B enter the final phase of block group allocation, which happens
at btrfs_create_pending_block_groups(), when it modifies the extent
tree, the device tree or the chunk tree to insert the items for some
new block group, it needs to allocate a new chunk, so it ends up at
btrfs_chunk_alloc() and keeps looping there because task A has set
space_info->chunk_alloc to 1, but task A is waiting for task B to
finish creation of the new system block group and release the reserved
system space, therefore resulting in a deadlock.
In short, the problem is if a task B needs to allocate a new chunk after
it previously allocated a new system chunk and if another task A is
currently waiting for task B to complete the allocation of the new system
chunk.
Unfortunately this deadlock scenario introduced by the previous fix for
the system chunk array exhaustion problem does not have a simple and short
fix, and requires a big change to rework the chunk allocation code so that
chunk btree updates are all made in the first phase of chunk allocation.
And since this deadlock regression is being frequently hit on zoned
filesystems and the system chunk array exhaustion problem is triggered
in more extreme cases (originally observed on PowerPC with a node size
of 64K when running the fallocate tests from stress-ng), revert the
changes from that commit. The next patch in the series, with a subject
of "btrfs: rework chunk allocation to avoid exhaustion of the system
chunk array" does the necessary changes to fix the system chunk array
exhaustion problem.
Reported-by: Naohiro Aota <naohiro.aota@wdc.com>
Link: https://lore.kernel.org/linux-btrfs/20210621015922.ewgbffxuawia7liz@naota-xeon/
Fixes: eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array due to concurrent allocations")
CC: stable@vger.kernel.org # 5.12+
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Tested-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Tested-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
| 0 |
compute_Perms_value_V5(std::string const& encryption_key,
                       QPDF::EncryptionData const& data)
{
    // Algorithm 3.10 from the PDF 1.7 extension level 3: build the
    // cleartext Perms block, then AES-encrypt it with the file
    // encryption key.
    unsigned char cleartext[16];
    compute_Perms_value_V5_clear(encryption_key, data, cleartext);
    std::string perms_block(reinterpret_cast<char*>(cleartext),
                            sizeof(cleartext));
    return process_with_aes(encryption_key, true, perms_block);
}
|
Safe
|
[
"CWE-125"
] |
qpdf
|
dea704f0ab7f625e1e7b3f9a1110b45b63157317
|
2.8531799130199207e+38
| 10 |
Pad keys to avoid memory errors (fixes #147)
| 0 |
// Render a byte buffer as a classic hex dump: 16 hex bytes per row
// (space after every 8th), followed by the printable-ASCII column
// (non-printables shown as '.').  The final partial row is padded so
// the ASCII column stays aligned.  Returns the whole dump as a string.
//
// NOTE(review): tl is size rounded down to a multiple of 16 and
// tl_offset the remainder, so (tl + tl_offset) == size; the second
// half of the row-end condition is therefore just "last byte".
std::string binaryToHex(const byte *data, size_t size)
{
    std::stringstream hexOutput;

    unsigned long tl = (unsigned long)((size / 16) * 16);
    unsigned long tl_offset = (unsigned long)(size - tl);

    for (unsigned long loop = 0; loop < (unsigned long)size; loop++) {
        // two-digit lowercase hex for each byte
        if (data[loop] < 16) {
            hexOutput << "0";
        }
        hexOutput << std::hex << (int)data[loop];
        if ((loop % 8) == 7) {
            hexOutput << " ";
        }
        // end of a full 16-byte row, or the very last byte of the buffer
        if ((loop % 16) == 15 || loop == (tl + tl_offset - 1)) {
            int max = 15;
            if (loop >= tl) {
                // partial final row: pad the missing hex columns so the
                // ASCII column lines up with the full rows above
                max = tl_offset - 1;
                for (int offset = 0; offset < (int)(16 - tl_offset); offset++) {
                    if ((offset % 8) == 7) {
                        hexOutput << " ";
                    }
                    hexOutput << "   ";
                }
            }
            hexOutput << " ";
            // ASCII column: walk the row's bytes from first to last
            for (int offset = max; offset >= 0; offset--) {
                if (offset == (max - 8)) {
                    hexOutput << " ";
                }
                byte c = '.';
                if (data[loop - offset] >= 0x20 && data[loop - offset] <= 0x7E) {
                    c = data[loop - offset] ;
                }
                hexOutput << (char) c ;
            }
            hexOutput << std::endl;
        }
    }
    hexOutput << std::endl << std::endl << std::endl;
    return hexOutput.str();
}
|
Safe
|
[
"CWE-125"
] |
exiv2
|
6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97
|
1.3476800792083043e+38
| 45 |
Fix https://github.com/Exiv2/exiv2/issues/55
| 0 |
/* Copy the server certificate's RSA public key (exponent + modulus)
 * into the licensing state.  Parses the certificate blob from the RDP
 * settings first if it has not been read yet.
 *
 * Fix: the malloc() result was passed to memcpy() without a NULL
 * check; on allocation failure license->Modulus is now left NULL and
 * license->ModulusLength reset to 0 so callers see a consistent state. */
void license_get_server_rsa_public_key(rdpLicense* license)
{
	BYTE* Exponent;
	BYTE* Modulus;
	int ModulusLength;

	if (license->ServerCertificate->length < 1)
	{
		certificate_read_server_certificate(license->certificate,
				license->rdp->settings->ServerCertificate,
				license->rdp->settings->ServerCertificateLength);
	}

	Exponent = license->certificate->cert_info.exponent;
	Modulus = license->certificate->cert_info.Modulus;
	ModulusLength = license->certificate->cert_info.ModulusLength;

	CopyMemory(license->Exponent, Exponent, 4);
	license->ModulusLength = ModulusLength;
	license->Modulus = (BYTE*) malloc(ModulusLength);

	if (!license->Modulus)
	{
		/* keep length consistent with the (missing) buffer */
		license->ModulusLength = 0;
		return;
	}

	memcpy(license->Modulus, Modulus, ModulusLength);
}
|
Safe
|
[] |
FreeRDP
|
f1d6afca6ae620f9855a33280bdc6f3ad9153be0
|
1.302829199518558e+38
| 23 |
Fix CVE-2014-0791
This patch fixes CVE-2014-0791, the remaining length in the stream is checked
before doing some malloc().
| 0 |
/* Parse the prediction weight table (weighted-prediction parameters)
 * from the slice header bitstream into s->sh: per-reference luma and
 * chroma weights/offsets for list L0, and for L1 on B slices.
 *
 * NOTE(review): the flag arrays below are fixed at 16 entries while the
 * loops run to s->sh.nb_refs[L0]/[L1]; this assumes nb_refs is already
 * bounded to <= 16 by the slice-header parser — confirm, otherwise a
 * crafted bitstream overflows these stack arrays. */
static void pred_weight_table(HEVCContext *s, GetBitContext *gb)
{
    int i = 0;
    int j = 0;
    uint8_t luma_weight_l0_flag[16];
    uint8_t chroma_weight_l0_flag[16];
    uint8_t luma_weight_l1_flag[16];
    uint8_t chroma_weight_l1_flag[16];

    s->sh.luma_log2_weight_denom = get_ue_golomb_long(gb);
    if (s->sps->chroma_format_idc != 0) {
        /* chroma denom is signalled as a delta from the luma denom */
        int delta = get_se_golomb(gb);
        s->sh.chroma_log2_weight_denom = av_clip_c(s->sh.luma_log2_weight_denom + delta, 0, 7);
    }

    /* L0 luma: flag per ref; unweighted refs get the identity weight */
    for (i = 0; i < s->sh.nb_refs[L0]; i++) {
        luma_weight_l0_flag[i] = get_bits1(gb);
        if (!luma_weight_l0_flag[i]) {
            s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
            s->sh.luma_offset_l0[i] = 0;
        }
    }
    if (s->sps->chroma_format_idc != 0) { // FIXME: invert "if" and "for"
        for (i = 0; i < s->sh.nb_refs[L0]; i++)
            chroma_weight_l0_flag[i] = get_bits1(gb);
    } else {
        for (i = 0; i < s->sh.nb_refs[L0]; i++)
            chroma_weight_l0_flag[i] = 0;
    }
    for (i = 0; i < s->sh.nb_refs[L0]; i++) {
        if (luma_weight_l0_flag[i]) {
            int delta_luma_weight_l0 = get_se_golomb(gb);
            s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
            s->sh.luma_offset_l0[i] = get_se_golomb(gb);
        }
        if (chroma_weight_l0_flag[i]) {
            for (j = 0; j < 2; j++) {
                int delta_chroma_weight_l0 = get_se_golomb(gb);
                int delta_chroma_offset_l0 = get_se_golomb(gb);
                s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
                s->sh.chroma_offset_l0[i][j] = av_clip_c((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
                                                                                    >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
            }
        } else {
            /* identity chroma weights for unweighted refs */
            s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
            s->sh.chroma_offset_l0[i][0] = 0;
            s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
            s->sh.chroma_offset_l0[i][1] = 0;
        }
    }

    /* L1 follows the exact same layout, only present on B slices */
    if (s->sh.slice_type == B_SLICE) {
        for (i = 0; i < s->sh.nb_refs[L1]; i++) {
            luma_weight_l1_flag[i] = get_bits1(gb);
            if (!luma_weight_l1_flag[i]) {
                s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
                s->sh.luma_offset_l1[i] = 0;
            }
        }
        if (s->sps->chroma_format_idc != 0) {
            for (i = 0; i < s->sh.nb_refs[L1]; i++)
                chroma_weight_l1_flag[i] = get_bits1(gb);
        } else {
            for (i = 0; i < s->sh.nb_refs[L1]; i++)
                chroma_weight_l1_flag[i] = 0;
        }
        for (i = 0; i < s->sh.nb_refs[L1]; i++) {
            if (luma_weight_l1_flag[i]) {
                int delta_luma_weight_l1 = get_se_golomb(gb);
                s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
                s->sh.luma_offset_l1[i] = get_se_golomb(gb);
            }
            if (chroma_weight_l1_flag[i]) {
                for (j = 0; j < 2; j++) {
                    int delta_chroma_weight_l1 = get_se_golomb(gb);
                    int delta_chroma_offset_l1 = get_se_golomb(gb);
                    s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
                    s->sh.chroma_offset_l1[i][j] = av_clip_c((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
                                                                                        >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
                }
            } else {
                s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
                s->sh.chroma_offset_l1[i][0] = 0;
                s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
                s->sh.chroma_offset_l1[i][1] = 0;
            }
        }
    }
}
|
Safe
|
[
"CWE-703"
] |
FFmpeg
|
b25e84b7399bd91605596b67d761d3464dbe8a6e
|
8.737204203748203e+36
| 88 |
hevc: check that the VCL NAL types are the same for all slice segments of a frame
Fixes possible invalid memory access for mismatching skipped/non-skipped
slice segments.
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Sample-Id: 00001533-google
| 0 |
// Register "principal" as authenticated on this session, replacing any
// existing login on the same database, and (when implicit acquisition
// is enabled for the principal) pre-acquire its privileges from the
// admin database and from its own database.
void AuthorizationSession::addAuthorizedPrincipal(Principal* principal) {

    // Log out any already-logged-in user on the same database as "principal".
    logoutDatabase(principal->getName().getDB().toString());  // See SERVER-8144.

    _authenticatedPrincipals.add(principal);
    if (!principal->isImplicitPrivilegeAcquisitionEnabled())
        return;

    // Admin-database privileges apply everywhere, so probe admin first,
    // then the principal's own database; mark each as probed so it is
    // not re-fetched later.
    _acquirePrivilegesForPrincipalFromDatabase(ADMIN_DBNAME, principal->getName());
    principal->markDatabaseAsProbed(ADMIN_DBNAME);
    const std::string dbname = principal->getName().getDB().toString();
    _acquirePrivilegesForPrincipalFromDatabase(dbname, principal->getName());
    principal->markDatabaseAsProbed(dbname);
    _externalState->onAddAuthorizedPrincipal(principal);
}
|
Vulnerable
|
[
"CWE-264"
] |
mongo
|
c5ad04549e40b1069029026081d9324e9e06156c
|
2.9438803056381645e+38
| 15 |
SERVER-9983 Do not needlessly lock when looking up privileges for the __system@local user.
Uncorrected, this can cause replica set heartbeats to stall behind operations
that hold the read lock for a long time.
| 1 |
/* Print the faulting/current instruction pointer (EIP on 32-bit,
 * CS:RIP on 64-bit) at the given log level, followed by a dump of the
 * opcode bytes around it via show_opcodes(). */
void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes(regs, loglvl);
}
|
Safe
|
[
"CWE-20"
] |
linux
|
342db04ae71273322f0011384a9ed414df8bdae4
|
1.8868763103362004e+38
| 9 |
x86/dumpstack: Don't dump kernel memory based on usermode RIP
show_opcodes() is used both for dumping kernel instructions and for dumping
user instructions. If userspace causes #PF by jumping to a kernel address,
show_opcodes() can be reached with regs->ip controlled by the user,
pointing to kernel code. Make sure that userspace can't trick us into
dumping kernel memory into dmesg.
Fixes: 7cccf0725cf7 ("x86/dumpstack: Add a show_ip() function")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: security@kernel.org
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180828154901.112726-1-jannh@google.com
| 0 |
/* HCI core ->setup callback for UART transports.  Programs the init and
 * operational baud rates (protocol hook first, then the tty), defers to
 * the protocol's own setup when one exists, and otherwise performs
 * vendor detection by reading the local version and installing the
 * matching set_bdaddr/check_bdaddr handlers (Intel or Broadcom). */
static int hci_uart_setup(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct hci_rp_read_local_version *ver;
	struct sk_buff *skb;
	unsigned int speed;
	int err;

	/* Init speed if any */
	if (hu->init_speed)
		speed = hu->init_speed;
	else if (hu->proto->init_speed)
		speed = hu->proto->init_speed;
	else
		speed = 0;

	if (speed)
		hci_uart_set_baudrate(hu, speed);

	/* Operational speed if any */
	if (hu->oper_speed)
		speed = hu->oper_speed;
	else if (hu->proto->oper_speed)
		speed = hu->proto->oper_speed;
	else
		speed = 0;

	/* Ask the controller to switch speed first; only retune the local
	 * tty when the controller accepted the change. */
	if (hu->proto->set_baudrate && speed) {
		err = hu->proto->set_baudrate(hu, speed);
		if (!err)
			hci_uart_set_baudrate(hu, speed);
	}

	/* A protocol-specific setup routine takes over entirely. */
	if (hu->proto->setup)
		return hu->proto->setup(hu);

	if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags))
		return 0;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		/* Vendor detection is best-effort: log and carry on. */
		BT_ERR("%s: Reading local version information failed (%ld)",
		       hdev->name, PTR_ERR(skb));
		return 0;
	}

	if (skb->len != sizeof(*ver)) {
		BT_ERR("%s: Event length mismatch for version information",
		       hdev->name);
		goto done;
	}

	ver = (struct hci_rp_read_local_version *)skb->data;

	/* Manufacturer IDs: 2 = Intel, 15 = Broadcom. */
	switch (le16_to_cpu(ver->manufacturer)) {
#ifdef CONFIG_BT_HCIUART_INTEL
	case 2:
		hdev->set_bdaddr = btintel_set_bdaddr;
		btintel_check_bdaddr(hdev);
		break;
#endif
#ifdef CONFIG_BT_HCIUART_BCM
	case 15:
		hdev->set_bdaddr = btbcm_set_bdaddr;
		btbcm_check_bdaddr(hdev);
		break;
#endif
	default:
		break;
	}

done:
	kfree_skb(skb);
	return 0;
}
|
Safe
|
[
"CWE-416"
] |
linux
|
56897b217a1d0a91c9920cb418d6b3fe922f590a
|
1.3496924069190975e+37
| 76 |
Bluetooth: hci_ldisc: Postpone HCI_UART_PROTO_READY bit set in hci_uart_set_proto()
task A: task B:
hci_uart_set_proto flush_to_ldisc
- p->open(hu) -> h5_open //alloc h5 - receive_buf
- set_bit HCI_UART_PROTO_READY - tty_port_default_receive_buf
- hci_uart_register_dev - tty_ldisc_receive_buf
- hci_uart_tty_receive
- test_bit HCI_UART_PROTO_READY
- h5_recv
- clear_bit HCI_UART_PROTO_READY while() {
- p->open(hu) -> h5_close //free h5
- h5_rx_3wire_hdr
- h5_reset() //use-after-free
}
It could use ioctl to set hci uart proto, but there is
a use-after-free issue when hci_uart_register_dev() fail in
hci_uart_set_proto(), see stack above, fix this by setting
HCI_UART_PROTO_READY bit only when hci_uart_register_dev()
return success.
Reported-by: syzbot+899a33dc0fa0dbaf06a6@syzkaller.appspotmail.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Jeremy Cline <jcline@redhat.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
| 0 |
/* Allocate and initialize a TLS/DTLS session object in *SESSION.
 * FLAGS selects the role (GNUTLS_SERVER/GNUTLS_CLIENT), the transport
 * (GNUTLS_DATAGRAM for DTLS), and assorted behavior toggles.  On any
 * failure all partially-created state (mutexes, the session itself) is
 * released and an error code is returned; returns 0 on success.
 *
 * The session is calloc'ed, so anything not explicitly set below is
 * zero/NULL by design. */
int gnutls_init(gnutls_session_t * session, unsigned int flags)
{
	int ret;

	FAIL_IF_LIB_ERROR;

	*session = gnutls_calloc(1, sizeof(struct gnutls_session_int));
	if (*session == NULL)
		return GNUTLS_E_MEMORY_ERROR;

	ret = gnutls_mutex_init(&(*session)->internals.post_negotiation_lock);
	if (ret < 0) {
		gnutls_assert();
		gnutls_free(*session);
		return ret;
	}

	ret = gnutls_mutex_init(&(*session)->internals.epoch_lock);
	if (ret < 0) {
		gnutls_assert();
		gnutls_mutex_deinit(&(*session)->internals.post_negotiation_lock);
		gnutls_free(*session);
		return ret;
	}

	/* Epoch 1 is the first usable record epoch. */
	ret = _gnutls_epoch_setup_next(*session, 1, NULL);
	if (ret < 0) {
		gnutls_mutex_deinit(&(*session)->internals.post_negotiation_lock);
		gnutls_mutex_deinit(&(*session)->internals.epoch_lock);
		gnutls_free(*session);
		return gnutls_assert_val(GNUTLS_E_MEMORY_ERROR);
	}
	_gnutls_epoch_bump(*session);

	(*session)->security_parameters.entity =
	    (flags & GNUTLS_SERVER ? GNUTLS_SERVER : GNUTLS_CLIENT);

	/* the default certificate type for TLS */
	(*session)->security_parameters.client_ctype = DEFAULT_CERT_TYPE;
	(*session)->security_parameters.server_ctype = DEFAULT_CERT_TYPE;

	/* Initialize buffers */
	_gnutls_buffer_init(&(*session)->internals.handshake_hash_buffer);
	_gnutls_buffer_init(&(*session)->internals.post_handshake_hash_buffer);
	_gnutls_buffer_init(&(*session)->internals.hb_remote_data);
	_gnutls_buffer_init(&(*session)->internals.hb_local_data);
	_gnutls_buffer_init(&(*session)->internals.record_presend_buffer);
	_gnutls_buffer_init(&(*session)->internals.record_key_update_buffer);
	_gnutls_buffer_init(&(*session)->internals.reauth_buffer);

	_mbuffer_head_init(&(*session)->internals.record_buffer);
	_mbuffer_head_init(&(*session)->internals.record_send_buffer);
	_mbuffer_head_init(&(*session)->internals.record_recv_buffer);
	_mbuffer_head_init(&(*session)->internals.early_data_recv_buffer);
	_gnutls_buffer_init(&(*session)->internals.early_data_presend_buffer);

	_mbuffer_head_init(&(*session)->internals.handshake_send_buffer);
	_gnutls_handshake_recv_buffer_init(*session);

	(*session)->internals.expire_time = DEFAULT_EXPIRE_TIME;

	/* Ticket key rotation - set the default X to 3 times the ticket expire time */
	(*session)->key.totp.last_result = 0;

	gnutls_handshake_set_max_packet_length((*session),
					       MAX_HANDSHAKE_PACKET_SIZE);

	/* set the socket pointers to -1;
	 */
	(*session)->internals.transport_recv_ptr =
	    (gnutls_transport_ptr_t) - 1;
	(*session)->internals.transport_send_ptr =
	    (gnutls_transport_ptr_t) - 1;

	/* set the default maximum record size for TLS
	 */
	(*session)->security_parameters.max_record_recv_size =
	    DEFAULT_MAX_RECORD_SIZE;
	(*session)->security_parameters.max_record_send_size =
	    DEFAULT_MAX_RECORD_SIZE;
	(*session)->security_parameters.max_user_record_recv_size =
	    DEFAULT_MAX_RECORD_SIZE;
	(*session)->security_parameters.max_user_record_send_size =
	    DEFAULT_MAX_RECORD_SIZE;

	/* set the default early data size for TLS
	 */
	if ((*session)->security_parameters.entity == GNUTLS_SERVER) {
		(*session)->security_parameters.max_early_data_size =
		    DEFAULT_MAX_EARLY_DATA_SIZE;
	} else {
		/* clients accept whatever the server advertises */
		(*session)->security_parameters.max_early_data_size =
		    UINT32_MAX;
	}

	/* Everything else not initialized here is initialized as NULL
	 * or 0. This is why calloc is used. However, we want to
	 * ensure that certain portions of data are initialized at
	 * runtime before being used. Mark such regions with a
	 * valgrind client request as undefined.
	 */
#ifdef HAVE_VALGRIND_MEMCHECK_H
	if (RUNNING_ON_VALGRIND) {
		if (flags & GNUTLS_CLIENT)
			VALGRIND_MAKE_MEM_UNDEFINED((*session)->security_parameters.client_random,
						    GNUTLS_RANDOM_SIZE);
		if (flags & GNUTLS_SERVER) {
			VALGRIND_MAKE_MEM_UNDEFINED((*session)->security_parameters.server_random,
						    GNUTLS_RANDOM_SIZE);
			VALGRIND_MAKE_MEM_UNDEFINED((*session)->key.session_ticket_key,
						    TICKET_MASTER_KEY_SIZE);
		}
	}
#endif
	handshake_internal_state_clear1(*session);

	/* Prefer writev-based push, suppressing SIGPIPE when requested
	 * and supported; fall back to plain write otherwise. */
#ifdef HAVE_WRITEV
#ifdef MSG_NOSIGNAL
	if (flags & GNUTLS_NO_SIGNAL)
		gnutls_transport_set_vec_push_function(*session, system_writev_nosignal);
	else
#endif
		gnutls_transport_set_vec_push_function(*session, system_writev);
#else
	gnutls_transport_set_push_function(*session, system_write);
#endif
	(*session)->internals.pull_timeout_func = gnutls_system_recv_timeout;
	(*session)->internals.pull_func = system_read;
	(*session)->internals.errno_func = system_errno;

	(*session)->internals.saved_username_size = -1;

	/* heartbeat timeouts */
	(*session)->internals.hb_retrans_timeout_ms = 1000;
	(*session)->internals.hb_total_timeout_ms = 60000;

	if (flags & GNUTLS_DATAGRAM) {
		(*session)->internals.dtls.mtu = DTLS_DEFAULT_MTU;
		(*session)->internals.transport = GNUTLS_DGRAM;

		gnutls_dtls_set_timeouts(*session, DTLS_RETRANS_TIMEOUT, 60000);
	} else {
		(*session)->internals.transport = GNUTLS_STREAM;
	}

	/* Enable useful extensions */
	if ((flags & GNUTLS_CLIENT) && !(flags & GNUTLS_NO_EXTENSIONS)) {
#ifdef ENABLE_OCSP
		gnutls_ocsp_status_request_enable_client(*session, NULL, 0,
							 NULL);
#endif
	}

	/* session tickets in server side are enabled by setting a key */
	if (flags & GNUTLS_SERVER)
		flags |= GNUTLS_NO_TICKETS;

	(*session)->internals.flags = flags;

	if (_gnutls_disable_tls13 != 0)
		(*session)->internals.flags |= INT_FLAG_NO_TLS13;

	/* Install the default keylog function */
	gnutls_session_set_keylog_function(*session, _gnutls_nss_keylog_func);

	return 0;
}
|
Safe
|
[] |
gnutls
|
3d7fae761e65e9d0f16d7247ee8a464d4fe002da
|
1.2438915317922648e+38
| 167 |
valgrind: check if session ticket key is used without initialization
This adds a valgrind client request for
session->key.session_ticket_key to make sure that it is not used
without initialization.
Signed-off-by: Daiki Ueno <ueno@gnu.org>
| 0 |
compress_init(const char *root)
{
ds_ctxt_t *ctxt;
ds_compress_ctxt_t *compress_ctxt;
comp_thread_ctxt_t *threads;
/* Create and initialize the worker threads */
threads = create_worker_threads(xtrabackup_compress_threads);
if (threads == NULL) {
msg("compress: failed to create worker threads.");
return NULL;
}
ctxt = (ds_ctxt_t *) my_malloc(sizeof(ds_ctxt_t) +
sizeof(ds_compress_ctxt_t),
MYF(MY_FAE));
compress_ctxt = (ds_compress_ctxt_t *) (ctxt + 1);
compress_ctxt->threads = threads;
compress_ctxt->nthreads = xtrabackup_compress_threads;
ctxt->ptr = compress_ctxt;
ctxt->root = my_strdup(root, MYF(MY_FAE));
return ctxt;
}
|
Safe
|
[
"CWE-703"
] |
edgeless-mariadb
|
91d5fffa0796b8208c3d6633c8f296da8914af4d
|
2.043654598266281e+38
| 26 |
MDEV-28719: compress_write() leaks data_mutex on error
| 0 |
int gnutls_system_global_init()
{
#ifdef _WIN32
#if defined(__MINGW32__) && !defined(__MINGW64__) && __MINGW32_MAJOR_VERSION <= 3 && __MINGW32_MINOR_VERSION <= 20
HMODULE crypto;
crypto = LoadLibraryA("Crypt32.dll");
if (crypto == NULL)
return GNUTLS_E_CRYPTO_INIT_FAILED;
Loaded_CertEnumCRLsInStore =
(Type_CertEnumCRLsInStore) GetProcAddress(crypto,
"CertEnumCRLsInStore");
if (Loaded_CertEnumCRLsInStore == NULL) {
FreeLibrary(crypto);
return GNUTLS_E_CRYPTO_INIT_FAILED;
}
Crypt32_dll = crypto;
#endif
#endif
return 0;
}
|
Safe
|
[
"CWE-20"
] |
gnutls
|
b0a3048e56611a2deee4976aeba3b8c0740655a6
|
1.6721022363898795e+37
| 23 |
env: use secure_getenv when reading environment variables
| 0 |
static js_Ast *identifier(js_State *J)
{
js_Ast *a;
if (J->lookahead == TK_IDENTIFIER) {
a = jsP_newstrnode(J, AST_IDENTIFIER, J->text);
jsP_next(J);
return a;
}
jsP_error(J, "unexpected token: %s (expected identifier)", jsY_tokenstring(J->lookahead));
}
|
Safe
|
[
"CWE-674"
] |
mujs
|
4d45a96e57fbabf00a7378b337d0ddcace6f38c1
|
2.8644569731143498e+38
| 10 |
Guard binary expressions from too much recursion.
| 0 |
static void init_no_dither(int line)
{
;
}
|
Safe
|
[
"CWE-369"
] |
libcaca
|
84bd155087b93ab2d8d7cb5b1ac94ecd4cf4f93c
|
2.6180486158045453e+38
| 4 |
dither: fix integer overflows that were causing a division by zero.
Fixes: #36 (CVE-2018-20544)
| 0 |
MYSQL_RES * STDCALL mysql_use_result(MYSQL *mysql)
{
return (*mysql->methods->use_result)(mysql);
}
|
Safe
|
[] |
mysql-server
|
3d8134d2c9b74bc8883ffe2ef59c168361223837
|
2.4983577244518924e+38
| 4 |
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE()
Description: If mysql_stmt_close() encountered error,
it recorded error in prepared statement
but then frees memory assigned to prepared
statement. If mysql_stmt_error() is used
to get error information, it will result
into use after free.
In all cases where mysql_stmt_close() can
fail, error would have been set by
cli_advanced_command in MYSQL structure.
Solution: Don't copy error from MYSQL using set_stmt_errmsg.
There is no automated way to test the fix since
it is in mysql_stmt_close() which does not expect
any reply from server.
Reviewed-By: Georgi Kodinov <georgi.kodinov@oracle.com>
Reviewed-By: Ramil Kalimullin <ramil.kalimullin@oracle.com>
| 0 |
static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 dummy;
u32 eax = 1;
if (!init_event) {
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
}
init_vmcb(svm);
kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
}
|
Safe
|
[
"CWE-200",
"CWE-399"
] |
linux
|
cbdb967af3d54993f5814f1cee0ed311a055377d
|
1.4057742383775512e+38
| 17 |
KVM: svm: unconditionally intercept #DB
This is needed to avoid the possibility that the guest triggers
an infinite stream of #DB exceptions (CVE-2015-8104).
VMX is not affected: because it does not save DR6 in the VMCS,
it already intercepts #DB unconditionally.
Reported-by: Jan Beulich <jbeulich@suse.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
gs_window_clear (GSWindow *window)
{
g_return_if_fail (GS_IS_WINDOW (window));
clear_widget (GTK_WIDGET (window));
clear_widget (window->priv->drawing_area);
}
|
Safe
|
[] |
gnome-screensaver
|
a5f66339be6719c2b8fc478a1d5fc6545297d950
|
2.2391061479343554e+38
| 7 |
Ensure keyboard grab and unlock dialog exist after monitor removal
gnome-screensaver currently doesn't deal with monitors getting
removed properly. If the unlock dialog is on the removed monitor
then the unlock dialog and its associated keyboard grab are not
moved to an existing monitor when the monitor removal is processed.
This means that users can gain access to the locked system by placing
the mouse pointer on an external monitor and then disconnect the
external monitor.
CVE-2010-0414
https://bugzilla.gnome.org/show_bug.cgi?id=609337
| 0 |
static void discov_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
discov_off.work);
bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
/* When discoverable timeout triggers, then just make sure
* the limited discoverable flag is cleared. Even in the case
* of a timeout triggered from general discoverable, it is
* safe to unconditionally clear the flag.
*/
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hdev->discov_timeout = 0;
hci_dev_unlock(hdev);
hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
mgmt_new_settings(hdev);
}
|
Safe
|
[
"CWE-362"
] |
linux
|
e2cb6b891ad2b8caa9131e3be70f45243df82a80
|
7.83577974614385e+37
| 23 |
bluetooth: eliminate the potential race condition when removing the HCI controller
There is a possible race condition vulnerability between issuing a HCI
command and removing the cont. Specifically, functions hci_req_sync()
and hci_dev_do_close() can race each other like below:
thread-A in hci_req_sync() | thread-B in hci_dev_do_close()
| hci_req_sync_lock(hdev);
test_bit(HCI_UP, &hdev->flags); |
... | test_and_clear_bit(HCI_UP, &hdev->flags)
hci_req_sync_lock(hdev); |
|
In this commit we alter the sequence in function hci_req_sync(). Hence,
the thread-A cannot issue th.
Signed-off-by: Lin Ma <linma@zju.edu.cn>
Cc: Marcel Holtmann <marcel@holtmann.org>
Fixes: 7c6a329e4447 ("[Bluetooth] Fix regression from using default link policy")
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
static void settings_init(void) {
settings.use_cas = true;
settings.access = 0700;
settings.port = 11211;
settings.udpport = 11211;
/* By default this string should be NULL for getaddrinfo() */
settings.inter = NULL;
settings.maxbytes = 64 * 1024 * 1024; /* default is 64MB */
settings.maxconns = 1024; /* to limit connections-related memory to about 5MB */
settings.verbose = 0;
settings.oldest_live = 0;
settings.oldest_cas = 0; /* supplements accuracy of oldest_live */
settings.evict_to_free = 1; /* push old items out of cache when memory runs out */
settings.socketpath = NULL; /* by default, not using a unix socket */
settings.factor = 1.25;
settings.chunk_size = 48; /* space for a modest key and value */
settings.num_threads = 4; /* N workers */
settings.num_threads_per_udp = 0;
settings.prefix_delimiter = ':';
settings.detail_enabled = 0;
settings.reqs_per_event = 20;
settings.backlog = 1024;
settings.binding_protocol = negotiating_prot;
settings.item_size_max = 1024 * 1024; /* The famous 1MB upper limit. */
settings.slab_page_size = 1024 * 1024; /* chunks are split from 1MB pages. */
settings.slab_chunk_size_max = settings.slab_page_size / 2;
settings.sasl = false;
settings.maxconns_fast = true;
settings.lru_crawler = false;
settings.lru_crawler_sleep = 100;
settings.lru_crawler_tocrawl = 0;
settings.lru_maintainer_thread = false;
settings.lru_segmented = true;
settings.hot_lru_pct = 20;
settings.warm_lru_pct = 40;
settings.hot_max_factor = 0.2;
settings.warm_max_factor = 2.0;
settings.inline_ascii_response = false;
settings.temp_lru = false;
settings.temporary_ttl = 61;
settings.idle_timeout = 0; /* disabled */
settings.hashpower_init = 0;
settings.slab_reassign = true;
settings.slab_automove = 1;
settings.slab_automove_ratio = 0.8;
settings.slab_automove_window = 30;
settings.shutdown_command = false;
settings.tail_repair_time = TAIL_REPAIR_TIME_DEFAULT;
settings.flush_enabled = true;
settings.dump_enabled = true;
settings.crawls_persleep = 1000;
settings.logger_watcher_buf_size = LOGGER_WATCHER_BUF_SIZE;
settings.logger_buf_size = LOGGER_BUF_SIZE;
settings.drop_privileges = true;
#ifdef MEMCACHED_DEBUG
settings.relaxed_privileges = false;
#endif
}
|
Vulnerable
|
[
"CWE-20",
"CWE-703",
"CWE-400"
] |
memcached
|
dbb7a8af90054bf4ef51f5814ef7ceb17d83d974
|
5.212301434681895e+37
| 58 |
disable UDP port by default
As reported, UDP amplification attacks have started to use insecure
internet-exposed memcached instances. UDP used to be a lot more popular as a
transport for memcached many years ago, but I'm not aware of many recent
users.
Ten years ago, the TCP connection overhead from many clients was relatively
high (dozens or hundreds per client server), but these days many clients are
batched, or user fewer processes, or simply anre't worried about it.
While changing the default to listen on localhost only would also help, the
true culprit is UDP. There are many more use cases for using memcached over
the network than there are for using the UDP protocol.
| 1 |
static gboolean try_remove_cache(gpointer user_data)
{
cache_timer = 0;
if (__sync_fetch_and_sub(&cache_refcount, 1) == 1) {
debug("No cache users, removing it.");
g_hash_table_destroy(cache);
cache = NULL;
}
return FALSE;
}
|
Safe
|
[
"CWE-119"
] |
connman
|
5c281d182ecdd0a424b64f7698f32467f8f67b71
|
1.0754797067768197e+38
| 13 |
dnsproxy: Fix crash on malformed DNS response
If the response query string is malformed, we might access memory
pass the end of "name" variable in parse_response().
| 0 |
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
char *status;
switch (to_devlink(dev)->status) {
case DL_STATE_NONE:
status = "not tracked"; break;
case DL_STATE_DORMANT:
status = "dormant"; break;
case DL_STATE_AVAILABLE:
status = "available"; break;
case DL_STATE_CONSUMER_PROBE:
status = "consumer probing"; break;
case DL_STATE_ACTIVE:
status = "active"; break;
case DL_STATE_SUPPLIER_UNBIND:
status = "supplier unbinding"; break;
default:
status = "unknown"; break;
}
return sysfs_emit(buf, "%s\n", status);
}
|
Safe
|
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
|
1.6177708318899625e+38
| 23 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <joe@perches.com>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
Opal::Call::emit_established_in_main ()
{
established ();
}
|
Safe
|
[] |
ekiga
|
7d09807257963a4f5168a01aec1795a398746372
|
1.80128017604916e+38
| 4 |
Validate UTF-8 strings before showing them
Closes bug #653009.
| 0 |
static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.nested_cr3 = __sme_set(root);
mark_dirty(svm->vmcb, VMCB_NPT);
}
|
Safe
|
[
"CWE-401"
] |
linux
|
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
|
3.0225047113469496e+38
| 8 |
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When kmalloc memory for sd->sev_vmcbs failed, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
ast_for_flow_stmt(struct compiling *c, const node *n)
{
/*
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
| yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist]
yield_stmt: yield_expr
yield_expr: 'yield' testlist | 'yield' 'from' test
raise_stmt: 'raise' [test [',' test [',' test]]]
*/
node *ch;
REQ(n, flow_stmt);
ch = CHILD(n, 0);
switch (TYPE(ch)) {
case break_stmt:
return Break(LINENO(n), n->n_col_offset,
n->n_end_lineno, n->n_end_col_offset, c->c_arena);
case continue_stmt:
return Continue(LINENO(n), n->n_col_offset,
n->n_end_lineno, n->n_end_col_offset, c->c_arena);
case yield_stmt: { /* will reduce to yield_expr */
expr_ty exp = ast_for_expr(c, CHILD(ch, 0));
if (!exp)
return NULL;
return Expr(exp, LINENO(n), n->n_col_offset,
n->n_end_lineno, n->n_end_col_offset, c->c_arena);
}
case return_stmt:
if (NCH(ch) == 1)
return Return(NULL, LINENO(n), n->n_col_offset,
n->n_end_lineno, n->n_end_col_offset, c->c_arena);
else {
expr_ty expression = ast_for_testlist(c, CHILD(ch, 1));
if (!expression)
return NULL;
return Return(expression, LINENO(n), n->n_col_offset,
n->n_end_lineno, n->n_end_col_offset, c->c_arena);
}
case raise_stmt:
if (NCH(ch) == 1)
return Raise(NULL, NULL, LINENO(n), n->n_col_offset,
n->n_end_lineno, n->n_end_col_offset, c->c_arena);
else if (NCH(ch) >= 2) {
expr_ty cause = NULL;
expr_ty expression = ast_for_expr(c, CHILD(ch, 1));
if (!expression)
return NULL;
if (NCH(ch) == 4) {
cause = ast_for_expr(c, CHILD(ch, 3));
if (!cause)
return NULL;
}
return Raise(expression, cause, LINENO(n), n->n_col_offset,
n->n_end_lineno, n->n_end_col_offset, c->c_arena);
}
/* fall through */
default:
PyErr_Format(PyExc_SystemError,
"unexpected flow_stmt: %d", TYPE(ch));
return NULL;
}
}
|
Safe
|
[
"CWE-125"
] |
cpython
|
a4d78362397fc3bced6ea80fbc7b5f4827aec55e
|
9.824338475789132e+37
| 65 |
bpo-36495: Fix two out-of-bounds array reads (GH-12641)
Research and fix by @bradlarsen.
| 0 |
static int sctp_connect_new_asoc(struct sctp_endpoint *ep,
const union sctp_addr *daddr,
const struct sctp_initmsg *init,
struct sctp_transport **tp)
{
struct sctp_association *asoc;
struct sock *sk = ep->base.sk;
struct net *net = sock_net(sk);
enum sctp_scope scope;
int err;
if (sctp_endpoint_is_peeled_off(ep, daddr))
return -EADDRNOTAVAIL;
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk))
return -EAGAIN;
} else {
if (inet_port_requires_bind_service(net, ep->base.bind_addr.port) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
}
scope = sctp_scope(daddr);
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!asoc)
return -ENOMEM;
err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
if (err < 0)
goto free;
*tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
if (!*tp) {
err = -ENOMEM;
goto free;
}
if (!init)
return 0;
if (init->sinit_num_ostreams) {
__u16 outcnt = init->sinit_num_ostreams;
asoc->c.sinit_num_ostreams = outcnt;
/* outcnt has been changed, need to re-init stream */
err = sctp_stream_init(&asoc->stream, outcnt, 0, GFP_KERNEL);
if (err)
goto free;
}
if (init->sinit_max_instreams)
asoc->c.sinit_max_instreams = init->sinit_max_instreams;
if (init->sinit_max_attempts)
asoc->max_init_attempts = init->sinit_max_attempts;
if (init->sinit_max_init_timeo)
asoc->max_init_timeo =
msecs_to_jiffies(init->sinit_max_init_timeo);
return 0;
free:
sctp_association_free(asoc);
return err;
}
|
Safe
|
[
"CWE-362"
] |
linux
|
b166a20b07382b8bc1dcee2a448715c9c2c81b5b
|
4.242622242037231e+37
| 66 |
net/sctp: fix race condition in sctp_destroy_sock
If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock
held and sp->do_auto_asconf is true, then an element is removed
from the auto_asconf_splist without any proper locking.
This can happen in the following functions:
1. In sctp_accept, if sctp_sock_migrate fails.
2. In inet_create or inet6_create, if there is a bpf program
attached to BPF_CGROUP_INET_SOCK_CREATE which denies
creation of the sctp socket.
The bug is fixed by acquiring addr_wq_lock in sctp_destroy_sock
instead of sctp_close.
This addresses CVE-2021-23133.
Reported-by: Or Cohen <orcohen@paloaltonetworks.com>
Reviewed-by: Xin Long <lucien.xin@gmail.com>
Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications")
Signed-off-by: Or Cohen <orcohen@paloaltonetworks.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
virtual bool find_function_processor (void *arg) { return 0; }
|
Safe
|
[
"CWE-617"
] |
server
|
2e7891080667c59ac80f788eef4d59d447595772
|
1.5200529913338113e+37
| 1 |
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <sergey.petrunya@mariadb.com>
| 0 |
static int ttusb_dec_init_stb(struct ttusb_dec *dec)
{
int result;
unsigned int mode = 0, model = 0, version = 0;
dprintk("%s\n", __func__);
result = ttusb_dec_get_stb_state(dec, &mode, &model, &version);
if (result)
return result;
if (!mode) {
if (version == 0xABCDEFAB)
printk(KERN_INFO "ttusb_dec: no version info in Firmware\n");
else
printk(KERN_INFO "ttusb_dec: Firmware %x.%02x%c%c\n",
version >> 24, (version >> 16) & 0xff,
(version >> 8) & 0xff, version & 0xff);
result = ttusb_dec_boot_dsp(dec);
if (result)
return result;
} else {
/* We can't trust the USB IDs that some firmwares
give the box */
switch (model) {
case 0x00070001:
case 0x00070008:
case 0x0007000c:
ttusb_dec_set_model(dec, TTUSB_DEC3000S);
break;
case 0x00070009:
case 0x00070013:
ttusb_dec_set_model(dec, TTUSB_DEC2000T);
break;
case 0x00070011:
ttusb_dec_set_model(dec, TTUSB_DEC2540T);
break;
default:
printk(KERN_ERR "%s: unknown model returned by firmware (%08x) - please report\n",
__func__, model);
return -ENOENT;
}
if (version >= 0x01770000)
dec->can_playback = 1;
}
return 0;
}
|
Safe
|
[
"CWE-772"
] |
linux
|
a10feaf8c464c3f9cfdd3a8a7ce17e1c0d498da1
|
2.7172993495015468e+38
| 48 |
media: ttusb-dec: Fix info-leak in ttusb_dec_send_command()
The function at issue does not always initialize each byte allocated
for 'b' and can therefore leak uninitialized memory to a USB device in
the call to usb_bulk_msg()
Use kzalloc() instead of kmalloc()
Signed-off-by: Tomas Bortoli <tomasbortoli@gmail.com>
Reported-by: syzbot+0522702e9d67142379f1@syzkaller.appspotmail.com
Signed-off-by: Sean Young <sean@mess.org>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
| 0 |
GF_Box *tref_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_TrackReferenceBox, GF_ISOM_BOX_TYPE_TREF);
return (GF_Box *)tmp;
|
Safe
|
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
|
7.066881276241555e+36
| 5 |
fixed #1587
| 0 |
NCR_ProcessKnown
(NTP_Packet *message, /* the received message */
struct timeval *now, /* timestamp at time of receipt */
double now_err,
NCR_Instance inst, /* the instance record for this peer/server */
NTP_Local_Address *local_addr, /* the receiving address */
int length /* the length of the received packet */
)
{
int pkt_mode, proc_packet, proc_as_unknown, log_peer_access;
if (!check_packet_format(message, length))
return 0;
pkt_mode = NTP_LVM_TO_MODE(message->lvm);
proc_packet = 0;
proc_as_unknown = 0;
log_peer_access = 0;
/* Now, depending on the mode we decide what to do */
switch (pkt_mode) {
case MODE_ACTIVE:
switch (inst->mode) {
case MODE_ACTIVE:
/* Ordinary symmetric peering */
log_peer_access = 1;
proc_packet = 1;
break;
case MODE_PASSIVE:
/* In this software this case should not arise, we don't
support unconfigured peers */
break;
case MODE_CLIENT:
/* This is where we have the remote configured as a server and he has
us configured as a peer, process as from an unknown source */
proc_as_unknown = 1;
break;
default:
/* Discard */
break;
}
break;
case MODE_PASSIVE:
switch (inst->mode) {
case MODE_ACTIVE:
/* This would arise if we have the remote configured as a peer and
he does not have us configured */
log_peer_access = 1;
proc_packet = 1;
break;
case MODE_PASSIVE:
/* Error condition in RFC 5905 */
break;
default:
/* Discard */
break;
}
break;
case MODE_CLIENT:
/* If message is client mode, we just respond with a server mode
packet, regardless of what we think the remote machine is
supposed to be. However, even though this is a configured
peer or server, we still implement access restrictions on
client mode operation.
This copes with the case for an isolated network where one
machine is set by eye and is used as the master, with the
other machines pointed at it. If the master goes down, we
want to be able to reset its time at startup by relying on
one of the secondaries to flywheel it. The behaviour coded here
is required in the secondaries to make this possible. */
proc_as_unknown = 1;
break;
case MODE_SERVER:
/* Ignore presend reply */
if (inst->presend_done)
break;
switch (inst->mode) {
case MODE_CLIENT:
/* Standard case where he's a server and we're the client */
proc_packet = 1;
break;
default:
/* Discard */
break;
}
break;
case MODE_BROADCAST:
/* Just ignore these */
break;
default:
/* Obviously ignore */
break;
}
if (log_peer_access)
CLG_LogNTPPeerAccess(&inst->remote_addr.ip_addr, now->tv_sec);
if (proc_packet) {
/* Check if the reply was received by the socket that sent the request */
if (local_addr->sock_fd != inst->local_addr.sock_fd) {
DEBUG_LOG(LOGF_NtpCore,
"Packet received by wrong socket %d (expected %d)",
local_addr->sock_fd, inst->local_addr.sock_fd);
return 0;
}
/* Ignore packets from offline sources */
if (inst->opmode == MD_OFFLINE || inst->tx_suspended) {
DEBUG_LOG(LOGF_NtpCore, "Packet from offline source");
return 0;
}
return receive_packet(message, now, now_err, inst, local_addr, length);
} else if (proc_as_unknown) {
NCR_ProcessUnknown(message, now, now_err, &inst->remote_addr,
local_addr, length);
/* It's not a reply to our request, don't return success */
return 0;
} else {
DEBUG_LOG(LOGF_NtpCore, "NTP packet discarded pkt_mode=%d our_mode=%d",
pkt_mode, inst->mode);
return 0;
}
}
|
Safe
|
[] |
chrony
|
a78bf9725a7b481ebff0e0c321294ba767f2c1d8
|
2.7985117002425746e+38
| 132 |
ntp: restrict authentication of server/peer to specified key
When a server/peer was specified with a key number to enable
authentication with a symmetric key, packets received from the
server/peer were accepted if they were authenticated with any of
the keys contained in the key file and not just the specified key.
This allowed an attacker who knew one key of a client/peer to modify
packets from its servers/peers that were authenticated with other
keys in a man-in-the-middle (MITM) attack. For example, in a network
where each NTP association had a separate key and all hosts had only
keys they needed, a client of a server could not attack other clients
of the server, but it could attack the server and also attack its own
clients (i.e. modify packets from other servers).
To not allow the server/peer to be authenticated with other keys
extend the authentication test to check if the key ID in the received
packet is equal to the configured key number. As a consequence, it's
no longer possible to authenticate two peers to each other with two
different keys, both peers have to be configured to use the same key.
This issue was discovered by Matt Street of Cisco ASIG.
| 0 |
has_tablespace_privilege_name_name(PG_FUNCTION_ARGS)
{
Name username = PG_GETARG_NAME(0);
text *tablespacename = PG_GETARG_TEXT_P(1);
text *priv_type_text = PG_GETARG_TEXT_P(2);
Oid roleid;
Oid tablespaceoid;
AclMode mode;
AclResult aclresult;
roleid = get_role_oid_or_public(NameStr(*username));
tablespaceoid = convert_tablespace_name(tablespacename);
mode = convert_tablespace_priv_string(priv_type_text);
aclresult = pg_tablespace_aclcheck(tablespaceoid, roleid, mode);
PG_RETURN_BOOL(aclresult == ACLCHECK_OK);
}
|
Safe
|
[
"CWE-264"
] |
postgres
|
fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0
|
8.074535515086095e+37
| 18 |
Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060
| 0 |
uint32_t enc_untrusted_qe_get_target_info(sgx_target_info_t *qe_target_info) {
uint32_t result;
CHECK_OCALL(ocall_enc_untrusted_qe_get_target_info(&result, qe_target_info));
return result;
}
|
Safe
|
[
"CWE-200",
"CWE-668"
] |
asylo
|
ecfcd0008b6f8f63c6fa3cc1b62fcd4a52f2c0ad
|
1.7903949123248527e+38
| 5 |
Store untrusted input to enclave variable
The untrusted input pointer should be stored to trusted variable before
checking to avoid unexpected modifications after checking.
PiperOrigin-RevId: 362553830
Change-Id: I743f9bd3487de60269e247d74f2188f2ffc06d01
| 0 |
struct aac_driver_ident* aac_get_driver_ident(int devtype)
{
return &aac_drivers[devtype];
}
|
Safe
|
[
"CWE-284",
"CWE-264"
] |
linux
|
f856567b930dfcdbc3323261bf77240ccdde01f5
|
6.260515967274772e+37
| 4 |
aacraid: missing capable() check in compat ioctl
In commit d496f94d22d1 ('[SCSI] aacraid: fix security weakness') we
added a check on CAP_SYS_RAWIO to the ioctl. The compat ioctls need the
check as well.
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *req = data;
u32 status = ntohl(req->a_res.status);
if (RPC_ASSASSINATED(task))
goto die;
if (task->tk_status < 0) {
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
goto retry_rebind;
}
if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
rpc_delay(task, NLMCLNT_GRACE_WAIT);
goto retry_unlock;
}
if (status != NLM_LCK_GRANTED)
printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
return;
retry_rebind:
nlm_rebind_host(req->a_host);
retry_unlock:
rpc_restart_call(task);
}
|
Vulnerable
|
[
"CWE-400",
"CWE-399",
"CWE-703"
] |
linux
|
0b760113a3a155269a3fba93a409c640031dd68f
|
1.1238790797595283e+38
| 25 |
NLM: Don't hang forever on NLM unlock requests
If the NLM daemon is killed on the NFS server, we can currently end up
hanging forever on an 'unlock' request, instead of aborting. Basically,
if the rpcbind request fails, or the server keeps returning garbage, we
really want to quit instead of retrying.
Tested-by: Vasily Averin <vvs@sw.ru>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@kernel.org
| 1 |
yang_check_leaflist(struct lys_module *module, struct lys_node_leaflist *leaflist, int options,
struct unres_schema *unres)
{
int i, j;
if (yang_fill_type(module, &leaflist->type, (struct yang_type *)leaflist->type.der, leaflist, unres)) {
yang_type_free(module->ctx, &leaflist->type);
goto error;
}
if (yang_check_iffeatures(module, NULL, leaflist, LEAF_LIST_KEYWORD, unres)) {
yang_type_free(module->ctx, &leaflist->type);
goto error;
}
if (unres_schema_add_node(module, unres, &leaflist->type, UNRES_TYPE_DER, (struct lys_node *)leaflist) == -1) {
yang_type_free(module->ctx, &leaflist->type);
goto error;
}
for (i = 0; i < leaflist->dflt_size; ++i) {
/* check for duplicity in case of configuration data,
* in case of status data duplicities are allowed */
if (leaflist->flags & LYS_CONFIG_W) {
for (j = i +1; j < leaflist->dflt_size; ++j) {
if (ly_strequal(leaflist->dflt[i], leaflist->dflt[j], 1)) {
LOGVAL(module->ctx, LYE_INARG, LY_VLOG_LYS, leaflist, leaflist->dflt[i], "default");
LOGVAL(module->ctx, LYE_SPEC, LY_VLOG_LYS, leaflist, "Duplicated default value \"%s\".", leaflist->dflt[i]);
goto error;
}
}
}
/* check default value (if not defined, there still could be some restrictions
* that need to be checked against a default value from a derived type) */
if (!(module->ctx->models.flags & LY_CTX_TRUSTED) &&
(unres_schema_add_node(module, unres, &leaflist->type, UNRES_TYPE_DFLT,
(struct lys_node *)(&leaflist->dflt[i])) == -1)) {
goto error;
}
}
if (leaflist->when && yang_check_ext_instance(module, &leaflist->when->ext, leaflist->when->ext_size, leaflist->when, unres)) {
goto error;
}
if (yang_check_must(module, leaflist->must, leaflist->must_size, unres)) {
goto error;
}
/* check XPath dependencies */
if (!(module->ctx->models.flags & LY_CTX_TRUSTED) && (leaflist->when || leaflist->must_size)) {
if (options & LYS_PARSE_OPT_INGRP) {
if (lyxp_node_check_syntax((struct lys_node *)leaflist)) {
goto error;
}
} else {
if (unres_schema_add_node(module, unres, leaflist, UNRES_XPATH, NULL) == -1) {
goto error;
}
}
}
return EXIT_SUCCESS;
error:
return EXIT_FAILURE;
}
|
Safe
|
[
"CWE-415"
] |
libyang
|
d9feacc4a590d35dbc1af21caf9080008b4450ed
|
7.273786678197699e+37
| 65 |
yang parser BUGFIX double free
Fixes #742
| 0 |
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
if (trans->trans_cfg->mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_umac_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
return;
}
/*
* The IVAR table needs to be configured again after reset,
* but if the device is disabled, we can't write to
* prph.
*/
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
* Each cause from the causes list above and the RX causes is
* represented as a byte in the IVAR table. The first nibble
* represents the bound interrupt vector of the cause, the second
* represents no auto clear for this cause. This will be set if its
* interrupt vector is bound to serve other causes.
*/
iwl_pcie_map_rx_causes(trans);
iwl_pcie_map_non_rx_causes(trans);
}
|
Safe
|
[
"CWE-476"
] |
linux
|
8188a18ee2e48c9a7461139838048363bfce3fef
|
2.6843929526833787e+38
| 30 |
iwlwifi: pcie: fix rb_allocator workqueue allocation
We don't handle failures in the rb_allocator workqueue allocation
correctly. To fix that, move the code earlier so the cleanup is
easier and we don't have to undo all the interrupt allocations in
this case.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
| 0 |
/*
 * Parse a %%staves / %%score pseudo-comment and fill the 'staves' array
 * with one entry per referenced voice, recording the bracket/brace/
 * parenthesis grouping in each entry's 'flags'.
 *
 * Nesting is tracked two ways: counters (brace/bracket/parenth) bound the
 * depth, while 'flags_st' is a small stack of one-byte OPEN_* markers
 * (pushed with "<<= 8; |=", popped with ">>= 8") used to match closers
 * against the most recent opener.
 *
 * On any syntax error, 'err' is set and, at the end, every flags field
 * filled so far is cleared so the bad definition is ignored.
 */
static void parse_staves(struct SYMBOL *s,
			struct staff_s *staves)
{
	char *p;
	int voice, flags_st, brace, bracket, parenth, err;
	short flags;
	struct staff_s *p_staff;

	/* define the voices */
	err = 0;
	flags = 0;
	brace = bracket = parenth = 0;
	flags_st = 0;
	voice = 0;
	p = s->text + 7;	/* skip the "%%staves"/"%%score" keyword (7 chars) */
	while (*p != '\0' && !isspace((unsigned char) *p))
		p++;
	while (*p != '\0') {
		switch (*p) {
		case ' ':
		case '\t':
			break;
		case '[':
			/* at most 2 nested bracket/brace levels, none inside '(' */
			if (parenth || brace + bracket >= 2) {
				error(1, s, "Misplaced '[' in %%%%staves");
				err = 1;
				break;
			}
			if (brace + bracket == 0)
				flags |= OPEN_BRACKET;
			else
				flags |= OPEN_BRACKET2;
			bracket++;
			flags_st <<= 8;		/* push opener on the marker stack */
			flags_st |= OPEN_BRACKET;
			break;
		case '{':
			if (parenth || brace || bracket >= 2) {
				error(1, s, "Misplaced '{' in %%%%staves");
				err = 1;
				break;
			}
			if (bracket == 0)
				flags |= OPEN_BRACE;
			else
				flags |= OPEN_BRACE2;
			brace++;
			flags_st <<= 8;
			flags_st |= OPEN_BRACE;
			break;
		case '(':
			/* parentheses do not nest */
			if (parenth) {
				error(1, s, "Misplaced '(' in %%%%staves");
				err = 1;
				break;
			}
			flags |= OPEN_PARENTH;
			parenth++;
			flags_st <<= 8;
			flags_st |= OPEN_PARENTH;
			break;
		case '*':
			/* floating voice marker - only valid directly inside a brace */
			if (brace && !parenth && !(flags & (OPEN_BRACE | OPEN_BRACE2)))
				flags |= FL_VOICE;
			break;
		case '+':
			flags |= MASTER_VOICE;
			break;
		default:
			/* anything else must start a voice identifier */
			if (!isalnum((unsigned char) *p) && *p != '_') {
				error(1, s, "Bad voice ID in %%%%staves");
				err = 1;
				break;
			}
			if (voice >= MAXVOICE) {
				error(1, s, "Too many voices in %%%%staves");
				err = 1;
				break;
			}
			{
				int i, v;
				char sep, *q;

				/* extract the ID (temporarily NUL-terminate it) */
				q = p;
				while (isalnum((unsigned char) *p) || *p == '_')
					p++;
				sep = *p;
				*p = '\0';

				/* search the voice in the voice table */
				v = -1;
				for (i = 0; i < MAXVOICE; i++) {
					if (strcmp(q, voice_tb[i].id) == 0) {
						v = i;
						break;
					}
				}
				if (v < 0) {
					error(1, s,
					      "Voice '%s' of %%%%staves has no symbol",
					      q);
					err = 1;
//					break;
					/* NOTE(review): error recovery parks the
					 * parse on staves[0] without consuming a
					 * slot - staves[0].flags may be clobbered
					 * below; harmless only because all flags
					 * are cleared when err is set */
					p_staff = staves;
				} else {
					p_staff = staves + voice++;
					p_staff->voice = v;
				}
				*p = sep;	/* restore the separator char */
			}
			/* consume the closers/modifiers following the voice ID */
			for ( ; *p != '\0'; p++) {
				switch (*p) {
				case ' ':
				case '\t':
					continue;
				case ']':
					if (!(flags_st & OPEN_BRACKET)) {
						error(1, s,
						      "Misplaced ']' in %%%%staves");
						err = 1;
						break;
					}
					bracket--;
					if (brace + bracket == 0)
						flags |= CLOSE_BRACKET;
					else
						flags |= CLOSE_BRACKET2;
					flags_st >>= 8;		/* pop the matched opener */
					continue;
				case '}':
					if (!(flags_st & OPEN_BRACE)) {
						error(1, s,
						      "Misplaced '}' in %%%%staves");
						err = 1;
						break;
					}
					brace--;
					if (bracket == 0)
						flags |= CLOSE_BRACE;
					else
						flags |= CLOSE_BRACE2;
					flags &= ~FL_VOICE;
					flags_st >>= 8;
					continue;
				case ')':
					if (!(flags_st & OPEN_PARENTH)) {
						error(1, s,
						      "Misplaced ')' in %%%%staves");
						err = 1;
						break;
					}
					parenth--;
					flags |= CLOSE_PARENTH;
					flags_st >>= 8;
					continue;
				case '|':
					flags |= STOP_BAR;
					continue;
				}
				break;
			}
			/* commit the accumulated grouping flags to this voice */
			p_staff->flags = flags;
			flags = 0;
			if (*p == '\0')
				break;
			continue;
		}
		if (*p == '\0')
			break;
		p++;
	}
	/* a non-empty marker stack means some group was never closed */
	if (flags_st != 0) {
		error(1, s, "'}', ')' or ']' missing in %%%%staves");
		err = 1;
	}
	if (err) {
		int i;

		/* drop the whole definition on error */
		for (i = 0; i < voice; i++)
			staves[i].flags = 0;
	}
	/* terminator entry for the callers iterating the array */
	if (voice < MAXVOICE)
		staves[voice].voice = -1;
}
|
Safe
|
[
"CWE-787"
] |
abcm2ps
|
dc0372993674d0b50fedfbf7b9fad1239b8efc5f
|
2.7953189557556933e+38
| 184 |
fix: crash when too many accidentals in K: (signature + explicit)
Issue #17.
| 0 |
/*
  MorphImages() returns a new image sequence in which 'number_frames'
  intermediate frames are inserted between each pair of adjacent frames of
  the input sequence; each intermediate frame is a size- and color-
  interpolated blend of its neighbors.  A single-frame input is simply
  replicated.  Returns the new list, or NULL on failure (with 'exception'
  set); the caller owns the returned list.

  Fix over the previous revision: the SetImageStorageClass() failure path
  destroyed only the frame being built and leaked the partially-built
  result list; 'status' was also assigned before ever being initialized in
  the single-frame branch.
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag  "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;  /* initialize before either branch may assign it */
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: replicate it number_frames times.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /* blend weights: beta ramps 0 -> 1 across the intermediate frames */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /* destroy the frame under construction AND the list built so
             far; the list was previously leaked on this path */
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /* second resize: the *next* frame scaled to the intermediate size */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          /* per-channel blend: q = alpha*q + beta*p */
          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(morph_image,i);
            PixelTrait traits=GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if (((morph_traits & CopyPixelTrait) != 0) ||
                (GetPixelReadMask(morph_images,p) == 0))
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  /* loop exited early (row-blend failure): discard the partial result */
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
|
Safe
|
[
"CWE-119",
"CWE-703"
] |
ImageMagick
|
3cbfb163cff9e5b8cdeace8312e9bfee810ed02b
|
3.1719665307117217e+38
| 205 |
https://github.com/ImageMagick/ImageMagick/issues/296
| 0 |
// Reserve up to `size` bytes of writable space at the tail of this slice.
// Returns a {pointer, length} pair; the pair is {nullptr, 0} when the
// request is empty or no tail capacity remains.  The returned length may
// be smaller than requested.
Reservation reserve(uint64_t size) {
  if (size == 0) {
    return {nullptr, 0};
  }
  // drain() guarantees that a fully-drained slice has its data section at
  // the very start; assert that invariant here.
  ASSERT(!(dataSize() == 0 && data_ > 0));
  const uint64_t tail_room = capacity_ - reservable_;
  if (tail_room == 0) {
    return {nullptr, 0};
  }
  const uint64_t granted = std::min(size, tail_room);
  return {&(base_[reservable_]), static_cast<size_t>(granted)};
}
|
Safe
|
[
"CWE-401"
] |
envoy
|
5eba69a1f375413fb93fab4173f9c393ac8c2818
|
3.350736255763516e+38
| 16 |
[buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <avd@google.com>
| 0 |
/*
 * Dump the contents of the current VMCS to the kernel log for debugging:
 * guest state, host state, and the VM-execution/entry/exit control fields.
 * Reads fields directly with vmcs_read*(), so the VMCS of interest must be
 * loaded on this CPU.  Optional fields are printed only when the relevant
 * control bit indicates they are in use.
 */
static void dump_vmcs(void)
{
	u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
	u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
	u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
	u32 secondary_exec_control = 0;
	unsigned long cr4 = vmcs_readl(GUEST_CR4);
	u64 efer = vmcs_read64(GUEST_IA32_EFER);
	int i, n;

	/* secondary controls exist only when the primary controls enable them */
	if (cpu_has_secondary_exec_ctrls())
		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);

	pr_err("*** Guest State ***\n");
	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
	       vmcs_readl(CR0_GUEST_HOST_MASK));
	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
	/* PDPTRs are only relevant with EPT + PAE paging outside long mode */
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
	    (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
	{
		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
	}
	pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
	       vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
	pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
	       vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
	       vmcs_readl(GUEST_SYSENTER_ESP),
	       vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
	/* segment registers and descriptor tables */
	vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
	vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
	vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
	vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
	vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
	vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
	vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
	vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
	/* EFER/PAT are valid only when saved on exit or loaded on entry */
	if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
	    (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
		pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
		       efer, vmcs_read64(GUEST_IA32_PAT));
	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
	       vmcs_read64(GUEST_IA32_DEBUGCTL),
	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
	if (cpu_has_load_perf_global_ctrl &&
	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
		pr_err("PerfGlobCtl = 0x%016llx\n",
		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
	pr_err("Interruptibility = %08x  ActivityState = %08x\n",
	       vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
	       vmcs_read32(GUEST_ACTIVITY_STATE));
	if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
		pr_err("InterruptStatus = %04x\n",
		       vmcs_read16(GUEST_INTR_STATUS));

	pr_err("*** Host State ***\n");
	pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
	       vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
	pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
	       vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
	       vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
	       vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
	       vmcs_read16(HOST_TR_SELECTOR));
	pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
	       vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
	       vmcs_readl(HOST_TR_BASE));
	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
	       vmcs_readl(HOST_CR4));
	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
	       vmcs_readl(HOST_IA32_SYSENTER_ESP),
	       vmcs_read32(HOST_IA32_SYSENTER_CS),
	       vmcs_readl(HOST_IA32_SYSENTER_EIP));
	if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
		pr_err("EFER = 0x%016llx  PAT = 0x%016llx\n",
		       vmcs_read64(HOST_IA32_EFER),
		       vmcs_read64(HOST_IA32_PAT));
	if (cpu_has_load_perf_global_ctrl &&
	    vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		pr_err("PerfGlobCtl = 0x%016llx\n",
		       vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));

	pr_err("*** Control State ***\n");
	pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
	       pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
	pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
	       vmcs_read32(EXCEPTION_BITMAP),
	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
	       vmcs_read32(VM_EXIT_INTR_INFO),
	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
	pr_err("        reason=%08x qualification=%016lx\n",
	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
		pr_err("TSC Multiplier = 0x%016llx\n",
		       vmcs_read64(TSC_MULTIPLIER));
	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
		pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
	/* CR3 target values: print four per pair of lines, plus a remainder */
	n = vmcs_read32(CR3_TARGET_COUNT);
	for (i = 0; i + 1 < n; i += 4)
		pr_err("CR3 target%u=%016lx target%u=%016lx\n",
		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
		       i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
	if (i < n)
		pr_err("CR3 target%u=%016lx\n",
		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
		pr_err("PLE Gap=%08x Window=%08x\n",
		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
		pr_err("Virtual processor ID = 0x%04x\n",
		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
}
|
Safe
|
[
"CWE-284"
] |
linux
|
727ba748e110b4de50d142edca9d6a9b7e6111d8
|
3.226334179893698e+38
| 141 |
kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: stable@vger.kernel.org
Signed-off-by: Felix Wilhelm <fwilhelm@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/*
 * Return a frozen string with the same contents, class, taint and
 * encoding as 'orig'.  An already-frozen string is returned unchanged.
 * For a shared string whose frozen root is fully compatible (same class,
 * taint and encoding, zero offset), the root itself is returned;
 * otherwise a new shared view or copy is created and frozen.
 */
rb_str_new_frozen(VALUE orig)
{
    VALUE klass, str;

    if (OBJ_FROZEN(orig)) return orig;
    klass = rb_obj_class(orig);
    if (STR_SHARED_P(orig) && (str = RSTRING(orig)->as.heap.aux.shared)) {
	long ofs;
	/* 'str' is the frozen root this string shares its buffer with */
	assert(OBJ_FROZEN(str));
	ofs = RSTRING_LEN(str) - RSTRING_LEN(orig);
	/* the root can only stand in for 'orig' when the lengths match and
	 * class/taint/encoding agree; otherwise build a new shared view
	 * adjusted by 'ofs' */
	if ((ofs > 0) || (klass != RBASIC(str)->klass) ||
	    (!OBJ_TAINTED(str) && OBJ_TAINTED(orig)) ||
	    ENCODING_GET(str) != ENCODING_GET(orig)) {
	    str = str_new3(klass, str);
	    RSTRING(str)->as.heap.ptr += ofs;
	    RSTRING(str)->as.heap.len -= ofs;
	    rb_enc_cr_str_exact_copy(str, orig);
	    OBJ_INFECT(str, orig);
	}
    }
    else if (STR_EMBED_P(orig)) {
	/* embedded strings are small: plain copy is cheapest */
	str = str_new(klass, RSTRING_PTR(orig), RSTRING_LEN(orig));
	rb_enc_cr_str_exact_copy(str, orig);
	OBJ_INFECT(str, orig);
    }
    else if (STR_ASSOC_P(orig)) {
	/* preserve the association data while turning 'orig' into the
	 * shared root of the new frozen copy */
	VALUE assoc = RSTRING(orig)->as.heap.aux.shared;
	FL_UNSET(orig, STR_ASSOC);
	str = str_new4(klass, orig);
	FL_SET(str, STR_ASSOC);
	RSTRING(str)->as.heap.aux.shared = assoc;
    }
    else {
	str = str_new4(klass, orig);
    }
    OBJ_FREEZE(str);
    return str;
}
|
Safe
|
[
"CWE-119"
] |
ruby
|
1c2ef610358af33f9ded3086aa2d70aac03dcac5
|
1.982218117042146e+38
| 38 |
* string.c (rb_str_justify): CVE-2009-4124.
Fixes a bug reported by
Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London;
Patch by nobu.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
| 0 |
DIR *dd_init_next_file(struct dump_dir *dd)
{
// if (!dd->locked)
// error_msg_and_die("dump_dir is not opened"); /* bug */
if (dd->next_dir)
closedir(dd->next_dir);
dd->next_dir = opendir(dd->dd_dirname);
if (!dd->next_dir)
{
error_msg("Can't open directory '%s'", dd->dd_dirname);
}
return dd->next_dir;
}
|
Vulnerable
|
[
"CWE-20"
] |
libreport
|
1951e7282043dfe1268d492aea056b554baedb75
|
1.928746730366323e+38
| 16 |
lib: fix races in dump directory handling code
Florian Weimer <fweimer@redhat.com>:
dd_opendir() should keep a file handle (opened with O_DIRECTORY) and
use openat() and similar functions to access files in it.
...
The file system manipulation functions should guard against hard
links (check that link count is <= 1, just as in the user coredump
code in abrt-hook-ccpp), possibly after opening the file
with O_PATH first to avoid side effects on open/close.
Related: #1214745
Signed-off-by: Jakub Filak <jfilak@redhat.com>
| 1 |
/*
 * Apply or remove a byte-range lock on a CIFS file, using either the
 * POSIX lock protocol extension (posix_lck) or mandatory SMB locks.
 * For mandatory locks the lock is recorded locally (cifs_lock_add) only
 * after the server accepts it.  Returns 0 on success or a negative errno.
 */
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = cfile->dentry->d_inode;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		/* done on local success (0) or hard error; a positive rc
		 * presumably means the lock must still be sent to the
		 * server - confirm against cifs_posix_lock_set */
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/* check for conflicts with locks we already hold; rc == 0
		 * with no conflict falls through to the server call */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_invalidate_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted the lock: track it locally */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	/* keep the VFS's local POSIX lock state in sync */
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
5d81de8e8667da7135d3a32a964087c0faf5483f
|
2.1367590016842295e+38
| 78 |
cifs: ensure that uncached writes handle unmapped areas correctly
It's possible for userland to pass down an iovec via writev() that has a
bogus user pointer in it. If that happens and we're doing an uncached
write, then we can end up getting less bytes than we expect from the
call to iov_iter_copy_from_user. This is CVE-2014-0069
cifs_iovec_write isn't set up to handle that situation however. It'll
blindly keep chugging through the page array and not filling those pages
with anything useful. Worse yet, we'll later end up with a negative
number in wdata->tailsz, which will confuse the sending routines and
cause an oops at the very least.
Fix this by having the copy phase of cifs_iovec_write stop copying data
in this situation and send the last write as a short one. At the same
time, we want to avoid sending a zero-length write to the server, so
break out of the loop and set rc to -EFAULT if that happens. This also
allows us to handle the case where no address in the iovec is valid.
[Note: Marking this for stable on v3.4+ kernels, but kernels as old as
v2.6.38 may have a similar problem and may need similar fix]
Cc: <stable@vger.kernel.org> # v3.4+
Reviewed-by: Pavel Shilovsky <piastry@etersoft.ru>
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <smfrench@gmail.com>
| 0 |
//
// Compute how many tile levels exist along the X axis for the given
// tile mode and data window [minX,maxX] x [minY,maxY].  ONE_LEVEL has a
// single level; MIPMAP_LEVELS uses the larger of width/height; RIPMAP
// counts only the width.  Throws ArgExc for an unrecognized mode.
//
calculateNumXLevels (const TileDescription& tileDesc,
                     int minX, int maxX,
                     int minY, int maxY)
{
    int numLevels = 0;

    switch (tileDesc.mode)
    {
      case ONE_LEVEL:

        numLevels = 1;
        break;

      case MIPMAP_LEVELS:
      {
        int width  = maxX - minX + 1;
        int height = maxY - minY + 1;
        numLevels = roundLog2 (std::max (width, height),
                               tileDesc.roundingMode) + 1;
        break;
      }

      case RIPMAP_LEVELS:

        numLevels = roundLog2 (maxX - minX + 1, tileDesc.roundingMode) + 1;
        break;

      default:

        throw IEX_NAMESPACE::ArgExc ("Unknown LevelMode format.");
    }

    return numLevels;
}
|
Safe
|
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
|
2.626052280494461e+38
| 37 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
| 0 |
/*
 * Interrupt handler run when a SEEK command completes.  On the expected
 * controller reply the drive's cached track is updated and the pending
 * request continues via floppy_ready(); anything else marks the drive
 * for recalibration and retries through the continuation handlers.
 */
static void seek_interrupt(void)
{
	int new_track;

	debugt(__func__, "");

	/* accept only the expected reply pattern (inr == 2, masked ST0 == 0x20) */
	if (inr != 2 || (reply_buffer[ST0] & 0xF8) != 0x20) {
		DPRINT("seek failed\n");
		drive_state[current_drive].track = NEED_2_RECAL;
		cont->error();
		cont->redo();
		return;
	}

	new_track = reply_buffer[ST1];
	if (!blind_seek &&
	    drive_state[current_drive].track >= 0 &&
	    drive_state[current_drive].track != new_track) {
		debug_dcl(drive_params[current_drive].flags,
			  "clearing NEWCHANGE flag because of effective seek\n");
		debug_dcl(drive_params[current_drive].flags, "jiffies=%lu\n",
			  jiffies);
		/* the head actually moved, so the disk-change state is stale */
		clear_bit(FD_DISK_NEWCHANGE_BIT,
			  &drive_state[current_drive].flags);
		drive_state[current_drive].select_date = jiffies; /* effective seek */
	}
	drive_state[current_drive].track = new_track;
	floppy_ready();
}
|
Safe
|
[
"CWE-416"
] |
linux
|
233087ca063686964a53c829d547c7571e3f67bf
|
1.5143485354224532e+37
| 25 |
floppy: disable FDRAWCMD by default
Minh Yuan reported a concurrency use-after-free issue in the floppy code
between raw_cmd_ioctl and seek_interrupt.
[ It turns out this has been around, and that others have reported the
KASAN splats over the years, but Minh Yuan had a reproducer for it and
so gets primary credit for reporting it for this fix - Linus ]
The problem is, this driver tends to break very easily and nowadays,
nobody is expected to use FDRAWCMD anyway since it was used to
manipulate non-standard formats. The risk of breaking the driver is
higher than the risk presented by this race, and accessing the device
requires privileges anyway.
Let's just add a config option to completely disable this ioctl and
leave it disabled by default. Distros shouldn't use it, and only those
running on antique hardware might need to enable it.
Link: https://lore.kernel.org/all/000000000000b71cdd05d703f6bf@google.com/
Link: https://lore.kernel.org/lkml/CAKcFiNC=MfYVW-Jt9A3=FPJpTwCD2PL_ULNCpsCVE5s8ZeBQgQ@mail.gmail.com
Link: https://lore.kernel.org/all/CAEAjamu1FRhz6StCe_55XY5s389ZP_xmCF69k987En+1z53=eg@mail.gmail.com
Reported-by: Minh Yuan <yuanmingbuaa@gmail.com>
Reported-by: syzbot+8e8958586909d62b6840@syzkaller.appspotmail.com
Reported-by: cruise k <cruise4k@gmail.com>
Reported-by: Kyungtae Kim <kt0755@gmail.com>
Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org>
Tested-by: Denis Efremov <efremov@linux.com>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
//
// Read the tile block stored next in the file sequentially (no seek).
// Outputs the tile coordinates (dx,dy), level (lx,ly) and the compressed
// pixel data (written into 'buffer', length in 'dataSize').  Throws
// InputExc on a part-number mismatch or an oversized data block.
// The caller must hold streamData's lock.
//
readNextTileData (InputStreamMutex *streamData,
                  TiledInputFile::Data *ifd,
		  int &dx, int &dy,
		  int &lx, int &ly,
                  char * & buffer,
                  int &dataSize)
{
    //
    // Read the next tile block from the file
    //

    if(isMultiPart(ifd->version))
    {
        // multi-part files prefix every chunk with its part number;
        // it must match the part this reader was opened for
        int part;
        Xdr::read <StreamIO> (*streamData->is, part);
        if(part!=ifd->partNumber)
        {
            throw IEX_NAMESPACE::InputExc("Unexpected part number in readNextTileData");
        }
    }

    //
    // Read the first few bytes of the tile (the header).
    //

    Xdr::read <StreamIO> (*streamData->is, dx);
    Xdr::read <StreamIO> (*streamData->is, dy);
    Xdr::read <StreamIO> (*streamData->is, lx);
    Xdr::read <StreamIO> (*streamData->is, ly);
    Xdr::read <StreamIO> (*streamData->is, dataSize);

    // reject blocks larger than the preallocated tile buffer before reading
    if (dataSize > (int) ifd->tileBufferSize)
        throw IEX_NAMESPACE::InputExc ("Unexpected tile block length.");

    //
    // Read the pixel data.
    //

    streamData->is->read (buffer, dataSize);

    //
    // Keep track of which tile is the next one in
    // the file, so that we can avoid redundant seekg()
    // operations (seekg() can be fairly expensive).
    //

    streamData->currentPosition += 5 * Xdr::size<int>() + dataSize;
}
|
Safe
|
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
|
2.402382778585203e+37
| 48 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
| 0 |
/*
 * Emit the conntrack status bits as a CTA_STATUS attribute on the
 * netlink message being built.  Returns 0 on success, -1 when the
 * attribute does not fit in the skb.
 */
static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	/* status is kept host-endian; the attribute is big-endian on the wire */
	return nla_put_be32(skb, CTA_STATUS, htonl(ct->status)) ? -1 : 0;
}
|
Safe
|
[
"CWE-120"
] |
linux
|
1cc5ef91d2ff94d2bf2de3b3585423e8a1051cb6
|
2.737555150273612e+38
| 9 |
netfilter: ctnetlink: add a range check for l3/l4 protonum
The indexes to the nf_nat_l[34]protos arrays come from userspace. So
check the tuple's family, e.g. l3num, when creating the conntrack in
order to prevent an OOB memory access during setup. Here is an example
kernel panic on 4.14.180 when userspace passes in an index greater than
NFPROTO_NUMPROTO.
Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
Modules linked in:...
Process poc (pid: 5614, stack limit = 0x00000000a3933121)
CPU: 4 PID: 5614 Comm: poc Tainted: G S W O 4.14.180-g051355490483
Hardware name: Qualcomm Technologies, Inc. SM8150 V2 PM8150 Google Inc. MSM
task: 000000002a3dfffe task.stack: 00000000a3933121
pc : __cfi_check_fail+0x1c/0x24
lr : __cfi_check_fail+0x1c/0x24
...
Call trace:
__cfi_check_fail+0x1c/0x24
name_to_dev_t+0x0/0x468
nfnetlink_parse_nat_setup+0x234/0x258
ctnetlink_parse_nat_setup+0x4c/0x228
ctnetlink_new_conntrack+0x590/0xc40
nfnetlink_rcv_msg+0x31c/0x4d4
netlink_rcv_skb+0x100/0x184
nfnetlink_rcv+0xf4/0x180
netlink_unicast+0x360/0x770
netlink_sendmsg+0x5a0/0x6a4
___sys_sendmsg+0x314/0x46c
SyS_sendmsg+0xb4/0x108
el0_svc_naked+0x34/0x38
This crash is not happening since 5.4+, however, ctnetlink still
allows for creating entries with unsupported layer 3 protocol number.
Fixes: c1d10adb4a521 ("[NETFILTER]: Add ctnetlink port for nf_conntrack")
Signed-off-by: Will McVicker <willmcvicker@google.com>
[pablo@netfilter.org: rebased original patch on top of nf.git]
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
| 0 |
static ssize_t disk_discard_alignment_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
}
|
Safe
|
[
"CWE-416"
] |
linux-stable
|
77da160530dd1dc94f6ae15a981f24e5f0021e84
|
6.696767986351794e+37
| 8 |
block: fix use-after-free in seq file
I got a KASAN report of use-after-free:
==================================================================
BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508
Read of size 8 by task trinity-c1/315
=============================================================================
BUG kmalloc-32 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315
___slab_alloc+0x4f1/0x520
__slab_alloc.isra.58+0x56/0x80
kmem_cache_alloc_trace+0x260/0x2a0
disk_seqf_start+0x66/0x110
traverse+0x176/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315
__slab_free+0x17a/0x2c0
kfree+0x20a/0x220
disk_seqf_stop+0x42/0x50
traverse+0x3b5/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480
ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480
ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970
Call Trace:
[<ffffffff81d6ce81>] dump_stack+0x65/0x84
[<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0
[<ffffffff814704ff>] object_err+0x2f/0x40
[<ffffffff814754d1>] kasan_report_error+0x221/0x520
[<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40
[<ffffffff83888161>] klist_iter_exit+0x61/0x70
[<ffffffff82404389>] class_dev_iter_exit+0x9/0x10
[<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50
[<ffffffff8151f812>] seq_read+0x4b2/0x11a0
[<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180
[<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210
[<ffffffff814b4c45>] do_readv_writev+0x565/0x660
[<ffffffff814b8a17>] vfs_readv+0x67/0xa0
[<ffffffff814b8de6>] do_preadv+0x126/0x170
[<ffffffff814b92ec>] SyS_preadv+0xc/0x10
This problem can occur in the following situation:
open()
- pread()
- .seq_start()
- iter = kmalloc() // succeeds
- seqf->private = iter
- .seq_stop()
- kfree(seqf->private)
- pread()
- .seq_start()
- iter = kmalloc() // fails
- .seq_stop()
- class_dev_iter_exit(seqf->private) // boom! old pointer
As the comment in disk_seqf_stop() says, stop is called even if start
failed, so we need to reinitialise the private pointer to NULL when seq
iteration stops.
An alternative would be to set the private pointer to NULL when the
kmalloc() in disk_seqf_start() fails.
Cc: stable@vger.kernel.org
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
| 0 |
/*
 * libssh2_userauth_hostbased_fromfile_ex
 *
 * Public API entry point for "hostbased" authentication using public/
 * private key files.  Thin wrapper around userauth_hostbased_fromfile():
 * the BLOCK_ADJUST macro evaluates the call and stores its result in rc
 * (presumably retrying while the session is in blocking mode and the
 * inner call would block - confirm against the BLOCK_ADJUST definition).
 * Returns 0 on success or a negative libssh2 error code.
 */
libssh2_userauth_hostbased_fromfile_ex(LIBSSH2_SESSION *session,
                                       const char *user,
                                       unsigned int user_len,
                                       const char *publickey,
                                       const char *privatekey,
                                       const char *passphrase,
                                       const char *host,
                                       unsigned int host_len,
                                       const char *localuser,
                                       unsigned int localuser_len)
{
    int rc;
    BLOCK_ADJUST(rc, session,
                 userauth_hostbased_fromfile(session, user, user_len,
                                             publickey, privatekey,
                                             passphrase, host, host_len,
                                             localuser, localuser_len));
    return rc;
}
|
Safe
|
[
"CWE-787"
] |
libssh2
|
dc109a7f518757741590bb993c0c8412928ccec2
|
2.184941249230855e+38
| 19 |
Security fixes (#315)
* Bounds checks
Fixes for CVEs
https://www.libssh2.org/CVE-2019-3863.html
https://www.libssh2.org/CVE-2019-3856.html
* Packet length bounds check
CVE
https://www.libssh2.org/CVE-2019-3855.html
* Response length check
CVE
https://www.libssh2.org/CVE-2019-3859.html
* Bounds check
CVE
https://www.libssh2.org/CVE-2019-3857.html
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
and additional data validation
* Check bounds before reading into buffers
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
* declare SIZE_MAX and UINT_MAX if needed
| 0 |
// Construct an AsyncMessenger connection in its initial idle state
// (STATE_NONE, not yet writable).  Buffers and per-connection event
// handlers are allocated here; the connection becomes live only once it
// is registered with the worker's event center.
// NOTE(review): keep the member-initializer list in declaration order to
// avoid -Wreorder surprises.
AsyncConnection::AsyncConnection(CephContext *cct, AsyncMessenger *m, DispatchQueue *q,
Worker *w)
: Connection(cct, m), delay_state(NULL), async_msgr(m), conn_id(q->get_id()),
logger(w->get_perf_counter()), global_seq(0), connect_seq(0), peer_global_seq(0),
state(STATE_NONE), state_after_send(STATE_NONE), port(-1),
dispatch_queue(q), can_write(WriteStatus::NOWRITE),
keepalive(false), recv_buf(NULL),
// prefetch size is clamped to at least TCP_PREFETCH_MIN_SIZE
recv_max_prefetch(MAX(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)),
recv_start(0), recv_end(0),
last_active(ceph::coarse_mono_clock::now()),
// config value is in seconds; converted here to microseconds
inactive_timeout_us(cct->_conf->ms_tcp_read_timeout*1000*1000),
got_bad_auth(false), authorizer(NULL), replacing(false),
is_reset_from_peer(false), once_ready(false), state_buffer(NULL), state_offset(0),
worker(w), center(&w->center)
{
// Callback objects dispatched from the owning worker's event center.
read_handler = new C_handle_read(this);
write_handler = new C_handle_write(this);
wakeup_handler = new C_time_wakeup(this);
tick_handler = new C_tick_wakeup(this);
memset(msgvec, 0, sizeof(msgvec));
// double recv_max_prefetch see "read_until"
recv_buf = new char[2*recv_max_prefetch];
// scratch buffer for protocol-state reads; 4096 bytes -- TODO confirm this
// bound against the largest single state read performed by the protocol code
state_buffer = new char[4096];
logger->inc(l_msgr_created_connections);
}
|
Safe
|
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
|
1.3544248155764546e+38
| 25 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <sage@redhat.com>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
| 0 |
/*
 * Look up a named member inside a structure object.
 *
 * object     - must be non-NULL and of type OBJECT_TYPE_STRUCTURE
 *              (both enforced by assert).
 * field_name - identifier to match exactly (case-sensitive strcmp).
 *
 * Returns the member's object on a match, or NULL when no member with
 * that identifier exists.  Linear scan of the member list.
 */
YR_OBJECT* yr_object_lookup_field(
    YR_OBJECT* object,
    const char* field_name)
{
  YR_STRUCTURE_MEMBER* member;

  assert(object != NULL);
  assert(object->type == OBJECT_TYPE_STRUCTURE);

  for (member = object_as_structure(object)->members;
       member != NULL;
       member = member->next)
  {
    if (strcmp(member->object->identifier, field_name) == 0)
      return member->object;
  }

  return NULL;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
yara
|
4a342f01e5439b9bb901aff1c6c23c536baeeb3f
|
2.6807753577156906e+38
| 21 |
Fix heap overflow (reported by Jurriaan Bremer)
When setting a new array item with yr_object_array_set_item() the array size is doubled if the index for the new item is larger than the already allocated ones. No further checks were made to ensure that the index fits into the array after doubling its capacity. If the array capacity was for example 64, and a new object is assigned to an index larger than 128 the overflow occurs. As yr_object_array_set_item() is usually invoked with indexes that increase monotonically by one, this bug never triggered before. But the new "dotnet" module has the potential to allow the exploitation of this bug by scanning a specially crafted .NET binary.
| 0 |
// Host-header forgery was detected: the Host: value (A) does not match what
// DNS/intercept validation expected (B).  Behavior depends on configuration:
//  - with host_verify_strict off (and not a CONNECT request): log at level 3,
//    mark the request non-cacheable and non-hierarchical, and let processing
//    continue via doCallouts();
//  - otherwise: log a SECURITY ALERT and reject the request with an
//    ERR_CONFLICT_HOST / 409 Conflict error reply.
ClientRequestContext::hostHeaderVerifyFailed(const char *A, const char *B)
{
// IP address validation for Host: failed. Admin wants to ignore them.
// NP: we do not yet handle CONNECT tunnels well, so ignore for them
if (!Config.onoff.hostStrictVerify && http->request->method != Http::METHOD_CONNECT) {
debugs(85, 3, "SECURITY ALERT: Host header forgery detected on " << http->getConn()->clientConnection <<
" (" << A << " does not match " << B << ") on URL: " << http->request->effectiveRequestUri());
// NP: it is tempting to use 'flags.noCache' but that is all about READing cache data.
// The problems here are about WRITE for new cache content, which means flags.cachable
http->request->flags.cachable = false; // MUST NOT cache (for now)
// XXX: when we have updated the cache key to base on raw-IP + URI this cacheable limit can go.
http->request->flags.hierarchical = false; // MUST NOT pass to peers (for now)
// XXX: when we have sorted out the best way to relay requests properly to peers this hierarchical limit can go.
http->doCallouts();
return;
}
// Strict mode: report the forgery (plus the User-Agent when present) ...
debugs(85, DBG_IMPORTANT, "SECURITY ALERT: Host header forgery detected on " <<
http->getConn()->clientConnection << " (" << A << " does not match " << B << ")");
if (const char *ua = http->request->header.getStr(Http::HdrType::USER_AGENT))
debugs(85, DBG_IMPORTANT, "SECURITY ALERT: By user agent: " << ua);
debugs(85, DBG_IMPORTANT, "SECURITY ALERT: on URL: " << http->request->effectiveRequestUri());
// IP address validation for Host: failed. reject the connection.
// ... then build an error reply on the tail client-stream node.
clientStreamNode *node = (clientStreamNode *)http->client_stream.tail->prev->data;
clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
assert (repContext);
repContext->setReplyToError(ERR_CONFLICT_HOST, Http::scConflict,
http->request->method, NULL,
http->getConn()->clientConnection->remote,
http->request,
NULL,
#if USE_AUTH
// prefer the connection's auth state; fall back to the request's
http->getConn() != NULL && http->getConn()->getAuth() != NULL ?
http->getConn()->getAuth() : http->request->auth_user_request);
#else
NULL);
#endif
// kick the stream so the error reply is actually delivered
node = (clientStreamNode *)http->client_stream.tail->data;
clientStreamRead(node, http, node->readBuffer);
}
|
Safe
|
[
"CWE-116"
] |
squid
|
6bf66733c122804fada7f5839ef5f3b57e57591c
|
8.71934466985976e+37
| 42 |
Handle more Range requests (#790)
Also removed some effectively unused code.
| 0 |
/*
 * Draw the software cursor by rewriting the character/attribute cell at the
 * current cursor position.  No-op unless the cursor type requests a software
 * cursor (CUR_SW) and none is currently drawn (softcursor_original == -1).
 * The original cell value is saved in softcursor_original so it can be
 * restored when the cursor moves or is hidden.
 */
static void add_softcursor(struct vc_data *vc)
{
int i = scr_readw((u16 *) vc->vc_pos);
u32 type = vc->vc_cursor_type;
if (!(type & CUR_SW))
return;
/* a software cursor is already drawn at this cell */
if (softcursor_original != -1)
return;
softcursor_original = i;
/* apply the set/toggle masks encoded in the cursor type -- the exact bit
 * layout comes from the CUR_SET/CUR_CHANGE macros (defined elsewhere) */
i |= CUR_SET(type);
i ^= CUR_CHANGE(type);
/* CUR_ALWAYS_BG: force a visible background change if the masks left it equal */
if ((type & CUR_ALWAYS_BG) &&
(softcursor_original & CUR_BG) == (i & CUR_BG))
i ^= CUR_BG;
/* CUR_INVERT_FG_BG: ensure foreground differs from background */
if ((type & CUR_INVERT_FG_BG) && (i & CUR_FG) == ((i & CUR_BG) >> 4))
i ^= CUR_FG;
scr_writew(i, (u16 *)vc->vc_pos);
/* push the modified cell to the driver only when the console is visible */
if (con_should_update(vc))
vc->vc_sw->con_putc(vc, i, vc->state.y, vc->state.x);
}
|
Safe
|
[
"CWE-125"
] |
linux
|
3c4e0dff2095c579b142d5a0693257f1c58b4804
|
2.342929367029726e+37
| 21 |
vt: Disable KD_FONT_OP_COPY
It's buggy:
On Fri, Nov 06, 2020 at 10:30:08PM +0800, Minh Yuan wrote:
> We recently discovered a slab-out-of-bounds read in fbcon in the latest
> kernel ( v5.10-rc2 for now ). The root cause of this vulnerability is that
> "fbcon_do_set_font" did not handle "vc->vc_font.data" and
> "vc->vc_font.height" correctly, and the patch
> <https://lkml.org/lkml/2020/9/27/223> for VT_RESIZEX can't handle this
> issue.
>
> Specifically, we use KD_FONT_OP_SET to set a small font.data for tty6, and
> use KD_FONT_OP_SET again to set a large font.height for tty1. After that,
> we use KD_FONT_OP_COPY to assign tty6's vc_font.data to tty1's vc_font.data
> in "fbcon_do_set_font", while tty1 retains the original larger
> height. Obviously, this will cause an out-of-bounds read, because we can
> access a smaller vc_font.data with a larger vc_font.height.
Further there was only one user ever.
- Android's loadfont, busybox and console-tools only ever use OP_GET
and OP_SET
- fbset documentation only mentions the kernel cmdline font: option,
not anything else.
- systemd used OP_COPY before release 232 published in Nov 2016
Now unfortunately the crucial report seems to have gone down with
gmane, and the commit message doesn't say much. But the pull request
hints at OP_COPY being broken
https://github.com/systemd/systemd/pull/3651
So in other words, this never worked, and the only project which
foolishly every tried to use it, realized that rather quickly too.
Instead of trying to fix security issues here on dead code by adding
missing checks, fix the entire thing by removing the functionality.
Note that systemd code using the OP_COPY function ignored the return
value, so it doesn't matter what we're doing here really - just in
case a lone server somewhere happens to be extremely unlucky and
running an affected old version of systemd. The relevant code from
font_copy_to_all_vcs() in systemd was:
/* copy font from active VT, where the font was uploaded to */
cfo.op = KD_FONT_OP_COPY;
cfo.height = vcs.v_active-1; /* tty1 == index 0 */
(void) ioctl(vcfd, KDFONTOP, &cfo);
Note this just disables the ioctl, garbage collecting the now unused
callbacks is left for -next.
v2: Tetsuo found the old mail, which allowed me to find it on another
archive. Add the link too.
Acked-by: Peilin Ye <yepeilin.cs@gmail.com>
Reported-by: Minh Yuan <yuanmingbuaa@gmail.com>
References: https://lists.freedesktop.org/archives/systemd-devel/2016-June/036935.html
References: https://github.com/systemd/systemd/pull/3651
Cc: Greg KH <greg@kroah.com>
Cc: Peilin Ye <yepeilin.cs@gmail.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://lore.kernel.org/r/20201108153806.3140315-1-daniel.vetter@ffwll.ch
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
/*
 * Decide the allocation strategy for a data allocation request: per-inode
 * preallocation, stream (inode) allocation, or per-CPU locality-group
 * allocation.  Sets the corresponding EXT4_MB_* hint flags in ac->ac_flags
 * and, when group allocation is chosen, takes the locality group's mutex
 * (released later by the allocator teardown path).
 */
static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
int bsbits = ac->ac_sb->s_blocksize_bits;
loff_t size, isize;
/* only data allocations participate in this policy */
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
return;
if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
return;
/* size = end of the requested extent (blocks); isize = i_size in blocks */
size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;
/* request covers the whole file, fs is idle and nobody holds it open for
 * write: skip preallocation entirely */
if ((size == isize) &&
!ext4_fs_is_busy(sbi) &&
(atomic_read(&ac->ac_inode->i_writecount) == 0)) {
ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
return;
}
/* group preallocation disabled by tunable: always use stream allocation */
if (sbi->s_mb_group_prealloc <= 0) {
ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
return;
}
/* don't use group allocation for large files */
size = max(size, isize);
if (size > sbi->s_mb_stream_request) {
ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
return;
}
BUG_ON(ac->ac_lg != NULL);
/*
 * locality group prealloc space are per cpu. The reason for having
 * per cpu locality group is to reduce the contention between block
 * request from multiple CPUs.
 */
ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
/* we're going to use group allocation */
ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
/* serialize all allocations in the group */
mutex_lock(&ac->ac_lg->lg_mutex);
}
|
Safe
|
[
"CWE-416"
] |
linux
|
8844618d8aa7a9973e7b527d038a2a589665002c
|
1.529494813154045e+38
| 49 |
ext4: only look at the bg_flags field if it is valid
The bg_flags field in the block group descripts is only valid if the
uninit_bg or metadata_csum feature is enabled. We were not
consistently looking at this field; fix this.
Also block group #0 must never have uninitialized allocation bitmaps,
or need to be zeroed, since that's where the root inode, and other
special inodes are set up. Check for these conditions and mark the
file system as corrupted if they are detected.
This addresses CVE-2018-10876.
https://bugzilla.kernel.org/show_bug.cgi?id=199403
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@kernel.org
| 0 |
/*
 * walk_memory_blocks - invoke func() on every present memory block that
 * overlaps the physical range [start, start + size).
 *
 * Blocks without a registered memory_block are silently skipped.  The
 * device reference taken by find_memory_block_by_id() is dropped after
 * each callback.  Iteration stops early when func() returns non-zero,
 * and that value is returned; otherwise 0.  A zero-sized range is a
 * no-op returning 0.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long first_id = phys_to_block_id(start);
	const unsigned long last_id = phys_to_block_id(start + size - 1);
	unsigned long id;
	int ret = 0;

	if (!size)
		return 0;

	for (id = first_id; id <= last_id; id++) {
		struct memory_block *mem = find_memory_block_by_id(id);

		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}

	return ret;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
|
2.3462206266982556e+38
| 24 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf family calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <joe@perches.com>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
/*
 * CTXLoad_CheckStreams - post-load pass over all streams of the context:
 *  - flags streams referenced by the root OD (in_root_od),
 *  - defaults missing timescales to 1000,
 *  - converts AU timing given in seconds into timescale units,
 *  - counts AUs that actually carry commands,
 *  - derives the scene duration from the last AU of each root-OD stream,
 *  - propagates remote-URL / EOS state to the output PID depending on
 *    priv->load_flags.
 */
static void CTXLoad_CheckStreams(CTXLoadPriv *priv )
{
	u32 i, j, max_dur;
	GF_StreamContext *sc;
	u32 nb_aus = 0;

	max_dur = 0;
	i = 0;
	while ((sc = (GF_StreamContext *)gf_list_enum(priv->ctx->streams, &i))) {
		GF_AUContext *au;
		GF_AUContext *last_au = NULL;

		/*all streams in root OD are handled with ESID 0 to differentiate with any animation streams*/
		if (CTXLoad_StreamInRootOD(priv->ctx->root_od, sc->ESID)) sc->in_root_od = GF_TRUE;
		if (!sc->timeScale) sc->timeScale = 1000;

		j = 0;
		while ((au = (GF_AUContext *)gf_list_enum(sc->AUs, &j))) {
			if (!au->timing) au->timing = (u64) (sc->timeScale * au->timing_sec);
			if (gf_list_count(au->commands))
				nb_aus++;
			last_au = au;
		}
		/* BUG FIX: the previous code tested `au` here, but gf_list_enum()
		 * returns NULL once the list is exhausted, so that branch was dead
		 * and the duration was never derived.  Use the last enumerated AU.
		 * NOTE(review): the comparison mixes timescale units (timing) with
		 * milliseconds (max_dur) exactly as the original did -- TODO confirm
		 * the intended units before changing it further. */
		if (last_au && sc->in_root_od && (last_au->timing > max_dur))
			max_dur = (u32) (last_au->timing * 1000 / sc->timeScale);
	}

	if (max_dur) {
		priv->scene->root_od->duration = max_dur;
		gf_scene_set_duration(priv->scene);
	}
	/* still resolving a remote root OD: forward its URL downstream */
	if ((priv->load_flags==1) && priv->ctx->root_od && priv->ctx->root_od->URLString) {
		gf_filter_pid_set_property(priv->out_pid, GF_PROP_PID_REMOTE_URL, &PROP_STRING(priv->ctx->root_od->URLString) );
	}
	/* fully loaded but nothing to send: signal end of stream */
	if ((priv->load_flags==2) && !nb_aus) {
		gf_filter_pid_set_eos(priv->out_pid);
	}
}
|
Safe
|
[
"CWE-276"
] |
gpac
|
96699aabae042f8f55cf8a85fa5758e3db752bae
|
2.9299281165410803e+37
| 32 |
fixed #2061
| 0 |
/*
 * v86d_store - sysfs store handler for the userspace helper path.
 *
 * Copies the written value into the fixed-size v86d_path buffer and
 * reports the whole input as consumed.
 *
 * BUG FIX: strncpy(dst, src, PATH_MAX) does not NUL-terminate when the
 * input is PATH_MAX bytes or longer (CERT STR32-C), leaving v86d_path
 * unterminated for every later reader.  Copy at most PATH_MAX - 1 bytes
 * and terminate explicitly.
 */
static ssize_t v86d_store(struct device_driver *dev, const char *buf,
			  size_t count)
{
	strncpy(v86d_path, buf, PATH_MAX - 1);
	v86d_path[PATH_MAX - 1] = '\0';
	return count;
}
|
Safe
|
[
"CWE-190"
] |
linux
|
9f645bcc566a1e9f921bdae7528a01ced5bc3713
|
1.522700994909163e+38
| 6 |
video: uvesafb: Fix integer overflow in allocation
cmap->len can get close to INT_MAX/2, allowing for an integer overflow in
allocation. This uses kmalloc_array() instead to catch the condition.
Reported-by: Dr Silvio Cesare of InfoSect <silvio.cesare@gmail.com>
Fixes: 8bdb3a2d7df48 ("uvesafb: the driver core")
Cc: stable@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.