Column types: CVE ID (string, 13–43 chars, nullable); CVE Page (string, 45–48 chars, nullable); CWE ID (string, 90 classes); codeLink (string, 46–139 chars); commit_id (string, 6–81 chars); commit_message (string, 3–13.3k chars, nullable); func_after (string, 14–241k chars); func_before (string, 14–241k chars); lang (string, 3 classes); project (string, 309 classes); vul (int8, 0 or 1).
CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul |
---|---|---|---|---|---|---|---|---|---|---|
CVE-2012-2136
|
https://www.cvedetails.com/cve/CVE-2012-2136/
|
CWE-20
|
https://github.com/torvalds/linux/commit/cc9b17ad29ecaa20bfe426a8d4dbfb94b13ff1cc
|
cc9b17ad29ecaa20bfe426a8d4dbfb94b13ff1cc
|
net: sock: validate data_len before allocating skb in sock_alloc_send_pskb()
We need to validate the number of pages consumed by data_len, otherwise the frags
array could be overflowed by userspace. So this patch validates data_len and
returns -EMSGSIZE when data_len would occupy more frags than MAX_SKB_FRAGS.
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
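A minimal sketch of the check described above, assuming the standard kernel PAGE_SIZE/PAGE_SHIFT and MAX_SKB_FRAGS definitions (an illustration of the approach, not the verbatim upstream diff):
/* Sketch: count the pages the paged part of the skb would need and
 * bail out before the frags array can be overflowed. */
int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (npages > MAX_SKB_FRAGS)
        return ERR_PTR(-EMSGSIZE);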
|
__acquires(proto_list_mutex)
{
mutex_lock(&proto_list_mutex);
return seq_list_start_head(&proto_list, *pos);
}
|
__acquires(proto_list_mutex)
{
mutex_lock(&proto_list_mutex);
return seq_list_start_head(&proto_list, *pos);
}
|
C
|
linux
| 0 |
CVE-2011-2351
|
https://www.cvedetails.com/cve/CVE-2011-2351/
|
CWE-399
|
https://github.com/chromium/chromium/commit/bf381d8a02c3d272d4dd879ac719d8993dfb5ad6
|
bf381d8a02c3d272d4dd879ac719d8993dfb5ad6
|
Enable HistoryModelWorker by default, now that bug 69561 is fixed.
BUG=69561
TEST=Run sync manually and run integration tests, sync should not crash.
Review URL: http://codereview.chromium.org/7016007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85211 0039d316-1c4b-4281-b951-d872f2087c98
|
void SyncBackendHost::Core::RemoveParentJsEventRouter() {
DCHECK_EQ(MessageLoop::current(), host_->frontend_loop_);
parent_router_ = NULL;
MessageLoop* core_message_loop = host_->core_thread_.message_loop();
CHECK(core_message_loop);
core_message_loop->PostTask(
FROM_HERE,
NewRunnableMethod(this,
&SyncBackendHost::Core::DisconnectChildJsEventRouter));
}
|
void SyncBackendHost::Core::RemoveParentJsEventRouter() {
DCHECK_EQ(MessageLoop::current(), host_->frontend_loop_);
parent_router_ = NULL;
MessageLoop* core_message_loop = host_->core_thread_.message_loop();
CHECK(core_message_loop);
core_message_loop->PostTask(
FROM_HERE,
NewRunnableMethod(this,
&SyncBackendHost::Core::DisconnectChildJsEventRouter));
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/511d0a0a31a54e0cc0f15cb1b977dc9f9b20f0d3
|
511d0a0a31a54e0cc0f15cb1b977dc9f9b20f0d3
|
Implement new websocket handshake based on draft-hixie-thewebsocketprotocol-76
BUG=none
TEST=net_unittests passes
Review URL: http://codereview.chromium.org/1108002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42736 0039d316-1c4b-4281-b951-d872f2087c98
|
void WebSocket::OnError(const SocketStream* socket_stream, int error) {
origin_loop_->PostTask(FROM_HERE,
NewRunnableMethod(this, &WebSocket::DoError, error));
}
|
void WebSocket::OnError(const SocketStream* socket_stream, int error) {
origin_loop_->PostTask(FROM_HERE,
NewRunnableMethod(this, &WebSocket::DoError, error));
}
|
C
|
Chrome
| 0 |
CVE-2018-1000877
|
https://www.cvedetails.com/cve/CVE-2018-1000877/
|
CWE-415
|
https://github.com/libarchive/libarchive/pull/1105/commits/021efa522ad729ff0f5806c4ce53e4a6cc1daa31
|
021efa522ad729ff0f5806c4ce53e4a6cc1daa31
|
Avoid a double-free when a window size of 0 is specified
new_size can be 0 with a malicious or corrupted RAR archive.
realloc(area, 0) is equivalent to free(area), so the region would
be free()d here and then free()d again in the cleanup function.
Found with a setup running AFL, afl-rb, and qsym.
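The pattern is easy to reproduce outside libarchive; below is a self-contained sketch (hypothetical function and variable names) of both the bug and the guard:
#include <stdlib.h>
/* If new_size == 0, realloc() may behave like free(area) and return NULL;
 * a later cleanup that calls free(area) again is then a double-free.
 * Rejecting a zero size up front, as the fix does, avoids that. */
static int resize_window(unsigned char **area, size_t new_size)
{
    unsigned char *p;
    if (new_size == 0)
        return -1;              /* refuse: would act as free(*area) */
    p = realloc(*area, new_size);
    if (p == NULL)
        return -1;              /* *area is still valid; cleanup frees it once */
    *area = p;
    return 0;
}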
|
read_header(struct archive_read *a, struct archive_entry *entry,
char head_type)
{
const void *h;
const char *p, *endp;
struct rar *rar;
struct rar_header rar_header;
struct rar_file_header file_header;
int64_t header_size;
unsigned filename_size, end;
char *filename;
char *strp;
char packed_size[8];
char unp_size[8];
int ttime;
struct archive_string_conv *sconv, *fn_sconv;
unsigned long crc32_val;
int ret = (ARCHIVE_OK), ret2;
rar = (struct rar *)(a->format->data);
/* Setup a string conversion object for non-rar-unicode filenames. */
sconv = rar->opt_sconv;
if (sconv == NULL) {
if (!rar->init_default_conversion) {
rar->sconv_default =
archive_string_default_conversion_for_read(
&(a->archive));
rar->init_default_conversion = 1;
}
sconv = rar->sconv_default;
}
if ((h = __archive_read_ahead(a, 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
memcpy(&rar_header, p, sizeof(rar_header));
rar->file_flags = archive_le16dec(rar_header.flags);
header_size = archive_le16dec(rar_header.size);
if (header_size < (int64_t)sizeof(file_header) + 7) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
crc32_val = crc32(0, (const unsigned char *)p + 2, 7 - 2);
__archive_read_consume(a, 7);
if (!(rar->file_flags & FHD_SOLID))
{
rar->compression_method = 0;
rar->packed_size = 0;
rar->unp_size = 0;
rar->mtime = 0;
rar->ctime = 0;
rar->atime = 0;
rar->arctime = 0;
rar->mode = 0;
memset(&rar->salt, 0, sizeof(rar->salt));
rar->atime = 0;
rar->ansec = 0;
rar->ctime = 0;
rar->cnsec = 0;
rar->mtime = 0;
rar->mnsec = 0;
rar->arctime = 0;
rar->arcnsec = 0;
}
else
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR solid archive support unavailable.");
return (ARCHIVE_FATAL);
}
if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
/* File Header CRC check. */
crc32_val = crc32(crc32_val, h, (unsigned)(header_size - 7));
if ((crc32_val & 0xffff) != archive_le16dec(rar_header.crc)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Header CRC error");
return (ARCHIVE_FATAL);
}
/* If no CRC error, Go on parsing File Header. */
p = h;
endp = p + header_size - 7;
memcpy(&file_header, p, sizeof(file_header));
p += sizeof(file_header);
rar->compression_method = file_header.method;
ttime = archive_le32dec(file_header.file_time);
rar->mtime = get_time(ttime);
rar->file_crc = archive_le32dec(file_header.file_crc);
if (rar->file_flags & FHD_PASSWORD)
{
archive_entry_set_is_data_encrypted(entry, 1);
rar->has_encrypted_entries = 1;
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR encryption support unavailable.");
/* Since it is only the data part itself that is encrypted we can at least
extract information about the currently processed entry and don't need
to return ARCHIVE_FATAL here. */
/*return (ARCHIVE_FATAL);*/
}
if (rar->file_flags & FHD_LARGE)
{
memcpy(packed_size, file_header.pack_size, 4);
memcpy(packed_size + 4, p, 4); /* High pack size */
p += 4;
memcpy(unp_size, file_header.unp_size, 4);
memcpy(unp_size + 4, p, 4); /* High unpack size */
p += 4;
rar->packed_size = archive_le64dec(&packed_size);
rar->unp_size = archive_le64dec(&unp_size);
}
else
{
rar->packed_size = archive_le32dec(file_header.pack_size);
rar->unp_size = archive_le32dec(file_header.unp_size);
}
if (rar->packed_size < 0 || rar->unp_size < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid sizes specified.");
return (ARCHIVE_FATAL);
}
rar->bytes_remaining = rar->packed_size;
/* TODO: RARv3 subblocks contain comments. For now the complete block is
* consumed at the end.
*/
if (head_type == NEWSUB_HEAD) {
size_t distance = p - (const char *)h;
header_size += rar->packed_size;
/* Make sure we have the extended data. */
if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
endp = p + header_size - 7;
p += distance;
}
filename_size = archive_le16dec(file_header.name_size);
if (p + filename_size > endp) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid filename size");
return (ARCHIVE_FATAL);
}
if (rar->filename_allocated < filename_size * 2 + 2) {
char *newptr;
size_t newsize = filename_size * 2 + 2;
newptr = realloc(rar->filename, newsize);
if (newptr == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->filename = newptr;
rar->filename_allocated = newsize;
}
filename = rar->filename;
memcpy(filename, p, filename_size);
filename[filename_size] = '\0';
if (rar->file_flags & FHD_UNICODE)
{
if (filename_size != strlen(filename))
{
unsigned char highbyte, flagbits, flagbyte;
unsigned fn_end, offset;
end = filename_size;
fn_end = filename_size * 2;
filename_size = 0;
offset = (unsigned)strlen(filename) + 1;
highbyte = *(p + offset++);
flagbits = 0;
flagbyte = 0;
while (offset < end && filename_size < fn_end)
{
if (!flagbits)
{
flagbyte = *(p + offset++);
flagbits = 8;
}
flagbits -= 2;
switch((flagbyte >> flagbits) & 3)
{
case 0:
filename[filename_size++] = '\0';
filename[filename_size++] = *(p + offset++);
break;
case 1:
filename[filename_size++] = highbyte;
filename[filename_size++] = *(p + offset++);
break;
case 2:
filename[filename_size++] = *(p + offset + 1);
filename[filename_size++] = *(p + offset);
offset += 2;
break;
case 3:
{
char extra, high;
uint8_t length = *(p + offset++);
if (length & 0x80) {
extra = *(p + offset++);
high = (char)highbyte;
} else
extra = high = 0;
length = (length & 0x7f) + 2;
while (length && filename_size < fn_end) {
unsigned cp = filename_size >> 1;
filename[filename_size++] = high;
filename[filename_size++] = p[cp] + extra;
length--;
}
}
break;
}
}
if (filename_size > fn_end) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid filename");
return (ARCHIVE_FATAL);
}
filename[filename_size++] = '\0';
/*
* Do not increment filename_size here as the computations below
* add the space for the terminating NUL explicitly.
*/
filename[filename_size] = '\0';
/* Decoded unicode form is UTF-16BE, so we have to update a string
* conversion object for it. */
if (rar->sconv_utf16be == NULL) {
rar->sconv_utf16be = archive_string_conversion_from_charset(
&a->archive, "UTF-16BE", 1);
if (rar->sconv_utf16be == NULL)
return (ARCHIVE_FATAL);
}
fn_sconv = rar->sconv_utf16be;
strp = filename;
while (memcmp(strp, "\x00\x00", 2))
{
if (!memcmp(strp, "\x00\\", 2))
*(strp + 1) = '/';
strp += 2;
}
p += offset;
} else {
/*
* If FHD_UNICODE is set but no unicode data, this file name form
* is UTF-8, so we have to update a string conversion object for
* it accordingly.
*/
if (rar->sconv_utf8 == NULL) {
rar->sconv_utf8 = archive_string_conversion_from_charset(
&a->archive, "UTF-8", 1);
if (rar->sconv_utf8 == NULL)
return (ARCHIVE_FATAL);
}
fn_sconv = rar->sconv_utf8;
while ((strp = strchr(filename, '\\')) != NULL)
*strp = '/';
p += filename_size;
}
}
else
{
fn_sconv = sconv;
while ((strp = strchr(filename, '\\')) != NULL)
*strp = '/';
p += filename_size;
}
/* Split file in multivolume RAR. No more need to process header. */
if (rar->filename_save &&
filename_size == rar->filename_save_size &&
!memcmp(rar->filename, rar->filename_save, filename_size + 1))
{
__archive_read_consume(a, header_size - 7);
rar->cursor++;
if (rar->cursor >= rar->nodes)
{
rar->nodes++;
if ((rar->dbo =
realloc(rar->dbo, sizeof(*rar->dbo) * rar->nodes)) == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->dbo[rar->cursor].header_size = header_size;
rar->dbo[rar->cursor].start_offset = -1;
rar->dbo[rar->cursor].end_offset = -1;
}
if (rar->dbo[rar->cursor].start_offset < 0)
{
rar->dbo[rar->cursor].start_offset = a->filter->position;
rar->dbo[rar->cursor].end_offset = rar->dbo[rar->cursor].start_offset +
rar->packed_size;
}
return ret;
}
rar->filename_save = (char*)realloc(rar->filename_save,
filename_size + 1);
memcpy(rar->filename_save, rar->filename, filename_size + 1);
rar->filename_save_size = filename_size;
/* Set info for seeking */
free(rar->dbo);
if ((rar->dbo = calloc(1, sizeof(*rar->dbo))) == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->dbo[0].header_size = header_size;
rar->dbo[0].start_offset = -1;
rar->dbo[0].end_offset = -1;
rar->cursor = 0;
rar->nodes = 1;
if (rar->file_flags & FHD_SALT)
{
if (p + 8 > endp) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
memcpy(rar->salt, p, 8);
p += 8;
}
if (rar->file_flags & FHD_EXTTIME) {
if (read_exttime(p, rar, endp) < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
}
__archive_read_consume(a, header_size - 7);
rar->dbo[0].start_offset = a->filter->position;
rar->dbo[0].end_offset = rar->dbo[0].start_offset + rar->packed_size;
switch(file_header.host_os)
{
case OS_MSDOS:
case OS_OS2:
case OS_WIN32:
rar->mode = archive_le32dec(file_header.file_attr);
if (rar->mode & FILE_ATTRIBUTE_DIRECTORY)
rar->mode = AE_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
else
rar->mode = AE_IFREG;
rar->mode |= S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
break;
case OS_UNIX:
case OS_MAC_OS:
case OS_BEOS:
rar->mode = archive_le32dec(file_header.file_attr);
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unknown file attributes from RAR file's host OS");
return (ARCHIVE_FATAL);
}
rar->bytes_uncopied = rar->bytes_unconsumed = 0;
rar->lzss.position = rar->offset = 0;
rar->offset_seek = 0;
rar->dictionary_size = 0;
rar->offset_outgoing = 0;
rar->br.cache_avail = 0;
rar->br.avail_in = 0;
rar->crc_calculated = 0;
rar->entry_eof = 0;
rar->valid = 1;
rar->is_ppmd_block = 0;
rar->start_new_table = 1;
free(rar->unp_buffer);
rar->unp_buffer = NULL;
rar->unp_offset = 0;
rar->unp_buffer_size = UNP_BUFFER_SIZE;
memset(rar->lengthtable, 0, sizeof(rar->lengthtable));
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context);
rar->ppmd_valid = rar->ppmd_eod = 0;
/* Don't set any archive entries for non-file header types */
if (head_type == NEWSUB_HEAD)
return ret;
archive_entry_set_mtime(entry, rar->mtime, rar->mnsec);
archive_entry_set_ctime(entry, rar->ctime, rar->cnsec);
archive_entry_set_atime(entry, rar->atime, rar->ansec);
archive_entry_set_size(entry, rar->unp_size);
archive_entry_set_mode(entry, rar->mode);
if (archive_entry_copy_pathname_l(entry, filename, filename_size, fn_sconv))
{
if (errno == ENOMEM)
{
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Pathname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(fn_sconv));
ret = (ARCHIVE_WARN);
}
if (((rar->mode) & AE_IFMT) == AE_IFLNK)
{
/* Make sure a symbolic-link file does not have its body. */
rar->bytes_remaining = 0;
archive_entry_set_size(entry, 0);
/* Read a symbolic-link name. */
if ((ret2 = read_symlink_stored(a, entry, sconv)) < (ARCHIVE_WARN))
return ret2;
if (ret > ret2)
ret = ret2;
}
if (rar->bytes_remaining == 0)
rar->entry_eof = 1;
return ret;
}
|
read_header(struct archive_read *a, struct archive_entry *entry,
char head_type)
{
const void *h;
const char *p, *endp;
struct rar *rar;
struct rar_header rar_header;
struct rar_file_header file_header;
int64_t header_size;
unsigned filename_size, end;
char *filename;
char *strp;
char packed_size[8];
char unp_size[8];
int ttime;
struct archive_string_conv *sconv, *fn_sconv;
unsigned long crc32_val;
int ret = (ARCHIVE_OK), ret2;
rar = (struct rar *)(a->format->data);
/* Setup a string conversion object for non-rar-unicode filenames. */
sconv = rar->opt_sconv;
if (sconv == NULL) {
if (!rar->init_default_conversion) {
rar->sconv_default =
archive_string_default_conversion_for_read(
&(a->archive));
rar->init_default_conversion = 1;
}
sconv = rar->sconv_default;
}
if ((h = __archive_read_ahead(a, 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
memcpy(&rar_header, p, sizeof(rar_header));
rar->file_flags = archive_le16dec(rar_header.flags);
header_size = archive_le16dec(rar_header.size);
if (header_size < (int64_t)sizeof(file_header) + 7) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
crc32_val = crc32(0, (const unsigned char *)p + 2, 7 - 2);
__archive_read_consume(a, 7);
if (!(rar->file_flags & FHD_SOLID))
{
rar->compression_method = 0;
rar->packed_size = 0;
rar->unp_size = 0;
rar->mtime = 0;
rar->ctime = 0;
rar->atime = 0;
rar->arctime = 0;
rar->mode = 0;
memset(&rar->salt, 0, sizeof(rar->salt));
rar->atime = 0;
rar->ansec = 0;
rar->ctime = 0;
rar->cnsec = 0;
rar->mtime = 0;
rar->mnsec = 0;
rar->arctime = 0;
rar->arcnsec = 0;
}
else
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR solid archive support unavailable.");
return (ARCHIVE_FATAL);
}
if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
/* File Header CRC check. */
crc32_val = crc32(crc32_val, h, (unsigned)(header_size - 7));
if ((crc32_val & 0xffff) != archive_le16dec(rar_header.crc)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Header CRC error");
return (ARCHIVE_FATAL);
}
/* If no CRC error, Go on parsing File Header. */
p = h;
endp = p + header_size - 7;
memcpy(&file_header, p, sizeof(file_header));
p += sizeof(file_header);
rar->compression_method = file_header.method;
ttime = archive_le32dec(file_header.file_time);
rar->mtime = get_time(ttime);
rar->file_crc = archive_le32dec(file_header.file_crc);
if (rar->file_flags & FHD_PASSWORD)
{
archive_entry_set_is_data_encrypted(entry, 1);
rar->has_encrypted_entries = 1;
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR encryption support unavailable.");
/* Since it is only the data part itself that is encrypted we can at least
extract information about the currently processed entry and don't need
to return ARCHIVE_FATAL here. */
/*return (ARCHIVE_FATAL);*/
}
if (rar->file_flags & FHD_LARGE)
{
memcpy(packed_size, file_header.pack_size, 4);
memcpy(packed_size + 4, p, 4); /* High pack size */
p += 4;
memcpy(unp_size, file_header.unp_size, 4);
memcpy(unp_size + 4, p, 4); /* High unpack size */
p += 4;
rar->packed_size = archive_le64dec(&packed_size);
rar->unp_size = archive_le64dec(&unp_size);
}
else
{
rar->packed_size = archive_le32dec(file_header.pack_size);
rar->unp_size = archive_le32dec(file_header.unp_size);
}
if (rar->packed_size < 0 || rar->unp_size < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid sizes specified.");
return (ARCHIVE_FATAL);
}
rar->bytes_remaining = rar->packed_size;
/* TODO: RARv3 subblocks contain comments. For now the complete block is
* consumed at the end.
*/
if (head_type == NEWSUB_HEAD) {
size_t distance = p - (const char *)h;
header_size += rar->packed_size;
/* Make sure we have the extended data. */
if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
endp = p + header_size - 7;
p += distance;
}
filename_size = archive_le16dec(file_header.name_size);
if (p + filename_size > endp) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid filename size");
return (ARCHIVE_FATAL);
}
if (rar->filename_allocated < filename_size * 2 + 2) {
char *newptr;
size_t newsize = filename_size * 2 + 2;
newptr = realloc(rar->filename, newsize);
if (newptr == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->filename = newptr;
rar->filename_allocated = newsize;
}
filename = rar->filename;
memcpy(filename, p, filename_size);
filename[filename_size] = '\0';
if (rar->file_flags & FHD_UNICODE)
{
if (filename_size != strlen(filename))
{
unsigned char highbyte, flagbits, flagbyte;
unsigned fn_end, offset;
end = filename_size;
fn_end = filename_size * 2;
filename_size = 0;
offset = (unsigned)strlen(filename) + 1;
highbyte = *(p + offset++);
flagbits = 0;
flagbyte = 0;
while (offset < end && filename_size < fn_end)
{
if (!flagbits)
{
flagbyte = *(p + offset++);
flagbits = 8;
}
flagbits -= 2;
switch((flagbyte >> flagbits) & 3)
{
case 0:
filename[filename_size++] = '\0';
filename[filename_size++] = *(p + offset++);
break;
case 1:
filename[filename_size++] = highbyte;
filename[filename_size++] = *(p + offset++);
break;
case 2:
filename[filename_size++] = *(p + offset + 1);
filename[filename_size++] = *(p + offset);
offset += 2;
break;
case 3:
{
char extra, high;
uint8_t length = *(p + offset++);
if (length & 0x80) {
extra = *(p + offset++);
high = (char)highbyte;
} else
extra = high = 0;
length = (length & 0x7f) + 2;
while (length && filename_size < fn_end) {
unsigned cp = filename_size >> 1;
filename[filename_size++] = high;
filename[filename_size++] = p[cp] + extra;
length--;
}
}
break;
}
}
if (filename_size > fn_end) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid filename");
return (ARCHIVE_FATAL);
}
filename[filename_size++] = '\0';
/*
* Do not increment filename_size here as the computations below
* add the space for the terminating NUL explicitly.
*/
filename[filename_size] = '\0';
/* Decoded unicode form is UTF-16BE, so we have to update a string
* conversion object for it. */
if (rar->sconv_utf16be == NULL) {
rar->sconv_utf16be = archive_string_conversion_from_charset(
&a->archive, "UTF-16BE", 1);
if (rar->sconv_utf16be == NULL)
return (ARCHIVE_FATAL);
}
fn_sconv = rar->sconv_utf16be;
strp = filename;
while (memcmp(strp, "\x00\x00", 2))
{
if (!memcmp(strp, "\x00\\", 2))
*(strp + 1) = '/';
strp += 2;
}
p += offset;
} else {
/*
* If FHD_UNICODE is set but no unicode data, this file name form
* is UTF-8, so we have to update a string conversion object for
* it accordingly.
*/
if (rar->sconv_utf8 == NULL) {
rar->sconv_utf8 = archive_string_conversion_from_charset(
&a->archive, "UTF-8", 1);
if (rar->sconv_utf8 == NULL)
return (ARCHIVE_FATAL);
}
fn_sconv = rar->sconv_utf8;
while ((strp = strchr(filename, '\\')) != NULL)
*strp = '/';
p += filename_size;
}
}
else
{
fn_sconv = sconv;
while ((strp = strchr(filename, '\\')) != NULL)
*strp = '/';
p += filename_size;
}
/* Split file in multivolume RAR. No more need to process header. */
if (rar->filename_save &&
filename_size == rar->filename_save_size &&
!memcmp(rar->filename, rar->filename_save, filename_size + 1))
{
__archive_read_consume(a, header_size - 7);
rar->cursor++;
if (rar->cursor >= rar->nodes)
{
rar->nodes++;
if ((rar->dbo =
realloc(rar->dbo, sizeof(*rar->dbo) * rar->nodes)) == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->dbo[rar->cursor].header_size = header_size;
rar->dbo[rar->cursor].start_offset = -1;
rar->dbo[rar->cursor].end_offset = -1;
}
if (rar->dbo[rar->cursor].start_offset < 0)
{
rar->dbo[rar->cursor].start_offset = a->filter->position;
rar->dbo[rar->cursor].end_offset = rar->dbo[rar->cursor].start_offset +
rar->packed_size;
}
return ret;
}
rar->filename_save = (char*)realloc(rar->filename_save,
filename_size + 1);
memcpy(rar->filename_save, rar->filename, filename_size + 1);
rar->filename_save_size = filename_size;
/* Set info for seeking */
free(rar->dbo);
if ((rar->dbo = calloc(1, sizeof(*rar->dbo))) == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->dbo[0].header_size = header_size;
rar->dbo[0].start_offset = -1;
rar->dbo[0].end_offset = -1;
rar->cursor = 0;
rar->nodes = 1;
if (rar->file_flags & FHD_SALT)
{
if (p + 8 > endp) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
memcpy(rar->salt, p, 8);
p += 8;
}
if (rar->file_flags & FHD_EXTTIME) {
if (read_exttime(p, rar, endp) < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
}
__archive_read_consume(a, header_size - 7);
rar->dbo[0].start_offset = a->filter->position;
rar->dbo[0].end_offset = rar->dbo[0].start_offset + rar->packed_size;
switch(file_header.host_os)
{
case OS_MSDOS:
case OS_OS2:
case OS_WIN32:
rar->mode = archive_le32dec(file_header.file_attr);
if (rar->mode & FILE_ATTRIBUTE_DIRECTORY)
rar->mode = AE_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
else
rar->mode = AE_IFREG;
rar->mode |= S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
break;
case OS_UNIX:
case OS_MAC_OS:
case OS_BEOS:
rar->mode = archive_le32dec(file_header.file_attr);
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unknown file attributes from RAR file's host OS");
return (ARCHIVE_FATAL);
}
rar->bytes_uncopied = rar->bytes_unconsumed = 0;
rar->lzss.position = rar->offset = 0;
rar->offset_seek = 0;
rar->dictionary_size = 0;
rar->offset_outgoing = 0;
rar->br.cache_avail = 0;
rar->br.avail_in = 0;
rar->crc_calculated = 0;
rar->entry_eof = 0;
rar->valid = 1;
rar->is_ppmd_block = 0;
rar->start_new_table = 1;
free(rar->unp_buffer);
rar->unp_buffer = NULL;
rar->unp_offset = 0;
rar->unp_buffer_size = UNP_BUFFER_SIZE;
memset(rar->lengthtable, 0, sizeof(rar->lengthtable));
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context);
rar->ppmd_valid = rar->ppmd_eod = 0;
/* Don't set any archive entries for non-file header types */
if (head_type == NEWSUB_HEAD)
return ret;
archive_entry_set_mtime(entry, rar->mtime, rar->mnsec);
archive_entry_set_ctime(entry, rar->ctime, rar->cnsec);
archive_entry_set_atime(entry, rar->atime, rar->ansec);
archive_entry_set_size(entry, rar->unp_size);
archive_entry_set_mode(entry, rar->mode);
if (archive_entry_copy_pathname_l(entry, filename, filename_size, fn_sconv))
{
if (errno == ENOMEM)
{
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Pathname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(fn_sconv));
ret = (ARCHIVE_WARN);
}
if (((rar->mode) & AE_IFMT) == AE_IFLNK)
{
/* Make sure a symbolic-link file does not have its body. */
rar->bytes_remaining = 0;
archive_entry_set_size(entry, 0);
/* Read a symbolic-link name. */
if ((ret2 = read_symlink_stored(a, entry, sconv)) < (ARCHIVE_WARN))
return ret2;
if (ret > ret2)
ret = ret2;
}
if (rar->bytes_remaining == 0)
rar->entry_eof = 1;
return ret;
}
|
C
|
libarchive
| 0 |
CVE-2018-12896
|
https://www.cvedetails.com/cve/CVE-2018-12896/
|
CWE-190
|
https://github.com/torvalds/linux/commit/78c9c4dfbf8c04883941445a195276bb4bb92c76
|
78c9c4dfbf8c04883941445a195276bb4bb92c76
|
posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::it_overrun[_last] 64 bit as well, so the kernel internal
accounting is correct. Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <icytxw@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: John Stultz <john.stultz@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Link: https://lkml.kernel.org/r/20180626132705.018623573@linutronix.de
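A sketch of such a clamping helper (the exact upstream name and signature may differ; s64 and INT_MAX are the usual kernel type and constant):
/* Clamp a 64-bit overrun count to the int range user space expects;
 * INT_MAX doubles as the indicator that the value was clamped. */
static int timer_overrun_to_int(s64 overrun)
{
        if (overrun < 0)
                return 0;
        if (overrun > (s64)INT_MAX)
                return INT_MAX;
        return (int)overrun;
}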
|
void run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
lockdep_assert_irqs_disabled();
/*
* The fast path checks that there are no expired thread or thread
* group timers. If that's so, just return.
*/
if (!fastpath_timer_check(tsk))
return;
if (!lock_task_sighand(tsk, &flags))
return;
/*
* Here we take off tsk->signal->cpu_timers[N] and
* tsk->cpu_timers[N] all the timers that are firing, and
* put them on the firing list.
*/
check_thread_timers(tsk, &firing);
check_process_timers(tsk, &firing);
/*
* We must release these locks before taking any timer's lock.
* There is a potential race with timer deletion here, as the
* siglock now protects our private firing list. We have set
* the firing flag in each timer, so that a deletion attempt
* that gets the timer lock before we do will give it up and
* spin until we've taken care of that timer below.
*/
unlock_task_sighand(tsk, &flags);
/*
* Now that all the timers on our list have the firing flag,
* no one will touch their list entries but us. We'll take
* each timer's lock before clearing its firing flag, so no
* timer call will interfere.
*/
list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
int cpu_firing;
spin_lock(&timer->it_lock);
list_del_init(&timer->it.cpu.entry);
cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
* almost-firing as an overrun. So don't generate an event.
*/
if (likely(cpu_firing >= 0))
cpu_timer_fire(timer);
spin_unlock(&timer->it_lock);
}
}
|
void run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
lockdep_assert_irqs_disabled();
/*
* The fast path checks that there are no expired thread or thread
* group timers. If that's so, just return.
*/
if (!fastpath_timer_check(tsk))
return;
if (!lock_task_sighand(tsk, &flags))
return;
/*
* Here we take off tsk->signal->cpu_timers[N] and
* tsk->cpu_timers[N] all the timers that are firing, and
* put them on the firing list.
*/
check_thread_timers(tsk, &firing);
check_process_timers(tsk, &firing);
/*
* We must release these locks before taking any timer's lock.
* There is a potential race with timer deletion here, as the
* siglock now protects our private firing list. We have set
* the firing flag in each timer, so that a deletion attempt
* that gets the timer lock before we do will give it up and
* spin until we've taken care of that timer below.
*/
unlock_task_sighand(tsk, &flags);
/*
* Now that all the timers on our list have the firing flag,
* no one will touch their list entries but us. We'll take
* each timer's lock before clearing its firing flag, so no
* timer call will interfere.
*/
list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
int cpu_firing;
spin_lock(&timer->it_lock);
list_del_init(&timer->it.cpu.entry);
cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
* almost-firing as an overrun. So don't generate an event.
*/
if (likely(cpu_firing >= 0))
cpu_timer_fire(timer);
spin_unlock(&timer->it_lock);
}
}
|
C
|
linux
| 0 |
CVE-2017-0393
|
https://www.cvedetails.com/cve/CVE-2017-0393/
| null |
https://android.googlesource.com/platform/external/libvpx/+/6886e8e0a9db2dbad723dc37a548233e004b33bc
|
6886e8e0a9db2dbad723dc37a548233e004b33bc
|
vp8:fix threading issues
1 - stops deallocating before threads are closed.
2 - limits threads to mb_rows when mb_rows < partitions
BUG=webm:851
Bug: 30436808
Change-Id: Ie017818ed28103ca9d26d57087f31361b642e09b
(cherry picked from commit 70cca742efa20617c70c3209aa614a70f282f90e)
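A sketch of point 2, clamping the decoder's thread count (illustrative only; pbi->decoding_thread_count and pc->mb_rows approximate libvpx's VP8 decoder fields, and the exact clamp in the patch may differ):
/* Never run more decode threads than there are macroblock rows
 * to share between them. */
if (pbi->decoding_thread_count > (int)pc->mb_rows)
        pbi->decoding_thread_count = pc->mb_rows;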
|
static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
unsigned int mb_idx)
{
MB_PREDICTION_MODE mode;
int i;
#if CONFIG_ERROR_CONCEALMENT
int corruption_detected = 0;
#else
(void)mb_idx;
#endif
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
vp8_reset_mb_tokens_context(xd);
}
else if (!vp8dx_bool_error(xd->current_bc))
{
int eobtotal;
eobtotal = vp8_decode_mb_tokens(pbi, xd);
/* Special case: Force the loopfilter to skip when eobtotal is zero */
xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
}
mode = xd->mode_info_context->mbmi.mode;
if (xd->segmentation_enabled)
vp8_mb_init_dequantizer(pbi, xd);
#if CONFIG_ERROR_CONCEALMENT
if(pbi->ec_active)
{
int throw_residual;
/* When we have independent partitions we can apply residual even
* though other partitions within the frame are corrupt.
*/
throw_residual = (!pbi->independent_partitions &&
pbi->frame_corrupt_residual);
throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));
if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
{
/* MB with corrupt residuals or corrupt mode/motion vectors.
* Better to use the predictor as reconstruction.
*/
pbi->frame_corrupt_residual = 1;
memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
vp8_conceal_corrupt_mb(xd);
corruption_detected = 1;
/* force idct to be skipped for B_PRED and use the
* prediction only for reconstruction
* */
memset(xd->eobs, 0, 25);
}
}
#endif
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
vp8_build_intra_predictors_mbuv_s(xd,
xd->recon_above[1],
xd->recon_above[2],
xd->recon_left[1],
xd->recon_left[2],
xd->recon_left_stride[1],
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride);
if (mode != B_PRED)
{
vp8_build_intra_predictors_mby_s(xd,
xd->recon_above[0],
xd->recon_left[0],
xd->recon_left_stride[0],
xd->dst.y_buffer,
xd->dst.y_stride);
}
else
{
short *DQC = xd->dequant_y1;
int dst_stride = xd->dst.y_stride;
/* clear out residual eob info */
if(xd->mode_info_context->mbmi.mb_skip_coeff)
memset(xd->eobs, 0, 25);
intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
for (i = 0; i < 16; i++)
{
BLOCKD *b = &xd->block[i];
unsigned char *dst = xd->dst.y_buffer + b->offset;
B_PREDICTION_MODE b_mode =
xd->mode_info_context->bmi[i].as_mode;
unsigned char *Above;
unsigned char *yleft;
int left_stride;
unsigned char top_left;
/*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
if (i < 4 && pbi->common.filter_level)
Above = xd->recon_above[0] + b->offset;
else
Above = dst - dst_stride;
if (i%4==0 && pbi->common.filter_level)
{
yleft = xd->recon_left[0] + i;
left_stride = 1;
}
else
{
yleft = dst - 1;
left_stride = dst_stride;
}
if ((i==4 || i==8 || i==12) && pbi->common.filter_level)
top_left = *(xd->recon_left[0] + i - 1);
else
top_left = Above[-1];
vp8_intra4x4_predict(Above, yleft, left_stride,
b_mode, dst, dst_stride, top_left);
if (xd->eobs[i] )
{
if (xd->eobs[i] > 1)
{
vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
}
else
{
vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0],
dst, dst_stride, dst, dst_stride);
memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
}
}
}
}
}
else
{
vp8_build_inter_predictors_mb(xd);
}
#if CONFIG_ERROR_CONCEALMENT
if (corruption_detected)
{
return;
}
#endif
if(!xd->mode_info_context->mbmi.mb_skip_coeff)
{
/* dequantization and idct */
if (mode != B_PRED)
{
short *DQC = xd->dequant_y1;
if (mode != SPLITMV)
{
BLOCKD *b = &xd->block[24];
/* do 2nd order transform on the dc block */
if (xd->eobs[24] > 1)
{
vp8_dequantize_b(b, xd->dequant_y2);
vp8_short_inv_walsh4x4(&b->dqcoeff[0],
xd->qcoeff);
memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
}
else
{
b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
xd->qcoeff);
memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
}
/* override the dc dequant constant in order to preserve the
* dc components
*/
DQC = xd->dequant_y1_dc;
}
vp8_dequant_idct_add_y_block
(xd->qcoeff, DQC,
xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs);
}
vp8_dequant_idct_add_uv_block
(xd->qcoeff+16*16, xd->dequant_uv,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs+16);
}
}
|
static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
unsigned int mb_idx)
{
MB_PREDICTION_MODE mode;
int i;
#if CONFIG_ERROR_CONCEALMENT
int corruption_detected = 0;
#else
(void)mb_idx;
#endif
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
vp8_reset_mb_tokens_context(xd);
}
else if (!vp8dx_bool_error(xd->current_bc))
{
int eobtotal;
eobtotal = vp8_decode_mb_tokens(pbi, xd);
/* Special case: Force the loopfilter to skip when eobtotal is zero */
xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
}
mode = xd->mode_info_context->mbmi.mode;
if (xd->segmentation_enabled)
vp8_mb_init_dequantizer(pbi, xd);
#if CONFIG_ERROR_CONCEALMENT
if(pbi->ec_active)
{
int throw_residual;
/* When we have independent partitions we can apply residual even
* though other partitions within the frame are corrupt.
*/
throw_residual = (!pbi->independent_partitions &&
pbi->frame_corrupt_residual);
throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));
if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
{
/* MB with corrupt residuals or corrupt mode/motion vectors.
* Better to use the predictor as reconstruction.
*/
pbi->frame_corrupt_residual = 1;
memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
vp8_conceal_corrupt_mb(xd);
corruption_detected = 1;
/* force idct to be skipped for B_PRED and use the
* prediction only for reconstruction
* */
memset(xd->eobs, 0, 25);
}
}
#endif
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
vp8_build_intra_predictors_mbuv_s(xd,
xd->recon_above[1],
xd->recon_above[2],
xd->recon_left[1],
xd->recon_left[2],
xd->recon_left_stride[1],
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride);
if (mode != B_PRED)
{
vp8_build_intra_predictors_mby_s(xd,
xd->recon_above[0],
xd->recon_left[0],
xd->recon_left_stride[0],
xd->dst.y_buffer,
xd->dst.y_stride);
}
else
{
short *DQC = xd->dequant_y1;
int dst_stride = xd->dst.y_stride;
/* clear out residual eob info */
if(xd->mode_info_context->mbmi.mb_skip_coeff)
memset(xd->eobs, 0, 25);
intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
for (i = 0; i < 16; i++)
{
BLOCKD *b = &xd->block[i];
unsigned char *dst = xd->dst.y_buffer + b->offset;
B_PREDICTION_MODE b_mode =
xd->mode_info_context->bmi[i].as_mode;
unsigned char *Above;
unsigned char *yleft;
int left_stride;
unsigned char top_left;
/*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
if (i < 4 && pbi->common.filter_level)
Above = xd->recon_above[0] + b->offset;
else
Above = dst - dst_stride;
if (i%4==0 && pbi->common.filter_level)
{
yleft = xd->recon_left[0] + i;
left_stride = 1;
}
else
{
yleft = dst - 1;
left_stride = dst_stride;
}
if ((i==4 || i==8 || i==12) && pbi->common.filter_level)
top_left = *(xd->recon_left[0] + i - 1);
else
top_left = Above[-1];
vp8_intra4x4_predict(Above, yleft, left_stride,
b_mode, dst, dst_stride, top_left);
if (xd->eobs[i] )
{
if (xd->eobs[i] > 1)
{
vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
}
else
{
vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0],
dst, dst_stride, dst, dst_stride);
memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
}
}
}
}
}
else
{
vp8_build_inter_predictors_mb(xd);
}
#if CONFIG_ERROR_CONCEALMENT
if (corruption_detected)
{
return;
}
#endif
if(!xd->mode_info_context->mbmi.mb_skip_coeff)
{
/* dequantization and idct */
if (mode != B_PRED)
{
short *DQC = xd->dequant_y1;
if (mode != SPLITMV)
{
BLOCKD *b = &xd->block[24];
/* do 2nd order transform on the dc block */
if (xd->eobs[24] > 1)
{
vp8_dequantize_b(b, xd->dequant_y2);
vp8_short_inv_walsh4x4(&b->dqcoeff[0],
xd->qcoeff);
memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
}
else
{
b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
xd->qcoeff);
memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
}
/* override the dc dequant constant in order to preserve the
* dc components
*/
DQC = xd->dequant_y1_dc;
}
vp8_dequant_idct_add_y_block
(xd->qcoeff, DQC,
xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs);
}
vp8_dequant_idct_add_uv_block
(xd->qcoeff+16*16, xd->dequant_uv,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs+16);
}
}
|
C
|
Android
| 0 |
CVE-2011-2909
|
https://www.cvedetails.com/cve/CVE-2011-2909/
|
CWE-200
|
https://github.com/torvalds/linux/commit/819cbb120eaec7e014e5abd029260db1ca8c5735
|
819cbb120eaec7e014e5abd029260db1ca8c5735
|
staging: comedi: fix infoleak to userspace
driver_name and board_name are pointers to strings, not buffers of size
COMEDI_NAMELEN. Copying COMEDI_NAMELEN bytes of a string containing
less than COMEDI_NAMELEN-1 bytes would leak some unrelated bytes.
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Cc: stable <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
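The fix is the classic memcpy-to-strlcpy swap; a before/after sketch assuming the fields named in the message:
/* Before (leaky): copies a fixed COMEDI_NAMELEN bytes out of a
 * NUL-terminated string, exposing whatever follows the terminator. */
memcpy(it.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
/* After: copy at most COMEDI_NAMELEN-1 bytes and always NUL-terminate. */
strlcpy(it.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);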
|
static int check_insn_config_length(struct comedi_insn *insn,
unsigned int *data)
{
if (insn->n < 1)
return -EINVAL;
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
case INSN_CONFIG_DIO_INPUT:
case INSN_CONFIG_DISARM:
case INSN_CONFIG_RESET:
if (insn->n == 1)
return 0;
break;
case INSN_CONFIG_ARM:
case INSN_CONFIG_DIO_QUERY:
case INSN_CONFIG_BLOCK_SIZE:
case INSN_CONFIG_FILTER:
case INSN_CONFIG_SERIAL_CLOCK:
case INSN_CONFIG_BIDIRECTIONAL_DATA:
case INSN_CONFIG_ALT_SOURCE:
case INSN_CONFIG_SET_COUNTER_MODE:
case INSN_CONFIG_8254_READ_STATUS:
case INSN_CONFIG_SET_ROUTING:
case INSN_CONFIG_GET_ROUTING:
case INSN_CONFIG_GET_PWM_STATUS:
case INSN_CONFIG_PWM_SET_PERIOD:
case INSN_CONFIG_PWM_GET_PERIOD:
if (insn->n == 2)
return 0;
break;
case INSN_CONFIG_SET_GATE_SRC:
case INSN_CONFIG_GET_GATE_SRC:
case INSN_CONFIG_SET_CLOCK_SRC:
case INSN_CONFIG_GET_CLOCK_SRC:
case INSN_CONFIG_SET_OTHER_SRC:
case INSN_CONFIG_GET_COUNTER_STATUS:
case INSN_CONFIG_PWM_SET_H_BRIDGE:
case INSN_CONFIG_PWM_GET_H_BRIDGE:
case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
if (insn->n == 3)
return 0;
break;
case INSN_CONFIG_PWM_OUTPUT:
case INSN_CONFIG_ANALOG_TRIG:
if (insn->n == 5)
return 0;
break;
/* by default we allow the insn since we don't have checks for
* all possible cases yet */
default:
printk(KERN_WARNING
"comedi: no check for data length of config insn id "
"%i is implemented.\n"
" Add a check to %s in %s.\n"
" Assuming n=%i is correct.\n", data[0], __func__,
__FILE__, insn->n);
return 0;
break;
}
return -EINVAL;
}
|
static int check_insn_config_length(struct comedi_insn *insn,
unsigned int *data)
{
if (insn->n < 1)
return -EINVAL;
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
case INSN_CONFIG_DIO_INPUT:
case INSN_CONFIG_DISARM:
case INSN_CONFIG_RESET:
if (insn->n == 1)
return 0;
break;
case INSN_CONFIG_ARM:
case INSN_CONFIG_DIO_QUERY:
case INSN_CONFIG_BLOCK_SIZE:
case INSN_CONFIG_FILTER:
case INSN_CONFIG_SERIAL_CLOCK:
case INSN_CONFIG_BIDIRECTIONAL_DATA:
case INSN_CONFIG_ALT_SOURCE:
case INSN_CONFIG_SET_COUNTER_MODE:
case INSN_CONFIG_8254_READ_STATUS:
case INSN_CONFIG_SET_ROUTING:
case INSN_CONFIG_GET_ROUTING:
case INSN_CONFIG_GET_PWM_STATUS:
case INSN_CONFIG_PWM_SET_PERIOD:
case INSN_CONFIG_PWM_GET_PERIOD:
if (insn->n == 2)
return 0;
break;
case INSN_CONFIG_SET_GATE_SRC:
case INSN_CONFIG_GET_GATE_SRC:
case INSN_CONFIG_SET_CLOCK_SRC:
case INSN_CONFIG_GET_CLOCK_SRC:
case INSN_CONFIG_SET_OTHER_SRC:
case INSN_CONFIG_GET_COUNTER_STATUS:
case INSN_CONFIG_PWM_SET_H_BRIDGE:
case INSN_CONFIG_PWM_GET_H_BRIDGE:
case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
if (insn->n == 3)
return 0;
break;
case INSN_CONFIG_PWM_OUTPUT:
case INSN_CONFIG_ANALOG_TRIG:
if (insn->n == 5)
return 0;
break;
/* by default we allow the insn since we don't have checks for
* all possible cases yet */
default:
printk(KERN_WARNING
"comedi: no check for data length of config insn id "
"%i is implemented.\n"
" Add a check to %s in %s.\n"
" Assuming n=%i is correct.\n", data[0], __func__,
__FILE__, insn->n);
return 0;
break;
}
return -EINVAL;
}
|
C
|
linux
| 0 |
CVE-2014-9766
|
https://www.cvedetails.com/cve/CVE-2014-9766/
|
CWE-189
|
https://cgit.freedesktop.org/pixman/commit/?id=857e40f3d2bc2cfb714913e0cd7e6184cf69aca3
|
857e40f3d2bc2cfb714913e0cd7e6184cf69aca3
| null |
_pixman_image_get_scanline_generic_float (pixman_iter_t * iter,
const uint32_t *mask)
{
pixman_iter_get_scanline_t fetch_32 = iter->data;
uint32_t *buffer = iter->buffer;
fetch_32 (iter, NULL);
pixman_expand_to_float ((argb_t *)buffer, buffer, PIXMAN_a8r8g8b8, iter->width);
return iter->buffer;
}
|
_pixman_image_get_scanline_generic_float (pixman_iter_t * iter,
const uint32_t *mask)
{
pixman_iter_get_scanline_t fetch_32 = iter->data;
uint32_t *buffer = iter->buffer;
fetch_32 (iter, NULL);
pixman_expand_to_float ((argb_t *)buffer, buffer, PIXMAN_a8r8g8b8, iter->width);
return iter->buffer;
}
|
C
|
pixman
| 0 |
CVE-2017-9059
|
https://www.cvedetails.com/cve/CVE-2017-9059/
|
CWE-404
|
https://github.com/torvalds/linux/commit/c70422f760c120480fee4de6c38804c72aa26bc1
|
c70422f760c120480fee4de6c38804c72aa26bc1
|
Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use uninitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
|
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
stateid_t *stateid, unsigned char typemask,
struct nfs4_stid **s, struct nfsd_net *nn)
{
__be32 status;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return nfserr_bad_stateid;
status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
if (cstate->session)
return nfserr_bad_stateid;
return nfserr_stale_stateid;
}
if (status)
return status;
*s = find_stateid_by_type(cstate->clp, stateid, typemask);
if (!*s)
return nfserr_bad_stateid;
return nfs_ok;
}
|
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
stateid_t *stateid, unsigned char typemask,
struct nfs4_stid **s, struct nfsd_net *nn)
{
__be32 status;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return nfserr_bad_stateid;
status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
if (cstate->session)
return nfserr_bad_stateid;
return nfserr_stale_stateid;
}
if (status)
return status;
*s = find_stateid_by_type(cstate->clp, stateid, typemask);
if (!*s)
return nfserr_bad_stateid;
return nfs_ok;
}
|
C
|
linux
| 0 |
CVE-2011-2351
|
https://www.cvedetails.com/cve/CVE-2011-2351/
|
CWE-399
|
https://github.com/chromium/chromium/commit/bf381d8a02c3d272d4dd879ac719d8993dfb5ad6
|
bf381d8a02c3d272d4dd879ac719d8993dfb5ad6
|
Enable HistoryModelWorker by default, now that bug 69561 is fixed.
BUG=69561
TEST=Run sync manually and run integration tests, sync should not crash.
Review URL: http://codereview.chromium.org/7016007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85211 0039d316-1c4b-4281-b951-d872f2087c98
|
void SyncBackendHost::Core::RouteJsEvent(
const std::string& name, const JsEventDetails& details) {
host_->frontend_loop_->PostTask(
FROM_HERE, NewRunnableMethod(
this, &Core::RouteJsEventOnFrontendLoop, name, details));
}
|
void SyncBackendHost::Core::RouteJsEvent(
const std::string& name, const JsEventDetails& details) {
host_->frontend_loop_->PostTask(
FROM_HERE, NewRunnableMethod(
this, &Core::RouteJsEventOnFrontendLoop, name, details));
}
|
C
|
Chrome
| 0 |
CVE-2016-6787
|
https://www.cvedetails.com/cve/CVE-2016-6787/
|
CWE-264
|
https://github.com/torvalds/linux/commit/f63a8daa5812afef4f06c962351687e1ff9ccb2b
|
f63a8daa5812afef4f06c962351687e1ff9ccb2b
|
perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx, the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20150123125834.209535886@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
|
static void perf_cgroup_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *task;
cgroup_taskset_for_each(task, tset)
task_function_call(task, __perf_cgroup_move, task);
}
|
static void perf_cgroup_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *task;
cgroup_taskset_for_each(task, tset)
task_function_call(task, __perf_cgroup_move, task);
}
|
C
|
linux
| 0 |
CVE-2015-8543
|
https://www.cvedetails.com/cve/CVE-2015-8543/
| null |
https://github.com/torvalds/linux/commit/79462ad02e861803b3840cc782248c7359451cd9
|
79462ad02e861803b3840cc782248c7359451cd9
|
net: add validation for the socket syscall protocol argument
郭永刚 reported that one could simply crash the kernel as root by
using a simple program:
int socket_fd;
struct sockaddr_in addr;
addr.sin_port = 0;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_family = 10;
socket_fd = socket(10,3,0x40000000);
connect(socket_fd , &addr,16);
AF_INET, AF_INET6 sockets actually only support 8-bit protocol
identifiers. inet_sock's skc_protocol field is sized accordingly, so
larger protocol identifiers simply cut off the higher bits and store
a zero in the protocol field.
This could lead to e.g. NULL function pointer because as a result of
the cut off inet_num is zero and we call down to inet_autobind, which
is NULL for raw sockets.
kernel: Call Trace:
kernel: [<ffffffff816db90e>] ? inet_autobind+0x2e/0x70
kernel: [<ffffffff816db9a4>] inet_dgram_connect+0x54/0x80
kernel: [<ffffffff81645069>] SYSC_connect+0xd9/0x110
kernel: [<ffffffff810ac51b>] ? ptrace_notify+0x5b/0x80
kernel: [<ffffffff810236d8>] ? syscall_trace_enter_phase2+0x108/0x200
kernel: [<ffffffff81645e0e>] SyS_connect+0xe/0x10
kernel: [<ffffffff81779515>] tracesys_phase2+0x84/0x89
I found no particular commit which introduced this problem.
CVE: CVE-2015-8543
Cc: Cong Wang <cwang@twopensource.com>
Reported-by: 郭永刚 <guoyonggang@360.cn>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
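A sketch of the resulting guard at socket-creation time (the fix adds such range checks in the per-family create functions; IPPROTO_MAX bounds the 8-bit protocol space):
/* Reject out-of-range protocol numbers before the 8-bit skc_protocol
 * field silently truncates them to zero. */
if (protocol < 0 || protocol >= IPPROTO_MAX)
        return -EINVAL;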
|
static struct hlist_head *dn_find_list(struct sock *sk)
{
struct dn_scp *scp = DN_SK(sk);
if (scp->addr.sdn_flags & SDF_WILD)
return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
}
|
static struct hlist_head *dn_find_list(struct sock *sk)
{
struct dn_scp *scp = DN_SK(sk);
if (scp->addr.sdn_flags & SDF_WILD)
return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
}
|
C
|
linux
| 0 |
CVE-2014-2669
|
https://www.cvedetails.com/cve/CVE-2014-2669/
|
CWE-189
|
https://github.com/postgres/postgres/commit/31400a673325147e1205326008e32135a78b4d8a
|
31400a673325147e1205326008e32135a78b4d8a
|
Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
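The general defense is checking the multiplication before allocating; a minimal self-contained sketch (hypothetical helper, not the actual path_in() code):
#include <stdint.h>
#include <stdlib.h>
/* Allocate count * elemsize bytes, failing cleanly when the product
 * would wrap to a small value and yield an undersized buffer. */
static void *alloc_array(size_t count, size_t elemsize)
{
    if (elemsize != 0 && count > SIZE_MAX / elemsize)
        return NULL;            /* overflow predicted: refuse to allocate */
    return malloc(count * elemsize);
}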
|
circle_copy(CIRCLE *circle)
{
CIRCLE *result;
if (!PointerIsValid(circle))
return NULL;
result = (CIRCLE *) palloc(sizeof(CIRCLE));
memcpy((char *) result, (char *) circle, sizeof(CIRCLE));
return result;
}
|
circle_copy(CIRCLE *circle)
{
CIRCLE *result;
if (!PointerIsValid(circle))
return NULL;
result = (CIRCLE *) palloc(sizeof(CIRCLE));
memcpy((char *) result, (char *) circle, sizeof(CIRCLE));
return result;
}
|
C
|
postgres
| 0 |
CVE-2013-2206
|
https://www.cvedetails.com/cve/CVE-2013-2206/
| null |
https://github.com/torvalds/linux/commit/f2815633504b442ca0b0605c16bf3d88a3a0fcea
|
f2815633504b442ca0b0605c16bf3d88a3a0fcea
|
sctp: Use correct sideffect command in duplicate cookie handling
When SCTP is done processing a duplicate cookie chunk, it tries
to delete a newly created association. For that, it has to set
the right association for the side-effect processing to work.
However, when it uses the SCTP_CMD_NEW_ASOC command, that performs
more work than really needed (like hashing the association and
assigning it an id) and there is no point in doing that only to
delete the association as a next step. In fact, it also creates
an impossible condition where an association may be found by
the getsockopt() call, and that association is empty. This
causes a crash in some sctp getsockopts.
The solution is rather simple. We simply use SCTP_CMD_SET_ASOC
command that doesn't have all the overhead and does exactly
what we need.
Reported-by: Karl Heiss <kheiss@gmail.com>
Tested-by: Karl Heiss <kheiss@gmail.com>
CC: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: Vlad Yasevich <vyasevich@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
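A sketch of the side-effect command the message settles on (illustrative; SCTP_ASOC() is the usual argument wrapper in this command API):
/* Point the side-effect machinery at the association directly instead
 * of re-running the full new-association setup just to delete it. */
sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
                SCTP_ASOC((struct sctp_association *)asoc));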
|
static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
struct sctp_chunk *reply;
/* Send a heartbeat to our peer. */
reply = sctp_make_heartbeat(asoc, transport);
if (!reply)
return SCTP_DISPOSITION_NOMEM;
/* Set rto_pending indicating that an RTT measurement
* is started with this heartbeat chunk.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
}
|
static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
struct sctp_chunk *reply;
/* Send a heartbeat to our peer. */
reply = sctp_make_heartbeat(asoc, transport);
if (!reply)
return SCTP_DISPOSITION_NOMEM;
/* Set rto_pending indicating that an RTT measurement
* is started with this heartbeat chunk.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
}
|
C
|
linux
| 0 |
CVE-2016-1640
|
https://www.cvedetails.com/cve/CVE-2016-1640/
|
CWE-17
|
https://github.com/chromium/chromium/commit/0a1c15fecb1240ab909e1431b6127410c3b380e0
|
0a1c15fecb1240ab909e1431b6127410c3b380e0
|
Make the webstore inline install dialog be tab-modal
Also clean up a few minor lint errors while I'm in here.
BUG=550047
Review URL: https://codereview.chromium.org/1496033003
Cr-Commit-Position: refs/heads/master@{#363925}
|
void ExtensionInstallPrompt::OnInstallFailure(
const extensions::CrxInstallError& error) {
install_ui_->OnInstallFailure(error);
}
|
void ExtensionInstallPrompt::OnInstallFailure(
const extensions::CrxInstallError& error) {
install_ui_->OnInstallFailure(error);
}
|
C
|
Chrome
| 0 |
CVE-2017-5011
|
https://www.cvedetails.com/cve/CVE-2017-5011/
|
CWE-200
|
https://github.com/chromium/chromium/commit/eea3300239f0b53e172a320eb8de59d0bea65f27
|
eea3300239f0b53e172a320eb8de59d0bea65f27
|
DevTools: move front-end URL handling to DevToolsUIBindingds
BUG=662859
Review-Url: https://codereview.chromium.org/2607833002
Cr-Commit-Position: refs/heads/master@{#440926}
|
DevToolsToolboxDelegate::~DevToolsToolboxDelegate() {
}
|
DevToolsToolboxDelegate::~DevToolsToolboxDelegate() {
}
|
C
|
Chrome
| 0 |
CVE-2015-8839
|
https://www.cvedetails.com/cve/CVE-2015-8839/
|
CWE-362
|
https://github.com/torvalds/linux/commit/ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
|
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
|
ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in a page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce the race window in which a page
fault could have created pages with stale mapping information.
Signed-off-by: Jan Kara <jack@suse.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
|
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
int nodefs)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
const struct mount_opts *m;
char sep = nodefs ? '\n' : ',';
#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
if (sbi->s_sb_block != 1)
SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
for (m = ext4_mount_opts; m->token != Opt_err; m++) {
int want_set = m->flags & MOPT_SET;
if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
(m->flags & MOPT_CLEAR_ERR))
continue;
if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
continue; /* skip if same as the default */
if ((want_set &&
(sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
(!want_set && (sbi->s_mount_opt & m->mount_opt)))
continue; /* select Opt_noFoo vs Opt_Foo */
SEQ_OPTS_PRINT("%s", token2str(m->token));
}
if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
SEQ_OPTS_PRINT("resuid=%u",
from_kuid_munged(&init_user_ns, sbi->s_resuid));
if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
SEQ_OPTS_PRINT("resgid=%u",
from_kgid_munged(&init_user_ns, sbi->s_resgid));
def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
SEQ_OPTS_PUTS("errors=remount-ro");
if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
SEQ_OPTS_PUTS("errors=continue");
if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
SEQ_OPTS_PUTS("errors=panic");
if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
if (sb->s_flags & MS_I_VERSION)
SEQ_OPTS_PUTS("i_version");
if (nodefs || sbi->s_stripe)
SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
SEQ_OPTS_PUTS("data=journal");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
SEQ_OPTS_PUTS("data=ordered");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
SEQ_OPTS_PUTS("data=writeback");
}
if (nodefs ||
sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
SEQ_OPTS_PRINT("inode_readahead_blks=%u",
sbi->s_inode_readahead_blks);
if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
(sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
if (nodefs || sbi->s_max_dir_size_kb)
SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
ext4_show_quota_options(seq, sb);
return 0;
}
|
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
int nodefs)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
const struct mount_opts *m;
char sep = nodefs ? '\n' : ',';
#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
if (sbi->s_sb_block != 1)
SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
for (m = ext4_mount_opts; m->token != Opt_err; m++) {
int want_set = m->flags & MOPT_SET;
if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
(m->flags & MOPT_CLEAR_ERR))
continue;
if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
continue; /* skip if same as the default */
if ((want_set &&
(sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
(!want_set && (sbi->s_mount_opt & m->mount_opt)))
continue; /* select Opt_noFoo vs Opt_Foo */
SEQ_OPTS_PRINT("%s", token2str(m->token));
}
if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
SEQ_OPTS_PRINT("resuid=%u",
from_kuid_munged(&init_user_ns, sbi->s_resuid));
if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
SEQ_OPTS_PRINT("resgid=%u",
from_kgid_munged(&init_user_ns, sbi->s_resgid));
def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
SEQ_OPTS_PUTS("errors=remount-ro");
if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
SEQ_OPTS_PUTS("errors=continue");
if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
SEQ_OPTS_PUTS("errors=panic");
if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
if (sb->s_flags & MS_I_VERSION)
SEQ_OPTS_PUTS("i_version");
if (nodefs || sbi->s_stripe)
SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
SEQ_OPTS_PUTS("data=journal");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
SEQ_OPTS_PUTS("data=ordered");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
SEQ_OPTS_PUTS("data=writeback");
}
if (nodefs ||
sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
SEQ_OPTS_PRINT("inode_readahead_blks=%u",
sbi->s_inode_readahead_blks);
if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
(sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
if (nodefs || sbi->s_max_dir_size_kb)
SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
ext4_show_quota_options(seq, sb);
return 0;
}
|
C
|
linux
| 0 |
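The ext4 fix above hinges on a reader/writer lock held shared by page faults and exclusively by hole punching. Below is a minimal userspace sketch of that pattern, using pthreads and illustrative names (inode_t, fault_in_page, punch_hole) that are not the kernel's: faults take the lock shared, hole punching takes it exclusive, so a fault can never instantiate a page inside a range being punched.

#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_rwlock_t i_mmap_sem;   /* ranks above the "transaction" lock */
} inode_t;

static void fault_in_page(inode_t *inode, long pgoff)
{
    pthread_rwlock_rdlock(&inode->i_mmap_sem);  /* shared: faults run in parallel */
    printf("fault: mapping page %ld\n", pgoff); /* no hole punch can run here */
    pthread_rwlock_unlock(&inode->i_mmap_sem);
}

static void punch_hole(inode_t *inode, long first, long last)
{
    pthread_rwlock_wrlock(&inode->i_mmap_sem);  /* exclusive: excludes all faults */
    printf("punch: freeing pages %ld..%ld\n", first, last);
    pthread_rwlock_unlock(&inode->i_mmap_sem);
}

int main(void)
{
    inode_t inode;
    pthread_rwlock_init(&inode.i_mmap_sem, NULL);
    fault_in_page(&inode, 3);
    punch_hole(&inode, 0, 7);
    pthread_rwlock_destroy(&inode.i_mmap_sem);
    return 0;
}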
CVE-2016-2179
|
https://www.cvedetails.com/cve/CVE-2016-2179/
|
CWE-399
|
https://git.openssl.org/?p=openssl.git;a=commit;h=f5c7f5dfbaf0d2f7d946d0fe86f08e6bcb36ed0d
|
f5c7f5dfbaf0d2f7d946d0fe86f08e6bcb36ed0d
| null |
int tls_construct_server_done(SSL *s)
{
if (!ssl_set_handshake_header(s, SSL3_MT_SERVER_DONE, 0)) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_DONE, ERR_R_INTERNAL_ERROR);
ossl_statem_set_error(s);
return 0;
}
if (!s->s3->tmp.cert_request) {
if (!ssl3_digest_cached_records(s, 0)) {
ossl_statem_set_error(s);
}
}
return 1;
}
|
int tls_construct_server_done(SSL *s)
{
if (!ssl_set_handshake_header(s, SSL3_MT_SERVER_DONE, 0)) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_DONE, ERR_R_INTERNAL_ERROR);
ossl_statem_set_error(s);
return 0;
}
if (!s->s3->tmp.cert_request) {
if (!ssl3_digest_cached_records(s, 0)) {
ossl_statem_set_error(s);
}
}
return 1;
}
|
C
|
openssl
| 0 |
CVE-2018-6140
|
https://www.cvedetails.com/cve/CVE-2018-6140/
|
CWE-20
|
https://github.com/chromium/chromium/commit/2aec794f26098c7a361c27d7c8f57119631cca8a
|
2aec794f26098c7a361c27d7c8f57119631cca8a
|
[DevTools] Do not allow chrome.debugger to attach to web ui pages
If the page navigates to web ui, we force detach the debugger extension.
TBR=alexclarke@chromium.org
Bug: 798222
Change-Id: Idb46c2f59e839388397a8dfa6ce2e2a897698df3
Reviewed-on: https://chromium-review.googlesource.com/935961
Commit-Queue: Dmitry Gozman <dgozman@chromium.org>
Reviewed-by: Devlin <rdevlin.cronin@chromium.org>
Reviewed-by: Pavel Feldman <pfeldman@chromium.org>
Reviewed-by: Nasko Oskov <nasko@chromium.org>
Cr-Commit-Position: refs/heads/master@{#540916}
|
GURL BrowserDevToolsAgentHost::GetURL() {
return GURL();
}
|
GURL BrowserDevToolsAgentHost::GetURL() {
return GURL();
}
|
C
|
Chrome
| 0 |
CVE-2019-1563
|
https://www.cvedetails.com/cve/CVE-2019-1563/
|
CWE-311
|
https://git.openssl.org/gitweb/?p=openssl.git;a=commitdiff;h=08229ad838c50f644d7e928e2eef147b4308ad64
|
08229ad838c50f644d7e928e2eef147b4308ad64
| null |
int CMS_verify_receipt(CMS_ContentInfo *rcms, CMS_ContentInfo *ocms,
STACK_OF(X509) *certs,
X509_STORE *store, unsigned int flags)
{
int r;
flags &= ~(CMS_DETACHED | CMS_TEXT);
r = CMS_verify(rcms, certs, store, NULL, NULL, flags);
if (r <= 0)
return r;
return cms_Receipt_verify(rcms, ocms);
}
|
int CMS_verify_receipt(CMS_ContentInfo *rcms, CMS_ContentInfo *ocms,
STACK_OF(X509) *certs,
X509_STORE *store, unsigned int flags)
{
int r;
flags &= ~(CMS_DETACHED | CMS_TEXT);
r = CMS_verify(rcms, certs, store, NULL, NULL, flags);
if (r <= 0)
return r;
return cms_Receipt_verify(rcms, ocms);
}
|
C
|
openssl
| 0 |
CVE-2011-4611
|
https://www.cvedetails.com/cve/CVE-2011-4611/
|
CWE-189
|
https://github.com/torvalds/linux/commit/0837e3242c73566fc1c0196b4ec61779c25ffc93
|
0837e3242c73566fc1c0196b4ec61779c25ffc93
|
perf, powerpc: Handle events that raise an exception without overflowing
Events on POWER7 can roll back if a speculative event doesn't
eventually complete. Unfortunately in some rare cases they will
raise a performance monitor exception. We need to catch this to
ensure we reset the PMC. In all cases the PMC will be 256 or less
cycles from overflow.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: <stable@kernel.org> # as far back as it applies cleanly
LKML-Reference: <20110309143842.6c22845e@kryten>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
|
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
|
C
|
linux
| 0 |
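The POWER7 fix above treats a PMC that is within 256 cycles of overflow as having overflowed, since a rolled-back speculative event can raise a PMI just short of the wrap point. A hedged sketch of that proximity check follows; the constant names and test values are illustrative, not the kernel's code.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PMC_OVERFLOW   0x80000000u  /* bit 31 set: the counter wrapped */
#define ROLLBACK_SLACK 256u         /* max shortfall after a rollback */

static bool pmc_overflowed(uint32_t val)
{
    if (val & PMC_OVERFLOW)
        return true;                /* genuine overflow */
    /* rolled-back speculative event: at most 256 cycles short of the wrap */
    return (PMC_OVERFLOW - val) <= ROLLBACK_SLACK;
}

int main(void)
{
    printf("%d\n", pmc_overflowed(0x80000001u)); /* 1: wrapped */
    printf("%d\n", pmc_overflowed(0x7fffff80u)); /* 1: 128 short, reset the PMC */
    printf("%d\n", pmc_overflowed(0x40000000u)); /* 0: mid-count, nothing to do */
    return 0;
}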
CVE-2013-2858
|
https://www.cvedetails.com/cve/CVE-2013-2858/
|
CWE-416
|
https://github.com/chromium/chromium/commit/828eab2216a765dea92575c290421c115b8ad028
|
828eab2216a765dea92575c290421c115b8ad028
|
Added daily UMA for non-data-reduction-proxy data usage when the proxy is enabled.
BUG=325325
Review URL: https://codereview.chromium.org/106113002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@239897 0039d316-1c4b-4281-b951-d872f2087c98
|
virtual ~LoggingNetworkChangeObserver() {
net::NetworkChangeNotifier::RemoveIPAddressObserver(this);
net::NetworkChangeNotifier::RemoveConnectionTypeObserver(this);
net::NetworkChangeNotifier::RemoveNetworkChangeObserver(this);
}
|
virtual ~LoggingNetworkChangeObserver() {
net::NetworkChangeNotifier::RemoveIPAddressObserver(this);
net::NetworkChangeNotifier::RemoveConnectionTypeObserver(this);
net::NetworkChangeNotifier::RemoveNetworkChangeObserver(this);
}
|
C
|
Chrome
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
static const cac_object_t *cac_find_obj_by_id(unsigned short object_id)
{
int i;
for (i = 0; i < cac_object_count; i++) {
if (cac_objects[i].fd == object_id) {
return &cac_objects[i];
}
}
return NULL;
}
|
static const cac_object_t *cac_find_obj_by_id(unsigned short object_id)
{
int i;
for (i = 0; i < cac_object_count; i++) {
if (cac_objects[i].fd == object_id) {
return &cac_objects[i];
}
}
return NULL;
}
|
C
|
OpenSC
| 0 |
CVE-2016-3899
|
https://www.cvedetails.com/cve/CVE-2016-3899/
|
CWE-284
|
https://android.googlesource.com/platform/frameworks/av/+/97837bb6cbac21ea679843a0037779d3834bed64
|
97837bb6cbac21ea679843a0037779d3834bed64
|
OMXCodec: check IMemory::pointer() before using allocation
Bug: 29421811
Change-Id: I0a73ba12bae4122f1d89fc92e5ea4f6a96cd1ed1
|
status_t OMXCodec::findTargetColorFormat(
const sp<MetaData>& meta, OMX_COLOR_FORMATTYPE *colorFormat) {
ALOGV("findTargetColorFormat");
CHECK(mIsEncoder);
*colorFormat = OMX_COLOR_FormatYUV420SemiPlanar;
int32_t targetColorFormat;
if (meta->findInt32(kKeyColorFormat, &targetColorFormat)) {
*colorFormat = (OMX_COLOR_FORMATTYPE) targetColorFormat;
}
return isColorFormatSupported(*colorFormat, kPortIndexInput);
}
|
status_t OMXCodec::findTargetColorFormat(
const sp<MetaData>& meta, OMX_COLOR_FORMATTYPE *colorFormat) {
ALOGV("findTargetColorFormat");
CHECK(mIsEncoder);
*colorFormat = OMX_COLOR_FormatYUV420SemiPlanar;
int32_t targetColorFormat;
if (meta->findInt32(kKeyColorFormat, &targetColorFormat)) {
*colorFormat = (OMX_COLOR_FORMATTYPE) targetColorFormat;
}
return isColorFormatSupported(*colorFormat, kPortIndexInput);
}
|
C
|
Android
| 0 |
CVE-2016-10192
|
https://www.cvedetails.com/cve/CVE-2016-10192/
|
CWE-119
|
https://github.com/FFmpeg/FFmpeg/commit/a5d25faa3f4b18dac737fdb35d0dd68eb0dc2156
|
a5d25faa3f4b18dac737fdb35d0dd68eb0dc2156
|
ffserver: Check chunk size
Fixes out of array access
Fixes: poc_ffserver.py
Found-by: Paul Cher <paulcher@icloud.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
|
static inline void print_stream_params(AVIOContext *pb, FFServerStream *stream)
{
int i, stream_no;
const char *type = "unknown";
char parameters[64];
LayeredAVStream *st;
AVCodec *codec;
stream_no = stream->nb_streams;
avio_printf(pb, "<table><tr><th>Stream<th>"
"type<th>kbit/s<th>codec<th>"
"Parameters\n");
for (i = 0; i < stream_no; i++) {
st = stream->streams[i];
codec = avcodec_find_encoder(st->codecpar->codec_id);
parameters[0] = 0;
switch(st->codecpar->codec_type) {
case AVMEDIA_TYPE_AUDIO:
type = "audio";
snprintf(parameters, sizeof(parameters), "%d channel(s), %d Hz",
st->codecpar->channels, st->codecpar->sample_rate);
break;
case AVMEDIA_TYPE_VIDEO:
type = "video";
snprintf(parameters, sizeof(parameters),
"%dx%d, q=%d-%d, fps=%d", st->codecpar->width,
st->codecpar->height, st->codec->qmin, st->codec->qmax,
st->time_base.den / st->time_base.num);
break;
default:
abort();
}
avio_printf(pb, "<tr><td>%d<td>%s<td>%"PRId64
"<td>%s<td>%s\n",
i, type, (int64_t)st->codecpar->bit_rate/1000,
codec ? codec->name : "", parameters);
}
avio_printf(pb, "</table>\n");
}
|
static inline void print_stream_params(AVIOContext *pb, FFServerStream *stream)
{
int i, stream_no;
const char *type = "unknown";
char parameters[64];
LayeredAVStream *st;
AVCodec *codec;
stream_no = stream->nb_streams;
avio_printf(pb, "<table><tr><th>Stream<th>"
"type<th>kbit/s<th>codec<th>"
"Parameters\n");
for (i = 0; i < stream_no; i++) {
st = stream->streams[i];
codec = avcodec_find_encoder(st->codecpar->codec_id);
parameters[0] = 0;
switch(st->codecpar->codec_type) {
case AVMEDIA_TYPE_AUDIO:
type = "audio";
snprintf(parameters, sizeof(parameters), "%d channel(s), %d Hz",
st->codecpar->channels, st->codecpar->sample_rate);
break;
case AVMEDIA_TYPE_VIDEO:
type = "video";
snprintf(parameters, sizeof(parameters),
"%dx%d, q=%d-%d, fps=%d", st->codecpar->width,
st->codecpar->height, st->codec->qmin, st->codec->qmax,
st->time_base.den / st->time_base.num);
break;
default:
abort();
}
avio_printf(pb, "<tr><td>%d<td>%s<td>%"PRId64
"<td>%s<td>%s\n",
i, type, (int64_t)st->codecpar->bit_rate/1000,
codec ? codec->name : "", parameters);
}
avio_printf(pb, "</table>\n");
}
|
C
|
FFmpeg
| 0 |
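The ffserver fix above is a chunk-size validation before attacker-controlled data lands in a fixed buffer. A minimal sketch of that kind of bound check; the names and sizes (CHUNK_BUF_SIZE, read_chunk) are assumptions, not ffserver's actual code.

#include <stdio.h>
#include <string.h>

#define CHUNK_BUF_SIZE 4096

/* returns 0 on success, -1 if the declared chunk size would overflow dst */
static int read_chunk(char *dst, const char *src, size_t chunk_size)
{
    if (chunk_size > CHUNK_BUF_SIZE) {       /* reject oversized chunks */
        fprintf(stderr, "chunk too big: %zu\n", chunk_size);
        return -1;
    }
    memcpy(dst, src, chunk_size);            /* now provably in bounds */
    return 0;
}

int main(void)
{
    char buf[CHUNK_BUF_SIZE];
    const char payload[] = "hello";
    if (read_chunk(buf, payload, sizeof(payload)) == 0)
        printf("%s\n", buf);
    read_chunk(buf, payload, 1u << 20);      /* oversized: rejected with an error */
    return 0;
}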
CVE-2015-5302
|
https://www.cvedetails.com/cve/CVE-2015-5302/
|
CWE-200
|
https://github.com/abrt/libreport/commit/257578a23d1537a2d235aaa2b1488ee4f818e360
|
257578a23d1537a2d235aaa2b1488ee4f818e360
|
wizard: fix saving the user's changes after reviewing dump dir files
If the user reviewed the dump dir's files while reporting the crash, the
changes were thrown away and the original data was passed to the bugzilla bug
report.
report-gtk saves the first text view buffer and then reloads data from the
reported problem directory, which causes the changes made to those text
views to be thrown away.
Besides saving text, the function save_text_if_changed() also reloads the files
from the dump dir and updates the GUI state from the dump dir. This commit moves
the reloading and GUI-updating functions out of save_text_if_changed().
Related to rhbz#1270235
Signed-off-by: Matej Habrnal <mhabrnal@redhat.com>
|
static void set_cursor_if_appropriate(GtkTextView *text_view,
gint x,
gint y)
{
GSList *tags = NULL, *tagp = NULL;
GtkTextIter iter;
gboolean hovering = FALSE;
gtk_text_view_get_iter_at_location(text_view, &iter, x, y);
tags = gtk_text_iter_get_tags(&iter);
for (tagp = tags; tagp != NULL; tagp = tagp->next)
{
GtkTextTag *tag = tagp->data;
gpointer url = g_object_get_data(G_OBJECT (tag), "url");
if (url != 0)
{
hovering = TRUE;
break;
}
}
if (hovering != hovering_over_link)
{
hovering_over_link = hovering;
if (hovering_over_link)
gdk_window_set_cursor(gtk_text_view_get_window(text_view, GTK_TEXT_WINDOW_TEXT), hand_cursor);
else
gdk_window_set_cursor(gtk_text_view_get_window(text_view, GTK_TEXT_WINDOW_TEXT), regular_cursor);
}
if (tags)
g_slist_free (tags);
}
|
static void set_cursor_if_appropriate(GtkTextView *text_view,
gint x,
gint y)
{
GSList *tags = NULL, *tagp = NULL;
GtkTextIter iter;
gboolean hovering = FALSE;
gtk_text_view_get_iter_at_location(text_view, &iter, x, y);
tags = gtk_text_iter_get_tags(&iter);
for (tagp = tags; tagp != NULL; tagp = tagp->next)
{
GtkTextTag *tag = tagp->data;
gpointer url = g_object_get_data(G_OBJECT (tag), "url");
if (url != 0)
{
hovering = TRUE;
break;
}
}
if (hovering != hovering_over_link)
{
hovering_over_link = hovering;
if (hovering_over_link)
gdk_window_set_cursor(gtk_text_view_get_window(text_view, GTK_TEXT_WINDOW_TEXT), hand_cursor);
else
gdk_window_set_cursor(gtk_text_view_get_window(text_view, GTK_TEXT_WINDOW_TEXT), regular_cursor);
}
if (tags)
g_slist_free (tags);
}
|
C
|
libreport
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
R=jochen@chromium.org
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void classMethodWithOptionalMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
TestObjectV8Internal::classMethodWithOptionalMethod(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
static void classMethodWithOptionalMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
TestObjectV8Internal::classMethodWithOptionalMethod(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
C
|
Chrome
| 0 |
CVE-2018-20067
|
https://www.cvedetails.com/cve/CVE-2018-20067/
|
CWE-254
|
https://github.com/chromium/chromium/commit/a7d715ae5b654d1f98669fd979a00282a7229044
|
a7d715ae5b654d1f98669fd979a00282a7229044
|
Prevent renderer-initiated back navigation from canceling a browser one.
Renderer-initiated back/forward navigations must not be able to cancel an
ongoing browser-initiated navigation if they are not user initiated.
Note: 'normal' renderer initiated navigation uses the
FrameHost::BeginNavigation() path. A code similar to this patch is done
in NavigatorImpl::OnBeginNavigation().
Test:
-----
Added: NavigationBrowserTest.
* HistoryBackInBeforeUnload
* HistoryBackInBeforeUnloadAfterSetTimeout
* HistoryBackCancelPendingNavigationNoUserGesture
* HistoryBackCancelPendingNavigationUserGesture
Fixed:
* (WPT) .../the-history-interface/traverse_the_history_2.html
* (WPT) .../the-history-interface/traverse_the_history_3.html
* (WPT) .../the-history-interface/traverse_the_history_4.html
* (WPT) .../the-history-interface/traverse_the_history_5.html
Bug: 879965
Change-Id: I1a9bfaaea1ffc219e6c32f6e676b660e746c578c
Reviewed-on: https://chromium-review.googlesource.com/1209744
Commit-Queue: Arthur Sonzogni <arthursonzogni@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Mustaq Ahmed <mustaq@chromium.org>
Reviewed-by: Camille Lamy <clamy@chromium.org>
Reviewed-by: Charlie Reis <creis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#592823}
|
void LocalFrameClientImpl::ReportLegacySymantecCert(const KURL& url,
bool did_fail) {
if (web_frame_->Client())
web_frame_->Client()->ReportLegacySymantecCert(url, did_fail);
}
|
void LocalFrameClientImpl::ReportLegacySymantecCert(const KURL& url,
bool did_fail) {
if (web_frame_->Client())
web_frame_->Client()->ReportLegacySymantecCert(url, did_fail);
}
|
C
|
Chrome
| 0 |
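The navigation policy above boils down to a predicate: ignore a renderer-initiated history traversal while a browser-initiated navigation is pending, unless the user actually gestured. A sketch of that condition follows; the function and parameter names are illustrative, not Chromium's.

#include <stdbool.h>
#include <stdio.h>

static bool should_ignore_history_traversal(bool renderer_initiated,
                                            bool has_user_gesture,
                                            bool browser_nav_pending)
{
    /* drop non-user-initiated renderer traversals that would cancel
     * an in-flight browser-initiated navigation */
    return renderer_initiated && !has_user_gesture && browser_nav_pending;
}

int main(void)
{
    printf("%d\n", should_ignore_history_traversal(true, false, true)); /* 1 */
    printf("%d\n", should_ignore_history_traversal(true, true,  true)); /* 0 */
    return 0;
}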
CVE-2017-9501
|
https://www.cvedetails.com/cve/CVE-2017-9501/
|
CWE-617
|
https://github.com/ImageMagick/ImageMagick/commit/01843366d6a7b96e22ad7bb67f3df7d9fd4d5d74
|
01843366d6a7b96e22ad7bb67f3df7d9fd4d5d74
|
Fixed incorrect call to DestroyImage reported in #491.
|
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
const ClassType storage_class)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->storage_class=storage_class;
return(SyncImagePixelCache(image,&image->exception));
}
|
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
const ClassType storage_class)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->storage_class=storage_class;
return(SyncImagePixelCache(image,&image->exception));
}
|
C
|
ImageMagick
| 0 |
CVE-2018-0500
|
https://www.cvedetails.com/cve/CVE-2018-0500/
|
CWE-119
|
https://github.com/curl/curl/commit/ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628
|
ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628
|
smtp: use the upload buffer size for scratch buffer malloc
... not the read buffer size, as that can be set smaller and thus cause
a buffer overflow! CVE-2018-0500
Reported-by: Peter Wu
Bug: https://curl.haxx.se/docs/adv_2018-70a2.html
|
static CURLcode smtp_state_command_resp(struct connectdata *conn, int smtpcode,
smtpstate instate)
{
CURLcode result = CURLE_OK;
struct Curl_easy *data = conn->data;
struct SMTP *smtp = data->req.protop;
char *line = data->state.buffer;
size_t len = strlen(line);
(void)instate; /* no use for this yet */
if((smtp->rcpt && smtpcode/100 != 2 && smtpcode != 553 && smtpcode != 1) ||
(!smtp->rcpt && smtpcode/100 != 2 && smtpcode != 1)) {
failf(data, "Command failed: %d", smtpcode);
result = CURLE_RECV_ERROR;
}
else {
/* Temporarily add the LF character back and send as body to the client */
if(!data->set.opt_no_body) {
line[len] = '\n';
result = Curl_client_write(conn, CLIENTWRITE_BODY, line, len + 1);
line[len] = '\0';
}
if(smtpcode != 1) {
if(smtp->rcpt) {
smtp->rcpt = smtp->rcpt->next;
if(smtp->rcpt) {
/* Send the next command */
result = smtp_perform_command(conn);
}
else
/* End of DO phase */
state(conn, SMTP_STOP);
}
else
/* End of DO phase */
state(conn, SMTP_STOP);
}
}
return result;
}
|
static CURLcode smtp_state_command_resp(struct connectdata *conn, int smtpcode,
smtpstate instate)
{
CURLcode result = CURLE_OK;
struct Curl_easy *data = conn->data;
struct SMTP *smtp = data->req.protop;
char *line = data->state.buffer;
size_t len = strlen(line);
(void)instate; /* no use for this yet */
if((smtp->rcpt && smtpcode/100 != 2 && smtpcode != 553 && smtpcode != 1) ||
(!smtp->rcpt && smtpcode/100 != 2 && smtpcode != 1)) {
failf(data, "Command failed: %d", smtpcode);
result = CURLE_RECV_ERROR;
}
else {
/* Temporarily add the LF character back and send as body to the client */
if(!data->set.opt_no_body) {
line[len] = '\n';
result = Curl_client_write(conn, CLIENTWRITE_BODY, line, len + 1);
line[len] = '\0';
}
if(smtpcode != 1) {
if(smtp->rcpt) {
smtp->rcpt = smtp->rcpt->next;
if(smtp->rcpt) {
/* Send the next command */
result = smtp_perform_command(conn);
}
else
/* End of DO phase */
state(conn, SMTP_STOP);
}
else
/* End of DO phase */
state(conn, SMTP_STOP);
}
}
return result;
}
|
C
|
curl
| 0 |
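The curl fix above sizes the CRLF-escaping scratch buffer from the upload buffer that feeds it, not from the (possibly smaller, application-tunable) read buffer. A hedged sketch of that sizing rule; the struct layout and names are assumptions, not curl's internals.

#include <stdio.h>
#include <stdlib.h>

struct session {
    size_t read_buffer_size;    /* applications may shrink this one */
    size_t upload_buffer_size;  /* governs how much is escaped at once */
};

static char *alloc_scratch(const struct session *s)
{
    /* worst case doubles every byte ("\n" -> "\r\n"), hence * 2; sizing
     * from read_buffer_size instead is the overflow the fix removes */
    return malloc(s->upload_buffer_size * 2);
}

int main(void)
{
    struct session s = { .read_buffer_size = 1024,
                         .upload_buffer_size = 65536 };
    char *scratch = alloc_scratch(&s);
    printf("scratch %s\n", scratch ? "allocated" : "failed");
    free(scratch);
    return 0;
}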
CVE-2011-2918
|
https://www.cvedetails.com/cve/CVE-2011-2918/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = NULL;
return 0;
}
|
int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = NULL;
return 0;
}
|
C
|
linux
| 0 |
CVE-2017-18218
|
https://www.cvedetails.com/cve/CVE-2017-18218/
|
CWE-416
|
https://github.com/torvalds/linux/commit/27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
|
27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
|
net: hns: Fix a skb used after free bug
The skb may be freed in hns_nic_net_xmit_hw(), which then returns NETDEV_TX_OK;
this causes hns_nic_net_xmit to use a freed skb.
BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940...
[17659.112635] alloc_debug_processing+0x18c/0x1a0
[17659.117208] __slab_alloc+0x52c/0x560
[17659.120909] kmem_cache_alloc_node+0xac/0x2c0
[17659.125309] __alloc_skb+0x6c/0x260
[17659.128837] tcp_send_ack+0x8c/0x280
[17659.132449] __tcp_ack_snd_check+0x9c/0xf0
[17659.136587] tcp_rcv_established+0x5a4/0xa70
[17659.140899] tcp_v4_do_rcv+0x27c/0x620
[17659.144687] tcp_prequeue_process+0x108/0x170
[17659.149085] tcp_recvmsg+0x940/0x1020
[17659.152787] inet_recvmsg+0x124/0x180
[17659.156488] sock_recvmsg+0x64/0x80
[17659.160012] SyS_recvfrom+0xd8/0x180
[17659.163626] __sys_trace_return+0x0/0x4
[17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13
[17659.174000] free_debug_processing+0x1d4/0x2c0
[17659.178486] __slab_free+0x240/0x390
[17659.182100] kmem_cache_free+0x24c/0x270
[17659.186062] kfree_skbmem+0xa0/0xb0
[17659.189587] __kfree_skb+0x28/0x40
[17659.193025] napi_gro_receive+0x168/0x1c0
[17659.197074] hns_nic_rx_up_pro+0x58/0x90
[17659.201038] hns_nic_rx_poll_one+0x518/0xbc0
[17659.205352] hns_nic_common_poll+0x94/0x140
[17659.209576] net_rx_action+0x458/0x5e0
[17659.213363] __do_softirq+0x1b8/0x480
[17659.217062] run_ksoftirqd+0x64/0x80
[17659.220679] smpboot_thread_fn+0x224/0x310
[17659.224821] kthread+0x150/0x170
[17659.228084] ret_from_fork+0x10/0x40
BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0...
[17751.080490] __slab_alloc+0x52c/0x560
[17751.084188] kmem_cache_alloc+0x244/0x280
[17751.088238] __build_skb+0x40/0x150
[17751.091764] build_skb+0x28/0x100
[17751.095115] __alloc_rx_skb+0x94/0x150
[17751.098900] __napi_alloc_skb+0x34/0x90
[17751.102776] hns_nic_rx_poll_one+0x180/0xbc0
[17751.107097] hns_nic_common_poll+0x94/0x140
[17751.111333] net_rx_action+0x458/0x5e0
[17751.115123] __do_softirq+0x1b8/0x480
[17751.118823] run_ksoftirqd+0x64/0x80
[17751.122437] smpboot_thread_fn+0x224/0x310
[17751.126575] kthread+0x150/0x170
[17751.129838] ret_from_fork+0x10/0x40
[17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43
[17751.139951] free_debug_processing+0x1d4/0x2c0
[17751.144436] __slab_free+0x240/0x390
[17751.148051] kmem_cache_free+0x24c/0x270
[17751.152014] kfree_skbmem+0xa0/0xb0
[17751.155543] __kfree_skb+0x28/0x40
[17751.159022] napi_gro_receive+0x168/0x1c0
[17751.163074] hns_nic_rx_up_pro+0x58/0x90
[17751.167041] hns_nic_rx_poll_one+0x518/0xbc0
[17751.171358] hns_nic_common_poll+0x94/0x140
[17751.175585] net_rx_action+0x458/0x5e0
[17751.179373] __do_softirq+0x1b8/0x480
[17751.183076] run_ksoftirqd+0x64/0x80
[17751.186691] smpboot_thread_fn+0x224/0x310
[17751.190826] kthread+0x150/0x170
[17751.194093] ret_from_fork+0x10/0x40
Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: lipeng <lipeng321@huawei.com>
Reported-by: Jun He <hjat2005@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
napi_enable(&priv->ring_data[idx].napi);
enable_irq(priv->ring_data[idx].ring->irq);
h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
return 0;
}
|
static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
napi_enable(&priv->ring_data[idx].napi);
enable_irq(priv->ring_data[idx].ring->irq);
h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
return 0;
}
|
C
|
linux
| 0 |
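The hns fix above is an instance of a general rule: if a callee may free its argument, copy whatever you still need before the call. A self-contained sketch of that pattern; the sk_buff struct and function names here are simplified stand-ins, not the driver's real types.

#include <stdio.h>
#include <stdlib.h>

struct sk_buff { unsigned int len; };

/* consumes (frees) the buffer on success, like hns_nic_net_xmit_hw() */
static int xmit_hw(struct sk_buff *skb)
{
    free(skb);
    return 0;                      /* NETDEV_TX_OK */
}

static int net_xmit(struct sk_buff *skb)
{
    unsigned int len = skb->len;   /* capture before the skb can be freed */
    int ret = xmit_hw(skb);        /* skb must not be touched after this */
    if (ret == 0)
        printf("transmitted %u bytes\n", len);  /* uses the saved copy */
    return ret;
}

int main(void)
{
    struct sk_buff *skb = malloc(sizeof(*skb));
    if (!skb)
        return 1;
    skb->len = 1500;
    return net_xmit(skb);
}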
CVE-2011-4127
|
https://www.cvedetails.com/cve/CVE-2011-4127/
|
CWE-264
|
https://github.com/torvalds/linux/commit/0bfc96cb77224736dfa35c3c555d37b3646ef35e
|
0bfc96cb77224736dfa35c3c555d37b3646ef35e
|
block: fail SCSI passthrough ioctls on partition devices
Linux allows executing the SG_IO ioctl on a partition or LVM volume, and
will pass the command to the underlying block device. This is
well-known, but it is also a large security problem when (via Unix
permissions, ACLs, SELinux or a combination thereof) a program or user
needs to be granted access only to part of the disk.
This patch lets partitions forward a small set of harmless ioctls;
others are logged with printk so that we can see which ioctls are
actually sent. In my tests only CDROM_GET_CAPABILITY actually occurred.
Of course it was being sent to a (partition on a) hard disk, so it would
have failed with ENOTTY and the patch isn't changing anything in
practice. Still, I'm treating it specially to avoid spamming the logs.
In principle, this restriction should include programs running with
CAP_SYS_RAWIO. If for example I let a program access /dev/sda2 and
/dev/sdb, it still should not be able to read/write outside the
boundaries of /dev/sda2 independent of the capabilities. However, for
now programs with CAP_SYS_RAWIO will still be allowed to send the
ioctls. Their actions will still be logged.
This patch does not affect the non-libata IDE driver. That driver
however already tests for bd != bd->bd_contains before issuing some
ioctl; it could be restricted further to forbid these ioctls even for
programs running with CAP_SYS_ADMIN/CAP_SYS_RAWIO.
Cc: linux-scsi@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>
Cc: James Bottomley <JBottomley@parallels.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[ Make it also print the command name when warning - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
{
unsigned int prot_op = SCSI_PROT_NORMAL;
unsigned int dix = scsi_prot_sg_count(scmd);
if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
if (dif && dix)
prot_op = SCSI_PROT_READ_PASS;
else if (dif && !dix)
prot_op = SCSI_PROT_READ_STRIP;
else if (!dif && dix)
prot_op = SCSI_PROT_READ_INSERT;
} else {
if (dif && dix)
prot_op = SCSI_PROT_WRITE_PASS;
else if (dif && !dix)
prot_op = SCSI_PROT_WRITE_INSERT;
else if (!dif && dix)
prot_op = SCSI_PROT_WRITE_STRIP;
}
scsi_set_prot_op(scmd, prot_op);
scsi_set_prot_type(scmd, dif);
}
|
static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
{
unsigned int prot_op = SCSI_PROT_NORMAL;
unsigned int dix = scsi_prot_sg_count(scmd);
if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
if (dif && dix)
prot_op = SCSI_PROT_READ_PASS;
else if (dif && !dix)
prot_op = SCSI_PROT_READ_STRIP;
else if (!dif && dix)
prot_op = SCSI_PROT_READ_INSERT;
} else {
if (dif && dix)
prot_op = SCSI_PROT_WRITE_PASS;
else if (dif && !dix)
prot_op = SCSI_PROT_WRITE_INSERT;
else if (!dif && dix)
prot_op = SCSI_PROT_WRITE_STRIP;
}
scsi_set_prot_op(scmd, prot_op);
scsi_set_prot_type(scmd, dif);
}
|
C
|
linux
| 0 |
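The block-layer fix above forwards only a small whitelist of harmless ioctls from partitions, while whole devices and raw-I/O-privileged callers pass everything through. A userspace sketch of that policy; the command constants and the capable() stand-in are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

enum { IOC_GET_IDLUN = 1, IOC_GET_BUS_NUMBER, IOC_SG_IO };

static bool capable_sys_rawio(void) { return false; }  /* privilege stand-in */

static int verify_blk_ioctl(bool is_whole_device, int cmd)
{
    if (is_whole_device)
        return 0;                    /* whole disk: forward everything */
    switch (cmd) {                   /* partition: harmless subset only */
    case IOC_GET_IDLUN:
    case IOC_GET_BUS_NUMBER:
        return 0;
    }
    if (capable_sys_rawio())
        return 0;                    /* privileged callers still pass (and are logged) */
    fprintf(stderr, "ioctl %d rejected on partition\n", cmd);
    return -1;                       /* the kernel returns -ENOIOCTLCMD here */
}

int main(void)
{
    printf("%d\n", verify_blk_ioctl(false, IOC_SG_IO));     /* rejected */
    printf("%d\n", verify_blk_ioctl(false, IOC_GET_IDLUN)); /* allowed  */
    printf("%d\n", verify_blk_ioctl(true,  IOC_SG_IO));     /* allowed  */
    return 0;
}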
null | null | null |
https://github.com/chromium/chromium/commit/27c68f543e5eba779902447445dfb05ec3f5bf75
|
27c68f543e5eba779902447445dfb05ec3f5bf75
|
Revert of Add accelerated VP9 decode infrastructure and an implementation for VA-API. (patchset #7 id:260001 of https://codereview.chromium.org/1318863003/ )
Reason for revert:
I think this patch broke the compile step for the Chromium Linux ChromeOS MSan Builder.
First failing build:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder/builds/8310
All recent builds:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder?numbuilds=200
Sorry for the revert. I'll re-revert if I'm wrong.
Cheers,
Tommy
Original issue's description:
> Add accelerated VP9 decode infrastructure and an implementation for VA-API.
>
> - Add a hardware/platform-independent VP9Decoder class and related
> infrastructure, implementing AcceleratedVideoDecoder interface. VP9Decoder
> performs the initial stages of the decode process, which are to be done
> on host/in software, such as stream parsing and reference frame management.
>
> - Add a VP9Accelerator interface, used by the VP9Decoder to offload the
> remaining stages of the decode process to hardware. VP9Accelerator
> implementations are platform-specific.
>
> - Add the first implementation of VP9Accelerator - VaapiVP9Accelerator - and
> integrate it with VaapiVideoDecodeAccelerator, for devices which provide
> hardware VP9 acceleration through VA-API. Hook it up to the new
> infrastructure and VP9Decoder.
>
> - Extend Vp9Parser to provide functionality required by VP9Decoder and
> VP9Accelerator, including superframe parsing, handling of loop filter
> and segmentation initialization, state persistence across frames and
> resetting when needed. Also add code calculating segmentation dequants
> and loop filter levels.
>
> - Update vp9_parser_unittest to the new Vp9Parser interface and flow.
>
> TEST=vp9_parser_unittest,vda_unittest,Chrome VP9 playback
> BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331
> TBR=dpranke@chromium.org
>
> Committed: https://crrev.com/e3cc0a661b8abfdc74f569940949bc1f336ece40
> Cr-Commit-Position: refs/heads/master@{#349312}
TBR=wuchengli@chromium.org,kcwu@chromium.org,sandersd@chromium.org,jorgelo@chromium.org,posciak@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331
Review URL: https://codereview.chromium.org/1357513002
Cr-Commit-Position: refs/heads/master@{#349443}
|
bool VP9Decoder::DecodeAndOutputPicture(scoped_refptr<VP9Picture> pic) {
DCHECK(!pic_size_.IsEmpty());
DCHECK(pic->frame_hdr);
if (!accelerator_->SubmitDecode(pic, parser_.GetSegmentation(),
parser_.GetLoopFilter(), ref_frames_))
return false;
if (pic->frame_hdr->show_frame) {
if (!accelerator_->OutputPicture(pic))
return false;
}
RefreshReferenceFrames(pic);
return true;
}
|
bool VP9Decoder::DecodeAndOutputPicture(scoped_refptr<VP9Picture> pic) {
DCHECK(!pic_size_.IsEmpty());
DCHECK(pic->frame_hdr);
if (!accelerator_->SubmitDecode(pic, parser_.GetSegmentation(),
parser_.GetLoopFilter(), ref_frames_))
return false;
if (pic->frame_hdr->show_frame) {
if (!accelerator_->OutputPicture(pic))
return false;
}
RefreshReferenceFrames(pic);
return true;
}
|
C
|
Chrome
| 0 |
CVE-2017-5075
|
https://www.cvedetails.com/cve/CVE-2017-5075/
|
CWE-200
|
https://github.com/chromium/chromium/commit/fea16c8b60ff3d0756d5eb392394963b647bc41a
|
fea16c8b60ff3d0756d5eb392394963b647bc41a
|
CSP: Strip the fragment from reported URLs.
We should have been stripping the fragment from the URL we report for
CSP violations, but we weren't. Now we are, by running the URLs through
`stripURLForUseInReport()`, which implements the stripping algorithm
from CSP2: https://www.w3.org/TR/CSP2/#strip-uri-for-reporting
Eventually, we will migrate more completely to the CSP3 world that
doesn't require such detailed stripping, as it exposes less data to the
reports, but we're not there yet.
BUG=678776
Review-Url: https://codereview.chromium.org/2619783002
Cr-Commit-Position: refs/heads/master@{#458045}
|
bool ContentSecurityPolicy::allowRequest(
WebURLRequest::RequestContext context,
const KURL& url,
const String& nonce,
const IntegrityMetadataSet& integrityMetadata,
ParserDisposition parserDisposition,
RedirectStatus redirectStatus,
SecurityViolationReportingPolicy reportingPolicy) const {
if (integrityMetadata.isEmpty() &&
!allowRequestWithoutIntegrity(context, url, redirectStatus,
reportingPolicy))
return false;
switch (context) {
case WebURLRequest::RequestContextAudio:
case WebURLRequest::RequestContextTrack:
case WebURLRequest::RequestContextVideo:
return allowMediaFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextBeacon:
case WebURLRequest::RequestContextEventSource:
case WebURLRequest::RequestContextFetch:
case WebURLRequest::RequestContextXMLHttpRequest:
case WebURLRequest::RequestContextSubresource:
return allowConnectToSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextEmbed:
case WebURLRequest::RequestContextObject:
return allowObjectFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextFavicon:
case WebURLRequest::RequestContextImage:
case WebURLRequest::RequestContextImageSet:
return allowImageFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextFont:
return allowFontFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextForm:
return allowFormAction(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextFrame:
case WebURLRequest::RequestContextIframe:
return allowFrameFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextImport:
case WebURLRequest::RequestContextScript:
return allowScriptFromSource(url, nonce, parserDisposition,
redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextXSLT:
return allowScriptFromSource(url, nonce, parserDisposition,
redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextManifest:
return allowManifestFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextServiceWorker:
case WebURLRequest::RequestContextSharedWorker:
case WebURLRequest::RequestContextWorker:
return allowWorkerContextFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextStyle:
return allowStyleFromSource(url, nonce, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextCSPReport:
case WebURLRequest::RequestContextDownload:
case WebURLRequest::RequestContextHyperlink:
case WebURLRequest::RequestContextInternal:
case WebURLRequest::RequestContextLocation:
case WebURLRequest::RequestContextPing:
case WebURLRequest::RequestContextPlugin:
case WebURLRequest::RequestContextPrefetch:
case WebURLRequest::RequestContextUnspecified:
return true;
}
ASSERT_NOT_REACHED();
return true;
}
|
bool ContentSecurityPolicy::allowRequest(
WebURLRequest::RequestContext context,
const KURL& url,
const String& nonce,
const IntegrityMetadataSet& integrityMetadata,
ParserDisposition parserDisposition,
RedirectStatus redirectStatus,
SecurityViolationReportingPolicy reportingPolicy) const {
if (integrityMetadata.isEmpty() &&
!allowRequestWithoutIntegrity(context, url, redirectStatus,
reportingPolicy))
return false;
switch (context) {
case WebURLRequest::RequestContextAudio:
case WebURLRequest::RequestContextTrack:
case WebURLRequest::RequestContextVideo:
return allowMediaFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextBeacon:
case WebURLRequest::RequestContextEventSource:
case WebURLRequest::RequestContextFetch:
case WebURLRequest::RequestContextXMLHttpRequest:
case WebURLRequest::RequestContextSubresource:
return allowConnectToSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextEmbed:
case WebURLRequest::RequestContextObject:
return allowObjectFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextFavicon:
case WebURLRequest::RequestContextImage:
case WebURLRequest::RequestContextImageSet:
return allowImageFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextFont:
return allowFontFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextForm:
return allowFormAction(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextFrame:
case WebURLRequest::RequestContextIframe:
return allowFrameFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextImport:
case WebURLRequest::RequestContextScript:
return allowScriptFromSource(url, nonce, parserDisposition,
redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextXSLT:
return allowScriptFromSource(url, nonce, parserDisposition,
redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextManifest:
return allowManifestFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextServiceWorker:
case WebURLRequest::RequestContextSharedWorker:
case WebURLRequest::RequestContextWorker:
return allowWorkerContextFromSource(url, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextStyle:
return allowStyleFromSource(url, nonce, redirectStatus, reportingPolicy);
case WebURLRequest::RequestContextCSPReport:
case WebURLRequest::RequestContextDownload:
case WebURLRequest::RequestContextHyperlink:
case WebURLRequest::RequestContextInternal:
case WebURLRequest::RequestContextLocation:
case WebURLRequest::RequestContextPing:
case WebURLRequest::RequestContextPlugin:
case WebURLRequest::RequestContextPrefetch:
case WebURLRequest::RequestContextUnspecified:
return true;
}
ASSERT_NOT_REACHED();
return true;
}
|
C
|
Chrome
| 0 |
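The CSP fix above runs reported URLs through a stripping step; the simplest part of CSP2's strip-uri-for-reporting algorithm is dropping the fragment. A minimal sketch of just that step (the real algorithm also strips credentials and reduces cross-origin URLs further):

#include <stdio.h>
#include <string.h>

/* drop everything from '#' onward before the URL goes into a report */
static void strip_fragment(char *url)
{
    char *hash = strchr(url, '#');
    if (hash)
        *hash = '\0';
}

int main(void)
{
    char url[] = "https://example.com/page?q=1#secret-anchor";
    strip_fragment(url);
    printf("%s\n", url);   /* prints https://example.com/page?q=1 */
    return 0;
}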
CVE-2017-5546
|
https://www.cvedetails.com/cve/CVE-2017-5546/
| null |
https://github.com/torvalds/linux/commit/c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
|
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
|
mm/slab.c: fix SLAB freelist randomization duplicate entries
This patch fixes a bug in the freelist randomization code. When a high
random number is used, the freelist will contain duplicate entries,
resulting in different allocations sharing the same chunk. This leads to
odd behaviours and crashes. It should be uncommon, but it depends on the
machine. We saw it happening more often on some machines (every few
hours of running tests).
Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization")
Link: http://lkml.kernel.org/r/20170103181908.143178-1-thgarnie@google.com
Signed-off-by: John Sperbeck <jsperbeck@google.com>
Signed-off-by: Thomas Garnier <thgarnie@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
static void free_block(struct kmem_cache *cachep, void **objpp,
int nr_objects, int node, struct list_head *list)
{
int i;
struct kmem_cache_node *n = get_node(cachep, node);
struct page *page;
n->free_objects += nr_objects;
for (i = 0; i < nr_objects; i++) {
void *objp;
struct page *page;
objp = objpp[i];
page = virt_to_head_page(objp);
list_del(&page->lru);
check_spinlock_acquired_node(cachep, node);
slab_put_obj(cachep, page, objp);
STATS_DEC_ACTIVE(cachep);
/* fixup slab chains */
if (page->active == 0) {
list_add(&page->lru, &n->slabs_free);
n->free_slabs++;
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
*/
list_add_tail(&page->lru, &n->slabs_partial);
}
}
while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
n->free_objects -= cachep->num;
page = list_last_entry(&n->slabs_free, struct page, lru);
list_move(&page->lru, list);
n->free_slabs--;
n->total_slabs--;
}
}
|
static void free_block(struct kmem_cache *cachep, void **objpp,
int nr_objects, int node, struct list_head *list)
{
int i;
struct kmem_cache_node *n = get_node(cachep, node);
struct page *page;
n->free_objects += nr_objects;
for (i = 0; i < nr_objects; i++) {
void *objp;
struct page *page;
objp = objpp[i];
page = virt_to_head_page(objp);
list_del(&page->lru);
check_spinlock_acquired_node(cachep, node);
slab_put_obj(cachep, page, objp);
STATS_DEC_ACTIVE(cachep);
/* fixup slab chains */
if (page->active == 0) {
list_add(&page->lru, &n->slabs_free);
n->free_slabs++;
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
*/
list_add_tail(&page->lru, &n->slabs_partial);
}
}
while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
n->free_objects -= cachep->num;
page = list_last_entry(&n->slabs_free, struct page, lru);
list_move(&page->lru, list);
n->free_slabs--;
n->total_slabs--;
}
}
|
C
|
linux
| 0 |
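The SLAB fix above replaces a shuffle that could emit duplicate freelist entries. One standard duplicate-free construction is a Fisher-Yates swap over the index list, sketched here with rand() standing in for the kernel's per-cache RNG state; this shows the technique, not the kernel's exact code.

#include <stdio.h>
#include <stdlib.h>

/* fill list with 0..count-1, then Fisher-Yates shuffle: each slot index
 * appears exactly once, so no two allocations can share a chunk */
static void shuffle_freelist(unsigned int *list, unsigned int count)
{
    for (unsigned int i = 0; i < count; i++)
        list[i] = i;                         /* identity permutation */
    if (count < 2)
        return;
    for (unsigned int i = count - 1; i > 0; i--) {
        unsigned int j = rand() % (i + 1);   /* j uniform in [0, i] */
        unsigned int tmp = list[i];
        list[i] = list[j];
        list[j] = tmp;                       /* swap keeps entries unique */
    }
}

int main(void)
{
    unsigned int freelist[8];
    shuffle_freelist(freelist, 8);
    for (unsigned int i = 0; i < 8; i++)
        printf("%u ", freelist[i]);
    printf("\n");
    return 0;
}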
CVE-2011-3055
|
https://www.cvedetails.com/cve/CVE-2011-3055/
| null |
https://github.com/chromium/chromium/commit/e9372a1bfd3588a80fcf49aa07321f0971dd6091
|
e9372a1bfd3588a80fcf49aa07321f0971dd6091
|
[V8] Pass Isolate to throwNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=86983
Reviewed by Adam Barth.
The objective is to pass Isolate around in V8 bindings.
This patch passes Isolate to throwNotEnoughArgumentsError().
No tests. No change in behavior.
* bindings/scripts/CodeGeneratorV8.pm:
(GenerateArgumentsCountCheck):
(GenerateEventConstructorCallback):
* bindings/scripts/test/V8/V8Float64Array.cpp:
(WebCore::Float64ArrayV8Internal::fooCallback):
* bindings/scripts/test/V8/V8TestActiveDOMObject.cpp:
(WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback):
(WebCore::TestActiveDOMObjectV8Internal::postMessageCallback):
* bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp:
(WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback):
* bindings/scripts/test/V8/V8TestEventConstructor.cpp:
(WebCore::V8TestEventConstructor::constructorCallback):
* bindings/scripts/test/V8/V8TestEventTarget.cpp:
(WebCore::TestEventTargetV8Internal::itemCallback):
(WebCore::TestEventTargetV8Internal::dispatchEventCallback):
* bindings/scripts/test/V8/V8TestInterface.cpp:
(WebCore::TestInterfaceV8Internal::supplementalMethod2Callback):
(WebCore::V8TestInterface::constructorCallback):
* bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp:
(WebCore::TestMediaQueryListListenerV8Internal::methodCallback):
* bindings/scripts/test/V8/V8TestNamedConstructor.cpp:
(WebCore::V8TestNamedConstructorConstructorCallback):
* bindings/scripts/test/V8/V8TestObj.cpp:
(WebCore::TestObjV8Internal::voidMethodWithArgsCallback):
(WebCore::TestObjV8Internal::intMethodWithArgsCallback):
(WebCore::TestObjV8Internal::objMethodWithArgsCallback):
(WebCore::TestObjV8Internal::methodWithSequenceArgCallback):
(WebCore::TestObjV8Internal::methodReturningSequenceCallback):
(WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback):
(WebCore::TestObjV8Internal::serializedValueCallback):
(WebCore::TestObjV8Internal::idbKeyCallback):
(WebCore::TestObjV8Internal::optionsObjectCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback):
(WebCore::TestObjV8Internal::methodWithCallbackArgCallback):
(WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback):
(WebCore::TestObjV8Internal::overloadedMethod1Callback):
(WebCore::TestObjV8Internal::overloadedMethod2Callback):
(WebCore::TestObjV8Internal::overloadedMethod3Callback):
(WebCore::TestObjV8Internal::overloadedMethod4Callback):
(WebCore::TestObjV8Internal::overloadedMethod5Callback):
(WebCore::TestObjV8Internal::overloadedMethod6Callback):
(WebCore::TestObjV8Internal::overloadedMethod7Callback):
(WebCore::TestObjV8Internal::overloadedMethod11Callback):
(WebCore::TestObjV8Internal::overloadedMethod12Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback):
(WebCore::TestObjV8Internal::convert1Callback):
(WebCore::TestObjV8Internal::convert2Callback):
(WebCore::TestObjV8Internal::convert3Callback):
(WebCore::TestObjV8Internal::convert4Callback):
(WebCore::TestObjV8Internal::convert5Callback):
(WebCore::TestObjV8Internal::strictFunctionCallback):
(WebCore::V8TestObj::constructorCallback):
* bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp:
(WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback):
(WebCore::V8TestSerializedScriptValueInterface::constructorCallback):
* bindings/v8/ScriptController.cpp:
(WebCore::setValueAndClosePopupCallback):
* bindings/v8/V8Proxy.cpp:
(WebCore::V8Proxy::throwNotEnoughArgumentsError):
* bindings/v8/V8Proxy.h:
(V8Proxy):
* bindings/v8/custom/V8AudioContextCustom.cpp:
(WebCore::V8AudioContext::constructorCallback):
* bindings/v8/custom/V8DataViewCustom.cpp:
(WebCore::V8DataView::getInt8Callback):
(WebCore::V8DataView::getUint8Callback):
(WebCore::V8DataView::setInt8Callback):
(WebCore::V8DataView::setUint8Callback):
* bindings/v8/custom/V8DirectoryEntryCustom.cpp:
(WebCore::V8DirectoryEntry::getDirectoryCallback):
(WebCore::V8DirectoryEntry::getFileCallback):
* bindings/v8/custom/V8IntentConstructor.cpp:
(WebCore::V8Intent::constructorCallback):
* bindings/v8/custom/V8SVGLengthCustom.cpp:
(WebCore::V8SVGLength::convertToSpecifiedUnitsCallback):
* bindings/v8/custom/V8WebGLRenderingContextCustom.cpp:
(WebCore::getObjectParameter):
(WebCore::V8WebGLRenderingContext::getAttachedShadersCallback):
(WebCore::V8WebGLRenderingContext::getExtensionCallback):
(WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback):
(WebCore::V8WebGLRenderingContext::getParameterCallback):
(WebCore::V8WebGLRenderingContext::getProgramParameterCallback):
(WebCore::V8WebGLRenderingContext::getShaderParameterCallback):
(WebCore::V8WebGLRenderingContext::getUniformCallback):
(WebCore::vertexAttribAndUniformHelperf):
(WebCore::uniformHelperi):
(WebCore::uniformMatrixHelper):
* bindings/v8/custom/V8WebKitMutationObserverCustom.cpp:
(WebCore::V8WebKitMutationObserver::constructorCallback):
(WebCore::V8WebKitMutationObserver::observeCallback):
* bindings/v8/custom/V8WebSocketCustom.cpp:
(WebCore::V8WebSocket::constructorCallback):
(WebCore::V8WebSocket::sendCallback):
* bindings/v8/custom/V8XMLHttpRequestCustom.cpp:
(WebCore::V8XMLHttpRequest::openCallback):
git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
v8::Persistent<v8::FunctionTemplate> V8Float64Array::GetTemplate()
{
V8BindingPerIsolateData* data = V8BindingPerIsolateData::current();
V8BindingPerIsolateData::TemplateMap::iterator result = data->templateMap().find(&info);
if (result != data->templateMap().end())
return result->second;
v8::HandleScope handleScope;
v8::Persistent<v8::FunctionTemplate> templ =
ConfigureV8Float64ArrayTemplate(GetRawTemplate());
data->templateMap().add(&info, templ);
return templ;
}
|
v8::Persistent<v8::FunctionTemplate> V8Float64Array::GetTemplate()
{
V8BindingPerIsolateData* data = V8BindingPerIsolateData::current();
V8BindingPerIsolateData::TemplateMap::iterator result = data->templateMap().find(&info);
if (result != data->templateMap().end())
return result->second;
v8::HandleScope handleScope;
v8::Persistent<v8::FunctionTemplate> templ =
ConfigureV8Float64ArrayTemplate(GetRawTemplate());
data->templateMap().add(&info, templ);
return templ;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a0af50481db56aa780942e8595a20c36b2c34f5c
|
a0af50481db56aa780942e8595a20c36b2c34f5c
|
Build fix following bug #30696.
Patch by Gavin Barraclough <barraclough@apple.com> on 2009-10-22
Reviewed by NOBODY (build fix).
* WebCoreSupport/FrameLoaderClientGtk.cpp:
(WebKit::FrameLoaderClient::windowObjectCleared):
* webkit/webkitwebframe.cpp:
(webkit_web_frame_get_global_context):
git-svn-id: svn://svn.chromium.org/blink/trunk@49964 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void webkit_web_frame_load_request(WebKitWebFrame* frame, WebKitNetworkRequest* request)
{
g_return_if_fail(WEBKIT_IS_WEB_FRAME(frame));
g_return_if_fail(WEBKIT_IS_NETWORK_REQUEST(request));
Frame* coreFrame = core(frame);
if (!coreFrame)
return;
coreFrame->loader()->load(core(request), false);
}
|
void webkit_web_frame_load_request(WebKitWebFrame* frame, WebKitNetworkRequest* request)
{
g_return_if_fail(WEBKIT_IS_WEB_FRAME(frame));
g_return_if_fail(WEBKIT_IS_NETWORK_REQUEST(request));
Frame* coreFrame = core(frame);
if (!coreFrame)
return;
coreFrame->loader()->load(core(request), false);
}
|
C
|
Chrome
| 0 |
CVE-2016-7115
|
https://www.cvedetails.com/cve/CVE-2016-7115/
|
CWE-119
|
https://github.com/haakonnessjoen/MAC-Telnet/commit/b69d11727d4f0f8cf719c79e3fb700f55ca03e9a
|
b69d11727d4f0f8cf719c79e3fb700f55ca03e9a
|
Merge pull request #20 from eyalitki/master
2nd round security fixes from eyalitki
|
static int handle_packet(unsigned char *data, int data_len) {
struct mt_mactelnet_hdr pkthdr;
/* Minimal size checks (pings are not supported here) */
if (data_len < MT_HEADER_LEN){
return -1;
}
parse_packet(data, &pkthdr);
/* We only care about packets with correct sessionkey */
if (pkthdr.seskey != sessionkey) {
return -1;
}
/* Handle data packets */
if (pkthdr.ptype == MT_PTYPE_DATA) {
struct mt_packet odata;
struct mt_mactelnet_control_hdr cpkt;
int success = 0;
/* Always transmit ACKNOWLEDGE packets in response to DATA packets */
init_packet(&odata, MT_PTYPE_ACK, srcmac, dstmac, sessionkey, pkthdr.counter + (data_len - MT_HEADER_LEN));
send_udp(&odata, 0);
/* Accept first packet, and all packets greater than incounter, and if counter has
wrapped around. */
if (pkthdr.counter > incounter || (incounter - pkthdr.counter) > 65535) {
incounter = pkthdr.counter;
} else {
/* Ignore double or old packets */
return -1;
}
/* Parse controlpacket data */
success = parse_control_packet(data + MT_HEADER_LEN, data_len - MT_HEADER_LEN, &cpkt);
while (success) {
/* If we receive pass_salt, transmit auth data back */
if (cpkt.cptype == MT_CPTYPE_PASSSALT) {
/* check validity, server sends exactly 16 bytes */
if (cpkt.length != 16) {
fprintf(stderr, _("Invalid salt length: %d (instead of 16) received from server %s\n"), cpkt.length, ether_ntoa((struct ether_addr *)dstmac));
}
memcpy(pass_salt, cpkt.data, 16);
send_auth(username, password);
}
/* If the (remaining) data did not have a control-packet magic byte sequence,
the data is raw terminal data to be outputted to the terminal. */
else if (cpkt.cptype == MT_CPTYPE_PLAINDATA) {
fwrite((const void *)cpkt.data, 1, cpkt.length, stdout);
}
/* END_AUTH means that the user/password negotiation is done, and after this point
terminal data may arrive, so we set up the terminal to raw mode. */
else if (cpkt.cptype == MT_CPTYPE_END_AUTH) {
/* we have entered "terminal mode" */
terminal_mode = 1;
if (is_a_tty) {
/* stop input buffering at all levels. Give full control of terminal to RouterOS */
raw_term();
setvbuf(stdin, (char*)NULL, _IONBF, 0);
/* Add resize signal handler */
signal(SIGWINCH, sig_winch);
}
}
/* Parse next controlpacket */
success = parse_control_packet(NULL, 0, &cpkt);
}
}
else if (pkthdr.ptype == MT_PTYPE_ACK) {
/* Handled elsewhere */
}
/* The server wants to terminate the connection, we have to oblige */
else if (pkthdr.ptype == MT_PTYPE_END) {
struct mt_packet odata;
/* Acknowledge the disconnection by sending a END packet in return */
init_packet(&odata, MT_PTYPE_END, srcmac, dstmac, pkthdr.seskey, 0);
send_udp(&odata, 0);
if (!quiet_mode) {
fprintf(stderr, _("Connection closed.\n"));
}
/* exit */
running = 0;
} else {
fprintf(stderr, _("Unhandeled packet type: %d received from server %s\n"), pkthdr.ptype, ether_ntoa((struct ether_addr *)dstmac));
return -1;
}
return pkthdr.ptype;
}
|
static int handle_packet(unsigned char *data, int data_len) {
struct mt_mactelnet_hdr pkthdr;
/* Minimal size checks (pings are not supported here) */
if (data_len < MT_HEADER_LEN){
return -1;
}
parse_packet(data, &pkthdr);
/* We only care about packets with correct sessionkey */
if (pkthdr.seskey != sessionkey) {
return -1;
}
/* Handle data packets */
if (pkthdr.ptype == MT_PTYPE_DATA) {
struct mt_packet odata;
struct mt_mactelnet_control_hdr cpkt;
int success = 0;
/* Always transmit ACKNOWLEDGE packets in response to DATA packets */
init_packet(&odata, MT_PTYPE_ACK, srcmac, dstmac, sessionkey, pkthdr.counter + (data_len - MT_HEADER_LEN));
send_udp(&odata, 0);
/* Accept first packet, and all packets greater than incounter, and if counter has
wrapped around. */
if (pkthdr.counter > incounter || (incounter - pkthdr.counter) > 65535) {
incounter = pkthdr.counter;
} else {
/* Ignore double or old packets */
return -1;
}
/* Parse controlpacket data */
success = parse_control_packet(data + MT_HEADER_LEN, data_len - MT_HEADER_LEN, &cpkt);
while (success) {
/* If we receive pass_salt, transmit auth data back */
if (cpkt.cptype == MT_CPTYPE_PASSSALT) {
memcpy(pass_salt, cpkt.data, cpkt.length);
send_auth(username, password);
}
/* If the (remaining) data did not have a control-packet magic byte sequence,
the data is raw terminal data to be outputted to the terminal. */
else if (cpkt.cptype == MT_CPTYPE_PLAINDATA) {
fwrite((const void *)cpkt.data, 1, cpkt.length, stdout);
}
/* END_AUTH means that the user/password negotiation is done, and after this point
terminal data may arrive, so we set up the terminal to raw mode. */
else if (cpkt.cptype == MT_CPTYPE_END_AUTH) {
/* we have entered "terminal mode" */
terminal_mode = 1;
if (is_a_tty) {
/* stop input buffering at all levels. Give full control of terminal to RouterOS */
raw_term();
setvbuf(stdin, (char*)NULL, _IONBF, 0);
/* Add resize signal handler */
signal(SIGWINCH, sig_winch);
}
}
/* Parse next controlpacket */
success = parse_control_packet(NULL, 0, &cpkt);
}
}
else if (pkthdr.ptype == MT_PTYPE_ACK) {
/* Handled elsewhere */
}
/* The server wants to terminate the connection, we have to oblige */
else if (pkthdr.ptype == MT_PTYPE_END) {
struct mt_packet odata;
/* Acknowledge the disconnection by sending a END packet in return */
init_packet(&odata, MT_PTYPE_END, srcmac, dstmac, pkthdr.seskey, 0);
send_udp(&odata, 0);
if (!quiet_mode) {
fprintf(stderr, _("Connection closed.\n"));
}
/* exit */
running = 0;
} else {
fprintf(stderr, _("Unhandeled packet type: %d received from server %s\n"), pkthdr.ptype, ether_ntoa((struct ether_addr *)dstmac));
return -1;
}
return pkthdr.ptype;
}
|
C
|
MAC-Telnet
| 1 |
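The fix in this record bounds the salt copy: the vulnerable version did memcpy(pass_salt, cpkt.data, cpkt.length) with an attacker-controlled length into a fixed 16-byte buffer, while the patched version pins the copy to 16 bytes and warns on a length mismatch. A minimal standalone sketch of the validate-before-copy pattern (a stricter variant that rejects mismatched lengths outright; names are illustrative, not MAC-Telnet source):
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SALT_LEN 16

static uint8_t pass_salt[SALT_LEN];

/* Copy a received salt only when its declared length matches the
   fixed-size destination; never let the sender choose the copy size. */
static int copy_salt(const uint8_t *data, size_t len)
{
    if (len != SALT_LEN) {
        fprintf(stderr, "invalid salt length: %zu (expected %d)\n",
                len, SALT_LEN);
        return -1;
    }
    memcpy(pass_salt, data, SALT_LEN);
    return 0;
}

int main(void)
{
    uint8_t good[SALT_LEN] = {0};
    uint8_t oversized[64] = {0};

    printf("good:      %d\n", copy_salt(good, sizeof good));           /* 0  */
    printf("oversized: %d\n", copy_salt(oversized, sizeof oversized)); /* -1 */
    return 0;
}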
CVE-2016-4558
|
https://www.cvedetails.com/cve/CVE-2016-4558/
| null |
https://github.com/torvalds/linux/commit/92117d8443bc5afacc8d5ba82e541946310f106e
|
92117d8443bc5afacc8d5ba82e541946310f106e
|
bpf: fix refcnt overflow
On a system with >32Gbyte of physical memory and infinite RLIMIT_MEMLOCK,
the malicious application may overflow 32-bit bpf program refcnt.
It's also possible to overflow map refcnt on 1Tb system.
Impose 32k hard limit which means that the same bpf program or
map cannot be shared by more than 32k processes.
Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_map *map = filp->private_data;
seq_printf(m,
"map_type:\t%u\n"
"key_size:\t%u\n"
"value_size:\t%u\n"
"max_entries:\t%u\n"
"map_flags:\t%#x\n",
map->map_type,
map->key_size,
map->value_size,
map->max_entries,
map->map_flags);
}
|
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_map *map = filp->private_data;
seq_printf(m,
"map_type:\t%u\n"
"key_size:\t%u\n"
"value_size:\t%u\n"
"max_entries:\t%u\n"
"map_flags:\t%#x\n",
map->map_type,
map->key_size,
map->value_size,
map->max_entries,
map->map_flags);
}
|
C
|
linux
| 0 |
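The commit message above describes capping refcounts at 32k so user-triggered increments cannot wrap a 32-bit counter. A hedged userspace sketch of a limited refcount take using C11 atomics (the kernel patch itself operates on bpf prog/map refcnts; everything below is illustrative):
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_REFCNT 32768   /* hard cap, mirroring the 32k limit in the fix */

struct obj {
    atomic_int refcnt;
};

/* Take a reference, refusing rather than risking counter wrap-around. */
static bool obj_get(struct obj *o)
{
    if (atomic_fetch_add(&o->refcnt, 1) >= MAX_REFCNT) {
        atomic_fetch_sub(&o->refcnt, 1);   /* undo the speculative bump */
        return false;
    }
    return true;
}

int main(void)
{
    struct obj o;

    atomic_init(&o.refcnt, 1);
    printf("fresh object: %s\n", obj_get(&o) ? "took ref" : "refused");

    atomic_init(&o.refcnt, MAX_REFCNT);
    printf("at the cap:   %s\n", obj_get(&o) ? "took ref" : "refused");
    return 0;
}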
CVE-2011-4930
|
https://www.cvedetails.com/cve/CVE-2011-4930/
|
CWE-134
|
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867
|
5e5571d1a431eb3c61977b6dd6ec90186ef79867
| null |
void BaseShadow::removeJobPre( const char* reason )
{
if( ! jobAd ) {
dprintf( D_ALWAYS, "In removeJob() w/ NULL JobAd!" );
}
dprintf( D_ALWAYS, "Job %d.%d is being removed: %s\n",
getCluster(), getProc(), reason );
cleanUp();
int size = strlen( reason ) + strlen( ATTR_REMOVE_REASON ) + 4;
char* buf = (char*)malloc( size * sizeof(char) );
if( ! buf ) {
EXCEPT( "Out of memory!" );
}
sprintf( buf, "%s=\"%s\"", ATTR_REMOVE_REASON, reason );
jobAd->Insert( buf );
free( buf );
emailRemoveEvent( reason );
if( !updateJobInQueue(U_REMOVE) ) {
dprintf( D_ALWAYS, "Failed to update job queue!\n" );
}
}
|
void BaseShadow::removeJobPre( const char* reason )
{
if( ! jobAd ) {
dprintf( D_ALWAYS, "In removeJob() w/ NULL JobAd!" );
}
dprintf( D_ALWAYS, "Job %d.%d is being removed: %s\n",
getCluster(), getProc(), reason );
cleanUp();
int size = strlen( reason ) + strlen( ATTR_REMOVE_REASON ) + 4;
char* buf = (char*)malloc( size * sizeof(char) );
if( ! buf ) {
EXCEPT( "Out of memory!" );
}
sprintf( buf, "%s=\"%s\"", ATTR_REMOVE_REASON, reason );
jobAd->Insert( buf );
free( buf );
emailRemoveEvent( reason );
if( !updateJobInQueue(U_REMOVE) ) {
dprintf( D_ALWAYS, "Failed to update job queue!\n" );
}
}
|
CPP
|
htcondor
| 0 |
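This record is classified CWE-134 (uncontrolled format string). The function shown already routes `reason` through a "%s" conversion; the classic bug in that class, and its one-line fix, look like this (illustrative, not HTCondor code):
#include <stdio.h>

static void log_reason(const char *reason)
{
    /* UNSAFE: printf(reason) would let %n/%s in user data drive printf. */
    /* SAFE: user data only ever appears as a %s argument. */
    printf("job removed: %s\n", reason);
}

int main(void)
{
    log_reason("user requested %n%s");   /* printed literally, not interpreted */
    return 0;
}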
CVE-2018-12714
|
https://www.cvedetails.com/cve/CVE-2018-12714/
|
CWE-787
|
https://github.com/torvalds/linux/commit/81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
|
81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
|
Merge tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
"This contains a few fixes and a clean up.
- a bad merge caused an "endif" to go in the wrong place in
scripts/Makefile.build
- softirq tracing fix for tracing that corrupts lockdep and causes a
false splat
- histogram documentation typo fixes
- fix a bad memory reference when passing in no filter to the filter
code
- simplify code by using the swap macro instead of open coding the
swap"
* tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Fix SKIP_STACK_VALIDATION=1 build due to bad merge with -mrecord-mcount
tracing: Fix some errors in histogram documentation
tracing: Use swap macro in update_max_tr
softirq: Reorder trace_softirqs_on to prevent lockdep splat
tracing: Check for no filter when processing event filters
|
void trace_find_cmdline(int pid, char comm[])
{
preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
__trace_find_cmdline(pid, comm);
arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
}
|
void trace_find_cmdline(int pid, char comm[])
{
preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
__trace_find_cmdline(pid, comm);
arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
}
|
C
|
linux
| 0 |
CVE-2014-9644
|
https://www.cvedetails.com/cve/CVE-2014-9644/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4943ba16bbc2db05115707b3ff7b4874e9e3c560
|
4943ba16bbc2db05115707b3ff7b4874e9e3c560
|
crypto: include crypto- module prefix in template
This adds the module loading prefix "crypto-" to the template lookup
as well.
For example, attempting to load 'vfat(blowfish)' via AF_ALG now correctly
includes the "crypto-" prefix at every level, correctly rejecting "vfat":
net-pf-38
algif-hash
crypto-vfat(blowfish)
crypto-vfat(blowfish)-all
crypto-vfat
Reported-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
|
void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
u32 *a = (u32 *)dst;
u32 *b = (u32 *)src;
for (; size >= 4; size -= 4)
*a++ ^= *b++;
crypto_xor_byte((u8 *)a, (u8 *)b, size);
}
|
void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
u32 *a = (u32 *)dst;
u32 *b = (u32 *)src;
for (; size >= 4; size -= 4)
*a++ ^= *b++;
crypto_xor_byte((u8 *)a, (u8 *)b, size);
}
|
C
|
linux
| 0 |
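crypto_xor above XORs `size` bytes of src into dst a 32-bit word at a time, finishing the 0-3 byte tail with crypto_xor_byte. A self-contained userspace equivalent with the tail helper written out (the kernel version additionally relies on callers passing suitably aligned buffers):
#include <stdint.h>
#include <stdio.h>

static void xor_bytes(uint8_t *a, const uint8_t *b, unsigned int size)
{
    for (; size; size--)
        *a++ ^= *b++;
}

static void xor_buf(uint8_t *dst, const uint8_t *src, unsigned int size)
{
    uint32_t *a = (uint32_t *)dst;
    const uint32_t *b = (const uint32_t *)src;

    for (; size >= 4; size -= 4)        /* bulk: one 32-bit word at a time */
        *a++ ^= *b++;
    xor_bytes((uint8_t *)a, (const uint8_t *)b, size);  /* 0-3 byte tail */
}

int main(void)
{
    /* uint32_t backing storage keeps the buffers word-aligned */
    uint32_t dst[2] = {0xdeadbeefu, 0x11223344u};
    uint32_t src[2] = {0xdeadbeefu, 0x11223344u};

    xor_buf((uint8_t *)dst, (const uint8_t *)src, 5);   /* 4 bulk + 1 tail */
    printf("%08x %08x\n", (unsigned)dst[0], (unsigned)dst[1]);
    /* little-endian: 00000000 11223300 — only the first 5 bytes changed */
    return 0;
}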
null | null | null |
https://github.com/chromium/chromium/commit/8a50f99c25fb70ff43aaa82b6f9569db383f0ca8
|
8a50f99c25fb70ff43aaa82b6f9569db383f0ca8
|
[Sync] Rework unit tests for ChromeInvalidationClient
In particular, add unit tests that would have caught bug 139424.
Dep-inject InvalidationClient into ChromeInvalidationClient.
Use the function name 'UpdateRegisteredIds' consistently.
Replace some mocks with fakes.
BUG=139424
Review URL: https://chromiumcodereview.appspot.com/10827133
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@150665 0039d316-1c4b-4281-b951-d872f2087c98
|
virtual double GetJitter() {
return jitter_;
}
|
virtual double GetJitter() {
return jitter_;
}
|
C
|
Chrome
| 0 |
CVE-2014-3611
|
https://www.cvedetails.com/cve/CVE-2014-3611/
|
CWE-362
|
https://github.com/torvalds/linux/commit/2febc839133280d5a5e8e1179c94ea674489dae2
|
2febc839133280d5a5e8e1179c94ea674489dae2
|
KVM: x86: Improve thread safety in pit
There's a race condition in the PIT emulation code in KVM. In
__kvm_migrate_pit_timer the pit_timer object is accessed without
synchronization. If the race condition occurs at the wrong time this
can crash the host kernel.
This fixes CVE-2014-3611.
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
|
static int speaker_ioport_write(struct kvm_io_device *this,
gpa_t addr, int len, const void *data)
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
struct kvm *kvm = pit->kvm;
u32 val = *(u32 *) data;
if (addr != KVM_SPEAKER_BASE_ADDRESS)
return -EOPNOTSUPP;
mutex_lock(&pit_state->lock);
pit_state->speaker_data_on = (val >> 1) & 1;
pit_set_gate(kvm, 2, val & 1);
mutex_unlock(&pit_state->lock);
return 0;
}
|
static int speaker_ioport_write(struct kvm_io_device *this,
gpa_t addr, int len, const void *data)
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
struct kvm *kvm = pit->kvm;
u32 val = *(u32 *) data;
if (addr != KVM_SPEAKER_BASE_ADDRESS)
return -EOPNOTSUPP;
mutex_lock(&pit_state->lock);
pit_state->speaker_data_on = (val >> 1) & 1;
pit_set_gate(kvm, 2, val & 1);
mutex_unlock(&pit_state->lock);
return 0;
}
|
C
|
linux
| 0 |
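The race fixed here was an unsynchronized read of pit_timer in __kvm_migrate_pit_timer; the fix takes the same pit_state lock that speaker_ioport_write above already holds around its accesses. A generic pthread sketch of that discipline (illustrative names; build with -lpthread):
#include <pthread.h>
#include <stdio.h>

struct timer_state {
    pthread_mutex_t lock;
    int period;
};

/* Every access to the shared timer state goes through the same lock,
   so a concurrent path (e.g. a timer migration) can never observe or
   modify it half-updated. */
static void set_period(struct timer_state *t, int period)
{
    pthread_mutex_lock(&t->lock);
    t->period = period;
    pthread_mutex_unlock(&t->lock);
}

static int get_period(struct timer_state *t)
{
    pthread_mutex_lock(&t->lock);
    int p = t->period;
    pthread_mutex_unlock(&t->lock);
    return p;
}

int main(void)
{
    struct timer_state t = { PTHREAD_MUTEX_INITIALIZER, 0 };
    set_period(&t, 100);
    printf("period=%d\n", get_period(&t));
    return 0;
}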
CVE-2016-3822
|
https://www.cvedetails.com/cve/CVE-2016-3822/
|
CWE-119
|
https://android.googlesource.com/platform/external/jhead/+/bae671597d47b9e5955c4cb742e468cebfd7ca6b
|
bae671597d47b9e5955c4cb742e468cebfd7ca6b
|
Fix possible out of bounds access
Bug: 28868315
Change-Id: I2b416c662f9ad7f9b3c6cf973a39c6693c66775a
|
static void float2urat(double value, unsigned int max, unsigned int *numerator,
unsigned int *denominator) {
if (value <= 0) {
*numerator = 0;
*denominator = 1;
return;
}
if (value > max) {
*numerator = max;
*denominator = 1;
return;
}
if (value < 1e-9) {
unsigned int n = (unsigned int)(value * max);
if (n == 0) {
*numerator = 0;
*denominator = 1;
} else {
*numerator = n;
*denominator = max;
}
return;
}
unsigned int d;
for (d = 1000000000; d >= 1; d /= 10) {
double s = value * d;
if (s <= max) {
unsigned int n = (unsigned int)s;
while (n % 10 == 0 && d >= 10) {
n /= 10;
d /= 10;
}
*numerator = n;
*denominator = d;
return;
}
}
*numerator = 0;
*denominator = 1;
}
|
static void float2urat(double value, unsigned int max, unsigned int *numerator,
unsigned int *denominator) {
if (value <= 0) {
*numerator = 0;
*denominator = 1;
return;
}
if (value > max) {
*numerator = max;
*denominator = 1;
return;
}
if (value < 1e-9) {
unsigned int n = (unsigned int)(value * max);
if (n == 0) {
*numerator = 0;
*denominator = 1;
} else {
*numerator = n;
*denominator = max;
}
return;
}
unsigned int d;
for (d = 1000000000; d >= 1; d /= 10) {
double s = value * d;
if (s <= max) {
unsigned int n = (unsigned int)s;
while (n % 10 == 0 && d >= 10) {
n /= 10;
d /= 10;
}
*numerator = n;
*denominator = d;
return;
}
}
*numerator = 0;
*denominator = 1;
}
|
C
|
Android
| 0 |
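float2urat converts a double into an unsigned numerator/denominator pair (an EXIF-style rational), clamping to max and stripping common factors of ten. A small driver meant to be compiled together with the function above; the expected outputs follow from tracing the loop:
#include <stdio.h>

/* assumes float2urat() from the record above is pasted in here */

int main(void)
{
    unsigned int n, d;

    float2urat(0.5, 0xFFFFFFFFu, &n, &d);
    printf("0.5  -> %u/%u\n", n, d);    /* 5/10 */

    float2urat(3.0, 0xFFFFFFFFu, &n, &d);
    printf("3.0  -> %u/%u\n", n, d);    /* 3/1 */

    float2urat(1e12, 0xFFFFFFFFu, &n, &d);
    printf("1e12 -> %u/%u\n", n, d);    /* clamped: 4294967295/1 */
    return 0;
}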
CVE-2018-17205
|
https://www.cvedetails.com/cve/CVE-2018-17205/
|
CWE-617
|
https://github.com/openvswitch/ovs/commit/0befd1f3745055c32940f5faf9559be6a14395e6
|
0befd1f3745055c32940f5faf9559be6a14395e6
|
ofproto: Fix OVS crash when reverting old flows in bundle commit
During bundle commit, flows added in the bundle are applied to
ofproto in order. If a flow cannot be added (e.g. the flow action is
a go-to group id which does not exist), OVS tries to revert all
previous flows that were successfully applied from the same bundle.
This is possible since OVS maintains a list of old flows which were
replaced by flows from the bundle.
While reinserting old flows, OVS asserts due to a check on rule
state != RULE_INITIALIZED. This works only for new flows; for old
flows the rule state will be RULE_REMOVED. This causes an assert and
an OVS crash.
The ovs assert check should be modified to != RULE_INSERTED to prevent
any existing rule from being re-inserted while allowing new rules and old
rules (in the revert case) to be inserted.
Here is an example to trigger the assert:
$ ovs-vsctl add-br br-test -- set Bridge br-test datapath_type=netdev
$ cat flows.txt
flow add table=1,priority=0,in_port=2,actions=NORMAL
flow add table=1,priority=0,in_port=3,actions=NORMAL
$ ovs-ofctl dump-flows -OOpenflow13 br-test
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=2 actions=NORMAL
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=3 actions=NORMAL
$ cat flow-modify.txt
flow modify table=1,priority=0,in_port=2,actions=drop
flow modify table=1,priority=0,in_port=3,actions=group:10
$ ovs-ofctl bundle br-test flow-modify.txt -OOpenflow13
The first flow rule will be modified since it is a valid rule. However, the second
rule is invalid since no group with id 10 exists. Bundle commit tries to
revert (insert) the first rule back to the old flow, which results in an ovs_assert at
ofproto_rule_insert__() since the old rule->state = RULE_REMOVED.
Signed-off-by: Vishal Deep Ajmera <vishal.deep.ajmera@ericsson.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
|
ofproto_group_lookup__(const struct ofproto *ofproto, uint32_t group_id,
ovs_version_t version)
{
struct ofgroup *group;
CMAP_FOR_EACH_WITH_HASH (group, cmap_node, hash_int(group_id, 0),
&ofproto->groups) {
if (group->group_id == group_id
&& versions_visible_in_version(&group->versions, version)) {
return group;
}
}
return NULL;
}
|
ofproto_group_lookup__(const struct ofproto *ofproto, uint32_t group_id,
ovs_version_t version)
{
struct ofgroup *group;
CMAP_FOR_EACH_WITH_HASH (group, cmap_node, hash_int(group_id, 0),
&ofproto->groups) {
if (group->group_id == group_id
&& versions_visible_in_version(&group->versions, version)) {
return group;
}
}
return NULL;
}
|
C
|
ovs
| 0 |
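The fix relaxes the insertion precondition: instead of requiring RULE_INITIALIZED (true only for brand-new rules), insertion merely forbids a rule that is already RULE_INSERTED, so reverted RULE_REMOVED rules pass. A minimal state-machine sketch of that check (illustrative, not the ofproto code):
#include <assert.h>
#include <stdio.h>

enum rule_state { RULE_INITIALIZED, RULE_INSERTED, RULE_REMOVED };

static void rule_insert(enum rule_state *state)
{
    /* Old precondition: assert(*state == RULE_INITIALIZED) — crashes
       when a reverted (RULE_REMOVED) rule is re-inserted.
       Fixed precondition: only double insertion is forbidden. */
    assert(*state != RULE_INSERTED);
    *state = RULE_INSERTED;
}

int main(void)
{
    enum rule_state fresh = RULE_INITIALIZED;
    enum rule_state reverted = RULE_REMOVED;

    rule_insert(&fresh);      /* allowed before and after the fix */
    rule_insert(&reverted);   /* allowed only after the fix */
    printf("both inserted\n");
    return 0;
}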
CVE-2008-7316
|
https://www.cvedetails.com/cve/CVE-2008-7316/
|
CWE-20
|
https://github.com/torvalds/linux/commit/124d3b7041f9a0ca7c43a6293e1cae4576c32fd5
|
124d3b7041f9a0ca7c43a6293e1cae4576c32fd5
|
fix writev regression: pan hanging unkillable and un-straceable
Frederik Himpe reported an unkillable and un-straceable pan process.
Zero length iovecs can go into an infinite loop in writev, because the
iovec iterator does not always advance over them.
The sequence required to trigger this is not trivial. I think it
requires that a zero-length iovec be followed by a non-zero-length iovec
which causes a pagefault in the atomic usercopy. This causes the writev
code to drop back into single-segment copy mode, which then tries to
copy the 0 bytes of the zero-length iovec; a zero length copy looks like
a failure though, so it loops.
Put a test into iov_iter_advance to catch zero-length iovecs. We could
just put the test in the fallback path, but I feel it is more robust to
skip over zero-length iovecs throughout the code (iovec iterator may be
used in filesystems too, so it should be robust).
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
{
int status;
struct page *page;
repeat:
page = find_lock_page(mapping, index);
if (likely(page))
return page;
page = page_cache_alloc(mapping);
if (!page)
return NULL;
status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
if (unlikely(status)) {
page_cache_release(page);
if (status == -EEXIST)
goto repeat;
return NULL;
}
return page;
}
|
struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
{
int status;
struct page *page;
repeat:
page = find_lock_page(mapping, index);
if (likely(page))
return page;
page = page_cache_alloc(mapping);
if (!page)
return NULL;
status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
if (unlikely(status)) {
page_cache_release(page);
if (status == -EEXIST)
goto repeat;
return NULL;
}
return page;
}
|
C
|
linux
| 0 |
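The underlying fix makes the iovec iterator step over zero-length segments, so a fallback 0-byte copy can never be mistaken for repeated failure. A hedged sketch of the skip logic in isolation (the real iov_iter_advance tracks intra-segment offsets as well):
#include <stdio.h>
#include <sys/uio.h>

/* Advance past any leading zero-length iovecs so callers always see a
   segment with bytes to copy, or run out of segments entirely. */
static void skip_empty_iovecs(const struct iovec **iov, unsigned long *nr)
{
    while (*nr > 0 && (*iov)->iov_len == 0) {
        (*iov)++;
        (*nr)--;
    }
}

int main(void)
{
    char buf[4] = "abc";
    struct iovec v[3] = {
        { NULL, 0 },           /* zero-length segments that used to stall */
        { NULL, 0 },
        { buf, sizeof buf },
    };
    const struct iovec *p = v;
    unsigned long nr = 3;

    skip_empty_iovecs(&p, &nr);
    printf("segments left: %lu, first len: %zu\n", nr, p->iov_len);
    return 0;
}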
CVE-2013-0910
|
https://www.cvedetails.com/cve/CVE-2013-0910/
|
CWE-287
|
https://github.com/chromium/chromium/commit/ac8bd041b81e46e4e4fcd5021aaa5499703952e6
|
ac8bd041b81e46e4e4fcd5021aaa5499703952e6
|
Follow-on fixes and naming changes for https://codereview.chromium.org/12086077/
BUG=172573
Review URL: https://codereview.chromium.org/12177018
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180600 0039d316-1c4b-4281-b951-d872f2087c98
|
void NotifyPluginsOfActivation() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
for (PluginProcessHostIterator iter; !iter.Done(); ++iter)
iter->OnAppActivation();
}
|
void NotifyPluginsOfActivation() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
for (PluginProcessHostIterator iter; !iter.Done(); ++iter)
iter->OnAppActivation();
}
|
C
|
Chrome
| 0 |
CVE-2016-10162
|
https://www.cvedetails.com/cve/CVE-2016-10162/
|
CWE-476
|
https://github.com/php/php-src/commit/8d2539fa0faf3f63e1d1e7635347c5b9e777d47b
|
8d2539fa0faf3f63e1d1e7635347c5b9e777d47b
|
Fix bug #73831 - NULL Pointer Dereference while unserialize php object
|
static void php_wddx_process_data(void *user_data, const XML_Char *s, int len)
{
st_entry *ent;
wddx_stack *stack = (wddx_stack *)user_data;
if (!wddx_stack_is_empty(stack) && !stack->done) {
wddx_stack_top(stack, (void**)&ent);
switch (ent->type) {
case ST_BINARY:
case ST_STRING:
if (Z_STRLEN(ent->data) == 0) {
zval_ptr_dtor(&ent->data);
ZVAL_STRINGL(&ent->data, (char *)s, len);
} else {
Z_STR(ent->data) = zend_string_extend(Z_STR(ent->data), Z_STRLEN(ent->data) + len, 0);
memcpy(Z_STRVAL(ent->data) + Z_STRLEN(ent->data) - len, (char *)s, len);
Z_STRVAL(ent->data)[Z_STRLEN(ent->data)] = '\0';
}
break;
case ST_NUMBER:
ZVAL_STRINGL(&ent->data, (char *)s, len);
convert_scalar_to_number(&ent->data);
break;
case ST_BOOLEAN:
if (!strcmp((char *)s, "true")) {
ZVAL_TRUE(&ent->data);
} else if (!strcmp((char *)s, "false")) {
ZVAL_FALSE(&ent->data);
} else {
zval_ptr_dtor(&ent->data);
if (ent->varname) {
efree(ent->varname);
ent->varname = NULL;
}
ZVAL_UNDEF(&ent->data);
}
break;
case ST_DATETIME: {
zend_string *str;
if (Z_TYPE(ent->data) == IS_STRING) {
str = zend_string_safe_alloc(Z_STRLEN(ent->data), 1, len, 0);
memcpy(ZSTR_VAL(str), Z_STRVAL(ent->data), Z_STRLEN(ent->data));
memcpy(ZSTR_VAL(str) + Z_STRLEN(ent->data), s, len);
ZSTR_VAL(str)[ZSTR_LEN(str)] = '\0';
zval_dtor(&ent->data);
} else {
str = zend_string_init((char *)s, len, 0);
}
ZVAL_LONG(&ent->data, php_parse_date(ZSTR_VAL(str), NULL));
/* date out of range < 1969 or > 2038 */
if (Z_LVAL(ent->data) == -1) {
ZVAL_STR_COPY(&ent->data, str);
}
zend_string_release(str);
}
break;
default:
break;
}
}
}
|
static void php_wddx_process_data(void *user_data, const XML_Char *s, int len)
{
st_entry *ent;
wddx_stack *stack = (wddx_stack *)user_data;
if (!wddx_stack_is_empty(stack) && !stack->done) {
wddx_stack_top(stack, (void**)&ent);
switch (ent->type) {
case ST_BINARY:
case ST_STRING:
if (Z_STRLEN(ent->data) == 0) {
zval_ptr_dtor(&ent->data);
ZVAL_STRINGL(&ent->data, (char *)s, len);
} else {
Z_STR(ent->data) = zend_string_extend(Z_STR(ent->data), Z_STRLEN(ent->data) + len, 0);
memcpy(Z_STRVAL(ent->data) + Z_STRLEN(ent->data) - len, (char *)s, len);
Z_STRVAL(ent->data)[Z_STRLEN(ent->data)] = '\0';
}
break;
case ST_NUMBER:
ZVAL_STRINGL(&ent->data, (char *)s, len);
convert_scalar_to_number(&ent->data);
break;
case ST_BOOLEAN:
if (!strcmp((char *)s, "true")) {
ZVAL_TRUE(&ent->data);
} else if (!strcmp((char *)s, "false")) {
ZVAL_FALSE(&ent->data);
} else {
zval_ptr_dtor(&ent->data);
if (ent->varname) {
efree(ent->varname);
ent->varname = NULL;
}
ZVAL_UNDEF(&ent->data);
}
break;
case ST_DATETIME: {
zend_string *str;
if (Z_TYPE(ent->data) == IS_STRING) {
str = zend_string_safe_alloc(Z_STRLEN(ent->data), 1, len, 0);
memcpy(ZSTR_VAL(str), Z_STRVAL(ent->data), Z_STRLEN(ent->data));
memcpy(ZSTR_VAL(str) + Z_STRLEN(ent->data), s, len);
ZSTR_VAL(str)[ZSTR_LEN(str)] = '\0';
zval_dtor(&ent->data);
} else {
str = zend_string_init((char *)s, len, 0);
}
ZVAL_LONG(&ent->data, php_parse_date(ZSTR_VAL(str), NULL));
/* date out of range < 1969 or > 2038 */
if (Z_LVAL(ent->data) == -1) {
ZVAL_STR_COPY(&ent->data, str);
}
zend_string_release(str);
}
break;
default:
break;
}
}
}
|
C
|
php-src
| 0 |
CVE-2016-1667
|
https://www.cvedetails.com/cve/CVE-2016-1667/
|
CWE-284
|
https://github.com/chromium/chromium/commit/350f7d4b2c76950c8e7271284de84a9756b796e1
|
350f7d4b2c76950c8e7271284de84a9756b796e1
|
P2PQuicStream write functionality.
This adds the P2PQuicStream::WriteData function and adds tests. It also
adds the concept of a write buffered amount, enforcing this at the
P2PQuicStreamImpl.
Bug: 874296
Change-Id: Id02c8aa8d5368a87bb24a2e50dab5ef94bcae131
Reviewed-on: https://chromium-review.googlesource.com/c/1315534
Commit-Queue: Seth Hampson <shampson@chromium.org>
Reviewed-by: Henrik Boström <hbos@chromium.org>
Cr-Commit-Position: refs/heads/master@{#605766}
|
void SetupConnectedStreams() {
CallbackRunLoop run_loop(runner());
ASSERT_TRUE(client_peer_->quic_transport()->IsEncryptionEstablished());
ASSERT_TRUE(server_peer_->quic_transport()->IsEncryptionEstablished());
client_peer_->CreateStreamWithDelegate();
ASSERT_TRUE(client_peer_->stream());
ASSERT_TRUE(client_peer_->stream_delegate());
base::RepeatingCallback<void()> callback = run_loop.CreateCallback();
QuicPeerForTest* server_peer_ptr = server_peer_.get();
MockP2PQuicStreamDelegate* stream_delegate =
new MockP2PQuicStreamDelegate();
P2PQuicStream* server_stream;
EXPECT_CALL(*server_peer_->quic_transport_delegate(), OnStream(_))
.WillOnce(Invoke([&callback, &server_stream,
&stream_delegate](P2PQuicStream* stream) {
stream->SetDelegate(stream_delegate);
server_stream = stream;
callback.Run();
}));
client_peer_->stream()->WriteData(
std::vector<uint8_t>(kTriggerRemoteStreamPhrase.begin(),
kTriggerRemoteStreamPhrase.end()),
/*fin=*/false);
run_loop.RunUntilCallbacksFired();
server_peer_ptr->SetStreamAndDelegate(
static_cast<P2PQuicStreamImpl*>(server_stream),
std::unique_ptr<MockP2PQuicStreamDelegate>(stream_delegate));
ASSERT_TRUE(client_peer_->stream());
ASSERT_TRUE(client_peer_->stream_delegate());
}
|
void SetupConnectedStreams() {
CallbackRunLoop run_loop(runner());
ASSERT_TRUE(client_peer_->quic_transport()->IsEncryptionEstablished());
ASSERT_TRUE(server_peer_->quic_transport()->IsEncryptionEstablished());
client_peer_->CreateStreamWithDelegate();
ASSERT_TRUE(client_peer_->stream());
ASSERT_TRUE(client_peer_->stream_delegate());
base::RepeatingCallback<void()> callback = run_loop.CreateCallback();
QuicPeerForTest* server_peer_ptr = server_peer_.get();
MockP2PQuicStreamDelegate* stream_delegate =
new MockP2PQuicStreamDelegate();
P2PQuicStream* server_stream;
EXPECT_CALL(*server_peer_->quic_transport_delegate(), OnStream(_))
.WillOnce(Invoke([&callback, &server_stream,
&stream_delegate](P2PQuicStream* stream) {
stream->SetDelegate(stream_delegate);
server_stream = stream;
callback.Run();
}));
client_peer_->stream()->WriteOrBufferData(kTriggerRemoteStreamPhrase,
/*fin=*/false, nullptr);
run_loop.RunUntilCallbacksFired();
server_peer_ptr->SetStreamAndDelegate(
static_cast<P2PQuicStreamImpl*>(server_stream),
std::unique_ptr<MockP2PQuicStreamDelegate>(stream_delegate));
ASSERT_TRUE(client_peer_->stream());
ASSERT_TRUE(client_peer_->stream_delegate());
}
|
C
|
Chrome
| 1 |
CVE-2016-6520
|
https://www.cvedetails.com/cve/CVE-2016-6520/
|
CWE-125
|
https://github.com/ImageMagick/ImageMagick/commit/76401e172ea3a55182be2b8e2aca4d07270f6da6
|
76401e172ea3a55182be2b8e2aca4d07270f6da6
|
Evaluate lazy pixel cache morphology to prevent buffer overflow (bug report from Ibrahim M. El-Sayed)
|
MagickExport MagickBooleanType GrayscaleImage(Image *image,
const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
{
image->intensity=method;
image->type=GrayscaleType;
return(SetImageColorspace(image,GRAYColorspace,exception));
}
#endif
/*
Grayscale image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
blue,
green,
red,
intensity;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
red=(MagickRealType) GetPixelRed(image,q);
green=(MagickRealType) GetPixelGreen(image,q);
blue=(MagickRealType) GetPixelBlue(image,q);
intensity=0.0;
switch (method)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
intensity=(MagickRealType) (((double) red*red+green*green+
blue*blue)/3.0);
break;
}
case Rec601LumaPixelIntensityMethod:
{
if (image->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
if (image->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:
{
if (image->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if (image->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
intensity=(MagickRealType) (sqrt((double) red*red+green*green+
blue*blue)/sqrt(3.0));
break;
}
}
SetPixelGray(image,ClampToQuantum(intensity),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GrayscaleImage)
#endif
proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
image->intensity=method;
image->type=GrayscaleType;
return(SetImageColorspace(image,GRAYColorspace,exception));
}
|
MagickExport MagickBooleanType GrayscaleImage(Image *image,
const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
{
image->intensity=method;
image->type=GrayscaleType;
return(SetImageColorspace(image,GRAYColorspace,exception));
}
#endif
/*
Grayscale image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
blue,
green,
red,
intensity;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
red=(MagickRealType) GetPixelRed(image,q);
green=(MagickRealType) GetPixelGreen(image,q);
blue=(MagickRealType) GetPixelBlue(image,q);
intensity=0.0;
switch (method)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
intensity=(MagickRealType) (((double) red*red+green*green+
blue*blue)/3.0);
break;
}
case Rec601LumaPixelIntensityMethod:
{
if (image->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
if (image->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:
{
if (image->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if (image->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
intensity=(MagickRealType) (sqrt((double) red*red+green*green+
blue*blue)/sqrt(3.0));
break;
}
}
SetPixelGray(image,ClampToQuantum(intensity),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GrayscaleImage)
#endif
proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
image->intensity=method;
image->type=GrayscaleType;
return(SetImageColorspace(image,GRAYColorspace,exception));
}
|
C
|
ImageMagick
| 0 |
CVE-2019-14763
|
https://www.cvedetails.com/cve/CVE-2019-14763/
|
CWE-189
|
https://github.com/torvalds/linux/commit/c91815b596245fd7da349ecc43c8def670d2269e
|
c91815b596245fd7da349ecc43c8def670d2269e
|
usb: dwc3: gadget: never call ->complete() from ->ep_queue()
This is a requirement which has always existed but, somehow, wasn't
reflected in the documentation and problems weren't found until now
when Tuba Yavuz found a possible deadlock happening between dwc3 and
f_hid. She described the situation as follows:
spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire
/* we our function has been disabled by host */
if (!hidg->req) {
free_ep_req(hidg->in_ep, hidg->req);
goto try_again;
}
[...]
status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
=>
[...]
=> usb_gadget_giveback_request
=>
f_hidg_req_complete
=>
spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire
Note that this happens because dwc3 would call ->complete() on a
failed usb_ep_queue() due to failed Start Transfer command. This is,
anyway, a theoretical situation because dwc3 currently uses "No
Response Update Transfer" command for Bulk and Interrupt endpoints.
It's still good to make this case impossible to happen even if the "No
Response Update Transfer" command is changed.
Reported-by: Tuba Yavuz <tuba@ece.ufl.edu>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
int retries = 10000;
u32 reg;
/*
* Wait until device controller is ready. Only applies to 1.94a and
* later RTL.
*/
if (dwc->revision >= DWC3_REVISION_194A) {
while (--retries) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
if (reg & DWC3_DSTS_DCNRD)
udelay(5);
else
break;
}
if (retries <= 0)
return -ETIMEDOUT;
}
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
/* set requested state */
reg |= DWC3_DCTL_ULSTCHNGREQ(state);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
/*
* The following code is racy when called from dwc3_gadget_wakeup,
* and is not needed, at least on newer versions
*/
if (dwc->revision >= DWC3_REVISION_194A)
return 0;
/* wait for a change in DSTS */
retries = 10000;
while (--retries) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
if (DWC3_DSTS_USBLNKST(reg) == state)
return 0;
udelay(5);
}
return -ETIMEDOUT;
}
|
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
int retries = 10000;
u32 reg;
/*
* Wait until device controller is ready. Only applies to 1.94a and
* later RTL.
*/
if (dwc->revision >= DWC3_REVISION_194A) {
while (--retries) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
if (reg & DWC3_DSTS_DCNRD)
udelay(5);
else
break;
}
if (retries <= 0)
return -ETIMEDOUT;
}
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
/* set requested state */
reg |= DWC3_DCTL_ULSTCHNGREQ(state);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
/*
* The following code is racy when called from dwc3_gadget_wakeup,
* and is not needed, at least on newer versions
*/
if (dwc->revision >= DWC3_REVISION_194A)
return 0;
/* wait for a change in DSTS */
retries = 10000;
while (--retries) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
if (DWC3_DSTS_USBLNKST(reg) == state)
return 0;
udelay(5);
}
return -ETIMEDOUT;
}
|
C
|
linux
| 0 |
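The deadlock described arises when ->ep_queue() invokes the completion callback synchronously on failure while the caller still holds the lock that the callback re-acquires. A minimal pthread illustration of the fixed contract, where the error travels back as a return code instead (illustrative names; build with -lpthread):
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t gadget_lock = PTHREAD_MUTEX_INITIALIZER;

static void complete_cb(void)
{
    /* Completion handlers take their own lock — which may be the very
       lock the queueing caller holds, so they must never run inside
       the queue call itself. */
    pthread_mutex_lock(&gadget_lock);
    puts("completion ran");
    pthread_mutex_unlock(&gadget_lock);
}

/* Fixed contract: on failure, report an error code; never invoke the
   completion synchronously from inside the queue path. */
static int ep_queue(bool fail)
{
    return fail ? -1 : 0;
}

int main(void)
{
    pthread_mutex_lock(&gadget_lock);
    int ret = ep_queue(true);      /* error comes back as a return code */
    pthread_mutex_unlock(&gadget_lock);

    if (ret)
        complete_cb();             /* runs only after the lock is dropped */
    printf("queue returned %d\n", ret);
    return 0;
}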
CVE-2012-5136
|
https://www.cvedetails.com/cve/CVE-2012-5136/
|
CWE-20
|
https://github.com/chromium/chromium/commit/401d30ef93030afbf7e81e53a11b68fc36194502
|
401d30ef93030afbf7e81e53a11b68fc36194502
|
Refactoring: Move m_mayDisplaySeamlesslyWithParent down to Document
The member is used only in Document, thus no reason to
stay in SecurityContext.
TEST=none
BUG=none
R=haraken@chromium.org, abarth, haraken, hayato
Review URL: https://codereview.chromium.org/27615003
git-svn-id: svn://svn.chromium.org/blink/trunk@159829 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
PassRefPtr<Element> Document::createElement(const QualifiedName& qName, bool createdByParser)
{
RefPtr<Element> e;
if (qName.namespaceURI() == xhtmlNamespaceURI)
e = HTMLElementFactory::createHTMLElement(qName, this, 0, createdByParser);
else if (qName.namespaceURI() == SVGNames::svgNamespaceURI)
e = SVGElementFactory::createSVGElement(qName, this, createdByParser);
if (e)
m_sawElementsInKnownNamespaces = true;
else
e = Element::create(qName, &document());
ASSERT((qName.matches(imageTag) && e->tagQName().matches(imgTag) && e->tagQName().prefix() == qName.prefix()) || qName == e->tagQName());
return e.release();
}
|
PassRefPtr<Element> Document::createElement(const QualifiedName& qName, bool createdByParser)
{
RefPtr<Element> e;
if (qName.namespaceURI() == xhtmlNamespaceURI)
e = HTMLElementFactory::createHTMLElement(qName, this, 0, createdByParser);
else if (qName.namespaceURI() == SVGNames::svgNamespaceURI)
e = SVGElementFactory::createSVGElement(qName, this, createdByParser);
if (e)
m_sawElementsInKnownNamespaces = true;
else
e = Element::create(qName, &document());
ASSERT((qName.matches(imageTag) && e->tagQName().matches(imgTag) && e->tagQName().prefix() == qName.prefix()) || qName == e->tagQName());
return e.release();
}
|
C
|
Chrome
| 0 |
CVE-2018-11363
|
https://www.cvedetails.com/cve/CVE-2018-11363/
|
CWE-125
|
https://github.com/AndreRenaud/PDFGen/commit/ee58aff6918b8bbc3be29b9e3089485ea46ff956
|
ee58aff6918b8bbc3be29b9e3089485ea46ff956
|
jpeg: Fix another possible buffer overrun
Found via the clang libfuzzer
|
static int utf8_to_utf32(const char *utf8, int len, uint32_t *utf32)
{
uint32_t ch = *utf8;
int i;
uint8_t mask;
if ((ch & 0x80) == 0) {
len = 1;
mask = 0x7f;
} else if ((ch & 0xe0) == 0xc0 && len >= 2) {
len = 2;
mask = 0x1f;
} else if ((ch & 0xf0) == 0xe0 && len >= 3) {
len = 3;
mask = 0xf;
} else if ((ch & 0xf8) == 0xf0 && len >= 4) {
len = 4;
mask = 0x7;
} else
return -EINVAL;
ch = 0;
for (i = 0; i < len; i++) {
int shift = (len - i - 1) * 6;
if (i == 0)
ch |= ((uint32_t)(*utf8++) & mask) << shift;
else
ch |= ((uint32_t)(*utf8++) & 0x3f) << shift;
}
*utf32 = ch;
return len;
}
|
static int utf8_to_utf32(const char *utf8, int len, uint32_t *utf32)
{
uint32_t ch = *utf8;
int i;
uint8_t mask;
if ((ch & 0x80) == 0) {
len = 1;
mask = 0x7f;
} else if ((ch & 0xe0) == 0xc0 && len >= 2) {
len = 2;
mask = 0x1f;
} else if ((ch & 0xf0) == 0xe0 && len >= 3) {
len = 3;
mask = 0xf;
} else if ((ch & 0xf8) == 0xf0 && len >= 4) {
len = 4;
mask = 0x7;
} else
return -EINVAL;
ch = 0;
for (i = 0; i < len; i++) {
int shift = (len - i - 1) * 6;
if (i == 0)
ch |= ((uint32_t)(*utf8++) & mask) << shift;
else
ch |= ((uint32_t)(*utf8++) & 0x3f) << shift;
}
*utf32 = ch;
return len;
}
|
C
|
PDFGen
| 0 |
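utf8_to_utf32 decodes one UTF-8 sequence of at most len bytes into a code point, returning the number of bytes consumed or -EINVAL; the len >= checks in each branch are what prevent reading past a truncated buffer. A small driver, compiled together with the function above:
#include <errno.h>    /* EINVAL, used by utf8_to_utf32 */
#include <stdint.h>
#include <stdio.h>

/* assumes utf8_to_utf32() from the record above is pasted in here */

int main(void)
{
    uint32_t cp;
    int used;

    used = utf8_to_utf32("A", 1, &cp);
    printf("used=%d cp=U+%04X\n", used, (unsigned)cp);   /* used=1 cp=U+0041 */

    used = utf8_to_utf32("\xC3\xA9", 2, &cp);            /* U+00E9, "é" */
    printf("used=%d cp=U+%04X\n", used, (unsigned)cp);   /* used=2 cp=U+00E9 */

    used = utf8_to_utf32("\xC3", 1, &cp);                /* truncated sequence */
    printf("used=%d (a -EINVAL rejection)\n", used);
    return 0;
}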
CVE-2012-1601
|
https://www.cvedetails.com/cve/CVE-2012-1601/
|
CWE-399
|
https://github.com/torvalds/linux/commit/9c895160d25a76c21b65bad141b08e8d4f99afef
|
9c895160d25a76c21b65bad141b08e8d4f99afef
|
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
(cherry picked from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e)
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
int l;
for (l = 0; l < (len + 32); l += 32)
ia64_fc((void *)(start + l));
ia64_sync_i();
ia64_srlz_i();
}
|
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
int l;
for (l = 0; l < (len + 32); l += 32)
ia64_fc((void *)(start + l));
ia64_sync_i();
ia64_srlz_i();
}
|
C
|
linux
| 0 |
CVE-2017-2647
|
https://www.cvedetails.com/cve/CVE-2017-2647/
|
CWE-476
|
https://github.com/torvalds/linux/commit/c06cfb08b88dfbe13be44a69ae2fdc3a7c902d81
|
c06cfb08b88dfbe13be44a69ae2fdc3a7c902d81
|
KEYS: Remove key_type::match in favour of overriding default by match_preparse
A previous patch added a ->match_preparse() method to the key type. This is
allowed to override the function called by the iteration algorithm.
Therefore, we can just set a default that simply checks for an exact match of
the key description with the original criterion data and allow match_preparse
to override it as needed.
The key_type::match op is then redundant and can be removed, as can the
user_match() function.
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
|
int ceph_decrypt2(struct ceph_crypto_key *secret,
void *dst1, size_t *dst1_len,
void *dst2, size_t *dst2_len,
const void *src, size_t src_len)
{
size_t t;
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst1_len + *dst2_len < src_len)
return -ERANGE;
t = min(*dst1_len, src_len);
memcpy(dst1, src, t);
*dst1_len = t;
src += t;
src_len -= t;
if (src_len) {
t = min(*dst2_len, src_len);
memcpy(dst2, src, t);
*dst2_len = t;
}
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_decrypt2(secret->key, secret->len,
dst1, dst1_len, dst2, dst2_len,
src, src_len);
default:
return -EINVAL;
}
}
|
int ceph_decrypt2(struct ceph_crypto_key *secret,
void *dst1, size_t *dst1_len,
void *dst2, size_t *dst2_len,
const void *src, size_t src_len)
{
size_t t;
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst1_len + *dst2_len < src_len)
return -ERANGE;
t = min(*dst1_len, src_len);
memcpy(dst1, src, t);
*dst1_len = t;
src += t;
src_len -= t;
if (src_len) {
t = min(*dst2_len, src_len);
memcpy(dst2, src, t);
*dst2_len = t;
}
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_decrypt2(secret->key, secret->len,
dst1, dst1_len, dst2, dst2_len,
src, src_len);
default:
return -EINVAL;
}
}
|
C
|
linux
| 0 |
CVE-2015-1213
|
https://www.cvedetails.com/cve/CVE-2015-1213/
|
CWE-119
|
https://github.com/chromium/chromium/commit/faaa2fd0a05f1622d9a8806da118d4f3b602e707
|
faaa2fd0a05f1622d9a8806da118d4f3b602e707
|
[Blink>Media] Allow autoplay muted on Android by default
There was a mistake causing autoplay muted to be shipped on Android,
but it will be disabled if the chromium embedder doesn't specify a
content setting for the "AllowAutoplay" preference. This CL makes the
AllowAutoplay preference true by default so that it is allowed by
embedders (including AndroidWebView) unless they explicitly
disable it.
Intent to ship:
https://groups.google.com/a/chromium.org/d/msg/blink-dev/Q1cnzNI2GpI/AL_eyUNABgAJ
BUG=689018
Review-Url: https://codereview.chromium.org/2677173002
Cr-Commit-Position: refs/heads/master@{#448423}
|
WebMediaPlayer::Preload HTMLMediaElement::preloadType() const {
const AtomicString& preload = fastGetAttribute(preloadAttr);
if (equalIgnoringCase(preload, "none")) {
UseCounter::count(document(), UseCounter::HTMLMediaElementPreloadNone);
return WebMediaPlayer::PreloadNone;
}
if (document().settings() &&
(document().settings()->getDataSaverEnabled() ||
document().settings()->getForcePreloadNoneForMediaElements()) &&
(m_currentSrc.protocol() != "blob" && m_currentSrc.protocol() != "data" &&
m_currentSrc.protocol() != "file")) {
UseCounter::count(document(),
UseCounter::HTMLMediaElementPreloadForcedNone);
return WebMediaPlayer::PreloadNone;
}
if (equalIgnoringCase(preload, "metadata")) {
UseCounter::count(document(), UseCounter::HTMLMediaElementPreloadMetadata);
return WebMediaPlayer::PreloadMetaData;
}
if (networkStateNotifier().isCellularConnectionType()) {
UseCounter::count(document(),
UseCounter::HTMLMediaElementPreloadForcedMetadata);
return WebMediaPlayer::PreloadMetaData;
}
if (equalIgnoringCase(preload, "auto")) {
UseCounter::count(document(), UseCounter::HTMLMediaElementPreloadAuto);
return WebMediaPlayer::PreloadAuto;
}
UseCounter::count(document(), UseCounter::HTMLMediaElementPreloadDefault);
return WebMediaPlayer::PreloadAuto;
}
|
WebMediaPlayer::Preload HTMLMediaElement::preloadType() const {
const AtomicString& preload = fastGetAttribute(preloadAttr);
if (equalIgnoringCase(preload, "none")) {
UseCounter::count(document(), UseCounter::HTMLMediaElementPreloadNone);
return WebMediaPlayer::PreloadNone;
}
if (document().settings() &&
(document().settings()->getDataSaverEnabled() ||
document().settings()->getForcePreloadNoneForMediaElements()) &&
(m_currentSrc.protocol() != "blob" && m_currentSrc.protocol() != "data" &&
m_currentSrc.protocol() != "file")) {
UseCounter::count(document(),
UseCounter::HTMLMediaElementPreloadForcedNone);
return WebMediaPlayer::PreloadNone;
}
if (equalIgnoringCase(preload, "metadata")) {
UseCounter::count(document(), UseCounter::HTMLMediaElementPreloadMetadata);
return WebMediaPlayer::PreloadMetaData;
}
if (networkStateNotifier().isCellularConnectionType()) {
UseCounter::count(document(),
UseCounter::HTMLMediaElementPreloadForcedMetadata);
return WebMediaPlayer::PreloadMetaData;
}
if (equalIgnoringCase(preload, "auto")) {
UseCounter::count(document(), UseCounter::HTMLMediaElementPreloadAuto);
return WebMediaPlayer::PreloadAuto;
}
UseCounter::count(document(), UseCounter::HTMLMediaElementPreloadDefault);
return WebMediaPlayer::PreloadAuto;
}
|
C
|
Chrome
| 0 |
CVE-2014-3191
|
https://www.cvedetails.com/cve/CVE-2014-3191/
|
CWE-416
|
https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea.
updateWidgetPositions() can destroy the render tree, so it should never
be called from inside RenderLayerScrollableArea. Leaving it there allows
for the potential of use-after-free bugs.
BUG=402407
R=vollick@chromium.org
Review URL: https://codereview.chromium.org/490473003
git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void FrameView::setTransparent(bool isTransparent)
{
m_isTransparent = isTransparent;
DisableCompositingQueryAsserts disabler;
if (renderView() && renderView()->layer()->hasCompositedLayerMapping())
renderView()->layer()->compositedLayerMapping()->updateContentsOpaque();
}
|
void FrameView::setTransparent(bool isTransparent)
{
m_isTransparent = isTransparent;
DisableCompositingQueryAsserts disabler;
if (renderView() && renderView()->layer()->hasCompositedLayerMapping())
renderView()->layer()->compositedLayerMapping()->updateContentsOpaque();
}
|
C
|
Chrome
| 0 |
CVE-2016-1907
|
https://www.cvedetails.com/cve/CVE-2016-1907/
|
CWE-119
|
https://anongit.mindrot.org/openssh.git/commit/?id=2fecfd486bdba9f51b3a789277bb0733ca36e1c0
|
2fecfd486bdba9f51b3a789277bb0733ca36e1c0
| null |
ssh_packet_get_bytes(struct ssh *ssh, u_int64_t *ibytes, u_int64_t *obytes)
{
if (ibytes)
*ibytes = ssh->state->p_read.bytes;
if (obytes)
*obytes = ssh->state->p_send.bytes;
}
|
ssh_packet_get_bytes(struct ssh *ssh, u_int64_t *ibytes, u_int64_t *obytes)
{
if (ibytes)
*ibytes = ssh->state->p_read.bytes;
if (obytes)
*obytes = ssh->state->p_send.bytes;
}
|
C
|
mindrot
| 0 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes querying GL_COMPLETION_STATUS_KHR on programs much
cheaper by minimizing round-trips to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <kbr@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657568}
|
void GLES2Implementation::SetColorSpaceMetadataCHROMIUM(
GLuint texture_id,
GLColorSpace color_space) {
#if defined(__native_client__)
SetGLError(GL_INVALID_VALUE, "GLES2::SetColorSpaceMetadataCHROMIUM",
"not supported");
#else
gfx::ColorSpace* gfx_color_space =
reinterpret_cast<gfx::ColorSpace*>(color_space);
base::Pickle color_space_data;
IPC::ParamTraits<gfx::ColorSpace>::Write(&color_space_data, *gfx_color_space);
ScopedTransferBufferPtr buffer(color_space_data.size(), helper_,
transfer_buffer_);
if (!buffer.valid() || buffer.size() < color_space_data.size()) {
SetGLError(GL_OUT_OF_MEMORY, "GLES2::SetColorSpaceMetadataCHROMIUM",
"out of memory");
return;
}
memcpy(buffer.address(), color_space_data.data(), color_space_data.size());
helper_->SetColorSpaceMetadataCHROMIUM(
texture_id, buffer.shm_id(), buffer.offset(), color_space_data.size());
#endif
}
|
void GLES2Implementation::SetColorSpaceMetadataCHROMIUM(
GLuint texture_id,
GLColorSpace color_space) {
#if defined(__native_client__)
SetGLError(GL_INVALID_VALUE, "GLES2::SetColorSpaceMetadataCHROMIUM",
"not supported");
#else
gfx::ColorSpace* gfx_color_space =
reinterpret_cast<gfx::ColorSpace*>(color_space);
base::Pickle color_space_data;
IPC::ParamTraits<gfx::ColorSpace>::Write(&color_space_data, *gfx_color_space);
ScopedTransferBufferPtr buffer(color_space_data.size(), helper_,
transfer_buffer_);
if (!buffer.valid() || buffer.size() < color_space_data.size()) {
SetGLError(GL_OUT_OF_MEMORY, "GLES2::SetColorSpaceMetadataCHROMIUM",
"out of memory");
return;
}
memcpy(buffer.address(), color_space_data.data(), color_space_data.size());
helper_->SetColorSpaceMetadataCHROMIUM(
texture_id, buffer.shm_id(), buffer.offset(), color_space_data.size());
#endif
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/9ad7483d8e7c20e9f1a5a08d00150fb51899f14c
|
9ad7483d8e7c20e9f1a5a08d00150fb51899f14c
|
Shutdown Timebomb - In canary, get a callstack if it takes longer than
10 minutes. In Dev, get callstack if it takes longer than 20 minutes.
In Beta (50 minutes) and Stable (100 minutes) it is same as before.
BUG=519321
R=asvitkine@chromium.org
Review URL: https://codereview.chromium.org/1409333005
Cr-Commit-Position: refs/heads/master@{#355586}
|
JankTimeBomb::~JankTimeBomb() {
}
|
JankTimeBomb::~JankTimeBomb() {
}
|
C
|
Chrome
| 0 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes querying GL_COMPLETION_STATUS_KHR on programs much
cheaper by minimizing round-trips to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <kbr@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657568}
|
void GLES2DecoderPassthroughImpl::OnDebugMessage(GLenum source,
GLenum type,
GLuint id,
GLenum severity,
GLsizei length,
const GLchar* message) {
if (type == GL_DEBUG_TYPE_ERROR && source == GL_DEBUG_SOURCE_API) {
had_error_callback_ = true;
}
}
|
void GLES2DecoderPassthroughImpl::OnDebugMessage(GLenum source,
GLenum type,
GLuint id,
GLenum severity,
GLsizei length,
const GLchar* message) {
if (type == GL_DEBUG_TYPE_ERROR && source == GL_DEBUG_SOURCE_API) {
had_error_callback_ = true;
}
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/3a353ebdb7753a3fbeb401c4c0e0f3358ccbb90b
|
3a353ebdb7753a3fbeb401c4c0e0f3358ccbb90b
|
Support pausing media when a context is frozen.
Media is resumed when the context is unpaused. This feature will be used
for bfcache and the pausing-iframes feature policy.
BUG=907125
Change-Id: Ic3925ea1a4544242b7bf0b9ad8c9cb9f63976bbd
Reviewed-on: https://chromium-review.googlesource.com/c/1410126
Commit-Queue: Dave Tapuska <dtapuska@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Mounir Lamouri <mlamouri@chromium.org>
Cr-Commit-Position: refs/heads/master@{#623319}
|
void DefaultAudioDestinationHandler::StopRendering() {
DCHECK(IsMainThread());
StopPlatformDestination();
}
|
void DefaultAudioDestinationHandler::StopRendering() {
DCHECK(IsMainThread());
StopPlatformDestination();
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/62b8b6e168a12263aab6b88dbef0b900cc37309f
|
62b8b6e168a12263aab6b88dbef0b900cc37309f
|
Add partial magnifier to ash palette.
The partial magnifier will magnify a small portion of the screen, similar to a spyglass.
TEST=./out/Release/ash_unittests --gtest_filter=PartialMagnificationControllerTest.*
TBR=oshima@chromium.org
BUG=616112
Review-Url: https://codereview.chromium.org/2239553002
Cr-Commit-Position: refs/heads/master@{#414124}
|
aura::Window* PartialMagnificationController::GetCurrentRootWindow() {
|
aura::Window* PartialMagnificationController::GetCurrentRootWindow() {
aura::Window::Windows root_windows = Shell::GetAllRootWindows();
for (aura::Window::Windows::const_iterator iter = root_windows.begin();
iter != root_windows.end(); ++iter) {
aura::Window* root_window = *iter;
if (root_window->ContainsPointInRoot(
root_window->GetHost()->dispatcher()->GetLastMouseLocationInRoot()))
return root_window;
}
return NULL;
}
|
C
|
Chrome
| 1 |
CVE-2014-3171
|
https://www.cvedetails.com/cve/CVE-2014-3171/
| null |
https://github.com/chromium/chromium/commit/d10a8dac48d3a9467e81c62cb45208344f4542db
|
d10a8dac48d3a9467e81c62cb45208344f4542db
|
Replace further questionable HashMap::add usages in bindings
BUG=390928
R=dcarney@chromium.org
Review URL: https://codereview.chromium.org/411273002
git-svn-id: svn://svn.chromium.org/blink/trunk@178823 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static uint32_t encode(uint32_t value)
{
if (value & (1U << 31))
value = ((~value) << 1) + 1;
else
value <<= 1;
return value;
}
|
static uint32_t encode(uint32_t value)
{
if (value & (1U << 31))
value = ((~value) << 1) + 1;
else
value <<= 1;
return value;
}
|
C
|
Chrome
| 0 |
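The encode helper above is ZigZag encoding: it interleaves signed values so small magnitudes of either sign map to small unsigned codes (0→0, -1→1, 1→2, -2→3, ...). A hedged sketch of the matching decoder plus a round-trip check:
#include <stdint.h>
#include <stdio.h>

static uint32_t encode(uint32_t value)   /* as in the record above */
{
    if (value & (1U << 31))
        value = ((~value) << 1) + 1;
    else
        value <<= 1;
    return value;
}

/* Inverse mapping: even codes are non-negative, odd codes negative. */
static uint32_t decode(uint32_t value)
{
    return (value >> 1) ^ (0U - (value & 1U));
}

int main(void)
{
    int32_t samples[] = { 0, 1, -1, 2, -2, 123456, -123456 };

    for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++) {
        uint32_t e = encode((uint32_t)samples[i]);
        printf("%7d -> %10u -> %7d\n",
               (int)samples[i], (unsigned)e, (int)(int32_t)decode(e));
    }
    return 0;
}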
CVE-2013-0910
|
https://www.cvedetails.com/cve/CVE-2013-0910/
|
CWE-287
|
https://github.com/chromium/chromium/commit/ac8bd041b81e46e4e4fcd5021aaa5499703952e6
|
ac8bd041b81e46e4e4fcd5021aaa5499703952e6
|
Follow-on fixes and naming changes for https://codereview.chromium.org/12086077/
BUG=172573
Review URL: https://codereview.chromium.org/12177018
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180600 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderMessageFilter::OnGetProcessMemorySizes(
size_t* private_bytes, size_t* shared_bytes) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
using base::ProcessMetrics;
#if !defined(OS_MACOSX) || defined(OS_IOS)
scoped_ptr<ProcessMetrics> metrics(
ProcessMetrics::CreateProcessMetrics(peer_handle()));
#else
scoped_ptr<ProcessMetrics> metrics(
ProcessMetrics::CreateProcessMetrics(peer_handle(), NULL));
#endif
metrics->GetMemoryBytes(private_bytes, shared_bytes);
}
|
void RenderMessageFilter::OnGetProcessMemorySizes(
size_t* private_bytes, size_t* shared_bytes) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
using base::ProcessMetrics;
#if !defined(OS_MACOSX) || defined(OS_IOS)
scoped_ptr<ProcessMetrics> metrics(
ProcessMetrics::CreateProcessMetrics(peer_handle()));
#else
scoped_ptr<ProcessMetrics> metrics(
ProcessMetrics::CreateProcessMetrics(peer_handle(), NULL));
#endif
metrics->GetMemoryBytes(private_bytes, shared_bytes);
}
|
C
|
Chrome
| 0 |
CVE-2013-6630
|
https://www.cvedetails.com/cve/CVE-2013-6630/
|
CWE-189
|
https://github.com/chromium/chromium/commit/805eabb91d386c86bd64336c7643f6dfa864151d
|
805eabb91d386c86bd64336c7643f6dfa864151d
|
Convert ARRAYSIZE_UNSAFE -> arraysize in base/.
R=thestig@chromium.org
BUG=423134
Review URL: https://codereview.chromium.org/656033009
Cr-Commit-Position: refs/heads/master@{#299835}
|
void ProcessBacktrace(void *const *trace,
size_t size,
BacktraceOutputHandler* handler) {
#if defined(USE_SYMBOLIZE)
for (size_t i = 0; i < size; ++i) {
OutputFrameId(i, handler);
handler->HandleOutput(" ");
OutputPointer(trace[i], handler);
handler->HandleOutput(" ");
char buf[1024] = { '\0' };
void* address = static_cast<char*>(trace[i]) - 1;
if (google::Symbolize(address, buf, sizeof(buf)))
handler->HandleOutput(buf);
else
handler->HandleOutput("<unknown>");
handler->HandleOutput("\n");
}
#elif !defined(__UCLIBC__)
bool printed = false;
if (in_signal_handler == 0) {
scoped_ptr<char*, FreeDeleter>
trace_symbols(backtrace_symbols(trace, size));
if (trace_symbols.get()) {
for (size_t i = 0; i < size; ++i) {
std::string trace_symbol = trace_symbols.get()[i];
DemangleSymbols(&trace_symbol);
handler->HandleOutput(trace_symbol.c_str());
handler->HandleOutput("\n");
}
printed = true;
}
}
if (!printed) {
for (size_t i = 0; i < size; ++i) {
handler->HandleOutput(" [");
OutputPointer(trace[i], handler);
handler->HandleOutput("]\n");
}
}
#endif // defined(USE_SYMBOLIZE)
}
|
void ProcessBacktrace(void *const *trace,
size_t size,
BacktraceOutputHandler* handler) {
#if defined(USE_SYMBOLIZE)
for (size_t i = 0; i < size; ++i) {
OutputFrameId(i, handler);
handler->HandleOutput(" ");
OutputPointer(trace[i], handler);
handler->HandleOutput(" ");
char buf[1024] = { '\0' };
void* address = static_cast<char*>(trace[i]) - 1;
if (google::Symbolize(address, buf, sizeof(buf)))
handler->HandleOutput(buf);
else
handler->HandleOutput("<unknown>");
handler->HandleOutput("\n");
}
#elif !defined(__UCLIBC__)
bool printed = false;
if (in_signal_handler == 0) {
scoped_ptr<char*, FreeDeleter>
trace_symbols(backtrace_symbols(trace, size));
if (trace_symbols.get()) {
for (size_t i = 0; i < size; ++i) {
std::string trace_symbol = trace_symbols.get()[i];
DemangleSymbols(&trace_symbol);
handler->HandleOutput(trace_symbol.c_str());
handler->HandleOutput("\n");
}
printed = true;
}
}
if (!printed) {
for (size_t i = 0; i < size; ++i) {
handler->HandleOutput(" [");
OutputPointer(trace[i], handler);
handler->HandleOutput("]\n");
}
}
#endif // defined(USE_SYMBOLIZE)
}
|
C
|
Chrome
| 0 |
CVE-2018-6151
|
https://www.cvedetails.com/cve/CVE-2018-6151/
|
CWE-125
|
https://github.com/chromium/chromium/commit/cbb2c0940d4e3914ccd74f6466ff4cb9e50e0e86
|
cbb2c0940d4e3914ccd74f6466ff4cb9e50e0e86
|
Don't downcast DownloadManagerDelegate to ChromeDownloadManagerDelegate.
DownloadManager has a public SetDelegate method, and tests and/or other subsystems
can install their own implementations of the delegate.
Bug: 805905
Change-Id: Iecf1e0aceada0e1048bed1e2d2ceb29ca64295b8
TBR: tests updated to follow the API change.
Reviewed-on: https://chromium-review.googlesource.com/894702
Reviewed-by: David Vallet <dvallet@chromium.org>
Reviewed-by: Min Qin <qinmin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#533515}
|
void UpdateDSEOrigin() { dse_changed_callback_.Run(); }
|
void UpdateDSEOrigin() { dse_changed_callback_.Run(); }
|
C
|
Chrome
| 0 |
CVE-2011-2799
|
https://www.cvedetails.com/cve/CVE-2011-2799/
|
CWE-399
|
https://github.com/chromium/chromium/commit/5a2de6455f565783c73e53eae2c8b953e7d48520
|
5a2de6455f565783c73e53eae2c8b953e7d48520
|
2011-06-02 Joone Hur <joone.hur@collabora.co.uk>
Reviewed by Martin Robinson.
[GTK] Only load dictionaries if spell check is enabled
https://bugs.webkit.org/show_bug.cgi?id=32879
We don't need to call enchant if enable-spell-checking is false.
* webkit/webkitwebview.cpp:
(webkit_web_view_update_settings): Skip loading dictionaries when enable-spell-checking is false.
(webkit_web_view_settings_notify): Ditto.
git-svn-id: svn://svn.chromium.org/blink/trunk@87925 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
gboolean webkit_web_view_search_text(WebKitWebView* webView, const gchar* string, gboolean caseSensitive, gboolean forward, gboolean shouldWrap)
{
g_return_val_if_fail(WEBKIT_IS_WEB_VIEW(webView), FALSE);
g_return_val_if_fail(string, FALSE);
TextCaseSensitivity caseSensitivity = caseSensitive ? TextCaseSensitive : TextCaseInsensitive;
FindDirection direction = forward ? FindDirectionForward : FindDirectionBackward;
return core(webView)->findString(String::fromUTF8(string), caseSensitivity, direction, shouldWrap);
}
|
gboolean webkit_web_view_search_text(WebKitWebView* webView, const gchar* string, gboolean caseSensitive, gboolean forward, gboolean shouldWrap)
{
g_return_val_if_fail(WEBKIT_IS_WEB_VIEW(webView), FALSE);
g_return_val_if_fail(string, FALSE);
TextCaseSensitivity caseSensitivity = caseSensitive ? TextCaseSensitive : TextCaseInsensitive;
FindDirection direction = forward ? FindDirectionForward : FindDirectionBackward;
return core(webView)->findString(String::fromUTF8(string), caseSensitivity, direction, shouldWrap);
}
|
C
|
Chrome
| 0 |
CVE-2015-5289
|
https://www.cvedetails.com/cve/CVE-2015-5289/
|
CWE-119
|
https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=08fa47c4850cea32c3116665975bca219fbf2fe6
|
08fa47c4850cea32c3116665975bca219fbf2fe6
| null |
populate_recordset_array_element_start(void *state, bool isnull)
{
PopulateRecordsetState *_state = (PopulateRecordsetState *) state;
if (_state->lex->lex_level == 1 &&
_state->lex->token_type != JSON_TOKEN_OBJECT_START)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument of %s must be an array of objects",
_state->function_name)));
}
|
populate_recordset_array_element_start(void *state, bool isnull)
{
PopulateRecordsetState *_state = (PopulateRecordsetState *) state;
if (_state->lex->lex_level == 1 &&
_state->lex->token_type != JSON_TOKEN_OBJECT_START)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument of %s must be an array of objects",
_state->function_name)));
}
|
C
|
postgresql
| 0 |
CVE-2015-6817
|
https://www.cvedetails.com/cve/CVE-2015-6817/
|
CWE-287
|
https://github.com/pgbouncer/pgbouncer/commit/7ca3e5279d05fceb1e8a043c6f5b6f58dea3ed38
|
7ca3e5279d05fceb1e8a043c6f5b6f58dea3ed38
|
Remove the premature setting of auth_user
When the query returns 0 rows (user not found),
this user stays as the login user...
Should fix #69.
|
static bool check_client_passwd(PgSocket *client, const char *passwd)
{
char md5[MD5_PASSWD_LEN + 1];
PgUser *user = client->auth_user;
int auth_type = client->client_auth_type;
/* disallow empty passwords */
if (!*passwd || !*user->passwd)
return false;
switch (auth_type) {
case AUTH_PLAIN:
return strcmp(user->passwd, passwd) == 0;
case AUTH_MD5:
if (strlen(passwd) != MD5_PASSWD_LEN)
return false;
if (!isMD5(user->passwd))
pg_md5_encrypt(user->passwd, user->name, strlen(user->name), user->passwd);
pg_md5_encrypt(user->passwd + 3, (char *)client->tmp_login_salt, 4, md5);
return strcmp(md5, passwd) == 0;
}
return false;
}
|
static bool check_client_passwd(PgSocket *client, const char *passwd)
{
char md5[MD5_PASSWD_LEN + 1];
PgUser *user = client->auth_user;
int auth_type = client->client_auth_type;
/* disallow empty passwords */
if (!*passwd || !*user->passwd)
return false;
switch (auth_type) {
case AUTH_PLAIN:
return strcmp(user->passwd, passwd) == 0;
case AUTH_MD5:
if (strlen(passwd) != MD5_PASSWD_LEN)
return false;
if (!isMD5(user->passwd))
pg_md5_encrypt(user->passwd, user->name, strlen(user->name), user->passwd);
pg_md5_encrypt(user->passwd + 3, (char *)client->tmp_login_salt, 4, md5);
return strcmp(md5, passwd) == 0;
}
return false;
}
|
C
|
pgbouncer
| 0 |
CVE-2011-2918
|
https://www.cvedetails.com/cve/CVE-2011-2918/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated whether we could do wakeups from the current
context; if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
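A minimal sketch of the resulting interface change (the prototypes are inferred from the call sites shown below, not quoted from the patch); callers such as handle_ld_nf() simply drop the constant nmi argument:
/* before: every caller passed an explicit nmi flag */
extern void perf_sw_event(u32 event_id, u64 nr, int nmi,
                          struct pt_regs *regs, u64 addr);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
/* after: the flag is gone; deferred wakeups are handled internally */
extern void perf_sw_event(u32 event_id, u64 nr,
                          struct pt_regs *regs, u64 addr);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);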
|
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
int rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
} else if (test_thread_flag(TIF_32BIT)) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);
} else {
put_user(0, (unsigned long __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, (unsigned long __user *) reg + 1);
}
advance(regs);
}
|
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
int rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
} else if (test_thread_flag(TIF_32BIT)) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);
} else {
put_user(0, (unsigned long __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, (unsigned long __user *) reg + 1);
}
advance(regs);
}
|
C
|
linux
| 1 |
CVE-2012-5148
|
https://www.cvedetails.com/cve/CVE-2012-5148/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
|
e89cfcb9090e8c98129ae9160c513f504db74599
|
Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
|
void TabStripModel::UpdateTabContentsStateAt(int index,
TabStripModelObserver::TabChangeType change_type) {
DCHECK(ContainsIndex(index));
FOR_EACH_OBSERVER(TabStripModelObserver, observers_,
TabChangedAt(GetTabContentsAtImpl(index), index, change_type));
}
|
void TabStripModel::UpdateTabContentsStateAt(int index,
TabStripModelObserver::TabChangeType change_type) {
DCHECK(ContainsIndex(index));
FOR_EACH_OBSERVER(TabStripModelObserver, observers_,
TabChangedAt(GetTabContentsAtImpl(index), index, change_type));
}
|
C
|
Chrome
| 0 |
CVE-2016-10012
|
https://www.cvedetails.com/cve/CVE-2016-10012/
|
CWE-119
|
https://github.com/openbsd/src/commit/3095060f479b86288e31c79ecbc5131a66bcd2f9
|
3095060f479b86288e31c79ecbc5131a66bcd2f9
|
Remove support for pre-authentication compression. Doing compression
early in the protocol probably seemed reasonable in the 1990s, but
today it's clearly a bad idea in terms of both cryptography (cf.
multiple compression oracle attacks in TLS) and attack surface.
Moreover, to support it across privilege-separation zlib needed
the assistance of a complex shared-memory manager that made the
required attack surface considerably larger.
Prompted by Guido Vranken pointing out a compiler-elided security
check in the shared memory manager found by Stack
(http://css.csail.mit.edu/stack/); ok deraadt@ markus@
NB. pre-auth compression has been disabled by default in sshd
for >10 years.
|
mm_record_login(Session *s, struct passwd *pw)
{
struct ssh *ssh = active_state; /* XXX */
socklen_t fromlen;
struct sockaddr_storage from;
/*
* Get IP address of client. If the connection is not a socket, let
* the address be 0.0.0.0.
*/
memset(&from, 0, sizeof(from));
fromlen = sizeof(from);
if (packet_connection_is_on_socket()) {
if (getpeername(packet_get_connection_in(),
(struct sockaddr *)&from, &fromlen) < 0) {
debug("getpeername: %.100s", strerror(errno));
cleanup_exit(255);
}
}
/* Record that there was a login on that tty from the remote host. */
record_login(s->pid, s->tty, pw->pw_name, pw->pw_uid,
session_get_remote_name_or_ip(ssh, utmp_len, options.use_dns),
(struct sockaddr *)&from, fromlen);
}
|
mm_record_login(Session *s, struct passwd *pw)
{
struct ssh *ssh = active_state; /* XXX */
socklen_t fromlen;
struct sockaddr_storage from;
/*
* Get IP address of client. If the connection is not a socket, let
* the address be 0.0.0.0.
*/
memset(&from, 0, sizeof(from));
fromlen = sizeof(from);
if (packet_connection_is_on_socket()) {
if (getpeername(packet_get_connection_in(),
(struct sockaddr *)&from, &fromlen) < 0) {
debug("getpeername: %.100s", strerror(errno));
cleanup_exit(255);
}
}
/* Record that there was a login on that tty from the remote host. */
record_login(s->pid, s->tty, pw->pw_name, pw->pw_uid,
session_get_remote_name_or_ip(ssh, utmp_len, options.use_dns),
(struct sockaddr *)&from, fromlen);
}
|
C
|
src
| 0 |
CVE-2018-12436
|
https://www.cvedetails.com/cve/CVE-2018-12436/
|
CWE-200
|
https://github.com/wolfSSL/wolfssl/commit/9b9568d500f31f964af26ba8d01e542e1f27e5ca
|
9b9568d500f31f964af26ba8d01e542e1f27e5ca
|
Change ECDSA signing to use blinding.
|
static int wc_ecc_sign_hash_hw(const byte* in, word32 inlen,
mp_int* r, mp_int* s, byte* out, word32 *outlen, WC_RNG* rng,
ecc_key* key)
{
int err;
#ifdef PLUTON_CRYPTO_ECC
if (key->devId != INVALID_DEVID) /* use hardware */
#endif
{
int keysize = key->dp->size;
/* Check args */
if (keysize > ECC_MAX_CRYPTO_HW_SIZE || inlen != keysize ||
*outlen < keysize*2) {
return ECC_BAD_ARG_E;
}
#if defined(WOLFSSL_ATECC508A)
/* Sign: Result is 32-bytes of R then 32-bytes of S */
err = atcatls_sign(key->slot, in, out);
if (err != ATCA_SUCCESS) {
return BAD_COND_E;
}
#elif defined(PLUTON_CRYPTO_ECC)
{
/* perform ECC sign */
word32 raw_sig_size = *outlen;
err = Crypto_EccSign(in, inlen, out, &raw_sig_size);
if (err != CRYPTO_RES_SUCCESS || raw_sig_size != keysize*2){
return BAD_COND_E;
}
}
#endif
/* Load R and S */
err = mp_read_unsigned_bin(r, &out[0], keysize);
if (err != MP_OKAY) {
return err;
}
err = mp_read_unsigned_bin(s, &out[keysize], keysize);
if (err != MP_OKAY) {
return err;
}
/* Check for zeros */
if (mp_iszero(r) || mp_iszero(s)) {
return MP_ZERO_E;
}
}
#ifdef PLUTON_CRYPTO_ECC
else {
err = wc_ecc_sign_hash_ex(in, inlen, rng, key, r, s);
}
#endif
return err;
}
|
static int wc_ecc_sign_hash_hw(const byte* in, word32 inlen,
mp_int* r, mp_int* s, byte* out, word32 *outlen, WC_RNG* rng,
ecc_key* key)
{
int err;
#ifdef PLUTON_CRYPTO_ECC
if (key->devId != INVALID_DEVID) /* use hardware */
#endif
{
int keysize = key->dp->size;
/* Check args */
if (keysize > ECC_MAX_CRYPTO_HW_SIZE || inlen != keysize ||
*outlen < keysize*2) {
return ECC_BAD_ARG_E;
}
#if defined(WOLFSSL_ATECC508A)
/* Sign: Result is 32-bytes of R then 32-bytes of S */
err = atcatls_sign(key->slot, in, out);
if (err != ATCA_SUCCESS) {
return BAD_COND_E;
}
#elif defined(PLUTON_CRYPTO_ECC)
{
/* perform ECC sign */
word32 raw_sig_size = *outlen;
err = Crypto_EccSign(in, inlen, out, &raw_sig_size);
if (err != CRYPTO_RES_SUCCESS || raw_sig_size != keysize*2){
return BAD_COND_E;
}
}
#endif
/* Load R and S */
err = mp_read_unsigned_bin(r, &out[0], keysize);
if (err != MP_OKAY) {
return err;
}
err = mp_read_unsigned_bin(s, &out[keysize], keysize);
if (err != MP_OKAY) {
return err;
}
/* Check for zeros */
if (mp_iszero(r) || mp_iszero(s)) {
return MP_ZERO_E;
}
}
#ifdef PLUTON_CRYPTO_ECC
else {
err = wc_ecc_sign_hash_ex(in, inlen, rng, key, r, s);
}
#endif
return err;
}
|
C
|
wolfssl
| 0 |
CVE-2013-1789
|
https://www.cvedetails.com/cve/CVE-2013-1789/
| null |
https://cgit.freedesktop.org/poppler/poppler/commit/?h=poppler-0.22&id=a9b8ab4657dec65b8b86c225d12c533ad7e984e2
|
a9b8ab4657dec65b8b86c225d12c533ad7e984e2
| null |
void Splash::pipeRunAACMYK8(SplashPipe *pipe) {
Guchar aSrc, aDest, alpha2, aResult;
SplashColor cDest;
Guchar cResult0, cResult1, cResult2, cResult3;
cDest[0] = pipe->destColorPtr[0];
cDest[1] = pipe->destColorPtr[1];
cDest[2] = pipe->destColorPtr[2];
cDest[3] = pipe->destColorPtr[3];
aDest = *pipe->destAlphaPtr;
aSrc = div255(pipe->aInput * pipe->shape);
aResult = aSrc + aDest - div255(aSrc * aDest);
alpha2 = aResult;
if (alpha2 == 0) {
cResult0 = 0;
cResult1 = 0;
cResult2 = 0;
cResult3 = 0;
} else {
cResult0 = state->cmykTransferC[(Guchar)(((alpha2 - aSrc) * cDest[0] +
aSrc * pipe->cSrc[0]) / alpha2)];
cResult1 = state->cmykTransferM[(Guchar)(((alpha2 - aSrc) * cDest[1] +
aSrc * pipe->cSrc[1]) / alpha2)];
cResult2 = state->cmykTransferY[(Guchar)(((alpha2 - aSrc) * cDest[2] +
aSrc * pipe->cSrc[2]) / alpha2)];
cResult3 = state->cmykTransferK[(Guchar)(((alpha2 - aSrc) * cDest[3] +
aSrc * pipe->cSrc[3]) / alpha2)];
}
if (state->overprintMask & 1) {
pipe->destColorPtr[0] = (state->overprintAdditive && pipe->shape != 0) ?
std::min<int>(pipe->destColorPtr[0] + cResult0, 255) :
cResult0;
}
if (state->overprintMask & 2) {
pipe->destColorPtr[1] = (state->overprintAdditive && pipe->shape != 0) ?
std::min<int>(pipe->destColorPtr[1] + cResult1, 255) :
cResult1;
}
if (state->overprintMask & 4) {
pipe->destColorPtr[2] = (state->overprintAdditive && pipe->shape != 0) ?
std::min<int>(pipe->destColorPtr[2] + cResult2, 255) :
cResult2;
}
if (state->overprintMask & 8) {
pipe->destColorPtr[3] = (state->overprintAdditive && pipe->shape != 0) ?
std::min<int>(pipe->destColorPtr[3] + cResult3, 255) :
cResult3;
}
pipe->destColorPtr += 4;
*pipe->destAlphaPtr++ = aResult;
++pipe->x;
}
|
void Splash::pipeRunAACMYK8(SplashPipe *pipe) {
Guchar aSrc, aDest, alpha2, aResult;
SplashColor cDest;
Guchar cResult0, cResult1, cResult2, cResult3;
cDest[0] = pipe->destColorPtr[0];
cDest[1] = pipe->destColorPtr[1];
cDest[2] = pipe->destColorPtr[2];
cDest[3] = pipe->destColorPtr[3];
aDest = *pipe->destAlphaPtr;
aSrc = div255(pipe->aInput * pipe->shape);
aResult = aSrc + aDest - div255(aSrc * aDest);
alpha2 = aResult;
if (alpha2 == 0) {
cResult0 = 0;
cResult1 = 0;
cResult2 = 0;
cResult3 = 0;
} else {
cResult0 = state->cmykTransferC[(Guchar)(((alpha2 - aSrc) * cDest[0] +
aSrc * pipe->cSrc[0]) / alpha2)];
cResult1 = state->cmykTransferM[(Guchar)(((alpha2 - aSrc) * cDest[1] +
aSrc * pipe->cSrc[1]) / alpha2)];
cResult2 = state->cmykTransferY[(Guchar)(((alpha2 - aSrc) * cDest[2] +
aSrc * pipe->cSrc[2]) / alpha2)];
cResult3 = state->cmykTransferK[(Guchar)(((alpha2 - aSrc) * cDest[3] +
aSrc * pipe->cSrc[3]) / alpha2)];
}
if (state->overprintMask & 1) {
pipe->destColorPtr[0] = (state->overprintAdditive && pipe->shape != 0) ?
std::min<int>(pipe->destColorPtr[0] + cResult0, 255) :
cResult0;
}
if (state->overprintMask & 2) {
pipe->destColorPtr[1] = (state->overprintAdditive && pipe->shape != 0) ?
std::min<int>(pipe->destColorPtr[1] + cResult1, 255) :
cResult1;
}
if (state->overprintMask & 4) {
pipe->destColorPtr[2] = (state->overprintAdditive && pipe->shape != 0) ?
std::min<int>(pipe->destColorPtr[2] + cResult2, 255) :
cResult2;
}
if (state->overprintMask & 8) {
pipe->destColorPtr[3] = (state->overprintAdditive && pipe->shape != 0) ?
std::min<int>(pipe->destColorPtr[3] + cResult3, 255) :
cResult3;
}
pipe->destColorPtr += 4;
*pipe->destAlphaPtr++ = aResult;
++pipe->x;
}
|
CPP
|
poppler
| 0 |
CVE-2019-17542
|
https://www.cvedetails.com/cve/CVE-2019-17542/
| null |
https://github.com/FFmpeg/FFmpeg/commit/02f909dc24b1f05cfbba75077c7707b905e63cd2
|
02f909dc24b1f05cfbba75077c7707b905e63cd2
|
avcodec/vqavideo: Set video size
Fixes: out of array access
Fixes: 15919/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_VQA_fuzzer-5657368257363968
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
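A plausible sketch of the fix, assuming the usual libavcodec helper and the decoder's width/height fields (ff_set_dimensions, s->width, s->height are assumptions here, not quotes from the patch): the decoder validates and publishes the frame size before any pixel data is written, so later writes cannot land outside the allocated frame:
/* in the decoder's init path, after parsing width/height from the header */
int ret = ff_set_dimensions(avctx, s->width, s->height);
if (ret < 0)
    return ret; /* rejects zero or overflowing dimensions up front */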
|
static int decode_format80(VqaContext *s, int src_size,
unsigned char *dest, int dest_size, int check_size) {
int dest_index = 0;
int count, opcode, start;
int src_pos;
unsigned char color;
int i;
if (src_size < 0 || src_size > bytestream2_get_bytes_left(&s->gb)) {
av_log(s->avctx, AV_LOG_ERROR, "Chunk size %d is out of range\n",
src_size);
return AVERROR_INVALIDDATA;
}
start = bytestream2_tell(&s->gb);
while (bytestream2_tell(&s->gb) - start < src_size) {
opcode = bytestream2_get_byte(&s->gb);
ff_tlog(s->avctx, "opcode %02X: ", opcode);
/* 0x80 means that frame is finished */
if (opcode == 0x80)
break;
if (dest_index >= dest_size) {
av_log(s->avctx, AV_LOG_ERROR, "decode_format80 problem: dest_index (%d) exceeded dest_size (%d)\n",
dest_index, dest_size);
return AVERROR_INVALIDDATA;
}
if (opcode == 0xFF) {
count = bytestream2_get_le16(&s->gb);
src_pos = bytestream2_get_le16(&s->gb);
ff_tlog(s->avctx, "(1) copy %X bytes from absolute pos %X\n", count, src_pos);
CHECK_COUNT();
CHECK_COPY(src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[src_pos + i];
dest_index += count;
} else if (opcode == 0xFE) {
count = bytestream2_get_le16(&s->gb);
color = bytestream2_get_byte(&s->gb);
ff_tlog(s->avctx, "(2) set %X bytes to %02X\n", count, color);
CHECK_COUNT();
memset(&dest[dest_index], color, count);
dest_index += count;
} else if ((opcode & 0xC0) == 0xC0) {
count = (opcode & 0x3F) + 3;
src_pos = bytestream2_get_le16(&s->gb);
ff_tlog(s->avctx, "(3) copy %X bytes from absolute pos %X\n", count, src_pos);
CHECK_COUNT();
CHECK_COPY(src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[src_pos + i];
dest_index += count;
} else if (opcode > 0x80) {
count = opcode & 0x3F;
ff_tlog(s->avctx, "(4) copy %X bytes from source to dest\n", count);
CHECK_COUNT();
bytestream2_get_buffer(&s->gb, &dest[dest_index], count);
dest_index += count;
} else {
count = ((opcode & 0x70) >> 4) + 3;
src_pos = bytestream2_get_byte(&s->gb) | ((opcode & 0x0F) << 8);
ff_tlog(s->avctx, "(5) copy %X bytes from relpos %X\n", count, src_pos);
CHECK_COUNT();
CHECK_COPY(dest_index - src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[dest_index - src_pos + i];
dest_index += count;
}
}
/* validate that the entire destination buffer was filled; this is
* important for decoding frame maps since each vector needs to have a
* codebook entry; it is not important for compressed codebooks because
* not every entry needs to be filled */
if (check_size)
if (dest_index < dest_size) {
av_log(s->avctx, AV_LOG_ERROR, "decode_format80 problem: decode finished with dest_index (%d) < dest_size (%d)\n",
dest_index, dest_size);
memset(dest + dest_index, 0, dest_size - dest_index);
}
return 0; // let's display what we decoded anyway
}
|
static int decode_format80(VqaContext *s, int src_size,
unsigned char *dest, int dest_size, int check_size) {
int dest_index = 0;
int count, opcode, start;
int src_pos;
unsigned char color;
int i;
if (src_size < 0 || src_size > bytestream2_get_bytes_left(&s->gb)) {
av_log(s->avctx, AV_LOG_ERROR, "Chunk size %d is out of range\n",
src_size);
return AVERROR_INVALIDDATA;
}
start = bytestream2_tell(&s->gb);
while (bytestream2_tell(&s->gb) - start < src_size) {
opcode = bytestream2_get_byte(&s->gb);
ff_tlog(s->avctx, "opcode %02X: ", opcode);
/* 0x80 means that frame is finished */
if (opcode == 0x80)
break;
if (dest_index >= dest_size) {
av_log(s->avctx, AV_LOG_ERROR, "decode_format80 problem: dest_index (%d) exceeded dest_size (%d)\n",
dest_index, dest_size);
return AVERROR_INVALIDDATA;
}
if (opcode == 0xFF) {
count = bytestream2_get_le16(&s->gb);
src_pos = bytestream2_get_le16(&s->gb);
ff_tlog(s->avctx, "(1) copy %X bytes from absolute pos %X\n", count, src_pos);
CHECK_COUNT();
CHECK_COPY(src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[src_pos + i];
dest_index += count;
} else if (opcode == 0xFE) {
count = bytestream2_get_le16(&s->gb);
color = bytestream2_get_byte(&s->gb);
ff_tlog(s->avctx, "(2) set %X bytes to %02X\n", count, color);
CHECK_COUNT();
memset(&dest[dest_index], color, count);
dest_index += count;
} else if ((opcode & 0xC0) == 0xC0) {
count = (opcode & 0x3F) + 3;
src_pos = bytestream2_get_le16(&s->gb);
ff_tlog(s->avctx, "(3) copy %X bytes from absolute pos %X\n", count, src_pos);
CHECK_COUNT();
CHECK_COPY(src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[src_pos + i];
dest_index += count;
} else if (opcode > 0x80) {
count = opcode & 0x3F;
ff_tlog(s->avctx, "(4) copy %X bytes from source to dest\n", count);
CHECK_COUNT();
bytestream2_get_buffer(&s->gb, &dest[dest_index], count);
dest_index += count;
} else {
count = ((opcode & 0x70) >> 4) + 3;
src_pos = bytestream2_get_byte(&s->gb) | ((opcode & 0x0F) << 8);
ff_tlog(s->avctx, "(5) copy %X bytes from relpos %X\n", count, src_pos);
CHECK_COUNT();
CHECK_COPY(dest_index - src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[dest_index - src_pos + i];
dest_index += count;
}
}
/* validate that the entire destination buffer was filled; this is
* important for decoding frame maps since each vector needs to have a
* codebook entry; it is not important for compressed codebooks because
* not every entry needs to be filled */
if (check_size)
if (dest_index < dest_size) {
av_log(s->avctx, AV_LOG_ERROR, "decode_format80 problem: decode finished with dest_index (%d) < dest_size (%d)\n",
dest_index, dest_size);
memset(dest + dest_index, 0, dest_size - dest_index);
}
return 0; // let's display what we decoded anyway
}
|
C
|
FFmpeg
| 0 |
CVE-2015-8877
|
https://www.cvedetails.com/cve/CVE-2015-8877/
|
CWE-399
|
https://github.com/libgd/libgd/commit/4751b606fa38edc456d627140898a7ec679fcc24
|
4751b606fa38edc456d627140898a7ec679fcc24
|
gdImageScaleTwoPass memory leak fix
Fixing memory leak in gdImageScaleTwoPass, as reported by @cmb69 and
confirmed by @vapier. This bug actually bit me in production and I'm
very thankful that it was reported with an easy fix.
Fixes #173.
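A minimal sketch of the shape of the fix (identifier names are assumed, not taken from the patch): the two-pass scaler allocates an intermediate image for the first pass, and that temporary must be destroyed once the second pass has produced the final image:
gdImagePtr tmp = gdImageCreateTrueColor(new_width, src->sy);
if (tmp == NULL)
    return NULL;
/* pass 1: scale horizontally from src into tmp ... */
/* pass 2: scale vertically from tmp into dst ... */
gdImageDestroy(tmp); /* the cleanup whose absence leaked tmp on every call */
return dst;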
|
static double filter_blackman(const double x)
{
return (0.42f+0.5f*(double)cos(M_PI*x)+0.08f*(double)cos(2.0f*M_PI*x));
}
|
static double filter_blackman(const double x)
{
return (0.42f+0.5f*(double)cos(M_PI*x)+0.08f*(double)cos(2.0f*M_PI*x));
}
|
C
|
libgd
| 0 |
CVE-2014-3173
|
https://www.cvedetails.com/cve/CVE-2014-3173/
|
CWE-119
|
https://github.com/chromium/chromium/commit/ee7579229ff7e9e5ae28bf53aea069251499d7da
|
ee7579229ff7e9e5ae28bf53aea069251499d7da
|
Framebuffer clear() needs to consider the situation where some draw buffers are disabled.
This arises when we expose the DrawBuffers extension.
BUG=376951
TEST=the attached test case, webgl conformance
R=kbr@chromium.org,bajones@chromium.org
Review URL: https://codereview.chromium.org/315283002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@275338 0039d316-1c4b-4281-b951-d872f2087c98
|
error::Error GLES2DecoderImpl::HandleShaderBinary(
uint32 immediate_data_size, const cmds::ShaderBinary& c) {
#if 1 // No binary shader support.
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glShaderBinary", "not supported");
return error::kNoError;
#else
GLsizei n = static_cast<GLsizei>(c.n);
if (n < 0) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "n < 0");
return error::kNoError;
}
GLsizei length = static_cast<GLsizei>(c.length);
if (length < 0) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "length < 0");
return error::kNoError;
}
uint32 data_size;
if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
return error::kOutOfBounds;
}
const GLuint* shaders = GetSharedMemoryAs<const GLuint*>(
c.shaders_shm_id, c.shaders_shm_offset, data_size);
GLenum binaryformat = static_cast<GLenum>(c.binaryformat);
const void* binary = GetSharedMemoryAs<const void*>(
c.binary_shm_id, c.binary_shm_offset, length);
if (shaders == NULL || binary == NULL) {
return error::kOutOfBounds;
}
scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
for (GLsizei ii = 0; ii < n; ++ii) {
Shader* shader = GetShader(shaders[ii]);
if (!shader) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "unknown shader");
return error::kNoError;
}
service_ids[ii] = shader->service_id();
}
return error::kNoError;
#endif
}
|
error::Error GLES2DecoderImpl::HandleShaderBinary(
uint32 immediate_data_size, const cmds::ShaderBinary& c) {
#if 1 // No binary shader support.
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glShaderBinary", "not supported");
return error::kNoError;
#else
GLsizei n = static_cast<GLsizei>(c.n);
if (n < 0) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "n < 0");
return error::kNoError;
}
GLsizei length = static_cast<GLsizei>(c.length);
if (length < 0) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "length < 0");
return error::kNoError;
}
uint32 data_size;
if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
return error::kOutOfBounds;
}
const GLuint* shaders = GetSharedMemoryAs<const GLuint*>(
c.shaders_shm_id, c.shaders_shm_offset, data_size);
GLenum binaryformat = static_cast<GLenum>(c.binaryformat);
const void* binary = GetSharedMemoryAs<const void*>(
c.binary_shm_id, c.binary_shm_offset, length);
if (shaders == NULL || binary == NULL) {
return error::kOutOfBounds;
}
scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
for (GLsizei ii = 0; ii < n; ++ii) {
Shader* shader = GetShader(shaders[ii]);
if (!shader) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "unknown shader");
return error::kNoError;
}
service_ids[ii] = shader->service_id();
}
return error::kNoError;
#endif
}
|
C
|
Chrome
| 0 |
CVE-2012-1179
|
https://www.cvedetails.com/cve/CVE-2012-1179/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850
|
4a1d704194a441bf83c636004a479e01360ec850
|
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream.
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
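A sketch of the approach described above, following mainline naming conventions (the helper name and exact checks are assumptions): read the pmd once into a local, add a compiler barrier so the value cannot be re-read from memory, and classify only that snapshot:
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd; /* snapshot; the real pmd may change under us */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier(); /* forbid the compiler from re-reading *pmd below */
#endif
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1; /* caller skips the range; pmd_clear_bad() is never hit */
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}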
|
static void free_swap_count_continuations(struct swap_info_struct *si)
{
pgoff_t offset;
for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
struct page *head;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head)) {
struct list_head *this, *next;
list_for_each_safe(this, next, &head->lru) {
struct page *page;
page = list_entry(this, struct page, lru);
list_del(this);
__free_page(page);
}
}
}
}
|
static void free_swap_count_continuations(struct swap_info_struct *si)
{
pgoff_t offset;
for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
struct page *head;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head)) {
struct list_head *this, *next;
list_for_each_safe(this, next, &head->lru) {
struct page *page;
page = list_entry(this, struct page, lru);
list_del(this);
__free_page(page);
}
}
}
}
|
C
|
linux
| 0 |
CVE-2015-8543
|
https://www.cvedetails.com/cve/CVE-2015-8543/
| null |
https://github.com/torvalds/linux/commit/79462ad02e861803b3840cc782248c7359451cd9
|
79462ad02e861803b3840cc782248c7359451cd9
|
net: add validation for the socket syscall protocol argument
郭永刚 reported that one could simply crash the kernel as root by
using a simple program:
int socket_fd;
struct sockaddr_in addr;
addr.sin_port = 0;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_family = 10;
socket_fd = socket(10,3,0x40000000);
connect(socket_fd , &addr,16);
AF_INET, AF_INET6 sockets actually only support 8-bit protocol
identifiers. inet_sock's skc_protocol field thus is sized accordingly,
thus larger protocol identifiers simply cut off the higher bits and
store a zero in the protocol fields.
This could lead to e.g. NULL function pointer because as a result of
the cut off inet_num is zero and we call down to inet_autobind, which
is NULL for raw sockets.
kernel: Call Trace:
kernel: [<ffffffff816db90e>] ? inet_autobind+0x2e/0x70
kernel: [<ffffffff816db9a4>] inet_dgram_connect+0x54/0x80
kernel: [<ffffffff81645069>] SYSC_connect+0xd9/0x110
kernel: [<ffffffff810ac51b>] ? ptrace_notify+0x5b/0x80
kernel: [<ffffffff810236d8>] ? syscall_trace_enter_phase2+0x108/0x200
kernel: [<ffffffff81645e0e>] SyS_connect+0xe/0x10
kernel: [<ffffffff81779515>] tracesys_phase2+0x84/0x89
I found no particular commit which introduced this problem.
CVE: CVE-2015-8543
Cc: Cong Wang <cwang@twopensource.com>
Reported-by: 郭永刚 <guoyonggang@360.cn>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
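A sketch of the style of check the fix introduces (the macro name follows mainline; treat the exact placement as an assumption): each address family's create() rejects protocol values that would be truncated by the 8-bit skc_protocol field:
#define SK_PROTOCOL_MAX U8_MAX
static int inet_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
		return -EINVAL; /* would otherwise be silently cut to 8 bits */
	/* ... existing protocol lookup and sock allocation ... */
	return 0; /* placeholder for the unchanged tail of the function */
}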
|
static int irda_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk == NULL)
return 0;
lock_sock(sk);
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
/* Destroy IrDA socket */
irda_destroy_socket(irda_sk(sk));
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
/* Purge queues (see sock_init_data()) */
skb_queue_purge(&sk->sk_receive_queue);
/* Destroy networking socket if we are the last reference on it,
* i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */
sock_put(sk);
/* Notes on socket locking and deallocation... - Jean II
* In theory we should put pairs of sock_hold() / sock_put() to
* prevent the socket to be destroyed whenever there is an
* outstanding request or outstanding incoming packet or event.
*
* 1) This may include IAS request, both in connect and getsockopt.
* Unfortunately, the situation is a bit more messy than it looks,
* because we close iriap and kfree(self) above.
*
* 2) This may include selective discovery in getsockopt.
* Same stuff as above, irlmp registration and self are gone.
*
* Probably 1 and 2 may not matter, because it's all triggered
* by a process and the socket layer already prevent the
* socket to go away while a process is holding it, through
* sockfd_put() and fput()...
*
* 3) This may include deferred TSAP closure. In particular,
* we may receive a late irda_disconnect_indication()
* Fortunately, (tsap_cb *)->close_pend should protect us
* from that.
*
* I did some testing on SMP, and it looks solid. And the socket
* memory leak is now gone... - Jean II
*/
return 0;
}
|
static int irda_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk == NULL)
return 0;
lock_sock(sk);
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
/* Destroy IrDA socket */
irda_destroy_socket(irda_sk(sk));
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
/* Purge queues (see sock_init_data()) */
skb_queue_purge(&sk->sk_receive_queue);
/* Destroy networking socket if we are the last reference on it,
* i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */
sock_put(sk);
/* Notes on socket locking and deallocation... - Jean II
* In theory we should put pairs of sock_hold() / sock_put() to
* prevent the socket to be destroyed whenever there is an
* outstanding request or outstanding incoming packet or event.
*
* 1) This may include IAS request, both in connect and getsockopt.
* Unfortunately, the situation is a bit more messy than it looks,
* because we close iriap and kfree(self) above.
*
* 2) This may include selective discovery in getsockopt.
* Same stuff as above, irlmp registration and self are gone.
*
* Probably 1 and 2 may not matter, because it's all triggered
* by a process and the socket layer already prevent the
* socket to go away while a process is holding it, through
* sockfd_put() and fput()...
*
* 3) This may include deferred TSAP closure. In particular,
* we may receive a late irda_disconnect_indication()
* Fortunately, (tsap_cb *)->close_pend should protect us
* from that.
*
* I did some testing on SMP, and it looks solid. And the socket
* memory leak is now gone... - Jean II
*/
return 0;
}
|
C
|
linux
| 0 |
CVE-2013-6634
|
https://www.cvedetails.com/cve/CVE-2013-6634/
|
CWE-287
|
https://github.com/chromium/chromium/commit/50370b3c98047bdc80184ff87a502edc5c597d3a
|
50370b3c98047bdc80184ff87a502edc5c597d3a
|
During redirects in the one-click sign-in flow, check the current URL
instead of the original URL to validate Gaia HTTP headers.
BUG=307159
Review URL: https://codereview.chromium.org/77343002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@236563 0039d316-1c4b-4281-b951-d872f2087c98
|
void ClearPendingEmailOnIOThread(content::ResourceContext* context) {
ProfileIOData* io_data = ProfileIOData::FromResourceContext(context);
DCHECK(io_data);
io_data->set_reverse_autologin_pending_email(std::string());
}
|
void ClearPendingEmailOnIOThread(content::ResourceContext* context) {
ProfileIOData* io_data = ProfileIOData::FromResourceContext(context);
DCHECK(io_data);
io_data->set_reverse_autologin_pending_email(std::string());
}
|
C
|
Chrome
| 0 |
CVE-2017-12182
|
https://www.cvedetails.com/cve/CVE-2017-12182/
|
CWE-20
|
https://cgit.freedesktop.org/xorg/xserver/commit/?id=1b1d4c04695dced2463404174b50b3581dbd857b
|
1b1d4c04695dced2463404174b50b3581dbd857b
| null |
ProcXF86DRIOpenConnection(register ClientPtr client)
{
xXF86DRIOpenConnectionReply rep;
drm_handle_t hSAREA;
char *busIdString;
CARD32 busIdStringLength = 0;
REQUEST(xXF86DRIOpenConnectionReq);
REQUEST_SIZE_MATCH(xXF86DRIOpenConnectionReq);
if (stuff->screen >= screenInfo.numScreens) {
client->errorValue = stuff->screen;
return BadValue;
}
if (!DRIOpenConnection(screenInfo.screens[stuff->screen],
&hSAREA, &busIdString)) {
return BadValue;
}
if (busIdString)
busIdStringLength = strlen(busIdString);
rep = (xXF86DRIOpenConnectionReply) {
.type = X_Reply,
.sequenceNumber = client->sequence,
.length = bytes_to_int32(SIZEOF(xXF86DRIOpenConnectionReply) -
SIZEOF(xGenericReply) +
pad_to_int32(busIdStringLength)),
.busIdStringLength = busIdStringLength,
.hSAREALow = (CARD32) (hSAREA & 0xffffffff),
#if defined(LONG64) && !defined(__linux__)
.hSAREAHigh = (CARD32) (hSAREA >> 32),
#else
.hSAREAHigh = 0
#endif
};
WriteToClient(client, sizeof(xXF86DRIOpenConnectionReply), &rep);
if (busIdStringLength)
WriteToClient(client, busIdStringLength, busIdString);
return Success;
}
|
ProcXF86DRIOpenConnection(register ClientPtr client)
{
xXF86DRIOpenConnectionReply rep;
drm_handle_t hSAREA;
char *busIdString;
CARD32 busIdStringLength = 0;
REQUEST(xXF86DRIOpenConnectionReq);
REQUEST_SIZE_MATCH(xXF86DRIOpenConnectionReq);
if (stuff->screen >= screenInfo.numScreens) {
client->errorValue = stuff->screen;
return BadValue;
}
if (!DRIOpenConnection(screenInfo.screens[stuff->screen],
&hSAREA, &busIdString)) {
return BadValue;
}
if (busIdString)
busIdStringLength = strlen(busIdString);
rep = (xXF86DRIOpenConnectionReply) {
.type = X_Reply,
.sequenceNumber = client->sequence,
.length = bytes_to_int32(SIZEOF(xXF86DRIOpenConnectionReply) -
SIZEOF(xGenericReply) +
pad_to_int32(busIdStringLength)),
.busIdStringLength = busIdStringLength,
.hSAREALow = (CARD32) (hSAREA & 0xffffffff),
#if defined(LONG64) && !defined(__linux__)
.hSAREAHigh = (CARD32) (hSAREA >> 32),
#else
.hSAREAHigh = 0
#endif
};
WriteToClient(client, sizeof(xXF86DRIOpenConnectionReply), &rep);
if (busIdStringLength)
WriteToClient(client, busIdStringLength, busIdString);
return Success;
}
|
C
|
xserver
| 0 |
CVE-2017-12187
|
https://www.cvedetails.com/cve/CVE-2017-12187/
|
CWE-20
|
https://cgit.freedesktop.org/xorg/xserver/commit/?id=cad5a1050b7184d828aef9c1dd151c3ab649d37e
|
cad5a1050b7184d828aef9c1dd151c3ab649d37e
| null |
matchAdaptor(ScreenPtr pScreen, XvAdaptorPtr refAdapt, Bool isOverlay)
{
int i;
XvScreenPtr xvsp =
dixLookupPrivate(&pScreen->devPrivates, XvGetScreenKey());
/* Do not try to go on if xv is not supported on this screen */
if (xvsp == NULL)
return NULL;
/* if the adaptor has the same name it's a perfect match */
for (i = 0; i < xvsp->nAdaptors; i++) {
XvAdaptorPtr pAdapt = xvsp->pAdaptors + i;
if (!strcmp(refAdapt->name, pAdapt->name))
return pAdapt;
}
/* otherwise we only look for XvImage adaptors */
if (!isImageAdaptor(refAdapt))
return NULL;
/* prefer overlay/overlay non-overlay/non-overlay pairing */
for (i = 0; i < xvsp->nAdaptors; i++) {
XvAdaptorPtr pAdapt = xvsp->pAdaptors + i;
if (isImageAdaptor(pAdapt) && isOverlay == hasOverlay(pAdapt))
return pAdapt;
}
/* but we'll take any XvImage pairing if we can get it */
for (i = 0; i < xvsp->nAdaptors; i++) {
XvAdaptorPtr pAdapt = xvsp->pAdaptors + i;
if (isImageAdaptor(pAdapt))
return pAdapt;
}
return NULL;
}
|
matchAdaptor(ScreenPtr pScreen, XvAdaptorPtr refAdapt, Bool isOverlay)
{
int i;
XvScreenPtr xvsp =
dixLookupPrivate(&pScreen->devPrivates, XvGetScreenKey());
/* Do not try to go on if xv is not supported on this screen */
if (xvsp == NULL)
return NULL;
/* if the adaptor has the same name it's a perfect match */
for (i = 0; i < xvsp->nAdaptors; i++) {
XvAdaptorPtr pAdapt = xvsp->pAdaptors + i;
if (!strcmp(refAdapt->name, pAdapt->name))
return pAdapt;
}
/* otherwise we only look for XvImage adaptors */
if (!isImageAdaptor(refAdapt))
return NULL;
/* prefer overlay/overlay non-overlay/non-overlay pairing */
for (i = 0; i < xvsp->nAdaptors; i++) {
XvAdaptorPtr pAdapt = xvsp->pAdaptors + i;
if (isImageAdaptor(pAdapt) && isOverlay == hasOverlay(pAdapt))
return pAdapt;
}
/* but we'll take any XvImage pairing if we can get it */
for (i = 0; i < xvsp->nAdaptors; i++) {
XvAdaptorPtr pAdapt = xvsp->pAdaptors + i;
if (isImageAdaptor(pAdapt))
return pAdapt;
}
return NULL;
}
|
C
|
xserver
| 0 |
CVE-2013-3076
|
https://www.cvedetails.com/cve/CVE-2013-3076/
|
CWE-200
|
https://github.com/torvalds/linux/commit/72a763d805a48ac8c0bf48fdb510e84c12de51fe
|
72a763d805a48ac8c0bf48fdb510e84c12de51fe
|
crypto: algif - suppress sending source address information in recvmsg
The current code does not set the msg_namelen member to 0 and therefore
makes net/socket.c leak the local sockaddr_storage variable to userland
-- 128 bytes of kernel stack memory. Fix that.
Cc: <stable@vger.kernel.org> # 2.6.38
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
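A minimal sketch of this class of fix (placement within the function is an assumption): the recvmsg handler zeroes msg_namelen on entry, so net/socket.c never copies the uninitialized sockaddr_storage back to userland:
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
			    struct msghdr *msg, size_t ignored, int flags)
{
	msg->msg_namelen = 0; /* we never fill msg_name; report zero bytes */
	/* ... unchanged receive path ... */
	return 0; /* placeholder for the unchanged tail of the function */
}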
|
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
long timeout;
DEFINE_WAIT(wait);
int err = -ERESTARTSYS;
if (flags & MSG_DONTWAIT)
return -EAGAIN;
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
for (;;) {
if (signal_pending(current))
break;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
timeout = MAX_SCHEDULE_TIMEOUT;
if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
err = 0;
break;
}
}
finish_wait(sk_sleep(sk), &wait);
return err;
}
|
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
long timeout;
DEFINE_WAIT(wait);
int err = -ERESTARTSYS;
if (flags & MSG_DONTWAIT)
return -EAGAIN;
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
for (;;) {
if (signal_pending(current))
break;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
timeout = MAX_SCHEDULE_TIMEOUT;
if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
err = 0;
break;
}
}
finish_wait(sk_sleep(sk), &wait);
return err;
}
|
C
|
linux
| 0 |
CVE-2016-1907
|
https://www.cvedetails.com/cve/CVE-2016-1907/
|
CWE-119
|
https://anongit.mindrot.org/openssh.git/commit/?id=2fecfd486bdba9f51b3a789277bb0733ca36e1c0
|
2fecfd486bdba9f51b3a789277bb0733ca36e1c0
| null |
kex_from_blob(struct sshbuf *m, struct kex **kexp)
{
struct kex *kex;
int r;
if ((kex = calloc(1, sizeof(struct kex))) == NULL ||
(kex->my = sshbuf_new()) == NULL ||
(kex->peer = sshbuf_new()) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
if ((r = sshbuf_get_string(m, &kex->session_id, &kex->session_id_len)) != 0 ||
(r = sshbuf_get_u32(m, &kex->we_need)) != 0 ||
(r = sshbuf_get_u32(m, (u_int *)&kex->hostkey_type)) != 0 ||
(r = sshbuf_get_u32(m, &kex->kex_type)) != 0 ||
(r = sshbuf_get_stringb(m, kex->my)) != 0 ||
(r = sshbuf_get_stringb(m, kex->peer)) != 0 ||
(r = sshbuf_get_u32(m, &kex->flags)) != 0 ||
(r = sshbuf_get_cstring(m, &kex->client_version_string, NULL)) != 0 ||
(r = sshbuf_get_cstring(m, &kex->server_version_string, NULL)) != 0)
goto out;
kex->server = 1;
kex->done = 1;
r = 0;
out:
if (r != 0 || kexp == NULL) {
if (kex != NULL) {
if (kex->my != NULL)
sshbuf_free(kex->my);
if (kex->peer != NULL)
sshbuf_free(kex->peer);
free(kex);
}
if (kexp != NULL)
*kexp = NULL;
} else {
*kexp = kex;
}
return r;
}
|
kex_from_blob(struct sshbuf *m, struct kex **kexp)
{
struct kex *kex;
int r;
if ((kex = calloc(1, sizeof(struct kex))) == NULL ||
(kex->my = sshbuf_new()) == NULL ||
(kex->peer = sshbuf_new()) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
if ((r = sshbuf_get_string(m, &kex->session_id, &kex->session_id_len)) != 0 ||
(r = sshbuf_get_u32(m, &kex->we_need)) != 0 ||
(r = sshbuf_get_u32(m, (u_int *)&kex->hostkey_type)) != 0 ||
(r = sshbuf_get_u32(m, &kex->kex_type)) != 0 ||
(r = sshbuf_get_stringb(m, kex->my)) != 0 ||
(r = sshbuf_get_stringb(m, kex->peer)) != 0 ||
(r = sshbuf_get_u32(m, &kex->flags)) != 0 ||
(r = sshbuf_get_cstring(m, &kex->client_version_string, NULL)) != 0 ||
(r = sshbuf_get_cstring(m, &kex->server_version_string, NULL)) != 0)
goto out;
kex->server = 1;
kex->done = 1;
r = 0;
out:
if (r != 0 || kexp == NULL) {
if (kex != NULL) {
if (kex->my != NULL)
sshbuf_free(kex->my);
if (kex->peer != NULL)
sshbuf_free(kex->peer);
free(kex);
}
if (kexp != NULL)
*kexp = NULL;
} else {
*kexp = kex;
}
return r;
}
|
C
|
mindrot
| 0 |
CVE-2013-1774
|
https://www.cvedetails.com/cve/CVE-2013-1774/
|
CWE-264
|
https://github.com/torvalds/linux/commit/1ee0a224bc9aad1de496c795f96bc6ba2c394811
|
1ee0a224bc9aad1de496c795f96bc6ba2c394811
|
USB: io_ti: Fix NULL dereference in chase_port()
The tty is NULL when the port is hanging up.
chase_port() needs to check for this.
This patch is intended for stable series.
The behavior was observed and tested in Linux 3.2 and 3.7.1.
Johan Hovold submitted a more elaborate patch for the mainline kernel.
[ 56.277883] usb 1-1: edge_bulk_in_callback - nonzero read bulk status received: -84
[ 56.278811] usb 1-1: USB disconnect, device number 3
[ 56.278856] usb 1-1: edge_bulk_in_callback - stopping read!
[ 56.279562] BUG: unable to handle kernel NULL pointer dereference at 00000000000001c8
[ 56.280536] IP: [<ffffffff8144e62a>] _raw_spin_lock_irqsave+0x19/0x35
[ 56.281212] PGD 1dc1b067 PUD 1e0f7067 PMD 0
[ 56.282085] Oops: 0002 [#1] SMP
[ 56.282744] Modules linked in:
[ 56.283512] CPU 1
[ 56.283512] Pid: 25, comm: khubd Not tainted 3.7.1 #1 innotek GmbH VirtualBox/VirtualBox
[ 56.283512] RIP: 0010:[<ffffffff8144e62a>] [<ffffffff8144e62a>] _raw_spin_lock_irqsave+0x19/0x35
[ 56.283512] RSP: 0018:ffff88001fa99ab0 EFLAGS: 00010046
[ 56.283512] RAX: 0000000000000046 RBX: 00000000000001c8 RCX: 0000000000640064
[ 56.283512] RDX: 0000000000010000 RSI: ffff88001fa99b20 RDI: 00000000000001c8
[ 56.283512] RBP: ffff88001fa99b20 R08: 0000000000000000 R09: 0000000000000000
[ 56.283512] R10: 0000000000000000 R11: ffffffff812fcb4c R12: ffff88001ddf53c0
[ 56.283512] R13: 0000000000000000 R14: 00000000000001c8 R15: ffff88001e19b9f4
[ 56.283512] FS: 0000000000000000(0000) GS:ffff88001fd00000(0000) knlGS:0000000000000000
[ 56.283512] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 56.283512] CR2: 00000000000001c8 CR3: 000000001dc51000 CR4: 00000000000006e0
[ 56.283512] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 56.283512] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
[ 56.283512] Process khubd (pid: 25, threadinfo ffff88001fa98000, task ffff88001fa94f80)
[ 56.283512] Stack:
[ 56.283512] 0000000000000046 00000000000001c8 ffffffff810578ec ffffffff812fcb4c
[ 56.283512] ffff88001e19b980 0000000000002710 ffffffff812ffe81 0000000000000001
[ 56.283512] ffff88001fa94f80 0000000000000202 ffffffff00000001 0000000000000296
[ 56.283512] Call Trace:
[ 56.283512] [<ffffffff810578ec>] ? add_wait_queue+0x12/0x3c
[ 56.283512] [<ffffffff812fcb4c>] ? usb_serial_port_work+0x28/0x28
[ 56.283512] [<ffffffff812ffe81>] ? chase_port+0x84/0x2d6
[ 56.283512] [<ffffffff81063f27>] ? try_to_wake_up+0x199/0x199
[ 56.283512] [<ffffffff81263a5c>] ? tty_ldisc_hangup+0x222/0x298
[ 56.283512] [<ffffffff81300171>] ? edge_close+0x64/0x129
[ 56.283512] [<ffffffff810612f7>] ? __wake_up+0x35/0x46
[ 56.283512] [<ffffffff8106135b>] ? should_resched+0x5/0x23
[ 56.283512] [<ffffffff81264916>] ? tty_port_shutdown+0x39/0x44
[ 56.283512] [<ffffffff812fcb4c>] ? usb_serial_port_work+0x28/0x28
[ 56.283512] [<ffffffff8125d38c>] ? __tty_hangup+0x307/0x351
[ 56.283512] [<ffffffff812e6ddc>] ? usb_hcd_flush_endpoint+0xde/0xed
[ 56.283512] [<ffffffff8144e625>] ? _raw_spin_lock_irqsave+0x14/0x35
[ 56.283512] [<ffffffff812fd361>] ? usb_serial_disconnect+0x57/0xc2
[ 56.283512] [<ffffffff812ea99b>] ? usb_unbind_interface+0x5c/0x131
[ 56.283512] [<ffffffff8128d738>] ? __device_release_driver+0x7f/0xd5
[ 56.283512] [<ffffffff8128d9cd>] ? device_release_driver+0x1a/0x25
[ 56.283512] [<ffffffff8128d393>] ? bus_remove_device+0xd2/0xe7
[ 56.283512] [<ffffffff8128b7a3>] ? device_del+0x119/0x167
[ 56.283512] [<ffffffff812e8d9d>] ? usb_disable_device+0x6a/0x180
[ 56.283512] [<ffffffff812e2ae0>] ? usb_disconnect+0x81/0xe6
[ 56.283512] [<ffffffff812e4435>] ? hub_thread+0x577/0xe82
[ 56.283512] [<ffffffff8144daa7>] ? __schedule+0x490/0x4be
[ 56.283512] [<ffffffff8105798f>] ? abort_exclusive_wait+0x79/0x79
[ 56.283512] [<ffffffff812e3ebe>] ? usb_remote_wakeup+0x2f/0x2f
[ 56.283512] [<ffffffff812e3ebe>] ? usb_remote_wakeup+0x2f/0x2f
[ 56.283512] [<ffffffff810570b4>] ? kthread+0x81/0x89
[ 56.283512] [<ffffffff81057033>] ? __kthread_parkme+0x5c/0x5c
[ 56.283512] [<ffffffff8145387c>] ? ret_from_fork+0x7c/0xb0
[ 56.283512] [<ffffffff81057033>] ? __kthread_parkme+0x5c/0x5c
[ 56.283512] Code: 8b 7c 24 08 e8 17 0b c3 ff 48 8b 04 24 48 83 c4 10 c3 53 48 89 fb 41 50 e8 e0 0a c3 ff 48 89 04 24 e8 e7 0a c3 ff ba 00 00 01 00
<f0> 0f c1 13 48 8b 04 24 89 d1 c1 ea 10 66 39 d1 74 07 f3 90 66
[ 56.283512] RIP [<ffffffff8144e62a>] _raw_spin_lock_irqsave+0x19/0x35
[ 56.283512] RSP <ffff88001fa99ab0>
[ 56.283512] CR2: 00000000000001c8
[ 56.283512] ---[ end trace 49714df27e1679ce ]---
Signed-off-by: Wolfgang Frisch <wfpub@roembden.net>
Cc: Johan Hovold <jhovold@gmail.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
static void edge_interrupt_callback(struct urb *urb)
{
struct edgeport_serial *edge_serial = urb->context;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
int port_number;
int function;
int retval;
__u8 lsr;
__u8 msr;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
"%d\n", __func__, status);
goto exit;
}
if (!length) {
dev_dbg(&urb->dev->dev, "%s - no data in urb\n", __func__);
goto exit;
}
dev = &edge_serial->serial->dev->dev;
usb_serial_debug_data(dev, __func__, length, data);
if (length != 2) {
dev_dbg(dev, "%s - expecting packet of size 2, got %d\n", __func__, length);
goto exit;
}
port_number = TIUMP_GET_PORT_FROM_CODE(data[0]);
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
port_number, function, data[1]);
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
dev_dbg(dev, "%s - edge_port not found\n", __func__);
return;
}
switch (function) {
case TIUMP_INTERRUPT_CODE_LSR:
lsr = map_line_status(data[1]);
if (lsr & UMP_UART_LSR_DATA_MASK) {
/* Save the LSR event for bulk read
completion routine */
dev_dbg(dev, "%s - LSR Event Port %u LSR Status = %02x\n",
__func__, port_number, lsr);
edge_port->lsr_event = 1;
edge_port->lsr_mask = lsr;
} else {
dev_dbg(dev, "%s - ===== Port %d LSR Status = %02x ======\n",
__func__, port_number, lsr);
handle_new_lsr(edge_port, 0, lsr, 0);
}
break;
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
dev_dbg(dev, "%s - ===== Port %u MSR Status = %02x ======\n",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
default:
dev_err(&urb->dev->dev,
"%s - Unknown Interrupt code from UMP %x\n",
__func__, data[1]);
break;
}
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
|
static void edge_interrupt_callback(struct urb *urb)
{
struct edgeport_serial *edge_serial = urb->context;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
int port_number;
int function;
int retval;
__u8 lsr;
__u8 msr;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
"%d\n", __func__, status);
goto exit;
}
if (!length) {
dev_dbg(&urb->dev->dev, "%s - no data in urb\n", __func__);
goto exit;
}
dev = &edge_serial->serial->dev->dev;
usb_serial_debug_data(dev, __func__, length, data);
if (length != 2) {
dev_dbg(dev, "%s - expecting packet of size 2, got %d\n", __func__, length);
goto exit;
}
port_number = TIUMP_GET_PORT_FROM_CODE(data[0]);
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
port_number, function, data[1]);
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
dev_dbg(dev, "%s - edge_port not found\n", __func__);
return;
}
switch (function) {
case TIUMP_INTERRUPT_CODE_LSR:
lsr = map_line_status(data[1]);
if (lsr & UMP_UART_LSR_DATA_MASK) {
/* Save the LSR event for bulk read
completion routine */
dev_dbg(dev, "%s - LSR Event Port %u LSR Status = %02x\n",
__func__, port_number, lsr);
edge_port->lsr_event = 1;
edge_port->lsr_mask = lsr;
} else {
dev_dbg(dev, "%s - ===== Port %d LSR Status = %02x ======\n",
__func__, port_number, lsr);
handle_new_lsr(edge_port, 0, lsr, 0);
}
break;
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
dev_dbg(dev, "%s - ===== Port %u MSR Status = %02x ======\n",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
default:
dev_err(&urb->dev->dev,
"%s - Unknown Interrupt code from UMP %x\n",
__func__, data[1]);
break;
}
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
|
C
|
linux
| 0 |
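The fix the commit message above describes is a single guard: chase_port() must not dereference the tty when the port is hanging up. A minimal sketch follows; the struct field path and the elided body are assumptions, and only the NULL check mirrors the described change.

#include <linux/tty.h>
#include <linux/usb/serial.h>

/* Sketch only: bail out of chase_port() when the tty is already gone
 * (the port is hanging up) instead of dereferencing a NULL tty. */
static void chase_port(struct edgeport_port *port, unsigned long timeout,
		       int flush)
{
	struct tty_struct *tty = tty_port_tty_get(&port->port->port);

	if (!tty)
		return;	/* hanging up: nothing to drain or flush */

	/* ... original body: wait for the TX fifo to empty, then flush ... */

	tty_kref_put(tty);
}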
CVE-2009-3605
|
https://www.cvedetails.com/cve/CVE-2009-3605/
|
CWE-189
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
|
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
| null |
void CairoOutputDev::eoFill(GfxState *state) {
doPath (cairo, state, state->getPath());
cairo_set_fill_rule (cairo, CAIRO_FILL_RULE_EVEN_ODD);
cairo_set_source (cairo, fill_pattern);
LOG(printf ("fill-eo\n"));
cairo_fill (cairo);
if (cairo_shape) {
cairo_set_fill_rule (cairo_shape, CAIRO_FILL_RULE_EVEN_ODD);
doPath (cairo_shape, state, state->getPath());
cairo_fill (cairo_shape);
}
}
|
void CairoOutputDev::eoFill(GfxState *state) {
doPath (cairo, state, state->getPath());
cairo_set_fill_rule (cairo, CAIRO_FILL_RULE_EVEN_ODD);
cairo_set_source (cairo, fill_pattern);
LOG(printf ("fill-eo\n"));
cairo_fill (cairo);
if (cairo_shape) {
cairo_set_fill_rule (cairo_shape, CAIRO_FILL_RULE_EVEN_ODD);
doPath (cairo_shape, state, state->getPath());
cairo_fill (cairo_shape);
}
}
|
CPP
|
poppler
| 0 |
CVE-2018-13093
|
https://www.cvedetails.com/cve/CVE-2018-13093/
|
CWE-476
|
https://github.com/torvalds/linux/commit/afca6c5b2595fc44383919fba740c194b0b76aff
|
afca6c5b2595fc44383919fba740c194b0b76aff
|
xfs: validate cached inodes are free when allocated
A recent fuzzed filesystem image caused random dcache corruption
when the reproducer was run. This often showed up as panics in
lookup_slow() on a null inode->i_ops pointer when doing pathwalks.
BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
....
Call Trace:
lookup_slow+0x44/0x60
walk_component+0x3dd/0x9f0
link_path_walk+0x4a7/0x830
path_lookupat+0xc1/0x470
filename_lookup+0x129/0x270
user_path_at_empty+0x36/0x40
path_listxattr+0x98/0x110
SyS_listxattr+0x13/0x20
do_syscall_64+0xf5/0x280
entry_SYSCALL_64_after_hwframe+0x42/0xb7
but had many different failure modes including deadlocks trying to
lock the inode that was just allocated or KASAN reports of
use-after-free violations.
The cause of the problem was a corrupt INOBT on a v4 fs where the
root inode was marked as free in the inobt record. Hence when we
allocated an inode, it chose the root inode to allocate, found it in
the cache and re-initialised it.
We recently fixed a similar inode allocation issue caused by inobt
record corruption problem in xfs_iget_cache_miss() in commit
ee457001ed6c ("xfs: catch inode allocation state mismatch
corruption"). This change adds similar checks to the cache-hit path
to catch it, and turns the reproducer into a corruption shutdown
situation.
Reported-by: Wen Xu <wen.xu@gatech.edu>
Signed-Off-By: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
[darrick: fix typos in comment]
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
|
xfs_iflag_for_tag(
int tag)
{
switch (tag) {
case XFS_ICI_EOFBLOCKS_TAG:
return XFS_IEOFBLOCKS;
case XFS_ICI_COWBLOCKS_TAG:
return XFS_ICOWBLOCKS;
default:
ASSERT(0);
return 0;
}
}
|
xfs_iflag_for_tag(
int tag)
{
switch (tag) {
case XFS_ICI_EOFBLOCKS_TAG:
return XFS_IEOFBLOCKS;
case XFS_ICI_COWBLOCKS_TAG:
return XFS_ICOWBLOCKS;
default:
ASSERT(0);
return 0;
}
}
|
C
|
linux
| 0 |
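The cache-hit validation described above reduces to one idea: when inode allocation finds an inode in the cache, verify that the cached inode actually looks free before reinitialising it, and treat a mismatch as on-disk corruption. A hedged sketch; the helper name is assumed to echo the mainline fix, and the body here is illustrative rather than the verbatim kernel code.

/* Illustrative reduction of the check described above: an inode picked
 * for allocation (XFS_IGET_CREATE) must look unused in core, otherwise
 * the inode btree that claimed it was free is corrupt. */
static int xfs_iget_check_free_state(struct xfs_inode *ip, int flags)
{
	if (flags & XFS_IGET_CREATE) {
		if (VFS_I(ip)->i_mode != 0 || ip->i_d.di_nblocks != 0)
			return -EFSCORRUPTED;	/* shut down, don't reuse */
	}
	return 0;
}

Failing with -EFSCORRUPTED turns the fuzzed reproducer into a controlled corruption shutdown instead of handing out a live inode such as the root inode.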
null | null | null |
https://github.com/chromium/chromium/commit/b9e2ecab97a8a7f3cce06951ab92a3eaef559206
|
b9e2ecab97a8a7f3cce06951ab92a3eaef559206
|
Do not discount a MANUAL_SUBFRAME load just because it involved
some redirects.
R=brettw
BUG=21353
TEST=none
Review URL: http://codereview.chromium.org/246073
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@27887 0039d316-1c4b-4281-b951-d872f2087c98
|
void NavigationController::FinishRestore(int selected_index) {
DCHECK(selected_index >= 0 && selected_index < entry_count());
ConfigureEntriesForRestore(&entries_);
set_max_restored_page_id(entry_count());
last_committed_entry_index_ = selected_index;
}
|
void NavigationController::FinishRestore(int selected_index) {
DCHECK(selected_index >= 0 && selected_index < entry_count());
ConfigureEntriesForRestore(&entries_);
set_max_restored_page_id(entry_count());
last_committed_entry_index_ = selected_index;
}
|
C
|
Chrome
| 0 |
CVE-2017-7500
|
https://www.cvedetails.com/cve/CVE-2017-7500/
|
CWE-59
|
https://github.com/rpm-software-management/rpm/commit/c815822c8bdb138066ff58c624ae83e3a12ebfa9
|
c815822c8bdb138066ff58c624ae83e3a12ebfa9
|
Make verification match the new restricted directory symlink behavior
Only follow directory symlinks owned by target directory owner or root
during verification to match the behavior of fsmVerify() in the new
CVE-2017-7500 world order.
The code is clunkier than it should be, and the logic should use common code
with fsmVerify() instead of duplicating it here, but that needs more
changes than are comfortable to backport, so we are starting with this.
Also worth noting that the previous "follow the link" logic from
commit 3ccd774255b8215733e0bdfdf5a683da9dd10923 was not quite right,
it'd fail with RPMVERIFY_LSTATFAIL on a broken symlink when it should've
run verification on the symlink itself. This behavior is fixed here too.
Finally, once again fakechroot gets in the way and forces the related
verify testcase to be changed to be able to create a valid link. Reuse
the replacement testcase for the purpose and add another case for
verifying an invalid link.
|
static int rpmVerifyScript(rpmts ts, Header h)
{
int rc = 0;
if (headerIsEntry(h, RPMTAG_VERIFYSCRIPT)) {
/* fake up a erasure transaction element */
rpmte p = rpmteNew(ts, h, TR_REMOVED, NULL, NULL);
if (p != NULL) {
rpmteSetHeader(p, h);
rc = (rpmpsmRun(ts, p, PKG_VERIFY) != RPMRC_OK);
/* clean up our fake transaction bits */
rpmteFree(p);
} else {
rc = RPMRC_FAIL;
}
}
return rc;
}
|
static int rpmVerifyScript(rpmts ts, Header h)
{
int rc = 0;
if (headerIsEntry(h, RPMTAG_VERIFYSCRIPT)) {
/* fake up a erasure transaction element */
rpmte p = rpmteNew(ts, h, TR_REMOVED, NULL, NULL);
if (p != NULL) {
rpmteSetHeader(p, h);
rc = (rpmpsmRun(ts, p, PKG_VERIFY) != RPMRC_OK);
/* clean up our fake transaction bits */
rpmteFree(p);
} else {
rc = RPMRC_FAIL;
}
}
return rc;
}
|
C
|
rpm
| 0 |
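The verification rule stated above (only follow directory symlinks owned by the target directory owner or by root) is easy to express in isolation. The sketch below is a standalone userspace illustration with hypothetical names, not rpm's fsmVerify() code.

#include <sys/types.h>
#include <sys/stat.h>

/* Hypothetical helper: decide whether a path component that turns out
 * to be a symlink may be followed during package verification. */
static int symlink_may_be_followed(const char *path, uid_t dir_uid)
{
	struct stat sb;

	if (lstat(path, &sb) != 0)
		return 0;		/* cannot stat it: do not follow */
	if (!S_ISLNK(sb.st_mode))
		return 1;		/* not a symlink: nothing to decide */
	return sb.st_uid == 0 || sb.st_uid == dir_uid;
}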
CVE-2016-1683
|
https://www.cvedetails.com/cve/CVE-2016-1683/
|
CWE-119
|
https://github.com/chromium/chromium/commit/96dbafe288dbe2f0cc45fa3c39daf6d0c37acbab
|
96dbafe288dbe2f0cc45fa3c39daf6d0c37acbab
|
Roll libxslt to 891681e3e948f31732229f53cb6db7215f740fc7
BUG=583156,583171
Review URL: https://codereview.chromium.org/1853083002
Cr-Commit-Position: refs/heads/master@{#385338}
|
xsltCheckStackElem(xsltTransformContextPtr ctxt, const xmlChar *name,
const xmlChar *nameURI) {
xsltStackElemPtr cur;
if ((ctxt == NULL) || (name == NULL))
return(-1);
cur = xsltStackLookup(ctxt, name, nameURI);
if (cur == NULL)
return(0);
if (cur->comp != NULL) {
if (cur->comp->type == XSLT_FUNC_WITHPARAM)
return(3);
else if (cur->comp->type == XSLT_FUNC_PARAM)
return(2);
}
return(1);
}
|
xsltCheckStackElem(xsltTransformContextPtr ctxt, const xmlChar *name,
const xmlChar *nameURI) {
xsltStackElemPtr cur;
if ((ctxt == NULL) || (name == NULL))
return(-1);
cur = xsltStackLookup(ctxt, name, nameURI);
if (cur == NULL)
return(0);
if (cur->comp != NULL) {
if (cur->comp->type == XSLT_FUNC_WITHPARAM)
return(3);
else if (cur->comp->type == XSLT_FUNC_PARAM)
return(2);
}
return(1);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/f85716d839636e0814d3309ce1d8f8a2cd1fb9a8
|
f85716d839636e0814d3309ce1d8f8a2cd1fb9a8
|
[LayoutNG] Take overflow into account when column-balancing.
I didn't check why one test started to pass (it's not a very interesting
test for overflow).
Bug: 829028
Change-Id: I2c2d5fe3ea8c6a87f00df47f81d4158936d8a1bd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1742162
Reviewed-by: Ian Kilpatrick <ikilpatrick@chromium.org>
Commit-Queue: Morten Stenshorne <mstensho@chromium.org>
Cr-Commit-Position: refs/heads/master@{#685349}
|
scoped_refptr<const NGPhysicalBoxFragment> RunBlockLayoutAlgorithm(
Element* element) {
NGBlockNode container(ToLayoutBox(element->GetLayoutObject()));
NGConstraintSpace space = ConstructBlockLayoutTestConstraintSpace(
WritingMode::kHorizontalTb, TextDirection::kLtr,
LogicalSize(LayoutUnit(1000), kIndefiniteSize));
return NGBaseLayoutAlgorithmTest::RunBlockLayoutAlgorithm(container, space);
}
|
scoped_refptr<const NGPhysicalBoxFragment> RunBlockLayoutAlgorithm(
Element* element) {
NGBlockNode container(ToLayoutBox(element->GetLayoutObject()));
NGConstraintSpace space = ConstructBlockLayoutTestConstraintSpace(
WritingMode::kHorizontalTb, TextDirection::kLtr,
LogicalSize(LayoutUnit(1000), kIndefiniteSize));
return NGBaseLayoutAlgorithmTest::RunBlockLayoutAlgorithm(container, space);
}
|
C
|
Chrome
| 0 |
CVE-2011-2898
|
https://www.cvedetails.com/cve/CVE-2011-2898/
|
CWE-264
|
https://github.com/torvalds/linux/commit/13fcb7bd322164c67926ffe272846d4860196dc6
|
13fcb7bd322164c67926ffe272846d4860196dc6
|
af_packet: prevent information leak
In 2.6.27, commit 393e52e33c6c2 (packet: deliver VLAN TCI to userspace)
added a small information leak.
Add a padding field and make sure it's zeroed before copying to user.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
static int __packet_get_status(struct packet_sock *po, void *frame)
{
union {
struct tpacket_hdr *h1;
struct tpacket2_hdr *h2;
void *raw;
} h;
smp_rmb();
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
return h.h1->tp_status;
case TPACKET_V2:
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
return h.h2->tp_status;
default:
pr_err("TPACKET version not supported\n");
BUG();
return 0;
}
}
|
static int __packet_get_status(struct packet_sock *po, void *frame)
{
union {
struct tpacket_hdr *h1;
struct tpacket2_hdr *h2;
void *raw;
} h;
smp_rmb();
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
return h.h1->tp_status;
case TPACKET_V2:
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
return h.h2->tp_status;
default:
pr_err("TPACKET version not supported\n");
BUG();
return 0;
}
}
|
C
|
linux
| 0 |
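The padding fix above is an instance of a general rule: a structure copied to a less privileged domain must not carry uninitialized padding bytes. Below is a standalone illustration with hypothetical names; zeroing the whole struct before assigning fields is the part that mirrors the fix.

#include <stdint.h>
#include <string.h>

/* Hypothetical header handed across a privilege boundary. The explicit
 * padding member documents the hole; the memset() guarantees neither it
 * nor any compiler-inserted padding leaks stale memory. */
struct report_hdr {
	uint16_t vlan_tci;
	uint16_t padding;
	uint32_t len;
};

static void fill_report_hdr(struct report_hdr *h, uint16_t tci, uint32_t len)
{
	memset(h, 0, sizeof(*h));	/* clear padding and all fields first */
	h->vlan_tci = tci;
	h->len = len;
}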
CVE-2017-18232
|
https://www.cvedetails.com/cve/CVE-2017-18232/
| null |
https://github.com/torvalds/linux/commit/0558f33c06bb910e2879e355192227a8e8f0219d
|
0558f33c06bb910e2879e355192227a8e8f0219d
|
scsi: libsas: direct call probe and destruct
In commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery
competing with ata error handling") introduced disco mutex to prevent
rediscovery competing with ata error handling and put the whole
revalidation in the mutex. But the rphy add/remove needs to wait for the
error handling, which also grabs the disco mutex. This may lead to
deadlock. So the probe and destruct events were introduced to do the rphy
add/remove asynchronously and outside of the lock.
The asynchronously processed workers make the whole discovery process
non-atomic, so other events may interrupt the process. For example,
if a loss-of-signal event is inserted before the probe event,
sas_deform_port() is called and the port will be deleted.
And sas_port_delete() may run before the destruct event, but the
port-x:x is the top parent of end device or expander. This leads to
a kernel WARNING such as:
[ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22'
[ 82.042983] ------------[ cut here ]------------
[ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237
sysfs_remove_group+0x94/0xa0
[ 82.043059] Call trace:
[ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0
[ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70
[ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308
[ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60
[ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80
[ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0
[ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50
[ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0
[ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0
[ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490
[ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128
[ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50
Make probe and destruct a direct call in the disco and revalidate function,
but put them outside the lock. The whole discovery or revalidate won't
be interrupted by other events. And the DISCE_PROBE and DISCE_DESTRUCT
events are deleted as a result of the direct call.
Introduce a new list to destruct the sas_port and put the port delete after
the destruct. This makes sure the right order of destroying the sysfs
kobject and fix the warning above.
In sas_ex_revalidate_domain() have a loop to find all broadcasted
device, and sometimes we have a chance to find the same expander twice.
Because the sas_port will be deleted at the end of the whole revalidate
process, sas_port with the same name cannot be added before this.
Otherwise the sysfs will complain of creating duplicate filename. Since
the LLDD will send broadcast for every device change, we can only
process one expander's revalidation.
[mkp: kbuild test robot warning]
Signed-off-by: Jason Yan <yanaijie@huawei.com>
CC: John Garry <john.garry@huawei.com>
CC: Johannes Thumshirn <jthumshirn@suse.de>
CC: Ewan Milne <emilne@redhat.com>
CC: Christoph Hellwig <hch@lst.de>
CC: Tomas Henzl <thenzl@redhat.com>
CC: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
|
static void sas_suspend_devices(struct work_struct *work)
{
struct asd_sas_phy *phy;
struct domain_device *dev;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct Scsi_Host *shost = port->ha->core.shost;
struct sas_internal *si = to_sas_internal(shost->transportt);
clear_bit(DISCE_SUSPEND, &port->disc.pending);
sas_suspend_sata(port);
/* lldd is free to forget the domain_device across the
* suspension, we force the issue here to keep the reference
* counts aligned
*/
list_for_each_entry(dev, &port->dev_list, dev_list_node)
sas_notify_lldd_dev_gone(dev);
/* we are suspending, so we know events are disabled and
* phy_list is not being mutated
*/
list_for_each_entry(phy, &port->phy_list, port_phy_el) {
if (si->dft->lldd_port_formed)
si->dft->lldd_port_deformed(phy);
phy->suspended = 1;
port->suspended = 1;
}
}
|
static void sas_suspend_devices(struct work_struct *work)
{
struct asd_sas_phy *phy;
struct domain_device *dev;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct Scsi_Host *shost = port->ha->core.shost;
struct sas_internal *si = to_sas_internal(shost->transportt);
clear_bit(DISCE_SUSPEND, &port->disc.pending);
sas_suspend_sata(port);
/* lldd is free to forget the domain_device across the
* suspension, we force the issue here to keep the reference
* counts aligned
*/
list_for_each_entry(dev, &port->dev_list, dev_list_node)
sas_notify_lldd_dev_gone(dev);
/* we are suspending, so we know events are disabled and
* phy_list is not being mutated
*/
list_for_each_entry(phy, &port->phy_list, port_phy_el) {
if (si->dft->lldd_port_formed)
si->dft->lldd_port_deformed(phy);
phy->suspended = 1;
port->suspended = 1;
}
}
|
C
|
linux
| 0 |
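The restructuring described above follows a common locking pattern: do the discovery work under the mutex, collect objects that need teardown on a local list, and only destroy them after the lock is dropped. Below is a generic, runnable illustration of that pattern, not the libsas code itself.

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static pthread_mutex_t disco_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Revalidate under the lock, but defer destruction until after unlock,
 * mirroring how destruct became a direct call outside the disco mutex. */
static void revalidate(struct node **domain_list)
{
	struct node *destroy_list;

	pthread_mutex_lock(&disco_mutex);
	/* Stand-in for rediscovery: stale devices move onto the local
	 * destroy list while the shared domain list stays consistent. */
	destroy_list = *domain_list;
	*domain_list = NULL;
	pthread_mutex_unlock(&disco_mutex);

	while (destroy_list) {		/* teardown outside the lock */
		struct node *n = destroy_list;
		destroy_list = n->next;
		free(n);
	}
}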
CVE-2019-11599
|
https://www.cvedetails.com/cve/CVE-2019-11599/
|
CWE-362
|
https://github.com/torvalds/linux/commit/04f5866e41fb70690e28397487d8bd8eea7d712a
|
04f5866e41fb70690e28397487d8bd8eea7d712a
|
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, even though that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/alpine.LSU.2.11.1707191716030.2055@eggly.anvils
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular, because growsdown and growsup can move vm_start/vm_end,
the various loops the core dump does around the vma will not be
consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6";
however, the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2, so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/20190325224949.11068-1-aarcange@redhat.com
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Jann Horn <jannh@google.com>
Acked-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
u64 frame = 0, flags = 0;
struct page *page = NULL;
if (pte_present(pte)) {
if (pm->show_pfn)
frame = pte_pfn(pte);
flags |= PM_PRESENT;
page = _vm_normal_page(vma, addr, pte, true);
if (pte_soft_dirty(pte))
flags |= PM_SOFT_DIRTY;
} else if (is_swap_pte(pte)) {
swp_entry_t entry;
if (pte_swp_soft_dirty(pte))
flags |= PM_SOFT_DIRTY;
entry = pte_to_swp_entry(pte);
if (pm->show_pfn)
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags |= PM_SWAP;
if (is_migration_entry(entry))
page = migration_entry_to_page(entry);
if (is_device_private_entry(entry))
page = device_private_entry_to_page(entry);
}
if (page && !PageAnon(page))
flags |= PM_FILE;
if (page && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
if (vma->vm_flags & VM_SOFTDIRTY)
flags |= PM_SOFT_DIRTY;
return make_pme(frame, flags);
}
|
static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
u64 frame = 0, flags = 0;
struct page *page = NULL;
if (pte_present(pte)) {
if (pm->show_pfn)
frame = pte_pfn(pte);
flags |= PM_PRESENT;
page = _vm_normal_page(vma, addr, pte, true);
if (pte_soft_dirty(pte))
flags |= PM_SOFT_DIRTY;
} else if (is_swap_pte(pte)) {
swp_entry_t entry;
if (pte_swp_soft_dirty(pte))
flags |= PM_SOFT_DIRTY;
entry = pte_to_swp_entry(pte);
if (pm->show_pfn)
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags |= PM_SWAP;
if (is_migration_entry(entry))
page = migration_entry_to_page(entry);
if (is_device_private_entry(entry))
page = device_private_entry_to_page(entry);
}
if (page && !PageAnon(page))
flags |= PM_FILE;
if (page && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
if (vma->vm_flags & VM_SOFTDIRTY)
flags |= PM_SOFT_DIRTY;
return make_pme(frame, flags);
}
|
C
|
linux
| 0 |
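The guard the commit message introduces, mmget_still_valid(), can be stated in a few lines: an mm grabbed via mmget_not_zero()/get_task_mm() must be treated as frozen while a core dump is in flight. The fragment below is a hedged kernel-style sketch of that check and of its use in find_extend_vma(); it is not the verbatim patch.

/* Sketch: a core dump in progress publishes mm->core_state, so outside
 * observers must not mutate the vma layout once it is set. */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}

/* Hypothetical caller shape: refuse to extend a stack vma from outside
 * the process context while its mm is being dumped. */
struct vm_area_struct *find_extend_vma_sketch(struct mm_struct *mm,
					      unsigned long addr)
{
	if (!mmget_still_valid(mm))
		return NULL;
	/* ... normal find_vma()/expand_stack() path ... */
	return NULL;
}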
CVE-2016-10208
|
https://www.cvedetails.com/cve/CVE-2016-10208/
|
CWE-125
|
https://github.com/torvalds/linux/commit/3a4b77cd47bb837b8557595ec7425f281f2ca1fe
|
3a4b77cd47bb837b8557595ec7425f281f2ca1fe
|
ext4: validate s_first_meta_bg at mount time
Ralf Spenneberg reported that he hit a kernel crash when mounting a
modified ext4 image. It turns out that the kernel crashed when
calculating fs overhead (ext4_calculate_overhead()); this is because
the image has a very large s_first_meta_bg (debug code shows it's
842150400), and ext4 overruns the bitmap buffer, which is PAGE_SIZE,
in count_overhead().
ext4_calculate_overhead():
buf = get_zeroed_page(GFP_NOFS); <=== PAGE_SIZE buffer
blks = count_overhead(sb, i, buf);
count_overhead():
for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { <=== j = 842150400
ext4_set_bit(EXT4_B2C(sbi, s++), buf); <=== buffer overrun
count++;
}
This can be reproduced easily for me by this script:
#!/bin/bash
rm -f fs.img
mkdir -p /mnt/ext4
fallocate -l 16M fs.img
mke2fs -t ext4 -O bigalloc,meta_bg,^resize_inode -F fs.img
debugfs -w -R "ssv first_meta_bg 842150400" fs.img
mount -o loop fs.img /mnt/ext4
Fix it by validating s_first_meta_bg first at mount time, and
refusing to mount if its value exceeds the largest possible meta_bg
number.
Reported-by: Ralf Spenneberg <ralf@os-t.de>
Signed-off-by: Eryu Guan <guaneryu@gmail.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
|
static int ext4_enable_quotas(struct super_block *sb)
{
int type, err = 0;
unsigned long qf_inums[EXT4_MAXQUOTAS] = {
le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
};
bool quota_mopt[EXT4_MAXQUOTAS] = {
test_opt(sb, USRQUOTA),
test_opt(sb, GRPQUOTA),
test_opt(sb, PRJQUOTA),
};
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
for (type = 0; type < EXT4_MAXQUOTAS; type++) {
if (qf_inums[type]) {
err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
DQUOT_USAGE_ENABLED |
(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
if (err) {
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "
"e2fsck to fix.", type, err);
return err;
}
}
}
return 0;
}
|
static int ext4_enable_quotas(struct super_block *sb)
{
int type, err = 0;
unsigned long qf_inums[EXT4_MAXQUOTAS] = {
le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
};
bool quota_mopt[EXT4_MAXQUOTAS] = {
test_opt(sb, USRQUOTA),
test_opt(sb, GRPQUOTA),
test_opt(sb, PRJQUOTA),
};
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
for (type = 0; type < EXT4_MAXQUOTAS; type++) {
if (qf_inums[type]) {
err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
DQUOT_USAGE_ENABLED |
(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
if (err) {
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "
"e2fsck to fix.", type, err);
return err;
}
}
}
return 0;
}
|
C
|
linux
| 0 |
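The mount-time validation described above boils down to a single bounds check inside ext4_fill_super(): with the meta_bg feature, s_first_meta_bg indexes group-descriptor blocks, so it must not exceed the computed descriptor block count. A sketch of that shape follows; the surrounding variables es and db_count are assumed from ext4_fill_super(), and the exact wording is illustrative.

	if (ext4_has_feature_meta_bg(sb)) {
		/* Refuse to mount rather than overrun the PAGE_SIZE
		 * bitmap buffer later in count_overhead(). */
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}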
CVE-2013-0886
|
https://www.cvedetails.com/cve/CVE-2013-0886/
| null |
https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76
|
18d67244984a574ba2dd8779faabc0e3e34f4b76
|
Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allow for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
TBR=sky@chromium.org
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
|
static void OnGrabNotify(GtkWidget* widget, gboolean was_grabbed,
RenderWidgetHostViewGtk* host_view) {
if (was_grabbed) {
if (host_view->was_imcontext_focused_before_grab_)
host_view->im_context_->OnFocusIn();
} else {
host_view->was_imcontext_focused_before_grab_ =
host_view->im_context_->is_focused();
if (host_view->was_imcontext_focused_before_grab_) {
gdk_window_set_cursor(gtk_widget_get_window(widget), NULL);
host_view->im_context_->OnFocusOut();
}
}
}
|
static void OnGrabNotify(GtkWidget* widget, gboolean was_grabbed,
RenderWidgetHostViewGtk* host_view) {
if (was_grabbed) {
if (host_view->was_imcontext_focused_before_grab_)
host_view->im_context_->OnFocusIn();
} else {
host_view->was_imcontext_focused_before_grab_ =
host_view->im_context_->is_focused();
if (host_view->was_imcontext_focused_before_grab_) {
gdk_window_set_cursor(gtk_widget_get_window(widget), NULL);
host_view->im_context_->OnFocusOut();
}
}
}
|
C
|
Chrome
| 0 |
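The handshake the commit message outlines rides on the CHROMIUM_texture_mailbox extension: the producing context writes its texture into a named mailbox and the consuming context, in another share group, reads it back out. A minimal GL-side sketch, assuming the extension's original target-plus-name signatures; the texture ids and the single-function framing are placeholders.

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

/* Sketch of one frame transfer. In the real flow the produce and
 * consume calls run in different contexts (renderer vs. browser). */
static void transfer_frame(GLuint producer_tex, GLuint consumer_tex)
{
	GLbyte mailbox[64];		/* GL_MAILBOX_SIZE_CHROMIUM bytes */

	glGenMailboxCHROMIUM(mailbox);

	/* Producer: hand the drawn backbuffer texture to the mailbox. */
	glBindTexture(GL_TEXTURE_2D, producer_tex);
	glProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox);

	/* Consumer: take ownership of the mailbox contents. */
	glBindTexture(GL_TEXTURE_2D, consumer_tex);
	glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);
}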
CVE-2017-12858
|
https://www.cvedetails.com/cve/CVE-2017-12858/
|
CWE-415
|
https://github.com/nih-at/libzip/commit/2217022b7d1142738656d891e00b3d2d9179b796
|
2217022b7d1142738656d891e00b3d2d9179b796
|
Fix double free().
Found by Brian 'geeknik' Carpenter using AFL.
|
_zip_cdir_free(zip_cdir_t *cd)
{
zip_uint64_t i;
if (!cd)
return;
for (i=0; i<cd->nentry; i++)
_zip_entry_finalize(cd->entry+i);
free(cd->entry);
_zip_string_free(cd->comment);
free(cd);
}
|
_zip_cdir_free(zip_cdir_t *cd)
{
zip_uint64_t i;
if (!cd)
return;
for (i=0; i<cd->nentry; i++)
_zip_entry_finalize(cd->entry+i);
free(cd->entry);
_zip_string_free(cd->comment);
free(cd);
}
|
C
|
libzip
| 0 |