CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul
---|---|---|---|---|---|---|---|---|---|---
CVE-2016-1615
|
https://www.cvedetails.com/cve/CVE-2016-1615/
|
CWE-254
|
https://github.com/chromium/chromium/commit/b399a05453d7b3e2dfdec67865fefe6953bcc59e
|
b399a05453d7b3e2dfdec67865fefe6953bcc59e
|
Allocate a FrameSinkId for RenderWidgetHostViewAura in mus+ash
RenderWidgetHostViewChildFrame expects its parent to have a valid
FrameSinkId. Make sure RenderWidgetHostViewAura has a FrameSinkId even
if DelegatedFrameHost is not used (in mus+ash).
BUG=706553
TBR=jam@chromium.org
Review-Url: https://codereview.chromium.org/2847253003
Cr-Commit-Position: refs/heads/master@{#468179}
|
void RenderWidgetHostViewAura::CreateDelegatedFrameHostClient() {
if (IsMus())
return;
if (!delegated_frame_host_client_) {
delegated_frame_host_client_ =
base::MakeUnique<DelegatedFrameHostClientAura>(this);
}
delegated_frame_host_ = base::MakeUnique<DelegatedFrameHost>(
frame_sink_id_, delegated_frame_host_client_.get());
if (renderer_compositor_frame_sink_) {
delegated_frame_host_->DidCreateNewRendererCompositorFrameSink(
renderer_compositor_frame_sink_);
}
UpdateNeedsBeginFramesInternal();
if (host_->delegate() && host_->delegate()->GetInputEventRouter()) {
host_->delegate()->GetInputEventRouter()->AddFrameSinkIdOwner(
GetFrameSinkId(), this);
}
}
|
void RenderWidgetHostViewAura::CreateDelegatedFrameHostClient() {
if (IsMus())
return;
cc::FrameSinkId frame_sink_id =
host_->AllocateFrameSinkId(is_guest_view_hack_);
if (!delegated_frame_host_client_) {
delegated_frame_host_client_ =
base::MakeUnique<DelegatedFrameHostClientAura>(this);
}
delegated_frame_host_ = base::MakeUnique<DelegatedFrameHost>(
frame_sink_id, delegated_frame_host_client_.get());
if (renderer_compositor_frame_sink_) {
delegated_frame_host_->DidCreateNewRendererCompositorFrameSink(
renderer_compositor_frame_sink_);
}
UpdateNeedsBeginFramesInternal();
if (host_->delegate() && host_->delegate()->GetInputEventRouter()) {
host_->delegate()->GetInputEventRouter()->AddFrameSinkIdOwner(
GetFrameSinkId(), this);
}
}
|
C
|
Chrome
| 1 |
CVE-2019-12111
|
https://www.cvedetails.com/cve/CVE-2019-12111/
|
CWE-476
|
https://github.com/miniupnp/miniupnp/commit/cb8a02af7a5677cf608e86d57ab04241cf34e24f
|
cb8a02af7a5677cf608e86d57ab04241cf34e24f
|
pcpserver.c: copyIPv6IfDifferent() check for NULL src argument
|
static int processPCPRequest(void * req, int req_size, pcp_info_t *pcp_msg_info)
{
int remainingSize;
/* start with PCP_SUCCESS as result code,
* if everything is OK value will be unchanged */
pcp_msg_info->result_code = PCP_SUCCESS;
remainingSize = req_size;
/* discard request that exceeds maximal length,
or that is shorter than PCP_MIN_LEN (=24)
or that is not the multiple of 4 */
if (req_size < 3)
return 0; /* ignore msg */
if (req_size < PCP_MIN_LEN) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 1; /* send response */
}
if ( (req_size > PCP_MAX_LEN) || ( (req_size & 3) != 0)) {
syslog(LOG_ERR, "PCP: Size of PCP packet(%d) is larger than %d bytes or "
"the size is not multiple of 4.\n", req_size, PCP_MAX_LEN);
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 1; /* send response */
}
/* first parse request header */
if (parseCommonRequestHeader(req, pcp_msg_info) ) {
return 1;
}
remainingSize -= PCP_COMMON_REQUEST_SIZE;
req += PCP_COMMON_REQUEST_SIZE;
if (pcp_msg_info->version == 1) {
/* legacy PCP version 1 support */
switch (pcp_msg_info->opcode) {
case PCP_OPCODE_MAP:
remainingSize -= PCP_MAP_V1_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printMAPOpcodeVersion1(req);
#endif /* DEBUG */
parsePCPMAP_version1(req, pcp_msg_info);
req += PCP_MAP_V1_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPMap(pcp_msg_info);
} else {
CreatePCPMap(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v1 MAP message.");
return pcp_msg_info->result_code;
}
break;
#ifdef PCP_PEER
case PCP_OPCODE_PEER:
remainingSize -= PCP_PEER_V1_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printPEEROpcodeVersion1(req);
#endif /* DEBUG */
parsePCPPEER_version1(req, pcp_msg_info);
req += PCP_PEER_V1_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPPeer(pcp_msg_info);
} else {
CreatePCPPeer(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v1 PEER message.");
return pcp_msg_info->result_code;
}
break;
#endif /* PCP_PEER */
default:
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPCODE;
break;
}
} else if (pcp_msg_info->version == 2) {
/* RFC 6887 PCP support
* http://tools.ietf.org/html/rfc6887 */
switch (pcp_msg_info->opcode) {
case PCP_OPCODE_ANNOUNCE:
/* should check PCP Client's IP Address in request */
/* see http://tools.ietf.org/html/rfc6887#section-14.1 */
break;
case PCP_OPCODE_MAP:
remainingSize -= PCP_MAP_V2_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printMAPOpcodeVersion2(req);
#endif /* DEBUG */
parsePCPMAP_version2(req, pcp_msg_info);
req += PCP_MAP_V2_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPMap(pcp_msg_info);
} else {
CreatePCPMap(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v2 MAP message.");
return pcp_msg_info->result_code;
}
break;
#ifdef PCP_PEER
case PCP_OPCODE_PEER:
remainingSize -= PCP_PEER_V2_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printPEEROpcodeVersion2(req);
#endif /* DEBUG */
parsePCPPEER_version2(req, pcp_msg_info);
req += PCP_PEER_V2_SIZE;
if (pcp_msg_info->result_code != 0) {
return pcp_msg_info->result_code;
}
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPPeer(pcp_msg_info);
} else {
CreatePCPPeer(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v2 PEER message.");
}
break;
#endif /* PCP_PEER */
#ifdef PCP_SADSCP
case PCP_OPCODE_SADSCP:
remainingSize -= PCP_SADSCP_REQ_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
remainingSize -= ((uint8_t *)req)[13]; /* app_name_length */
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printSADSCPOpcode(req);
#endif
parseSADSCP(req, pcp_msg_info);
req += PCP_SADSCP_REQ_SIZE;
if (pcp_msg_info->result_code != 0) {
return pcp_msg_info->result_code;
}
req += pcp_msg_info->app_name_len;
get_dscp_value(pcp_msg_info);
break;
#endif
default:
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPCODE;
break;
}
} else {
pcp_msg_info->result_code = PCP_ERR_UNSUPP_VERSION;
return pcp_msg_info->result_code;
}
return 1;
}
|
static int processPCPRequest(void * req, int req_size, pcp_info_t *pcp_msg_info)
{
int remainingSize;
/* start with PCP_SUCCESS as result code,
* if everything is OK value will be unchanged */
pcp_msg_info->result_code = PCP_SUCCESS;
remainingSize = req_size;
/* discard request that exceeds maximal length,
or that is shorter than PCP_MIN_LEN (=24)
or that is not the multiple of 4 */
if (req_size < 3)
return 0; /* ignore msg */
if (req_size < PCP_MIN_LEN) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 1; /* send response */
}
if ( (req_size > PCP_MAX_LEN) || ( (req_size & 3) != 0)) {
syslog(LOG_ERR, "PCP: Size of PCP packet(%d) is larger than %d bytes or "
"the size is not multiple of 4.\n", req_size, PCP_MAX_LEN);
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 1; /* send response */
}
/* first parse request header */
if (parseCommonRequestHeader(req, pcp_msg_info) ) {
return 1;
}
remainingSize -= PCP_COMMON_REQUEST_SIZE;
req += PCP_COMMON_REQUEST_SIZE;
if (pcp_msg_info->version == 1) {
/* legacy PCP version 1 support */
switch (pcp_msg_info->opcode) {
case PCP_OPCODE_MAP:
remainingSize -= PCP_MAP_V1_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printMAPOpcodeVersion1(req);
#endif /* DEBUG */
parsePCPMAP_version1(req, pcp_msg_info);
req += PCP_MAP_V1_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPMap(pcp_msg_info);
} else {
CreatePCPMap(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v1 MAP message.");
return pcp_msg_info->result_code;
}
break;
#ifdef PCP_PEER
case PCP_OPCODE_PEER:
remainingSize -= PCP_PEER_V1_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printPEEROpcodeVersion1(req);
#endif /* DEBUG */
parsePCPPEER_version1(req, pcp_msg_info);
req += PCP_PEER_V1_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPPeer(pcp_msg_info);
} else {
CreatePCPPeer(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v1 PEER message.");
return pcp_msg_info->result_code;
}
break;
#endif /* PCP_PEER */
default:
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPCODE;
break;
}
} else if (pcp_msg_info->version == 2) {
/* RFC 6887 PCP support
* http://tools.ietf.org/html/rfc6887 */
switch (pcp_msg_info->opcode) {
case PCP_OPCODE_ANNOUNCE:
/* should check PCP Client's IP Address in request */
/* see http://tools.ietf.org/html/rfc6887#section-14.1 */
break;
case PCP_OPCODE_MAP:
remainingSize -= PCP_MAP_V2_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printMAPOpcodeVersion2(req);
#endif /* DEBUG */
parsePCPMAP_version2(req, pcp_msg_info);
req += PCP_MAP_V2_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPMap(pcp_msg_info);
} else {
CreatePCPMap(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v2 MAP message.");
return pcp_msg_info->result_code;
}
break;
#ifdef PCP_PEER
case PCP_OPCODE_PEER:
remainingSize -= PCP_PEER_V2_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printPEEROpcodeVersion2(req);
#endif /* DEBUG */
parsePCPPEER_version2(req, pcp_msg_info);
req += PCP_PEER_V2_SIZE;
if (pcp_msg_info->result_code != 0) {
return pcp_msg_info->result_code;
}
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPPeer(pcp_msg_info);
} else {
CreatePCPPeer(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v2 PEER message.");
}
break;
#endif /* PCP_PEER */
#ifdef PCP_SADSCP
case PCP_OPCODE_SADSCP:
remainingSize -= PCP_SADSCP_REQ_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
remainingSize -= ((uint8_t *)req)[13]; /* app_name_length */
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printSADSCPOpcode(req);
#endif
parseSADSCP(req, pcp_msg_info);
req += PCP_SADSCP_REQ_SIZE;
if (pcp_msg_info->result_code != 0) {
return pcp_msg_info->result_code;
}
req += pcp_msg_info->app_name_len;
get_dscp_value(pcp_msg_info);
break;
#endif
default:
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPCODE;
break;
}
} else {
pcp_msg_info->result_code = PCP_ERR_UNSUPP_VERSION;
return pcp_msg_info->result_code;
}
return 1;
}
|
C
|
miniupnp
| 0 |
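Editor's note on the record above: the function the commit actually fixes, copyIPv6IfDifferent(), is not among the sampled functions (the shown processPCPRequest is unchanged, vul=0). Below is a minimal sketch of the NULL guard the commit message describes; the function name comes from the commit message, while the body and the in6_addr copy size are assumptions based on typical pcpserver.c usage, not the verbatim patch.

```c
#include <string.h>
#include <netinet/in.h>

/* Sketch of the fix named in the commit message: bail out when src is
 * NULL (previously dereferenced unconditionally) or when the copy
 * would be a no-op. Illustrative, not the verbatim patch. */
static void copyIPv6IfDifferent(void *dest, const void *src)
{
	if (dest != src && src != NULL) {
		memcpy(dest, src, sizeof(struct in6_addr));
	}
}
```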
CVE-2013-0886
|
https://www.cvedetails.com/cve/CVE-2013-0886/
| null |
https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76
|
18d67244984a574ba2dd8779faabc0e3e34f4b76
|
Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
TBR=sky@chromium.org
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderWidgetHostViewGtk::SetTooltipText(const string16& tooltip_text) {
const int kMaxTooltipLength = 8 << 10;
const string16 clamped_tooltip =
ui::TruncateString(tooltip_text, kMaxTooltipLength);
if (clamped_tooltip.empty()) {
gtk_widget_set_has_tooltip(view_.get(), FALSE);
} else {
gtk_widget_set_tooltip_text(view_.get(),
UTF16ToUTF8(clamped_tooltip).c_str());
}
}
|
void RenderWidgetHostViewGtk::SetTooltipText(const string16& tooltip_text) {
const int kMaxTooltipLength = 8 << 10;
const string16 clamped_tooltip =
ui::TruncateString(tooltip_text, kMaxTooltipLength);
if (clamped_tooltip.empty()) {
gtk_widget_set_has_tooltip(view_.get(), FALSE);
} else {
gtk_widget_set_tooltip_text(view_.get(),
UTF16ToUTF8(clamped_tooltip).c_str());
}
}
|
C
|
Chrome
| 0 |
CVE-2015-6763
|
https://www.cvedetails.com/cve/CVE-2015-6763/
| null |
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
|
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
|
MacViews: Enable secure text input for password Textfields.
In Cocoa, the NSTextInputContext automatically enables secure text input
when it is activated while in secure text entry mode.
RenderWidgetHostViewMac has done a similar thing for ages, following the
WebKit example.
views::Textfield needs to do the same thing in a fashion that is
synchronized with RenderWidgetHostViewMac; otherwise race conditions
are possible when the Textfield gets focus and activates secure text
input, then the RWHVM loses focus immediately afterwards and disables
secure text input instead of leaving it enabled.
BUG=818133,677220
Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b
Reviewed-on: https://chromium-review.googlesource.com/943064
Commit-Queue: Michail Pishchagin <mblsha@yandex-team.ru>
Reviewed-by: Pavel Feldman <pfeldman@chromium.org>
Reviewed-by: Avi Drissman <avi@chromium.org>
Reviewed-by: Peter Kasting <pkasting@chromium.org>
Cr-Commit-Position: refs/heads/master@{#542517}
|
bool InputType::IsTextField() const {
return false;
}
|
bool InputType::IsTextField() const {
return false;
}
|
C
|
Chrome
| 0 |
CVE-2017-9060
|
https://www.cvedetails.com/cve/CVE-2017-9060/
|
CWE-772
|
https://git.qemu.org/?p=qemu.git;a=commit;h=dd248ed7e204ee8a1873914e02b8b526e8f1b80d
|
dd248ed7e204ee8a1873914e02b8b526e8f1b80d
| null |
static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
g->virtio_config.events_read |= event_type;
virtio_notify_config(&g->parent_obj);
}
|
static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
g->virtio_config.events_read |= event_type;
virtio_notify_config(&g->parent_obj);
}
|
C
|
qemu
| 0 |
CVE-2013-0924
|
https://www.cvedetails.com/cve/CVE-2013-0924/
|
CWE-264
|
https://github.com/chromium/chromium/commit/e21bdfb9c758ac411012ad84f83d26d3f7dd69fb
|
e21bdfb9c758ac411012ad84f83d26d3f7dd69fb
|
Check prefs before allowing extension file access in the permissions API.
R=mpcomplete@chromium.org
BUG=169632
Review URL: https://chromiumcodereview.appspot.com/11884008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176853 0039d316-1c4b-4281-b951-d872f2087c98
|
AlarmManager* TestExtensionSystem::alarm_manager() {
return alarm_manager_.get();
}
|
AlarmManager* TestExtensionSystem::alarm_manager() {
return alarm_manager_.get();
}
|
C
|
Chrome
| 0 |
CVE-2017-18200
|
https://www.cvedetails.com/cve/CVE-2017-18200/
|
CWE-20
|
https://github.com/torvalds/linux/commit/638164a2718f337ea224b747cf5977ef143166a4
|
638164a2718f337ea224b747cf5977ef143166a4
|
f2fs: fix potential panic during fstrim
As Ju Hyung Park reported:
"When 'fstrim' is called for manual trim, a BUG() can be triggered
randomly with this patch.
I'm seeing this issue on both x86 Desktop and arm64 Android phone.
On x86 Desktop, this was caused during Ubuntu boot-up. I have a
cronjob installed which calls 'fstrim -v /' during boot. On arm64
Android, this was caused during GC looping with 1ms gc_min_sleep_time
& gc_max_sleep_time."
Root cause of this issue is that f2fs_wait_discard_bios can only be
used by f2fs_put_super, because during put_super there must be no
other referrers, so it can ignore discard entry's reference count
when removing the entry, otherwise in other caller we will hit bug_on
in __remove_discard_cmd as there may be other issuer added reference
count in discard entry.
Thread A Thread B
- issue_discard_thread
- f2fs_ioc_fitrim
- f2fs_trim_fs
- f2fs_wait_discard_bios
- __issue_discard_cmd
- __submit_discard_cmd
- __wait_discard_cmd
- dc->ref++
- __wait_one_discard_bio
- __wait_discard_cmd
- __remove_discard_cmd
- f2fs_bug_on(sbi, dc->ref)
Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de
Reported-by: Ju Hyung Park <qkrwngud825@gmail.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
|
void f2fs_quota_off_umount(struct super_block *sb)
{
}
|
void f2fs_quota_off_umount(struct super_block *sb)
{
}
|
C
|
linux
| 0 |
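Editor's note on the record above: the thread diagram in the commit message reduces to one invariant, namely that forced removal of a discard entry may only assert a zero reference count when the caller is the sole referrer (the put_super/umount path), never during a concurrent fstrim. A self-contained toy model of that rule follows; the names (discard_cmd, can_force_remove) are hypothetical stand-ins for the f2fs internals, not kernel code.

```c
#include <assert.h>

/* Toy model of the invariant behind the fix (not actual f2fs code):
 * dc->ref counts concurrent waiters added by __wait_discard_cmd. */
struct discard_cmd {
	int ref;	/* references held by other issuers/waiters */
};

/* Forced removal (with the BUG_ON-style assertion) is only valid on
 * the umount path, where no other referrer can exist; an fstrim
 * caller must instead wait until ref drops to zero. */
static int can_force_remove(const struct discard_cmd *dc, int umount)
{
	if (umount) {
		assert(dc->ref == 0);	/* safe: no concurrent referrers */
		return 1;
	}
	return dc->ref == 0;
}
```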
CVE-2017-5009
|
https://www.cvedetails.com/cve/CVE-2017-5009/
|
CWE-119
|
https://github.com/chromium/chromium/commit/1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
|
1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
|
DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resoure type into the DispatchWillSendRequest
instrumenation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <pfeldman@chromium.org>
Reviewed-by: Dmitry Gozman <dgozman@chromium.org>
Commit-Queue: Andrey Lushnikov <lushnikov@chromium.org>
Cr-Commit-Position: refs/heads/master@{#507936}
|
WebCachePolicy FetchContext::ResourceRequestCachePolicy(
const ResourceRequest&,
Resource::Type,
FetchParameters::DeferOption defer) const {
return WebCachePolicy::kUseProtocolCachePolicy;
}
|
WebCachePolicy FetchContext::ResourceRequestCachePolicy(
const ResourceRequest&,
Resource::Type,
FetchParameters::DeferOption defer) const {
return WebCachePolicy::kUseProtocolCachePolicy;
}
|
C
|
Chrome
| 0 |
CVE-2017-5101
|
https://www.cvedetails.com/cve/CVE-2017-5101/
|
CWE-20
|
https://github.com/chromium/chromium/commit/29734f46c6dc9362783091180c2ee279ad53637f
|
29734f46c6dc9362783091180c2ee279ad53637f
|
media: remove base::SharedMemoryHandle usage in v4l2 encoder
This replaces a use of the legacy UnalignedSharedMemory ctor
taking a SharedMemoryHandle with the current ctor taking a
PlatformSharedMemoryRegion.
Bug: 849207
Change-Id: Iea24ebdcd941cf2fa97e19cf2aeac1a18f9773d9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1697602
Commit-Queue: Matthew Cary (CET) <mattcary@chromium.org>
Reviewed-by: Ricky Liang <jcliang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#681740}
|
void V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::PrepareJpegMarkers(
gfx::Size coded_size) {
DCHECK(parent_->encoder_task_runner_->BelongsToCurrentThread());
const int kNumDQT = 2;
for (size_t i = 0; i < kNumDQT; ++i) {
const uint8_t kQuantSegment[] = {
0xFF, JPEG_DQT, 0x00,
0x03 + kDctSize, // Segment length:67 (2-byte).
static_cast<uint8_t>(i) // Precision (4-bit high) = 0,
};
for (size_t j = 0; j < sizeof(kQuantSegment); ++j) {
jpeg_markers_.push_back(kQuantSegment[j]);
}
for (size_t j = 0; j < kDctSize; ++j) {
jpeg_markers_.push_back(quantization_table_[i].value[j]);
}
}
const int kNumOfComponents = 3;
const uint8_t kStartOfFrame[] = {
0xFF,
JPEG_SOF0, // Baseline.
0x00,
0x11, // Segment length:17 (2-byte).
8, // Data precision.
static_cast<uint8_t>((coded_size.height() >> 8) & 0xFF),
static_cast<uint8_t>(coded_size.height() & 0xFF),
static_cast<uint8_t>((coded_size.width() >> 8) & 0xFF),
static_cast<uint8_t>(coded_size.width() & 0xFF),
kNumOfComponents,
};
for (size_t i = 0; i < sizeof(kStartOfFrame); ++i) {
jpeg_markers_.push_back(kStartOfFrame[i]);
}
for (size_t i = 0; i < kNumOfComponents; ++i) {
uint8_t h_sample_factor = 1;
uint8_t v_sample_factor = 1;
uint8_t quant_table_number = 1;
if (!i) {
h_sample_factor = 2;
v_sample_factor = 2;
quant_table_number = 0;
}
jpeg_markers_.push_back(i + 1);
jpeg_markers_.push_back((h_sample_factor << 4) | v_sample_factor);
jpeg_markers_.push_back(quant_table_number);
}
static const uint8_t kDcSegment[] = {
0xFF, JPEG_DHT, 0x00,
0x1F, // Segment length:31 (2-byte).
};
static const uint8_t kAcSegment[] = {
0xFF, JPEG_DHT, 0x00,
0xB5, // Segment length:181 (2-byte).
};
const int kNumHuffmanTables = 2;
for (size_t i = 0; i < kNumHuffmanTables; ++i) {
for (size_t j = 0; j < sizeof(kDcSegment); ++j) {
jpeg_markers_.push_back(kDcSegment[j]);
}
jpeg_markers_.push_back(static_cast<uint8_t>(i));
const JpegHuffmanTable& dcTable = kDefaultDcTable[i];
for (size_t j = 0; j < kNumDcRunSizeBits; ++j)
jpeg_markers_.push_back(dcTable.code_length[j]);
for (size_t j = 0; j < kNumDcCodeWordsHuffVal; ++j)
jpeg_markers_.push_back(dcTable.code_value[j]);
for (size_t j = 0; j < sizeof(kAcSegment); ++j) {
jpeg_markers_.push_back(kAcSegment[j]);
}
jpeg_markers_.push_back(0x10 | static_cast<uint8_t>(i));
const JpegHuffmanTable& acTable = kDefaultAcTable[i];
for (size_t j = 0; j < kNumAcRunSizeBits; ++j)
jpeg_markers_.push_back(acTable.code_length[j]);
for (size_t j = 0; j < kNumAcCodeWordsHuffVal; ++j)
jpeg_markers_.push_back(acTable.code_value[j]);
}
static const uint8_t kStartOfScan[] = {
0xFF, JPEG_SOS, 0x00,
0x0C, // Segment Length:12 (2-byte).
0x03 // Number of components in scan.
};
for (size_t i = 0; i < sizeof(kStartOfScan); ++i) {
jpeg_markers_.push_back(kStartOfScan[i]);
}
for (uint8_t i = 0; i < kNumOfComponents; ++i) {
uint8_t dc_table_number = 1;
uint8_t ac_table_number = 1;
if (!i) {
dc_table_number = 0;
ac_table_number = 0;
}
jpeg_markers_.push_back(i + 1);
jpeg_markers_.push_back((dc_table_number << 4) | ac_table_number);
}
jpeg_markers_.push_back(0x00); // 0 for Baseline.
jpeg_markers_.push_back(0x3F); // 63 for Baseline.
jpeg_markers_.push_back(0x00); // 0 for Baseline.
}
|
void V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::PrepareJpegMarkers(
gfx::Size coded_size) {
DCHECK(parent_->encoder_task_runner_->BelongsToCurrentThread());
const int kNumDQT = 2;
for (size_t i = 0; i < kNumDQT; ++i) {
const uint8_t kQuantSegment[] = {
0xFF, JPEG_DQT, 0x00,
0x03 + kDctSize, // Segment length:67 (2-byte).
static_cast<uint8_t>(i) // Precision (4-bit high) = 0,
};
for (size_t j = 0; j < sizeof(kQuantSegment); ++j) {
jpeg_markers_.push_back(kQuantSegment[j]);
}
for (size_t j = 0; j < kDctSize; ++j) {
jpeg_markers_.push_back(quantization_table_[i].value[j]);
}
}
const int kNumOfComponents = 3;
const uint8_t kStartOfFrame[] = {
0xFF,
JPEG_SOF0, // Baseline.
0x00,
0x11, // Segment length:17 (2-byte).
8, // Data precision.
static_cast<uint8_t>((coded_size.height() >> 8) & 0xFF),
static_cast<uint8_t>(coded_size.height() & 0xFF),
static_cast<uint8_t>((coded_size.width() >> 8) & 0xFF),
static_cast<uint8_t>(coded_size.width() & 0xFF),
kNumOfComponents,
};
for (size_t i = 0; i < sizeof(kStartOfFrame); ++i) {
jpeg_markers_.push_back(kStartOfFrame[i]);
}
for (size_t i = 0; i < kNumOfComponents; ++i) {
uint8_t h_sample_factor = 1;
uint8_t v_sample_factor = 1;
uint8_t quant_table_number = 1;
if (!i) {
h_sample_factor = 2;
v_sample_factor = 2;
quant_table_number = 0;
}
jpeg_markers_.push_back(i + 1);
jpeg_markers_.push_back((h_sample_factor << 4) | v_sample_factor);
jpeg_markers_.push_back(quant_table_number);
}
static const uint8_t kDcSegment[] = {
0xFF, JPEG_DHT, 0x00,
0x1F, // Segment length:31 (2-byte).
};
static const uint8_t kAcSegment[] = {
0xFF, JPEG_DHT, 0x00,
0xB5, // Segment length:181 (2-byte).
};
const int kNumHuffmanTables = 2;
for (size_t i = 0; i < kNumHuffmanTables; ++i) {
for (size_t j = 0; j < sizeof(kDcSegment); ++j) {
jpeg_markers_.push_back(kDcSegment[j]);
}
jpeg_markers_.push_back(static_cast<uint8_t>(i));
const JpegHuffmanTable& dcTable = kDefaultDcTable[i];
for (size_t j = 0; j < kNumDcRunSizeBits; ++j)
jpeg_markers_.push_back(dcTable.code_length[j]);
for (size_t j = 0; j < kNumDcCodeWordsHuffVal; ++j)
jpeg_markers_.push_back(dcTable.code_value[j]);
for (size_t j = 0; j < sizeof(kAcSegment); ++j) {
jpeg_markers_.push_back(kAcSegment[j]);
}
jpeg_markers_.push_back(0x10 | static_cast<uint8_t>(i));
const JpegHuffmanTable& acTable = kDefaultAcTable[i];
for (size_t j = 0; j < kNumAcRunSizeBits; ++j)
jpeg_markers_.push_back(acTable.code_length[j]);
for (size_t j = 0; j < kNumAcCodeWordsHuffVal; ++j)
jpeg_markers_.push_back(acTable.code_value[j]);
}
static const uint8_t kStartOfScan[] = {
0xFF, JPEG_SOS, 0x00,
0x0C, // Segment Length:12 (2-byte).
0x03 // Number of components in scan.
};
for (size_t i = 0; i < sizeof(kStartOfScan); ++i) {
jpeg_markers_.push_back(kStartOfScan[i]);
}
for (uint8_t i = 0; i < kNumOfComponents; ++i) {
uint8_t dc_table_number = 1;
uint8_t ac_table_number = 1;
if (!i) {
dc_table_number = 0;
ac_table_number = 0;
}
jpeg_markers_.push_back(i + 1);
jpeg_markers_.push_back((dc_table_number << 4) | ac_table_number);
}
jpeg_markers_.push_back(0x00); // 0 for Baseline.
jpeg_markers_.push_back(0x3F); // 63 for Baseline.
jpeg_markers_.push_back(0x00); // 0 for Baseline.
}
|
C
|
Chrome
| 0 |
CVE-2016-9560
|
https://www.cvedetails.com/cve/CVE-2016-9560/
|
CWE-119
|
https://github.com/mdadams/jasper/commit/1abc2e5a401a4bf1d5ca4df91358ce5df111f495
|
1abc2e5a401a4bf1d5ca4df91358ce5df111f495
|
Fixed an array overflow problem in the JPC decoder.
|
static int jpc_dec_tileinit(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
jpc_dec_tcomp_t *tcomp;
int compno;
int rlvlno;
jpc_dec_rlvl_t *rlvl;
jpc_dec_band_t *band;
jpc_dec_prc_t *prc;
int bndno;
jpc_tsfb_band_t *bnd;
int bandno;
jpc_dec_ccp_t *ccp;
int prccnt;
jpc_dec_cblk_t *cblk;
int cblkcnt;
uint_fast32_t tlprcxstart;
uint_fast32_t tlprcystart;
uint_fast32_t brprcxend;
uint_fast32_t brprcyend;
uint_fast32_t tlcbgxstart;
uint_fast32_t tlcbgystart;
uint_fast32_t brcbgxend;
uint_fast32_t brcbgyend;
uint_fast32_t cbgxstart;
uint_fast32_t cbgystart;
uint_fast32_t cbgxend;
uint_fast32_t cbgyend;
uint_fast32_t tlcblkxstart;
uint_fast32_t tlcblkystart;
uint_fast32_t brcblkxend;
uint_fast32_t brcblkyend;
uint_fast32_t cblkxstart;
uint_fast32_t cblkystart;
uint_fast32_t cblkxend;
uint_fast32_t cblkyend;
uint_fast32_t tmpxstart;
uint_fast32_t tmpystart;
uint_fast32_t tmpxend;
uint_fast32_t tmpyend;
jpc_dec_cp_t *cp;
jpc_tsfb_band_t bnds[JPC_MAXBANDS];
jpc_pchg_t *pchg;
int pchgno;
jpc_dec_cmpt_t *cmpt;
cp = tile->cp;
tile->realmode = 0;
if (cp->mctid == JPC_MCT_ICT) {
tile->realmode = 1;
}
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
ccp = &tile->cp->ccps[compno];
if (ccp->qmfbid == JPC_COX_INS) {
tile->realmode = 1;
}
tcomp->numrlvls = ccp->numrlvls;
if (!(tcomp->rlvls = jas_alloc2(tcomp->numrlvls,
sizeof(jpc_dec_rlvl_t)))) {
return -1;
}
if (!(tcomp->data = jas_seq2d_create(JPC_CEILDIV(tile->xstart,
cmpt->hstep), JPC_CEILDIV(tile->ystart, cmpt->vstep),
JPC_CEILDIV(tile->xend, cmpt->hstep), JPC_CEILDIV(tile->yend,
cmpt->vstep)))) {
return -1;
}
if (!(tcomp->tsfb = jpc_cod_gettsfb(ccp->qmfbid,
tcomp->numrlvls - 1))) {
return -1;
}
{
jpc_tsfb_getbands(tcomp->tsfb, jas_seq2d_xstart(tcomp->data),
jas_seq2d_ystart(tcomp->data), jas_seq2d_xend(tcomp->data),
jas_seq2d_yend(tcomp->data), bnds);
}
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
++rlvlno, ++rlvl) {
rlvl->bands = 0;
rlvl->xstart = JPC_CEILDIVPOW2(tcomp->xstart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->ystart = JPC_CEILDIVPOW2(tcomp->ystart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->xend = JPC_CEILDIVPOW2(tcomp->xend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->yend = JPC_CEILDIVPOW2(tcomp->yend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->prcwidthexpn = ccp->prcwidthexpns[rlvlno];
rlvl->prcheightexpn = ccp->prcheightexpns[rlvlno];
tlprcxstart = JPC_FLOORDIVPOW2(rlvl->xstart,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
tlprcystart = JPC_FLOORDIVPOW2(rlvl->ystart,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
brprcxend = JPC_CEILDIVPOW2(rlvl->xend,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
brprcyend = JPC_CEILDIVPOW2(rlvl->yend,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
rlvl->numhprcs = (brprcxend - tlprcxstart) >>
rlvl->prcwidthexpn;
rlvl->numvprcs = (brprcyend - tlprcystart) >>
rlvl->prcheightexpn;
rlvl->numprcs = rlvl->numhprcs * rlvl->numvprcs;
if (rlvl->xstart >= rlvl->xend || rlvl->ystart >= rlvl->yend) {
rlvl->bands = 0;
rlvl->numprcs = 0;
rlvl->numhprcs = 0;
rlvl->numvprcs = 0;
continue;
}
if (!rlvlno) {
tlcbgxstart = tlprcxstart;
tlcbgystart = tlprcystart;
brcbgxend = brprcxend;
brcbgyend = brprcyend;
rlvl->cbgwidthexpn = rlvl->prcwidthexpn;
rlvl->cbgheightexpn = rlvl->prcheightexpn;
} else {
tlcbgxstart = JPC_CEILDIVPOW2(tlprcxstart, 1);
tlcbgystart = JPC_CEILDIVPOW2(tlprcystart, 1);
brcbgxend = JPC_CEILDIVPOW2(brprcxend, 1);
brcbgyend = JPC_CEILDIVPOW2(brprcyend, 1);
rlvl->cbgwidthexpn = rlvl->prcwidthexpn - 1;
rlvl->cbgheightexpn = rlvl->prcheightexpn - 1;
}
rlvl->cblkwidthexpn = JAS_MIN(ccp->cblkwidthexpn,
rlvl->cbgwidthexpn);
rlvl->cblkheightexpn = JAS_MIN(ccp->cblkheightexpn,
rlvl->cbgheightexpn);
rlvl->numbands = (!rlvlno) ? 1 : 3;
if (!(rlvl->bands = jas_alloc2(rlvl->numbands,
sizeof(jpc_dec_band_t)))) {
return -1;
}
for (bandno = 0, band = rlvl->bands;
bandno < rlvl->numbands; ++bandno, ++band) {
bndno = (!rlvlno) ? 0 : (3 * (rlvlno - 1) +
bandno + 1);
bnd = &bnds[bndno];
band->orient = bnd->orient;
band->stepsize = ccp->stepsizes[bndno];
band->analgain = JPC_NOMINALGAIN(ccp->qmfbid,
tcomp->numrlvls - 1, rlvlno, band->orient);
band->absstepsize = jpc_calcabsstepsize(band->stepsize,
cmpt->prec + band->analgain);
band->numbps = ccp->numguardbits +
JPC_QCX_GETEXPN(band->stepsize) - 1;
band->roishift = (ccp->roishift + band->numbps >= JPC_PREC) ?
(JPC_PREC - 1 - band->numbps) : ccp->roishift;
band->data = 0;
band->prcs = 0;
if (bnd->xstart == bnd->xend || bnd->ystart == bnd->yend) {
continue;
}
if (!(band->data = jas_seq2d_create(0, 0, 0, 0))) {
return -1;
}
jas_seq2d_bindsub(band->data, tcomp->data, bnd->locxstart,
bnd->locystart, bnd->locxend, bnd->locyend);
jas_seq2d_setshift(band->data, bnd->xstart, bnd->ystart);
assert(rlvl->numprcs);
if (!(band->prcs = jas_alloc2(rlvl->numprcs,
sizeof(jpc_dec_prc_t)))) {
return -1;
}
/************************************************/
cbgxstart = tlcbgxstart;
cbgystart = tlcbgystart;
for (prccnt = rlvl->numprcs, prc = band->prcs;
prccnt > 0; --prccnt, ++prc) {
cbgxend = cbgxstart + (1 << rlvl->cbgwidthexpn);
cbgyend = cbgystart + (1 << rlvl->cbgheightexpn);
prc->xstart = JAS_MAX(cbgxstart, JAS_CAST(uint_fast32_t,
jas_seq2d_xstart(band->data)));
prc->ystart = JAS_MAX(cbgystart, JAS_CAST(uint_fast32_t,
jas_seq2d_ystart(band->data)));
prc->xend = JAS_MIN(cbgxend, JAS_CAST(uint_fast32_t,
jas_seq2d_xend(band->data)));
prc->yend = JAS_MIN(cbgyend, JAS_CAST(uint_fast32_t,
jas_seq2d_yend(band->data)));
if (prc->xend > prc->xstart && prc->yend > prc->ystart) {
tlcblkxstart = JPC_FLOORDIVPOW2(prc->xstart,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
tlcblkystart = JPC_FLOORDIVPOW2(prc->ystart,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
brcblkxend = JPC_CEILDIVPOW2(prc->xend,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
brcblkyend = JPC_CEILDIVPOW2(prc->yend,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
prc->numhcblks = (brcblkxend - tlcblkxstart) >>
rlvl->cblkwidthexpn;
prc->numvcblks = (brcblkyend - tlcblkystart) >>
rlvl->cblkheightexpn;
prc->numcblks = prc->numhcblks * prc->numvcblks;
assert(prc->numcblks > 0);
if (!(prc->incltagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->numimsbstagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->cblks = jas_alloc2(prc->numcblks,
sizeof(jpc_dec_cblk_t)))) {
return -1;
}
cblkxstart = cbgxstart;
cblkystart = cbgystart;
for (cblkcnt = prc->numcblks, cblk = prc->cblks;
cblkcnt > 0;) {
cblkxend = cblkxstart + (1 << rlvl->cblkwidthexpn);
cblkyend = cblkystart + (1 << rlvl->cblkheightexpn);
tmpxstart = JAS_MAX(cblkxstart, prc->xstart);
tmpystart = JAS_MAX(cblkystart, prc->ystart);
tmpxend = JAS_MIN(cblkxend, prc->xend);
tmpyend = JAS_MIN(cblkyend, prc->yend);
if (tmpxend > tmpxstart && tmpyend > tmpystart) {
cblk->firstpassno = -1;
cblk->mqdec = 0;
cblk->nulldec = 0;
cblk->flags = 0;
cblk->numpasses = 0;
cblk->segs.head = 0;
cblk->segs.tail = 0;
cblk->curseg = 0;
cblk->numimsbs = 0;
cblk->numlenbits = 3;
cblk->flags = 0;
if (!(cblk->data = jas_seq2d_create(0, 0, 0,
0))) {
return -1;
}
jas_seq2d_bindsub(cblk->data, band->data,
tmpxstart, tmpystart, tmpxend, tmpyend);
++cblk;
--cblkcnt;
}
cblkxstart += 1 << rlvl->cblkwidthexpn;
if (cblkxstart >= cbgxend) {
cblkxstart = cbgxstart;
cblkystart += 1 << rlvl->cblkheightexpn;
}
}
} else {
prc->cblks = 0;
prc->incltagtree = 0;
prc->numimsbstagtree = 0;
}
cbgxstart += 1 << rlvl->cbgwidthexpn;
if (cbgxstart >= brcbgxend) {
cbgxstart = tlcbgxstart;
cbgystart += 1 << rlvl->cbgheightexpn;
}
}
/********************************************/
}
}
}
if (!(tile->pi = jpc_dec_pi_create(dec, tile))) {
return -1;
}
for (pchgno = 0; pchgno < jpc_pchglist_numpchgs(tile->cp->pchglist);
++pchgno) {
pchg = jpc_pchg_copy(jpc_pchglist_get(tile->cp->pchglist, pchgno));
assert(pchg);
jpc_pi_addpchg(tile->pi, pchg);
}
jpc_pi_init(tile->pi);
return 0;
}
|
static int jpc_dec_tileinit(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
jpc_dec_tcomp_t *tcomp;
int compno;
int rlvlno;
jpc_dec_rlvl_t *rlvl;
jpc_dec_band_t *band;
jpc_dec_prc_t *prc;
int bndno;
jpc_tsfb_band_t *bnd;
int bandno;
jpc_dec_ccp_t *ccp;
int prccnt;
jpc_dec_cblk_t *cblk;
int cblkcnt;
uint_fast32_t tlprcxstart;
uint_fast32_t tlprcystart;
uint_fast32_t brprcxend;
uint_fast32_t brprcyend;
uint_fast32_t tlcbgxstart;
uint_fast32_t tlcbgystart;
uint_fast32_t brcbgxend;
uint_fast32_t brcbgyend;
uint_fast32_t cbgxstart;
uint_fast32_t cbgystart;
uint_fast32_t cbgxend;
uint_fast32_t cbgyend;
uint_fast32_t tlcblkxstart;
uint_fast32_t tlcblkystart;
uint_fast32_t brcblkxend;
uint_fast32_t brcblkyend;
uint_fast32_t cblkxstart;
uint_fast32_t cblkystart;
uint_fast32_t cblkxend;
uint_fast32_t cblkyend;
uint_fast32_t tmpxstart;
uint_fast32_t tmpystart;
uint_fast32_t tmpxend;
uint_fast32_t tmpyend;
jpc_dec_cp_t *cp;
jpc_tsfb_band_t bnds[64];
jpc_pchg_t *pchg;
int pchgno;
jpc_dec_cmpt_t *cmpt;
cp = tile->cp;
tile->realmode = 0;
if (cp->mctid == JPC_MCT_ICT) {
tile->realmode = 1;
}
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
ccp = &tile->cp->ccps[compno];
if (ccp->qmfbid == JPC_COX_INS) {
tile->realmode = 1;
}
tcomp->numrlvls = ccp->numrlvls;
if (!(tcomp->rlvls = jas_alloc2(tcomp->numrlvls,
sizeof(jpc_dec_rlvl_t)))) {
return -1;
}
if (!(tcomp->data = jas_seq2d_create(JPC_CEILDIV(tile->xstart,
cmpt->hstep), JPC_CEILDIV(tile->ystart, cmpt->vstep),
JPC_CEILDIV(tile->xend, cmpt->hstep), JPC_CEILDIV(tile->yend,
cmpt->vstep)))) {
return -1;
}
if (!(tcomp->tsfb = jpc_cod_gettsfb(ccp->qmfbid,
tcomp->numrlvls - 1))) {
return -1;
}
{
jpc_tsfb_getbands(tcomp->tsfb, jas_seq2d_xstart(tcomp->data),
jas_seq2d_ystart(tcomp->data), jas_seq2d_xend(tcomp->data),
jas_seq2d_yend(tcomp->data), bnds);
}
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
++rlvlno, ++rlvl) {
rlvl->bands = 0;
rlvl->xstart = JPC_CEILDIVPOW2(tcomp->xstart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->ystart = JPC_CEILDIVPOW2(tcomp->ystart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->xend = JPC_CEILDIVPOW2(tcomp->xend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->yend = JPC_CEILDIVPOW2(tcomp->yend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->prcwidthexpn = ccp->prcwidthexpns[rlvlno];
rlvl->prcheightexpn = ccp->prcheightexpns[rlvlno];
tlprcxstart = JPC_FLOORDIVPOW2(rlvl->xstart,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
tlprcystart = JPC_FLOORDIVPOW2(rlvl->ystart,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
brprcxend = JPC_CEILDIVPOW2(rlvl->xend,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
brprcyend = JPC_CEILDIVPOW2(rlvl->yend,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
rlvl->numhprcs = (brprcxend - tlprcxstart) >>
rlvl->prcwidthexpn;
rlvl->numvprcs = (brprcyend - tlprcystart) >>
rlvl->prcheightexpn;
rlvl->numprcs = rlvl->numhprcs * rlvl->numvprcs;
if (rlvl->xstart >= rlvl->xend || rlvl->ystart >= rlvl->yend) {
rlvl->bands = 0;
rlvl->numprcs = 0;
rlvl->numhprcs = 0;
rlvl->numvprcs = 0;
continue;
}
if (!rlvlno) {
tlcbgxstart = tlprcxstart;
tlcbgystart = tlprcystart;
brcbgxend = brprcxend;
brcbgyend = brprcyend;
rlvl->cbgwidthexpn = rlvl->prcwidthexpn;
rlvl->cbgheightexpn = rlvl->prcheightexpn;
} else {
tlcbgxstart = JPC_CEILDIVPOW2(tlprcxstart, 1);
tlcbgystart = JPC_CEILDIVPOW2(tlprcystart, 1);
brcbgxend = JPC_CEILDIVPOW2(brprcxend, 1);
brcbgyend = JPC_CEILDIVPOW2(brprcyend, 1);
rlvl->cbgwidthexpn = rlvl->prcwidthexpn - 1;
rlvl->cbgheightexpn = rlvl->prcheightexpn - 1;
}
rlvl->cblkwidthexpn = JAS_MIN(ccp->cblkwidthexpn,
rlvl->cbgwidthexpn);
rlvl->cblkheightexpn = JAS_MIN(ccp->cblkheightexpn,
rlvl->cbgheightexpn);
rlvl->numbands = (!rlvlno) ? 1 : 3;
if (!(rlvl->bands = jas_alloc2(rlvl->numbands,
sizeof(jpc_dec_band_t)))) {
return -1;
}
for (bandno = 0, band = rlvl->bands;
bandno < rlvl->numbands; ++bandno, ++band) {
bndno = (!rlvlno) ? 0 : (3 * (rlvlno - 1) +
bandno + 1);
bnd = &bnds[bndno];
band->orient = bnd->orient;
band->stepsize = ccp->stepsizes[bndno];
band->analgain = JPC_NOMINALGAIN(ccp->qmfbid,
tcomp->numrlvls - 1, rlvlno, band->orient);
band->absstepsize = jpc_calcabsstepsize(band->stepsize,
cmpt->prec + band->analgain);
band->numbps = ccp->numguardbits +
JPC_QCX_GETEXPN(band->stepsize) - 1;
band->roishift = (ccp->roishift + band->numbps >= JPC_PREC) ?
(JPC_PREC - 1 - band->numbps) : ccp->roishift;
band->data = 0;
band->prcs = 0;
if (bnd->xstart == bnd->xend || bnd->ystart == bnd->yend) {
continue;
}
if (!(band->data = jas_seq2d_create(0, 0, 0, 0))) {
return -1;
}
jas_seq2d_bindsub(band->data, tcomp->data, bnd->locxstart,
bnd->locystart, bnd->locxend, bnd->locyend);
jas_seq2d_setshift(band->data, bnd->xstart, bnd->ystart);
assert(rlvl->numprcs);
if (!(band->prcs = jas_alloc2(rlvl->numprcs,
sizeof(jpc_dec_prc_t)))) {
return -1;
}
/************************************************/
cbgxstart = tlcbgxstart;
cbgystart = tlcbgystart;
for (prccnt = rlvl->numprcs, prc = band->prcs;
prccnt > 0; --prccnt, ++prc) {
cbgxend = cbgxstart + (1 << rlvl->cbgwidthexpn);
cbgyend = cbgystart + (1 << rlvl->cbgheightexpn);
prc->xstart = JAS_MAX(cbgxstart, JAS_CAST(uint_fast32_t,
jas_seq2d_xstart(band->data)));
prc->ystart = JAS_MAX(cbgystart, JAS_CAST(uint_fast32_t,
jas_seq2d_ystart(band->data)));
prc->xend = JAS_MIN(cbgxend, JAS_CAST(uint_fast32_t,
jas_seq2d_xend(band->data)));
prc->yend = JAS_MIN(cbgyend, JAS_CAST(uint_fast32_t,
jas_seq2d_yend(band->data)));
if (prc->xend > prc->xstart && prc->yend > prc->ystart) {
tlcblkxstart = JPC_FLOORDIVPOW2(prc->xstart,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
tlcblkystart = JPC_FLOORDIVPOW2(prc->ystart,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
brcblkxend = JPC_CEILDIVPOW2(prc->xend,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
brcblkyend = JPC_CEILDIVPOW2(prc->yend,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
prc->numhcblks = (brcblkxend - tlcblkxstart) >>
rlvl->cblkwidthexpn;
prc->numvcblks = (brcblkyend - tlcblkystart) >>
rlvl->cblkheightexpn;
prc->numcblks = prc->numhcblks * prc->numvcblks;
assert(prc->numcblks > 0);
if (!(prc->incltagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->numimsbstagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->cblks = jas_alloc2(prc->numcblks,
sizeof(jpc_dec_cblk_t)))) {
return -1;
}
cblkxstart = cbgxstart;
cblkystart = cbgystart;
for (cblkcnt = prc->numcblks, cblk = prc->cblks;
cblkcnt > 0;) {
cblkxend = cblkxstart + (1 << rlvl->cblkwidthexpn);
cblkyend = cblkystart + (1 << rlvl->cblkheightexpn);
tmpxstart = JAS_MAX(cblkxstart, prc->xstart);
tmpystart = JAS_MAX(cblkystart, prc->ystart);
tmpxend = JAS_MIN(cblkxend, prc->xend);
tmpyend = JAS_MIN(cblkyend, prc->yend);
if (tmpxend > tmpxstart && tmpyend > tmpystart) {
cblk->firstpassno = -1;
cblk->mqdec = 0;
cblk->nulldec = 0;
cblk->flags = 0;
cblk->numpasses = 0;
cblk->segs.head = 0;
cblk->segs.tail = 0;
cblk->curseg = 0;
cblk->numimsbs = 0;
cblk->numlenbits = 3;
cblk->flags = 0;
if (!(cblk->data = jas_seq2d_create(0, 0, 0,
0))) {
return -1;
}
jas_seq2d_bindsub(cblk->data, band->data,
tmpxstart, tmpystart, tmpxend, tmpyend);
++cblk;
--cblkcnt;
}
cblkxstart += 1 << rlvl->cblkwidthexpn;
if (cblkxstart >= cbgxend) {
cblkxstart = cbgxstart;
cblkystart += 1 << rlvl->cblkheightexpn;
}
}
} else {
prc->cblks = 0;
prc->incltagtree = 0;
prc->numimsbstagtree = 0;
}
cbgxstart += 1 << rlvl->cbgwidthexpn;
if (cbgxstart >= brcbgxend) {
cbgxstart = tlcbgxstart;
cbgystart += 1 << rlvl->cbgheightexpn;
}
}
/********************************************/
}
}
}
if (!(tile->pi = jpc_dec_pi_create(dec, tile))) {
return -1;
}
for (pchgno = 0; pchgno < jpc_pchglist_numpchgs(tile->cp->pchglist);
++pchgno) {
pchg = jpc_pchg_copy(jpc_pchglist_get(tile->cp->pchglist, pchgno));
assert(pchg);
jpc_pi_addpchg(tile->pi, pchg);
}
jpc_pi_init(tile->pi);
return 0;
}
|
C
|
jasper
| 1 |
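Editor's note on the record above: the one-line difference between the two versions is the bound of bnds — the vulnerable func_before fixes it at 64, while a codestream may legally define more bands. The arithmetic, with the constants taken from JasPer's jpc_cod.h (treat the exact values as assumptions):

```c
/* A component with numrlvls resolution levels has 1 band at level 0
 * and 3 at each further level, i.e. 3*(numrlvls-1)+1 bands in total.
 * JasPer accepts numrlvls up to JPC_MAXRLVLS (33 in jpc_cod.h), so up
 * to 97 bands -- which overruns the fixed bnds[64] in the vulnerable
 * version. The fix sizes the array with the existing bound macro: */
#define JPC_MAXRLVLS	33			/* from jpc_cod.h (assumption) */
#define JPC_MAXBANDS	(3 * JPC_MAXRLVLS - 2)	/* = 97 */
```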
CVE-2011-2860
|
https://www.cvedetails.com/cve/CVE-2011-2860/
|
CWE-399
|
https://github.com/chromium/chromium/commit/6c390601f9ee3436bb32f84772977570265982ea
|
6c390601f9ee3436bb32f84772977570265982ea
|
https://bugs.webkit.org/show_bug.cgi?id=93587
Node::replaceChild() can create bad DOM topology with MutationEvent, Part 2
Reviewed by Kent Tamura.
Source/WebCore:
This is a followup of r124156. replaceChild() has yet another hidden
MutationEvent trigger. This change added a guard for it.
Test: fast/events/mutation-during-replace-child-2.html
* dom/ContainerNode.cpp:
(WebCore::ContainerNode::replaceChild):
LayoutTests:
* fast/events/mutation-during-replace-child-2-expected.txt: Added.
* fast/events/mutation-during-replace-child-2.html: Added.
git-svn-id: svn://svn.chromium.org/blink/trunk@125237 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void dispatchChildRemovalEvents(Node* child)
{
if (child->isInShadowTree())
return;
ASSERT(!eventDispatchForbidden());
InspectorInstrumentation::willRemoveDOMNode(child->document(), child);
RefPtr<Node> c = child;
RefPtr<Document> document = child->document();
if (c->parentNode() && document->hasListenerType(Document::DOMNODEREMOVED_LISTENER))
c->dispatchScopedEvent(MutationEvent::create(eventNames().DOMNodeRemovedEvent, true, c->parentNode()));
if (c->inDocument() && document->hasListenerType(Document::DOMNODEREMOVEDFROMDOCUMENT_LISTENER)) {
for (; c; c = c->traverseNextNode(child))
c->dispatchScopedEvent(MutationEvent::create(eventNames().DOMNodeRemovedFromDocumentEvent, false));
}
}
|
static void dispatchChildRemovalEvents(Node* child)
{
if (child->isInShadowTree())
return;
ASSERT(!eventDispatchForbidden());
InspectorInstrumentation::willRemoveDOMNode(child->document(), child);
RefPtr<Node> c = child;
RefPtr<Document> document = child->document();
if (c->parentNode() && document->hasListenerType(Document::DOMNODEREMOVED_LISTENER))
c->dispatchScopedEvent(MutationEvent::create(eventNames().DOMNodeRemovedEvent, true, c->parentNode()));
if (c->inDocument() && document->hasListenerType(Document::DOMNODEREMOVEDFROMDOCUMENT_LISTENER)) {
for (; c; c = c->traverseNextNode(child))
c->dispatchScopedEvent(MutationEvent::create(eventNames().DOMNodeRemovedFromDocumentEvent, false));
}
}
|
C
|
Chrome
| 0 |
CVE-2013-0886
|
https://www.cvedetails.com/cve/CVE-2013-0886/
| null |
https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76
|
18d67244984a574ba2dd8779faabc0e3e34f4b76
|
Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
TBR=sky@chromium.org
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
|
bool RenderWidgetHostViewAura::SwapBuffersPrepare(
const gfx::Rect& surface_rect,
const gfx::Rect& damage_rect,
BufferPresentedParams* params) {
DCHECK(params->surface_handle);
DCHECK(!params->texture_to_produce);
if (last_swapped_surface_size_ != surface_rect.size()) {
// The surface could have shrunk since we skipped an update, in which
// case we can expect a full update.
DLOG_IF(ERROR, damage_rect != surface_rect) << "Expected full damage rect";
skipped_damage_.setEmpty();
last_swapped_surface_size_ = surface_rect.size();
}
if (ShouldSkipFrame(surface_rect.size())) {
skipped_damage_.op(RectToSkIRect(damage_rect), SkRegion::kUnion_Op);
InsertSyncPointAndACK(*params);
return false;
}
DCHECK(!current_surface_ || image_transport_clients_.find(current_surface_) !=
image_transport_clients_.end());
if (current_surface_)
params->texture_to_produce = image_transport_clients_[current_surface_];
std::swap(current_surface_, params->surface_handle);
DCHECK(image_transport_clients_.find(current_surface_) !=
image_transport_clients_.end());
image_transport_clients_[current_surface_]->Consume(surface_rect.size());
released_front_lock_ = NULL;
UpdateExternalTexture();
return true;
}
void RenderWidgetHostViewAura::SwapBuffersCompleted(
const BufferPresentedParams& params) {
ui::Compositor* compositor = GetCompositor();
if (!compositor) {
InsertSyncPointAndACK(params);
} else {
// Add sending an ACK to the list of things to do OnCompositingDidCommit
can_lock_compositor_ = NO_PENDING_COMMIT;
on_compositing_did_commit_callbacks_.push_back(
base::Bind(&RenderWidgetHostViewAura::InsertSyncPointAndACK, params));
if (!compositor->HasObserver(this))
compositor->AddObserver(this);
}
}
void RenderWidgetHostViewAura::AcceleratedSurfaceBuffersSwapped(
const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params_in_pixel,
int gpu_host_id) {
const gfx::Rect surface_rect = gfx::Rect(gfx::Point(), params_in_pixel.size);
BufferPresentedParams ack_params(
params_in_pixel.route_id, gpu_host_id, params_in_pixel.surface_handle);
if (!SwapBuffersPrepare(surface_rect, surface_rect, &ack_params))
return;
previous_damage_.setRect(RectToSkIRect(surface_rect));
skipped_damage_.setEmpty();
ui::Compositor* compositor = GetCompositor();
if (compositor) {
gfx::Size surface_size = ConvertSizeToDIP(this, params_in_pixel.size);
window_->SchedulePaintInRect(gfx::Rect(surface_size));
}
SwapBuffersCompleted(ack_params);
}
|
void RenderWidgetHostViewAura::AcceleratedSurfaceBuffersSwapped(
const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params_in_pixel,
int gpu_host_id) {
surface_route_id_ = params_in_pixel.route_id;
if (params_in_pixel.protection_state_id &&
params_in_pixel.protection_state_id != protection_state_id_) {
DCHECK(!current_surface_);
if (!params_in_pixel.skip_ack)
InsertSyncPointAndACK(params_in_pixel.route_id, gpu_host_id, false, NULL);
return;
}
if (ShouldFastACK(params_in_pixel.surface_handle)) {
if (!params_in_pixel.skip_ack)
InsertSyncPointAndACK(params_in_pixel.route_id, gpu_host_id, false, NULL);
return;
}
current_surface_ = params_in_pixel.surface_handle;
if (!params_in_pixel.skip_ack)
released_front_lock_ = NULL;
UpdateExternalTexture();
ui::Compositor* compositor = GetCompositor();
if (!compositor) {
if (!params_in_pixel.skip_ack)
InsertSyncPointAndACK(params_in_pixel.route_id, gpu_host_id, true, NULL);
} else {
DCHECK(image_transport_clients_.find(params_in_pixel.surface_handle) !=
image_transport_clients_.end());
gfx::Size surface_size_in_pixel =
image_transport_clients_[params_in_pixel.surface_handle]->size();
gfx::Size surface_size = ConvertSizeToDIP(this, surface_size_in_pixel);
window_->SchedulePaintInRect(gfx::Rect(surface_size));
if (!params_in_pixel.skip_ack) {
can_lock_compositor_ = NO_PENDING_COMMIT;
on_compositing_did_commit_callbacks_.push_back(
base::Bind(&RenderWidgetHostViewAura::InsertSyncPointAndACK,
params_in_pixel.route_id,
gpu_host_id,
true));
if (!compositor->HasObserver(this))
compositor->AddObserver(this);
}
}
}
|
C
|
Chrome
| 1 |
CVE-2012-5148
|
https://www.cvedetails.com/cve/CVE-2012-5148/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
|
e89cfcb9090e8c98129ae9160c513f504db74599
|
Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
|
bool Browser::TakeFocus(content::WebContents* source,
bool reverse) {
content::NotificationService::current()->Notify(
chrome::NOTIFICATION_FOCUS_RETURNED_TO_BROWSER,
content::Source<Browser>(this),
content::NotificationService::NoDetails());
return false;
}
|
bool Browser::TakeFocus(content::WebContents* source,
bool reverse) {
content::NotificationService::current()->Notify(
chrome::NOTIFICATION_FOCUS_RETURNED_TO_BROWSER,
content::Source<Browser>(this),
content::NotificationService::NoDetails());
return false;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/6c5d779aaf0dec9628da8a20751e95fd09554b14
|
6c5d779aaf0dec9628da8a20751e95fd09554b14
|
Move the cancellation of blocked requests code from ResourceDispatcherHost::~ResourceDispatcherHost() to ResourceDispatcherHost::OnShutdown().
This causes the requests to be cancelled on the IO thread rather than the UI thread, which is important since cancellation may delete the URLRequest (and URLRequests should not outlive the IO thread).
BUG=39243
Review URL: http://codereview.chromium.org/1213004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42575 0039d316-1c4b-4281-b951-d872f2087c98
|
void ResourceDispatcherHost::OnReceivedRedirect(URLRequest* request,
const GURL& new_url,
bool* defer_redirect) {
RESOURCE_LOG("OnReceivedRedirect: " << request->url().spec());
ResourceDispatcherHostRequestInfo* info = InfoForRequest(request);
DCHECK(request->status().is_success());
if (info->process_type() != ChildProcessInfo::PLUGIN_PROCESS &&
!ChildProcessSecurityPolicy::GetInstance()->
CanRequestURL(info->child_id(), new_url)) {
LOG(INFO) << "Denied unauthorized request for " <<
new_url.possibly_invalid_spec();
CancelRequest(info->child_id(), info->request_id(), false);
return;
}
NotifyReceivedRedirect(request, info->child_id(), new_url);
if (HandleExternalProtocol(info->request_id(), info->child_id(),
info->route_id(), new_url,
info->resource_type(), info->resource_handler())) {
RemovePendingRequest(info->child_id(), info->request_id());
return;
}
scoped_refptr<ResourceResponse> response = new ResourceResponse;
PopulateResourceResponse(request, info->filter_policy(), response);
if (!info->resource_handler()->OnRequestRedirected(info->request_id(),
new_url,
response, defer_redirect))
CancelRequest(info->child_id(), info->request_id(), false);
}
|
void ResourceDispatcherHost::OnReceivedRedirect(URLRequest* request,
const GURL& new_url,
bool* defer_redirect) {
RESOURCE_LOG("OnReceivedRedirect: " << request->url().spec());
ResourceDispatcherHostRequestInfo* info = InfoForRequest(request);
DCHECK(request->status().is_success());
if (info->process_type() != ChildProcessInfo::PLUGIN_PROCESS &&
!ChildProcessSecurityPolicy::GetInstance()->
CanRequestURL(info->child_id(), new_url)) {
LOG(INFO) << "Denied unauthorized request for " <<
new_url.possibly_invalid_spec();
CancelRequest(info->child_id(), info->request_id(), false);
return;
}
NotifyReceivedRedirect(request, info->child_id(), new_url);
if (HandleExternalProtocol(info->request_id(), info->child_id(),
info->route_id(), new_url,
info->resource_type(), info->resource_handler())) {
RemovePendingRequest(info->child_id(), info->request_id());
return;
}
scoped_refptr<ResourceResponse> response = new ResourceResponse;
PopulateResourceResponse(request, info->filter_policy(), response);
if (!info->resource_handler()->OnRequestRedirected(info->request_id(),
new_url,
response, defer_redirect))
CancelRequest(info->child_id(), info->request_id(), false);
}
|
C
|
Chrome
| 0 |
CVE-2016-2451
|
https://www.cvedetails.com/cve/CVE-2016-2451/
|
CWE-264
|
https://android.googlesource.com/platform/frameworks/av/+/f9ed2fe6d61259e779a37d4c2d7edb33a1c1f8ba
|
f9ed2fe6d61259e779a37d4c2d7edb33a1c1f8ba
|
Add VPX output buffer size check
and handle dead observers more gracefully
Bug: 27597103
Change-Id: Id7acb25d5ef69b197da15ec200a9e4f9e7b03518
|
bool OMX::livesLocally(node_id /* node */, pid_t pid) {
return pid == getpid();
}
|
bool OMX::livesLocally(node_id /* node */, pid_t pid) {
return pid == getpid();
}
|
C
|
Android
| 0 |
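Editor's note on the record above: the sampled function is unrelated to the fix, so here is a generic sketch of the check the commit title describes ("Add VPX output buffer size check"); the helper name and the I420 size formula are assumptions, not the actual SoftVPX code.

```c
#include <stddef.h>
#include <stdint.h>

/* Generic output-buffer guard of the kind the commit title describes
 * (illustrative only). An I420/NV12 frame of w x h pixels needs
 * w*h*3/2 bytes; a decoder must refuse to copy a decoded frame into a
 * smaller client buffer rather than write past its end. */
static int output_buffer_fits(uint32_t w, uint32_t h, size_t buf_size)
{
	uint64_t needed = (uint64_t)w * h * 3 / 2;	/* 64-bit: no overflow */
	return needed <= buf_size;
}
```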
CVE-2014-8130
|
https://www.cvedetails.com/cve/CVE-2014-8130/
|
CWE-369
|
https://github.com/vadz/libtiff/commit/3c5eb8b1be544e41d2c336191bc4936300ad7543
|
3c5eb8b1be544e41d2c336191bc4936300ad7543
|
* libtiff/tif_{unix,vms,win32}.c (_TIFFmalloc): ANSI C does not
require malloc() to return NULL pointer if requested allocation
size is zero. Assure that _TIFFmalloc does.
|
_tiffSizeProc(thandle_t fd)
{
struct stat sb;
if (fstat((int)fd,&sb)<0)
return(0);
else
return((uint64)sb.st_size);
}
|
_tiffSizeProc(thandle_t fd)
{
struct stat sb;
if (fstat((int)fd,&sb)<0)
return(0);
else
return((uint64)sb.st_size);
}
|
C
|
libtiff
| 0 |
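Editor's note on the record above: the sampled _tiffSizeProc is untouched by the commit; the change described in the ChangeLog entry is to _TIFFmalloc in tif_{unix,vms,win32}.c. A sketch of that fix, reproduced from memory (treat the exact form as an assumption): it forces the NULL return that ANSI C leaves implementation-defined for zero-byte requests.

```c
#include <stdlib.h>

typedef signed long long tmsize_t;	/* stand-in; libtiff defines this in tiffio.h */

/* malloc(0) may legally return a non-NULL pointer, so callers that
 * treat NULL as the only failure signal would proceed with a
 * zero-sized buffer; reject zero-byte allocations explicitly. */
void *
_TIFFmalloc(tmsize_t s)
{
	if (s == 0)
		return ((void *) NULL);
	return (malloc((size_t) s));
}
```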
CVE-2016-7097
|
https://www.cvedetails.com/cve/CVE-2016-7097/
|
CWE-285
|
https://github.com/torvalds/linux/commit/073931017b49d9458aa351605b43a7e34598caef
|
073931017b49d9458aa351605b43a7e34598caef
|
posix_acl: Clear SGID bit when setting file permissions
When file permissions are modified via chmod(2) and the user is not in
the owning group or capable of CAP_FSETID, the setgid bit is cleared in
inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file
permissions as well as the new ACL, but doesn't clear the setgid bit in
a similar way; this allows the chmod(2) check to be bypassed. Fix that.
References: CVE-2016-7097
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
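The mechanism mirrors what chmod(2) already does in inode_change_ok(): if the caller is neither in the owning group nor CAP_FSETID-capable, drop S_ISGID from the mode derived from the ACL. A simplified userspace-style sketch, assuming boolean stand-ins for the kernel's group and capability checks (the patch itself adds a kernel helper with this logic):
#include <sys/stat.h>
#include <stdbool.h>

mode_t acl_derived_mode(mode_t new_mode, bool in_owning_group,
                        bool capable_fsetid)
{
    /* Mirror chmod(2): an SGID bit the caller could not have set via
     * chmod must be cleared on the ACL path too. */
    if ((new_mode & S_ISGID) && !in_owning_group && !capable_fsetid)
        new_mode &= ~S_ISGID;
    return new_mode;
}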
|
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
{
struct posix_acl *default_acl, *acl;
int error;
error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (error)
return error;
if (default_acl) {
error = __ext4_set_acl(handle, inode, ACL_TYPE_DEFAULT,
default_acl);
posix_acl_release(default_acl);
}
if (acl) {
if (!error)
error = __ext4_set_acl(handle, inode, ACL_TYPE_ACCESS,
acl);
posix_acl_release(acl);
}
return error;
}
|
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
{
struct posix_acl *default_acl, *acl;
int error;
error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (error)
return error;
if (default_acl) {
error = __ext4_set_acl(handle, inode, ACL_TYPE_DEFAULT,
default_acl);
posix_acl_release(default_acl);
}
if (acl) {
if (!error)
error = __ext4_set_acl(handle, inode, ACL_TYPE_ACCESS,
acl);
posix_acl_release(acl);
}
return error;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/2bfb2b8299e2fb6a432390a93a99a85fed1d29c9
|
2bfb2b8299e2fb6a432390a93a99a85fed1d29c9
|
Fix erroneous semicolon causing build failure: if statement has empty body [-Werror,-Wempty-body]
https://bugs.webkit.org/show_bug.cgi?id=108241
Patch by Kiran Muppala <cmuppala@apple.com> on 2013-01-29
Reviewed by Anders Carlsson.
* UIProcess/WebProcessProxy.cpp:
(WebKit::WebProcessProxy::addExistingWebPage): Remove erroneous
semicolon following the if condition.
git-svn-id: svn://svn.chromium.org/blink/trunk@141180 bbb929c8-8fbe-4397-9dbb-9b2b20218538
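The bug class is easy to reproduce, and clang's -Wempty-body flags exactly this shape. A minimal illustration with hypothetical names:
#include <stdio.h>

void buggy(int attached)
{
    if (!attached);                       /* stray ';' ends the if here */
        printf("runs unconditionally\n"); /* indented, but not guarded */
}

void fixed(int attached)
{
    if (!attached)
        printf("runs only when !attached\n");
}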
|
void WebProcessProxy::assumeReadAccessToBaseURL(const String& urlString)
{
KURL url(KURL(), urlString);
if (!url.isLocalFile())
return;
KURL baseURL(KURL(), url.baseAsString());
m_localPathsWithAssumedReadAccess.add(baseURL.fileSystemPath());
}
|
void WebProcessProxy::assumeReadAccessToBaseURL(const String& urlString)
{
KURL url(KURL(), urlString);
if (!url.isLocalFile())
return;
KURL baseURL(KURL(), url.baseAsString());
m_localPathsWithAssumedReadAccess.add(baseURL.fileSystemPath());
}
|
C
|
Chrome
| 0 |
CVE-2014-0224
|
https://www.cvedetails.com/cve/CVE-2014-0224/
|
CWE-310
|
https://git.openssl.org/gitweb/?p=openssl.git;a=commit;h=bc8923b1ec9c467755cd86f7848c50ee8812e441
|
bc8923b1ec9c467755cd86f7848c50ee8812e441
| null |
int ssl3_get_client_hello(SSL *s)
{
int i,j,ok,al,ret= -1;
unsigned int cookie_len;
long n;
unsigned long id;
unsigned char *p,*d,*q;
SSL_CIPHER *c;
#ifndef OPENSSL_NO_COMP
SSL_COMP *comp=NULL;
#endif
STACK_OF(SSL_CIPHER) *ciphers=NULL;
/* We do this so that we will respond with our native type.
* If we are TLSv1 and we get SSLv3, we will respond with TLSv1,
* This down switching should be handled by a different method.
* If we are SSLv3, we will respond with SSLv3, even if prompted with
* TLSv1.
*/
if (s->state == SSL3_ST_SR_CLNT_HELLO_A
)
{
s->state=SSL3_ST_SR_CLNT_HELLO_B;
}
s->first_packet=1;
n=s->method->ssl_get_message(s,
SSL3_ST_SR_CLNT_HELLO_B,
SSL3_ST_SR_CLNT_HELLO_C,
SSL3_MT_CLIENT_HELLO,
SSL3_RT_MAX_PLAIN_LENGTH,
&ok);
if (!ok) return((int)n);
s->first_packet=0;
d=p=(unsigned char *)s->init_msg;
/* use version from inside client hello, not from record header
* (may differ: see RFC 2246, Appendix E, second paragraph) */
s->client_version=(((int)p[0])<<8)|(int)p[1];
p+=2;
if ((s->version == DTLS1_VERSION && s->client_version > s->version) ||
(s->version != DTLS1_VERSION && s->client_version < s->version))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_WRONG_VERSION_NUMBER);
if ((s->client_version>>8) == SSL3_VERSION_MAJOR &&
!s->enc_write_ctx && !s->write_hash)
{
/* similar to ssl3_get_record, send alert using remote version number */
s->version = s->client_version;
}
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
/* If we require cookies and this ClientHello doesn't
* contain one, just return since we do not want to
* allocate any memory yet. So check cookie length...
*/
if (SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE)
{
unsigned int session_length, cookie_length;
session_length = *(p + SSL3_RANDOM_SIZE);
cookie_length = *(p + SSL3_RANDOM_SIZE + session_length + 1);
if (cookie_length == 0)
return 1;
}
/* load the client random */
memcpy(s->s3->client_random,p,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
/* get the session-id */
j= *(p++);
s->hit=0;
/* Versions before 0.9.7 always allow clients to resume sessions in renegotiation.
* 0.9.7 and later allow this by default, but optionally ignore resumption requests
* with flag SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION (it's a new flag rather
* than a change to default behavior so that applications relying on this for security
* won't even compile against older library versions).
*
* 1.0.1 and later also have a function SSL_renegotiate_abbreviated() to request
* renegotiation but not a new session (s->new_session remains unset): for servers,
* this essentially just means that the SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
* setting will be ignored.
*/
if ((s->new_session && (s->options & SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION)))
{
if (!ssl_get_new_session(s,1))
goto err;
}
else
{
i=ssl_get_prev_session(s, p, j, d + n);
if (i == 1)
{ /* previous session */
s->hit=1;
}
else if (i == -1)
goto err;
else /* i == 0 */
{
if (!ssl_get_new_session(s,1))
goto err;
}
}
p+=j;
if (s->version == DTLS1_VERSION || s->version == DTLS1_BAD_VER)
{
/* cookie stuff */
cookie_len = *(p++);
/*
* The ClientHello may contain a cookie even if the
* HelloVerify message has not been sent--make sure that it
* does not cause an overflow.
*/
if ( cookie_len > sizeof(s->d1->rcvd_cookie))
{
/* too much data */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* verify the cookie if appropriate option is set. */
if ((SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE) &&
cookie_len > 0)
{
memcpy(s->d1->rcvd_cookie, p, cookie_len);
if ( s->ctx->app_verify_cookie_cb != NULL)
{
if ( s->ctx->app_verify_cookie_cb(s, s->d1->rcvd_cookie,
cookie_len) == 0)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,
SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* else cookie verification succeeded */
}
else if ( memcmp(s->d1->rcvd_cookie, s->d1->cookie,
s->d1->cookie_len) != 0) /* default verification */
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,
SSL_R_COOKIE_MISMATCH);
goto f_err;
}
ret = 2;
}
p += cookie_len;
}
n2s(p,i);
if ((i == 0) && (j != 0))
{
/* we need a cipher if we are not resuming a session */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_CIPHERS_SPECIFIED);
goto f_err;
}
if ((p+i) >= (d+n))
{
/* not enough data */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
if ((i > 0) && (ssl_bytes_to_cipher_list(s,p,i,&(ciphers))
== NULL))
{
goto err;
}
p+=i;
/* If it is a hit, check that the cipher is in the list */
if ((s->hit) && (i > 0))
{
j=0;
id=s->session->cipher->id;
#ifdef CIPHER_DEBUG
printf("client sent %d ciphers\n",sk_num(ciphers));
#endif
for (i=0; i<sk_SSL_CIPHER_num(ciphers); i++)
{
c=sk_SSL_CIPHER_value(ciphers,i);
#ifdef CIPHER_DEBUG
printf("client [%2d of %2d]:%s\n",
i,sk_num(ciphers),SSL_CIPHER_get_name(c));
#endif
if (c->id == id)
{
j=1;
break;
}
}
/* Disabled because it can be used in a ciphersuite downgrade
* attack: CVE-2010-4180.
*/
#if 0
if (j == 0 && (s->options & SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG) && (sk_SSL_CIPHER_num(ciphers) == 1))
{
/* Special case as client bug workaround: the previously used cipher may
* not be in the current list, the client instead might be trying to
* continue using a cipher that before wasn't chosen due to server
* preferences. We'll have to reject the connection if the cipher is not
* enabled, though. */
c = sk_SSL_CIPHER_value(ciphers, 0);
if (sk_SSL_CIPHER_find(SSL_get_ciphers(s), c) >= 0)
{
s->session->cipher = c;
j = 1;
}
}
#endif
if (j == 0)
{
/* we need to have the cipher in the cipher
* list if we are asked to reuse it */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_REQUIRED_CIPHER_MISSING);
goto f_err;
}
}
/* compression */
i= *(p++);
if ((p+i) > (d+n))
{
/* not enough data */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
q=p;
for (j=0; j<i; j++)
{
if (p[j] == 0) break;
}
p+=i;
if (j >= i)
{
/* no compress */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_COMPRESSION_SPECIFIED);
goto f_err;
}
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions*/
if (s->version >= SSL3_VERSION)
{
if (!ssl_parse_clienthello_tlsext(s,&p,d,n, &al))
{
/* 'al' set by ssl_parse_clienthello_tlsext */
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_PARSE_TLSEXT);
goto f_err;
}
}
if (ssl_check_clienthello_tlsext_early(s) <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
/* Check if we want to use external pre-shared secret for this
* handshake for not reused session only. We need to generate
* server_random before calling tls_session_secret_cb in order to allow
* SessionTicket processing to use it in key derivation. */
{
unsigned char *pos;
pos=s->s3->server_random;
if (ssl_fill_hello_random(s, 1, pos, SSL3_RANDOM_SIZE) <= 0)
{
al=SSL_AD_INTERNAL_ERROR;
goto f_err;
}
}
if (!s->hit && s->version >= TLS1_VERSION && s->tls_session_secret_cb)
{
SSL_CIPHER *pref_cipher=NULL;
s->session->master_key_length=sizeof(s->session->master_key);
if(s->tls_session_secret_cb(s, s->session->master_key, &s->session->master_key_length,
ciphers, &pref_cipher, s->tls_session_secret_cb_arg))
{
s->hit=1;
s->session->ciphers=ciphers;
s->session->verify_result=X509_V_OK;
ciphers=NULL;
/* check if some cipher was preferred by call back */
pref_cipher=pref_cipher ? pref_cipher : ssl3_choose_cipher(s, s->session->ciphers, SSL_get_ciphers(s));
if (pref_cipher == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_SHARED_CIPHER);
goto f_err;
}
s->session->cipher=pref_cipher;
if (s->cipher_list)
sk_SSL_CIPHER_free(s->cipher_list);
if (s->cipher_list_by_id)
sk_SSL_CIPHER_free(s->cipher_list_by_id);
s->cipher_list = sk_SSL_CIPHER_dup(s->session->ciphers);
s->cipher_list_by_id = sk_SSL_CIPHER_dup(s->session->ciphers);
}
}
#endif
/* Worst case, we will use the NULL compression, but if we have other
* options, we will now look for them. We have i-1 compression
* algorithms from the client, starting at q. */
s->s3->tmp.new_compression=NULL;
#ifndef OPENSSL_NO_COMP
/* This only happens if we have a cache hit */
if (s->session->compress_meth != 0)
{
int m, comp_id = s->session->compress_meth;
/* Perform sanity checks on resumed compression algorithm */
/* Can't disable compression */
if (s->options & SSL_OP_NO_COMPRESSION)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
/* Look for resumed compression method */
for (m = 0; m < sk_SSL_COMP_num(s->ctx->comp_methods); m++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,m);
if (comp_id == comp->id)
{
s->s3->tmp.new_compression=comp;
break;
}
}
if (s->s3->tmp.new_compression == NULL)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INVALID_COMPRESSION_ALGORITHM);
goto f_err;
}
/* Look for resumed method in compression list */
for (m = 0; m < i; m++)
{
if (q[m] == comp_id)
break;
}
if (m >= i)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_REQUIRED_COMPRESSSION_ALGORITHM_MISSING);
goto f_err;
}
}
else if (s->hit)
comp = NULL;
else if (!(s->options & SSL_OP_NO_COMPRESSION) && s->ctx->comp_methods)
{ /* See if we have a match */
int m,nn,o,v,done=0;
nn=sk_SSL_COMP_num(s->ctx->comp_methods);
for (m=0; m<nn; m++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,m);
v=comp->id;
for (o=0; o<i; o++)
{
if (v == q[o])
{
done=1;
break;
}
}
if (done) break;
}
if (done)
s->s3->tmp.new_compression=comp;
else
comp=NULL;
}
#else
/* If compression is disabled we'd better not try to resume a session
* using compression.
*/
if (s->session->compress_meth != 0)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
#endif
/* Given s->session->ciphers and SSL_get_ciphers, we must
* pick a cipher */
if (!s->hit)
{
#ifdef OPENSSL_NO_COMP
s->session->compress_meth=0;
#else
s->session->compress_meth=(comp == NULL)?0:comp->id;
#endif
if (s->session->ciphers != NULL)
sk_SSL_CIPHER_free(s->session->ciphers);
s->session->ciphers=ciphers;
if (ciphers == NULL)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_CIPHERS_PASSED);
goto f_err;
}
ciphers=NULL;
c=ssl3_choose_cipher(s,s->session->ciphers,
SSL_get_ciphers(s));
if (c == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_SHARED_CIPHER);
goto f_err;
}
s->s3->tmp.new_cipher=c;
}
else
{
/* Session-id reuse */
#ifdef REUSE_CIPHER_BUG
STACK_OF(SSL_CIPHER) *sk;
SSL_CIPHER *nc=NULL;
SSL_CIPHER *ec=NULL;
if (s->options & SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG)
{
sk=s->session->ciphers;
for (i=0; i<sk_SSL_CIPHER_num(sk); i++)
{
c=sk_SSL_CIPHER_value(sk,i);
if (c->algorithm_enc & SSL_eNULL)
nc=c;
if (SSL_C_IS_EXPORT(c))
ec=c;
}
if (nc != NULL)
s->s3->tmp.new_cipher=nc;
else if (ec != NULL)
s->s3->tmp.new_cipher=ec;
else
s->s3->tmp.new_cipher=s->session->cipher;
}
else
#endif
s->s3->tmp.new_cipher=s->session->cipher;
}
if (TLS1_get_version(s) < TLS1_2_VERSION || !(s->verify_mode & SSL_VERIFY_PEER))
{
if (!ssl3_digest_cached_records(s))
{
al = SSL_AD_INTERNAL_ERROR;
goto f_err;
}
}
/* we now have the following setup.
* client_random
* cipher_list - our prefered list of ciphers
* ciphers - the clients prefered list of ciphers
* compression - basically ignored right now
* ssl version is set - sslv3
* s->session - The ssl session has been setup.
* s->hit - session reuse flag
* s->tmp.new_cipher - the new cipher to use.
*/
/* Handles TLS extensions that we couldn't check earlier */
if (s->version >= SSL3_VERSION)
{
if (ssl_check_clienthello_tlsext_late(s) <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
}
if (ret < 0) ret=1;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
err:
if (ciphers != NULL) sk_SSL_CIPHER_free(ciphers);
return(ret);
}
|
int ssl3_get_client_hello(SSL *s)
{
int i,j,ok,al,ret= -1;
unsigned int cookie_len;
long n;
unsigned long id;
unsigned char *p,*d,*q;
SSL_CIPHER *c;
#ifndef OPENSSL_NO_COMP
SSL_COMP *comp=NULL;
#endif
STACK_OF(SSL_CIPHER) *ciphers=NULL;
/* We do this so that we will respond with our native type.
* If we are TLSv1 and we get SSLv3, we will respond with TLSv1,
* This down switching should be handled by a different method.
* If we are SSLv3, we will respond with SSLv3, even if prompted with
* TLSv1.
*/
if (s->state == SSL3_ST_SR_CLNT_HELLO_A
)
{
s->state=SSL3_ST_SR_CLNT_HELLO_B;
}
s->first_packet=1;
n=s->method->ssl_get_message(s,
SSL3_ST_SR_CLNT_HELLO_B,
SSL3_ST_SR_CLNT_HELLO_C,
SSL3_MT_CLIENT_HELLO,
SSL3_RT_MAX_PLAIN_LENGTH,
&ok);
if (!ok) return((int)n);
s->first_packet=0;
d=p=(unsigned char *)s->init_msg;
/* use version from inside client hello, not from record header
* (may differ: see RFC 2246, Appendix E, second paragraph) */
s->client_version=(((int)p[0])<<8)|(int)p[1];
p+=2;
if ((s->version == DTLS1_VERSION && s->client_version > s->version) ||
(s->version != DTLS1_VERSION && s->client_version < s->version))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_WRONG_VERSION_NUMBER);
if ((s->client_version>>8) == SSL3_VERSION_MAJOR &&
!s->enc_write_ctx && !s->write_hash)
{
/* similar to ssl3_get_record, send alert using remote version number */
s->version = s->client_version;
}
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
/* If we require cookies and this ClientHello doesn't
* contain one, just return since we do not want to
* allocate any memory yet. So check cookie length...
*/
if (SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE)
{
unsigned int session_length, cookie_length;
session_length = *(p + SSL3_RANDOM_SIZE);
cookie_length = *(p + SSL3_RANDOM_SIZE + session_length + 1);
if (cookie_length == 0)
return 1;
}
/* load the client random */
memcpy(s->s3->client_random,p,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
/* get the session-id */
j= *(p++);
s->hit=0;
/* Versions before 0.9.7 always allow clients to resume sessions in renegotiation.
* 0.9.7 and later allow this by default, but optionally ignore resumption requests
* with flag SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION (it's a new flag rather
* than a change to default behavior so that applications relying on this for security
* won't even compile against older library versions).
*
* 1.0.1 and later also have a function SSL_renegotiate_abbreviated() to request
* renegotiation but not a new session (s->new_session remains unset): for servers,
* this essentially just means that the SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
* setting will be ignored.
*/
if ((s->new_session && (s->options & SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION)))
{
if (!ssl_get_new_session(s,1))
goto err;
}
else
{
i=ssl_get_prev_session(s, p, j, d + n);
if (i == 1)
{ /* previous session */
s->hit=1;
}
else if (i == -1)
goto err;
else /* i == 0 */
{
if (!ssl_get_new_session(s,1))
goto err;
}
}
p+=j;
if (s->version == DTLS1_VERSION || s->version == DTLS1_BAD_VER)
{
/* cookie stuff */
cookie_len = *(p++);
/*
* The ClientHello may contain a cookie even if the
* HelloVerify message has not been sent--make sure that it
* does not cause an overflow.
*/
if ( cookie_len > sizeof(s->d1->rcvd_cookie))
{
/* too much data */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* verify the cookie if appropriate option is set. */
if ((SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE) &&
cookie_len > 0)
{
memcpy(s->d1->rcvd_cookie, p, cookie_len);
if ( s->ctx->app_verify_cookie_cb != NULL)
{
if ( s->ctx->app_verify_cookie_cb(s, s->d1->rcvd_cookie,
cookie_len) == 0)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,
SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* else cookie verification succeeded */
}
else if ( memcmp(s->d1->rcvd_cookie, s->d1->cookie,
s->d1->cookie_len) != 0) /* default verification */
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,
SSL_R_COOKIE_MISMATCH);
goto f_err;
}
ret = 2;
}
p += cookie_len;
}
n2s(p,i);
if ((i == 0) && (j != 0))
{
/* we need a cipher if we are not resuming a session */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_CIPHERS_SPECIFIED);
goto f_err;
}
if ((p+i) >= (d+n))
{
/* not enough data */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
if ((i > 0) && (ssl_bytes_to_cipher_list(s,p,i,&(ciphers))
== NULL))
{
goto err;
}
p+=i;
/* If it is a hit, check that the cipher is in the list */
if ((s->hit) && (i > 0))
{
j=0;
id=s->session->cipher->id;
#ifdef CIPHER_DEBUG
printf("client sent %d ciphers\n",sk_num(ciphers));
#endif
for (i=0; i<sk_SSL_CIPHER_num(ciphers); i++)
{
c=sk_SSL_CIPHER_value(ciphers,i);
#ifdef CIPHER_DEBUG
printf("client [%2d of %2d]:%s\n",
i,sk_num(ciphers),SSL_CIPHER_get_name(c));
#endif
if (c->id == id)
{
j=1;
break;
}
}
/* Disabled because it can be used in a ciphersuite downgrade
* attack: CVE-2010-4180.
*/
#if 0
if (j == 0 && (s->options & SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG) && (sk_SSL_CIPHER_num(ciphers) == 1))
{
/* Special case as client bug workaround: the previously used cipher may
* not be in the current list, the client instead might be trying to
* continue using a cipher that before wasn't chosen due to server
* preferences. We'll have to reject the connection if the cipher is not
* enabled, though. */
c = sk_SSL_CIPHER_value(ciphers, 0);
if (sk_SSL_CIPHER_find(SSL_get_ciphers(s), c) >= 0)
{
s->session->cipher = c;
j = 1;
}
}
#endif
if (j == 0)
{
/* we need to have the cipher in the cipher
* list if we are asked to reuse it */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_REQUIRED_CIPHER_MISSING);
goto f_err;
}
}
/* compression */
i= *(p++);
if ((p+i) > (d+n))
{
/* not enough data */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
q=p;
for (j=0; j<i; j++)
{
if (p[j] == 0) break;
}
p+=i;
if (j >= i)
{
/* no compress */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_COMPRESSION_SPECIFIED);
goto f_err;
}
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions*/
if (s->version >= SSL3_VERSION)
{
if (!ssl_parse_clienthello_tlsext(s,&p,d,n, &al))
{
/* 'al' set by ssl_parse_clienthello_tlsext */
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_PARSE_TLSEXT);
goto f_err;
}
}
if (ssl_check_clienthello_tlsext_early(s) <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
/* Check if we want to use external pre-shared secret for this
* handshake for not reused session only. We need to generate
* server_random before calling tls_session_secret_cb in order to allow
* SessionTicket processing to use it in key derivation. */
{
unsigned char *pos;
pos=s->s3->server_random;
if (ssl_fill_hello_random(s, 1, pos, SSL3_RANDOM_SIZE) <= 0)
{
al=SSL_AD_INTERNAL_ERROR;
goto f_err;
}
}
if (!s->hit && s->version >= TLS1_VERSION && s->tls_session_secret_cb)
{
SSL_CIPHER *pref_cipher=NULL;
s->session->master_key_length=sizeof(s->session->master_key);
if(s->tls_session_secret_cb(s, s->session->master_key, &s->session->master_key_length,
ciphers, &pref_cipher, s->tls_session_secret_cb_arg))
{
s->hit=1;
s->session->ciphers=ciphers;
s->session->verify_result=X509_V_OK;
ciphers=NULL;
/* check if some cipher was preferred by call back */
pref_cipher=pref_cipher ? pref_cipher : ssl3_choose_cipher(s, s->session->ciphers, SSL_get_ciphers(s));
if (pref_cipher == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_SHARED_CIPHER);
goto f_err;
}
s->session->cipher=pref_cipher;
if (s->cipher_list)
sk_SSL_CIPHER_free(s->cipher_list);
if (s->cipher_list_by_id)
sk_SSL_CIPHER_free(s->cipher_list_by_id);
s->cipher_list = sk_SSL_CIPHER_dup(s->session->ciphers);
s->cipher_list_by_id = sk_SSL_CIPHER_dup(s->session->ciphers);
}
}
#endif
/* Worst case, we will use the NULL compression, but if we have other
* options, we will now look for them. We have i-1 compression
* algorithms from the client, starting at q. */
s->s3->tmp.new_compression=NULL;
#ifndef OPENSSL_NO_COMP
/* This only happens if we have a cache hit */
if (s->session->compress_meth != 0)
{
int m, comp_id = s->session->compress_meth;
/* Perform sanity checks on resumed compression algorithm */
/* Can't disable compression */
if (s->options & SSL_OP_NO_COMPRESSION)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
/* Look for resumed compression method */
for (m = 0; m < sk_SSL_COMP_num(s->ctx->comp_methods); m++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,m);
if (comp_id == comp->id)
{
s->s3->tmp.new_compression=comp;
break;
}
}
if (s->s3->tmp.new_compression == NULL)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INVALID_COMPRESSION_ALGORITHM);
goto f_err;
}
/* Look for resumed method in compression list */
for (m = 0; m < i; m++)
{
if (q[m] == comp_id)
break;
}
if (m >= i)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_REQUIRED_COMPRESSSION_ALGORITHM_MISSING);
goto f_err;
}
}
else if (s->hit)
comp = NULL;
else if (!(s->options & SSL_OP_NO_COMPRESSION) && s->ctx->comp_methods)
{ /* See if we have a match */
int m,nn,o,v,done=0;
nn=sk_SSL_COMP_num(s->ctx->comp_methods);
for (m=0; m<nn; m++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,m);
v=comp->id;
for (o=0; o<i; o++)
{
if (v == q[o])
{
done=1;
break;
}
}
if (done) break;
}
if (done)
s->s3->tmp.new_compression=comp;
else
comp=NULL;
}
#else
/* If compression is disabled we'd better not try to resume a session
* using compression.
*/
if (s->session->compress_meth != 0)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
#endif
/* Given s->session->ciphers and SSL_get_ciphers, we must
* pick a cipher */
if (!s->hit)
{
#ifdef OPENSSL_NO_COMP
s->session->compress_meth=0;
#else
s->session->compress_meth=(comp == NULL)?0:comp->id;
#endif
if (s->session->ciphers != NULL)
sk_SSL_CIPHER_free(s->session->ciphers);
s->session->ciphers=ciphers;
if (ciphers == NULL)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_CIPHERS_PASSED);
goto f_err;
}
ciphers=NULL;
c=ssl3_choose_cipher(s,s->session->ciphers,
SSL_get_ciphers(s));
if (c == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_SHARED_CIPHER);
goto f_err;
}
s->s3->tmp.new_cipher=c;
}
else
{
/* Session-id reuse */
#ifdef REUSE_CIPHER_BUG
STACK_OF(SSL_CIPHER) *sk;
SSL_CIPHER *nc=NULL;
SSL_CIPHER *ec=NULL;
if (s->options & SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG)
{
sk=s->session->ciphers;
for (i=0; i<sk_SSL_CIPHER_num(sk); i++)
{
c=sk_SSL_CIPHER_value(sk,i);
if (c->algorithm_enc & SSL_eNULL)
nc=c;
if (SSL_C_IS_EXPORT(c))
ec=c;
}
if (nc != NULL)
s->s3->tmp.new_cipher=nc;
else if (ec != NULL)
s->s3->tmp.new_cipher=ec;
else
s->s3->tmp.new_cipher=s->session->cipher;
}
else
#endif
s->s3->tmp.new_cipher=s->session->cipher;
}
if (TLS1_get_version(s) < TLS1_2_VERSION || !(s->verify_mode & SSL_VERIFY_PEER))
{
if (!ssl3_digest_cached_records(s))
{
al = SSL_AD_INTERNAL_ERROR;
goto f_err;
}
}
/* we now have the following setup.
* client_random
* cipher_list - our prefered list of ciphers
* ciphers - the clients prefered list of ciphers
* compression - basically ignored right now
* ssl version is set - sslv3
* s->session - The ssl session has been setup.
* s->hit - session reuse flag
* s->tmp.new_cipher - the new cipher to use.
*/
/* Handles TLS extensions that we couldn't check earlier */
if (s->version >= SSL3_VERSION)
{
if (ssl_check_clienthello_tlsext_late(s) <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
}
if (ret < 0) ret=1;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
err:
if (ciphers != NULL) sk_SSL_CIPHER_free(ciphers);
return(ret);
}
|
C
|
openssl
| 0 |
CVE-2017-1000252
|
https://www.cvedetails.com/cve/CVE-2017-1000252/
|
CWE-20
|
https://github.com/torvalds/linux/commit/36ae3c0a36b7456432fedce38ae2f7bd3e01a563
|
36ae3c0a36b7456432fedce38ae2f7bd3e01a563
|
KVM: Don't accept obviously wrong gsi values via KVM_IRQFD
We cannot add routes for gsi values >= KVM_MAX_IRQ_ROUTES -- see
kvm_set_irq_routing(). Hence, there is no sense in accepting them
via KVM_IRQFD. Prevent them from entering the system in the first
place.
Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
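The fix reduces to a range check before any routing state is touched. A sketch under the assumption of a placeholder KVM_MAX_IRQ_ROUTES value and a reduced request struct (the real check sits at the KVM_IRQFD ioctl boundary):
#include <errno.h>

#define KVM_MAX_IRQ_ROUTES 4096           /* placeholder for the sketch */

struct irqfd_request { unsigned int gsi; };

int irqfd_validate_gsi(const struct irqfd_request *req)
{
    /* Routes for gsi >= KVM_MAX_IRQ_ROUTES can never be added, so
     * reject such values before they enter the system. */
    if (req->gsi >= KVM_MAX_IRQ_ROUTES)
        return -EINVAL;
    return 0;
}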
|
ioeventfd_destructor(struct kvm_io_device *this)
{
struct _ioeventfd *p = to_ioeventfd(this);
ioeventfd_release(p);
}
|
ioeventfd_destructor(struct kvm_io_device *this)
{
struct _ioeventfd *p = to_ioeventfd(this);
ioeventfd_release(p);
}
|
C
|
linux
| 0 |
CVE-2019-5747
|
https://www.cvedetails.com/cve/CVE-2019-5747/
|
CWE-125
|
https://git.busybox.net/busybox/commit/?id=74d9f1ba37010face4bd1449df4d60dd84450b06
|
74d9f1ba37010face4bd1449df4d60dd84450b06
| null |
static int good_hostname(const char *name)
{
for (;;) {
name = valid_domain_label(name);
if (!name)
return 0;
if (!name[0])
return 1;
name++;
if (*name == '\0')
return 1; // We allow trailing dot too
}
}
|
static int good_hostname(const char *name)
{
for (;;) {
name = valid_domain_label(name);
if (!name)
return 0;
if (!name[0])
return 1;
name++;
if (*name == '\0')
return 1; // We allow trailing dot too
}
}
|
C
|
busybox
| 0 |
CVE-2019-6978
|
https://www.cvedetails.com/cve/CVE-2019-6978/
|
CWE-415
|
https://github.com/php/php-src/commit/089f7c0bc28d399b0420aa6ef058e4c1c120b2ae
|
089f7c0bc28d399b0420aa6ef058e4c1c120b2ae
|
Sync with upstream
Even though libgd/libgd#492 is not a relevant bug fix for PHP, since
the binding doesn't use the `gdImage*Ptr()` functions at all, we're
porting the fix to stay in sync here.
|
gdImagePtr gdImageCreateFromJpegCtxEx (gdIOCtx * infile, int ignore_warning)
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
jmpbuf_wrapper jmpbufw;
/* volatile so we can gdFree them after longjmp */
volatile JSAMPROW row = 0;
volatile gdImagePtr im = 0;
JSAMPROW rowptr[1];
unsigned int i, j;
int retval;
JDIMENSION nrows;
int channels = 3;
int inverted = 0;
memset (&cinfo, 0, sizeof (cinfo));
memset (&jerr, 0, sizeof (jerr));
jmpbufw.ignore_warning = ignore_warning;
cinfo.err = jpeg_std_error (&jerr);
cinfo.client_data = &jmpbufw;
cinfo.err->emit_message = (void (*)(j_common_ptr,int)) php_jpeg_emit_message;
if (setjmp (jmpbufw.jmpbuf) != 0) {
/* we're here courtesy of longjmp */
if (row) {
gdFree (row);
}
if (im) {
gdImageDestroy (im);
}
return 0;
}
cinfo.err->error_exit = fatal_jpeg_error;
jpeg_create_decompress (&cinfo);
jpeg_gdIOCtx_src (&cinfo, infile);
/* 2.0.22: save the APP14 marker to check for Adobe Photoshop CMYK files with inverted components. */
jpeg_save_markers(&cinfo, JPEG_APP0 + 14, 256);
retval = jpeg_read_header (&cinfo, TRUE);
if (retval != JPEG_HEADER_OK) {
gd_error_ex(GD_WARNING, "gd-jpeg: warning: jpeg_read_header returned %d, expected %d", retval, JPEG_HEADER_OK);
}
if (cinfo.image_height > INT_MAX) {
gd_error_ex(GD_WARNING, "gd-jpeg: warning: JPEG image height (%u) is greater than INT_MAX (%d) (and thus greater than gd can handle)", cinfo.image_height, INT_MAX);
}
if (cinfo.image_width > INT_MAX) {
gd_error_ex(GD_WARNING, "gd-jpeg: warning: JPEG image width (%u) is greater than INT_MAX (%d) (and thus greater than gd can handle)", cinfo.image_width, INT_MAX);
}
im = gdImageCreateTrueColor ((int) cinfo.image_width, (int) cinfo.image_height);
if (im == 0) {
gd_error("gd-jpeg error: cannot allocate gdImage struct");
goto error;
}
/* check if the resolution is specified */
switch (cinfo.density_unit) {
case 1:
im->res_x = cinfo.X_density;
im->res_y = cinfo.Y_density;
break;
case 2:
im->res_x = DPCM2DPI(cinfo.X_density);
im->res_y = DPCM2DPI(cinfo.Y_density);
break;
}
/* 2.0.22: very basic support for reading CMYK colorspace files. Nice for
* thumbnails but there's no support for fussy adjustment of the
* assumed properties of inks and paper. */
if ((cinfo.jpeg_color_space == JCS_CMYK) || (cinfo.jpeg_color_space == JCS_YCCK)) {
cinfo.out_color_space = JCS_CMYK;
} else {
cinfo.out_color_space = JCS_RGB;
}
if (jpeg_start_decompress (&cinfo) != TRUE) {
gd_error("gd-jpeg: warning: jpeg_start_decompress reports suspended data source");
}
/* REMOVED by TBB 2/12/01. This field of the structure is
* documented as private, and sure enough it's gone in the
* latest libjpeg, replaced by something else. Unfortunately
* there is still no right way to find out if the file was
* progressive or not; just declare your intent before you
* write one by calling gdImageInterlace(im, 1) yourself.
* After all, we're not really supposed to rework JPEGs and
* write them out again anyway. Lossy compression, remember?
*/
#if 0
gdImageInterlace (im, cinfo.progressive_mode != 0);
#endif
if (cinfo.out_color_space == JCS_RGB) {
if (cinfo.output_components != 3) {
gd_error_ex(GD_WARNING, "gd-jpeg: error: JPEG color quantization request resulted in output_components == %d (expected 3 for RGB)", cinfo.output_components);
goto error;
}
channels = 3;
} else if (cinfo.out_color_space == JCS_CMYK) {
jpeg_saved_marker_ptr marker;
if (cinfo.output_components != 4) {
gd_error_ex(GD_WARNING, "gd-jpeg: error: JPEG color quantization request resulted in output_components == %d (expected 4 for CMYK)", cinfo.output_components);
goto error;
}
channels = 4;
marker = cinfo.marker_list;
while (marker) {
if ((marker->marker == (JPEG_APP0 + 14)) && (marker->data_length >= 12) && (!strncmp((const char *) marker->data, "Adobe", 5))) {
inverted = 1;
break;
}
marker = marker->next;
}
} else {
gd_error_ex(GD_WARNING, "gd-jpeg: error: unexpected colorspace.");
goto error;
}
#if BITS_IN_JSAMPLE == 12
gd_error("gd-jpeg: error: jpeg library was compiled for 12-bit precision. This is mostly useless, because JPEGs on the web are 8-bit and such versions of the jpeg library won't read or write them. GD doesn't support these unusual images. Edit your jmorecfg.h file to specify the correct precision and completely 'make clean' and 'make install' libjpeg again. Sorry.");
goto error;
#endif /* BITS_IN_JSAMPLE == 12 */
row = safe_emalloc(cinfo.output_width * channels, sizeof(JSAMPLE), 0);
memset(row, 0, cinfo.output_width * channels * sizeof(JSAMPLE));
rowptr[0] = row;
if (cinfo.out_color_space == JCS_CMYK) {
for (i = 0; i < cinfo.output_height; i++) {
register JSAMPROW currow = row;
register int *tpix = im->tpixels[i];
nrows = jpeg_read_scanlines (&cinfo, rowptr, 1);
if (nrows != 1) {
gd_error_ex(GD_WARNING, "gd-jpeg: error: jpeg_read_scanlines returns %u, expected 1", nrows);
goto error;
}
for (j = 0; j < cinfo.output_width; j++, currow += 4, tpix++) {
*tpix = CMYKToRGB (currow[0], currow[1], currow[2], currow[3], inverted);
}
}
} else {
for (i = 0; i < cinfo.output_height; i++) {
register JSAMPROW currow = row;
register int *tpix = im->tpixels[i];
nrows = jpeg_read_scanlines (&cinfo, rowptr, 1);
if (nrows != 1) {
gd_error_ex(GD_WARNING, "gd-jpeg: error: jpeg_read_scanlines returns %u, expected 1", nrows);
goto error;
}
for (j = 0; j < cinfo.output_width; j++, currow += 3, tpix++) {
*tpix = gdTrueColor (currow[0], currow[1], currow[2]);
}
}
}
if (jpeg_finish_decompress (&cinfo) != TRUE) {
gd_error("gd-jpeg: warning: jpeg_finish_decompress reports suspended data source");
}
if (!ignore_warning) {
if (cinfo.err->num_warnings > 0) {
goto error;
}
}
jpeg_destroy_decompress (&cinfo);
gdFree (row);
return im;
error:
jpeg_destroy_decompress (&cinfo);
if (row) {
gdFree (row);
}
if (im) {
gdImageDestroy (im);
}
return 0;
}
|
gdImagePtr gdImageCreateFromJpegCtxEx (gdIOCtx * infile, int ignore_warning)
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
jmpbuf_wrapper jmpbufw;
/* volatile so we can gdFree them after longjmp */
volatile JSAMPROW row = 0;
volatile gdImagePtr im = 0;
JSAMPROW rowptr[1];
unsigned int i, j;
int retval;
JDIMENSION nrows;
int channels = 3;
int inverted = 0;
memset (&cinfo, 0, sizeof (cinfo));
memset (&jerr, 0, sizeof (jerr));
jmpbufw.ignore_warning = ignore_warning;
cinfo.err = jpeg_std_error (&jerr);
cinfo.client_data = &jmpbufw;
cinfo.err->emit_message = (void (*)(j_common_ptr,int)) php_jpeg_emit_message;
if (setjmp (jmpbufw.jmpbuf) != 0) {
/* we're here courtesy of longjmp */
if (row) {
gdFree (row);
}
if (im) {
gdImageDestroy (im);
}
return 0;
}
cinfo.err->error_exit = fatal_jpeg_error;
jpeg_create_decompress (&cinfo);
jpeg_gdIOCtx_src (&cinfo, infile);
/* 2.0.22: save the APP14 marker to check for Adobe Photoshop CMYK files with inverted components. */
jpeg_save_markers(&cinfo, JPEG_APP0 + 14, 256);
retval = jpeg_read_header (&cinfo, TRUE);
if (retval != JPEG_HEADER_OK) {
gd_error_ex(GD_WARNING, "gd-jpeg: warning: jpeg_read_header returned %d, expected %d", retval, JPEG_HEADER_OK);
}
if (cinfo.image_height > INT_MAX) {
gd_error_ex(GD_WARNING, "gd-jpeg: warning: JPEG image height (%u) is greater than INT_MAX (%d) (and thus greater than gd can handle)", cinfo.image_height, INT_MAX);
}
if (cinfo.image_width > INT_MAX) {
gd_error_ex(GD_WARNING, "gd-jpeg: warning: JPEG image width (%u) is greater than INT_MAX (%d) (and thus greater than gd can handle)", cinfo.image_width, INT_MAX);
}
im = gdImageCreateTrueColor ((int) cinfo.image_width, (int) cinfo.image_height);
if (im == 0) {
gd_error("gd-jpeg error: cannot allocate gdImage struct");
goto error;
}
/* check if the resolution is specified */
switch (cinfo.density_unit) {
case 1:
im->res_x = cinfo.X_density;
im->res_y = cinfo.Y_density;
break;
case 2:
im->res_x = DPCM2DPI(cinfo.X_density);
im->res_y = DPCM2DPI(cinfo.Y_density);
break;
}
/* 2.0.22: very basic support for reading CMYK colorspace files. Nice for
* thumbnails but there's no support for fussy adjustment of the
* assumed properties of inks and paper. */
if ((cinfo.jpeg_color_space == JCS_CMYK) || (cinfo.jpeg_color_space == JCS_YCCK)) {
cinfo.out_color_space = JCS_CMYK;
} else {
cinfo.out_color_space = JCS_RGB;
}
if (jpeg_start_decompress (&cinfo) != TRUE) {
gd_error("gd-jpeg: warning: jpeg_start_decompress reports suspended data source");
}
/* REMOVED by TBB 2/12/01. This field of the structure is
* documented as private, and sure enough it's gone in the
* latest libjpeg, replaced by something else. Unfortunately
* there is still no right way to find out if the file was
* progressive or not; just declare your intent before you
* write one by calling gdImageInterlace(im, 1) yourself.
* After all, we're not really supposed to rework JPEGs and
* write them out again anyway. Lossy compression, remember?
*/
#if 0
gdImageInterlace (im, cinfo.progressive_mode != 0);
#endif
if (cinfo.out_color_space == JCS_RGB) {
if (cinfo.output_components != 3) {
gd_error_ex(GD_WARNING, "gd-jpeg: error: JPEG color quantization request resulted in output_components == %d (expected 3 for RGB)", cinfo.output_components);
goto error;
}
channels = 3;
} else if (cinfo.out_color_space == JCS_CMYK) {
jpeg_saved_marker_ptr marker;
if (cinfo.output_components != 4) {
gd_error_ex(GD_WARNING, "gd-jpeg: error: JPEG color quantization request resulted in output_components == %d (expected 4 for CMYK)", cinfo.output_components);
goto error;
}
channels = 4;
marker = cinfo.marker_list;
while (marker) {
if ((marker->marker == (JPEG_APP0 + 14)) && (marker->data_length >= 12) && (!strncmp((const char *) marker->data, "Adobe", 5))) {
inverted = 1;
break;
}
marker = marker->next;
}
} else {
gd_error_ex(GD_WARNING, "gd-jpeg: error: unexpected colorspace.");
goto error;
}
#if BITS_IN_JSAMPLE == 12
gd_error("gd-jpeg: error: jpeg library was compiled for 12-bit precision. This is mostly useless, because JPEGs on the web are 8-bit and such versions of the jpeg library won't read or write them. GD doesn't support these unusual images. Edit your jmorecfg.h file to specify the correct precision and completely 'make clean' and 'make install' libjpeg again. Sorry.");
goto error;
#endif /* BITS_IN_JSAMPLE == 12 */
row = safe_emalloc(cinfo.output_width * channels, sizeof(JSAMPLE), 0);
memset(row, 0, cinfo.output_width * channels * sizeof(JSAMPLE));
rowptr[0] = row;
if (cinfo.out_color_space == JCS_CMYK) {
for (i = 0; i < cinfo.output_height; i++) {
register JSAMPROW currow = row;
register int *tpix = im->tpixels[i];
nrows = jpeg_read_scanlines (&cinfo, rowptr, 1);
if (nrows != 1) {
gd_error_ex(GD_WARNING, "gd-jpeg: error: jpeg_read_scanlines returns %u, expected 1", nrows);
goto error;
}
for (j = 0; j < cinfo.output_width; j++, currow += 4, tpix++) {
*tpix = CMYKToRGB (currow[0], currow[1], currow[2], currow[3], inverted);
}
}
} else {
for (i = 0; i < cinfo.output_height; i++) {
register JSAMPROW currow = row;
register int *tpix = im->tpixels[i];
nrows = jpeg_read_scanlines (&cinfo, rowptr, 1);
if (nrows != 1) {
gd_error_ex(GD_WARNING, "gd-jpeg: error: jpeg_read_scanlines returns %u, expected 1", nrows);
goto error;
}
for (j = 0; j < cinfo.output_width; j++, currow += 3, tpix++) {
*tpix = gdTrueColor (currow[0], currow[1], currow[2]);
}
}
}
if (jpeg_finish_decompress (&cinfo) != TRUE) {
gd_error("gd-jpeg: warning: jpeg_finish_decompress reports suspended data source");
}
if (!ignore_warning) {
if (cinfo.err->num_warnings > 0) {
goto error;
}
}
jpeg_destroy_decompress (&cinfo);
gdFree (row);
return im;
error:
jpeg_destroy_decompress (&cinfo);
if (row) {
gdFree (row);
}
if (im) {
gdImageDestroy (im);
}
return 0;
}
|
C
|
php-src
| 0 |
CVE-2018-6196
|
https://www.cvedetails.com/cve/CVE-2018-6196/
|
CWE-835
|
https://github.com/tats/w3m/commit/8354763b90490d4105695df52674d0fcef823e92
|
8354763b90490d4105695df52674d0fcef823e92
|
Prevent negative indent value in feed_table_block_tag()
Bug-Debian: https://github.com/tats/w3m/issues/88
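A negative indent later feeds offset arithmetic, so the fix is to clamp the value where it is computed. A minimal sketch (the surrounding feed_table_block_tag() logic is omitted; the helper name is hypothetical):
int clamp_indent(int indent)
{
    /* A malformed table can drive the computed indent below zero;
     * never let that value escape as an offset. */
    return indent < 0 ? 0 : indent;
}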
|
visible_length(char *str)
{
int len = 0, n, max_len = 0;
int status = R_ST_NORMAL;
int prev_status = status;
Str tagbuf = Strnew();
char *t, *r2;
int amp_len = 0;
while (*str) {
prev_status = status;
if (next_status(*str, &status)) {
#ifdef USE_M17N
len += get_mcwidth(str);
n = get_mclen(str);
}
else {
n = 1;
}
#else
len++;
}
#endif
if (status == R_ST_TAG0) {
Strclear(tagbuf);
PUSH_TAG(str, n);
}
else if (status == R_ST_TAG || status == R_ST_DQUOTE
|| status == R_ST_QUOTE || status == R_ST_EQL
|| status == R_ST_VALUE) {
PUSH_TAG(str, n);
}
else if (status == R_ST_AMP) {
if (prev_status == R_ST_NORMAL) {
Strclear(tagbuf);
len--;
amp_len = 0;
}
else {
PUSH_TAG(str, n);
amp_len++;
}
}
else if (status == R_ST_NORMAL && prev_status == R_ST_AMP) {
PUSH_TAG(str, n);
r2 = tagbuf->ptr;
t = getescapecmd(&r2);
if (!*r2 && (*t == '\r' || *t == '\n')) {
if (len > max_len)
max_len = len;
len = 0;
}
else
len += get_strwidth(t) + get_strwidth(r2);
}
else if (status == R_ST_NORMAL && ST_IS_REAL_TAG(prev_status)) {
;
}
else if (*str == '\t') {
len--;
do {
len++;
} while ((visible_length_offset + len) % Tabstop != 0);
}
else if (*str == '\r' || *str == '\n') {
len--;
if (len > max_len)
max_len = len;
len = 0;
}
#ifdef USE_M17N
str += n;
#else
str++;
#endif
}
|
visible_length(char *str)
{
int len = 0, n, max_len = 0;
int status = R_ST_NORMAL;
int prev_status = status;
Str tagbuf = Strnew();
char *t, *r2;
int amp_len = 0;
while (*str) {
prev_status = status;
if (next_status(*str, &status)) {
#ifdef USE_M17N
len += get_mcwidth(str);
n = get_mclen(str);
}
else {
n = 1;
}
#else
len++;
}
#endif
if (status == R_ST_TAG0) {
Strclear(tagbuf);
PUSH_TAG(str, n);
}
else if (status == R_ST_TAG || status == R_ST_DQUOTE
|| status == R_ST_QUOTE || status == R_ST_EQL
|| status == R_ST_VALUE) {
PUSH_TAG(str, n);
}
else if (status == R_ST_AMP) {
if (prev_status == R_ST_NORMAL) {
Strclear(tagbuf);
len--;
amp_len = 0;
}
else {
PUSH_TAG(str, n);
amp_len++;
}
}
else if (status == R_ST_NORMAL && prev_status == R_ST_AMP) {
PUSH_TAG(str, n);
r2 = tagbuf->ptr;
t = getescapecmd(&r2);
if (!*r2 && (*t == '\r' || *t == '\n')) {
if (len > max_len)
max_len = len;
len = 0;
}
else
len += get_strwidth(t) + get_strwidth(r2);
}
else if (status == R_ST_NORMAL && ST_IS_REAL_TAG(prev_status)) {
;
}
else if (*str == '\t') {
len--;
do {
len++;
} while ((visible_length_offset + len) % Tabstop != 0);
}
else if (*str == '\r' || *str == '\n') {
len--;
if (len > max_len)
max_len = len;
len = 0;
}
#ifdef USE_M17N
str += n;
#else
str++;
#endif
}
|
C
|
w3m
| 0 |
CVE-2014-4503
|
https://www.cvedetails.com/cve/CVE-2014-4503/
|
CWE-20
|
https://github.com/sgminer-dev/sgminer/commit/910c36089940e81fb85c65b8e63dcd2fac71470c
|
910c36089940e81fb85c65b8e63dcd2fac71470c
|
stratum: parse_notify(): Don't die on malformed bbversion/prev_hash/nbit/ntime.
Might have introduced a memory leak; don't have time to check. :(
Should the other hex2bin()'s be checked?
Thanks to Mick Ayzenberg <mick.dejavusecurity.com> for finding this.
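The shape of the fix is to check each conversion's return value and bail out of parse_notify() on failure instead of treating malformed input as fatal. A self-contained sketch with illustrative helper names:
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static int hexval(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

/* Convert 2*len hex chars into len bytes; false on any malformed input. */
static bool hex2bin_checked(unsigned char *dst, const char *src, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        int hi = hexval(src[2 * i]);
        int lo = (hi < 0) ? -1 : hexval(src[2 * i + 1]);
        if (hi < 0 || lo < 0)
            return false;
        dst[i] = (unsigned char)((hi << 4) | lo);
    }
    return true;
}

bool parse_field(unsigned char *dst, size_t len,
                 const char *hex, const char *name)
{
    if (!hex2bin_checked(dst, hex, len)) {
        fprintf(stderr, "parse_notify: malformed %s\n", name);
        return false;              /* propagate failure instead of dying */
    }
    return true;
}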
|
void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
{
struct timespec ts_diff;
ms_to_timespec(&ts_diff, ms);
cgsleep_spec(&ts_diff, ts_start);
}
|
void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
{
struct timespec ts_diff;
ms_to_timespec(&ts_diff, ms);
cgsleep_spec(&ts_diff, ts_start);
}
|
C
|
sgminer
| 0 |
CVE-2011-2881
|
https://www.cvedetails.com/cve/CVE-2011-2881/
|
CWE-119
|
https://github.com/chromium/chromium/commit/88c4913f11967abfd08a8b22b4423710322ac49b
|
88c4913f11967abfd08a8b22b4423710322ac49b
|
[chromium] Fix shutdown race when posting main thread task to CCThreadProxy and enable tests
https://bugs.webkit.org/show_bug.cgi?id=70161
Reviewed by David Levin.
Source/WebCore:
Adds a weak pointer mechanism to cancel main thread tasks posted to CCThreadProxy instances from the compositor
thread. Previously there was a race condition where main thread tasks could run even after the CCThreadProxy was
destroyed.
This race does not exist in the other direction because when tearing down a CCThreadProxy we first post a quit
task to the compositor thread and then suspend execution of the main thread until all compositor tasks for the
CCThreadProxy have been drained.
Covered by the now-enabled CCLayerTreeHostTest* unit tests.
* WebCore.gypi:
* platform/graphics/chromium/cc/CCScopedMainThreadProxy.h: Added.
(WebCore::CCScopedMainThreadProxy::create):
(WebCore::CCScopedMainThreadProxy::postTask):
(WebCore::CCScopedMainThreadProxy::shutdown):
(WebCore::CCScopedMainThreadProxy::CCScopedMainThreadProxy):
(WebCore::CCScopedMainThreadProxy::runTaskIfNotShutdown):
* platform/graphics/chromium/cc/CCThreadProxy.cpp:
(WebCore::CCThreadProxy::CCThreadProxy):
(WebCore::CCThreadProxy::~CCThreadProxy):
(WebCore::CCThreadProxy::createBeginFrameAndCommitTaskOnCCThread):
* platform/graphics/chromium/cc/CCThreadProxy.h:
Source/WebKit/chromium:
Enables the CCLayerTreeHostTest* tests by default. Most tests are run twice, once in a
single-thread and once in a multiple-thread configuration. Some tests run only in the
multiple-thread configuration if they depend on the compositor thread scheduling draws by
itself.
* tests/CCLayerTreeHostTest.cpp:
(::CCLayerTreeHostTest::timeout):
(::CCLayerTreeHostTest::clearTimeout):
(::CCLayerTreeHostTest::CCLayerTreeHostTest):
(::CCLayerTreeHostTest::onEndTest):
(::CCLayerTreeHostTest::TimeoutTask::TimeoutTask):
(::CCLayerTreeHostTest::TimeoutTask::clearTest):
(::CCLayerTreeHostTest::TimeoutTask::~TimeoutTask):
(::CCLayerTreeHostTest::TimeoutTask::Run):
(::CCLayerTreeHostTest::runTest):
(::CCLayerTreeHostTest::doBeginTest):
(::CCLayerTreeHostTestThreadOnly::runTest):
(::CCLayerTreeHostTestSetNeedsRedraw::commitCompleteOnCCThread):
git-svn-id: svn://svn.chromium.org/blink/trunk@97784 bbb929c8-8fbe-4397-9dbb-9b2b20218538
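A rough C rendering of the cancellation idea, assuming pthreads (the real class is C++/WebKit, and shutdown runs on the same thread that executes the posted tasks, which is why a flag check at run time suffices):
#include <pthread.h>
#include <stdbool.h>

struct scoped_proxy {
    pthread_mutex_t lock;
    bool shut_down;
};

/* Called at teardown on the thread that runs the posted tasks. */
void proxy_shutdown(struct scoped_proxy *p)
{
    pthread_mutex_lock(&p->lock);
    p->shut_down = true;
    pthread_mutex_unlock(&p->lock);
}

/* Wrapper executed when a posted task is dequeued: tasks that arrive
 * after shutdown become no-ops instead of touching a dead proxy. */
void proxy_run_task(struct scoped_proxy *p, void (*task)(void *), void *arg)
{
    pthread_mutex_lock(&p->lock);
    bool run = !p->shut_down;
    pthread_mutex_unlock(&p->lock);
    if (run)
        task(arg);
}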
|
virtual void beginTest()
{
postSetNeedsCommitThenRedrawToMainThread();
postSetNeedsCommitThenRedrawToMainThread();
}
|
virtual void beginTest()
{
postSetNeedsCommitThenRedrawToMainThread();
postSetNeedsCommitThenRedrawToMainThread();
}
|
C
|
Chrome
| 0 |
CVE-2015-3331
|
https://www.cvedetails.com/cve/CVE-2015-3331/
|
CWE-119
|
https://github.com/torvalds/linux/commit/ccfe8c3f7e52ae83155cb038753f4c75b774ca8a
|
ccfe8c3f7e52ae83155cb038753f4c75b774ca8a
|
crypto: aesni - fix memory usage in GCM decryption
The kernel crypto API logic requires the caller to provide the
length of (ciphertext || authentication tag) as cryptlen for the
AEAD decryption operation. Thus, the cipher implementation must
calculate the size of the plaintext output itself and cannot simply use
cryptlen.
The RFC4106 GCM decryption operation tries to overwrite cryptlen memory
in req->dst. As the destination buffer for decryption only needs to hold
the plaintext memory but cryptlen references the input buffer holding
(ciphertext || authentication tag), the assumption of the destination
buffer length in RFC4106 GCM operation leads to a too large size. This
patch simply uses the already calculated plaintext size.
In addition, this patch fixes the offset calculation of the AAD buffer
pointer: as mentioned before, cryptlen already includes the size of the
tag. Thus, the tag does not need to be added. With the addition, the AAD
will be written beyond the already allocated buffer.
Note, this fixes a kernel crash that can be triggered from user space
via AF_ALG(aead) -- simply use the libkcapi test application
from [1] and update it to use rfc4106-gcm-aes.
Using [1], the changes were tested using CAVS vectors to demonstrate
that the crypto operation still delivers the right results.
[1] http://www.chronox.de/libkcapi.html
CC: Tadeusz Struk <tadeusz.struk@intel.com>
Cc: stable@vger.kernel.org
Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
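The arithmetic at the heart of both fixes, with illustrative names (the kernel AEAD convention described above is that cryptlen covers ciphertext plus the authentication tag):
#include <stddef.h>

/* Plaintext produced by GCM decryption: the tag is verified, not
 * emitted, so the destination only needs cryptlen - tag_len bytes. */
size_t gcm_plaintext_len(size_t cryptlen, size_t tag_len)
{
    return cryptlen - tag_len;
}

/* AAD scratch area after the data: cryptlen already includes the tag,
 * so adding tag_len again (as the pre-fix code did) lands past the
 * allocated buffer. */
unsigned char *gcm_aad_scratch(unsigned char *buf, size_t cryptlen)
{
    return buf + cryptlen;
}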
|
static int ecb_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
kernel_fpu_end();
return err;
}
|
static int ecb_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
kernel_fpu_end();
return err;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
Fixing cross-process postMessage replies on more than two iterations.
When two frames are replying to each other using event.source across processes,
after the first two replies, things break down. The root cause is that in
RenderViewImpl::GetFrameByMappedID, the lookup was incorrect. It is now
properly searching for the remote frame id and returning the local one.
BUG=153445
Review URL: https://chromiumcodereview.appspot.com/11040015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159924 0039d316-1c4b-4281-b951-d872f2087c98
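The root cause reduces to searching a (remote id, local id) table by the wrong column. A sketch with hypothetical types (the real lookup is RenderViewImpl::GetFrameByMappedID):
#include <stddef.h>

struct frame_mapping { int remote_id; int local_id; };

int local_frame_for_remote(const struct frame_mapping *map, size_t n,
                           int remote_id)
{
    for (size_t i = 0; i < n; i++)
        if (map[i].remote_id == remote_id)   /* search by the REMOTE id */
            return map[i].local_id;          /* ...return the LOCAL one */
    return -1;  /* not found */
}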
|
void RenderViewImpl::exitFullScreen() {
Send(new ViewHostMsg_ToggleFullscreen(routing_id_, false));
}
|
void RenderViewImpl::exitFullScreen() {
Send(new ViewHostMsg_ToggleFullscreen(routing_id_, false));
}
|
C
|
Chrome
| 0 |
CVE-2016-6787
|
https://www.cvedetails.com/cve/CVE-2016-6787/
|
CWE-264
|
https://github.com/torvalds/linux/commit/f63a8daa5812afef4f06c962351687e1ff9ccb2b
|
f63a8daa5812afef4f06c962351687e1ff9ccb2b
|
perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx; the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20150123125834.209535886@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
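The core of the patch is a take-lock-then-revalidate loop: pin whatever event->ctx currently points to, acquire its mutex, and if the pointer moved while we slept on the lock, drop everything and retry. A simplified userspace sketch using C11 atomics and pthreads in place of the kernel's RCU and refcount primitives:
#include <pthread.h>
#include <stdatomic.h>

struct ctx {
    pthread_mutex_t mutex;
    atomic_int refcount;
};

struct event { _Atomic(struct ctx *) ctx; };

static void ctx_put(struct ctx *c)
{
    atomic_fetch_sub(&c->refcount, 1);   /* real code frees at zero */
}

struct ctx *event_ctx_lock(struct event *e)
{
    struct ctx *c;
again:
    c = atomic_load(&e->ctx);
    atomic_fetch_add(&c->refcount, 1);   /* kernel: inc_not_zero under RCU */
    pthread_mutex_lock(&c->mutex);
    if (atomic_load(&e->ctx) != c) {     /* ctx swapped while we slept */
        pthread_mutex_unlock(&c->mutex);
        ctx_put(c);
        goto again;
    }
    return c;                            /* caller now holds c->mutex */
}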
|
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
int inherited_all = 1;
unsigned long flags;
int ret = 0;
if (likely(!parent->perf_event_ctxp[ctxn]))
return 0;
/*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
parent_ctx = perf_pin_task_context(parent, ctxn);
if (!parent_ctx)
return 0;
/*
* No need to check if parent_ctx != NULL here; since we saw
* it non-NULL earlier, the only reason for it to become NULL
* is if we exit, and since we're currently in the middle of
* a fork we can't be exiting at the same time.
*/
/*
* Lock the parent list. No need to lock the child - not PID
* hashed yet and not running, so nobody can access it.
*/
mutex_lock(&parent_ctx->mutex);
/*
* We dont have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
/*
* We can't hold ctx->lock when iterating the ->flexible_group list due
* to allocations, but we need to prevent rotation because
* rotate_ctx() will change the list from interrupt context.
*/
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 1;
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 0;
child_ctx = child->perf_event_ctxp[ctxn];
if (child_ctx && inherited_all) {
/*
* Mark the child context as a clone of the parent
* context, or of whatever the parent is a clone of.
*
* Note that if the parent is a clone, the holding of
* parent_ctx->lock avoids it from being uncloned.
*/
cloned_ctx = parent_ctx->parent_ctx;
if (cloned_ctx) {
child_ctx->parent_ctx = cloned_ctx;
child_ctx->parent_gen = parent_ctx->parent_gen;
} else {
child_ctx->parent_ctx = parent_ctx;
child_ctx->parent_gen = parent_ctx->generation;
}
get_ctx(child_ctx->parent_ctx);
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
put_ctx(parent_ctx);
return ret;
}
|
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
int inherited_all = 1;
unsigned long flags;
int ret = 0;
if (likely(!parent->perf_event_ctxp[ctxn]))
return 0;
/*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
parent_ctx = perf_pin_task_context(parent, ctxn);
if (!parent_ctx)
return 0;
/*
* No need to check if parent_ctx != NULL here; since we saw
* it non-NULL earlier, the only reason for it to become NULL
* is if we exit, and since we're currently in the middle of
* a fork we can't be exiting at the same time.
*/
/*
* Lock the parent list. No need to lock the child - not PID
* hashed yet and not running, so nobody can access it.
*/
mutex_lock(&parent_ctx->mutex);
/*
* We dont have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
/*
* We can't hold ctx->lock when iterating the ->flexible_group list due
* to allocations, but we need to prevent rotation because
* rotate_ctx() will change the list from interrupt context.
*/
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 1;
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 0;
child_ctx = child->perf_event_ctxp[ctxn];
if (child_ctx && inherited_all) {
/*
* Mark the child context as a clone of the parent
* context, or of whatever the parent is a clone of.
*
* Note that if the parent is a clone, the holding of
* parent_ctx->lock avoids it from being uncloned.
*/
cloned_ctx = parent_ctx->parent_ctx;
if (cloned_ctx) {
child_ctx->parent_ctx = cloned_ctx;
child_ctx->parent_gen = parent_ctx->parent_gen;
} else {
child_ctx->parent_ctx = parent_ctx;
child_ctx->parent_gen = parent_ctx->generation;
}
get_ctx(child_ctx->parent_ctx);
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
put_ctx(parent_ctx);
return ret;
}
|
C
|
linux
| 0 |
CVE-2017-13673
|
https://www.cvedetails.com/cve/CVE-2017-13673/
|
CWE-617
|
https://git.qemu.org/gitweb.cgi?p=qemu.git;a=commit;h=bfc56535f793c557aa754c50213fc5f882e6482d
|
bfc56535f793c557aa754c50213fc5f882e6482d
| null |
void vga_init_vbe(VGACommonState *s, Object *obj, MemoryRegion *system_memory)
{
/* With pc-0.12 and below we map both the PCI BAR and the fixed VBE region,
* so use an alias to avoid double-mapping the same region.
*/
memory_region_init_alias(&s->vram_vbe, obj, "vram.vbe",
&s->vram, 0, memory_region_size(&s->vram));
/* XXX: use optimized standard vga accesses */
memory_region_add_subregion(system_memory,
VBE_DISPI_LFB_PHYSICAL_ADDRESS,
&s->vram_vbe);
s->vbe_mapped = 1;
}
|
void vga_init_vbe(VGACommonState *s, Object *obj, MemoryRegion *system_memory)
{
/* With pc-0.12 and below we map both the PCI BAR and the fixed VBE region,
* so use an alias to avoid double-mapping the same region.
*/
memory_region_init_alias(&s->vram_vbe, obj, "vram.vbe",
&s->vram, 0, memory_region_size(&s->vram));
/* XXX: use optimized standard vga accesses */
memory_region_add_subregion(system_memory,
VBE_DISPI_LFB_PHYSICAL_ADDRESS,
&s->vram_vbe);
s->vbe_mapped = 1;
}
|
C
|
qemu
| 0 |
CVE-2016-1705
|
https://www.cvedetails.com/cve/CVE-2016-1705/
| null |
https://github.com/chromium/chromium/commit/4afb628e068367d5b73440537555902cd12416f8
|
4afb628e068367d5b73440537555902cd12416f8
|
gpu/android : Add support for partial swap with surface control.
Add support for PostSubBuffer to GLSurfaceEGLSurfaceControl. This should
allow the display compositor to draw the minimum sub-rect necessary from
the damage tracking in BufferQueue on the client-side, and also to pass
this damage rect to the framework.
R=piman@chromium.org
Bug: 926020
Change-Id: I73d3320cab68250d4c6865bf21c5531682d8bf61
Reviewed-on: https://chromium-review.googlesource.com/c/1457467
Commit-Queue: Khushal <khushalsagar@chromium.org>
Commit-Queue: Antoine Labour <piman@chromium.org>
Reviewed-by: Antoine Labour <piman@chromium.org>
Auto-Submit: Khushal <khushalsagar@chromium.org>
Cr-Commit-Position: refs/heads/master@{#629852}
|
cc::UIResourceId CompositorImpl::CreateUIResource(
cc::UIResourceClient* client) {
TRACE_EVENT0("compositor", "CompositorImpl::CreateUIResource");
return host_->GetUIResourceManager()->CreateUIResource(client);
}
|
cc::UIResourceId CompositorImpl::CreateUIResource(
cc::UIResourceClient* client) {
TRACE_EVENT0("compositor", "CompositorImpl::CreateUIResource");
return host_->GetUIResourceManager()->CreateUIResource(client);
}
|
C
|
Chrome
| 0 |
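Editor's aside on the partial-swap commit above: the damage tracking it describes amounts to accumulating a bounding rectangle of changed regions and submitting only that sub-rect. A minimal, hypothetical C sketch of such a rect-union helper follows — this is not Chromium code, and the struct and function names are invented for illustration.

struct rect { int x, y, w, h; };

/* Union of two damage rects: the smallest rect covering both. */
static struct rect rect_union(struct rect a, struct rect b)
{
	int x0 = a.x < b.x ? a.x : b.x;
	int y0 = a.y < b.y ? a.y : b.y;
	int x1 = (a.x + a.w > b.x + b.w) ? a.x + a.w : b.x + b.w;
	int y1 = (a.y + a.h > b.y + b.h) ? a.y + a.h : b.y + b.h;
	struct rect r = { x0, y0, x1 - x0, y1 - y0 };
	return r;
}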
CVE-2015-3418
|
https://www.cvedetails.com/cve/CVE-2015-3418/
|
CWE-369
|
https://cgit.freedesktop.org/xorg/xserver/commit/?id=dc777c346d5d452a53b13b917c45f6a1bad2f20b
|
dc777c346d5d452a53b13b917c45f6a1bad2f20b
| null |
DetachUnboundGPU(ScreenPtr slave)
{
assert(slave->isGPU);
xorg_list_del(&slave->unattached_head);
slave->current_master = NULL;
}
|
DetachUnboundGPU(ScreenPtr slave)
{
assert(slave->isGPU);
xorg_list_del(&slave->unattached_head);
slave->current_master = NULL;
}
|
C
|
xserver
| 0 |
CVE-2012-3552
|
https://www.cvedetails.com/cve/CVE-2012-3552/
|
CWE-362
|
https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
|
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options.
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
struct flowi4 *fl4;
int ulen = len;
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
int connected = 0;
__be32 daddr, faddr, saddr;
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
if (len > 0xFFFF)
return -EMSGSIZE;
/*
* Check the flags.
*/
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
ipc.opt = NULL;
ipc.tx_flags = 0;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET)) {
release_sock(sk);
return -EINVAL;
}
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
/*
* Get and verify the address.
*/
if (msg->msg_name) {
struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
if (usin->sin_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = usin->sin_addr.s_addr;
dport = usin->sin_port;
if (dport == 0)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
err = sock_tx_timestamp(sk, &ipc.tx_flags);
if (err)
return err;
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
return err;
if (ipc.opt)
free = 1;
connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
saddr = ipc.addr;
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
if (!daddr)
return -EINVAL;
faddr = ipc.opt->opt.faddr;
connected = 0;
}
tos = RT_TOS(inet->tos);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->opt.is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
}
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
}
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
if (rt == NULL) {
struct flowi4 fl4;
struct net *net = sock_net(sk);
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
faddr, saddr, dport, inet->inet_sport);
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
err = -EACCES;
if ((rt->rt_flags & RTCF_BROADCAST) &&
!sock_flag(sk, SOCK_BROADCAST))
goto out;
if (connected)
sk_dst_set(sk, dst_clone(&rt->dst));
}
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
saddr = rt->rt_src;
if (!ipc.addr)
daddr = ipc.addr = rt->rt_dst;
/* Lockless fast path for the non-corking case. */
if (!corkreq) {
skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
if (skb && !IS_ERR(skb))
err = udp_send_skb(skb, daddr, dport);
goto out;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
/*
* Now cork the socket to pend data.
*/
fl4 = &inet->cork.fl.u.ip4;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = inet->inet_sport;
up->pending = AF_INET;
do_append_data:
up->len += ulen;
err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_flush_pending_frames(sk);
else if (!corkreq)
err = udp_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
release_sock(sk);
out:
ip_rt_put(rt);
if (free)
kfree(ipc.opt);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
|
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
struct flowi4 *fl4;
int ulen = len;
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
int connected = 0;
__be32 daddr, faddr, saddr;
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
if (len > 0xFFFF)
return -EMSGSIZE;
/*
* Check the flags.
*/
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
ipc.opt = NULL;
ipc.tx_flags = 0;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET)) {
release_sock(sk);
return -EINVAL;
}
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
/*
* Get and verify the address.
*/
if (msg->msg_name) {
struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
if (usin->sin_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = usin->sin_addr.s_addr;
dport = usin->sin_port;
if (dport == 0)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
err = sock_tx_timestamp(sk, &ipc.tx_flags);
if (err)
return err;
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
return err;
if (ipc.opt)
free = 1;
connected = 0;
}
if (!ipc.opt)
ipc.opt = inet->opt;
saddr = ipc.addr;
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->srr) {
if (!daddr)
return -EINVAL;
faddr = ipc.opt->faddr;
connected = 0;
}
tos = RT_TOS(inet->tos);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
}
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
}
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
if (rt == NULL) {
struct flowi4 fl4;
struct net *net = sock_net(sk);
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
faddr, saddr, dport, inet->inet_sport);
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
err = -EACCES;
if ((rt->rt_flags & RTCF_BROADCAST) &&
!sock_flag(sk, SOCK_BROADCAST))
goto out;
if (connected)
sk_dst_set(sk, dst_clone(&rt->dst));
}
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
saddr = rt->rt_src;
if (!ipc.addr)
daddr = ipc.addr = rt->rt_dst;
/* Lockless fast path for the non-corking case. */
if (!corkreq) {
skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
if (skb && !IS_ERR(skb))
err = udp_send_skb(skb, daddr, dport);
goto out;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
/*
* Now cork the socket to pend data.
*/
fl4 = &inet->cork.fl.u.ip4;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = inet->inet_sport;
up->pending = AF_INET;
do_append_data:
up->len += ulen;
err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_flush_pending_frames(sk);
else if (!corkreq)
err = udp_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
release_sock(sk);
out:
ip_rt_put(rt);
if (free)
kfree(ipc.opt);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
|
C
|
linux
| 1 |
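As context for the RCU fix in the record above: the commit replaces direct reads of inet->opt with an RCU-protected pointer plus a private copy taken under the read lock (visible in func_after as rcu_read_lock()/rcu_dereference()/memcpy into opt_copy). A minimal kernel-style sketch of that read-side pattern, with hypothetical struct and field names, might look like:

struct my_opts {
	int optlen;
	char data[40];
};

struct my_sock {
	/* writers publish a new copy, then free the old one via kfree_rcu() */
	struct my_opts __rcu *opts;
};

static void snapshot_opts(struct my_sock *sk, struct my_opts *copy)
{
	struct my_opts *o;

	rcu_read_lock();
	o = rcu_dereference(sk->opts);	/* only valid inside the read-side section */
	if (o)
		memcpy(copy, o, sizeof(*o));	/* take a private snapshot while protected */
	rcu_read_unlock();
	/* 'copy' remains valid even if a writer swaps and frees the old opts */
}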
CVE-2015-1352
|
https://www.cvedetails.com/cve/CVE-2015-1352/
| null |
https://git.php.net/?p=php-src.git;a=commit;h=124fb22a13fafa3648e4e15b4f207c7096d8155e
|
124fb22a13fafa3648e4e15b4f207c7096d8155e
| null |
PHP_FUNCTION(pg_fetch_all_columns)
{
zval *result;
PGresult *pgsql_result;
pgsql_result_handle *pg_result;
zend_long colno=0;
int pg_numrows, pg_row;
size_t num_fields;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|l", &result, &colno) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(pg_result, pgsql_result_handle *, result, -1, "PostgreSQL result", le_result);
pgsql_result = pg_result->result;
num_fields = PQnfields(pgsql_result);
if (colno >= (zend_long)num_fields || colno < 0) {
php_error_docref(NULL, E_WARNING, "Invalid column number '%pd'", colno);
RETURN_FALSE;
}
array_init(return_value);
if ((pg_numrows = PQntuples(pgsql_result)) <= 0) {
return;
}
for (pg_row = 0; pg_row < pg_numrows; pg_row++) {
if (PQgetisnull(pgsql_result, pg_row, (int)colno)) {
add_next_index_null(return_value);
} else {
add_next_index_string(return_value, PQgetvalue(pgsql_result, pg_row, (int)colno));
}
}
}
|
PHP_FUNCTION(pg_fetch_all_columns)
{
zval *result;
PGresult *pgsql_result;
pgsql_result_handle *pg_result;
zend_long colno=0;
int pg_numrows, pg_row;
size_t num_fields;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|l", &result, &colno) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(pg_result, pgsql_result_handle *, result, -1, "PostgreSQL result", le_result);
pgsql_result = pg_result->result;
num_fields = PQnfields(pgsql_result);
if (colno >= (zend_long)num_fields || colno < 0) {
php_error_docref(NULL, E_WARNING, "Invalid column number '%pd'", colno);
RETURN_FALSE;
}
array_init(return_value);
if ((pg_numrows = PQntuples(pgsql_result)) <= 0) {
return;
}
for (pg_row = 0; pg_row < pg_numrows; pg_row++) {
if (PQgetisnull(pgsql_result, pg_row, (int)colno)) {
add_next_index_null(return_value);
} else {
add_next_index_string(return_value, PQgetvalue(pgsql_result, pg_row, (int)colno));
}
}
}
|
C
|
php
| 0 |
CVE-2011-4112
|
https://www.cvedetails.com/cve/CVE-2011-4112/
|
CWE-264
|
https://github.com/torvalds/linux/commit/550fd08c2cebad61c548def135f67aba284c6162
|
550fd08c2cebad61c548def135f67aba284c6162
|
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Karsten Keil <isdn@linux-pingi.de>
CC: "David S. Miller" <davem@davemloft.net>
CC: Jay Vosburgh <fubar@us.ibm.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Krzysztof Halasa <khc@pm.waw.pl>
CC: "John W. Linville" <linville@tuxdriver.com>
CC: Greg Kroah-Hartman <gregkh@suse.de>
CC: Marcel Holtmann <marcel@holtmann.org>
CC: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
return ETH_ALEN;
}
|
static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
return ETH_ALEN;
}
|
C
|
linux
| 0 |
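To illustrate the fix pattern named in the commit message above — clearing IFF_TX_SKB_SHARING in priv_flags for drivers that keep state in their skbs — a hypothetical driver setup callback would look like the following sketch (the callback name is invented; ether_setup(), priv_flags, and IFF_TX_SKB_SHARING are the real kernel interfaces the message refers to):

static void my_netdev_setup(struct net_device *dev)
{
	ether_setup(dev);			/* sets IFF_TX_SKB_SHARING by default */
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;	/* this driver cannot safely transmit shared skbs */
}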
null | null | null |
https://github.com/chromium/chromium/commit/f2f703241635fa96fa630b83afcc9a330cc21b7e
|
f2f703241635fa96fa630b83afcc9a330cc21b7e
|
CrOS Shelf: Get rid of 'split view' mode for shelf background
In the new UI, "maximized" and "split view" are treated the same in
specs, so there is no more need for a separate "split view" mode. This
folds it into the "maximized" mode.
Note that the only thing that _seems_ different in
shelf_background_animator is ShelfBackgroundAnimator::kMaxAlpha (255)
vs kShelfTranslucentMaximizedWindow (254), which should be virtually
impossible to distinguish.
This CL therefore does not have any visual effect (and doesn't
directly fix the linked bug, but is relevant).
Bug: 899289
Change-Id: I60947338176ac15ca016b1ba4edf13d16362cb24
Reviewed-on: https://chromium-review.googlesource.com/c/1469741
Commit-Queue: Xiyuan Xia <xiyuan@chromium.org>
Reviewed-by: Xiyuan Xia <xiyuan@chromium.org>
Auto-Submit: Manu Cornet <manucornet@chromium.org>
Cr-Commit-Position: refs/heads/master@{#631752}
|
void ShelfBackgroundAnimator::PaintBackground(
ShelfBackgroundType background_type,
AnimationChangeType change_type) {
if (target_background_type_ == background_type &&
change_type == AnimationChangeType::ANIMATE) {
return;
}
AnimateBackground(background_type, change_type);
}
|
void ShelfBackgroundAnimator::PaintBackground(
ShelfBackgroundType background_type,
AnimationChangeType change_type) {
if (target_background_type_ == background_type &&
change_type == AnimationChangeType::ANIMATE) {
return;
}
AnimateBackground(background_type, change_type);
}
|
C
|
Chrome
| 0 |
CVE-2014-3480
|
https://www.cvedetails.com/cve/CVE-2014-3480/
|
CWE-20
|
https://github.com/file/file/commit/40bade80cbe2af1d0b2cd0420cebd5d5905a2382
|
40bade80cbe2af1d0b2cd0420cebd5d5905a2382
|
Fix incorrect bounds check for sector count. (Francisco Alonso and Jan Kaluza at RedHat)
|
cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
{
size_t i, j;
cdf_secid_t maxsector = (cdf_secid_t)((sat->sat_len * size)
/ sizeof(maxsector));
DPRINTF(("Chain:"));
for (j = i = 0; sid >= 0; i++, j++) {
DPRINTF((" %d", sid));
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Counting chain loop limit"));
errno = EFTYPE;
return (size_t)-1;
}
if (sid >= maxsector) {
DPRINTF(("Sector %d >= %d\n", sid, maxsector));
errno = EFTYPE;
return (size_t)-1;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (i == 0) {
DPRINTF((" none, sid: %d\n", sid));
return (size_t)-1;
}
DPRINTF(("\n"));
return i;
}
|
cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
{
size_t i, j;
cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size);
DPRINTF(("Chain:"));
for (j = i = 0; sid >= 0; i++, j++) {
DPRINTF((" %d", sid));
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Counting chain loop limit"));
errno = EFTYPE;
return (size_t)-1;
}
if (sid > maxsector) {
DPRINTF(("Sector %d > %d\n", sid, maxsector));
errno = EFTYPE;
return (size_t)-1;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (i == 0) {
DPRINTF((" none, sid: %d\n", sid));
return (size_t)-1;
}
DPRINTF(("\n"));
return i;
}
|
C
|
file
| 1 |
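A note on the bounds fix above: in func_before, maxsector is the raw byte count sat_len * size, while sid indexes cdf_secid_t entries, so the comparison (and its strict >) lets out-of-range sector ids through; func_after divides by the entry size and rejects sid >= maxsector. A small standalone sketch of the corrected arithmetic:

#include <stddef.h>
#include <stdint.h>

typedef int32_t cdf_secid_t;

/* Valid entries are 0 .. (sat_len * size) / sizeof(cdf_secid_t) - 1. */
static int sid_in_range(cdf_secid_t sid, size_t sat_len, size_t size)
{
	size_t entries = (sat_len * size) / sizeof(cdf_secid_t);
	return sid >= 0 && (size_t)sid < entries;	/* mirrors the >= rejection after the fix */
}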
CVE-2006-3635
|
https://www.cvedetails.com/cve/CVE-2006-3635/
|
CWE-119
|
https://github.com/torvalds/linux/commit/4dcc29e1574d88f4465ba865ed82800032f76418
|
4dcc29e1574d88f4465ba865ed82800032f76418
|
[IA64] Workaround for RSE issue
Problem: An application violating the architectural rules regarding
operation dependencies and having specific Register Stack Engine (RSE)
state at the time of the violation, may result in an illegal operation
fault and invalid RSE state. Such faults may initiate a cascade of
repeated illegal operation faults within OS interruption handlers.
The specific behavior is OS dependent.
Implication: An application causing an illegal operation fault with
specific RSE state may result in a series of illegal operation faults
and an eventual OS stack overflow condition.
Workaround: OS interruption handlers that switch to kernel backing
store implement a check for invalid RSE state to avoid the series
of illegal operation faults.
The core of the workaround is the RSE_WORKAROUND code sequence
inserted into each invocation of the SAVE_MIN_WITH_COVER and
SAVE_MIN_WITH_COVER_R19 macros. This sequence includes hard-coded
constants that depend on the number of stacked physical registers
being 96. The rest of this patch consists of code to disable this
workaround should this not be the case (with the presumption that
if a future Itanium processor increases the number of registers, it
would also remove the need for this patch).
Move the start of the RBS up to a mod32 boundary to avoid some
corner cases.
The dispatch_illegal_op_fault code outgrew the spot it was
squatting in when built with this patch and CONFIG_VIRT_CPU_ACCOUNTING=y
Move it out to the end of the ivt.
Signed-off-by: Tony Luck <tony.luck@intel.com>
|
check_bugs (void)
{
ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
(unsigned long) __end___mckinley_e9_bundles);
}
|
check_bugs (void)
{
ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
(unsigned long) __end___mckinley_e9_bundles);
}
|
C
|
linux
| 0 |
CVE-2018-6066
|
https://www.cvedetails.com/cve/CVE-2018-6066/
|
CWE-200
|
https://github.com/chromium/chromium/commit/fad67a5b73639d7211b24fd9bdb242e82039b765
|
fad67a5b73639d7211b24fd9bdb242e82039b765
|
Check CORS using PassesAccessControlCheck() with supplied SecurityOrigin
Partial revert of https://chromium-review.googlesource.com/535694.
Bug: 799477
Change-Id: I878bb9bcb83afaafe8601293db9aa644fc5929b3
Reviewed-on: https://chromium-review.googlesource.com/898427
Commit-Queue: Hiroshige Hayashizaki <hiroshige@chromium.org>
Reviewed-by: Kouhei Ueno <kouhei@chromium.org>
Reviewed-by: Yutaka Hirano <yhirano@chromium.org>
Reviewed-by: Takeshi Yoshino <tyoshino@chromium.org>
Cr-Commit-Position: refs/heads/master@{#535176}
|
void ImageResource::DestroyDecodedDataIfPossible() {
GetContent()->DestroyDecodedData();
if (GetContent()->HasImage() && !IsUnusedPreload() &&
GetContent()->IsRefetchableDataFromDiskCache()) {
UMA_HISTOGRAM_MEMORY_KB("Memory.Renderer.EstimatedDroppableEncodedSize",
EncodedSize() / 1024);
}
}
|
void ImageResource::DestroyDecodedDataIfPossible() {
GetContent()->DestroyDecodedData();
if (GetContent()->HasImage() && !IsUnusedPreload() &&
GetContent()->IsRefetchableDataFromDiskCache()) {
UMA_HISTOGRAM_MEMORY_KB("Memory.Renderer.EstimatedDroppableEncodedSize",
EncodedSize() / 1024);
}
}
|
C
|
Chrome
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
void dimm_del(GF_Box *s)
{
gf_free((GF_DIMMBox *)s);
}
|
void dimm_del(GF_Box *s)
{
gf_free((GF_DIMMBox *)s);
}
|
C
|
gpac
| 0 |
CVE-2016-3760
|
https://www.cvedetails.com/cve/CVE-2016-3760/
|
CWE-20
|
https://android.googlesource.com/platform/system/bt/+/37c88107679d36c419572732b4af6e18bb2f7dce
|
37c88107679d36c419572732b4af6e18bb2f7dce
|
Add guest mode functionality (2/3)
Add a flag to enable() to start Bluetooth in restricted
mode. In restricted mode, all devices that are paired during
restricted mode are deleted upon leaving restricted mode.
Right now restricted mode is only entered while a guest
user is active.
Bug: 27410683
Change-Id: I8f23d28ef0aa3a8df13d469c73005c8e1b894d19
|
int get_remote_device_property(bt_bdaddr_t *remote_addr, bt_property_type_t type)
{
/* sanity check */
if (interface_ready() == FALSE)
return BT_STATUS_NOT_READY;
return btif_get_remote_device_property(remote_addr, type);
}
|
int get_remote_device_property(bt_bdaddr_t *remote_addr, bt_property_type_t type)
{
/* sanity check */
if (interface_ready() == FALSE)
return BT_STATUS_NOT_READY;
return btif_get_remote_device_property(remote_addr, type);
}
|
C
|
Android
| 0 |
CVE-2018-16513
|
https://www.cvedetails.com/cve/CVE-2018-16513/
|
CWE-704
|
http://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=b326a71659b7837d3acde954b18bda1a6f5e9498
|
b326a71659b7837d3acde954b18bda1a6f5e9498
| null |
static int devicenrange(i_ctx_t * i_ctx_p, ref *space, float *ptr)
{
int i, limit, code;
PS_colour_space_t *cspace;
ref altspace;
code = array_get(imemory, space, 1, &altspace);
if (code < 0)
return code;
code = get_space_object(i_ctx_p, &altspace, &cspace);
if (code < 0)
return code;
code = cspace->numcomponents(i_ctx_p, &altspace, &limit);
if (code < 0)
return code;
for (i = 0;i < limit * 2;i+=2) {
ptr[i] = 0;
ptr[i+1] = 1;
}
return 0;
}
|
static int devicenrange(i_ctx_t * i_ctx_p, ref *space, float *ptr)
{
int i, limit, code;
PS_colour_space_t *cspace;
ref altspace;
code = array_get(imemory, space, 1, &altspace);
if (code < 0)
return code;
code = get_space_object(i_ctx_p, &altspace, &cspace);
if (code < 0)
return code;
code = cspace->numcomponents(i_ctx_p, &altspace, &limit);
if (code < 0)
return code;
for (i = 0;i < limit * 2;i+=2) {
ptr[i] = 0;
ptr[i+1] = 1;
}
return 0;
}
|
C
|
ghostscript
| 0 |
CVE-2018-16075
|
https://www.cvedetails.com/cve/CVE-2018-16075/
|
CWE-254
|
https://github.com/chromium/chromium/commit/d913f72b4875cf0814fc3f03ad7c00642097c4a4
|
d913f72b4875cf0814fc3f03ad7c00642097c4a4
|
Remove RequireCSSExtensionForFile runtime enabled flag.
The feature has long since been stable (since M64) and there doesn't seem
to be a need for this flag.
BUG=788936
Change-Id: I666390b869289c328acb4a2daa5bf4154e1702c0
Reviewed-on: https://chromium-review.googlesource.com/c/1324143
Reviewed-by: Mike West <mkwst@chromium.org>
Reviewed-by: Camille Lamy <clamy@chromium.org>
Commit-Queue: Dave Tapuska <dtapuska@chromium.org>
Cr-Commit-Position: refs/heads/master@{#607329}
|
void WebRuntimeFeatures::EnableInputMultipleFieldsUI(bool enable) {
RuntimeEnabledFeatures::SetInputMultipleFieldsUIEnabled(enable);
}
|
void WebRuntimeFeatures::EnableInputMultipleFieldsUI(bool enable) {
RuntimeEnabledFeatures::SetInputMultipleFieldsUIEnabled(enable);
}
|
C
|
Chrome
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
R=jochen@chromium.org
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void strictFloatAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectV8Internal::strictFloatAttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
static void strictFloatAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectV8Internal::strictFloatAttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
C
|
Chrome
| 0 |
CVE-2019-5790
|
https://www.cvedetails.com/cve/CVE-2019-5790/
|
CWE-190
|
https://github.com/chromium/chromium/commit/88fcb3a6899d77b64195423333ad81a00803f997
|
88fcb3a6899d77b64195423333ad81a00803f997
|
Move user activation check to RemoteFrame::Navigate's callers.
Currently RemoteFrame::Navigate is the only user of
Frame::HasTransientUserActivation that passes a RemoteFrame*, and
it seems wrong because the user activation (user gesture) needed by
the navigation should belong to the LocalFrame that initiated the
navigation.
Follow-up CLs after this one will update UserActivation code in
Frame to take a LocalFrame* instead of a Frame*, and get rid of
redundant IPCs.
Bug: 811414
Change-Id: I771c1694043edb54374a44213d16715d9c7da704
Reviewed-on: https://chromium-review.googlesource.com/914736
Commit-Queue: Mustaq Ahmed <mustaq@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Cr-Commit-Position: refs/heads/master@{#536728}
|
bool HTMLFormElement::IsURLAttribute(const Attribute& attribute) const {
return attribute.GetName() == actionAttr ||
HTMLElement::IsURLAttribute(attribute);
}
|
bool HTMLFormElement::IsURLAttribute(const Attribute& attribute) const {
return attribute.GetName() == actionAttr ||
HTMLElement::IsURLAttribute(attribute);
}
|
C
|
Chrome
| 0 |
CVE-2014-7815
|
https://www.cvedetails.com/cve/CVE-2014-7815/
|
CWE-264
|
https://git.qemu.org/?p=qemu.git;a=commit;h=e6908bfe8e07f2b452e78e677da1b45b1c0f6829
|
e6908bfe8e07f2b452e78e677da1b45b1c0f6829
| null |
long vnc_client_read_buf(VncState *vs, uint8_t *data, size_t datalen)
{
long ret;
#ifdef CONFIG_VNC_TLS
if (vs->tls.session) {
ret = vnc_client_read_tls(&vs->tls.session, data, datalen);
} else {
#ifdef CONFIG_VNC_WS
if (vs->ws_tls.session) {
ret = vnc_client_read_tls(&vs->ws_tls.session, data, datalen);
} else
#endif /* CONFIG_VNC_WS */
#endif /* CONFIG_VNC_TLS */
{
ret = qemu_recv(vs->csock, data, datalen, 0);
}
#ifdef CONFIG_VNC_TLS
}
#endif /* CONFIG_VNC_TLS */
VNC_DEBUG("Read wire %p %zd -> %ld\n", data, datalen, ret);
return vnc_client_io_error(vs, ret, socket_error());
}
|
long vnc_client_read_buf(VncState *vs, uint8_t *data, size_t datalen)
{
long ret;
#ifdef CONFIG_VNC_TLS
if (vs->tls.session) {
ret = vnc_client_read_tls(&vs->tls.session, data, datalen);
} else {
#ifdef CONFIG_VNC_WS
if (vs->ws_tls.session) {
ret = vnc_client_read_tls(&vs->ws_tls.session, data, datalen);
} else
#endif /* CONFIG_VNC_WS */
#endif /* CONFIG_VNC_TLS */
{
ret = qemu_recv(vs->csock, data, datalen, 0);
}
#ifdef CONFIG_VNC_TLS
}
#endif /* CONFIG_VNC_TLS */
VNC_DEBUG("Read wire %p %zd -> %ld\n", data, datalen, ret);
return vnc_client_io_error(vs, ret, socket_error());
}
|
C
|
qemu
| 0 |
CVE-2015-6252
|
https://www.cvedetails.com/cve/CVE-2015-6252/
|
CWE-399
|
https://github.com/torvalds/linux/commit/7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5
|
7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5
|
vhost: actually track log eventfd file
While reviewing vhost log code, I found out that log_file is never
set. Note: I haven't tested the change (QEMU doesn't use LOG_FD yet).
Cc: stable@vger.kernel.org
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
|
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
struct vhost_virtqueue *vq;
int i;
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
GFP_KERNEL);
vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
if (!vq->indirect || !vq->log || !vq->heads)
goto err_nomem;
}
return 0;
err_nomem:
for (; i >= 0; --i)
vhost_vq_free_iovecs(dev->vqs[i]);
return -ENOMEM;
}
|
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
struct vhost_virtqueue *vq;
int i;
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
GFP_KERNEL);
vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
if (!vq->indirect || !vq->log || !vq->heads)
goto err_nomem;
}
return 0;
err_nomem:
for (; i >= 0; --i)
vhost_vq_free_iovecs(dev->vqs[i]);
return -ENOMEM;
}
|
C
|
linux
| 0 |
CVE-2012-2896
|
https://www.cvedetails.com/cve/CVE-2012-2896/
|
CWE-189
|
https://github.com/chromium/chromium/commit/3aad1a37affb1ab70d1897f2b03eb8c077264984
|
3aad1a37affb1ab70d1897f2b03eb8c077264984
|
Fix SafeAdd and SafeMultiply
BUG=145648,145544
Review URL: https://chromiumcodereview.appspot.com/10916165
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@155478 0039d316-1c4b-4281-b951-d872f2087c98
|
void GLES2DecoderImpl::DoVertexAttrib4fv(GLuint index, const GLfloat* v) {
VertexAttribManager::VertexAttribInfo* info =
vertex_attrib_manager_->GetVertexAttribInfo(index);
if (!info) {
SetGLError(GL_INVALID_VALUE, "glVertexAttrib4fv", "index out of range");
return;
}
VertexAttribManager::VertexAttribInfo::Vec4 value;
value.v[0] = v[0];
value.v[1] = v[1];
value.v[2] = v[2];
value.v[3] = v[3];
info->set_value(value);
glVertexAttrib4fv(index, v);
}
|
void GLES2DecoderImpl::DoVertexAttrib4fv(GLuint index, const GLfloat* v) {
VertexAttribManager::VertexAttribInfo* info =
vertex_attrib_manager_->GetVertexAttribInfo(index);
if (!info) {
SetGLError(GL_INVALID_VALUE, "glVertexAttrib4fv", "index out of range");
return;
}
VertexAttribManager::VertexAttribInfo::Vec4 value;
value.v[0] = v[0];
value.v[1] = v[1];
value.v[2] = v[2];
value.v[3] = v[3];
info->set_value(value);
glVertexAttrib4fv(index, v);
}
|
C
|
Chrome
| 0 |
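The commit title above names SafeAdd and SafeMultiply; the fixed helpers themselves are not shown in this record, but the standard unsigned overflow-check pattern they refer to looks like the following sketch (illustrative only, not the Chromium implementation):

#include <stdbool.h>
#include <stdint.h>

static bool safe_add(uint32_t a, uint32_t b, uint32_t *result)
{
	if (a > UINT32_MAX - b)
		return false;		/* a + b would wrap around */
	*result = a + b;
	return true;
}

static bool safe_multiply(uint32_t a, uint32_t b, uint32_t *result)
{
	if (a != 0 && b > UINT32_MAX / a)
		return false;		/* a * b would wrap around */
	*result = a * b;
	return true;
}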
CVE-2011-4131
|
https://www.cvedetails.com/cve/CVE-2011-4131/
|
CWE-189
|
https://github.com/torvalds/linux/commit/bf118a342f10dafe44b14451a1392c3254629a1f
|
bf118a342f10dafe44b14451a1392c3254629a1f
|
NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrary
sized bitmap in an FATTR4_WORD0_ACL request. Replace using the
nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length to the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: stable@kernel.org
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
|
static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
flavor->gss.sec_oid4.len = be32_to_cpup(p);
if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN)
goto out_err;
p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len);
if (unlikely(!p))
goto out_overflow;
memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len);
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
flavor->gss.qop4 = be32_to_cpup(p++);
flavor->gss.service = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
out_err:
return -EINVAL;
}
|
static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
flavor->gss.sec_oid4.len = be32_to_cpup(p);
if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN)
goto out_err;
p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len);
if (unlikely(!p))
goto out_overflow;
memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len);
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
flavor->gss.qop4 = be32_to_cpup(p++);
flavor->gss.service = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
out_err:
return -EINVAL;
}
|
C
|
linux
| 0 |
CVE-2011-4621
|
https://www.cvedetails.com/cve/CVE-2011-4621/
| null |
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it successfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reported-by: Bjoern B. Brandenburg <bbb.lst@gmail.com>
Tested-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: stable@kernel.org
LKML-Reference: <1291802742.1417.9.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
static void update_cpu_load_active(struct rq *this_rq)
{
update_cpu_load(this_rq);
calc_load_account_active(this_rq);
}
|
static void update_cpu_load_active(struct rq *this_rq)
{
update_cpu_load(this_rq);
calc_load_account_active(this_rq);
}
|
C
|
linux
| 0 |
CVE-2010-3702
|
https://www.cvedetails.com/cve/CVE-2010-3702/
|
CWE-20
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=e853106b58d6b4b0467dbd6436c9bb1cfbd372cf
|
e853106b58d6b4b0467dbd6436c9bb1cfbd372cf
| null |
void Gfx::doRadialShFill(GfxRadialShading *shading) {
double xMin, yMin, xMax, yMax;
double x0, y0, r0, x1, y1, r1, t0, t1;
int nComps;
GfxColor colorA, colorB;
double xa, ya, xb, yb, ra, rb;
double ta, tb, sa, sb;
double sz, xz, yz, sMin, sMax;
GBool enclosed;
int ia, ib, k, n;
double *ctm;
double theta, alpha, angle, t;
GBool needExtend = gTrue;
shading->getCoords(&x0, &y0, &r0, &x1, &y1, &r1);
t0 = shading->getDomain0();
t1 = shading->getDomain1();
nComps = shading->getColorSpace()->getNComps();
if (x0 == x1 && y0 == y1) {
enclosed = gTrue;
theta = 0; // make gcc happy
sz = 0; // make gcc happy
} else if (r0 == r1) {
enclosed = gFalse;
theta = 0;
sz = 0; // make gcc happy
} else {
sz = -r0 / (r1 - r0);
xz = x0 + sz * (x1 - x0);
yz = y0 + sz * (y1 - y0);
enclosed = (xz - x0) * (xz - x0) + (yz - y0) * (yz - y0) <= r0 * r0;
theta = asin(r0 / sqrt((x0 - xz) * (x0 - xz) + (y0 - yz) * (y0 - yz)));
if (r0 > r1) {
theta = -theta;
}
}
if (enclosed) {
alpha = 0;
} else {
alpha = atan2(y1 - y0, x1 - x0);
}
state->getUserClipBBox(&xMin, &yMin, &xMax, &yMax);
if (enclosed) {
sMin = 0;
sMax = 1;
} else {
sMin = 1;
sMax = 0;
if ((x1 + r1) - (x0 + r0) != 0) {
sa = (xMin - (x0 + r0)) / ((x1 + r1) - (x0 + r0));
if (sa < sMin) {
sMin = sa;
} else if (sa > sMax) {
sMax = sa;
}
}
if ((x1 - r1) - (x0 - r0) != 0) {
sa = (xMax - (x0 - r0)) / ((x1 - r1) - (x0 - r0));
if (sa < sMin) {
sMin = sa;
} else if (sa > sMax) {
sMax = sa;
}
}
if ((y1 + r1) - (y0 + r0) != 0) {
sa = (yMin - (y0 + r0)) / ((y1 + r1) - (y0 + r0));
if (sa < sMin) {
sMin = sa;
} else if (sa > sMax) {
sMax = sa;
}
}
if ((y1 - r1) - (y0 - r0) != 0) {
sa = (yMax - (y0 - r0)) / ((y1 - r1) - (y0 - r0));
if (sa < sMin) {
sMin = sa;
} else if (sa > sMax) {
sMax = sa;
}
}
if (r0 < r1) {
if (sMin < sz) {
sMin = sz;
}
} else if (r0 > r1) {
if (sMax > sz) {
sMax = sz;
}
}
if (!shading->getExtend0() && sMin < 0) {
sMin = 0;
}
if (!shading->getExtend1() && sMax > 1) {
sMax = 1;
}
}
if (out->useShadedFills() &&
out->radialShadedFill(state, shading, sMin, sMax)) {
return;
}
ctm = state->getCTM();
t = fabs(ctm[0]);
if (fabs(ctm[1]) > t) {
t = fabs(ctm[1]);
}
if (fabs(ctm[2]) > t) {
t = fabs(ctm[2]);
}
if (fabs(ctm[3]) > t) {
t = fabs(ctm[3]);
}
if (r0 > r1) {
t *= r0;
} else {
t *= r1;
}
if (t < 1) {
n = 3;
} else {
n = (int)(M_PI / acos(1 - 0.1 / t));
if (n < 3) {
n = 3;
} else if (n > 200) {
n = 200;
}
}
ia = 0;
sa = sMin;
ta = t0 + sa * (t1 - t0);
xa = x0 + sa * (x1 - x0);
ya = y0 + sa * (y1 - y0);
ra = r0 + sa * (r1 - r0);
if (ta < t0) {
shading->getColor(t0, &colorA);
} else if (ta > t1) {
shading->getColor(t1, &colorA);
} else {
shading->getColor(ta, &colorA);
}
needExtend = !out->radialShadedSupportExtend(state, shading);
while (ia < radialMaxSplits) {
ib = radialMaxSplits;
sb = sMax;
tb = t0 + sb * (t1 - t0);
if (tb < t0) {
shading->getColor(t0, &colorB);
} else if (tb > t1) {
shading->getColor(t1, &colorB);
} else {
shading->getColor(tb, &colorB);
}
while (ib - ia > 1) {
if (isSameGfxColor(colorB, colorA, nComps, radialColorDelta) && ib < radialMaxSplits) {
GfxColor colorC;
int ic = (ia + ib) / 2;
double sc = sMin + ((double)ic / (double)radialMaxSplits) * (sMax - sMin);
double tc = t0 + sc * (t1 - t0);
if (tc < t0) {
shading->getColor(t0, &colorC);
} else if (tc > t1) {
shading->getColor(t1, &colorC);
} else {
shading->getColor(tc, &colorC);
}
if (isSameGfxColor(colorC, colorA, nComps, radialColorDelta))
break;
}
ib = (ia + ib) / 2;
sb = sMin + ((double)ib / (double)radialMaxSplits) * (sMax - sMin);
tb = t0 + sb * (t1 - t0);
if (tb < t0) {
shading->getColor(t0, &colorB);
} else if (tb > t1) {
shading->getColor(t1, &colorB);
} else {
shading->getColor(tb, &colorB);
}
}
xb = x0 + sb * (x1 - x0);
yb = y0 + sb * (y1 - y0);
rb = r0 + sb * (r1 - r0);
for (k = 0; k < nComps; ++k) {
colorA.c[k] = (colorA.c[k] + colorB.c[k]) / 2;
}
state->setFillColor(&colorA);
if (out->useFillColorStop())
out->updateFillColorStop(state, (sa - sMin)/(sMax - sMin));
else
out->updateFillColor(state);
if (needExtend) {
if (enclosed) {
state->moveTo(xa + ra, ya);
for (k = 1; k < n; ++k) {
angle = ((double)k / (double)n) * 2 * M_PI;
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
state->moveTo(xb + rb, yb);
for (k = 1; k < n; ++k) {
angle = -((double)k / (double)n) * 2 * M_PI;
state->lineTo(xb + rb * cos(angle), yb + rb * sin(angle));
}
state->closePath();
} else {
state->moveTo(xa + ra * cos(alpha + theta + 0.5 * M_PI),
ya + ra * sin(alpha + theta + 0.5 * M_PI));
for (k = 0; k < n; ++k) {
angle = alpha + theta + 0.5 * M_PI
- ((double)k / (double)n) * (2 * theta + M_PI);
state->lineTo(xb + rb * cos(angle), yb + rb * sin(angle));
}
for (k = 0; k < n; ++k) {
angle = alpha - theta - 0.5 * M_PI
+ ((double)k / (double)n) * (2 * theta - M_PI);
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
state->moveTo(xa + ra * cos(alpha + theta + 0.5 * M_PI),
ya + ra * sin(alpha + theta + 0.5 * M_PI));
for (k = 0; k < n; ++k) {
angle = alpha + theta + 0.5 * M_PI
+ ((double)k / (double)n) * (-2 * theta + M_PI);
state->lineTo(xb + rb * cos(angle), yb + rb * sin(angle));
}
for (k = 0; k < n; ++k) {
angle = alpha - theta - 0.5 * M_PI
+ ((double)k / (double)n) * (2 * theta + M_PI);
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
}
}
if (!out->useFillColorStop()) {
if (!contentIsHidden())
out->fill(state);
state->clearPath();
}
ia = ib;
sa = sb;
ta = tb;
xa = xb;
ya = yb;
ra = rb;
colorA = colorB;
}
if (out->useFillColorStop()) {
state->setFillColor(&colorA);
out->updateFillColorStop(state, (sb - sMin)/(sMax - sMin));
state->moveTo(xMin, yMin);
state->lineTo(xMin, yMax);
state->lineTo(xMax, yMax);
state->lineTo(xMax, yMin);
state->closePath();
if (!contentIsHidden())
out->fill(state);
state->clearPath();
}
if (!needExtend)
return;
if (enclosed) {
if ((shading->getExtend0() && r0 <= r1) ||
(shading->getExtend1() && r1 < r0)) {
if (r0 <= r1) {
ta = t0;
ra = r0;
xa = x0;
ya = y0;
} else {
ta = t1;
ra = r1;
xa = x1;
ya = y1;
}
shading->getColor(ta, &colorA);
state->setFillColor(&colorA);
out->updateFillColor(state);
state->moveTo(xa + ra, ya);
for (k = 1; k < n; ++k) {
angle = ((double)k / (double)n) * 2 * M_PI;
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
if (!contentIsHidden())
out->fill(state);
state->clearPath();
}
if ((shading->getExtend0() && r0 > r1) ||
(shading->getExtend1() && r1 >= r0)) {
if (r0 > r1) {
ta = t0;
ra = r0;
xa = x0;
ya = y0;
} else {
ta = t1;
ra = r1;
xa = x1;
ya = y1;
}
shading->getColor(ta, &colorA);
state->setFillColor(&colorA);
out->updateFillColor(state);
state->moveTo(xMin, yMin);
state->lineTo(xMin, yMax);
state->lineTo(xMax, yMax);
state->lineTo(xMax, yMin);
state->closePath();
state->moveTo(xa + ra, ya);
for (k = 1; k < n; ++k) {
angle = ((double)k / (double)n) * 2 * M_PI;
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
if (!contentIsHidden())
out->fill(state);
state->clearPath();
}
}
}
|
void Gfx::doRadialShFill(GfxRadialShading *shading) {
double xMin, yMin, xMax, yMax;
double x0, y0, r0, x1, y1, r1, t0, t1;
int nComps;
GfxColor colorA, colorB;
double xa, ya, xb, yb, ra, rb;
double ta, tb, sa, sb;
double sz, xz, yz, sMin, sMax;
GBool enclosed;
int ia, ib, k, n;
double *ctm;
double theta, alpha, angle, t;
GBool needExtend = gTrue;
shading->getCoords(&x0, &y0, &r0, &x1, &y1, &r1);
t0 = shading->getDomain0();
t1 = shading->getDomain1();
nComps = shading->getColorSpace()->getNComps();
if (x0 == x1 && y0 == y1) {
enclosed = gTrue;
theta = 0; // make gcc happy
sz = 0; // make gcc happy
} else if (r0 == r1) {
enclosed = gFalse;
theta = 0;
sz = 0; // make gcc happy
} else {
sz = -r0 / (r1 - r0);
xz = x0 + sz * (x1 - x0);
yz = y0 + sz * (y1 - y0);
enclosed = (xz - x0) * (xz - x0) + (yz - y0) * (yz - y0) <= r0 * r0;
theta = asin(r0 / sqrt((x0 - xz) * (x0 - xz) + (y0 - yz) * (y0 - yz)));
if (r0 > r1) {
theta = -theta;
}
}
if (enclosed) {
alpha = 0;
} else {
alpha = atan2(y1 - y0, x1 - x0);
}
state->getUserClipBBox(&xMin, &yMin, &xMax, &yMax);
if (enclosed) {
sMin = 0;
sMax = 1;
} else {
sMin = 1;
sMax = 0;
if ((x1 + r1) - (x0 + r0) != 0) {
sa = (xMin - (x0 + r0)) / ((x1 + r1) - (x0 + r0));
if (sa < sMin) {
sMin = sa;
} else if (sa > sMax) {
sMax = sa;
}
}
if ((x1 - r1) - (x0 - r0) != 0) {
sa = (xMax - (x0 - r0)) / ((x1 - r1) - (x0 - r0));
if (sa < sMin) {
sMin = sa;
} else if (sa > sMax) {
sMax = sa;
}
}
if ((y1 + r1) - (y0 + r0) != 0) {
sa = (yMin - (y0 + r0)) / ((y1 + r1) - (y0 + r0));
if (sa < sMin) {
sMin = sa;
} else if (sa > sMax) {
sMax = sa;
}
}
if ((y1 - r1) - (y0 - r0) != 0) {
sa = (yMax - (y0 - r0)) / ((y1 - r1) - (y0 - r0));
if (sa < sMin) {
sMin = sa;
} else if (sa > sMax) {
sMax = sa;
}
}
if (r0 < r1) {
if (sMin < sz) {
sMin = sz;
}
} else if (r0 > r1) {
if (sMax > sz) {
sMax = sz;
}
}
if (!shading->getExtend0() && sMin < 0) {
sMin = 0;
}
if (!shading->getExtend1() && sMax > 1) {
sMax = 1;
}
}
if (out->useShadedFills() &&
out->radialShadedFill(state, shading, sMin, sMax)) {
return;
}
ctm = state->getCTM();
t = fabs(ctm[0]);
if (fabs(ctm[1]) > t) {
t = fabs(ctm[1]);
}
if (fabs(ctm[2]) > t) {
t = fabs(ctm[2]);
}
if (fabs(ctm[3]) > t) {
t = fabs(ctm[3]);
}
if (r0 > r1) {
t *= r0;
} else {
t *= r1;
}
if (t < 1) {
n = 3;
} else {
n = (int)(M_PI / acos(1 - 0.1 / t));
if (n < 3) {
n = 3;
} else if (n > 200) {
n = 200;
}
}
ia = 0;
sa = sMin;
ta = t0 + sa * (t1 - t0);
xa = x0 + sa * (x1 - x0);
ya = y0 + sa * (y1 - y0);
ra = r0 + sa * (r1 - r0);
if (ta < t0) {
shading->getColor(t0, &colorA);
} else if (ta > t1) {
shading->getColor(t1, &colorA);
} else {
shading->getColor(ta, &colorA);
}
needExtend = !out->radialShadedSupportExtend(state, shading);
while (ia < radialMaxSplits) {
ib = radialMaxSplits;
sb = sMax;
tb = t0 + sb * (t1 - t0);
if (tb < t0) {
shading->getColor(t0, &colorB);
} else if (tb > t1) {
shading->getColor(t1, &colorB);
} else {
shading->getColor(tb, &colorB);
}
while (ib - ia > 1) {
if (isSameGfxColor(colorB, colorA, nComps, radialColorDelta) && ib < radialMaxSplits) {
GfxColor colorC;
int ic = (ia + ib) / 2;
double sc = sMin + ((double)ic / (double)radialMaxSplits) * (sMax - sMin);
double tc = t0 + sc * (t1 - t0);
if (tc < t0) {
shading->getColor(t0, &colorC);
} else if (tc > t1) {
shading->getColor(t1, &colorC);
} else {
shading->getColor(tc, &colorC);
}
if (isSameGfxColor(colorC, colorA, nComps, radialColorDelta))
break;
}
ib = (ia + ib) / 2;
sb = sMin + ((double)ib / (double)radialMaxSplits) * (sMax - sMin);
tb = t0 + sb * (t1 - t0);
if (tb < t0) {
shading->getColor(t0, &colorB);
} else if (tb > t1) {
shading->getColor(t1, &colorB);
} else {
shading->getColor(tb, &colorB);
}
}
xb = x0 + sb * (x1 - x0);
yb = y0 + sb * (y1 - y0);
rb = r0 + sb * (r1 - r0);
for (k = 0; k < nComps; ++k) {
colorA.c[k] = (colorA.c[k] + colorB.c[k]) / 2;
}
state->setFillColor(&colorA);
if (out->useFillColorStop())
out->updateFillColorStop(state, (sa - sMin)/(sMax - sMin));
else
out->updateFillColor(state);
if (needExtend) {
if (enclosed) {
state->moveTo(xa + ra, ya);
for (k = 1; k < n; ++k) {
angle = ((double)k / (double)n) * 2 * M_PI;
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
state->moveTo(xb + rb, yb);
for (k = 1; k < n; ++k) {
angle = -((double)k / (double)n) * 2 * M_PI;
state->lineTo(xb + rb * cos(angle), yb + rb * sin(angle));
}
state->closePath();
} else {
state->moveTo(xa + ra * cos(alpha + theta + 0.5 * M_PI),
ya + ra * sin(alpha + theta + 0.5 * M_PI));
for (k = 0; k < n; ++k) {
angle = alpha + theta + 0.5 * M_PI
- ((double)k / (double)n) * (2 * theta + M_PI);
state->lineTo(xb + rb * cos(angle), yb + rb * sin(angle));
}
for (k = 0; k < n; ++k) {
angle = alpha - theta - 0.5 * M_PI
+ ((double)k / (double)n) * (2 * theta - M_PI);
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
state->moveTo(xa + ra * cos(alpha + theta + 0.5 * M_PI),
ya + ra * sin(alpha + theta + 0.5 * M_PI));
for (k = 0; k < n; ++k) {
angle = alpha + theta + 0.5 * M_PI
+ ((double)k / (double)n) * (-2 * theta + M_PI);
state->lineTo(xb + rb * cos(angle), yb + rb * sin(angle));
}
for (k = 0; k < n; ++k) {
angle = alpha - theta - 0.5 * M_PI
+ ((double)k / (double)n) * (2 * theta + M_PI);
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
}
}
if (!out->useFillColorStop()) {
if (!contentIsHidden())
out->fill(state);
state->clearPath();
}
ia = ib;
sa = sb;
ta = tb;
xa = xb;
ya = yb;
ra = rb;
colorA = colorB;
}
if (out->useFillColorStop()) {
state->setFillColor(&colorA);
out->updateFillColorStop(state, (sb - sMin)/(sMax - sMin));
state->moveTo(xMin, yMin);
state->lineTo(xMin, yMax);
state->lineTo(xMax, yMax);
state->lineTo(xMax, yMin);
state->closePath();
if (!contentIsHidden())
out->fill(state);
state->clearPath();
}
if (!needExtend)
return;
if (enclosed) {
if ((shading->getExtend0() && r0 <= r1) ||
(shading->getExtend1() && r1 < r0)) {
if (r0 <= r1) {
ta = t0;
ra = r0;
xa = x0;
ya = y0;
} else {
ta = t1;
ra = r1;
xa = x1;
ya = y1;
}
shading->getColor(ta, &colorA);
state->setFillColor(&colorA);
out->updateFillColor(state);
state->moveTo(xa + ra, ya);
for (k = 1; k < n; ++k) {
angle = ((double)k / (double)n) * 2 * M_PI;
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
if (!contentIsHidden())
out->fill(state);
state->clearPath();
}
if ((shading->getExtend0() && r0 > r1) ||
(shading->getExtend1() && r1 >= r0)) {
if (r0 > r1) {
ta = t0;
ra = r0;
xa = x0;
ya = y0;
} else {
ta = t1;
ra = r1;
xa = x1;
ya = y1;
}
shading->getColor(ta, &colorA);
state->setFillColor(&colorA);
out->updateFillColor(state);
state->moveTo(xMin, yMin);
state->lineTo(xMin, yMax);
state->lineTo(xMax, yMax);
state->lineTo(xMax, yMin);
state->closePath();
state->moveTo(xa + ra, ya);
for (k = 1; k < n; ++k) {
angle = ((double)k / (double)n) * 2 * M_PI;
state->lineTo(xa + ra * cos(angle), ya + ra * sin(angle));
}
state->closePath();
if (!contentIsHidden())
out->fill(state);
state->clearPath();
}
}
}
|
CPP
|
poppler
| 0 |
CVE-2019-5790
|
https://www.cvedetails.com/cve/CVE-2019-5790/
|
CWE-190
|
https://github.com/chromium/chromium/commit/88fcb3a6899d77b64195423333ad81a00803f997
|
88fcb3a6899d77b64195423333ad81a00803f997
|
Move user activation check to RemoteFrame::Navigate's callers.
Currently RemoteFrame::Navigate is the only user of
Frame::HasTransientUserActivation that passes a RemoteFrame*, and
it seems wrong because the user activation (user gesture) needed by
the navigation should belong to the LocalFrame that initiated the
navigation.
Follow-up CLs after this one will update UserActivation code in
Frame to take a LocalFrame* instead of a Frame*, and get rid of
redundant IPCs.
Bug: 811414
Change-Id: I771c1694043edb54374a44213d16715d9c7da704
Reviewed-on: https://chromium-review.googlesource.com/914736
Commit-Queue: Mustaq Ahmed <mustaq@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Cr-Commit-Position: refs/heads/master@{#536728}
|
void RemoteFrame::Navigate(const FrameLoadRequest& passed_request) {
FrameLoadRequest frame_request(passed_request);
FrameLoader::SetReferrerForFrameRequest(frame_request);
FrameLoader::UpgradeInsecureRequest(frame_request.GetResourceRequest(),
frame_request.OriginDocument());
Client()->Navigate(frame_request.GetResourceRequest(),
frame_request.ReplacesCurrentItem());
}
|
void RemoteFrame::Navigate(const FrameLoadRequest& passed_request) {
FrameLoadRequest frame_request(passed_request);
FrameLoader::SetReferrerForFrameRequest(frame_request);
FrameLoader::UpgradeInsecureRequest(frame_request.GetResourceRequest(),
frame_request.OriginDocument());
frame_request.GetResourceRequest().SetHasUserGesture(
Frame::HasTransientUserActivation(this));
Client()->Navigate(frame_request.GetResourceRequest(),
frame_request.ReplacesCurrentItem());
}
|
C
|
Chrome
| 1 |
CVE-2018-6076
|
https://www.cvedetails.com/cve/CVE-2018-6076/
|
CWE-79
|
https://github.com/chromium/chromium/commit/f8f6ed59949be4451ee2f5443d8a313f102fde60
|
f8f6ed59949be4451ee2f5443d8a313f102fde60
|
Percent-encode UTF8 characters in URL fragment identifiers.
This brings us into line with Firefox, Safari, and the spec.
Bug: 758523
Change-Id: I7e354ab441222d9fd08e45f0e70f91ad4e35fafe
Reviewed-on: https://chromium-review.googlesource.com/668363
Commit-Queue: Mike West <mkwst@chromium.org>
Reviewed-by: Jochen Eisinger <jochen@chromium.org>
Reviewed-by: Andy Paicu <andypaicu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#507481}
|
bool CanonicalizeUserInfo(const char* username_source,
const Component& username,
const char* password_source,
const Component& password,
CanonOutput* output,
Component* out_username,
Component* out_password) {
return DoUserInfo<char, unsigned char>(
username_source, username, password_source, password,
output, out_username, out_password);
}
|
bool CanonicalizeUserInfo(const char* username_source,
const Component& username,
const char* password_source,
const Component& password,
CanonOutput* output,
Component* out_username,
Component* out_password) {
return DoUserInfo<char, unsigned char>(
username_source, username, password_source, password,
output, out_username, out_password);
}
|
C
|
Chrome
| 0 |
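On the percent-encoding change above: encoding a UTF-8 byte for a URL fragment means emitting '%' followed by two uppercase hex digits (e.g. the euro sign U+20AC, UTF-8 bytes E2 82 AC, becomes %E2%82%AC). A minimal sketch of the per-byte step — illustrative only, not Chromium's canonicalizer:

static void percent_encode_byte(unsigned char c, char out[4])
{
	static const char hex[] = "0123456789ABCDEF";
	out[0] = '%';
	out[1] = hex[c >> 4];	/* high nibble */
	out[2] = hex[c & 0x0F];	/* low nibble */
	out[3] = '\0';
}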
CVE-2017-11144
|
https://www.cvedetails.com/cve/CVE-2017-11144/
|
CWE-754
|
https://git.php.net/?p=php-src.git;a=commit;h=91826a311dd37f4c4e5d605fa7af331e80ddd4c3
|
91826a311dd37f4c4e5d605fa7af331e80ddd4c3
| null |
PHP_FUNCTION(openssl_pkey_export_to_file)
{
struct php_x509_request req;
zval * zpkey, * args = NULL;
char * passphrase = NULL;
size_t passphrase_len = 0;
char * filename = NULL;
size_t filename_len = 0;
zend_resource *key_resource = NULL;
int pem_write = 0;
EVP_PKEY * key;
BIO * bio_out = NULL;
const EVP_CIPHER * cipher;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "zp|s!a!", &zpkey, &filename, &filename_len, &passphrase, &passphrase_len, &args) == FAILURE) {
return;
}
RETVAL_FALSE;
PHP_OPENSSL_CHECK_SIZE_T_TO_INT(passphrase_len, passphrase);
key = php_openssl_evp_from_zval(zpkey, 0, passphrase, passphrase_len, 0, &key_resource);
if (key == NULL) {
php_error_docref(NULL, E_WARNING, "cannot get key from parameter 1");
RETURN_FALSE;
}
if (php_openssl_open_base_dir_chk(filename)) {
RETURN_FALSE;
}
PHP_SSL_REQ_INIT(&req);
if (PHP_SSL_REQ_PARSE(&req, args) == SUCCESS) {
bio_out = BIO_new_file(filename, "w");
if (bio_out == NULL) {
php_openssl_store_errors();
goto clean_exit;
}
if (passphrase && req.priv_key_encrypt) {
if (req.priv_key_encrypt_cipher) {
cipher = req.priv_key_encrypt_cipher;
} else {
cipher = (EVP_CIPHER *) EVP_des_ede3_cbc();
}
} else {
cipher = NULL;
}
switch (EVP_PKEY_base_id(key)) {
#ifdef HAVE_EVP_PKEY_EC
case EVP_PKEY_EC:
pem_write = PEM_write_bio_ECPrivateKey(bio_out, EVP_PKEY_get0_EC_KEY(key), cipher, (unsigned char *)passphrase, (int)passphrase_len, NULL, NULL);
break;
#endif
default:
pem_write = PEM_write_bio_PrivateKey(bio_out, key, cipher, (unsigned char *)passphrase, (int)passphrase_len, NULL, NULL);
break;
}
if (pem_write) {
/* Success!
* If returning the output as a string, do so now */
RETVAL_TRUE;
} else {
php_openssl_store_errors();
}
}
clean_exit:
PHP_SSL_REQ_DISPOSE(&req);
if (key_resource == NULL && key) {
EVP_PKEY_free(key);
}
if (bio_out) {
BIO_free(bio_out);
}
}
|
PHP_FUNCTION(openssl_pkey_export_to_file)
{
struct php_x509_request req;
zval * zpkey, * args = NULL;
char * passphrase = NULL;
size_t passphrase_len = 0;
char * filename = NULL;
size_t filename_len = 0;
zend_resource *key_resource = NULL;
int pem_write = 0;
EVP_PKEY * key;
BIO * bio_out = NULL;
const EVP_CIPHER * cipher;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "zp|s!a!", &zpkey, &filename, &filename_len, &passphrase, &passphrase_len, &args) == FAILURE) {
return;
}
RETVAL_FALSE;
PHP_OPENSSL_CHECK_SIZE_T_TO_INT(passphrase_len, passphrase);
key = php_openssl_evp_from_zval(zpkey, 0, passphrase, passphrase_len, 0, &key_resource);
if (key == NULL) {
php_error_docref(NULL, E_WARNING, "cannot get key from parameter 1");
RETURN_FALSE;
}
if (php_openssl_open_base_dir_chk(filename)) {
RETURN_FALSE;
}
PHP_SSL_REQ_INIT(&req);
if (PHP_SSL_REQ_PARSE(&req, args) == SUCCESS) {
bio_out = BIO_new_file(filename, "w");
if (bio_out == NULL) {
php_openssl_store_errors();
goto clean_exit;
}
if (passphrase && req.priv_key_encrypt) {
if (req.priv_key_encrypt_cipher) {
cipher = req.priv_key_encrypt_cipher;
} else {
cipher = (EVP_CIPHER *) EVP_des_ede3_cbc();
}
} else {
cipher = NULL;
}
switch (EVP_PKEY_base_id(key)) {
#ifdef HAVE_EVP_PKEY_EC
case EVP_PKEY_EC:
pem_write = PEM_write_bio_ECPrivateKey(bio_out, EVP_PKEY_get0_EC_KEY(key), cipher, (unsigned char *)passphrase, (int)passphrase_len, NULL, NULL);
break;
#endif
default:
pem_write = PEM_write_bio_PrivateKey(bio_out, key, cipher, (unsigned char *)passphrase, (int)passphrase_len, NULL, NULL);
break;
}
if (pem_write) {
/* Success!
* If returning the output as a string, do so now */
RETVAL_TRUE;
} else {
php_openssl_store_errors();
}
}
clean_exit:
PHP_SSL_REQ_DISPOSE(&req);
if (key_resource == NULL && key) {
EVP_PKEY_free(key);
}
if (bio_out) {
BIO_free(bio_out);
}
}
|
C
|
php
| 0 |
CVE-2017-8924
|
https://www.cvedetails.com/cve/CVE-2017-8924/
|
CWE-191
|
https://github.com/torvalds/linux/commit/654b404f2a222f918af9b0cd18ad469d0c941a8e
|
654b404f2a222f918af9b0cd18ad469d0c941a8e
|
USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <stable@vger.kernel.org> # 2.6.30
Signed-off-by: Johan Hovold <johan@kernel.org>
|
static int tx_active(struct edgeport_port *port)
{
int status;
struct out_endpoint_desc_block *oedb;
__u8 *lsr;
int bytes_left = 0;
oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
if (!oedb)
return -ENOMEM;
/*
* Sigh, that's right, just one byte, as not all platforms can
* do DMA from stack
*/
lsr = kmalloc(1, GFP_KERNEL);
if (!lsr) {
kfree(oedb);
return -ENOMEM;
}
/* Read the DMA Count Registers */
status = read_ram(port->port->serial->dev, port->dma_address,
sizeof(*oedb), (void *)oedb);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - XByteCount 0x%X\n", __func__, oedb->XByteCount);
/* and the LSR */
status = read_ram(port->port->serial->dev,
port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - LSR = 0x%X\n", __func__, *lsr);
/* If either buffer has data or we are transmitting then return TRUE */
if ((oedb->XByteCount & 0x80) != 0)
bytes_left += 64;
if ((*lsr & UMP_UART_LSR_TX_MASK) == 0)
bytes_left += 1;
/* We return Not Active if we get any kind of error */
exit_is_tx_active:
dev_dbg(&port->port->dev, "%s - return %d\n", __func__, bytes_left);
kfree(lsr);
kfree(oedb);
return bytes_left;
}
|
static int tx_active(struct edgeport_port *port)
{
int status;
struct out_endpoint_desc_block *oedb;
__u8 *lsr;
int bytes_left = 0;
oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
if (!oedb)
return -ENOMEM;
/*
* Sigh, that's right, just one byte, as not all platforms can
* do DMA from stack
*/
lsr = kmalloc(1, GFP_KERNEL);
if (!lsr) {
kfree(oedb);
return -ENOMEM;
}
/* Read the DMA Count Registers */
status = read_ram(port->port->serial->dev, port->dma_address,
sizeof(*oedb), (void *)oedb);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - XByteCount 0x%X\n", __func__, oedb->XByteCount);
/* and the LSR */
status = read_ram(port->port->serial->dev,
port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - LSR = 0x%X\n", __func__, *lsr);
/* If either buffer has data or we are transmitting then return TRUE */
if ((oedb->XByteCount & 0x80) != 0)
bytes_left += 64;
if ((*lsr & UMP_UART_LSR_TX_MASK) == 0)
bytes_left += 1;
/* We return Not Active if we get any kind of error */
exit_is_tx_active:
dev_dbg(&port->port->dev, "%s - return %d\n", __func__, bytes_left);
kfree(lsr);
kfree(oedb);
return bytes_left;
}
|
C
|
linux
| 0 |
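For the CVE-2017-8924 row above, the function shown (tx_active) is unchanged context; per the commit message the actual fix adds a sanity check to the bulk-in completion handler so that urb->actual_length can no longer underflow the driver's "length - 2" arithmetic. A hedged sketch of that check, with the kernel scaffolding elided and the function name illustrative:

/* Hedged sketch, not the literal upstream hunk: reject transfers too
 * short to hold the driver's 2-byte header before any "length - 2"
 * computation, which would otherwise wrap to a huge unsigned count and
 * leak ~128 kB past the URB transfer buffer to user space. */
static void bulk_in_callback_sketch(struct urb *urb)
{
        int length = urb->actual_length;

        if (length < 2)
                return;          /* incomplete packet: drop it */

        /* ... interpret the 2-byte header in urb->transfer_buffer,
         * then pass length - 2 payload bytes up the TTY layer ... */
}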
CVE-2014-2673
|
https://www.cvedetails.com/cve/CVE-2014-2673/
|
CWE-20
|
https://github.com/torvalds/linux/commit/621b5060e823301d0cba4cb52a7ee3491922d291
|
621b5060e823301d0cba4cb52a7ee3491922d291
|
powerpc/tm: Fix crash when forking inside a transaction
When we fork/clone we currently don't copy any of the TM state to the new
thread. This results in a TM bad thing (program check) when the new process is
switched in as the kernel does a tmrechkpt with TEXASR FS not set. Also, since
R1 is from userspace, we trigger the bad kernel stack pointer detection. So we
end up with something like this:
Bad kernel stack pointer 0 at c0000000000404fc
cpu 0x2: Vector: 700 (Program Check) at [c00000003ffefd40]
pc: c0000000000404fc: restore_gprs+0xc0/0x148
lr: 0000000000000000
sp: 0
msr: 9000000100201030
current = 0xc000001dd1417c30
paca = 0xc00000000fe00800 softe: 0 irq_happened: 0x01
pid = 0, comm = swapper/2
WARNING: exception is not recoverable, can't continue
The below fixes this by flushing the TM state before we copy the task_struct to
the clone. To do this we go through the tmreclaim patch, which removes the
checkpointed registers from the CPU and transitions the CPU out of TM suspend
mode. Hence we need to call tmrechkpt after to restore the checkpointed state
and the TM mode for the current task.
To make this fail from userspace is simply:
tbegin
li r0, 2
sc
<boom>
Kudos to Adhemerval Zanella Neto for finding this.
Signed-off-by: Michael Neuling <mikey@neuling.org>
cc: Adhemerval Zanella Neto <azanella@br.ibm.com>
cc: stable@vger.kernel.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
|
static void tm_reclaim_thread(struct thread_struct *thr,
struct thread_info *ti, uint8_t cause)
{
unsigned long msr_diff = 0;
/*
* If FP/VSX registers have been already saved to the
* thread_struct, move them to the transact_fp array.
* We clear the TIF_RESTORE_TM bit since after the reclaim
* the thread will no longer be transactional.
*/
if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
if (msr_diff & MSR_FP)
memcpy(&thr->transact_fp, &thr->fp_state,
sizeof(struct thread_fp_state));
if (msr_diff & MSR_VEC)
memcpy(&thr->transact_vr, &thr->vr_state,
sizeof(struct thread_vr_state));
clear_ti_thread_flag(ti, TIF_RESTORE_TM);
msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
}
tm_reclaim(thr, thr->regs->msr, cause);
/* Having done the reclaim, we now have the checkpointed
* FP/VSX values in the registers. These might be valid
* even if we have previously called enable_kernel_fp() or
* flush_fp_to_thread(), so update thr->regs->msr to
* indicate their current validity.
*/
thr->regs->msr |= msr_diff;
}
|
static void tm_reclaim_thread(struct thread_struct *thr,
struct thread_info *ti, uint8_t cause)
{
unsigned long msr_diff = 0;
/*
* If FP/VSX registers have been already saved to the
* thread_struct, move them to the transact_fp array.
* We clear the TIF_RESTORE_TM bit since after the reclaim
* the thread will no longer be transactional.
*/
if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
if (msr_diff & MSR_FP)
memcpy(&thr->transact_fp, &thr->fp_state,
sizeof(struct thread_fp_state));
if (msr_diff & MSR_VEC)
memcpy(&thr->transact_vr, &thr->vr_state,
sizeof(struct thread_vr_state));
clear_ti_thread_flag(ti, TIF_RESTORE_TM);
msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
}
tm_reclaim(thr, thr->regs->msr, cause);
/* Having done the reclaim, we now have the checkpointed
* FP/VSX values in the registers. These might be valid
* even if we have previously called enable_kernel_fp() or
* flush_fp_to_thread(), so update thr->regs->msr to
* indicate their current validity.
*/
thr->regs->msr |= msr_diff;
}
|
C
|
linux
| 0 |
CVE-2010-1172
|
https://www.cvedetails.com/cve/CVE-2010-1172/
|
CWE-264
|
https://cgit.freedesktop.org/dbus/dbus-glib/commit/?h=rhel5&id=9a6bce9b615abca6068348c1606ba8eaf13d9ae0
|
9a6bce9b615abca6068348c1606ba8eaf13d9ae0
| null |
my_object_get_value (MyObject *obj, guint *ret, GError **error)
|
my_object_get_value (MyObject *obj, guint *ret, GError **error)
{
*ret = obj->val;
return TRUE;
}
|
C
|
dbus
| 1 |
CVE-2019-12111
|
https://www.cvedetails.com/cve/CVE-2019-12111/
|
CWE-476
|
https://github.com/miniupnp/miniupnp/commit/cb8a02af7a5677cf608e86d57ab04241cf34e24f
|
cb8a02af7a5677cf608e86d57ab04241cf34e24f
|
pcpserver.c: copyIPv6IfDifferent() check for NULL src argument
|
static void printPEEROpcodeVersion2(const uint8_t *buf)
{
char ext_addr[INET6_ADDRSTRLEN];
char peer_addr[INET6_ADDRSTRLEN];
syslog(LOG_DEBUG, "PCP PEER: v2 Opcode specific information.");
syslog(LOG_DEBUG, "nonce: \t%08x%08x%08x",
READNU32(buf), READNU32(buf+4), READNU32(buf+8));
syslog(LOG_DEBUG, "Protocol: \t%d", buf[12]);
syslog(LOG_DEBUG, "Internal port:\t%d", READNU16(buf + 16));
syslog(LOG_DEBUG, "External IP: \t%s", inet_ntop(AF_INET6, buf + 20,
ext_addr, INET6_ADDRSTRLEN));
syslog(LOG_DEBUG, "External port:\t%d", READNU16(buf + 18));
syslog(LOG_DEBUG, "PEER IP: \t%s", inet_ntop(AF_INET6, buf + 40,
peer_addr, INET6_ADDRSTRLEN));
syslog(LOG_DEBUG, "PEER port: \t%d", READNU16(buf + 36));
}
|
static void printPEEROpcodeVersion2(const uint8_t *buf)
{
char ext_addr[INET6_ADDRSTRLEN];
char peer_addr[INET6_ADDRSTRLEN];
syslog(LOG_DEBUG, "PCP PEER: v2 Opcode specific information.");
syslog(LOG_DEBUG, "nonce: \t%08x%08x%08x",
READNU32(buf), READNU32(buf+4), READNU32(buf+8));
syslog(LOG_DEBUG, "Protocol: \t%d", buf[12]);
syslog(LOG_DEBUG, "Internal port:\t%d", READNU16(buf + 16));
syslog(LOG_DEBUG, "External IP: \t%s", inet_ntop(AF_INET6, buf + 20,
ext_addr, INET6_ADDRSTRLEN));
syslog(LOG_DEBUG, "External port:\t%d", READNU16(buf + 18));
syslog(LOG_DEBUG, "PEER IP: \t%s", inet_ntop(AF_INET6, buf + 40,
peer_addr, INET6_ADDRSTRLEN));
syslog(LOG_DEBUG, "PEER port: \t%d", READNU16(buf + 36));
}
|
C
|
miniupnp
| 0 |
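The CVE-2019-12111 row above is another unchanged-context sample: the printed function is a debug helper, while the commit title names the whole fix, a NULL check on the src argument of copyIPv6IfDifferent(). A hedged, standalone reconstruction of that helper (the real one in pcpserver.c copies a struct in6_addr):

#include <string.h>

/* Hedged sketch: copy a 16-byte IPv6 address only when both pointers
 * are usable. Without the src != NULL guard, a malformed PCP request
 * that makes the caller pass a NULL source crashes the server. */
static void copyIPv6IfDifferent(void *dest, const void *src)
{
        if (dest != NULL && src != NULL && dest != src)
                memcpy(dest, src, 16);   /* sizeof(struct in6_addr) */
}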
CVE-2018-1068
|
https://www.cvedetails.com/cve/CVE-2018-1068/
|
CWE-787
|
https://github.com/torvalds/linux/commit/b71812168571fa55e44cdd0254471331b9c4c4c6
|
b71812168571fa55e44cdd0254471331b9c4c4c6
|
netfilter: ebtables: CONFIG_COMPAT: don't trust userland offsets
We need to make sure the offsets are not out of range of the
total size.
Also check that they are in ascending order.
The WARN_ON triggered by syzkaller (it sets panic_on_warn) is
changed to also bail out, no point in continuing parsing.
Briefly tested with simple ruleset of
-A INPUT --limit 1/s' --log
plus jump to custom chains using 32bit ebtables binary.
Reported-by: <syzbot+845a53d13171abf8bf29@syzkaller.appspotmail.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
|
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
const struct net_device *in, const struct net_device *out)
{
const struct ethhdr *h = eth_hdr(skb);
const struct net_bridge_port *p;
__be16 ethproto;
if (skb_vlan_tag_present(skb))
ethproto = htons(ETH_P_8021Q);
else
ethproto = h->h_proto;
if (e->bitmask & EBT_802_3) {
if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto)))
return 1;
} else if (!(e->bitmask & EBT_NOPROTO) &&
NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto))
return 1;
if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in)))
return 1;
if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
return 1;
/* rcu_read_lock()ed by nf_hook_thresh */
if (in && (p = br_port_get_rcu(in)) != NULL &&
NF_INVF(e, EBT_ILOGICALIN,
ebt_dev_check(e->logical_in, p->br->dev)))
return 1;
if (out && (p = br_port_get_rcu(out)) != NULL &&
NF_INVF(e, EBT_ILOGICALOUT,
ebt_dev_check(e->logical_out, p->br->dev)))
return 1;
if (e->bitmask & EBT_SOURCEMAC) {
if (NF_INVF(e, EBT_ISOURCE,
!ether_addr_equal_masked(h->h_source, e->sourcemac,
e->sourcemsk)))
return 1;
}
if (e->bitmask & EBT_DESTMAC) {
if (NF_INVF(e, EBT_IDEST,
!ether_addr_equal_masked(h->h_dest, e->destmac,
e->destmsk)))
return 1;
}
return 0;
}
|
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
const struct net_device *in, const struct net_device *out)
{
const struct ethhdr *h = eth_hdr(skb);
const struct net_bridge_port *p;
__be16 ethproto;
if (skb_vlan_tag_present(skb))
ethproto = htons(ETH_P_8021Q);
else
ethproto = h->h_proto;
if (e->bitmask & EBT_802_3) {
if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto)))
return 1;
} else if (!(e->bitmask & EBT_NOPROTO) &&
NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto))
return 1;
if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in)))
return 1;
if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
return 1;
/* rcu_read_lock()ed by nf_hook_thresh */
if (in && (p = br_port_get_rcu(in)) != NULL &&
NF_INVF(e, EBT_ILOGICALIN,
ebt_dev_check(e->logical_in, p->br->dev)))
return 1;
if (out && (p = br_port_get_rcu(out)) != NULL &&
NF_INVF(e, EBT_ILOGICALOUT,
ebt_dev_check(e->logical_out, p->br->dev)))
return 1;
if (e->bitmask & EBT_SOURCEMAC) {
if (NF_INVF(e, EBT_ISOURCE,
!ether_addr_equal_masked(h->h_source, e->sourcemac,
e->sourcemsk)))
return 1;
}
if (e->bitmask & EBT_DESTMAC) {
if (NF_INVF(e, EBT_IDEST,
!ether_addr_equal_masked(h->h_dest, e->destmac,
e->destmsk)))
return 1;
}
return 0;
}
|
C
|
linux
| 0 |
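In the CVE-2018-1068 row above, ebt_basic_match is unchanged context; the commit message describes the real change: userland-supplied CONFIG_COMPAT offsets must stay within the total size and arrive in ascending order, and parsing must bail out rather than continue after a warning. A standalone sketch of that validation (not the netfilter patch itself):

#include <stddef.h>

/* Hedged sketch of the rule in the commit message: every offset must
 * fit inside the blob and each one must be >= its predecessor,
 * otherwise the compat translation aborts. */
static int compat_offsets_ok(const unsigned int *off, size_t n,
                             unsigned int total_size)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (off[i] > total_size)
                        return 0;                /* out of range */
                if (i > 0 && off[i] < off[i - 1])
                        return 0;                /* not ascending */
        }
        return 1;
}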
CVE-2015-5697
|
https://www.cvedetails.com/cve/CVE-2015-5697/
|
CWE-200
|
https://github.com/torvalds/linux/commit/b6878d9e03043695dbf3fa1caa6dfc09db225b16
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
|
md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <benjamin@randazzo.fr>
Signed-off-by: NeilBrown <neilb@suse.com>
|
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
int did_freeze = 0;
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
err = -EBUSY;
goto out;
}
if (mddev->pers) {
__md_stop_writes(mddev);
err = -ENXIO;
if (mddev->ro==1)
goto out;
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
return err;
}
|
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
int did_freeze = 0;
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
err = -EBUSY;
goto out;
}
if (mddev->pers) {
__md_stop_writes(mddev);
err = -ENXIO;
if (mddev->ro==1)
goto out;
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
return err;
}
|
C
|
linux
| 0 |
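The commit message in the CVE-2015-5697 row above quotes the leaky allocation in get_bitmap_file() directly (md_set_readonly, shown in the row, is unchanged context). The fix is a single allocator swap; a hedged sketch of the pattern, kernel headers and locking elided:

/* Hedged sketch, not the full md.c function: a zeroing allocator
 * guarantees the copy_to_user() of the whole struct can never expose
 * stale heap bytes, even on the "bitmap disabled" path that used to
 * write only pathname[0]. */
static int get_bitmap_file_sketch(void __user *arg)
{
        mdu_bitmap_file_t *file;
        int err = 0;

        file = kzalloc(sizeof(*file), GFP_NOIO);   /* was: kmalloc() */
        if (!file)
                return -ENOMEM;
        /* pathname stays all-zero unless a real bitmap path is filled in */
        if (copy_to_user(arg, file, sizeof(*file)))
                err = -EFAULT;
        kfree(file);
        return err;
}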
CVE-2018-9490
|
https://www.cvedetails.com/cve/CVE-2018-9490/
|
CWE-704
|
https://android.googlesource.com/platform/external/v8/+/a24543157ae2cdd25da43e20f4e48a07481e6ceb
|
a24543157ae2cdd25da43e20f4e48a07481e6ceb
|
Backport: Fix Object.entries/values with changing elements
Bug: 111274046
Test: m -j proxy_resolver_v8_unittest && adb sync && adb shell \
/data/nativetest64/proxy_resolver_v8_unittest/proxy_resolver_v8_unittest
Change-Id: I705fc512cc5837e9364ed187559cc75d079aa5cb
(cherry picked from commit d8be9a10287afed07705ac8af027d6a46d4def99)
|
static bool WasNeutered(JSObject* holder) {
JSArrayBufferView* view = JSArrayBufferView::cast(holder);
return view->WasNeutered();
}
|
static bool WasNeutered(JSObject* holder) {
JSArrayBufferView* view = JSArrayBufferView::cast(holder);
return view->WasNeutered();
}
|
C
|
Android
| 0 |
CVE-2018-7998
|
https://www.cvedetails.com/cve/CVE-2018-7998/
|
CWE-362
|
https://github.com/jcupitt/libvips/commit/20d840e6da15c1574b3ed998bc92f91d1e36c2a5
|
20d840e6da15c1574b3ed998bc92f91d1e36c2a5
|
fix a crash with delayed load
If a delayed load failed, it could leave the pipeline only half-set up.
Sebsequent threads could then segv.
Set a load-has-failed flag and test before generate.
See https://github.com/jcupitt/libvips/issues/893
|
vips_foreign_load_temp( VipsForeignLoad *load )
{
const guint64 disc_threshold = vips_get_disc_threshold();
const guint64 image_size = VIPS_IMAGE_SIZEOF_IMAGE( load->out );
/* If this is a partial operation, we can open directly.
*/
if( load->flags & VIPS_FOREIGN_PARTIAL ) {
#ifdef DEBUG
printf( "vips_foreign_load_temp: partial temp\n" );
#endif /*DEBUG*/
return( vips_image_new() );
}
/* If it can do sequential access and it's been requested, we can open
* directly.
*/
if( (load->flags & VIPS_FOREIGN_SEQUENTIAL) &&
load->access != VIPS_ACCESS_RANDOM ) {
#ifdef DEBUG
printf( "vips_foreign_load_temp: partial sequential temp\n" );
#endif /*DEBUG*/
return( vips_image_new() );
}
/* ->memory used to be called ->disc and default TRUE. If it's been
* forced FALSE, set memory TRUE.
*/
if( !load->disc )
load->memory = TRUE;
/* We open via disc if:
* - 'memory' is off
* - the uncompressed image will be larger than
* vips_get_disc_threshold()
*/
if( !load->memory &&
image_size > disc_threshold ) {
#ifdef DEBUG
printf( "vips_foreign_load_temp: disc temp\n" );
#endif /*DEBUG*/
return( vips_image_new_temp_file( "%s.v" ) );
}
#ifdef DEBUG
printf( "vips_foreign_load_temp: memory temp\n" );
#endif /*DEBUG*/
/* Otherwise, fall back to a memory buffer.
*/
return( vips_image_new_memory() );
}
|
vips_foreign_load_temp( VipsForeignLoad *load )
{
const guint64 disc_threshold = vips_get_disc_threshold();
const guint64 image_size = VIPS_IMAGE_SIZEOF_IMAGE( load->out );
/* If this is a partial operation, we can open directly.
*/
if( load->flags & VIPS_FOREIGN_PARTIAL ) {
#ifdef DEBUG
printf( "vips_foreign_load_temp: partial temp\n" );
#endif /*DEBUG*/
return( vips_image_new() );
}
/* If it can do sequential access and it's been requested, we can open
* directly.
*/
if( (load->flags & VIPS_FOREIGN_SEQUENTIAL) &&
load->access != VIPS_ACCESS_RANDOM ) {
#ifdef DEBUG
printf( "vips_foreign_load_temp: partial sequential temp\n" );
#endif /*DEBUG*/
return( vips_image_new() );
}
/* ->memory used to be called ->disc and default TRUE. If it's been
* forced FALSE, set memory TRUE.
*/
if( !load->disc )
load->memory = TRUE;
/* We open via disc if:
* - 'memory' is off
* - the uncompressed image will be larger than
* vips_get_disc_threshold()
*/
if( !load->memory &&
image_size > disc_threshold ) {
#ifdef DEBUG
printf( "vips_foreign_load_temp: disc temp\n" );
#endif /*DEBUG*/
return( vips_image_new_temp_file( "%s.v" ) );
}
#ifdef DEBUG
printf( "vips_foreign_load_temp: memory temp\n" );
#endif /*DEBUG*/
/* Otherwise, fall back to a memory buffer.
*/
return( vips_image_new_memory() );
}
|
C
|
libvips
| 0 |
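vips_foreign_load_temp in the CVE-2018-7998 row above is unchanged context; per the commit message the fix records that a delayed load failed and tests that flag before generate, so worker threads never walk a half-built pipeline. A hedged sketch with hypothetical names (the real flag hangs off libvips' load object):

/* Hypothetical names throughout; only the set-flag / test-before-
 * generate shape mirrors the described fix. */
typedef struct {
        int load_failed;        /* set once the delayed load errors out */
        /* ... remaining load state ... */
} LoadSketch;

static void mark_load_failed(LoadSketch *load)
{
        load->load_failed = 1;
}

static int generate(LoadSketch *load)
{
        if (load->load_failed)
                return -1;      /* fail cleanly, never touch the pipeline */
        /* ... produce output from the fully-initialized pipeline ... */
        return 0;
}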
CVE-2010-1152
|
https://www.cvedetails.com/cve/CVE-2010-1152/
|
CWE-20
|
https://github.com/memcached/memcached/commit/d9cd01ede97f4145af9781d448c62a3318952719
|
d9cd01ede97f4145af9781d448c62a3318952719
|
Use strncmp when checking for large ascii multigets.
|
static void usage_license(void) {
printf(PACKAGE " " VERSION "\n\n");
printf(
"Copyright (c) 2003, Danga Interactive, Inc. <http://www.danga.com/>\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are\n"
"met:\n"
"\n"
" * Redistributions of source code must retain the above copyright\n"
"notice, this list of conditions and the following disclaimer.\n"
"\n"
" * Redistributions in binary form must reproduce the above\n"
"copyright notice, this list of conditions and the following disclaimer\n"
"in the documentation and/or other materials provided with the\n"
"distribution.\n"
"\n"
" * Neither the name of the Danga Interactive nor the names of its\n"
"contributors may be used to endorse or promote products derived from\n"
"this software without specific prior written permission.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n"
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n"
"LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n"
"A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n"
"OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n"
"SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n"
"LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n"
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
"\n"
"\n"
"This product includes software developed by Niels Provos.\n"
"\n"
"[ libevent ]\n"
"\n"
"Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions\n"
"are met:\n"
"1. Redistributions of source code must retain the above copyright\n"
" notice, this list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright\n"
" notice, this list of conditions and the following disclaimer in the\n"
" documentation and/or other materials provided with the distribution.\n"
"3. All advertising materials mentioning features or use of this software\n"
" must display the following acknowledgement:\n"
" This product includes software developed by Niels Provos.\n"
"4. The name of the author may not be used to endorse or promote products\n"
" derived from this software without specific prior written permission.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n"
"IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n"
"OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n"
"IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n"
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n"
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n"
"THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
);
return;
}
|
static void usage_license(void) {
printf(PACKAGE " " VERSION "\n\n");
printf(
"Copyright (c) 2003, Danga Interactive, Inc. <http://www.danga.com/>\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are\n"
"met:\n"
"\n"
" * Redistributions of source code must retain the above copyright\n"
"notice, this list of conditions and the following disclaimer.\n"
"\n"
" * Redistributions in binary form must reproduce the above\n"
"copyright notice, this list of conditions and the following disclaimer\n"
"in the documentation and/or other materials provided with the\n"
"distribution.\n"
"\n"
" * Neither the name of the Danga Interactive nor the names of its\n"
"contributors may be used to endorse or promote products derived from\n"
"this software without specific prior written permission.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n"
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n"
"LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n"
"A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n"
"OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n"
"SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n"
"LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n"
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
"\n"
"\n"
"This product includes software developed by Niels Provos.\n"
"\n"
"[ libevent ]\n"
"\n"
"Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions\n"
"are met:\n"
"1. Redistributions of source code must retain the above copyright\n"
" notice, this list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright\n"
" notice, this list of conditions and the following disclaimer in the\n"
" documentation and/or other materials provided with the distribution.\n"
"3. All advertising materials mentioning features or use of this software\n"
" must display the following acknowledgement:\n"
" This product includes software developed by Niels Provos.\n"
"4. The name of the author may not be used to endorse or promote products\n"
" derived from this software without specific prior written permission.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n"
"IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n"
"OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n"
"IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n"
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n"
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n"
"THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
);
return;
}
|
C
|
memcached
| 0 |
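The CVE-2010-1152 row above pairs the fix with an unchanged license-printing function. The commit title carries the whole change: when an over-long ASCII request line arrives, memcached decides whether it is a legitimate large multiget by testing the command prefix, and that test must be a bounded strncmp. A standalone sketch of the prefix test (the connection bookkeeping around it in memcached.c is omitted):

#include <string.h>

/* Hedged sketch: only the 4 or 5 prefix bytes are compared, so the
 * check works on an arbitrarily long, not-yet-fully-read line. */
static int looks_like_multiget(const char *line)
{
        return strncmp(line, "get ", 4) == 0 ||
               strncmp(line, "gets ", 5) == 0;
}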
CVE-2016-3074
|
https://www.cvedetails.com/cve/CVE-2016-3074/
|
CWE-189
|
https://github.com/libgd/libgd/commit/2bb97f407c1145c850416a3bfbcc8cf124e68a19
|
2bb97f407c1145c850416a3bfbcc8cf124e68a19
|
gd2: handle corrupt images better (CVE-2016-3074)
Make sure we do some range checking on corrupted chunks.
Thanks to Hans Jerry Illikainen <hji@dyntopia.com> for indepth report
and reproducer information. Made for easy test case writing :).
|
_gd2GetHeader (gdIOCtxPtr in, int *sx, int *sy,
int *cs, int *vers, int *fmt, int *ncx, int *ncy,
t_chunk_info ** chunkIdx)
{
int i;
int ch;
char id[5];
t_chunk_info *cidx;
int sidx;
int nc;
GD2_DBG (printf ("Reading gd2 header info\n"));
for (i = 0; i < 4; i++) {
ch = gdGetC (in);
if (ch == EOF) {
goto fail1;
};
id[i] = ch;
};
id[4] = 0;
GD2_DBG (printf ("Got file code: %s\n", id));
/* Equiv. of 'magick'. */
if (strcmp (id, GD2_ID) != 0) {
GD2_DBG (printf ("Not a valid gd2 file\n"));
goto fail1;
};
/* Version */
if (gdGetWord (vers, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("Version: %d\n", *vers));
if ((*vers != 1) && (*vers != 2)) {
GD2_DBG (printf ("Bad version: %d\n", *vers));
goto fail1;
};
/* Image Size */
if (!gdGetWord (sx, in)) {
GD2_DBG (printf ("Could not get x-size\n"));
goto fail1;
}
if (!gdGetWord (sy, in)) {
GD2_DBG (printf ("Could not get y-size\n"));
goto fail1;
}
GD2_DBG (printf ("Image is %dx%d\n", *sx, *sy));
/* Chunk Size (pixels, not bytes!) */
if (gdGetWord (cs, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("ChunkSize: %d\n", *cs));
if ((*cs < GD2_CHUNKSIZE_MIN) || (*cs > GD2_CHUNKSIZE_MAX)) {
GD2_DBG (printf ("Bad chunk size: %d\n", *cs));
goto fail1;
};
/* Data Format */
if (gdGetWord (fmt, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("Format: %d\n", *fmt));
if ((*fmt != GD2_FMT_RAW) && (*fmt != GD2_FMT_COMPRESSED) &&
(*fmt != GD2_FMT_TRUECOLOR_RAW) &&
(*fmt != GD2_FMT_TRUECOLOR_COMPRESSED)) {
GD2_DBG (printf ("Bad data format: %d\n", *fmt));
goto fail1;
};
/* # of chunks wide */
if (gdGetWord (ncx, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("%d Chunks Wide\n", *ncx));
/* # of chunks high */
if (gdGetWord (ncy, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("%d Chunks vertically\n", *ncy));
if (gd2_compressed (*fmt)) {
nc = (*ncx) * (*ncy);
GD2_DBG (printf ("Reading %d chunk index entries\n", nc));
sidx = sizeof (t_chunk_info) * nc;
cidx = gdCalloc (sidx, 1);
if (!cidx) {
goto fail1;
}
for (i = 0; i < nc; i++) {
if (gdGetInt (&cidx[i].offset, in) != 1) {
goto fail2;
};
if (gdGetInt (&cidx[i].size, in) != 1) {
goto fail2;
};
if (cidx[i].offset < 0 || cidx[i].size < 0)
goto fail2;
};
*chunkIdx = cidx;
};
GD2_DBG (printf ("gd2 header complete\n"));
return 1;
fail2:
gdFree(cidx);
fail1:
return 0;
}
|
_gd2GetHeader (gdIOCtxPtr in, int *sx, int *sy,
int *cs, int *vers, int *fmt, int *ncx, int *ncy,
t_chunk_info ** chunkIdx)
{
int i;
int ch;
char id[5];
t_chunk_info *cidx;
int sidx;
int nc;
GD2_DBG (printf ("Reading gd2 header info\n"));
for (i = 0; i < 4; i++) {
ch = gdGetC (in);
if (ch == EOF) {
goto fail1;
};
id[i] = ch;
};
id[4] = 0;
GD2_DBG (printf ("Got file code: %s\n", id));
/* Equiv. of 'magick'. */
if (strcmp (id, GD2_ID) != 0) {
GD2_DBG (printf ("Not a valid gd2 file\n"));
goto fail1;
};
/* Version */
if (gdGetWord (vers, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("Version: %d\n", *vers));
if ((*vers != 1) && (*vers != 2)) {
GD2_DBG (printf ("Bad version: %d\n", *vers));
goto fail1;
};
/* Image Size */
if (!gdGetWord (sx, in)) {
GD2_DBG (printf ("Could not get x-size\n"));
goto fail1;
}
if (!gdGetWord (sy, in)) {
GD2_DBG (printf ("Could not get y-size\n"));
goto fail1;
}
GD2_DBG (printf ("Image is %dx%d\n", *sx, *sy));
/* Chunk Size (pixels, not bytes!) */
if (gdGetWord (cs, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("ChunkSize: %d\n", *cs));
if ((*cs < GD2_CHUNKSIZE_MIN) || (*cs > GD2_CHUNKSIZE_MAX)) {
GD2_DBG (printf ("Bad chunk size: %d\n", *cs));
goto fail1;
};
/* Data Format */
if (gdGetWord (fmt, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("Format: %d\n", *fmt));
if ((*fmt != GD2_FMT_RAW) && (*fmt != GD2_FMT_COMPRESSED) &&
(*fmt != GD2_FMT_TRUECOLOR_RAW) &&
(*fmt != GD2_FMT_TRUECOLOR_COMPRESSED)) {
GD2_DBG (printf ("Bad data format: %d\n", *fmt));
goto fail1;
};
/* # of chunks wide */
if (gdGetWord (ncx, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("%d Chunks Wide\n", *ncx));
/* # of chunks high */
if (gdGetWord (ncy, in) != 1) {
goto fail1;
};
GD2_DBG (printf ("%d Chunks vertically\n", *ncy));
if (gd2_compressed (*fmt)) {
nc = (*ncx) * (*ncy);
GD2_DBG (printf ("Reading %d chunk index entries\n", nc));
sidx = sizeof (t_chunk_info) * nc;
cidx = gdCalloc (sidx, 1);
if (!cidx) {
goto fail1;
}
for (i = 0; i < nc; i++) {
if (gdGetInt (&cidx[i].offset, in) != 1) {
goto fail2;
};
if (gdGetInt (&cidx[i].size, in) != 1) {
goto fail2;
};
};
*chunkIdx = cidx;
};
GD2_DBG (printf ("gd2 header complete\n"));
return 1;
fail2:
gdFree(cidx);
fail1:
return 0;
}
|
C
|
libgd
| 1 |
null | null | null |
https://github.com/chromium/chromium/commit/ea3d1d84be3d6f97bf50e76511c9e26af6895533
|
ea3d1d84be3d6f97bf50e76511c9e26af6895533
|
Fix passing pointers between processes.
BUG=31880
Review URL: http://codereview.chromium.org/558036
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@37555 0039d316-1c4b-4281-b951-d872f2087c98
|
void Initialize(unsigned long resource_id, const GURL& url, int notify_id) {
resource_id_ = resource_id;
channel_->Send(new PluginMsg_HandleURLRequestReply(
instance_id_, resource_id, url, notify_id));
}
void InitializeForSeekableStream(unsigned long resource_id,
int range_request_id) {
resource_id_ = resource_id;
multibyte_response_expected_ = true;
channel_->Send(new PluginMsg_HTTPRangeRequestReply(
instance_id_, resource_id, range_request_id));
}
|
void Initialize(unsigned long resource_id, const GURL& url,
bool notify_needed, intptr_t notify_data,
intptr_t existing_stream) {
resource_id_ = resource_id;
url_ = url;
notify_needed_ = notify_needed;
notify_data_ = notify_data;
PluginMsg_URLRequestReply_Params params;
params.resource_id = resource_id;
params.url = url_;
params.notify_needed = notify_needed_;
params.notify_data = notify_data_;
params.stream = existing_stream;
multibyte_response_expected_ = (existing_stream != 0);
channel_->Send(new PluginMsg_HandleURLRequestReply(instance_id_, params));
}
|
C
|
Chrome
| 1 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <kbr@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657568}
|
DrawingBufferClientRestorePixelPackBufferBinding() {}
|
DrawingBufferClientRestorePixelPackBufferBinding() {}
|
C
|
Chrome
| 0 |
CVE-2013-2887
|
https://www.cvedetails.com/cve/CVE-2013-2887/
| null |
https://github.com/chromium/chromium/commit/01924fbe6c0e0f059ca46a03f9f6b2670ae3e0fa
|
01924fbe6c0e0f059ca46a03f9f6b2670ae3e0fa
|
Pass ui::LatencyInfo correct with unified gesture detector on Aura.
BUG=379812
TEST=GestureRecognizerTest.LatencyPassedFromTouchEvent
Review URL: https://codereview.chromium.org/309823002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@274602 0039d316-1c4b-4281-b951-d872f2087c98
|
bool double_click() const { return double_click_; }
|
bool double_click() const { return double_click_; }
|
C
|
Chrome
| 0 |
CVE-2014-7841
|
https://www.cvedetails.com/cve/CVE-2014-7841/
|
CWE-399
|
https://github.com/torvalds/linux/commit/e40607cbe270a9e8360907cb1e62ddf0736e4864
|
e40607cbe270a9e8360907cb1e62ddf0736e4864
|
net: sctp: fix NULL pointer dereference in af->from_addr_param on malformed packet
An SCTP server doing ASCONF will panic on malformed INIT ping-of-death
in the form of:
------------ INIT[PARAM: SET_PRIMARY_IP] ------------>
While the INIT chunk parameter verification dissects through many things
in order to detect malformed input, it misses to actually check parameters
inside of parameters. E.g. RFC5061, section 4.2.4 proposes a 'set primary
IP address' parameter in ASCONF, which has as a subparameter an address
parameter.
So an attacker may send a parameter type other than SCTP_PARAM_IPV4_ADDRESS
or SCTP_PARAM_IPV6_ADDRESS, param_type2af() will subsequently return 0
and thus sctp_get_af_specific() returns NULL, too, which we then happily
dereference unconditionally through af->from_addr_param().
The trace for the log:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000078
IP: [<ffffffffa01e9c62>] sctp_process_init+0x492/0x990 [sctp]
PGD 0
Oops: 0000 [#1] SMP
[...]
Pid: 0, comm: swapper Not tainted 2.6.32-504.el6.x86_64 #1 Bochs Bochs
RIP: 0010:[<ffffffffa01e9c62>] [<ffffffffa01e9c62>] sctp_process_init+0x492/0x990 [sctp]
[...]
Call Trace:
<IRQ>
[<ffffffffa01f2add>] ? sctp_bind_addr_copy+0x5d/0xe0 [sctp]
[<ffffffffa01e1fcb>] sctp_sf_do_5_1B_init+0x21b/0x340 [sctp]
[<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp]
[<ffffffffa01e5c09>] ? sctp_endpoint_lookup_assoc+0xc9/0xf0 [sctp]
[<ffffffffa01e61f6>] sctp_endpoint_bh_rcv+0x116/0x230 [sctp]
[<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp]
[<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp]
[<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter]
[<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[...]
A minimal way to address this is to check for NULL as we do on all
other such occasions where we know sctp_get_af_specific() could
possibly return with NULL.
Fixes: d6de3097592b ("[SCTP]: Add the handling of "Set Primary IP Address" parameter to INIT")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Vlad Yasevich <vyasevich@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
struct sctp_datamsg *msg;
struct sctp_chunk *lchunk;
struct sctp_stream *stream;
__u16 ssn;
__u16 sid;
if (chunk->has_ssn)
return;
/* All fragments will be on the same stream */
sid = ntohs(chunk->subh.data_hdr->stream);
stream = &chunk->asoc->ssnmap->out;
/* Now assign the sequence number to the entire message.
* All fragments must have the same stream sequence number.
*/
msg = chunk->msg;
list_for_each_entry(lchunk, &msg->chunks, frag_list) {
if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
ssn = 0;
} else {
if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
ssn = sctp_ssn_next(stream, sid);
else
ssn = sctp_ssn_peek(stream, sid);
}
lchunk->subh.data_hdr->ssn = htons(ssn);
lchunk->has_ssn = 1;
}
}
|
void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
struct sctp_datamsg *msg;
struct sctp_chunk *lchunk;
struct sctp_stream *stream;
__u16 ssn;
__u16 sid;
if (chunk->has_ssn)
return;
/* All fragments will be on the same stream */
sid = ntohs(chunk->subh.data_hdr->stream);
stream = &chunk->asoc->ssnmap->out;
/* Now assign the sequence number to the entire message.
* All fragments must have the same stream sequence number.
*/
msg = chunk->msg;
list_for_each_entry(lchunk, &msg->chunks, frag_list) {
if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
ssn = 0;
} else {
if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
ssn = sctp_ssn_next(stream, sid);
else
ssn = sctp_ssn_peek(stream, sid);
}
lchunk->subh.data_hdr->ssn = htons(ssn);
lchunk->has_ssn = 1;
}
}
|
C
|
linux
| 0 |
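sctp_chunk_assign_ssn in the CVE-2014-7841 row above is unchanged context; the commit message itself pinpoints the fix: check sctp_get_af_specific() for NULL before calling through af->from_addr_param() on the address nested inside a SET_PRIMARY_IP parameter. A hedged fragment of that guard, with the surrounding sctp_process_param() switch case elided and field names approximate:

/* Hedged sketch of the guard described in the commit message. */
af = sctp_get_af_specific(param_type2af(addr_param->p.type));
if (af == NULL)
        break;          /* unknown family in nested param: ignore it */
af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);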
CVE-2014-8130
|
https://www.cvedetails.com/cve/CVE-2014-8130/
|
CWE-369
|
https://github.com/vadz/libtiff/commit/3c5eb8b1be544e41d2c336191bc4936300ad7543
|
3c5eb8b1be544e41d2c336191bc4936300ad7543
|
* libtiff/tif_{unix,vms,win32}.c (_TIFFmalloc): ANSI C does not
require malloc() to return NULL pointer if requested allocation
size is zero. Assure that _TIFFmalloc does.
|
_tiffSeekProc(thandle_t fd, uint64 off, int whence)
{
LARGE_INTEGER offli;
DWORD dwMoveMethod;
offli.QuadPart = off;
switch(whence)
{
case SEEK_SET:
dwMoveMethod = FILE_BEGIN;
break;
case SEEK_CUR:
dwMoveMethod = FILE_CURRENT;
break;
case SEEK_END:
dwMoveMethod = FILE_END;
break;
default:
dwMoveMethod = FILE_BEGIN;
break;
}
offli.LowPart=SetFilePointer(fd,offli.LowPart,&offli.HighPart,dwMoveMethod);
if ((offli.LowPart==INVALID_SET_FILE_POINTER)&&(GetLastError()!=NO_ERROR))
offli.QuadPart=0;
return(offli.QuadPart);
}
|
_tiffSeekProc(thandle_t fd, uint64 off, int whence)
{
LARGE_INTEGER offli;
DWORD dwMoveMethod;
offli.QuadPart = off;
switch(whence)
{
case SEEK_SET:
dwMoveMethod = FILE_BEGIN;
break;
case SEEK_CUR:
dwMoveMethod = FILE_CURRENT;
break;
case SEEK_END:
dwMoveMethod = FILE_END;
break;
default:
dwMoveMethod = FILE_BEGIN;
break;
}
offli.LowPart=SetFilePointer(fd,offli.LowPart,&offli.HighPart,dwMoveMethod);
if ((offli.LowPart==INVALID_SET_FILE_POINTER)&&(GetLastError()!=NO_ERROR))
offli.QuadPart=0;
return(offli.QuadPart);
}
|
C
|
libtiff
| 0 |
CVE-2013-0904
|
https://www.cvedetails.com/cve/CVE-2013-0904/
|
CWE-119
|
https://github.com/chromium/chromium/commit/b2b21468c1f7f08b30a7c1755316f6026c50eb2a
|
b2b21468c1f7f08b30a7c1755316f6026c50eb2a
|
Separate repaint and layout requirements of StyleDifference (Step 1)
Previously StyleDifference was an enum that proximately bigger values
imply smaller values (e.g. StyleDifferenceLayout implies
StyleDifferenceRepaint). This causes unnecessary repaints in some cases
on layout change.
Convert StyleDifference to a structure containing relatively independent
flags.
This change doesn't directly improve the result, but can make further
repaint optimizations possible.
Step 1 doesn't change any functionality. RenderStyle still generate the
legacy StyleDifference enum when comparing styles and convert the result
to the new StyleDifference. Implicit requirements are not handled during
the conversion.
Converted call sites to use the new StyleDifference according to the
following conversion rules:
- diff == StyleDifferenceEqual (&& !context) => diff.hasNoChange()
- diff == StyleDifferenceRepaint => diff.needsRepaintObjectOnly()
- diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer()
- diff == StyleDifferenceRepaint || diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer()
- diff >= StyleDifferenceRepaint => diff.needsRepaint() || diff.needsLayout()
- diff >= StyleDifferenceRepaintLayer => diff.needsRepaintLayer() || diff.needsLayout()
- diff > StyleDifferenceRepaintLayer => diff.needsLayout()
- diff == StyleDifferencePositionedMovementLayoutOnly => diff.needsPositionedMovementLayoutOnly()
- diff == StyleDifferenceLayout => diff.needsFullLayout()
BUG=358460
TEST=All existing layout tests.
R=eseidel@chromium.org, esprehn@chromium.org, jchaffraix@chromium.org
Committed: https://src.chromium.org/viewvc/blink?view=rev&revision=171983
Review URL: https://codereview.chromium.org/236203020
git-svn-id: svn://svn.chromium.org/blink/trunk@172331 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
LayoutUnit RenderBox::constrainLogicalWidthByMinMax(LayoutUnit logicalWidth, LayoutUnit availableWidth, RenderBlock* cb) const
{
RenderStyle* styleToUse = style();
if (!styleToUse->logicalMaxWidth().isUndefined())
logicalWidth = min(logicalWidth, computeLogicalWidthUsing(MaxSize, styleToUse->logicalMaxWidth(), availableWidth, cb));
return max(logicalWidth, computeLogicalWidthUsing(MinSize, styleToUse->logicalMinWidth(), availableWidth, cb));
}
|
LayoutUnit RenderBox::constrainLogicalWidthByMinMax(LayoutUnit logicalWidth, LayoutUnit availableWidth, RenderBlock* cb) const
{
RenderStyle* styleToUse = style();
if (!styleToUse->logicalMaxWidth().isUndefined())
logicalWidth = min(logicalWidth, computeLogicalWidthUsing(MaxSize, styleToUse->logicalMaxWidth(), availableWidth, cb));
return max(logicalWidth, computeLogicalWidthUsing(MinSize, styleToUse->logicalMinWidth(), availableWidth, cb));
}
|
C
|
Chrome
| 0 |
CVE-2012-3412
|
https://www.cvedetails.com/cve/CVE-2012-3412/
|
CWE-189
|
https://github.com/torvalds/linux/commit/68cb695ccecf949d48949e72f8ce591fdaaa325c
|
68cb695ccecf949d48949e72f8ce591fdaaa325c
|
sfc: Fix maximum number of TSO segments and minimum TX queue size
[ Upstream commit 7e6d06f0de3f74ca929441add094518ae332257c ]
Currently an skb requiring TSO may not fit within a minimum-size TX
queue. The TX queue selected for the skb may stall and trigger the TX
watchdog repeatedly (since the problem skb will be retried after the
TX reset). This issue is designated as CVE-2012-3412.
Set the maximum number of TSO segments for our devices to 100. This
should make no difference to behaviour unless the actual MSS is less
than about 700. Increase the minimum TX queue size accordingly to
allow for 2 worst-case skbs, so that there will definitely be space
to add an skb after we wake a queue.
To avoid invalidating existing configurations, change
efx_ethtool_set_ringparam() to fix up values that are too small rather
than returning -EINVAL.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
|
static void efx_mac_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
mutex_lock(&efx->mac_lock);
if (efx->port_enabled) {
efx->type->push_multicast_hash(efx);
efx->mac_op->reconfigure(efx);
}
mutex_unlock(&efx->mac_lock);
}
|
static void efx_mac_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
mutex_lock(&efx->mac_lock);
if (efx->port_enabled) {
efx->type->push_multicast_hash(efx);
efx->mac_op->reconfigure(efx);
}
mutex_unlock(&efx->mac_lock);
}
|
C
|
linux
| 0 |
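efx_mac_work in the CVE-2012-3412 row above is unchanged context; the commit message names the two real knobs: advertise a 100-segment TSO cap to the stack and size TX rings so two worst-case skbs always fit. A hedged sketch of both, assuming the net_device gso_max_segs field and with constant names merely in the sfc style:

/* Hedged sketch: cap GSO segmentation so one skb always fits a
 * minimum-size ring, and derive the minimum ring size from two
 * worst-case skbs so a stopped queue can be woken safely. */
#define EFX_TSO_MAX_SEGS        100u

/* at netdev registration time: */
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;

/* when validating ethtool ring parameters: round a too-small request
 * up instead of rejecting it with -EINVAL */
txq_entries = max(requested_entries, 2 * max_skb_descs);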
CVE-2011-2839
|
https://www.cvedetails.com/cve/CVE-2011-2839/
|
CWE-20
|
https://github.com/chromium/chromium/commit/c63f2b7fe4fe2977f858a8e36d5f48db17eff2e7
|
c63f2b7fe4fe2977f858a8e36d5f48db17eff2e7
|
Extend TTS extension API to support richer events returned from the engine
to the client. Previously we just had a completed event; this adds start,
word boundary, sentence boundary, and marker boundary. In addition,
interrupted and canceled, which were previously errors, now become events.
Mac and Windows implementations extended to support as many of these events
as possible.
BUG=67713
BUG=70198
BUG=75106
BUG=83404
TEST=Updates all TTS API tests to be event-based, and adds new tests.
Review URL: http://codereview.chromium.org/6792014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98
|
bool ExtensionTtsStopSpeakingFunction::RunImpl() {
ExtensionTtsController::GetInstance()->Stop();
return true;
}
|
bool ExtensionTtsStopSpeakingFunction::RunImpl() {
ExtensionTtsController::GetInstance()->Stop();
return true;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/3b7ff00418c0e7593d42e5648ba39397e23fe2f9
|
3b7ff00418c0e7593d42e5648ba39397e23fe2f9
|
sync: ensure sync init path doesn't block on CheckTime
The call to RequestEarlyExit (which calls Abort) only happens if the SyncBackendHost has received the initialization callback from the SyncManager. But during init, the SyncManager could make a call to CheckTime, meaning that call would never be aborted. This patch makes sure to cover that case.
BUG=93829
TEST=None at the moment :(
Review URL: http://codereview.chromium.org/7862011
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@100543 0039d316-1c4b-4281-b951-d872f2087c98
|
void SyncManager::Shutdown() {
data_->Shutdown();
}
|
void SyncManager::Shutdown() {
data_->Shutdown();
}
|
C
|
Chrome
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
R=jochen@chromium.org
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void activityLoggedAttrGetter1AttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
ExceptionState exceptionState(ExceptionState::SetterContext, "activityLoggedAttrGetter1", "TestObject", info.Holder(), info.GetIsolate());
TestObject* imp = V8TestObject::toNative(info.Holder());
V8TRYCATCH_EXCEPTION_VOID(int, cppValue, toInt32(jsValue, exceptionState), exceptionState);
imp->setActivityLoggedAttrGetter1(cppValue);
}
|
static void activityLoggedAttrGetter1AttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
ExceptionState exceptionState(ExceptionState::SetterContext, "activityLoggedAttrGetter1", "TestObject", info.Holder(), info.GetIsolate());
TestObject* imp = V8TestObject::toNative(info.Holder());
V8TRYCATCH_EXCEPTION_VOID(int, cppValue, toInt32(jsValue, exceptionState), exceptionState);
imp->setActivityLoggedAttrGetter1(cppValue);
}
|
C
|
Chrome
| 0 |
CVE-2013-2853
|
https://www.cvedetails.com/cve/CVE-2013-2853/
| null |
https://github.com/chromium/chromium/commit/9c18dbcb79e5f700c453d1ac01fb6d8768e4844a
|
9c18dbcb79e5f700c453d1ac01fb6d8768e4844a
|
net: don't process truncated headers on HTTPS connections.
This change causes us to not process any headers unless they are correctly
terminated with a \r\n\r\n sequence.
BUG=244260
Review URL: https://chromiumcodereview.appspot.com/15688012
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@202927 0039d316-1c4b-4281-b951-d872f2087c98
|
void HttpStreamParser::GetSSLInfo(SSLInfo* ssl_info) {
if (request_->url.SchemeIs("https") && connection_->socket()) {
SSLClientSocket* ssl_socket =
static_cast<SSLClientSocket*>(connection_->socket());
ssl_socket->GetSSLInfo(ssl_info);
}
}
|
void HttpStreamParser::GetSSLInfo(SSLInfo* ssl_info) {
if (request_->url.SchemeIs("https") && connection_->socket()) {
SSLClientSocket* ssl_socket =
static_cast<SSLClientSocket*>(connection_->socket());
ssl_socket->GetSSLInfo(ssl_info);
}
}
|
C
|
Chrome
| 0 |
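GetSSLInfo in the CVE-2013-2853 row above is unchanged context; the commit message states the rule that changed: never hand a header block to the parser until the terminating \r\n\r\n has been seen. A standalone C sketch of that completeness test (Chromium's real check is C++ inside HttpStreamParser, over counted buffers):

#include <stddef.h>

/* Hedged sketch: scan a counted buffer for the CRLFCRLF terminator;
 * until it appears the response headers are treated as truncated and
 * are not processed. */
static int headers_terminated(const char *buf, size_t len)
{
        size_t i;

        for (i = 0; i + 3 < len; i++) {
                if (buf[i]     == '\r' && buf[i + 1] == '\n' &&
                    buf[i + 2] == '\r' && buf[i + 3] == '\n')
                        return 1;
        }
        return 0;
}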
CVE-2016-3839
|
https://www.cvedetails.com/cve/CVE-2016-3839/
|
CWE-284
|
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
|
472271b153c5dc53c28beac55480a8d8434b2d5c
|
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
|
static void bte_search_devices_evt(tBTA_DM_SEARCH_EVT event, tBTA_DM_SEARCH *p_data)
{
UINT16 param_len = 0;
if (p_data)
param_len += sizeof(tBTA_DM_SEARCH);
/* Allocate buffer to hold the pointers (deep copy). The pointers will point to the end of the tBTA_DM_SEARCH */
switch (event)
{
case BTA_DM_INQ_RES_EVT:
{
if (p_data->inq_res.p_eir)
param_len += HCI_EXT_INQ_RESPONSE_LEN;
}
break;
case BTA_DM_DISC_RES_EVT:
{
if (p_data->disc_res.raw_data_size && p_data->disc_res.p_raw_data)
param_len += p_data->disc_res.raw_data_size;
}
break;
}
BTIF_TRACE_DEBUG("%s event=%s param_len=%d", __FUNCTION__, dump_dm_search_event(event), param_len);
/* if remote name is available in EIR, set teh flag so that stack doesnt trigger RNR */
if (event == BTA_DM_INQ_RES_EVT)
p_data->inq_res.remt_name_not_required = check_eir_remote_name(p_data, NULL, NULL);
btif_transfer_context (btif_dm_search_devices_evt , (UINT16) event, (void *)p_data, param_len,
(param_len > sizeof(tBTA_DM_SEARCH)) ? search_devices_copy_cb : NULL);
}
|
static void bte_search_devices_evt(tBTA_DM_SEARCH_EVT event, tBTA_DM_SEARCH *p_data)
{
UINT16 param_len = 0;
if (p_data)
param_len += sizeof(tBTA_DM_SEARCH);
/* Allocate buffer to hold the pointers (deep copy). The pointers will point to the end of the tBTA_DM_SEARCH */
switch (event)
{
case BTA_DM_INQ_RES_EVT:
{
if (p_data->inq_res.p_eir)
param_len += HCI_EXT_INQ_RESPONSE_LEN;
}
break;
case BTA_DM_DISC_RES_EVT:
{
if (p_data->disc_res.raw_data_size && p_data->disc_res.p_raw_data)
param_len += p_data->disc_res.raw_data_size;
}
break;
}
BTIF_TRACE_DEBUG("%s event=%s param_len=%d", __FUNCTION__, dump_dm_search_event(event), param_len);
/* if remote name is available in EIR, set teh flag so that stack doesnt trigger RNR */
if (event == BTA_DM_INQ_RES_EVT)
p_data->inq_res.remt_name_not_required = check_eir_remote_name(p_data, NULL, NULL);
btif_transfer_context (btif_dm_search_devices_evt , (UINT16) event, (void *)p_data, param_len,
(param_len > sizeof(tBTA_DM_SEARCH)) ? search_devices_copy_cb : NULL);
}
|
C
|
Android
| 0 |
CVE-2016-5147
|
https://www.cvedetails.com/cve/CVE-2016-5147/
|
CWE-79
|
https://github.com/chromium/chromium/commit/5472db1c7eca35822219d03be5c817d9a9258c11
|
5472db1c7eca35822219d03be5c817d9a9258c11
|
Always call UpdateCompositedScrollOffset, not just for the root layer
Bug: 927560
Change-Id: I1d5522aae4f11dd3f5b8947bb089bac1bf19bdb4
Reviewed-on: https://chromium-review.googlesource.com/c/1452701
Reviewed-by: Chris Harrelson <chrishtr@chromium.org>
Commit-Queue: Mason Freed <masonfreed@chromium.org>
Cr-Commit-Position: refs/heads/master@{#628942}
|
GraphicsLayer* PaintLayerScrollableArea::LayerForScrolling() const {
return Layer()->HasCompositedLayerMapping()
? Layer()->GetCompositedLayerMapping()->ScrollingContentsLayer()
: nullptr;
}
|
GraphicsLayer* PaintLayerScrollableArea::LayerForScrolling() const {
return Layer()->HasCompositedLayerMapping()
? Layer()->GetCompositedLayerMapping()->ScrollingContentsLayer()
: nullptr;
}
|
C
|
Chrome
| 0 |
CVE-2013-4119
|
https://www.cvedetails.com/cve/CVE-2013-4119/
|
CWE-476
|
https://github.com/FreeRDP/FreeRDP/commit/0773bb9303d24473fe1185d85a424dfe159aff53
|
0773bb9303d24473fe1185d85a424dfe159aff53
|
nla: invalidate sec handle after creation
If sec pointer isn't invalidated after creation it is not possible
to check if the upper and lower pointers are valid.
This fixes a segfault in the server part if the client disconnects before
the authentication was finished.
|
void* sspi_SecureHandleGetLowerPointer(SecHandle* handle)
{
void* pointer;
if (!handle || !SecIsValidHandle(handle))
return NULL;
pointer = (void*) ~((size_t) handle->dwLower);
return pointer;
}
|
void* sspi_SecureHandleGetLowerPointer(SecHandle* handle)
{
void* pointer;
if (!handle)
return NULL;
pointer = (void*) ~((size_t) handle->dwLower);
return pointer;
}
|
C
|
FreeRDP
| 1 |
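In the CVE-2013-4119 row above, the before/after pair shows the consumer side of the fix: the getter now tests SecIsValidHandle(). Per the commit title, the other half is invalidating handles at creation time. A hedged sketch of that half, using the SecHandle type and SecInvalidateHandle macro from WinPR's sspi headers (allocation context illustrative):

#include <stdlib.h>

/* Hedged sketch: a freshly allocated SecHandle is explicitly marked
 * invalid, so the SecIsValidHandle() test added in the getter above
 * can distinguish "never assigned" from "in use". */
SecHandle* handle = (SecHandle*) calloc(1, sizeof(SecHandle));
if (handle)
        SecInvalidateHandle(handle);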
CVE-2017-14174
|
https://www.cvedetails.com/cve/CVE-2017-14174/
|
CWE-834
|
https://github.com/ImageMagick/ImageMagick/commit/04a567494786d5bb50894fc8bb8fea0cf496bea8
|
04a567494786d5bb50894fc8bb8fea0cf496bea8
|
Slightly different fix for #714
|
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MagickPathExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image,exception);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
if (layer_info->visible == MagickFalse)
layer_info->image->compose=NoCompositeOp;
if (psd_info->mode == CMYKMode)
SetImageColorspace(layer_info->image,CMYKColorspace,exception);
else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
(psd_info->mode == GrayscaleMode))
SetImageColorspace(layer_info->image,GRAYColorspace,exception);
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
exception);
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
compression,exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateCMYK(layer_info->image,exception);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
|
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MagickPathExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image,exception);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
if (layer_info->visible == MagickFalse)
layer_info->image->compose=NoCompositeOp;
if (psd_info->mode == CMYKMode)
SetImageColorspace(layer_info->image,CMYKColorspace,exception);
else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
(psd_info->mode == GrayscaleMode))
SetImageColorspace(layer_info->image,GRAYColorspace,exception);
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
exception);
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
compression,exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateCMYK(layer_info->image,exception);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
|
C
|
ImageMagick
| 0 |
CVE-2016-8666
|
https://www.cvedetails.com/cve/CVE-2016-8666/
|
CWE-400
|
https://github.com/torvalds/linux/commit/fac8e0f579695a3ecbc4d3cac369139d7f819971
|
fac8e0f579695a3ecbc4d3cac369139d7f819971
|
tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <jesse@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
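A hedged sketch of the generalized guard (the per-packet mark follows the commit's idea; exact field names and placement inside each protocol's gro_receive handler may differ): the first tunnel layer claims the packet, and any second layer refuses to aggregate it.

/* Sketch: top of a tunnel protocol's gro_receive handler. */
static struct sk_buff **tunnel_gro_receive(struct sk_buff **head,
					   struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->encap_mark) {
		/* Another tunnel layer already ran on this packet; a second
		 * level of encapsulated GRO would build frames no driver can
		 * offload, so flush instead of aggregating. */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}
	NAPI_GRO_CB(skb)->encap_mark = 1;

	/* ... protocol-specific header checks and the call into the inner
	 * protocol's gro_receive continue as before ... */
	return NULL;
}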
|
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{
const struct rps_sock_flow_table *sock_flow_table;
struct netdev_rx_queue *rxqueue = dev->_rx;
struct rps_dev_flow_table *flow_table;
struct rps_map *map;
int cpu = -1;
u32 tcpu;
u32 hash;
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->real_num_rx_queues)) {
WARN_ONCE(dev->real_num_rx_queues > 1,
"%s received packet on queue %u, but number "
"of RX queues is %u\n",
dev->name, index, dev->real_num_rx_queues);
goto done;
}
rxqueue += index;
}
/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
flow_table = rcu_dereference(rxqueue->rps_flow_table);
map = rcu_dereference(rxqueue->rps_map);
if (!flow_table && !map)
goto done;
skb_reset_network_header(skb);
hash = skb_get_hash(skb);
if (!hash)
goto done;
sock_flow_table = rcu_dereference(rps_sock_flow_table);
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
u32 ident;
/* First check into global flow table if there is a match */
ident = sock_flow_table->ents[hash & sock_flow_table->mask];
if ((ident ^ hash) & ~rps_cpu_mask)
goto try_rps;
next_cpu = ident & rps_cpu_mask;
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
*/
rflow = &flow_table->flows[hash & flow_table->mask];
tcpu = rflow->cpu;
/*
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
* table entry), switch if one of the following holds:
* - Current CPU is unset (>= nr_cpu_ids).
* - Current CPU is offline.
* - The current CPU's queue tail has advanced beyond the
* last packet that was enqueued using this table entry.
* This guarantees that all previous packets for the flow
* have been dequeued, thus preserving in order delivery.
*/
if (unlikely(tcpu != next_cpu) &&
(tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
}
if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
goto done;
}
}
try_rps:
if (map) {
tcpu = map->cpus[reciprocal_scale(hash, map->len)];
if (cpu_online(tcpu)) {
cpu = tcpu;
goto done;
}
}
done:
return cpu;
}
|
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{
const struct rps_sock_flow_table *sock_flow_table;
struct netdev_rx_queue *rxqueue = dev->_rx;
struct rps_dev_flow_table *flow_table;
struct rps_map *map;
int cpu = -1;
u32 tcpu;
u32 hash;
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->real_num_rx_queues)) {
WARN_ONCE(dev->real_num_rx_queues > 1,
"%s received packet on queue %u, but number "
"of RX queues is %u\n",
dev->name, index, dev->real_num_rx_queues);
goto done;
}
rxqueue += index;
}
/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
flow_table = rcu_dereference(rxqueue->rps_flow_table);
map = rcu_dereference(rxqueue->rps_map);
if (!flow_table && !map)
goto done;
skb_reset_network_header(skb);
hash = skb_get_hash(skb);
if (!hash)
goto done;
sock_flow_table = rcu_dereference(rps_sock_flow_table);
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
u32 ident;
/* First check into global flow table if there is a match */
ident = sock_flow_table->ents[hash & sock_flow_table->mask];
if ((ident ^ hash) & ~rps_cpu_mask)
goto try_rps;
next_cpu = ident & rps_cpu_mask;
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
*/
rflow = &flow_table->flows[hash & flow_table->mask];
tcpu = rflow->cpu;
/*
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
* table entry), switch if one of the following holds:
* - Current CPU is unset (>= nr_cpu_ids).
* - Current CPU is offline.
* - The current CPU's queue tail has advanced beyond the
* last packet that was enqueued using this table entry.
* This guarantees that all previous packets for the flow
* have been dequeued, thus preserving in order delivery.
*/
if (unlikely(tcpu != next_cpu) &&
(tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
}
if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
goto done;
}
}
try_rps:
if (map) {
tcpu = map->cpus[reciprocal_scale(hash, map->len)];
if (cpu_online(tcpu)) {
cpu = tcpu;
goto done;
}
}
done:
return cpu;
}
|
C
|
linux
| 0 |
CVE-2013-4587
|
https://www.cvedetails.com/cve/CVE-2013-4587/
|
CWE-20
|
https://github.com/torvalds/linux/commit/338c7dbadd2671189cec7faf64c84d01071b3f96
|
338c7dbadd2671189cec7faf64c84d01071b3f96
|
KVM: Improve create VCPU parameter (CVE-2013-4587)
In multiple functions the vcpu_id is used as an offset into a bitfield. A
malicious user could specify a vcpu_id greater than 255 in order to set or
clear bits in kernel memory. This could be used to elevate privileges in the
kernel. This patch verifies that the vcpu_id provided is less than 255.
The api documentation already specifies that the vcpu_id must be less than
max_vcpus, but this is currently not checked.
Reported-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
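The shape of the fix, sketched with hedging (the exact function body differs across kernel versions; KVM_MAX_VCPUS is the documented bound): validate the user-supplied id before it is ever used as a bit index.

/* Sketch of the bounds check in the vcpu-creation ioctl path. Without
 * it, an id above the bitfield size lets userspace flip arbitrary bits
 * in kernel memory. */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	if (id >= KVM_MAX_VCPUS)	/* reject out-of-range ids up front */
		return -EINVAL;

	/* ... the existing allocation and per-vcpu bitmap manipulation
	 * follows, now safe to index with 'id' ... */
	return 0;
}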
|
int vcpu_load(struct kvm_vcpu *vcpu)
{
int cpu;
if (mutex_lock_killable(&vcpu->mutex))
return -EINTR;
if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
/* The thread running this VCPU changed. */
struct pid *oldpid = vcpu->pid;
struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
rcu_assign_pointer(vcpu->pid, newpid);
synchronize_rcu();
put_pid(oldpid);
}
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
return 0;
}
|
int vcpu_load(struct kvm_vcpu *vcpu)
{
int cpu;
if (mutex_lock_killable(&vcpu->mutex))
return -EINTR;
if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
/* The thread running this VCPU changed. */
struct pid *oldpid = vcpu->pid;
struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
rcu_assign_pointer(vcpu->pid, newpid);
synchronize_rcu();
put_pid(oldpid);
}
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
return 0;
}
|
C
|
linux
| 0 |
CVE-2013-4623
|
https://www.cvedetails.com/cve/CVE-2013-4623/
|
CWE-20
|
https://github.com/polarssl/polarssl/commit/1922a4e6aade7b1d685af19d4d9339ddb5c02859
|
1922a4e6aade7b1d685af19d4d9339ddb5c02859
|
ssl_parse_certificate() now calls x509parse_crt_der() directly
|
static int ssl_rsa_decrypt( void *ctx, int mode, size_t *olen,
const unsigned char *input, unsigned char *output,
size_t output_max_len )
{
return rsa_pkcs1_decrypt( (rsa_context *) ctx, mode, olen, input, output,
output_max_len );
}
|
static int ssl_rsa_decrypt( void *ctx, int mode, size_t *olen,
const unsigned char *input, unsigned char *output,
size_t output_max_len )
{
return rsa_pkcs1_decrypt( (rsa_context *) ctx, mode, olen, input, output,
output_max_len );
}
|
C
|
polarssl
| 0 |
CVE-2017-0377
|
https://www.cvedetails.com/cve/CVE-2017-0377/
|
CWE-200
|
https://github.com/torproject/tor/commit/665baf5ed5c6186d973c46cdea165c0548027350
|
665baf5ed5c6186d973c46cdea165c0548027350
|
Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377.
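A hedged sketch of the repaired check (helper names are approximate; the real code lives in src/or/entrynodes.c): a guard now fails the restriction not only when it is the excluded exit itself, but also when it belongs to that exit's family.

/* Sketch only: reject guards related to the chosen exit. */
static int
entry_guard_obeys_restriction(const entry_guard_t *guard,
                              const entry_guard_restriction_t *rst)
{
  tor_assert(guard);
  if (!rst)
    return 1; /* no restriction: anything goes */

  const node_t *node = node_get_by_id((const char *)rst->exclude_id);
  if (node && guard_in_node_family(guard, node))
    return 0; /* guard is in the excluded exit's family */

  /* fall back to the plain identity comparison */
  return tor_memneq(guard->identity, rst->exclude_id, DIGEST_LEN);
}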
|
node_nickname_matches(const node_t *node, const char *nickname)
{
const char *n = node_get_nickname(node);
if (n && nickname[0]!='$' && !strcasecmp(n, nickname))
return 1;
return hex_digest_nickname_matches(nickname,
node->identity,
n,
node_is_named(node));
}
|
node_nickname_matches(const node_t *node, const char *nickname)
{
const char *n = node_get_nickname(node);
if (n && nickname[0]!='$' && !strcasecmp(n, nickname))
return 1;
return hex_digest_nickname_matches(nickname,
node->identity,
n,
node_is_named(node));
}
|
C
|
tor
| 0 |
CVE-2011-2918
|
https://www.cvedetails.com/cve/CVE-2011-2918/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
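A before/after sketch of the interface change (prototypes abbreviated from include/linux/perf_event.h): the nmi flag simply disappears from the software-event and overflow entry points.

/* Before: every call site had to say whether a wakeup was safe. */
extern void __perf_sw_event(u32 event_id, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr);
extern int perf_event_overflow(struct perf_event *event, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

/* After: the flag is gone; wakeups that cannot happen in the current
 * context are deferred through irq_work instead. */
extern void __perf_sw_event(u32 event_id, u64 nr,
			    struct pt_regs *regs, u64 addr);
extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);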
|
static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask))
continue;
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
|
static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask))
continue;
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
|
C
|
linux
| 0 |
CVE-2015-1215
|
https://www.cvedetails.com/cve/CVE-2015-1215/
|
CWE-119
|
https://github.com/chromium/chromium/commit/2bceda4948deeaed0a5a99305d0d488eb952f64f
|
2bceda4948deeaed0a5a99305d0d488eb952f64f
|
Allow serialization of empty bluetooth uuids.
This change allows passing WTF::Optional<String> types as the
bluetooth.mojom.UUID optional parameter without needing to ensure the passed
object isn't empty.
BUG=None
R=juncai, dcheng
Review-Url: https://codereview.chromium.org/2646613003
Cr-Commit-Position: refs/heads/master@{#445809}
|
ScriptPromise BluetoothRemoteGATTCharacteristic::getDescriptors(
ScriptState* scriptState,
ExceptionState&) {
return getDescriptorsImpl(
scriptState, mojom::blink::WebBluetoothGATTQueryQuantity::MULTIPLE);
}
|
ScriptPromise BluetoothRemoteGATTCharacteristic::getDescriptors(
ScriptState* scriptState,
ExceptionState&) {
return getDescriptorsImpl(
scriptState, mojom::blink::WebBluetoothGATTQueryQuantity::MULTIPLE);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/df831400bcb63db4259b5858281b1727ba972a2a
|
df831400bcb63db4259b5858281b1727ba972a2a
|
WebKit2: Support window bounce when panning.
https://bugs.webkit.org/show_bug.cgi?id=58065
<rdar://problem/9244367>
Reviewed by Adam Roben.
Make gestureDidScroll synchronous, as once we scroll, we need to know
whether or not we are at the beginning or end of the scrollable document.
If we are at either end of the scrollable document, we call the Windows 7
API to bounce the window to give an indication that you are past an end
of the document.
* UIProcess/WebPageProxy.cpp:
(WebKit::WebPageProxy::gestureDidScroll): Pass a boolean for the reply, and return it.
* UIProcess/WebPageProxy.h:
* UIProcess/win/WebView.cpp:
(WebKit::WebView::WebView): Initialize a new variable.
(WebKit::WebView::onGesture): Once we send the message to scroll, check if we have gone to
an end of the document, and if we have, bounce the window.
* UIProcess/win/WebView.h:
* WebProcess/WebPage/WebPage.h:
* WebProcess/WebPage/WebPage.messages.in: GestureDidScroll is now sync.
* WebProcess/WebPage/win/WebPageWin.cpp:
(WebKit::WebPage::gestureDidScroll): When we are done scrolling, check if we have a vertical
scrollbar and if we are at the beginning or the end of the scrollable document.
git-svn-id: svn://svn.chromium.org/blink/trunk@83197 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void WebView::reapplyEditCommand(WebEditCommandProxy* command)
{
if (!m_page->isValid() || !m_page->isValidEditCommand(command))
return;
command->reapply();
}
|
void WebView::reapplyEditCommand(WebEditCommandProxy* command)
{
if (!m_page->isValid() || !m_page->isValidEditCommand(command))
return;
command->reapply();
}
|
C
|
Chrome
| 0 |
CVE-2017-7533
|
https://www.cvedetails.com/cve/CVE-2017-7533/
|
CWE-362
|
https://github.com/torvalds/linux/commit/49d31c2f389acfe83417083e1208422b4091cd9e
|
49d31c2f389acfe83417083e1208422b4091cd9e
|
dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
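A sketch of the pair described above (field and helper names follow the commit description; the real implementation is in fs/dcache.c): short names are copied into the caller's buffer, long ones pin the refcounted external name.

struct name_snapshot {
	const char *name;			/* always points at stable bytes */
	char inline_name[DNAME_INLINE_LEN];	/* copy target for short names */
};

void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	if (unlikely(dname_external(dentry))) {
		/* long name: grab a reference to the immutable external
		 * name instead of copying it */
		struct external_name *p = external_name(dentry);
		atomic_inc(&p->u.count);
		spin_unlock(&dentry->d_lock);
		name->name = p->name;
	} else {
		/* short name: memcpy into the caller-supplied buffer */
		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
		spin_unlock(&dentry->d_lock);
		name->name = name->inline_name;
	}
}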
|
static void d_walk(struct dentry *parent, void *data,
enum d_walk_ret (*enter)(void *, struct dentry *),
void (*finish)(void *))
{
struct dentry *this_parent;
struct list_head *next;
unsigned seq = 0;
enum d_walk_ret ret;
bool retry = true;
again:
read_seqbegin_or_lock(&rename_lock, &seq);
this_parent = parent;
spin_lock(&this_parent->d_lock);
ret = enter(data, this_parent);
switch (ret) {
case D_WALK_CONTINUE:
break;
case D_WALK_QUIT:
case D_WALK_SKIP:
goto out_unlock;
case D_WALK_NORETRY:
retry = false;
break;
}
repeat:
next = this_parent->d_subdirs.next;
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
continue;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
ret = enter(data, dentry);
switch (ret) {
case D_WALK_CONTINUE:
break;
case D_WALK_QUIT:
spin_unlock(&dentry->d_lock);
goto out_unlock;
case D_WALK_NORETRY:
retry = false;
break;
case D_WALK_SKIP:
spin_unlock(&dentry->d_lock);
continue;
}
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&this_parent->d_lock);
spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
this_parent = dentry;
spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
}
spin_unlock(&dentry->d_lock);
}
/*
* All done at this level ... ascend and resume the search.
*/
rcu_read_lock();
ascend:
if (this_parent != parent) {
struct dentry *child = this_parent;
this_parent = child->d_parent;
spin_unlock(&child->d_lock);
spin_lock(&this_parent->d_lock);
/* might go back up the wrong parent if we have had a rename. */
if (need_seqretry(&rename_lock, seq))
goto rename_retry;
/* go into the first sibling still alive */
do {
next = child->d_child.next;
if (next == &this_parent->d_subdirs)
goto ascend;
child = list_entry(next, struct dentry, d_child);
} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
rcu_read_unlock();
goto resume;
}
if (need_seqretry(&rename_lock, seq))
goto rename_retry;
rcu_read_unlock();
if (finish)
finish(data);
out_unlock:
spin_unlock(&this_parent->d_lock);
done_seqretry(&rename_lock, seq);
return;
rename_retry:
spin_unlock(&this_parent->d_lock);
rcu_read_unlock();
BUG_ON(seq & 1);
if (!retry)
return;
seq = 1;
goto again;
}
|
static void d_walk(struct dentry *parent, void *data,
enum d_walk_ret (*enter)(void *, struct dentry *),
void (*finish)(void *))
{
struct dentry *this_parent;
struct list_head *next;
unsigned seq = 0;
enum d_walk_ret ret;
bool retry = true;
again:
read_seqbegin_or_lock(&rename_lock, &seq);
this_parent = parent;
spin_lock(&this_parent->d_lock);
ret = enter(data, this_parent);
switch (ret) {
case D_WALK_CONTINUE:
break;
case D_WALK_QUIT:
case D_WALK_SKIP:
goto out_unlock;
case D_WALK_NORETRY:
retry = false;
break;
}
repeat:
next = this_parent->d_subdirs.next;
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
continue;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
ret = enter(data, dentry);
switch (ret) {
case D_WALK_CONTINUE:
break;
case D_WALK_QUIT:
spin_unlock(&dentry->d_lock);
goto out_unlock;
case D_WALK_NORETRY:
retry = false;
break;
case D_WALK_SKIP:
spin_unlock(&dentry->d_lock);
continue;
}
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&this_parent->d_lock);
spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
this_parent = dentry;
spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
}
spin_unlock(&dentry->d_lock);
}
/*
* All done at this level ... ascend and resume the search.
*/
rcu_read_lock();
ascend:
if (this_parent != parent) {
struct dentry *child = this_parent;
this_parent = child->d_parent;
spin_unlock(&child->d_lock);
spin_lock(&this_parent->d_lock);
/* might go back up the wrong parent if we have had a rename. */
if (need_seqretry(&rename_lock, seq))
goto rename_retry;
/* go into the first sibling still alive */
do {
next = child->d_child.next;
if (next == &this_parent->d_subdirs)
goto ascend;
child = list_entry(next, struct dentry, d_child);
} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
rcu_read_unlock();
goto resume;
}
if (need_seqretry(&rename_lock, seq))
goto rename_retry;
rcu_read_unlock();
if (finish)
finish(data);
out_unlock:
spin_unlock(&this_parent->d_lock);
done_seqretry(&rename_lock, seq);
return;
rename_retry:
spin_unlock(&this_parent->d_lock);
rcu_read_unlock();
BUG_ON(seq & 1);
if (!retry)
return;
seq = 1;
goto again;
}
|
C
|
linux
| 0 |
CVE-2018-12904
|
https://www.cvedetails.com/cve/CVE-2018-12904/
| null |
https://github.com/torvalds/linux/commit/727ba748e110b4de50d142edca9d6a9b7e6111d8
|
727ba748e110b4de50d142edca9d6a9b7e6111d8
|
kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside an L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae ("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: stable@vger.kernel.org
Signed-off-by: Felix Wilhelm <fwilhelm@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
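The software privilege check, sketched (the patch folds this into nested_vmx_check_permission() in arch/x86/kvm/vmx.c): if the guest executes a VMX instruction at cpl > 0, inject #GP instead of emulating it.

static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	if (!to_vmx(vcpu)->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}
	if (vmx_get_cpl(vcpu)) {
		/* Hardware would have raised #GP for cpl != 0, but the VM
		 * exit fires first, so the check must be done here. */
		kvm_inject_gp(vcpu, 0);
		return 0;
	}
	return 1;
}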
|
static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
/*
* When running L2, updating RVI is only relevant when
* vmcs12 virtual-interrupt-delivery enabled.
* However, it can be enabled only when L1 also
* intercepts external-interrupts and in that case
* we should not update vmcs02 RVI but instead intercept
* interrupt. Therefore, do nothing when running L2.
*/
if (!is_guest_mode(vcpu))
vmx_set_rvi(max_irr);
}
|
static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
/*
* When running L2, updating RVI is only relevant when
* vmcs12 virtual-interrupt-delivery enabled.
* However, it can be enabled only when L1 also
* intercepts external-interrupts and in that case
* we should not update vmcs02 RVI but instead intercept
* interrupt. Therefore, do nothing when running L2.
*/
if (!is_guest_mode(vcpu))
vmx_set_rvi(max_irr);
}
|
C
|
linux
| 0 |
CVE-2018-11590
|
https://www.cvedetails.com/cve/CVE-2018-11590/
|
CWE-190
|
https://github.com/espruino/Espruino/commit/a0d7f432abee692402c00e8b615ff5982dde9780
|
a0d7f432abee692402c00e8b615ff5982dde9780
|
Fix stack size detection on Linux (fix #1427)
|
NO_INLINE void jsError_flash(const char *fmt, ...) {
size_t len = flash_strlen(fmt);
char buff[len+1];
flash_strncpy(buff, fmt, len+1);
jsiConsoleRemoveInputLine();
jsiConsolePrint("ERROR: ");
va_list argp;
va_start(argp, fmt);
vcbprintf((vcbprintf_callback)jsiConsolePrintString,0, buff, argp);
va_end(argp);
jsiConsolePrint("\n");
}
|
NO_INLINE void jsError_flash(const char *fmt, ...) {
size_t len = flash_strlen(fmt);
char buff[len+1];
flash_strncpy(buff, fmt, len+1);
jsiConsoleRemoveInputLine();
jsiConsolePrint("ERROR: ");
va_list argp;
va_start(argp, fmt);
vcbprintf((vcbprintf_callback)jsiConsolePrintString,0, buff, argp);
va_end(argp);
jsiConsolePrint("\n");
}
|
C
|
Espruino
| 0 |
CVE-2019-6978
|
https://www.cvedetails.com/cve/CVE-2019-6978/
|
CWE-415
|
https://github.com/php/php-src/commit/089f7c0bc28d399b0420aa6ef058e4c1c120b2ae
|
089f7c0bc28d399b0420aa6ef058e4c1c120b2ae
|
Sync with upstream
Even though libgd/libgd#492 is not a relevant bug fix for PHP, since
the binding doesn't use the `gdImage*Ptr()` functions at all, we're
porting the fix to stay in sync here.
|
gdImagePtr gdImageCreateFromJpeg (FILE * inFile)
{
return gdImageCreateFromJpegEx(inFile, 1);
}
|
gdImagePtr gdImageCreateFromJpeg (FILE * inFile)
{
return gdImageCreateFromJpegEx(inFile, 1);
}
|
C
|
php-src
| 0 |
CVE-2016-3751
|
https://www.cvedetails.com/cve/CVE-2016-3751/
| null |
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
|
gp_ag16(Pixel *p, png_const_voidp pb)
{
png_const_uint_16p pp = voidcast(png_const_uint_16p, pb);
p->r = p->g = p->b = pp[1];
p->a = pp[0];
}
|
gp_ag16(Pixel *p, png_const_voidp pb)
{
png_const_uint_16p pp = voidcast(png_const_uint_16p, pb);
p->r = p->g = p->b = pp[1];
p->a = pp[0];
}
|
C
|
Android
| 0 |
CVE-2015-6769
|
https://www.cvedetails.com/cve/CVE-2015-6769/
|
CWE-264
|
https://github.com/chromium/chromium/commit/33c5e0a9db05dbd2f7793c23ac23b7aa6a556c05
|
33c5e0a9db05dbd2f7793c23ac23b7aa6a556c05
|
Fixing names of password_manager kEnableManualFallbacksFilling feature.
Fixing names of password_manager kEnableManualFallbacksFilling feature
as per the naming convention.
Bug: 785953
Change-Id: I4a4baa1649fe9f02c3783a5e4c40bc75e717cc03
Reviewed-on: https://chromium-review.googlesource.com/900566
Reviewed-by: Vaclav Brozek <vabr@chromium.org>
Commit-Queue: NIKHIL SAHNI <nikhil.sahni@samsung.com>
Cr-Commit-Position: refs/heads/master@{#534923}
|
bool PasswordAutofillManager::RemoveSuggestion(const base::string16& value,
int identifier) {
return false;
}
|
bool PasswordAutofillManager::RemoveSuggestion(const base::string16& value,
int identifier) {
return false;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/327585cb0eab0859518643a2d00917081f7e7645
|
327585cb0eab0859518643a2d00917081f7e7645
|
2010-10-26 Kenneth Russell <kbr@google.com>
Reviewed by Andreas Kling.
Valgrind failure in GraphicsContext3DInternal::reshape
https://bugs.webkit.org/show_bug.cgi?id=48284
* src/WebGraphicsContext3DDefaultImpl.cpp:
(WebKit::WebGraphicsContext3DDefaultImpl::WebGraphicsContext3DDefaultImpl):
git-svn-id: svn://svn.chromium.org/blink/trunk@70534 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
bool WebGraphicsContext3DDefaultImpl::isErrorGeneratedOnOutOfBoundsAccesses()
{
return false;
}
|
bool WebGraphicsContext3DDefaultImpl::isErrorGeneratedOnOutOfBoundsAccesses()
{
return false;
}
|
C
|
Chrome
| 0 |
CVE-2017-11144
|
https://www.cvedetails.com/cve/CVE-2017-11144/
|
CWE-754
|
https://git.php.net/?p=php-src.git;a=commit;h=73cabfedf519298e1a11192699f44d53c529315e
|
73cabfedf519298e1a11192699f44d53c529315e
| null |
PHP_FUNCTION(openssl_pkcs7_verify)
{
X509_STORE * store = NULL;
zval * cainfo = NULL;
STACK_OF(X509) *signers= NULL;
STACK_OF(X509) *others = NULL;
PKCS7 * p7 = NULL;
BIO * in = NULL, * datain = NULL, * dataout = NULL;
zend_long flags = 0;
char * filename;
size_t filename_len;
char * extracerts = NULL;
size_t extracerts_len = 0;
char * signersfilename = NULL;
size_t signersfilename_len = 0;
char * datafilename = NULL;
size_t datafilename_len = 0;
RETVAL_LONG(-1);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "pl|papp", &filename, &filename_len,
&flags, &signersfilename, &signersfilename_len, &cainfo,
&extracerts, &extracerts_len, &datafilename, &datafilename_len) == FAILURE) {
return;
}
if (extracerts) {
others = load_all_certs_from_file(extracerts);
if (others == NULL) {
goto clean_exit;
}
}
flags = flags & ~PKCS7_DETACHED;
store = setup_verify(cainfo);
if (!store) {
goto clean_exit;
}
if (php_openssl_open_base_dir_chk(filename)) {
goto clean_exit;
}
in = BIO_new_file(filename, (flags & PKCS7_BINARY) ? "rb" : "r");
if (in == NULL) {
goto clean_exit;
}
p7 = SMIME_read_PKCS7(in, &datain);
if (p7 == NULL) {
#if DEBUG_SMIME
zend_printf("SMIME_read_PKCS7 failed\n");
#endif
goto clean_exit;
}
if (datafilename) {
if (php_openssl_open_base_dir_chk(datafilename)) {
goto clean_exit;
}
dataout = BIO_new_file(datafilename, "w");
if (dataout == NULL) {
goto clean_exit;
}
}
#if DEBUG_SMIME
zend_printf("Calling PKCS7 verify\n");
#endif
if (PKCS7_verify(p7, others, store, datain, dataout, (int)flags)) {
RETVAL_TRUE;
if (signersfilename) {
BIO *certout;
if (php_openssl_open_base_dir_chk(signersfilename)) {
goto clean_exit;
}
certout = BIO_new_file(signersfilename, "w");
if (certout) {
int i;
signers = PKCS7_get0_signers(p7, NULL, (int)flags);
for(i = 0; i < sk_X509_num(signers); i++) {
PEM_write_bio_X509(certout, sk_X509_value(signers, i));
}
BIO_free(certout);
sk_X509_free(signers);
} else {
php_error_docref(NULL, E_WARNING, "signature OK, but cannot open %s for writing", signersfilename);
RETVAL_LONG(-1);
}
}
goto clean_exit;
} else {
RETVAL_FALSE;
}
clean_exit:
X509_STORE_free(store);
BIO_free(datain);
BIO_free(in);
BIO_free(dataout);
PKCS7_free(p7);
sk_X509_free(others);
}
|
PHP_FUNCTION(openssl_pkcs7_verify)
{
X509_STORE * store = NULL;
zval * cainfo = NULL;
STACK_OF(X509) *signers= NULL;
STACK_OF(X509) *others = NULL;
PKCS7 * p7 = NULL;
BIO * in = NULL, * datain = NULL, * dataout = NULL;
zend_long flags = 0;
char * filename;
size_t filename_len;
char * extracerts = NULL;
size_t extracerts_len = 0;
char * signersfilename = NULL;
size_t signersfilename_len = 0;
char * datafilename = NULL;
size_t datafilename_len = 0;
RETVAL_LONG(-1);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "pl|papp", &filename, &filename_len,
&flags, &signersfilename, &signersfilename_len, &cainfo,
&extracerts, &extracerts_len, &datafilename, &datafilename_len) == FAILURE) {
return;
}
if (extracerts) {
others = load_all_certs_from_file(extracerts);
if (others == NULL) {
goto clean_exit;
}
}
flags = flags & ~PKCS7_DETACHED;
store = setup_verify(cainfo);
if (!store) {
goto clean_exit;
}
if (php_openssl_open_base_dir_chk(filename)) {
goto clean_exit;
}
in = BIO_new_file(filename, (flags & PKCS7_BINARY) ? "rb" : "r");
if (in == NULL) {
goto clean_exit;
}
p7 = SMIME_read_PKCS7(in, &datain);
if (p7 == NULL) {
#if DEBUG_SMIME
zend_printf("SMIME_read_PKCS7 failed\n");
#endif
goto clean_exit;
}
if (datafilename) {
if (php_openssl_open_base_dir_chk(datafilename)) {
goto clean_exit;
}
dataout = BIO_new_file(datafilename, "w");
if (dataout == NULL) {
goto clean_exit;
}
}
#if DEBUG_SMIME
zend_printf("Calling PKCS7 verify\n");
#endif
if (PKCS7_verify(p7, others, store, datain, dataout, (int)flags)) {
RETVAL_TRUE;
if (signersfilename) {
BIO *certout;
if (php_openssl_open_base_dir_chk(signersfilename)) {
goto clean_exit;
}
certout = BIO_new_file(signersfilename, "w");
if (certout) {
int i;
signers = PKCS7_get0_signers(p7, NULL, (int)flags);
for(i = 0; i < sk_X509_num(signers); i++) {
PEM_write_bio_X509(certout, sk_X509_value(signers, i));
}
BIO_free(certout);
sk_X509_free(signers);
} else {
php_error_docref(NULL, E_WARNING, "signature OK, but cannot open %s for writing", signersfilename);
RETVAL_LONG(-1);
}
}
goto clean_exit;
} else {
RETVAL_FALSE;
}
clean_exit:
X509_STORE_free(store);
BIO_free(datain);
BIO_free(in);
BIO_free(dataout);
PKCS7_free(p7);
sk_X509_free(others);
}
|
C
|
php
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/ec14f31eca3a51f665432973552ee575635132b3
|
ec14f31eca3a51f665432973552ee575635132b3
|
[EFL] Change the behavior of ewk_view_scale_set.
https://bugs.webkit.org/show_bug.cgi?id=70078
Reviewed by Eric Seidel.
Remove center point basis zoom alignment from ewk_view_scale_set to call
Page::setPageScaleFactor without any adjustment.
* ewk/ewk_view.cpp:
(ewk_view_scale_set):
* ewk/ewk_view.h:
git-svn-id: svn://svn.chromium.org/blink/trunk@103288 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void ewk_view_popup_new(Evas_Object* ewkView, WebCore::PopupMenuClient* client, int selected, const WebCore::IntRect& rect)
{
INF("ewkView=%p", ewkView);
EWK_VIEW_SD_GET_OR_RETURN(ewkView, smartData);
EWK_VIEW_PRIV_GET_OR_RETURN(smartData, priv);
if (priv->popup.menuClient)
ewk_view_popup_destroy(ewkView);
priv->popup.menuClient = client;
const int size = client->listSize();
for (int i = 0; i < size; ++i) {
Ewk_Menu_Item* item = static_cast<Ewk_Menu_Item*>(malloc(sizeof(*item)));
if (client->itemIsSeparator(i))
item->type = EWK_MENU_SEPARATOR;
else if (client->itemIsLabel(i))
item->type = EWK_MENU_GROUP;
else
item->type = EWK_MENU_OPTION;
item->text = eina_stringshare_add(client->itemText(i).utf8().data());
priv->popup.menu.items = eina_list_append(priv->popup.menu.items, item);
}
priv->popup.menu.x = rect.x();
priv->popup.menu.y = rect.y();
priv->popup.menu.width = rect.width();
priv->popup.menu.height = rect.height();
evas_object_smart_callback_call(ewkView, "popup,create", &priv->popup.menu);
}
|
void ewk_view_popup_new(Evas_Object* ewkView, WebCore::PopupMenuClient* client, int selected, const WebCore::IntRect& rect)
{
INF("ewkView=%p", ewkView);
EWK_VIEW_SD_GET_OR_RETURN(ewkView, smartData);
EWK_VIEW_PRIV_GET_OR_RETURN(smartData, priv);
if (priv->popup.menuClient)
ewk_view_popup_destroy(ewkView);
priv->popup.menuClient = client;
const int size = client->listSize();
for (int i = 0; i < size; ++i) {
Ewk_Menu_Item* item = static_cast<Ewk_Menu_Item*>(malloc(sizeof(*item)));
if (client->itemIsSeparator(i))
item->type = EWK_MENU_SEPARATOR;
else if (client->itemIsLabel(i))
item->type = EWK_MENU_GROUP;
else
item->type = EWK_MENU_OPTION;
item->text = eina_stringshare_add(client->itemText(i).utf8().data());
priv->popup.menu.items = eina_list_append(priv->popup.menu.items, item);
}
priv->popup.menu.x = rect.x();
priv->popup.menu.y = rect.y();
priv->popup.menu.width = rect.width();
priv->popup.menu.height = rect.height();
evas_object_smart_callback_call(ewkView, "popup,create", &priv->popup.menu);
}
|
C
|
Chrome
| 0 |
CVE-2013-2867
|
https://www.cvedetails.com/cve/CVE-2013-2867/
| null |
https://github.com/chromium/chromium/commit/d358f57009b85fb7440208afa5ba87636b491889
|
d358f57009b85fb7440208afa5ba87636b491889
|
Refactor to support default Bluetooth pairing delegate
In order to support a default pairing delegate we need to move the agent
service provider delegate implementation from BluetoothDevice to
BluetoothAdapter while retaining the existing API.
BUG=338492
TEST=device_unittests, unit_tests, browser_tests
Review URL: https://codereview.chromium.org/148293003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@252216 0039d316-1c4b-4281-b951-d872f2087c98
|
virtual void TearDown() {
adapter_ = NULL;
DBusThreadManager::Shutdown();
}
|
virtual void TearDown() {
adapter_ = NULL;
DBusThreadManager::Shutdown();
}
|
C
|
Chrome
| 0 |
CVE-2013-2007
|
https://www.cvedetails.com/cve/CVE-2013-2007/
|
CWE-264
|
https://git.qemu.org/?p=qemu.git;a=commit;h=c689b4f1bac352dcfd6ecb9a1d45337de0f1de67
|
c689b4f1bac352dcfd6ecb9a1d45337de0f1de67
| null |
static gboolean channel_event_cb(GIOCondition condition, gpointer data)
{
GAState *s = data;
gchar buf[QGA_READ_COUNT_DEFAULT+1];
gsize count;
GError *err = NULL;
GIOStatus status = ga_channel_read(s->channel, buf, QGA_READ_COUNT_DEFAULT, &count);
if (err != NULL) {
g_warning("error reading channel: %s", err->message);
g_error_free(err);
return false;
}
switch (status) {
case G_IO_STATUS_ERROR:
g_warning("error reading channel");
return false;
case G_IO_STATUS_NORMAL:
buf[count] = 0;
g_debug("read data, count: %d, data: %s", (int)count, buf);
json_message_parser_feed(&s->parser, (char *)buf, (int)count);
break;
case G_IO_STATUS_EOF:
g_debug("received EOF");
if (!s->virtio) {
return false;
}
/* fall through */
case G_IO_STATUS_AGAIN:
/* virtio causes us to spin here when no process is attached to
* host-side chardev. sleep a bit to mitigate this
*/
if (s->virtio) {
usleep(100*1000);
}
return true;
default:
g_warning("unknown channel read status, closing");
return false;
}
return true;
}
|
static gboolean channel_event_cb(GIOCondition condition, gpointer data)
{
GAState *s = data;
gchar buf[QGA_READ_COUNT_DEFAULT+1];
gsize count;
GError *err = NULL;
GIOStatus status = ga_channel_read(s->channel, buf, QGA_READ_COUNT_DEFAULT, &count);
if (err != NULL) {
g_warning("error reading channel: %s", err->message);
g_error_free(err);
return false;
}
switch (status) {
case G_IO_STATUS_ERROR:
g_warning("error reading channel");
return false;
case G_IO_STATUS_NORMAL:
buf[count] = 0;
g_debug("read data, count: %d, data: %s", (int)count, buf);
json_message_parser_feed(&s->parser, (char *)buf, (int)count);
break;
case G_IO_STATUS_EOF:
g_debug("received EOF");
if (!s->virtio) {
return false;
}
/* fall through */
case G_IO_STATUS_AGAIN:
/* virtio causes us to spin here when no process is attached to
* host-side chardev. sleep a bit to mitigate this
*/
if (s->virtio) {
usleep(100*1000);
}
return true;
default:
g_warning("unknown channel read status, closing");
return false;
}
return true;
}
|
C
|
qemu
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/1161a49d663dd395bd639549c2dfe7324f847938
|
1161a49d663dd395bd639549c2dfe7324f847938
|
Don't populate URL data in WebDropData when dragging files.
This is considered a potential security issue as well, since it leaks
filesystem paths.
BUG=332579
Review URL: https://codereview.chromium.org/135633002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@244538 0039d316-1c4b-4281-b951-d872f2087c98
|
int TabStrip::GetSizeNeededForTabs(const std::vector<Tab*>& tabs) {
int width = 0;
for (size_t i = 0; i < tabs.size(); ++i) {
Tab* tab = tabs[i];
width += tab->width();
if (i > 0 && tab->data().mini != tabs[i - 1]->data().mini)
width += kMiniToNonMiniGap;
}
if (tabs.size() > 0)
width += tab_h_offset() * static_cast<int>(tabs.size() - 1);
return width;
}
|
int TabStrip::GetSizeNeededForTabs(const std::vector<Tab*>& tabs) {
int width = 0;
for (size_t i = 0; i < tabs.size(); ++i) {
Tab* tab = tabs[i];
width += tab->width();
if (i > 0 && tab->data().mini != tabs[i - 1]->data().mini)
width += kMiniToNonMiniGap;
}
if (tabs.size() > 0)
width += tab_h_offset() * static_cast<int>(tabs.size() - 1);
return width;
}
|
C
|
Chrome
| 0 |
CVE-2016-1665
|
https://www.cvedetails.com/cve/CVE-2016-1665/
|
CWE-20
|
https://github.com/chromium/chromium/commit/282f53ffdc3b1902da86f6a0791af736837efbf8
|
282f53ffdc3b1902da86f6a0791af736837efbf8
|
[signin] Add metrics to track the source for refresh token updated events
This CL adds a source for update and revoke credentials operations. It then
surfaces the source in the chrome://signin-internals page.
This CL also records the following histograms that track refresh token events:
* Signin.RefreshTokenUpdated.ToValidToken.Source
* Signin.RefreshTokenUpdated.ToInvalidToken.Source
* Signin.RefreshTokenRevoked.Source
These histograms are needed to validate the assumptions of how often tokens
are revoked by the browser and the sources for the token revocations.
Bug: 896182
Change-Id: I2fcab80ee8e5699708e695bc3289fa6d34859a90
Reviewed-on: https://chromium-review.googlesource.com/c/1286464
Reviewed-by: Jochen Eisinger <jochen@chromium.org>
Reviewed-by: David Roger <droger@chromium.org>
Reviewed-by: Ilya Sherman <isherman@chromium.org>
Commit-Queue: Mihai Sardarescu <msarda@chromium.org>
Cr-Commit-Position: refs/heads/master@{#606181}
|
void ProfileChooserView::ShowViewFromMode(profiles::BubbleViewMode mode) {
if (SigninViewController::ShouldShowSigninForMode(mode)) {
Hide();
browser_->signin_view_controller()->ShowSignin(mode, browser_,
access_point_);
} else {
ShowView(mode, avatar_menu_.get());
}
}
|
void ProfileChooserView::ShowViewFromMode(profiles::BubbleViewMode mode) {
if (SigninViewController::ShouldShowSigninForMode(mode)) {
Hide();
browser_->signin_view_controller()->ShowSignin(mode, browser_,
access_point_);
} else {
ShowView(mode, avatar_menu_.get());
}
}
|
C
|
Chrome
| 0 |
CVE-2016-2107
|
https://www.cvedetails.com/cve/CVE-2016-2107/
|
CWE-310
|
https://git.openssl.org/?p=openssl.git;a=commit;h=68595c0c2886e7942a14f98c17a55a88afb6c292
|
68595c0c2886e7942a14f98c17a55a88afb6c292
| null |
static size_t tls1_1_multi_block_encrypt(EVP_AES_HMAC_SHA256 *key,
unsigned char *out,
const unsigned char *inp,
size_t inp_len, int n4x)
{ /* n4x is 1 or 2 */
HASH_DESC hash_d[8], edges[8];
CIPH_DESC ciph_d[8];
unsigned char storage[sizeof(SHA256_MB_CTX) + 32];
union {
u64 q[16];
u32 d[32];
u8 c[128];
} blocks[8];
SHA256_MB_CTX *ctx;
unsigned int frag, last, packlen, i, x4 = 4 * n4x, minblocks, processed =
0;
size_t ret = 0;
u8 *IVs;
# if defined(BSWAP8)
u64 seqnum;
# endif
/* ask for IVs in bulk */
if (RAND_bytes((IVs = blocks[0].c), 16 * x4) <= 0)
return 0;
/* align */
ctx = (SHA256_MB_CTX *) (storage + 32 - ((size_t)storage % 32));
frag = (unsigned int)inp_len >> (1 + n4x);
last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
frag++;
last -= x4 - 1;
}
packlen = 5 + 16 + ((frag + 32 + 16) & -16);
/* populate descriptors with pointers and IVs */
hash_d[0].ptr = inp;
ciph_d[0].inp = inp;
/* 5+16 is place for header and explicit IV */
ciph_d[0].out = out + 5 + 16;
memcpy(ciph_d[0].out - 16, IVs, 16);
memcpy(ciph_d[0].iv, IVs, 16);
IVs += 16;
for (i = 1; i < x4; i++) {
ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
ciph_d[i].out = ciph_d[i - 1].out + packlen;
memcpy(ciph_d[i].out - 16, IVs, 16);
memcpy(ciph_d[i].iv, IVs, 16);
IVs += 16;
}
# if defined(BSWAP8)
memcpy(blocks[0].c, key->md.data, 8);
seqnum = BSWAP8(blocks[0].q[0]);
# endif
for (i = 0; i < x4; i++) {
unsigned int len = (i == (x4 - 1) ? last : frag);
# if !defined(BSWAP8)
unsigned int carry, j;
# endif
ctx->A[i] = key->md.h[0];
ctx->B[i] = key->md.h[1];
ctx->C[i] = key->md.h[2];
ctx->D[i] = key->md.h[3];
ctx->E[i] = key->md.h[4];
ctx->F[i] = key->md.h[5];
ctx->G[i] = key->md.h[6];
ctx->H[i] = key->md.h[7];
/* fix seqnum */
# if defined(BSWAP8)
blocks[i].q[0] = BSWAP8(seqnum + i);
# else
for (carry = i, j = 8; j--;) {
blocks[i].c[j] = ((u8 *)key->md.data)[j] + carry;
carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
}
# endif
blocks[i].c[8] = ((u8 *)key->md.data)[8];
blocks[i].c[9] = ((u8 *)key->md.data)[9];
blocks[i].c[10] = ((u8 *)key->md.data)[10];
/* fix length */
blocks[i].c[11] = (u8)(len >> 8);
blocks[i].c[12] = (u8)(len);
memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
hash_d[i].ptr += 64 - 13;
hash_d[i].blocks = (len - (64 - 13)) / 64;
edges[i].ptr = blocks[i].c;
edges[i].blocks = 1;
}
/* hash 13-byte headers and first 64-13 bytes of inputs */
sha256_multi_block(ctx, edges, n4x);
/* hash bulk inputs */
# define MAXCHUNKSIZE 2048
# if MAXCHUNKSIZE%64
# error "MAXCHUNKSIZE is not divisible by 64"
# elif MAXCHUNKSIZE
/*
* goal is to minimize pressure on L1 cache by moving in shorter steps,
* so that hashed data is still in the cache by the time we encrypt it
*/
minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
if (minblocks > MAXCHUNKSIZE / 64) {
for (i = 0; i < x4; i++) {
edges[i].ptr = hash_d[i].ptr;
edges[i].blocks = MAXCHUNKSIZE / 64;
ciph_d[i].blocks = MAXCHUNKSIZE / 16;
}
do {
sha256_multi_block(ctx, edges, n4x);
aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);
for (i = 0; i < x4; i++) {
edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
hash_d[i].blocks -= MAXCHUNKSIZE / 64;
edges[i].blocks = MAXCHUNKSIZE / 64;
ciph_d[i].inp += MAXCHUNKSIZE;
ciph_d[i].out += MAXCHUNKSIZE;
ciph_d[i].blocks = MAXCHUNKSIZE / 16;
memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
}
processed += MAXCHUNKSIZE;
minblocks -= MAXCHUNKSIZE / 64;
} while (minblocks > MAXCHUNKSIZE / 64);
}
# endif
# undef MAXCHUNKSIZE
sha256_multi_block(ctx, hash_d, n4x);
memset(blocks, 0, sizeof(blocks));
for (i = 0; i < x4; i++) {
unsigned int len = (i == (x4 - 1) ? last : frag),
off = hash_d[i].blocks * 64;
const unsigned char *ptr = hash_d[i].ptr + off;
off = (len - processed) - (64 - 13) - off; /* remainder actually */
memcpy(blocks[i].c, ptr, off);
blocks[i].c[off] = 0x80;
len += 64 + 13; /* 64 is HMAC header */
len *= 8; /* convert to bits */
if (off < (64 - 8)) {
# ifdef BSWAP4
blocks[i].d[15] = BSWAP4(len);
# else
PUTU32(blocks[i].c + 60, len);
# endif
edges[i].blocks = 1;
} else {
# ifdef BSWAP4
blocks[i].d[31] = BSWAP4(len);
# else
PUTU32(blocks[i].c + 124, len);
# endif
edges[i].blocks = 2;
}
edges[i].ptr = blocks[i].c;
}
/* hash input tails and finalize */
sha256_multi_block(ctx, edges, n4x);
memset(blocks, 0, sizeof(blocks));
for (i = 0; i < x4; i++) {
# ifdef BSWAP4
blocks[i].d[0] = BSWAP4(ctx->A[i]);
ctx->A[i] = key->tail.h[0];
blocks[i].d[1] = BSWAP4(ctx->B[i]);
ctx->B[i] = key->tail.h[1];
blocks[i].d[2] = BSWAP4(ctx->C[i]);
ctx->C[i] = key->tail.h[2];
blocks[i].d[3] = BSWAP4(ctx->D[i]);
ctx->D[i] = key->tail.h[3];
blocks[i].d[4] = BSWAP4(ctx->E[i]);
ctx->E[i] = key->tail.h[4];
blocks[i].d[5] = BSWAP4(ctx->F[i]);
ctx->F[i] = key->tail.h[5];
blocks[i].d[6] = BSWAP4(ctx->G[i]);
ctx->G[i] = key->tail.h[6];
blocks[i].d[7] = BSWAP4(ctx->H[i]);
ctx->H[i] = key->tail.h[7];
blocks[i].c[32] = 0x80;
blocks[i].d[15] = BSWAP4((64 + 32) * 8);
# else
PUTU32(blocks[i].c + 0, ctx->A[i]);
ctx->A[i] = key->tail.h[0];
PUTU32(blocks[i].c + 4, ctx->B[i]);
ctx->B[i] = key->tail.h[1];
PUTU32(blocks[i].c + 8, ctx->C[i]);
ctx->C[i] = key->tail.h[2];
PUTU32(blocks[i].c + 12, ctx->D[i]);
ctx->D[i] = key->tail.h[3];
PUTU32(blocks[i].c + 16, ctx->E[i]);
ctx->E[i] = key->tail.h[4];
PUTU32(blocks[i].c + 20, ctx->F[i]);
ctx->F[i] = key->tail.h[5];
PUTU32(blocks[i].c + 24, ctx->G[i]);
ctx->G[i] = key->tail.h[6];
PUTU32(blocks[i].c + 28, ctx->H[i]);
ctx->H[i] = key->tail.h[7];
blocks[i].c[32] = 0x80;
PUTU32(blocks[i].c + 60, (64 + 32) * 8);
# endif
edges[i].ptr = blocks[i].c;
edges[i].blocks = 1;
}
/* finalize MACs */
sha256_multi_block(ctx, edges, n4x);
for (i = 0; i < x4; i++) {
unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
unsigned char *out0 = out;
memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
ciph_d[i].inp = ciph_d[i].out;
out += 5 + 16 + len;
/* write MAC */
PUTU32(out + 0, ctx->A[i]);
PUTU32(out + 4, ctx->B[i]);
PUTU32(out + 8, ctx->C[i]);
PUTU32(out + 12, ctx->D[i]);
PUTU32(out + 16, ctx->E[i]);
PUTU32(out + 20, ctx->F[i]);
PUTU32(out + 24, ctx->G[i]);
PUTU32(out + 28, ctx->H[i]);
out += 32;
len += 32;
/* pad */
pad = 15 - len % 16;
for (j = 0; j <= pad; j++)
*(out++) = pad;
len += pad + 1;
ciph_d[i].blocks = (len - processed) / 16;
len += 16; /* account for explicit iv */
/* arrange header */
out0[0] = ((u8 *)key->md.data)[8];
out0[1] = ((u8 *)key->md.data)[9];
out0[2] = ((u8 *)key->md.data)[10];
out0[3] = (u8)(len >> 8);
out0[4] = (u8)(len);
ret += len + 5;
inp += frag;
}
aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);
OPENSSL_cleanse(blocks, sizeof(blocks));
OPENSSL_cleanse(ctx, sizeof(*ctx));
return ret;
}
|
static size_t tls1_1_multi_block_encrypt(EVP_AES_HMAC_SHA256 *key,
unsigned char *out,
const unsigned char *inp,
size_t inp_len, int n4x)
{ /* n4x is 1 or 2 */
HASH_DESC hash_d[8], edges[8];
CIPH_DESC ciph_d[8];
unsigned char storage[sizeof(SHA256_MB_CTX) + 32];
union {
u64 q[16];
u32 d[32];
u8 c[128];
} blocks[8];
SHA256_MB_CTX *ctx;
unsigned int frag, last, packlen, i, x4 = 4 * n4x, minblocks, processed =
0;
size_t ret = 0;
u8 *IVs;
# if defined(BSWAP8)
u64 seqnum;
# endif
/* ask for IVs in bulk */
if (RAND_bytes((IVs = blocks[0].c), 16 * x4) <= 0)
return 0;
/* align */
ctx = (SHA256_MB_CTX *) (storage + 32 - ((size_t)storage % 32));
frag = (unsigned int)inp_len >> (1 + n4x);
last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
frag++;
last -= x4 - 1;
}
packlen = 5 + 16 + ((frag + 32 + 16) & -16);
/* populate descriptors with pointers and IVs */
hash_d[0].ptr = inp;
ciph_d[0].inp = inp;
/* 5+16 is place for header and explicit IV */
ciph_d[0].out = out + 5 + 16;
memcpy(ciph_d[0].out - 16, IVs, 16);
memcpy(ciph_d[0].iv, IVs, 16);
IVs += 16;
for (i = 1; i < x4; i++) {
ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
ciph_d[i].out = ciph_d[i - 1].out + packlen;
memcpy(ciph_d[i].out - 16, IVs, 16);
memcpy(ciph_d[i].iv, IVs, 16);
IVs += 16;
}
# if defined(BSWAP8)
memcpy(blocks[0].c, key->md.data, 8);
seqnum = BSWAP8(blocks[0].q[0]);
# endif
for (i = 0; i < x4; i++) {
unsigned int len = (i == (x4 - 1) ? last : frag);
# if !defined(BSWAP8)
unsigned int carry, j;
# endif
ctx->A[i] = key->md.h[0];
ctx->B[i] = key->md.h[1];
ctx->C[i] = key->md.h[2];
ctx->D[i] = key->md.h[3];
ctx->E[i] = key->md.h[4];
ctx->F[i] = key->md.h[5];
ctx->G[i] = key->md.h[6];
ctx->H[i] = key->md.h[7];
/* fix seqnum */
# if defined(BSWAP8)
blocks[i].q[0] = BSWAP8(seqnum + i);
# else
for (carry = i, j = 8; j--;) {
blocks[i].c[j] = ((u8 *)key->md.data)[j] + carry;
carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
}
# endif
blocks[i].c[8] = ((u8 *)key->md.data)[8];
blocks[i].c[9] = ((u8 *)key->md.data)[9];
blocks[i].c[10] = ((u8 *)key->md.data)[10];
/* fix length */
blocks[i].c[11] = (u8)(len >> 8);
blocks[i].c[12] = (u8)(len);
memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
hash_d[i].ptr += 64 - 13;
hash_d[i].blocks = (len - (64 - 13)) / 64;
edges[i].ptr = blocks[i].c;
edges[i].blocks = 1;
}
/* hash 13-byte headers and first 64-13 bytes of inputs */
sha256_multi_block(ctx, edges, n4x);
/* hash bulk inputs */
# define MAXCHUNKSIZE 2048
# if MAXCHUNKSIZE%64
# error "MAXCHUNKSIZE is not divisible by 64"
# elif MAXCHUNKSIZE
/*
* goal is to minimize pressure on L1 cache by moving in shorter steps,
* so that hashed data is still in the cache by the time we encrypt it
*/
minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
if (minblocks > MAXCHUNKSIZE / 64) {
for (i = 0; i < x4; i++) {
edges[i].ptr = hash_d[i].ptr;
edges[i].blocks = MAXCHUNKSIZE / 64;
ciph_d[i].blocks = MAXCHUNKSIZE / 16;
}
do {
sha256_multi_block(ctx, edges, n4x);
aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);
for (i = 0; i < x4; i++) {
edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
hash_d[i].blocks -= MAXCHUNKSIZE / 64;
edges[i].blocks = MAXCHUNKSIZE / 64;
ciph_d[i].inp += MAXCHUNKSIZE;
ciph_d[i].out += MAXCHUNKSIZE;
ciph_d[i].blocks = MAXCHUNKSIZE / 16;
memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
}
processed += MAXCHUNKSIZE;
minblocks -= MAXCHUNKSIZE / 64;
} while (minblocks > MAXCHUNKSIZE / 64);
}
# endif
# undef MAXCHUNKSIZE
sha256_multi_block(ctx, hash_d, n4x);
memset(blocks, 0, sizeof(blocks));
for (i = 0; i < x4; i++) {
unsigned int len = (i == (x4 - 1) ? last : frag),
off = hash_d[i].blocks * 64;
const unsigned char *ptr = hash_d[i].ptr + off;
off = (len - processed) - (64 - 13) - off; /* remainder actually */
memcpy(blocks[i].c, ptr, off);
blocks[i].c[off] = 0x80;
len += 64 + 13; /* 64 is HMAC header */
len *= 8; /* convert to bits */
if (off < (64 - 8)) {
# ifdef BSWAP4
blocks[i].d[15] = BSWAP4(len);
# else
PUTU32(blocks[i].c + 60, len);
# endif
edges[i].blocks = 1;
} else {
# ifdef BSWAP4
blocks[i].d[31] = BSWAP4(len);
# else
PUTU32(blocks[i].c + 124, len);
# endif
edges[i].blocks = 2;
}
edges[i].ptr = blocks[i].c;
}
/* hash input tails and finalize */
sha256_multi_block(ctx, edges, n4x);
memset(blocks, 0, sizeof(blocks));
for (i = 0; i < x4; i++) {
# ifdef BSWAP4
blocks[i].d[0] = BSWAP4(ctx->A[i]);
ctx->A[i] = key->tail.h[0];
blocks[i].d[1] = BSWAP4(ctx->B[i]);
ctx->B[i] = key->tail.h[1];
blocks[i].d[2] = BSWAP4(ctx->C[i]);
ctx->C[i] = key->tail.h[2];
blocks[i].d[3] = BSWAP4(ctx->D[i]);
ctx->D[i] = key->tail.h[3];
blocks[i].d[4] = BSWAP4(ctx->E[i]);
ctx->E[i] = key->tail.h[4];
blocks[i].d[5] = BSWAP4(ctx->F[i]);
ctx->F[i] = key->tail.h[5];
blocks[i].d[6] = BSWAP4(ctx->G[i]);
ctx->G[i] = key->tail.h[6];
blocks[i].d[7] = BSWAP4(ctx->H[i]);
ctx->H[i] = key->tail.h[7];
blocks[i].c[32] = 0x80;
blocks[i].d[15] = BSWAP4((64 + 32) * 8);
# else
PUTU32(blocks[i].c + 0, ctx->A[i]);
ctx->A[i] = key->tail.h[0];
PUTU32(blocks[i].c + 4, ctx->B[i]);
ctx->B[i] = key->tail.h[1];
PUTU32(blocks[i].c + 8, ctx->C[i]);
ctx->C[i] = key->tail.h[2];
PUTU32(blocks[i].c + 12, ctx->D[i]);
ctx->D[i] = key->tail.h[3];
PUTU32(blocks[i].c + 16, ctx->E[i]);
ctx->E[i] = key->tail.h[4];
PUTU32(blocks[i].c + 20, ctx->F[i]);
ctx->F[i] = key->tail.h[5];
PUTU32(blocks[i].c + 24, ctx->G[i]);
ctx->G[i] = key->tail.h[6];
PUTU32(blocks[i].c + 28, ctx->H[i]);
ctx->H[i] = key->tail.h[7];
blocks[i].c[32] = 0x80;
PUTU32(blocks[i].c + 60, (64 + 32) * 8);
# endif
edges[i].ptr = blocks[i].c;
edges[i].blocks = 1;
}
/* finalize MACs */
sha256_multi_block(ctx, edges, n4x);
for (i = 0; i < x4; i++) {
unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
unsigned char *out0 = out;
memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
ciph_d[i].inp = ciph_d[i].out;
out += 5 + 16 + len;
/* write MAC */
PUTU32(out + 0, ctx->A[i]);
PUTU32(out + 4, ctx->B[i]);
PUTU32(out + 8, ctx->C[i]);
PUTU32(out + 12, ctx->D[i]);
PUTU32(out + 16, ctx->E[i]);
PUTU32(out + 20, ctx->F[i]);
PUTU32(out + 24, ctx->G[i]);
PUTU32(out + 28, ctx->H[i]);
out += 32;
len += 32;
/* pad */
pad = 15 - len % 16;
for (j = 0; j <= pad; j++)
*(out++) = pad;
len += pad + 1;
ciph_d[i].blocks = (len - processed) / 16;
len += 16; /* account for explicit iv */
/* arrange header */
out0[0] = ((u8 *)key->md.data)[8];
out0[1] = ((u8 *)key->md.data)[9];
out0[2] = ((u8 *)key->md.data)[10];
out0[3] = (u8)(len >> 8);
out0[4] = (u8)(len);
ret += len + 5;
inp += frag;
}
aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);
OPENSSL_cleanse(blocks, sizeof(blocks));
OPENSSL_cleanse(ctx, sizeof(*ctx));
return ret;
}
|
C
|
openssl
| 0 |
CVE-2011-3191
|
https://www.cvedetails.com/cve/CVE-2011-3191/
|
CWE-189
|
https://github.com/torvalds/linux/commit/9438fabb73eb48055b58b89fc51e0bc4db22fabd
|
9438fabb73eb48055b58b89fc51e0bc4db22fabd
|
cifs: fix possible memory corruption in CIFSFindNext
The name_len variable in CIFSFindNext is a signed int that gets set to
the resume_name_len in the cifs_search_info. The resume_name_len however
is unsigned and for some infolevels is populated directly from a 32 bit
value sent by the server.
If the server sends a very large value for this, then that value could
look negative when converted to a signed int. That would make that
value pass the PATH_MAX check later in CIFSFindNext. The name_len would
then be used as a length value for a memcpy. It would then be treated
as unsigned again, and the memcpy scribbles over a ton of memory.
Fix this by making the name_len an unsigned value in CIFSFindNext.
Cc: <stable@kernel.org>
Reported-by: Darren Lavender <dcl@hppine99.gbr.hp.com>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
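
Illustrative aside (not part of this dataset record): CIFSFindNext itself is not reproduced above, but the signed/unsigned conversion the commit message describes can be sketched in a few self-contained lines. The variable names resume_name_len, name_len, and PATH_MAX follow the commit message; everything else (the buffers, the concrete length value) is hypothetical scaffolding for demonstration only.

/*
 * Minimal sketch of the pattern described in the commit message
 * above: a server-controlled unsigned 32-bit length assigned to a
 * signed int, making a PATH_MAX bound check pass, then converting
 * back to a huge size_t at the memcpy. Hypothetical reconstruction,
 * not the actual kernel code.
 */
#include <stdio.h>
#include <string.h>
#include <limits.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

int main(void)
{
    /* Server-supplied 32-bit length, as stored in cifs_search_info. */
    unsigned int resume_name_len = 0xFFFFFFF0u;

    /* Before the fix: a signed int receives the unsigned value
     * (on two's-complement targets this becomes -16) ...          */
    int name_len = (int)resume_name_len;

    /* ... so the sanity check passes, since -16 < PATH_MAX.       */
    if (name_len < PATH_MAX) {
        char dst[PATH_MAX], src[PATH_MAX] = "resume key";
        /*
         * memcpy takes a size_t: the negative value converts back
         * to 0xFFFFFFF0 (or larger on 64-bit), far past both
         * buffers. Left commented out because executing it would
         * actually corrupt memory.
         */
        /* memcpy(dst, src, name_len); */
        (void)dst; (void)src;
        printf("unchecked copy length would be %zu bytes\n",
               (size_t)name_len);
    }

    /* After the fix: the length stays unsigned, so the same bound
     * check rejects the oversized value outright.                  */
    unsigned int fixed_len = resume_name_len;
    if (fixed_len < PATH_MAX)
        printf("copy allowed\n");
    else
        printf("oversized length rejected\n");

    return 0;
}

The fix is a one-character-class change (int to unsigned int for name_len), which is why keeping length fields unsigned end-to-end is the usual remedy for this CWE-189 class of bug.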
|
CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, const unsigned char *ea_name,
char *EAData, size_t buf_size,
const struct nls_table *nls_codepage, int remap)
{
/* BB assumes one setup word */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int list_len;
struct fealist *ea_response_data;
struct fea *temp_fea;
char *temp_ptr;
char *end_of_smb;
__u16 params, byte_count, data_offset;
unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
cFYI(1, "In Query All EAs path %s", searchName);
QAllEAsRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
list_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
list_len++; /* trailing null */
list_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
list_len = strnlen(searchName, PATH_MAX);
list_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, list_len);
}
params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QueryAllEAs = %d", rc);
goto QAllEAsOut;
}
/* BB also check enough total bytes returned */
/* BB we need to improve the validity checking
of these trans2 responses */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < 4) {
rc = -EIO; /* bad smb */
goto QAllEAsOut;
}
/* check that length of list is not more than bcc */
/* check that each entry does not go beyond length
of list */
/* check that each element of each entry does not
go beyond end of list */
/* validate_trans2_offsets() */
/* BB check if start of smb + data_offset > &bcc+ bcc */
data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
ea_response_data = (struct fealist *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
list_len = le32_to_cpu(ea_response_data->list_len);
cFYI(1, "ea length %d", list_len);
if (list_len <= 8) {
cFYI(1, "empty EA list returned from server");
goto QAllEAsOut;
}
/* make sure list_len doesn't go past end of SMB */
end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
if ((char *)ea_response_data + list_len > end_of_smb) {
cFYI(1, "EA list appears to go beyond SMB");
rc = -EIO;
goto QAllEAsOut;
}
/* account for ea list len */
list_len -= 4;
temp_fea = ea_response_data->list;
temp_ptr = (char *)temp_fea;
while (list_len > 0) {
unsigned int name_len;
__u16 value_len;
list_len -= 4;
temp_ptr += 4;
/* make sure we can read name_len and value_len */
if (list_len < 0) {
cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
name_len = temp_fea->name_len;
value_len = le16_to_cpu(temp_fea->value_len);
list_len -= name_len + 1 + value_len;
if (list_len < 0) {
cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
if (ea_name) {
if (ea_name_len == name_len &&
strncmp(ea_name, temp_ptr, name_len) == 0) {
temp_ptr += name_len + 1;
rc = value_len;
if (buf_size == 0)
goto QAllEAsOut;
if ((size_t)value_len > buf_size) {
rc = -ERANGE;
goto QAllEAsOut;
}
memcpy(EAData, temp_ptr, value_len);
goto QAllEAsOut;
}
} else {
/* account for prefix user. and trailing null */
rc += (5 + 1 + name_len);
if (rc < (int) buf_size) {
memcpy(EAData, "user.", 5);
EAData += 5;
memcpy(EAData, temp_ptr, name_len);
EAData += name_len;
/* null terminate name */
*EAData = 0;
++EAData;
} else if (buf_size == 0) {
/* skip copy - calc size only */
} else {
/* stop before overrun buffer */
rc = -ERANGE;
break;
}
}
temp_ptr += name_len + 1 + value_len;
temp_fea = (struct fea *)temp_ptr;
}
/* didn't find the named attribute */
if (ea_name)
rc = -ENODATA;
QAllEAsOut:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QAllEAsRetry;
return (ssize_t)rc;
}
|
CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, const unsigned char *ea_name,
char *EAData, size_t buf_size,
const struct nls_table *nls_codepage, int remap)
{
/* BB assumes one setup word */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int list_len;
struct fealist *ea_response_data;
struct fea *temp_fea;
char *temp_ptr;
char *end_of_smb;
__u16 params, byte_count, data_offset;
unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
cFYI(1, "In Query All EAs path %s", searchName);
QAllEAsRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
list_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
list_len++; /* trailing null */
list_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
list_len = strnlen(searchName, PATH_MAX);
list_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, list_len);
}
params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QueryAllEAs = %d", rc);
goto QAllEAsOut;
}
/* BB also check enough total bytes returned */
/* BB we need to improve the validity checking
of these trans2 responses */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < 4) {
rc = -EIO; /* bad smb */
goto QAllEAsOut;
}
/* check that length of list is not more than bcc */
/* check that each entry does not go beyond length
of list */
/* check that each element of each entry does not
go beyond end of list */
/* validate_trans2_offsets() */
/* BB check if start of smb + data_offset > &bcc+ bcc */
data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
ea_response_data = (struct fealist *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
list_len = le32_to_cpu(ea_response_data->list_len);
cFYI(1, "ea length %d", list_len);
if (list_len <= 8) {
cFYI(1, "empty EA list returned from server");
goto QAllEAsOut;
}
/* make sure list_len doesn't go past end of SMB */
end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
if ((char *)ea_response_data + list_len > end_of_smb) {
cFYI(1, "EA list appears to go beyond SMB");
rc = -EIO;
goto QAllEAsOut;
}
/* account for ea list len */
list_len -= 4;
temp_fea = ea_response_data->list;
temp_ptr = (char *)temp_fea;
while (list_len > 0) {
unsigned int name_len;
__u16 value_len;
list_len -= 4;
temp_ptr += 4;
/* make sure we can read name_len and value_len */
if (list_len < 0) {
cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
name_len = temp_fea->name_len;
value_len = le16_to_cpu(temp_fea->value_len);
list_len -= name_len + 1 + value_len;
if (list_len < 0) {
cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
if (ea_name) {
if (ea_name_len == name_len &&
strncmp(ea_name, temp_ptr, name_len) == 0) {
temp_ptr += name_len + 1;
rc = value_len;
if (buf_size == 0)
goto QAllEAsOut;
if ((size_t)value_len > buf_size) {
rc = -ERANGE;
goto QAllEAsOut;
}
memcpy(EAData, temp_ptr, value_len);
goto QAllEAsOut;
}
} else {
/* account for prefix user. and trailing null */
rc += (5 + 1 + name_len);
if (rc < (int) buf_size) {
memcpy(EAData, "user.", 5);
EAData += 5;
memcpy(EAData, temp_ptr, name_len);
EAData += name_len;
/* null terminate name */
*EAData = 0;
++EAData;
} else if (buf_size == 0) {
/* skip copy - calc size only */
} else {
/* stop before overrun buffer */
rc = -ERANGE;
break;
}
}
temp_ptr += name_len + 1 + value_len;
temp_fea = (struct fea *)temp_ptr;
}
/* didn't find the named attribute */
if (ea_name)
rc = -ENODATA;
QAllEAsOut:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QAllEAsRetry;
return (ssize_t)rc;
}
|
C
|
linux
| 0 |