Dataset columns (type and value statistics):
- CVE ID: string, length 13–43
- CVE Page: string, length 45–48
- CWE ID: categorical string, 90 distinct values
- codeLink: string, length 46–139
- commit_id: string, length 6–81
- commit_message: string, length 3–13.3k
- func_after: string, length 14–241k
- func_before: string, length 14–241k
- lang: categorical string, 3 distinct values
- project: categorical string, 309 distinct values
- vul: int8, values 0 and 1
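The records below are easier to work with programmatically than to read inline. The following is a minimal sketch, not part of the dataset itself, assuming the rows have been exported locally as a Parquet file named `data.parquet` (a hypothetical name) with the column names listed above and that pandas is installed; it selects the rows flagged as vulnerable (vul == 1) and prints a unified diff of func_before against func_after.

```python
# Minimal sketch, not part of the dataset. Assumptions: the rows shown here are
# available locally as "data.parquet" (hypothetical file name), column names
# match the schema listed above, and pandas is installed.
import difflib

import pandas as pd

df = pd.read_parquet("data.parquet")

# Keep only records flagged as vulnerable (vul == 1), e.g. the CVE-2011-1833
# eCryptfs record below, and show what the fixing commit changed.
for _, row in df[df["vul"] == 1].iterrows():
    print(row["CVE ID"], row["project"], row["CWE ID"], row["codeLink"])
    diff = difflib.unified_diff(
        row["func_before"].splitlines(),
        row["func_after"].splitlines(),
        fromfile="func_before",
        tofile="func_after",
        lineterm="",
    )
    print("\n".join(diff))
```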
CVE-2015-1339
https://www.cvedetails.com/cve/CVE-2015-1339/
CWE-399
https://github.com/torvalds/linux/commit/2c5816b4beccc8ba709144539f6fdd764f8fa49c
2c5816b4beccc8ba709144539f6fdd764f8fa49c
cuse: fix memory leak The problem is that fuse_dev_alloc() acquires an extra reference to cc.fc, and the original ref count is never dropped. Reported-by: Colin Ian King <colin.king@canonical.com> Signed-off-by: Miklos Szeredi <miklos@szeredi.hu> Fixes: cc080e9e9be1 ("fuse: introduce per-instance fuse_dev structure") Cc: <stable@vger.kernel.org> # v4.2+
static int cuse_channel_open(struct inode *inode, struct file *file) { struct fuse_dev *fud; struct cuse_conn *cc; int rc; /* set up cuse_conn */ cc = kzalloc(sizeof(*cc), GFP_KERNEL); if (!cc) return -ENOMEM; fuse_conn_init(&cc->fc); fud = fuse_dev_alloc(&cc->fc); if (!fud) { kfree(cc); return -ENOMEM; } INIT_LIST_HEAD(&cc->list); cc->fc.release = cuse_fc_release; cc->fc.initialized = 1; rc = cuse_send_init(cc); if (rc) { fuse_dev_free(fud); return rc; } file->private_data = fud; return 0; }
static int cuse_channel_open(struct inode *inode, struct file *file) { struct fuse_dev *fud; struct cuse_conn *cc; int rc; /* set up cuse_conn */ cc = kzalloc(sizeof(*cc), GFP_KERNEL); if (!cc) return -ENOMEM; fuse_conn_init(&cc->fc); fud = fuse_dev_alloc(&cc->fc); if (!fud) { kfree(cc); return -ENOMEM; } INIT_LIST_HEAD(&cc->list); cc->fc.release = cuse_fc_release; cc->fc.initialized = 1; rc = cuse_send_init(cc); if (rc) { fuse_dev_free(fud); return rc; } file->private_data = fud; return 0; }
C
linux
0
CVE-2017-5104
https://www.cvedetails.com/cve/CVE-2017-5104/
CWE-20
https://github.com/chromium/chromium/commit/adca986a53b31b6da4cb22f8e755f6856daea89a
adca986a53b31b6da4cb22f8e755f6856daea89a
Don't show current RenderWidgetHostView while interstitial is showing. Also moves interstitial page tracking from RenderFrameHostManager to WebContents, since interstitial pages are not frame-specific. This was necessary for subframes to detect if an interstitial page is showing. BUG=729105 TEST=See comment 13 of bug for repro steps CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_site_isolation Review-Url: https://codereview.chromium.org/2938313002 Cr-Commit-Position: refs/heads/master@{#480117}
void WebContentsImpl::GetManifest(const GetManifestCallback& callback) { manifest_manager_host_->GetManifest(callback); }
void WebContentsImpl::GetManifest(const GetManifestCallback& callback) { manifest_manager_host_->GetManifest(callback); }
C
Chrome
0
CVE-2013-0886
https://www.cvedetails.com/cve/CVE-2013-0886/
null
https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76
18d67244984a574ba2dd8779faabc0e3e34f4b76
Implement TextureImageTransportSurface using texture mailbox This has a couple of advantages: - allow tearing down and recreating the UI parent context without losing the renderer contexts - do not require a context to be able to generate textures when creating the GLSurfaceHandle - clearer ownership semantics that potentially allows for more robust and easier lost context handling/thumbnailing/etc., since a texture is at any given time owned by either: UI parent, mailbox, or TextureImageTransportSurface - simplify frontbuffer protection logic; the frontbuffer textures are now owned by RWHV where they are refcounted The TextureImageTransportSurface informs RenderWidgetHostView of the mailbox names for the front- and backbuffer textures by associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message. During SwapBuffers() or PostSubBuffer() cycles, it then uses produceTextureCHROMIUM() and consumeTextureCHROMIUM() to transfer ownership between renderer and browser compositor. RWHV sends back the surface_handle of the buffer being returned with the Swap ACK (or 0 if no buffer is being returned in which case TextureImageTransportSurface will allocate a new texture - note that this could be used to simply keep textures for thumbnailing). BUG=154815,139616 TBR=sky@chromium.org Review URL: https://chromiumcodereview.appspot.com/11194042 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
void RenderWidgetHostImpl::AcknowledgeSwapBuffersToRenderer() { if (!is_threaded_compositing_enabled_) Send(new ViewMsg_SwapBuffers_ACK(routing_id_)); }
void RenderWidgetHostImpl::AcknowledgeSwapBuffersToRenderer() { if (!is_threaded_compositing_enabled_) Send(new ViewMsg_SwapBuffers_ACK(routing_id_)); }
C
Chrome
0
CVE-2013-2548
https://www.cvedetails.com/cve/CVE-2013-2548/
CWE-310
https://github.com/torvalds/linux/commit/9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
crypto: user - fix info leaks in report API Three errors resulting in kernel memory disclosure: 1/ The structures used for the netlink based crypto algorithm report API are located on the stack. As snprintf() does not fill the remainder of the buffer with null bytes, those stack bytes will be disclosed to users of the API. Switch to strncpy() to fix this. 2/ crypto_report_one() does not initialize all field of struct crypto_user_alg. Fix this to fix the heap info leak. 3/ For the module name we should copy only as many bytes as module_name() returns -- not as much as the destination buffer could hold. But the current code does not and therefore copies random data from behind the end of the module name, as the module name is always shorter than CRYPTO_MAX_ALG_NAME. Also switch to use strncpy() to copy the algorithm's name and driver_name. They are strings, after all. Signed-off-by: Mathias Krause <minipli@googlemail.com> Cc: Steffen Klassert <steffen.klassert@secunet.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask) { struct crypto_alg *alg; int err; type = crypto_skcipher_type(type); mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV; alg = crypto_alg_mod_lookup(name, type, mask); if (IS_ERR(alg)) return PTR_ERR(alg); err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); crypto_mod_put(alg); return err; }
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask) { struct crypto_alg *alg; int err; type = crypto_skcipher_type(type); mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV; alg = crypto_alg_mod_lookup(name, type, mask); if (IS_ERR(alg)) return PTR_ERR(alg); err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); crypto_mod_put(alg); return err; }
C
linux
0
CVE-2016-3839
https://www.cvedetails.com/cve/CVE-2016-3839/
CWE-284
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
472271b153c5dc53c28beac55480a8d8434b2d5c
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release
static struct packet *packet_alloc(const uint8_t *data, uint32_t len) { struct packet *p = osi_calloc(sizeof(*p)); uint8_t *buf = osi_malloc(len); if (p && buf) { p->data = buf; p->len = len; memcpy(p->data, data, len); return p; } else if (p) osi_free(p); else if (buf) osi_free(buf); return NULL; }
static struct packet *packet_alloc(const uint8_t *data, uint32_t len) { struct packet *p = osi_calloc(sizeof(*p)); uint8_t *buf = osi_malloc(len); if (p && buf) { p->data = buf; p->len = len; memcpy(p->data, data, len); return p; } else if (p) osi_free(p); else if (buf) osi_free(buf); return NULL; }
C
Android
0
CVE-2016-3751
https://www.cvedetails.com/cve/CVE-2016-3751/
null
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
9d4853418ab2f754c2b63e091c29c5529b8b86ca
DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
get32(png_bytep buffer, int offset) /* Read a 32-bit value from an 8-byte circular buffer (used only below). */ { return (buffer[ offset & 7] << 24) + (buffer[(offset+1) & 7] << 16) + (buffer[(offset+2) & 7] << 8) + (buffer[(offset+3) & 7] ); }
get32(png_bytep buffer, int offset) /* Read a 32-bit value from an 8-byte circular buffer (used only below). */ { return (buffer[ offset & 7] << 24) + (buffer[(offset+1) & 7] << 16) + (buffer[(offset+2) & 7] << 8) + (buffer[(offset+3) & 7] ); }
C
Android
0
CVE-2011-1833
https://www.cvedetails.com/cve/CVE-2011-1833/
CWE-264
https://github.com/torvalds/linux/commit/764355487ea220fdc2faf128d577d7f679b91f97
764355487ea220fdc2faf128d577d7f679b91f97
Ecryptfs: Add mount option to check uid of device being mounted = expect uid Close a TOCTOU race for mounts done via ecryptfs-mount-private. The mount source (device) can be raced when the ownership test is done in userspace. Provide Ecryptfs a means to force the uid check at mount time. Signed-off-by: John Johansen <john.johansen@canonical.com> Cc: <stable@kernel.org> Signed-off-by: Tyler Hicks <tyhicks@linux.vnet.ibm.com>
static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, uid_t *check_ruid) { char *p; int rc = 0; int sig_set = 0; int cipher_name_set = 0; int fn_cipher_name_set = 0; int cipher_key_bytes; int cipher_key_bytes_set = 0; int fn_cipher_key_bytes; int fn_cipher_key_bytes_set = 0; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &sbi->mount_crypt_stat; substring_t args[MAX_OPT_ARGS]; int token; char *sig_src; char *cipher_name_dst; char *cipher_name_src; char *fn_cipher_name_dst; char *fn_cipher_name_src; char *fnek_dst; char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; *check_ruid = 0; if (!options) { rc = -EINVAL; goto out; } ecryptfs_init_mount_crypt_stat(mount_crypt_stat); while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case ecryptfs_opt_sig: case ecryptfs_opt_ecryptfs_sig: sig_src = args[0].from; rc = ecryptfs_add_global_auth_tok(mount_crypt_stat, sig_src, 0); if (rc) { printk(KERN_ERR "Error attempting to register " "global sig; rc = [%d]\n", rc); goto out; } sig_set = 1; break; case ecryptfs_opt_cipher: case ecryptfs_opt_ecryptfs_cipher: cipher_name_src = args[0].from; cipher_name_dst = mount_crypt_stat-> global_default_cipher_name; strncpy(cipher_name_dst, cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; cipher_name_set = 1; break; case ecryptfs_opt_ecryptfs_key_bytes: cipher_key_bytes_src = args[0].from; cipher_key_bytes = (int)simple_strtol(cipher_key_bytes_src, &cipher_key_bytes_src, 0); mount_crypt_stat->global_default_cipher_key_size = cipher_key_bytes; cipher_key_bytes_set = 1; break; case ecryptfs_opt_passthrough: mount_crypt_stat->flags |= ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED; break; case ecryptfs_opt_xattr_metadata: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; break; case ecryptfs_opt_encrypted_view: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; mount_crypt_stat->flags |= ECRYPTFS_ENCRYPTED_VIEW_ENABLED; break; case ecryptfs_opt_fnek_sig: fnek_src = args[0].from; fnek_dst = mount_crypt_stat->global_default_fnek_sig; strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX); mount_crypt_stat->global_default_fnek_sig[ ECRYPTFS_SIG_SIZE_HEX] = '\0'; rc = ecryptfs_add_global_auth_tok( mount_crypt_stat, mount_crypt_stat->global_default_fnek_sig, ECRYPTFS_AUTH_TOK_FNEK); if (rc) { printk(KERN_ERR "Error attempting to register " "global fnek sig [%s]; rc = [%d]\n", mount_crypt_stat->global_default_fnek_sig, rc); goto out; } mount_crypt_stat->flags |= (ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK); break; case ecryptfs_opt_fn_cipher: fn_cipher_name_src = args[0].from; fn_cipher_name_dst = mount_crypt_stat->global_default_fn_cipher_name; strncpy(fn_cipher_name_dst, fn_cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); mount_crypt_stat->global_default_fn_cipher_name[ ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; fn_cipher_name_set = 1; break; case ecryptfs_opt_fn_cipher_key_bytes: fn_cipher_key_bytes_src = args[0].from; fn_cipher_key_bytes = (int)simple_strtol(fn_cipher_key_bytes_src, &fn_cipher_key_bytes_src, 0); mount_crypt_stat->global_default_fn_cipher_key_bytes = fn_cipher_key_bytes; fn_cipher_key_bytes_set = 1; break; case ecryptfs_opt_unlink_sigs: mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS; break; case ecryptfs_opt_mount_auth_tok_only: mount_crypt_stat->flags |= 
ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; break; case ecryptfs_opt_check_dev_ruid: *check_ruid = 1; break; case ecryptfs_opt_err: default: printk(KERN_WARNING "%s: eCryptfs: unrecognized option [%s]\n", __func__, p); } } if (!sig_set) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "You must supply at least one valid " "auth tok signature as a mount " "parameter; see the eCryptfs README\n"); goto out; } if (!cipher_name_set) { int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); strcpy(mount_crypt_stat->global_default_cipher_name, ECRYPTFS_DEFAULT_CIPHER); } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_name_set) strcpy(mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_cipher_name); if (!cipher_key_bytes_set) mount_crypt_stat->global_default_cipher_key_size = 0; if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !ecryptfs_tfm_exists( mount_crypt_stat->global_default_fn_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } mutex_unlock(&key_tfm_list_mutex); rc = ecryptfs_init_global_auth_toks(mount_crypt_stat); if (rc) printk(KERN_WARNING "One or more global auth toks could not " "properly register; rc = [%d]\n", rc); out: return rc; }
static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) { char *p; int rc = 0; int sig_set = 0; int cipher_name_set = 0; int fn_cipher_name_set = 0; int cipher_key_bytes; int cipher_key_bytes_set = 0; int fn_cipher_key_bytes; int fn_cipher_key_bytes_set = 0; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &sbi->mount_crypt_stat; substring_t args[MAX_OPT_ARGS]; int token; char *sig_src; char *cipher_name_dst; char *cipher_name_src; char *fn_cipher_name_dst; char *fn_cipher_name_src; char *fnek_dst; char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; if (!options) { rc = -EINVAL; goto out; } ecryptfs_init_mount_crypt_stat(mount_crypt_stat); while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case ecryptfs_opt_sig: case ecryptfs_opt_ecryptfs_sig: sig_src = args[0].from; rc = ecryptfs_add_global_auth_tok(mount_crypt_stat, sig_src, 0); if (rc) { printk(KERN_ERR "Error attempting to register " "global sig; rc = [%d]\n", rc); goto out; } sig_set = 1; break; case ecryptfs_opt_cipher: case ecryptfs_opt_ecryptfs_cipher: cipher_name_src = args[0].from; cipher_name_dst = mount_crypt_stat-> global_default_cipher_name; strncpy(cipher_name_dst, cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; cipher_name_set = 1; break; case ecryptfs_opt_ecryptfs_key_bytes: cipher_key_bytes_src = args[0].from; cipher_key_bytes = (int)simple_strtol(cipher_key_bytes_src, &cipher_key_bytes_src, 0); mount_crypt_stat->global_default_cipher_key_size = cipher_key_bytes; cipher_key_bytes_set = 1; break; case ecryptfs_opt_passthrough: mount_crypt_stat->flags |= ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED; break; case ecryptfs_opt_xattr_metadata: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; break; case ecryptfs_opt_encrypted_view: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; mount_crypt_stat->flags |= ECRYPTFS_ENCRYPTED_VIEW_ENABLED; break; case ecryptfs_opt_fnek_sig: fnek_src = args[0].from; fnek_dst = mount_crypt_stat->global_default_fnek_sig; strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX); mount_crypt_stat->global_default_fnek_sig[ ECRYPTFS_SIG_SIZE_HEX] = '\0'; rc = ecryptfs_add_global_auth_tok( mount_crypt_stat, mount_crypt_stat->global_default_fnek_sig, ECRYPTFS_AUTH_TOK_FNEK); if (rc) { printk(KERN_ERR "Error attempting to register " "global fnek sig [%s]; rc = [%d]\n", mount_crypt_stat->global_default_fnek_sig, rc); goto out; } mount_crypt_stat->flags |= (ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK); break; case ecryptfs_opt_fn_cipher: fn_cipher_name_src = args[0].from; fn_cipher_name_dst = mount_crypt_stat->global_default_fn_cipher_name; strncpy(fn_cipher_name_dst, fn_cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); mount_crypt_stat->global_default_fn_cipher_name[ ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; fn_cipher_name_set = 1; break; case ecryptfs_opt_fn_cipher_key_bytes: fn_cipher_key_bytes_src = args[0].from; fn_cipher_key_bytes = (int)simple_strtol(fn_cipher_key_bytes_src, &fn_cipher_key_bytes_src, 0); mount_crypt_stat->global_default_fn_cipher_key_bytes = fn_cipher_key_bytes; fn_cipher_key_bytes_set = 1; break; case ecryptfs_opt_unlink_sigs: mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS; break; case ecryptfs_opt_mount_auth_tok_only: mount_crypt_stat->flags |= ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; break; case ecryptfs_opt_err: default: printk(KERN_WARNING "%s: eCryptfs: unrecognized option 
[%s]\n", __func__, p); } } if (!sig_set) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "You must supply at least one valid " "auth tok signature as a mount " "parameter; see the eCryptfs README\n"); goto out; } if (!cipher_name_set) { int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); strcpy(mount_crypt_stat->global_default_cipher_name, ECRYPTFS_DEFAULT_CIPHER); } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_name_set) strcpy(mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_cipher_name); if (!cipher_key_bytes_set) mount_crypt_stat->global_default_cipher_key_size = 0; if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !ecryptfs_tfm_exists( mount_crypt_stat->global_default_fn_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } mutex_unlock(&key_tfm_list_mutex); rc = ecryptfs_init_global_auth_toks(mount_crypt_stat); if (rc) printk(KERN_WARNING "One or more global auth toks could not " "properly register; rc = [%d]\n", rc); out: return rc; }
C
linux
1
CVE-2016-9754
https://www.cvedetails.com/cve/CVE-2016-9754/
CWE-190
https://github.com/torvalds/linux/commit/59643d1535eb220668692a5359de22545af579f6
59643d1535eb220668692a5359de22545af579f6
ring-buffer: Prevent overflow of size in ring_buffer_resize() If the size passed to ring_buffer_resize() is greater than MAX_LONG - BUF_PAGE_SIZE then the DIV_ROUND_UP() will return zero. Here's the details: # echo 18014398509481980 > /sys/kernel/debug/tracing/buffer_size_kb tracing_entries_write() processes this and converts kb to bytes. 18014398509481980 << 10 = 18446744073709547520 and this is passed to ring_buffer_resize() as unsigned long size. size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); Where DIV_ROUND_UP(a, b) is (a + b - 1)/b BUF_PAGE_SIZE is 4080 and here 18446744073709547520 + 4080 - 1 = 18446744073709551599 where 18446744073709551599 is still smaller than 2^64 2^64 - 18446744073709551599 = 17 But now 18446744073709551599 / 4080 = 4521260802379792 and size = size * 4080 = 18446744073709551360 This is checked to make sure its still greater than 2 * 4080, which it is. Then we convert to the number of buffer pages needed. nr_page = DIV_ROUND_UP(size, BUF_PAGE_SIZE) but this time size is 18446744073709551360 and 2^64 - (18446744073709551360 + 4080 - 1) = -3823 Thus it overflows and the resulting number is less than 4080, which makes 3823 / 4080 = 0 an nr_pages is set to this. As we already checked against the minimum that nr_pages may be, this causes the logic to fail as well, and we crash the kernel. There's no reason to have the two DIV_ROUND_UP() (that's just result of historical code changes), clean up the code and fix this bug. Cc: stable@vger.kernel.org # 3.5+ Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic") Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *head, struct buffer_page *prev, int old_flag) { return rb_head_page_set(cpu_buffer, head, prev, old_flag, RB_PAGE_UPDATE); }
static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *head, struct buffer_page *prev, int old_flag) { return rb_head_page_set(cpu_buffer, head, prev, old_flag, RB_PAGE_UPDATE); }
C
linux
0
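The CVE-2016-9754 commit message above already walks through the overflow numerically; the sketch below simply replays that arithmetic in Python with explicit 64-bit wraparound (the mask stands in for the kernel's unsigned long), so the step where DIV_ROUND_UP collapses to zero is easy to reproduce. All constants are taken from the commit message.

```python
# Replay of the unsigned 64-bit arithmetic from the CVE-2016-9754 commit
# message (ring_buffer_resize overflow). Constants come from that message;
# the mask emulates unsigned long wraparound on a 64-bit kernel.
MASK = (1 << 64) - 1
BUF_PAGE_SIZE = 4080

def div_round_up(a, b):
    # DIV_ROUND_UP(a, b) = (a + b - 1) / b, with 64-bit wraparound on the sum
    return ((a + b - 1) & MASK) // b

size = (18014398509481980 << 10) & MASK       # buffer_size_kb shifted to bytes
pages = div_round_up(size, BUF_PAGE_SIZE)
print(pages)                                   # 4521260802379792

size = (pages * BUF_PAGE_SIZE) & MASK
print(size)                                    # 18446744073709551360 (> 2 * 4080)

# The second DIV_ROUND_UP wraps: size + 4079 exceeds 2**64 and becomes 3823,
# so the computed page count is 0, which is what crashed the kernel.
print(div_round_up(size, BUF_PAGE_SIZE))       # 0
```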
CVE-2018-12249
https://www.cvedetails.com/cve/CVE-2018-12249/
CWE-476
https://github.com/mruby/mruby/commit/faa4eaf6803bd11669bc324b4c34e7162286bfa3
faa4eaf6803bd11669bc324b4c34e7162286bfa3
`mrb_class_real()` did not work for `BasicObject`; fix #4037
mrb_class_defined(mrb_state *mrb, const char *name) { mrb_value sym = mrb_check_intern_cstr(mrb, name); if (mrb_nil_p(sym)) { return FALSE; } return mrb_const_defined(mrb, mrb_obj_value(mrb->object_class), mrb_symbol(sym)); }
mrb_class_defined(mrb_state *mrb, const char *name) { mrb_value sym = mrb_check_intern_cstr(mrb, name); if (mrb_nil_p(sym)) { return FALSE; } return mrb_const_defined(mrb, mrb_obj_value(mrb->object_class), mrb_symbol(sym)); }
C
mruby
0
CVE-2019-13454
https://www.cvedetails.com/cve/CVE-2019-13454/
CWE-369
https://github.com/ImageMagick/ImageMagick6/commit/4f31d78716ac94c85c244efcea368fea202e2ed4
4f31d78716ac94c85c244efcea368fea202e2ed4
https://github.com/ImageMagick/ImageMagick/issues/1629
MagickExport Image *DisposeImages(const Image *images,ExceptionInfo *exception) { Image *dispose_image, *dispose_images; RectangleInfo bounds; register Image *image, *next; /* Run the image through the animation sequence */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=GetFirstImageInList(images); dispose_image=CloneImage(image,image->page.width,image->page.height, MagickTrue,exception); if (dispose_image == (Image *) NULL) return((Image *) NULL); dispose_image->page=image->page; dispose_image->page.x=0; dispose_image->page.y=0; dispose_image->dispose=NoneDispose; dispose_image->background_color.opacity=(Quantum) TransparentOpacity; (void) SetImageBackgroundColor(dispose_image); dispose_images=NewImageList(); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { Image *current_image; /* Overlay this frame's image over the previous disposal image. */ current_image=CloneImage(dispose_image,0,0,MagickTrue,exception); if (current_image == (Image *) NULL) { dispose_images=DestroyImageList(dispose_images); dispose_image=DestroyImage(dispose_image); return((Image *) NULL); } (void) CompositeImage(current_image,next->matte != MagickFalse ? OverCompositeOp : CopyCompositeOp,next,next->page.x,next->page.y); /* Handle Background dispose: image is displayed for the delay period. */ if (next->dispose == BackgroundDispose) { bounds=next->page; bounds.width=next->columns; bounds.height=next->rows; if (bounds.x < 0) { bounds.width+=bounds.x; bounds.x=0; } if ((ssize_t) (bounds.x+bounds.width) > (ssize_t) current_image->columns) bounds.width=current_image->columns-bounds.x; if (bounds.y < 0) { bounds.height+=bounds.y; bounds.y=0; } if ((ssize_t) (bounds.y+bounds.height) > (ssize_t) current_image->rows) bounds.height=current_image->rows-bounds.y; ClearBounds(current_image,&bounds); } /* Select the appropriate previous/disposed image. */ if (next->dispose == PreviousDispose) current_image=DestroyImage(current_image); else { dispose_image=DestroyImage(dispose_image); dispose_image=current_image; current_image=(Image *) NULL; } /* Save the dispose image just calculated for return. */ { Image *dispose; dispose=CloneImage(dispose_image,0,0,MagickTrue,exception); if (dispose == (Image *) NULL) { dispose_images=DestroyImageList(dispose_images); dispose_image=DestroyImage(dispose_image); return((Image *) NULL); } (void) CloneImageProfiles(dispose,next); (void) CloneImageProperties(dispose,next); (void) CloneImageArtifacts(dispose,next); dispose->page.x=0; dispose->page.y=0; dispose->dispose=next->dispose; AppendImageToList(&dispose_images,dispose); } } dispose_image=DestroyImage(dispose_image); return(GetFirstImageInList(dispose_images)); }
MagickExport Image *DisposeImages(const Image *images,ExceptionInfo *exception) { Image *dispose_image, *dispose_images; RectangleInfo bounds; register Image *image, *next; /* Run the image through the animation sequence */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=GetFirstImageInList(images); dispose_image=CloneImage(image,image->page.width,image->page.height, MagickTrue,exception); if (dispose_image == (Image *) NULL) return((Image *) NULL); dispose_image->page=image->page; dispose_image->page.x=0; dispose_image->page.y=0; dispose_image->dispose=NoneDispose; dispose_image->background_color.opacity=(Quantum) TransparentOpacity; (void) SetImageBackgroundColor(dispose_image); dispose_images=NewImageList(); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { Image *current_image; /* Overlay this frame's image over the previous disposal image. */ current_image=CloneImage(dispose_image,0,0,MagickTrue,exception); if (current_image == (Image *) NULL) { dispose_images=DestroyImageList(dispose_images); dispose_image=DestroyImage(dispose_image); return((Image *) NULL); } (void) CompositeImage(current_image,next->matte != MagickFalse ? OverCompositeOp : CopyCompositeOp,next,next->page.x,next->page.y); /* Handle Background dispose: image is displayed for the delay period. */ if (next->dispose == BackgroundDispose) { bounds=next->page; bounds.width=next->columns; bounds.height=next->rows; if (bounds.x < 0) { bounds.width+=bounds.x; bounds.x=0; } if ((ssize_t) (bounds.x+bounds.width) > (ssize_t) current_image->columns) bounds.width=current_image->columns-bounds.x; if (bounds.y < 0) { bounds.height+=bounds.y; bounds.y=0; } if ((ssize_t) (bounds.y+bounds.height) > (ssize_t) current_image->rows) bounds.height=current_image->rows-bounds.y; ClearBounds(current_image,&bounds); } /* Select the appropriate previous/disposed image. */ if (next->dispose == PreviousDispose) current_image=DestroyImage(current_image); else { dispose_image=DestroyImage(dispose_image); dispose_image=current_image; current_image=(Image *) NULL; } /* Save the dispose image just calculated for return. */ { Image *dispose; dispose=CloneImage(dispose_image,0,0,MagickTrue,exception); if (dispose == (Image *) NULL) { dispose_images=DestroyImageList(dispose_images); dispose_image=DestroyImage(dispose_image); return((Image *) NULL); } (void) CloneImageProfiles(dispose,next); (void) CloneImageProperties(dispose,next); (void) CloneImageArtifacts(dispose,next); dispose->page.x=0; dispose->page.y=0; dispose->dispose=next->dispose; AppendImageToList(&dispose_images,dispose); } } dispose_image=DestroyImage(dispose_image); return(GetFirstImageInList(dispose_images)); }
C
ImageMagick6
0
null
null
null
https://github.com/chromium/chromium/commit/a1ce1b69e269a7e61ea0bf0691b90be0cbe9b4c5
a1ce1b69e269a7e61ea0bf0691b90be0cbe9b4c5
2009-05-04 Kai Brüning <kai@granus.net> Reviewed by Eric Seidel. https://bugs.webkit.org/show_bug.cgi?id=24883 24883: Bad success test in parseXMLDocumentFragment in XMLTokenizerLibxml2.cpp Fixed test whether all the chunk has been processed to correctly count utf8 bytes. Test: fast/innerHTML/innerHTML-nbsp.xhtml * dom/XMLTokenizerLibxml2.cpp: (WebCore::parseXMLDocumentFragment): git-svn-id: svn://svn.chromium.org/blink/trunk@43195 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void externalSubsetHandler(void* closure, const xmlChar*, const xmlChar* externalId, const xmlChar*) { String extId = toString(externalId); if ((extId == "-//W3C//DTD XHTML 1.0 Transitional//EN") || (extId == "-//W3C//DTD XHTML 1.1//EN") || (extId == "-//W3C//DTD XHTML 1.0 Strict//EN") || (extId == "-//W3C//DTD XHTML 1.0 Frameset//EN") || (extId == "-//W3C//DTD XHTML Basic 1.0//EN") || (extId == "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN") || (extId == "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN") || (extId == "-//WAPFORUM//DTD XHTML Mobile 1.0//EN")) getTokenizer(closure)->setIsXHTMLDocument(true); // controls if we replace entities or not. }
static void externalSubsetHandler(void* closure, const xmlChar*, const xmlChar* externalId, const xmlChar*) { String extId = toString(externalId); if ((extId == "-//W3C//DTD XHTML 1.0 Transitional//EN") || (extId == "-//W3C//DTD XHTML 1.1//EN") || (extId == "-//W3C//DTD XHTML 1.0 Strict//EN") || (extId == "-//W3C//DTD XHTML 1.0 Frameset//EN") || (extId == "-//W3C//DTD XHTML Basic 1.0//EN") || (extId == "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN") || (extId == "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN") || (extId == "-//WAPFORUM//DTD XHTML Mobile 1.0//EN")) getTokenizer(closure)->setIsXHTMLDocument(true); // controls if we replace entities or not. }
C
Chrome
0
CVE-2019-7395
https://www.cvedetails.com/cve/CVE-2019-7395/
CWE-399
https://github.com/ImageMagick/ImageMagick/commit/8a43abefb38c5e29138e1c9c515b313363541c06
8a43abefb38c5e29138e1c9c515b313363541c06
https://github.com/ImageMagick/ImageMagick/issues/1451
static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
C
ImageMagick
0
CVE-2013-0836
https://www.cvedetails.com/cve/CVE-2013-0836/
CWE-399
https://github.com/chromium/chromium/commit/f7038db6ef172459f14b1b67a5155b8dd210be0f
f7038db6ef172459f14b1b67a5155b8dd210be0f
Progressive JPEG outputScanlines() calls should handle failure outputScanlines() can fail and delete |this|, so any attempt to access members thereafter should be avoided. Copy the decoder pointer member, and use that copy to detect and handle the failure case. BUG=232763 R=pkasting@chromium.org Review URL: https://codereview.chromium.org/14844003 git-svn-id: svn://svn.chromium.org/blink/trunk@150545 bbb929c8-8fbe-4397-9dbb-9b2b20218538
JPEGImageDecoder* decoder() { return m_decoder; }
JPEGImageDecoder* decoder() { return m_decoder; }
C
Chrome
0
CVE-2017-5120
https://www.cvedetails.com/cve/CVE-2017-5120/
null
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
b7277af490d28ac7f802c015bb0ff31395768556
bindings: Support "attribute FrozenArray<T>?" Adds a quick hack to support a case of "attribute FrozenArray<T>?". Bug: 1028047 Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866 Reviewed-by: Hitoshi Yoshida <peria@chromium.org> Commit-Queue: Yuki Shiino <yukishiino@chromium.org> Cr-Commit-Position: refs/heads/master@{#718676}
static void MeasureAsVoidMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObject* impl = V8TestObject::ToImpl(info.Holder()); impl->measureAsVoidMethod(); }
static void MeasureAsVoidMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObject* impl = V8TestObject::ToImpl(info.Holder()); impl->measureAsVoidMethod(); }
C
Chrome
0
CVE-2016-3920
https://www.cvedetails.com/cve/CVE-2016-3920/
CWE-20
https://android.googlesource.com/platform/frameworks/av/+/6d0249be2275fd4086783f259f4e2c54722a7c55
6d0249be2275fd4086783f259f4e2c54722a7c55
better validation lengths of strings in ID3 tags Validate lengths on strings in ID3 tags, particularly around 0. Also added code to handle cases when we can't get memory for copies of strings we want to extract from these tags. Affects L/M/N/master, same patch for all of them. Bug: 30744884 Change-Id: I2675a817a39f0927ec1f7e9f9c09f2e61020311e Test: play mp3 file which caused a <0 length. (cherry picked from commit d23c01546c4f82840a01a380def76ab6cae5d43f)
MemorySource(const uint8_t *data, size_t size) : mData(data), mSize(size) { }
MemorySource(const uint8_t *data, size_t size) : mData(data), mSize(size) { }
C
Android
0
CVE-2019-15296
https://www.cvedetails.com/cve/CVE-2019-15296/
CWE-119
https://github.com/knik0/faad2/commit/942c3e0aee748ea6fe97cb2c1aa5893225316174
942c3e0aee748ea6fe97cb2c1aa5893225316174
Fix a couple buffer overflows https://hackerone.com/reports/502816 https://hackerone.com/reports/507858 https://github.com/videolan/vlc/blob/master/contrib/src/faad2/faad2-fix-overflows.patch
static void tns_data(ic_stream *ics, tns_info *tns, bitfile *ld) { uint8_t w, filt, i, start_coef_bits, coef_bits; uint8_t n_filt_bits = 2; uint8_t length_bits = 6; uint8_t order_bits = 5; if (ics->window_sequence == EIGHT_SHORT_SEQUENCE) { n_filt_bits = 1; length_bits = 4; order_bits = 3; } for (w = 0; w < ics->num_windows; w++) { tns->n_filt[w] = (uint8_t)faad_getbits(ld, n_filt_bits DEBUGVAR(1,74,"tns_data(): n_filt")); #if 0 printf("%d\n", tns->n_filt[w]); #endif if (tns->n_filt[w]) { if ((tns->coef_res[w] = faad_get1bit(ld DEBUGVAR(1,75,"tns_data(): coef_res"))) & 1) { start_coef_bits = 4; } else { start_coef_bits = 3; } #if 0 printf("%d\n", tns->coef_res[w]); #endif } for (filt = 0; filt < tns->n_filt[w]; filt++) { tns->length[w][filt] = (uint8_t)faad_getbits(ld, length_bits DEBUGVAR(1,76,"tns_data(): length")); #if 0 printf("%d\n", tns->length[w][filt]); #endif tns->order[w][filt] = (uint8_t)faad_getbits(ld, order_bits DEBUGVAR(1,77,"tns_data(): order")); #if 0 printf("%d\n", tns->order[w][filt]); #endif if (tns->order[w][filt]) { tns->direction[w][filt] = faad_get1bit(ld DEBUGVAR(1,78,"tns_data(): direction")); #if 0 printf("%d\n", tns->direction[w][filt]); #endif tns->coef_compress[w][filt] = faad_get1bit(ld DEBUGVAR(1,79,"tns_data(): coef_compress")); #if 0 printf("%d\n", tns->coef_compress[w][filt]); #endif coef_bits = start_coef_bits - tns->coef_compress[w][filt]; for (i = 0; i < tns->order[w][filt]; i++) { tns->coef[w][filt][i] = (uint8_t)faad_getbits(ld, coef_bits DEBUGVAR(1,80,"tns_data(): coef")); #if 0 printf("%d\n", tns->coef[w][filt][i]); #endif } } } } }
static void tns_data(ic_stream *ics, tns_info *tns, bitfile *ld) { uint8_t w, filt, i, start_coef_bits, coef_bits; uint8_t n_filt_bits = 2; uint8_t length_bits = 6; uint8_t order_bits = 5; if (ics->window_sequence == EIGHT_SHORT_SEQUENCE) { n_filt_bits = 1; length_bits = 4; order_bits = 3; } for (w = 0; w < ics->num_windows; w++) { tns->n_filt[w] = (uint8_t)faad_getbits(ld, n_filt_bits DEBUGVAR(1,74,"tns_data(): n_filt")); #if 0 printf("%d\n", tns->n_filt[w]); #endif if (tns->n_filt[w]) { if ((tns->coef_res[w] = faad_get1bit(ld DEBUGVAR(1,75,"tns_data(): coef_res"))) & 1) { start_coef_bits = 4; } else { start_coef_bits = 3; } #if 0 printf("%d\n", tns->coef_res[w]); #endif } for (filt = 0; filt < tns->n_filt[w]; filt++) { tns->length[w][filt] = (uint8_t)faad_getbits(ld, length_bits DEBUGVAR(1,76,"tns_data(): length")); #if 0 printf("%d\n", tns->length[w][filt]); #endif tns->order[w][filt] = (uint8_t)faad_getbits(ld, order_bits DEBUGVAR(1,77,"tns_data(): order")); #if 0 printf("%d\n", tns->order[w][filt]); #endif if (tns->order[w][filt]) { tns->direction[w][filt] = faad_get1bit(ld DEBUGVAR(1,78,"tns_data(): direction")); #if 0 printf("%d\n", tns->direction[w][filt]); #endif tns->coef_compress[w][filt] = faad_get1bit(ld DEBUGVAR(1,79,"tns_data(): coef_compress")); #if 0 printf("%d\n", tns->coef_compress[w][filt]); #endif coef_bits = start_coef_bits - tns->coef_compress[w][filt]; for (i = 0; i < tns->order[w][filt]; i++) { tns->coef[w][filt][i] = (uint8_t)faad_getbits(ld, coef_bits DEBUGVAR(1,80,"tns_data(): coef")); #if 0 printf("%d\n", tns->coef[w][filt][i]); #endif } } } } }
C
faad2
0
CVE-2015-3194
https://www.cvedetails.com/cve/CVE-2015-3194/
null
https://git.openssl.org/?p=openssl.git;a=commit;h=c394a488942387246653833359a5c94b5832674e
c394a488942387246653833359a5c94b5832674e
null
static RSA_OAEP_PARAMS *rsa_oaep_decode(const X509_ALGOR *alg, X509_ALGOR **pmaskHash) { const unsigned char *p; int plen; RSA_OAEP_PARAMS *pss; *pmaskHash = NULL; if (!alg->parameter || alg->parameter->type != V_ASN1_SEQUENCE) return NULL; p = alg->parameter->value.sequence->data; plen = alg->parameter->value.sequence->length; pss = d2i_RSA_OAEP_PARAMS(NULL, &p, plen); if (!pss) return NULL; *pmaskHash = rsa_mgf1_decode(pss->maskGenFunc); return pss; }
static RSA_OAEP_PARAMS *rsa_oaep_decode(const X509_ALGOR *alg, X509_ALGOR **pmaskHash) { const unsigned char *p; int plen; RSA_OAEP_PARAMS *pss; *pmaskHash = NULL; if (!alg->parameter || alg->parameter->type != V_ASN1_SEQUENCE) return NULL; p = alg->parameter->value.sequence->data; plen = alg->parameter->value.sequence->length; pss = d2i_RSA_OAEP_PARAMS(NULL, &p, plen); if (!pss) return NULL; *pmaskHash = rsa_mgf1_decode(pss->maskGenFunc); return pss; }
C
openssl
0
null
null
null
https://github.com/chromium/chromium/commit/d193f6bb5aa5bdc05e07f314abacf7d7bc466d3d
d193f6bb5aa5bdc05e07f314abacf7d7bc466d3d
cc: Make the PictureLayerImpl raster source null until commit. No need to make a raster source that is never used. R=enne, vmpstr BUG=387116 Review URL: https://codereview.chromium.org/809433003 Cr-Commit-Position: refs/heads/master@{#308466}
bool PictureLayerImpl::AllTilesRequiredAreReadyToDraw( TileRequirementCheck is_tile_required_callback) const { if (!HasValidTilePriorities()) return true; if (!tilings_) return true; if (visible_rect_for_tile_priority_.IsEmpty()) return true; gfx::Rect rect = GetViewportForTilePriorityInContentSpace(); rect.Intersect(visible_rect_for_tile_priority_); PictureLayerTiling* tiling = tilings_->FindTilingWithResolution(HIGH_RESOLUTION); if (!tiling) return true; for (PictureLayerTiling::CoverageIterator iter(tiling, 1.f, rect); iter; ++iter) { const Tile* tile = *iter; if (!tile) continue; if ((tiling->*is_tile_required_callback)(tile) && !tile->IsReadyToDraw()) { TRACE_EVENT_INSTANT0("cc", "Tile required, but not ready to draw.", TRACE_EVENT_SCOPE_THREAD); return false; } } return true; }
bool PictureLayerImpl::AllTilesRequiredAreReadyToDraw( TileRequirementCheck is_tile_required_callback) const { if (!HasValidTilePriorities()) return true; if (!tilings_) return true; if (visible_rect_for_tile_priority_.IsEmpty()) return true; gfx::Rect rect = GetViewportForTilePriorityInContentSpace(); rect.Intersect(visible_rect_for_tile_priority_); PictureLayerTiling* tiling = tilings_->FindTilingWithResolution(HIGH_RESOLUTION); if (!tiling) return true; for (PictureLayerTiling::CoverageIterator iter(tiling, 1.f, rect); iter; ++iter) { const Tile* tile = *iter; if (!tile) continue; if ((tiling->*is_tile_required_callback)(tile) && !tile->IsReadyToDraw()) { TRACE_EVENT_INSTANT0("cc", "Tile required, but not ready to draw.", TRACE_EVENT_SCOPE_THREAD); return false; } } return true; }
C
Chrome
0
CVE-2013-0925
https://www.cvedetails.com/cve/CVE-2013-0925/
CWE-264
https://github.com/chromium/chromium/commit/f7ae1f7a918f1973dca241a7a23169906eaf4fe3
f7ae1f7a918f1973dca241a7a23169906eaf4fe3
Do not pass URLs in onUpdated events to extensions unless they have the "tabs" permission. BUG=168442 Review URL: https://chromiumcodereview.appspot.com/11824004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176406 0039d316-1c4b-4281-b951-d872f2087c98
MessageService::MessageService( LazyBackgroundTaskQueue* queue) : lazy_background_task_queue_(queue), weak_factory_(this) { registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_TERMINATED, content::NotificationService::AllBrowserContextsAndSources()); registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CLOSED, content::NotificationService::AllBrowserContextsAndSources()); }
MessageService::MessageService( LazyBackgroundTaskQueue* queue) : lazy_background_task_queue_(queue), weak_factory_(this) { registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_TERMINATED, content::NotificationService::AllBrowserContextsAndSources()); registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CLOSED, content::NotificationService::AllBrowserContextsAndSources()); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/fc790462b4f248712bbc8c3734664dd6b05f80f2
fc790462b4f248712bbc8c3734664dd6b05f80f2
Set the job name for the print job on the Mac. BUG=http://crbug.com/29188 TEST=as in bug Review URL: http://codereview.chromium.org/1997016 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@47056 0039d316-1c4b-4281-b951-d872f2087c98
void ResourceMessageFilter::OnOpenFile(const FilePath& path, int mode, IPC::Message* reply_msg) { if (!ChildProcessSecurityPolicy::GetInstance()->CanUploadFile(id(), path)) { ViewHostMsg_OpenFile::WriteReplyParams( reply_msg, base::kInvalidPlatformFileValue); Send(reply_msg); return; } ChromeThread::PostTask( ChromeThread::FILE, FROM_HERE, NewRunnableMethod( this, &ResourceMessageFilter::OnOpenFileOnFileThread, path, mode, reply_msg)); }
void ResourceMessageFilter::OnOpenFile(const FilePath& path, int mode, IPC::Message* reply_msg) { if (!ChildProcessSecurityPolicy::GetInstance()->CanUploadFile(id(), path)) { ViewHostMsg_OpenFile::WriteReplyParams( reply_msg, base::kInvalidPlatformFileValue); Send(reply_msg); return; } ChromeThread::PostTask( ChromeThread::FILE, FROM_HERE, NewRunnableMethod( this, &ResourceMessageFilter::OnOpenFileOnFileThread, path, mode, reply_msg)); }
C
Chrome
0
CVE-2016-5204
https://www.cvedetails.com/cve/CVE-2016-5204/
CWE-79
https://github.com/chromium/chromium/commit/e1e67d5d341d82c61cab2c41ff4163f17caf14ae
e1e67d5d341d82c61cab2c41ff4163f17caf14ae
Add boolean to UserIntiatedInfo noting if an input event led to navigation. Also refactor UkmPageLoadMetricsObserver to use this new boolean to report the user initiated metric in RecordPageLoadExtraInfoMetrics, so that it works correctly in the case when the page load failed. Bug: 925104 Change-Id: Ie08e7d3912cb1da484190d838005e95e57a209ff Reviewed-on: https://chromium-review.googlesource.com/c/1450460 Commit-Queue: Annie Sullivan <sullivan@chromium.org> Reviewed-by: Bryan McQuade <bmcquade@chromium.org> Cr-Commit-Position: refs/heads/master@{#630870}
void MetricsWebContentsObserver::NotifyPageEndAllLoadsWithTimestamp( PageEndReason page_end_reason, UserInitiatedInfo user_initiated_info, base::TimeTicks timestamp, bool is_certainly_browser_timestamp) { if (committed_load_) { committed_load_->NotifyPageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } for (const auto& kv : provisional_loads_) { kv.second->NotifyPageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } for (const auto& tracker : aborted_provisional_loads_) { if (tracker->IsLikelyProvisionalAbort(timestamp)) { tracker->UpdatePageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } } aborted_provisional_loads_.clear(); }
void MetricsWebContentsObserver::NotifyPageEndAllLoadsWithTimestamp( PageEndReason page_end_reason, UserInitiatedInfo user_initiated_info, base::TimeTicks timestamp, bool is_certainly_browser_timestamp) { if (committed_load_) { committed_load_->NotifyPageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } for (const auto& kv : provisional_loads_) { kv.second->NotifyPageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } for (const auto& tracker : aborted_provisional_loads_) { if (tracker->IsLikelyProvisionalAbort(timestamp)) { tracker->UpdatePageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } } aborted_provisional_loads_.clear(); }
C
Chrome
0
CVE-2018-18351
https://www.cvedetails.com/cve/CVE-2018-18351/
CWE-20
https://github.com/chromium/chromium/commit/07fbae50670ea44e35e1d554db1bbece7fe3711f
07fbae50670ea44e35e1d554db1bbece7fe3711f
Check ancestors when setting an <iframe> navigation's "site for cookies". Currently, we're setting the "site for cookies" only by looking at the top-level document. We ought to be verifying that the ancestor frames are same-site before doing so. We do this correctly in Blink (see `Document::SiteForCookies`), but didn't do so when navigating in the browser. This patch addresses the majority of the problem by walking the ancestor chain when processing a NavigationRequest. If all the ancestors are same-site, we set the "site for cookies" to the top-level document's URL. If they aren't all same-site, we set it to an empty URL to ensure that we don't send SameSite cookies. Bug: 833847 Change-Id: Icd77f31fa618fa9f8b59fc3b15e1bed6ee05aabd Reviewed-on: https://chromium-review.googlesource.com/1025772 Reviewed-by: Alex Moshchuk <alexmos@chromium.org> Commit-Queue: Mike West <mkwst@chromium.org> Cr-Commit-Position: refs/heads/master@{#553942}
void NavigationRequest::OnStartChecksComplete( NavigationThrottle::ThrottleCheckResult result) { DCHECK(result.action() != NavigationThrottle::DEFER); DCHECK(result.action() != NavigationThrottle::BLOCK_RESPONSE); if (on_start_checks_complete_closure_) on_start_checks_complete_closure_.Run(); if (result.action() == NavigationThrottle::CANCEL_AND_IGNORE || result.action() == NavigationThrottle::CANCEL || result.action() == NavigationThrottle::BLOCK_REQUEST || result.action() == NavigationThrottle::BLOCK_REQUEST_AND_COLLAPSE) { #if DCHECK_IS_ON() if (result.action() == NavigationThrottle::BLOCK_REQUEST) { DCHECK(result.net_error_code() == net::ERR_BLOCKED_BY_CLIENT || result.net_error_code() == net::ERR_BLOCKED_BY_ADMINISTRATOR); } else if (result.action() == NavigationThrottle::CANCEL_AND_IGNORE) { DCHECK_EQ(result.net_error_code(), net::ERR_ABORTED); } #endif BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::BindOnce(&NavigationRequest::OnRequestFailedInternal, weak_factory_.GetWeakPtr(), false, result.net_error_code(), base::nullopt, true, result.error_page_content())); return; } DCHECK_NE(AssociatedSiteInstanceType::NONE, associated_site_instance_type_); RenderFrameHostImpl* navigating_frame_host = associated_site_instance_type_ == AssociatedSiteInstanceType::SPECULATIVE ? frame_tree_node_->render_manager()->speculative_frame_host() : frame_tree_node_->current_frame_host(); DCHECK(navigating_frame_host); navigation_handle_->SetExpectedProcess(navigating_frame_host->GetProcess()); BrowserContext* browser_context = frame_tree_node_->navigator()->GetController()->GetBrowserContext(); StoragePartition* partition = BrowserContext::GetStoragePartition( browser_context, navigating_frame_host->GetSiteInstance()); DCHECK(partition); bool can_create_service_worker = (frame_tree_node_->pending_frame_policy().sandbox_flags & blink::WebSandboxFlags::kOrigin) != blink::WebSandboxFlags::kOrigin; request_params_.should_create_service_worker = can_create_service_worker; if (can_create_service_worker) { ServiceWorkerContextWrapper* service_worker_context = static_cast<ServiceWorkerContextWrapper*>( partition->GetServiceWorkerContext()); navigation_handle_->InitServiceWorkerHandle(service_worker_context); } if (IsSchemeSupportedForAppCache(common_params_.url)) { if (navigating_frame_host->GetRenderViewHost() ->GetWebkitPreferences() .application_cache_enabled) { navigation_handle_->InitAppCacheHandle( static_cast<ChromeAppCacheService*>(partition->GetAppCacheService())); } } request_params_.navigation_timing.fetch_start = base::TimeTicks::Now(); GURL base_url; #if defined(OS_ANDROID) NavigationEntry* last_committed_entry = frame_tree_node_->navigator()->GetController()->GetLastCommittedEntry(); if (last_committed_entry) base_url = last_committed_entry->GetBaseURLForDataURL(); #endif const GURL& top_document_url = !base_url.is_empty() ? base_url : frame_tree_node_->frame_tree()->root()->current_url(); // Walk the ancestor chain to determine whether all frames are same-site. If // not, the |site_for_cookies| is set to an empty URL. // // 'Document::SiteForCookies()' in Blink, which special-cases extension // URLs and a few other sharp edges. const FrameTreeNode* current = frame_tree_node_->parent(); bool ancestors_are_same_site = true; while (current && ancestors_are_same_site) { if (!net::registry_controlled_domains::SameDomainOrHost( top_document_url, current->current_url(), net::registry_controlled_domains::INCLUDE_PRIVATE_REGISTRIES)) { ancestors_are_same_site = false; } current = current->parent(); } const GURL& site_for_cookies = ancestors_are_same_site ? (frame_tree_node_->IsMainFrame() ? common_params_.url : top_document_url) : GURL::EmptyGURL(); bool parent_is_main_frame = !frame_tree_node_->parent() ? false : frame_tree_node_->parent()->IsMainFrame(); std::unique_ptr<NavigationUIData> navigation_ui_data; if (navigation_handle_->GetNavigationUIData()) navigation_ui_data = navigation_handle_->GetNavigationUIData()->Clone(); bool is_for_guests_only = navigation_handle_->GetStartingSiteInstance()->GetSiteURL(). SchemeIs(kGuestScheme); bool report_raw_headers = false; RenderFrameDevToolsAgentHost::ApplyOverrides( frame_tree_node_, begin_params_.get(), &report_raw_headers); RenderFrameDevToolsAgentHost::OnNavigationRequestWillBeSent(*this); loader_ = NavigationURLLoader::Create( browser_context->GetResourceContext(), partition, std::make_unique<NavigationRequestInfo>( common_params_, begin_params_.Clone(), site_for_cookies, frame_tree_node_->IsMainFrame(), parent_is_main_frame, IsSecureFrame(frame_tree_node_->parent()), frame_tree_node_->frame_tree_node_id(), is_for_guests_only, report_raw_headers, navigating_frame_host->GetVisibilityState() == blink::mojom::PageVisibilityState::kPrerender, blob_url_loader_factory_ ? blob_url_loader_factory_->Clone() : nullptr), std::move(navigation_ui_data), navigation_handle_->service_worker_handle(), navigation_handle_->appcache_handle(), this); }
void NavigationRequest::OnStartChecksComplete( NavigationThrottle::ThrottleCheckResult result) { DCHECK(result.action() != NavigationThrottle::DEFER); DCHECK(result.action() != NavigationThrottle::BLOCK_RESPONSE); if (on_start_checks_complete_closure_) on_start_checks_complete_closure_.Run(); if (result.action() == NavigationThrottle::CANCEL_AND_IGNORE || result.action() == NavigationThrottle::CANCEL || result.action() == NavigationThrottle::BLOCK_REQUEST || result.action() == NavigationThrottle::BLOCK_REQUEST_AND_COLLAPSE) { #if DCHECK_IS_ON() if (result.action() == NavigationThrottle::BLOCK_REQUEST) { DCHECK(result.net_error_code() == net::ERR_BLOCKED_BY_CLIENT || result.net_error_code() == net::ERR_BLOCKED_BY_ADMINISTRATOR); } else if (result.action() == NavigationThrottle::CANCEL_AND_IGNORE) { DCHECK_EQ(result.net_error_code(), net::ERR_ABORTED); } #endif BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::BindOnce(&NavigationRequest::OnRequestFailedInternal, weak_factory_.GetWeakPtr(), false, result.net_error_code(), base::nullopt, true, result.error_page_content())); return; } DCHECK_NE(AssociatedSiteInstanceType::NONE, associated_site_instance_type_); RenderFrameHostImpl* navigating_frame_host = associated_site_instance_type_ == AssociatedSiteInstanceType::SPECULATIVE ? frame_tree_node_->render_manager()->speculative_frame_host() : frame_tree_node_->current_frame_host(); DCHECK(navigating_frame_host); navigation_handle_->SetExpectedProcess(navigating_frame_host->GetProcess()); BrowserContext* browser_context = frame_tree_node_->navigator()->GetController()->GetBrowserContext(); StoragePartition* partition = BrowserContext::GetStoragePartition( browser_context, navigating_frame_host->GetSiteInstance()); DCHECK(partition); bool can_create_service_worker = (frame_tree_node_->pending_frame_policy().sandbox_flags & blink::WebSandboxFlags::kOrigin) != blink::WebSandboxFlags::kOrigin; request_params_.should_create_service_worker = can_create_service_worker; if (can_create_service_worker) { ServiceWorkerContextWrapper* service_worker_context = static_cast<ServiceWorkerContextWrapper*>( partition->GetServiceWorkerContext()); navigation_handle_->InitServiceWorkerHandle(service_worker_context); } if (IsSchemeSupportedForAppCache(common_params_.url)) { if (navigating_frame_host->GetRenderViewHost() ->GetWebkitPreferences() .application_cache_enabled) { navigation_handle_->InitAppCacheHandle( static_cast<ChromeAppCacheService*>(partition->GetAppCacheService())); } } request_params_.navigation_timing.fetch_start = base::TimeTicks::Now(); GURL base_url; #if defined(OS_ANDROID) NavigationEntry* last_committed_entry = frame_tree_node_->navigator()->GetController()->GetLastCommittedEntry(); if (last_committed_entry) base_url = last_committed_entry->GetBaseURLForDataURL(); #endif const GURL& top_document_url = !base_url.is_empty() ? base_url : frame_tree_node_->frame_tree()->root()->current_url(); const GURL& site_for_cookies = frame_tree_node_->IsMainFrame() ? common_params_.url : top_document_url; bool parent_is_main_frame = !frame_tree_node_->parent() ? false : frame_tree_node_->parent()->IsMainFrame(); std::unique_ptr<NavigationUIData> navigation_ui_data; if (navigation_handle_->GetNavigationUIData()) navigation_ui_data = navigation_handle_->GetNavigationUIData()->Clone(); bool is_for_guests_only = navigation_handle_->GetStartingSiteInstance()->GetSiteURL(). SchemeIs(kGuestScheme); bool report_raw_headers = false; RenderFrameDevToolsAgentHost::ApplyOverrides( frame_tree_node_, begin_params_.get(), &report_raw_headers); RenderFrameDevToolsAgentHost::OnNavigationRequestWillBeSent(*this); loader_ = NavigationURLLoader::Create( browser_context->GetResourceContext(), partition, std::make_unique<NavigationRequestInfo>( common_params_, begin_params_.Clone(), site_for_cookies, frame_tree_node_->IsMainFrame(), parent_is_main_frame, IsSecureFrame(frame_tree_node_->parent()), frame_tree_node_->frame_tree_node_id(), is_for_guests_only, report_raw_headers, navigating_frame_host->GetVisibilityState() == blink::mojom::PageVisibilityState::kPrerender, blob_url_loader_factory_ ? blob_url_loader_factory_->Clone() : nullptr), std::move(navigation_ui_data), navigation_handle_->service_worker_handle(), navigation_handle_->appcache_handle(), this); }
C
Chrome
1
CVE-2016-5164
https://www.cvedetails.com/cve/CVE-2016-5164/
CWE-79
https://github.com/chromium/chromium/commit/93bc623489bdcfc7e9127614fcfb3258edf3f0f9
93bc623489bdcfc7e9127614fcfb3258edf3f0f9
[DevTools] Copy objects from debugger context to inspected context properly. BUG=637594 Review-Url: https://codereview.chromium.org/2253643002 Cr-Commit-Position: refs/heads/master@{#412436}
void V8Debugger::setBreakpointsActivated(bool activated) { if (!enabled()) { NOTREACHED(); return; } v8::HandleScope scope(m_isolate); v8::Context::Scope contextScope(debuggerContext()); v8::Local<v8::Object> info = v8::Object::New(m_isolate); info->Set(toV8StringInternalized(m_isolate, "enabled"), v8::Boolean::New(m_isolate, activated)); v8::Local<v8::Function> setBreakpointsActivated = v8::Local<v8::Function>::Cast(m_debuggerScript.Get(m_isolate)->Get(toV8StringInternalized(m_isolate, "setBreakpointsActivated"))); v8::Debug::Call(debuggerContext(), setBreakpointsActivated, info).ToLocalChecked(); m_breakpointsActivated = activated; }
void V8Debugger::setBreakpointsActivated(bool activated) { if (!enabled()) { NOTREACHED(); return; } v8::HandleScope scope(m_isolate); v8::Context::Scope contextScope(debuggerContext()); v8::Local<v8::Object> info = v8::Object::New(m_isolate); info->Set(toV8StringInternalized(m_isolate, "enabled"), v8::Boolean::New(m_isolate, activated)); v8::Local<v8::Function> setBreakpointsActivated = v8::Local<v8::Function>::Cast(m_debuggerScript.Get(m_isolate)->Get(toV8StringInternalized(m_isolate, "setBreakpointsActivated"))); v8::Debug::Call(debuggerContext(), setBreakpointsActivated, info).ToLocalChecked(); m_breakpointsActivated = activated; }
C
Chrome
0
CVE-2016-1639
https://www.cvedetails.com/cve/CVE-2016-1639/
null
https://github.com/chromium/chromium/commit/c66b1fc49870c514b1c1e8b53498153176d7ec2b
c66b1fc49870c514b1c1e8b53498153176d7ec2b
cros: Check initial auth type when showing views login. Bug: 859611 Change-Id: I0298db9bbf4aed6bd40600aef2e1c5794e8cd058 Reviewed-on: https://chromium-review.googlesource.com/1123056 Reviewed-by: Xiaoyin Hu <xiaoyinh@chromium.org> Commit-Queue: Jacob Dufault <jdufault@chromium.org> Cr-Commit-Position: refs/heads/master@{#572224}
bool IsEnterpriseManaged() { policy::BrowserPolicyConnectorChromeOS* connector = g_browser_process->platform_part()->browser_policy_connector_chromeos(); return connector->IsEnterpriseManaged(); }
bool IsEnterpriseManaged() { policy::BrowserPolicyConnectorChromeOS* connector = g_browser_process->platform_part()->browser_policy_connector_chromeos(); return connector->IsEnterpriseManaged(); }
C
Chrome
0
CVE-2018-11469
https://www.cvedetails.com/cve/CVE-2018-11469/
CWE-200
https://git.haproxy.org/?p=haproxy-1.8.git;a=commit;h=17514045e5d934dede62116216c1b016fe23dd06
17514045e5d934dede62116216c1b016fe23dd06
null
int del_hdr_value(struct buffer *buf, char **from, char *next) { char *prev = *from; if (*prev == ':') { /* We're removing the first value, preserve the colon and add a * space if possible. */ if (!HTTP_IS_CRLF(*next)) next++; prev++; if (prev < next) *prev++ = ' '; while (HTTP_IS_SPHT(*next)) next++; } else { /* Remove useless spaces before the old delimiter. */ while (HTTP_IS_SPHT(*(prev-1))) prev--; *from = prev; /* copy the delimiter and if possible a space if we're * not at the end of the line. */ if (!HTTP_IS_CRLF(*next)) { *prev++ = *next++; if (prev + 1 < next) *prev++ = ' '; while (HTTP_IS_SPHT(*next)) next++; } } return buffer_replace2(buf, prev, next, NULL, 0); }
int del_hdr_value(struct buffer *buf, char **from, char *next) { char *prev = *from; if (*prev == ':') { /* We're removing the first value, preserve the colon and add a * space if possible. */ if (!HTTP_IS_CRLF(*next)) next++; prev++; if (prev < next) *prev++ = ' '; while (HTTP_IS_SPHT(*next)) next++; } else { /* Remove useless spaces before the old delimiter. */ while (HTTP_IS_SPHT(*(prev-1))) prev--; *from = prev; /* copy the delimiter and if possible a space if we're * not at the end of the line. */ if (!HTTP_IS_CRLF(*next)) { *prev++ = *next++; if (prev + 1 < next) *prev++ = ' '; while (HTTP_IS_SPHT(*next)) next++; } } return buffer_replace2(buf, prev, next, NULL, 0); }
C
haproxy
0
CVE-2012-4467
https://www.cvedetails.com/cve/CVE-2012-4467/
CWE-399
https://github.com/torvalds/linux/commit/ed6fe9d614fc1bca95eb8c0ccd0e92db00ef9d5d
ed6fe9d614fc1bca95eb8c0ccd0e92db00ef9d5d
Fix order of arguments to compat_put_time[spec|val] Commit 644595f89620 ("compat: Handle COMPAT_USE_64BIT_TIME in net/socket.c") introduced a bug where the helper functions to take either a 64-bit or compat time[spec|val] got the arguments in the wrong order, passing the kernel stack pointer off as a user pointer (and vice versa). Because of the user address range check, that in turn then causes an EFAULT due to the user pointer range checking failing for the kernel address. Incorrectly resuling in a failed system call for 32-bit processes with a 64-bit kernel. On odder architectures like HP-PA (with separate user/kernel address spaces), it can be used read kernel memory. Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) { mutex_lock(&vlan_ioctl_mutex); vlan_ioctl_hook = hook; mutex_unlock(&vlan_ioctl_mutex); }
void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) { mutex_lock(&vlan_ioctl_mutex); vlan_ioctl_hook = hook; mutex_unlock(&vlan_ioctl_mutex); }
C
linux
0
CVE-2016-3861
https://www.cvedetails.com/cve/CVE-2016-3861/
CWE-119
https://android.googlesource.com/platform/frameworks/native/+/1f4b49e64adf4623eefda503bca61e253597b9bf
1f4b49e64adf4623eefda503bca61e253597b9bf
Add bound checks to utf16_to_utf8 Bug: 29250543 Change-Id: I518e7b2fe10aaa3f1c1987586a09b1110aff7e1a (cherry picked from commit 7e93b2ddcb49b5365fbe1dab134ffb38e6f1c719)
status_t Parcel::writeChar(char16_t val) { return writeInt32(int32_t(val)); }
status_t Parcel::writeChar(char16_t val) { return writeInt32(int32_t(val)); }
C
Android
0
CVE-2015-7515
https://www.cvedetails.com/cve/CVE-2015-7515/
null
https://github.com/torvalds/linux/commit/8e20cf2bce122ce9262d6034ee5d5b76fbb92f96
8e20cf2bce122ce9262d6034ee5d5b76fbb92f96
Input: aiptek - fix crash on detecting device without endpoints The aiptek driver crashes in aiptek_probe() when a specially crafted USB device without endpoints is detected. This fix adds a check that the device has proper configuration expected by the driver. Also an error return value is changed to more matching one in one of the error paths. Reported-by: Ralf Spenneberg <ralf@spenneberg.net> Signed-off-by: Vladis Dronov <vdronov@redhat.com> Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
store_tabletToolMode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct aiptek *aiptek = dev_get_drvdata(dev); int new_mode = map_str_to_val(tool_mode_map, buf, count); if (new_mode == AIPTEK_INVALID_VALUE) return -EINVAL; aiptek->newSetting.toolMode = new_mode; return count; }
store_tabletToolMode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct aiptek *aiptek = dev_get_drvdata(dev); int new_mode = map_str_to_val(tool_mode_map, buf, count); if (new_mode == AIPTEK_INVALID_VALUE) return -EINVAL; aiptek->newSetting.toolMode = new_mode; return count; }
C
linux
0
CVE-2016-2508
https://www.cvedetails.com/cve/CVE-2016-2508/
CWE-119
https://android.googlesource.com/platform/frameworks/av/+/f81038006b4c59a5a148dcad887371206033c28f
f81038006b4c59a5a148dcad887371206033c28f
MPEG4Extractor: ensure kKeyTrackID exists before creating an MPEG4Source as track. GenericSource: return error when no track exists. SampleIterator: make sure mSamplesPerChunk is not zero before using it as divisor. Bug: 21657957 Bug: 23705695 Bug: 22802344 Bug: 28799341 Change-Id: I7664992ade90b935d3f255dcd43ecc2898f30b04 (cherry picked from commit 0386c91b8a910a134e5898ffa924c1b6c7560b13)
NuPlayer::GenericSource::GenericSource( const sp<AMessage> &notify, bool uidValid, uid_t uid) : Source(notify), mAudioTimeUs(0), mAudioLastDequeueTimeUs(0), mVideoTimeUs(0), mVideoLastDequeueTimeUs(0), mFetchSubtitleDataGeneration(0), mFetchTimedTextDataGeneration(0), mDurationUs(-1ll), mAudioIsVorbis(false), mIsWidevine(false), mIsSecure(false), mIsStreaming(false), mUIDValid(uidValid), mUID(uid), mFd(-1), mDrmManagerClient(NULL), mBitrate(-1ll), mPollBufferingGeneration(0), mPendingReadBufferTypes(0), mBuffering(false), mPrepareBuffering(false), mPrevBufferPercentage(-1) { resetDataSource(); DataSource::RegisterDefaultSniffers(); }
NuPlayer::GenericSource::GenericSource( const sp<AMessage> &notify, bool uidValid, uid_t uid) : Source(notify), mAudioTimeUs(0), mAudioLastDequeueTimeUs(0), mVideoTimeUs(0), mVideoLastDequeueTimeUs(0), mFetchSubtitleDataGeneration(0), mFetchTimedTextDataGeneration(0), mDurationUs(-1ll), mAudioIsVorbis(false), mIsWidevine(false), mIsSecure(false), mIsStreaming(false), mUIDValid(uidValid), mUID(uid), mFd(-1), mDrmManagerClient(NULL), mBitrate(-1ll), mPollBufferingGeneration(0), mPendingReadBufferTypes(0), mBuffering(false), mPrepareBuffering(false), mPrevBufferPercentage(-1) { resetDataSource(); DataSource::RegisterDefaultSniffers(); }
C
Android
0
null
null
null
https://github.com/chromium/chromium/commit/c58d6ae09d0c916b6003238de09e34f14cce758f
c58d6ae09d0c916b6003238de09e34f14cce758f
Introduce a method to build the tree from a CompactHTMLToken https://bugs.webkit.org/show_bug.cgi?id=107082 Reviewed by Adam Barth. No new tests because covered by existing fast/parser tests. * html/parser/HTMLDocumentParser.cpp: (WebCore): (WebCore::HTMLDocumentParser::constructTreeFromCompactHTMLToken): * html/parser/HTMLDocumentParser.h: * html/parser/HTMLToken.h: (AtomicHTMLToken): (WebCore::AtomicHTMLToken::create): (WebCore::AtomicHTMLToken::AtomicHTMLToken): * xml/parser/MarkupTokenBase.h: (WebCore::AtomicMarkupTokenBase::AtomicMarkupTokenBase): (AtomicMarkupTokenBase): git-svn-id: svn://svn.chromium.org/blink/trunk@139953 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void HTMLDocumentParser::append(const SegmentedString& source) { if (isStopped()) return; RefPtr<HTMLDocumentParser> protect(this); if (m_preloadScanner) { if (m_input.current().isEmpty() && !isWaitingForScripts()) { m_preloadScanner.clear(); } else { m_preloadScanner->appendToEnd(source); if (isWaitingForScripts()) m_preloadScanner->scan(); } } m_input.appendToEnd(source); if (inPumpSession()) { return; } pumpTokenizerIfPossible(AllowYield); endIfDelayed(); }
void HTMLDocumentParser::append(const SegmentedString& source) { if (isStopped()) return; RefPtr<HTMLDocumentParser> protect(this); if (m_preloadScanner) { if (m_input.current().isEmpty() && !isWaitingForScripts()) { m_preloadScanner.clear(); } else { m_preloadScanner->appendToEnd(source); if (isWaitingForScripts()) m_preloadScanner->scan(); } } m_input.appendToEnd(source); if (inPumpSession()) { return; } pumpTokenizerIfPossible(AllowYield); endIfDelayed(); }
C
Chrome
0
CVE-2018-14363
https://www.cvedetails.com/cve/CVE-2018-14363/
CWE-22
https://github.com/neomutt/neomutt/commit/9bfab35522301794483f8f9ed60820bdec9be59e
9bfab35522301794483f8f9ed60820bdec9be59e
sanitise cache paths Co-authored-by: JerikoOne <jeriko.one@gmx.us>
static int pop_fetch_message(struct Context *ctx, struct Message *msg, int msgno) { void *uidl = NULL; char buf[LONG_STRING]; char path[PATH_MAX]; struct Progress progressbar; struct PopData *pop_data = (struct PopData *) ctx->data; struct PopCache *cache = NULL; struct Header *h = ctx->hdrs[msgno]; unsigned short bcache = 1; /* see if we already have the message in body cache */ msg->fp = mutt_bcache_get(pop_data->bcache, cache_id(h->data)); if (msg->fp) return 0; /* * see if we already have the message in our cache in * case $message_cachedir is unset */ cache = &pop_data->cache[h->index % POP_CACHE_LEN]; if (cache->path) { if (cache->index == h->index) { /* yes, so just return a pointer to the message */ msg->fp = fopen(cache->path, "r"); if (msg->fp) return 0; mutt_perror(cache->path); return -1; } else { /* clear the previous entry */ unlink(cache->path); FREE(&cache->path); } } while (true) { if (pop_reconnect(ctx) < 0) return -1; /* verify that massage index is correct */ if (h->refno < 0) { mutt_error( _("The message index is incorrect. Try reopening the mailbox.")); return -1; } mutt_progress_init(&progressbar, _("Fetching message..."), MUTT_PROGRESS_SIZE, NetInc, h->content->length + h->content->offset - 1); /* see if we can put in body cache; use our cache as fallback */ msg->fp = mutt_bcache_put(pop_data->bcache, cache_id(h->data)); if (!msg->fp) { /* no */ bcache = 0; mutt_mktemp(path, sizeof(path)); msg->fp = mutt_file_fopen(path, "w+"); if (!msg->fp) { mutt_perror(path); return -1; } } snprintf(buf, sizeof(buf), "RETR %d\r\n", h->refno); const int ret = pop_fetch_data(pop_data, buf, &progressbar, fetch_message, msg->fp); if (ret == 0) break; mutt_file_fclose(&msg->fp); /* if RETR failed (e.g. connection closed), be sure to remove either * the file in bcache or from POP's own cache since the next iteration * of the loop will re-attempt to put() the message */ if (!bcache) unlink(path); if (ret == -2) { mutt_error("%s", pop_data->err_msg); return -1; } if (ret == -3) { mutt_error(_("Can't write message to temporary file!")); return -1; } } /* Update the header information. Previously, we only downloaded a * portion of the headers, those required for the main display. */ if (bcache) mutt_bcache_commit(pop_data->bcache, cache_id(h->data)); else { cache->index = h->index; cache->path = mutt_str_strdup(path); } rewind(msg->fp); uidl = h->data; /* we replace envelop, key in subj_hash has to be updated as well */ if (ctx->subj_hash && h->env->real_subj) mutt_hash_delete(ctx->subj_hash, h->env->real_subj, h); mutt_label_hash_remove(ctx, h); mutt_env_free(&h->env); h->env = mutt_rfc822_read_header(msg->fp, h, 0, 0); if (ctx->subj_hash && h->env->real_subj) mutt_hash_insert(ctx->subj_hash, h->env->real_subj, h); mutt_label_hash_add(ctx, h); h->data = uidl; h->lines = 0; fgets(buf, sizeof(buf), msg->fp); while (!feof(msg->fp)) { ctx->hdrs[msgno]->lines++; fgets(buf, sizeof(buf), msg->fp); } h->content->length = ftello(msg->fp) - h->content->offset; /* This needs to be done in case this is a multipart message */ if (!WithCrypto) h->security = crypt_query(h->content); mutt_clear_error(); rewind(msg->fp); return 0; }
static int pop_fetch_message(struct Context *ctx, struct Message *msg, int msgno) { void *uidl = NULL; char buf[LONG_STRING]; char path[PATH_MAX]; struct Progress progressbar; struct PopData *pop_data = (struct PopData *) ctx->data; struct PopCache *cache = NULL; struct Header *h = ctx->hdrs[msgno]; unsigned short bcache = 1; /* see if we already have the message in body cache */ msg->fp = mutt_bcache_get(pop_data->bcache, h->data); if (msg->fp) return 0; /* * see if we already have the message in our cache in * case $message_cachedir is unset */ cache = &pop_data->cache[h->index % POP_CACHE_LEN]; if (cache->path) { if (cache->index == h->index) { /* yes, so just return a pointer to the message */ msg->fp = fopen(cache->path, "r"); if (msg->fp) return 0; mutt_perror(cache->path); return -1; } else { /* clear the previous entry */ unlink(cache->path); FREE(&cache->path); } } while (true) { if (pop_reconnect(ctx) < 0) return -1; /* verify that massage index is correct */ if (h->refno < 0) { mutt_error( _("The message index is incorrect. Try reopening the mailbox.")); return -1; } mutt_progress_init(&progressbar, _("Fetching message..."), MUTT_PROGRESS_SIZE, NetInc, h->content->length + h->content->offset - 1); /* see if we can put in body cache; use our cache as fallback */ msg->fp = mutt_bcache_put(pop_data->bcache, h->data); if (!msg->fp) { /* no */ bcache = 0; mutt_mktemp(path, sizeof(path)); msg->fp = mutt_file_fopen(path, "w+"); if (!msg->fp) { mutt_perror(path); return -1; } } snprintf(buf, sizeof(buf), "RETR %d\r\n", h->refno); const int ret = pop_fetch_data(pop_data, buf, &progressbar, fetch_message, msg->fp); if (ret == 0) break; mutt_file_fclose(&msg->fp); /* if RETR failed (e.g. connection closed), be sure to remove either * the file in bcache or from POP's own cache since the next iteration * of the loop will re-attempt to put() the message */ if (!bcache) unlink(path); if (ret == -2) { mutt_error("%s", pop_data->err_msg); return -1; } if (ret == -3) { mutt_error(_("Can't write message to temporary file!")); return -1; } } /* Update the header information. Previously, we only downloaded a * portion of the headers, those required for the main display. */ if (bcache) mutt_bcache_commit(pop_data->bcache, h->data); else { cache->index = h->index; cache->path = mutt_str_strdup(path); } rewind(msg->fp); uidl = h->data; /* we replace envelop, key in subj_hash has to be updated as well */ if (ctx->subj_hash && h->env->real_subj) mutt_hash_delete(ctx->subj_hash, h->env->real_subj, h); mutt_label_hash_remove(ctx, h); mutt_env_free(&h->env); h->env = mutt_rfc822_read_header(msg->fp, h, 0, 0); if (ctx->subj_hash && h->env->real_subj) mutt_hash_insert(ctx->subj_hash, h->env->real_subj, h); mutt_label_hash_add(ctx, h); h->data = uidl; h->lines = 0; fgets(buf, sizeof(buf), msg->fp); while (!feof(msg->fp)) { ctx->hdrs[msgno]->lines++; fgets(buf, sizeof(buf), msg->fp); } h->content->length = ftello(msg->fp) - h->content->offset; /* This needs to be done in case this is a multipart message */ if (!WithCrypto) h->security = crypt_query(h->content); mutt_clear_error(); rewind(msg->fp); return 0; }
C
neomutt
1
CVE-2014-3171
https://www.cvedetails.com/cve/CVE-2014-3171/
null
https://github.com/chromium/chromium/commit/d10a8dac48d3a9467e81c62cb45208344f4542db
d10a8dac48d3a9467e81c62cb45208344f4542db
Replace further questionable HashMap::add usages in bindings BUG=390928 R=dcarney@chromium.org Review URL: https://codereview.chromium.org/411273002 git-svn-id: svn://svn.chromium.org/blink/trunk@178823 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void writeArrayBuffer(const ArrayBuffer& arrayBuffer) { append(ArrayBufferTag); doWriteArrayBuffer(arrayBuffer); }
void writeArrayBuffer(const ArrayBuffer& arrayBuffer) { append(ArrayBufferTag); doWriteArrayBuffer(arrayBuffer); }
C
Chrome
0
CVE-2016-2464
https://www.cvedetails.com/cve/CVE-2016-2464/
CWE-20
https://android.googlesource.com/platform/external/libvpx/+/65c49d5b382de4085ee5668732bcb0f6ecaf7148
65c49d5b382de4085ee5668732bcb0f6ecaf7148
Fix ParseElementHeader to support 0 payload elements Cherry-pick'ing Change 5c83bbec9a5f6f00a349674ddad85b753d2ea219 from upstream. This fixes regression in some edge cases for mkv playback. BUG=26499283 Change-Id: I88de03219a3d941b6b2f251d384e29c36bdd4d9b
int Track::Info::CopyStr(char* Info::*str, Info& dst_) const { if (str == static_cast<char * Info::*>(NULL)) return -1; char*& dst = dst_.*str; if (dst) // should be NULL already return -1; const char* const src = this->*str; if (src == NULL) return 0; const size_t len = strlen(src); dst = SafeArrayAlloc<char>(1, len + 1); if (dst == NULL) return -1; strcpy(dst, src); return 0; }
int Track::Info::CopyStr(char* Info::*str, Info& dst_) const { if (str == static_cast<char * Info::*>(NULL)) return -1; char*& dst = dst_.*str; if (dst) // should be NULL already return -1; const char* const src = this->*str; if (src == NULL) return 0; const size_t len = strlen(src); dst = SafeArrayAlloc<char>(1, len + 1); if (dst == NULL) return -1; strcpy(dst, src); return 0; }
C
Android
0
CVE-2011-4324
https://www.cvedetails.com/cve/CVE-2011-4324/
null
https://github.com/torvalds/linux/commit/dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res) { struct xdr_stream xdr; struct compound_hdr hdr; int status; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; if ((status = decode_putfh(&xdr)) != 0) goto out; if ((status = decode_savefh(&xdr)) != 0) goto out; if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0) goto out; if ((status = decode_getfh(&xdr, res->fh)) != 0) goto out; if (decode_getfattr(&xdr, res->fattr, res->server) != 0) goto out; if ((status = decode_restorefh(&xdr)) != 0) goto out; decode_getfattr(&xdr, res->dir_fattr, res->server); out: return status; }
static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res) { struct xdr_stream xdr; struct compound_hdr hdr; int status; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; if ((status = decode_putfh(&xdr)) != 0) goto out; if ((status = decode_savefh(&xdr)) != 0) goto out; if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0) goto out; if ((status = decode_getfh(&xdr, res->fh)) != 0) goto out; if (decode_getfattr(&xdr, res->fattr, res->server) != 0) goto out; if ((status = decode_restorefh(&xdr)) != 0) goto out; decode_getfattr(&xdr, res->dir_fattr, res->server); out: return status; }
C
linux
0
CVE-2018-19044
https://www.cvedetails.com/cve/CVE-2018-19044/
CWE-59
https://github.com/acassen/keepalived/commit/04f2d32871bb3b11d7dc024039952f2fe2750306
04f2d32871bb3b11d7dc024039952f2fe2750306
When opening files for write, ensure they aren't symbolic links Issue #1048 identified that if, for example, a non privileged user created a symbolic link from /etc/keepalvied.data to /etc/passwd, writing to /etc/keepalived.data (which could be invoked via DBus) would cause /etc/passwd to be overwritten. This commit stops keepalived writing to pathnames where the ultimate component is a symbolic link, by setting O_NOFOLLOW whenever opening a file for writing. This might break some setups, where, for example, /etc/keepalived.data was a symbolic link to /home/fred/keepalived.data. If this was the case, instead create a symbolic link from /home/fred/keepalived.data to /tmp/keepalived.data, so that the file is still accessible via /home/fred/keepalived.data. There doesn't appear to be a way around this backward incompatibility, since even checking if the pathname is a symbolic link prior to opening for writing would create a race condition. Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
try_up_instance(vrrp_t *vrrp, bool leaving_init) { int wantstate; if (leaving_init) { if (vrrp->num_script_if_fault) return; } else if (--vrrp->num_script_if_fault || vrrp->num_script_init) return; if (vrrp->wantstate == VRRP_STATE_MAST && vrrp->base_priority == VRRP_PRIO_OWNER) { vrrp->wantstate = VRRP_STATE_MAST; #ifdef _WITH_SNMP_RFCV3_ vrrp->stats->next_master_reason = VRRPV3_MASTER_REASON_PREEMPTED; #endif } else { vrrp->wantstate = VRRP_STATE_BACK; #ifdef _WITH_SNMP_RFCV3_ vrrp->stats->next_master_reason = VRRPV3_MASTER_REASON_MASTER_NO_RESPONSE; #endif } vrrp->master_adver_int = vrrp->adver_int; if (vrrp->wantstate == VRRP_STATE_MAST && vrrp->base_priority == VRRP_PRIO_OWNER) vrrp->ms_down_timer = vrrp->master_adver_int + VRRP_TIMER_SKEW(vrrp); else vrrp->ms_down_timer = 3 * vrrp->master_adver_int + VRRP_TIMER_SKEW(vrrp); if (vrrp->sync) { if (leaving_init) { if (vrrp->sync->num_member_fault) return; } else if (--vrrp->sync->num_member_fault || vrrp->sync->num_member_init) return; } /* If the sync group can't go to master, we must go to backup state */ wantstate = vrrp->wantstate; if (vrrp->sync && vrrp->wantstate == VRRP_STATE_MAST && !vrrp_sync_can_goto_master(vrrp)) vrrp->wantstate = VRRP_STATE_BACK; /* We can come up */ vrrp_state_leave_fault(vrrp); vrrp_init_instance_sands(vrrp); vrrp_thread_requeue_read(vrrp); vrrp->wantstate = wantstate; if (vrrp->sync) { if (vrrp->state == VRRP_STATE_MAST) vrrp_sync_master(vrrp); else vrrp_sync_backup(vrrp); } }
try_up_instance(vrrp_t *vrrp, bool leaving_init) { int wantstate; if (leaving_init) { if (vrrp->num_script_if_fault) return; } else if (--vrrp->num_script_if_fault || vrrp->num_script_init) return; if (vrrp->wantstate == VRRP_STATE_MAST && vrrp->base_priority == VRRP_PRIO_OWNER) { vrrp->wantstate = VRRP_STATE_MAST; #ifdef _WITH_SNMP_RFCV3_ vrrp->stats->next_master_reason = VRRPV3_MASTER_REASON_PREEMPTED; #endif } else { vrrp->wantstate = VRRP_STATE_BACK; #ifdef _WITH_SNMP_RFCV3_ vrrp->stats->next_master_reason = VRRPV3_MASTER_REASON_MASTER_NO_RESPONSE; #endif } vrrp->master_adver_int = vrrp->adver_int; if (vrrp->wantstate == VRRP_STATE_MAST && vrrp->base_priority == VRRP_PRIO_OWNER) vrrp->ms_down_timer = vrrp->master_adver_int + VRRP_TIMER_SKEW(vrrp); else vrrp->ms_down_timer = 3 * vrrp->master_adver_int + VRRP_TIMER_SKEW(vrrp); if (vrrp->sync) { if (leaving_init) { if (vrrp->sync->num_member_fault) return; } else if (--vrrp->sync->num_member_fault || vrrp->sync->num_member_init) return; } /* If the sync group can't go to master, we must go to backup state */ wantstate = vrrp->wantstate; if (vrrp->sync && vrrp->wantstate == VRRP_STATE_MAST && !vrrp_sync_can_goto_master(vrrp)) vrrp->wantstate = VRRP_STATE_BACK; /* We can come up */ vrrp_state_leave_fault(vrrp); vrrp_init_instance_sands(vrrp); vrrp_thread_requeue_read(vrrp); vrrp->wantstate = wantstate; if (vrrp->sync) { if (vrrp->state == VRRP_STATE_MAST) vrrp_sync_master(vrrp); else vrrp_sync_backup(vrrp); } }
C
keepalived
0
CVE-2012-2875
https://www.cvedetails.com/cve/CVE-2012-2875/
null
https://github.com/chromium/chromium/commit/d345af9ed62ee5f431be327967f41c3cc3fe936a
d345af9ed62ee5f431be327967f41c3cc3fe936a
[BlackBerry] Adapt to new BlackBerry::Platform::TouchPoint API https://bugs.webkit.org/show_bug.cgi?id=105143 RIM PR 171941 Reviewed by Rob Buis. Internally reviewed by George Staikos. Source/WebCore: TouchPoint instances now provide document coordinates for the viewport and content position of the touch event. The pixel coordinates stored in the TouchPoint should no longer be needed in WebKit. Also adapt to new method names and encapsulation of TouchPoint data members. No change in behavior, no new tests. * platform/blackberry/PlatformTouchPointBlackBerry.cpp: (WebCore::PlatformTouchPoint::PlatformTouchPoint): Source/WebKit/blackberry: TouchPoint instances now provide document coordinates for the viewport and content position of the touch event. The pixel coordinates stored in the TouchPoint should no longer be needed in WebKit. One exception is when passing events to a full screen plugin. Also adapt to new method names and encapsulation of TouchPoint data members. * Api/WebPage.cpp: (BlackBerry::WebKit::WebPage::touchEvent): (BlackBerry::WebKit::WebPage::touchPointAsMouseEvent): (BlackBerry::WebKit::WebPagePrivate::dispatchTouchEventToFullScreenPlugin): (BlackBerry::WebKit::WebPagePrivate::dispatchTouchPointAsMouseEventToFullScreenPlugin): * WebKitSupport/InputHandler.cpp: (BlackBerry::WebKit::InputHandler::shouldRequestSpellCheckingOptionsForPoint): * WebKitSupport/InputHandler.h: (InputHandler): * WebKitSupport/TouchEventHandler.cpp: (BlackBerry::WebKit::TouchEventHandler::doFatFingers): (BlackBerry::WebKit::TouchEventHandler::handleTouchPoint): * WebKitSupport/TouchEventHandler.h: (TouchEventHandler): Tools: Adapt to new method names and encapsulation of TouchPoint data members. * DumpRenderTree/blackberry/EventSender.cpp: (addTouchPointCallback): (updateTouchPointCallback): (touchEndCallback): (releaseTouchPointCallback): (sendTouchEvent): git-svn-id: svn://svn.chromium.org/blink/trunk@137880 bbb929c8-8fbe-4397-9dbb-9b2b20218538
InRegionScroller* WebPage::inRegionScroller() const { return d->m_inRegionScroller.get(); }
InRegionScroller* WebPage::inRegionScroller() const { return d->m_inRegionScroller.get(); }
C
Chrome
0
CVE-2016-2476
https://www.cvedetails.com/cve/CVE-2016-2476/
CWE-119
https://android.googlesource.com/platform/frameworks/av/+/94d9e646454f6246bf823b6897bd6aea5f08eda3
94d9e646454f6246bf823b6897bd6aea5f08eda3
Fix initialization of AAC presentation struct Otherwise the new size checks trip on this. Bug: 27207275 Change-Id: I1f8f01097e3a88ff041b69279a6121be842f1766
status_t ACodec::setVideoFormatOnPort( OMX_U32 portIndex, int32_t width, int32_t height, OMX_VIDEO_CODINGTYPE compressionFormat, float frameRate) { OMX_PARAM_PORTDEFINITIONTYPE def; InitOMXParams(&def); def.nPortIndex = portIndex; OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video; status_t err = mOMX->getParameter( mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); if (err != OK) { return err; } if (portIndex == kPortIndexInput) { const size_t X = 64 * 1024; if (def.nBufferSize < X) { def.nBufferSize = X; } } if (def.eDomain != OMX_PortDomainVideo) { ALOGE("expected video port, got %s(%d)", asString(def.eDomain), def.eDomain); return FAILED_TRANSACTION; } video_def->nFrameWidth = width; video_def->nFrameHeight = height; if (portIndex == kPortIndexInput) { video_def->eCompressionFormat = compressionFormat; video_def->eColorFormat = OMX_COLOR_FormatUnused; if (frameRate >= 0) { video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f); } } err = mOMX->setParameter( mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); return err; }
status_t ACodec::setVideoFormatOnPort( OMX_U32 portIndex, int32_t width, int32_t height, OMX_VIDEO_CODINGTYPE compressionFormat, float frameRate) { OMX_PARAM_PORTDEFINITIONTYPE def; InitOMXParams(&def); def.nPortIndex = portIndex; OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video; status_t err = mOMX->getParameter( mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); if (err != OK) { return err; } if (portIndex == kPortIndexInput) { const size_t X = 64 * 1024; if (def.nBufferSize < X) { def.nBufferSize = X; } } if (def.eDomain != OMX_PortDomainVideo) { ALOGE("expected video port, got %s(%d)", asString(def.eDomain), def.eDomain); return FAILED_TRANSACTION; } video_def->nFrameWidth = width; video_def->nFrameHeight = height; if (portIndex == kPortIndexInput) { video_def->eCompressionFormat = compressionFormat; video_def->eColorFormat = OMX_COLOR_FormatUnused; if (frameRate >= 0) { video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f); } } err = mOMX->setParameter( mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); return err; }
C
Android
0
CVE-2014-1713
https://www.cvedetails.com/cve/CVE-2014-1713/
CWE-399
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
f85a87ec670ad0fce9d98d90c9a705b72a288154
document.location bindings fix BUG=352374 R=jochen@chromium.org Review URL: https://codereview.chromium.org/196343011 git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void variadicNodeMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { if (UNLIKELY(info.Length() < 1)) { throwTypeError(ExceptionMessages::failedToExecute("variadicNodeMethod", "TestObject", ExceptionMessages::notEnoughArguments(1, info.Length())), info.GetIsolate()); return; } TestObject* imp = V8TestObject::toNative(info.Holder()); V8TRYCATCH_VOID(Node*, head, V8Node::toNativeWithTypeCheck(info.GetIsolate(), info[0])); Vector<RefPtr<Node> > tail; for (int i = 1; i < info.Length(); ++i) { if (!V8Node::hasInstance(info[i], info.GetIsolate())) { throwTypeError(ExceptionMessages::failedToExecute("variadicNodeMethod", "TestObject", "parameter 2 is not of type 'Node'."), info.GetIsolate()); return; } tail.append(V8Node::toNative(v8::Handle<v8::Object>::Cast(info[i]))); } imp->variadicNodeMethod(head, tail); }
static void variadicNodeMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { if (UNLIKELY(info.Length() < 1)) { throwTypeError(ExceptionMessages::failedToExecute("variadicNodeMethod", "TestObject", ExceptionMessages::notEnoughArguments(1, info.Length())), info.GetIsolate()); return; } TestObject* imp = V8TestObject::toNative(info.Holder()); V8TRYCATCH_VOID(Node*, head, V8Node::toNativeWithTypeCheck(info.GetIsolate(), info[0])); Vector<RefPtr<Node> > tail; for (int i = 1; i < info.Length(); ++i) { if (!V8Node::hasInstance(info[i], info.GetIsolate())) { throwTypeError(ExceptionMessages::failedToExecute("variadicNodeMethod", "TestObject", "parameter 2 is not of type 'Node'."), info.GetIsolate()); return; } tail.append(V8Node::toNative(v8::Handle<v8::Object>::Cast(info[i]))); } imp->variadicNodeMethod(head, tail); }
C
Chrome
0
CVE-2013-4387
https://www.cvedetails.com/cve/CVE-2013-4387/
CWE-119
https://github.com/torvalds/linux/commit/2811ebac2521ceac84f2bdae402455baa6a7fb47
2811ebac2521ceac84f2bdae402455baa6a7fb47
ipv6: udp packets following an UFO enqueued packet need also be handled by UFO In the following scenario the socket is corked: If the first UDP packet is larger then the mtu we try to append it to the write queue via ip6_ufo_append_data. A following packet, which is smaller than the mtu would be appended to the already queued up gso-skb via plain ip6_append_data. This causes random memory corruptions. In ip6_ufo_append_data we also have to be careful to not queue up the same skb multiple times. So setup the gso frame only when no first skb is available. This also fixes a shortcoming where we add the current packet's length to cork->length but return early because of a packet > mtu with dontfrag set (instead of sutracting it again). Found with trinity. Cc: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Reported-by: Dmitry Vyukov <dvyukov@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static int ip6_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; struct neighbour *neigh; struct in6_addr *nexthop; int ret; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); /* Do not check for IFF_ALLMULTI; multicast routing is not supported in any case. */ if (newskb) NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, newskb, NULL, newskb->dev, dev_loopback_xmit); if (ipv6_hdr(skb)->hop_limit == 0) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } } IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, skb->len); if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <= IPV6_ADDR_SCOPE_NODELOCAL && !(dev->flags & IFF_LOOPBACK)) { kfree_skb(skb); return 0; } } rcu_read_lock_bh(); nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { ret = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); return ret; } rcu_read_unlock_bh(); IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; }
static int ip6_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; struct neighbour *neigh; struct in6_addr *nexthop; int ret; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); /* Do not check for IFF_ALLMULTI; multicast routing is not supported in any case. */ if (newskb) NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, newskb, NULL, newskb->dev, dev_loopback_xmit); if (ipv6_hdr(skb)->hop_limit == 0) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } } IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, skb->len); if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <= IPV6_ADDR_SCOPE_NODELOCAL && !(dev->flags & IFF_LOOPBACK)) { kfree_skb(skb); return 0; } } rcu_read_lock_bh(); nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { ret = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); return ret; } rcu_read_unlock_bh(); IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; }
C
linux
0
CVE-2017-18203
https://www.cvedetails.com/cve/CVE-2017-18203/
CWE-362
https://github.com/torvalds/linux/commit/b9a41d21dceadf8104812626ef85dc56ee8a60ed
b9a41d21dceadf8104812626ef85dc56ee8a60ed
dm: fix race between dm_get_from_kobject() and __dm_destroy() The following BUG_ON was hit when testing repeat creation and removal of DM devices: kernel BUG at drivers/md/dm.c:2919! CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44 Call Trace: [<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a [<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e [<ffffffff817b46d1>] ? mutex_lock+0x26/0x44 [<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf [<ffffffff811de257>] kernfs_seq_show+0x23/0x25 [<ffffffff81199118>] seq_read+0x16f/0x325 [<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f [<ffffffff8117b625>] __vfs_read+0x26/0x9d [<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44 [<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9 [<ffffffff8117be9d>] vfs_read+0x8f/0xcf [<ffffffff81193e34>] ? __fdget_pos+0x12/0x41 [<ffffffff8117c686>] SyS_read+0x4b/0x76 [<ffffffff817b606e>] system_call_fastpath+0x12/0x71 The bug can be easily triggered, if an extra delay (e.g. 10ms) is added between the test of DMF_FREEING & DMF_DELETING and dm_get() in dm_get_from_kobject(). To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and dm_get() are done in an atomic way, so _minor_lock is used. The other callers of dm_get() have also been checked to be OK: some callers invoke dm_get() under _minor_lock, some callers invoke it under _hash_lock, and dm_start_request() invoke it after increasing md->open_count. Cc: stable@vger.kernel.org Signed-off-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; struct bio *bio = io->bio; int cpu; int rw = bio_data_dir(bio); io->start_time = jiffies; cpu = part_stat_lock(); part_round_stats(md->queue, cpu, &dm_disk(md)->part0); part_stat_unlock(); atomic_set(&dm_disk(md)->part0.in_flight[rw], atomic_inc_return(&md->pending[rw])); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), false, 0, &io->stats_aux); }
static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; struct bio *bio = io->bio; int cpu; int rw = bio_data_dir(bio); io->start_time = jiffies; cpu = part_stat_lock(); part_round_stats(md->queue, cpu, &dm_disk(md)->part0); part_stat_unlock(); atomic_set(&dm_disk(md)->part0.in_flight[rw], atomic_inc_return(&md->pending[rw])); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), false, 0, &io->stats_aux); }
C
linux
0
CVE-2017-0635
https://www.cvedetails.com/cve/CVE-2017-0635/
CWE-476
https://android.googlesource.com/platform/frameworks/av/+/523f6b49c1a2289161f40cf9fe80b92e592e9441
523f6b49c1a2289161f40cf9fe80b92e592e9441
Validate lengths in HEVC metadata parsing Add code to validate the size parameter passed to HecvParameterSets::addNalUnit(). Previously vulnerable to decrementing an unsigned past 0, yielding a huge result value. Bug: 35467107 Test: ran POC, no crash, emitted new "bad length" log entry Change-Id: Ia169b9edc1e0f7c5302e3c68aa90a54e8863d79e (cherry picked from commit e0dcf097cc029d056926029a29419e1650cbdf1b)
status_t HevcParameterSets::makeHvcc(uint8_t *hvcc, size_t *hvccSize, size_t nalSizeLength) { if (hvcc == NULL || hvccSize == NULL || (nalSizeLength != 4 && nalSizeLength != 2)) { return BAD_VALUE; } size_t size = 23; // 23 bytes in the header size_t numOfArrays = 0; const size_t numNalUnits = getNumNalUnits(); for (size_t i = 0; i < ARRAY_SIZE(kHevcNalUnitTypes); ++i) { uint8_t type = kHevcNalUnitTypes[i]; size_t numNalus = getNumNalUnitsOfType(type); if (numNalus == 0) { continue; } ++numOfArrays; size += 3; for (size_t j = 0; j < numNalUnits; ++j) { if (getType(j) != type) { continue; } size += 2 + getSize(j); } } uint8_t generalProfileSpace, generalTierFlag, generalProfileIdc; if (!findParam8(kGeneralProfileSpace, &generalProfileSpace) || !findParam8(kGeneralTierFlag, &generalTierFlag) || !findParam8(kGeneralProfileIdc, &generalProfileIdc)) { return ERROR_MALFORMED; } uint32_t compatibilityFlags; uint64_t constraintIdcFlags; if (!findParam32(kGeneralProfileCompatibilityFlags, &compatibilityFlags) || !findParam64(kGeneralConstraintIndicatorFlags, &constraintIdcFlags)) { return ERROR_MALFORMED; } uint8_t generalLevelIdc; if (!findParam8(kGeneralLevelIdc, &generalLevelIdc)) { return ERROR_MALFORMED; } uint8_t chromaFormatIdc, bitDepthLumaMinus8, bitDepthChromaMinus8; if (!findParam8(kChromaFormatIdc, &chromaFormatIdc) || !findParam8(kBitDepthLumaMinus8, &bitDepthLumaMinus8) || !findParam8(kBitDepthChromaMinus8, &bitDepthChromaMinus8)) { return ERROR_MALFORMED; } if (size > *hvccSize) { return NO_MEMORY; } *hvccSize = size; uint8_t *header = hvcc; header[0] = 1; header[1] = (kGeneralProfileSpace << 6) | (kGeneralTierFlag << 5) | kGeneralProfileIdc; header[2] = (compatibilityFlags >> 24) & 0xff; header[3] = (compatibilityFlags >> 16) & 0xff; header[4] = (compatibilityFlags >> 8) & 0xff; header[5] = compatibilityFlags & 0xff; header[6] = (constraintIdcFlags >> 40) & 0xff; header[7] = (constraintIdcFlags >> 32) & 0xff; header[8] = (constraintIdcFlags >> 24) & 0xff; header[9] = (constraintIdcFlags >> 16) & 0xff; header[10] = (constraintIdcFlags >> 8) & 0xff; header[11] = constraintIdcFlags & 0xff; header[12] = generalLevelIdc; header[13] = 0xf0; header[14] = 0; header[15] = 0xfc; header[16] = 0xfc | chromaFormatIdc; header[17] = 0xf8 | bitDepthLumaMinus8; header[18] = 0xf8 | bitDepthChromaMinus8; header[19] = 0; header[20] = 0; header[21] = nalSizeLength - 1; header[22] = numOfArrays; header += 23; for (size_t i = 0; i < ARRAY_SIZE(kHevcNalUnitTypes); ++i) { uint8_t type = kHevcNalUnitTypes[i]; size_t numNalus = getNumNalUnitsOfType(type); if (numNalus == 0) { continue; } header[0] = type; header[1] = (numNalus >> 8) & 0xff; header[2] = numNalus & 0xff; header += 3; for (size_t j = 0; j < numNalUnits; ++j) { if (getType(j) != type) { continue; } header[0] = (getSize(j) >> 8) & 0xff; header[1] = getSize(j) & 0xff; if (!write(j, header + 2, size - (header - (uint8_t *)hvcc))) { return NO_MEMORY; } header += (2 + getSize(j)); } } CHECK_EQ(header - size, hvcc); return OK; }
status_t HevcParameterSets::makeHvcc(uint8_t *hvcc, size_t *hvccSize, size_t nalSizeLength) { if (hvcc == NULL || hvccSize == NULL || (nalSizeLength != 4 && nalSizeLength != 2)) { return BAD_VALUE; } size_t size = 23; // 23 bytes in the header size_t numOfArrays = 0; const size_t numNalUnits = getNumNalUnits(); for (size_t i = 0; i < ARRAY_SIZE(kHevcNalUnitTypes); ++i) { uint8_t type = kHevcNalUnitTypes[i]; size_t numNalus = getNumNalUnitsOfType(type); if (numNalus == 0) { continue; } ++numOfArrays; size += 3; for (size_t j = 0; j < numNalUnits; ++j) { if (getType(j) != type) { continue; } size += 2 + getSize(j); } } uint8_t generalProfileSpace, generalTierFlag, generalProfileIdc; if (!findParam8(kGeneralProfileSpace, &generalProfileSpace) || !findParam8(kGeneralTierFlag, &generalTierFlag) || !findParam8(kGeneralProfileIdc, &generalProfileIdc)) { return ERROR_MALFORMED; } uint32_t compatibilityFlags; uint64_t constraintIdcFlags; if (!findParam32(kGeneralProfileCompatibilityFlags, &compatibilityFlags) || !findParam64(kGeneralConstraintIndicatorFlags, &constraintIdcFlags)) { return ERROR_MALFORMED; } uint8_t generalLevelIdc; if (!findParam8(kGeneralLevelIdc, &generalLevelIdc)) { return ERROR_MALFORMED; } uint8_t chromaFormatIdc, bitDepthLumaMinus8, bitDepthChromaMinus8; if (!findParam8(kChromaFormatIdc, &chromaFormatIdc) || !findParam8(kBitDepthLumaMinus8, &bitDepthLumaMinus8) || !findParam8(kBitDepthChromaMinus8, &bitDepthChromaMinus8)) { return ERROR_MALFORMED; } if (size > *hvccSize) { return NO_MEMORY; } *hvccSize = size; uint8_t *header = hvcc; header[0] = 1; header[1] = (kGeneralProfileSpace << 6) | (kGeneralTierFlag << 5) | kGeneralProfileIdc; header[2] = (compatibilityFlags >> 24) & 0xff; header[3] = (compatibilityFlags >> 16) & 0xff; header[4] = (compatibilityFlags >> 8) & 0xff; header[5] = compatibilityFlags & 0xff; header[6] = (constraintIdcFlags >> 40) & 0xff; header[7] = (constraintIdcFlags >> 32) & 0xff; header[8] = (constraintIdcFlags >> 24) & 0xff; header[9] = (constraintIdcFlags >> 16) & 0xff; header[10] = (constraintIdcFlags >> 8) & 0xff; header[11] = constraintIdcFlags & 0xff; header[12] = generalLevelIdc; header[13] = 0xf0; header[14] = 0; header[15] = 0xfc; header[16] = 0xfc | chromaFormatIdc; header[17] = 0xf8 | bitDepthLumaMinus8; header[18] = 0xf8 | bitDepthChromaMinus8; header[19] = 0; header[20] = 0; header[21] = nalSizeLength - 1; header[22] = numOfArrays; header += 23; for (size_t i = 0; i < ARRAY_SIZE(kHevcNalUnitTypes); ++i) { uint8_t type = kHevcNalUnitTypes[i]; size_t numNalus = getNumNalUnitsOfType(type); if (numNalus == 0) { continue; } header[0] = type; header[1] = (numNalus >> 8) & 0xff; header[2] = numNalus & 0xff; header += 3; for (size_t j = 0; j < numNalUnits; ++j) { if (getType(j) != type) { continue; } header[0] = (getSize(j) >> 8) & 0xff; header[1] = getSize(j) & 0xff; if (!write(j, header + 2, size - (header - (uint8_t *)hvcc))) { return NO_MEMORY; } header += (2 + getSize(j)); } } CHECK_EQ(header - size, hvcc); return OK; }
C
Android
0
CVE-2013-6644
https://www.cvedetails.com/cve/CVE-2013-6644/
null
https://github.com/chromium/chromium/commit/db93178bcaaf7e99ebb18bd51fa99b2feaf47e1f
db93178bcaaf7e99ebb18bd51fa99b2feaf47e1f
[Extensions] Add GetInstalledExtension() method to ExtensionRegistry This CL adds GetInstalledExtension() method to ExtensionRegistry and uses it instead of deprecated ExtensionService::GetInstalledExtension() in chrome/browser/ui/app_list/. Part of removing the deprecated GetInstalledExtension() call from the ExtensionService. BUG=489687 Review URL: https://codereview.chromium.org/1130353010 Cr-Commit-Position: refs/heads/master@{#333036}
AppListSyncableService::SyncItem* AppListSyncableService::FindOrAddSyncItem( AppListItem* app_item) { const std::string& item_id = app_item->id(); if (item_id.empty()) { LOG(ERROR) << "AppListItem item with empty ID"; return NULL; } SyncItem* sync_item = FindSyncItem(item_id); if (sync_item) { if (sync_item->item_type != sync_pb::AppListSpecifics::TYPE_REMOVE_DEFAULT_APP) { DVLOG(2) << this << ": AddItem already exists: " << sync_item->ToString(); return sync_item; } if (RemoveDefaultApp(app_item, sync_item)) return NULL; } return CreateSyncItemFromAppItem(app_item); }
AppListSyncableService::SyncItem* AppListSyncableService::FindOrAddSyncItem( AppListItem* app_item) { const std::string& item_id = app_item->id(); if (item_id.empty()) { LOG(ERROR) << "AppListItem item with empty ID"; return NULL; } SyncItem* sync_item = FindSyncItem(item_id); if (sync_item) { if (sync_item->item_type != sync_pb::AppListSpecifics::TYPE_REMOVE_DEFAULT_APP) { DVLOG(2) << this << ": AddItem already exists: " << sync_item->ToString(); return sync_item; } if (RemoveDefaultApp(app_item, sync_item)) return NULL; } return CreateSyncItemFromAppItem(app_item); }
C
Chrome
0
CVE-2014-3173
https://www.cvedetails.com/cve/CVE-2014-3173/
CWE-119
https://github.com/chromium/chromium/commit/ee7579229ff7e9e5ae28bf53aea069251499d7da
ee7579229ff7e9e5ae28bf53aea069251499d7da
Framebuffer clear() needs to consider the situation some draw buffers are disabled. This is when we expose DrawBuffers extension. BUG=376951 TEST=the attached test case, webgl conformance R=kbr@chromium.org,bajones@chromium.org Review URL: https://codereview.chromium.org/315283002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@275338 0039d316-1c4b-4281-b951-d872f2087c98
Renderbuffer* GetRenderbufferInfoForTarget( GLenum target) { Renderbuffer* renderbuffer = NULL; switch (target) { case GL_RENDERBUFFER: renderbuffer = state_.bound_renderbuffer.get(); break; default: NOTREACHED(); break; } return renderbuffer; }
Renderbuffer* GetRenderbufferInfoForTarget( GLenum target) { Renderbuffer* renderbuffer = NULL; switch (target) { case GL_RENDERBUFFER: renderbuffer = state_.bound_renderbuffer.get(); break; default: NOTREACHED(); break; } return renderbuffer; }
C
Chrome
0
CVE-2011-2349
https://www.cvedetails.com/cve/CVE-2011-2349/
CWE-399
https://github.com/chromium/chromium/commit/e755d9faf5c7d75a8ea290892cb1b5cc07c412ec
e755d9faf5c7d75a8ea290892cb1b5cc07c412ec
cros: The next 100 clang plugin errors. BUG=none TEST=none TBR=dpolukhin Review URL: http://codereview.chromium.org/7022008 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85418 0039d316-1c4b-4281-b951-d872f2087c98
void PluginSelectionPolicy::StartInit() { BrowserThread::PostTask( BrowserThread::FILE, FROM_HERE, NewRunnableMethod(this, &chromeos::PluginSelectionPolicy::Init)); }
void PluginSelectionPolicy::StartInit() { BrowserThread::PostTask( BrowserThread::FILE, FROM_HERE, NewRunnableMethod(this, &chromeos::PluginSelectionPolicy::Init)); }
C
Chrome
0
CVE-2018-6085
https://www.cvedetails.com/cve/CVE-2018-6085/
CWE-20
https://github.com/chromium/chromium/commit/df5b1e1f88e013bc96107cc52c4a4f33a8238444
df5b1e1f88e013bc96107cc52c4a4f33a8238444
Blockfile cache: fix long-standing sparse + evict reentrancy problem Thanks to nedwilliamson@ (on gmail) for an alternative perspective plus a reduction to make fixing this much easier. Bug: 826626, 518908, 537063, 802886 Change-Id: Ibfa01416f9a8e7f7b361e4f93b4b6b134728b85f Reviewed-on: https://chromium-review.googlesource.com/985052 Reviewed-by: Matt Menke <mmenke@chromium.org> Commit-Queue: Maks Orlovich <morlovich@chromium.org> Cr-Commit-Position: refs/heads/master@{#547103}
BackendImpl::~BackendImpl() { if (user_flags_ & kNoRandom) { background_queue_.WaitForPendingIO(); } else { background_queue_.DropPendingIO(); } if (background_queue_.BackgroundIsCurrentSequence()) { CleanupCache(); } else { background_queue_.background_thread()->PostTask( FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this))); base::ThreadRestrictions::ScopedAllowWait allow_wait; done_.Wait(); } }
BackendImpl::~BackendImpl() { if (user_flags_ & kNoRandom) { background_queue_.WaitForPendingIO(); } else { background_queue_.DropPendingIO(); } if (background_queue_.BackgroundIsCurrentSequence()) { CleanupCache(); } else { background_queue_.background_thread()->PostTask( FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this))); base::ThreadRestrictions::ScopedAllowWait allow_wait; done_.Wait(); } }
C
Chrome
0
CVE-2011-2918
https://www.cvedetails.com/cve/CVE-2011-2918/
CWE-399
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
perf: Remove the nmi parameter from the swevent and overflow interface The nmi parameter indicated if we could do wakeups from the current context, if not, we would set some state and self-IPI and let the resulting interrupt do the wakeup. For the various event classes: - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from the PMI-tail (ARM etc.) - tracepoint: nmi=0; since tracepoint could be from NMI context. - software: nmi=[0,1]; some, like the schedule thing cannot perform wakeups, and hence need 0. As one can see, there is very little nmi=1 usage, and the down-side of not using it is that on some platforms some software events can have a jiffy delay in wakeup (when arch_irq_work_raise isn't implemented). The up-side however is that we can remove the nmi parameter and save a bunch of conditionals in fast paths. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Michael Cree <mcree@orcon.net.nz> Cc: Will Deacon <will.deacon@arm.com> Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com> Cc: Anton Blanchard <anton@samba.org> Cc: Eric B Munson <emunson@mgebm.net> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Paul Mundt <lethal@linux-sh.org> Cc: David S. Miller <davem@davemloft.net> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Jason Wessel <jason.wessel@windriver.com> Cc: Don Zickus <dzickus@redhat.com> Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org Signed-off-by: Ingo Molnar <mingo@elte.hu>
static void sched_domain_node_span(int node, struct cpumask *span) { nodemask_t used_nodes; int i; cpumask_clear(span); nodes_clear(used_nodes); cpumask_or(span, span, cpumask_of_node(node)); node_set(node, used_nodes); for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { int next_node = find_next_best_node(node, &used_nodes); if (next_node < 0) break; cpumask_or(span, span, cpumask_of_node(next_node)); } }
static void sched_domain_node_span(int node, struct cpumask *span) { nodemask_t used_nodes; int i; cpumask_clear(span); nodes_clear(used_nodes); cpumask_or(span, span, cpumask_of_node(node)); node_set(node, used_nodes); for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { int next_node = find_next_best_node(node, &used_nodes); if (next_node < 0) break; cpumask_or(span, span, cpumask_of_node(next_node)); } }
C
linux
0
CVE-2011-2799
https://www.cvedetails.com/cve/CVE-2011-2799/
CWE-399
https://github.com/chromium/chromium/commit/5a2de6455f565783c73e53eae2c8b953e7d48520
5a2de6455f565783c73e53eae2c8b953e7d48520
2011-06-02 Joone Hur <joone.hur@collabora.co.uk> Reviewed by Martin Robinson. [GTK] Only load dictionaries if spell check is enabled https://bugs.webkit.org/show_bug.cgi?id=32879 We don't need to call enchant if enable-spell-checking is false. * webkit/webkitwebview.cpp: (webkit_web_view_update_settings): Skip loading dictionaries when enable-spell-checking is false. (webkit_web_view_settings_notify): Ditto. git-svn-id: svn://svn.chromium.org/blink/trunk@87925 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void webViewEnterFullscreen(WebKitWebView* webView, Node* node) { if (!node->hasTagName(HTMLNames::videoTag)) return; #if ENABLE(VIDEO) HTMLMediaElement* videoElement = static_cast<HTMLMediaElement*>(node); WebKitWebViewPrivate* priv = webView->priv; if (priv->fullscreenVideoController) priv->fullscreenVideoController->exitFullscreen(); priv->fullscreenVideoController = new FullscreenVideoController; priv->fullscreenVideoController->setMediaElement(videoElement); priv->fullscreenVideoController->enterFullscreen(); #endif }
void webViewEnterFullscreen(WebKitWebView* webView, Node* node) { if (!node->hasTagName(HTMLNames::videoTag)) return; #if ENABLE(VIDEO) HTMLMediaElement* videoElement = static_cast<HTMLMediaElement*>(node); WebKitWebViewPrivate* priv = webView->priv; if (priv->fullscreenVideoController) priv->fullscreenVideoController->exitFullscreen(); priv->fullscreenVideoController = new FullscreenVideoController; priv->fullscreenVideoController->setMediaElement(videoElement); priv->fullscreenVideoController->enterFullscreen(); #endif }
C
Chrome
0
CVE-2011-0006
https://www.cvedetails.com/cve/CVE-2011-0006/
CWE-264
https://github.com/torvalds/linux/commit/867c20265459d30a01b021a9c1e81fb4c5832aa9
867c20265459d30a01b021a9c1e81fb4c5832aa9
ima: fix add LSM rule bug If security_filter_rule_init() doesn't return a rule, then not everything is as fine as the return code implies. This bug only occurs when the LSM (eg. SELinux) is disabled at runtime. Adding an empty LSM rule causes ima_match_rules() to always succeed, ignoring any remaining rules. default IMA TCB policy: # PROC_SUPER_MAGIC dont_measure fsmagic=0x9fa0 # SYSFS_MAGIC dont_measure fsmagic=0x62656572 # DEBUGFS_MAGIC dont_measure fsmagic=0x64626720 # TMPFS_MAGIC dont_measure fsmagic=0x01021994 # SECURITYFS_MAGIC dont_measure fsmagic=0x73636673 < LSM specific rule > dont_measure obj_type=var_log_t measure func=BPRM_CHECK measure func=FILE_MMAP mask=MAY_EXEC measure func=FILE_CHECK mask=MAY_READ uid=0 Thus without the patch, with the boot parameters 'tcb selinux=0', adding the above 'dont_measure obj_type=var_log_t' rule to the default IMA TCB measurement policy, would result in nothing being measured. The patch prevents the default TCB policy from being replaced. Signed-off-by: Mimi Zohar <zohar@us.ibm.com> Cc: James Morris <jmorris@namei.org> Acked-by: Serge Hallyn <serge.hallyn@canonical.com> Cc: David Safford <safford@watson.ibm.com> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
void __init ima_init_policy(void) { int i, entries; /* if !ima_use_tcb set entries = 0 so we load NO default rules */ if (ima_use_tcb) entries = ARRAY_SIZE(default_rules); else entries = 0; for (i = 0; i < entries; i++) list_add_tail(&default_rules[i].list, &measure_default_rules); ima_measure = &measure_default_rules; }
void __init ima_init_policy(void) { int i, entries; /* if !ima_use_tcb set entries = 0 so we load NO default rules */ if (ima_use_tcb) entries = ARRAY_SIZE(default_rules); else entries = 0; for (i = 0; i < entries; i++) list_add_tail(&default_rules[i].list, &measure_default_rules); ima_measure = &measure_default_rules; }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/b7e899141194fa27d55a990e38ae8bdcc5183a90
b7e899141194fa27d55a990e38ae8bdcc5183a90
C++ readability change for cindylau. BUG=none TEST=none Review URL: http://codereview.chromium.org/2090008 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@48733 0039d316-1c4b-4281-b951-d872f2087c98
void set_linked_profile(OtrTestingProfile* profile) { linked_profile_ = profile; }
void set_linked_profile(OtrTestingProfile* profile) { linked_profile_ = profile; }
C
Chrome
0
CVE-2017-16528
https://www.cvedetails.com/cve/CVE-2017-16528/
CWE-416
https://github.com/torvalds/linux/commit/fc27fe7e8deef2f37cba3f2be2d52b6ca5eb9d57
fc27fe7e8deef2f37cba3f2be2d52b6ca5eb9d57
ALSA: seq: Cancel pending autoload work at unbinding device ALSA sequencer core has a mechanism to load the enumerated devices automatically, and it's performed in an off-load work. This seems causing some race when a sequencer is removed while the pending autoload work is running. As syzkaller spotted, it may lead to some use-after-free: BUG: KASAN: use-after-free in snd_rawmidi_dev_seq_free+0x69/0x70 sound/core/rawmidi.c:1617 Write of size 8 at addr ffff88006c611d90 by task kworker/2:1/567 CPU: 2 PID: 567 Comm: kworker/2:1 Not tainted 4.13.0+ #29 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: events autoload_drivers Call Trace: __dump_stack lib/dump_stack.c:16 [inline] dump_stack+0x192/0x22c lib/dump_stack.c:52 print_address_description+0x78/0x280 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x230/0x340 mm/kasan/report.c:409 __asan_report_store8_noabort+0x1c/0x20 mm/kasan/report.c:435 snd_rawmidi_dev_seq_free+0x69/0x70 sound/core/rawmidi.c:1617 snd_seq_dev_release+0x4f/0x70 sound/core/seq_device.c:192 device_release+0x13f/0x210 drivers/base/core.c:814 kobject_cleanup lib/kobject.c:648 [inline] kobject_release lib/kobject.c:677 [inline] kref_put include/linux/kref.h:70 [inline] kobject_put+0x145/0x240 lib/kobject.c:694 put_device+0x25/0x30 drivers/base/core.c:1799 klist_devices_put+0x36/0x40 drivers/base/bus.c:827 klist_next+0x264/0x4a0 lib/klist.c:403 next_device drivers/base/bus.c:270 [inline] bus_for_each_dev+0x17e/0x210 drivers/base/bus.c:312 autoload_drivers+0x3b/0x50 sound/core/seq_device.c:117 process_one_work+0x9fb/0x1570 kernel/workqueue.c:2097 worker_thread+0x1e4/0x1350 kernel/workqueue.c:2231 kthread+0x324/0x3f0 kernel/kthread.c:231 ret_from_fork+0x25/0x30 arch/x86/entry/entry_64.S:425 The fix is simply to assure canceling the autoload work at removing the device. Reported-by: Andrey Konovalov <andreyknvl@google.com> Tested-by: Andrey Konovalov <andreyknvl@google.com> Cc: <stable@vger.kernel.org> Signed-off-by: Takashi Iwai <tiwai@suse.de>
static int snd_seq_bus_match(struct device *dev, struct device_driver *drv) { struct snd_seq_device *sdev = to_seq_dev(dev); struct snd_seq_driver *sdrv = to_seq_drv(drv); return strcmp(sdrv->id, sdev->id) == 0 && sdrv->argsize == sdev->argsize; }
static int snd_seq_bus_match(struct device *dev, struct device_driver *drv) { struct snd_seq_device *sdev = to_seq_dev(dev); struct snd_seq_driver *sdrv = to_seq_drv(drv); return strcmp(sdrv->id, sdev->id) == 0 && sdrv->argsize == sdev->argsize; }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/df831400bcb63db4259b5858281b1727ba972a2a
df831400bcb63db4259b5858281b1727ba972a2a
WebKit2: Support window bounce when panning. https://bugs.webkit.org/show_bug.cgi?id=58065 <rdar://problem/9244367> Reviewed by Adam Roben. Make gestureDidScroll synchronous, as once we scroll, we need to know whether or not we are at the beginning or end of the scrollable document. If we are at either end of the scrollable document, we call the Windows 7 API to bounce the window to give an indication that you are past an end of the document. * UIProcess/WebPageProxy.cpp: (WebKit::WebPageProxy::gestureDidScroll): Pass a boolean for the reply, and return it. * UIProcess/WebPageProxy.h: * UIProcess/win/WebView.cpp: (WebKit::WebView::WebView): Inititalize a new variable. (WebKit::WebView::onGesture): Once we send the message to scroll, check if have gone to an end of the document, and if we have, bounce the window. * UIProcess/win/WebView.h: * WebProcess/WebPage/WebPage.h: * WebProcess/WebPage/WebPage.messages.in: GestureDidScroll is now sync. * WebProcess/WebPage/win/WebPageWin.cpp: (WebKit::WebPage::gestureDidScroll): When we are done scrolling, check if we have a vertical scrollbar and if we are at the beginning or the end of the scrollable document. git-svn-id: svn://svn.chromium.org/blink/trunk@83197 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void WebView::registerEditCommand(PassRefPtr<WebEditCommandProxy> prpCommand, WebPageProxy::UndoOrRedo undoOrRedo) { RefPtr<WebEditCommandProxy> command = prpCommand; m_undoClient.registerEditCommand(this, command, undoOrRedo); }
void WebView::registerEditCommand(PassRefPtr<WebEditCommandProxy> prpCommand, WebPageProxy::UndoOrRedo undoOrRedo) { RefPtr<WebEditCommandProxy> command = prpCommand; m_undoClient.registerEditCommand(this, command, undoOrRedo); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
DevTools: 'Overrides' UI overlay obstructs page and element inspector BUG=302862 R=vsevik@chromium.org Review URL: https://codereview.chromium.org/40233006 git-svn-id: svn://svn.chromium.org/blink/trunk@160559 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void InspectorPageAgent::updateViewMetrics(int width, int height, double deviceScaleFactor, bool fitWindow, bool textAutosizing) { m_client->overrideDeviceMetrics(width, height, static_cast<float>(deviceScaleFactor), fitWindow); Settings& settings = m_page->settings(); if (m_enabled && textAutosizing) { IntSize textAutosizingWindowSizeOverride = IntSize(width, height); textAutosizingWindowSizeOverride.scale((float)(1.0 / deviceScaleFactor)); settings.setTextAutosizingWindowSizeOverride(textAutosizingWindowSizeOverride); } Document* document = mainFrame()->document(); if (document) document->styleResolverChanged(RecalcStyleImmediately); InspectorInstrumentation::mediaQueryResultChanged(document); bool override = width && height; m_client->setShowFPSCounter(m_state->getBoolean(PageAgentState::pageAgentShowFPSCounter) && !override); m_client->setContinuousPaintingEnabled(m_state->getBoolean(PageAgentState::pageAgentContinuousPaintingEnabled) && !override); }
void InspectorPageAgent::updateViewMetrics(int width, int height, double deviceScaleFactor, bool fitWindow, bool textAutosizing) { m_client->overrideDeviceMetrics(width, height, static_cast<float>(deviceScaleFactor), fitWindow); Settings& settings = m_page->settings(); if (m_enabled && textAutosizing) { IntSize textAutosizingWindowSizeOverride = IntSize(width, height); textAutosizingWindowSizeOverride.scale((float)(1.0 / deviceScaleFactor)); settings.setTextAutosizingWindowSizeOverride(textAutosizingWindowSizeOverride); } Document* document = mainFrame()->document(); if (document) document->styleResolverChanged(RecalcStyleImmediately); InspectorInstrumentation::mediaQueryResultChanged(document); m_overlay->setOverride(InspectorOverlay::ViewportOverride, width && height); bool override = width && height; m_client->setShowFPSCounter(m_state->getBoolean(PageAgentState::pageAgentShowFPSCounter) && !override); m_client->setContinuousPaintingEnabled(m_state->getBoolean(PageAgentState::pageAgentContinuousPaintingEnabled) && !override); updateOverridesTopOffset(); }
C
Chrome
1
CVE-2011-2803
https://www.cvedetails.com/cve/CVE-2011-2803/
CWE-119
https://github.com/chromium/chromium/commit/48f2ec5c24570c9b96bb2798a9ffe956117c5066
48f2ec5c24570c9b96bb2798a9ffe956117c5066
Add OVERRIDE to ui::TreeModelObserver overridden methods. BUG=None TEST=None R=sky@chromium.org Review URL: http://codereview.chromium.org/7046093 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88827 0039d316-1c4b-4281-b951-d872f2087c98
void TreeView::CreateItem(HTREEITEM parent_item, HTREEITEM after, TreeModelNode* node) { DCHECK(node); TVINSERTSTRUCT insert_struct = {0}; insert_struct.hParent = parent_item; insert_struct.hInsertAfter = after; insert_struct.itemex.mask = TVIF_PARAM | TVIF_CHILDREN | TVIF_TEXT | TVIF_SELECTEDIMAGE | TVIF_IMAGE; insert_struct.itemex.pszText = LPSTR_TEXTCALLBACK; insert_struct.itemex.cChildren = I_CHILDRENCALLBACK; int icon_index = model_->GetIconIndex(node); if (icon_index == -1) { insert_struct.itemex.iImage = 0; insert_struct.itemex.iSelectedImage = 1; } else { insert_struct.itemex.iImage = icon_index + 2; insert_struct.itemex.iSelectedImage = icon_index + 2; } int node_id = next_id_++; insert_struct.itemex.lParam = node_id; NodeDetails* node_details = new NodeDetails(node_id, node); DCHECK(node_to_details_map_.count(node) == 0); DCHECK(id_to_details_map_.count(node_id) == 0); node_to_details_map_[node] = node_details; id_to_details_map_[node_id] = node_details; node_details->tree_item = TreeView_InsertItem(tree_view_, &insert_struct); }
void TreeView::CreateItem(HTREEITEM parent_item, HTREEITEM after, TreeModelNode* node) { DCHECK(node); TVINSERTSTRUCT insert_struct = {0}; insert_struct.hParent = parent_item; insert_struct.hInsertAfter = after; insert_struct.itemex.mask = TVIF_PARAM | TVIF_CHILDREN | TVIF_TEXT | TVIF_SELECTEDIMAGE | TVIF_IMAGE; insert_struct.itemex.pszText = LPSTR_TEXTCALLBACK; insert_struct.itemex.cChildren = I_CHILDRENCALLBACK; int icon_index = model_->GetIconIndex(node); if (icon_index == -1) { insert_struct.itemex.iImage = 0; insert_struct.itemex.iSelectedImage = 1; } else { insert_struct.itemex.iImage = icon_index + 2; insert_struct.itemex.iSelectedImage = icon_index + 2; } int node_id = next_id_++; insert_struct.itemex.lParam = node_id; NodeDetails* node_details = new NodeDetails(node_id, node); DCHECK(node_to_details_map_.count(node) == 0); DCHECK(id_to_details_map_.count(node_id) == 0); node_to_details_map_[node] = node_details; id_to_details_map_[node_id] = node_details; node_details->tree_item = TreeView_InsertItem(tree_view_, &insert_struct); }
C
Chrome
0
CVE-2013-7421
https://www.cvedetails.com/cve/CVE-2013-7421/
CWE-264
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
crypto: prefix module autoloading with "crypto-" This prefixes all crypto module loading with "crypto-" so we never run the risk of exposing module auto-loading to userspace via a crypto API, as demonstrated by Mathias Krause: https://lkml.org/lkml/2013/3/4/70 Signed-off-by: Kees Cook <keescook@chromium.org> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keysize) { struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); salsa20_keysetup(ctx, key, keysize); return 0; }
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keysize) { struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); salsa20_keysetup(ctx, key, keysize); return 0; }
C
linux
0
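The crypto record above closes an autoload hole by having the kernel request modules only under a "crypto-"-prefixed alias instead of the raw, user-controlled algorithm name. A minimal user-space sketch of that idea, building the prefixed name before handing it to a placeholder loader — the loader and names here are stand-ins, not the real kernel interfaces:

#include <stdio.h>
#include <string.h>

/* Placeholder for request_module(); here it only prints what would be loaded. */
static int load_module(const char *name)
{
    printf("would load: %s\n", name);
    return 0;
}

/* Request an algorithm implementation only under the crypto- alias, so a
 * user-supplied string like "vfat" cannot pull in an arbitrary module. */
static int request_crypto_module(const char *alg)
{
    char alias[64];

    if (snprintf(alias, sizeof(alias), "crypto-%s", alg) >= (int)sizeof(alias))
        return -1;                        /* name too long, refuse */
    return load_module(alias);
}

int main(void)
{
    request_crypto_module("salsa20");     /* would load: crypto-salsa20 */
    request_crypto_module("vfat");        /* would load: crypto-vfat (no such alias exists) */
    return 0;
}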
CVE-2017-9992
https://www.cvedetails.com/cve/CVE-2017-9992/
CWE-119
https://github.com/FFmpeg/FFmpeg/commit/f52fbf4f3ed02a7d872d8a102006f29b4421f360
f52fbf4f3ed02a7d872d8a102006f29b4421f360
avcodec/dfa: Fix off by 1 error Fixes out of array access Fixes: 1345/clusterfuzz-testcase-minimized-6062963045695488 Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
static int decode_wdlt(GetByteContext *gb, uint8_t *frame, int width, int height) { const uint8_t *frame_end = frame + width * height; uint8_t *line_ptr; int count, i, v, lines, segments; int y = 0; lines = bytestream2_get_le16(gb); if (lines > height) return AVERROR_INVALIDDATA; while (lines--) { if (bytestream2_get_bytes_left(gb) < 2) return AVERROR_INVALIDDATA; segments = bytestream2_get_le16u(gb); while ((segments & 0xC000) == 0xC000) { unsigned skip_lines = -(int16_t)segments; unsigned delta = -((int16_t)segments * width); if (frame_end - frame <= delta || y + lines + skip_lines > height) return AVERROR_INVALIDDATA; frame += delta; y += skip_lines; segments = bytestream2_get_le16(gb); } if (frame_end <= frame) return AVERROR_INVALIDDATA; if (segments & 0x8000) { frame[width - 1] = segments & 0xFF; segments = bytestream2_get_le16(gb); } line_ptr = frame; if (frame_end - frame < width) return AVERROR_INVALIDDATA; frame += width; y++; while (segments--) { if (frame - line_ptr <= bytestream2_peek_byte(gb)) return AVERROR_INVALIDDATA; line_ptr += bytestream2_get_byte(gb); count = (int8_t)bytestream2_get_byte(gb); if (count >= 0) { if (frame - line_ptr < count * 2) return AVERROR_INVALIDDATA; if (bytestream2_get_buffer(gb, line_ptr, count * 2) != count * 2) return AVERROR_INVALIDDATA; line_ptr += count * 2; } else { count = -count; if (frame - line_ptr < count * 2) return AVERROR_INVALIDDATA; v = bytestream2_get_le16(gb); for (i = 0; i < count; i++) bytestream_put_le16(&line_ptr, v); } } } return 0; }
static int decode_wdlt(GetByteContext *gb, uint8_t *frame, int width, int height) { const uint8_t *frame_end = frame + width * height; uint8_t *line_ptr; int count, i, v, lines, segments; int y = 0; lines = bytestream2_get_le16(gb); if (lines > height) return AVERROR_INVALIDDATA; while (lines--) { if (bytestream2_get_bytes_left(gb) < 2) return AVERROR_INVALIDDATA; segments = bytestream2_get_le16u(gb); while ((segments & 0xC000) == 0xC000) { unsigned skip_lines = -(int16_t)segments; unsigned delta = -((int16_t)segments * width); if (frame_end - frame <= delta || y + lines + skip_lines > height) return AVERROR_INVALIDDATA; frame += delta; y += skip_lines; segments = bytestream2_get_le16(gb); } if (frame_end <= frame) return AVERROR_INVALIDDATA; if (segments & 0x8000) { frame[width - 1] = segments & 0xFF; segments = bytestream2_get_le16(gb); } line_ptr = frame; if (frame_end - frame < width) return AVERROR_INVALIDDATA; frame += width; y++; while (segments--) { if (frame - line_ptr <= bytestream2_peek_byte(gb)) return AVERROR_INVALIDDATA; line_ptr += bytestream2_get_byte(gb); count = (int8_t)bytestream2_get_byte(gb); if (count >= 0) { if (frame - line_ptr < count * 2) return AVERROR_INVALIDDATA; if (bytestream2_get_buffer(gb, line_ptr, count * 2) != count * 2) return AVERROR_INVALIDDATA; line_ptr += count * 2; } else { count = -count; if (frame - line_ptr < count * 2) return AVERROR_INVALIDDATA; v = bytestream2_get_le16(gb); for (i = 0; i < count; i++) bytestream_put_le16(&line_ptr, v); } } } return 0; }
C
FFmpeg
0
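The FFmpeg record above is an off-by-one bounds check: the decoder advances a line pointer by an amount taken from the bitstream, and the guard has to reject the case where the advance consumes exactly the remaining room, not only more than it. A standalone sketch of that kind of check with made-up buffer and function names (not FFmpeg's API):

#include <stddef.h>
#include <stdio.h>

/* Advance *pos by skip inside buf[0..len-1], then write one byte.
 * The guard must use <= : if exactly skip bytes remain and we skip them all,
 * the subsequent write would land one element past the end. */
static int skip_and_write(unsigned char *buf, size_t len, size_t *pos,
                          size_t skip, unsigned char value)
{
    if (len - *pos <= skip)      /* '<' here would admit an out-of-bounds write */
        return -1;
    *pos += skip;
    buf[*pos] = value;
    return 0;
}

int main(void)
{
    unsigned char line[8] = {0};
    size_t pos = 0;

    printf("%d\n", skip_and_write(line, sizeof(line), &pos, 3, 0xAA)); /* 0: writes line[3] */
    printf("%d\n", skip_and_write(line, sizeof(line), &pos, 5, 0xBB)); /* -1: with '<' this would write line[8] */
    return 0;
}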
CVE-2016-2543
https://www.cvedetails.com/cve/CVE-2016-2543/
null
https://github.com/torvalds/linux/commit/030e2c78d3a91dd0d27fef37e91950dde333eba1
030e2c78d3a91dd0d27fef37e91950dde333eba1
ALSA: seq: Fix missing NULL check at remove_events ioctl snd_seq_ioctl_remove_events() calls snd_seq_fifo_clear() unconditionally even if there is no FIFO assigned, and this leads to an Oops due to NULL dereference. The fix is just to add a proper NULL check. Reported-by: Dmitry Vyukov <dvyukov@google.com> Tested-by: Dmitry Vyukov <dvyukov@google.com> Cc: <stable@vger.kernel.org> Signed-off-by: Takashi Iwai <tiwai@suse.de>
int snd_seq_delete_kernel_client(int client) { struct snd_seq_client *ptr; if (snd_BUG_ON(in_interrupt())) return -EBUSY; ptr = clientptr(client); if (ptr == NULL) return -EINVAL; seq_free_client(ptr); kfree(ptr); return 0; }
int snd_seq_delete_kernel_client(int client) { struct snd_seq_client *ptr; if (snd_BUG_ON(in_interrupt())) return -EBUSY; ptr = clientptr(client); if (ptr == NULL) return -EINVAL; seq_free_client(ptr); kfree(ptr); return 0; }
C
linux
0
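The ALSA record above describes a fix that amounts to guarding a clear call with a NULL test before touching an optional FIFO. A minimal user-space sketch of that guard pattern, using hypothetical stand-in types rather than the real ALSA sequencer structures:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures; only the shape matters. */
struct fifo {
    int pending;
};

struct client {
    struct fifo *pool;   /* may legitimately be NULL when no FIFO was assigned */
};

/* Unconditionally dereferencing cl->pool here would crash for FIFO-less clients. */
static int remove_events(struct client *cl)
{
    if (cl->pool == NULL)       /* the guard the commit message describes */
        return -1;              /* nothing to clear */
    cl->pool->pending = 0;      /* safe: pool is known to be non-NULL */
    return 0;
}

int main(void)
{
    struct client with_fifo = { .pool = &(struct fifo){ .pending = 3 } };
    struct client without_fifo = { .pool = NULL };

    printf("with fifo: %d\n", remove_events(&with_fifo));       /* 0  */
    printf("without fifo: %d\n", remove_events(&without_fifo)); /* -1, no oops */
    return 0;
}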
CVE-2009-3605
https://www.cvedetails.com/cve/CVE-2009-3605/
CWE-189
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
null
GBool GfxShadingBitBuf::getBits(int n, Guint *val) { int x; if (nBits >= n) { x = (bitBuf >> (nBits - n)) & ((1 << n) - 1); nBits -= n; } else { x = 0; if (nBits > 0) { x = bitBuf & ((1 << nBits) - 1); n -= nBits; nBits = 0; } while (n > 0) { if ((bitBuf = str->getChar()) == EOF) { nBits = 0; return gFalse; } if (n >= 8) { x = (x << 8) | bitBuf; n -= 8; } else { x = (x << n) | (bitBuf >> (8 - n)); nBits = 8 - n; n = 0; } } } *val = x; return gTrue; }
GBool GfxShadingBitBuf::getBits(int n, Guint *val) { int x; if (nBits >= n) { x = (bitBuf >> (nBits - n)) & ((1 << n) - 1); nBits -= n; } else { x = 0; if (nBits > 0) { x = bitBuf & ((1 << nBits) - 1); n -= nBits; nBits = 0; } while (n > 0) { if ((bitBuf = str->getChar()) == EOF) { nBits = 0; return gFalse; } if (n >= 8) { x = (x << 8) | bitBuf; n -= 8; } else { x = (x << n) | (bitBuf >> (8 - n)); nBits = 8 - n; n = 0; } } } *val = x; return gTrue; }
CPP
poppler
0
CVE-2015-6763
https://www.cvedetails.com/cve/CVE-2015-6763/
null
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
MacViews: Enable secure text input for password Textfields. In Cocoa the NSTextInputContext automatically enables secure text input when activated and it's in the secure text entry mode. RenderWidgetHostViewMac did the similar thing for ages following the WebKit example. views::Textfield needs to do the same thing in a fashion that's sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions are possible when the Textfield gets focus, activates the secure text input mode and the RWHVM loses focus immediately afterwards and disables the secure text input instead of leaving it in the enabled state. BUG=818133,677220 Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b Reviewed-on: https://chromium-review.googlesource.com/943064 Commit-Queue: Michail Pishchagin <mblsha@yandex-team.ru> Reviewed-by: Pavel Feldman <pfeldman@chromium.org> Reviewed-by: Avi Drissman <avi@chromium.org> Reviewed-by: Peter Kasting <pkasting@chromium.org> Cr-Commit-Position: refs/heads/master@{#542517}
unsigned HTMLInputElement::selectionEndForBinding( bool& is_null, ExceptionState& exception_state) const { if (!input_type_->SupportsSelectionAPI()) { is_null = true; return 0; } return TextControlElement::selectionEnd(); }
unsigned HTMLInputElement::selectionEndForBinding( bool& is_null, ExceptionState& exception_state) const { if (!input_type_->SupportsSelectionAPI()) { is_null = true; return 0; } return TextControlElement::selectionEnd(); }
C
Chrome
0
CVE-2016-9840
https://www.cvedetails.com/cve/CVE-2016-9840/
CWE-189
https://github.com/madler/zlib/commit/6a043145ca6e9c55184013841a67b2fef87e44c0
6a043145ca6e9c55184013841a67b2fef87e44c0
Remove offset pointer optimization in inftrees.c. inftrees.c was subtracting an offset from a pointer to an array, in order to provide a pointer that allowed indexing starting at the offset. This is not compliant with the C standard, for which the behavior of a pointer decremented before its allocated memory is undefined. Per the recommendation of a security audit of the zlib code by Trail of Bits and TrustInSoft, in support of the Mozilla Foundation, this tiny optimization was removed, in order to avoid the possibility of undefined behavior.
int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) codetype type; unsigned short FAR *lens; unsigned codes; code FAR * FAR *table; unsigned FAR *bits; unsigned short FAR *work; { unsigned len; /* a code's length in bits */ unsigned sym; /* index of code symbols */ unsigned min, max; /* minimum and maximum code lengths */ unsigned root; /* number of index bits for root table */ unsigned curr; /* number of index bits for current table */ unsigned drop; /* code bits to drop for sub-table */ int left; /* number of prefix codes available */ unsigned used; /* code entries in table used */ unsigned huff; /* Huffman code */ unsigned incr; /* for incrementing code, index */ unsigned fill; /* index for replicating entries */ unsigned low; /* low bits for current root entry */ unsigned mask; /* mask for low root bits */ code here; /* table entry for duplication */ code FAR *next; /* next available space in table */ const unsigned short FAR *base; /* base value table to use */ const unsigned short FAR *extra; /* extra bits table to use */ unsigned match; /* use base and extra for symbol >= match */ unsigned short count[MAXBITS+1]; /* number of codes of each length */ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ static const unsigned short lbase[31] = { /* Length codes 257..285 base */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const unsigned short lext[31] = { /* Length codes 257..285 extra */ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 203, 198}; static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const unsigned short dext[32] = { /* Distance codes 0..29 extra */ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 64, 64}; /* Process a set of code lengths to create a canonical Huffman code. The code lengths are lens[0..codes-1]. Each length corresponds to the symbols 0..codes-1. The Huffman code is generated by first sorting the symbols by length from short to long, and retaining the symbol order for codes with equal lengths. Then the code starts with all zero bits for the first code of the shortest length, and the codes are integer increments for the same length, and zeros are appended as the length increases. For the deflate format, these bits are stored backwards from their more natural integer increment ordering, and so when the decoding tables are built in the large loop below, the integer codes are incremented backwards. This routine assumes, but does not check, that all of the entries in lens[] are in the range 0..MAXBITS. The caller must assure this. 1..MAXBITS is interpreted as that code length. zero means that that symbol does not occur in this code. The codes are sorted by computing a count of codes for each length, creating from that a table of starting indices for each length in the sorted table, and then entering the symbols in order in the sorted table. The sorted table is work[], with that space being provided by the caller. The length counts are used for other purposes as well, i.e. 
finding the minimum and maximum length codes, determining if there are any codes at all, checking for a valid set of lengths, and looking ahead at length counts to determine sub-table sizes when building the decoding tables. */ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ for (len = 0; len <= MAXBITS; len++) count[len] = 0; for (sym = 0; sym < codes; sym++) count[lens[sym]]++; /* bound code lengths, force root to be within code lengths */ root = *bits; for (max = MAXBITS; max >= 1; max--) if (count[max] != 0) break; if (root > max) root = max; if (max == 0) { /* no symbols to code at all */ here.op = (unsigned char)64; /* invalid code marker */ here.bits = (unsigned char)1; here.val = (unsigned short)0; *(*table)++ = here; /* make a table to force an error */ *(*table)++ = here; *bits = 1; return 0; /* no symbols, but wait for decoding to report error */ } for (min = 1; min < max; min++) if (count[min] != 0) break; if (root < min) root = min; /* check for an over-subscribed or incomplete set of lengths */ left = 1; for (len = 1; len <= MAXBITS; len++) { left <<= 1; left -= count[len]; if (left < 0) return -1; /* over-subscribed */ } if (left > 0 && (type == CODES || max != 1)) return -1; /* incomplete set */ /* generate offsets into symbol table for each length for sorting */ offs[1] = 0; for (len = 1; len < MAXBITS; len++) offs[len + 1] = offs[len] + count[len]; /* sort symbols by length, by symbol order within each length */ for (sym = 0; sym < codes; sym++) if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; /* Create and fill in decoding tables. In this loop, the table being filled is at next and has curr index bits. The code being used is huff with length len. That code is converted to an index by dropping drop bits off of the bottom. For codes where len is less than drop + curr, those top drop + curr - len bits are incremented through all values to fill the table with replicated entries. root is the number of index bits for the root table. When len exceeds root, sub-tables are created pointed to by the root entry with an index of the low root bits of huff. This is saved in low to check for when a new sub-table should be started. drop is zero when the root table is being filled, and drop is root when sub-tables are being filled. When a new sub-table is needed, it is necessary to look ahead in the code lengths to determine what size sub-table is needed. The length counts are used for this, and so count[] is decremented as codes are entered in the tables. used keeps track of how many table entries have been allocated from the provided *table space. It is checked for LENS and DIST tables against the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in the initial root table size constants. See the comments in inftrees.h for more information. sym increments through all symbols, and the loop terminates when all codes of length max, i.e. all codes, have been processed. This routine permits incomplete codes, so another loop after this one fills in the rest of the decoding tables with invalid code markers. 
*/ /* set up for code type */ switch (type) { case CODES: base = extra = work; /* dummy value--not used */ match = 20; break; case LENS: base = lbase; extra = lext; match = 257; break; default: /* DISTS */ base = dbase; extra = dext; match = 0; } /* initialize state for loop */ huff = 0; /* starting code */ sym = 0; /* starting code symbol */ len = min; /* starting code length */ next = *table; /* current table to fill in */ curr = root; /* current table index bits */ drop = 0; /* current bits to drop from code for index */ low = (unsigned)(-1); /* trigger new sub-table when len > root */ used = 1U << root; /* use root table entries */ mask = used - 1; /* mask for comparing low */ /* check available table space */ if ((type == LENS && used > ENOUGH_LENS) || (type == DISTS && used > ENOUGH_DISTS)) return 1; /* process all codes and make table entries */ for (;;) { /* create table entry */ here.bits = (unsigned char)(len - drop); if (work[sym] + 1 < match) { here.op = (unsigned char)0; here.val = work[sym]; } else if (work[sym] >= match) { here.op = (unsigned char)(extra[work[sym] - match]); here.val = base[work[sym] - match]; } else { here.op = (unsigned char)(32 + 64); /* end of block */ here.val = 0; } /* replicate for those indices with low len bits equal to huff */ incr = 1U << (len - drop); fill = 1U << curr; min = fill; /* save offset to next table */ do { fill -= incr; next[(huff >> drop) + fill] = here; } while (fill != 0); /* backwards increment the len-bit code huff */ incr = 1U << (len - 1); while (huff & incr) incr >>= 1; if (incr != 0) { huff &= incr - 1; huff += incr; } else huff = 0; /* go to next symbol, update count, len */ sym++; if (--(count[len]) == 0) { if (len == max) break; len = lens[work[sym]]; } /* create new sub-table if needed */ if (len > root && (huff & mask) != low) { /* if first time, transition to sub-tables */ if (drop == 0) drop = root; /* increment past last table */ next += min; /* here min is 1 << curr */ /* determine length of next table */ curr = len - drop; left = (int)(1 << curr); while (curr + drop < max) { left -= count[curr + drop]; if (left <= 0) break; curr++; left <<= 1; } /* check for enough space */ used += 1U << curr; if ((type == LENS && used > ENOUGH_LENS) || (type == DISTS && used > ENOUGH_DISTS)) return 1; /* point entry in root table to sub-table */ low = huff & mask; (*table)[low].op = (unsigned char)curr; (*table)[low].bits = (unsigned char)root; (*table)[low].val = (unsigned short)(next - *table); } } /* fill in remaining table entry if code is incomplete (guaranteed to have at most one remaining entry, since if the code is incomplete, the maximum code length that was allowed to get this far is one bit) */ if (huff != 0) { here.op = (unsigned char)64; /* invalid code marker */ here.bits = (unsigned char)(len - drop); here.val = (unsigned short)0; next[huff] = here; } /* set return parameters */ *table += used; *bits = root; return 0; }
int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) codetype type; unsigned short FAR *lens; unsigned codes; code FAR * FAR *table; unsigned FAR *bits; unsigned short FAR *work; { unsigned len; /* a code's length in bits */ unsigned sym; /* index of code symbols */ unsigned min, max; /* minimum and maximum code lengths */ unsigned root; /* number of index bits for root table */ unsigned curr; /* number of index bits for current table */ unsigned drop; /* code bits to drop for sub-table */ int left; /* number of prefix codes available */ unsigned used; /* code entries in table used */ unsigned huff; /* Huffman code */ unsigned incr; /* for incrementing code, index */ unsigned fill; /* index for replicating entries */ unsigned low; /* low bits for current root entry */ unsigned mask; /* mask for low root bits */ code here; /* table entry for duplication */ code FAR *next; /* next available space in table */ const unsigned short FAR *base; /* base value table to use */ const unsigned short FAR *extra; /* extra bits table to use */ int end; /* use base and extra for symbol > end */ unsigned short count[MAXBITS+1]; /* number of codes of each length */ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ static const unsigned short lbase[31] = { /* Length codes 257..285 base */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const unsigned short lext[31] = { /* Length codes 257..285 extra */ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 203, 198}; static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const unsigned short dext[32] = { /* Distance codes 0..29 extra */ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 64, 64}; /* Process a set of code lengths to create a canonical Huffman code. The code lengths are lens[0..codes-1]. Each length corresponds to the symbols 0..codes-1. The Huffman code is generated by first sorting the symbols by length from short to long, and retaining the symbol order for codes with equal lengths. Then the code starts with all zero bits for the first code of the shortest length, and the codes are integer increments for the same length, and zeros are appended as the length increases. For the deflate format, these bits are stored backwards from their more natural integer increment ordering, and so when the decoding tables are built in the large loop below, the integer codes are incremented backwards. This routine assumes, but does not check, that all of the entries in lens[] are in the range 0..MAXBITS. The caller must assure this. 1..MAXBITS is interpreted as that code length. zero means that that symbol does not occur in this code. The codes are sorted by computing a count of codes for each length, creating from that a table of starting indices for each length in the sorted table, and then entering the symbols in order in the sorted table. The sorted table is work[], with that space being provided by the caller. The length counts are used for other purposes as well, i.e. 
finding the minimum and maximum length codes, determining if there are any codes at all, checking for a valid set of lengths, and looking ahead at length counts to determine sub-table sizes when building the decoding tables. */ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ for (len = 0; len <= MAXBITS; len++) count[len] = 0; for (sym = 0; sym < codes; sym++) count[lens[sym]]++; /* bound code lengths, force root to be within code lengths */ root = *bits; for (max = MAXBITS; max >= 1; max--) if (count[max] != 0) break; if (root > max) root = max; if (max == 0) { /* no symbols to code at all */ here.op = (unsigned char)64; /* invalid code marker */ here.bits = (unsigned char)1; here.val = (unsigned short)0; *(*table)++ = here; /* make a table to force an error */ *(*table)++ = here; *bits = 1; return 0; /* no symbols, but wait for decoding to report error */ } for (min = 1; min < max; min++) if (count[min] != 0) break; if (root < min) root = min; /* check for an over-subscribed or incomplete set of lengths */ left = 1; for (len = 1; len <= MAXBITS; len++) { left <<= 1; left -= count[len]; if (left < 0) return -1; /* over-subscribed */ } if (left > 0 && (type == CODES || max != 1)) return -1; /* incomplete set */ /* generate offsets into symbol table for each length for sorting */ offs[1] = 0; for (len = 1; len < MAXBITS; len++) offs[len + 1] = offs[len] + count[len]; /* sort symbols by length, by symbol order within each length */ for (sym = 0; sym < codes; sym++) if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; /* Create and fill in decoding tables. In this loop, the table being filled is at next and has curr index bits. The code being used is huff with length len. That code is converted to an index by dropping drop bits off of the bottom. For codes where len is less than drop + curr, those top drop + curr - len bits are incremented through all values to fill the table with replicated entries. root is the number of index bits for the root table. When len exceeds root, sub-tables are created pointed to by the root entry with an index of the low root bits of huff. This is saved in low to check for when a new sub-table should be started. drop is zero when the root table is being filled, and drop is root when sub-tables are being filled. When a new sub-table is needed, it is necessary to look ahead in the code lengths to determine what size sub-table is needed. The length counts are used for this, and so count[] is decremented as codes are entered in the tables. used keeps track of how many table entries have been allocated from the provided *table space. It is checked for LENS and DIST tables against the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in the initial root table size constants. See the comments in inftrees.h for more information. sym increments through all symbols, and the loop terminates when all codes of length max, i.e. all codes, have been processed. This routine permits incomplete codes, so another loop after this one fills in the rest of the decoding tables with invalid code markers. 
*/ /* set up for code type */ switch (type) { case CODES: base = extra = work; /* dummy value--not used */ end = 19; break; case LENS: base = lbase; base -= 257; extra = lext; extra -= 257; end = 256; break; default: /* DISTS */ base = dbase; extra = dext; end = -1; } /* initialize state for loop */ huff = 0; /* starting code */ sym = 0; /* starting code symbol */ len = min; /* starting code length */ next = *table; /* current table to fill in */ curr = root; /* current table index bits */ drop = 0; /* current bits to drop from code for index */ low = (unsigned)(-1); /* trigger new sub-table when len > root */ used = 1U << root; /* use root table entries */ mask = used - 1; /* mask for comparing low */ /* check available table space */ if ((type == LENS && used > ENOUGH_LENS) || (type == DISTS && used > ENOUGH_DISTS)) return 1; /* process all codes and make table entries */ for (;;) { /* create table entry */ here.bits = (unsigned char)(len - drop); if ((int)(work[sym]) < end) { here.op = (unsigned char)0; here.val = work[sym]; } else if ((int)(work[sym]) > end) { here.op = (unsigned char)(extra[work[sym]]); here.val = base[work[sym]]; } else { here.op = (unsigned char)(32 + 64); /* end of block */ here.val = 0; } /* replicate for those indices with low len bits equal to huff */ incr = 1U << (len - drop); fill = 1U << curr; min = fill; /* save offset to next table */ do { fill -= incr; next[(huff >> drop) + fill] = here; } while (fill != 0); /* backwards increment the len-bit code huff */ incr = 1U << (len - 1); while (huff & incr) incr >>= 1; if (incr != 0) { huff &= incr - 1; huff += incr; } else huff = 0; /* go to next symbol, update count, len */ sym++; if (--(count[len]) == 0) { if (len == max) break; len = lens[work[sym]]; } /* create new sub-table if needed */ if (len > root && (huff & mask) != low) { /* if first time, transition to sub-tables */ if (drop == 0) drop = root; /* increment past last table */ next += min; /* here min is 1 << curr */ /* determine length of next table */ curr = len - drop; left = (int)(1 << curr); while (curr + drop < max) { left -= count[curr + drop]; if (left <= 0) break; curr++; left <<= 1; } /* check for enough space */ used += 1U << curr; if ((type == LENS && used > ENOUGH_LENS) || (type == DISTS && used > ENOUGH_DISTS)) return 1; /* point entry in root table to sub-table */ low = huff & mask; (*table)[low].op = (unsigned char)curr; (*table)[low].bits = (unsigned char)root; (*table)[low].val = (unsigned short)(next - *table); } } /* fill in remaining table entry if code is incomplete (guaranteed to have at most one remaining entry, since if the code is incomplete, the maximum code length that was allowed to get this far is one bit) */ if (huff != 0) { here.op = (unsigned char)64; /* invalid code marker */ here.bits = (unsigned char)(len - drop); here.val = (unsigned short)0; next[huff] = here; } /* set return parameters */ *table += used; *bits = root; return 0; }
C
zlib
1
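The zlib commit message above explains why the removed `base -= 257` idiom is undefined behaviour: a pointer may not be moved before the start of its array, even if it is only ever dereferenced at valid indices afterwards. A small sketch of the compliant alternative the message describes — keeping the pointer inside the array and subtracting the offset at the point of use; the table here is a four-entry toy, not zlib's real tables:

#include <stdio.h>

/* A toy base table indexed by symbols 257..260, like zlib's length codes. */
static const unsigned short lbase[4] = { 3, 4, 5, 6 };

/* Non-compliant idiom (what the patch removed):
 *     const unsigned short *base = lbase - 257;   // UB: points before lbase
 *     value = base[sym];
 * The subtraction itself is undefined, before any dereference happens. */

/* Compliant version: the pointer stays on the array, the offset is explicit. */
static unsigned short lookup(unsigned sym)
{
    const unsigned short *base = lbase;   /* always points at the array */
    const unsigned match = 257;           /* first symbol covered by the table */
    return base[sym - match];             /* index computed at the use site */
}

int main(void)
{
    for (unsigned sym = 257; sym <= 260; sym++)
        printf("sym %u -> base %u\n", sym, lookup(sym));
    return 0;
}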
CVE-2018-20839
https://www.cvedetails.com/cve/CVE-2018-20839/
CWE-255
https://github.com/systemd/systemd/commit/9725f1a10f80f5e0ae7d9b60547458622aeb322f
9725f1a10f80f5e0ae7d9b60547458622aeb322f
Merge pull request #12378 from rbalint/vt-kbd-reset-check VT kbd reset check
int acquire_terminal( const char *name, AcquireTerminalFlags flags, usec_t timeout) { _cleanup_close_ int notify = -1, fd = -1; usec_t ts = USEC_INFINITY; int r, wd = -1; assert(name); assert(IN_SET(flags & ~ACQUIRE_TERMINAL_PERMISSIVE, ACQUIRE_TERMINAL_TRY, ACQUIRE_TERMINAL_FORCE, ACQUIRE_TERMINAL_WAIT)); /* We use inotify to be notified when the tty is closed. We create the watch before checking if we can actually * acquire it, so that we don't lose any event. * * Note: strictly speaking this actually watches for the device being closed, it does *not* really watch * whether a tty loses its controlling process. However, unless some rogue process uses TIOCNOTTY on /dev/tty * *after* closing its tty otherwise this will not become a problem. As long as the administrator makes sure to * not configure any service on the same tty as an untrusted user this should not be a problem. (Which they * probably should not do anyway.) */ if ((flags & ~ACQUIRE_TERMINAL_PERMISSIVE) == ACQUIRE_TERMINAL_WAIT) { notify = inotify_init1(IN_CLOEXEC | (timeout != USEC_INFINITY ? IN_NONBLOCK : 0)); if (notify < 0) return -errno; wd = inotify_add_watch(notify, name, IN_CLOSE); if (wd < 0) return -errno; if (timeout != USEC_INFINITY) ts = now(CLOCK_MONOTONIC); } for (;;) { struct sigaction sa_old, sa_new = { .sa_handler = SIG_IGN, .sa_flags = SA_RESTART, }; if (notify >= 0) { r = flush_fd(notify); if (r < 0) return r; } /* We pass here O_NOCTTY only so that we can check the return value TIOCSCTTY and have a reliable way * to figure out if we successfully became the controlling process of the tty */ fd = open_terminal(name, O_RDWR|O_NOCTTY|O_CLOEXEC); if (fd < 0) return fd; /* Temporarily ignore SIGHUP, so that we don't get SIGHUP'ed if we already own the tty. */ assert_se(sigaction(SIGHUP, &sa_new, &sa_old) == 0); /* First, try to get the tty */ r = ioctl(fd, TIOCSCTTY, (flags & ~ACQUIRE_TERMINAL_PERMISSIVE) == ACQUIRE_TERMINAL_FORCE) < 0 ? -errno : 0; /* Reset signal handler to old value */ assert_se(sigaction(SIGHUP, &sa_old, NULL) == 0); /* Success? Exit the loop now! */ if (r >= 0) break; /* Any failure besides -EPERM? Fail, regardless of the mode. */ if (r != -EPERM) return r; if (flags & ACQUIRE_TERMINAL_PERMISSIVE) /* If we are in permissive mode, then EPERM is fine, turn this * into a success. Note that EPERM is also returned if we * already are the owner of the TTY. */ break; if (flags != ACQUIRE_TERMINAL_WAIT) /* If we are in TRY or FORCE mode, then propagate EPERM as EPERM */ return r; assert(notify >= 0); assert(wd >= 0); for (;;) { union inotify_event_buffer buffer; struct inotify_event *e; ssize_t l; if (timeout != USEC_INFINITY) { usec_t n; assert(ts != USEC_INFINITY); n = now(CLOCK_MONOTONIC); if (ts + timeout < n) return -ETIMEDOUT; r = fd_wait_for_event(notify, POLLIN, ts + timeout - n); if (r < 0) return r; if (r == 0) return -ETIMEDOUT; } l = read(notify, &buffer, sizeof(buffer)); if (l < 0) { if (IN_SET(errno, EINTR, EAGAIN)) continue; return -errno; } FOREACH_INOTIFY_EVENT(e, buffer, l) { if (e->mask & IN_Q_OVERFLOW) /* If we hit an inotify queue overflow, simply check if the terminal is up for grabs now. */ break; if (e->wd != wd || !(e->mask & IN_CLOSE)) /* Safety checks */ return -EIO; } break; } /* We close the tty fd here since if the old session ended our handle will be dead. It's important that * we do this after sleeping, so that we don't enter an endless loop. */ fd = safe_close(fd); } return TAKE_FD(fd); }
int acquire_terminal( const char *name, AcquireTerminalFlags flags, usec_t timeout) { _cleanup_close_ int notify = -1, fd = -1; usec_t ts = USEC_INFINITY; int r, wd = -1; assert(name); assert(IN_SET(flags & ~ACQUIRE_TERMINAL_PERMISSIVE, ACQUIRE_TERMINAL_TRY, ACQUIRE_TERMINAL_FORCE, ACQUIRE_TERMINAL_WAIT)); /* We use inotify to be notified when the tty is closed. We create the watch before checking if we can actually * acquire it, so that we don't lose any event. * * Note: strictly speaking this actually watches for the device being closed, it does *not* really watch * whether a tty loses its controlling process. However, unless some rogue process uses TIOCNOTTY on /dev/tty * *after* closing its tty otherwise this will not become a problem. As long as the administrator makes sure to * not configure any service on the same tty as an untrusted user this should not be a problem. (Which they * probably should not do anyway.) */ if ((flags & ~ACQUIRE_TERMINAL_PERMISSIVE) == ACQUIRE_TERMINAL_WAIT) { notify = inotify_init1(IN_CLOEXEC | (timeout != USEC_INFINITY ? IN_NONBLOCK : 0)); if (notify < 0) return -errno; wd = inotify_add_watch(notify, name, IN_CLOSE); if (wd < 0) return -errno; if (timeout != USEC_INFINITY) ts = now(CLOCK_MONOTONIC); } for (;;) { struct sigaction sa_old, sa_new = { .sa_handler = SIG_IGN, .sa_flags = SA_RESTART, }; if (notify >= 0) { r = flush_fd(notify); if (r < 0) return r; } /* We pass here O_NOCTTY only so that we can check the return value TIOCSCTTY and have a reliable way * to figure out if we successfully became the controlling process of the tty */ fd = open_terminal(name, O_RDWR|O_NOCTTY|O_CLOEXEC); if (fd < 0) return fd; /* Temporarily ignore SIGHUP, so that we don't get SIGHUP'ed if we already own the tty. */ assert_se(sigaction(SIGHUP, &sa_new, &sa_old) == 0); /* First, try to get the tty */ r = ioctl(fd, TIOCSCTTY, (flags & ~ACQUIRE_TERMINAL_PERMISSIVE) == ACQUIRE_TERMINAL_FORCE) < 0 ? -errno : 0; /* Reset signal handler to old value */ assert_se(sigaction(SIGHUP, &sa_old, NULL) == 0); /* Success? Exit the loop now! */ if (r >= 0) break; /* Any failure besides -EPERM? Fail, regardless of the mode. */ if (r != -EPERM) return r; if (flags & ACQUIRE_TERMINAL_PERMISSIVE) /* If we are in permissive mode, then EPERM is fine, turn this * into a success. Note that EPERM is also returned if we * already are the owner of the TTY. */ break; if (flags != ACQUIRE_TERMINAL_WAIT) /* If we are in TRY or FORCE mode, then propagate EPERM as EPERM */ return r; assert(notify >= 0); assert(wd >= 0); for (;;) { union inotify_event_buffer buffer; struct inotify_event *e; ssize_t l; if (timeout != USEC_INFINITY) { usec_t n; assert(ts != USEC_INFINITY); n = now(CLOCK_MONOTONIC); if (ts + timeout < n) return -ETIMEDOUT; r = fd_wait_for_event(notify, POLLIN, ts + timeout - n); if (r < 0) return r; if (r == 0) return -ETIMEDOUT; } l = read(notify, &buffer, sizeof(buffer)); if (l < 0) { if (IN_SET(errno, EINTR, EAGAIN)) continue; return -errno; } FOREACH_INOTIFY_EVENT(e, buffer, l) { if (e->mask & IN_Q_OVERFLOW) /* If we hit an inotify queue overflow, simply check if the terminal is up for grabs now. */ break; if (e->wd != wd || !(e->mask & IN_CLOSE)) /* Safety checks */ return -EIO; } break; } /* We close the tty fd here since if the old session ended our handle will be dead. It's important that * we do this after sleeping, so that we don't enter an endless loop. */ fd = safe_close(fd); } return TAKE_FD(fd); }
C
systemd
0
CVE-2017-18203
https://www.cvedetails.com/cve/CVE-2017-18203/
CWE-362
https://github.com/torvalds/linux/commit/b9a41d21dceadf8104812626ef85dc56ee8a60ed
b9a41d21dceadf8104812626ef85dc56ee8a60ed
dm: fix race between dm_get_from_kobject() and __dm_destroy() The following BUG_ON was hit when testing repeat creation and removal of DM devices: kernel BUG at drivers/md/dm.c:2919! CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44 Call Trace: [<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a [<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e [<ffffffff817b46d1>] ? mutex_lock+0x26/0x44 [<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf [<ffffffff811de257>] kernfs_seq_show+0x23/0x25 [<ffffffff81199118>] seq_read+0x16f/0x325 [<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f [<ffffffff8117b625>] __vfs_read+0x26/0x9d [<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44 [<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9 [<ffffffff8117be9d>] vfs_read+0x8f/0xcf [<ffffffff81193e34>] ? __fdget_pos+0x12/0x41 [<ffffffff8117c686>] SyS_read+0x4b/0x76 [<ffffffff817b606e>] system_call_fastpath+0x12/0x71 The bug can be easily triggered, if an extra delay (e.g. 10ms) is added between the test of DMF_FREEING & DMF_DELETING and dm_get() in dm_get_from_kobject(). To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and dm_get() are done in an atomic way, so _minor_lock is used. The other callers of dm_get() have also been checked to be OK: some callers invoke dm_get() under _minor_lock, some callers invoke it under _hash_lock, and dm_start_request() invoke it after increasing md->open_count. Cc: stable@vger.kernel.org Signed-off-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
int dm_create(int minor, struct mapped_device **result) { struct mapped_device *md; md = alloc_dev(minor); if (!md) return -ENXIO; dm_sysfs_init(md); *result = md; return 0; }
int dm_create(int minor, struct mapped_device **result) { struct mapped_device *md; md = alloc_dev(minor); if (!md) return -ENXIO; dm_sysfs_init(md); *result = md; return 0; }
C
linux
0
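For illustration, a sketch of the locking pattern the commit message in the record above describes, reconstructed from that description rather than quoted from the patch: the DMF_FREEING/DMF_DELETING test and the dm_get() happen under _minor_lock, so __dm_destroy() cannot slip in between them.

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	/* Flag test and reference grab form one atomic step under _minor_lock,
	 * closing the window in which __dm_destroy() could free md. */
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}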
CVE-2015-8955
https://www.cvedetails.com/cve/CVE-2015-8955/
CWE-264
https://github.com/torvalds/linux/commit/8fff105e13041e49b82f92eef034f363a6b1c071
8fff105e13041e49b82f92eef034f363a6b1c071
arm64: perf: reject groups spanning multiple HW PMUs The perf core implicitly rejects events spanning multiple HW PMUs, as in these cases the event->ctx will differ. However this validation is performed after pmu::event_init() is called in perf_init_event(), and thus pmu::event_init() may be called with a group leader from a different HW PMU. The ARM64 PMU driver does not take this fact into account, and when validating groups assumes that it can call to_arm_pmu(event->pmu) for any HW event. When the event in question is from another HW PMU this is wrong, and results in dereferencing garbage. This patch updates the ARM64 PMU driver to first test for and reject events from other PMUs, moving the to_arm_pmu and related logic after this test. Fixes a crash triggered by perf_fuzzer on Linux-4.0-rc2, with a CCI PMU present: Bad mode in Synchronous Abort handler detected, code 0x86000006 -- IABT (current EL) CPU: 0 PID: 1371 Comm: perf_fuzzer Not tainted 3.19.0+ #249 Hardware name: V2F-1XV7 Cortex-A53x2 SMM (DT) task: ffffffc07c73a280 ti: ffffffc07b0a0000 task.ti: ffffffc07b0a0000 PC is at 0x0 LR is at validate_event+0x90/0xa8 pc : [<0000000000000000>] lr : [<ffffffc000090228>] pstate: 00000145 sp : ffffffc07b0a3ba0 [< (null)>] (null) [<ffffffc0000907d8>] armpmu_event_init+0x174/0x3cc [<ffffffc00015d870>] perf_try_init_event+0x34/0x70 [<ffffffc000164094>] perf_init_event+0xe0/0x10c [<ffffffc000164348>] perf_event_alloc+0x288/0x358 [<ffffffc000164c5c>] SyS_perf_event_open+0x464/0x98c Code: bad PC value Also cleans up the code to use the arm_pmu only when we know that we are dealing with an arm pmu event. Cc: Will Deacon <will.deacon@arm.com> Acked-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Peter Ziljstra (Intel) <peterz@infradead.org> Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
armpmu_enable_percpu_irq(void *data) { unsigned int irq = *(unsigned int *)data; enable_percpu_irq(irq, IRQ_TYPE_NONE); }
armpmu_enable_percpu_irq(void *data) { unsigned int irq = *(unsigned int *)data; enable_percpu_irq(irq, IRQ_TYPE_NONE); }
C
linux
0
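As a rough sketch of the ordering described in the record above (simplified; names and surrounding checks are assumptions, not a verbatim copy of the patch), the group-validation helper must reject events owned by a different PMU before any ARM-PMU-specific state is touched:

static int validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
			  struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/* The event belongs to some other HW PMU (e.g. a CCI PMU); reject the
	 * group instead of calling to_arm_pmu() on foreign state. */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	/* Only now is it safe to treat this as an ARM PMU event. */
	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, &event->hw) >= 0;
}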
CVE-2016-1641
https://www.cvedetails.com/cve/CVE-2016-1641/
null
https://github.com/chromium/chromium/commit/75ca8ffd7bd7c58ace1144df05e1307d8d707662
75ca8ffd7bd7c58ace1144df05e1307d8d707662
Don't call WebContents::DownloadImage() callback if the WebContents were deleted BUG=583718 Review URL: https://codereview.chromium.org/1685343004 Cr-Commit-Position: refs/heads/master@{#375700}
void WebContentsImpl::CopyToFindPboard() { #if defined(OS_MACOSX) RenderFrameHost* focused_frame = GetFocusedFrame(); if (!focused_frame) return; focused_frame->Send( new InputMsg_CopyToFindPboard(focused_frame->GetRoutingID())); RecordAction(base::UserMetricsAction("CopyToFindPboard")); #endif }
void WebContentsImpl::CopyToFindPboard() { #if defined(OS_MACOSX) RenderFrameHost* focused_frame = GetFocusedFrame(); if (!focused_frame) return; focused_frame->Send( new InputMsg_CopyToFindPboard(focused_frame->GetRoutingID())); RecordAction(base::UserMetricsAction("CopyToFindPboard")); #endif }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
Apply behaviour change fix from upstream for previous XPath change. BUG=58731 TEST=NONE Review URL: http://codereview.chromium.org/4027006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@63572 0039d316-1c4b-4281-b951-d872f2087c98
xmlXPathCompFunctionCall(xmlXPathParserContextPtr ctxt) { xmlChar *name; xmlChar *prefix; int nbargs = 0; int sort = 1; name = xmlXPathParseQName(ctxt, &prefix); if (name == NULL) { xmlFree(prefix); XP_ERROR(XPATH_EXPR_ERROR); } SKIP_BLANKS; #ifdef DEBUG_EXPR if (prefix == NULL) xmlGenericError(xmlGenericErrorContext, "Calling function %s\n", name); else xmlGenericError(xmlGenericErrorContext, "Calling function %s:%s\n", prefix, name); #endif if (CUR != '(') { XP_ERROR(XPATH_EXPR_ERROR); } NEXT; SKIP_BLANKS; /* * Optimization for count(): we don't need the node-set to be sorted. */ if ((prefix == NULL) && (name[0] == 'c') && xmlStrEqual(name, BAD_CAST "count")) { sort = 0; } ctxt->comp->last = -1; if (CUR != ')') { while (CUR != 0) { int op1 = ctxt->comp->last; ctxt->comp->last = -1; xmlXPathCompileExpr(ctxt, sort); if (ctxt->error != XPATH_EXPRESSION_OK) { xmlFree(name); xmlFree(prefix); return; } PUSH_BINARY_EXPR(XPATH_OP_ARG, op1, ctxt->comp->last, 0, 0); nbargs++; if (CUR == ')') break; if (CUR != ',') { XP_ERROR(XPATH_EXPR_ERROR); } NEXT; SKIP_BLANKS; } } PUSH_LONG_EXPR(XPATH_OP_FUNCTION, nbargs, 0, 0, name, prefix); NEXT; SKIP_BLANKS; }
xmlXPathCompFunctionCall(xmlXPathParserContextPtr ctxt) { xmlChar *name; xmlChar *prefix; int nbargs = 0; int sort = 1; name = xmlXPathParseQName(ctxt, &prefix); if (name == NULL) { xmlFree(prefix); XP_ERROR(XPATH_EXPR_ERROR); } SKIP_BLANKS; #ifdef DEBUG_EXPR if (prefix == NULL) xmlGenericError(xmlGenericErrorContext, "Calling function %s\n", name); else xmlGenericError(xmlGenericErrorContext, "Calling function %s:%s\n", prefix, name); #endif if (CUR != '(') { XP_ERROR(XPATH_EXPR_ERROR); } NEXT; SKIP_BLANKS; /* * Optimization for count(): we don't need the node-set to be sorted. */ if ((prefix == NULL) && (name[0] == 'c') && xmlStrEqual(name, BAD_CAST "count")) { sort = 0; } ctxt->comp->last = -1; if (CUR != ')') { while (CUR != 0) { int op1 = ctxt->comp->last; ctxt->comp->last = -1; xmlXPathCompileExpr(ctxt, sort); if (ctxt->error != XPATH_EXPRESSION_OK) { xmlFree(name); xmlFree(prefix); return; } PUSH_BINARY_EXPR(XPATH_OP_ARG, op1, ctxt->comp->last, 0, 0); nbargs++; if (CUR == ')') break; if (CUR != ',') { XP_ERROR(XPATH_EXPR_ERROR); } NEXT; SKIP_BLANKS; } } PUSH_LONG_EXPR(XPATH_OP_FUNCTION, nbargs, 0, 0, name, prefix); NEXT; SKIP_BLANKS; }
C
Chrome
0
CVE-2018-10021
https://www.cvedetails.com/cve/CVE-2018-10021/
null
https://github.com/torvalds/linux/commit/318aaf34f1179b39fa9c30fa0f3288b645beee39
318aaf34f1179b39fa9c30fa0f3288b645beee39
scsi: libsas: defer ata device eh commands to libata When ata device doing EH, some commands still attached with tasks are not passed to libata when abort failed or recover failed, so libata did not handle these commands. After these commands done, sas task is freed, but ata qc is not freed. This will cause ata qc leak and trigger a warning like below: WARNING: CPU: 0 PID: 28512 at drivers/ata/libata-eh.c:4037 ata_eh_finish+0xb4/0xcc CPU: 0 PID: 28512 Comm: kworker/u32:2 Tainted: G W OE 4.14.0#1 ...... Call trace: [<ffff0000088b7bd0>] ata_eh_finish+0xb4/0xcc [<ffff0000088b8420>] ata_do_eh+0xc4/0xd8 [<ffff0000088b8478>] ata_std_error_handler+0x44/0x8c [<ffff0000088b8068>] ata_scsi_port_error_handler+0x480/0x694 [<ffff000008875fc4>] async_sas_ata_eh+0x4c/0x80 [<ffff0000080f6be8>] async_run_entry_fn+0x4c/0x170 [<ffff0000080ebd70>] process_one_work+0x144/0x390 [<ffff0000080ec100>] worker_thread+0x144/0x418 [<ffff0000080f2c98>] kthread+0x10c/0x138 [<ffff0000080855dc>] ret_from_fork+0x10/0x18 If ata qc leaked too many, ata tag allocation will fail and io blocked for ever. As suggested by Dan Williams, defer ata device commands to libata and merge sas_eh_finish_cmd() with sas_eh_defer_cmd(). libata will handle ata qcs correctly after this. Signed-off-by: Jason Yan <yanaijie@huawei.com> CC: Xiaofei Tan <tanxiaofei@huawei.com> CC: John Garry <john.garry@huawei.com> CC: Dan Williams <dan.j.williams@intel.com> Reviewed-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
static void sas_scsi_task_done(struct sas_task *task) { struct scsi_cmnd *sc = task->uldd_task; struct domain_device *dev = task->dev; struct sas_ha_struct *ha = dev->port->ha; unsigned long flags; spin_lock_irqsave(&dev->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) task = NULL; else ASSIGN_SAS_TASK(sc, NULL); spin_unlock_irqrestore(&dev->done_lock, flags); if (unlikely(!task)) { /* task will be completed by the error handler */ SAS_DPRINTK("task done but aborted\n"); return; } if (unlikely(!sc)) { SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); sas_free_task(task); return; } sas_end_task(sc, task); sc->scsi_done(sc); }
static void sas_scsi_task_done(struct sas_task *task) { struct scsi_cmnd *sc = task->uldd_task; struct domain_device *dev = task->dev; struct sas_ha_struct *ha = dev->port->ha; unsigned long flags; spin_lock_irqsave(&dev->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) task = NULL; else ASSIGN_SAS_TASK(sc, NULL); spin_unlock_irqrestore(&dev->done_lock, flags); if (unlikely(!task)) { /* task will be completed by the error handler */ SAS_DPRINTK("task done but aborted\n"); return; } if (unlikely(!sc)) { SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); sas_free_task(task); return; } sas_end_task(sc, task); sc->scsi_done(sc); }
C
linux
0
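A sketch of the deferral the commit message in the record above describes (reconstructed; the eh_ata_q list and some details are assumptions for illustration): instead of finishing an ATA command inside libsas, the command is parked on a per-host list so libata EH can complete it and release its qc.

static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* The abort has already completed, so there is no race with LLD
	 * completions; the sas task can be ended here. */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* Defer ATA commands to libata EH so the ata qc is freed
		 * and tags do not leak. */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* Non-ATA commands can be finished immediately. */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}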
CVE-2015-1335
https://www.cvedetails.com/cve/CVE-2015-1335/
CWE-59
https://github.com/lxc/lxc/commit/592fd47a6245508b79fe6ac819fe6d3b2c1289be
592fd47a6245508b79fe6ac819fe6d3b2c1289be
CVE-2015-1335: Protect container mounts against symlinks When a container starts up, lxc sets up the container's inital fstree by doing a bunch of mounting, guided by the container configuration file. The container config is owned by the admin or user on the host, so we do not try to guard against bad entries. However, since the mount target is in the container, it's possible that the container admin could divert the mount with symbolic links. This could bypass proper container startup (i.e. confinement of a root-owned container by the restrictive apparmor policy, by diverting the required write to /proc/self/attr/current), or bypass the (path-based) apparmor policy by diverting, say, /proc to /mnt in the container. To prevent this, 1. do not allow mounts to paths containing symbolic links 2. do not allow bind mounts from relative paths containing symbolic links. Details: Define safe_mount which ensures that the container has not inserted any symbolic links into any mount targets for mounts to be done during container setup. The host's mount path may contain symbolic links. As it is under the control of the administrator, that's ok. So safe_mount begins the check for symbolic links after the rootfs->mount, by opening that directory. It opens each directory along the path using openat() relative to the parent directory using O_NOFOLLOW. When the target is reached, it mounts onto /proc/self/fd/<targetfd>. Use safe_mount() in mount_entry(), when mounting container proc, and when needed. In particular, safe_mount() need not be used in any case where: 1. the mount is done in the container's namespace 2. the mount is for the container's rootfs 3. the mount is relative to a tmpfs or proc/sysfs which we have just safe_mount()ed ourselves Since we were using proc/net as a temporary placeholder for /proc/sys/net during container startup, and proc/net is a symbolic link, use proc/tty instead. Update the lxc.container.conf manpage with details about the new restrictions. Finally, add a testcase to test some symbolic link possibilities. Reported-by: Roman Fiedler Signed-off-by: Serge Hallyn <serge.hallyn@ubuntu.com> Acked-by: Stéphane Graber <stgraber@ubuntu.com>
static void lxc_cgroup_hierarchy_free(struct cgroup_hierarchy *h) { if (!h) return; lxc_free_array((void **)h->subsystems, free); free(h->all_mount_points); free(h); }
static void lxc_cgroup_hierarchy_free(struct cgroup_hierarchy *h) { if (!h) return; lxc_free_array((void **)h->subsystems, free); free(h->all_mount_points); free(h); }
C
lxc
0
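A minimal userspace sketch of the symlink-safe mount walk described in the record above, assuming a simplified signature (the real lxc safe_mount() takes the rootfs as a path and handles many more cases): each path component is opened with O_NOFOLLOW relative to its parent, and the mount is performed onto the still-open descriptor via /proc/self/fd, so a symlink planted inside the container cannot divert the target.

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/mount.h>
#include <unistd.h>

static int safe_mount_sketch(const char *src, const char *dest_rel,
			     const char *fstype, unsigned long flags,
			     const void *data, int rootfs_fd)
{
	char path[PATH_MAX], proc_fd[64];
	char *comp, *saveptr = NULL;
	int fd, next;

	fd = dup(rootfs_fd);
	if (fd < 0)
		return -1;

	strncpy(path, dest_rel, sizeof(path) - 1);
	path[sizeof(path) - 1] = '\0';

	for (comp = strtok_r(path, "/", &saveptr); comp;
	     comp = strtok_r(NULL, "/", &saveptr)) {
		/* O_NOFOLLOW makes the open fail if this component is a
		 * symlink, so the walk can never be diverted. */
		next = openat(fd, comp, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
		close(fd);
		if (next < 0)
			return -1;
		fd = next;
	}

	/* Mount onto the descriptor we validated, not onto a path that the
	 * kernel would re-resolve (and the container could re-point). */
	snprintf(proc_fd, sizeof(proc_fd), "/proc/self/fd/%d", fd);
	if (mount(src, proc_fd, fstype, flags, data) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}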
CVE-2014-0069
https://www.cvedetails.com/cve/CVE-2014-0069/
CWE-119
https://github.com/torvalds/linux/commit/5d81de8e8667da7135d3a32a964087c0faf5483f
5d81de8e8667da7135d3a32a964087c0faf5483f
cifs: ensure that uncached writes handle unmapped areas correctly It's possible for userland to pass down an iovec via writev() that has a bogus user pointer in it. If that happens and we're doing an uncached write, then we can end up getting less bytes than we expect from the call to iov_iter_copy_from_user. This is CVE-2014-0069 cifs_iovec_write isn't set up to handle that situation however. It'll blindly keep chugging through the page array and not filling those pages with anything useful. Worse yet, we'll later end up with a negative number in wdata->tailsz, which will confuse the sending routines and cause an oops at the very least. Fix this by having the copy phase of cifs_iovec_write stop copying data in this situation and send the last write as a short one. At the same time, we want to avoid sending a zero-length write to the server, so break out of the loop and set rc to -EFAULT if that happens. This also allows us to handle the case where no address in the iovec is valid. [Note: Marking this for stable on v3.4+ kernels, but kernels as old as v2.6.38 may have a similar problem and may need similar fix] Cc: <stable@vger.kernel.org> # v3.4+ Reviewed-by: Pavel Shilovsky <piastry@etersoft.ru> Reported-by: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Steve French <smfrench@gmail.com>
cifs_uncached_read_into_pages(struct TCP_Server_Info *server, struct cifs_readdata *rdata, unsigned int len) { int total_read = 0, result = 0; unsigned int i; unsigned int nr_pages = rdata->nr_pages; struct kvec iov; rdata->tailsz = PAGE_SIZE; for (i = 0; i < nr_pages; i++) { struct page *page = rdata->pages[i]; if (len >= PAGE_SIZE) { /* enough data to fill the page */ iov.iov_base = kmap(page); iov.iov_len = PAGE_SIZE; cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n", i, iov.iov_base, iov.iov_len); len -= PAGE_SIZE; } else if (len > 0) { /* enough for partial page, fill and zero the rest */ iov.iov_base = kmap(page); iov.iov_len = len; cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n", i, iov.iov_base, iov.iov_len); memset(iov.iov_base + len, '\0', PAGE_SIZE - len); rdata->tailsz = len; len = 0; } else { /* no need to hold page hostage */ rdata->pages[i] = NULL; rdata->nr_pages--; put_page(page); continue; } result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len); kunmap(page); if (result < 0) break; total_read += result; } return total_read > 0 ? total_read : result; }
cifs_uncached_read_into_pages(struct TCP_Server_Info *server, struct cifs_readdata *rdata, unsigned int len) { int total_read = 0, result = 0; unsigned int i; unsigned int nr_pages = rdata->nr_pages; struct kvec iov; rdata->tailsz = PAGE_SIZE; for (i = 0; i < nr_pages; i++) { struct page *page = rdata->pages[i]; if (len >= PAGE_SIZE) { /* enough data to fill the page */ iov.iov_base = kmap(page); iov.iov_len = PAGE_SIZE; cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n", i, iov.iov_base, iov.iov_len); len -= PAGE_SIZE; } else if (len > 0) { /* enough for partial page, fill and zero the rest */ iov.iov_base = kmap(page); iov.iov_len = len; cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n", i, iov.iov_base, iov.iov_len); memset(iov.iov_base + len, '\0', PAGE_SIZE - len); rdata->tailsz = len; len = 0; } else { /* no need to hold page hostage */ rdata->pages[i] = NULL; rdata->nr_pages--; put_page(page); continue; } result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len); kunmap(page); if (result < 0) break; total_read += result; } return total_read > 0 ? total_read : result; }
C
linux
0
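To make the copy-phase behaviour described in the record above concrete, here is a hypothetical helper (name and signature invented for illustration; in the real fix this logic sits inline in cifs_iovec_write): a short copy stops the loop so a short write is sent, and a total of zero tells the caller to bail out with -EFAULT instead of issuing a zero-length write.

static size_t fill_pages_from_iter(struct page **pages, unsigned long nr_pages,
				   struct iov_iter *it, size_t cur_len,
				   unsigned long *pages_used)
{
	unsigned long i;
	size_t bytes, copied, total = 0;

	for (i = 0; i < nr_pages && cur_len > 0; i++) {
		bytes = min_t(size_t, cur_len, PAGE_SIZE);
		copied = iov_iter_copy_from_user(pages[i], it, 0, bytes);
		iov_iter_advance(it, copied);
		total += copied;
		cur_len -= copied;

		if (copied < bytes) {
			/* Likely an unmapped address in the iovec: count this
			 * page only if it received any data, then stop. */
			if (copied)
				i++;
			break;
		}
	}

	/* Zero means the very first copy failed; the caller must return
	 * -EFAULT rather than send a zero-length write. */
	*pages_used = total ? i : 0;
	return total;
}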
CVE-2013-6051
https://www.cvedetails.com/cve/CVE-2013-6051/
null
https://git.savannah.gnu.org/gitweb/?p=quagga.git;a=commitdiff;h=8794e8d229dc9fe29ea31424883433d4880ef408
8794e8d229dc9fe29ea31424883433d4880ef408
null
attrhash_init (void) { attrhash = hash_create (attrhash_key_make, attrhash_cmp); }
attrhash_init (void) { attrhash = hash_create (attrhash_key_make, attrhash_cmp); }
C
savannah
0
CVE-2014-1713
https://www.cvedetails.com/cve/CVE-2014-1713/
CWE-399
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
f85a87ec670ad0fce9d98d90c9a705b72a288154
document.location bindings fix BUG=352374 R=jochen@chromium.org Review URL: https://codereview.chromium.org/196343011 git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void objMethodWithArgsMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { ExceptionState exceptionState(ExceptionState::ExecutionContext, "objMethodWithArgs", "TestObject", info.Holder(), info.GetIsolate()); if (UNLIKELY(info.Length() < 3)) { exceptionState.throwTypeError(ExceptionMessages::notEnoughArguments(3, info.Length())); exceptionState.throwIfNeeded(); return; } TestObject* imp = V8TestObject::toNative(info.Holder()); V8TRYCATCH_EXCEPTION_VOID(int, longArg, toInt32(info[0], exceptionState), exceptionState); V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, strArg, info[1]); V8TRYCATCH_VOID(TestObject*, objArg, V8TestObject::toNativeWithTypeCheck(info.GetIsolate(), info[2])); v8SetReturnValue(info, imp->objMethodWithArgs(longArg, strArg, objArg)); }
static void objMethodWithArgsMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { ExceptionState exceptionState(ExceptionState::ExecutionContext, "objMethodWithArgs", "TestObject", info.Holder(), info.GetIsolate()); if (UNLIKELY(info.Length() < 3)) { exceptionState.throwTypeError(ExceptionMessages::notEnoughArguments(3, info.Length())); exceptionState.throwIfNeeded(); return; } TestObject* imp = V8TestObject::toNative(info.Holder()); V8TRYCATCH_EXCEPTION_VOID(int, longArg, toInt32(info[0], exceptionState), exceptionState); V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, strArg, info[1]); V8TRYCATCH_VOID(TestObject*, objArg, V8TestObject::toNativeWithTypeCheck(info.GetIsolate(), info[2])); v8SetReturnValue(info, imp->objMethodWithArgs(longArg, strArg, objArg)); }
C
Chrome
0
CVE-2018-6079
https://www.cvedetails.com/cve/CVE-2018-6079/
CWE-200
https://github.com/chromium/chromium/commit/d128139d53e9268e87921e82d89b3f2053cb83fd
d128139d53e9268e87921e82d89b3f2053cb83fd
Fix tabs sharing TEXTURE_2D_ARRAY/TEXTURE_3D data. In linux and android, we are seeing an issue where texture data from one tab overwrites the texture data of another tab. This is happening for apps which are using webgl2 texture of type TEXTURE_2D_ARRAY/TEXTURE_3D. Due to a bug in virtual context save/restore code for above texture formats, the texture data is not properly restored while switching tabs. Hence texture data from one tab overwrites other. This CL has fix for that issue, an update for existing test expectations and a new unit test for this bug. Bug: 788448 Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: Ie933984cdd2d1381f42eb4638f730c8245207a28 Reviewed-on: https://chromium-review.googlesource.com/930327 Reviewed-by: Zhenyao Mo <zmo@chromium.org> Commit-Queue: vikas soni <vikassoni@chromium.org> Cr-Commit-Position: refs/heads/master@{#539111}
GLuint GetBufferId(const Buffer* buffer) { if (buffer) return buffer->service_id(); return 0; }
GLuint GetBufferId(const Buffer* buffer) { if (buffer) return buffer->service_id(); return 0; }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/6a13a6c2fbae0b3269743e6a141fdfe0d9ec9793
6a13a6c2fbae0b3269743e6a141fdfe0d9ec9793
Don't delete the current NavigationEntry when leaving an interstitial page. BUG=107182 TEST=See bug Review URL: http://codereview.chromium.org/8976014 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115189 0039d316-1c4b-4281-b951-d872f2087c98
FakeMalwareDetails(SafeBrowsingService* sb_service, TabContents* tab_contents, const SafeBrowsingService::UnsafeResource& unsafe_resource) : MalwareDetails(sb_service, tab_contents, unsafe_resource) { }
FakeMalwareDetails(SafeBrowsingService* sb_service, TabContents* tab_contents, const SafeBrowsingService::UnsafeResource& unsafe_resource) : MalwareDetails(sb_service, tab_contents, unsafe_resource) { }
C
Chrome
0
CVE-2016-5218
https://www.cvedetails.com/cve/CVE-2016-5218/
CWE-20
https://github.com/chromium/chromium/commit/45d901b56f578a74b19ba0d10fa5c4c467f19303
45d901b56f578a74b19ba0d10fa5c4c467f19303
Paint tab groups with the group color. * The background of TabGroupHeader now uses the group color. * The backgrounds of tabs in the group are tinted with the group color. This treatment, along with the colors chosen, are intended to be a placeholder. Bug: 905491 Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504 Commit-Queue: Bret Sepulveda <bsep@chromium.org> Reviewed-by: Taylor Bergquist <tbergquist@chromium.org> Cr-Commit-Position: refs/heads/master@{#660498}
Tab* tab() { return tab_; }
Tab* tab() { return tab_; }
C
Chrome
0
CVE-2017-11328
https://www.cvedetails.com/cve/CVE-2017-11328/
CWE-119
https://github.com/VirusTotal/yara/commit/4a342f01e5439b9bb901aff1c6c23c536baeeb3f
4a342f01e5439b9bb901aff1c6c23c536baeeb3f
Fix heap overflow (reported by Jurriaan Bremer) When setting a new array item with yr_object_array_set_item() the array size is doubled if the index for the new item is larger than the already allocated ones. No further checks were made to ensure that the index fits into the array after doubling its capacity. If the array capacity was for example 64, and a new object is assigned to an index larger than 128 the overflow occurs. As yr_object_array_set_item() is usually invoked with indexes that increase monotonically by one, this bug never triggered before. But the new "dotnet" module has the potential to allow the exploitation of this bug by scanning a specially crafted .NET binary.
YR_OBJECT* yr_object_array_get_item( YR_OBJECT* object, int flags, int index) { YR_OBJECT* result = NULL; YR_OBJECT_ARRAY* array; assert(object->type == OBJECT_TYPE_ARRAY); if (index < 0) return NULL; array = object_as_array(object); if (array->items != NULL && array->items->count > index) result = array->items->objects[index]; if (result == NULL && flags & OBJECT_CREATE) { yr_object_copy(array->prototype_item, &result); if (result != NULL) yr_object_array_set_item(object, result, index); } return result; }
YR_OBJECT* yr_object_array_get_item( YR_OBJECT* object, int flags, int index) { YR_OBJECT* result = NULL; YR_OBJECT_ARRAY* array; assert(object->type == OBJECT_TYPE_ARRAY); if (index < 0) return NULL; array = object_as_array(object); if (array->items != NULL && array->items->count > index) result = array->items->objects[index]; if (result == NULL && flags & OBJECT_CREATE) { yr_object_copy(array->prototype_item, &result); if (result != NULL) yr_object_array_set_item(object, result, index); } return result; }
C
yara
0
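A generic sketch of the fix idea stated in the record above (plain C, not the actual yara code): the backing allocation has to keep growing until the requested index fits, rather than being doubled exactly once.

#include <stdlib.h>
#include <string.h>

static int ensure_capacity(void ***objects, int *capacity, int index)
{
	int count = *capacity ? *capacity : 64;
	void **resized;

	if (index < 0)
		return -1;

	/* Doubling once is not enough when index >= 2 * old capacity;
	 * keep growing until the slot really exists. */
	while (count <= index)
		count *= 2;

	if (count != *capacity) {
		resized = realloc(*objects, (size_t) count * sizeof(void *));
		if (resized == NULL)
			return -1;
		/* Zero the newly added slots. */
		memset(resized + *capacity, 0,
		       (size_t) (count - *capacity) * sizeof(void *));
		*objects = resized;
		*capacity = count;
	}

	return 0;
}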
CVE-2016-9137
https://www.cvedetails.com/cve/CVE-2016-9137/
CWE-416
https://git.php.net/?p=php-src.git;a=commit;h=0e6fe3a4c96be2d3e88389a5776f878021b4c59f
0e6fe3a4c96be2d3e88389a5776f878021b4c59f
null
ZEND_API int zend_get_object_classname(const zval *object, const char **class_name, zend_uint *class_name_len TSRMLS_DC) /* {{{ */ { if (Z_OBJ_HT_P(object)->get_class_name == NULL || Z_OBJ_HT_P(object)->get_class_name(object, class_name, class_name_len, 0 TSRMLS_CC) != SUCCESS) { zend_class_entry *ce = Z_OBJCE_P(object); *class_name = ce->name; *class_name_len = ce->name_length; return 1; } return 0; } /* }}} */
ZEND_API int zend_get_object_classname(const zval *object, const char **class_name, zend_uint *class_name_len TSRMLS_DC) /* {{{ */ { if (Z_OBJ_HT_P(object)->get_class_name == NULL || Z_OBJ_HT_P(object)->get_class_name(object, class_name, class_name_len, 0 TSRMLS_CC) != SUCCESS) { zend_class_entry *ce = Z_OBJCE_P(object); *class_name = ce->name; *class_name_len = ce->name_length; return 1; } return 0; } /* }}} */
C
php
0
null
null
null
https://github.com/chromium/chromium/commit/a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
Apply behaviour change fix from upstream for previous XPath change. BUG=58731 TEST=NONE Review URL: http://codereview.chromium.org/4027006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@63572 0039d316-1c4b-4281-b951-d872f2087c98
xmlXPathNodeLeadingSorted (xmlNodeSetPtr nodes, xmlNodePtr node) { int i, l; xmlNodePtr cur; xmlNodeSetPtr ret; if (node == NULL) return(nodes); ret = xmlXPathNodeSetCreate(NULL); if (ret == NULL) return(ret); if (xmlXPathNodeSetIsEmpty(nodes) || (!xmlXPathNodeSetContains(nodes, node))) return(ret); l = xmlXPathNodeSetGetLength(nodes); for (i = 0; i < l; i++) { cur = xmlXPathNodeSetItem(nodes, i); if (cur == node) break; xmlXPathNodeSetAddUnique(ret, cur); } return(ret); }
xmlXPathNodeLeadingSorted (xmlNodeSetPtr nodes, xmlNodePtr node) { int i, l; xmlNodePtr cur; xmlNodeSetPtr ret; if (node == NULL) return(nodes); ret = xmlXPathNodeSetCreate(NULL); if (ret == NULL) return(ret); if (xmlXPathNodeSetIsEmpty(nodes) || (!xmlXPathNodeSetContains(nodes, node))) return(ret); l = xmlXPathNodeSetGetLength(nodes); for (i = 0; i < l; i++) { cur = xmlXPathNodeSetItem(nodes, i); if (cur == node) break; xmlXPathNodeSetAddUnique(ret, cur); } return(ret); }
C
Chrome
0
CVE-2014-4014
https://www.cvedetails.com/cve/CVE-2014-4014/
CWE-264
https://github.com/torvalds/linux/commit/23adbe12ef7d3d4195e80800ab36b37bee28cd03
23adbe12ef7d3d4195e80800ab36b37bee28cd03
fs,userns: Change inode_capable to capable_wrt_inode_uidgid The kernel has no concept of capabilities with respect to inodes; inodes exist independently of namespaces. For example, inode_capable(inode, CAP_LINUX_IMMUTABLE) would be nonsense. This patch changes inode_capable to check for uid and gid mappings and renames it to capable_wrt_inode_uidgid, which should make it more obvious what it does. Fixes CVE-2014-4014. Cc: Theodore Ts'o <tytso@mit.edu> Cc: Serge Hallyn <serge.hallyn@ubuntu.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Dave Chinner <david@fromorbit.com> Cc: stable@vger.kernel.org Signed-off-by: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
void unlock_new_inode(struct inode *inode) { lockdep_annotate_inode_mutex_key(inode); spin_lock(&inode->i_lock); WARN_ON(!(inode->i_state & I_NEW)); inode->i_state &= ~I_NEW; smp_mb(); wake_up_bit(&inode->i_state, __I_NEW); spin_unlock(&inode->i_lock); }
void unlock_new_inode(struct inode *inode) { lockdep_annotate_inode_mutex_key(inode); spin_lock(&inode->i_lock); WARN_ON(!(inode->i_state & I_NEW)); inode->i_state &= ~I_NEW; smp_mb(); wake_up_bit(&inode->i_state, __I_NEW); spin_unlock(&inode->i_lock); }
C
linux
0
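The helper named in the record above, reconstructed from the commit message as a sketch (the upstream version should be checked for exact details): a capability check against an inode only succeeds if the caller's user namespace actually maps the inode's uid and gid.

bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
{
	struct user_namespace *ns = current_user_ns();

	/* inode_capable() checked only the capability; requiring the uid/gid
	 * mapping is what makes the check meaningful across namespaces. */
	return ns_capable(ns, cap) &&
	       kuid_has_mapping(ns, inode->i_uid) &&
	       kgid_has_mapping(ns, inode->i_gid);
}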
CVE-2016-1705
https://www.cvedetails.com/cve/CVE-2016-1705/
null
https://github.com/chromium/chromium/commit/4afb628e068367d5b73440537555902cd12416f8
4afb628e068367d5b73440537555902cd12416f8
gpu/android : Add support for partial swap with surface control. Add support for PostSubBuffer to GLSurfaceEGLSurfaceControl. This should allow the display compositor to draw the minimum sub-rect necessary from the damage tracking in BufferQueue on the client-side, and also to pass this damage rect to the framework. R=piman@chromium.org Bug: 926020 Change-Id: I73d3320cab68250d4c6865bf21c5531682d8bf61 Reviewed-on: https://chromium-review.googlesource.com/c/1457467 Commit-Queue: Khushal <khushalsagar@chromium.org> Commit-Queue: Antoine Labour <piman@chromium.org> Reviewed-by: Antoine Labour <piman@chromium.org> Auto-Submit: Khushal <khushalsagar@chromium.org> Cr-Commit-Position: refs/heads/master@{#629852}
void Compositor::Initialize() { DCHECK(!CompositorImpl::IsInitialized()); g_initialized = true; }
void Compositor::Initialize() { DCHECK(!CompositorImpl::IsInitialized()); g_initialized = true; }
C
Chrome
0
CVE-2019-12984
https://www.cvedetails.com/cve/CVE-2019-12984/
CWE-476
https://github.com/torvalds/linux/commit/385097a3675749cbc9e97c085c0e5dfe4269ca51
385097a3675749cbc9e97c085c0e5dfe4269ca51
nfc: Ensure presence of required attributes in the deactivate_target handler Check that the NFC_ATTR_TARGET_INDEX attributes (in addition to NFC_ATTR_DEVICE_INDEX) are provided by the netlink client prior to accessing them. This prevents potential unhandled NULL pointer dereference exceptions which can be triggered by malicious user-mode programs, if they omit one or both of these attributes. Signed-off-by: Young Xiao <92siuyang@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
void nfc_genl_data_exit(struct nfc_genl_data *genl_data) { mutex_destroy(&genl_data->genl_data_mutex); }
void nfc_genl_data_exit(struct nfc_genl_data *genl_data) { mutex_destroy(&genl_data->genl_data_mutex); }
C
linux
0
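A sketch of the hardened handler described in the record above (reconstructed; minor details may differ from the actual patch): both netlink attributes are checked for presence before either is dereferenced.

static int nfc_genl_deactivate_target(struct sk_buff *skb,
				      struct genl_info *info)
{
	struct nfc_dev *dev;
	u32 device_idx, target_idx;
	int rc;

	/* A malicious user-mode program may omit either attribute. */
	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
	    !info->attrs[NFC_ATTR_TARGET_INDEX])
		return -EINVAL;

	device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);

	dev = nfc_get_device(device_idx);
	if (!dev)
		return -ENODEV;

	target_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);

	rc = nfc_deactivate_target(dev, target_idx, NFC_TARGET_MODE_SLEEP);

	nfc_put_device(dev);
	return rc;
}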
CVE-2013-2867
https://www.cvedetails.com/cve/CVE-2013-2867/
null
https://github.com/chromium/chromium/commit/d358f57009b85fb7440208afa5ba87636b491889
d358f57009b85fb7440208afa5ba87636b491889
Refactor to support default Bluetooth pairing delegate In order to support a default pairing delegate we need to move the agent service provider delegate implementation from BluetoothDevice to BluetoothAdapter while retaining the existing API. BUG=338492 TEST=device_unittests, unit_tests, browser_tests Review URL: https://codereview.chromium.org/148293003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@252216 0039d316-1c4b-4281-b951-d872f2087c98
BluetoothDeviceChromeOS::~BluetoothDeviceChromeOS() { }
BluetoothDeviceChromeOS::~BluetoothDeviceChromeOS() { }
C
Chrome
0
CVE-2015-6787
https://www.cvedetails.com/cve/CVE-2015-6787/
null
https://github.com/chromium/chromium/commit/f911e11e7f6b5c0d6f5ee694a9871de6619889f7
f911e11e7f6b5c0d6f5ee694a9871de6619889f7
Reland "[CI] Make paint property nodes non-ref-counted" This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7. Reason for revert: Retry in M69. Original change's description: > Revert "[CI] Make paint property nodes non-ref-counted" > > This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123. > > Reason for revert: Caused bugs found by clusterfuzz > > Original change's description: > > [CI] Make paint property nodes non-ref-counted > > > > Now all paint property nodes are owned by ObjectPaintProperties > > (and LocalFrameView temporarily before removing non-RLS mode). > > Others just use raw pointers or references. > > > > Bug: 833496 > > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 > > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae > > Reviewed-on: https://chromium-review.googlesource.com/1031101 > > Reviewed-by: Tien-Ren Chen <trchen@chromium.org> > > Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org> > > Cr-Commit-Position: refs/heads/master@{#554626} > > TBR=wangxianzhu@chromium.org,trchen@chromium.org,chrishtr@chromium.org > > Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f > No-Presubmit: true > No-Tree-Checks: true > No-Try: true > Bug: 833496,837932,837943 > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 > Reviewed-on: https://chromium-review.googlesource.com/1034292 > Reviewed-by: Xianzhu Wang <wangxianzhu@chromium.org> > Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org> > Cr-Commit-Position: refs/heads/master@{#554653} TBR=wangxianzhu@chromium.org,trchen@chromium.org,chrishtr@chromium.org # Not skipping CQ checks because original CL landed > 1 day ago. Bug: 833496, 837932, 837943 Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992 Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 Reviewed-on: https://chromium-review.googlesource.com/1083491 Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org> Reviewed-by: Xianzhu Wang <wangxianzhu@chromium.org> Cr-Commit-Position: refs/heads/master@{#563930}
static bool NeedsPaintOffsetTranslation(const LayoutObject& object) { if (!object.IsBoxModelObject()) return false; if (object.IsSVGForeignObject()) return false; const LayoutBoxModelObject& box_model = ToLayoutBoxModelObject(object); if (box_model.IsLayoutView()) { return true; } if (box_model.HasLayer() && box_model.Layer()->PaintsWithTransform( kGlobalPaintFlattenCompositingLayers)) { return true; } if (NeedsScrollOrScrollTranslation(object)) return true; if (NeedsPaintOffsetTranslationForScrollbars(box_model)) return true; if (NeedsSVGLocalToBorderBoxTransform(object)) return true; if (!RuntimeEnabledFeatures::SlimmingPaintV2Enabled() && (object.IsLayoutBlock() || object.IsLayoutReplaced()) && object.HasLayer() && !ToLayoutBoxModelObject(object).Layer()->EnclosingPaginationLayer() && object.GetCompositingState() == kPaintsIntoOwnBacking) return true; return false; }
static bool NeedsPaintOffsetTranslation(const LayoutObject& object) { if (!object.IsBoxModelObject()) return false; if (object.IsSVGForeignObject()) return false; const LayoutBoxModelObject& box_model = ToLayoutBoxModelObject(object); if (box_model.IsLayoutView()) { return true; } if (box_model.HasLayer() && box_model.Layer()->PaintsWithTransform( kGlobalPaintFlattenCompositingLayers)) { return true; } if (NeedsScrollOrScrollTranslation(object)) return true; if (NeedsPaintOffsetTranslationForScrollbars(box_model)) return true; if (NeedsSVGLocalToBorderBoxTransform(object)) return true; if (!RuntimeEnabledFeatures::SlimmingPaintV2Enabled() && (object.IsLayoutBlock() || object.IsLayoutReplaced()) && object.HasLayer() && !ToLayoutBoxModelObject(object).Layer()->EnclosingPaginationLayer() && object.GetCompositingState() == kPaintsIntoOwnBacking) return true; return false; }
C
Chrome
0
CVE-2016-2543
https://www.cvedetails.com/cve/CVE-2016-2543/
null
https://github.com/torvalds/linux/commit/030e2c78d3a91dd0d27fef37e91950dde333eba1
030e2c78d3a91dd0d27fef37e91950dde333eba1
ALSA: seq: Fix missing NULL check at remove_events ioctl snd_seq_ioctl_remove_events() calls snd_seq_fifo_clear() unconditionally even if there is no FIFO assigned, and this leads to an Oops due to NULL dereference. The fix is just to add a proper NULL check. Reported-by: Dmitry Vyukov <dvyukov@google.com> Tested-by: Dmitry Vyukov <dvyukov@google.com> Cc: <stable@vger.kernel.org> Signed-off-by: Takashi Iwai <tiwai@suse.de>
static int check_subscription_permission(struct snd_seq_client *client, struct snd_seq_client_port *sport, struct snd_seq_client_port *dport, struct snd_seq_port_subscribe *subs) { if (client->number != subs->sender.client && client->number != subs->dest.client) { /* connection by third client - check export permission */ if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT)) return -EPERM; if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT)) return -EPERM; } /* check read permission */ /* if sender or receiver is the subscribing client itself, * no permission check is necessary */ if (client->number != subs->sender.client) { if (! check_port_perm(sport, PERM_RD)) return -EPERM; } /* check write permission */ if (client->number != subs->dest.client) { if (! check_port_perm(dport, PERM_WR)) return -EPERM; } return 0; }
static int check_subscription_permission(struct snd_seq_client *client, struct snd_seq_client_port *sport, struct snd_seq_client_port *dport, struct snd_seq_port_subscribe *subs) { if (client->number != subs->sender.client && client->number != subs->dest.client) { /* connection by third client - check export permission */ if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT)) return -EPERM; if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT)) return -EPERM; } /* check read permission */ /* if sender or receiver is the subscribing client itself, * no permission check is necessary */ if (client->number != subs->sender.client) { if (! check_port_perm(sport, PERM_RD)) return -EPERM; } /* check write permission */ if (client->number != subs->dest.client) { if (! check_port_perm(dport, PERM_WR)) return -EPERM; } return 0; }
C
linux
0
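A sketch of the guarded ioctl described in the record above (abbreviated and reconstructed from the commit message, not quoted from the patch): the FIFO is only cleared when the client actually has one.

static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
				       void __user *arg)
{
	struct snd_seq_remove_events info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info.remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
		/* Clients without an assigned FIFO previously hit a NULL
		 * dereference here; check before clearing. */
		if (client->data.user.fifo)
			snd_seq_fifo_clear(client->data.user.fifo);
	}

	if (info.remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
		snd_seq_queue_remove_cells(client->number, &info);

	return 0;
}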
null
null
null
https://github.com/chromium/chromium/commit/4f1f3d0f03c79ddaace56f067cf28a27f9466b7d
4f1f3d0f03c79ddaace56f067cf28a27f9466b7d
Improve handling and testing of reparse points. BUG=28804 TEST=unit tests. Review URL: http://codereview.chromium.org/553080 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@37286 0039d316-1c4b-4281-b951-d872f2087c98
bool DeleteReparsePoint(HANDLE source) {
bool DeleteReparsePoint(HANDLE source) { DWORD returned; REPARSE_DATA_BUFFER data = {0}; data.ReparseTag = 0xa0000003; if (!DeviceIoControl(source, FSCTL_DELETE_REPARSE_POINT, &data, 8, NULL, 0, &returned, NULL)) { return false; } return true; }
C
Chrome
1
CVE-2016-2188
https://www.cvedetails.com/cve/CVE-2016-2188/
null
https://github.com/torvalds/linux/commit/4ec0ef3a82125efc36173062a50624550a900ae0
4ec0ef3a82125efc36173062a50624550a900ae0
USB: iowarrior: fix oops with malicious USB descriptors The iowarrior driver expects at least one valid endpoint. If given malicious descriptors that specify 0 for the number of endpoints, it will crash in the probe function. Ensure there is at least one endpoint on the interface before using it. The full report of this issue can be found here: http://seclists.org/bugtraq/2016/Mar/87 Reported-by: Ralf Spenneberg <ralf@spenneberg.net> Cc: stable <stable@vger.kernel.org> Signed-off-by: Josh Boyer <jwboyer@fedoraproject.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
static void iowarrior_callback(struct urb *urb) { struct iowarrior *dev = urb->context; int intr_idx; int read_idx; int aux_idx; int offset; int status = urb->status; int retval; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto exit; } spin_lock(&dev->intr_idx_lock); intr_idx = atomic_read(&dev->intr_idx); /* aux_idx become previous intr_idx */ aux_idx = (intr_idx == 0) ? (MAX_INTERRUPT_BUFFER - 1) : (intr_idx - 1); read_idx = atomic_read(&dev->read_idx); /* queue is not empty and it's interface 0 */ if ((intr_idx != read_idx) && (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0)) { /* + 1 for serial number */ offset = aux_idx * (dev->report_size + 1); if (!memcmp (dev->read_queue + offset, urb->transfer_buffer, dev->report_size)) { /* equal values on interface 0 will be ignored */ spin_unlock(&dev->intr_idx_lock); goto exit; } } /* aux_idx become next intr_idx */ aux_idx = (intr_idx == (MAX_INTERRUPT_BUFFER - 1)) ? 0 : (intr_idx + 1); if (read_idx == aux_idx) { /* queue full, dropping oldest input */ read_idx = (++read_idx == MAX_INTERRUPT_BUFFER) ? 0 : read_idx; atomic_set(&dev->read_idx, read_idx); atomic_set(&dev->overflow_flag, 1); } /* +1 for serial number */ offset = intr_idx * (dev->report_size + 1); memcpy(dev->read_queue + offset, urb->transfer_buffer, dev->report_size); *(dev->read_queue + offset + (dev->report_size)) = dev->serial_number++; atomic_set(&dev->intr_idx, aux_idx); spin_unlock(&dev->intr_idx_lock); /* tell the blocking read about the new data */ wake_up_interruptible(&dev->read_wait); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); }
static void iowarrior_callback(struct urb *urb) { struct iowarrior *dev = urb->context; int intr_idx; int read_idx; int aux_idx; int offset; int status = urb->status; int retval; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto exit; } spin_lock(&dev->intr_idx_lock); intr_idx = atomic_read(&dev->intr_idx); /* aux_idx become previous intr_idx */ aux_idx = (intr_idx == 0) ? (MAX_INTERRUPT_BUFFER - 1) : (intr_idx - 1); read_idx = atomic_read(&dev->read_idx); /* queue is not empty and it's interface 0 */ if ((intr_idx != read_idx) && (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0)) { /* + 1 for serial number */ offset = aux_idx * (dev->report_size + 1); if (!memcmp (dev->read_queue + offset, urb->transfer_buffer, dev->report_size)) { /* equal values on interface 0 will be ignored */ spin_unlock(&dev->intr_idx_lock); goto exit; } } /* aux_idx become next intr_idx */ aux_idx = (intr_idx == (MAX_INTERRUPT_BUFFER - 1)) ? 0 : (intr_idx + 1); if (read_idx == aux_idx) { /* queue full, dropping oldest input */ read_idx = (++read_idx == MAX_INTERRUPT_BUFFER) ? 0 : read_idx; atomic_set(&dev->read_idx, read_idx); atomic_set(&dev->overflow_flag, 1); } /* +1 for serial number */ offset = intr_idx * (dev->report_size + 1); memcpy(dev->read_queue + offset, urb->transfer_buffer, dev->report_size); *(dev->read_queue + offset + (dev->report_size)) = dev->serial_number++; atomic_set(&dev->intr_idx, aux_idx); spin_unlock(&dev->intr_idx_lock); /* tell the blocking read about the new data */ wake_up_interruptible(&dev->read_wait); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); }
C
linux
0
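The probe-time validation described in the record above, expressed as a hypothetical helper for illustration (the actual patch performs this check inline in iowarrior_probe() before the endpoint loop):

static int iowarrior_check_endpoints(struct usb_interface *interface)
{
	struct usb_host_interface *iface_desc = interface->cur_altsetting;

	/* A malicious or broken device may report zero endpoints; reject it
	 * before any endpoint descriptor is dereferenced. */
	if (iface_desc->desc.bNumEndpoints < 1) {
		dev_err(&interface->dev, "Invalid number of endpoints\n");
		return -EINVAL;
	}

	return 0;
}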
CVE-2018-14358
https://www.cvedetails.com/cve/CVE-2018-14358/
CWE-119
https://github.com/neomutt/neomutt/commit/1b0f0d0988e6df4e32e9f4bf8780846ea95d4485
1b0f0d0988e6df4e32e9f4bf8780846ea95d4485
Don't overflow stack buffer in msg_parse_fetch
static void alloc_msn_index(struct ImapData *idata, size_t msn_count) { size_t new_size; if (msn_count <= idata->msn_index_size) return; /* This is a conservative check to protect against a malicious imap * server. Most likely size_t is bigger than an unsigned int, but * if msn_count is this big, we have a serious problem. */ if (msn_count >= (UINT_MAX / sizeof(struct Header *))) { mutt_error(_("Integer overflow -- can't allocate memory.")); mutt_exit(1); } /* Add a little padding, like mx_allloc_memory() */ new_size = msn_count + 25; if (!idata->msn_index) idata->msn_index = mutt_mem_calloc(new_size, sizeof(struct Header *)); else { mutt_mem_realloc(&idata->msn_index, sizeof(struct Header *) * new_size); memset(idata->msn_index + idata->msn_index_size, 0, sizeof(struct Header *) * (new_size - idata->msn_index_size)); } idata->msn_index_size = new_size; }
static void alloc_msn_index(struct ImapData *idata, size_t msn_count) { size_t new_size; if (msn_count <= idata->msn_index_size) return; /* This is a conservative check to protect against a malicious imap * server. Most likely size_t is bigger than an unsigned int, but * if msn_count is this big, we have a serious problem. */ if (msn_count >= (UINT_MAX / sizeof(struct Header *))) { mutt_error(_("Integer overflow -- can't allocate memory.")); mutt_exit(1); } /* Add a little padding, like mx_allloc_memory() */ new_size = msn_count + 25; if (!idata->msn_index) idata->msn_index = mutt_mem_calloc(new_size, sizeof(struct Header *)); else { mutt_mem_realloc(&idata->msn_index, sizeof(struct Header *) * new_size); memset(idata->msn_index + idata->msn_index_size, 0, sizeof(struct Header *) * (new_size - idata->msn_index_size)); } idata->msn_index_size = new_size; }
C
neomutt
0
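A generic sketch of the bounded-copy pattern behind the one-line commit message in the record above (plain C, not the actual neomutt code): when a server-controlled token is copied into a fixed-size stack buffer, the copy must stop at the buffer bound. Assumes tmplen >= 1.

#include <stddef.h>

static void copy_quoted_token(const char **s, char *tmp, size_t tmplen)
{
	char *p = tmp;

	/* Copy up to the closing quote, but never past tmp[tmplen - 1],
	 * which is reserved for the terminating NUL. */
	while (**s && **s != '"') {
		if ((size_t)(p - tmp) < tmplen - 1)
			*p++ = **s;
		(*s)++;
	}
	*p = '\0';
}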
CVE-2010-1166
https://www.cvedetails.com/cve/CVE-2010-1166/
CWE-189
https://cgit.freedesktop.org/xorg/xserver/commit/?id=d2f813f7db
d2f813f7db157fc83abc4b3726821c36ee7e40b1
null
fbCombineOverReverseC (CARD32 *dest, CARD32 *src, CARD32 *mask, int width) { int i; for (i = 0; i < width; ++i) { CARD32 d = READ(dest + i); CARD32 a = ~d >> 24; if (a) { CARD32 s = READ(src + i); CARD32 m = READ(mask + i); fbCombineMaskValueC (&s, &m); if (a != 0xff) { FbByteMulAdd(s, a, d); } WRITE(dest + i, s); } } }
fbCombineOverReverseC (CARD32 *dest, CARD32 *src, CARD32 *mask, int width) { int i; for (i = 0; i < width; ++i) { CARD32 d = READ(dest + i); CARD32 a = ~d >> 24; if (a) { CARD32 s = READ(src + i); CARD32 m = READ(mask + i); fbCombineMaskValueC (&s, &m); if (a != 0xff) { FbByteMulAdd(s, a, d); } WRITE(dest + i, s); } } }
C
xserver
0
CVE-2017-14170
https://www.cvedetails.com/cve/CVE-2017-14170/
CWE-834
https://github.com/FFmpeg/FFmpeg/commit/900f39692ca0337a98a7cf047e4e2611071810c2
900f39692ca0337a98a7cf047e4e2611071810c2
avformat/mxfdec: Fix DoS issues in mxf_read_index_entry_array() Fixes: 20170829A.mxf Co-Author: 张洪亮(望初)" <wangchu.zhl@alibaba-inc.com> Found-by: Xiaohei and Wangchu from Alibaba Security Team Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
static int mxf_set_audio_pts(MXFContext *mxf, AVCodecParameters *par, AVPacket *pkt) { MXFTrack *track = mxf->fc->streams[pkt->stream_index]->priv_data; int64_t bits_per_sample = par->bits_per_coded_sample; if (!bits_per_sample) bits_per_sample = av_get_bits_per_sample(par->codec_id); pkt->pts = track->sample_count; if ( par->channels <= 0 || bits_per_sample <= 0 || par->channels * (int64_t)bits_per_sample < 8) return AVERROR(EINVAL); track->sample_count += pkt->size / (par->channels * (int64_t)bits_per_sample / 8); return 0; }
static int mxf_set_audio_pts(MXFContext *mxf, AVCodecParameters *par, AVPacket *pkt) { MXFTrack *track = mxf->fc->streams[pkt->stream_index]->priv_data; int64_t bits_per_sample = par->bits_per_coded_sample; if (!bits_per_sample) bits_per_sample = av_get_bits_per_sample(par->codec_id); pkt->pts = track->sample_count; if ( par->channels <= 0 || bits_per_sample <= 0 || par->channels * (int64_t)bits_per_sample < 8) return AVERROR(EINVAL); track->sample_count += pkt->size / (par->channels * (int64_t)bits_per_sample / 8); return 0; }
C
FFmpeg
0
CVE-2013-0885
https://www.cvedetails.com/cve/CVE-2013-0885/
CWE-264
https://github.com/chromium/chromium/commit/f335421145bb7f82c60fb9d61babcd6ce2e4b21e
f335421145bb7f82c60fb9d61babcd6ce2e4b21e
Tighten restrictions on hosted apps calling extension APIs Only allow component apps to make any API calls, and for them only allow the namespaces they explicitly have permission for (plus chrome.test - I need to see if I can rework some WebStore tests to remove even this). BUG=172369 Review URL: https://chromiumcodereview.appspot.com/12095095 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180426 0039d316-1c4b-4281-b951-d872f2087c98
bool Extension::CanSpecifyHostPermission(const URLPattern& pattern, const APIPermissionSet& permissions) const { if (!pattern.match_all_urls() && pattern.MatchesScheme(chrome::kChromeUIScheme)) { if (pattern.host() == chrome::kChromeUIFaviconHost) return true; if (pattern.host() == chrome::kChromeUIThumbnailHost) { return permissions.find(APIPermission::kExperimental) != permissions.end(); } if (CanExecuteScriptEverywhere()) return true; return false; } return true; }
bool Extension::CanSpecifyHostPermission(const URLPattern& pattern, const APIPermissionSet& permissions) const { if (!pattern.match_all_urls() && pattern.MatchesScheme(chrome::kChromeUIScheme)) { if (pattern.host() == chrome::kChromeUIFaviconHost) return true; if (pattern.host() == chrome::kChromeUIThumbnailHost) { return permissions.find(APIPermission::kExperimental) != permissions.end(); } if (CanExecuteScriptEverywhere()) return true; return false; } return true; }
C
Chrome
0
CVE-2014-3515
https://www.cvedetails.com/cve/CVE-2014-3515/
null
https://git.php.net/?p=php-src.git;a=commit;h=88223c5245e9b470e1e6362bfd96829562ffe6ab
88223c5245e9b470e1e6362bfd96829562ffe6ab
null
static HashTable* spl_array_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(obj TSRMLS_CC); zval *tmp, *storage; int name_len; char *zname; zend_class_entry *base; *is_temp = 0; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } if (HASH_OF(intern->array) == intern->std.properties) { return intern->std.properties; } else { if (intern->debug_info == NULL) { ALLOC_HASHTABLE(intern->debug_info); ZEND_INIT_SYMTABLE_EX(intern->debug_info, zend_hash_num_elements(intern->std.properties) + 1, 0); } if (intern->debug_info->nApplyCount == 0) { zend_hash_clean(intern->debug_info); zend_hash_copy(intern->debug_info, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); storage = intern->array; zval_add_ref(&storage); base = (Z_OBJ_HT_P(obj) == &spl_handler_ArrayIterator) ? spl_ce_ArrayIterator : spl_ce_ArrayObject; zname = spl_gen_private_prop_name(base, "storage", sizeof("storage")-1, &name_len TSRMLS_CC); zend_symtable_update(intern->debug_info, zname, name_len+1, &storage, sizeof(zval *), NULL); efree(zname); } return intern->debug_info; } } /* }}} */
static HashTable* spl_array_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(obj TSRMLS_CC); zval *tmp, *storage; int name_len; char *zname; zend_class_entry *base; *is_temp = 0; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } if (HASH_OF(intern->array) == intern->std.properties) { return intern->std.properties; } else { if (intern->debug_info == NULL) { ALLOC_HASHTABLE(intern->debug_info); ZEND_INIT_SYMTABLE_EX(intern->debug_info, zend_hash_num_elements(intern->std.properties) + 1, 0); } if (intern->debug_info->nApplyCount == 0) { zend_hash_clean(intern->debug_info); zend_hash_copy(intern->debug_info, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); storage = intern->array; zval_add_ref(&storage); base = (Z_OBJ_HT_P(obj) == &spl_handler_ArrayIterator) ? spl_ce_ArrayIterator : spl_ce_ArrayObject; zname = spl_gen_private_prop_name(base, "storage", sizeof("storage")-1, &name_len TSRMLS_CC); zend_symtable_update(intern->debug_info, zname, name_len+1, &storage, sizeof(zval *), NULL); efree(zname); } return intern->debug_info; } } /* }}} */
C
php
0
CVE-2019-5797
null
null
https://github.com/chromium/chromium/commit/ba169c14aa9cc2efd708a878ae21ff34f3898fe0
ba169c14aa9cc2efd708a878ae21ff34f3898fe0
Fixing BadMessageCallback usage by SessionStorage TBR: jam@chromium.org Bug: 916523 Change-Id: I027cc818cfba917906844ad2ec0edd7fa4761bd1 Reviewed-on: https://chromium-review.googlesource.com/c/1401604 Commit-Queue: Daniel Murphy <dmurph@chromium.org> Reviewed-by: Marijn Kruisselbrink <mek@chromium.org> Reviewed-by: Ken Rockot <rockot@google.com> Cr-Commit-Position: refs/heads/master@{#621772}
PaymentAppContextImpl* StoragePartitionImpl::GetPaymentAppContext() { return payment_app_context_.get(); }
PaymentAppContextImpl* StoragePartitionImpl::GetPaymentAppContext() { return payment_app_context_.get(); }
C
Chrome
0
CVE-2016-7970
https://www.cvedetails.com/cve/CVE-2016-7970/
CWE-119
https://github.com/libass/libass/pull/240/commits/08e754612019ed84d1db0d1fc4f5798248decd75
08e754612019ed84d1db0d1fc4f5798248decd75
Fix blur coefficient calculation buffer overflow Found by fuzzer test case id:000082,sig:11,src:002579,op:havoc,rep:8. Correctness should be checked, but this fixes the overflow for good.
static void find_best_method(BlurMethod *blur, double r2) { static const int index[][4] = { { 1, 2, 3, 4 }, { 1, 2, 3, 5 }, { 1, 2, 4, 6 }, }; double mu[5]; if (r2 < 1.9) { blur->level = blur->prefilter = blur->filter = 0; if (r2 < 0.5) { mu[2] = 0.085 * r2 * r2 * r2; mu[1] = 0.5 * r2 - 4 * mu[2]; mu[3] = mu[4] = 0; } else { calc_gauss(mu, 4, r2); } } else { double mul = 1; if (r2 < 6.693) { blur->level = 0; if (r2 < 2.8) blur->prefilter = 1; else if (r2 < 4.4) blur->prefilter = 2; else blur->prefilter = 3; blur->filter = blur->prefilter - 1; } else { frexp((r2 + 0.7) / 26.5, &blur->level); blur->level = (blur->level + 3) >> 1; mul = pow(0.25, blur->level); r2 *= mul; if (r2 < 3.15 - 1.5 * mul) blur->prefilter = 0; else if (r2 < 5.3 - 5.2 * mul) blur->prefilter = 1; else blur->prefilter = 2; blur->filter = blur->prefilter; } calc_coeff(mu + 1, index[blur->filter], blur->prefilter, r2, mul); } for (int i = 1; i <= 4; ++i) blur->coeff[i - 1] = (int)(0x10000 * mu[i] + 0.5); }
static void find_best_method(BlurMethod *blur, double r2) { static const int index[][4] = { { 1, 2, 3, 4 }, { 1, 2, 3, 5 }, { 1, 2, 4, 6 }, }; double mu[5]; if (r2 < 1.9) { blur->level = blur->prefilter = blur->filter = 0; if (r2 < 0.5) { mu[2] = 0.085 * r2 * r2 * r2; mu[1] = 0.5 * r2 - 4 * mu[2]; mu[3] = mu[4] = 0; } else { calc_gauss(mu, 4, r2); } } else { double mul = 1; if (r2 < 6.693) { blur->level = 0; if (r2 < 2.8) blur->prefilter = 1; else if (r2 < 4.4) blur->prefilter = 2; else blur->prefilter = 3; blur->filter = blur->prefilter - 1; } else { frexp((r2 + 0.7) / 26.5, &blur->level); blur->level = (blur->level + 3) >> 1; mul = pow(0.25, blur->level); r2 *= mul; if (r2 < 3.15 - 1.5 * mul) blur->prefilter = 0; else if (r2 < 5.3 - 5.2 * mul) blur->prefilter = 1; else blur->prefilter = 2; blur->filter = blur->prefilter; } calc_coeff(mu + 1, index[blur->filter], blur->prefilter, r2, mul); } for (int i = 1; i <= 4; ++i) blur->coeff[i - 1] = (int)(0x10000 * mu[i] + 0.5); }
C
libass
0
CVE-2016-7144
https://www.cvedetails.com/cve/CVE-2016-7144/
CWE-287
https://github.com/unrealircd/unrealircd/commit/f473e355e1dc422c4f019dbf86bc50ba1a34a766
f473e355e1dc422c4f019dbf86bc50ba1a34a766
Fix AUTHENTICATE bug
CMD_FUNC(m_svslogin) { if (!SASL_SERVER || MyClient(sptr) || (parc < 3) || !parv[3]) return 0; if (!stricmp(parv[1], me.name)) { aClient *target_p; /* is the PUID valid? */ if ((target_p = decode_puid(parv[2])) == NULL) return 0; if (target_p->user == NULL) make_user(target_p); strlcpy(target_p->user->svid, parv[3], sizeof(target_p->user->svid)); sendto_one(target_p, err_str(RPL_LOGGEDIN), me.name, BadPtr(target_p->name) ? "*" : target_p->name, BadPtr(target_p->name) ? "*" : target_p->name, BadPtr(target_p->user->username) ? "*" : target_p->user->username, BadPtr(target_p->user->realhost) ? "*" : target_p->user->realhost, target_p->user->svid, target_p->user->svid); return 0; } /* not for us; propagate. */ sendto_server(cptr, 0, 0, ":%s SVSLOGIN %s %s %s", sptr->name, parv[1], parv[2], parv[3]); return 0; }
CMD_FUNC(m_svslogin) { if (!SASL_SERVER || MyClient(sptr) || (parc < 3) || !parv[3]) return 0; if (!stricmp(parv[1], me.name)) { aClient *target_p; /* is the PUID valid? */ if ((target_p = decode_puid(parv[2])) == NULL) return 0; if (target_p->user == NULL) make_user(target_p); strlcpy(target_p->user->svid, parv[3], sizeof(target_p->user->svid)); sendto_one(target_p, err_str(RPL_LOGGEDIN), me.name, BadPtr(target_p->name) ? "*" : target_p->name, BadPtr(target_p->name) ? "*" : target_p->name, BadPtr(target_p->user->username) ? "*" : target_p->user->username, BadPtr(target_p->user->realhost) ? "*" : target_p->user->realhost, target_p->user->svid, target_p->user->svid); return 0; } /* not for us; propagate. */ sendto_server(cptr, 0, 0, ":%s SVSLOGIN %s %s %s", sptr->name, parv[1], parv[2], parv[3]); return 0; }
C
unrealircd
0
CVE-2014-1874
https://www.cvedetails.com/cve/CVE-2014-1874/
CWE-20
https://github.com/torvalds/linux/commit/2172fa709ab32ca60e86179dc67d0857be8e2c98
2172fa709ab32ca60e86179dc67d0857be8e2c98
SELinux: Fix kernel BUG on empty security contexts. Setting an empty security context (length=0) on a file will lead to incorrectly dereferencing the type and other fields of the security context structure, yielding a kernel BUG. As a zero-length security context is never valid, just reject all such security contexts whether coming from userspace via setxattr or coming from the filesystem upon a getxattr request by SELinux. Setting a security context value (empty or otherwise) unknown to SELinux in the first place is only possible for a root process (CAP_MAC_ADMIN), and, if running SELinux in enforcing mode, only if the corresponding SELinux mac_admin permission is also granted to the domain by policy. In Fedora policies, this is only allowed for specific domains such as livecd for setting down security contexts that are not defined in the build host policy. Reproducer: su setenforce 0 touch foo setfattr -n security.selinux foo Caveat: Relabeling or removing foo after doing the above may not be possible without booting with SELinux disabled. Any subsequent access to foo after doing the above will also trigger the BUG. BUG output from Matthew Thode: [ 473.893141] ------------[ cut here ]------------ [ 473.962110] kernel BUG at security/selinux/ss/services.c:654! [ 473.995314] invalid opcode: 0000 [#6] SMP [ 474.027196] Modules linked in: [ 474.058118] CPU: 0 PID: 8138 Comm: ls Tainted: G D I 3.13.0-grsec #1 [ 474.116637] Hardware name: Supermicro X8ST3/X8ST3, BIOS 2.0 07/29/10 [ 474.149768] task: ffff8805f50cd010 ti: ffff8805f50cd488 task.ti: ffff8805f50cd488 [ 474.183707] RIP: 0010:[<ffffffff814681c7>] [<ffffffff814681c7>] context_struct_compute_av+0xce/0x308 [ 474.219954] RSP: 0018:ffff8805c0ac3c38 EFLAGS: 00010246 [ 474.252253] RAX: 0000000000000000 RBX: ffff8805c0ac3d94 RCX: 0000000000000100 [ 474.287018] RDX: ffff8805e8aac000 RSI: 00000000ffffffff RDI: ffff8805e8aaa000 [ 474.321199] RBP: ffff8805c0ac3cb8 R08: 0000000000000010 R09: 0000000000000006 [ 474.357446] R10: 0000000000000000 R11: ffff8805c567a000 R12: 0000000000000006 [ 474.419191] R13: ffff8805c2b74e88 R14: 00000000000001da R15: 0000000000000000 [ 474.453816] FS: 00007f2e75220800(0000) GS:ffff88061fc00000(0000) knlGS:0000000000000000 [ 474.489254] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 474.522215] CR2: 00007f2e74716090 CR3: 00000005c085e000 CR4: 00000000000207f0 [ 474.556058] Stack: [ 474.584325] ffff8805c0ac3c98 ffffffff811b549b ffff8805c0ac3c98 ffff8805f1190a40 [ 474.618913] ffff8805a6202f08 ffff8805c2b74e88 00068800d0464990 ffff8805e8aac860 [ 474.653955] ffff8805c0ac3cb8 000700068113833a ffff880606c75060 ffff8805c0ac3d94 [ 474.690461] Call Trace: [ 474.723779] [<ffffffff811b549b>] ? lookup_fast+0x1cd/0x22a [ 474.778049] [<ffffffff81468824>] security_compute_av+0xf4/0x20b [ 474.811398] [<ffffffff8196f419>] avc_compute_av+0x2a/0x179 [ 474.843813] [<ffffffff8145727b>] avc_has_perm+0x45/0xf4 [ 474.875694] [<ffffffff81457d0e>] inode_has_perm+0x2a/0x31 [ 474.907370] [<ffffffff81457e76>] selinux_inode_getattr+0x3c/0x3e [ 474.938726] [<ffffffff81455cf6>] security_inode_getattr+0x1b/0x22 [ 474.970036] [<ffffffff811b057d>] vfs_getattr+0x19/0x2d [ 475.000618] [<ffffffff811b05e5>] vfs_fstatat+0x54/0x91 [ 475.030402] [<ffffffff811b063b>] vfs_lstat+0x19/0x1b [ 475.061097] [<ffffffff811b077e>] SyS_newlstat+0x15/0x30 [ 475.094595] [<ffffffff8113c5c1>] ? __audit_syscall_entry+0xa1/0xc3 [ 475.148405] [<ffffffff8197791e>] system_call_fastpath+0x16/0x1b [ 475.179201] Code: 00 48 85 c0 48 89 45 b8 75 02 0f 0b 48 8b 45 a0 48 8b 3d 45 d0 b6 00 8b 40 08 89 c6 ff ce e8 d1 b0 06 00 48 85 c0 49 89 c7 75 02 <0f> 0b 48 8b 45 b8 4c 8b 28 eb 1e 49 8d 7d 08 be 80 01 00 00 e8 [ 475.255884] RIP [<ffffffff814681c7>] context_struct_compute_av+0xce/0x308 [ 475.296120] RSP <ffff8805c0ac3c38> [ 475.328734] ---[ end trace f076482e9d754adc ]--- Reported-by: Matthew Thode <mthode@mthode.org> Signed-off-by: Stephen Smalley <sds@tycho.nsa.gov> Cc: stable@vger.kernel.org Signed-off-by: Paul Moore <pmoore@redhat.com>
static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask) { int i, fail = 0; for (i = 0; i < 4; i++) if (addr[i] != (input[i] & mask[i])) { fail = 1; break; } return !fail; }
static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask) { int i, fail = 0; for (i = 0; i < 4; i++) if (addr[i] != (input[i] & mask[i])) { fail = 1; break; } return !fail; }
C
linux
0
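The commit message for the CVE-2014-1874 record above describes the fix as simply refusing zero-length security contexts before any field of the parsed context can be dereferenced. As a hedged illustration only, not the actual kernel patch (whose entry points live under security/selinux/), here is a minimal user-space sketch of that guard; the function name set_security_context is invented for this example.

```c
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the real SELinux entry points: the fix the
 * commit message describes is to reject empty (length == 0) security
 * context values before any further parsing happens.
 */
static int set_security_context(const char *value, size_t size)
{
    if (!value || !size)          /* an empty context is never valid */
        return -EINVAL;

    /* ... real code would parse and validate the context string ... */
    return 0;
}

int main(void)
{
    printf("empty context -> %d\n", set_security_context("", 0));    /* -EINVAL */
    printf("valid context -> %d\n",
           set_security_context("system_u:object_r:etc_t:s0", 26));  /* 0 */
    return 0;
}
```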
CVE-2017-14106
https://www.cvedetails.com/cve/CVE-2017-14106/
CWE-369
https://github.com/torvalds/linux/commit/499350a5a6e7512d9ed369ed63a4244b6536f4f8
499350a5a6e7512d9ed369ed63a4244b6536f4f8
tcp: initialize rcv_mss to TCP_MIN_MSS instead of 0 When tcp_disconnect() is called, inet_csk_delack_init() sets icsk->icsk_ack.rcv_mss to 0. This could potentially cause tcp_recvmsg() => tcp_cleanup_rbuf() => __tcp_select_window() call path to have division by 0 issue. So this patch initializes rcv_mss to TCP_MIN_MSS instead of 0. Reported-by: Andrey Konovalov <andreyknvl@google.com> Signed-off-by: Wei Wang <weiwan@google.com> Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { const struct inet_connection_sock *icsk = inet_csk(sk); if (level != SOL_TCP) return icsk->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); return do_tcp_setsockopt(sk, level, optname, optval, optlen); }
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { const struct inet_connection_sock *icsk = inet_csk(sk); if (level != SOL_TCP) return icsk->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); return do_tcp_setsockopt(sk, level, optname, optval, optlen); }
C
linux
0
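The tcp commit message above reasons that a disconnect can leave icsk_ack.rcv_mss at 0 while __tcp_select_window() later divides by it, so the fix seeds the field with TCP_MIN_MSS. The sketch below is a hedged, user-space model of that hazard; struct rcv_state and select_window are invented stand-ins, and only the TCP_MIN_MSS constant mirrors the kernel's value.

```c
#include <stdio.h>

#define TCP_MIN_MSS 88U   /* same floor value the kernel defines */

/* Hypothetical miniature of the receiver state touched by the fix. */
struct rcv_state {
    unsigned int rcv_mss;
};

/* Without the fix, rcv_mss could be left at 0 after a disconnect,
 * and a later window calculation would divide by it. */
static unsigned int select_window(const struct rcv_state *rs,
                                  unsigned int free_space)
{
    /* free_space rounded down to a multiple of the receive MSS */
    return (free_space / rs->rcv_mss) * rs->rcv_mss;
}

int main(void)
{
    struct rcv_state rs = { .rcv_mss = TCP_MIN_MSS }; /* the fix: never 0 */
    printf("window = %u\n", select_window(&rs, 65535));
    return 0;
}
```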
CVE-2011-3896
https://www.cvedetails.com/cve/CVE-2011-3896/
CWE-119
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
5925dff83699508b5e2735afb0297dfb310e159d
Implement a bubble that appears at the top of the screen when a tab enters fullscreen mode via webkitRequestFullScreen(), telling the user how to exit fullscreen. This is implemented as an NSView rather than an NSWindow because the floating chrome that appears in presentation mode should overlap the bubble. Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac: the mode in which the UI is hidden, accessible by moving the cursor to the top of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode. On Lion, however, fullscreen mode does not imply presentation mode: in non-presentation fullscreen mode, the chrome is permanently shown. It is possible to switch between presentation mode and fullscreen mode using the presentation mode UI control. When a tab initiates fullscreen mode on Lion, we enter presentation mode if not in presentation mode already. When the user exits fullscreen mode using Chrome UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we return the user to the mode they were in before the tab entered fullscreen. BUG=14471 TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen. Need to test the Lion logic somehow, with no Lion trybots. BUG=96883 Original review http://codereview.chromium.org/7890056/ TBR=thakis Review URL: http://codereview.chromium.org/7920024 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
void Browser::UpdateBookmarkBarState(BookmarkBarStateChangeReason reason) { BookmarkBar::State state; if ((profile_->GetPrefs()->GetBoolean(prefs::kShowBookmarkBar) && profile_->GetPrefs()->GetBoolean(prefs::kEnableBookmarkBar)) && (!window_ || !window_->IsFullscreen())) { state = BookmarkBar::SHOW; } else { TabContentsWrapper* tab = GetSelectedTabContentsWrapper(); if (tab && tab->bookmark_tab_helper()->ShouldShowBookmarkBar()) state = BookmarkBar::DETACHED; else state = BookmarkBar::HIDDEN; } if (state == bookmark_bar_state_) return; bookmark_bar_state_ = state; if (!window_) return; // This is called from the constructor when window_ is NULL. if (reason == BOOKMARK_BAR_STATE_CHANGE_TAB_SWITCH) { return; } BookmarkBar::AnimateChangeType animate_type = (reason == BOOKMARK_BAR_STATE_CHANGE_PREF_CHANGE) ? BookmarkBar::ANIMATE_STATE_CHANGE : BookmarkBar::DONT_ANIMATE_STATE_CHANGE; window_->BookmarkBarStateChanged(animate_type); }
void Browser::UpdateBookmarkBarState(BookmarkBarStateChangeReason reason) { BookmarkBar::State state; if ((profile_->GetPrefs()->GetBoolean(prefs::kShowBookmarkBar) && profile_->GetPrefs()->GetBoolean(prefs::kEnableBookmarkBar)) && (!window_ || !window_->IsFullscreen())) { state = BookmarkBar::SHOW; } else { TabContentsWrapper* tab = GetSelectedTabContentsWrapper(); if (tab && tab->bookmark_tab_helper()->ShouldShowBookmarkBar()) state = BookmarkBar::DETACHED; else state = BookmarkBar::HIDDEN; } if (state == bookmark_bar_state_) return; bookmark_bar_state_ = state; if (!window_) return; // This is called from the constructor when window_ is NULL. if (reason == BOOKMARK_BAR_STATE_CHANGE_TAB_SWITCH) { return; } BookmarkBar::AnimateChangeType animate_type = (reason == BOOKMARK_BAR_STATE_CHANGE_PREF_CHANGE) ? BookmarkBar::ANIMATE_STATE_CHANGE : BookmarkBar::DONT_ANIMATE_STATE_CHANGE; window_->BookmarkBarStateChanged(animate_type); }
C
Chrome
0
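The CVE-2011-3896 commit message above explains the mode bookkeeping on Lion: tab-initiated fullscreen forces presentation mode if needed, and leaving fullscreen restores whichever mode the user was in before. A hedged sketch of that save-and-restore logic follows; the enum and struct names are invented for illustration and do not come from the Chromium tree.

```c
#include <stdio.h>

/* Hypothetical model of the mode bookkeeping the commit message
 * describes for Lion: tab-initiated fullscreen enters presentation
 * mode if needed, and exiting restores the prior mode. */
enum ui_mode { WINDOWED, FULLSCREEN_WITH_CHROME, PRESENTATION };

struct browser_state {
    enum ui_mode current;
    enum ui_mode saved;   /* mode to restore when the tab exits fullscreen */
};

static void tab_enters_fullscreen(struct browser_state *b)
{
    b->saved = b->current;
    if (b->current != PRESENTATION)
        b->current = PRESENTATION;   /* hide the chrome */
}

static void tab_exits_fullscreen(struct browser_state *b)
{
    b->current = b->saved;           /* return to the prior mode */
}

int main(void)
{
    struct browser_state b = { .current = FULLSCREEN_WITH_CHROME };
    tab_enters_fullscreen(&b);
    printf("during: %d\n", b.current);   /* PRESENTATION */
    tab_exits_fullscreen(&b);
    printf("after:  %d\n", b.current);   /* FULLSCREEN_WITH_CHROME */
    return 0;
}
```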
CVE-2018-6096
https://www.cvedetails.com/cve/CVE-2018-6096/
null
https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51
36f801fdbec07d116a6f4f07bb363f10897d6a51
If a page calls |window.focus()|, kick it out of fullscreen. BUG=776418, 800056 Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017 Reviewed-on: https://chromium-review.googlesource.com/852378 Reviewed-by: Nasko Oskov <nasko@chromium.org> Reviewed-by: Philip Jägenstedt <foolip@chromium.org> Commit-Queue: Avi Drissman <avi@chromium.org> Cr-Commit-Position: refs/heads/master@{#533790}
void RenderFrameImpl::UpdateSubresourceLoaderFactories( std::unique_ptr<URLLoaderFactoryBundleInfo> subresource_loaders) { DCHECK(loader_factories_); static_cast<URLLoaderFactoryBundle*>(loader_factories_.get()) ->Update(std::move(subresource_loaders)); }
void RenderFrameImpl::UpdateSubresourceLoaderFactories( std::unique_ptr<URLLoaderFactoryBundleInfo> subresource_loaders) { DCHECK(loader_factories_); static_cast<URLLoaderFactoryBundle*>(loader_factories_.get()) ->Update(std::move(subresource_loaders)); }
C
Chrome
0
CVE-2016-3839
https://www.cvedetails.com/cve/CVE-2016-3839/
CWE-284
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
472271b153c5dc53c28beac55480a8d8434b2d5c
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release
static inline int create_server_socket(const char* name) { int s = socket(AF_LOCAL, SOCK_STREAM, 0); if(s < 0) return -1; APPL_TRACE_DEBUG("covert name to android abstract name:%s", name); if(socket_local_server_bind(s, name, ANDROID_SOCKET_NAMESPACE_ABSTRACT) >= 0) { if(listen(s, 5) == 0) { APPL_TRACE_DEBUG("listen to local socket:%s, fd:%d", name, s); return s; } else APPL_TRACE_ERROR("listen to local socket:%s, fd:%d failed, errno:%d", name, s, errno); } else APPL_TRACE_ERROR("create local socket:%s fd:%d, failed, errno:%d", name, s, errno); close(s); return -1; }
static inline int create_server_socket(const char* name) { int s = socket(AF_LOCAL, SOCK_STREAM, 0); if(s < 0) return -1; APPL_TRACE_DEBUG("covert name to android abstract name:%s", name); if(socket_local_server_bind(s, name, ANDROID_SOCKET_NAMESPACE_ABSTRACT) >= 0) { if(listen(s, 5) == 0) { APPL_TRACE_DEBUG("listen to local socket:%s, fd:%d", name, s); return s; } else APPL_TRACE_ERROR("listen to local socket:%s, fd:%d failed, errno:%d", name, s, errno); } else APPL_TRACE_ERROR("create local socket:%s fd:%d, failed, errno:%d", name, s, errno); close(s); return -1; }
C
Android
0
CVE-2016-3839
https://www.cvedetails.com/cve/CVE-2016-3839/
CWE-284
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
472271b153c5dc53c28beac55480a8d8434b2d5c
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release
static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev, const struct audio_config *config) { UNUSED(dev); UNUSED(config); FNLOG(); return 320; }
static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev, const struct audio_config *config) { UNUSED(dev); UNUSED(config); FNLOG(); return 320; }
C
Android
0
null
null
null
https://github.com/chromium/chromium/commit/99844692ee805d18d5ee7fd9c62f14d2dffa2e06
99844692ee805d18d5ee7fd9c62f14d2dffa2e06
Removing unnecessary DCHECK from SafeBrowsing interstitial. BUG=30079 TEST=None. Review URL: http://codereview.chromium.org/1131003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42049 0039d316-1c4b-4281-b951-d872f2087c98
virtual void Run() { switch (action_) { case BLOCK: resource_dispatcher_host_->BlockRequestsForRoute( process_id_, render_view_host_id_); break; case RESUME: resource_dispatcher_host_->ResumeBlockedRequestsForRoute( process_id_, render_view_host_id_); break; case CANCEL: resource_dispatcher_host_->CancelBlockedRequestsForRoute( process_id_, render_view_host_id_); break; default: NOTREACHED(); } }
virtual void Run() { switch (action_) { case BLOCK: resource_dispatcher_host_->BlockRequestsForRoute( process_id_, render_view_host_id_); break; case RESUME: resource_dispatcher_host_->ResumeBlockedRequestsForRoute( process_id_, render_view_host_id_); break; case CANCEL: resource_dispatcher_host_->CancelBlockedRequestsForRoute( process_id_, render_view_host_id_); break; default: NOTREACHED(); } }
C
Chrome
0
CVE-2019-14763
https://www.cvedetails.com/cve/CVE-2019-14763/
CWE-189
https://github.com/torvalds/linux/commit/c91815b596245fd7da349ecc43c8def670d2269e
c91815b596245fd7da349ecc43c8def670d2269e
usb: dwc3: gadget: never call ->complete() from ->ep_queue() This is a requirement which has always existed but, somehow, wasn't reflected in the documentation and problems weren't found until now when Tuba Yavuz found a possible deadlock happening between dwc3 and f_hid. She described the situation as follows: spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire /* we our function has been disabled by host */ if (!hidg->req) { free_ep_req(hidg->in_ep, hidg->req); goto try_again; } [...] status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); => [...] => usb_gadget_giveback_request => f_hidg_req_complete => spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire Note that this happens because dwc3 would call ->complete() on a failed usb_ep_queue() due to failed Start Transfer command. This is, anyway, a theoretical situation because dwc3 currently uses "No Response Update Transfer" command for Bulk and Interrupt endpoints. It's still good to make this case impossible to happen even if the "No Reponse Update Transfer" command is changed. Reported-by: Tuba Yavuz <tuba@ece.ufl.edu> Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com> Cc: stable <stable@vger.kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct dwc3_request *req) { struct scatterlist *sg = req->sg; struct scatterlist *s; int i; for_each_sg(sg, s, req->num_pending_sgs, i) { unsigned int length = req->request.length; unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; unsigned chain = true; if (sg_is_last(s)) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; req->unaligned = true; /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, false, 0, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); } else { dwc3_prepare_one_trb(dep, req, chain, i); } if (!dwc3_calc_trbs_left(dep)) break; } }
static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct dwc3_request *req) { struct scatterlist *sg = req->sg; struct scatterlist *s; int i; for_each_sg(sg, s, req->num_pending_sgs, i) { unsigned int length = req->request.length; unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; unsigned chain = true; if (sg_is_last(s)) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; req->unaligned = true; /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, false, 0, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); } else { dwc3_prepare_one_trb(dep, req, chain, i); } if (!dwc3_calc_trbs_left(dep)) break; } }
C
linux
0
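The dwc3 commit message above walks through a lock-recursion deadlock: f_hid holds write_spinlock across usb_ep_queue(), and a synchronous ->complete() call from a failed queue re-acquires the same lock. Below is a hedged user-space model of the rule the patch enforces (report failure by return code, never invoke completion from inside the queue path); all names are invented for this example and pthreads stands in for the kernel spinlock.

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical model of the f_hid/dwc3 interaction from the commit
 * message: the caller holds write_lock across the queue call, so the
 * completion callback must never run synchronously from queue(). */
static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;

static void req_complete(void)
{
    /* In the buggy flow this ran while write_lock was already held,
     * so this second acquisition deadlocked the process. */
    pthread_mutex_lock(&write_lock);
    puts("completion ran");
    pthread_mutex_unlock(&write_lock);
}

/* Fixed behaviour sketched here: a failed queue only returns an error
 * and never calls the completion callback itself. */
static int ep_queue(int force_failure)
{
    if (force_failure)
        return -1;          /* do NOT call req_complete() from here */
    return 0;
}

int main(void)
{
    pthread_mutex_lock(&write_lock);
    int ret = ep_queue(1);          /* queue fails while lock is held */
    pthread_mutex_unlock(&write_lock);

    if (ret)
        req_complete();             /* safe: lock is no longer held */
    return 0;
}
```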
CVE-2015-4601
https://www.cvedetails.com/cve/CVE-2015-4601/
null
https://git.php.net/?p=php-src.git;a=commit;h=0c136a2abd49298b66acb0cad504f0f972f5bfe8
0c136a2abd49298b66acb0cad504f0f972f5bfe8
null
PHP_METHOD(SoapServer, addSoapHeader) { soapServicePtr service; zval *fault; soapHeader **p; SOAP_SERVER_BEGIN_CODE(); FETCH_THIS_SERVICE(service); if (!service || !service->soap_headers_ptr) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "The SoapServer::addSoapHeader function may be called only during SOAP request processing"); return; } if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &fault, soap_header_class_entry) == FAILURE) { return; } p = service->soap_headers_ptr; while (*p != NULL) { p = &(*p)->next; } *p = emalloc(sizeof(soapHeader)); memset(*p, 0, sizeof(soapHeader)); ZVAL_NULL(&(*p)->function_name); (*p)->retval = *fault; zval_copy_ctor(&(*p)->retval); SOAP_SERVER_END_CODE(); }
PHP_METHOD(SoapServer, addSoapHeader) { soapServicePtr service; zval *fault; soapHeader **p; SOAP_SERVER_BEGIN_CODE(); FETCH_THIS_SERVICE(service); if (!service || !service->soap_headers_ptr) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "The SoapServer::addSoapHeader function may be called only during SOAP request processing"); return; } if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &fault, soap_header_class_entry) == FAILURE) { return; } p = service->soap_headers_ptr; while (*p != NULL) { p = &(*p)->next; } *p = emalloc(sizeof(soapHeader)); memset(*p, 0, sizeof(soapHeader)); ZVAL_NULL(&(*p)->function_name); (*p)->retval = *fault; zval_copy_ctor(&(*p)->retval); SOAP_SERVER_END_CODE(); }
C
php
0