func
string
target
string
cwe
list
project
string
commit_id
string
hash
string
size
int64
message
string
vul
int64
static int r_cmd_java_print_json_definitions( RBinJavaObj *obj ) { DsoJsonObj *json_obj = r_bin_java_get_bin_obj_json (obj); char *str = dso_json_obj_to_str (json_obj); dso_json_obj_del (json_obj); // XXX memleak r_cons_println (str); return true; }
Safe
[ "CWE-703", "CWE-193" ]
radare2
ced0223c7a1b3b5344af315715cd28fe7c0d9ebc
1.4696624836935105e+38
7
Fix unmatched array length in core_java.c (issue #16304) (#16313)
0
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) { if (!(s->flags & SLAB_STORE_USER)) return; lockdep_assert_held(&n->list_lock); list_del(&page->slab_list); }
Safe
[]
linux
fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8
3.179648224375219e+37
8
mm: slub: add missing TID bump in kmem_cache_alloc_bulk() When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu freelist of length M, and N > M > 0, it will first remove the M elements from the percpu freelist, then call ___slab_alloc() to allocate the next element and repopulate the percpu freelist. ___slab_alloc() can re-enable IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc() to properly commit the freelist head change. Fix it by unconditionally bumping c->tid when entering the slowpath. Cc: stable@vger.kernel.org Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy") Signed-off-by: Jann Horn <jannh@google.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
MONGO_EXPORT const char *gridfile_get_contenttype( gridfile *gfile ) { bson_iterator it; if ( bson_find( &it, gfile->meta, "contentType" ) ) return bson_iterator_string( &it ); else return NULL; }
Safe
[ "CWE-190" ]
mongo-c-driver-legacy
1a1f5e26a4309480d88598913f9eebf9e9cba8ca
3.996265649315778e+37
7
don't mix up int and size_t (first pass to fix that)
0
static int __init vhost_vdpa_init(void) { int r; r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX, "vhost-vdpa"); if (r) goto err_alloc_chrdev; r = vdpa_register_driver(&vhost_vdpa_driver); if (r) goto err_vdpa_register_driver; return 0; err_vdpa_register_driver: unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX); err_alloc_chrdev: return r; }
Safe
[ "CWE-416" ]
linux
f6bbf0010ba004f5e90c7aefdebc0ee4bd3283b9
2.4559815566073493e+38
20
vhost-vdpa: fix use-after-free of v->config_ctx When the 'v->config_ctx' eventfd_ctx reference is released we didn't set it to NULL. So if the same character device (e.g. /dev/vhost-vdpa-0) is re-opened, the 'v->config_ctx' is invalid and calling again vhost_vdpa_config_put() causes use-after-free issues like the following refcount_t underflow: refcount_t: underflow; use-after-free. WARNING: CPU: 2 PID: 872 at lib/refcount.c:28 refcount_warn_saturate+0xae/0xf0 RIP: 0010:refcount_warn_saturate+0xae/0xf0 Call Trace: eventfd_ctx_put+0x5b/0x70 vhost_vdpa_release+0xcd/0x150 [vhost_vdpa] __fput+0x8e/0x240 ____fput+0xe/0x10 task_work_run+0x66/0xa0 exit_to_user_mode_prepare+0x118/0x120 syscall_exit_to_user_mode+0x21/0x50 ? __x64_sys_close+0x12/0x40 do_syscall_64+0x45/0x50 entry_SYSCALL_64_after_hwframe+0x44/0xae Fixes: 776f395004d8 ("vhost_vdpa: Support config interrupt in vdpa") Cc: lingshan.zhu@intel.com Cc: stable@vger.kernel.org Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> Link: https://lore.kernel.org/r/20210311135257.109460-2-sgarzare@redhat.com Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Zhu Lingshan <lingshan.zhu@intel.com> Acked-by: Jason Wang <jasowang@redhat.com>
0
_mpegts_section_get_structure (GstMpegtsSection * section) { GstStructure *st; GQuark quark; switch (section->section_type) { case GST_MPEGTS_SECTION_PAT: quark = QUARK_PAT; break; case GST_MPEGTS_SECTION_PMT: quark = QUARK_PMT; break; case GST_MPEGTS_SECTION_CAT: quark = QUARK_CAT; break; case GST_MPEGTS_SECTION_EIT: quark = QUARK_EIT; break; case GST_MPEGTS_SECTION_BAT: quark = QUARK_BAT; break; case GST_MPEGTS_SECTION_NIT: quark = QUARK_NIT; break; case GST_MPEGTS_SECTION_SDT: quark = QUARK_SDT; break; case GST_MPEGTS_SECTION_TDT: quark = QUARK_TDT; break; case GST_MPEGTS_SECTION_TOT: quark = QUARK_TOT; break; default: GST_DEBUG ("Creating structure for unknown GstMpegtsSection"); quark = QUARK_SECTION; break; } st = gst_structure_new_id (quark, QUARK_SECTION, MPEG_TYPE_TS_SECTION, section, NULL); return st; }
Safe
[ "CWE-125" ]
gst-plugins-bad
d58f668ece8795bddb3316832e1848c7b7cf38ac
7.817728121116862e+37
44
mpegtssection: Add more section size checks The smallest section ever needs to be at least 3 bytes (i.e. just the short header). Non-short headers need to be at least 11 bytes long (3 for the minimum header, 5 for the non-short header, and 4 for the CRC). https://bugzilla.gnome.org/show_bug.cgi?id=775048
0
GF_Err paen_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs);
Safe
[ "CWE-476", "CWE-787" ]
gpac
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
2.5911661397039803e+38
4
fixed #1757
0
static errno_t sdap_get_ad_tokengroups_recv(TALLOC_CTX *mem_ctx, struct tevent_req *req, size_t *_num_sids, char ***_sids) { struct sdap_get_ad_tokengroups_state *state = NULL; state = tevent_req_data(req, struct sdap_get_ad_tokengroups_state); TEVENT_REQ_RETURN_ON_ERROR(req); if (_num_sids != NULL) { *_num_sids = state->num_sids; } if (_sids != NULL) { *_sids = talloc_steal(mem_ctx, state->sids); } return EOK; }
Safe
[ "CWE-264" ]
sssd
191d7f7ce3de10d9e19eaa0a6ab3319bcd4ca95d
1.7456563417934266e+38
20
AD: process non-posix nested groups using tokenGroups When initgr is performed for AD supporting tokenGroups, do not skip non-posix groups. Resolves: https://fedorahosted.org/sssd/ticket/2343 Reviewed-by: Michal Židek <mzidek@redhat.com> (cherry picked from commit 4932db6258ccfb612a3a28eb6a618c2f042b9d58)
0
GF_Err styl_box_size(GF_Box *s) { GF_TextStyleBox*ptr = (GF_TextStyleBox*)s; s->size += 2 + ptr->entry_count * GPP_STYLE_SIZE; return GF_OK; }
Safe
[ "CWE-476" ]
gpac
d527325a9b72218612455a534a508f9e1753f76e
1.3569060632902639e+38
7
fixed #1768
0
void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) const { BSONArrayBuilder sub(b.subarrayStart(name)); for (const auto& username : _userNames) { BSONObjBuilder nameObj(sub.subobjStart()); nameObj << AuthorizationManager::USER_NAME_FIELD_NAME << username.getUser() << AuthorizationManager::USER_DB_FIELD_NAME << username.getDB(); } }
Safe
[ "CWE-613" ]
mongo
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
1.0262878544300455e+38
8
SERVER-38984 Validate unique User ID on UserCache hit
0
GBool isReverseVideo() { return gFalse; }
Safe
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
9.353900421075624e+37
1
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
0
void gf_media_get_reduced_frame_rate(u32 *timescale, u32 *sample_dur) { u32 res; if (!*sample_dur) return; res = *timescale / *sample_dur; if (res * (*sample_dur) == *timescale) { *timescale = res; *sample_dur = 1; } else if ((double)(*timescale * 1001 - (res + 1) * *sample_dur * 1000) / ((res + 1) * *sample_dur * 1000) < 0.001) { *timescale = (res + 1) * 1000; *sample_dur = 1001; } }
Safe
[ "CWE-190", "CWE-787" ]
gpac
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
3.237744249721091e+37
14
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
0
void rtnl_lock(void) { rtnl_shlock(); }
Safe
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
8.68574272223788e+37
4
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
0
static int inflate_get_next_window(STATE_PARAM_ONLY) { gunzip_outbuf_count = 0; while (1) { int ret; if (need_another_block) { if (end_reached) { calculate_gunzip_crc(PASS_STATE_ONLY); end_reached = 0; /* NB: need_another_block is still set */ return 0; /* Last block */ } method = inflate_block(PASS_STATE &end_reached); need_another_block = 0; } switch (method) { case -1: ret = inflate_stored(PASS_STATE_ONLY); break; case -2: ret = inflate_codes(PASS_STATE_ONLY); break; default: /* cannot happen */ abort_unzip(PASS_STATE_ONLY); } if (ret == 1) { calculate_gunzip_crc(PASS_STATE_ONLY); return 1; /* more data left */ } need_another_block = 1; /* end of that block */ } /* Doesnt get here */ }
Safe
[ "CWE-476" ]
busybox
1de25a6e87e0e627aa34298105a3d17c60a1f44e
3.3624956389501055e+37
37
unzip: test for bad archive SEGVing function old new delta huft_build 1296 1300 +4 Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
0
int RGWPostObj_ObjStore_S3::get_tags() { string tags_str; if (part_str(parts, "tagging", &tags_str)) { RGWXMLParser parser; if (!parser.init()){ ldpp_dout(this, 0) << "Couldn't init RGWObjTags XML parser" << dendl; err_msg = "Server couldn't process the request"; return -EINVAL; // TODO: This class of errors in rgw code should be a 5XX error } if (!parser.parse(tags_str.c_str(), tags_str.size(), 1)) { ldpp_dout(this,0 ) << "Invalid Tagging XML" << dendl; err_msg = "Invalid Tagging XML"; return -EINVAL; } RGWObjTagging_S3 tagging; try { RGWXMLDecoder::decode_xml("Tagging", tagging, &parser); } catch (RGWXMLDecoder::err& err) { ldpp_dout(this, 5) << "Malformed tagging request: " << err << dendl; return -EINVAL; } RGWObjTags obj_tags; int r = tagging.rebuild(obj_tags); if (r < 0) return r; bufferlist tags_bl; obj_tags.encode(tags_bl); ldpp_dout(this, 20) << "Read " << obj_tags.count() << "tags" << dendl; attrs[RGW_ATTR_TAGS] = tags_bl; } return 0; }
Safe
[ "CWE-79" ]
ceph
8f90658c731499722d5f4393c8ad70b971d05f77
1.865325447550311e+38
39
rgw: reject unauthenticated response-header actions Signed-off-by: Matt Benjamin <mbenjamin@redhat.com> Reviewed-by: Casey Bodley <cbodley@redhat.com> (cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
0
void SplashOutputDev::paintTransparencyGroup(GfxState *state, const double *bbox) { SplashBitmap *tBitmap; SplashTransparencyGroup *transpGroup; bool isolated; int tx, ty; tx = transpGroupStack->tx; ty = transpGroupStack->ty; tBitmap = transpGroupStack->tBitmap; isolated = transpGroupStack->isolated; // paint the transparency group onto the parent bitmap // - the clip path was set in the parent's state) if (tx < bitmap->getWidth() && ty < bitmap->getHeight()) { SplashCoord knockoutOpacity = (transpGroupStack->next != nullptr) ? transpGroupStack->next->knockoutOpacity : transpGroupStack->knockoutOpacity; splash->setOverprintMask(0xffffffff, false); splash->composite(tBitmap, 0, 0, tx, ty, tBitmap->getWidth(), tBitmap->getHeight(), false, !isolated, transpGroupStack->next != nullptr && transpGroupStack->next->knockout, knockoutOpacity); fontEngine->setAA(transpGroupStack->fontAA); if (transpGroupStack->next != nullptr && transpGroupStack->next->shape != nullptr) { transpGroupStack->next->knockout = true; } } // pop the stack transpGroup = transpGroupStack; transpGroupStack = transpGroup->next; if (transpGroupStack != nullptr && transpGroup->knockoutOpacity < transpGroupStack->knockoutOpacity) { transpGroupStack->knockoutOpacity = transpGroup->knockoutOpacity; } delete transpGroup->shape; delete transpGroup; delete tBitmap; }
Safe
[ "CWE-369" ]
poppler
b224e2f5739fe61de9fa69955d016725b2a4b78d
1.7126763494614834e+38
37
SplashOutputDev::tilingPatternFill: Fix crash on broken file Issue #802
0
gdImageFillToBorder (gdImagePtr im, int x, int y, int border, int color) { int lastBorder; /* Seek left */ int leftLimit, rightLimit; int i; leftLimit = (-1); if (border < 0) { /* Refuse to fill to a non-solid border */ return; } for (i = x; (i >= 0); i--) { if (gdImageGetPixel (im, i, y) == border) { break; } gdImageSetPixel (im, i, y, color); leftLimit = i; } if (leftLimit == (-1)) { return; } /* Seek right */ rightLimit = x; for (i = (x + 1); (i < im->sx); i++) { if (gdImageGetPixel (im, i, y) == border) { break; } gdImageSetPixel (im, i, y, color); rightLimit = i; } /* Look at lines above and below and start paints */ /* Above */ if (y > 0) { lastBorder = 1; for (i = leftLimit; (i <= rightLimit); i++) { int c; c = gdImageGetPixel (im, i, y - 1); if (lastBorder) { if ((c != border) && (c != color)) { gdImageFillToBorder (im, i, y - 1, border, color); lastBorder = 0; } } else if ((c == border) || (c == color)) { lastBorder = 1; } } } /* Below */ if (y < ((im->sy) - 1)) { lastBorder = 1; for (i = leftLimit; (i <= rightLimit); i++) { int c; c = gdImageGetPixel (im, i, y + 1); if (lastBorder) { if ((c != border) && (c != color)) { gdImageFillToBorder (im, i, y + 1, border, color); lastBorder = 0; } } else if ((c == border) || (c == color)) { lastBorder = 1; } } } }
Vulnerable
[ "CWE-119" ]
php-src
feba44546c27b0158f9ac20e72040a224b918c75
1.8404125940036667e+38
84
Fixed bug #22965 (Crash in gd lib's ImageFillToBorder()).
1
int RGWListBucket_ObjStore_SWIFT::get_params() { prefix = s->info.args.get("prefix"); marker = s->info.args.get("marker"); end_marker = s->info.args.get("end_marker"); max_keys = s->info.args.get("limit"); // non-standard s->info.args.get_bool("allow_unordered", &allow_unordered, false); delimiter = s->info.args.get("delimiter"); op_ret = parse_max_keys(); if (op_ret < 0) { return op_ret; } // S3 behavior is to silently cap the max-keys. // Swift behavior is to abort. if (max > default_max) return -ERR_PRECONDITION_FAILED; string path_args; if (s->info.args.exists("path")) { // should handle empty path path_args = s->info.args.get("path"); if (!delimiter.empty() || !prefix.empty()) { return -EINVAL; } prefix = path_args; delimiter="/"; path = prefix; if (path.size() && path[path.size() - 1] != '/') path.append("/"); int len = prefix.size(); int delim_size = delimiter.size(); if (len >= delim_size) { if (prefix.substr(len - delim_size).compare(delimiter) != 0) prefix.append(delimiter); } } return 0; }
Safe
[ "CWE-617" ]
ceph
f44a8ae8aa27ecef69528db9aec220f12492810e
2.967845843190235e+38
45
rgw: RGWSwiftWebsiteHandler::is_web_dir checks empty subdir_name checking for empty name avoids later assertion in RGWObjectCtx::set_atomic Fixes: CVE-2021-3531 Reviewed-by: Casey Bodley <cbodley@redhat.com> Signed-off-by: Casey Bodley <cbodley@redhat.com> (cherry picked from commit 7196a469b4470f3c8628489df9a41ec8b00a5610)
0
static void php_cli_server_content_sender_ctor(php_cli_server_content_sender *sender) /* {{{ */ { php_cli_server_buffer_ctor(&sender->buffer); } /* }}} */
Safe
[]
php-src
2438490addfbfba51e12246a74588b2382caa08a
3.2652734561714146e+38
4
slim post data
0
virtual void Flush() { calledFlush = TRUE; }
Safe
[ "CWE-190", "CWE-787" ]
icu
b7d08bc04a4296982fcef8b6b8a354a9e4e7afca
3.1919558230183275e+38
1
ICU-20958 Prevent SEGV_MAPERR in append See #971
0
void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter_sizes = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsVector(filter_sizes.shape()), errors::InvalidArgument( "Conv2DCustomBackpropFilter: filter_sizes input must be 1-dim, " "not ", filter_sizes.dims())); TensorShape filter_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); ConvBackpropDimensions dims; OP_REQUIRES_OK( context, ConvBackpropComputeDimensionsV2( "Conv2DCustomBackpropFilter", /*num_spatial_dims=*/2, input.shape(), filter_shape, out_backprop.shape(), dilations_, strides_, padding_, explicit_paddings_, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); // If there is nothing to compute, return. if (filter_shape.num_elements() == 0) { return; } int64 pad_top, pad_bottom; int64 pad_left, pad_right; if (padding_ == Padding::EXPLICIT) { pad_top = explicit_paddings_[2]; pad_bottom = explicit_paddings_[3]; pad_left = explicit_paddings_[4]; pad_right = explicit_paddings_[5]; } OP_REQUIRES_OK( context, GetWindowedOutputSizeVerbose( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[0].stride, padding_, &dims.spatial_dims[0].output_size, &pad_top, &pad_bottom)); OP_REQUIRES_OK( context, GetWindowedOutputSizeVerbose( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, dims.spatial_dims[1].stride, padding_, &dims.spatial_dims[1].output_size, &pad_left, &pad_right)); #if defined TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS && \ defined TENSORFLOW_USE_LIBXSMM_BACKWARD_CONVOLUTIONS if (pad_left == pad_right && pad_top == pad_bottom) { if (LaunchXsmmBackwardFilter<Device, T>()( context, context->eigen_device<Device>(), input.tensor<T, 4>(), filter_backprop->tensor<T, 4>(), out_backprop.tensor<T, 4>(), 
dims.spatial_dims[0].input_size, dims.spatial_dims[1].input_size, static_cast<int>(dims.spatial_dims[0].stride), static_cast<int>(dims.spatial_dims[1].stride), static_cast<int>(pad_top), static_cast<int>(pad_left), data_format_)) { return; } } #endif // The total dimension size of each kernel. const int filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.in_depth; // The output image size is the spatial size of the output. const int output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size; // Shard 'batch' images into 'shard_size' groups of images to be fed // into the parallel matmul. Calculate 'shard_size' by dividing the L3 cache // size ('target_working_set_size') by the matmul size of an individual // image ('work_unit_size'). // TODO(andydavis) // *) Get L3 cache size from device at runtime (30MB is from ivybridge). // *) Consider reducing 'target_working_set_size' if L3 is shared by // other concurrently running tensorflow ops. const size_t target_working_set_size = (30LL << 20) / sizeof(T); const size_t size_A = output_image_size * filter_total_size; const size_t size_B = output_image_size * dims.out_depth; const size_t size_C = filter_total_size * dims.out_depth; const size_t work_unit_size = size_A + size_B + size_C; const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; Tensor col_buffer; OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, TensorShape({static_cast<int64>(shard_size), static_cast<int64>(output_image_size), static_cast<int64>(filter_total_size)}), &col_buffer)); // The input offset corresponding to a single input image. const int input_offset = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.in_depth; // The output offset corresponding to a single output image. 
const int output_offset = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.out_depth; const T* input_data = input.template flat<T>().data(); T* col_buffer_data = col_buffer.template flat<T>().data(); const T* out_backprop_data = out_backprop.template flat<T>().data(); T* filter_backprop_data = filter_backprop->template flat<T>().data(); typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Unaligned> TensorMap; typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>, Eigen::Unaligned> ConstTensorMap; TensorMap C(filter_backprop_data, filter_total_size, dims.out_depth); C.setZero(); // Initialize contraction dims (we need to transpose 'A' below). Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_dims; contract_dims[0].first = 0; contract_dims[0].second = 0; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); for (int image_id = 0; image_id < dims.batch_size; image_id += shard_size) { const int shard_limit = std::min(static_cast<int>(shard_size), static_cast<int>(dims.batch_size) - image_id); auto shard = [&input_data, &col_buffer_data, &dims, &pad_top, &pad_left, &pad_bottom, &pad_right, &input_offset, &size_A](int64 start, int64 limit) { for (int shard_id = start; shard_id < limit; ++shard_id) { const T* input_data_shard = input_data + shard_id * input_offset; T* col_data_shard = col_buffer_data + shard_id * size_A; // When we compute the gradient with respect to the filters, we need // to do im2col to allow gemm-type computation. 
Im2col<T>( input_data_shard, dims.in_depth, dims.spatial_dims[0].input_size, dims.spatial_dims[1].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[1].filter_size, pad_top, pad_left, pad_bottom, pad_right, dims.spatial_dims[0].stride, dims.spatial_dims[1].stride, col_data_shard); } }; Shard(worker_threads.num_threads, worker_threads.workers, shard_limit, size_A, shard); ConstTensorMap A(col_buffer_data, output_image_size * shard_limit, filter_total_size); ConstTensorMap B(out_backprop_data, output_image_size * shard_limit, dims.out_depth); // Gradient with respect to filter. C.device(context->eigen_cpu_device()) += A.contract(B, contract_dims); input_data += input_offset * shard_limit; out_backprop_data += output_offset * shard_limit; } }
Vulnerable
[ "CWE-369", "CWE-787" ]
tensorflow
c570e2ecfc822941335ad48f6e10df4e21f11c96
5.115886011177379e+37
172
Fix issues in Conv2DBackpropFilter. PiperOrigin-RevId: 369772454 Change-Id: I49b465f2ae2ce91def61b56cea8000197d5177d8
1
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) { if (sgs->sum_nr_running > sgs->nr_numa_running) return regular; if (sgs->sum_nr_running > sgs->nr_preferred_running) return remote; return all; }
Safe
[ "CWE-400", "CWE-703", "CWE-835" ]
linux
c40f7d74c741a907cfaeb73a7697081881c497d0
1.5388797313425691e+38
8
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the scheduler under high loads, starting at around the v4.18 time frame, and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list manipulation. Do a (manual) revert of: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") It turns out that the list_del_leaf_cfs_rq() introduced by this commit is a surprising property that was not considered in followup commits such as: 9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list") As Vincent Guittot explains: "I think that there is a bigger problem with commit a9e7f6544b9c and cfs_rq throttling: Let take the example of the following topology TG2 --> TG1 --> root: 1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1 cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in one path because it has never been used and can't be throttled so tmp_alone_branch will point to leaf_cfs_rq_list at the end. 2) Then TG1 is throttled 3) and we add TG3 as a new child of TG1. 4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list. With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list. So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1 cfs_rq is removed from the list. Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list but tmp_alone_branch still points to TG3 cfs_rq because its throttled parent can't be enqueued when the lock is released. tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should. So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch points on another TG cfs_rq, the next TG cfs_rq that will be added, will be linked outside rq->leaf_cfs_rq_list - which is bad. 
In addition, we can break the ordering of the cfs_rq in rq->leaf_cfs_rq_list but this ordering is used to update and propagate the update from leaf down to root." Instead of trying to work through all these cases and trying to reproduce the very high loads that produced the lockup to begin with, simplify the code temporarily by reverting a9e7f6544b9c - which change was clearly not thought through completely. This (hopefully) gives us a kernel that doesn't lock up so people can continue to enjoy their holidays without worrying about regressions. ;-) [ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ] Analyzed-by: Xie XiuQi <xiexiuqi@huawei.com> Analyzed-by: Vincent Guittot <vincent.guittot@linaro.org> Reported-by: Zhipeng Xie <xiezhipeng1@huawei.com> Reported-by: Sargun Dhillon <sargun@sargun.me> Reported-by: Xie XiuQi <xiexiuqi@huawei.com> Tested-by: Zhipeng Xie <xiezhipeng1@huawei.com> Tested-by: Sargun Dhillon <sargun@sargun.me> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Acked-by: Vincent Guittot <vincent.guittot@linaro.org> Cc: <stable@vger.kernel.org> # v4.13+ Cc: Bin Li <huawei.libin@huawei.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Tejun Heo <tj@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") Link: http://lkml.kernel.org/r/1545879866-27809-1-git-send-email-xiexiuqi@huawei.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
0
void EC_GROUP_free(EC_GROUP *group) { if (!group) return; if (group->meth->group_finish != 0) group->meth->group_finish(group); EC_EX_DATA_free_all_data(&group->extra_data); if (group->mont_data) BN_MONT_CTX_free(group->mont_data); if (group->generator != NULL) EC_POINT_free(group->generator); BN_free(&group->order); BN_free(&group->cofactor); if (group->seed) OPENSSL_free(group->seed); OPENSSL_free(group); }
Safe
[ "CWE-320" ]
openssl
8aed2a7548362e88e84a7feb795a3a97e8395008
3.033630448574509e+37
22
Reserve option to use BN_mod_exp_mont_consttime in ECDSA. Submitted by Shay Gueron, Intel Corp. RT: 3149 Reviewed-by: Rich Salz <rsalz@openssl.org> (cherry picked from commit f54be179aa4cbbd944728771d7d59ed588158a12)
0
static int _server_handle_qSupported(libgdbr_t *g) { int ret; char *buf; if (!(buf = malloc (128))) { return -1; } snprintf (buf, 127, "PacketSize=%x", (ut32) (g->read_max - 1)); if ((ret = handle_qSupported (g)) < 0) { return -1; } ret = send_msg (g, buf); free (buf); return ret; }
Safe
[ "CWE-703", "CWE-787" ]
radare2
796dd28aaa6b9fa76d99c42c4d5ff8b257cc2191
4.19838363715566e+36
14
Fix ext2 buffer overflow in r2_sbu_grub_memmove
0
static int l2cap_resegment(struct l2cap_chan *chan) { /* Placeholder */ return 0; }
Safe
[ "CWE-787" ]
linux
e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
2.5675399774722797e+38
5
Bluetooth: Properly check L2CAP config option output buffer length Validate the output buffer length for L2CAP config requests and responses to avoid overflowing the stack buffer used for building the option blocks. Cc: stable@vger.kernel.org Signed-off-by: Ben Seri <ben@armis.com> Signed-off-by: Marcel Holtmann <marcel@holtmann.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
ProgressReporter(char const* filename) : filename(filename) { }
Safe
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
2.562724856066918e+38
4
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
0
static void SerializeGltfLight(Light &light, json &o) { if (!light.name.empty()) SerializeStringProperty("name", light.name, o); SerializeNumberProperty("intensity", light.intensity, o); if (light.range > 0.0) { SerializeNumberProperty("range", light.range, o); } SerializeNumberArrayProperty("color", light.color, o); SerializeStringProperty("type", light.type, o); if (light.type == "spot") { json spot; SerializeSpotLight(light.spot, spot); JsonAddMember(o, "spot", std::move(spot)); } SerializeExtensionMap(light.extensions, o); if (light.extras.Type() != NULL_TYPE) { SerializeValue("extras", light.extras, o); } }
Safe
[ "CWE-20" ]
tinygltf
52ff00a38447f06a17eab1caa2cf0730a119c751
1.6502647411501802e+38
18
Do not expand file path since its not necessary for glTF asset path(URI) and for security reason(`wordexp`).
0
static void irda_connect_response(struct irda_sock *self) { struct sk_buff *skb; IRDA_DEBUG(2, "%s()\n", __func__); skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_ATOMIC); if (skb == NULL) { IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", __func__); return; } /* Reserve space for MUX_CONTROL and LAP header */ skb_reserve(skb, IRDA_MAX_HEADER); irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb); }
Safe
[ "CWE-200" ]
linux-2.6
09384dfc76e526c3993c09c42e016372dc9dd22c
2.3515729257205908e+38
19
irda: Fix irda_getname() leak irda_getname() can leak kernel memory to user. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g) { if(!p || !g) /* q is optional */ return 0; BN_free(dh->p); BN_free(dh->q); BN_free(dh->g); dh->p=p; dh->q=q; dh->g=g; if(q) dh->length=BN_num_bits(q); return 1; }
Safe
[ "CWE-295" ]
stunnel
ebad9ddc4efb2635f37174c9d800d06206f1edf9
1.378593403137283e+38
13
stunnel-5.57
0
void VP8ComponentDecoder::SendToVirtualThread::drain(Sirikata::MuxReader&reader) { while (!reader.eof) { ResizableByteBufferListNode *data = new ResizableByteBufferListNode; auto ret = reader.nextDataPacket(*data); if (ret.second != Sirikata::JpegError::nil()) { set_eof(); break; } data->stream_id = ret.first; always_assert(data->size()); // the protocol can't store empty runs send(data); } }
Safe
[ "CWE-1187" ]
lepton
82167c144a322cc956da45407f6dce8d4303d346
1.2591044502161585e+38
13
fix #87 : always check that threads_required set up the appropriate number of threads---fire off nop functions on unused threads for consistency
0
static int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) return -EINVAL; if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags) return -EINVAL; req->cancel.addr = READ_ONCE(sqe->addr); return 0;
Safe
[ "CWE-667" ]
linux
3ebba796fa251d042be42b929a2d916ee5c34a49
1.8647101161408133e+38
13
io_uring: ensure that SQPOLL thread is started for exit If we create it in a disabled state because IORING_SETUP_R_DISABLED is set on ring creation, we need to ensure that we've kicked the thread if we're exiting before it's been explicitly disabled. Otherwise we can run into a deadlock where exit is waiting go park the SQPOLL thread, but the SQPOLL thread itself is waiting to get a signal to start. That results in the below trace of both tasks hung, waiting on each other: INFO: task syz-executor458:8401 blocked for more than 143 seconds. Not tainted 5.11.0-next-20210226-syzkaller #0 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread_park fs/io_uring.c:7115 [inline] io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103 io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745 __io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840 io_uring_files_cancel include/linux/io_uring.h:47 [inline] do_exit+0x299/0x2a60 kernel/exit.c:780 do_group_exit+0x125/0x310 kernel/exit.c:922 __do_sys_exit_group kernel/exit.c:933 [inline] __se_sys_exit_group kernel/exit.c:931 [inline] __x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x43e899 RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7 RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899 RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000 RBP: 
0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000 R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0 R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001 INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds. task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294 INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds. Reported-by: syzbot+fb5458330b4442f2090d@syzkaller.appspotmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
0
/*
 * tomoyo_cred_transfer - fill a blank credential from an existing one.
 *
 * TOMOYO's security blob is a shared "struct tomoyo_domain_info *"
 * pointer, so transferring it is a plain pointer copy; no deep copy
 * is required.
 */
static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
{
	new->security = old->security;
}
Safe
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
1.5998672397993308e+38
8
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. 
This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <dhowells@redhat.com> Signed-off-by: James Morris <jmorris@namei.org>
0
// Dispatch the OnRawMode2 hook (op nick, channel, raw mode string and mode
// arguments) to every loaded module via the MODUNLOADCHK iteration macro.
// Returns false when no module halted processing.
// NOTE(review): short-circuit semantics live inside MODUNLOADCHK -- confirm
// against Modules.h that a module returning HALT makes this return early.
bool CModules::OnRawMode2(const CNick* pOpNick, CChan& Channel, const CString& sModes, const CString& sArgs) {
    MODUNLOADCHK(OnRawMode2(pOpNick, Channel, sModes, sArgs));
    return false;
}
Safe
[ "CWE-20", "CWE-264" ]
znc
8de9e376ce531fe7f3c8b0aa4876d15b479b7311
2.872954256335956e+38
5
Fix remote code execution and privilege escalation vulnerability. To trigger this, need to have a user already. Thanks to Jeriko One <jeriko.one@gmx.us> for finding and reporting this. CVE-2019-12816
0
/*
 * vc2_encode_end() - codec close callback for the VC-2 encoder.
 *
 * Releases the per-plane wavelet transform state and coefficient buffers,
 * the per-slice argument array and the coefficient look-up tables.
 * av_freep() NULLs the freed pointers, so a repeated close is harmless.
 * Always returns 0 (libavcodec close callbacks cannot usefully fail).
 *
 * Fix: the function's closing brace was missing in this snippet; restored.
 */
static av_cold int vc2_encode_end(AVCodecContext *avctx)
{
    int i;
    VC2EncContext *s = avctx->priv_data;

    for (i = 0; i < 3; i++) {
        ff_vc2enc_free_transforms(&s->transform_args[i].t);
        av_freep(&s->plane[i].coef_buf);
    }

    av_freep(&s->slice_args);
    av_freep(&s->coef_lut_len);
    av_freep(&s->coef_lut_val);
    return 0;
}
Safe
[ "CWE-125" ]
FFmpeg
94e538aebbc9f9c529e8b1f2eda860cfb8c473b1
3.2723195881591027e+38
16
vc2enc_dwt: pad the temporary buffer by the slice size Since non-Haar wavelets need to look into pixels outside the frame, we need to pad the buffer. The old factor of two seemed to be a workaround that fact and only padded to the left and bottom. This correctly pads by the slice size and as such reduces memory usage and potential exploits. Reported by Liu Bingchang. Ideally, there should be no temporary buffer but the encoder is designed to deinterleave the coefficients into the classical wavelet structure with the lower frequency values in the top left corner. Signed-off-by: Rostislav Pehlivanov <atomnuker@gmail.com> (cherry picked from commit 3228ac730c11eca49d5680d5550128e397061c85)
0
check_entry(const struct ipt_entry *e, const char *name) { const struct xt_entry_target *t; if (!ip_checkentry(&e->ip)) { duprintf("ip check failed %p %s.\n", e, par->match->name); return -EINVAL; } if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = ipt_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; }
Safe
[ "CWE-200" ]
linux-2.6
78b79876761b86653df89c48a7010b5cbd41a84a
2.2099363593078516e+38
19
netfilter: ip_tables: fix infoleak to userspace Structures ipt_replace, compat_ipt_replace, and xt_get_revision are copied from userspace. Fields of these structs that are zero-terminated strings are not checked. When they are used as argument to a format string containing "%s" in request_module(), some sensitive information is leaked to userspace via argument of spawned modprobe process. The first and the third bugs were introduced before the git epoch; the second was introduced in 2722971c (v2.6.17-rc1). To trigger the bug one should have CAP_NET_ADMIN. Signed-off-by: Vasiliy Kulikov <segoon@openwall.com> Signed-off-by: Patrick McHardy <kaber@trash.net>
0
/*
 * Decode a LASeR 2D transformation matrix into mx->mat.
 *
 * Bitstream layout (per the flag names passed to GF_LSR_READ_INT):
 *  - "isNotMatrix" set: either a reference transform with optional X/Y
 *    translation, or an extension blob.
 *  - otherwise: a full matrix -- optional xx/yy scale terms (default
 *    FIX_ONE), optional xy/yx shear terms, optional xz/yz translation
 *    terms.  Scale/shear values are widened by scale_bits for the
 *    duration of the matrix read (restored at the end).
 */
static void lsr_read_matrix(GF_LASeRCodec *lsr, SVG_Transform *mx)
{
	u32 flag;
	gf_mx2d_init(mx->mat);
	mx->is_ref = 0;
	GF_LSR_READ_INT(lsr, flag, 1, "isNotMatrix");
	if (flag) {
		GF_LSR_READ_INT(lsr, flag, 1, "isRef");
		if (flag) {
			/* reference transform: optional translation only */
			GF_LSR_READ_INT(lsr, flag, 1, "hasXY");
			if (flag) {
				mx->mat.m[2] = lsr_read_fixed_16_8(lsr, "valueX");
				mx->mat.m[5] = lsr_read_fixed_16_8(lsr, "valueY");
			}
		} else {
			lsr_read_extension(lsr, "ext");
		}
	} else {
		/* full matrix: scale terms are coded with extra precision */
		lsr->coord_bits += lsr->scale_bits;
		GF_LSR_READ_INT(lsr, flag, 1, "xx_yy_present");
		if (flag) {
			GF_LSR_READ_INT(lsr, flag, lsr->coord_bits, "xx");
			mx->mat.m[0] = lsr_translate_scale(lsr, flag);
			GF_LSR_READ_INT(lsr, flag, lsr->coord_bits, "yy");
			mx->mat.m[4] = lsr_translate_scale(lsr, flag);
		} else {
			/* absent scale defaults to identity */
			mx->mat.m[0] = mx->mat.m[4] = FIX_ONE;
		}
		GF_LSR_READ_INT(lsr, flag, 1, "xy_yx_present");
		if (flag) {
			GF_LSR_READ_INT(lsr, flag, lsr->coord_bits, "xy");
			mx->mat.m[1] = lsr_translate_scale(lsr, flag);
			GF_LSR_READ_INT(lsr, flag, lsr->coord_bits, "yx");
			mx->mat.m[3] = lsr_translate_scale(lsr, flag);
		}
		GF_LSR_READ_INT(lsr, flag, 1, "xz_yz_present");
		if (flag) {
			GF_LSR_READ_INT(lsr, flag, lsr->coord_bits, "xz");
			mx->mat.m[2] = lsr_translate_coords(lsr, flag, lsr->coord_bits);
			GF_LSR_READ_INT(lsr, flag, lsr->coord_bits, "yz");
			mx->mat.m[5] = lsr_translate_coords(lsr, flag, lsr->coord_bits);
		}
		lsr->coord_bits -= lsr->scale_bits;
	}
}
Safe
[ "CWE-190" ]
gpac
faa75edde3dfeba1e2cf6ffa48e45a50f1042096
8.200425247802742e+37
46
fixed #2213
0
/* Set up @v as an empty dictionary variant, pre-sizing its storage so the
 * first @reserve_count insertions need no reallocation. */
void tr_variantInitDict(tr_variant* v, size_t reserve_count)
{
    /* mark the variant as a dictionary... */
    tr_variantInit(v, TR_VARIANT_TYPE_DICT);

    /* ...then grow its capacity up front */
    tr_variantDictReserve(v, reserve_count);
}
Safe
[ "CWE-416", "CWE-284" ]
transmission
2123adf8e5e1c2b48791f9d22fc8c747e974180e
8.536445804808406e+37
5
CVE-2018-10756: Fix heap-use-after-free in tr_variantWalk In libtransmission/variant.c, function tr_variantWalk, when the variant stack is reallocated, a pointer to the previously allocated memory region is kept. This address is later accessed (heap use-after-free) while walking back down the stack, causing the application to crash. The application can be any application which uses libtransmission, such as transmission-daemon, transmission-gtk, transmission-show, etc. Reported-by: Tom Richards <tom@tomrichards.net>
0
// Placement-new for IOBuf: storage is supplied by the caller, so simply
// return the provided pointer (the size argument is irrelevant here and
// deliberately left unnamed).
void* IOBuf::operator new(size_t /* size */, void* ptr) {
  return ptr;
}
Safe
[ "CWE-787" ]
folly
4f304af1411e68851bdd00ef6140e9de4616f7d3
3.050834059911225e+38
3
[folly] Add additional overflow checks to IOBuf - CVE-2021-24036 Summary: As per title CVE-2021-24036 Reviewed By: jan Differential Revision: D27938605 fbshipit-source-id: 7481c54ae6fbb7b67b15b3631d5357c2f7043f9c
0
/*
 * syscall_unmask_cancellation() - clear the current TA session's
 * cancellation mask and report the previous value back to user space.
 *
 * Returns TEE_SUCCESS, the error from tee_ta_get_current_session(),
 * or the result of the user-space copy of the old mask.
 */
TEE_Result syscall_unmask_cancellation(uint32_t *old_mask)
{
	struct tee_ta_session *sess = NULL;
	uint32_t prev_mask;
	TEE_Result rc = tee_ta_get_current_session(&sess);

	if (rc != TEE_SUCCESS)
		return rc;

	prev_mask = sess->cancel_mask;
	sess->cancel_mask = false;

	return tee_svc_copy_to_user(old_mask, &prev_mask, sizeof(prev_mask));
}
Safe
[ "CWE-119", "CWE-787" ]
optee_os
d5c5b0b77b2b589666024d219a8007b3f5b6faeb
2.4424420176699943e+38
14
core: svc: always check ta parameters Always check TA parameters from a user TA. This prevents a user TA from passing invalid pointers to a pseudo TA. Fixes: OP-TEE-2018-0007: "Buffer checks missing when calling pseudo TAs". Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org> Tested-by: Joakim Bech <joakim.bech@linaro.org> (QEMU v7, v8) Reviewed-by: Joakim Bech <joakim.bech@linaro.org> Reported-by: Riscure <inforequest@riscure.com> Reported-by: Alyssa Milburn <a.a.milburn@vu.nl> Acked-by: Etienne Carriere <etienne.carriere@linaro.org>
0
/* Destructor for the 'extr' (extra data) box: release the optional child
 * FEC information box, the raw payload buffer, then the box itself.
 * A NULL box pointer is a no-op. */
void extr_box_del(GF_Box *s)
{
	GF_ExtraDataBox *box = (GF_ExtraDataBox *)s;

	if (!box)
		return;
	if (box->feci)
		gf_isom_box_del((GF_Box *)box->feci);
	if (box->data)
		gf_free(box->data);
	gf_free(box);
}
Safe
[ "CWE-787" ]
gpac
77510778516803b7f7402d7423c6d6bef50254c3
8.529665481267118e+37
8
fixed #2255
0
/*
 * Unpack a length-prefixed string column from a wire-format row into the
 * client-supplied bind buffer.  At most buffer_length bytes are copied;
 * a terminating NUL is appended only when spare room exists.  The full
 * on-wire length is reported through *param->length and truncation is
 * flagged via *param->error.  *row is advanced past the column data.
 */
static void fetch_result_str(MYSQL_BIND *param,
                             MYSQL_FIELD *field __attribute__((unused)),
                             uchar **row)
{
  ulong full_len= net_field_length(row);
  ulong to_copy= min(full_len, param->buffer_length);

  memcpy(param->buffer, (char *)*row, to_copy);
  if (to_copy != param->buffer_length)
    ((uchar *)param->buffer)[to_copy]= '\0';   /* room left: terminate */

  *param->length= full_len;                    /* total on-wire length */
  *param->error= to_copy < full_len;           /* truncated? */
  *row+= full_len;
}
Safe
[]
mysql-server
3d8134d2c9b74bc8883ffe2ef59c168361223837
3.8462297759805333e+36
14
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE() Description: If mysql_stmt_close() encountered error, it recorded error in prepared statement but then frees memory assigned to prepared statement. If mysql_stmt_error() is used to get error information, it will result into use after free. In all cases where mysql_stmt_close() can fail, error would have been set by cli_advanced_command in MYSQL structure. Solution: Don't copy error from MYSQL using set_stmt_errmsg. There is no automated way to test the fix since it is in mysql_stmt_close() which does not expect any reply from server. Reviewed-By: Georgi Kodinov <georgi.kodinov@oracle.com> Reviewed-By: Ramil Kalimullin <ramil.kalimullin@oracle.com>
0
// Verifies that a chunked request body with a capitalized "Chunked"
// transfer-encoding value is accepted and decoded: headers first (end_stream
// false), then the dechunked payload, then an empty end-of-stream data call,
// with the whole input buffer consumed.
TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) {
  initialize();

  InSequence sequence;

  MockRequestDecoder decoder;
  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));

  // Expected decoded headers; note the mixed-case TE value must match.
  TestRequestHeaderMapImpl expected_headers{
      {":path", "/"},
      {":method", "POST"},
      {"transfer-encoding", "Chunked"},
  };
  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));
  Buffer::OwnedImpl expected_data("Hello World");
  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));
  // Final zero-length chunk terminates the stream.
  EXPECT_CALL(decoder, decodeData(_, true));

  Buffer::OwnedImpl buffer(
      "POST / HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n");
  auto status = codec_->dispatch(buffer);
  EXPECT_TRUE(status.ok());
  EXPECT_EQ(0U, buffer.length());
}
Safe
[ "CWE-770" ]
envoy
7ca28ff7d46454ae930e193d97b7d08156b1ba59
1.1196717853069851e+38
24
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145) Signed-off-by: antonio <avd@google.com>
0
/*
 * cap_bprm_apply_creds - apply capability-derived credentials at exec time.
 *
 * If the new image would change uid/gid or grow the permitted capability
 * set, the process is treated as a privilege transition: dumpability is
 * restricted and pdeath_signal cleared.  When the exec is marked unsafe
 * (e.g. traced, beyond LSM_UNSAFE_PTRACE_CAP) and the caller lacks
 * CAP_SETUID, the id change is cancelled and the post-exec capability
 * set is clamped to what the tracer already permitted.  Finally the
 * saved/effective/fs ids are committed and -- except for init, which
 * keeps its init_task capabilities -- the capability sets are installed.
 */
void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
{
	if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
	    !cap_issubset(bprm->cap_post_exec_permitted,
			  current->cap_permitted)) {
		/* privilege transition: lock down dump/parent-death behaviour */
		set_dumpable(current->mm, suid_dumpable);
		current->pdeath_signal = 0;
		if (unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
			if (!capable(CAP_SETUID)) {
				/* unsafe exec without CAP_SETUID: drop the id change */
				bprm->e_uid = current->uid;
				bprm->e_gid = current->gid;
			}
			if (cap_limit_ptraced_target()) {
				/* clamp caps to what we already held */
				bprm->cap_post_exec_permitted = cap_intersect(
					bprm->cap_post_exec_permitted,
					current->cap_permitted);
			}
		}
	}

	current->suid = current->euid = current->fsuid = bprm->e_uid;
	current->sgid = current->egid = current->fsgid = bprm->e_gid;

	/* For init, we want to retain the capabilities set
	 * in the init_task struct. Thus we skip the usual
	 * capability rules */
	if (!is_global_init(current)) {
		current->cap_permitted = bprm->cap_post_exec_permitted;
		if (bprm->cap_effective)
			current->cap_effective = bprm->cap_post_exec_permitted;
		else
			cap_clear(current->cap_effective);
	}

	/* AUD: Audit candidate if current->cap_effective is set */

	current->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
}
Safe
[]
linux-2.6
3318a386e4ca68c76e0294363d29bdc46fcad670
2.4319625603604615e+37
39
file caps: always start with clear bprm->caps_* While Linux doesn't honor setuid on scripts. However, it mistakenly behaves differently for file capabilities. This patch fixes that behavior by making sure that get_file_caps() begins with empty bprm->caps_*. That way when a script is loaded, its bprm->caps_* may be filled when binfmt_misc calls prepare_binprm(), but they will be cleared again when binfmt_elf calls prepare_binprm() next to read the interpreter's file capabilities. Signed-off-by: Serge Hallyn <serue@us.ibm.com> Acked-by: David Howells <dhowells@redhat.com> Acked-by: Andrew G. Morgan <morgan@kernel.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
/*
 * Flatten an iovec array into a single talloc'd buffer owned by mem_ctx.
 * Returns NULL on total-length overflow (iov_buflen() == -1) or on
 * allocation failure; otherwise a buffer of exactly the combined length.
 */
static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
				   const struct iovec *iov,
				   int count)
{
	uint8_t *out;
	ssize_t total = iov_buflen(iov, count);

	if (total == -1) {
		return NULL;
	}

	out = talloc_array(mem_ctx, uint8_t, total);
	if (out == NULL) {
		return NULL;
	}

	iov_buf(iov, count, out, total);
	return out;
}
Safe
[ "CWE-20" ]
samba
a819d2b440aafa3138d95ff6e8b824da885a70e9
2.1800562734367057e+38
21
CVE-2015-5296: libcli/smb: make sure we require signing when we demand encryption on a session BUG: https://bugzilla.samba.org/show_bug.cgi?id=11536 Signed-off-by: Stefan Metzmacher <metze@samba.org> Reviewed-by: Jeremy Allison <jra@samba.org>
0
/*
 * gf_fs_filter_exists() - report whether a filter with the given registry
 * name is available in this session.  The name "enc" is always reported
 * present without consulting the registry.
 */
Bool gf_fs_filter_exists(GF_FilterSession *fsess, const char *name)
{
	u32 idx, nb_reg;

	if (!strcmp(name, "enc"))
		return GF_TRUE;

	nb_reg = gf_list_count(fsess->registry);
	for (idx = 0; idx < nb_reg; idx++) {
		const GF_FilterRegister *reg = gf_list_get(fsess->registry, idx);
		if (!strcmp(reg->name, name))
			return GF_TRUE;
	}
	return GF_FALSE;
}
Safe
[ "CWE-787" ]
gpac
da37ec8582266983d0ec4b7550ec907401ec441e
7.8713427020575875e+37
15
fixed crashes for very long path - cf #1908
0
/* True when the 64-bit value survives a round-trip through a signed
 * 32-bit truncation, i.e. it fits in a sign-extended 32-bit immediate. */
static bool is_simm32(s64 value)
{
	s32 truncated = (s32)value;

	return (s64)truncated == value;
}
Safe
[ "CWE-77" ]
linux
e4d4d456436bfb2fe412ee2cd489f7658449b098
8.152821407128477e+37
4
bpf, x86: Validate computation of branch displacements for x86-64 The branch displacement logic in the BPF JIT compilers for x86 assumes that, for any generated branch instruction, the distance cannot increase between optimization passes. But this assumption can be violated due to how the distances are computed. Specifically, whenever a backward branch is processed in do_jit(), the distance is computed by subtracting the positions in the machine code from different optimization passes. This is because part of addrs[] is already updated for the current optimization pass, before the branch instruction is visited. And so the optimizer can expand blocks of machine code in some cases. This can confuse the optimizer logic, where it assumes that a fixed point has been reached for all machine code blocks once the total program size stops changing. And then the JIT compiler can output abnormal machine code containing incorrect branch displacements. To mitigate this issue, we assert that a fixed point is reached while populating the output image. This rejects any problematic programs. The issue affects both x86-32 and x86-64. We mitigate separately to ease backporting. Signed-off-by: Piotr Krysiuk <piotras@gmail.com> Reviewed-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
0
/*
 * _gnutls_global_deinit - tear down library-global state under the
 * global init mutex.
 *
 * Only performs the full teardown when the init refcount is exactly 1
 * (last user) AND the matching init succeeded; otherwise it just
 * decrements the refcount.  @destructor is nonzero when invoked from the
 * library destructor, in which case PKCS #11 deinit is skipped (the
 * modules may already be unloading -- see comment below).
 */
static void _gnutls_global_deinit(unsigned destructor)
{
	GNUTLS_STATIC_MUTEX_LOCK(global_init_mutex);

	if (_gnutls_init == 1) {
		_gnutls_init = 0;
		if (_gnutls_init_ret < 0) {
			/* only deinitialize if gnutls_global_init() has
			 * succeeded */
			gnutls_assert();
			goto fail;
		}

		gnutls_crypto_deinit();
		_gnutls_rnd_deinit();
		_gnutls_ext_deinit();
		asn1_delete_structure(&_gnutls_gnutls_asn);
		asn1_delete_structure(&_gnutls_pkix1_asn);

		_gnutls_crypto_deregister();
		gnutls_system_global_deinit();
		_gnutls_cryptodev_deinit();

#ifdef ENABLE_PKCS11
		/* Do not try to deinitialize the PKCS #11 libraries
		 * from the destructor. If we do and the PKCS #11 modules
		 * are already being unloaded, we may crash. */
		if (destructor == 0) {
			gnutls_pkcs11_deinit();
		}
#endif

		gnutls_mutex_deinit(&_gnutls_file_mutex);
		gnutls_mutex_deinit(&_gnutls_pkcs11_mutex);
	} else {
		/* more users remain: just drop one reference */
		if (_gnutls_init > 0)
			_gnutls_init--;
	}

 fail:
	GNUTLS_STATIC_MUTEX_UNLOCK(global_init_mutex);
}
Safe
[ "CWE-20" ]
gnutls
b0a3048e56611a2deee4976aeba3b8c0740655a6
6.488796453058158e+37
43
env: use secure_getenv when reading environment variables
0
/*
 * tcp_mark_head_lost - mark up to @packets segments at the head of the
 * write queue as lost.
 *
 * Resumes from the cached lost_skb_hint/lost_cnt_hint when available;
 * with @mark_head set, only the queue head is considered (and only if
 * the hint still points at it).  SACK-capable connections bound the walk
 * by snd_nxt, others by high_seq.  When the loss budget lands inside a
 * multi-MSS skb, the skb is split (tcp_fragment) so only the lost prefix
 * is marked.
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt, lost;
	unsigned int mss;
	/* Use SACK to deduce losses of new sequences sent during recovery */
	const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;

	WARN_ON(packets > tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
		/* Head already handled? */
		if (mark_head && skb != tcp_write_queue_head(sk))
			return;
	} else {
		skb = tcp_write_queue_head(sk);
		cnt = 0;
	}

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* TODO: do this better */
		/* this is not the most efficient way to do this... */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
			break;

		oldcnt = cnt;
		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
			    (oldcnt >= packets))
				break;

			mss = tcp_skb_mss(skb);
			/* If needed, chop off the prefix to mark as lost. */
			lost = (packets - oldcnt) * mss;
			if (lost < skb->len &&
			    tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0)
				break;
			cnt = packets;
		}

		tcp_skb_mark_lost(tp, skb);

		if (mark_head)
			break;
	}
	tcp_verify_left_out(tp);
}
Safe
[ "CWE-200" ]
net
75ff39ccc1bd5d3c455b6822ab09e533c551f758
2.912711146573797e+38
59
tcp: make challenge acks less predictable Yue Cao claims that current host rate limiting of challenge ACKS (RFC 5961) could leak enough information to allow a patient attacker to hijack TCP sessions. He will soon provide details in an academic paper. This patch increases the default limit from 100 to 1000, and adds some randomization so that the attacker can no longer hijack sessions without spending a considerable amount of probes. Based on initial analysis and patch from Linus. Note that we also have per socket rate limiting, so it is tempting to remove the host limit in the future. v2: randomize the count of challenge acks per second, not the period. Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2") Reported-by: Yue Cao <ycao009@ucr.edu> Signed-off-by: Eric Dumazet <edumazet@google.com> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Cc: Yuchung Cheng <ycheng@google.com> Cc: Neal Cardwell <ncardwell@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Acked-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
/*
 * 'file_method()' - Return the URL scheme ("method") of a filename or URL.
 *
 * Returns a static string naming the scheme ("data", "http", "https",
 * "ftp" or "mailto"), or NULL when 's' does not start with a recognized
 * scheme prefix (i.e. it is a plain filename).
 *
 * Fix: the dumped snippet lost the function's return type line; it is
 * restored here as 'const char *'.
 */
const char *				/* O - Method string or NULL */
file_method(const char *s)		/* I - Filename or URL */
{
  if (strncmp(s, "data:", 5) == 0)
    return ("data");
  else if (strncmp(s, "http:", 5) == 0)
    return ("http");
  else if (strncmp(s, "https:", 6) == 0)
    return ("https");
  else if (strncmp(s, "ftp:", 4) == 0)
    return ("ftp");
  else if (strncmp(s, "mailto:", 7) == 0)
    return ("mailto");
  else
    return (NULL);
}
Safe
[ "CWE-476", "CWE-415", "CWE-787" ]
htmldoc
369b2ea1fd0d0537ba707f20a2f047b6afd2fbdc
1.350242904422879e+38
15
Fix JPEG error handling (Issue #415)
0
/*
 * flush_gro_hash() - drop every skb still queued in a napi instance's
 * GRO hash buckets and reset the per-bucket counters.
 *
 * Uses the _safe list iterator because kfree_skb() unlinks the entry
 * being walked.
 *
 * Fix: the function's closing brace was missing in this snippet; restored.
 */
static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}
Safe
[ "CWE-416" ]
linux
a4270d6795b0580287453ea55974d948393e66ef
7.877514034585888e+37
12
net-gro: fix use-after-free read in napi_gro_frags() If a network driver provides to napi_gro_frags() an skb with a page fragment of exactly 14 bytes, the call to gro_pull_from_frag0() will 'consume' the fragment by calling skb_frag_unref(skb, 0), and the page might be freed and reused. Reading eth->h_proto at the end of napi_frags_skb() might read mangled data, or crash under specific debugging features. BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline] BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 Read of size 2 at addr ffff88809366840c by task syz-executor599/8957 CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x172/0x1f0 lib/dump_stack.c:113 print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188 __kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317 kasan_report+0x12/0x20 mm/kasan/common.c:614 __asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142 napi_frags_skb net/core/dev.c:5833 [inline] napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991 tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037 call_write_iter include/linux/fs.h:1872 [inline] do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693 do_iter_write fs/read_write.c:970 [inline] do_iter_write+0x184/0x610 fs/read_write.c:951 vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015 do_writev+0x15b/0x330 fs/read_write.c:1058 Fixes: a50e233c50db ("net-gro: restore frag0 optimization") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: syzbot <syzkaller@googlegroups.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
/*
 * Return the first NS RRset found in the reply, or NULL when none exists.
 * NOTE(review): the return type line is truncated in this dump -- it is
 * presumably the rrset pointer type; confirm against the full file.
 */
reply_get_NS_rrset(struct reply_info* rep)
{
	size_t idx;

	for (idx = 0; idx < rep->rrset_count; idx++) {
		if (rep->rrsets[idx]->rk.type == htons(LDNS_RR_TYPE_NS))
			return rep->rrsets[idx];
	}
	return NULL;
}
Safe
[ "CWE-400" ]
unbound
ba0f382eee814e56900a535778d13206b86b6d49
1.7909704540969328e+38
10
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming query into a large number of queries directed to a target. - CVE-2020-12663 Malformed answers from upstream name servers can be used to make Unbound unresponsive.
0
/**
 * Stream a half-open iterator range as "[ a, b, c ]" ("[]" when empty).
 *
 * @param m      output stream to write into
 * @param begin  first element of the range
 * @param end    one past the last element (random access is required,
 *               since `end - 1` locates the last element)
 * @return the stream, for chaining
 *
 * Fix: the dumped snippet lost the `template` header, leaving `Iterator`
 * undeclared and `ostream` unqualified; both are restored here.
 */
template <typename Iterator>
std::ostream& print_array(std::ostream& m, Iterator begin, Iterator end) {
  if (begin == end) {
    m << "[";
  } else {
    auto beforelast = end - 1;
    m << "[ ";
    for (auto i = begin; i != end; ++i) {
      m << *i;
      if (i != beforelast) {
        m << ", ";
      } else {
        m << " ";     // space before the closing bracket
      }
    }
  }
  m << "]";
  return m;
}
Safe
[ "CWE-617" ]
ceph
b3118cabb8060a8cc6a01c4e8264cb18e7b1745a
1.2040378565873864e+38
18
rgw: Remove assertions in IAM Policy A couple of them could be triggered by user input. Signed-off-by: Adam C. Emerson <aemerson@redhat.com>
0
/*
 * Die-notifier callback: on an oops, dump the ftrace buffers when the
 * ftrace_dump_on_oops knob is enabled; every other die event is ignored.
 * Always lets the notifier chain continue (NOTIFY_OK).
 */
static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_OOPS && ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_OK;
}
Safe
[ "CWE-415" ]
linux
4397f04575c44e1440ec2e49b6302785c95fd2f8
1.6058328121101418e+38
14
tracing: Fix possible double free on failure of allocating trace buffer Jing Xia and Chunyan Zhang reported that on failing to allocate part of the tracing buffer, memory is freed, but the pointers that point to them are not initialized back to NULL, and later paths may try to free the freed memory again. Jing and Chunyan fixed one of the locations that does this, but missed a spot. Link: http://lkml.kernel.org/r/20171226071253.8968-1-chunyan.zhang@spreadtrum.com Cc: stable@vger.kernel.org Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code") Reported-by: Jing Xia <jing.xia@spreadtrum.com> Reported-by: Chunyan Zhang <chunyan.zhang@spreadtrum.com> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
0
// Process one fully-parsed HTTP request on this connection: validate it,
// hand off protocol upgrades, run the global middleware chain and the
// router, then complete the response.  Invalid requests (HTTP/1.1 without
// a Host header) short-circuit to a 400.
void handle()
{
    // TODO(EDev): cancel_deadline_timer should be looked into, it might be a good idea to add it to handle_url() and then restart the timer once everything passes
    cancel_deadline_timer();
    bool is_invalid_request = false;
    add_keep_alive_ = false;

    req_.remote_ip_address = adaptor_.remote_endpoint().address().to_string();

    // carry the parser's keep-alive / close decisions onto the connection
    add_keep_alive_ = req_.keep_alive;
    close_connection_ = req_.close_connection;

    if (req_.check_version(1, 1)) // HTTP/1.1
    {
        if (!req_.headers.count("host"))
        {
            // HTTP/1.1 requires a Host header
            is_invalid_request = true;
            res = response(400);
        }
        else if (req_.upgrade)
        {
            // h2 or h2c headers
            if (req_.get_header_value("upgrade").substr(0, 2) == "h2")
            {
                // TODO(ipkn): HTTP/2
                // currently, ignore upgrade header
            }
            else
            {
                // non-h2 upgrade (e.g. websocket): hand the socket over and
                // stop normal request processing for this connection
                close_connection_ = true;
                handler_->handle_upgrade(req_, res, std::move(adaptor_));
                return;
            }
        }
    }

    CROW_LOG_INFO << "Request: " << utility::lexical_cast<std::string>(adaptor_.remote_endpoint()) << " " << this << " HTTP/" << (char)(req_.http_ver_major + '0') << "." << (char)(req_.http_ver_minor + '0') << ' ' << method_name(req_.method) << " " << req_.url;

    need_to_call_after_handlers_ = false;
    if (!is_invalid_request)
    {
        res.complete_request_handler_ = [] {};
        res.is_alive_helper_ = [this]() -> bool {
            return adaptor_.is_open();
        };

        // fresh middleware context for this request, exposed via req_
        ctx_ = detail::context<Middlewares...>();
        req_.middleware_context = static_cast<void*>(&ctx_);
        req_.middleware_container = static_cast<void*>(middlewares_);
        req_.io_service = &adaptor_.get_io_service();

        // run the global ("before") middleware chain
        detail::middleware_call_helper<detail::middleware_call_criteria_only_global,
                                       0, decltype(ctx_), decltype(*middlewares_)>({}, *middlewares_, req_, res, ctx_);

        if (!res.completed_)
        {
            // defer completion until the handler finishes (possibly async)
            res.complete_request_handler_ = [this] {
                this->complete_request();
            };
            need_to_call_after_handlers_ = true;
            handler_->handle(req_, res, routing_handle_result_);
            if (add_keep_alive_)
                res.set_header("connection", "Keep-Alive");
        }
        else
        {
            complete_request();
        }
    }
    else
    {
        complete_request();
    }
}
Safe
[ "CWE-416" ]
Crow
fba01dc76d6ea940ad7c8392e8f39f9647241d8e
2.188704253807719e+38
75
Prevent HTTP pipelining which Crow doesn't support.
0
// Validate the regex pattern and flags at construction time.  Checks are
// ordered so the most specific error fires first: pattern length, embedded
// NUL bytes in pattern and flags, then UTF-8 well-formedness.  Each failed
// check throws via uassert with a BadValue (or numeric) error code.
void RegexMatchExpression::_init() {
    uassert(
        ErrorCodes::BadValue, "Regular expression is too long", _regex.size() <= kMaxPatternSize);

    uassert(ErrorCodes::BadValue,
            "Regular expression cannot contain an embedded null byte",
            _regex.find('\0') == std::string::npos);

    uassert(ErrorCodes::BadValue,
            "Regular expression options string cannot contain an embedded null byte",
            _flags.find('\0') == std::string::npos);

    // isValidUTF8() checks for UTF-8 which does not map to a series of codepoints but does not
    // check the validity of the code points themselves. These situations do not cause problems
    // downstream so we do not do additional work to enforce that the code points are valid.
    uassert(
        5108300, "Regular expression is invalid UTF-8", isValidUTF8(_regex) && isValidUTF8(_flags));
}
Safe
[]
mongo
64095239f41e9f3841d8be9088347db56d35c891
2.9641770404940693e+38
18
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
0
/*
 * make_parm_name() - convert a display label into a parameter name by
 * replacing spaces with underscores.
 *
 * Returns a pointer to a static buffer that is overwritten on each call
 * (not thread-safe, matching the original contract).
 *
 * Fix: the original copied the label unbounded into the 1024-byte static
 * buffer; labels of 1024+ characters overflowed it.  The copy is now
 * bounded, truncating over-long labels while always NUL-terminating.
 */
static char *make_parm_name(const char *label)
{
	static char parmname[1024];
	char *p = parmname;
	/* leave room for the terminating NUL */
	const char *end = parmname + sizeof(parmname) - 1;

	while (*label && p < end) {
		*p++ = (*label == ' ') ? '_' : *label;
		++label;
	}
	*p = '\0';
	return parmname;
}
Safe
[]
samba
71225948a249f079120282740fcc39fd6faa880e
2.0804653702000574e+38
13
swat: Use X-Frame-Options header to avoid clickjacking Jann Horn reported a potential clickjacking vulnerability in SWAT where the SWAT page could be embedded into an attacker's page using a frame or iframe and then used to trick the user to change Samba settings. Avoid this by telling the browser to refuse the frame embedding via the X-Frame-Options: DENY header. Signed-off-by: Kai Blin <kai@samba.org> Fix bug #9576 - CVE-2013-0213: Clickjacking issue in SWAT.
0
/*
 * decode_fsinfo - decode an NFSv4 GETATTR reply carrying filesystem
 * information into @fsinfo.
 *
 * Parses the op header, attribute bitmap and length, then the individual
 * attributes in bitmap order: lease time, max file size, max read/write
 * sizes (preferred sizes default to the maxima), time delta, pNFS layout
 * type and layout block size.  rtmult/wtmult are hardwired to 512 (the
 * protocol supplies no value -- see the "???" note).  Finally the consumed
 * length is verified against the advertised attribute length.
 * Returns 0 or a negative XDR/decode error.
 */
static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
{
	__be32 *savep;
	uint32_t attrlen, bitmap[3];
	int status;

	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
		goto xdr_error;
	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
		goto xdr_error;
	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
		goto xdr_error;

	fsinfo->rtmult = fsinfo->wtmult = 512;	/* ??? */

	if ((status = decode_attr_lease_time(xdr, bitmap, &fsinfo->lease_time)) != 0)
		goto xdr_error;
	if ((status = decode_attr_maxfilesize(xdr, bitmap, &fsinfo->maxfilesize)) != 0)
		goto xdr_error;
	if ((status = decode_attr_maxread(xdr, bitmap, &fsinfo->rtmax)) != 0)
		goto xdr_error;
	fsinfo->rtpref = fsinfo->dtpref = fsinfo->rtmax;
	if ((status = decode_attr_maxwrite(xdr, bitmap, &fsinfo->wtmax)) != 0)
		goto xdr_error;
	fsinfo->wtpref = fsinfo->wtmax;
	status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta);
	if (status != 0)
		goto xdr_error;
	status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype);
	if (status != 0)
		goto xdr_error;
	status = decode_attr_layout_blksize(xdr, bitmap, &fsinfo->blksize);
	if (status)
		goto xdr_error;

	status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
	dprintk("%s: xdr returned %d!\n", __func__, -status);
	return status;
}
Safe
[ "CWE-703", "CWE-189" ]
linux
bf118a342f10dafe44b14451a1392c3254629a1f
1.8217831457531117e+38
40
NFSv4: include bitmap in nfsv4 get acl data The NFSv4 bitmap size is unbounded: a server can return an arbitrary sized bitmap in an FATTR4_WORD0_ACL request. Replace using the nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data xdr length to the (cached) acl page data. This is a general solution to commit e5012d1f "NFSv4.1: update nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead when getting ACLs. Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved. Cc: stable@kernel.org Signed-off-by: Andy Adamson <andros@netapp.com> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
0
dwarf_formsig8(Dwarf_Attribute attr, Dwarf_Sig8 * returned_sig_bytes, Dwarf_Error* error) { int res = _dwarf_formsig8_internal(attr, DW_FORM_ref_sig8, returned_sig_bytes,error); return res; }
Safe
[ "CWE-703", "CWE-125" ]
libdwarf-code
7ef09e1fc9ba07653dd078edb2408631c7969162
3.149003510578413e+38
8
Fixes old bug(which could result in Denial of Service) due to a missing check before reading the 8 bytes of a DW_FORM_ref_sig8. DW202206-001 modified: src/lib/libdwarf/dwarf_form.c
0
// Verifies that trailers added by a filter inside its own trailers callback
// are NOT visible to subsequent filters in the chain, on both the decode
// (request) and encode (response) paths: each downstream trailers callback
// asserts that the "foo" trailer is absent.
TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) {
  InSequence s;
  setup(false, "");

  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
    RequestHeaderMapPtr headers{
        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
    decoder->decodeHeaders(std::move(headers), false);

    Buffer::OwnedImpl fake_data("hello");
    decoder->decodeData(fake_data, false);

    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"bazzz", "bar"}}};
    decoder->decodeTrailers(std::move(trailers));
    return Http::okStatus();
  }));

  setupFilterChain(2, 2);

  Http::LowerCaseString trailer_key("foo");
  std::string trailers_data("trailers");
  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
      .WillOnce(Return(FilterHeadersStatus::StopIteration));
  EXPECT_CALL(*decoder_filters_[0], decodeData(_, false))
      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));
  EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))
      .WillOnce(Return(FilterTrailersStatus::Continue));
  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
      .WillOnce(Return(FilterHeadersStatus::StopIteration));
  EXPECT_CALL(*decoder_filters_[1], decodeData(_, false))
      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));
  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_))
      .WillOnce(Invoke([&](Http::HeaderMap& trailers) -> FilterTrailersStatus {
        // second decode filter must not see the "foo" trailer
        Http::LowerCaseString key("foo");
        EXPECT_EQ(trailers.get(key), nullptr);
        return FilterTrailersStatus::Continue;
      }));
  EXPECT_CALL(*decoder_filters_[1], decodeComplete());

  // Kick off the incoming data.
  Buffer::OwnedImpl fake_input("1234");
  conn_manager_->onData(fake_input, false);

  // set up encodeHeaders expectations
  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))
      .WillOnce(Return(FilterHeadersStatus::Continue));
  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
      .WillOnce(Return(FilterHeadersStatus::Continue));
  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));

  // invoke encodeHeaders
  decoder_filters_[0]->callbacks_->encodeHeaders(
      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false);

  // set up encodeData expectations
  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))
      .WillOnce(Return(FilterDataStatus::Continue));
  EXPECT_CALL(*encoder_filters_[0], encodeData(_, false))
      .WillOnce(Return(FilterDataStatus::Continue));
  EXPECT_CALL(response_encoder_, encodeData(_, false));

  // invoke encodeData
  Buffer::OwnedImpl response_body("response");
  decoder_filters_[0]->callbacks_->encodeData(response_body, false);

  // set up encodeTrailer expectations
  EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_))
      .WillOnce(Return(FilterTrailersStatus::Continue));
  EXPECT_CALL(*encoder_filters_[1], encodeComplete());
  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_))
      .WillOnce(Invoke([&](Http::HeaderMap& trailers) -> FilterTrailersStatus {
        // assert that the trailers set in the previous filter was ignored
        Http::LowerCaseString key("foo");
        EXPECT_EQ(trailers.get(key), nullptr);
        return FilterTrailersStatus::Continue;
      }));
  EXPECT_CALL(*encoder_filters_[0], encodeComplete());
  EXPECT_CALL(response_encoder_, encodeTrailers(_));
  expectOnDestroy();

  // invoke encodeTrailers
  decoder_filters_[0]->callbacks_->encodeTrailers(
      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}});
}
Safe
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
2.4822609323850954e+38
86
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <mklein@lyft.com>
0
// Read a plane from the stream as four coefficients (a, b, c, d) of
// a*x + b*y + c*z + d = 0, parsed in kernel K's ring type and used to
// construct an exact-kernel EK plane.
// NOTE(review): the enclosing template/class header declaring EK and K is
// outside this chunk -- presumably CGAL kernel parameters; confirm against
// the full file.  No stream-state check after the reads -- assumes trusted,
// well-formed input; TODO confirm callers validate `in`.
typename EK::Plane_3 read_plane(std::istream& in)
{
    typename K::RT a, b, c, d;
    in >> a >> b >> c >> d;
    return typename EK::Plane_3(a, b, c, d);
}
Safe
[ "CWE-125" ]
cgal
5a1ab45058112f8647c14c02f58905ecc597ec76
8.032319929890864e+37
5
Fix Nef_3
0
static void lan9118_mac_changed(lan9118_state *s) { qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); }
Safe
[ "CWE-835" ]
qemu
37cee01784ff0df13e5209517e1b3594a5e792d1
2.880709962502461e+37
4
lan9118: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com Signed-off-by: Alexander Bulekov <alxndr@bu.edu> Signed-off-by: Jason Wang <jasowang@redhat.com>
0
static inline Quantum ScaleAnyToQuantum(const QuantumAny quantum, const QuantumAny range) { if (quantum > range) return(QuantumRange); #if !defined(MAGICKCORE_HDRI_SUPPORT) return((Quantum) (((double) QuantumRange*quantum)* PerceptibleReciprocal((double) range)+0.5)); #else return((Quantum) (((double) QuantumRange*quantum)* PerceptibleReciprocal((double) range))); #endif }
Safe
[ "CWE-190" ]
ImageMagick
f60d59cc3a7e3402d403361e0985ffa56f746a82
2.8843006362379312e+38
13
https://github.com/ImageMagick/ImageMagick/issues/1727
0
ms_escher_read_BSE (MSEscherState *state, MSEscherHeader *h) { /* read the header */ gboolean needs_free; guint8 const * data = ms_escher_get_data (state, h->offset + COMMON_HEADER_LEN, 36, &needs_free); guint8 const win_type = GSF_LE_GET_GUINT8 (data + 0); guint8 const mac_type = GSF_LE_GET_GUINT8 (data + 1); /*guint16 const tag = GSF_LE_GET_GUINT16 (data + 18);*/ guint32 const size = GSF_LE_GET_GUINT32 (data + 20); guint32 const ref_count = GSF_LE_GET_GUINT32 (data + 24); gint32 const del_offset = GSF_LE_GET_GUINT32 (data + 28); guint8 const is_texture = GSF_LE_GET_GUINT8 (data + 32); guint8 const name_len = GSF_LE_GET_GUINT8 (data + 33); guint8 checksum[16]; /* RSA Data Security, Inc. MD4 Message-Digest Algorithm */ char const *name = "unknown"; int i; for (i = 16; i-- > 0;) checksum[i] = GSF_LE_GET_GUINT8 (data + 2 + i); d (0 , { g_printerr ("Win type = %s;\n", bliptype_name (win_type)); g_printerr ("Mac type = %s;\n", bliptype_name (mac_type)); g_printerr ("Size = 0x%x(=%d) RefCount = 0x%x DelayOffset = 0x%x '%s';\n", size, size, ref_count, del_offset, name); switch (is_texture) { case 0: g_printerr ("Default usage;\n"); break; case 1: g_printerr ("Is texture;\n"); break; default:g_printerr ("UNKNOWN USAGE : %d;\n", is_texture); } g_printerr ("Checksum = 0x"); for (i = 0; i < 16; ++i) g_printerr ("%02x", checksum[i]); g_printerr (";\n"); }); /* Very red herring I think */ if (name_len != 0) { g_printerr ("WARNING : Maybe a name?\n"); /* name = biff_get_text (data+36, name_len, &txt_byte_len); */ } /* Ignore empties */ if (h->len > 36 + COMMON_HEADER_LEN) return ms_escher_read_container (state, h, 36, FALSE); /* Store a blank */ ms_container_add_blip (state->container, NULL); return FALSE; }
Safe
[ "CWE-119" ]
gnumeric
b5480b69345b3c6d56ee0ed9c9e9880bb2a08cdc
2.1574092674144708e+38
52
xls: fuzzed file crash.
0
static void php_image_filter_brightness(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; gdImagePtr im_src; zend_long brightness, tmp; if (zend_parse_parameters(ZEND_NUM_ARGS(), "zll", &SIM, &tmp, &brightness) == FAILURE) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (im_src == NULL) { RETURN_FALSE; } if (gdImageBrightness(im_src, (int)brightness) == 1) { RETURN_TRUE; } RETURN_FALSE; }
Safe
[ "CWE-787" ]
php-src
28022c9b1fd937436ab67bb3d61f652c108baf96
1.3722073245423714e+38
24
Fix bug#72697 - select_colors write out-of-bounds (cherry picked from commit b6f13a5ef9d6280cf984826a5de012a32c396cd4) Conflicts: ext/gd/gd.c
0
int test_gf2m_mod_mul(BIO *bp, BN_CTX *ctx) { BIGNUM *a, *b[2], *c, *d, *e, *f, *g, *h; int i, j, ret = 0; int p0[] = { 163, 7, 6, 3, 0, -1 }; int p1[] = { 193, 15, 0, -1 }; a = BN_new(); b[0] = BN_new(); b[1] = BN_new(); c = BN_new(); d = BN_new(); e = BN_new(); f = BN_new(); g = BN_new(); h = BN_new(); BN_GF2m_arr2poly(p0, b[0]); BN_GF2m_arr2poly(p1, b[1]); for (i = 0; i < num0; i++) { BN_bntest_rand(a, 1024, 0, 0); BN_bntest_rand(c, 1024, 0, 0); BN_bntest_rand(d, 1024, 0, 0); for (j = 0; j < 2; j++) { BN_GF2m_mod_mul(e, a, c, b[j], ctx); # if 0 /* make test uses ouput in bc but bc can't * handle GF(2^m) arithmetic */ if (bp != NULL) { if (!results) { BN_print(bp, a); BIO_puts(bp, " * "); BN_print(bp, c); BIO_puts(bp, " % "); BN_print(bp, b[j]); BIO_puts(bp, " - "); BN_print(bp, e); BIO_puts(bp, "\n"); } } # endif BN_GF2m_add(f, a, d); BN_GF2m_mod_mul(g, f, c, b[j], ctx); BN_GF2m_mod_mul(h, d, c, b[j], ctx); BN_GF2m_add(f, e, g); BN_GF2m_add(f, f, h); /* Test that (a+d)*c = a*c + d*c. */ if (!BN_is_zero(f)) { fprintf(stderr, "GF(2^m) modular multiplication test failed!\n"); goto err; } } } ret = 1; err: BN_free(a); BN_free(b[0]); BN_free(b[1]); BN_free(c); BN_free(d); BN_free(e); BN_free(f); BN_free(g); BN_free(h); return ret; }
Safe
[ "CWE-200" ]
openssl
d73cc256c8e256c32ed959456101b73ba9842f72
3.1467243459495212e+38
67
bn/asm/x86_64-mont5.pl: fix carry propagating bug (CVE-2015-3193). Reviewed-by: Richard Levitte <levitte@openssl.org> (cherry picked from commit e7c078db57908cbf16074c68034977565ffaf107)
0
static void kdb_md_line(const char *fmtstr, unsigned long addr, int symbolic, int nosect, int bytesperword, int num, int repeat, int phys) { /* print just one line of data */ kdb_symtab_t symtab; char cbuf[32]; char *c = cbuf; int i; int j; unsigned long word; memset(cbuf, '\0', sizeof(cbuf)); if (phys) kdb_printf("phys " kdb_machreg_fmt0 " ", addr); else kdb_printf(kdb_machreg_fmt0 " ", addr); for (i = 0; i < num && repeat--; i++) { if (phys) { if (kdb_getphysword(&word, addr, bytesperword)) break; } else if (kdb_getword(&word, addr, bytesperword)) break; kdb_printf(fmtstr, word); if (symbolic) kdbnearsym(word, &symtab); else memset(&symtab, 0, sizeof(symtab)); if (symtab.sym_name) { kdb_symbol_print(word, &symtab, 0); if (!nosect) { kdb_printf("\n"); kdb_printf(" %s %s " kdb_machreg_fmt " " kdb_machreg_fmt " " kdb_machreg_fmt, symtab.mod_name, symtab.sec_name, symtab.sec_start, symtab.sym_start, symtab.sym_end); } addr += bytesperword; } else { union { u64 word; unsigned char c[8]; } wc; unsigned char *cp; #ifdef __BIG_ENDIAN cp = wc.c + 8 - bytesperword; #else cp = wc.c; #endif wc.word = word; #define printable_char(c) \ ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; }) for (j = 0; j < bytesperword; j++) *c++ = printable_char(*cp++); addr += bytesperword; #undef printable_char } } kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1), " ", cbuf); }
Safe
[ "CWE-787" ]
linux
eadb2f47a3ced5c64b23b90fd2a3463f63726066
4.660593593505474e+37
64
lockdown: also lock down previous kgdb use KGDB and KDB allow read and write access to kernel memory, and thus should be restricted during lockdown. An attacker with access to a serial port (for example, via a hypervisor console, which some cloud vendors provide over the network) could trigger the debugger so it is important that the debugger respect the lockdown mode when/if it is triggered. Fix this by integrating lockdown into kdb's existing permissions mechanism. Unfortunately kgdb does not have any permissions mechanism (although it certainly could be added later) so, for now, kgdb is simply and brutally disabled by immediately exiting the gdb stub without taking any action. For lockdowns established early in the boot (e.g. the normal case) then this should be fine but on systems where kgdb has set breakpoints before the lockdown is enacted than "bad things" will happen. CVE: CVE-2022-21499 Co-developed-by: Stephen Brennan <stephen.s.brennan@oracle.com> Signed-off-by: Stephen Brennan <stephen.s.brennan@oracle.com> Reviewed-by: Douglas Anderson <dianders@chromium.org> Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi) { struct h2c_cmd h2c; u8 bw = 0; memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.b_macid_cfg.cmd = H2C_8723B_MACID_CFG_RAID; h2c.b_macid_cfg.ramask0 = ramask & 0xff; h2c.b_macid_cfg.ramask1 = (ramask >> 8) & 0xff; h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff; h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff; h2c.ramask.arg = 0x80; h2c.b_macid_cfg.data1 = 0; if (sgi) h2c.b_macid_cfg.data1 |= BIT(7); h2c.b_macid_cfg.data2 = bw; dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n", __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg)); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg)); }
Safe
[ "CWE-400", "CWE-401" ]
linux
a2cdd07488e666aa93a49a3fc9c9b1299e27ef3c
3.3971463689849934e+38
25
rtl8xxxu: prevent leaking urb In rtl8xxxu_submit_int_urb if usb_submit_urb fails the allocated urb should be released. Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com> Reviewed-by: Chris Chiu <chiu@endlessm.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
0
void DoRealForwardFFT(OpKernelContext* ctx, uint64* fft_shape, const Tensor& in, Tensor* out) { // Create the axes (which are always trailing). const auto axes = Eigen::ArrayXi::LinSpaced(FFTRank, 1, FFTRank); auto device = ctx->eigen_device<CPUDevice>(); auto input = Tensor(in).flat_inner_dims<RealT, FFTRank + 1>(); const auto input_dims = input.dimensions(); // Slice input to fft_shape on its inner-most dimensions. Eigen::DSizes<Eigen::DenseIndex, FFTRank + 1> input_slice_sizes; input_slice_sizes[0] = input_dims[0]; TensorShape temp_shape{input_dims[0]}; for (int i = 1; i <= FFTRank; ++i) { input_slice_sizes[i] = fft_shape[i - 1]; temp_shape.AddDim(fft_shape[i - 1]); } auto output = out->flat_inner_dims<ComplexT, FFTRank + 1>(); const Eigen::DSizes<Eigen::DenseIndex, FFTRank + 1> zero_start_indices; // Compute the full FFT using a temporary tensor. Tensor temp; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<ComplexT>::v(), temp_shape, &temp)); auto full_fft = temp.flat_inner_dims<ComplexT, FFTRank + 1>(); full_fft.device(device) = input.slice(zero_start_indices, input_slice_sizes) .template fft<Eigen::BothParts, Eigen::FFT_FORWARD>(axes); // Slice away the negative frequency components. output.device(device) = full_fft.slice(zero_start_indices, output.dimensions()); }
Vulnerable
[ "CWE-617", "CWE-703" ]
tensorflow
31bd5026304677faa8a0b77602c6154171b9aec1
2.8099251676123944e+38
33
Prevent check fail in FFT PiperOrigin-RevId: 372031044 Change-Id: I50994e3e8a5d1342d01bde80256f6bf2730ca299
1
static int __bprm_mm_init(struct linux_binprm *bprm) { bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *); return 0; }
Safe
[ "CWE-200" ]
linux-2.6
b66c5984017533316fd1951770302649baf1aa33
2.579344212664391e+38
5
exec: do not leave bprm->interp on stack If a series of scripts are executed, each triggering module loading via unprintable bytes in the script header, kernel stack contents can leak into the command line. Normally execution of binfmt_script and binfmt_misc happens recursively. However, when modules are enabled, and unprintable bytes exist in the bprm->buf, execution will restart after attempting to load matching binfmt modules. Unfortunately, the logic in binfmt_script and binfmt_misc does not expect to get restarted. They leave bprm->interp pointing to their local stack. This means on restart bprm->interp is left pointing into unused stack memory which can then be copied into the userspace argv areas. After additional study, it seems that both recursion and restart remains the desirable way to handle exec with scripts, misc, and modules. As such, we need to protect the changes to interp. This changes the logic to require allocation for any changes to the bprm->interp. To avoid adding a new kmalloc to every exec, the default value is left as-is. Only when passing through binfmt_script or binfmt_misc does an allocation take place. For a proof of concept, see DoTest.sh from: http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/ Signed-off-by: Kees Cook <keescook@chromium.org> Cc: halfdog <me@halfdog.net> Cc: P J P <ppandit@redhat.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
intrusive_ptr<Expression> ExpressionAnd::optimize() { /* optimize the conjunction as much as possible */ intrusive_ptr<Expression> pE(ExpressionNary::optimize()); /* if the result isn't a conjunction, we can't do anything */ ExpressionAnd* pAnd = dynamic_cast<ExpressionAnd*>(pE.get()); if (!pAnd) return pE; /* Check the last argument on the result; if it's not constant (as promised by ExpressionNary::optimize(),) then there's nothing we can do. */ const size_t n = pAnd->vpOperand.size(); // ExpressionNary::optimize() generates an ExpressionConstant for {$and:[]}. verify(n > 0); intrusive_ptr<Expression> pLast(pAnd->vpOperand[n - 1]); const ExpressionConstant* pConst = dynamic_cast<ExpressionConstant*>(pLast.get()); if (!pConst) return pE; /* Evaluate and coerce the last argument to a boolean. If it's false, then we can replace this entire expression. */ bool last = pConst->getValue().coerceToBool(); if (!last) { intrusive_ptr<ExpressionConstant> pFinal( ExpressionConstant::create(getExpressionContext(), Value(false))); return pFinal; } /* If we got here, the final operand was true, so we don't need it anymore. If there was only one other operand, we don't need the conjunction either. Note we still need to keep the promise that the result will be a boolean. */ if (n == 2) { intrusive_ptr<Expression> pFinal( ExpressionCoerceToBool::create(getExpressionContext(), pAnd->vpOperand[0])); return pFinal; } /* Remove the final "true" value, and return the new expression. CW TODO: Note that because of any implicit conversions, we may need to apply an implicit boolean conversion. */ pAnd->vpOperand.resize(n - 1); return pE; }
Safe
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
2.1581130811484014e+38
55
SERVER-38070 fix infinite loop in agg expression
0
static char *block_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid) { struct gendisk *disk = dev_to_disk(dev); if (disk->devnode) return disk->devnode(disk, mode); return NULL; }
Safe
[ "CWE-416" ]
linux-stable
77da160530dd1dc94f6ae15a981f24e5f0021e84
4.0629120347357285e+37
9
block: fix use-after-free in seq file I got a KASAN report of use-after-free: ================================================================== BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508 Read of size 8 by task trinity-c1/315 ============================================================================= BUG kmalloc-32 (Not tainted): kasan: bad access detected ----------------------------------------------------------------------------- Disabling lock debugging due to kernel taint INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315 ___slab_alloc+0x4f1/0x520 __slab_alloc.isra.58+0x56/0x80 kmem_cache_alloc_trace+0x260/0x2a0 disk_seqf_start+0x66/0x110 traverse+0x176/0x860 seq_read+0x7e3/0x11a0 proc_reg_read+0xbc/0x180 do_loop_readv_writev+0x134/0x210 do_readv_writev+0x565/0x660 vfs_readv+0x67/0xa0 do_preadv+0x126/0x170 SyS_preadv+0xc/0x10 do_syscall_64+0x1a1/0x460 return_from_SYSCALL_64+0x0/0x6a INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315 __slab_free+0x17a/0x2c0 kfree+0x20a/0x220 disk_seqf_stop+0x42/0x50 traverse+0x3b5/0x860 seq_read+0x7e3/0x11a0 proc_reg_read+0xbc/0x180 do_loop_readv_writev+0x134/0x210 do_readv_writev+0x565/0x660 vfs_readv+0x67/0xa0 do_preadv+0x126/0x170 SyS_preadv+0xc/0x10 do_syscall_64+0x1a1/0x460 return_from_SYSCALL_64+0x0/0x6a CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480 ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480 ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970 Call Trace: [<ffffffff81d6ce81>] dump_stack+0x65/0x84 [<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0 [<ffffffff814704ff>] object_err+0x2f/0x40 [<ffffffff814754d1>] kasan_report_error+0x221/0x520 [<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40 [<ffffffff83888161>] klist_iter_exit+0x61/0x70 
[<ffffffff82404389>] class_dev_iter_exit+0x9/0x10 [<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50 [<ffffffff8151f812>] seq_read+0x4b2/0x11a0 [<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180 [<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210 [<ffffffff814b4c45>] do_readv_writev+0x565/0x660 [<ffffffff814b8a17>] vfs_readv+0x67/0xa0 [<ffffffff814b8de6>] do_preadv+0x126/0x170 [<ffffffff814b92ec>] SyS_preadv+0xc/0x10 This problem can occur in the following situation: open() - pread() - .seq_start() - iter = kmalloc() // succeeds - seqf->private = iter - .seq_stop() - kfree(seqf->private) - pread() - .seq_start() - iter = kmalloc() // fails - .seq_stop() - class_dev_iter_exit(seqf->private) // boom! old pointer As the comment in disk_seqf_stop() says, stop is called even if start failed, so we need to reinitialise the private pointer to NULL when seq iteration stops. An alternative would be to set the private pointer to NULL when the kmalloc() in disk_seqf_start() fails. Cc: stable@vger.kernel.org Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com> Acked-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@fb.com>
0
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) { struct nfs_client *clp = server->nfs_client; struct nfs4_state *state = exception->state; struct inode *inode = exception->inode; int ret = errorcode; exception->retry = 0; switch(errorcode) { case 0: return 0; case -NFS4ERR_OPENMODE: if (nfs_have_delegation(inode, FMODE_READ)) { nfs_inode_return_delegation(inode); exception->retry = 1; return 0; } if (state == NULL) break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: if (state != NULL) nfs_remove_bad_delegation(state->inode); if (state == NULL) break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; case -NFS4ERR_EXPIRED: if (state != NULL) nfs4_schedule_stateid_recovery(server, state); case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_CLIENTID: nfs4_schedule_lease_recovery(clp); goto wait_on_recovery; #if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: case -NFS4ERR_SEQ_FALSE_RETRY: case -NFS4ERR_SEQ_MISORDERED: dprintk("%s ERROR: %d Reset session\n", __func__, errorcode); nfs4_schedule_session_recovery(clp->cl_session); exception->retry = 1; break; #endif /* defined(CONFIG_NFS_V4_1) */ case -NFS4ERR_FILE_OPEN: if (exception->timeout > HZ) { /* We have retried a decent amount, time to * fail */ ret = -EBUSY; break; } case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: case -EKEYEXPIRED: ret = nfs4_delay(server->client, &exception->timeout); if (ret != 0) break; case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_OLD_STATEID: exception->retry = 1; break; case -NFS4ERR_BADOWNER: /* The following works around a Linux server bug! 
*/ case -NFS4ERR_BADNAME: if (server->caps & NFS_CAP_UIDGID_NOMAP) { server->caps &= ~NFS_CAP_UIDGID_NOMAP; exception->retry = 1; printk(KERN_WARNING "NFS: v4 server %s " "does not accept raw " "uid/gids. " "Reenabling the idmapper.\n", server->nfs_client->cl_hostname); } } /* We failed to handle the error */ return nfs4_map_errors(ret); wait_on_recovery: ret = nfs4_wait_clnt_recover(clp); if (ret == 0) exception->retry = 1; return ret; }
Safe
[ "CWE-703", "CWE-189" ]
linux
20e0fa98b751facf9a1101edaefbc19c82616a68
2.5959504415114905e+37
90
Fix length of buffer copied in __nfs4_get_acl_uncached _copy_from_pages() used to copy data from the temporary buffer to the user passed buffer is passed the wrong size parameter when copying data. res.acl_len contains both the bitmap and acl lenghts while acl_len contains the acl length after adjusting for the bitmap size. Signed-off-by: Sachin Prabhu <sprabhu@redhat.com> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
0
static int __init xen_blkif_init(void) { int rc = 0; if (!xen_domain()) return -ENODEV; if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) { pr_info("Invalid max_ring_order (%d), will use default max: %d.\n", xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER); xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER; } if (xenblk_max_queues == 0) xenblk_max_queues = num_online_cpus(); rc = xen_blkif_interface_init(); if (rc) goto failed_init; rc = xen_blkif_xenbus_init(); if (rc) goto failed_init; failed_init: return rc; }
Safe
[ "CWE-200" ]
linux
089bc0143f489bd3a4578bdff5f4ca68fb26f341
3.269958582492933e+38
27
xen-blkback: don't leak stack data via response ring Rather than constructing a local structure instance on the stack, fill the fields directly on the shared ring, just like other backends do. Build on the fact that all response structure flavors are actually identical (the old code did make this assumption too). This is XSA-216. Cc: stable@vger.kernel.org Signed-off-by: Jan Beulich <jbeulich@suse.com> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
0
void intel_pmu_enable_bts(u64 config) { unsigned long debugctlmsr; debugctlmsr = get_debugctlmsr(); debugctlmsr |= DEBUGCTLMSR_TR; debugctlmsr |= DEBUGCTLMSR_BTS; if (config & ARCH_PERFMON_EVENTSEL_INT) debugctlmsr |= DEBUGCTLMSR_BTINT; if (!(config & ARCH_PERFMON_EVENTSEL_OS)) debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS; if (!(config & ARCH_PERFMON_EVENTSEL_USR)) debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR; update_debugctlmsr(debugctlmsr); }
Safe
[ "CWE-755" ]
linux
d88d05a9e0b6d9356e97129d4ff9942d765f46ea
2.101616335635452e+38
19
perf/x86/intel: Fix a crash caused by zero PEBS status A repeatable crash can be triggered by the perf_fuzzer on some Haswell system. https://lore.kernel.org/lkml/7170d3b-c17f-1ded-52aa-cc6d9ae999f4@maine.edu/ For some old CPUs (HSW and earlier), the PEBS status in a PEBS record may be mistakenly set to 0. To minimize the impact of the defect, the commit was introduced to try to avoid dropping the PEBS record for some cases. It adds a check in the intel_pmu_drain_pebs_nhm(), and updates the local pebs_status accordingly. However, it doesn't correct the PEBS status in the PEBS record, which may trigger the crash, especially for the large PEBS. It's possible that all the PEBS records in a large PEBS have the PEBS status 0. If so, the first get_next_pebs_record_by_bit() in the __intel_pmu_pebs_event() returns NULL. The at = NULL. Since it's a large PEBS, the 'count' parameter must > 1. The second get_next_pebs_record_by_bit() will crash. Besides the local pebs_status, correct the PEBS status in the PEBS record as well. Fixes: 01330d7288e0 ("perf/x86: Allow zero PEBS status with only single active event") Reported-by: Vince Weaver <vincent.weaver@maine.edu> Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Kan Liang <kan.liang@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/1615555298-140216-1-git-send-email-kan.liang@linux.intel.com
0
static int check_chain_extensions(X509_STORE_CTX *ctx) { #ifdef OPENSSL_NO_CHAIN_VERIFY return 1; #else int i, ok=0, must_be_ca, plen = 0; X509 *x; int (*cb)(int xok,X509_STORE_CTX *xctx); int proxy_path_length = 0; int purpose; int allow_proxy_certs; cb=ctx->verify_cb; /* must_be_ca can have 1 of 3 values: -1: we accept both CA and non-CA certificates, to allow direct use of self-signed certificates (which are marked as CA). 0: we only accept non-CA certificates. This is currently not used, but the possibility is present for future extensions. 1: we only accept CA certificates. This is currently used for all certificates in the chain except the leaf certificate. */ must_be_ca = -1; /* CRL path validation */ if (ctx->parent) { allow_proxy_certs = 0; purpose = X509_PURPOSE_CRL_SIGN; } else { allow_proxy_certs = !!(ctx->param->flags & X509_V_FLAG_ALLOW_PROXY_CERTS); /* A hack to keep people who don't want to modify their software happy */ if (getenv("OPENSSL_ALLOW_PROXY_CERTS")) allow_proxy_certs = 1; purpose = ctx->param->purpose; } /* Check all untrusted certificates */ for (i = 0; i < ctx->last_untrusted; i++) { int ret; x = sk_X509_value(ctx->chain, i); if (!(ctx->param->flags & X509_V_FLAG_IGNORE_CRITICAL) && (x->ex_flags & EXFLAG_CRITICAL)) { ctx->error = X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION; ctx->error_depth = i; ctx->current_cert = x; ok=cb(0,ctx); if (!ok) goto end; } if (!allow_proxy_certs && (x->ex_flags & EXFLAG_PROXY)) { ctx->error = X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED; ctx->error_depth = i; ctx->current_cert = x; ok=cb(0,ctx); if (!ok) goto end; } ret = X509_check_ca(x); switch(must_be_ca) { case -1: if ((ctx->param->flags & X509_V_FLAG_X509_STRICT) && (ret != 1) && (ret != 0)) { ret = 0; ctx->error = X509_V_ERR_INVALID_CA; } else ret = 1; break; case 0: if (ret != 0) { ret = 0; ctx->error = X509_V_ERR_INVALID_NON_CA; } else ret = 1; break; default: if ((ret == 0) || ((ctx->param->flags & X509_V_FLAG_X509_STRICT) && (ret != 1))) { ret = 0; 
ctx->error = X509_V_ERR_INVALID_CA; } else ret = 1; break; } if (ret == 0) { ctx->error_depth = i; ctx->current_cert = x; ok=cb(0,ctx); if (!ok) goto end; } if (ctx->param->purpose > 0) { ret = X509_check_purpose(x, purpose, must_be_ca > 0); if ((ret == 0) || ((ctx->param->flags & X509_V_FLAG_X509_STRICT) && (ret != 1))) { ctx->error = X509_V_ERR_INVALID_PURPOSE; ctx->error_depth = i; ctx->current_cert = x; ok=cb(0,ctx); if (!ok) goto end; } } /* Check pathlen if not self issued */ if ((i > 1) && !(x->ex_flags & EXFLAG_SI) && (x->ex_pathlen != -1) && (plen > (x->ex_pathlen + proxy_path_length + 1))) { ctx->error = X509_V_ERR_PATH_LENGTH_EXCEEDED; ctx->error_depth = i; ctx->current_cert = x; ok=cb(0,ctx); if (!ok) goto end; } /* Increment path length if not self issued */ if (!(x->ex_flags & EXFLAG_SI)) plen++; /* If this certificate is a proxy certificate, the next certificate must be another proxy certificate or a EE certificate. If not, the next certificate must be a CA certificate. */ if (x->ex_flags & EXFLAG_PROXY) { if (x->ex_pcpathlen != -1 && i > x->ex_pcpathlen) { ctx->error = X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED; ctx->error_depth = i; ctx->current_cert = x; ok=cb(0,ctx); if (!ok) goto end; } proxy_path_length++; must_be_ca = 0; } else must_be_ca = 1; } ok = 1; end: return ok; #endif }
Safe
[]
openssl
d65b8b2162f33ac0d53dace588a0847ed827626c
5.852652561964959e+37
157
Backport OCSP fixes.
0
sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *reply = NULL; SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS); ((struct sctp_association *)asoc)->shutdown_retries++; if (asoc->overall_error_count >= asoc->max_retrans) { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_NO_ERROR)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_DELETE_TCB; } switch (asoc->state) { case SCTP_STATE_SHUTDOWN_SENT: reply = sctp_make_shutdown(asoc, NULL); break; case SCTP_STATE_SHUTDOWN_ACK_SENT: reply = sctp_make_shutdown_ack(asoc, NULL); break; default: BUG(); break; } if (!reply) goto nomem; /* Do some failure management (Section 8.2). * If we remove the transport an SHUTDOWN was last sent to, don't * do failure management. */ if (asoc->shutdown_last_sent_to) sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(asoc->shutdown_last_sent_to)); /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for * the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); /* Restart the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; }
Safe
[]
linux
196d67593439b03088913227093e374235596e33
1.6121479432950556e+38
64
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call The current SCTP stack is lacking a mechanism to have per association statistics. This is an implementation modeled after OpenSolaris' SCTP_GET_ASSOC_STATS. Userspace part will follow on lksctp if/when there is a general ACK on this. V4: - Move ipackets++ before q->immediate.func() for consistency reasons - Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid returning bogus RTO values - return asoc->rto_min when max_obs_rto value has not changed V3: - Increase ictrlchunks in sctp_assoc_bh_rcv() as well - Move ipackets++ to sctp_inq_push() - return 0 when no rto updates took place since the last call V2: - Implement partial retrieval of stat struct to cope for future expansion - Kill the rtxpackets counter as it cannot be precise anyway - Rename outseqtsns to outofseqtsns to make it clearer that these are out of sequence unexpected TSNs - Move asoc->ipackets++ under a lock to avoid potential miscounts - Fold asoc->opackets++ into the already existing asoc check - Kill unneeded (q->asoc) test when increasing rtxchunks - Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0) - Don't count SHUTDOWNs as SACKs - Move SCTP_GET_ASSOC_STATS to the private space API - Adjust the len check in sctp_getsockopt_assoc_stats() to allow for future struct growth - Move association statistics in their own struct - Update idupchunks when we send a SACK with dup TSNs - return min_rto in max_rto when RTO has not changed. Also return the transport when max_rto last changed. Signed-off: Michele Baldessari <michele@acksyn.org> Acked-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
isdn_ppp_get_slot(void) { int i; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (!ippp_table[i]->state) return i; } return -1; }
Safe
[]
linux
4ab42d78e37a294ac7bc56901d563c642e03c4ae
9.95160061343232e+37
9
ppp, slip: Validate VJ compression slot parameters completely Currently slhc_init() treats out-of-range values of rslots and tslots as equivalent to 0, except that if tslots is too large it will dereference a null pointer (CVE-2015-7799). Add a range-check at the top of the function and make it return an ERR_PTR() on error instead of NULL. Change the callers accordingly. Compile-tested only. Reported-by: 郭永刚 <guoyonggang@360.cn> References: http://article.gmane.org/gmane.comp.security.oss.general/17908 Signed-off-by: Ben Hutchings <ben@decadent.org.uk> Signed-off-by: David S. Miller <davem@davemloft.net>
0
flatpak_dir_remote_fetch_summary (FlatpakDir *self, const char *name_or_uri, GBytes **out_summary, GBytes **out_summary_sig, GCancellable *cancellable, GError **error) { g_autofree char *url = NULL; gboolean is_local; g_autoptr(GError) local_error = NULL; g_autoptr(GBytes) summary = NULL; g_autoptr(GBytes) summary_sig = NULL; if (!ostree_repo_remote_get_url (self->repo, name_or_uri, &url, error)) return FALSE; if (!g_str_has_prefix (name_or_uri, "file:") && flatpak_dir_get_remote_disabled (self, name_or_uri)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_INVALID_DATA, "Can't fetch summary from disabled remote ‘%s’", name_or_uri); return FALSE; } is_local = g_str_has_prefix (url, "file:"); /* No caching for local files */ if (!is_local) { if (flatpak_dir_lookup_cached_summary (self, out_summary, out_summary_sig, name_or_uri, url)) return TRUE; } /* Seems ostree asserts if this is NULL */ if (error == NULL) error = &local_error; if (flatpak_dir_get_remote_oci (self, name_or_uri)) { if (!flatpak_dir_remote_make_oci_summary (self, name_or_uri, &summary, cancellable, error)) return FALSE; } else { g_debug ("Fetching summary file for remote ‘%s’", name_or_uri); if (!ostree_repo_remote_fetch_summary (self->repo, name_or_uri, &summary, &summary_sig, cancellable, error)) return FALSE; } if (summary == NULL) return flatpak_fail_error (error, FLATPAK_ERROR_INVALID_DATA, _("Remote listing for %s not available; server has no summary file. Check the URL passed to remote-add was valid."), name_or_uri); if (!is_local) flatpak_dir_cache_summary (self, summary, summary_sig, name_or_uri, url); *out_summary = g_steal_pointer (&summary); if (out_summary_sig) *out_summary_sig = g_steal_pointer (&summary_sig); return TRUE; }
Safe
[ "CWE-668" ]
flatpak
cd2142888fc4c199723a0dfca1f15ea8788a5483
1.9597531887882966e+38
67
Don't expose /proc when running apply_extra As shown by CVE-2019-5736, it is sometimes possible for the sandbox app to access outside files using /proc/self/exe. This is not typically an issue for flatpak as the sandbox runs as the user which has no permissions to e.g. modify the host files. However, when installing apps using extra-data into the system repo we *do* actually run a sandbox as root. So, in this case we disable mounting /proc in the sandbox, which will neuter attacks like this.
0
hook_remove_from_list (struct t_hook *hook) { struct t_hook *new_hooks; int type; type = hook->type; if (last_weechat_hook[hook->type] == hook) last_weechat_hook[hook->type] = hook->prev_hook; if (hook->prev_hook) { (hook->prev_hook)->next_hook = hook->next_hook; new_hooks = weechat_hooks[hook->type]; } else new_hooks = hook->next_hook; if (hook->next_hook) (hook->next_hook)->prev_hook = hook->prev_hook; free (hook); weechat_hooks[type] = new_hooks; }
Safe
[ "CWE-20" ]
weechat
c265cad1c95b84abfd4e8d861f25926ef13b5d91
2.9819082639356046e+38
24
Fix verification of SSL certificates by calling gnutls verify callback (patch #7459)
0
void __detach_mounts(struct dentry *dentry) { struct mountpoint *mp; struct mount *mnt; namespace_lock(); mp = lookup_mountpoint(dentry); if (IS_ERR_OR_NULL(mp)) goto out_unlock; lock_mount_hash(); event++; while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); if (mnt->mnt.mnt_flags & MNT_UMOUNT) { hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); umount_mnt(mnt); } else umount_tree(mnt, UMOUNT_CONNECTED); } unlock_mount_hash(); put_mountpoint(mp); out_unlock: namespace_unlock(); }
Safe
[ "CWE-400", "CWE-703" ]
linux
d29216842a85c7970c536108e093963f02714498
2.366472675509456e+38
25
mnt: Add a per mount namespace limit on the number of mounts CAI Qian <caiqian@redhat.com> pointed out that the semantics of shared subtrees make it possible to create an exponentially increasing number of mounts in a mount namespace. mkdir /tmp/1 /tmp/2 mount --make-rshared / for i in $(seq 1 20) ; do mount --bind /tmp/1 /tmp/2 ; done Will create create 2^20 or 1048576 mounts, which is a practical problem as some people have managed to hit this by accident. As such CVE-2016-6213 was assigned. Ian Kent <raven@themaw.net> described the situation for autofs users as follows: > The number of mounts for direct mount maps is usually not very large because of > the way they are implemented, large direct mount maps can have performance > problems. There can be anywhere from a few (likely case a few hundred) to less > than 10000, plus mounts that have been triggered and not yet expired. > > Indirect mounts have one autofs mount at the root plus the number of mounts that > have been triggered and not yet expired. > > The number of autofs indirect map entries can range from a few to the common > case of several thousand and in rare cases up to between 30000 and 50000. I've > not heard of people with maps larger than 50000 entries. > > The larger the number of map entries the greater the possibility for a large > number of active mounts so it's not hard to expect cases of a 1000 or somewhat > more active mounts. So I am setting the default number of mounts allowed per mount namespace at 100,000. This is more than enough for any use case I know of, but small enough to quickly stop an exponential increase in mounts. Which should be perfect to catch misconfigurations and malfunctioning programs. For anyone who needs a higher limit this can be changed by writing to the new /proc/sys/fs/mount-max sysctl. Tested-by: CAI Qian <caiqian@redhat.com> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
0
restore_vimvar(int idx, typval_T *save_tv) { hashitem_T *hi; vimvars[idx].vv_tv = *save_tv; if (vimvars[idx].vv_tv_type == VAR_UNKNOWN) { hi = hash_find(&vimvarht, vimvars[idx].vv_di.di_key); if (HASHITEM_EMPTY(hi)) internal_error("restore_vimvar()"); else hash_remove(&vimvarht, hi); } }
Safe
[ "CWE-476" ]
vim
0f6e28f686dbb59ab3b562408ab9b2234797b9b1
1.8407280820910033e+37
14
patch 8.2.4428: crash when switching tabpage while in the cmdline window Problem: Crash when switching tabpage while in the cmdline window. Solution: Disallow switching tabpage when in the cmdline window.
0
perf_event_parse_addr_filter(struct perf_event *event, char *fstr, struct list_head *filters) { struct perf_addr_filter *filter = NULL; char *start, *orig, *filename = NULL; struct path path; substring_t args[MAX_OPT_ARGS]; int state = IF_STATE_ACTION, token; unsigned int kernel = 0; int ret = -EINVAL; orig = fstr = kstrdup(fstr, GFP_KERNEL); if (!fstr) return -ENOMEM; while ((start = strsep(&fstr, " ,\n")) != NULL) { ret = -EINVAL; if (!*start) continue; /* filter definition begins */ if (state == IF_STATE_ACTION) { filter = perf_addr_filter_new(event, filters); if (!filter) goto fail; } token = match_token(start, if_tokens, args); switch (token) { case IF_ACT_FILTER: case IF_ACT_START: filter->filter = 1; case IF_ACT_STOP: if (state != IF_STATE_ACTION) goto fail; state = IF_STATE_SOURCE; break; case IF_SRC_KERNELADDR: case IF_SRC_KERNEL: kernel = 1; case IF_SRC_FILEADDR: case IF_SRC_FILE: if (state != IF_STATE_SOURCE) goto fail; if (token == IF_SRC_FILE || token == IF_SRC_KERNEL) filter->range = 1; *args[0].to = 0; ret = kstrtoul(args[0].from, 0, &filter->offset); if (ret) goto fail; if (filter->range) { *args[1].to = 0; ret = kstrtoul(args[1].from, 0, &filter->size); if (ret) goto fail; } if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) { int fpos = filter->range ? 2 : 1; filename = match_strdup(&args[fpos]); if (!filename) { ret = -ENOMEM; goto fail; } } state = IF_STATE_END; break; default: goto fail; } /* * Filter definition is fully parsed, validate and install it. * Make sure that it doesn't contradict itself or the event's * attribute. */ if (state == IF_STATE_END) { ret = -EINVAL; if (kernel && event->attr.exclude_kernel) goto fail; if (!kernel) { if (!filename) goto fail; /* * For now, we only support file-based filters * in per-task events; doing so for CPU-wide * events requires additional context switching * trickery, since same object code will be * mapped at different virtual addresses in * different processes. 
*/ ret = -EOPNOTSUPP; if (!event->ctx->task) goto fail_free_name; /* look up the path and grab its inode */ ret = kern_path(filename, LOOKUP_FOLLOW, &path); if (ret) goto fail_free_name; filter->inode = igrab(d_inode(path.dentry)); path_put(&path); kfree(filename); filename = NULL; ret = -EINVAL; if (!filter->inode || !S_ISREG(filter->inode->i_mode)) /* free_filters_list() will iput() */ goto fail; event->addr_filters.nr_file_filters++; } /* ready to consume more filters */ state = IF_STATE_ACTION; filter = NULL; } } if (state != IF_STATE_ACTION) goto fail; kfree(orig); return 0; fail_free_name: kfree(filename); fail: free_filters_list(filters); kfree(orig); return ret; }
Safe
[ "CWE-190" ]
linux
1572e45a924f254d9570093abde46430c3172e3d
8.339070438683497e+37
148
perf/core: Fix the perf_cpu_time_max_percent check Use "proc_dointvec_minmax" instead of "proc_dointvec" to check the input value from user-space. If not, we can set a big value and some vars will overflow like "sysctl_perf_event_sample_rate" which will cause a lot of unexpected problems. Signed-off-by: Tan Xiaojun <tanxiaojun@huawei.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: <acme@kernel.org> Cc: <alexander.shishkin@linux.intel.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vince Weaver <vincent.weaver@maine.edu> Link: http://lkml.kernel.org/r/1487829879-56237-1-git-send-email-tanxiaojun@huawei.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
0
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); int offset = offset_in_page(gpa); gfn_t start_gfn = gpa >> PAGE_SHIFT; gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; gfn_t nr_pages_needed = end_gfn - start_gfn + 1; gfn_t nr_pages_avail; ghc->gpa = gpa; ghc->generation = slots->generation; ghc->len = len; ghc->memslot = gfn_to_memslot(kvm, start_gfn); ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { ghc->hva += offset; } else { /* * If the requested region crosses two memslots, we still * verify that the entire region is valid here. */ while (start_gfn <= end_gfn) { ghc->memslot = gfn_to_memslot(kvm, start_gfn); ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; start_gfn += nr_pages_avail; } /* Use the slow path for cross page reads and writes. */ ghc->memslot = NULL; } return 0; }
Safe
[ "CWE-416", "CWE-284" ]
linux
a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
5.78346517828682e+37
35
KVM: use after free in kvm_ioctl_create_device() We should move the ops->destroy(dev) after the list_del(&dev->vm_node) so that we don't use "dev" after freeing it. Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock") Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Reviewed-by: David Hildenbrand <david@redhat.com> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
0
SpoolssClosePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; }
Safe
[ "CWE-399" ]
wireshark
b4d16b4495b732888e12baf5b8a7e9bf2665e22b
2.26780105390436e+38
16
SPOOLSS: Try to avoid an infinite loop. Use tvb_reported_length_remaining in dissect_spoolss_uint16uni. Make sure our offset always increments in dissect_spoolss_keybuffer. Change-Id: I7017c9685bb2fa27161d80a03b8fca4ef630e793 Reviewed-on: https://code.wireshark.org/review/14687 Reviewed-by: Gerald Combs <gerald@wireshark.org> Petri-Dish: Gerald Combs <gerald@wireshark.org> Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org> Reviewed-by: Michael Mann <mmann78@netscape.net>
0
static struct sctp_chunk *sctp_make_op_error_space( const struct sctp_association *asoc, const struct sctp_chunk *chunk, size_t size) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0, sizeof(sctp_errhdr_t) + size); if (!retval) goto nodata; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, etc.) to the same destination transport * address from which it received the DATA or control chunk * to which it is replying. * */ if (chunk) retval->transport = chunk->transport; nodata: return retval; }
Safe
[ "CWE-20", "CWE-399" ]
linux
9de7922bc709eee2f609cd01d98aaedc4cf5ea74
2.051310997270102e+38
26
net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for ASCONF chunk") added basic verification of ASCONF chunks, however, it is still possible to remotely crash a server by sending a special crafted ASCONF chunk, even up to pre 2.6.12 kernels: skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768 head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950 end:0x440 dev:<NULL> ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:129! [...] Call Trace: <IRQ> [<ffffffff8144fb1c>] skb_put+0x5c/0x70 [<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp] [<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp] [<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20 [<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp] [<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp] [<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0 [<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp] [<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp] [<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp] [<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp] [<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter] [<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0 [<ffffffff81497078>] ip_local_deliver+0x98/0xa0 [<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440 [<ffffffff81496ac5>] ip_rcv+0x275/0x350 [<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750 [<ffffffff81460588>] netif_receive_skb+0x58/0x60 This can be triggered e.g., through a simple scripted nmap connection scan injecting the chunk after the handshake, for example, ... 
-------------- INIT[ASCONF; ASCONF_ACK] -------------> <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------ -------------------- COOKIE-ECHO --------------------> <-------------------- COOKIE-ACK --------------------- ------------------ ASCONF; UNKNOWN ------------------> ... where ASCONF chunk of length 280 contains 2 parameters ... 1) Add IP address parameter (param length: 16) 2) Add/del IP address parameter (param length: 255) ... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the Address Parameter in the ASCONF chunk is even missing, too. This is just an example and similarly-crafted ASCONF chunks could be used just as well. The ASCONF chunk passes through sctp_verify_asconf() as all parameters passed sanity checks, and after walking, we ended up successfully at the chunk end boundary, and thus may invoke sctp_process_asconf(). Parameter walking is done with WORD_ROUND() to take padding into account. In sctp_process_asconf()'s TLV processing, we may fail in sctp_process_asconf_param() e.g., due to removal of the IP address that is also the source address of the packet containing the ASCONF chunk, and thus we need to add all TLVs after the failure to our ASCONF response to remote via helper function sctp_add_asconf_response(), which basically invokes a sctp_addto_chunk() adding the error parameters to the given skb. When walking to the next parameter this time, we proceed with ... length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; ... instead of the WORD_ROUND()'ed length, thus resulting here in an off-by-one that leads to reading the follow-up garbage parameter length of 12336, and thus throwing an skb_over_panic for the reply when trying to sctp_addto_chunk() next time, which implicitly calls the skb_put() with that length. 
Fix it by using sctp_walk_params() [ which is also used in INIT parameter processing ] macro in the verification *and* in ASCONF processing: it will make sure we don't spill over, that we walk parameters WORD_ROUND()'ed. Moreover, we're being more defensive and guard against unknown parameter types and missized addresses. Joint work with Vlad Yasevich. Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.") Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: Vlad Yasevich <vyasevich@gmail.com> Acked-by: Neil Horman <nhorman@tuxdriver.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
ins_compl_active(void) { return compl_started; }
Safe
[ "CWE-125" ]
vim
f12129f1714f7d2301935bb21d896609bdac221c
2.9648996878572617e+38
4
patch 9.0.0020: with some completion reading past end of string Problem: With some completion reading past end of string. Solution: Check the length of the string.
0
TEST_F(GroupVerifierTest, TestRequiresAnyLastIsJwtMissed) { TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_); auto mock_auth = std::make_unique<MockAuthenticator>(); createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::JwtHeaderBadKid}, {"other_provider", Status::JwtMissed}}); // onComplete with failure status, not payload EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, {"other-auth-userinfo", ""}, }; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); verifier_->verify(context_); EXPECT_FALSE(headers.has("example-auth-userinfo")); EXPECT_FALSE(headers.has("other-auth-userinfo")); }
Safe
[ "CWE-303", "CWE-703" ]
envoy
ea39e3cba652bcc4b11bb0d5c62b017e584d2e5a
2.6684411132738302e+38
18
jwt_authn: fix a bug where JWT with wrong issuer is allowed in allow_missing case (#15194) [jwt] When allow_missing is used inside RequiresAny, the requests with JWT with wrong issuer are accepted. This is a bug, allow_missing should only allow requests without any JWT. This change fixed the above issue by preserving JwtUnknownIssuer in allow_missing case. Signed-off-by: Wayne Zhang <qiwzhang@google.com>
0
static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { // need to generate 2x2 samples for every one in input int i=0,t0,t1; if (w == 1) { out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); return out; } t1 = 3*in_near[0] + in_far[0]; // process groups of 8 pixels for as long as we can. // note we can't handle the last pixel in a row in this loop // because we need to handle the filter boundary conditions. for (; i < ((w-1) & ~7); i += 8) { #if defined(STBI_SSE2) // load and perform the vertical filtering pass // this uses 3*x + y = 4*x + (y - x) __m128i zero = _mm_setzero_si128(); __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); __m128i farw = _mm_unpacklo_epi8(farb, zero); __m128i nearw = _mm_unpacklo_epi8(nearb, zero); __m128i diff = _mm_sub_epi16(farw, nearw); __m128i nears = _mm_slli_epi16(nearw, 2); __m128i curr = _mm_add_epi16(nears, diff); // current row // horizontal filter works the same based on shifted vers of current // row. "prev" is current row shifted right by 1 pixel; we need to // insert the previous pixel value (from t1). // "next" is current row shifted left by 1 pixel, with first pixel // of next block of 8 pixels added in. __m128i prv0 = _mm_slli_si128(curr, 2); __m128i nxt0 = _mm_srli_si128(curr, 2); __m128i prev = _mm_insert_epi16(prv0, t1, 0); __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); // horizontal filter, polyphase implementation since it's convenient: // even pixels = 3*cur + prev = cur*4 + (prev - cur) // odd pixels = 3*cur + next = cur*4 + (next - cur) // note the shared term. 
__m128i bias = _mm_set1_epi16(8); __m128i curs = _mm_slli_epi16(curr, 2); __m128i prvd = _mm_sub_epi16(prev, curr); __m128i nxtd = _mm_sub_epi16(next, curr); __m128i curb = _mm_add_epi16(curs, bias); __m128i even = _mm_add_epi16(prvd, curb); __m128i odd = _mm_add_epi16(nxtd, curb); // interleave even and odd pixels, then undo scaling. __m128i int0 = _mm_unpacklo_epi16(even, odd); __m128i int1 = _mm_unpackhi_epi16(even, odd); __m128i de0 = _mm_srli_epi16(int0, 4); __m128i de1 = _mm_srli_epi16(int1, 4); // pack and write output __m128i outv = _mm_packus_epi16(de0, de1); _mm_storeu_si128((__m128i *) (out + i*2), outv); #elif defined(STBI_NEON) // load and perform the vertical filtering pass // this uses 3*x + y = 4*x + (y - x) uint8x8_t farb = vld1_u8(in_far + i); uint8x8_t nearb = vld1_u8(in_near + i); int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); int16x8_t curr = vaddq_s16(nears, diff); // current row // horizontal filter works the same based on shifted vers of current // row. "prev" is current row shifted right by 1 pixel; we need to // insert the previous pixel value (from t1). // "next" is current row shifted left by 1 pixel, with first pixel // of next block of 8 pixels added in. int16x8_t prv0 = vextq_s16(curr, curr, 7); int16x8_t nxt0 = vextq_s16(curr, curr, 1); int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); // horizontal filter, polyphase implementation since it's convenient: // even pixels = 3*cur + prev = cur*4 + (prev - cur) // odd pixels = 3*cur + next = cur*4 + (next - cur) // note the shared term. 
int16x8_t curs = vshlq_n_s16(curr, 2); int16x8_t prvd = vsubq_s16(prev, curr); int16x8_t nxtd = vsubq_s16(next, curr); int16x8_t even = vaddq_s16(curs, prvd); int16x8_t odd = vaddq_s16(curs, nxtd); // undo scaling and round, then store with even/odd phases interleaved uint8x8x2_t o; o.val[0] = vqrshrun_n_s16(even, 4); o.val[1] = vqrshrun_n_s16(odd, 4); vst2_u8(out + i*2, o); #endif // "previous" value for next iter t1 = 3*in_near[i+7] + in_far[i+7]; } t0 = t1; t1 = 3*in_near[i] + in_far[i]; out[i*2] = stbi__div16(3*t1 + t0 + 8); for (++i; i < w; ++i) { t0 = t1; t1 = 3*in_near[i]+in_far[i]; out[i*2-1] = stbi__div16(3*t0 + t1 + 8); out[i*2 ] = stbi__div16(3*t1 + t0 + 8); } out[w*2-1] = stbi__div4(t1+2); STBI_NOTUSED(hs); return out; }
Safe
[ "CWE-787" ]
stb
5ba0baaa269b3fd681828e0e3b3ac0f1472eaf40
3.1028099931454396e+38
114
stb_image: Reject fractional JPEG component subsampling ratios The component resamplers are not written to support this and I've never seen it happen in a real (non-crafted) JPEG file so I'm fine rejecting this as outright corrupt. Fixes issue #1178.
0
target_count_increase_nx(struct iter_qstate* iq, int num) { target_count_create(iq); if(iq->target_count) iq->target_count[TARGET_COUNT_NX] += num; }
Safe
[ "CWE-613", "CWE-703" ]
unbound
f6753a0f1018133df552347a199e0362fc1dac68
5.314831876365887e+37
6
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
0
lyp_sort_revisions(struct lys_module *module) { uint8_t i, r; struct lys_revision rev; for (i = 1, r = 0; i < module->rev_size; i++) { if (strcmp(module->rev[i].date, module->rev[r].date) > 0) { r = i; } } if (r) { /* the newest revision is not on position 0, switch them */ memcpy(&rev, &module->rev[0], sizeof rev); memcpy(&module->rev[0], &module->rev[r], sizeof rev); memcpy(&module->rev[r], &rev, sizeof rev); } }
Safe
[ "CWE-787" ]
libyang
f6d684ade99dd37b21babaa8a856f64faa1e2e0d
8.026485320156343e+37
18
parser BUGFIX long identity name buffer overflow STRING_OVERFLOW (CWE-120)
0
vncProperties::LoadInt(HKEY key, LPCSTR valname, LONG defval) { LONG pref; ULONG type = REG_DWORD; ULONG prefsize = sizeof(pref); if (RegQueryValueEx(key, valname, NULL, &type, (LPBYTE) &pref, &prefsize) != ERROR_SUCCESS) return defval; if (type != REG_DWORD) return defval; if (prefsize != sizeof(pref)) return defval; return pref; }
Safe
[ "CWE-787" ]
UltraVNC
36a31b37b98f70c1db0428f5ad83170d604fb352
2.373218271507365e+38
22
security fix
0
void CLASS parse_exif (int base) { unsigned kodak, entries, tag, type, len, save, c; double expo,ape; kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3; entries = get2(); if(!strncmp(make,"Hasselblad",10) && (tiff_nifds > 3) && (entries > 512)) return; #ifdef LIBRAW_LIBRARY_BUILD INT64 fsize = ifp->size(); #endif while (entries--) { tiff_get (base, &tag, &type, &len, &save); #ifdef LIBRAW_LIBRARY_BUILD INT64 savepos = ftell(ifp); if(len > 8 && savepos + len > fsize*2) continue; if(callbacks.exif_cb) { callbacks.exif_cb(callbacks.exifparser_data,tag,type,len,order,ifp); fseek(ifp,savepos,SEEK_SET); } #endif switch (tag) { #ifdef LIBRAW_LIBRARY_BUILD case 0xa405: // FocalLengthIn35mmFormat imgdata.lens.FocalLengthIn35mmFormat = get2(); break; case 0xa431: // BodySerialNumber stmread(imgdata.shootinginfo.BodySerial, len, ifp); break; case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard imgdata.lens.MinFocal = getreal(type); imgdata.lens.MaxFocal = getreal(type); imgdata.lens.MaxAp4MinFocal = getreal(type); imgdata.lens.MaxAp4MaxFocal = getreal(type); break; case 0xa435: // LensSerialNumber stmread(imgdata.lens.LensSerial, len, ifp); break; case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard imgdata.lens.dng.MinFocal = getreal(type); imgdata.lens.dng.MaxFocal = getreal(type); imgdata.lens.dng.MaxAp4MinFocal = getreal(type); imgdata.lens.dng.MaxAp4MaxFocal = getreal(type); break; case 0xa433: // LensMake stmread(imgdata.lens.LensMake, len, ifp); break; case 0xa434: // LensModel stmread(imgdata.lens.Lens, len, ifp); if (!strncmp(imgdata.lens.Lens, "----", 4)) imgdata.lens.Lens[0] = 0; break; case 0x9205: imgdata.lens.EXIF_MaxAp = powf64(2.0f, (getreal(type) / 2.0f)); break; #endif case 33434: tiff_ifd[tiff_nifds-1].t_shutter = shutter = getreal(type); break; case 33437: aperture = getreal(type); break; // 0x829d FNumber case 34855: iso_speed = get2(); break; case 34866: if (iso_speed == 0xffff && (!strncasecmp(make, "SONY",4) || 
!strncasecmp(make, "CANON",5))) iso_speed = getreal(type); break; case 36867: case 36868: get_timestamp(0); break; case 37377: if ((expo = -getreal(type)) < 128 && shutter == 0.) tiff_ifd[tiff_nifds-1].t_shutter = shutter = powf64(2.0, expo); break; case 37378: // 0x9202 ApertureValue if ((fabs(ape = getreal(type))<256.0) && (!aperture)) aperture = powf64(2.0, ape/2); break; case 37385: flash_used = getreal(type); break; case 37386: focal_len = getreal(type); break; case 37500: // tag 0x927c #ifdef LIBRAW_LIBRARY_BUILD if (((make[0] == '\0') && (!strncmp(model, "ov5647",6))) || ((!strncmp(make, "RaspberryPi",11)) && (!strncmp(model, "RP_OV5647",9))) || ((!strncmp(make, "RaspberryPi",11)) && (!strncmp(model, "RP_imx219",9)))) { char mn_text[512]; char* pos; char ccms[512]; ushort l; float num; fgets(mn_text, len, ifp); pos = strstr(mn_text, "gain_r="); if (pos) cam_mul[0] = atof(pos+7); pos = strstr(mn_text, "gain_b="); if (pos) cam_mul[2] = atof(pos+7); if ((cam_mul[0] > 0.001f) && (cam_mul[2] > 0.001f)) cam_mul[1] = cam_mul[3] = 1.0f; else cam_mul[0] = cam_mul[2] = 0.0f; pos = strstr(mn_text, "ccm=") + 4; l = strstr(pos, " ") - pos; memcpy (ccms, pos, l); ccms[l] = '\0'; pos = strtok (ccms, ","); for (l=0; l<4; l++) { num = 0.0; for (c=0; c<3; c++) { imgdata.color.ccm[l][c] = (float)atoi(pos); num += imgdata.color.ccm[l][c]; pos = strtok (NULL, ","); } if (num > 0.01) FORC3 imgdata.color.ccm[l][c] = imgdata.color.ccm[l][c] / num; } } else #endif parse_makernote (base, 0); break; case 40962: if (kodak) raw_width = get4(); break; case 40963: if (kodak) raw_height = get4(); break; case 41730: if (get4() == 0x20002) for (exif_cfa=c=0; c < 8; c+=2) exif_cfa |= fgetc(ifp) * 0x01010101 << c; } fseek (ifp, save, SEEK_SET); } }
Safe
[ "CWE-119", "CWE-125", "CWE-787" ]
LibRaw
d13e8f6d1e987b7491182040a188c16a395f1d21
1.9142035624150828e+38
127
CVE-2017-1438 credits; fix for Kodak 65000 out of bounds access
0
static int llc_ui_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int rc = -EINVAL; lock_sock(sk); if (unlikely(sock->state != SS_UNCONNECTED)) goto out; rc = -EOPNOTSUPP; if (unlikely(sk->sk_type != SOCK_STREAM)) goto out; rc = -EAGAIN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = 0; if (!(unsigned int)backlog) /* BSDism */ backlog = 1; sk->sk_max_ack_backlog = backlog; if (sk->sk_state != TCP_LISTEN) { sk->sk_ack_backlog = 0; sk->sk_state = TCP_LISTEN; } sk->sk_socket->flags |= __SO_ACCEPTCON; out: release_sock(sk); return rc; }
Safe
[ "CWE-200" ]
net
b8670c09f37bdf2847cc44f36511a53afc6161fd
2.4693050133176527e+38
27
net: fix infoleak in llc The stack object “info” has a total size of 12 bytes. Its last byte is padding which is not initialized and leaked via “put_cmsg”. Signed-off-by: Kangjie Lu <kjlu@gatech.edu> Signed-off-by: David S. Miller <davem@davemloft.net>
0
/* FVExpose - repaint the font-view character grid.
 *
 * Draws the cell grid, then for every visible cell paints the label row
 * (glyph name / unicode / encoding / glyph character, per fv->glyphlabel),
 * background tinting for changed glyphs, "needs hinting" side bars, and
 * finally the glyph image itself via FVDrawGlyph().
 *
 * pixmap is the window being exposed; event->u.expose.rect limits the
 * region that must be redrawn.
 *
 * FIX: the row loop's second condition was missing the "i<=" comparison —
 * "(y+height+cbh-1)/cbh" alone is just a (virtually always non-zero) value,
 * so every row up to rowcnt was repainted regardless of the expose rect.
 * Restored the intended upper bound on exposed rows.
 */
static void FVExpose(FontView *fv,GWindow pixmap, GEvent *event) {
    int i, j, y, width, gid;
    int changed;
    GRect old, old2, r;
    GClut clut;
    struct _GImage base;
    GImage gi;
    SplineChar dummy;
    int styles, laststyles=0;
    Color bg, def_fg;
    int fgxor;

    def_fg = GDrawGetDefaultForeground(NULL);
    memset(&gi,'\0',sizeof(gi));
    memset(&base,'\0',sizeof(base));
    /* Set up a GImage template reused for every cell: indexed if the
     * display font has a clut, 1-bit mono otherwise. */
    if ( fv->show->clut!=NULL ) {
        gi.u.image = &base;
        base.image_type = it_index;
        base.clut = fv->show->clut;
        GDrawSetDither(NULL, false);
        base.trans = -1;
    } else {
        memset(&clut,'\0',sizeof(clut));
        gi.u.image = &base;
        base.image_type = it_mono;
        base.clut = &clut;
        clut.clut_len = 2;
        clut.clut[0] = view_bgcol;
    }

    GDrawSetFont(pixmap,fv->fontset[0]);
    GDrawSetLineWidth(pixmap,0);
    GDrawPushClip(pixmap,&event->u.expose.rect,&old);
    GDrawFillRect(pixmap,NULL,view_bgcol);
    /* Grid lines: one per row (plus the label/glyph separator) and column. */
    for ( i=0; i<=fv->rowcnt; ++i ) {
        GDrawDrawLine(pixmap,0,i*fv->cbh,fv->width,i*fv->cbh,def_fg);
        GDrawDrawLine(pixmap,0,i*fv->cbh+fv->lab_height,fv->width,i*fv->cbh+fv->lab_height,0x808080);
    }
    for ( i=0; i<=fv->colcnt; ++i )
        GDrawDrawLine(pixmap,i*fv->cbw,0,i*fv->cbw,fv->height,def_fg);
    /* Walk only the rows intersecting the expose rectangle.
     * (Bug fix: "i<=" restored before the upper-bound expression.) */
    for ( i=event->u.expose.rect.y/fv->cbh; i<=fv->rowcnt &&
            i<=(event->u.expose.rect.y+event->u.expose.rect.height+fv->cbh-1)/fv->cbh; ++i ) for ( j=0; j<fv->colcnt; ++j ) {
        int index = (i+fv->rowoff)*fv->colcnt+j;
        SplineChar *sc;
        styles = 0;
        if ( index < fv->b.map->enccount && index!=-1 ) {
            unichar_t buf[60];
            char cbuf[8];
            char utf8_buf[8];
            int use_utf8 = false;
            Color fg;
            int uni;
            struct cidmap *cidmap = NULL;
            sc = (gid=fv->b.map->map[index])!=-1 ? fv->b.sf->glyphs[gid]: NULL;
            if ( fv->b.cidmaster!=NULL )
                cidmap = FindCidMap(fv->b.cidmaster->cidregistry,fv->b.cidmaster->ordering,fv->b.cidmaster->supplement,fv->b.cidmaster);
            /* Grey out slots outside the encoding's defined range. */
            if ( ( fv->b.map->enc==&custom && index<256 ) ||
                 ( fv->b.map->enc!=&custom && index<fv->b.map->enc->char_cnt ) ||
                 ( cidmap!=NULL && index<MaxCID(cidmap) ))
                fg = def_fg;
            else
                fg = 0x505050;
            if ( sc==NULL )
                sc = SCBuildDummy(&dummy,fv->b.sf,fv->b.map,index);
            uni = sc->unicodeenc;
            buf[0] = buf[1] = 0;
            /* AMS PUA remapping (TeX math fonts). */
            if ( fv->b.sf->uni_interp==ui_ams && uni>=0xe000 && uni<=0xf8ff &&
                    amspua[uni-0xe000]!=0 )
                uni = amspua[uni-0xe000];
            /* Build the cell label according to the user's label mode. */
            switch ( fv->glyphlabel ) {
              case gl_name:
                uc_strncpy(buf,sc->name,sizeof(buf)/sizeof(buf[0]));
              break;
              case gl_unicode:
                if ( sc->unicodeenc!=-1 ) {
                    sprintf(cbuf,"%04x",sc->unicodeenc);
                    uc_strcpy(buf,cbuf);
                } else
                    uc_strcpy(buf,"?");
              break;
              case gl_encoding:
                if ( fv->b.map->enc->only_1byte ||
                        (fv->b.map->enc->has_1byte && index<256))
                    sprintf(cbuf,"%02x",index);
                else
                    sprintf(cbuf,"%04x",index);
                uc_strcpy(buf,cbuf);
              break;
              case gl_glyph:
                if ( uni==0xad )
                    buf[0] = '-';                  /* soft hyphen drawn as '-' */
                else if ( fv->b.sf->uni_interp==ui_adobe && uni>=0xf600 && uni<=0xf7ff &&
                        adobes_pua_alts[uni-0xf600]!=0 ) {
                    use_utf8 = false;
                    do_Adobe_Pua(buf,sizeof(buf),uni);
                } else if ( uni>=0xe0020 && uni<=0xe007e ) {
                    buf[0] = uni-0xe0000;          /* A map of Ascii for language names */
#if HANYANG
                } else if ( sc->compositionunit ) {
                    if ( sc->jamo<19 )
                        buf[0] = 0x1100+sc->jamo;
                    else if ( sc->jamo<19+21 )
                        buf[0] = 0x1161 + sc->jamo-19;
                    else        /* Leave a hole for the blank char */
                        buf[0] = 0x11a8 + sc->jamo-(19+21+1);
#endif
                } else if ( uni>0 && uni<unicode4_size ) {
                    char *pt = utf8_buf;
                    use_utf8 = true;
                    *pt = '\0'; /* terminate in case the encoding below fails */
                    pt = utf8_idpb(pt,uni,0);
                    if (pt) *pt = '\0'; else fprintf(stderr, "Invalid Unicode alert.\n");
                } else {
                    /* No direct codepoint: try to infer one from the glyph
                     * name ("uniXXXX", "uXXXX[XX]", CID lookup, or the part
                     * before the first '.'). */
                    char *pt = strchr(sc->name,'.');
                    buf[0] = '?';
                    fg = 0xff0000;
                    if ( pt!=NULL ) {
                        int i, n = pt-sc->name;
                        char *end;
                        SplineFont *cm = fv->b.sf->cidmaster;
                        if ( n==7 && sc->name[0]=='u' && sc->name[1]=='n' && sc->name[2]=='i' &&
                                (i=strtol(sc->name+3,&end,16), end-sc->name==7))
                            buf[0] = i;
                        else if ( n>=5 && n<=7 && sc->name[0]=='u' &&
                                (i=strtol(sc->name+1,&end,16), end-sc->name==n))
                            buf[0] = i;
                        else if ( cm!=NULL && (i=CIDFromName(sc->name,cm))!=-1 ) {
                            int uni;
                            uni = CID2Uni(FindCidMap(cm->cidregistry,cm->ordering,cm->supplement,cm), i);
                            if ( uni!=-1 )
                                buf[0] = uni;
                        } else {
                            int uni;
                            *pt = '\0';
                            uni = UniFromName(sc->name,fv->b.sf->uni_interp,fv->b.map->enc);
                            if ( uni!=-1 )
                                buf[0] = uni;
                            *pt = '.';
                        }
                        if ( strstr(pt,".vert")!=NULL )
                            styles = _uni_vertical;
                        if ( buf[0]!='?' ) {
                            fg = def_fg;
                            if ( strstr(pt,".italic")!=NULL )
                                styles = _uni_italic;
                        }
                    } else if ( strncmp(sc->name,"hwuni",5)==0 ) {
                        int uni=-1;
                        sscanf(sc->name,"hwuni%x", (unsigned *) &uni );
                        if ( uni!=-1 ) buf[0] = uni;
                    } else if ( strncmp(sc->name,"italicuni",9)==0 ) {
                        int uni=-1;
                        sscanf(sc->name,"italicuni%x", (unsigned *) &uni );
                        if ( uni!=-1 ) { buf[0] = uni; styles=_uni_italic; }
                        fg = def_fg;
                    } else if ( strncmp(sc->name,"vertcid_",8)==0 ||
                            strncmp(sc->name,"vertuni",7)==0 ) {
                        styles = _uni_vertical;
                    }
                }
              break;
            }
            /* Label rectangle for this cell (inside the grid lines). */
            r.x = j*fv->cbw+1; r.width = fv->cbw-1;
            r.y = i*fv->cbh+1; r.height = fv->lab_height-1;
            bg = view_bgcol;
            fgxor = 0x000000;
            changed = sc->changed;
            if ( fv->b.sf->onlybitmaps && gid<fv->show->glyphcnt )
                changed = gid==-1 || fv->show->glyphs[gid]==NULL? false : fv->show->glyphs[gid]->changed;
            /* Tint the label background for changed/colored/backgrounded glyphs. */
            if ( changed || sc->layers[ly_back].splines!=NULL || sc->layers[ly_back].images!=NULL ||
                    sc->color!=COLOR_DEFAULT ) {
                if ( sc->layers[ly_back].splines!=NULL || sc->layers[ly_back].images!=NULL ||
                        sc->color!=COLOR_DEFAULT )
                    bg = sc->color!=COLOR_DEFAULT?sc->color:0x808080;
                if ( sc->changed ) {
                    fgxor = bg ^ fvchangedcol;
                    bg = fvchangedcol;
                }
                GDrawFillRect(pixmap,&r,bg);
            }
            /* Vertical side bars flag glyphs needing (re)hinting; red bars
             * flag out-of-date TrueType instructions. */
            if ( (!fv->b.sf->layers[fv->b.active_layer].order2 && sc->changedsincelasthinted ) ||
                     ( fv->b.sf->layers[fv->b.active_layer].order2 && sc->layers[fv->b.active_layer].splines!=NULL &&
                        sc->ttf_instrs_len<=0 ) ||
                     ( fv->b.sf->layers[fv->b.active_layer].order2 && sc->instructions_out_of_date ) ) {
                Color hintcol = fvhintingneededcol;
                if ( fv->b.sf->layers[fv->b.active_layer].order2 && sc->instructions_out_of_date && sc->ttf_instrs_len>0 )
                    hintcol = 0xff0000;
                GDrawDrawLine(pixmap,r.x,r.y,r.x,r.y+r.height-1,hintcol);
                GDrawDrawLine(pixmap,r.x+1,r.y,r.x+1,r.y+r.height-1,hintcol);
                GDrawDrawLine(pixmap,r.x+2,r.y,r.x+2,r.y+r.height-1,hintcol);
                GDrawDrawLine(pixmap,r.x+r.width-1,r.y,r.x+r.width-1,r.y+r.height-1,hintcol);
                GDrawDrawLine(pixmap,r.x+r.width-2,r.y,r.x+r.width-2,r.y+r.height-1,hintcol);
                GDrawDrawLine(pixmap,r.x+r.width-3,r.y,r.x+r.width-3,r.y+r.height-1,hintcol);
            }
            if ( use_utf8 && sc->unicodeenc!=-1 &&
                    /* Pango complains if we try to draw non characters */
                    /* These two are guaranteed "NOT A UNICODE CHARACTER" in all planes */
                    ((sc->unicodeenc&0xffff)==0xfffe || (sc->unicodeenc&0xffff)==0xffff ||
                     (sc->unicodeenc>=0xfdd0 && sc->unicodeenc<=0xfdef) ||       /* noncharacters */
                     (sc->unicodeenc>=0xfe00 && sc->unicodeenc<=0xfe0f) ||       /* variation selectors */
                     (sc->unicodeenc>=0xe0110 && sc->unicodeenc<=0xe01ff) ||     /* variation selectors */
                     /* The surrogates in BMP aren't valid either */
                     (sc->unicodeenc>=0xd800 && sc->unicodeenc<=0xdfff))) {      /* surrogates */
                /* Undrawable codepoint: mark the cell with an X. */
                GDrawDrawLine(pixmap,r.x,r.y,r.x+r.width-1,r.y+r.height-1,0x000000);
                GDrawDrawLine(pixmap,r.x,r.y+r.height-1,r.x+r.width-1,r.y,0x000000);
            } else if ( use_utf8 ) {
                GTextBounds size;
                if ( styles!=laststyles ) GDrawSetFont(pixmap,FVCheckFont(fv,styles));
                width = GDrawGetText8Bounds(pixmap,utf8_buf,-1,&size);
                if ( size.lbearing==0 && size.rbearing==0 ) {
                    /* Glyph missing from the UI font: fall back to U+FFFD. */
                    utf8_buf[0] = 0xe0 | (0xfffd>>12);
                    utf8_buf[1] = 0x80 | ((0xfffd>>6)&0x3f);
                    utf8_buf[2] = 0x80 | (0xfffd&0x3f);
                    utf8_buf[3] = 0;
                    width = GDrawGetText8Bounds(pixmap,utf8_buf,-1,&size);
                }
                width = size.rbearing - size.lbearing+1;
                if ( width >= fv->cbw-1 ) {
                    GDrawPushClip(pixmap,&r,&old2);
                    width = fv->cbw-1;
                }
                if ( sc->unicodeenc<0x80 || sc->unicodeenc>=0xa0 ) {
                    y = i*fv->cbh+fv->lab_as+1;
                    /* move rotated glyph up a bit to center it */
                    if (styles&_uni_vertical) y -= fv->lab_as/2;
                    GDrawDrawText8(pixmap,j*fv->cbw+(fv->cbw-1-width)/2-size.lbearing,y,utf8_buf,-1,fg^fgxor);
                }
                if ( width >= fv->cbw-1 )
                    GDrawPopClip(pixmap,&old2);
                laststyles = styles;
            } else {
                if ( styles!=laststyles ) GDrawSetFont(pixmap,FVCheckFont(fv,styles));
                width = GDrawGetTextWidth(pixmap,buf,-1);
                if ( width >= fv->cbw-1 ) {
                    GDrawPushClip(pixmap,&r,&old2);
                    width = fv->cbw-1;
                }
                if ( sc->unicodeenc<0x80 || sc->unicodeenc>=0xa0 ) {
                    y = i*fv->cbh+fv->lab_as+1;
                    /* move rotated glyph up a bit to center it */
                    if (styles&_uni_vertical) y -= fv->lab_as/2;
                    GDrawDrawText(pixmap,j*fv->cbw+(fv->cbw-1-width)/2,y,buf,-1,fg^fgxor);
                }
                if ( width >= fv->cbw-1 )
                    GDrawPopClip(pixmap,&old2);
                laststyles = styles;
            }
        }
        FVDrawGlyph(pixmap,fv,index,false);
    }
    if ( fv->showhmetrics&fvm_baseline ) {
        for ( i=0; i<=fv->rowcnt; ++i )
            GDrawDrawLine(pixmap,0,i*fv->cbh+fv->lab_height+fv->magnify*fv->show->ascent+1,fv->width,i*fv->cbh+fv->lab_height+fv->magnify*fv->show->ascent+1,METRICS_BASELINE);
    }
    GDrawPopClip(pixmap,&old);
    GDrawSetDither(NULL, true);
}
Safe
[ "CWE-119", "CWE-787" ]
fontforge
626f751752875a0ddd74b9e217b6f4828713573c
2.5823476980432097e+38
262
Warn users before discarding their unsaved scripts (#3852) * Warn users before discarding their unsaved scripts This closes #3846.
0
static int rsi_mac80211_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; struct vif_priv *vif_info = (struct vif_priv *)vif->drv_priv; enum opmode intf_mode; enum vap_status vap_status; int vap_idx = -1, i; vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; mutex_lock(&common->mutex); intf_mode = rsi_map_intf_mode(vif->type); if (intf_mode == RSI_OPMODE_UNSUPPORTED) { rsi_dbg(ERR_ZONE, "%s: Interface type %d not supported\n", __func__, vif->type); mutex_unlock(&common->mutex); return -EOPNOTSUPP; } if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) || (vif->type == NL80211_IFTYPE_P2P_CLIENT) || (vif->type == NL80211_IFTYPE_P2P_GO)) common->p2p_enabled = true; /* Get free vap index */ for (i = 0; i < RSI_MAX_VIFS; i++) { if (!adapter->vifs[i]) { vap_idx = i; break; } } if (vap_idx < 0) { rsi_dbg(ERR_ZONE, "Reject: Max VAPs reached\n"); mutex_unlock(&common->mutex); return -EOPNOTSUPP; } vif_info->vap_id = vap_idx; adapter->vifs[vap_idx] = vif; adapter->sc_nvifs++; vap_status = VAP_ADD; if (rsi_set_vap_capabilities(common, intf_mode, vif->addr, vif_info->vap_id, vap_status)) { rsi_dbg(ERR_ZONE, "Failed to set VAP capabilities\n"); mutex_unlock(&common->mutex); return -EINVAL; } if ((vif->type == NL80211_IFTYPE_AP) || (vif->type == NL80211_IFTYPE_P2P_GO)) { rsi_send_rx_filter_frame(common, DISALLOW_BEACONS); common->min_rate = RSI_RATE_AUTO; for (i = 0; i < common->max_stations; i++) common->stations[i].sta = NULL; } mutex_unlock(&common->mutex); return 0; }
Safe
[ "CWE-416" ]
linux
abd39c6ded9db53aa44c2540092bdd5fb6590fa8
6.675398057949068e+36
62
rsi: add fix for crash during assertions Observed crash in some scenarios when assertion has occurred, this is because hw structure is freed and is tried to get accessed in some functions where null check is already present. So, avoided the crash by making the hw to NULL after freeing. Signed-off-by: Sanjay Konduri <sanjay.konduri@redpinesignals.com> Signed-off-by: Sushant Kumar Mishra <sushant.mishra@redpinesignals.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
0
/*
 * arpt_register_table - register an arp_tables table in a network namespace.
 * @net:   namespace to register in
 * @table: table template (name, valid hooks)
 * @repl:  initial ruleset blob to install
 * @ops:   netfilter hook ops, one per valid hook
 * @res:   on success, receives the live xt_table pointer
 *
 * Returns 0 on success or a negative errno.  On hook-registration failure
 * the table is unregistered again and *res is reset to NULL.
 */
int arpt_register_table(struct net *net,
			const struct xt_table *table,
			const struct arpt_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* Copy the caller's ruleset into the freshly allocated table info. */
	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	/* Validate and translate the blob; frees nothing on failure,
	 * so we release newinfo ourselves via out_free. */
	ret = translate_table(newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		/* Roll back the table registration if hooking failed. */
		__arpt_unregister_table(new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
Safe
[ "CWE-476" ]
linux
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
5.510385566942438e+37
44
netfilter: add back stackpointer size checks The rationale for removing the check is only correct for rulesets generated by ip(6)tables. In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on number of user-defined chains we cannot exceed stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, its possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and crash occurs because no jumpstack was allocated. Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: syzbot+e783f671527912cd9403@syzkaller.appspotmail.com Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
0
/**
  Create a Form (IFR) package node from a raw package buffer and append it
  to the given package list.

  @param PackageHdr   Pointer to the raw package data, beginning with an
                      EFI_HII_PACKAGE_HEADER.
  @param NotifyType   Notification type; when EFI_HII_DATABASE_NOTIFY_ADD_PACK
                      the package list's total length is updated.
  @param PackageList  Package list to insert the new node into.
  @param Package      On success, receives the newly created form package node.

  @retval EFI_SUCCESS            Package inserted.
  @retval EFI_INVALID_PARAMETER  NULL input, or the package header declares a
                                 length smaller than the header itself.
  @retval EFI_OUT_OF_RESOURCES   Allocation failure.
**/
InsertFormPackage (
  IN     VOID                               *PackageHdr,
  IN     EFI_HII_DATABASE_NOTIFY_TYPE       NotifyType,
  IN OUT HII_DATABASE_PACKAGE_LIST_INSTANCE *PackageList,
  OUT    HII_IFR_PACKAGE_INSTANCE           **Package
  )
{
  HII_IFR_PACKAGE_INSTANCE *FormPackage;
  EFI_HII_PACKAGE_HEADER   PackageHeader;

  if (PackageHdr == NULL || PackageList == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Get the length of the package, including package header itself
  //
  CopyMem (&PackageHeader, PackageHdr, sizeof (EFI_HII_PACKAGE_HEADER));

  //
  // FIX: reject malformed packages whose declared length cannot even cover
  // the package header.  Without this check the subtraction below wraps
  // around (unsigned underflow) and requests a huge allocation/copy.
  //
  if (PackageHeader.Length < sizeof (EFI_HII_PACKAGE_HEADER)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Create a Form package node
  //
  FormPackage = (HII_IFR_PACKAGE_INSTANCE *) AllocateZeroPool (sizeof (HII_IFR_PACKAGE_INSTANCE));
  if (FormPackage == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  FormPackage->IfrData = (UINT8 *) AllocateZeroPool (PackageHeader.Length - sizeof (EFI_HII_PACKAGE_HEADER));
  if (FormPackage->IfrData == NULL) {
    FreePool (FormPackage);
    return EFI_OUT_OF_RESOURCES;
  }

  FormPackage->Signature = HII_IFR_PACKAGE_SIGNATURE;
  //
  // Copy Package Header
  //
  CopyMem (&FormPackage->FormPkgHdr, &PackageHeader, sizeof (EFI_HII_PACKAGE_HEADER));

  //
  // Copy Ifr contents
  //
  CopyMem (
    FormPackage->IfrData,
    (UINT8 *) PackageHdr + sizeof (EFI_HII_PACKAGE_HEADER),
    PackageHeader.Length - sizeof (EFI_HII_PACKAGE_HEADER)
    );

  InsertTailList (&PackageList->FormPkgHdr, &FormPackage->IfrEntry);
  *Package = FormPackage;

  //
  // Update FormPackage with the default setting
  //
  UpdateDefaultSettingInFormPackage (FormPackage);

  if (NotifyType == EFI_HII_DATABASE_NOTIFY_ADD_PACK) {
    PackageList->PackageListHdr.PackageLength += FormPackage->FormPkgHdr.Length;
  }

  return EFI_SUCCESS;
}
Safe
[ "CWE-416" ]
edk2
c32be82e99ef272e7fa742c2f06ff9a4c3756613
1.8698458618798675e+38
61
MdeModulePkg/HiiDB: Remove configuration table when it's freed (CVE-2019-14586) REF: https://bugzilla.tianocore.org/show_bug.cgi?id=1995 Fix the corner case issue that the original configuration runtime memory is freed, but it is still exposed to the OS runtime. So this patch is to remove the configuration table to avoid being used in OS runtime when the configuration runtime memory is freed. Cc: Liming Gao <liming.gao@intel.com> Cc: Eric Dong <eric.dong@intel.com> Cc: Jian J Wang <jian.j.wang@intel.com> Signed-off-by: Dandan Bi <dandan.bi@intel.com> Reviewed-by: Eric Dong <eric.dong@intel.com> Reviewed-by: Jian J Wang <jian.j.wang@intel.com>
0
/*
 * dpy_gfx_check_format: ask every display change listener attached to
 * @con whether it can handle pixman format @format.
 *
 * Listeners bound to a different console are ignored.  A listener without
 * a dpy_gfx_check_format hook is assumed to accept only the native
 * 32 bpp format.  Returns true only if all relevant listeners accept.
 */
bool dpy_gfx_check_format(QemuConsole *con,
                          pixman_format_code_t format)
{
    DisplayState *ds = con->ds;
    DisplayChangeListener *listener;

    QLIST_FOREACH(listener, &ds->listeners, next) {
        /* Skip listeners bound to a different console. */
        if (listener->con && listener->con != con) {
            continue;
        }
        if (listener->ops->dpy_gfx_check_format) {
            if (!listener->ops->dpy_gfx_check_format(listener, format)) {
                return false;
            }
        } else if (format != qemu_default_pixman_format(32, true)) {
            /* No hook: whitelist native 32 bpp only. */
            return false;
        }
    }
    return true;
}
Safe
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
2.8678911618095912e+38
24
char: move front end handlers in CharBackend Since the hanlders are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() then mux_chr_update_read_handler(), by storing the CharBackend pointer directly. Also a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before that, it was possible to call chr->handler by mistake with surprising results, for ex through qemu_chr_be_can_write(), which would result in calling the last set handler front end, not the one with focus. Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20161022095318.17775-22-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
  // Boost.Spirit.Qi grammar that parses a monitor capability string
  // ("allow service mon rw, allow command \"config-key get\" ...") into a
  // MonCap.  The rule declarations live in the enclosing grammar class;
  // this constructor only wires up their definitions.  NOTE(review): the
  // rules' attribute layouts must match MonCapGrant's adapted fields —
  // the interleaved qi::attr() placeholders pad out unused fields.
  MonCapParser() : MonCapParser::base_type(moncap)
  {
    using qi::char_;
    using qi::int_;
    using qi::ulong_long;
    using qi::lexeme;
    using qi::alnum;
    using qi::_val;
    using qi::_1;
    using qi::_2;
    using qi::_3;
    using qi::eps;
    using qi::lit;

    // Tokens: a string is either quoted (double or single) or a bare word
    // drawn from a restricted character set.
    quoted_string %=
      lexeme['"' >> +(char_ - '"') >> '"'] |
      lexeme['\'' >> +(char_ - '\'') >> '\''];
    unquoted_word %= +char_("a-zA-Z0-9_.-");
    str %= quoted_string | unquoted_word;

    spaces = +(lit(' ') | lit('\n') | lit('\t'));

    // command := command[=]cmd [k1=v1 k2=v2 ...]
    // Key/value constraints support exact match (=), prefix, and regex forms.
    str_match = '=' >> qi::attr(StringConstraint::MATCH_TYPE_EQUAL) >> str;
    str_prefix = spaces >> lit("prefix") >> spaces >>
                 qi::attr(StringConstraint::MATCH_TYPE_PREFIX) >> str;
    str_regex = spaces >> lit("regex") >> spaces >>
                 qi::attr(StringConstraint::MATCH_TYPE_REGEX) >> str;
    kv_pair = str >> (str_match | str_prefix | str_regex);
    kv_map %= kv_pair >> *(spaces >> kv_pair);
    command_match = -spaces >> lit("allow") >> spaces >> lit("command") >> (lit('=') | spaces)
			    >> qi::attr(string()) >> qi::attr(string())
			    >> str
			    >> -(spaces >> lit("with") >> spaces >> kv_map)
			    >> qi::attr(0);

    // service foo rwxa
    service_match %= -spaces >> lit("allow") >> spaces >> lit("service") >> (lit('=') | spaces) >>
	str >> qi::attr(string()) >> qi::attr(string()) >>
	qi::attr(map<string,StringConstraint>()) >>
	spaces >> rwxa;

    // profile foo  (the leading "allow" is optional here)
    profile_match %= -spaces >> -(lit("allow") >> spaces) >> lit("profile") >> (lit('=') | spaces) >>
	qi::attr(string()) >>
	str >>
	qi::attr(string()) >>
	qi::attr(map<string,StringConstraint>()) >>
	qi::attr(0);

    // bare "allow rwxa" grant with no service/profile/command qualifier
    rwxa_match %= -spaces >> lit("allow") >> spaces >>
	qi::attr(string()) >> qi::attr(string()) >> qi::attr(string()) >>
	qi::attr(map<string,StringConstraint>()) >>
	rwxa;

    // rwxa := * | [r][w][x]
    // eps seeds the accumulator with 0, then each letter ORs in its bit;
    // qi's sequential-or (||) makes each letter optional but ordered.
    rwxa =
      (lit("*")[_val = MON_CAP_ANY]) |
      (lit("all")[_val = MON_CAP_ANY]) |
      ( eps[_val = 0] >>
	( lit('r')[_val |= MON_CAP_R] ||
	  lit('w')[_val |= MON_CAP_W] ||
	  lit('x')[_val |= MON_CAP_X]
	  )
	);

    // grant := allow ...
    // Alternatives are tried in this order; rwxa_match must come first so a
    // bare "allow r" is not half-consumed by the qualified forms.
    grant = -spaces >> (rwxa_match | profile_match | service_match | command_match) >> -spaces;

    // moncap := grant [grant ...]  (grants separated by ';' or ',')
    grants %= (grant % (*lit(' ') >> (lit(';') | lit(',')) >> *lit(' ')));
    moncap = grants  [_val = phoenix::construct<MonCap>(_1)];
  }
Vulnerable
[ "CWE-285" ]
ceph
a2acedd2a7e12d58af6db35edbd8a9d29c557578
2.9897137116479896e+38
76
mon/config-key: limit caps allowed to access the store Henceforth, we'll require explicit `allow` caps for commands, or for the config-key service. Blanket caps are no longer allowed for the config-key service, except for 'allow *'. (for luminous and mimic, we're also ensuring MonCap's parser is able to understand forward slashes '/' when parsing prefixes) Signed-off-by: Joao Eduardo Luis <joao@suse.de> (cherry picked from commit 5fff611041c5afeaf3c8eb09e4de0cc919d69237)
1
/* Compare the already-case-folded pattern bytes [t, tend) against the
 * target text starting at p (bounded by end), folding the target one
 * character at a time.  Returns 1 on a full match, 0 otherwise.
 * Bounds-checks t inside the inner loop so a multi-byte fold can never
 * read past tend. */
str_lower_case_match(OnigEncoding enc, int case_fold_flag,
                     const UChar* t, const UChar* tend,
                     const UChar* p, const UChar* end)
{
  UChar fold_buf[ONIGENC_MBC_CASE_FOLD_MAXLEN];

  while (t < tend) {
    /* Fold the next target character into fold_buf; advances p. */
    int n = ONIGENC_MBC_CASE_FOLD(enc, case_fold_flag, &p, end, fold_buf);
    const UChar* q = fold_buf;

    while (n-- > 0) {
      if (t >= tend) return 0;   /* pattern exhausted mid-character */
      if (*t++ != *q++) return 0; /* byte mismatch */
    }
  }

  return 1;
}
Safe
[ "CWE-125" ]
oniguruma
d3e402928b6eb3327f8f7d59a9edfa622fec557b
9.886513227962666e+37
19
fix heap-buffer-overflow
0
    // Construct a (size_x, size_y, size_z, size_c) image and fill it from a
    // C++11 initializer list.  When repeat_values is true the list is cycled
    // until the whole buffer is filled; otherwise at most
    // min(size(), values.size()) leading pixels are written.
    // NOTE(review): this excerpt appears truncated — the constructor's
    // closing brace is not present in the captured snippet.
    template<typename t>
    CImg(const unsigned int size_x, const unsigned int size_y,
         const unsigned int size_z, const unsigned int size_c,
         const std::initializer_list<t> values,
         const bool repeat_values=true):
      _width(0),_height(0),_depth(0),_spectrum(0),_is_shared(false),_data(0) {
      // Shared fill logic reused by the other initializer-list constructors.
#define _cimg_constructor_cpp11(repeat_values) \
  auto it = values.begin(); \
  size_t siz = size(); \
  if (repeat_values) for (T *ptrd = _data; siz--; ) { \
    *(ptrd++) = (T)(*(it++)); if (it==values.end()) it = values.begin(); } \
  else { siz = std::min(siz,values.size()); for (T *ptrd = _data; siz--; ) *(ptrd++) = (T)(*(it++)); }
      assign(size_x,size_y,size_z,size_c);
      _cimg_constructor_cpp11(repeat_values);
Safe
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
2.09870948590654e+37
13
Fix other issues in 'CImg<T>::load_bmp()'.
0