func: string
target: string
cwe: list
project: string
commit_id: string
hash: string
size: int64
message: string
vul: int64
show_dlts_and_exit(pcap_t *pc, const char *device) { int n_dlts, i; int *dlts = 0; const char *dlt_name; n_dlts = pcap_list_datalinks(pc, &dlts); if (n_dlts < 0) error("%s", pcap_geterr(pc)); else if (n_dlts == 0 || !dlts) error("No data link types."); /* * If the interface is known to support monitor mode, indicate * whether these are the data link types available when not in * monitor mode, if -I wasn't specified, or when in monitor mode, * when -I was specified (the link-layer types available in * monitor mode might be different from the ones available when * not in monitor mode). */ if (supports_monitor_mode) (void) fprintf(stderr, "Data link types for %s %s (use option -y to set):\n", device, Iflag ? "when in monitor mode" : "when not in monitor mode"); else (void) fprintf(stderr, "Data link types for %s (use option -y to set):\n", device); for (i = 0; i < n_dlts; i++) { dlt_name = pcap_datalink_val_to_name(dlts[i]); if (dlt_name != NULL) { (void) fprintf(stderr, " %s (%s)", dlt_name, pcap_datalink_val_to_description(dlts[i])); /* * OK, does tcpdump handle that type? */ if (!has_printer(dlts[i])) (void) fprintf(stderr, " (printing not supported)"); fprintf(stderr, "\n"); } else { (void) fprintf(stderr, " DLT %d (printing not supported)\n", dlts[i]); } } #ifdef HAVE_PCAP_FREE_DATALINKS pcap_free_datalinks(dlts); #endif exit_tcpdump(0); }
Safe
[ "CWE-120", "CWE-787" ]
tcpdump
9ba91381954ad325ea4fd26b9c65a8bd9a2a85b6
2.768436569568501e+38
50
(for 4.9.3) CVE-2018-14879/fix -V to fail invalid input safely get_next_file() did not check the return value of strlen() and underflowed an array index if the line read by fgets() from the file started with \0. This caused an out-of-bounds read and could cause a write. Add the missing check. This vulnerability was discovered by Brian Carpenter & Geeknik Labs.
0
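The commit message above walks through a concrete bug: get_next_file() indexed the fgets() buffer at strlen(ptr) - 1, which underflows when the line starts with '\0'. A minimal sketch of the guarded pattern, assuming tcpdump-like names (get_next_file, a PATH_MAX-sized buffer); an illustration, not the verbatim patch:

#include <stdio.h>
#include <string.h>
#include <limits.h>

/* Hypothetical reconstruction: strip the trailing newline only after
 * confirming the line is non-empty. Without the len > 0 check, a line
 * beginning with '\0' makes strlen() return 0 and buf[len - 1] read
 * (and potentially write) out of bounds. */
static char *get_next_file(FILE *vfile, char *buf)
{
    size_t len;

    if (fgets(buf, PATH_MAX, vfile) == NULL)
        return NULL;
    len = strlen(buf);
    if (len > 0 && buf[len - 1] == '\n')   /* the missing check */
        buf[len - 1] = '\0';
    return buf;
}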
else if (!strcmp(canURN, "e2719d58-a985-b3c9-781a-b030af78d30e")) return "ClearKey1.0"; else if (!strcmp(canURN, "94CE86FB-07FF-4F43-ADB8-93D2FA968CA2")) return "FairPlay"; else if (!strcmp(canURN, "279fe473-512c-48fe-ade8-d176fee6b40f")) return "Arris Titanium"; else if (!strcmp(canURN, "aa11967f-cc01-4a4a-8e99-c5d3dddfea2d")) return "UDRM"; return "unknown"; } static GF_List *dasher_get_content_protection_desc(GF_DasherCtx *ctx, GF_DashStream *ds, GF_MPD_AdaptationSet *for_set) { char sCan[40]; u32 prot_scheme=0; u32 i, count; const GF_PropertyValue *p; GF_List *res = NULL; GF_BitStream *bs_r; count = gf_list_count(ctx->current_period->streams); bs_r = gf_bs_new((const char *) &count, 1, GF_BITSTREAM_READ); for (i=0; i<count; i++) { GF_MPD_Descriptor *desc; GF_DashStream *a_ds = gf_list_get(ctx->current_period->streams, i); if (!a_ds->is_encrypted) continue; if (for_set) { if (a_ds->set != for_set) continue; //for now only insert for the stream holding the set if (!a_ds->owns_set) continue; } else if ((a_ds != ds) && (a_ds->muxed_base != ds) ) { continue; } p = gf_filter_pid_get_property(a_ds->ipid, GF_PROP_PID_PROTECTION_SCHEME_TYPE); if (p) prot_scheme = p->value.uint; if ((prot_scheme==GF_ISOM_CENC_SCHEME) || (prot_scheme==GF_ISOM_CBC_SCHEME) || (prot_scheme==GF_ISOM_CENS_SCHEME) || (prot_scheme==GF_ISOM_CBCS_SCHEME) ) { const GF_PropertyValue *ki; u32 j, nb_pssh; GF_XMLAttribute *att; char szVal[GF_MAX_PATH]; ctx->use_cenc = GF_TRUE; ki = gf_filter_pid_get_property(a_ds->ipid, GF_PROP_PID_CENC_KEY_INFO); if (!ki || !ki->value.data.ptr) { continue; } if (!res) res = gf_list_new(); desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:dash:mp4protection:2011", gf_4cc_to_str(prot_scheme)); gf_list_add(res, desc); get_canon_urn(ki->value.data.ptr + 4, sCan); att = gf_xml_dom_create_attribute("cenc:default_KID", sCan); if (!desc->x_attributes) desc->x_attributes = gf_list_new(); gf_list_add(desc->x_attributes, att); if (ctx->pssh <= GF_DASH_PSSH_MOOF) { continue; } //(data) binary blob containing (u32)N [(bin128)SystemID(u32)version(u32)KID_count[(bin128)keyID](u32)priv_size(char*priv_size)priv_data] p = gf_filter_pid_get_property(a_ds->ipid, GF_PROP_PID_CENC_PSSH); if (!p) continue; gf_bs_reassign_buffer(bs_r, p->value.data.ptr, p->value.data.size); nb_pssh = gf_bs_read_u32(bs_r); //add pssh for (j=0; j<nb_pssh; j++) { u32 pssh_idx; bin128 sysID; GF_XMLNode *node; u32 version, k_count; u8 *pssh_data=NULL; u32 pssh_len, size_64; GF_BitStream *bs_w = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); //rewrite PSSH box gf_bs_write_u32(bs_w, 0); gf_bs_write_u32(bs_w, GF_ISOM_BOX_TYPE_PSSH); gf_bs_read_data(bs_r, sysID, 16); version = gf_bs_read_u32(bs_r); k_count = version ? 
gf_bs_read_u32(bs_r) : 0; gf_bs_write_u8(bs_w, version); gf_bs_write_u24(bs_w, 0); gf_bs_write_data(bs_w, sysID, 16); if (version) { gf_bs_write_u32(bs_w, k_count); for (j=0; j<k_count; j++) { bin128 keyID; gf_bs_read_data(bs_r, keyID, 16); gf_bs_write_data(bs_w, keyID, 16); } } k_count = gf_bs_read_u32(bs_r); gf_bs_write_u32(bs_w, k_count); for (pssh_idx=0; pssh_idx<k_count; pssh_idx++) { gf_bs_write_u8(bs_w, gf_bs_read_u8(bs_r) ); } pssh_len = (u32) gf_bs_get_position(bs_w); gf_bs_seek(bs_w, 0); gf_bs_write_u32(bs_w, pssh_len); gf_bs_seek(bs_w, pssh_len); gf_bs_get_content(bs_w, &pssh_data, &pssh_len); gf_bs_del(bs_w); get_canon_urn(sysID, sCan); desc = gf_mpd_descriptor_new(NULL, NULL, NULL); desc->x_children = gf_list_new(); sprintf(szVal, "urn:uuid:%s", sCan); desc->scheme_id_uri = gf_strdup(szVal); desc->value = gf_strdup(get_drm_kms_name(sCan)); gf_list_add(res, desc); GF_SAFEALLOC(node, GF_XMLNode); if (node) { GF_XMLNode *pnode; node->type = GF_XML_NODE_TYPE; node->name = gf_strdup("cenc:pssh"); node->content = gf_list_new(); gf_list_add(desc->x_children, node); GF_SAFEALLOC(pnode, GF_XMLNode); if (pnode) { pnode->type = GF_XML_TEXT_TYPE; gf_list_add(node->content, pnode); size_64 = 2*pssh_len; pnode->name = gf_malloc(size_64); if (pnode->name) { size_64 = gf_base64_encode((const char *)pssh_data, pssh_len, (char *)pnode->name, size_64); pnode->name[size_64] = 0; } } } gf_free(pssh_data); } } else { if (ctx->do_mpd) { GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[Dasher] Protection scheme %s has no official DASH mapping, using URI \"urn:gpac:dash:mp4protection:2018\"\n", gf_4cc_to_str(prot_scheme))); } if (!res) res = gf_list_new();
Vulnerable
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
1.2415194184282854e+38
145
fixed #2138
1
GF_Err reftype_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; ptr->type = ptr->reference_type; if (!ptr->trackIDCount) return GF_OK; e = gf_isom_box_write_header(s, bs); ptr->type = GF_ISOM_BOX_TYPE_REFT; if (e) return e; for (i = 0; i < ptr->trackIDCount; i++) { gf_bs_write_u32(bs, ptr->trackIDs[i]); } return GF_OK; }
Safe
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
1.9661450688307634e+38
16
prevent dref memleak on invalid input (#1183)
0
void SoundTouch::setChannels(uint numChannels) { if (!verifyNumberOfChannels(numChannels)) return; channels = numChannels; pRateTransposer->setChannels((int)numChannels); pTDStretch->setChannels((int)numChannels); }
Safe
[ "CWE-617" ]
soundtouch
107f2c5d201a4dfea1b7f15c5957ff2ac9e5f260
3.6459192790650075e+37
8
Replaced illegal-number-of-channel assertions with run-time exception
0
void HGraphBuilder::VisitIfStatement(IfStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); if (stmt->condition()->ToBooleanIsTrue()) { AddSimulate(stmt->ThenId()); Visit(stmt->then_statement()); } else if (stmt->condition()->ToBooleanIsFalse()) { AddSimulate(stmt->ElseId()); Visit(stmt->else_statement()); } else { HBasicBlock* cond_true = graph()->CreateBasicBlock(); HBasicBlock* cond_false = graph()->CreateBasicBlock(); CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false)); if (cond_true->HasPredecessor()) { cond_true->SetJoinId(stmt->ThenId()); set_current_block(cond_true); CHECK_BAILOUT(Visit(stmt->then_statement())); cond_true = current_block(); } else { cond_true = NULL; } if (cond_false->HasPredecessor()) { cond_false->SetJoinId(stmt->ElseId()); set_current_block(cond_false); CHECK_BAILOUT(Visit(stmt->else_statement())); cond_false = current_block(); } else { cond_false = NULL; } HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId()); set_current_block(join); } }
Safe
[]
node
fd80a31e0697d6317ce8c2d289575399f4e06d21
1.722217862833386e+38
37
deps: backport 5f836c from v8 upstream Original commit message: Fix Hydrogen bounds check elimination When combining bounds checks, they must all be moved before the first load/store that they are guarding. BUG=chromium:344186 LOG=y R=svenpanne@chromium.org Review URL: https://codereview.chromium.org/172093002 git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 fix #8070
0
static int __init inet6_init(void) { struct list_head *r; int err = 0; BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb)); /* Register the socket-side information for inet6_create. */ for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) INIT_LIST_HEAD(r); if (disable_ipv6_mod) { pr_info("Loaded, but administratively disabled, reboot required to enable\n"); goto out; } err = proto_register(&tcpv6_prot, 1); if (err) goto out; err = proto_register(&udpv6_prot, 1); if (err) goto out_unregister_tcp_proto; err = proto_register(&udplitev6_prot, 1); if (err) goto out_unregister_udp_proto; err = proto_register(&rawv6_prot, 1); if (err) goto out_unregister_udplite_proto; err = proto_register(&pingv6_prot, 1); if (err) goto out_unregister_ping_proto; /* We MUST register RAW sockets before we create the ICMP6, * IGMP6, or NDISC control sockets. */ err = rawv6_init(); if (err) goto out_unregister_raw_proto; /* Register the family here so that the init calls below will * be able to create sockets. (?? is this dangerous ??) */ err = sock_register(&inet6_family_ops); if (err) goto out_sock_register_fail; tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem; /* * ipngwg API draft makes clear that the correct semantics * for TCP and UDP is to consider one TCP and UDP instance * in a host available by both INET and INET6 APIs and * able to communicate via both network protocols. */ err = register_pernet_subsys(&inet6_net_ops); if (err) goto register_pernet_fail; err = icmpv6_init(); if (err) goto icmp_fail; err = ip6_mr_init(); if (err) goto ipmr_fail; err = ndisc_init(); if (err) goto ndisc_fail; err = igmp6_init(); if (err) goto igmp_fail; ipv6_stub = &ipv6_stub_impl; err = ipv6_netfilter_init(); if (err) goto netfilter_fail; /* Create /proc/foo6 entries. */ #ifdef CONFIG_PROC_FS err = -ENOMEM; if (raw6_proc_init()) goto proc_raw6_fail; if (udplite6_proc_init()) goto proc_udplite6_fail; if (ipv6_misc_proc_init()) goto proc_misc6_fail; if (if6_proc_init()) goto proc_if6_fail; #endif err = ip6_route_init(); if (err) goto ip6_route_fail; err = ip6_flowlabel_init(); if (err) goto ip6_flowlabel_fail; err = addrconf_init(); if (err) goto addrconf_fail; /* Init v6 extension headers. */ err = ipv6_exthdrs_init(); if (err) goto ipv6_exthdrs_fail; err = ipv6_frag_init(); if (err) goto ipv6_frag_fail; /* Init v6 transport protocols. 
*/ err = udpv6_init(); if (err) goto udpv6_fail; err = udplitev6_init(); if (err) goto udplitev6_fail; err = tcpv6_init(); if (err) goto tcpv6_fail; err = ipv6_packet_init(); if (err) goto ipv6_packet_fail; err = pingv6_init(); if (err) goto pingv6_fail; #ifdef CONFIG_SYSCTL err = ipv6_sysctl_register(); if (err) goto sysctl_fail; #endif out: return err; #ifdef CONFIG_SYSCTL sysctl_fail: ipv6_packet_cleanup(); #endif pingv6_fail: pingv6_exit(); ipv6_packet_fail: tcpv6_exit(); tcpv6_fail: udplitev6_exit(); udplitev6_fail: udpv6_exit(); udpv6_fail: ipv6_frag_exit(); ipv6_frag_fail: ipv6_exthdrs_exit(); ipv6_exthdrs_fail: addrconf_cleanup(); addrconf_fail: ip6_flowlabel_cleanup(); ip6_flowlabel_fail: ip6_route_cleanup(); ip6_route_fail: #ifdef CONFIG_PROC_FS if6_proc_exit(); proc_if6_fail: ipv6_misc_proc_exit(); proc_misc6_fail: udplite6_proc_exit(); proc_udplite6_fail: raw6_proc_exit(); proc_raw6_fail: #endif ipv6_netfilter_fini(); netfilter_fail: igmp6_cleanup(); igmp_fail: ndisc_cleanup(); ndisc_fail: ip6_mr_cleanup(); ipmr_fail: icmpv6_cleanup(); icmp_fail: unregister_pernet_subsys(&inet6_net_ops); register_pernet_fail: sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); out_sock_register_fail: rawv6_exit(); out_unregister_ping_proto: proto_unregister(&pingv6_prot); out_unregister_raw_proto: proto_unregister(&rawv6_prot); out_unregister_udplite_proto: proto_unregister(&udplitev6_prot); out_unregister_udp_proto: proto_unregister(&udpv6_prot); out_unregister_tcp_proto: proto_unregister(&tcpv6_prot); goto out; }
Safe
[]
net
5f81bd2e5d804ca93f3ec8873451b22d2f454721
8.708356037908829e+36
201
ipv6: export a stub for IPv6 symbols used by vxlan In case IPv6 is compiled as a module, introduce a stub for ipv6_sock_mc_join and ipv6_sock_mc_drop etc.. It will be used by vxlan module. Suggested by Ben. This is an ugly but easy solution for now. Cc: Ben Hutchings <bhutchings@solarflare.com> Cc: Stephen Hemminger <stephen@networkplumber.org> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Cong Wang <amwang@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
static struct list_head *rb_list_head(struct list_head *list) { unsigned long val = (unsigned long)list; return (struct list_head *)(val & ~RB_FLAG_MASK); }
Safe
[ "CWE-190" ]
linux-stable
59643d1535eb220668692a5359de22545af579f6
7.875124180803699e+37
6
ring-buffer: Prevent overflow of size in ring_buffer_resize() If the size passed to ring_buffer_resize() is greater than MAX_LONG - BUF_PAGE_SIZE then the DIV_ROUND_UP() will return zero. Here are the details: # echo 18014398509481980 > /sys/kernel/debug/tracing/buffer_size_kb tracing_entries_write() processes this and converts kb to bytes. 18014398509481980 << 10 = 18446744073709547520 and this is passed to ring_buffer_resize() as unsigned long size. size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); Where DIV_ROUND_UP(a, b) is (a + b - 1)/b BUF_PAGE_SIZE is 4080 and here 18446744073709547520 + 4080 - 1 = 18446744073709551599 where 18446744073709551599 is still smaller than 2^64 2^64 - 18446744073709551599 = 17 But now 18446744073709551599 / 4080 = 4521260802379792 and size = size * 4080 = 18446744073709551360 This is checked to make sure it's still greater than 2 * 4080, which it is. Then we convert to the number of buffer pages needed. nr_page = DIV_ROUND_UP(size, BUF_PAGE_SIZE) but this time size is 18446744073709551360 and 2^64 - (18446744073709551360 + 4080 - 1) = -3823 Thus it overflows and the resulting number is less than 4080, which makes 3823 / 4080 = 0, and nr_pages is set to this. As we already checked against the minimum that nr_pages may be, this causes the logic to fail as well, and we crash the kernel. There's no reason to have the two DIV_ROUND_UP() (that's just a result of historical code changes), clean up the code and fix this bug. Cc: stable@vger.kernel.org # 3.5+ Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic") Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
0
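The overflow arithmetic in the message above can be reproduced in isolation. A standalone demonstration (constants taken from the commit message; this models the bug, it is not kernel code):

#include <stdio.h>

#define BUF_PAGE_SIZE 4080UL
#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
    /* 18014398509481980 KB converted to bytes, as in the message. */
    unsigned long size = 18014398509481980UL << 10; /* 18446744073709547520 */
    size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);       /* 4521260802379792 */
    size *= BUF_PAGE_SIZE;                          /* 18446744073709551360 */
    /* size + BUF_PAGE_SIZE - 1 now wraps past 2^64, leaving 3823,
     * so the second round-up collapses to zero. */
    unsigned long nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
    printf("nr_pages = %lu\n", nr_pages);           /* prints 0 on 64-bit */
    return 0;
}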
int aes_test() { AES_CBC_Encryption enc; AES_CBC_Decryption dec; const int bs(TaoCrypt::AES::BLOCK_SIZE); byte key[] = "0123456789abcdef "; // align byte iv[] = "1234567890abcdef "; // align enc.SetKey(key, bs, iv); dec.SetKey(key, bs, iv); enc.Process(cipher, msg, bs); dec.Process(plain, cipher, bs); if (memcmp(plain, msg, bs)) return -60; const byte verify[] = { 0x95,0x94,0x92,0x57,0x5f,0x42,0x81,0x53, 0x2c,0xcc,0x9d,0x46,0x77,0xa2,0x33,0xcb }; if (memcmp(cipher, verify, bs)) return -61; AES_ECB_Encryption enc2; AES_ECB_Decryption dec2; enc2.SetKey(key, bs, iv); dec2.SetKey(key, bs, iv); enc2.Process(cipher, msg, bs); dec2.Process(plain, cipher, bs); if (memcmp(plain, msg, bs)) return -62; const byte verify2[] = { 0xd0,0xc9,0xd9,0xc9,0x40,0xe8,0x97,0xb6, 0xc8,0x8c,0x33,0x3b,0xb5,0x8f,0x85,0xd1 }; if (memcmp(cipher, verify2, bs)) return -63; return 0; }
Safe
[]
mysql-server
5c6169fb309981b564a17bee31b367a18866d674
2.4533191423575154e+38
50
Bug #24740291: YASSL UPDATE TO 2.4.2
0
static int CheckCertSignature_ex(const byte* cert, word32 certSz, void* heap, void* cm, const byte* pubKey, word32 pubKeySz, int pubKeyOID, int req) { #ifndef WOLFSSL_SMALL_STACK SignatureCtx sigCtx[1]; #else SignatureCtx* sigCtx; #endif byte hash[KEYID_SIZE]; Signer* ca = NULL; word32 idx = 0; int len; word32 tbsCertIdx = 0; word32 sigIndex = 0; word32 signatureOID = 0; word32 oid = 0; word32 issuerIdx = 0; word32 issuerSz = 0; #ifndef NO_SKID int extLen = 0; word32 extIdx = 0; word32 extEndIdx = 0; int extAuthKeyIdSet = 0; #endif int ret = 0; word32 localIdx; byte tag; if (cert == NULL) { return BAD_FUNC_ARG; } #ifdef WOLFSSL_SMALL_STACK sigCtx = (SignatureCtx*)XMALLOC(sizeof(*sigCtx), heap, DYNAMIC_TYPE_SIGNATURE); if (sigCtx == NULL) return MEMORY_E; #endif InitSignatureCtx(sigCtx, heap, INVALID_DEVID); /* Certificate SEQUENCE */ if (GetSequence(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; if (ret == 0) { tbsCertIdx = idx; /* TBSCertificate SEQUENCE */ if (GetSequence(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; } if (ret == 0) { sigIndex = len + idx; if ((idx + 1) > certSz) ret = BUFFER_E; } if (ret == 0) { /* version - optional */ localIdx = idx; if (GetASNTag(cert, &localIdx, &tag, certSz) == 0) { if (tag == (ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED)) { idx++; if (GetLength(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; idx += len; } } } if (ret == 0) { /* serialNumber */ if (GetASNHeader(cert, ASN_INTEGER, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; } if (ret == 0) { idx += len; /* signature */ if (!req && GetAlgoId(cert, &idx, &signatureOID, oidSigType, certSz) < 0) ret = ASN_PARSE_E; } if (ret == 0) { issuerIdx = idx; /* issuer for cert or subject for csr */ if (GetSequence(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; } if (ret == 0) { issuerSz = len + idx - issuerIdx; } #ifndef NO_SKID if (!req && ret == 0) { idx += len; /* validity */ if (GetSequence(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; } if (!req && ret == 0) { idx += len; /* subject */ if (GetSequence(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; } if (ret == 0) { idx += len; /* subjectPublicKeyInfo */ if (GetSequence(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; } if (req && ret == 0) { idx += len; /* attributes */ if (GetASNHeader_ex(cert, ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED, &idx, &len, certSz, 1) < 0) ret = ASN_PARSE_E; } if (!req) { if (ret == 0) { idx += len; if ((idx + 1) > certSz) ret = BUFFER_E; } if (ret == 0) { /* issuerUniqueID - optional */ localIdx = idx; if (GetASNTag(cert, &localIdx, &tag, certSz) == 0) { if (tag == (ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED | 1)) { idx++; if (GetLength(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; idx += len; } } } if (ret == 0) { if ((idx + 1) > certSz) ret = BUFFER_E; } if (ret == 0) { /* subjectUniqueID - optional */ localIdx = idx; if (GetASNTag(cert, &localIdx, &tag, certSz) == 0) { if (tag == (ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED | 2)) { idx++; if (GetLength(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; idx += len; } } } if (ret == 0) { if ((idx + 1) > certSz) ret = BUFFER_E; } /* extensions - optional */ localIdx = idx; if (ret == 0 && GetASNTag(cert, &localIdx, &tag, certSz) == 0 && tag == (ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED | 3)) { idx++; if (GetLength(cert, &idx, &extLen, certSz) < 0) ret = ASN_PARSE_E; if (ret == 0) { if (GetSequence(cert, &idx, &extLen, certSz) < 0) ret = ASN_PARSE_E; } if (ret == 0) { extEndIdx = idx + extLen; /* Check each extension for the ones we want. 
*/ while (ret == 0 && idx < extEndIdx) { if (GetSequence(cert, &idx, &len, certSz) < 0) ret = ASN_PARSE_E; if (ret == 0) { extIdx = idx; if (GetObjectId(cert, &extIdx, &oid, oidCertExtType, certSz) < 0) { ret = ASN_PARSE_E; } if (ret == 0) { if ((extIdx + 1) > certSz) ret = BUFFER_E; } } if (ret == 0) { localIdx = extIdx; if (GetASNTag(cert, &localIdx, &tag, certSz) == 0 && tag == ASN_BOOLEAN) { if (GetBoolean(cert, &extIdx, certSz) < 0) ret = ASN_PARSE_E; } } if (ret == 0) { if (GetOctetString(cert, &extIdx, &extLen, certSz) < 0) ret = ASN_PARSE_E; } if (ret == 0) { switch (oid) { case AUTH_KEY_OID: if (GetSequence(cert, &extIdx, &extLen, certSz) < 0) ret = ASN_PARSE_E; if (ret == 0 && (extIdx + 1) >= certSz) ret = BUFFER_E; if (ret == 0 && GetASNTag(cert, &extIdx, &tag, certSz) == 0 && tag == (ASN_CONTEXT_SPECIFIC | 0)) { if (GetLength(cert, &extIdx, &extLen, certSz) <= 0) ret = ASN_PARSE_E; if (ret == 0) { extAuthKeyIdSet = 1; if (extLen == KEYID_SIZE) XMEMCPY(hash, cert + extIdx, extLen); else { ret = CalcHashId(cert + extIdx, extLen, hash); } } } break; default: break; } } idx += len; } } } } else if (ret == 0) { idx += len; } if (ret == 0 && pubKey == NULL) { if (extAuthKeyIdSet) ca = GetCA(cm, hash); if (ca == NULL) { ret = CalcHashId(cert + issuerIdx, issuerSz, hash); if (ret == 0) ca = GetCAByName(cm, hash); } } #else if (ret == 0 && pubKey == NULL) { ret = CalcHashId(cert + issuerIdx, issuerSz, hash); if (ret == 0) ca = GetCA(cm, hash); } #endif /* !NO_SKID */ if (ca == NULL && pubKey == NULL) ret = ASN_NO_SIGNER_E; if (ret == 0) { idx = sigIndex; /* signatureAlgorithm */ if (GetAlgoId(cert, &idx, &oid, oidSigType, certSz) < 0) ret = ASN_PARSE_E; /* In CSR signature data is not present in body */ if (req) signatureOID = oid; } if (ret == 0) { if (oid != signatureOID) ret = ASN_SIG_OID_E; } if (ret == 0) { /* signatureValue */ if (CheckBitString(cert, &idx, &len, certSz, 1, NULL) < 0) ret = ASN_PARSE_E; } if (ret == 0) { if (pubKey != NULL) { ret = ConfirmSignature(sigCtx, cert + tbsCertIdx, sigIndex - tbsCertIdx, pubKey, pubKeySz, pubKeyOID, cert + idx, len, signatureOID, NULL); } else { ret = ConfirmSignature(sigCtx, cert + tbsCertIdx, sigIndex - tbsCertIdx, ca->publicKey, ca->pubKeySize, ca->keyOID, cert + idx, len, signatureOID, NULL); } if (ret != 0) { WOLFSSL_MSG("Confirm signature failed"); } } FreeSignatureCtx(sigCtx); #ifdef WOLFSSL_SMALL_STACK if (sigCtx != NULL) XFREE(sigCtx, heap, DYNAMIC_TYPE_SIGNATURE); #endif return ret; }
Safe
[ "CWE-125", "CWE-345" ]
wolfssl
f93083be72a3b3d956b52a7ec13f307a27b6e093
3.0490272861650466e+37
309
OCSP: improve handling of OCSP no check extension
0
void Gfx::doImage(Object *ref, Stream *str, GBool inlineImg) { Dict *dict, *maskDict; int width, height; int bits, maskBits; GBool interpolate; StreamColorSpaceMode csMode; GBool mask; GBool invert; GfxColorSpace *colorSpace, *maskColorSpace; GfxImageColorMap *colorMap, *maskColorMap; Object maskObj, smaskObj; GBool haveColorKeyMask, haveExplicitMask, haveSoftMask; int maskColors[2*gfxColorMaxComps]; int maskWidth, maskHeight; GBool maskInvert; GBool maskInterpolate; Stream *maskStr; Object obj1, obj2; int i; // get info from the stream bits = 0; csMode = streamCSNone; str->getImageParams(&bits, &csMode); // get stream dict dict = str->getDict(); // get size dict->lookup("Width", &obj1); if (obj1.isNull()) { obj1.free(); dict->lookup("W", &obj1); } if (obj1.isInt()) width = obj1.getInt(); else if (obj1.isReal()) width = (int)obj1.getReal(); else goto err2; obj1.free(); dict->lookup("Height", &obj1); if (obj1.isNull()) { obj1.free(); dict->lookup("H", &obj1); } if (obj1.isInt()) height = obj1.getInt(); else if (obj1.isReal()) height = (int)obj1.getReal(); else goto err2; obj1.free(); if (width < 1 || height < 1) goto err1; // image interpolation dict->lookup("Interpolate", &obj1); if (obj1.isNull()) { obj1.free(); dict->lookup("I", &obj1); } if (obj1.isBool()) interpolate = obj1.getBool(); else interpolate = gFalse; obj1.free(); maskInterpolate = gFalse; // image or mask? dict->lookup("ImageMask", &obj1); if (obj1.isNull()) { obj1.free(); dict->lookup("IM", &obj1); } mask = gFalse; if (obj1.isBool()) mask = obj1.getBool(); else if (!obj1.isNull()) goto err2; obj1.free(); // bit depth if (bits == 0) { dict->lookup("BitsPerComponent", &obj1); if (obj1.isNull()) { obj1.free(); dict->lookup("BPC", &obj1); } if (obj1.isInt()) { bits = obj1.getInt(); } else if (mask) { bits = 1; } else { goto err2; } obj1.free(); } // display a mask if (mask) { // check for inverted mask if (bits != 1) goto err1; invert = gFalse; dict->lookup("Decode", &obj1); if (obj1.isNull()) { obj1.free(); dict->lookup("D", &obj1); } if (obj1.isArray()) { obj1.arrayGet(0, &obj2); // Table 4.39 says /Decode must be [1 0] or [0 1]. Adobe // accepts [1.0 0.0] as well. 
if (obj2.isNum() && obj2.getNum() >= 0.9) invert = gTrue; obj2.free(); } else if (!obj1.isNull()) { goto err2; } obj1.free(); // draw it if (!contentIsHidden()) { out->drawImageMask(state, ref, str, width, height, invert, interpolate, inlineImg); if (out->fillMaskCSPattern(state)) { maskHaveCSPattern = gTrue; doPatternFill(gTrue); out->endMaskClip(state); maskHaveCSPattern = gFalse; } } } else { // get color space and color map dict->lookup("ColorSpace", &obj1); if (obj1.isNull()) { obj1.free(); dict->lookup("CS", &obj1); } if (obj1.isName() && inlineImg) { res->lookupColorSpace(obj1.getName(), &obj2); if (!obj2.isNull()) { obj1.free(); obj1 = obj2; } else { obj2.free(); } } if (!obj1.isNull()) { colorSpace = GfxColorSpace::parse(&obj1, this); } else if (csMode == streamCSDeviceGray) { colorSpace = new GfxDeviceGrayColorSpace(); } else if (csMode == streamCSDeviceRGB) { colorSpace = new GfxDeviceRGBColorSpace(); } else if (csMode == streamCSDeviceCMYK) { colorSpace = new GfxDeviceCMYKColorSpace(); } else { colorSpace = NULL; } obj1.free(); if (!colorSpace) { goto err1; } dict->lookup("Decode", &obj1); if (obj1.isNull()) { obj1.free(); dict->lookup("D", &obj1); } colorMap = new GfxImageColorMap(bits, &obj1, colorSpace); obj1.free(); if (!colorMap->isOk()) { delete colorMap; goto err1; } // get the mask haveColorKeyMask = haveExplicitMask = haveSoftMask = gFalse; maskStr = NULL; // make gcc happy maskWidth = maskHeight = 0; // make gcc happy maskInvert = gFalse; // make gcc happy maskColorMap = NULL; // make gcc happy dict->lookup("Mask", &maskObj); dict->lookup("SMask", &smaskObj); if (smaskObj.isStream()) { // soft mask if (inlineImg) { goto err1; } maskStr = smaskObj.getStream(); maskDict = smaskObj.streamGetDict(); maskDict->lookup("Width", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("W", &obj1); } if (!obj1.isInt()) { goto err2; } maskWidth = obj1.getInt(); obj1.free(); maskDict->lookup("Height", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("H", &obj1); } if (!obj1.isInt()) { goto err2; } maskHeight = obj1.getInt(); obj1.free(); maskDict->lookup("Interpolate", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("I", &obj1); } if (obj1.isBool()) maskInterpolate = obj1.getBool(); else maskInterpolate = gFalse; obj1.free(); maskDict->lookup("BitsPerComponent", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("BPC", &obj1); } if (!obj1.isInt()) { goto err2; } maskBits = obj1.getInt(); obj1.free(); maskDict->lookup("ColorSpace", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("CS", &obj1); } if (obj1.isName()) { res->lookupColorSpace(obj1.getName(), &obj2); if (!obj2.isNull()) { obj1.free(); obj1 = obj2; } else { obj2.free(); } } maskColorSpace = GfxColorSpace::parse(&obj1, this); obj1.free(); if (!maskColorSpace || maskColorSpace->getMode() != csDeviceGray) { goto err1; } maskDict->lookup("Decode", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("D", &obj1); } maskColorMap = new GfxImageColorMap(maskBits, &obj1, maskColorSpace); obj1.free(); if (!maskColorMap->isOk()) { delete maskColorMap; goto err1; } //~ handle the Matte entry haveSoftMask = gTrue; } else if (maskObj.isArray()) { // color key mask for (i = 0; i < maskObj.arrayGetLength() && i < 2*gfxColorMaxComps; ++i) { maskObj.arrayGet(i, &obj1); if (obj1.isInt()) { maskColors[i] = obj1.getInt(); } else if (obj1.isReal()) { error(-1, "Mask entry should be an integer but it's a real, trying to use it"); maskColors[i] = (int) obj1.getReal(); } else { error(-1, "Mask 
entry should be an integer but it's of type %d", obj1.getType()); obj1.free(); goto err1; } obj1.free(); } haveColorKeyMask = gTrue; } else if (maskObj.isStream()) { // explicit mask if (inlineImg) { goto err1; } maskStr = maskObj.getStream(); maskDict = maskObj.streamGetDict(); maskDict->lookup("Width", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("W", &obj1); } if (!obj1.isInt()) { goto err2; } maskWidth = obj1.getInt(); obj1.free(); maskDict->lookup("Height", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("H", &obj1); } if (!obj1.isInt()) { goto err2; } maskHeight = obj1.getInt(); obj1.free(); maskDict->lookup("Interpolate", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("I", &obj1); } if (obj1.isBool()) maskInterpolate = obj1.getBool(); else maskInterpolate = gFalse; obj1.free(); maskDict->lookup("ImageMask", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("IM", &obj1); } if (!obj1.isBool() || !obj1.getBool()) { goto err2; } obj1.free(); maskInvert = gFalse; maskDict->lookup("Decode", &obj1); if (obj1.isNull()) { obj1.free(); maskDict->lookup("D", &obj1); } if (obj1.isArray()) { obj1.arrayGet(0, &obj2); // Table 4.39 says /Decode must be [1 0] or [0 1]. Adobe // accepts [1.0 0.0] as well. if (obj2.isNum() && obj2.getNum() >= 0.9) { maskInvert = gTrue; } obj2.free(); } else if (!obj1.isNull()) { goto err2; } obj1.free(); haveExplicitMask = gTrue; } // draw it if (haveSoftMask) { if (!contentIsHidden()) { out->drawSoftMaskedImage(state, ref, str, width, height, colorMap, interpolate, maskStr, maskWidth, maskHeight, maskColorMap, maskInterpolate); } delete maskColorMap; } else if (haveExplicitMask && !contentIsHidden ()) { out->drawMaskedImage(state, ref, str, width, height, colorMap, interpolate, maskStr, maskWidth, maskHeight, maskInvert, maskInterpolate); } else if (!contentIsHidden()) { out->drawImage(state, ref, str, width, height, colorMap, interpolate, haveColorKeyMask ? maskColors : (int *)NULL, inlineImg); } delete colorMap; maskObj.free(); smaskObj.free(); } if ((i = width * height) > 1000) { i = 1000; } updateLevel += i; return; err2: obj1.free(); err1: error(getPos(), "Bad image parameters"); }
Safe
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
4.7808235297614627e+36
383
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
0
void clear_has_explicit_value() { bitmap_clear_bit(&table->has_value_set, field_index); }
Safe
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
2.528706644477134e+38
4
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since the expr item is allocated on expr_arena, all its contained items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter a prematurely freed item. When the table is reopened from cache, vcol_info contains a stale expression. We refresh the expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above, expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because there was no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc. must not affect vcol expression update. If the table was created successfully, any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <serg@mariadb.org>
0
void decode(bufferlist::iterator& bl) { __u8 struct_v; ::decode(struct_v, bl); ::decode(nonce_plus_one, bl); }
Safe
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
3.0776292174486536e+38
5
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <sage@redhat.com> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
0
write_ucschar(wchar hwc, wchar wc, int width) { cattrflags attr = term.curs.attr.attr; ucschar c = hwc ? combine_surrogates(hwc, wc) : wc; uchar cf = scriptfont(c); #ifdef debug_scriptfonts if (c && (cf || c > 0xFF)) printf("write_ucschar %04X scriptfont %d\n", c, cf); #endif if (cf && cf <= 10 && !(attr & FONTFAM_MASK)) term.curs.attr.attr = attr | ((cattrflags)cf << ATTR_FONTFAM_SHIFT); if (hwc) { if (width == 1 && (cfg.charwidth == 10 || cs_single_forced) && (is_wide(c) || (cs_ambig_wide && is_ambig(c))) ) { // ensure indication of cjksingle width handling to trigger down-zooming width = 2; } write_char(hwc, width); write_char(wc, -1); // -1 indicates low surrogate } else write_char(wc, width); term.curs.attr.attr = attr; }
Safe
[ "CWE-703", "CWE-770" ]
mintty
bd52109993440b6996760aaccb66e68e782762b9
2.0369770507650775e+38
28
tame some window operations, just in case
0
TEST_F(DocumentSourceMatchTest, ShouldOnlyAddOuterFieldAsDependencyOfImplicitEqualityPredicate) { // Parses to {a: {$eq: {notAField: {$gte: 4}}}}. auto match = DocumentSourceMatch::create(fromjson("{a: {notAField: {$gte: 4}}}"), getExpCtx()); DepsTracker dependencies; ASSERT_EQUALS(DepsTracker::State::SEE_NEXT, match->getDependencies(&dependencies)); ASSERT_EQUALS(1U, dependencies.fields.count("a")); ASSERT_EQUALS(1U, dependencies.fields.size()); ASSERT_EQUALS(false, dependencies.needWholeDocument); ASSERT_EQUALS(false, dependencies.getNeedsMetadata(DocumentMetadataFields::kTextScore)); }
Safe
[]
mongo
b3107d73a2c58d7e016b834dae0acfd01c0db8d7
1.9819619797791152e+38
10
SERVER-59299: Flatten top-level nested $match stages in doOptimizeAt (cherry picked from commit 4db5eceda2cff697f35c84cd08232bac8c33beec)
0
static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio) { spi_gpio->mosi = devm_gpiod_get_optional(dev, "mosi", GPIOD_OUT_LOW); if (IS_ERR(spi_gpio->mosi)) return PTR_ERR(spi_gpio->mosi); spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN); if (IS_ERR(spi_gpio->miso)) return PTR_ERR(spi_gpio->miso); spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); return PTR_ERR_OR_ZERO(spi_gpio->sck); }
Safe
[ "CWE-400", "CWE-401" ]
linux
d3b0ffa1d75d5305ebe34735598993afbb8a869d
7.371191940049007e+37
13
spi: gpio: prevent memory leak in spi_gpio_probe In spi_gpio_probe an SPI master is allocated via spi_alloc_master, but this controller should be released if devm_add_action_or_reset fails, otherwise the memory leaks. In order to avoid the leak, spi_controller_put must be called in case of failure for devm_add_action_or_reset. Fixes: 8b797490b4db ("spi: gpio: Make sure spi_master_put() is called in every error path") Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com> Link: https://lore.kernel.org/r/20190930205241.5483-1-navid.emamdoost@gmail.com Signed-off-by: Mark Brown <broonie@kernel.org>
0
static u8 secs_to_retrans(int seconds, int timeout, int rto_max) { u8 res = 0; if (seconds > 0) { int period = timeout; res = 1; while (seconds > period && res < 255) { res++; timeout <<= 1; if (timeout > rto_max) timeout = rto_max; period += timeout; } } return res; }
Safe
[ "CWE-399", "CWE-835" ]
linux
ccf7abb93af09ad0868ae9033d1ca8108bdaec82
1.5411850013909381e+38
18
tcp: avoid infinite loop in tcp_splice_read() Splicing from TCP socket is vulnerable when a packet with URG flag is received and stored into receive queue. __tcp_splice_read() returns 0, and sk_wait_data() immediately returns since there is the problematic skb in queue. This is a nice way to burn cpu (aka infinite loop) and trigger soft lockups. Again, this gem was found by syzkaller tool. Fixes: 9c55e01c0cc8 ("[TCP]: Splice receive support.") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Dmitry Vyukov <dvyukov@google.com> Cc: Willy Tarreau <w@1wt.eu> Signed-off-by: David S. Miller <davem@davemloft.net>
0
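The lockup the message describes is a livelock: the reader makes no progress, but the wait primitive sees queued data and returns immediately. A deliberately abstract model with hypothetical stand-ins (read_some() for __tcp_splice_read(), wait_for_data() for sk_wait_data(); not kernel code):

#include <stdio.h>

static int urg_skb_queued = 1;

static long read_some(void) { return 0; }  /* URG skb: zero bytes spliced */

static void wait_for_data(void)
{
    /* A real implementation would sleep until new data arrives; with
     * the problematic skb already queued it returns immediately, so
     * nothing throttles the loop. */
    if (urg_skb_queued)
        return;
}

int main(void)
{
    long spins = 0;
    while (read_some() == 0) {
        wait_for_data();
        if (++spins > 1000000) {           /* cap for the demo */
            puts("no progress and no sleep: the real loop spins forever");
            break;
        }
    }
    return 0;
}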
static int match_func(struct libmnt_fs *fs, void *data __attribute__ ((__unused__))) { int rc = flags & FL_INVERT ? 1 : 0; const char *m; void *md; m = get_match(COL_FSTYPE); if (m && !mnt_fs_match_fstype(fs, m)) return rc; m = get_match(COL_OPTIONS); if (m && !mnt_fs_match_options(fs, m)) return rc; md = get_match_data(COL_MAJMIN); if (md && mnt_fs_get_devno(fs) != *((dev_t *) md)) return rc; m = get_match(COL_TARGET); if (m && !mnt_fs_match_target(fs, m, cache)) return rc; m = get_match(COL_SOURCE); if (m && !mnt_fs_match_source(fs, m, cache)) return rc; if ((flags & FL_DF) && !(flags & FL_ALL)) { const char *type = mnt_fs_get_fstype(fs); if (type && strstr(type, "tmpfs")) /* tmpfs is wanted */ return !rc; if (mnt_fs_is_pseudofs(fs)) return rc; } if ((flags & FL_REAL) && mnt_fs_is_pseudofs(fs)) return rc; if ((flags & FL_PSEUDO) && !mnt_fs_is_pseudofs(fs)) return rc; if ((flags & FL_SHADOWED)) { struct libmnt_table *tb = NULL; mnt_fs_get_table(fs, &tb); if (tb && mnt_table_over_fs(tb, fs, NULL) != 0) return rc; } if ((flags & FL_DELETED) && !mnt_fs_is_deleted(fs)) return rc; return !rc; }
Vulnerable
[ "CWE-552", "CWE-703" ]
util-linux
166e87368ae88bf31112a30e078cceae637f4cdb
1.5107994875260822e+38
56
libmount: remove support for deleted mount table entries The "(deleted)" suffix was originally used by the kernel for deleted mountpoints. Since kernel commit 9d4d65748a5ca26ea8650e50ba521295549bf4e3 (Dec 2014) the kernel no longer uses this suffix for mount entries in /proc at all. Let's remove this support from libmount too. Signed-off-by: Karel Zak <kzak@redhat.com>
1
//! Load image from an ascii file \overloading. CImg<T>& load_ascii(std::FILE *const file) { return _load_ascii(file,0); }
Safe
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
1.2313491440663473e+38
3
Fix other issues in 'CImg<T>::load_bmp()'.
0
GF_Err prhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ProjectionHeaderBox *ptr = (GF_ProjectionHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->yaw); gf_bs_write_u32(bs, ptr->pitch); gf_bs_write_u32(bs, ptr->roll); return GF_OK; }
Safe
[ "CWE-476", "CWE-787" ]
gpac
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
9.7986403187705e+37
12
fixed #1757
0
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->dev->stats; int ret; if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr)) goto tx_err; switch (skb->protocol) { case htons(ETH_P_IP): ret = ip6gre_xmit_ipv4(skb, dev); break; case htons(ETH_P_IPV6): ret = ip6gre_xmit_ipv6(skb, dev); break; default: ret = ip6gre_xmit_other(skb, dev); break; } if (ret < 0) goto tx_err; return NETDEV_TX_OK; tx_err: stats->tx_errors++; stats->tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; }
Safe
[ "CWE-125" ]
net
7892032cfe67f4bde6fc2ee967e45a8fbaf33756
8.449142795791187e+36
33
ip6_gre: fix ip6gre_err() invalid reads Andrey Konovalov reported out of bound accesses in ip6gre_err() If GRE flags contains GRE_KEY, the following expression *(((__be32 *)p) + (grehlen / 4) - 1) accesses data ~40 bytes after the expected point, since grehlen includes the size of IPv6 headers. Let's use a "struct gre_base_hdr *greh" pointer to make this code more readable. p[1] becomes greh->protocol. grehlen is the GRE header length. Fixes: c12b395a4664 ("gre: Support GRE over IPv6") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Andrey Konovalov <andreyknvl@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
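The message contrasts raw offset arithmetic against a typed header pointer. A small sketch of both access styles (the struct mirrors the kernel's two-field gre_base_hdr; a standalone illustration, not the verbatim patch):

#include <stdint.h>

/* Stand-in for the kernel's struct gre_base_hdr: flags, then protocol. */
struct gre_base_hdr {
    uint16_t flags;
    uint16_t protocol;
};

/* Before: the index scales with grehlen, which in ip6gre_err() included
 * the IPv6 header size, so the read landed well past the GRE key. */
static uint32_t key_before(const uint8_t *p, int grehlen)
{
    return *(((const uint32_t *)p) + (grehlen / 4) - 1);
}

/* After: offsets are taken relative to the GRE header itself;
 * greh->protocol replaces the old p[1] access (p was a 16-bit
 * pointer in the original code). */
static uint16_t proto_after(const uint8_t *p)
{
    const struct gre_base_hdr *greh = (const struct gre_base_hdr *)p;
    return greh->protocol;
}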
re_compile_pattern (const char *pattern, size_t length, struct re_pattern_buffer *bufp) #endif { reg_errcode_t ret; /* And GNU code determines whether or not to get register information by passing null for the REGS argument to re_match, etc., not by setting no_sub, unless RE_NO_SUB is set. */ bufp->no_sub = !!(re_syntax_options & RE_NO_SUB); /* Match anchors at newline. */ bufp->newline_anchor = 1; ret = re_compile_internal (bufp, pattern, length, re_syntax_options); if (!ret) return NULL; return gettext (__re_error_msgid + __re_error_msgid_idx[(int) ret]); }
Safe
[ "CWE-19" ]
gnulib
5513b40999149090987a0341c018d05d3eea1272
5.939009852762289e+37
20
Diagnose ERE '()|\1' Problem reported by Hanno Böck in: http://bugs.gnu.org/21513 * lib/regcomp.c (parse_reg_exp): While parsing alternatives, keep track of the set of previously-completed subexpressions available before the first alternative, and restore this set just before parsing each subsequent alternative. This lets us diagnose the invalid back-reference in the ERE '()|\1'.
0
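The fix makes the compiler reject a back-reference to a group that cannot have been completed on the current alternative. A quick check against the system regex library (behavior depends on whether it carries this fix; with it, regcomp() fails):

#include <regex.h>
#include <stdio.h>

int main(void)
{
    regex_t re;
    char msg[128];
    /* ERE '()|\1': group 1 is only completed on the *other* branch,
     * so the back-reference on this branch is invalid. */
    int rc = regcomp(&re, "()|\\1", REG_EXTENDED);
    if (rc != 0) {
        regerror(rc, &re, msg, sizeof msg);
        printf("rejected, as the fixed parser should: %s\n", msg);
    } else {
        puts("accepted (pre-fix behavior)");
        regfree(&re);
    }
    return 0;
}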
xmlXPathObjectPtr xmlXPathEval(const xmlChar *str, xmlXPathContextPtr ctx) { xmlXPathParserContextPtr ctxt; xmlXPathObjectPtr res; CHECK_CTXT(ctx) xmlXPathInit(); ctxt = xmlXPathNewParserContext(str, ctx); if (ctxt == NULL) return NULL; xmlXPathEvalExpr(ctxt); if (ctxt->error != XPATH_EXPRESSION_OK) { res = NULL; } else { res = valuePop(ctxt); if (res == NULL) { xmlGenericError(xmlGenericErrorContext, "xmlXPathCompiledEval: No result on the stack.\n"); } else if (ctxt->valueNr > 0) { xmlGenericError(xmlGenericErrorContext, "xmlXPathCompiledEval: %d object(s) left on the stack.\n", ctxt->valueNr); } } xmlXPathFreeParserContext(ctxt); return(res); }
Safe
[ "CWE-416" ]
libxml2
0f3b843b3534784ef57a4f9b874238aa1fda5a73
1.5397346949911455e+37
30
Fix XPath stack frame logic Move the calls to xmlXPathSetFrame and xmlXPathPopFrame around in xmlXPathCompOpEvalPositionalPredicate to make sure that the context object on the stack is actually protected. Otherwise, memory corruption can occur when calling sloppily coded XPath extension functions. Fixes bug 783160.
0
static int setup_ctxt(struct file *fp) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; int ret = 0; /* * Context should be set up only once, including allocation and * programming of eager buffers. This is done if context sharing * is not requested or by the master process. */ if (!uctxt->subctxt_cnt || !fd->subctxt) { ret = hfi1_init_ctxt(uctxt->sc); if (ret) goto done; /* Now allocate the RcvHdr queue and eager buffers. */ ret = hfi1_create_rcvhdrq(dd, uctxt); if (ret) goto done; ret = hfi1_setup_eagerbufs(uctxt); if (ret) goto done; if (uctxt->subctxt_cnt && !fd->subctxt) { ret = setup_subctxt(uctxt); if (ret) goto done; } } else { ret = wait_event_interruptible(uctxt->wait, !test_bit( HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags)); if (ret) goto done; } ret = hfi1_user_sdma_alloc_queues(uctxt, fp); if (ret) goto done; /* * Expected receive has to be setup for all processes (including * shared contexts). However, it has to be done after the master * context has been fully configured as it depends on the * eager/expected split of the RcvArray entries. * Setting it up here ensures that the subcontexts will be waiting * (due to the above wait_event_interruptible() until the master * is setup. */ ret = hfi1_user_exp_rcv_init(fp); if (ret) goto done; set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); done: return ret; }
Safe
[ "CWE-284", "CWE-264" ]
linux
e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3
1.0808465397978543e+38
57
IB/security: Restrict use of the write() interface The drivers/infiniband stack uses write() as a replacement for bi-directional ioctl(). This is not safe. There are ways to trigger write calls that result in the return structure that is normally written to user space being shunted off to user specified kernel memory instead. For the immediate repair, detect and deny suspicious accesses to the write API. For long term, update the user space libraries and the kernel API to something that doesn't present the same security vulnerabilities (likely a structured ioctl() interface). The impacted uAPI interfaces are generally only available if hardware from drivers/infiniband is installed in the system. Reported-by: Jann Horn <jann@thejh.net> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> [ Expanded check to all known write() entry points ] Cc: stable@vger.kernel.org Signed-off-by: Doug Ledford <dledford@redhat.com>
0
static inline atomic_t *compound_mapcount_ptr(struct page *page) { return &page[1].compound_mapcount; }
Safe
[ "CWE-119" ]
linux
1be7107fbe18eed3e319a6c3e83c78254b693acb
1.893926124187738e+38
4
mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that. Original-patch-by: Oleg Nesterov <oleg@redhat.com> Original-patch-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Hugh Dickins <hughd@google.com> Acked-by: Michal Hocko <mhocko@suse.com> Tested-by: Helge Deller <deller@gmx.de> # parisc Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
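The message replaces per-VMA guard-page accounting with a gap computed between VMAs via vm_start_gap(). A simplified, standalone sketch of that accounting (flat integers instead of struct vm_area_struct; the 1MB default follows the message's 4k-page case):

/* Assumed simplification: the real kernel helper takes a
 * struct vm_area_struct and checks VM_GROWSDOWN/VM_GROWSUP. */
#define PAGE_SHIFT 12
static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT; /* 1MB with 4k pages */

static unsigned long vm_start_gap(unsigned long vm_start, int grows_down)
{
    if (grows_down) {
        unsigned long gap_start = vm_start - stack_guard_gap;
        if (gap_start > vm_start)      /* wrapped below address 0 */
            gap_start = 0;
        return gap_start;
    }
    return vm_start;
}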
zone_signwithkey(dns_zone_t *zone, dns_secalg_t algorithm, uint16_t keyid, bool deleteit) { dns_signing_t *signing; dns_signing_t *current; isc_result_t result = ISC_R_SUCCESS; isc_time_t now; dns_db_t *db = NULL; signing = isc_mem_get(zone->mctx, sizeof *signing); if (signing == NULL) return (ISC_R_NOMEMORY); signing->magic = 0; signing->db = NULL; signing->dbiterator = NULL; signing->algorithm = algorithm; signing->keyid = keyid; signing->deleteit = deleteit; signing->done = false; TIME_NOW(&now); ZONEDB_LOCK(&zone->dblock, isc_rwlocktype_read); if (zone->db != NULL) dns_db_attach(zone->db, &db); ZONEDB_UNLOCK(&zone->dblock, isc_rwlocktype_read); if (db == NULL) { result = ISC_R_NOTFOUND; goto cleanup; } dns_db_attach(db, &signing->db); for (current = ISC_LIST_HEAD(zone->signing); current != NULL; current = ISC_LIST_NEXT(current, link)) { if (current->db == signing->db && current->algorithm == signing->algorithm && current->keyid == signing->keyid) { if (current->deleteit != signing->deleteit) current->done = true; else goto cleanup; } } result = dns_db_createiterator(signing->db, 0, &signing->dbiterator); if (result == ISC_R_SUCCESS) result = dns_dbiterator_first(signing->dbiterator); if (result == ISC_R_SUCCESS) { dns_dbiterator_pause(signing->dbiterator); ISC_LIST_INITANDAPPEND(zone->signing, signing, link); signing = NULL; if (isc_time_isepoch(&zone->signingtime)) { zone->signingtime = now; if (zone->task != NULL) zone_settimer(zone, &now); } } cleanup: if (signing != NULL) { if (signing->db != NULL) dns_db_detach(&signing->db); if (signing->dbiterator != NULL) dns_dbiterator_destroy(&signing->dbiterator); isc_mem_put(zone->mctx, signing, sizeof *signing); } if (db != NULL) dns_db_detach(&db); return (result); }
Safe
[ "CWE-327" ]
bind9
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
2.279100841651848e+38
76
Update keyfetch_done compute_tag check If in keyfetch_done the compute_tag fails (because for example the algorithm is not supported), don't crash, but instead ignore the key.
0
int dtls1_buffer_message(SSL *s, int is_ccs) { pitem *item; hm_fragment *frag; unsigned char seq64be[8]; /* * this function is called immediately after a message has been * serialized */ OPENSSL_assert(s->init_off == 0); frag = dtls1_hm_fragment_new(s->init_num, 0); if (!frag) return 0; memcpy(frag->fragment, s->init_buf->data, s->init_num); if (is_ccs) { OPENSSL_assert(s->d1->w_msg_hdr.msg_len + ((s->version == DTLS1_VERSION) ? DTLS1_CCS_HEADER_LENGTH : 3) == (unsigned int)s->init_num); } else { OPENSSL_assert(s->d1->w_msg_hdr.msg_len + DTLS1_HM_HEADER_LENGTH == (unsigned int)s->init_num); } frag->msg_header.msg_len = s->d1->w_msg_hdr.msg_len; frag->msg_header.seq = s->d1->w_msg_hdr.seq; frag->msg_header.type = s->d1->w_msg_hdr.type; frag->msg_header.frag_off = 0; frag->msg_header.frag_len = s->d1->w_msg_hdr.msg_len; frag->msg_header.is_ccs = is_ccs; /* save current state */ frag->msg_header.saved_retransmit_state.enc_write_ctx = s->enc_write_ctx; frag->msg_header.saved_retransmit_state.write_hash = s->write_hash; frag->msg_header.saved_retransmit_state.compress = s->compress; frag->msg_header.saved_retransmit_state.session = s->session; frag->msg_header.saved_retransmit_state.epoch = s->d1->w_epoch; memset(seq64be, 0, sizeof(seq64be)); seq64be[6] = (unsigned char)(dtls1_get_queue_priority(frag->msg_header.seq, frag->msg_header.is_ccs) >> 8); seq64be[7] = (unsigned char)(dtls1_get_queue_priority(frag->msg_header.seq, frag->msg_header.is_ccs)); item = pitem_new(seq64be, frag); if (item == NULL) { dtls1_hm_fragment_free(frag); return 0; } #if 0 fprintf(stderr, "buffered messge: \ttype = %xx\n", msg_buf->type); fprintf(stderr, "\t\t\t\t\tlen = %d\n", msg_buf->len); fprintf(stderr, "\t\t\t\t\tseq_num = %d\n", msg_buf->seq_num); #endif pqueue_insert(s->d1->sent_messages, item); return 1; }
Safe
[ "CWE-399" ]
openssl
00a4c1421407b6ac796688871b0a49a179c694d9
2.568511515097768e+38
66
Fix DTLS buffered message DoS attack DTLS can handle out of order record delivery. Additionally since handshake messages can be bigger than will fit into a single packet, the messages can be fragmented across multiple records (as with normal TLS). That means that the messages can arrive mixed up, and we have to reassemble them. We keep a queue of buffered messages that are "from the future", i.e. messages we're not ready to deal with yet but have arrived early. The messages held there may not be full yet - they could be one or more fragments that are still in the process of being reassembled. The code assumes that we will eventually complete the reassembly and when that occurs the complete message is removed from the queue at the point that we need to use it. However, DTLS is also tolerant of packet loss. To get around that DTLS messages can be retransmitted. If we receive a full (non-fragmented) message from the peer after previously having received a fragment of that message, then we ignore the message in the queue and just use the non-fragmented version. At that point the queued message will never get removed. Additionally the peer could send "future" messages that we never get to in order to complete the handshake. Each message has a sequence number (starting from 0). We will accept a message fragment for the current message sequence number, or for any sequence up to 10 into the future. However if the Finished message has a sequence number of 2, anything greater than that in the queue is just left there. So, in those two ways we can end up with "orphaned" data in the queue that will never get removed - except when the connection is closed. At that point all the queues are flushed. An attacker could seek to exploit this by filling up the queues with lots of large messages that are never going to be used in order to attempt a DoS by memory exhaustion. I will assume that we are only concerned with servers here. It does not seem reasonable to be concerned about a memory exhaustion attack on a client. They are unlikely to process enough connections for this to be an issue. A "long" handshake with many messages might be 5 messages long (in the incoming direction), e.g. ClientHello, Certificate, ClientKeyExchange, CertificateVerify, Finished. So this would be message sequence numbers 0 to 4. Additionally we can buffer up to 10 messages in the future. Therefore the maximum number of messages that an attacker could send that could get orphaned would typically be 15. The maximum size that a DTLS message is allowed to be is defined by max_cert_list, which by default is 100k. Therefore the maximum amount of "orphaned" memory per connection is 1500k. Message sequence numbers get reset after the Finished message, so renegotiation will not extend the maximum number of messages that can be orphaned per connection. As noted above, the queues do get cleared when the connection is closed. Therefore in order to mount an effective attack, an attacker would have to open many simultaneous connections. Issue reported by Quan Luo. CVE-2016-2179 Reviewed-by: Richard Levitte <levitte@openssl.org>
0
static inline int ube16_to_cpu(const uint8_t *buf) { return (buf[0] << 8) | buf[1]; }
Safe
[]
qemu
ce560dcf20c14194db5ef3b9fc1ea592d4e68109
2.86553257350282e+38
4
ATAPI: STARTSTOPUNIT only eject/load media if powercondition is 0 The START STOP UNIT command will only eject/load media if power condition is zero. If power condition is !0 then LOEJ and START will be ignored. From MMC (sbc contains similar wordings too) The Power Conditions field requests the block device to be placed in the power condition defined in Table 558. If this field has a value other than 0h then the Start and LoEj bits shall be ignored. Signed-off-by: Ronnie Sahlberg <ronniesahlberg@gmail.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
0
asmlinkage long sys_statfs(const char __user * path, struct statfs __user * buf) { struct nameidata nd; int error; error = user_path_walk(path, &nd); if (!error) { struct statfs tmp; error = vfs_statfs_native(nd.dentry, &tmp); if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) error = -EFAULT; path_release(&nd); } return error; }
Safe
[ "CWE-264" ]
linux-2.6
7b82dc0e64e93f430182f36b46b79fcee87d3532
1.5524376862091986e+38
15
Remove suid/sgid bits on [f]truncate() .. to match what we do on write(). This way, people who write to files by using [f]truncate + writable mmap have the same semantics as if they were using the write() family of system calls. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
rb_str_rindex(VALUE str, VALUE sub, long pos) { long len, slen; char *s, *sbeg, *e, *t; rb_encoding *enc; int singlebyte = single_byte_optimizable(str); enc = rb_enc_check(str, sub); if (is_broken_string(sub)) { return -1; } len = str_strlen(str, enc); slen = str_strlen(sub, enc); /* substring longer than string */ if (len < slen) return -1; if (len - pos < slen) { pos = len - slen; } if (len == 0) { return pos; } sbeg = RSTRING_PTR(str); e = RSTRING_END(str); t = RSTRING_PTR(sub); slen = RSTRING_LEN(sub); for (;;) { s = str_nth(sbeg, e, pos, enc, singlebyte); if (!s) return -1; if (memcmp(s, t, slen) == 0) { return pos; } if (pos == 0) break; pos--; } return -1; }
Safe
[ "CWE-119" ]
ruby
1c2ef610358af33f9ded3086aa2d70aac03dcac5
3.276085410454183e+38
36
* string.c (rb_str_justify): CVE-2009-4124. Fixes a bug reported by Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London; Patch by nobu. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
0
static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, gpa_t *vmpointer) { gva_t gva; gpa_t vmptr; struct x86_exception e; struct page *page; struct vcpu_vmx *vmx = to_vmx(vcpu); int maxphyaddr = cpuid_maxphyaddr(vcpu); if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, sizeof(vmptr), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (exit_reason) { case EXIT_REASON_VMON: /* * SDM 3: 24.11.5 * The first 4 bytes of VMXON region contain the supported * VMCS revision identifier * * Note - IA32_VMX_BASIC[48] will never be 1 * for the nested case; * which replaces physical address width with 32 * */ if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 1; } page = nested_get_page(vcpu, vmptr); if (page == NULL || *(u32 *)kmap(page) != VMCS12_REVISION) { nested_vmx_failInvalid(vcpu); kunmap(page); skip_emulated_instruction(vcpu); return 1; } kunmap(page); vmx->nested.vmxon_ptr = vmptr; break; case EXIT_REASON_VMCLEAR: if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); skip_emulated_instruction(vcpu); return 1; } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); skip_emulated_instruction(vcpu); return 1; } break; case EXIT_REASON_VMPTRLD: if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); skip_emulated_instruction(vcpu); return 1; } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); skip_emulated_instruction(vcpu); return 1; } break; default: return 1; /* shouldn't happen */ } if (vmpointer) *vmpointer = vmptr; return 0; }
Safe
[ "CWE-284", "CWE-264" ]
linux
3ce424e45411cf5a13105e0386b6ecf6eeb4f66f
9.914867404528754e+37
87
kvm:vmx: more complete state update on APICv on/off The function to update APICv on/off state (in particular, to deactivate it when enabling Hyper-V SynIC) is incomplete: it doesn't adjust APICv-related fields among secondary processor-based VM-execution controls. As a result, Windows 2012 guests get stuck when SynIC-based auto-EOI interrupt intersected with e.g. an IPI in the guest. In addition, the MSR intercept bitmap isn't updated every time "virtualize x2APIC mode" is toggled. This path can only be triggered by a malicious guest, because Windows didn't use x2APIC but rather their own synthetic APIC access MSRs; however a guest running in a SynIC-enabled VM could switch to x2APIC and thus obtain direct access to host APIC MSRs (CVE-2016-4440). The patch fixes those omissions. Signed-off-by: Roman Kagan <rkagan@virtuozzo.com> Reported-by: Steve Rutherford <srutherford@google.com> Reported-by: Yang Zhang <yang.zhang.wz@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
static void credential_apply_config(struct credential *c) { if (c->configured) return; git_config(credential_config_callback, c); c->configured = 1; if (!c->use_http_path && proto_is_http(c->protocol)) { FREE_AND_NULL(c->path); } }
Safe
[ "CWE-522", "CWE-61" ]
git
9a6bbee8006c24b46a85d29e7b38cfa79e9ab21b
1.9664545316810522e+37
11
credential: avoid writing values with newlines The credential protocol that we use to speak to helpers can't represent values with newlines in them. This was an intentional design choice to keep the protocol simple, since none of the values we pass should generally have newlines. However, if we _do_ encounter a newline in a value, we blindly transmit it in credential_write(). Such values may break the protocol syntax, or worse, inject new valid lines into the protocol stream. The most likely way for a newline to end up in a credential struct is by decoding a URL with a percent-encoded newline. However, since the bug occurs at the moment we write the value to the protocol, we'll catch it there. That should leave no possibility of accidentally missing a code path that can trigger the problem. At this level of the code we have little choice but to die(). However, since we'd not ever expect to see this case outside of a malicious URL, that's an acceptable outcome. Reported-by: Felix Wilhelm <fwilhelm@google.com>
0
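The guard the credential commit above describes lands at the single point where values are serialized, so no other code path can smuggle a newline into the line-oriented helper protocol. A minimal sketch of that choke point (modeled on, but not identical to, git's credential_write):
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void credential_write_item(FILE *fp, const char *key, const char *value)
{
    if (!value)
        return;
    if (strchr(value, '\n')) {
        /* a percent-encoded newline in a URL would end up here */
        fprintf(stderr, "fatal: credential value for %s contains newline\n", key);
        exit(128);
    }
    fprintf(fp, "%s=%s\n", key, value);
}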
void pop_front() { if (size() == 0) { return; } front() = SlicePtr(); size_--; start_++; if (start_ == capacity_) { start_ = 0; } }
Safe
[ "CWE-401" ]
envoy
5eba69a1f375413fb93fab4173f9c393ac8c2818
1.2343248917325648e+38
11
[buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144) Signed-off-by: antonio <avd@google.com>
0
static int pgpPrtSubType(const uint8_t *h, size_t hlen, pgpSigType sigtype, pgpDigParams _digp) { const uint8_t *p = h; size_t plen = 0, i; int rc = 0; while (hlen > 0 && rc == 0) { int impl = 0; i = pgpLen(p, hlen, &plen); if (i == 0 || plen < 1 || i + plen > hlen) break; p += i; hlen -= i; pgpPrtVal(" ", pgpSubTypeTbl, (p[0]&(~PGPSUBTYPE_CRITICAL))); if (p[0] & PGPSUBTYPE_CRITICAL) if (_print) fprintf(stderr, " *CRITICAL*"); switch (*p) { case PGPSUBTYPE_PREFER_SYMKEY: /* preferred symmetric algorithms */ for (i = 1; i < plen; i++) pgpPrtVal(" ", pgpSymkeyTbl, p[i]); break; case PGPSUBTYPE_PREFER_HASH: /* preferred hash algorithms */ for (i = 1; i < plen; i++) pgpPrtVal(" ", pgpHashTbl, p[i]); break; case PGPSUBTYPE_PREFER_COMPRESS:/* preferred compression algorithms */ for (i = 1; i < plen; i++) pgpPrtVal(" ", pgpCompressionTbl, p[i]); break; case PGPSUBTYPE_KEYSERVER_PREFERS:/* key server preferences */ for (i = 1; i < plen; i++) pgpPrtVal(" ", pgpKeyServerPrefsTbl, p[i]); break; case PGPSUBTYPE_SIG_CREATE_TIME: impl = *p; if (!(_digp->saved & PGPDIG_SAVED_TIME) && (sigtype == PGPSIGTYPE_POSITIVE_CERT || sigtype == PGPSIGTYPE_BINARY || sigtype == PGPSIGTYPE_TEXT || sigtype == PGPSIGTYPE_STANDALONE)) { if (plen-1 != sizeof(_digp->time)) break; _digp->saved |= PGPDIG_SAVED_TIME; _digp->time = pgpGrab(p+1, sizeof(_digp->time)); } case PGPSUBTYPE_SIG_EXPIRE_TIME: case PGPSUBTYPE_KEY_EXPIRE_TIME: pgpPrtTime(" ", p+1, plen-1); break; case PGPSUBTYPE_ISSUER_KEYID: /* issuer key ID */ impl = *p; if (!(_digp->saved & PGPDIG_SAVED_ID) && (sigtype == PGPSIGTYPE_POSITIVE_CERT || sigtype == PGPSIGTYPE_BINARY || sigtype == PGPSIGTYPE_TEXT || sigtype == PGPSIGTYPE_STANDALONE)) { if (plen-1 != sizeof(_digp->signid)) break; _digp->saved |= PGPDIG_SAVED_ID; memcpy(_digp->signid, p+1, sizeof(_digp->signid)); } case PGPSUBTYPE_EXPORTABLE_CERT: case PGPSUBTYPE_TRUST_SIG: case PGPSUBTYPE_REGEX: case PGPSUBTYPE_REVOCABLE: case PGPSUBTYPE_ARR: case PGPSUBTYPE_REVOKE_KEY: case PGPSUBTYPE_NOTATION: case PGPSUBTYPE_PREFER_KEYSERVER: case PGPSUBTYPE_PRIMARY_USERID: case PGPSUBTYPE_POLICY_URL: case PGPSUBTYPE_KEY_FLAGS: case PGPSUBTYPE_SIGNER_USERID: case PGPSUBTYPE_REVOKE_REASON: case PGPSUBTYPE_FEATURES: case PGPSUBTYPE_EMBEDDED_SIG: case PGPSUBTYPE_INTERNAL_100: case PGPSUBTYPE_INTERNAL_101: case PGPSUBTYPE_INTERNAL_102: case PGPSUBTYPE_INTERNAL_103: case PGPSUBTYPE_INTERNAL_104: case PGPSUBTYPE_INTERNAL_105: case PGPSUBTYPE_INTERNAL_106: case PGPSUBTYPE_INTERNAL_107: case PGPSUBTYPE_INTERNAL_108: case PGPSUBTYPE_INTERNAL_109: case PGPSUBTYPE_INTERNAL_110: default: pgpPrtHex("", p+1, plen-1); break; } pgpPrtNL(); if (!impl && (p[0] & PGPSUBTYPE_CRITICAL)) rc = 1; p += plen; hlen -= plen; } if (hlen != 0) rc = 1; return rc; }
Safe
[ "CWE-347", "CWE-284" ]
rpm
bd36c5dc9fb6d90c46fbfed8c2d67516fc571ec8
3.0544365391028878e+38
106
Validate and require subkey binding signatures on PGP public keys All subkeys must be followed by a binding signature by the primary key as per the OpenPGP RFC, enforce the presence and validity in the parser. The implementation is as kludgey as they come to work around our simple-minded parser structure without touching API, to maximise backportability. Store all the raw packets internally as we decode them to be able to access previous elements at will, needed to validate ordering and access the actual data. Add testcases for manipulated keys whose import previously would succeed. Depends on the two previous commits: 7b399fcb8f52566e6f3b4327197a85facd08db91 and 236b802a4aa48711823a191d1b7f753c82a89ec5 Fixes CVE-2021-3521.
0
int ssh_buffer_add_u32(struct ssh_buffer_struct *buffer,uint32_t data) { int rc; rc = ssh_buffer_add_data(buffer, &data, sizeof(data)); if (rc < 0) { return -1; } return 0; }
Safe
[ "CWE-476" ]
libssh-mirror
10b3ebbe61a7031a3dae97f05834442220447181
2.5801335592263114e+38
11
buffer: Reformat ssh_buffer_add_data() Signed-off-by: Andreas Schneider <asn@cryptomilk.org> Reviewed-by: Anderson Toshiyuki Sasaki <ansasaki@redhat.com> Reviewed-by: Jakub Jelen <jjelen@redhat.com>
0
bgp_show_peer (struct vty *vty, struct peer *p) { struct bgp *bgp; char buf1[BUFSIZ]; char timebuf[BGP_UPTIME_LEN]; afi_t afi; safi_t safi; bgp = p->bgp; /* Configured IP address. */ vty_out (vty, "BGP neighbor is %s, ", p->host); vty_out (vty, "remote AS %d, ", p->as); vty_out (vty, "local AS %d%s, ", p->change_local_as ? p->change_local_as : p->local_as, CHECK_FLAG (p->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND) ? " no-prepend" : ""); vty_out (vty, "%s link%s", p->as == p->local_as ? "internal" : "external", VTY_NEWLINE); /* Description. */ if (p->desc) vty_out (vty, " Description: %s%s", p->desc, VTY_NEWLINE); /* Peer-group */ if (p->group) vty_out (vty, " Member of peer-group %s for session parameters%s", p->group->name, VTY_NEWLINE); /* Administrative shutdown. */ if (CHECK_FLAG (p->flags, PEER_FLAG_SHUTDOWN)) vty_out (vty, " Administratively shut down%s", VTY_NEWLINE); /* BGP Version. */ vty_out (vty, " BGP version 4"); vty_out (vty, ", remote router ID %s%s", inet_ntop (AF_INET, &p->remote_id, buf1, BUFSIZ), VTY_NEWLINE); /* Confederation */ if (CHECK_FLAG (bgp->config, BGP_CONFIG_CONFEDERATION) && bgp_confederation_peers_check (bgp, p->as)) vty_out (vty, " Neighbor under common administration%s", VTY_NEWLINE); /* Status. */ vty_out (vty, " BGP state = %s", LOOKUP (bgp_status_msg, p->status)); if (p->status == Established) vty_out (vty, ", up for %8s", peer_uptime (p->uptime, timebuf, BGP_UPTIME_LEN)); else if (p->status == Active) { if (CHECK_FLAG (p->flags, PEER_FLAG_PASSIVE)) vty_out (vty, " (passive)"); else if (CHECK_FLAG (p->sflags, PEER_STATUS_NSF_WAIT)) vty_out (vty, " (NSF passive)"); } vty_out (vty, "%s", VTY_NEWLINE); /* read timer */ vty_out (vty, " Last read %s", peer_uptime (p->readtime, timebuf, BGP_UPTIME_LEN)); /* Configured timer values. */ vty_out (vty, ", hold time is %d, keepalive interval is %d seconds%s", p->v_holdtime, p->v_keepalive, VTY_NEWLINE); if (CHECK_FLAG (p->config, PEER_CONFIG_TIMER)) { vty_out (vty, " Configured hold time is %d", p->holdtime); vty_out (vty, ", keepalive interval is %d seconds%s", p->keepalive, VTY_NEWLINE); } /* Capability. */ if (p->status == Established) { if (p->cap || p->afc_adv[AFI_IP][SAFI_UNICAST] || p->afc_recv[AFI_IP][SAFI_UNICAST] || p->afc_adv[AFI_IP][SAFI_MULTICAST] || p->afc_recv[AFI_IP][SAFI_MULTICAST] #ifdef HAVE_IPV6 || p->afc_adv[AFI_IP6][SAFI_UNICAST] || p->afc_recv[AFI_IP6][SAFI_UNICAST] || p->afc_adv[AFI_IP6][SAFI_MULTICAST] || p->afc_recv[AFI_IP6][SAFI_MULTICAST] #endif /* HAVE_IPV6 */ || p->afc_adv[AFI_IP][SAFI_MPLS_VPN] || p->afc_recv[AFI_IP][SAFI_MPLS_VPN]) { vty_out (vty, " Neighbor capabilities:%s", VTY_NEWLINE); /* Dynamic */ if (CHECK_FLAG (p->cap, PEER_CAP_DYNAMIC_RCV) || CHECK_FLAG (p->cap, PEER_CAP_DYNAMIC_ADV)) { vty_out (vty, " Dynamic:"); if (CHECK_FLAG (p->cap, PEER_CAP_DYNAMIC_ADV)) vty_out (vty, " advertised"); if (CHECK_FLAG (p->cap, PEER_CAP_DYNAMIC_RCV)) vty_out (vty, " %sreceived", CHECK_FLAG (p->cap, PEER_CAP_DYNAMIC_ADV) ? "and " : ""); vty_out (vty, "%s", VTY_NEWLINE); } /* Route Refresh */ if (CHECK_FLAG (p->cap, PEER_CAP_REFRESH_ADV) || CHECK_FLAG (p->cap, PEER_CAP_REFRESH_NEW_RCV) || CHECK_FLAG (p->cap, PEER_CAP_REFRESH_OLD_RCV)) { vty_out (vty, " Route refresh:"); if (CHECK_FLAG (p->cap, PEER_CAP_REFRESH_ADV)) vty_out (vty, " advertised"); if (CHECK_FLAG (p->cap, PEER_CAP_REFRESH_NEW_RCV) || CHECK_FLAG (p->cap, PEER_CAP_REFRESH_OLD_RCV)) vty_out (vty, " %sreceived(%s)", CHECK_FLAG (p->cap, PEER_CAP_REFRESH_ADV) ? 
"and " : "", (CHECK_FLAG (p->cap, PEER_CAP_REFRESH_OLD_RCV) && CHECK_FLAG (p->cap, PEER_CAP_REFRESH_NEW_RCV)) ? "old & new" : CHECK_FLAG (p->cap, PEER_CAP_REFRESH_OLD_RCV) ? "old" : "new"); vty_out (vty, "%s", VTY_NEWLINE); } /* Multiprotocol Extensions */ for (afi = AFI_IP ; afi < AFI_MAX ; afi++) for (safi = SAFI_UNICAST ; safi < SAFI_MAX ; safi++) if (p->afc_adv[afi][safi] || p->afc_recv[afi][safi]) { vty_out (vty, " Address family %s:", afi_safi_print (afi, safi)); if (p->afc_adv[afi][safi]) vty_out (vty, " advertised"); if (p->afc_recv[afi][safi]) vty_out (vty, " %sreceived", p->afc_adv[afi][safi] ? "and " : ""); vty_out (vty, "%s", VTY_NEWLINE); } /* Gracefull Restart */ if (CHECK_FLAG (p->cap, PEER_CAP_RESTART_RCV) || CHECK_FLAG (p->cap, PEER_CAP_RESTART_ADV)) { vty_out (vty, " Graceful Restart Capabilty:"); if (CHECK_FLAG (p->cap, PEER_CAP_RESTART_ADV)) vty_out (vty, " advertised"); if (CHECK_FLAG (p->cap, PEER_CAP_RESTART_RCV)) vty_out (vty, " %sreceived", CHECK_FLAG (p->cap, PEER_CAP_RESTART_ADV) ? "and " : ""); vty_out (vty, "%s", VTY_NEWLINE); if (CHECK_FLAG (p->cap, PEER_CAP_RESTART_RCV)) { int restart_af_count = 0; vty_out (vty, " Remote Restart timer is %d seconds%s", p->v_gr_restart, VTY_NEWLINE); vty_out (vty, " Address families by peer:%s ", VTY_NEWLINE); for (afi = AFI_IP ; afi < AFI_MAX ; afi++) for (safi = SAFI_UNICAST ; safi < SAFI_MAX ; safi++) if (CHECK_FLAG (p->af_cap[afi][safi], PEER_CAP_RESTART_AF_RCV)) { vty_out (vty, "%s%s(%s)", restart_af_count ? ", " : "", afi_safi_print (afi, safi), CHECK_FLAG (p->af_cap[afi][safi], PEER_CAP_RESTART_AF_PRESERVE_RCV) ? "preserved" : "not preserved"); restart_af_count++; } if (! restart_af_count) vty_out (vty, "none"); vty_out (vty, "%s", VTY_NEWLINE); } } } } /* graceful restart information */ if (CHECK_FLAG (p->cap, PEER_CAP_RESTART_RCV) || p->t_gr_restart || p->t_gr_stale) { int eor_send_af_count = 0; int eor_receive_af_count = 0; vty_out (vty, " Graceful restart informations:%s", VTY_NEWLINE); if (p->status == Established) { vty_out (vty, " End-of-RIB send: "); for (afi = AFI_IP ; afi < AFI_MAX ; afi++) for (safi = SAFI_UNICAST ; safi < SAFI_MAX ; safi++) if (CHECK_FLAG (p->af_sflags[afi][safi], PEER_STATUS_EOR_SEND)) { vty_out (vty, "%s%s", eor_send_af_count ? ", " : "", afi_safi_print (afi, safi)); eor_send_af_count++; } vty_out (vty, "%s", VTY_NEWLINE); vty_out (vty, " End-of-RIB received: "); for (afi = AFI_IP ; afi < AFI_MAX ; afi++) for (safi = SAFI_UNICAST ; safi < SAFI_MAX ; safi++) if (CHECK_FLAG (p->af_sflags[afi][safi], PEER_STATUS_EOR_RECEIVED)) { vty_out (vty, "%s%s", eor_receive_af_count ? ", " : "", afi_safi_print (afi, safi)); eor_receive_af_count++; } vty_out (vty, "%s", VTY_NEWLINE); } if (p->t_gr_restart) { vty_out (vty, " The remaining time of restart timer is %ld%s", thread_timer_remain_second (p->t_gr_restart), VTY_NEWLINE); } if (p->t_gr_stale) { vty_out (vty, " The remaining time of stalepath timer is %ld%s", thread_timer_remain_second (p->t_gr_stale), VTY_NEWLINE); } } /* Packet counts. 
*/ vty_out (vty, " Message statistics:%s", VTY_NEWLINE); vty_out (vty, " Inq depth is 0%s", VTY_NEWLINE); vty_out (vty, " Outq depth is %lu%s", (unsigned long)p->obuf->count, VTY_NEWLINE); vty_out (vty, " Sent Rcvd%s", VTY_NEWLINE); vty_out (vty, " Opens: %10d %10d%s", p->open_out, p->open_in, VTY_NEWLINE); vty_out (vty, " Notifications: %10d %10d%s", p->notify_out, p->notify_in, VTY_NEWLINE); vty_out (vty, " Updates: %10d %10d%s", p->update_out, p->update_in, VTY_NEWLINE); vty_out (vty, " Keepalives: %10d %10d%s", p->keepalive_out, p->keepalive_in, VTY_NEWLINE); vty_out (vty, " Route Refresh: %10d %10d%s", p->refresh_out, p->refresh_in, VTY_NEWLINE); vty_out (vty, " Capability: %10d %10d%s", p->dynamic_cap_out, p->dynamic_cap_in, VTY_NEWLINE); vty_out (vty, " Total: %10d %10d%s", p->open_out + p->notify_out + p->update_out + p->keepalive_out + p->refresh_out + p->dynamic_cap_out, p->open_in + p->notify_in + p->update_in + p->keepalive_in + p->refresh_in + p->dynamic_cap_in, VTY_NEWLINE); /* advertisement-interval */ vty_out (vty, " Minimum time between advertisement runs is %d seconds%s", p->v_routeadv, VTY_NEWLINE); /* Update-source. */ if (p->update_if || p->update_source) { vty_out (vty, " Update source is "); if (p->update_if) vty_out (vty, "%s", p->update_if); else if (p->update_source) vty_out (vty, "%s", sockunion2str (p->update_source, buf1, SU_ADDRSTRLEN)); vty_out (vty, "%s", VTY_NEWLINE); } /* Default weight */ if (CHECK_FLAG (p->config, PEER_CONFIG_WEIGHT)) vty_out (vty, " Default weight %d%s", p->weight, VTY_NEWLINE); vty_out (vty, "%s", VTY_NEWLINE); /* Address Family Information */ for (afi = AFI_IP ; afi < AFI_MAX ; afi++) for (safi = SAFI_UNICAST ; safi < SAFI_MAX ; safi++) if (p->afc[afi][safi]) bgp_show_peer_afi (vty, p, afi, safi); vty_out (vty, " Connections established %d; dropped %d%s", p->established, p->dropped, VTY_NEWLINE); if (! p->dropped) vty_out (vty, " Last reset never%s", VTY_NEWLINE); else vty_out (vty, " Last reset %s, due to %s%s", peer_uptime (p->resettime, timebuf, BGP_UPTIME_LEN), peer_down_str[(int) p->last_reset], VTY_NEWLINE); if (CHECK_FLAG (p->sflags, PEER_STATUS_PREFIX_OVERFLOW)) { vty_out (vty, " Peer had exceeded the max. no. of prefixes configured.%s", VTY_NEWLINE); if (p->t_pmax_restart) vty_out (vty, " Reduce the no. of prefix from %s, will restart in %ld seconds%s", p->host, thread_timer_remain_second (p->t_pmax_restart), VTY_NEWLINE); else vty_out (vty, " Reduce the no. of prefix and clear ip bgp %s to restore peering%s", p->host, VTY_NEWLINE); } /* EBGP Multihop */ if (peer_sort (p) != BGP_PEER_IBGP && p->ttl > 1) vty_out (vty, " External BGP neighbor may be up to %d hops away.%s", p->ttl, VTY_NEWLINE); /* Local address. */ if (p->su_local) { vty_out (vty, "Local host: %s, Local port: %d%s", sockunion2str (p->su_local, buf1, SU_ADDRSTRLEN), ntohs (p->su_local->sin.sin_port), VTY_NEWLINE); } /* Remote address. */ if (p->su_remote) { vty_out (vty, "Foreign host: %s, Foreign port: %d%s", sockunion2str (p->su_remote, buf1, SU_ADDRSTRLEN), ntohs (p->su_remote->sin.sin_port), VTY_NEWLINE); } /* Nexthop display. */ if (p->su_local) { vty_out (vty, "Nexthop: %s%s", inet_ntop (AF_INET, &p->nexthop.v4, buf1, BUFSIZ), VTY_NEWLINE); #ifdef HAVE_IPV6 vty_out (vty, "Nexthop global: %s%s", inet_ntop (AF_INET6, &p->nexthop.v6_global, buf1, BUFSIZ), VTY_NEWLINE); vty_out (vty, "Nexthop local: %s%s", inet_ntop (AF_INET6, &p->nexthop.v6_local, buf1, BUFSIZ), VTY_NEWLINE); vty_out (vty, "BGP connection: %s%s", p->shared_network ? 
"shared network" : "non shared network", VTY_NEWLINE); #endif /* HAVE_IPV6 */ } /* Timer information. */ if (p->t_start) vty_out (vty, "Next start timer due in %ld seconds%s", thread_timer_remain_second (p->t_start), VTY_NEWLINE); if (p->t_connect) vty_out (vty, "Next connect timer due in %ld seconds%s", thread_timer_remain_second (p->t_connect), VTY_NEWLINE); vty_out (vty, "Read thread: %s Write thread: %s%s", p->t_read ? "on" : "off", p->t_write ? "on" : "off", VTY_NEWLINE); if (p->notify.code == BGP_NOTIFY_OPEN_ERR && p->notify.subcode == BGP_NOTIFY_OPEN_UNSUP_CAPBL) bgp_capability_vty_out (vty, p); vty_out (vty, "%s", VTY_NEWLINE); }
Safe
[ "CWE-125" ]
frr
6d58272b4cf96f0daa846210dd2104877900f921
2.8036868238946407e+38
351
[bgpd] cleanup, compact and consolidate capability parsing code 2007-07-26 Paul Jakma <paul.jakma@sun.com> * (general) Clean up and compact capability parsing slightly. Consolidate validation of length and logging of generic TLV, and memcpy of capability data, thus removing such from cap-specific code (not always present or correct). * bgp_open.h: Add structures for the generic capability TLV header and for the data formats of the various specific capabilities we support. Hence remove the badly named, or else misdefined, struct capability. * bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data. Do the length checks *before* memcpy()'ing based on that length (stored capability - should have been validated anyway on input, but..). (bgp_afi_safi_valid_indices) new function to validate (afi,safi) which is about to be used as index into arrays, consolidates several instances of same, at least one of which appeared to be incomplete.. (bgp_capability_mp) Much condensed. (bgp_capability_orf_entry) New, process one ORF entry (bgp_capability_orf) Condensed. Fixed to process all ORF entries. (bgp_capability_restart) Condensed, and fixed to use a cap-specific type, rather than abusing capability_mp. (struct message capcode_str) added to aid generic logging. (size_t cap_minsizes[]) added to aid generic validation of capability length field. (bgp_capability_parse) Generic logging and validation of TLV consolidated here. Code compacted as much as possible. * bgp_packet.c: (bgp_open_receive) Capability parsers now use streams, so no more need here to manually fudge the input stream getp. (bgp_capability_msg_parse) use struct capability_mp_data. Validate lengths /before/ memcpy. Use bgp_afi_safi_valid_indices. (bgp_capability_receive) Exported for use by test harness. * bgp_vty.c: (bgp_show_summary) fix conversion warning (bgp_show_peer) ditto * bgp_debug.h: Fix storage 'extern' after type 'const'. * lib/log.c: (mes_lookup) warning about code not being in same-number array slot should be debug, not warning. E.g. BGP has several discontiguous number spaces, allocating from different parts of a space is not uncommon (e.g. IANA assigned versus vendor-assigned code points in some number space).
0
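The core of the bgpd cleanup above is "validate the TLV length before any memcpy". A sketch of that consolidated check with illustrative types (the real code uses struct capability_mp_data, cap_minsizes[] and stream accessors):
#include <stdint.h>
#include <string.h>

struct capability_header {
    uint8_t code;
    uint8_t length;
};

/* Returns the capability body length, or -1 if the TLV is bogus. */
static int capability_parse_one(const uint8_t *buf, size_t remaining,
                                size_t cap_minsize, void *out, size_t outsz)
{
    struct capability_header hdr;

    if (remaining < sizeof(hdr))
        return -1;
    memcpy(&hdr, buf, sizeof(hdr));
    if (hdr.length < cap_minsize || sizeof(hdr) + hdr.length > remaining)
        return -1;                      /* reject before copying any data */
    memcpy(out, buf + sizeof(hdr), hdr.length < outsz ? hdr.length : outsz);
    return hdr.length;
}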
gdm_session_worker_handle_setup_for_user (GdmDBusWorker *object, GDBusMethodInvocation *invocation, const char *service, const char *username, const char *x11_display_name, const char *x11_authority_file, const char *console, const char *seat_id, const char *hostname, gboolean display_is_local, gboolean display_is_initial) { GdmSessionWorker *worker = GDM_SESSION_WORKER (object); if (!validate_state_change (worker, invocation, GDM_SESSION_WORKER_STATE_SETUP_COMPLETE)) return TRUE; worker->priv->service = g_strdup (service); worker->priv->x11_display_name = g_strdup (x11_display_name); worker->priv->x11_authority_file = g_strdup (x11_authority_file); worker->priv->display_device = g_strdup (console); worker->priv->display_seat_id = g_strdup (seat_id); worker->priv->hostname = g_strdup (hostname); worker->priv->display_is_local = display_is_local; worker->priv->display_is_initial = display_is_initial; worker->priv->username = g_strdup (username); g_signal_connect_swapped (worker->priv->user_settings, "notify::language-name", G_CALLBACK (on_saved_language_name_read), worker); g_signal_connect_swapped (worker->priv->user_settings, "notify::session-name", G_CALLBACK (on_saved_session_name_read), worker); /* Load settings from accounts daemon before continuing */ worker->priv->pending_invocation = invocation; if (gdm_session_settings_load (worker->priv->user_settings, username)) { queue_state_change (worker); } else { g_signal_connect (G_OBJECT (worker->priv->user_settings), "notify::is-loaded", G_CALLBACK (on_settings_is_loaded_changed), worker); } return TRUE; }
Safe
[ "CWE-362" ]
gdm
dcdbaaa04012541ad2813cf83559d91d52f208b9
4.865276332733298e+37
51
session-worker: Don't switch back VTs until session is fully exited There's a race condition on shutdown where the session worker is switching VTs back to the initial VT at the same time as the session exit is being processed. This means that the manager may try to start a login screen (because of the VT switch) when autologin is enabled, even though there shouldn't be a login screen. This commit makes sure both the PostSession script and the session-exited signal emission are complete before initiating the VT switch back to the initial VT. https://gitlab.gnome.org/GNOME/gdm/-/issues/660
0
sock_filter (void *opaque, int control, iobuf_t chain, byte * buf, size_t * ret_len) { sock_filter_ctx_t *a = opaque; size_t size = *ret_len; size_t nbytes = 0; int rc = 0; (void)chain; if (control == IOBUFCTRL_UNDERFLOW) { assert (size); /* need a buffer */ if (a->eof_seen) { rc = -1; *ret_len = 0; } else { int nread; nread = recv (a->sock, buf, size, 0); if (nread == SOCKET_ERROR) { int ec = (int) WSAGetLastError (); rc = gpg_error_from_errno (ec); log_error ("socket read error: ec=%d\n", ec); } else if (!nread) { a->eof_seen = 1; rc = -1; } else { nbytes = nread; } *ret_len = nbytes; } } else if (control == IOBUFCTRL_FLUSH) { if (size) { byte *p = buf; int n; nbytes = size; do { n = send (a->sock, p, nbytes, 0); if (n == SOCKET_ERROR) { int ec = (int) WSAGetLastError (); rc = gpg_error_from_errno (ec); log_error ("socket write error: ec=%d\n", ec); break; } p += n; nbytes -= n; } while (nbytes); nbytes = p - buf; } *ret_len = nbytes; } else if (control == IOBUFCTRL_INIT) { a->eof_seen = 0; a->keep_open = 0; a->no_cache = 0; } else if (control == IOBUFCTRL_DESC) { *(char **) buf = "sock_filter"; } else if (control == IOBUFCTRL_FREE) { if (!a->keep_open) closesocket (a->sock); xfree (a); /* we can free our context now */ } return rc; }
Safe
[ "CWE-20" ]
gnupg
2183683bd633818dd031b090b5530951de76f392
3.3510797927620573e+38
85
Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <wk@gnupg.org>
0
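The host2net helpers the GnuPG commit above introduces exist to keep byte-to-scalar conversion from ever shifting a promoted signed int (where p[0] << 24 can sign-extend). A sketch in the same spirit as common/host2net.h, not its verbatim text:
#include <stdint.h>

static inline uint32_t buf32_to_u32(const void *buffer)
{
    const unsigned char *p = buffer;

    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static inline uint16_t buf16_to_u16(const void *buffer)
{
    const unsigned char *p = buffer;

    return (uint16_t)(((uint16_t)p[0] << 8) | p[1]);
}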
void Monitor::remove_all_sessions() { Mutex::Locker l(session_map_lock); while (!session_map.sessions.empty()) { MonSession *s = session_map.sessions.front(); remove_session(s); if (logger) logger->inc(l_mon_session_rm); } if (logger) logger->set(l_mon_num_sessions, session_map.get_size()); }
Safe
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
1.464473093200022e+38
12
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <sage@redhat.com> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
0
int sys_fcntl_long(int fd, int cmd, long arg) { int ret; do { ret = fcntl(fd, cmd, arg); } while (ret == -1 && errno == EINTR); return ret; }
Safe
[ "CWE-20" ]
samba
d77a74237e660dd2ce9f1e14b02635f8a2569653
3.080695914488875e+37
9
s3: nmbd: Fix bug 10633 - nmbd denial of service The Linux kernel has a bug in that it can give spurious wakeups on a non-blocking UDP socket for a non-deliverable packet. When nmbd was changed to use non-blocking sockets it became vulnerable to a spurious wakeup from poll/epoll. Fix sys_recvfile() to return on EWOULDBLOCK/EAGAIN. CVE-2014-0244 https://bugzilla.samba.org/show_bug.cgi?id=10633 Signed-off-by: Jeremy Allison <jra@samba.org> Reviewed-by: Andreas Schneider <asn@samba.org>
0
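The nmbd fix above boils down to: on a non-blocking socket a poll() wakeup can be spurious, so EWOULDBLOCK/EAGAIN from recv() means "nothing to read", not "error". A sketch of that pattern (illustrative, not Samba's sys_recvfile):
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t recv_nonblocking(int fd, void *buf, size_t len)
{
    ssize_t n;

    do {
        n = recv(fd, buf, len, 0);
    } while (n == -1 && errno == EINTR);

    if (n == -1 && (errno == EWOULDBLOCK || errno == EAGAIN))
        return 0;    /* spurious wakeup: treat as no data, not fatal */
    return n;
}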
PyImaging_LibTiffEncoderNew(PyObject* self, PyObject* args) { ImagingEncoderObject* encoder; char* mode; char* rawmode; char* compname; char* filename; int compression; int fp; PyObject *dir; PyObject *key, *value; Py_ssize_t pos = 0; int status; Py_ssize_t d_size; PyObject *keys, *values; if (! PyArg_ParseTuple(args, "sssisO", &mode, &rawmode, &compname, &fp, &filename, &dir)) { return NULL; } if (!PyDict_Check(dir)) { PyErr_SetString(PyExc_ValueError, "Invalid Dictionary"); return NULL; } else { d_size = PyDict_Size(dir); TRACE(("dict size: %d\n", (int)d_size)); keys = PyDict_Keys(dir); values = PyDict_Values(dir); for (pos=0;pos<d_size;pos++){ TRACE((" key: %d\n", (int)PyInt_AsLong(PyList_GetItem(keys,pos)))); } pos = 0; } TRACE(("new tiff encoder %s fp: %d, filename: %s \n", compname, fp, filename)); /* UNDONE -- we can probably do almost any arbitrary compression here, * so long as we're doing row/stripe based actions and not tiles. */ if (strcasecmp(compname, "tiff_ccitt") == 0) { compression = COMPRESSION_CCITTRLE; } else if (strcasecmp(compname, "group3") == 0) { compression = COMPRESSION_CCITTFAX3; } else if (strcasecmp(compname, "group4") == 0) { compression = COMPRESSION_CCITTFAX4; } else if (strcasecmp(compname, "tiff_raw_16") == 0) { compression = COMPRESSION_CCITTRLEW; } else { PyErr_SetString(PyExc_ValueError, "unknown compession"); return NULL; } TRACE(("Found compression: %d\n", compression)); encoder = PyImaging_EncoderNew(sizeof(TIFFSTATE)); if (encoder == NULL) return NULL; if (get_packer(encoder, mode, rawmode) < 0) return NULL; if (! ImagingLibTiffEncodeInit(&encoder->state, filename, fp)) { Py_DECREF(encoder); PyErr_SetString(PyExc_RuntimeError, "tiff codec initialization failed"); return NULL; } // While failes on 64 bit machines, complains that pos is an int instead of a Py_ssize_t // while (PyDict_Next(dir, &pos, &key, &value)) { for (pos=0;pos<d_size;pos++){ key = PyList_GetItem(keys,pos); value = PyList_GetItem(values,pos); status = 0; TRACE(("Attempting to set key: %d\n", (int)PyInt_AsLong(key))); if (PyInt_Check(value)) { TRACE(("Setting from Int: %d %ld \n", (int)PyInt_AsLong(key),PyInt_AsLong(value))); status = ImagingLibTiffSetField(&encoder->state, (ttag_t) PyInt_AsLong(key), PyInt_AsLong(value)); } else if(PyBytes_Check(value)) { TRACE(("Setting from String: %d, %s \n", (int)PyInt_AsLong(key),PyBytes_AsString(value))); status = ImagingLibTiffSetField(&encoder->state, (ttag_t) PyInt_AsLong(key), PyBytes_AsString(value)); } else if(PyList_Check(value)) { int len,i; float *floatav; TRACE(("Setting from List: %d \n", (int)PyInt_AsLong(key))); len = (int)PyList_Size(value); TRACE((" %d elements, setting as floats \n", len)); floatav = malloc(sizeof(float)*len); if (floatav) { for (i=0;i<len;i++) { floatav[i] = (float)PyFloat_AsDouble(PyList_GetItem(value,i)); } status = ImagingLibTiffSetField(&encoder->state, (ttag_t) PyInt_AsLong(key), floatav); free(floatav); } } else if (PyFloat_Check(value)) { TRACE(("Setting from String: %d, %f \n", (int)PyInt_AsLong(key),PyFloat_AsDouble(value))); status = ImagingLibTiffSetField(&encoder->state, (ttag_t) PyInt_AsLong(key), (float)PyFloat_AsDouble(value)); } else { TRACE(("Unhandled type for key %d : %s ", (int)PyInt_AsLong(key), PyBytes_AsString(PyObject_Str(value)))); } if (!status) { TRACE(("Error setting Field\n")); Py_DECREF(encoder); PyErr_SetString(PyExc_RuntimeError, "Error setting from dictionary"); return NULL; } } encoder->encode = ImagingLibTiffEncode; return (PyObject*) encoder; }
Vulnerable
[ "CWE-119" ]
Pillow
a130c45990578a1bb0a6a000ed1b110e27324910
3.2128627617094288e+38
133
add several TIFF decoders and encoders
1
nft_obj_type_get(struct net *net, u32 objtype) { const struct nft_object_type *type; type = __nft_obj_type_get(objtype); if (type != NULL && try_module_get(type->owner)) return type; lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (type == NULL) { if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN) return ERR_PTR(-EAGAIN); } #endif return ERR_PTR(-ENOENT); }
Safe
[ "CWE-665" ]
linux
ad9f151e560b016b6ad3280b48e42fa11e1a5440
1.4633647901253225e+38
17
netfilter: nf_tables: initialize set before expression setup nft_set_elem_expr_alloc() needs an initialized set if the expression sets the NFT_EXPR_GC flag. Move set fields initialization before expression setup. [4512935.019450] ================================================================== [4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532 [4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48 [...] [4512935.019502] Call Trace: [4512935.019505] dump_stack+0x89/0xb4 [4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019560] kasan_report.cold.12+0x5f/0xd8 [4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables] Reported-by: syzbot+ce96ca2b1d0b37c6422d@syzkaller.appspotmail.com Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition") Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
0
void mysql_read_default_options(struct st_mysql_options *options, const char *filename,const char *group) { int argc; char *argv_buff[1],**argv; const char *groups[5]; DBUG_ENTER("mysql_read_default_options"); DBUG_PRINT("enter",("file: %s group: %s",filename,group ? group :"NULL")); compile_time_assert(OPT_keep_this_one_last == array_elements(default_options)); argc=1; argv=argv_buff; argv_buff[0]= (char*) "client"; groups[0]= (char*) "client"; groups[1]= (char*) "client-server"; groups[2]= (char*) "client-mariadb"; groups[3]= (char*) group; groups[4]=0; my_load_defaults(filename, groups, &argc, &argv, NULL); if (argc != 1) /* If some default option */ { char **option=argv; while (*++option) { if (my_getopt_is_args_separator(option[0])) /* skip arguments separator */ continue; /* DBUG_PRINT("info",("option: %s",option[0])); */ if (option[0][0] == '-' && option[0][1] == '-') { char *end=strcend(*option,'='); char *opt_arg=0; if (*end) { opt_arg=end+1; *end=0; /* Remove '=' */ } /* Change all '_' in variable name to '-' */ for (end= *option ; *(end= strcend(end,'_')) ; ) *end= '-'; switch (find_type(*option + 2, &option_types, FIND_TYPE_BASIC)) { case OPT_port: if (opt_arg) options->port=atoi(opt_arg); break; case OPT_socket: if (opt_arg) { my_free(options->unix_socket); options->unix_socket=my_strdup(opt_arg,MYF(MY_WME)); } break; case OPT_compress: options->compress=1; options->client_flag|= CLIENT_COMPRESS; break; case OPT_password: if (opt_arg) { my_free(options->password); options->password=my_strdup(opt_arg,MYF(MY_WME)); } break; case OPT_pipe: options->protocol = MYSQL_PROTOCOL_PIPE; break; case OPT_connect_timeout: case OPT_timeout: if (opt_arg) options->connect_timeout=atoi(opt_arg); break; case OPT_user: if (opt_arg) { my_free(options->user); options->user=my_strdup(opt_arg,MYF(MY_WME)); } break; case OPT_init_command: add_init_command(options,opt_arg); break; case OPT_host: if (opt_arg) { my_free(options->host); options->host=my_strdup(opt_arg,MYF(MY_WME)); } break; case OPT_database: if (opt_arg) { my_free(options->db); options->db=my_strdup(opt_arg,MYF(MY_WME)); } break; case OPT_debug: #ifdef MYSQL_CLIENT mysql_debug(opt_arg ? 
opt_arg : "d:t:o,/tmp/client.trace"); break; #endif case OPT_return_found_rows: options->client_flag|=CLIENT_FOUND_ROWS; break; #if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) case OPT_ssl_key: my_free(options->ssl_key); options->ssl_key = my_strdup(opt_arg, MYF(MY_WME)); break; case OPT_ssl_cert: my_free(options->ssl_cert); options->ssl_cert = my_strdup(opt_arg, MYF(MY_WME)); break; case OPT_ssl_ca: my_free(options->ssl_ca); options->ssl_ca = my_strdup(opt_arg, MYF(MY_WME)); break; case OPT_ssl_capath: my_free(options->ssl_capath); options->ssl_capath = my_strdup(opt_arg, MYF(MY_WME)); break; case OPT_ssl_cipher: my_free(options->ssl_cipher); options->ssl_cipher= my_strdup(opt_arg, MYF(MY_WME)); break; #else case OPT_ssl_key: case OPT_ssl_cert: case OPT_ssl_ca: case OPT_ssl_capath: case OPT_ssl_cipher: break; #endif /* HAVE_OPENSSL && !EMBEDDED_LIBRARY */ case OPT_character_sets_dir: my_free(options->charset_dir); options->charset_dir = my_strdup(opt_arg, MYF(MY_WME)); break; case OPT_default_character_set: my_free(options->charset_name); options->charset_name = my_strdup(opt_arg, MYF(MY_WME)); break; case OPT_interactive_timeout: options->client_flag|= CLIENT_INTERACTIVE; break; case OPT_local_infile: if (!opt_arg || atoi(opt_arg) != 0) options->client_flag|= CLIENT_LOCAL_FILES; else options->client_flag&= ~CLIENT_LOCAL_FILES; break; case OPT_disable_local_infile: options->client_flag&= ~CLIENT_LOCAL_FILES; break; case OPT_max_allowed_packet: if (opt_arg) options->max_allowed_packet= atoi(opt_arg); break; case OPT_protocol: if ((options->protocol= find_type(opt_arg, &sql_protocol_typelib, FIND_TYPE_BASIC)) <= 0) { fprintf(stderr, "Unknown option to protocol: %s\n", opt_arg); exit(1); } break; case OPT_shared_memory_base_name: #ifdef HAVE_SMEM if (options->shared_memory_base_name != def_shared_memory_base_name) my_free(options->shared_memory_base_name); options->shared_memory_base_name=my_strdup(opt_arg,MYF(MY_WME)); #endif break; case OPT_multi_results: options->client_flag|= CLIENT_MULTI_RESULTS; break; case OPT_multi_statements: case OPT_multi_queries: options->client_flag|= CLIENT_MULTI_STATEMENTS | CLIENT_MULTI_RESULTS; break; case OPT_secure_auth: options->secure_auth= TRUE; break; case OPT_report_data_truncation: options->report_data_truncation= opt_arg ? test(atoi(opt_arg)) : 1; break; case OPT_plugin_dir: { char buff[FN_REFLEN], buff2[FN_REFLEN]; if (strlen(opt_arg) >= FN_REFLEN) opt_arg[FN_REFLEN]= '\0'; if (my_realpath(buff, opt_arg, 0)) { DBUG_PRINT("warning",("failed to normalize the plugin path: %s", opt_arg)); break; } convert_dirname(buff2, buff, NULL); EXTENSION_SET_STRING(options, plugin_dir, buff2); } break; case OPT_default_auth: EXTENSION_SET_STRING(options, default_auth, opt_arg); break; case OPT_enable_cleartext_plugin: break; default: DBUG_PRINT("warning",("unknown option: %s",option[0])); } } } } free_defaults(argv); DBUG_VOID_RETURN; }
Safe
[]
server
f5369faf5bbfb56b5e945836eb3f7c7ee88b4079
5.020526754468905e+37
213
don't disable SSL when connecting via libmysqld
0
static void blkcg_bind(struct cgroup_subsys_state *root_css) { int i; mutex_lock(&blkcg_pol_mutex); for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkcg *blkcg; if (!pol || !pol->cpd_bind_fn) continue; list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) if (blkcg->cpd[pol->plid]) pol->cpd_bind_fn(blkcg->cpd[pol->plid]); } mutex_unlock(&blkcg_pol_mutex); }
Safe
[ "CWE-415" ]
linux
9b54d816e00425c3a517514e0d677bb3cec49258
2.2657464780585807e+38
19
blkcg: fix double free of new_blkg in blkcg_init_queue If blkg_create fails, new_blkg passed as an argument will be freed by blkg_create, so there is no need to free it again. Signed-off-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Jens Axboe <axboe@fb.com>
0
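The blkcg double free above is an instance of a general ownership rule worth spelling out: if the callee frees its argument on failure, the caller must not free it again. A hedged sketch with hypothetical names:
#include <stdlib.h>

struct blkg_sketch { int dummy; };

/* On failure this function owns and frees its argument. */
static int blkg_create_sketch(struct blkg_sketch *new_blkg)
{
    free(new_blkg);
    return -1;
}

static int init_queue_sketch(void)
{
    struct blkg_sketch *new_blkg = calloc(1, sizeof(*new_blkg));

    if (!new_blkg)
        return -1;
    if (blkg_create_sketch(new_blkg) < 0) {
        /* the bug was an extra free(new_blkg) here; the callee
         * already released it on the failure path */
        return -1;
    }
    return 0;
}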
rb_str_to_s(str) VALUE str; { if (rb_obj_class(str) != rb_cString) { VALUE dup = str_alloc(rb_cString); rb_str_replace(dup, str); return dup; } return str; }
Safe
[ "CWE-20" ]
ruby
e926ef5233cc9f1035d3d51068abe9df8b5429da
1.3547114005371628e+37
10
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export. * string.c (rb_str_tmp_new), intern.h: New function. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
0
TEST_P(Security, BuiltinAuthenticationAndAccessAndCryptoPlugin_PermissionsDisableDiscoveryDisableAccessEncrypt_validation_ok_enable_discovery_disable_access_encrypt) // *INDENT-ON* { PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME); PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME); std::string governance_file("governance_disable_discovery_disable_access_encrypt.smime"); BuiltinAuthenticationAndAccessAndCryptoPlugin_Permissions_validation_ok_common(reader, writer, governance_file); }
Safe
[ "CWE-284" ]
Fast-DDS
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
2.1208098300153485e+38
9
check remote permissions (#1387) * Refs 5346. Blackbox test Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. one-way string compare Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Do not add partition separator on last partition Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Access control unit testing It only covers Partition and Topic permissions Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs #3680. Fix partition check on Permissions plugin. Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Fix tests on mac Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Fix windows tests Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Avoid memory leak on test Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Proxy data mocks should not return temporary objects Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * refs 3680. uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> Co-authored-by: Miguel Company <MiguelCompany@eprosima.com>
0
static filter_list_entry php_find_filter(long id) /* {{{ */ { int i, size = sizeof(filter_list) / sizeof(filter_list_entry); for (i = 0; i < size; ++i) { if (filter_list[i].id == id) { return filter_list[i]; } } /* Fallback to "string" filter */ for (i = 0; i < size; ++i) { if (filter_list[i].id == FILTER_DEFAULT) { return filter_list[i]; } } /* To shut up GCC */ return filter_list[0]; }
Safe
[ "CWE-190" ]
php-src
a5b5743d71fbd5ae944469a1ca443a1cdb30663a
2.611549128346735e+38
18
full_special_chars filter from trunk - approved by johannes
0
do_writecert (app_t app, ctrl_t ctrl, const char *certidstr, gpg_error_t (*pincb)(void*, const char *, char **), void *pincb_arg, const unsigned char *certdata, size_t certdatalen) { (void)ctrl; #if GNUPG_MAJOR_VERSION > 1 if (strcmp (certidstr, "OPENPGP.3")) return gpg_error (GPG_ERR_INV_ID); if (!certdata || !certdatalen) return gpg_error (GPG_ERR_INV_ARG); if (!app->app_local->extcap.is_v2) return gpg_error (GPG_ERR_NOT_SUPPORTED); if (certdatalen > app->app_local->extcap.max_certlen_3) return gpg_error (GPG_ERR_TOO_LARGE); return do_setattr (app, "CERT-3", pincb, pincb_arg, certdata, certdatalen); #else return gpg_error (GPG_ERR_NOT_IMPLEMENTED); #endif }
Safe
[ "CWE-20" ]
gnupg
2183683bd633818dd031b090b5530951de76f392
1.9554344317358255e+36
21
Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <wk@gnupg.org>
0
get_update_permission (const char *app_id) { g_autoptr(GVariant) out_perms = NULL; g_autoptr(GVariant) out_data = NULL; g_autoptr(GError) error = NULL; guint32 ret = UNSET; if (permission_store == NULL) { g_debug ("No portals installed, assume no permissions"); return NO; } if (!xdp_dbus_permission_store_call_lookup_sync (permission_store, PERMISSION_TABLE, PERMISSION_ID, &out_perms, &out_data, NULL, &error)) { g_dbus_error_strip_remote_error (error); g_debug ("No updates permissions found: %s", error->message); g_clear_error (&error); } if (out_perms != NULL) { const char **perms; if (g_variant_lookup (out_perms, app_id, "^a&s", &perms)) { if (strcmp (perms[0], "ask") == 0) ret = ASK; else if (strcmp (perms[0], "yes") == 0) ret = YES; else ret = NO; } } g_debug ("Updates permissions for %s: %d", app_id, ret); return ret; }
Safe
[ "CWE-94", "CWE-74" ]
flatpak
aeb6a7ab0abaac4a8f4ad98b3df476d9de6b8bd4
4.1955703777353296e+37
45
portal: Convert --env in extra-args into --env-fd This hides overridden variables from the command-line, which means processes running under other uids can't see them in /proc/*/cmdline, which might be important if they contain secrets. Signed-off-by: Simon McVittie <smcv@collabora.com> Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
0
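The flatpak change above replaces VAR=value arguments (readable by any uid via /proc/PID/cmdline) with assignments written to a file descriptor. A sketch of the idea using a pipe; the function name and the NUL-separated record format are assumptions, not flatpak's exact wire format:
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Returns a read fd to pass to the child (e.g. as --env-fd=N), or -1. */
static int make_env_fd(const char *const *pairs, size_t n)
{
    int fds[2];
    size_t i;

    if (pipe(fds) == -1)
        return -1;
    for (i = 0; i < n; i++) {
        /* NUL-separated VAR=value records, never visible in cmdline */
        if (write(fds[1], pairs[i], strlen(pairs[i]) + 1) == -1) {
            close(fds[0]);
            close(fds[1]);
            return -1;
        }
    }
    close(fds[1]);
    return fds[0];
}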
int rtl_usb_resume(struct usb_interface *pusb_intf) { return 0; }
Safe
[ "CWE-400", "CWE-401" ]
linux
3f93616951138a598d930dcaec40f2bfd9ce43bb
1.572647846062587e+38
4
rtlwifi: prevent memory leak in rtl_usb_probe In rtl_usb_probe, if the allocation for usb_data fails, the allocated hw should be released. In addition, the allocated rtlpriv->usb_data should be released on the error handling path. Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
0
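The rtlwifi leak above is the classic probe-time pattern: every allocation made before a failing step must be unwound on that step's error path. A minimal sketch with hypothetical names:
#include <stdlib.h>

struct hw_sketch { void *usb_data; };

static struct hw_sketch *probe_sketch(void)
{
    struct hw_sketch *hw = calloc(1, sizeof(*hw));

    if (!hw)
        return NULL;
    hw->usb_data = calloc(1, 4096);
    if (!hw->usb_data) {
        free(hw);        /* the fix: don't leak hw when usb_data fails */
        return NULL;
    }
    return hw;
}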
int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, uintptr_t data, const char *name, struct task_struct **thread_ptr) { struct kvm_vm_worker_thread_context init_context = {}; struct task_struct *thread; *thread_ptr = NULL; init_context.kvm = kvm; init_context.parent = current; init_context.thread_fn = thread_fn; init_context.data = data; init_completion(&init_context.init_done); thread = kthread_run(kvm_vm_worker_thread, &init_context, "%s-%d", name, task_pid_nr(current)); if (IS_ERR(thread)) return PTR_ERR(thread); /* kthread_run is never supposed to return NULL */ WARN_ON(thread == NULL); wait_for_completion(&init_context.init_done); if (!init_context.err) *thread_ptr = thread; return init_context.err;
Safe
[ "CWE-459" ]
linux
683412ccf61294d727ead4a73d97397396e69a6b
1.5277870549035648e+38
29
KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines is released by a VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remains pinned until the end of the VM lifetime. Therefore, KVM does not flush cache at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicious userspace can crash the host kernel by creating a malicious VM and continuously allocating/releasing unpinned confidential memory pages while the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM gets flushed. In particular, hook mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush cache accordingly. The hooks run after releasing the mmu lock to avoid contention with other vCPUs. Cc: stable@vger.kernel.org Suggested-by: Sean Christopherson <seanjc@google.com> Reported-by: Mingwei Zhang <mizhang@google.com> Signed-off-by: Mingwei Zhang <mizhang@google.com> Message-Id: <20220421031407.2516575-4-mizhang@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
void var_set_errno(int sql_errno) { var_set_int("$mysql_errno", sql_errno); var_set_string("$mysql_errname", get_errname_from_code(sql_errno)); }
Safe
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
6.195631423172232e+37
5
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
0
type_option (const gchar *type, const gchar *option, const gchar *default_str) { if (type && cockpit_conf_string (type, option)) return cockpit_conf_string (type, option); return default_str; }
Safe
[ "CWE-1021" ]
cockpit
46f6839d1af4e662648a85f3e54bba2d57f39f0e
1.1827171832660238e+38
9
ws: Restrict our cookie to the login host only Mark our cookie as `SameSite: Strict` [1]. The current `None` default will soon be moved to `Lax` by Firefox and Chromium, and recent versions started to throw a warning about it. [1] https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite https://bugzilla.redhat.com/show_bug.cgi?id=1891944
0
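The cockpit hardening above is a one-attribute change at the point where the session cookie header is emitted. A sketch of the resulting header; the attribute set beyond SameSite=Strict is an assumption, not taken from cockpit:
#include <stdio.h>

static void emit_session_cookie(const char *name, const char *value)
{
    printf("Set-Cookie: %s=%s; Path=/; Secure; HttpOnly; SameSite=Strict\r\n",
           name, value);
}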
static NTSTATUS dcesrv_lsa_ClearAuditLog(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, struct lsa_ClearAuditLog *r) { DCESRV_FAULT(DCERPC_FAULT_OP_RNG_ERROR); }
Safe
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
4.883936730682699e+37
5
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
0
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { BUILD_BUG(); return 0; }
Safe
[ "CWE-362" ]
linux
17839856fd588f4ab6b789f482ed3ffd7c403e1f
2.944841536057174e+38
7
gup: document and work around "COW can break either way" issue Doing a "get_user_pages()" on a copy-on-write page for reading can be ambiguous: the page can be COW'ed at any time afterwards, and the direction of a COW event isn't defined. Yes, whoever writes to it will generally do the COW, but if the thread that did the get_user_pages() unmapped the page before the write (and that could happen due to memory pressure in addition to any outright action), the writer could also just take over the old page instead. End result: the get_user_pages() call might result in a page pointer that is no longer associated with the original VM, and is associated with - and controlled by - another VM having taken it over instead. So when doing a get_user_pages() on a COW mapping, the only really safe thing to do would be to break the COW when getting the page, even when only getting it for reading. At the same time, some users simply don't even care. For example, the perf code wants to look up the page not because it cares about the page, but because the code simply wants to look up the physical address of the access for informational purposes, and doesn't really care about races when a page might be unmapped and remapped elsewhere. This adds logic to force a COW event by setting FOLL_WRITE on any copy-on-write mapping when FOLL_GET (or FOLL_PIN) is used to get a page pointer as a result. The current semantics end up being: - __get_user_pages_fast(): no change. If you don't ask for a write, you won't break COW. You'd better know what you're doing. - get_user_pages_fast(): the fast-case "look it up in the page tables without anything getting mmap_sem" now refuses to follow a read-only page, since it might need COW breaking. Which happens in the slow path - the fast path doesn't know if the memory might be COW or not. - get_user_pages() (including the slow-path fallback for gup_fast()): for a COW mapping, turn on FOLL_WRITE for FOLL_GET/FOLL_PIN, with very similar semantics to FOLL_FORCE. If it turns out that we want finer granularity (ie "only break COW when it might actually matter" - things like the zero page are special and don't need to be broken) we might need to push these semantics deeper into the lookup fault path. So if people care enough, it's possible that we might end up adding a new internal FOLL_BREAK_COW flag to go with the internal FOLL_COW flag we already have for tracking "I had a COW". Alternatively, if it turns out that different callers might want to explicitly control the forced COW break behavior, we might even want to make such a flag visible to the users of get_user_pages() instead of using the above default semantics. But for now, this is mostly commentary on the issue (this commit message being a lot bigger than the patch, and that patch in turn is almost all comments), with that minimal "enable COW breaking early" logic using the existing FOLL_WRITE behavior. [ It might be worth noting that we've always had this ambiguity, and it could arguably be seen as a user-space issue. You only get private COW mappings that could break either way in situations where user space is doing cooperative things (ie fork() before an execve() etc), but it _is_ surprising and very subtle, and fork() is supposed to give you independent address spaces. So let's treat this as a kernel issue and make the semantics of get_user_pages() easier to understand. 
Note that obviously a true shared mapping will still get a page that can change under us, so this does _not_ mean that get_user_pages() somehow returns any "stable" page ] Reported-by: Jann Horn <jannh@google.com> Tested-by: Christoph Hellwig <hch@lst.de> Acked-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Kirill Shutemov <kirill@shutemov.name> Acked-by: Jan Kara <jack@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Matthew Wilcox <willy@infradead.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
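The gup commit above adds one decision: if the caller wants to keep the page (FOLL_GET/FOLL_PIN) and the mapping is private copy-on-write, force a write fault so the COW break happens now, in this mm. A sketch of that decision; the flag values are placeholders, not the kernel's:
#define FOLL_WRITE 0x01
#define FOLL_GET   0x04
#define FOLL_PIN   0x40000

static unsigned int gup_fixup_flags(unsigned int flags, int vma_is_private_cow)
{
    if ((flags & (FOLL_GET | FOLL_PIN)) && vma_is_private_cow)
        flags |= FOLL_WRITE;   /* break COW even for a read-only gup */
    return flags;
}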
int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page, size_t len, int cpu, int full) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; struct ring_buffer_event *event; struct buffer_data_page *bpage; struct buffer_page *reader; unsigned long missed_events; unsigned long flags; unsigned int commit; unsigned int read; u64 save_timestamp; int ret = -1; if (!cpumask_test_cpu(cpu, buffer->cpumask)) goto out; /* * If len is not big enough to hold the page header, then * we can not copy anything. */ if (len <= BUF_PAGE_HDR_SIZE) goto out; len -= BUF_PAGE_HDR_SIZE; if (!data_page) goto out; bpage = *data_page; if (!bpage) goto out; raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); reader = rb_get_reader_page(cpu_buffer); if (!reader) goto out_unlock; event = rb_reader_event(cpu_buffer); read = reader->read; commit = rb_page_commit(reader); /* Check if any events were dropped */ missed_events = cpu_buffer->lost_events; /* * If this page has been partially read or * if len is not big enough to read the rest of the page or * a writer is still on the page, then * we must copy the data from the page to the buffer. * Otherwise, we can simply swap the page with the one passed in. */ if (read || (len < (commit - read)) || cpu_buffer->reader_page == cpu_buffer->commit_page) { struct buffer_data_page *rpage = cpu_buffer->reader_page->page; unsigned int rpos = read; unsigned int pos = 0; unsigned int size; if (full) goto out_unlock; if (len > (commit - read)) len = (commit - read); /* Always keep the time extend and data together */ size = rb_event_ts_length(event); if (len < size) goto out_unlock; /* save the current timestamp, since the user will need it */ save_timestamp = cpu_buffer->read_stamp; /* Need to copy one event at a time */ do { /* We need the size of one event, because * rb_advance_reader only advances by one event, * whereas rb_event_ts_length may include the size of * one or two events. * We have already ensured there's enough space if this * is a time extend. */ size = rb_event_length(event); memcpy(bpage->data + pos, rpage->data + rpos, size); len -= size; rb_advance_reader(cpu_buffer); rpos = reader->read; pos += size; if (rpos >= commit) break; event = rb_reader_event(cpu_buffer); /* Always keep the time extend and data together */ size = rb_event_ts_length(event); } while (len >= size); /* update bpage */ local_set(&bpage->commit, pos); bpage->time_stamp = save_timestamp; /* we copied everything to the beginning */ read = 0; } else { /* update the entry counter */ cpu_buffer->read += rb_page_entries(reader); cpu_buffer->read_bytes += BUF_PAGE_SIZE; /* swap the pages */ rb_init_page(bpage); bpage = reader->page; reader->page = *data_page; local_set(&reader->write, 0); local_set(&reader->entries, 0); reader->read = 0; *data_page = bpage; /* * Use the real_end for the data size, * This gives us a chance to store the lost events * on the page. */ if (reader->real_end) local_set(&bpage->commit, reader->real_end); } ret = read; cpu_buffer->lost_events = 0; commit = local_read(&bpage->commit); /* * Set a flag in the commit field if we lost events */ if (missed_events) { /* If there is room at the end of the page to save the * missed events, then record it there. */ if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { memcpy(&bpage->data[commit], &missed_events, sizeof(missed_events)); local_add(RB_MISSED_STORED, &bpage->commit); commit += sizeof(missed_events); } local_add(RB_MISSED_EVENTS, &bpage->commit); } /* * This page may be off to user land. 
Zero it out here. */ if (commit < BUF_PAGE_SIZE) memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); out_unlock: raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); out: return ret; }
Safe
[ "CWE-362" ]
linux
bbeb97464eefc65f506084fd9f18f21653e01137
1.911482838329291e+38
162
tracing: Fix race in trace_open and buffer resize call The race below can occur if trace_open and a resize of the cpu buffer run in parallel on different cpus: CPUX CPUY ring_buffer_resize atomic_read(&buffer->resize_disabled) tracing_open tracing_reset_online_cpus ring_buffer_reset_cpu rb_reset_cpu rb_update_pages remove/insert pages resetting pointer This race can cause a data abort or sometimes an infinite loop in rb_remove_pages and rb_insert_pages while checking pages for sanity. Take the buffer lock to fix this. Link: https://lkml.kernel.org/r/1601976833-24377-1-git-send-email-gkohli@codeaurora.org Cc: stable@vger.kernel.org Fixes: b23d7a5f4a07a ("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU") Signed-off-by: Gaurav Kohli <gkohli@codeaurora.org> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
0
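A minimal user-space sketch of the serialization the commit message above calls for, assuming a pthread mutex as a stand-in for the kernel's buffer mutex; the demo_* names are hypothetical illustrations, not the actual ring-buffer code.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the trace buffer: one lock guards both the
 * page-list updates done by a resize and the pointer resets done when a
 * trace file is opened, so the two can no longer interleave. */
struct demo_buffer {
    pthread_mutex_t mutex;
    int nr_pages;
    int read_pos;
};

static void demo_buffer_resize(struct demo_buffer *b, int nr_pages)
{
    pthread_mutex_lock(&b->mutex);   /* serializes against reset */
    b->nr_pages = nr_pages;          /* remove/insert pages */
    pthread_mutex_unlock(&b->mutex);
}

static void demo_buffer_reset(struct demo_buffer *b)
{
    pthread_mutex_lock(&b->mutex);   /* the fix: take the buffer lock */
    b->read_pos = 0;                 /* resetting pointers */
    pthread_mutex_unlock(&b->mutex);
}

int main(void)
{
    struct demo_buffer b = { PTHREAD_MUTEX_INITIALIZER, 4, 0 };
    demo_buffer_resize(&b, 8);
    demo_buffer_reset(&b);
    printf("pages=%d read_pos=%d\n", b.nr_pages, b.read_pos);
    return 0;
}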
static const char *lookup_name(uint32_t index, const char **list, uint32_t llen) { if (index >= llen || list[index] == NULL) { return "???"; } return list[index]; }
Safe
[ "CWE-835" ]
qemu
96d87bdda3919bb16f754b3d3fd1227e1f38f13c
2.9733294243542783e+38
7
xhci: guard xhci_kick_epctx against recursive calls Track xhci_kick_epctx processing being active in a variable. Check the variable before calling xhci_kick_epctx from xhci_kick_ep. Add an assert to make sure we don't call recursively into xhci_kick_epctx. Cc: 1653384@bugs.launchpad.net Fixes: 94b037f2a451b3dc855f9f2c346e5049a361bd55 Reported-by: Fabian Lesniak <fabian@lesniak-it.de> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com> Message-id: 1486035372-3621-1-git-send-email-kraxel@redhat.com Message-id: 1485790607-31399-5-git-send-email-kraxel@redhat.com
0
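A sketch of the reentrancy guard the message describes, under the assumption of a simplified endpoint context; demo_kick_epctx and demo_kick_ep are hypothetical stand-ins for the xhci functions named above.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* "kick_active" mirrors the variable the fix adds to track whether
 * endpoint processing is already running. */
struct demo_epctx {
    bool kick_active;
    int processed;
};

static void demo_kick_epctx(struct demo_epctx *ctx)
{
    assert(!ctx->kick_active);   /* recursive entry is a bug */
    ctx->kick_active = true;
    ctx->processed++;            /* process the endpoint's queue */
    ctx->kick_active = false;
}

static void demo_kick_ep(struct demo_epctx *ctx)
{
    if (ctx->kick_active)        /* check the flag before calling in */
        return;
    demo_kick_epctx(ctx);
}

int main(void)
{
    struct demo_epctx ctx = { false, 0 };
    demo_kick_ep(&ctx);
    printf("processed=%d\n", ctx.processed);
    return 0;
}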
bool asn1_read_ContextSimple(struct asn1_data *data, uint8_t num, DATA_BLOB *blob) { int len; ZERO_STRUCTP(blob); if (!asn1_start_tag(data, ASN1_CONTEXT_SIMPLE(num))) return false; len = asn1_tag_remaining(data); if (len < 0) { data->has_error = true; return false; } *blob = data_blob(NULL, len); if ((len != 0) && (!blob->data)) { data->has_error = true; return false; } asn1_read(data, blob->data, len); asn1_end_tag(data); return !data->has_error; }
Vulnerable
[ "CWE-399" ]
samba
9d989c9dd7a5b92d0c5d65287935471b83b6e884
2.2731411428629436e+38
19
CVE-2015-7540: lib: util: Check *every* asn1 return call and early return. BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187 Signed-off-by: Jeremy Allison <jra@samba.org> Reviewed-by: Volker Lendecke <Volker.Lendecke@SerNet.DE> Autobuild-User(master): Jeremy Allison <jra@samba.org> Autobuild-Date(master): Fri Sep 19 01:29:00 CEST 2014 on sn-devel-104 (cherry picked from commit b9d3fd4cc551df78a7b066ee8ce43bbaa3ff994a)
1
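A self-contained sketch of the check-every-return, early-return pattern the fix applies across the ASN.1 helpers; the demo_* decoder below is a hypothetical stand-in, not Samba's asn1 API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Minimal decoder state: once has_error is set, no later call may
 * silently keep consuming input. */
struct demo_asn1 {
    const unsigned char *p;
    size_t remaining;
    bool has_error;
};

static bool demo_read(struct demo_asn1 *d, unsigned char *out, size_t len)
{
    if (d->has_error || len > d->remaining) {
        d->has_error = true;
        return false;
    }
    memcpy(out, d->p, len);
    d->p += len;
    d->remaining -= len;
    return true;
}

static bool demo_read_blob(struct demo_asn1 *d, unsigned char *buf, size_t len)
{
    /* Every call is checked and failure returns early, instead of
     * carrying on with a decoder already in an error state. */
    if (!demo_read(d, buf, len))
        return false;
    return !d->has_error;
}

int main(void)
{
    unsigned char data[4] = { 1, 2, 3, 4 }, buf[8];
    struct demo_asn1 d = { data, sizeof(data), false };
    printf("ok=%d\n", demo_read_blob(&d, buf, sizeof(buf))); /* 0: fails safely */
    return 0;
}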
DefragVlanQinQTest(void) { Packet *p1 = NULL, *p2 = NULL, *r = NULL; int ret = 0; DefragInit(); p1 = BuildTestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8); if (p1 == NULL) goto end; p2 = BuildTestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8); if (p2 == NULL) goto end; /* With no VLAN IDs set, packets should re-assemble. */ if ((r = Defrag(NULL, NULL, p1, NULL)) != NULL) goto end; if ((r = Defrag(NULL, NULL, p2, NULL)) == NULL) goto end; SCFree(r); /* With mismatched VLANs, packets should not re-assemble. */ p1->vlan_id[0] = 1; p2->vlan_id[0] = 1; p1->vlan_id[1] = 1; p2->vlan_id[1] = 2; if ((r = Defrag(NULL, NULL, p1, NULL)) != NULL) goto end; if ((r = Defrag(NULL, NULL, p2, NULL)) != NULL) goto end; /* Pass. */ ret = 1; end: if (p1 != NULL) SCFree(p1); if (p2 != NULL) SCFree(p2); DefragDestroy(); return ret; }
Safe
[ "CWE-358" ]
suricata
4a04f814b15762eb446a5ead4d69d021512df6f8
2.955130304888941e+38
43
defrag - take protocol into account during re-assembly The IP protocol was not being used to match fragments with their packets, so a carefully constructed packet with a different protocol could be matched, allowing re-assembly to complete and creating a packet that would not be re-assembled by the destination host.
0
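A sketch of the fix's core idea: the fragment tracker's lookup key must include the IP protocol, not just the addresses and IP id. The struct and values below are illustrative, not Suricata's actual tracker.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct frag_key {
    uint32_t src, dst;
    uint16_t ip_id;
    uint8_t proto;   /* without this field, fragments of different
                        protocols with the same id could be mixed */
};

static bool frag_key_match(const struct frag_key *a, const struct frag_key *b)
{
    return a->src == b->src && a->dst == b->dst &&
           a->ip_id == b->ip_id && a->proto == b->proto;
}

int main(void)
{
    struct frag_key icmp = { 1, 2, 7, 1 };   /* IPPROTO_ICMP */
    struct frag_key udp  = { 1, 2, 7, 17 };  /* IPPROTO_UDP */
    printf("match=%d\n", frag_key_match(&icmp, &udp)); /* 0: kept apart */
    return 0;
}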
int sqlite3ResolveExprNames( NameContext *pNC, /* Namespace to resolve expressions in. */ Expr *pExpr /* The expression to be analyzed. */ ){ u16 savedHasAgg; Walker w; if( pExpr==0 ) return SQLITE_OK; savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin); pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin); w.pParse = pNC->pParse; w.xExprCallback = resolveExprStep; w.xSelectCallback = resolveSelectStep; w.xSelectCallback2 = 0; w.u.pNC = pNC; #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight += pExpr->nHeight; if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){ return SQLITE_ERROR; } #endif sqlite3WalkExpr(&w, pExpr); #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight -= pExpr->nHeight; #endif if( pNC->ncFlags & NC_HasAgg ){ ExprSetProperty(pExpr, EP_Agg); } if( pNC->ncFlags & NC_HasWin ){ ExprSetProperty(pExpr, EP_Win); } pNC->ncFlags |= savedHasAgg; return pNC->nErr>0 || w.pParse->nErr>0; }
Safe
[ "CWE-416" ]
sqlite
4ded26a53c4df312e9fd06facbbf70377e969983
3.0354081762620207e+38
34
Prevent aliases of window function expressions from being used as arguments to aggregate or other window functions. FossilOrigin-Name: 1e16d3e8fc60d39ca3899759ff15d355fdd7d3e23b325d8d2b0f954e11ce8dce
0
static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, unsigned long index, void *item) { while (radix_tree_insert(root, index, item)) cond_resched(); }
Safe
[ "CWE-476" ]
linux
4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6
1.6771480658333814e+38
6
f2fs: support swap file w/ DIO Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
0
xsltNumberFormatGetAnyLevel(xsltTransformContextPtr context, xmlNodePtr node, xsltCompMatchPtr countPat, xsltCompMatchPtr fromPat, double *array) { int amount = 0; int cnt = 0; xmlNodePtr cur; /* select the starting node */ switch (node->type) { case XML_ELEMENT_NODE: cur = node; break; case XML_ATTRIBUTE_NODE: cur = ((xmlAttrPtr) node)->parent; break; case XML_TEXT_NODE: case XML_PI_NODE: case XML_COMMENT_NODE: cur = node->parent; break; default: cur = NULL; break; } while (cur != NULL) { /* process current node */ if (countPat == NULL) { if ((node->type == cur->type) && /* FIXME: must use expanded-name instead of local name */ xmlStrEqual(node->name, cur->name)) { if ((node->ns == cur->ns) || ((node->ns != NULL) && (cur->ns != NULL) && (xmlStrEqual(node->ns->href, cur->ns->href) ))) cnt++; } } else { if (xsltTestCompMatchList(context, cur, countPat)) cnt++; } if ((fromPat != NULL) && xsltTestCompMatchList(context, cur, fromPat)) { break; /* while */ } /* Skip to next preceding or ancestor */ if ((cur->type == XML_DOCUMENT_NODE) || #ifdef LIBXML_DOCB_ENABLED (cur->type == XML_DOCB_DOCUMENT_NODE) || #endif (cur->type == XML_HTML_DOCUMENT_NODE)) break; /* while */ while ((cur->prev != NULL) && ((cur->prev->type == XML_DTD_NODE) || (cur->prev->type == XML_XINCLUDE_START) || (cur->prev->type == XML_XINCLUDE_END))) cur = cur->prev; if (cur->prev != NULL) { for (cur = cur->prev; cur->last != NULL; cur = cur->last); } else { cur = cur->parent; } } array[amount++] = (double) cnt; return(amount); }
Vulnerable
[ "CWE-119" ]
libxslt
d182d8f6ba3071503d96ce17395c9d55871f0242
1.6479583326501987e+38
74
Fix xsltNumberFormatGetMultipleLevel Namespace nodes are actually an xmlNs, not an xmlNode. They must be special-cased in xsltNumberFormatGetMultipleLevel to avoid an out-of-bounds heap access. Move the test whether a node matches the "count" pattern to a separate function to make the code more readable. As a side effect, we also compare expanded names when walking up the ancestor axis, fixing an insignificant bug.
1
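A stand-alone sketch of why the fix must special-case namespace records before treating them as full nodes; the structs below are deliberately simplified stand-ins, not libxml2's xmlNs/xmlNode layouts.

#include <stdio.h>

/* "ns" records are smaller than element nodes, so casting one to the
 * node type and touching the tail fields reads past the allocation;
 * checking the shared leading type tag first avoids that. */
enum demo_type { DEMO_ELEMENT, DEMO_NAMESPACE };

struct demo_ns   { enum demo_type type; const char *href; };
struct demo_node { enum demo_type type; const char *name;
                   struct demo_node *parent; };

static struct demo_node *demo_parent(void *record)
{
    enum demo_type t = *(enum demo_type *)record;
    if (t == DEMO_NAMESPACE)   /* special case: no parent field here */
        return NULL;
    return ((struct demo_node *)record)->parent;
}

int main(void)
{
    struct demo_ns ns = { DEMO_NAMESPACE, "urn:example" };
    printf("parent=%p\n", (void *)demo_parent(&ns)); /* NULL, no OOB read */
    return 0;
}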
Status GetOrCreateKernelAndDevice( EagerOperation* op, TensorHandle** retvals, int* num_retvals, core::RefCountPtr<KernelAndDevice>* out_kernel) { EagerContext& ctx = op->EagerContext(); Device* device = absl::get<Device*>(op->Device()); // Set the EagerOperation's device prior to extracting the input_dev_ptrs to // avoid any redundant H2D/D2H copies. if (device == nullptr && !op->is_function()) { Fprint128 device_cache_key = GetDeviceCacheKey(op, ctx); device = ctx.GetCachedDevice(device_cache_key); if (device == nullptr) { TF_RETURN_IF_ERROR(SetOpDevice(ctx, op, &device)); ctx.AddDeviceToCache(device_cache_key, device); } else { op->SetDevice(device); } } // Save the original value of reuse_rendezvous_for_functions from the context. bool reuse_rendezvous_for_functions_original_value = ctx.GetReuseRendezvousForFunctions(); // When running in eager_op_as_function mode Send/Recv ops need to be // placed on the same rendezvous to match the behaviour of eager mode. bool reuse_rendezvous_for_functions = (ctx.RunEagerOpAsFunction() && !op->is_function()) || reuse_rendezvous_for_functions_original_value; std::vector<Device*> input_dev_ptrs; absl::flat_hash_map<string, const std::vector<string>*> composite_devices; std::unordered_map<int, DtypeAndPartialTensorShape> input_resource_variable_dtypes_and_shapes; if (op->is_function() || ctx.RunEagerOpAsFunction()) { profiler::TraceMe activity("EagerCopyToDevice", profiler::TraceMeLevel::kInfo); input_dev_ptrs.reserve(op->Inputs().size()); const absl::InlinedVector<TensorHandle*, 4>* inputs; TF_RETURN_IF_ERROR(op->TensorHandleInputs(&inputs)); for (int i = 0, end = inputs->size(); i < end; ++i) { TensorHandle* input = (*inputs)[i]; Device* input_device; TF_RETURN_IF_ERROR(GetDeviceForInput(*op, ctx, input, &input_device)); VLOG(1) << op->Name() << ":input:" << i << " " << input_device->name(); input_dev_ptrs.push_back(input_device); CompositeDevice* composite_device = nullptr; if (ctx.FindCompositeDeviceFromName(input_device->name(), &composite_device) .ok()) { composite_devices[input_device->name()] = composite_device->underlying_devices(); } if (input->dtype == DT_RESOURCE) { // We only care about data type and shape for resource variable inputs. // But we have no way to tell if input is resource variable (other than // looking it up in ResourceMgr, which is slow). So we just get // resource_dtypes_and_shapes for all DT_RESOURCE inputs. If // resource_dtypes_and_shapes is not empty, take the first element. std::vector<DtypeAndPartialTensorShape> resource_dtypes_and_shapes; TF_RETURN_IF_ERROR(input->GetResourceHandleDtypesAndShapes( &resource_dtypes_and_shapes)); if (!resource_dtypes_and_shapes.empty()) { const DtypeAndPartialTensorShape& dtype_and_shape = resource_dtypes_and_shapes.at(0); input_resource_variable_dtypes_and_shapes[i] = dtype_and_shape; } } } } TF_ASSIGN_OR_RETURN( Fprint128 cache_key, GetKernelCacheKey(*op, op->MutableAttrs()->CacheKey(op->DeviceName()), input_dev_ptrs, input_resource_variable_dtypes_and_shapes)); core::RefCountPtr<KernelAndDevice> kernel = ctx.GetCachedKernel(cache_key); AbstractOperationPtr wrapped_op_releaser; // We can eliminate some overhead by running simple functions using regular // CallOp kernel. However, it is tricky to figure out which functions should // be run using CallOp. Also, currently CallOp runs neither optimization // passes (needed for TPU/XLA) nor grappler. // Here are some cases where a function should be run in multi-device mode: // - Function takes at least two resources on different devices. 
// - Function takes a resource on deviceA and a body op explicitly placed // on deviceB. // - Function has a colocation constraint. // - Function has an explicit device annotation (which might not be using // full canonical device name) different from op_device. Note that false // positives are ok. // - Function has a node or a (node) attribute that can potentially make // the function multi-device after a rewrite pass (e.g. various XLA/TPU // special nodes and attributes) if (kernel == nullptr) { VLOG(2) << "Creating new kernel for " << op->Name() << " on device " << DeviceNameOrUnspecified(absl::get<Device*>(op->Device())); bool run_function_with_flr = false; bool function_outputs_on_op_device = false; absl::optional<string> xla_compile_device_type; if (op->is_function()) { bool compile_with_xla; TF_RETURN_IF_ERROR(MustCompileWithXLA(op, ctx, &compile_with_xla)); if (compile_with_xla) { if (ctx.JitCompileRewrite()) { xla_compile_device_type = op->GetDeviceParsedName().type; run_function_with_flr = true; } else { // Note that it is not ideal, but currently correct, to set this // attribute after computing the kernel cache key above. // Note: If the attribute is already set to true, this is a noop. op->MutableAttrs()->Set(kXlaMustCompileAttr, true); } } else { run_function_with_flr = true; } GetFuncAttr(op, ctx, kOutputsOnOpDevice, &function_outputs_on_op_device) .IgnoreError(); } VLOG(2) << op->Name() << " function_outputs_on_op_device: " << function_outputs_on_op_device; if (device == nullptr) { TF_RETURN_IF_ERROR(SetOpDevice(ctx, op, &device)); } else { VLOG(1) << "Device for [" << op->Name() << "] already set to: " << device->name(); } // Note: We wrap the eager op AFTER the device has been inferred to ensure // that placement of the NodeDef in the function is exactly the same as in // eager mode. This is specially important for cases where the // preferred device is not the actual device on which the op is run. // E.g. the preferred device for a `RangeDataset` op could be set to `GPU` // but `ctx->SelectDevice` would still place it on CPU. Placer on the other // hand would throw an error. // // Note: The wrapped function is never jit compiled but rather run via the // FLR. This is needed because certain ops e.g. `VarHandleOp` can not be // jit compiled. Ideally we would run this via the jit compiled path and // expect unsupported ops to be outside compiled but that is not supported // on GPUs right now. bool allow_small_function_optimizations = false; bool int_args_and_retvals_on_device = false; bool allow_control_flow_sync_execution = false; // TODO(b/176491312): Remove this if shape inference on import flag is // removed. bool shape_inference_on_tfe_dialect_import = true; if (ctx.RunEagerOpAsFunction() && !op->is_function()) { EagerOperation* wrapped_op = nullptr; TF_RETURN_IF_ERROR(ValidateOp(op)); TF_RETURN_IF_ERROR(WrapInCallOp(op, &wrapped_op)); DCHECK(wrapped_op); DCHECK(wrapped_op->is_function()); wrapped_op_releaser.reset(wrapped_op); run_function_with_flr = true; allow_small_function_optimizations = true; allow_control_flow_sync_execution = true; shape_inference_on_tfe_dialect_import = false; int_args_and_retvals_on_device = IntArgsAndRetvalsOnDevice(op); op = wrapped_op; } const NodeDef& ndef = op->MutableAttrs()->BuildNodeDef(); FunctionLibraryRuntime* flr = device == nullptr ? 
nullptr : ctx.func_lib(device); if (device != nullptr && flr == nullptr) { return errors::NotFound( "Unable to find a FunctionLibraryRuntime corresponding to device ", device->name()); } auto runner = (flr != nullptr && flr->runner() != nullptr) ? flr->runner() : ctx.runner(); GraphCollector* graph_collector = nullptr; if (ctx.ShouldStoreGraphs()) { graph_collector = ctx.GetGraphCollector(); } // Treat the function as multi_device only when we are not compiling // it wholly with XLA. When compiling wholly with XLA, flr->CreateKernel // will create an XlaLaunchOp kernel to compile and run the function. if (run_function_with_flr) { // Multi-device functions don't use the rendezvous from eager context. // If we use that rendezvous, multiple concurrent calls to the same // function will likely result in collisions. However, this also means // that we don't support legitimate sending/receiving across function // boundary. VLOG(2) << "Running " << ndef.op() << " using multi-device function. " << "Full node_def=" << ndef.DebugString(); std::function<int64_t()> get_op_id = nullptr; #if !defined(IS_MOBILE_PLATFORM) get_op_id = [&ctx]() { return ctx.RemoteMgr()->NextOpId(); }; #endif // IS_MOBILE_PLATFORM ctx.reuse_rendezvous_for_functions_mu()->lock(); ctx.SetReuseRendezvousForFunctions(reuse_rendezvous_for_functions); auto rendezvous_creator = ctx.RendezvousCreator(); ctx.SetReuseRendezvousForFunctions( reuse_rendezvous_for_functions_original_value); ctx.reuse_rendezvous_for_functions_mu()->unlock(); kernel.reset(new KernelAndDeviceFunc( flr, ctx.pflr(), std::move(input_dev_ptrs), std::move(composite_devices), std::move(input_resource_variable_dtypes_and_shapes), runner, ctx.GetCollectiveExecutorHandle(), ctx.HostCPU(), op->Name(), function_outputs_on_op_device, allow_small_function_optimizations, allow_control_flow_sync_execution, shape_inference_on_tfe_dialect_import, int_args_and_retvals_on_device, xla_compile_device_type, std::move(rendezvous_creator), get_op_id)); } else { VLOG(2) << "Running " << ndef.op() << " using op kernel. " << ". Full node_def=" << ndef.DebugString(); kernel.reset(new KernelAndDeviceOp( ctx.GetRendezvous(), ctx.LogMemory(), flr, runner, ctx.GetCollectiveExecutorHandle(), ctx.HostCPU())); } TF_RETURN_IF_ERROR( kernel->Init(ctx.LogDevicePlacement(), ndef, graph_collector)); if (op->is_function()) { ctx.AddKernelToCache(cache_key, kernel.get()); } else { // Exclude tf.data op kernels from being cached. The reason for this is // that tf.data op kernels that accept a user-defined function will have a // unique cache key every time they are executed (because the user-defined // function is traced every time). Caching such kernels provides no // benefit and in some cases results in linear memory growth of use // programs that build input pipeline graphs in a loop. const OpDef* op_def; TF_RETURN_IF_ERROR(OpDefForOp(op->Name().data(), &op_def)); if (KernelCacheEnabled(*op_def)) { ctx.AddKernelToCache(cache_key, kernel.get()); } } } int num_outputs = kernel->num_outputs(); if (num_outputs > *num_retvals) { return errors::InvalidArgument("Expecting ", num_outputs, " outputs, but *num_retvals is ", *num_retvals); } *num_retvals = num_outputs; kernel->Ref(); // Ownership of reference is passed to out_kernel. out_kernel->reset(kernel.get()); return Status::OK(); }
Safe
[ "CWE-476", "CWE-475" ]
tensorflow
a5b89cd68c02329d793356bda85d079e9e69b4e7
1.304709430870007e+38
246
Fix empty resource handle vulnerability. Some ops that attempt to extract a resource handle from user input can lead to nullptr dereferences. This returns an error in such a case. PiperOrigin-RevId: 445571938
0
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask, uint16_t access_type, uint64_t fuid, uint16_t entry_type) { uint16_t type = entry_type & ACE_TYPE_FLAGS; aclp->z_ops->ace_mask_set(acep, access_mask); aclp->z_ops->ace_type_set(acep, access_type); aclp->z_ops->ace_flags_set(acep, entry_type); if ((type != ACE_OWNER && type != OWNING_GROUP && type != ACE_EVERYONE)) aclp->z_ops->ace_who_set(acep, fuid); }
Safe
[ "CWE-200", "CWE-732" ]
zfs
716b53d0a14c72bda16c0872565dd1909757e73f
1.5679727703768521e+38
12
FreeBSD: Fix UNIX permissions checking Reviewed-by: Ryan Moeller <ryan@iXsystems.com> Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov> Signed-off-by: Matt Macy <mmacy@FreeBSD.org> Closes #10727
0
static void vmci_transport_handle_detach(struct sock *sk) { struct vsock_sock *vsk; vsk = vsock_sk(sk); if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) { sock_set_flag(sk, SOCK_DONE); /* On a detach the peer will not be sending or receiving * anymore. */ vsk->peer_shutdown = SHUTDOWN_MASK; /* We should not be sending anymore since the peer won't be * there to receive, but we can still receive if there is data * left in our consume queue. */ if (vsock_stream_has_data(vsk) <= 0) { if (sk->sk_state == SS_CONNECTING) { /* The peer may detach from a queue pair while * we are still in the connecting state, i.e., * if the peer VM is killed after attaching to * a queue pair, but before we complete the * handshake. In that case, we treat the detach * event like a reset. */ sk->sk_state = SS_UNCONNECTED; sk->sk_err = ECONNRESET; sk->sk_error_report(sk); return; } sk->sk_state = SS_UNCONNECTED; } sk->sk_state_change(sk); } }
Safe
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
2.2347815541554684e+38
37
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <davem@davemloft.net> Suggested-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
0
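A sketch of the handler contract the message establishes, assuming a plain POSIX msghdr: the caller passes msg_namelen as 0, and the handler sets it only when it actually fills msg_name. demo_recvmsg is a hypothetical handler, not kernel code.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void demo_recvmsg(struct msghdr *msg)
{
    if (msg->msg_name != NULL) {
        struct sockaddr_storage *addr = msg->msg_name;
        memset(addr, 0, sizeof(*addr));      /* no uninitialized bytes */
        addr->ss_family = AF_UNIX;
        msg->msg_namelen = sizeof(struct sockaddr_storage);
    }
    /* if msg_name is NULL (plain read()), msg_namelen stays 0 and no
     * address bytes are ever copied back to the caller */
}

int main(void)
{
    struct msghdr msg = { 0 };   /* msg_name NULL, msg_namelen 0 */
    demo_recvmsg(&msg);
    printf("namelen=%d\n", (int)msg.msg_namelen);
    return 0;
}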
static struct tty_struct *tty_pair_get_tty(struct tty_struct *tty) { if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) tty = tty->link; return tty; }
Safe
[ "CWE-200", "CWE-362" ]
linux
5c17c861a357e9458001f021a7afa7aab9937439
1.4072593148860077e+37
7
tty: Fix unsafe ldisc reference via ioctl(TIOCGETD) ioctl(TIOCGETD) retrieves the line discipline id directly from the ldisc because the line discipline id (c_line) in termios is untrustworthy; userspace may have set termios via ioctl(TCSETS*) without actually changing the line discipline via ioctl(TIOCSETD). However, directly accessing the current ldisc via tty->ldisc is unsafe; the ldisc ptr dereferenced may be stale if the line discipline is changing via ioctl(TIOCSETD) or hangup. Wait for the line discipline reference (just like read() or write()) to retrieve the "current" line discipline id. Cc: <stable@vger.kernel.org> Signed-off-by: Peter Hurley <peter@hurleysoftware.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
0
void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N){ assert( p->nOp + N <= p->nOpAlloc ); }
Safe
[ "CWE-755" ]
sqlite
8654186b0236d556aa85528c2573ee0b6ab71be3
2.491973463156878e+38
3
When an error occurs while rewriting the parser tree for window functions in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set, and make sure that this shuts down any subsequent code generation that might depend on the transformations that were implemented. This fixes a problem discovered by the Yongheng and Rui fuzzer. FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
0
static int hsr_dev_close(struct net_device *dev) { /* Nothing to do here. */ return 0; }
Safe
[ "CWE-772", "CWE-269", "CWE-401" ]
linux
6caabe7f197d3466d238f70915d65301f1716626
1.2701938087953328e+38
5
net: hsr: fix memory leak in hsr_dev_finalize() If hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER) failed to add port, it directly returns res and forgets to free the node that allocated in hsr_create_self_node(), and forgets to delete the node->mac_list linked in hsr->self_node_db. BUG: memory leak unreferenced object 0xffff8881cfa0c780 (size 64): comm "syz-executor.0", pid 2077, jiffies 4294717969 (age 2415.377s) hex dump (first 32 bytes): e0 c7 a0 cf 81 88 ff ff 00 02 00 00 00 00 ad de ................ 00 e6 49 cd 81 88 ff ff c0 9b 87 d0 81 88 ff ff ..I............. backtrace: [<00000000e2ff5070>] hsr_dev_finalize+0x736/0x960 [hsr] [<000000003ed2e597>] hsr_newlink+0x2b2/0x3e0 [hsr] [<000000003fa8c6b6>] __rtnl_newlink+0xf1f/0x1600 net/core/rtnetlink.c:3182 [<000000001247a7ad>] rtnl_newlink+0x66/0x90 net/core/rtnetlink.c:3240 [<00000000e7d1b61d>] rtnetlink_rcv_msg+0x54e/0xb90 net/core/rtnetlink.c:5130 [<000000005556bd3a>] netlink_rcv_skb+0x129/0x340 net/netlink/af_netlink.c:2477 [<00000000741d5ee6>] netlink_unicast_kernel net/netlink/af_netlink.c:1310 [inline] [<00000000741d5ee6>] netlink_unicast+0x49a/0x650 net/netlink/af_netlink.c:1336 [<000000009d56f9b7>] netlink_sendmsg+0x88b/0xdf0 net/netlink/af_netlink.c:1917 [<0000000046b35c59>] sock_sendmsg_nosec net/socket.c:621 [inline] [<0000000046b35c59>] sock_sendmsg+0xc3/0x100 net/socket.c:631 [<00000000d208adc9>] __sys_sendto+0x33e/0x560 net/socket.c:1786 [<00000000b582837a>] __do_sys_sendto net/socket.c:1798 [inline] [<00000000b582837a>] __se_sys_sendto net/socket.c:1794 [inline] [<00000000b582837a>] __x64_sys_sendto+0xdd/0x1b0 net/socket.c:1794 [<00000000c866801d>] do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290 [<00000000fea382d9>] entry_SYSCALL_64_after_hwframe+0x49/0xbe [<00000000e01dacb3>] 0xffffffffffffffff Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.") Reported-by: Hulk Robot <hulkci@huawei.com> Signed-off-by: Mao Wenan <maowenan@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
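The general shape of the fix is ordinary goto-based error unwinding: when a later setup step fails, every earlier allocation must be released before returning. A minimal sketch with illustrative names only:

#include <stdio.h>
#include <stdlib.h>

struct demo_node { int id; };

static int demo_finalize(void)
{
    struct demo_node *self_node = malloc(sizeof(*self_node));
    if (!self_node)
        return -1;

    int res = -1;            /* pretend the later add-port step failed */
    if (res < 0)
        goto err_free_node;  /* not just "return res" */

    return 0;

err_free_node:
    free(self_node);         /* the allocation that leaked in the bug */
    return res;
}

int main(void)
{
    printf("rc=%d\n", demo_finalize());
    return 0;
}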
e_ews_connection_try_credentials_sync (EEwsConnection *cnc, const ENamedParameters *credentials, ESource *use_source, gchar **out_certificate_pem, GTlsCertificateFlags *out_certificate_errors, GCancellable *cancellable, GError **error) { ESourceAuthenticationResult result; ESource *source; gboolean de_set_source; EwsFolderId *fid = NULL; GSList *ids = NULL; GError *local_error = NULL; g_return_val_if_fail (E_IS_EWS_CONNECTION (cnc), E_SOURCE_AUTHENTICATION_ERROR); e_ews_connection_update_credentials (cnc, credentials); fid = g_new0 (EwsFolderId, 1); fid->id = g_strdup ("inbox"); fid->is_distinguished_id = TRUE; ids = g_slist_append (ids, fid); source = e_ews_connection_get_source (cnc); if (use_source && use_source != source) { cnc->priv->source = g_object_ref (use_source); de_set_source = TRUE; } else { source = NULL; de_set_source = FALSE; } e_ews_connection_get_folder_sync ( cnc, EWS_PRIORITY_MEDIUM, "Default", NULL, ids, NULL, cancellable, &local_error); if (de_set_source) { g_clear_object (&cnc->priv->source); cnc->priv->source = source; } g_slist_free_full (ids, (GDestroyNotify) e_ews_folder_id_free); if (local_error == NULL) { result = E_SOURCE_AUTHENTICATION_ACCEPTED; } else if (g_error_matches (local_error, SOUP_HTTP_ERROR, SOUP_STATUS_SSL_FAILED) && e_ews_connection_get_ssl_error_details (cnc, out_certificate_pem, out_certificate_errors)) { result = E_SOURCE_AUTHENTICATION_ERROR_SSL_FAILED; } else { gboolean auth_failed; auth_failed = g_error_matches ( local_error, EWS_CONNECTION_ERROR, EWS_CONNECTION_ERROR_AUTHENTICATION_FAILED); if (auth_failed) { g_clear_error (&local_error); if (camel_ews_settings_get_auth_mechanism (cnc->priv->settings) != EWS_AUTH_TYPE_GSSAPI && camel_ews_settings_get_auth_mechanism (cnc->priv->settings) != EWS_AUTH_TYPE_OAUTH2 && (!credentials || !e_named_parameters_exists (credentials, E_SOURCE_CREDENTIAL_PASSWORD))) { result = E_SOURCE_AUTHENTICATION_REQUIRED; } else { result = E_SOURCE_AUTHENTICATION_REJECTED; } } else { g_propagate_error (error, local_error); result = E_SOURCE_AUTHENTICATION_ERROR; } e_ews_connection_set_password (cnc, NULL); } return result; }
Safe
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
7.76980276768223e+37
76
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
0
redraw_later(int type) { redraw_win_later(curwin, type); }
Safe
[ "CWE-122" ]
vim
826bfe4bbd7594188e3d74d2539d9707b1c6a14b
2.2092208236032164e+38
4
patch 8.2.3487: illegal memory access if buffer name is very long Problem: Illegal memory access if buffer name is very long. Solution: Make sure not to go over the end of the buffer.
0
GF_Box *saiz_New() { ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoSizeBox, GF_ISOM_BOX_TYPE_SAIZ); return (GF_Box *)tmp; }
Safe
[ "CWE-125" ]
gpac
bceb03fd2be95097a7b409ea59914f332fb6bc86
5.902831643287507e+37
5
fixed 2 possible heap overflows (inc. #1088)
0
static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait) { journal_t *journal = EXT4_SB(sb)->s_journal; WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; if (journal) return jbd2_journal_try_to_free_buffers(journal, page, wait & ~__GFP_WAIT); return try_to_free_buffers(page); }
Safe
[ "CWE-703" ]
linux
744692dc059845b2a3022119871846e74d4f6e11
3.172315295090816e+38
13
ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <jiayingz@google.com> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
0
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) { struct ioat_chan_common *chan = data; if (test_bit(IOAT_RUN, &chan->state)) tasklet_schedule(&chan->cleanup_task); return IRQ_HANDLED; }
Safe
[]
linux
7bced397510ab569d31de4c70b39e13355046387
5.7664751245766805e+37
9
net_dma: simple removal Per commit "77873803363c net_dma: mark broken" net_dma is no longer used and there is no plan to fix it. This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards. Reverting the remainder of the net_dma induced changes is deferred to subsequent patches. Marked for stable due to Roman's report of a memory leak in dma_pin_iovec_pages(): https://lkml.org/lkml/2014/9/3/177 Cc: Dave Jiang <dave.jiang@intel.com> Cc: Vinod Koul <vinod.koul@intel.com> Cc: David Whipple <whipple@securedatainnovations.ch> Cc: Alexander Duyck <alexander.h.duyck@intel.com> Cc: <stable@vger.kernel.org> Reported-by: Roman Gushchin <klamm@yandex-team.ru> Acked-by: David S. Miller <davem@davemloft.net> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
0
DefaultSupportedMessages(rfbClient* client) { memset((char *)&client->supportedMessages,0,sizeof(client->supportedMessages)); /* Default client supported messages (universal RFB 3.3 protocol) */ SetClient2Server(client, rfbSetPixelFormat); /* SetClient2Server(client, rfbFixColourMapEntries); Not currently supported */ SetClient2Server(client, rfbSetEncodings); SetClient2Server(client, rfbFramebufferUpdateRequest); SetClient2Server(client, rfbKeyEvent); SetClient2Server(client, rfbPointerEvent); SetClient2Server(client, rfbClientCutText); /* technically, we only care what we can *send* to the server * but, we set Server2Client Just in case it ever becomes useful */ SetServer2Client(client, rfbFramebufferUpdate); SetServer2Client(client, rfbSetColourMapEntries); SetServer2Client(client, rfbBell); SetServer2Client(client, rfbServerCutText); }
Safe
[ "CWE-20" ]
libvncserver
85a778c0e45e87e35ee7199f1f25020648e8b812
7.829741836766473e+37
20
Check for MallocFrameBuffer() return value If MallocFrameBuffer() returns FALSE, the frame buffer pointer is left NULL. Subsequent writes into that buffer could lead to memory corruption, or even arbitrary code execution.
0
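A sketch of the allocation-check pattern the message asks for: never write into the frame buffer unless its allocation actually succeeded. The demo_* struct and functions are stand-ins, not libvncclient's API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_client { uint8_t *frameBuffer; int width, height, bpp; };

static int demo_malloc_framebuffer(struct demo_client *c)
{
    size_t sz = (size_t)c->width * c->height * c->bpp;
    free(c->frameBuffer);
    c->frameBuffer = malloc(sz);
    return c->frameBuffer != NULL;   /* FALSE on failure */
}

static int demo_handle_update(struct demo_client *c)
{
    if (!demo_malloc_framebuffer(c))  /* the check the fix adds */
        return 0;                     /* bail out, never write */
    memset(c->frameBuffer, 0, (size_t)c->width * c->height * c->bpp);
    return 1;
}

int main(void)
{
    struct demo_client c = { NULL, 640, 480, 4 };
    printf("ok=%d\n", demo_handle_update(&c));
    free(c.frameBuffer);
    return 0;
}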
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, unsigned int flags, u32 ext_filter_mask) { struct ifinfomsg *ifm; struct nlmsghdr *nlh; struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats; struct nlattr *attr, *af_spec; struct rtnl_af_ops *af_ops; struct net_device *upper_dev = netdev_master_upper_dev_get(dev); ASSERT_RTNL(); nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifi_family = AF_UNSPEC; ifm->__ifi_pad = 0; ifm->ifi_type = dev->type; ifm->ifi_index = dev->ifindex; ifm->ifi_flags = dev_get_flags(dev); ifm->ifi_change = change; if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || nla_put_u8(skb, IFLA_OPERSTATE, netif_running(dev) ? dev->operstate : IF_OPER_DOWN) || nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || nla_put_u32(skb, IFLA_GROUP, dev->group) || nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || #ifdef CONFIG_RPS nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || #endif (dev->ifindex != dev->iflink && nla_put_u32(skb, IFLA_LINK, dev->iflink)) || (upper_dev && nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) || nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || (dev->qdisc && nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || (dev->ifalias && nla_put_string(skb, IFLA_IFALIAS, dev->ifalias))) goto nla_put_failure; if (1) { struct rtnl_link_ifmap map = { .mem_start = dev->mem_start, .mem_end = dev->mem_end, .base_addr = dev->base_addr, .irq = dev->irq, .dma = dev->dma, .port = dev->if_port, }; if (nla_put(skb, IFLA_MAP, sizeof(map), &map)) goto nla_put_failure; } if (dev->addr_len) { if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast)) goto nla_put_failure; } attr = nla_reserve(skb, IFLA_STATS, sizeof(struct rtnl_link_stats)); if (attr == NULL) goto nla_put_failure; stats = dev_get_stats(dev, &temp); copy_rtnl_link_stats(nla_data(attr), stats); attr = nla_reserve(skb, IFLA_STATS64, sizeof(struct rtnl_link_stats64)); if (attr == NULL) goto nla_put_failure; copy_rtnl_link_stats64(nla_data(attr), stats); if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) goto nla_put_failure; if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { int i; struct nlattr *vfinfo, *vf; int num_vfs = dev_num_vf(dev->dev.parent); vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); if (!vfinfo) goto nla_put_failure; for (i = 0; i < num_vfs; i++) { struct ifla_vf_info ivi; struct ifla_vf_mac vf_mac; struct ifla_vf_vlan vf_vlan; struct ifla_vf_tx_rate vf_tx_rate; struct ifla_vf_spoofchk vf_spoofchk; /* * Not all SR-IOV capable drivers support the * spoofcheck query. Preset to -1 so the user * space tool can detect that the driver didn't * report anything. 
*/ ivi.spoofchk = -1; memset(ivi.mac, 0, sizeof(ivi.mac)); if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) break; vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = vf_spoofchk.vf = ivi.vf; memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); vf_vlan.vlan = ivi.vlan; vf_vlan.qos = ivi.qos; vf_tx_rate.rate = ivi.tx_rate; vf_spoofchk.setting = ivi.spoofchk; vf = nla_nest_start(skb, IFLA_VF_INFO); if (!vf) { nla_nest_cancel(skb, vfinfo); goto nla_put_failure; } if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate) || nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), &vf_spoofchk)) goto nla_put_failure; nla_nest_end(skb, vf); } nla_nest_end(skb, vfinfo); } if (rtnl_port_fill(skb, dev)) goto nla_put_failure; if (dev->rtnl_link_ops) { if (rtnl_link_fill(skb, dev) < 0) goto nla_put_failure; } if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC))) goto nla_put_failure; list_for_each_entry(af_ops, &rtnl_af_ops, list) { if (af_ops->fill_link_af) { struct nlattr *af; int err; if (!(af = nla_nest_start(skb, af_ops->family))) goto nla_put_failure; err = af_ops->fill_link_af(skb, dev); /* * Caller may return ENODATA to indicate that there * was no data to be dumped. This is not an error, it * means we should trim the attribute header and * continue. */ if (err == -ENODATA) nla_nest_cancel(skb, af); else if (err < 0) goto nla_put_failure; nla_nest_end(skb, af); } } nla_nest_end(skb, af_spec); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; }
Safe
[ "CWE-399" ]
linux-2.6
84d73cd3fb142bf1298a8c13fd4ca50fd2432372
4.489120169805492e+37
183
rtnl: fix info leak on RTM_GETLINK request for VF devices Initialize the mac address buffer with 0 as the driver specific function will probably not fill the whole buffer. In fact, all in-kernel drivers fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible bytes. Therefore we currently leak 26 bytes of stack memory to userland via the netlink interface. Signed-off-by: Mathias Krause <minipli@googlemail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
0
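The fix pattern in miniature: zero the whole out-parameter before the driver callback, because drivers fill only part of it (6 of the 32 mac bytes), and whatever they skip would otherwise be copied to userspace as stack garbage. The demo_* struct is a stand-in for struct ifla_vf_info.

#include <stdio.h>
#include <string.h>

#define DEMO_MAX_ADDR_LEN 32
#define DEMO_ETH_ALEN 6

struct demo_vf_info { unsigned char mac[DEMO_MAX_ADDR_LEN]; int vlan; };

/* Stand-in for a driver's ndo_get_vf_config: fills only ETH_ALEN bytes. */
static void demo_driver_get_vf_config(struct demo_vf_info *ivi)
{
    memset(ivi->mac, 0xab, DEMO_ETH_ALEN);
    ivi->vlan = 100;
}

int main(void)
{
    struct demo_vf_info ivi;
    memset(&ivi, 0, sizeof(ivi));   /* the fix: nothing leaks */
    demo_driver_get_vf_config(&ivi);
    printf("tail byte=%u\n", ivi.mac[DEMO_MAX_ADDR_LEN - 1]); /* 0 */
    return 0;
}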
int wc_X963_KDF(enum wc_HashType type, const byte* secret, word32 secretSz, const byte* sinfo, word32 sinfoSz, byte* out, word32 outSz) { int ret, i; int digestSz, copySz; int remaining = outSz; byte* outIdx; byte counter[4]; byte tmp[WC_MAX_DIGEST_SIZE]; #ifdef WOLFSSL_SMALL_STACK wc_HashAlg* hash; #else wc_HashAlg hash[1]; #endif if (secret == NULL || secretSz == 0 || out == NULL) return BAD_FUNC_ARG; /* X9.63 allowed algos only */ if (type != WC_HASH_TYPE_SHA && type != WC_HASH_TYPE_SHA224 && type != WC_HASH_TYPE_SHA256 && type != WC_HASH_TYPE_SHA384 && type != WC_HASH_TYPE_SHA512) return BAD_FUNC_ARG; digestSz = wc_HashGetDigestSize(type); if (digestSz < 0) return digestSz; #ifdef WOLFSSL_SMALL_STACK hash = (wc_HashAlg*)XMALLOC(sizeof(wc_HashAlg), NULL, DYNAMIC_TYPE_HASHES); if (hash == NULL) return MEMORY_E; #endif ret = wc_HashInit(hash, type); if (ret != 0) { #ifdef WOLFSSL_SMALL_STACK XFREE(hash, NULL, DYNAMIC_TYPE_HASHES); #endif return ret; } outIdx = out; XMEMSET(counter, 0, sizeof(counter)); for (i = 1; remaining > 0; i++) { IncrementX963KdfCounter(counter); ret = wc_HashUpdate(hash, type, secret, secretSz); if (ret != 0) { break; } ret = wc_HashUpdate(hash, type, counter, sizeof(counter)); if (ret != 0) { break; } if (sinfo) { ret = wc_HashUpdate(hash, type, sinfo, sinfoSz); if (ret != 0) { break; } } ret = wc_HashFinal(hash, type, tmp); if (ret != 0) { break; } copySz = min(remaining, digestSz); XMEMCPY(outIdx, tmp, copySz); remaining -= copySz; outIdx += copySz; } wc_HashFree(hash, type); #ifdef WOLFSSL_SMALL_STACK XFREE(hash, NULL, DYNAMIC_TYPE_HASHES); #endif return ret; }
Safe
[ "CWE-326", "CWE-203" ]
wolfssl
1de07da61f0c8e9926dcbd68119f73230dae283f
5.873738661834177e+37
88
Constant time EC map to affine for private operations For fast math, use a constant time modular inverse when mapping to affine when operation involves a private key - key gen, calc shared secret, sign.
0
rend_service_decrypt_intro( rend_intro_cell_t *intro, crypto_pk_t *key, char **err_msg_out) { char *err_msg = NULL; uint8_t key_digest[DIGEST_LEN]; char service_id[REND_SERVICE_ID_LEN_BASE32+1]; ssize_t key_len; uint8_t buf[RELAY_PAYLOAD_SIZE]; int result, status = -1; if (!intro || !key) { if (err_msg_out) { err_msg = tor_strdup("rend_service_decrypt_intro() called with bad " "parameters"); } status = -2; goto err; } /* Make sure we have ciphertext */ if (!(intro->ciphertext) || intro->ciphertext_len <= 0) { if (err_msg_out) { tor_asprintf(&err_msg, "rend_intro_cell_t was missing ciphertext for " "INTRODUCE%d cell", (int)(intro->type)); } status = -3; goto err; } /* Check that this cell actually matches this service key */ /* first DIGEST_LEN bytes of request is intro or service pk digest */ crypto_pk_get_digest(key, (char *)key_digest); if (tor_memneq(key_digest, intro->pk, DIGEST_LEN)) { if (err_msg_out) { base32_encode(service_id, REND_SERVICE_ID_LEN_BASE32 + 1, (char*)(intro->pk), REND_SERVICE_ID_LEN); tor_asprintf(&err_msg, "got an INTRODUCE%d cell for the wrong service (%s)", (int)(intro->type), escaped(service_id)); } status = -4; goto err; } /* Make sure the encrypted part is long enough to decrypt */ key_len = crypto_pk_keysize(key); if (intro->ciphertext_len < key_len) { if (err_msg_out) { tor_asprintf(&err_msg, "got an INTRODUCE%d cell with a truncated PK-encrypted " "part", (int)(intro->type)); } status = -5; goto err; } /* Decrypt the encrypted part */ note_crypto_pk_op(REND_SERVER); result = crypto_pk_private_hybrid_decrypt( key, (char *)buf, sizeof(buf), (const char *)(intro->ciphertext), intro->ciphertext_len, PK_PKCS1_OAEP_PADDING, 1); if (result < 0) { if (err_msg_out) { tor_asprintf(&err_msg, "couldn't decrypt INTRODUCE%d cell", (int)(intro->type)); } status = -6; goto err; } intro->plaintext_len = result; intro->plaintext = tor_malloc(intro->plaintext_len); memcpy(intro->plaintext, buf, intro->plaintext_len); status = 0; goto done; err: if (err_msg_out && !err_msg) { tor_asprintf(&err_msg, "unknown INTRODUCE%d error decrypting encrypted part", intro ? (int)(intro->type) : -1); } done: if (err_msg_out) *err_msg_out = err_msg; else tor_free(err_msg); /* clean up potentially sensitive material */ memwipe(buf, 0, sizeof(buf)); memwipe(key_digest, 0, sizeof(key_digest)); memwipe(service_id, 0, sizeof(service_id)); return status; }
Safe
[ "CWE-532" ]
tor
09ea89764a4d3a907808ed7d4fe42abfe64bd486
1.1290262894510291e+38
111
Fix log-uninitialized-stack bug in rend_service_intro_established. Fixes bug 23490; bugfix on 0.2.7.2-alpha. TROVE-2017-008 CVE-2017-0380
0
static int connect_logger_as( const Unit *unit, const ExecContext *context, const ExecParameters *params, ExecOutput output, const char *ident, int nfd, uid_t uid, gid_t gid) { _cleanup_close_ int fd = -1; int r; assert(context); assert(params); assert(output < _EXEC_OUTPUT_MAX); assert(ident); assert(nfd >= 0); fd = socket(AF_UNIX, SOCK_STREAM, 0); if (fd < 0) return -errno; r = connect_journal_socket(fd, uid, gid); if (r < 0) return r; if (shutdown(fd, SHUT_RD) < 0) return -errno; (void) fd_inc_sndbuf(fd, SNDBUF_SIZE); if (dprintf(fd, "%s\n" "%s\n" "%i\n" "%i\n" "%i\n" "%i\n" "%i\n", context->syslog_identifier ?: ident, params->flags & EXEC_PASS_LOG_UNIT ? unit->id : "", context->syslog_priority, !!context->syslog_level_prefix, is_syslog_output(output), is_kmsg_output(output), is_terminal_output(output)) < 0) return -errno; return move_fd(TAKE_FD(fd), nfd, false); }
Safe
[ "CWE-269" ]
systemd
f69567cbe26d09eac9d387c0be0fc32c65a83ada
1.82679281794417e+38
51
core: expose SUID/SGID restriction as new unit setting RestrictSUIDSGID=
0
flow_clear_conntrack(struct flow *flow) { flow->ct_state = 0; flow->ct_zone = 0; flow->ct_mark = 0; flow->ct_label = OVS_U128_ZERO; flow->ct_nw_proto = 0; flow->ct_tp_src = 0; flow->ct_tp_dst = 0; if (flow->dl_type == htons(ETH_TYPE_IP)) { flow->ct_nw_src = 0; flow->ct_nw_dst = 0; } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) { memset(&flow->ct_ipv6_src, 0, sizeof flow->ct_ipv6_src); memset(&flow->ct_ipv6_dst, 0, sizeof flow->ct_ipv6_dst); } }
Safe
[ "CWE-400" ]
ovs
79349cbab0b2a755140eedb91833ad2760520a83
2.888068588627383e+37
18
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <joakim.hindersson@elastx.se> Acked-by: Ilya Maximets <i.maximets@ovn.org> Signed-off-by: Flavio Leitner <fbl@sysclose.org> Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
0
BufferContinueFilterConfig() : EmptyHttpFilterConfig("buffer-continue-filter") {}
Safe
[ "CWE-416" ]
envoy
148de954ed3585d8b4298b424aa24916d0de6136
3.1796350464155124e+38
1
CVE-2021-43825 Response filter manager crash Signed-off-by: Yan Avlasov <yavlasov@google.com>
0
date_s__rfc2822(VALUE klass, VALUE str) { return date__rfc2822(str); }
Vulnerable
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
1.2766941432044304e+38
4
Add length limit option for methods that parse date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128 characters. You can configure the limit by giving a `limit` keyword argument like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
1
static TEE_Result copy_in_attrs(struct user_ta_ctx *utc, const struct utee_attribute *usr_attrs, uint32_t attr_count, TEE_Attribute *attrs) { TEE_Result res; uint32_t n; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)usr_attrs, attr_count * sizeof(struct utee_attribute)); if (res != TEE_SUCCESS) return res; for (n = 0; n < attr_count; n++) { attrs[n].attributeID = usr_attrs[n].attribute_id; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) { attrs[n].content.value.a = usr_attrs[n].a; attrs[n].content.value.b = usr_attrs[n].b; } else { uintptr_t buf = usr_attrs[n].a; size_t len = usr_attrs[n].b; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, buf, len); if (res != TEE_SUCCESS) return res; attrs[n].content.ref.buffer = (void *)buf; attrs[n].content.ref.length = len; } } return TEE_SUCCESS; }
Safe
[ "CWE-119", "CWE-787" ]
optee_os
a637243270fc1faae16de059091795c32d86e65e
2.85634030077213e+38
35
svc: check for allocation overflow in crypto calls Without checking for overflow, there is a risk of allocating a buffer smaller than anticipated, which could lead to a heap-based overflow with attacker-controlled data written outside the boundaries of the buffer. Fixes: OP-TEE-2018-0010: "Integer overflow in crypto system calls (x2)" Signed-off-by: Joakim Bech <joakim.bech@linaro.org> Tested-by: Joakim Bech <joakim.bech@linaro.org> (QEMU v7, v8) Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org> Reported-by: Riscure <inforequest@riscure.com> Reported-by: Alyssa Milburn <a.a.milburn@vu.nl> Acked-by: Etienne Carriere <etienne.carriere@linaro.org>
0
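The generic form of the check the message describes, as a small runnable sketch: verify that count * element_size cannot wrap before allocating, so the buffer can never come out smaller than the data later copied into it.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *demo_calloc_checked(size_t count, size_t elem_size)
{
    if (elem_size != 0 && count > SIZE_MAX / elem_size)
        return NULL;                 /* product would overflow: refuse */
    return malloc(count * elem_size);
}

int main(void)
{
    void *ok  = demo_calloc_checked(4, 16);
    void *bad = demo_calloc_checked(SIZE_MAX / 2, 16);
    printf("ok=%p bad=%p\n", ok, bad);   /* bad is NULL */
    free(ok);
    return 0;
}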
nautilus_file_get_existing (GFile *location) { return nautilus_file_get_internal (location, FALSE); }
Safe
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
1.6436587068772008e+38
4
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <alexl@redhat.com> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
0
add_appdata_to_deploy_data (GVariantDict *metadata_dict, GFile *deploy_dir, const char *id) { g_autofree char *appdata_xml = NULL; g_autoptr(GHashTable) names = NULL; g_autoptr(GHashTable) comments = NULL; g_autofree char *version = NULL; g_autofree char *license = NULL; g_autofree char *content_rating_type = NULL; g_autoptr(GHashTable) content_rating = NULL; appdata_xml = read_appdata_xml_from_deploy_dir (deploy_dir, id); if (appdata_xml == NULL) return; if (flatpak_parse_appdata (appdata_xml, id, &names, &comments, &version, &license, &content_rating_type, &content_rating)) { add_locale_metadata_string (metadata_dict, "appdata-name", names); add_locale_metadata_string (metadata_dict, "appdata-summary", comments); if (version) g_variant_dict_insert_value (metadata_dict, "appdata-version", g_variant_new_string (version)); if (license) g_variant_dict_insert_value (metadata_dict, "appdata-license", g_variant_new_string (license)); if (content_rating_type != NULL && content_rating != NULL) g_variant_dict_insert_value (metadata_dict, "appdata-content-rating", appdata_content_rating_to_variant (content_rating_type, content_rating)); } }
Safe
[ "CWE-74" ]
flatpak
fb473cad801c6b61706353256cab32330557374a
2.9265208686888273e+38
32
dir: Pass environment via bwrap --setenv when running apply_extra This means we can systematically pass the environment variables through bwrap(1), even if it is setuid and thus is filtering out security-sensitive environment variables. bwrap ends up being run with an empty environment instead. As with the previous commit, this regressed while fixing CVE-2021-21261. Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments" Signed-off-by: Simon McVittie <smcv@collabora.com>
0
*/ PS_SERIALIZER_DECODE_FUNC(wddx) { zval *retval; zval **ent; char *key; uint key_length; char tmp[128]; ulong idx; int hash_type; int ret; if (vallen == 0) { return SUCCESS; } MAKE_STD_ZVAL(retval); if ((ret = php_wddx_deserialize_ex((char *)val, vallen, retval)) == SUCCESS) { if (Z_TYPE_P(retval) != IS_ARRAY) { zval_ptr_dtor(&retval); return FAILURE; } for (zend_hash_internal_pointer_reset(Z_ARRVAL_P(retval)); zend_hash_get_current_data(Z_ARRVAL_P(retval), (void **) &ent) == SUCCESS; zend_hash_move_forward(Z_ARRVAL_P(retval))) { hash_type = zend_hash_get_current_key_ex(Z_ARRVAL_P(retval), &key, &key_length, &idx, 0, NULL); switch (hash_type) { case HASH_KEY_IS_LONG: key_length = slprintf(tmp, sizeof(tmp), "%ld", idx) + 1; key = tmp; /* fallthru */ case HASH_KEY_IS_STRING: php_set_session_var(key, key_length-1, *ent, NULL TSRMLS_CC); PS_ADD_VAR(key); } } } zval_ptr_dtor(&retval); return ret;
Safe
[]
php-src
1785d2b805f64eaaacf98c14c9e13107bf085ab1
1.505693515704798e+38
43
Fixed bug #70741: Session WDDX Packet Deserialization Type Confusion Vulnerability
0
static void usbip_dump_pipe(unsigned int p) { unsigned char type = usb_pipetype(p); unsigned char ep = usb_pipeendpoint(p); unsigned char dev = usb_pipedevice(p); unsigned char dir = usb_pipein(p); pr_debug("dev(%d) ep(%d) [%s] ", dev, ep, dir ? "IN" : "OUT"); switch (type) { case PIPE_ISOCHRONOUS: pr_debug("ISO\n"); break; case PIPE_INTERRUPT: pr_debug("INT\n"); break; case PIPE_CONTROL: pr_debug("CTRL\n"); break; case PIPE_BULK: pr_debug("BULK\n"); break; default: pr_debug("ERR\n"); break; } }
Safe
[ "CWE-200", "CWE-119" ]
linux
b348d7dddb6c4fbfc810b7a0626e8ec9e29f7cbb
2.1105299127967175e+38
27
USB: usbip: fix potential out-of-bounds write Fix potential out-of-bounds write to urb->transfer_buffer usbip handles network communication directly in the kernel. When receiving a packet from its peer, usbip code parses headers according to protocol. As part of this parsing urb->actual_length is filled. Since the input for urb->actual_length comes from the network, it should be treated as untrusted. Any entity controlling the network may put any value in the input and the preallocated urb->transfer_buffer may not be large enough to hold the data. Thus, the malicious entity is able to write arbitrary data to kernel memory. Signed-off-by: Ignat Korchagin <ignat.korchagin@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
0
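A sketch of the missing validation in miniature: a length parsed from the network must be clamped against the preallocated buffer before any copy. The demo_urb struct is an illustrative stand-in for the URB fields involved, not the usbip code itself.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_urb {
    uint8_t buf[64];
    uint32_t buf_len;        /* like transfer_buffer_length */
    uint32_t actual_length;  /* parsed from an untrusted header */
};

static int demo_store_payload(struct demo_urb *urb,
                              const uint8_t *payload, uint32_t from_wire)
{
    if (from_wire > urb->buf_len)   /* untrusted input: validate first */
        return -1;
    urb->actual_length = from_wire;
    memcpy(urb->buf, payload, from_wire);
    return 0;
}

int main(void)
{
    uint8_t payload[128] = { 0 };
    struct demo_urb urb = { .buf_len = sizeof(urb.buf) };
    printf("rc=%d\n", demo_store_payload(&urb, payload, 128)); /* -1 */
    return 0;
}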
static int asf_read_properties(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; time_t creation_time; avio_rl64(pb); // read object size avio_skip(pb, 16); // skip File ID avio_skip(pb, 8); // skip File size creation_time = avio_rl64(pb); if (!(asf->b_flags & ASF_FLAG_BROADCAST)) { struct tm tmbuf; struct tm *tm; char buf[64]; // creation date is in 100 ns units from 1 Jan 1601, conversion to s creation_time /= 10000000; // there are 11644473600 seconds between 1 Jan 1601 and 1 Jan 1970 creation_time -= 11644473600; tm = gmtime_r(&creation_time, &tmbuf); if (tm) { if (!strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", tm)) buf[0] = '\0'; } else buf[0] = '\0'; if (buf[0]) { if (av_dict_set(&s->metadata, "creation_time", buf, 0) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); } } asf->nb_packets = avio_rl64(pb); asf->duration = avio_rl64(pb) / 10000; // stream duration avio_skip(pb, 8); // skip send duration asf->preroll = avio_rl64(pb); asf->duration -= asf->preroll; asf->b_flags = avio_rl32(pb); avio_skip(pb, 4); // skip minimal packet size asf->packet_size = avio_rl32(pb); avio_skip(pb, 4); // skip max_bitrate return 0; }
Safe
[ "CWE-119", "CWE-787" ]
FFmpeg
2b46ebdbff1d8dec7a3d8ea280a612b91a582869
2.688745899516654e+38
42
avformat/asfdec_o: Check size_bmp more fully Fixes: integer overflow and out of array access Fixes: asfo-crash-46080c4341572a7137a162331af77f6ded45cbd7 Found-by: Paul Ch <paulcher@icloud.com> Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
0
const SELECT_LEX *first_select_lex() const { return unit.first_select(); }
Safe
[ "CWE-703" ]
server
39feab3cd31b5414aa9b428eaba915c251ac34a2
3.007012148333082e+38
1
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT If an INSERT/REPLACE SELECT statement contained an ON expression in the top-level select, and this expression used a subquery with a column reference that could not be resolved, then an attempt to resolve this reference as an outer reference caused a crash of the server. This happened because the outer context field in the Name_resolution_context structure was not set to NULL for such references. Rather, it pointed to the first element in the select_stack. Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select() method when parsing a SELECT construct. Approved by Oleksandr Byelkin <sanja@mariadb.com>
0
int uprobe_pre_sstep_notifier(struct pt_regs *regs) { if (!current->mm) return 0; if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) && (!current->utask || !current->utask->return_instances)) return 0; set_thread_flag(TIF_UPROBE); return 1; }
Safe
[ "CWE-416" ]
linux
355627f518978b5167256d27492fe0b343aaf2f2
3.3276268991755076e+38
12
mm, uprobes: fix multiple free of ->uprobes_state.xol_area Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable") made it possible to kill a forking task while it is waiting to acquire its ->mmap_sem for write, in dup_mmap(). However, it was overlooked that this introduced an new error path before the new mm_struct's ->uprobes_state.xol_area has been set to NULL after being copied from the old mm_struct by the memcpy in dup_mm(). For a task that has previously hit a uprobe tracepoint, this resulted in the 'struct xol_area' being freed multiple times if the task was killed at just the right time while forking. Fix it by setting ->uprobes_state.xol_area to NULL in mm_init() rather than in uprobe_dup_mmap(). With CONFIG_UPROBE_EVENTS=y, the bug can be reproduced by the same C program given by commit 2b7e8665b4ff ("fork: fix incorrect fput of ->exe_file causing use-after-free"), provided that a uprobe tracepoint has been set on the fork_thread() function. For example: $ gcc reproducer.c -o reproducer -lpthread $ nm reproducer | grep fork_thread 0000000000400719 t fork_thread $ echo "p $PWD/reproducer:0x719" > /sys/kernel/debug/tracing/uprobe_events $ echo 1 > /sys/kernel/debug/tracing/events/uprobes/enable $ ./reproducer Here is the use-after-free reported by KASAN: BUG: KASAN: use-after-free in uprobe_clear_state+0x1c4/0x200 Read of size 8 at addr ffff8800320a8b88 by task reproducer/198 CPU: 1 PID: 198 Comm: reproducer Not tainted 4.13.0-rc7-00015-g36fde05f3fb5 #255 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-20170228_101828-anatol 04/01/2014 Call Trace: dump_stack+0xdb/0x185 print_address_description+0x7e/0x290 kasan_report+0x23b/0x350 __asan_report_load8_noabort+0x19/0x20 uprobe_clear_state+0x1c4/0x200 mmput+0xd6/0x360 do_exit+0x740/0x1670 do_group_exit+0x13f/0x380 get_signal+0x597/0x17d0 do_signal+0x99/0x1df0 exit_to_usermode_loop+0x166/0x1e0 syscall_return_slowpath+0x258/0x2c0 entry_SYSCALL_64_fastpath+0xbc/0xbe ... Allocated by task 199: save_stack_trace+0x1b/0x20 kasan_kmalloc+0xfc/0x180 kmem_cache_alloc_trace+0xf3/0x330 __create_xol_area+0x10f/0x780 uprobe_notify_resume+0x1674/0x2210 exit_to_usermode_loop+0x150/0x1e0 prepare_exit_to_usermode+0x14b/0x180 retint_user+0x8/0x20 Freed by task 199: save_stack_trace+0x1b/0x20 kasan_slab_free+0xa8/0x1a0 kfree+0xba/0x210 uprobe_clear_state+0x151/0x200 mmput+0xd6/0x360 copy_process.part.8+0x605f/0x65d0 _do_fork+0x1a5/0xbd0 SyS_clone+0x19/0x20 do_syscall_64+0x22f/0x660 return_from_SYSCALL_64+0x0/0x7a Note: without KASAN, you may instead see a "Bad page state" message, or simply a general protection fault. Link: http://lkml.kernel.org/r/20170830033303.17927-1-ebiggers3@gmail.com Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable") Signed-off-by: Eric Biggers <ebiggers@google.com> Reported-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Oleg Nesterov <oleg@redhat.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: <stable@vger.kernel.org> [4.7+] Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
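As an illustration of the fix the message above describes — clearing the copied pointer during mm_init(), before any error path in fork can reach mmput() — here is a minimal C sketch. The helper name and its exact placement are assumptions inferred from the message, not verified against the actual patch.

/* Sketch: initialize the uprobes state together with the rest of the
 * new mm_struct, so an error path taken before uprobe_dup_mmap() runs
 * never sees the xol_area pointer memcpy'd from the parent mm. */
static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

mm_init() would then call this helper before any fallible step, making the late assignment in uprobe_dup_mmap() redundant.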
static double mp_list_set_Jxyz_v(_cimg_math_parser& mp) {
  if (!mp.imglist.width()) return cimg::type<double>::nan();
  const unsigned int ind = (unsigned int)cimg::mod((int)_mp_arg(2),mp.imglist.width());
  CImg<T> &img = mp.imglist[ind];
  const double
    ox = mp.mem[_cimg_mp_slot_x],
    oy = mp.mem[_cimg_mp_slot_y],
    oz = mp.mem[_cimg_mp_slot_z];
  const int
    x = (int)(ox + _mp_arg(3)),
    y = (int)(oy + _mp_arg(4)),
    z = (int)(oz + _mp_arg(5));
  const double *ptrs = &_mp_arg(1) + 1;
  if (x>=0 && x<img.width() && y>=0 && y<img.height() && z>=0 && z<img.depth()) {
    const unsigned int vsiz = (unsigned int)mp.opcode[6];
    T *ptrd = &img(x,y,z);
    const ulongT whd = (ulongT)img._width*img._height*img._depth;
    cimg_for_inC(img,0,vsiz - 1,c) { *ptrd = (T)*(ptrs++); ptrd+=whd; }
  }
  return cimg::type<double>::nan();
}
Safe
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
2.5500186016786298e+38
18
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
0
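The one-line message above names the hardening without showing it; a hedged sketch of that kind of validation follows. The helper is hypothetical and is not the actual CImg code.

#include <stdint.h>
#include <stdbool.h>

/* Sketch: reject a header whose encoded dimensions imply more pixel
 * data than the file actually contains, with step-by-step overflow
 * checks so the multiplication itself cannot wrap. */
static bool dims_fit_in_file(uint64_t width, uint64_t height,
                             uint64_t bytes_per_pixel, uint64_t file_size)
{
	if (width != 0 && height > UINT64_MAX / width)
		return false;
	uint64_t pixels = width * height;
	if (bytes_per_pixel != 0 && pixels > UINT64_MAX / bytes_per_pixel)
		return false;
	return pixels * bytes_per_pixel <= file_size;
}

A loader would call this right after parsing the header, before allocating buffers or reading pixel data.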
grub_disk_cache_fetch (unsigned long dev_id, unsigned long disk_id,
		       grub_disk_addr_t sector)
{
  struct grub_disk_cache *cache;
  unsigned index;

  index = grub_disk_cache_get_index (dev_id, disk_id, sector);
  cache = grub_disk_cache_table + index;
  if (cache->dev_id == dev_id && cache->disk_id == disk_id
      && cache->sector == sector)
    {
      cache->lock = 1;
#if 0
      grub_disk_cache_hits++;
#endif
      return cache->data;
    }

#if 0
  grub_disk_cache_misses++;
#endif

  return 0;
}
Safe
[ "CWE-20", "CWE-119" ]
radare2
c57997e76ec70862174a1b3b3aeb62a6f8570e85
1.1005410044456673e+38
25
Fix r2_hbo_grub_memmove ext2 crash
0
int edns_opt_list_remove(struct edns_option** list, uint16_t code)
{
	/* The list should already be allocated in a region. Freeing the
	 * allocated space in a region is not possible. We just unlink the
	 * required elements and they will be freed together with the
	 * region. */
	struct edns_option* prev;
	struct edns_option* curr;

	if(!list || !(*list)) return 0;

	/* Unlink and repoint if the element(s) are first in list */
	while(list && *list && (*list)->opt_code == code) {
		*list = (*list)->next;
	}

	if(!list || !(*list)) return 1;

	/* Unlink elements and reattach the list */
	prev = *list;
	curr = (*list)->next;
	while(curr != NULL) {
		if(curr->opt_code == code) {
			prev->next = curr->next;
			curr = curr->next;
		} else {
			prev = curr;
			curr = curr->next;
		}
	}
	return 1;
}
Safe
[ "CWE-787" ]
unbound
6c3a0b54ed8ace93d5b5ca7b8078dc87e75cd640
1.5611621984896896e+38
30
- Fix out-of-bounds write on compressed names in rdata_copy(), reported by X41 D-Sec.
0
enum Type type() const { return STRING_ITEM; }
Safe
[]
mysql-server
f7316aa0c9a3909fc7498e7b95d5d3af044a7e21
2.0370610667071146e+38
1
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))

Backport of Bug#19143243 fix.

The NAME_CONST item can return the NULL_ITEM type in case of incorrect arguments, and NULL_ITEM has special processing in the Item_func_in function. In Item_func_in::fix_length_and_dec an array of possible comparators is created. Since the NAME_CONST function has the NULL_ITEM type, the corresponding array element is empty. NAME_CONST is then wrapped in an ITEM_CACHE, which cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(), so the NULL_ITEM ends up being compared with an empty comparator.

The fix is to disable the caching of the Item_name_const item.
0
static MagickBooleanType WriteMAPImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  register unsigned char
    *q;

  size_t
    depth,
    packet_size;

  ssize_t
    y;

  unsigned char
    *colormap,
    *pixels;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Allocate colormap.
  */
  if (IsPaletteImage(image) == MagickFalse)
    (void) SetImageType(image,PaletteType,exception);
  depth=GetImageQuantumDepth(image,MagickTrue);
  packet_size=(size_t) (depth/8);
  pixels=(unsigned char *) AcquireQuantumMemory(image->columns,packet_size*
    sizeof(*pixels));
  packet_size=(size_t) (image->colors > 256 ? 6UL : 3UL);
  colormap=(unsigned char *) AcquireQuantumMemory(image->colors,packet_size*
    sizeof(*colormap));
  if ((pixels == (unsigned char *) NULL) ||
      (colormap == (unsigned char *) NULL))
    {
      if (colormap != (unsigned char *) NULL)
        colormap=(unsigned char *) RelinquishMagickMemory(colormap);
      if (pixels != (unsigned char *) NULL)
        pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Write colormap to file.
  */
  q=colormap;
  if (image->colors <= 256)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      *q++=(unsigned char) ScaleQuantumToChar(image->colormap[i].red);
      *q++=(unsigned char) ScaleQuantumToChar(image->colormap[i].green);
      *q++=(unsigned char) ScaleQuantumToChar(image->colormap[i].blue);
    }
  else
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      *q++=(unsigned char) (ScaleQuantumToShort(image->colormap[i].red) >> 8);
      *q++=(unsigned char) (ScaleQuantumToShort(image->colormap[i].red) & 0xff);
      *q++=(unsigned char) (ScaleQuantumToShort(image->colormap[i].green) >> 8);
      *q++=(unsigned char) (ScaleQuantumToShort(image->colormap[i].green) & 0xff);
      *q++=(unsigned char) (ScaleQuantumToShort(image->colormap[i].blue) >> 8);
      *q++=(unsigned char) (ScaleQuantumToShort(image->colormap[i].blue) & 0xff);
    }
  (void) WriteBlob(image,packet_size*image->colors,colormap);
  colormap=(unsigned char *) RelinquishMagickMemory(colormap);
  /*
    Write image pixels to file.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    q=pixels;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (image->colors > 256)
        *q++=(unsigned char) ((size_t) GetPixelIndex(image,p) >> 8);
      *q++=(unsigned char) GetPixelIndex(image,p);
      p+=GetPixelChannels(image);
    }
    (void) WriteBlob(image,(size_t) (q-pixels),pixels);
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  (void) CloseBlob(image);
  return(status);
}
Safe
[ "CWE-772", "CWE-401" ]
ImageMagick
1dc0ac5016f1c4d50b100a086526d6a2453a5444
3.217568656451037e+38
110
https://github.com/ImageMagick/ImageMagick/issues/573
0
static UChar32 escLeftBracket3Routine(UChar32 c) {
    c = readUnicodeCharacter();
    if (c == 0)
        return 0;
    return doDispatch(c, escLeftBracket3Dispatch);
}
Safe
[ "CWE-200" ]
mongo
035cf2afc04988b22cb67f4ebfd77e9b344cb6e0
2.526357836447666e+38
6
SERVER-25335 avoid group and other permissions when creating .dbshell history file
0
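The message above says the .dbshell history file should be created without group and other permissions; a minimal sketch of that idea in portable C follows. The helper name is hypothetical and the real MongoDB shell code path is not shown here.

#include <fcntl.h>
#include <sys/stat.h>

/* Sketch: create or open the history file so that only the owning
 * user can read or write it; mode 0600 omits all group/other bits. */
static int open_history_file(const char *path)
{
	return open(path, O_WRONLY | O_CREAT | O_APPEND, S_IRUSR | S_IWUSR);
}

Note that the mode argument only applies when the file is created; a pre-existing file with loose permissions would additionally need an explicit fchmod().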
static void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}
Safe
[ "CWE-703", "CWE-835" ]
linux
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
2.71514839880922e+38
14
Sched: fix skip_clock_update optimization

idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set.

A stale need-resched flag confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the case of waking a sleeper before it successfully deschedules by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear it unconditionally in schedule() proper instead of conditionally in put_prev_task().

Signed-off-by: Mike Galbraith <efault@gmx.de>
Reported-by: Bjoern B. Brandenburg <bbb.lst@gmail.com>
Tested-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: stable@kernel.org
LKML-Reference: <1291802742.1417.9.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
0
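The message above reasons that the clock update is only worth skipping when the resched will be followed almost immediately by a context switch. A sketch of that guard is below; the wrapping function is assumed for illustration and the exact site in the real patch may differ.

/* Sketch: set the skip flag only when the current task is still
 * queued; a task that has already been dequeued will not be switched
 * out "nearly immediately", so skipping would leave the clock stale. */
static void maybe_skip_clock_update(struct rq *rq)
{
	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}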
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
Safe
[ "CWE-362", "CWE-399" ]
linux
49d31c2f389acfe83417083e1208422b4091cd9e
1.572853053730812e+38
28
dentry name snapshots

take_dentry_name_snapshot() takes a safe snapshot of a dentry name; if the name is a short one, it gets copied into a caller-supplied structure, otherwise an extra reference to the external name is grabbed (those are never modified). In either case the pointer to the stable string is stored into the same structure.

The dentry must be held by the caller of take_dentry_name_snapshot(), but may be freely dropped afterwards - the snapshot will stay until destroyed by release_dentry_name_snapshot().

Intended use:
	struct name_snapshot s;

	take_dentry_name_snapshot(&s, dentry);
	...
	access s.name
	...
	release_dentry_name_snapshot(&s);

Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name to pass down with an event.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
0
static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
					    int *valp,
					    int write, void *data)
{
	if (write) {
		*valp = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
	} else {
		int val = *valp;
		unsigned long lval;
		if (val < 0) {
			*negp = true;
			lval = (unsigned long)-val;
		} else {
			*negp = false;
			lval = (unsigned long)val;
		}
		*lvalp = jiffies_to_msecs(lval);
	}
	return 0;
}
Safe
[ "CWE-284", "CWE-264" ]
linux
bfdc0b497faa82a0ba2f9dddcf109231dd519fcc
2.7168128105385195e+38
20
sysctl: restrict write access to dmesg_restrict

When dmesg_restrict is set to 1, CAP_SYS_ADMIN is needed to read the kernel ring buffer. But a root user without CAP_SYS_ADMIN is able to reset dmesg_restrict to 0.

This is an issue when e.g. LXC (Linux Containers) are used and the complete user space is running without CAP_SYS_ADMIN. An unprivileged and jailed root user can bypass the dmesg_restrict protection.

With this patch, writing to dmesg_restrict is only allowed when root has CAP_SYS_ADMIN.

Signed-off-by: Richard Weinberger <richard@nod.at>
Acked-by: Dan Rosenberg <drosenberg@vsecurity.com>
Acked-by: Serge E. Hallyn <serge@hallyn.com>
Cc: Eric Paris <eparis@redhat.com>
Cc: Kees Cook <kees.cook@canonical.com>
Cc: James Morris <jmorris@namei.org>
Cc: Eugene Teo <eugeneteo@kernel.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
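A sketch of the handler the message implies: refuse writes from tasks lacking CAP_SYS_ADMIN, then fall through to the ordinary integer handler. The function name and the use of proc_dointvec_minmax() are assumptions consistent with kernel sysctl conventions, not a verified copy of the patch.

/* Sketch: gate writes to dmesg_restrict on CAP_SYS_ADMIN so that an
 * unprivileged (e.g. containerized) root cannot simply reset it to 0;
 * reads still go through the normal integer handler. */
static int proc_dmesg_restrict(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}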
qf_cmdtitle(char_u *cmd)
{
    static char_u qftitle_str[IOSIZE];

    vim_snprintf((char *)qftitle_str, IOSIZE, ":%s", (char *)cmd);
    return qftitle_str;
}
Safe
[ "CWE-416" ]
vim
4f1b083be43f351bc107541e7b0c9655a5d2c0bb
2.6533460864800875e+38
7
patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set

Problem:    Crash when no errors and 'quickfixtextfunc' is set.
Solution:   Do not handle errors if there aren't any.
0
service_info *FindServiceEventURLPath(
	service_table *table, const char *eventURLPath)
{
	service_info *finger = NULL;
	uri_type parsed_url;
	uri_type parsed_url_in;

	if (table &&
		parse_uri(eventURLPath, strlen(eventURLPath), &parsed_url_in) ==
			HTTP_SUCCESS) {
		finger = table->serviceList;
		while (finger) {
			if (finger->eventURL) {
				if (parse_uri(finger->eventURL,
					    strlen(finger->eventURL),
					    &parsed_url) == HTTP_SUCCESS) {
					if (!token_cmp(&parsed_url.pathquery,
						    &parsed_url_in.pathquery)) {
						return finger;
					}
				}
			}
			finger = finger->next;
		}
	}

	return NULL;
}
Vulnerable
[ "CWE-476" ]
pupnp
c805c1de1141cb22f74c0d94dd5664bda37398e0
4.814025884342885e+37
28
Fixes #177: NULL pointer dereference in FindServiceControlURLPath. Also fixes its dual bug in FindServiceEventURLPath.
1
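The vulnerable function above checks table but feeds eventURLPath straight into strlen(), so a NULL path crashes before parse_uri() ever runs. Below is a sketch of the kind of guard the fix message suggests, as it would sit at the top of the function; the exact form of the real patch is assumed, not quoted.

/* Sketch: reject a NULL path up front, before strlen() can
 * dereference it; the rest of the lookup proceeds unchanged. */
if (!table || !eventURLPath)
	return NULL;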
static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}
Safe
[ "CWE-119" ]
linux
1be7107fbe18eed3e319a6c3e83c78254b693acb
1.2583372749373155e+38
4
mm: larger stack guard gap, between vmas

Stack guard page is a useful feature to reduce the risk of the stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having a stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX] which is 256kB, or stack strings with MAX_ARG_STRLEN.

This will become especially dangerous for suid binaries and the default no-limit stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately.

Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce the attack space a lot.

One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units).

Implementation wise, first delete all the old code for the stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode.

Instead of keeping the gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that.

Original-patch-by: Oleg Nesterov <oleg@redhat.com>
Original-patch-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
0
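The message above describes respecting the gap via vm_start_gap()/vm_end_gap() rather than a guard page inside the vma. Below is a sketch of the downward-growing case consistent with that description, assuming a global stack_guard_gap counted in bytes; treat the exact helper body as an assumption rather than a quote of the patch.

/* Sketch: the effective start of a downward-growing stack vma is
 * pushed down by the guard gap; clamp to 0 if the subtraction wraps
 * below address zero. */
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* underflow wrapped */
			vm_start = 0;
	}
	return vm_start;
}

Placement code such as arch_get_unmapped_area() would then compare candidate ranges against vm_start_gap(vma) instead of vma->vm_start, so the gap is enforced without being accounted to the vma itself.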