func
string | target
string | cwe
list | project
string | commit_id
string | hash
string | size
int64 | message
string | vul
int64 |
---|---|---|---|---|---|---|---|---|
/*
 * Format and log a diagnostic about a malformed UDP packet that was dropped.
 *
 * pszFrom    - textual description of the packet's origin (goes in the [...] prefix)
 * pszMsgType - what kind of message was bad; defaults to "message" when empty/NULL
 * pszFmt/... - printf-style detail appended after the summary
 *
 * V_vsprintf_safe bounds the expansion to buf; trailing whitespace is
 * stripped so the detail text composes cleanly with the fixed suffix.
 */
void ReallyReportBadUDPPacket( const char *pszFrom, const char *pszMsgType, const char *pszFmt, ... )
{
	char buf[ 2048 ];
	va_list ap;
	va_start( ap, pszFmt );
	V_vsprintf_safe( buf, pszFmt, ap );
	va_end( ap );
	V_StripTrailingWhitespaceASCII( buf );

	if ( !pszMsgType || !pszMsgType[0] )
		pszMsgType = "message";

	// BUG FIX: the source address belongs in the bracketed prefix and the
	// message type after "bad"; the two arguments were swapped, producing
	// lines like "[message] Ignored bad 1.2.3.4".
	SpewMsg( "[%s] Ignored bad %s. %s\n", pszFrom, pszMsgType, buf );
}
|
Safe
|
[
"CWE-703"
] |
GameNetworkingSockets
|
d944a10808891d202bb1d5e1998de6e0423af678
|
2.1437917093935218e+38
| 14 |
Tweak pointer math to avoid possible integer overflow
| 0 |
/*
 * amstar_backup -- spawn "star" to dump one DLE and translate its output
 * into Amanda message/index streams.
 *
 * NOTE(review): the return-type line appears truncated in this chunk
 * (upstream amstar declares this function "static void"); confirm against
 * the full file.
 *
 * File-descriptor convention for application-mode backup:
 *   fd 1 (dataf)  - backup image data, written by star directly
 *   fd 3 (mesgf)  - status/message stream parsed by the driver
 *   fd 4 (indexf) - index stream, used only when dle.create_index is set
 *
 * error() is assumed to log and exit, so statements following an error()
 * call execute only when the preceding check passed.
 */
amstar_backup(
    application_argument_t *argument)
{
    int dumpin;
    char *cmd = NULL;
    char *qdisk;
    char line[32768];
    amregex_t *rp;
    off_t dump_size = -1;
    char *type;
    char startchr;
    GPtrArray *argv_ptr;
    int starpid;
    int dataf = 1;
    int mesgf = 3;
    int indexf = 4;
    int outf;
    FILE *mesgstream;
    FILE *indexstream = NULL;
    FILE *outstream;
    int level;
    regex_t regex_root;
    regex_t regex_dir;
    regex_t regex_file;
    regex_t regex_special;
    regex_t regex_symbolic;
    regex_t regex_hard;
    char *option;

    mesgstream = fdopen(mesgf, "w");
    if (!mesgstream) {
	error(_("error mesgstream(%d): %s\n"), mesgf, strerror(errno));
    }

    /* Validate the required arguments; each failure is reported on the
     * message stream before error() terminates the process. */
    if (!argument->level) {
	fprintf(mesgstream, "? No level argument\n");
	error(_("No level argument"));
    }
    if (!argument->dle.disk) {
	fprintf(mesgstream, "? No disk argument\n");
	error(_("No disk argument"));
    }
    if (!argument->dle.device) {
	fprintf(mesgstream, "? No device argument\n");
	error(_("No device argument"));
    }
    if (!check_exec_for_suid(star_path, FALSE)) {
	fprintf(mesgstream, "? '%s' binary is not secure", star_path);
	error("'%s' binary is not secure", star_path);
    }
    if ((option = validate_command_options(argument))) {
	/* BUG FIX: report on the message stream like every other diagnostic
	 * in this function, not on stdout (fd 1 is the data stream here). */
	fprintf(mesgstream, "? Invalid '%s' COMMAND-OPTIONS\n", option);
	error("Invalid '%s' COMMAND-OPTIONS\n", option);
    }

    /* NOTE(review): ">= 0" treats even an empty include-list as present;
     * presumably intentional (any include-list is unsupported) -- confirm. */
    if (argument->dle.include_list &&
	argument->dle.include_list->nb_element >= 0) {
	fprintf(mesgstream, "? include-list not supported for backup\n");
    }

    level = GPOINTER_TO_INT(argument->level->data);
    qdisk = quote_string(argument->dle.disk);

    argv_ptr = amstar_build_argv(argument, level, CMD_BACKUP, mesgstream);

    cmd = g_strdup(star_path);
    starpid = pipespawnv(cmd, STDIN_PIPE|STDERR_PIPE, 1,
			 &dumpin, &dataf, &outf, (char **)argv_ptr->pdata);
    g_ptr_array_free_full(argv_ptr);

    /* close the write ends of the pipes */
    aclose(dumpin);
    aclose(dataf);

    if (argument->dle.create_index) {
	indexstream = fdopen(indexf, "w");
	if (!indexstream) {
	    error(_("error indexstream(%d): %s\n"), indexf, strerror(errno));
	}
    }
    outstream = fdopen(outf, "r");
    if (!outstream) {
	error(_("error outstream(%d): %s\n"), outf, strerror(errno));
    }

    /* Patterns classifying star's per-entry stderr output. The mangled
     * "&" characters in the original were restored here. */
    regcomp(&regex_root, "^a \\.\\/ directory$", REG_EXTENDED|REG_NEWLINE);
    regcomp(&regex_dir, "^a (.*) directory$", REG_EXTENDED|REG_NEWLINE);
    regcomp(&regex_file, "^a (.*) (.*) bytes", REG_EXTENDED|REG_NEWLINE);
    regcomp(&regex_special, "^a (.*) special", REG_EXTENDED|REG_NEWLINE);
    regcomp(&regex_symbolic, "^a (.*) symbolic", REG_EXTENDED|REG_NEWLINE);
    regcomp(&regex_hard, "^a (.*) link to", REG_EXTENDED|REG_NEWLINE);

    while ((fgets(line, sizeof(line), outstream)) != NULL) {
	regmatch_t regmatch[3];

	if (strlen(line) > 0 && line[strlen(line)-1] == '\n') {
	    /* remove trailling \n */
	    line[strlen(line)-1] = '\0';
	}

	/* Root entry: index as "/". */
	if (regexec(&regex_root, line, 1, regmatch, 0) == 0) {
	    if (argument->dle.create_index)
		fprintf(indexstream, "%s\n", "/");
	    continue;
	}

	/* Directory entries: emit "/<path>" (match must start at column 2,
	 * i.e. right after the "a " prefix). */
	if (regexec(&regex_dir, line, 3, regmatch, 0) == 0) {
	    if (argument->dle.create_index && regmatch[1].rm_so == 2) {
		line[regmatch[1].rm_eo]='\0';
		fprintf(indexstream, "/%s\n", &line[regmatch[1].rm_so]);
	    }
	    continue;
	}

	/* Regular files, specials, symlinks and hard links all index the
	 * same way. */
	if (regexec(&regex_file, line, 3, regmatch, 0) == 0 ||
	    regexec(&regex_special, line, 3, regmatch, 0) == 0 ||
	    regexec(&regex_symbolic, line, 3, regmatch, 0) == 0 ||
	    regexec(&regex_hard, line, 3, regmatch, 0) == 0) {
	    if (argument->dle.create_index && regmatch[1].rm_so == 2) {
		line[regmatch[1].rm_eo]='\0';
		fprintf(indexstream, "/%s\n", &line[regmatch[1].rm_so]);
	    }
	    continue;
	}

	/* Anything else is classified by the shared regex table. */
	for (rp = re_table; rp->regex != NULL; rp++) {
	    if (match(rp->regex, line)) {
		break;
	    }
	}
	if (rp->typ == DMP_SIZE) {
	    dump_size = (off_t)((the_num(line, rp->field)* rp->scale+1023.0)/1024.0);
	}
	switch (rp->typ) {
	case DMP_IGNORE:
	    continue;
	case DMP_NORMAL:
	    type = "normal";
	    startchr = '|';
	    break;
	case DMP_STRANGE:
	    type = "strange";
	    startchr = '?';
	    break;
	case DMP_SIZE:
	    type = "size";
	    startchr = '|';
	    break;
	case DMP_ERROR:
	    type = "error";
	    startchr = '?';
	    break;
	default:
	    type = "unknown";
	    startchr = '!';
	    break;
	}
	dbprintf("%3d: %7s(%c): %s\n", rp->srcline, type, startchr, line);
	fprintf(mesgstream,"%c %s\n", startchr, line);
    }

    fclose(outstream);
    regfree(&regex_root);
    regfree(&regex_dir);
    regfree(&regex_file);
    regfree(&regex_special);
    regfree(&regex_symbolic);
    regfree(&regex_hard);

    dbprintf(_("gnutar: %s: pid %ld\n"), cmd, (long)starpid);

    dbprintf("sendbackup: size %lld\n", (long long)dump_size);
    fprintf(mesgstream, "sendbackup: size %lld\n", (long long)dump_size);
    fclose(mesgstream);
    if (argument->dle.create_index)
	fclose(indexstream);

    amfree(qdisk);
    amfree(cmd);
}
|
Safe
|
[
"CWE-77"
] |
amanda
|
29bae2e271093cd8d06ea98f73a474c685c5a314
|
5.946154305917887e+36
| 184 |
* application-src/ambsdtar.c, application-src/amgtar.c,
application-src/amstar.c: Filter option from COMMAND-OPTIONS
* common-src/ammessage.c: Add message.
git-svn-id: https://svn.code.sf.net/p/amanda/code/amanda/trunk@6483 a8d146d6-cc15-0410-8900-af154a0219e0
| 0 |
/*
 * Atomically compare-and-exchange a guest page-table entry through its
 * user-space mapping (used to set A/D bits without a kernel mapping).
 *
 * Return value:
 *   0       - cmpxchg succeeded (the PTE still held orig_pte and was
 *             replaced with new_pte; "setnz" leaves r zero when ZF is set)
 *   1       - the PTE changed under us (cmpxchg failed; setnz stored 1)
 *   -EFAULT - user access could not be started, or the access faulted
 *             (the exception-table entry loads -EFAULT into r's register)
 *
 * NOTE(review): `index` is unused in this body; presumably kept for the
 * FNAME() template's common signature -- confirm against the other variant.
 */
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
pt_element_t __user *ptep_user, unsigned index,
pt_element_t orig_pte, pt_element_t new_pte)
{
signed char r;
if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
return -EFAULT;
#ifdef CMPXCHG
/* Native-width cmpxchg (CMPXCHG macro names the size-suffixed opcode). */
asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
"setnz %b[r]\n"
"2:"
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
: [ptr] "+m" (*ptep_user),
[old] "+a" (orig_pte),
[r] "=q" (r)
: [new] "r" (new_pte)
: "memory");
#else
/* 32-bit build updating a 64-bit PTE: cmpxchg8b takes the old value in
 * edx:eax ("+A") and the new value in ecx:ebx. */
asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
"setnz %b[r]\n"
"2:"
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
: [ptr] "+m" (*ptep_user),
[old] "+A" (orig_pte),
[r] "=q" (r)
: [new_lo] "b" ((u32)new_pte),
[new_hi] "c" ((u32)(new_pte >> 32))
: "memory");
#endif
user_access_end();
return r;
}
|
Vulnerable
|
[
"CWE-416"
] |
linux
|
f122dfe4476890d60b8c679128cd2259ec96a24c
|
8.499549290636453e+37
| 35 |
KVM: x86: Use __try_cmpxchg_user() to update guest PTE A/D bits
Use the recently introduced __try_cmpxchg_user() to update guest PTE A/D
bits instead of mapping the PTE into kernel address space. The VM_PFNMAP
path is broken as it assumes that vm_pgoff is the base pfn of the mapped
VMA range, which is conceptually wrong as vm_pgoff is the offset relative
to the file and has nothing to do with the pfn. The horrific hack worked
for the original use case (backing guest memory with /dev/mem), but leads
to accessing "random" pfns for pretty much any other VM_PFNMAP case.
Fixes: bd53cb35a3e9 ("X86/KVM: Handle PFNs outside of kernel reach when touching GPTEs")
Debugged-by: Tadeusz Struk <tadeusz.struk@linaro.org>
Tested-by: Tadeusz Struk <tadeusz.struk@linaro.org>
Reported-by: syzbot+6cde2282daa792c49ab8@syzkaller.appspotmail.com
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220202004945.2540433-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 1 |
/* DAV:expand-property REPORT handler: expand the requested properties
 * rooted at the request body and answer 207 Multi-Status unless the
 * expansion itself failed. */
int report_expand_prop(struct transaction_t *txn __attribute__((unused)),
                       struct meth_params *rparams __attribute__((unused)),
                       xmlNodePtr inroot, struct propfind_ctx *fctx)
{
    int r = expand_property(inroot, fctx, NULL, NULL, NULL,
                            fctx->lprops, fctx->root, fctx->depth);

    if (r)
        return r;

    return HTTP_MULTI_STATUS;
}
|
Safe
|
[] |
cyrus-imapd
|
6703ff881b6056e0c045a7b795ce8ba1bbb87027
|
8.687365131472915e+37
| 9 |
http_dav.c: add 'private' Cache-Control directive for cacheable responses that require authentication
| 0 |
/* Mark the mailbox's modseq as dirty and return the (possibly bumped)
 * highest modseq. Requires the index to be write-locked.
 *
 * Silent-change mode marks the index dirty but does NOT advance
 * highestmodseq. Otherwise, the first dirtying since the last flush
 * re-seeds highestmodseq from the per-name counter and stamps
 * last_updated; every call then increments highestmodseq. */
EXPORTED modseq_t mailbox_modseq_dirty(struct mailbox *mailbox)
{
    assert(mailbox_index_islocked(mailbox, 1));

    if (mailbox->silentchanges) {
        mailbox->modseq_dirty = 1;
        mailbox_index_dirty(mailbox);
        return mailbox->i.highestmodseq;
    }

    int first_dirty = !mailbox->modseq_dirty;
    if (first_dirty) {
        mailbox->i.highestmodseq = mboxname_setmodseq(mailbox_name(mailbox),
                                                      mailbox->i.highestmodseq,
                                                      mailbox_mbtype(mailbox),
                                                      /*flags*/0);
        mailbox->last_updated = time(0);
        mailbox->modseq_dirty = 1;
        mailbox_index_dirty(mailbox);
    }

    return ++mailbox->i.highestmodseq;
}
|
Safe
|
[] |
cyrus-imapd
|
1d6d15ee74e11a9bd745e80be69869e5fb8d64d6
|
4.716764339537755e+37
| 23 |
mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path()
| 0 |
/*
 * SSL new-session callback for the external session cache demo.
 *
 * Serializes `session` to DER and prepends a copy (id + DER blob) to the
 * global `first` list. Always returns 0, telling the library it retains
 * ownership of the SSL_SESSION object (we only keep copies).
 *
 * BUG FIX: the original dereferenced OPENSSL_malloc()/BUF_memdup() results
 * without checking them, and ignored i2d_SSL_SESSION() failure, so an
 * allocation failure (or a non-serializable session) crashed the server.
 * All allocations are now checked and partial state is freed on error.
 */
static int add_session(SSL *ssl, SSL_SESSION *session)
{
    simple_ssl_session *sess;
    unsigned char *p;
    int derlen;

    sess = OPENSSL_malloc(sizeof(simple_ssl_session));
    if (sess == NULL)
        goto err;
    sess->id = NULL;
    sess->der = NULL;

    /* First i2d call with NULL just measures the encoding. */
    derlen = i2d_SSL_SESSION(session, NULL);
    if (derlen <= 0)
        goto err;
    sess->derlen = derlen;

    SSL_SESSION_get_id(session, &sess->idlen);
    sess->id = BUF_memdup(SSL_SESSION_get_id(session, NULL), sess->idlen);
    sess->der = OPENSSL_malloc(sess->derlen);
    if (sess->id == NULL || sess->der == NULL)
        goto err;

    p = sess->der;
    i2d_SSL_SESSION(session, &p);

    sess->next = first;
    first = sess;
    BIO_printf(bio_err, "New session added to external cache\n");
    return 0;

 err:
    BIO_printf(bio_err, "Failed to add session to external cache\n");
    if (sess != NULL) {
        if (sess->id != NULL)
            OPENSSL_free(sess->id);
        if (sess->der != NULL)
            OPENSSL_free(sess->der);
        OPENSSL_free(sess);
    }
    return 0;
}
|
Safe
|
[] |
openssl
|
a70da5b3ecc3160368529677006801c58cb369db
|
1.6105101116340645e+38
| 21 |
New functions to check a hostname email or IP address against a
certificate. Add options to s_client, s_server and x509 utilities
to print results of checks.
| 0 |
/*
 * Append a path inside the current repository's $GIT_DIR to `sb`.
 *
 * Variadic convenience wrapper: forwards the printf-style format and
 * arguments to do_git_path() with a NULL worktree (i.e. the main
 * repository's git directory).
 */
void strbuf_git_path(struct strbuf *sb, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
do_git_path(NULL, sb, fmt, args);
va_end(args);
}
|
Safe
|
[
"CWE-125"
] |
git
|
11a9f4d807a0d71dc6eff51bb87baf4ca2cccf1d
|
1.786743402317151e+38
| 7 |
is_ntfs_dotgit: use a size_t for traversing string
We walk through the "name" string using an int, which can
wrap to a negative value and cause us to read random memory
before our array (e.g., by creating a tree with a name >2GB,
since "int" is still 32 bits even on most 64-bit platforms).
Worse, this is easy to trigger during the fsck_tree() check,
which is supposed to be protecting us from malicious
garbage.
Note one bit of trickiness in the existing code: we
sometimes assign -1 to "len" at the end of the loop, and
then rely on the "len++" in the for-loop's increment to take
it back to 0. This is still legal with a size_t, since
assigning -1 will turn into SIZE_MAX, which then wraps
around to 0 on increment.
Signed-off-by: Jeff King <peff@peff.net>
| 0 |
/*
 * getsockopt() handler for the SCTP "potentially failed" exposure setting.
 *
 * Copies an sctp_assoc_value from user space; if assoc_id resolves to an
 * association, reports that association's pf_expose, otherwise (for
 * SCTP_FUTURE_ASSOC or non-UDP-style sockets) the socket-wide default.
 * Returns 0 on success, -EINVAL for a short buffer or an unknown
 * association on a UDP-style socket, -EFAULT on copy failures.
 *
 * BUG FIX: "&params" had been mangled into a mojibake sequence in both
 * copy_from_user() and copy_to_user(); the address-of operator is restored
 * so the code compiles and copies into/out of the local struct.
 */
static int sctp_getsockopt_pf_expose(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP)) {
		retval = -EINVAL;
		goto out;
	}

	params.assoc_value = asoc ? asoc->pf_expose
				  : sctp_sk(sk)->pf_expose;

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}
|
Safe
|
[
"CWE-362"
] |
linux
|
b166a20b07382b8bc1dcee2a448715c9c2c81b5b
|
7.346915894638127e+37
| 38 |
net/sctp: fix race condition in sctp_destroy_sock
If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock
held and sp->do_auto_asconf is true, then an element is removed
from the auto_asconf_splist without any proper locking.
This can happen in the following functions:
1. In sctp_accept, if sctp_sock_migrate fails.
2. In inet_create or inet6_create, if there is a bpf program
attached to BPF_CGROUP_INET_SOCK_CREATE which denies
creation of the sctp socket.
The bug is fixed by acquiring addr_wq_lock in sctp_destroy_sock
instead of sctp_close.
This addresses CVE-2021-23133.
Reported-by: Or Cohen <orcohen@paloaltonetworks.com>
Reviewed-by: Xin Long <lucien.xin@gmail.com>
Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications")
Signed-off-by: Or Cohen <orcohen@paloaltonetworks.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/* Hash-table foreach callback: append the entry's key (a UUID string)
 * as a D-Bus string element to the message iterator carried in
 * user_data. The table's value is unused. */
static void iter_append_uuid(gpointer key, gpointer value, gpointer user_data)
{
	const char *uuid_str = key;

	dbus_message_iter_append_basic((DBusMessageIter *) user_data,
				       DBUS_TYPE_STRING, &uuid_str);
}
|
Safe
|
[
"CWE-862",
"CWE-863"
] |
bluez
|
b497b5942a8beb8f89ca1c359c54ad67ec843055
|
1.466747322383149e+38
| 7 |
adapter: Fix storing discoverable setting
discoverable setting shall only be store when changed via Discoverable
property and not when discovery client set it as that be considered
temporary just for the lifetime of the discovery.
| 0 |
/* Resolve a guest virtual address (read access, errors ignored) and
 * unprotect the shadow-paged guest page containing it. A no-op
 * returning 0 when the MMU is in direct (TDP) mode, since there are
 * no shadowed guest page tables to unprotect. */
static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;

	if (vcpu->arch.mmu->direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}
|
Safe
|
[
"CWE-476"
] |
linux
|
9f46c187e2e680ecd9de7983e4d081c3391acc76
|
2.0088741665327353e+38
| 14 |
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <kangel@zju.edu.cn>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
switch (yych) {
case 'a': goto yy42;
default: goto yy41;
}
|
Vulnerable
|
[
"CWE-787"
] |
re2c
|
039c18949190c5de5397eba504d2c75dad2ea9ca
|
3.3651686716297354e+37
| 4 |
Emit an error when repetition lower bound exceeds upper bound.
Historically this was allowed and re2c swapped the bounds. However, it
most likely indicates an error in user code and there is only a single
occurrence in the tests (and the test in an artificial one), so although
the change is backwards incompatible there is low chance of breaking
real-world code.
This fixes second test case in the bug #394 "Stack overflow due to
recursion in src/dfa/dead_rules.cc" (the actual fix is to limit DFA size
but the test also has counted repetition with swapped bounds).
| 1 |
/*
 * Parse a <source> element for a network-backed disk into `src`.
 *
 * Returns 0 on success, -1 (with a reported libvirt error) on any
 * malformed or unsupported attribute. All string temporaries are
 * g_autofree, so error returns leak nothing.
 *
 * NOTE(review): the return-type line ("static int" upstream) appears
 * truncated in this chunk; confirm against the full file.
 */
virDomainDiskSourceNetworkParse(xmlNodePtr node,
xmlXPathContextPtr ctxt,
virStorageSourcePtr src,
unsigned int flags)
{
int tlsCfgVal;
g_autofree char *protocol = NULL;
g_autofree char *haveTLS = NULL;
g_autofree char *tlsCfg = NULL;
g_autofree char *sslverifystr = NULL;
xmlNodePtr tmpnode;
/* protocol= is mandatory and must map to a known enum value. */
if (!(protocol = virXMLPropString(node, "protocol"))) {
virReportError(VIR_ERR_XML_ERROR, "%s",
_("missing network source protocol type"));
return -1;
}
if ((src->protocol = virStorageNetProtocolTypeFromString(protocol)) <= 0) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("unknown protocol type '%s'"), protocol);
return -1;
}
/* name= is required for every protocol except NBD. */
if (!(src->path = virXMLPropString(node, "name")) &&
src->protocol != VIR_STORAGE_NET_PROTOCOL_NBD) {
virReportError(VIR_ERR_XML_ERROR, "%s",
_("missing name for disk source"));
return -1;
}
if ((haveTLS = virXMLPropString(node, "tls")) &&
(src->haveTLS = virTristateBoolTypeFromString(haveTLS)) <= 0) {
virReportError(VIR_ERR_XML_ERROR,
_("unknown disk source 'tls' setting '%s'"), haveTLS);
return -1;
}
/* tlsFromConfig is internal state, accepted only when parsing a saved
 * status XML, never from user-supplied definitions. */
if ((flags & VIR_DOMAIN_DEF_PARSE_STATUS) &&
(tlsCfg = virXMLPropString(node, "tlsFromConfig"))) {
if (virStrToLong_i(tlsCfg, NULL, 10, &tlsCfgVal) < 0) {
virReportError(VIR_ERR_XML_ERROR,
_("Invalid tlsFromConfig value: %s"),
tlsCfg);
return -1;
}
src->tlsFromConfig = !!tlsCfgVal;
}
/* for historical reasons we store the volume and image name in one XML
 * element although it complicates thing when attempting to access them. */
if (src->path &&
(src->protocol == VIR_STORAGE_NET_PROTOCOL_GLUSTER ||
src->protocol == VIR_STORAGE_NET_PROTOCOL_RBD)) {
char *tmp;
/* Split "pool/image" at the first slash: the pool part becomes
 * src->volume, the remainder becomes the new src->path. */
if (!(tmp = strchr(src->path, '/')) ||
tmp == src->path) {
virReportError(VIR_ERR_XML_ERROR,
_("can't split path '%s' into pool name and image "
"name"), src->path);
return -1;
}
src->volume = src->path;
src->path = g_strdup(tmp + 1);
tmp[0] = '\0';
}
/* snapshot currently works only for remote disks */
src->snapshot = virXPathString("string(./snapshot/@name)", ctxt);
/* config file currently only works with remote disks */
src->configFile = virXPathString("string(./config/@file)", ctxt);
if (src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTP ||
src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS)
src->query = virXMLPropString(node, "query");
if (virDomainStorageNetworkParseHosts(node, &src->hosts, &src->nhosts) < 0)
return -1;
virStorageSourceNetworkAssignDefaultPorts(src);
virStorageSourceInitiatorParseXML(ctxt, &src->initiator);
/* <ssl verify=.../> applies to HTTPS/FTPS only. */
if ((src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS ||
src->protocol == VIR_STORAGE_NET_PROTOCOL_FTPS) &&
(sslverifystr = virXPathString("string(./ssl/@verify)", ctxt))) {
int verify;
if ((verify = virTristateBoolTypeFromString(sslverifystr)) < 0) {
virReportError(VIR_ERR_XML_ERROR,
_("invalid ssl verify mode '%s'"), sslverifystr);
return -1;
}
src->sslverify = verify;
}
/* <cookies> apply to HTTP(S) only. */
if ((src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTP ||
src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS) &&
(tmpnode = virXPathNode("./cookies", ctxt))) {
if (virDomainStorageNetCookiesParse(tmpnode, ctxt, src) < 0)
return -1;
}
/* readahead/timeout apply to the curl-based protocols; -2 from
 * virXPathULongLong means "present but unparsable". */
if (src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTP ||
src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS ||
src->protocol == VIR_STORAGE_NET_PROTOCOL_FTP ||
src->protocol == VIR_STORAGE_NET_PROTOCOL_FTPS) {
if (virXPathULongLong("string(./readahead/@size)", ctxt, &src->readahead) == -2 ||
virXPathULongLong("string(./timeout/@seconds)", ctxt, &src->timeout) == -2) {
virReportError(VIR_ERR_XML_ERROR, "%s",
_("invalid readahead size or timeout"));
return -1;
}
}
return 0;
}
|
Safe
|
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
|
6.891688244744144e+37
| 122 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <hhan@redhat.com>
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
| 0 |
/* Clamp a Quantum sample into the [0, MaxMap] colormap index range.
 * In HDRI builds the Quantum type is floating point, so NaN and
 * negative values collapse to 0 and in-range values are rounded to
 * the nearest integer; otherwise the value converts directly. */
static inline unsigned int ScaleQuantumToMap(const Quantum quantum)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  return(quantum >= (Quantum) MaxMap ? (unsigned int) MaxMap :
    (unsigned int) quantum);
#else
  if (quantum >= (Quantum) MaxMap)
    return((unsigned int) MaxMap);
  if ((IsNaN(quantum) != MagickFalse) || (quantum <= 0.0))
    return(0U);
  return((unsigned int) (quantum+0.5));
#endif
}
|
Safe
|
[
"CWE-190"
] |
ImageMagick
|
95d4e94e0353e503b71a53f5e6fad173c7c70c90
|
2.1592817133592173e+37
| 12 |
https://github.com/ImageMagick/ImageMagick/issues/1751
| 0 |
/* Forward an fsync request to the stacked filesystem's handler.
 * Installs the filesystem's private data into the fuse context first,
 * then returns the handler's result, or -ENOSYS when the operation is
 * not implemented. */
int fuse_fs_fsync(struct fuse_fs *fs, const char *path, int datasync,
		  struct fuse_file_info *fi)
{
	fuse_get_context()->private_data = fs->user_data;

	if (!fs->op.fsync)
		return -ENOSYS;

	return fs->op.fsync(path, datasync, fi);
}
|
Safe
|
[] |
ntfs-3g
|
fb28eef6f1c26170566187c1ab7dc913a13ea43c
|
2.8180738528334173e+38
| 9 |
Hardened the checking of directory offset requested by a readdir
When asked for the next directory entries, make sure the chunk offset
is within valid values, otherwise return no more entries in chunk.
| 0 |
/*
 * has_column_privilege_id_name
 *	SQL-callable: does the current user hold the named privilege on
 *	the given table OID / column name?  Returns NULL (rather than
 *	raising) when the column does not exist or is dropped.
 *
 * NOTE(review): the return-type line ("Datum" upstream) appears
 * truncated in this chunk; confirm against the full file.
 */
has_column_privilege_id_name(PG_FUNCTION_ARGS)
{
	Oid			tableoid = PG_GETARG_OID(0);
	text	   *column = PG_GETARG_TEXT_P(1);
	text	   *priv_type_text = PG_GETARG_TEXT_P(2);
	int			privresult;

	privresult = column_privilege_check(tableoid,
										convert_column_name(tableoid, column),
										GetUserId(),
										convert_column_priv_string(priv_type_text));
	if (privresult < 0)
		PG_RETURN_NULL();

	PG_RETURN_BOOL(privresult);
}
|
Safe
|
[
"CWE-264"
] |
postgres
|
fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0
|
4.275395322479189e+37
| 19 |
Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060
| 0 |
/* Convert a PE version-info StringTable into an Sdb namespace:
 * "key" holds the base64-encoded UTF-16 language/codepage key, and each
 * child String becomes a sub-namespace named "string<N>".
 * Returns NULL on a NULL input or allocation/encoding failure. */
static Sdb* Pe_r_bin_store_string_table(StringTable* stringTable) {
	char key[20];
	char* encodedKey;
	int i;
	Sdb* sdb;

	if (!stringTable) {
		return NULL;
	}
	sdb = sdb_new0 ();
	if (!sdb) {
		return NULL;
	}
	encodedKey = sdb_encode ((unsigned char*) stringTable->szKey, EIGHT_HEX_DIG_UTF_16_LEN);
	if (!encodedKey) {
		sdb_free (sdb);
		return NULL;
	}
	sdb_set (sdb, "key", encodedKey, 0);
	free (encodedKey);
	for (i = 0; i < stringTable->numOfChildren; i++) {
		snprintf (key, sizeof (key), "string%d", i);
		sdb_ns_set (sdb, key, Pe_r_bin_store_string (stringTable->Children[i]));
	}
	return sdb;
}
|
Safe
|
[
"CWE-400",
"CWE-703"
] |
radare2
|
634b886e84a5c568d243e744becc6b3223e089cf
|
2.1511088704361862e+38
| 25 |
Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash
* Reported by lazymio
* Reproducer: AAA4AAAAAB4=
| 0 |
/*
 * printf to `file` and, when --tee is active (opt_outfile), duplicate the
 * output to OUTFILE.
 *
 * On Windows, console output goes through my_win_console_vfprintf so the
 * configured character set is honored; redirected streams use plain
 * vfprintf. va_start is issued a second time for the tee copy because a
 * va_list cannot be reused after vfprintf consumes it.
 */
void tee_fprintf(FILE *file, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
#ifdef __WIN__
if (my_win_is_console_cached(file))
my_win_console_vfprintf(charset_info, fmt, args);
else
#endif
(void) vfprintf(file, fmt, args);
va_end(args);
if (opt_outfile)
{
va_start(args, fmt);
(void) vfprintf(OUTFILE, fmt, args);
va_end(args);
}
}
|
Safe
|
[
"CWE-319"
] |
mysql-server
|
0002e1380d5f8c113b6bce91f2cf3f75136fd7c7
|
3.245867903069387e+37
| 20 |
BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec)
| 0 |
//
// EFI_HII_IMAGE_PROTOCOL.NewImage(): add a 24-bit image block for Image to
// the package list's image package (creating the package if absent) and
// return the new image's id in *ImageId.
//
// BUG FIX: three overflow-guard error paths returned EFI_OUT_OF_RESOURCES
// after EfiAcquireLock (&mHiiDatabaseLock) without releasing the lock,
// which would wedge every subsequent HII database operation. Those paths
// now release the lock first, matching this function's other error exits.
//
// NOTE(review): the return-type/calling-convention line ("EFI_STATUS
// EFIAPI" upstream) appears truncated in this chunk; confirm against the
// full file.
//
HiiNewImage (
  IN  CONST EFI_HII_IMAGE_PROTOCOL *This,
  IN  EFI_HII_HANDLE               PackageList,
  OUT EFI_IMAGE_ID                 *ImageId,
  IN  CONST EFI_IMAGE_INPUT        *Image
  )
{
  HII_DATABASE_PRIVATE_DATA           *Private;
  HII_DATABASE_PACKAGE_LIST_INSTANCE  *PackageListNode;
  HII_IMAGE_PACKAGE_INSTANCE          *ImagePackage;
  EFI_HII_IMAGE_BLOCK                 *ImageBlocks;
  UINT32                              NewBlockSize;

  if (This == NULL || ImageId == NULL || Image == NULL || Image->Bitmap == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  Private = HII_IMAGE_DATABASE_PRIVATE_DATA_FROM_THIS (This);
  PackageListNode = LocatePackageList (&Private->DatabaseList, PackageList);
  if (PackageListNode == NULL) {
    return EFI_NOT_FOUND;
  }

  EfiAcquireLock (&mHiiDatabaseLock);

  //
  // Calcuate the size of new image.
  // Make sure the size doesn't overflow UINT32.
  // Note: 24Bit BMP occpuies 3 bytes per pixel.
  //
  NewBlockSize = (UINT32)Image->Width * Image->Height;
  if (NewBlockSize > (MAX_UINT32 - (sizeof (EFI_HII_IIBT_IMAGE_24BIT_BLOCK) - sizeof (EFI_HII_RGB_PIXEL))) / 3) {
    //
    // FIX: release the database lock before bailing out.
    //
    EfiReleaseLock (&mHiiDatabaseLock);
    return EFI_OUT_OF_RESOURCES;
  }
  NewBlockSize = NewBlockSize * 3 + (sizeof (EFI_HII_IIBT_IMAGE_24BIT_BLOCK) - sizeof (EFI_HII_RGB_PIXEL));

  //
  // Get the image package in the package list,
  // or create a new image package if image package does not exist.
  //
  if (PackageListNode->ImagePkg != NULL) {
    ImagePackage = PackageListNode->ImagePkg;

    //
    // Output the image id of the incoming image being inserted, which is the
    // image id of the EFI_HII_IIBT_END block of old image package.
    //
    *ImageId = 0;
    GetImageIdOrAddress (ImagePackage->ImageBlock, ImageId);

    //
    // Update the package's image block by appending the new block to the end.
    //

    //
    // Make sure the final package length doesn't overflow.
    // Length of the package header is represented using 24 bits. So MAX length is MAX_UINT24.
    //
    if (NewBlockSize > MAX_UINT24 - ImagePackage->ImagePkgHdr.Header.Length) {
      //
      // FIX: release the database lock before bailing out.
      //
      EfiReleaseLock (&mHiiDatabaseLock);
      return EFI_OUT_OF_RESOURCES;
    }
    //
    // Because ImagePackage->ImageBlockSize < ImagePackage->ImagePkgHdr.Header.Length,
    // So (ImagePackage->ImageBlockSize + NewBlockSize) <= MAX_UINT24
    //
    ImageBlocks = AllocatePool (ImagePackage->ImageBlockSize + NewBlockSize);
    if (ImageBlocks == NULL) {
      EfiReleaseLock (&mHiiDatabaseLock);
      return EFI_OUT_OF_RESOURCES;
    }
    //
    // Copy the original content.
    //
    CopyMem (
      ImageBlocks,
      ImagePackage->ImageBlock,
      ImagePackage->ImageBlockSize - sizeof (EFI_HII_IIBT_END_BLOCK)
      );
    FreePool (ImagePackage->ImageBlock);
    ImagePackage->ImageBlock = ImageBlocks;

    //
    // Point to the very last block.
    //
    ImageBlocks = (EFI_HII_IMAGE_BLOCK *) (
                    (UINT8 *) ImageBlocks + ImagePackage->ImageBlockSize - sizeof (EFI_HII_IIBT_END_BLOCK)
                    );
    //
    // Update the length record.
    //
    ImagePackage->ImageBlockSize += NewBlockSize;
    ImagePackage->ImagePkgHdr.Header.Length += NewBlockSize;
    PackageListNode->PackageListHdr.PackageLength += NewBlockSize;
  } else {
    //
    // Make sure the final package length doesn't overflow.
    // Length of the package header is represented using 24 bits. So MAX length is MAX_UINT24.
    //
    if (NewBlockSize > MAX_UINT24 - (sizeof (EFI_HII_IMAGE_PACKAGE_HDR) + sizeof (EFI_HII_IIBT_END_BLOCK))) {
      //
      // FIX: release the database lock before bailing out.
      //
      EfiReleaseLock (&mHiiDatabaseLock);
      return EFI_OUT_OF_RESOURCES;
    }
    //
    // The specified package list does not contain image package.
    // Create one to add this image block.
    //
    ImagePackage = (HII_IMAGE_PACKAGE_INSTANCE *) AllocateZeroPool (sizeof (HII_IMAGE_PACKAGE_INSTANCE));
    if (ImagePackage == NULL) {
      EfiReleaseLock (&mHiiDatabaseLock);
      return EFI_OUT_OF_RESOURCES;
    }
    //
    // Output the image id of the incoming image being inserted, which is the
    // first image block so that id is initially to one.
    //
    *ImageId = 1;
    //
    // Fill in image package header.
    //
    ImagePackage->ImagePkgHdr.Header.Length = sizeof (EFI_HII_IMAGE_PACKAGE_HDR) + NewBlockSize + sizeof (EFI_HII_IIBT_END_BLOCK);
    ImagePackage->ImagePkgHdr.Header.Type = EFI_HII_PACKAGE_IMAGES;
    ImagePackage->ImagePkgHdr.ImageInfoOffset = sizeof (EFI_HII_IMAGE_PACKAGE_HDR);
    ImagePackage->ImagePkgHdr.PaletteInfoOffset = 0;

    //
    // Fill in palette info.
    //
    ImagePackage->PaletteBlock = NULL;
    ImagePackage->PaletteInfoSize = 0;

    //
    // Fill in image blocks.
    //
    ImagePackage->ImageBlockSize = NewBlockSize + sizeof (EFI_HII_IIBT_END_BLOCK);
    ImagePackage->ImageBlock = AllocateZeroPool (NewBlockSize + sizeof (EFI_HII_IIBT_END_BLOCK));
    if (ImagePackage->ImageBlock == NULL) {
      FreePool (ImagePackage);
      EfiReleaseLock (&mHiiDatabaseLock);
      return EFI_OUT_OF_RESOURCES;
    }
    ImageBlocks = ImagePackage->ImageBlock;

    //
    // Insert this image package.
    //
    PackageListNode->ImagePkg = ImagePackage;
    PackageListNode->PackageListHdr.PackageLength += ImagePackage->ImagePkgHdr.Header.Length;
  }

  //
  // Append the new block here
  //
  if (Image->Flags == EFI_IMAGE_TRANSPARENT) {
    ImageBlocks->BlockType = EFI_HII_IIBT_IMAGE_24BIT_TRANS;
  } else {
    ImageBlocks->BlockType = EFI_HII_IIBT_IMAGE_24BIT;
  }
  WriteUnaligned16 ((VOID *) &((EFI_HII_IIBT_IMAGE_24BIT_BLOCK *) ImageBlocks)->Bitmap.Width, Image->Width);
  WriteUnaligned16 ((VOID *) &((EFI_HII_IIBT_IMAGE_24BIT_BLOCK *) ImageBlocks)->Bitmap.Height, Image->Height);
  CopyGopToRgbPixel (((EFI_HII_IIBT_IMAGE_24BIT_BLOCK *) ImageBlocks)->Bitmap.Bitmap, Image->Bitmap, (UINT32) Image->Width * Image->Height);

  //
  // Append the block end
  //
  ImageBlocks = (EFI_HII_IMAGE_BLOCK *) ((UINT8 *) ImageBlocks + NewBlockSize);
  ImageBlocks->BlockType = EFI_HII_IIBT_END;

  //
  // Check whether need to get the contents of HiiDataBase.
  // Only after ReadyToBoot to do the export.
  //
  if (gExportAfterReadyToBoot) {
    HiiGetDatabaseInfo(&Private->HiiDatabase);
  }

  EfiReleaseLock (&mHiiDatabaseLock);
  return EFI_SUCCESS;
}
|
Safe
|
[
"CWE-787"
] |
edk2
|
ffe5f7a6b4e978dffbe1df228963adc914451106
|
1.1919251445868561e+38
| 179 |
MdeModulePkg/HiiDatabase: Fix potential integer overflow (CVE-2018-12181)
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=1135
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Ray Ni <ray.ni@intel.com>
Cc: Dandan Bi <dandan.bi@intel.com>
Cc: Hao A Wu <hao.a.wu@intel.com>
Reviewed-by: Hao Wu <hao.a.wu@intel.com>
Reviewed-by: Jian J Wang <jian.j.wang@intel.com>
| 0 |
/*
 * Detach an irqfd from its resampler and tear the resampler down when it
 * was the last user.
 *
 * NOTE(review): the return-type line ("static void" upstream) appears
 * truncated in this chunk; confirm against the full file.
 *
 * Ordering matters here: the irqfd is unlinked with list_del_rcu() and
 * synchronize_srcu(&kvm->irq_srcu) waits for in-flight readers before the
 * emptiness check, so no reader can still observe the entry when the
 * resampler is freed. All of this runs under resampler_lock, which
 * serializes concurrent attach/detach.
 */
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
struct kvm *kvm = resampler->kvm;
mutex_lock(&kvm->irqfds.resampler_lock);
list_del_rcu(&irqfd->resampler_link);
synchronize_srcu(&kvm->irq_srcu);
if (list_empty(&resampler->list)) {
list_del(&resampler->link);
kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
/* Deassert the resample GSI before freeing its bookkeeping. */
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
resampler->notifier.gsi, 0, false);
kfree(resampler);
}
mutex_unlock(&kvm->irqfds.resampler_lock);
}
|
Safe
|
[
"CWE-20",
"CWE-617"
] |
linux
|
36ae3c0a36b7456432fedce38ae2f7bd3e01a563
|
7.158719033005895e+37
| 20 |
KVM: Don't accept obviously wrong gsi values via KVM_IRQFD
We cannot add routes for gsi values >= KVM_MAX_IRQ_ROUTES -- see
kvm_set_irq_routing(). Hence, there is no sense in accepting them
via KVM_IRQFD. Prevent them from entering the system in the first
place.
Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/* Advance the buffer's read position past any leading run of characters
 * that are members of the set `s`; stops at the first byte not in the
 * set, or when the position reaches the buffer limit.
 *
 * NOTE(review): the return-type line ("void" upstream) appears truncated
 * in this chunk; confirm against the full file. */
sldns_bskipcs(sldns_buffer *buffer, const char *s)
{
	while (sldns_buffer_available_at(buffer, buffer->_position, sizeof(char))) {
		char c = (char) sldns_buffer_read_u8_at(buffer, buffer->_position);
		const char *d;
		int in_set = 0;

		for (d = s; *d != '\0'; d++) {
			if (*d == c) {
				in_set = 1;
				break;
			}
		}
		if (!in_set || buffer->_position >= buffer->_limit)
			return;
		buffer->_position += sizeof(char);
	}
}
|
Safe
|
[] |
unbound
|
05a5dc2d0d7d1c9054af48913079abebff06a5a1
|
2.9662569260936484e+38
| 21 |
- Fix out-of-bounds null-byte write in sldns_bget_token_par while
parsing type WKS, reported by Luis Merino from X41 D-Sec.
| 0 |
// Store the raw input buffer pointer for later processing.
// NOTE(review): presumably a non-owning reference (nothing here frees the
// previous pointer or takes ownership of `ib`) -- confirm against the
// class's destructor/usage.
void Buffers::SetRawInput(input_buffer* ib)
{
rawInput_ = ib;
}
|
Safe
|
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
|
9.917372848841445e+37
| 4 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
| 0 |
/*
ReadDIBImage() reads a Microsoft Windows 3.X DIB (device-independent
bitmap) from the blob attached to image_info and returns it as an Image,
or NULL / a thrown reader exception on failure.  Supports 1/4/8/16/24/32
bits per pixel, BI_RGB, BI_RLE4, BI_RLE8 and BI_BITFIELDS compression,
and the ICODIB variant with a trailing 1-bit transparency mask.

NOTE(review): this revision is flagged for a heap write overflow
(CWE-787, ImageMagick issue 1178).  The suspect area is the pixel-buffer
sizing versus the per-case reassignment of bytes_per_line below -- verify
against the upstream fix before relying on this code with untrusted input.
*/
static Image *ReadDIBImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
DIBInfo
dib_info;
Image
*image;
MagickBooleanType
status;
MemoryInfo
*pixel_info;
Quantum
index;
register ssize_t
x;
register Quantum
*q;
register ssize_t
i;
register unsigned char
*p;
size_t
bytes_per_line,
length;
ssize_t
bit,
count,
y;
unsigned char
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Determine if this a DIB file.  Only the 40-byte BITMAPINFOHEADER form
is accepted.
*/
(void) memset(&dib_info,0,sizeof(dib_info));
dib_info.size=ReadBlobLSBLong(image);
if (dib_info.size != 40)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Microsoft Windows 3.X DIB image file.
*/
dib_info.width=ReadBlobLSBSignedLong(image);
dib_info.height=ReadBlobLSBSignedLong(image);
dib_info.planes=ReadBlobLSBShort(image);
dib_info.bits_per_pixel=ReadBlobLSBShort(image);
if (dib_info.bits_per_pixel > 32)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
dib_info.compression=ReadBlobLSBLong(image);
dib_info.image_size=ReadBlobLSBLong(image);
dib_info.x_pixels=ReadBlobLSBLong(image);
dib_info.y_pixels=ReadBlobLSBLong(image);
dib_info.number_colors=ReadBlobLSBLong(image);
dib_info.colors_important=ReadBlobLSBLong(image);
if ((dib_info.bits_per_pixel != 1) && (dib_info.bits_per_pixel != 4) &&
(dib_info.bits_per_pixel != 8) && (dib_info.bits_per_pixel != 16) &&
(dib_info.bits_per_pixel != 24) && (dib_info.bits_per_pixel != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/* BI_BITFIELDS carries three channel masks after the header; note the
masks are truncated to unsigned short here. */
if ((dib_info.compression == BI_BITFIELDS) &&
((dib_info.bits_per_pixel == 16) || (dib_info.bits_per_pixel == 32)))
{
dib_info.red_mask=(unsigned short) ReadBlobLSBLong(image);
dib_info.green_mask=(unsigned short) ReadBlobLSBLong(image);
dib_info.blue_mask=(unsigned short) ReadBlobLSBLong(image);
}
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
if (dib_info.width <= 0)
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
/* negative height is legal (top-down DIB, flipped at the end) */
if (dib_info.height == 0)
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
if (dib_info.planes != 1)
ThrowReaderException(CorruptImageError,"StaticPlanesValueNotEqualToOne");
if ((dib_info.bits_per_pixel != 1) && (dib_info.bits_per_pixel != 4) &&
(dib_info.bits_per_pixel != 8) && (dib_info.bits_per_pixel != 16) &&
(dib_info.bits_per_pixel != 24) && (dib_info.bits_per_pixel != 32))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if ((dib_info.bits_per_pixel < 16) &&
(dib_info.number_colors > (unsigned int) (1UL << dib_info.bits_per_pixel)))
ThrowReaderException(CorruptImageError,"UnrecognizedNumberOfColors");
/* compression/depth pairings: RLE8 needs 8bpp, RLE4 needs 4bpp,
BI_BITFIELDS needs >=16bpp */
if ((dib_info.compression == 1) && (dib_info.bits_per_pixel != 8))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if ((dib_info.compression == 2) && (dib_info.bits_per_pixel != 4))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if ((dib_info.compression == 3) && (dib_info.bits_per_pixel < 16))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
switch (dib_info.compression)
{
case BI_RGB:
case BI_RLE8:
case BI_RLE4:
case BI_BITFIELDS:
break;
case BI_JPEG:
ThrowReaderException(CoderError,"JPEGCompressNotSupported");
case BI_PNG:
ThrowReaderException(CoderError,"PNGCompressNotSupported");
default:
ThrowReaderException(CorruptImageError,"UnrecognizedImageCompression");
}
image->columns=(size_t) MagickAbsoluteValue((ssize_t) dib_info.width);
image->rows=(size_t) MagickAbsoluteValue((ssize_t) dib_info.height);
image->depth=8;
image->alpha_trait=dib_info.bits_per_pixel == 32 ? BlendPixelTrait :
UndefinedPixelTrait;
if ((dib_info.number_colors > 256) || (dib_info.colors_important > 256))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((dib_info.number_colors != 0) || (dib_info.bits_per_pixel < 16))
{
size_t
one;
image->storage_class=PseudoClass;
image->colors=dib_info.number_colors;
one=1;
if (image->colors == 0)
image->colors=one << dib_info.bits_per_pixel;
}
/* honor an image_info->size request by shrinking (never growing) the
target geometry */
if (image_info->size)
{
RectangleInfo
geometry;
MagickStatusType
flags;
flags=ParseAbsoluteGeometry(image_info->size,&geometry);
if (flags & WidthValue)
if ((geometry.width != 0) && (geometry.width < image->columns))
image->columns=geometry.width;
if (flags & HeightValue)
if ((geometry.height != 0) && (geometry.height < image->rows))
image->rows=geometry.height;
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
if (image->storage_class == PseudoClass)
{
size_t
packet_size;
unsigned char
*dib_colormap;
/*
Read DIB raster colormap.  Entries are stored as 4-byte BGRx
quadruplets.
*/
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
length=(size_t) image->colors;
dib_colormap=(unsigned char *) AcquireQuantumMemory(length,
4*sizeof(*dib_colormap));
if (dib_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
packet_size=4;
count=ReadBlob(image,packet_size*image->colors,dib_colormap);
if (count != (ssize_t) (packet_size*image->colors))
{
dib_colormap=(unsigned char *) RelinquishMagickMemory(dib_colormap);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
p=dib_colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(*p++);
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(*p++);
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(*p++);
if (packet_size == 4)
p++;
}
dib_colormap=(unsigned char *) RelinquishMagickMemory(dib_colormap);
}
/*
Read image data.  Scanlines are padded to 32-bit boundaries; the pixel
buffer is over-allocated per row (MagickMax below) to give the RLE
decoder headroom.
NOTE(review): the per-row size chosen here must dominate every later
reassignment of bytes_per_line (cases 8 and 16 below) -- this is the
area implicated in the CWE-787 report; confirm against the fixed
upstream revision.
*/
if (dib_info.compression == BI_RLE4)
dib_info.bits_per_pixel<<=1;
bytes_per_line=4*((image->columns*dib_info.bits_per_pixel+31)/32);
length=bytes_per_line*image->rows;
pixel_info=AcquireVirtualMemory((size_t) image->rows,MagickMax(
bytes_per_line,image->columns+256UL)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if ((dib_info.compression == BI_RGB) ||
(dib_info.compression == BI_BITFIELDS))
{
count=ReadBlob(image,length,pixels);
if (count != (ssize_t) (length))
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
}
else
{
/*
Convert run-length encoded raster pixels.
*/
status=DecodeImage(image,dib_info.compression ? MagickTrue : MagickFalse,
pixels,image->columns*image->rows);
if (status == MagickFalse)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnableToRunlengthDecodeImage");
}
}
/*
Initialize image structure.
*/
image->units=PixelsPerCentimeterResolution;
image->resolution.x=(double) dib_info.x_pixels/100.0;
image->resolution.y=(double) dib_info.y_pixels/100.0;
/*
Convert DIB raster image to pixel packets.  DIB rows are stored
bottom-up, hence the reverse y iteration everywhere below.
*/
switch (dib_info.bits_per_pixel)
{
case 1:
{
/*
Convert bitmap scanline: 8 pixels per byte, MSB first.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
for (bit=0; bit < 8; bit++)
{
index=(Quantum) ((*p) & (0x80 >> bit) ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=0; bit < (ssize_t) (image->columns % 8); bit++)
{
index=(Quantum) ((*p) & (0x80 >> bit) ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
image->rows-y-1,image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 4:
{
/*
Convert PseudoColor scanline: two pixels per byte, high nibble
first.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) (*p >> 4) &
0xf,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) *p & 0xf,
exception);
SetPixelIndex(image,index,q);
p++;
q+=GetPixelChannels(image);
}
if ((image->columns % 2) != 0)
{
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) (*p >> 4) &
0xf,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
image->rows-y-1,image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 8:
{
/*
Convert PseudoColor scanline.  RLE output is unpadded, so the
stride becomes exactly image->columns.
*/
if ((dib_info.compression == BI_RLE8) ||
(dib_info.compression == BI_RLE4))
bytes_per_line=image->columns;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) *p,exception);
SetPixelIndex(image,index,q);
p++;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
image->rows-y-1,image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 16:
{
unsigned short
word;
/*
Convert PseudoColor scanline.  red_mask==0 selects 5-5-5 layout,
otherwise 5-6-5.
NOTE(review): the BI_RLE8 stride reassignment here looks
inconsistent with the 8bpp-only RLE8 validation above -- review
for the reported out-of-bounds write.
*/
image->storage_class=DirectClass;
if (dib_info.compression == BI_RLE8)
bytes_per_line=2*image->columns;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
word=(*p++);
word|=(*p++ << 8);
if (dib_info.red_mask == 0)
{
SetPixelRed(image,ScaleCharToQuantum(ScaleColor5to8(
(unsigned char) ((word >> 10) & 0x1f))),q);
SetPixelGreen(image,ScaleCharToQuantum(ScaleColor5to8(
(unsigned char) ((word >> 5) & 0x1f))),q);
SetPixelBlue(image,ScaleCharToQuantum(ScaleColor5to8(
(unsigned char) (word & 0x1f))),q);
}
else
{
SetPixelRed(image,ScaleCharToQuantum(ScaleColor5to8(
(unsigned char) ((word >> 11) & 0x1f))),q);
SetPixelGreen(image,ScaleCharToQuantum(ScaleColor6to8(
(unsigned char) ((word >> 5) & 0x3f))),q);
SetPixelBlue(image,ScaleCharToQuantum(ScaleColor5to8(
(unsigned char) (word & 0x1f))),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
image->rows-y-1,image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
case 32:
{
/*
Convert DirectColor scanline (BGR or BGRA byte order).
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
image->rows-y-1,image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
default:
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
pixel_info=RelinquishVirtualMemory(pixel_info);
if (strcmp(image_info->magick,"ICODIB") == 0)
{
int
c;
/*
Handle ICO mask: a 1-bit AND mask follows the pixel data; set
bits mark transparent pixels.  Rows are padded to 32 bits.
*/
image->storage_class=DirectClass;
image->alpha_trait=BlendPixelTrait;
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
c=ReadBlobByte(image);
for (bit=0; bit < 8; bit++)
SetPixelAlpha(image,c & (0x80 >> bit) ? TransparentAlpha :
OpaqueAlpha,q+x*GetPixelChannels(image)+bit);
}
if ((image->columns % 8) != 0)
{
c=ReadBlobByte(image);
for (bit=0; bit < (ssize_t) (image->columns % 8); bit++)
SetPixelAlpha(image,c & (0x80 >> bit) ? TransparentAlpha :
OpaqueAlpha,q+x*GetPixelChannels(image)+bit);
}
if (image->columns % 32)
for (x=0; x < (ssize_t) ((32-(image->columns % 32))/8); x++)
c=ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
if (dib_info.height < 0)
{
Image
*flipped_image;
/*
Correct image orientation: negative height means a top-down DIB.
*/
flipped_image=FlipImage(image,exception);
if (flipped_image != (Image *) NULL)
{
DuplicateBlob(flipped_image,image);
image=DestroyImage(image);
image=flipped_image;
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
|
Vulnerable
|
[
"CWE-787"
] |
ImageMagick
|
921f208c2ea3cc45847f380257f270ff424adfff
|
1.715865839406072e+38
| 524 |
https://github.com/ImageMagick/ImageMagick/issues/1178
| 1 |
/*
 * Identify whether the SU3000 device needs firmware ("cold") or is ready
 * ("warm").  This device is always reported warm (*cold = 0), so the
 * dvb-usb core never attempts a firmware download.  Always returns 0.
 */
static int su3000_identify_state(struct usb_device *udev,
struct dvb_usb_device_properties *props,
struct dvb_usb_device_description **desc,
int *cold)
{
info("%s", __func__);
*cold = 0;
return 0;
}
|
Safe
|
[
"CWE-476",
"CWE-119"
] |
linux
|
606142af57dad981b78707234cfbd15f9f7b7125
|
1.542909804718844e+38
| 10 |
[media] dw2102: don't do DMA on stack
On Kernel 4.9, WARNINGs about doing DMA on stack are hit at
the dw2102 driver: one in su3000_power_ctrl() and the other in tt_s2_4600_frontend_attach().
Both were due to the use of buffers on the stack as parameters to
dvb_usb_generic_rw() and the resulting attempt to do DMA with them.
The device was non-functional as a result.
So, switch this driver over to use a buffer within the device state
structure, as has been done with other DVB-USB drivers.
Tested with TechnoTrend TT-connect S2-4600.
[mchehab@osg.samsung.com: fixed a warning at su3000_i2c_transfer() that
state var were dereferenced before check 'd']
Signed-off-by: Jonathan McDowell <noodles@earth.li>
Cc: <stable@vger.kernel.org>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
| 0 |
/*
 * Recompute the aggregate load and compute capacity for NUMA node @nid
 * by summing over every CPU belonging to that node.  @ns is fully
 * overwritten (zeroed first) before the accumulation.
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
	int node_cpu;

	memset(ns, 0, sizeof(*ns));
	for_each_cpu(node_cpu, cpumask_of_node(nid)) {
		struct rq *node_rq = cpu_rq(node_cpu);

		ns->load += cpu_runnable_load(node_rq);
		ns->compute_capacity += capacity_of(node_cpu);
	}
}
|
Safe
|
[
"CWE-400",
"CWE-703"
] |
linux
|
de53fd7aedb100f03e5d2231cfce0e4993282425
|
9.182136622541287e+37
| 13 |
sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices
It has been observed, that highly-threaded, non-cpu-bound applications
running under cpu.cfs_quota_us constraints can hit a high percentage of
periods throttled while simultaneously not consuming the allocated
amount of quota. This use case is typical of user-interactive non-cpu
bound applications, such as those running in kubernetes or mesos when
run on multiple cpu cores.
This has been root caused to cpu-local run queue being allocated per cpu
bandwidth slices, and then not fully using that slice within the period.
At which point the slice and quota expires. This expiration of unused
slice results in applications not being able to utilize the quota for
which they are allocated.
The non-expiration of per-cpu slices was recently fixed by
'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift
condition")'. Prior to that it appears that this had been broken since
at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some
cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That
added the following conditional which resulted in slices never being
expired.
if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
/* extend local deadline, drift is bounded above by 2 ticks */
cfs_rq->runtime_expires += TICK_NSEC;
Because this was broken for nearly 5 years, and has recently been fixed
and is now being noticed by many users running kubernetes
(https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion
that the mechanisms around expiring runtime should be removed
altogether.
This allows quota already allocated to per-cpu run-queues to live longer
than the period boundary. This allows threads on runqueues that do not
use much CPU to continue to use their remaining slice over a longer
period of time than cpu.cfs_period_us. However, this helps prevent the
above condition of hitting throttling while also not fully utilizing
your cpu quota.
This theoretically allows a machine to use slightly more than its
allotted quota in some periods. This overflow would be bounded by the
remaining quota left on each per-cpu runqueueu. This is typically no
more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will
change nothing, as they should theoretically fully utilize all of their
quota in each period. For user-interactive tasks as described above this
provides a much better user/application experience as their cpu
utilization will more closely match the amount they requested when they
hit throttling. This means that cpu limits no longer strictly apply per
period for non-cpu bound applications, but that they are still accurate
over longer timeframes.
This greatly improves performance of high-thread-count, non-cpu bound
applications with low cfs_quota_us allocation on high-core-count
machines. In the case of an artificial testcase (10ms/100ms of quota on
80 CPU machine), this commit resulted in almost 30x performance
improvement, while still maintaining correct cpu quota restrictions.
That testcase is available at https://github.com/indeedeng/fibtest.
Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")
Signed-off-by: Dave Chiluk <chiluk+linux@indeed.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: John Hammond <jhammond@indeed.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kyle Anderson <kwa@yelp.com>
Cc: Gabriel Munos <gmunoz@netflix.com>
Cc: Peter Oskolkov <posk@posk.io>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Brendan Gregg <bgregg@netflix.com>
Link: https://lkml.kernel.org/r/1563900266-19734-2-git-send-email-chiluk+linux@indeed.com
| 0 |
// Default stub for tiling-pattern fills: reports the pattern as not
// handled by returning gFalse.  Output devices that implement tiling
// patterns override this method and return gTrue.
virtual GBool tilingPatternFill(GfxState * /*state*/, Object * /*str*/,
                                int /*paintType*/, Dict * /*resDict*/,
                                double * /*mat*/, double * /*bbox*/,
                                int /*x0*/, int /*y0*/, int /*x1*/, int /*y1*/,
                                double /*xStep*/, double /*yStep*/)
{
  return gFalse;
}
|
Vulnerable
|
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
|
2.106760573902143e+38
| 6 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
| 1 |
void trgt_del(GF_Box *s)
{
GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
|
9.711763124032886e+37
| 6 |
prevent dref memleak on invalid input (#1183)
| 0 |
/*
 * mp_ioctl() - ioctl dispatcher for the SB105x multiport serial driver.
 *
 * Handles three tiers of commands: (1) board/port configuration commands
 * resolved immediately with direct returns, (2) modem-status commands
 * valid even with -ENOIOCTLCMD fallthrough, and (3) commands that require
 * the per-port state lock.  Unrecognized commands are forwarded to the
 * low-level port driver via port->ops->ioctl.
 *
 * NOTE(review): several cases index mp_devs[arg] with a user-supplied
 * value and no visible bounds check -- verify arg validation happens
 * elsewhere.  User pointers (struct serial_struct *, etc.) are cast
 * directly from `arg`; presumably the callees perform the user-space
 * copies -- confirm.
 */
static int mp_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct sb_uart_state *state = tty->driver_data;
struct mp_port *info = (struct mp_port *)state->port;
int ret = -ENOIOCTLCMD;
switch (cmd) {
case TIOCSMULTIDROP:
/* set multi-drop mode enable or disable, and default operation mode is H/W mode */
if (info->port.type == PORT_16C105XA)
{
//arg &= ~0x6;
//state->port->mdmode = 0;
return set_multidrop_mode((struct sb_uart_port *)info, (unsigned int)arg);
}
ret = -ENOTSUPP;
break;
case GETDEEPFIFO:
ret = get_deep_fifo(state->port);
return ret;
case SETDEEPFIFO:
ret = set_deep_fifo(state->port,arg);
deep[state->port->line] = arg;
return ret;
case SETTTR:
/* transmit trigger level; only on 16C105X/16C105XA parts */
if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
ret = sb1054_set_register(state->port,PAGE_4,SB105X_TTR,arg);
ttr[state->port->line] = arg;
}
return ret;
case SETRTR:
/* receive trigger level */
if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
ret = sb1054_set_register(state->port,PAGE_4,SB105X_RTR,arg);
rtr[state->port->line] = arg;
}
return ret;
case GETTTR:
if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
ret = sb1054_get_register(state->port,PAGE_4,SB105X_TTR);
}
return ret;
case GETRTR:
if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
ret = sb1054_get_register(state->port,PAGE_4,SB105X_RTR);
}
return ret;
case SETFCR:
/* FIFO control register: via paged register on 105X parts,
 * otherwise written directly to the UART */
if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
ret = sb1054_set_register(state->port,PAGE_1,SB105X_FCR,arg);
}
else{
serial_out(info,2,arg);
}
return ret;
case TIOCSMDADDR:
/* set multi-drop address */
if (info->port.type == PORT_16C105XA)
{
state->port->mdmode |= MDMODE_ADDR;
return set_multidrop_addr((struct sb_uart_port *)info, (unsigned int)arg);
}
ret = -ENOTSUPP;
break;
case TIOCGMDADDR:
/* set multi-drop address */
if ((info->port.type == PORT_16C105XA) && (state->port->mdmode & MDMODE_ADDR))
{
return get_multidrop_addr((struct sb_uart_port *)info);
}
ret = -ENOTSUPP;
break;
case TIOCSENDADDR:
/* send address in multi-drop mode: drain the tx buffer, busy-wait
 * for the transmitter to go idle, then emit the address byte */
if ((info->port.type == PORT_16C105XA)
&& (state->port->mdmode & (MDMODE_ENABLE)))
{
if (mp_chars_in_buffer(tty) > 0)
{
tty_wait_until_sent(tty, 0);
}
//while ((serial_in(info, UART_LSR) & 0x60) != 0x60);
//while (sb1054_get_register(state->port, PAGE_2, SB105X_TFCR) != 0);
while ((serial_in(info, UART_LSR) & 0x60) != 0x60);
serial_out(info, UART_SCR, (int)arg);
}
break;
case TIOCGSERIAL:
ret = mp_get_info(state, (struct serial_struct *)arg);
break;
case TIOCSSERIAL:
ret = mp_set_info(state, (struct serial_struct *)arg);
break;
case TIOCSERCONFIG:
ret = mp_do_autoconfig(state);
break;
case TIOCSERGWILD: /* obsolete */
case TIOCSERSWILD: /* obsolete */
ret = 0;
break;
/* for Multiport */
case TIOCGNUMOFPORT: /* Get number of ports */
return NR_PORTS;
case TIOCGGETDEVID:
return mp_devs[arg].device_id;
case TIOCGGETREV:
return mp_devs[arg].revision;
case TIOCGGETNRPORTS:
return mp_devs[arg].nr_ports;
case TIOCGGETBDNO:
return NR_BOARD;
case TIOCGGETINTERFACE:
if (mp_devs[arg].revision == 0xc0)
{
/* for SB16C1053APCI */
return (sb1053a_get_interface(info, info->port.line));
}
else
{
return (inb(mp_devs[arg].option_reg_addr+MP_OPTR_IIR0+(state->port->line/8)));
}
case TIOCGGETPORTTYPE:
ret = get_device_type(arg);;
return ret;
case TIOCSMULTIECHO: /* set to multi-drop mode(RS422) or echo mode(RS485)*/
outb( ( inb(info->interface_config_addr) & ~0x03 ) | 0x01 ,
info->interface_config_addr);
return 0;
case TIOCSPTPNOECHO: /* set to multi-drop mode(RS422) or echo mode(RS485) */
outb( ( inb(info->interface_config_addr) & ~0x03 ) ,
info->interface_config_addr);
return 0;
}
if (ret != -ENOIOCTLCMD)
goto out;
/* second tier: modem-status commands, rejected if the port is errored */
if (tty->flags & (1 << TTY_IO_ERROR)) {
ret = -EIO;
goto out;
}
switch (cmd) {
case TIOCMIWAIT:
ret = mp_wait_modem_status(state, arg);
break;
case TIOCGICOUNT:
ret = mp_get_count(state, (struct serial_icounter_struct *)arg);
break;
}
if (ret != -ENOIOCTLCMD)
goto out;
/* third tier: commands that require the per-port state lock; anything
 * still unhandled is forwarded to the low-level driver */
MP_STATE_LOCK(state);
switch (cmd) {
case TIOCSERGETLSR: /* Get line status register */
ret = mp_get_lsr_info(state, (unsigned int *)arg);
break;
default: {
struct sb_uart_port *port = state->port;
if (port->ops->ioctl)
ret = port->ops->ioctl(port, cmd, arg);
break;
}
}
MP_STATE_UNLOCK(state);
out:
return ret;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
a8b33654b1e3b0c74d4a1fed041c9aae50b3c427
|
1.0467961756155152e+38
| 181 |
Staging: sb105x: info leak in mp_get_count()
The icount.reserved[] array isn't initialized so it leaks stack
information to userspace.
Reported-by: Nico Golde <nico@ngolde.de>
Reported-by: Fabian Yamaguchi <fabs@goesec.de>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
png_write_chunk_end(png_structrp png_ptr)
{
   png_byte crc_bytes[4];

   if (png_ptr == NULL)
      return;

#ifdef PNG_IO_STATE_SUPPORTED
   /* Inform the I/O callback that the chunk CRC is being written.
    * PNG_IO_CHUNK_CRC requires a single I/O function call.
    */
   png_ptr->io_state = PNG_IO_WRITING | PNG_IO_CHUNK_CRC;
#endif

   /* Serialize the accumulated CRC big-endian and emit it in one write. */
   png_save_uint_32(crc_bytes, png_ptr->crc);
   png_write_data(png_ptr, crc_bytes, (png_size_t)4);
}
|
Safe
|
[
"CWE-120"
] |
libpng
|
81f44665cce4cb1373f049a76f3904e981b7a766
|
1.0936028011943389e+38
| 18 |
[libpng16] Reject attempt to write over-length PLTE chunk
| 0 |
/*
 * Record a Windows error on the ldb context's error string, tagged with
 * the source location, and pass the ldb error code back to the caller.
 * When no explicit reason is supplied, the textual form of the WERROR
 * is used instead.
 */
int dsdb_werror_at(struct ldb_context *ldb, int ldb_ecode, WERROR werr,
		   const char *location, const char *func,
		   const char *reason)
{
	const char *msg = reason;

	if (msg == NULL) {
		msg = win_errstr(werr);
	}
	ldb_asprintf_errstring(ldb, "%08X: %s at %s:%s",
			       W_ERROR_V(werr), msg, location, func);
	return ldb_ecode;
}
|
Safe
|
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
|
1.595147470349866e+38
| 11 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
| 0 |
setDoubleArrayOneValue(double** vpp, double value, size_t nmemb)
{
	/* Replace *vpp with a freshly allocated array of nmemb doubles,
	 * each initialized to value.  Any previous array is released
	 * first; on allocation failure *vpp is left NULL. */
	if (*vpp)
		_TIFFfree(*vpp);
	*vpp = _TIFFmalloc(nmemb*sizeof(double));
	if (*vpp)
	{
		size_t idx;
		double* dst = (double*)*vpp;
		for (idx = 0; idx < nmemb; idx++)
			dst[idx] = value;
	}
}
|
Safe
|
[
"CWE-20"
] |
libtiff
|
3144e57770c1e4d26520d8abee750f8ac8b75490
|
7.371968510158028e+37
| 11 |
* libtiff/tif_dir.c, tif_dirread.c, tif_dirwrite.c: implement various clampings
of double to other data types to avoid undefined behaviour if the output range
isn't big enough to hold the input value.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2643
http://bugzilla.maptools.org/show_bug.cgi?id=2642
http://bugzilla.maptools.org/show_bug.cgi?id=2646
http://bugzilla.maptools.org/show_bug.cgi?id=2647
| 0 |
/*
 * isis_print_extd_ip_reach() - decode and print one IS-IS extended IP
 * reachability entry (IPv4, TLV 135, or IPv6, TLV 236) starting at tptr.
 * `ident` is a prefix string for output indentation; `afi` selects the
 * address family.  Returns the number of bytes consumed on success, or 0
 * when the captured data is truncated or the entry is malformed.  Every
 * read is preceded by an ND_TTEST2 truncation check.
 */
isis_print_extd_ip_reach(netdissect_options *ndo,
const uint8_t *tptr, const char *ident, uint16_t afi)
{
char ident_buffer[20];
uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */
u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen;
if (!ND_TTEST2(*tptr, 4))
return (0);
metric = EXTRACT_32BITS(tptr);
processed=4;
tptr+=4;
if (afi == AF_INET) {
if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */
return (0);
status_byte=*(tptr++);
/* low 6 bits of the status byte carry the IPv4 prefix length */
bit_length = status_byte&0x3f;
if (bit_length > 32) {
ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u",
ident,
bit_length));
return (0);
}
processed++;
} else if (afi == AF_INET6) {
if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */
return (0);
status_byte=*(tptr++);
bit_length=*(tptr++);
if (bit_length > 128) {
ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u",
ident,
bit_length));
return (0);
}
processed+=2;
} else
return (0); /* somebody is fooling us */
byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */
if (!ND_TTEST2(*tptr, byte_length))
return (0);
/* bit_length was validated above, so byte_length <= sizeof prefix */
memset(prefix, 0, sizeof prefix); /* clear the copy buffer */
memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */
tptr+=byte_length;
processed+=byte_length;
if (afi == AF_INET)
ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u",
ident,
ipaddr_string(ndo, prefix),
bit_length));
else if (afi == AF_INET6)
ND_PRINT((ndo, "%sIPv6 prefix: %s/%u",
ident,
ip6addr_string(ndo, prefix),
bit_length));
ND_PRINT((ndo, ", Distribution: %s, Metric: %u",
ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up",
metric));
if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte))
ND_PRINT((ndo, ", sub-TLVs present"));
else if (afi == AF_INET6)
ND_PRINT((ndo, ", %s%s",
ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal",
ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? ", sub-TLVs present" : ""));
if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte))
|| (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte))
) {
/* assume that one prefix can hold more
than one subTLV - therefore the first byte must reflect
the aggregate bytecount of the subTLVs for this prefix
*/
if (!ND_TTEST2(*tptr, 1))
return (0);
sublen=*(tptr++);
processed+=sublen+1;
ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */
/* NOTE(review): sublen-=(subtlvlen+2) below can wrap (u_int) when a
 * sub-TLV claims more bytes than remain in sublen; the ND_TTEST2
 * checks presumably bound the damage to the captured data -- verify. */
while (sublen>0) {
if (!ND_TTEST2(*tptr,2))
return (0);
subtlvtype=*(tptr++);
subtlvlen=*(tptr++);
/* prepend the indent string */
snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident);
if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer))
return(0);
tptr+=subtlvlen;
sublen-=(subtlvlen+2);
}
}
return (processed);
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
tcpdump
|
c177cb3800a9a68d79b2812f0ffcb9479abd6eb8
|
1.1524441856549975e+37
| 99 |
CVE-2017-13016/ES-IS: Fix printing of addresses in RD PDUs.
Always print the SNPA, and flag it as such; only print it as a MAC
address if it's 6 bytes long.
Identify the NET as such.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add tests using the capture files supplied by the reporter(s), modified
so the capture files won't be rejected as an invalid capture.
| 0 |
zebra_route_string(u_int zroute)
{
  /* Map a ZEBRA_ROUTE_* code to its printable name.
   * NOTE(review): assumes zroute_lookup() never returns NULL for any
   * input -- confirm it falls back to a default table entry. */
  return zroute_lookup(zroute)->string;
}
|
Safe
|
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
|
3.1485135353229127e+38
| 4 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <paul.jakma@sun.com>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
| 0 |
nfsd4_layoutget_release(union nfsd4_op_u *u)
{
	/* Release the layout-content buffer attached to a LAYOUTGET op;
	 * kfree(NULL) is a no-op, so an unset pointer is harmless. */
	kfree(u->layoutget.lg_content);
}
|
Safe
|
[
"CWE-476"
] |
linux
|
01310bb7c9c98752cc763b36532fab028e0f8f81
|
1.4290193999316805e+38
| 4 |
nfsd: COPY and CLONE operations require the saved filehandle to be set
Make sure we have a saved filehandle, otherwise we'll oops with a null
pointer dereference in nfs4_preprocess_stateid_op().
Signed-off-by: Scott Mayhew <smayhew@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
| 0 |
int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
return sysfs_merge_group(&dev->kobj,
&pm_qos_latency_tolerance_attr_group);
}
|
Safe
|
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
|
2.8819365371072018e+38
| 5 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <joe@perches.com>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
set_main_loader(PyObject *d, const char *filename, const char *loader_name)
{
PyObject *filename_obj, *bootstrap, *loader_type = NULL, *loader;
int result = 0;
filename_obj = PyUnicode_DecodeFSDefault(filename);
if (filename_obj == NULL)
return -1;
PyInterpreterState *interp = _PyInterpreterState_Get();
bootstrap = PyObject_GetAttrString(interp->importlib,
"_bootstrap_external");
if (bootstrap != NULL) {
loader_type = PyObject_GetAttrString(bootstrap, loader_name);
Py_DECREF(bootstrap);
}
if (loader_type == NULL) {
Py_DECREF(filename_obj);
return -1;
}
loader = PyObject_CallFunction(loader_type, "sN", "__main__", filename_obj);
Py_DECREF(loader_type);
if (loader == NULL) {
return -1;
}
if (PyDict_SetItemString(d, "__loader__", loader) < 0) {
result = -1;
}
Py_DECREF(loader);
return result;
}
|
Safe
|
[
"CWE-125"
] |
cpython
|
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
|
2.571806184429108e+38
| 30 |
bpo-35766: Merge typed_ast back into CPython (GH-11645)
| 0 |
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}
|
Safe
|
[
"CWE-476"
] |
linux
|
0ad646c81b2182f7fa67ec0c8c825e0ee165696d
|
1.0648728479288922e+38
| 6 |
tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <avekceeb@gmail.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
void handler::lock_shared_ha_data()
{
DBUG_ASSERT(table_share);
if (table_share->tmp_table == NO_TMP_TABLE)
mysql_mutex_lock(&table_share->LOCK_ha_data);
}
|
Safe
|
[
"CWE-416"
] |
server
|
af810407f78b7f792a9bb8c47c8c532eb3b3a758
|
2.1453325401727155e+38
| 6 |
MDEV-28098 incorrect key in "dup value" error after long unique
reset errkey after using it, so that it wouldn't affect
the next error message in the next statement
| 0 |
static int add_attribute(STACK_OF(X509_ATTRIBUTE) **sk, int nid, int atrtype,
void *value)
{
X509_ATTRIBUTE *attr = NULL;
if (*sk == NULL) {
if ((*sk = sk_X509_ATTRIBUTE_new_null()) == NULL)
return 0;
new_attrib:
if ((attr = X509_ATTRIBUTE_create(nid, atrtype, value)) == NULL)
return 0;
if (!sk_X509_ATTRIBUTE_push(*sk, attr)) {
X509_ATTRIBUTE_free(attr);
return 0;
}
} else {
int i;
for (i = 0; i < sk_X509_ATTRIBUTE_num(*sk); i++) {
attr = sk_X509_ATTRIBUTE_value(*sk, i);
if (OBJ_obj2nid(X509_ATTRIBUTE_get0_object(attr)) == nid) {
X509_ATTRIBUTE_free(attr);
attr = X509_ATTRIBUTE_create(nid, atrtype, value);
if (attr == NULL)
return 0;
if (!sk_X509_ATTRIBUTE_set(*sk, i, attr)) {
X509_ATTRIBUTE_free(attr);
return 0;
}
goto end;
}
}
goto new_attrib;
}
end:
return (1);
}
|
Safe
|
[
"CWE-327"
] |
openssl
|
631f94db0065c78181ca9ba5546ebc8bb3884b97
|
1.296293875354392e+38
| 37 |
Fix a padding oracle in PKCS7_dataDecode and CMS_decrypt_set1_pkey
An attack is simple, if the first CMS_recipientInfo is valid but the
second CMS_recipientInfo is chosen ciphertext. If the second
recipientInfo decodes to PKCS #1 v1.5 form plaintext, the correct
encryption key will be replaced by garbage, and the message cannot be
decoded, but if the RSA decryption fails, the correct encryption key is
used and the recipient will not notice the attack.
As a work around for this potential attack the length of the decrypted
key must be equal to the cipher default key length, in case the
certifiate is not given and all recipientInfo are tried out.
The old behaviour can be re-enabled in the CMS code by setting the
CMS_DEBUG_DECRYPT flag.
Reviewed-by: Matt Caswell <matt@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/9777)
(cherry picked from commit 5840ed0cd1e6487d247efbc1a04136a41d7b3a37)
| 0 |
static QString mountPoint(const RemoteFsDevice::Details &details, bool create)
{
if (details.isLocalFile()) {
return details.url.path();
}
return Utils::cacheDir(QLatin1String("mount/")+details.name, create);
}
|
Safe
|
[
"CWE-20",
"CWE-22"
] |
cantata
|
afc4f8315d3e96574925fb530a7004cc9e6ce3d3
|
1.1487902731867499e+38
| 7 |
Remove internal Samba shre mounting code, this had some privilege escalation issues, and is not well tested
| 0 |
static int auth_cmd_chk_cb(cmd_rec *cmd) {
if (auth_have_authenticated == FALSE) {
unsigned char *authd;
authd = get_param_ptr(cmd->server->conf, "authenticated", FALSE);
if (authd == NULL ||
*authd == FALSE) {
pr_response_send(R_530, _("Please login with USER and PASS"));
return FALSE;
}
auth_have_authenticated = TRUE;
}
return TRUE;
}
|
Safe
|
[
"CWE-59",
"CWE-61"
] |
proftpd
|
ecff21e0d0e84f35c299ef91d7fda088e516d4ed
|
1.2841961072069701e+38
| 17 |
Backporting recursive handling of DefaultRoot path, when AllowChrootSymlinks
is off, to 1.3.5 branch.
| 0 |
static int icccomponents(i_ctx_t * i_ctx_p, ref *space, int *n)
{
int code = 0;
ref *tempref, ICCdict;
code = array_get(imemory, space, 1, &ICCdict);
if (code < 0)
return code;
code = dict_find_string(&ICCdict, "N", &tempref);
if (code < 0)
return code;
if (code == 0)
return gs_note_error(gs_error_undefined);
if (!r_has_type(tempref, t_integer))
return gs_note_error(gs_error_typecheck);
*n = tempref->value.intval;
return 0;
}
|
Safe
|
[
"CWE-704"
] |
ghostpdl
|
548bb434e81dadcc9f71adf891a3ef5bea8e2b4e
|
2.921835674431317e+38
| 20 |
PS interpreter - add some type checking
These were 'probably' safe anyway, since they mostly treat the objects
as integers without checking, which at least can't result in a crash.
Nevertheless, we ought to check.
The return from comparedictkeys could be wrong if one of the keys had
a value which was not an array, it could incorrectly decide the two
were in fact the same.
| 0 |
ModuleExport size_t RegisterMPCImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("MPC","CACHE",
"Magick Persistent Cache image format");
entry->flags|=CoderStealthFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("MPC","MPC","Magick Persistent Cache image format");
entry->decoder=(DecodeImageHandler *) ReadMPCImage;
entry->encoder=(EncodeImageHandler *) WriteMPCImage;
entry->magick=(IsImageFormatHandler *) IsMPC;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
|
Safe
|
[
"CWE-772"
] |
ImageMagick
|
399631650b38eaf21c2f3c306b8b74e66be6a0d2
|
2.186712433875678e+38
| 17 |
https://github.com/ImageMagick/ImageMagick/issues/739
| 0 |
static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
const u8 *format, const u8 *master_key,
size_t master_keylen)
{
u8 derived_key[HASH_SIZE];
u8 digest[HASH_SIZE];
int ret;
char *p;
unsigned short len;
ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
if (ret < 0)
goto out;
len = epayload->datablob_len;
if (!format) {
p = epayload->master_desc;
len -= strlen(epayload->format) + 1;
} else
p = epayload->format;
ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
if (ret < 0)
goto out;
ret = memcmp(digest, epayload->format + epayload->datablob_len,
sizeof digest);
if (ret) {
ret = -EINVAL;
dump_hmac("datablob",
epayload->format + epayload->datablob_len,
HASH_SIZE);
dump_hmac("calc", digest, HASH_SIZE);
}
out:
return ret;
}
|
Safe
|
[
"CWE-284",
"CWE-264",
"CWE-269"
] |
linux
|
096fe9eaea40a17e125569f9e657e34cdb6d73bd
|
8.962141922118281e+37
| 36 |
KEYS: Fix handling of stored error in a negatively instantiated user key
If a user key gets negatively instantiated, an error code is cached in the
payload area. A negatively instantiated key may be then be positively
instantiated by updating it with valid data. However, the ->update key
type method must be aware that the error code may be there.
The following may be used to trigger the bug in the user key type:
keyctl request2 user user "" @u
keyctl add user user "a" @u
which manifests itself as:
BUG: unable to handle kernel paging request at 00000000ffffff8a
IP: [<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280 kernel/rcu/tree.c:3046
PGD 7cc30067 PUD 0
Oops: 0002 [#1] SMP
Modules linked in:
CPU: 3 PID: 2644 Comm: a.out Not tainted 4.3.0+ #49
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff88003ddea700 ti: ffff88003dd88000 task.ti: ffff88003dd88000
RIP: 0010:[<ffffffff810a376f>] [<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280
[<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280 kernel/rcu/tree.c:3046
RSP: 0018:ffff88003dd8bdb0 EFLAGS: 00010246
RAX: 00000000ffffff82 RBX: 0000000000000000 RCX: 0000000000000001
RDX: ffffffff81e3fe40 RSI: 0000000000000000 RDI: 00000000ffffff82
RBP: ffff88003dd8bde0 R08: ffff88007d2d2da0 R09: 0000000000000000
R10: 0000000000000000 R11: ffff88003e8073c0 R12: 00000000ffffff82
R13: ffff88003dd8be68 R14: ffff88007d027600 R15: ffff88003ddea700
FS: 0000000000b92880(0063) GS:ffff88007fd00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 00000000ffffff8a CR3: 000000007cc5f000 CR4: 00000000000006e0
Stack:
ffff88003dd8bdf0 ffffffff81160a8a 0000000000000000 00000000ffffff82
ffff88003dd8be68 ffff88007d027600 ffff88003dd8bdf0 ffffffff810a39e5
ffff88003dd8be20 ffffffff812a31ab ffff88007d027600 ffff88007d027620
Call Trace:
[<ffffffff810a39e5>] kfree_call_rcu+0x15/0x20 kernel/rcu/tree.c:3136
[<ffffffff812a31ab>] user_update+0x8b/0xb0 security/keys/user_defined.c:129
[< inline >] __key_update security/keys/key.c:730
[<ffffffff8129e5c1>] key_create_or_update+0x291/0x440 security/keys/key.c:908
[< inline >] SYSC_add_key security/keys/keyctl.c:125
[<ffffffff8129fc21>] SyS_add_key+0x101/0x1e0 security/keys/keyctl.c:60
[<ffffffff8185f617>] entry_SYSCALL_64_fastpath+0x12/0x6a arch/x86/entry/entry_64.S:185
Note the error code (-ENOKEY) in EDX.
A similar bug can be tripped by:
keyctl request2 trusted user "" @u
keyctl add trusted user "a" @u
This should also affect encrypted keys - but that has to be correctly
parameterised or it will fail with EINVAL before getting to the bit that
will crashes.
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Mimi Zohar <zohar@linux.vnet.ibm.com>
Signed-off-by: James Morris <james.l.morris@oracle.com>
| 0 |
static int hso_start_net_device(struct hso_device *hso_dev)
{
int i, result = 0;
struct hso_net *hso_net = dev2net(hso_dev);
if (!hso_net)
return -ENODEV;
/* send URBs for all read buffers */
for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
/* Prep a receive URB */
usb_fill_bulk_urb(hso_net->mux_bulk_rx_urb_pool[i],
hso_dev->usb,
usb_rcvbulkpipe(hso_dev->usb,
hso_net->in_endp->
bEndpointAddress & 0x7F),
hso_net->mux_bulk_rx_buf_pool[i],
MUX_BULK_RX_BUF_SIZE, read_bulk_callback,
hso_net);
/* Put it out there so the device can send us stuff */
result = usb_submit_urb(hso_net->mux_bulk_rx_urb_pool[i],
GFP_NOIO);
if (result)
dev_warn(&hso_dev->usb->dev,
"%s failed mux_bulk_rx_urb[%d] %d\n", __func__,
i, result);
}
return result;
}
|
Safe
|
[
"CWE-125"
] |
linux
|
5146f95df782b0ac61abde36567e718692725c89
|
1.6294442836573588e+38
| 32 |
USB: hso: Fix OOB memory access in hso_probe/hso_get_config_data
The function hso_probe reads if_num from the USB device (as an u8) and uses
it without a length check to index an array, resulting in an OOB memory read
in hso_probe or hso_get_config_data.
Add a length check for both locations and updated hso_probe to bail on
error.
This issue has been assigned CVE-2018-19985.
Reported-by: Hui Peng <benquike@gmail.com>
Reported-by: Mathias Payer <mathias.payer@nebelwelt.net>
Signed-off-by: Hui Peng <benquike@gmail.com>
Signed-off-by: Mathias Payer <mathias.payer@nebelwelt.net>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
read_channel_data (FILE *f,
PSPimage *ia,
guchar **pixels,
guint bytespp,
guint offset,
GimpDrawable *drawable,
guint32 compressed_len)
{
gint i, y, width = drawable->width, height = drawable->height;
gint npixels = width * height;
guchar *buf, *p, *q, *endq;
guchar *buf2 = NULL; /* please the compiler */
guchar runcount, byte;
z_stream zstream;
switch (ia->compression)
{
case PSP_COMP_NONE:
if (bytespp == 1)
{
if ((width % 4) == 0)
fread (pixels[0], height * width, 1, f);
else
{
for (y = 0; y < height; y++)
{
fread (pixels[y], width, 1, f);
fseek (f, 4 - (width % 4), SEEK_CUR);
}
}
}
else
{
buf = g_malloc (width);
for (y = 0; y < height; y++)
{
fread (buf, width, 1, f);
if (width % 4)
fseek (f, 4 - (width % 4), SEEK_CUR);
p = buf;
q = pixels[y] + offset;
for (i = 0; i < width; i++)
{
*q = *p++;
q += bytespp;
}
}
g_free (buf);
}
break;
case PSP_COMP_RLE:
q = pixels[0] + offset;
endq = q + npixels * bytespp;
buf = g_malloc (127);
while (q < endq)
{
p = buf;
fread (&runcount, 1, 1, f);
if (runcount > 128)
{
runcount -= 128;
fread (&byte, 1, 1, f);
memset (buf, byte, runcount);
}
else
fread (buf, runcount, 1, f);
/* prevent buffer overflow for bogus data */
runcount = MIN (runcount, endq - q);
if (bytespp == 1)
{
memmove (q, buf, runcount);
q += runcount;
}
else
{
p = buf;
for (i = 0; i < runcount; i++)
{
*q = *p++;
q += bytespp;
}
}
}
g_free (buf);
break;
case PSP_COMP_LZ77:
buf = g_malloc (compressed_len);
fread (buf, compressed_len, 1, f);
zstream.next_in = buf;
zstream.avail_in = compressed_len;
zstream.zalloc = psp_zalloc;
zstream.zfree = psp_zfree;
zstream.opaque = f;
if (inflateInit (&zstream) != Z_OK)
{
g_message ("zlib error");
return -1;
}
if (bytespp == 1)
zstream.next_out = pixels[0];
else
{
buf2 = g_malloc (npixels);
zstream.next_out = buf2;
}
zstream.avail_out = npixels;
if (inflate (&zstream, Z_FINISH) != Z_STREAM_END)
{
g_message ("zlib error");
inflateEnd (&zstream);
return -1;
}
inflateEnd (&zstream);
g_free (buf);
if (bytespp > 1)
{
p = buf2;
q = pixels[0] + offset;
for (i = 0; i < npixels; i++)
{
*q = *p++;
q += bytespp;
}
g_free (buf2);
}
break;
}
return 0;
}
|
Safe
|
[
"CWE-787"
] |
gimp
|
48ec15890e1751dede061f6d1f469b6508c13439
|
2.8331828098793226e+38
| 135 |
file-psp: fix for bogus input data. Fixes bug #639203
| 0 |
njs_string_prototype_trim(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_index_t mode)
{
uint32_t u, trim, length;
njs_int_t ret;
njs_value_t *value;
const u_char *p, *prev, *start, *end;
njs_string_prop_t string;
njs_unicode_decode_t ctx;
value = njs_argument(args, 0);
ret = njs_string_object_validate(vm, value);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
trim = 0;
njs_string_prop(&string, value);
start = string.start;
end = string.start + string.size;
if (njs_is_byte_or_ascii_string(&string)) {
if (mode & NJS_TRIM_START) {
for ( ;; ) {
if (start == end) {
goto empty;
}
if (njs_is_whitespace(*start)) {
start++;
trim++;
continue;
}
break;
}
}
if (mode & NJS_TRIM_END) {
for ( ;; ) {
if (start == end) {
goto empty;
}
end--;
if (njs_is_whitespace(*end)) {
trim++;
continue;
}
end++;
break;
}
}
} else {
/* UTF-8 string. */
if (mode & NJS_TRIM_START) {
njs_utf8_decode_init(&ctx);
for ( ;; ) {
if (start == end) {
goto empty;
}
p = start;
u = njs_utf8_decode(&ctx, &start, end);
if (njs_utf8_is_whitespace(u)) {
trim++;
continue;
}
start = p;
break;
}
}
if (mode & NJS_TRIM_END) {
prev = end;
njs_utf8_decode_init(&ctx);
for ( ;; ) {
if (start == prev) {
goto empty;
}
prev = njs_utf8_prev(prev);
p = prev;
u = njs_utf8_decode(&ctx, &p, end);
if (njs_utf8_is_whitespace(u)) {
trim++;
continue;
}
end = p;
break;
}
}
}
if (trim == 0) {
/* GC: retain. */
vm->retval = *value;
return NJS_OK;
}
length = (string.length != 0) ? string.length - trim : 0;
return njs_string_new(vm, &vm->retval, start, end - start, length);
empty:
vm->retval = njs_string_empty;
return NJS_OK;
}
|
Safe
|
[] |
njs
|
36f04a3178fcb6da8513cc3dbf35215c2a581b3f
|
2.3472227615187725e+37
| 125 |
Fixed String.prototype.replace() with byte strings.
This closes #522 issue on Github.
| 0 |
PHPAPI int php_date_initialize(php_date_obj *dateobj, /*const*/ char *time_str, int time_str_len, char *format, zval *timezone_object, int ctor TSRMLS_DC)
{
timelib_time *now;
timelib_tzinfo *tzi = NULL;
timelib_error_container *err = NULL;
int type = TIMELIB_ZONETYPE_ID, new_dst = 0;
char *new_abbr = NULL;
timelib_sll new_offset;
if (dateobj->time) {
timelib_time_dtor(dateobj->time);
}
if (format) {
dateobj->time = timelib_parse_from_format(format, time_str_len ? time_str : "", time_str_len ? time_str_len : 0, &err, DATE_TIMEZONEDB, php_date_parse_tzfile_wrapper);
} else {
dateobj->time = timelib_strtotime(time_str_len ? time_str : "now", time_str_len ? time_str_len : sizeof("now") -1, &err, DATE_TIMEZONEDB, php_date_parse_tzfile_wrapper);
}
/* update last errors and warnings */
update_errors_warnings(err TSRMLS_CC);
if (ctor && err && err->error_count) {
/* spit out the first library error message, at least */
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to parse time string (%s) at position %d (%c): %s", time_str,
err->error_messages[0].position, err->error_messages[0].character, err->error_messages[0].message);
}
if (err && err->error_count) {
timelib_time_dtor(dateobj->time);
dateobj->time = 0;
return 0;
}
if (timezone_object) {
php_timezone_obj *tzobj;
tzobj = (php_timezone_obj *) zend_object_store_get_object(timezone_object TSRMLS_CC);
switch (tzobj->type) {
case TIMELIB_ZONETYPE_ID:
tzi = tzobj->tzi.tz;
break;
case TIMELIB_ZONETYPE_OFFSET:
new_offset = tzobj->tzi.utc_offset;
break;
case TIMELIB_ZONETYPE_ABBR:
new_offset = tzobj->tzi.z.utc_offset;
new_dst = tzobj->tzi.z.dst;
new_abbr = strdup(tzobj->tzi.z.abbr);
break;
}
type = tzobj->type;
} else if (dateobj->time->tz_info) {
tzi = dateobj->time->tz_info;
} else {
tzi = get_timezone_info(TSRMLS_C);
}
now = timelib_time_ctor();
now->zone_type = type;
switch (type) {
case TIMELIB_ZONETYPE_ID:
now->tz_info = tzi;
break;
case TIMELIB_ZONETYPE_OFFSET:
now->z = new_offset;
break;
case TIMELIB_ZONETYPE_ABBR:
now->z = new_offset;
now->dst = new_dst;
now->tz_abbr = new_abbr;
break;
}
timelib_unixtime2local(now, (timelib_sll) time(NULL));
timelib_fill_holes(dateobj->time, now, TIMELIB_NO_CLONE);
timelib_update_ts(dateobj->time, tzi);
timelib_update_from_sse(dateobj->time);
dateobj->time->have_relative = 0;
timelib_time_dtor(now);
return 1;
|
Safe
|
[] |
php-src
|
c377f1a715476934133f3254d1e0d4bf3743e2d2
|
5.527759041009082e+37
| 84 |
Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone)
| 0 |
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_cp_le_add_to_white_list *sent;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
if (!sent)
return;
hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
sent->bdaddr_type);
}
|
Safe
|
[
"CWE-290"
] |
linux
|
3ca44c16b0dcc764b641ee4ac226909f5c421aa3
|
2.3704657658992544e+38
| 18 |
Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm calls hci_connect_cfm in case the connection
state is BT_CONFIG so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
| 0 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
params->stride));
const float* input_data = GetTensorData<float>(input);
const int64_t sample_count = input->dims->data[0];
const int64_t channel_count = input->dims->data[1];
const int64_t output_width = params->spectrogram->output_frequency_channels();
float* output_flat = GetTensorData<float>(output);
std::vector<float> input_for_channel(sample_count);
for (int64_t channel = 0; channel < channel_count; ++channel) {
float* output_slice =
output_flat + (channel * params->output_height * output_width);
for (int i = 0; i < sample_count; ++i) {
input_for_channel[i] = input_data[i * channel_count + channel];
}
std::vector<std::vector<float>> spectrogram_output;
TF_LITE_ENSURE(context,
params->spectrogram->ComputeSquaredMagnitudeSpectrogram(
input_for_channel, &spectrogram_output));
TF_LITE_ENSURE_EQ(context, spectrogram_output.size(),
params->output_height);
TF_LITE_ENSURE(context, spectrogram_output.empty() ||
(spectrogram_output[0].size() == output_width));
for (int row_index = 0; row_index < params->output_height; ++row_index) {
const std::vector<float>& spectrogram_row = spectrogram_output[row_index];
TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width);
float* output_row = output_slice + (row_index * output_width);
if (params->magnitude_squared) {
for (int i = 0; i < output_width; ++i) {
output_row[i] = spectrogram_row[i];
}
} else {
for (int i = 0; i < output_width; ++i) {
output_row[i] = sqrtf(spectrogram_row[i]);
}
}
}
}
return kTfLiteOk;
}
|
Vulnerable
|
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
|
7.180820824440876e+37
| 51 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
| 1 |
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
|
Safe
|
[
"CWE-416"
] |
ImageMagick
|
ecf7c6b288e11e7e7f75387c5e9e93e423b98397
|
8.779059749226595e+37
| 45 |
...
| 0 |
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
MemTxAttrs attrs, void *ptr,
hwaddr len, hwaddr addr1, hwaddr l,
MemoryRegion *mr)
{
uint8_t *ram_ptr;
uint64_t val;
MemTxResult result = MEMTX_OK;
bool release_lock = false;
uint8_t *buf = ptr;
fuzz_dma_read_cb(addr, len, mr);
for (;;) {
if (!flatview_access_allowed(mr, attrs, addr1, l)) {
result |= MEMTX_ACCESS_ERROR;
/* Keep going. */
} else if (!memory_access_is_direct(mr, false)) {
/* I/O case */
release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
result |= memory_region_dispatch_read(mr, addr1, &val,
size_memop(l), attrs);
stn_he_p(buf, l, val);
} else {
/* RAM case */
ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
memcpy(buf, ram_ptr, l);
}
if (release_lock) {
qemu_mutex_unlock_iothread();
release_lock = false;
}
len -= l;
buf += l;
addr += l;
if (!len) {
break;
}
l = len;
mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
}
return result;
}
|
Safe
|
[
"CWE-908"
] |
qemu
|
418ade7849ce7641c0f7333718caf5091a02fd4c
|
1.2624277404021951e+38
| 48 |
softmmu: Always initialize xlat in address_space_translate_for_iotlb
The bug is an uninitialized memory read, along the translate_fail
path, which results in garbage being read from iotlb_to_section,
which can lead to a crash in io_readx/io_writex.
The bug may be fixed by writing any value with zero
in ~TARGET_PAGE_MASK, so that the call to iotlb_to_section using
the xlat'ed address returns io_mem_unassigned, as desired by the
translate_fail path.
It is most useful to record the original physical page address,
which will eventually be logged by memory_region_access_valid
when the access is rejected by unassigned_mem_accepts.
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1065
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20220621153829.366423-1-richard.henderson@linaro.org>
| 0 |
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info = nr | INTR_INFO_VALID_MASK;
if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}
if (vcpu->arch.rmode.active) {
vmx->rmode.irq.pending = true;
vmx->rmode.irq.vector = nr;
vmx->rmode.irq.rip = kvm_rip_read(vcpu);
if (nr == BP_VECTOR || nr == OF_VECTOR)
vmx->rmode.irq.rip++;
intr_info |= INTR_TYPE_SOFT_INTR;
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
return;
}
if (nr == BP_VECTOR || nr == OF_VECTOR) {
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
intr_info |= INTR_TYPE_SOFT_EXCEPTION;
} else
intr_info |= INTR_TYPE_HARD_EXCEPTION;
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}
|
Safe
|
[
"CWE-20"
] |
linux-2.6
|
16175a796d061833aacfbd9672235f2d2725df65
|
1.9450734552328435e+38
| 32 |
KVM: VMX: Don't allow uninhibited access to EFER on i386
vmx_set_msr() does not allow i386 guests to touch EFER, but they can still
do so through the default: label in the switch. If they set EFER_LME, they
can oops the host.
Fix by having EFER access through the normal channel (which will check for
EFER_LME) even on i386.
Reported-and-tested-by: Benjamin Gilbert <bgilbert@cs.cmu.edu>
Cc: stable@kernel.org
Signed-off-by: Avi Kivity <avi@redhat.com>
| 0 |
print_experimental_counts(Dwarf_Debug dbg, int line_version,
Dwarf_Line_Context line_context)
{
if (line_version == EXPERIMENTAL_LINE_TABLES_VERSION) {
print_experimental_subprograms_list(dbg,line_context);
}
}
|
Safe
|
[] |
libdwarf-code
|
faf99408e3f9f706fc3809dd400e831f989778d3
|
2.8918208788351884e+38
| 7 |
modified: libdwarf/dwarf_print_lines.c
* dwarf_print_lines.c: In case of corrupted
DWARF5 line header the fi_file_name field
for a file entry can be null. Now
we print a <no file name> string in that case
to avoid passing a null to dwarfstring_append.
Dwarfbug DW202010-003.
Also some lines longer than libdwarf standard
were shortened, but a few long lines really
must remain.
| 0 |
COMPS_ObjList * comps_objmrtree_get(COMPS_ObjMRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it = NULL;
COMPS_ObjMRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found)
return NULL;
rtdata = (COMPS_ObjMRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) return (COMPS_ObjList*)
comps_object_incref((COMPS_Object*)rtdata->data);
else if (ended == 1) offset+=x;
else return NULL;
subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
}
if (it)
return ((COMPS_ObjMRTreeData*)it->data)->data;
else return NULL;
}
|
Safe
|
[
"CWE-416",
"CWE-862"
] |
libcomps
|
e3a5d056633677959ad924a51758876d415e7046
|
2.0261534216620685e+38
| 39 |
Fix UAF in comps_objmrtree_unite function
The added field is not used at all in many places and it is probably the
left-over of some copy-paste.
| 0 |
void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
int32 input_offset, const uint8* filter_data,
const Dims<4>& filter_dims, int32 filter_offset,
const int32* bias_data, const Dims<4>& bias_dims, int stride,
int pad_width, int pad_height, int depth_multiplier,
int32 output_offset, int32 output_multiplier,
int output_shift, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
DepthwiseConv<Ac>(input_data, input_dims, input_offset, filter_data,
filter_dims, filter_offset, bias_data, bias_dims, stride,
stride, pad_width, pad_height, depth_multiplier,
output_offset, output_multiplier, output_shift,
output_activation_min, output_activation_max, output_data,
output_dims);
}
|
Safe
|
[
"CWE-703",
"CWE-835"
] |
tensorflow
|
dfa22b348b70bb89d6d6ec0ff53973bacb4f4695
|
1.7972511499548478e+38
| 16 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
| 0 |
int main(int argc, char* argv[])
{
try
{
string_conversion_test();
std::cout << "----" << std::endl;
os_wrapper_test();
std::cout << "----" << std::endl;
fopen_wrapper_test();
std::cout << "----" << std::endl;
getenv_test();
std::cout << "----" << std::endl;
to_utf8_test();
std::cout << "----" << std::endl;
get_whoami_test();
std::cout << "----" << std::endl;
same_file_test();
std::cout << "----" << std::endl;
read_lines_from_file_test();
}
catch (std::exception& e)
{
std::cout << "unexpected exception: " << e.what() << std::endl;
}
return 0;
}
|
Safe
|
[
"CWE-125"
] |
qpdf
|
6d46346eb93d5032c08cf1e39023b5d57260a766
|
1.852861785236822e+37
| 27 |
Detect integer overflow/underflow
| 0 |
ztempfile(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
const char *pstr;
char fmode[4];
int code = parse_file_access_string(op, fmode);
char *prefix = NULL;
char *fname= NULL;
uint fnlen;
FILE *sfile;
stream *s;
byte *buf, *sbody;
if (code < 0)
return code;
prefix = (char *)gs_alloc_bytes(imemory, gp_file_name_sizeof, "ztempfile(prefix)");
fname = (char *)gs_alloc_bytes(imemory, gp_file_name_sizeof, "ztempfile(fname)");
if (!prefix || !fname) {
code = gs_note_error(gs_error_VMerror);
goto done;
}
strcat(fmode, gp_fmode_binary_suffix);
if (r_has_type(op - 1, t_null))
pstr = gp_scratch_file_name_prefix;
else {
uint psize;
check_read_type(op[-1], t_string);
psize = r_size(op - 1);
if (psize >= gp_file_name_sizeof) {
code = gs_note_error(gs_error_rangecheck);
goto done;
}
memcpy(prefix, op[-1].value.const_bytes, psize);
prefix[psize] = 0;
pstr = prefix;
}
if (gp_file_name_is_absolute(pstr, strlen(pstr))) {
if (check_file_permissions(i_ctx_p, pstr, strlen(pstr),
NULL, "PermitFileWriting") < 0) {
code = gs_note_error(gs_error_invalidfileaccess);
goto done;
}
} else if (!prefix_is_simple(pstr)) {
code = gs_note_error(gs_error_invalidfileaccess);
goto done;
}
s = file_alloc_stream(imemory, "ztempfile(stream)");
if (s == 0) {
code = gs_note_error(gs_error_VMerror);
goto done;
}
buf = gs_alloc_bytes(imemory, file_default_buffer_size,
"ztempfile(buffer)");
if (buf == 0) {
code = gs_note_error(gs_error_VMerror);
goto done;
}
sfile = gp_open_scratch_file(imemory, pstr, fname, fmode);
if (sfile == 0) {
gs_free_object(imemory, buf, "ztempfile(buffer)");
code = gs_note_error(gs_error_invalidfileaccess);
goto done;
}
fnlen = strlen(fname);
sbody = ialloc_string(fnlen, ".tempfile(fname)");
if (sbody == 0) {
gs_free_object(imemory, buf, "ztempfile(buffer)");
code = gs_note_error(gs_error_VMerror);
goto done;
}
memcpy(sbody, fname, fnlen);
file_init_stream(s, sfile, fmode, buf, file_default_buffer_size);
code = ssetfilename(s, (const unsigned char*) fname, fnlen);
if (code < 0) {
gx_io_device *iodev_dflt = iodev_default(imemory);
sclose(s);
iodev_dflt->procs.delete_file(iodev_dflt, fname);
ifree_string(sbody, fnlen, ".tempfile(fname)");
code = gs_note_error(gs_error_VMerror);
goto done;
}
make_string(op - 1, a_readonly | icurrent_space, fnlen, sbody);
make_stream_file(op, s, fmode);
done:
if (prefix)
gs_free_object(imemory, prefix, "ztempfile(prefix)");
if (fname)
gs_free_object(imemory, fname, "ztempfile(fname)");
return code;
}
|
Vulnerable
|
[] |
ghostpdl
|
0d3901189f245232f0161addf215d7268c4d05a3
|
1.2005120619415251e+38
| 95 |
Bug 699657: properly apply file permissions to .tempfile
| 1 |
static void print_stuff(BIO *bio, SSL *s, int full)
{
X509 *peer=NULL;
char *p;
static const char *space=" ";
char buf[BUFSIZ];
STACK_OF(X509) *sk;
STACK_OF(X509_NAME) *sk2;
const SSL_CIPHER *c;
X509_NAME *xn;
int j,i;
#ifndef OPENSSL_NO_COMP
const COMP_METHOD *comp, *expansion;
#endif
unsigned char *exportedkeymat;
if (full)
{
int got_a_chain = 0;
sk=SSL_get_peer_cert_chain(s);
if (sk != NULL)
{
got_a_chain = 1; /* we don't have it for SSL2 (yet) */
BIO_printf(bio,"---\nCertificate chain\n");
for (i=0; i<sk_X509_num(sk); i++)
{
X509_NAME_oneline(X509_get_subject_name(
sk_X509_value(sk,i)),buf,sizeof buf);
BIO_printf(bio,"%2d s:%s\n",i,buf);
X509_NAME_oneline(X509_get_issuer_name(
sk_X509_value(sk,i)),buf,sizeof buf);
BIO_printf(bio," i:%s\n",buf);
if (c_showcerts)
PEM_write_bio_X509(bio,sk_X509_value(sk,i));
}
}
BIO_printf(bio,"---\n");
peer=SSL_get_peer_certificate(s);
if (peer != NULL)
{
BIO_printf(bio,"Server certificate\n");
if (!(c_showcerts && got_a_chain)) /* Redundant if we showed the whole chain */
PEM_write_bio_X509(bio,peer);
X509_NAME_oneline(X509_get_subject_name(peer),
buf,sizeof buf);
BIO_printf(bio,"subject=%s\n",buf);
X509_NAME_oneline(X509_get_issuer_name(peer),
buf,sizeof buf);
BIO_printf(bio,"issuer=%s\n",buf);
}
else
BIO_printf(bio,"no peer certificate available\n");
sk2=SSL_get_client_CA_list(s);
if ((sk2 != NULL) && (sk_X509_NAME_num(sk2) > 0))
{
BIO_printf(bio,"---\nAcceptable client certificate CA names\n");
for (i=0; i<sk_X509_NAME_num(sk2); i++)
{
xn=sk_X509_NAME_value(sk2,i);
X509_NAME_oneline(xn,buf,sizeof(buf));
BIO_write(bio,buf,strlen(buf));
BIO_write(bio,"\n",1);
}
}
else
{
BIO_printf(bio,"---\nNo client certificate CA names sent\n");
}
p=SSL_get_shared_ciphers(s,buf,sizeof buf);
if (p != NULL)
{
/* This works only for SSL 2. In later protocol
* versions, the client does not know what other
* ciphers (in addition to the one to be used
* in the current connection) the server supports. */
BIO_printf(bio,"---\nCiphers common between both SSL endpoints:\n");
j=i=0;
while (*p)
{
if (*p == ':')
{
BIO_write(bio,space,15-j%25);
i++;
j=0;
BIO_write(bio,((i%3)?" ":"\n"),1);
}
else
{
BIO_write(bio,p,1);
j++;
}
p++;
}
BIO_write(bio,"\n",1);
}
ssl_print_sigalgs(bio, s);
ssl_print_tmp_key(bio, s);
BIO_printf(bio,"---\nSSL handshake has read %ld bytes and written %ld bytes\n",
BIO_number_read(SSL_get_rbio(s)),
BIO_number_written(SSL_get_wbio(s)));
}
BIO_printf(bio,(SSL_cache_hit(s)?"---\nReused, ":"---\nNew, "));
c=SSL_get_current_cipher(s);
BIO_printf(bio,"%s, Cipher is %s\n",
SSL_CIPHER_get_version(c),
SSL_CIPHER_get_name(c));
if (peer != NULL) {
EVP_PKEY *pktmp;
pktmp = X509_get_pubkey(peer);
BIO_printf(bio,"Server public key is %d bit\n",
EVP_PKEY_bits(pktmp));
EVP_PKEY_free(pktmp);
}
BIO_printf(bio, "Secure Renegotiation IS%s supported\n",
SSL_get_secure_renegotiation_support(s) ? "" : " NOT");
#ifndef OPENSSL_NO_COMP
comp=SSL_get_current_compression(s);
expansion=SSL_get_current_expansion(s);
BIO_printf(bio,"Compression: %s\n",
comp ? SSL_COMP_get_name(comp) : "NONE");
BIO_printf(bio,"Expansion: %s\n",
expansion ? SSL_COMP_get_name(expansion) : "NONE");
#endif
#ifdef SSL_DEBUG
{
/* Print out local port of connection: useful for debugging */
int sock;
struct sockaddr_in ladd;
socklen_t ladd_size = sizeof(ladd);
sock = SSL_get_fd(s);
getsockname(sock, (struct sockaddr *)&ladd, &ladd_size);
BIO_printf(bio_c_out, "LOCAL PORT is %u\n", ntohs(ladd.sin_port));
}
#endif
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
if (next_proto.status != -1) {
const unsigned char *proto;
unsigned int proto_len;
SSL_get0_next_proto_negotiated(s, &proto, &proto_len);
BIO_printf(bio, "Next protocol: (%d) ", next_proto.status);
BIO_write(bio, proto, proto_len);
BIO_write(bio, "\n", 1);
}
#endif
{
SRTP_PROTECTION_PROFILE *srtp_profile=SSL_get_selected_srtp_profile(s);
if(srtp_profile)
BIO_printf(bio,"SRTP Extension negotiated, profile=%s\n",
srtp_profile->name);
}
SSL_SESSION_print(bio,SSL_get_session(s));
if (keymatexportlabel != NULL)
{
BIO_printf(bio, "Keying material exporter:\n");
BIO_printf(bio, " Label: '%s'\n", keymatexportlabel);
BIO_printf(bio, " Length: %i bytes\n", keymatexportlen);
exportedkeymat = OPENSSL_malloc(keymatexportlen);
if (exportedkeymat != NULL)
{
if (!SSL_export_keying_material(s, exportedkeymat,
keymatexportlen,
keymatexportlabel,
strlen(keymatexportlabel),
NULL, 0, 0))
{
BIO_printf(bio, " Error\n");
}
else
{
BIO_printf(bio, " Keying material: ");
for (i=0; i<keymatexportlen; i++)
BIO_printf(bio, "%02X",
exportedkeymat[i]);
BIO_printf(bio, "\n");
}
OPENSSL_free(exportedkeymat);
}
}
BIO_printf(bio,"---\n");
if (peer != NULL)
X509_free(peer);
/* flush, or debugging output gets mixed with http response */
(void)BIO_flush(bio);
}
|
Safe
|
[] |
openssl
|
a70da5b3ecc3160368529677006801c58cb369db
|
2.8743443543263487e+38
| 196 |
New functions to check a hostname email or IP address against a
certificate. Add options to s_client, s_server and x509 utilities
to print results of checks.
| 0 |
GC_INNER void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
int k)
{
void * result = GC_generic_malloc_inner_ignore_off_page(
lb + DEBUG_BYTES, k);
if (result == 0) {
GC_err_printf("GC internal allocation (%lu bytes) returning NULL\n",
(unsigned long) lb);
return(0);
}
if (!GC_debugging_started) {
GC_start_debugging_inner();
}
ADD_CALL_CHAIN(result, GC_RETURN_ADDR);
return (GC_store_debug_info_inner(result, (word)lb, "INTERNAL", 0));
}
|
Vulnerable
|
[
"CWE-119"
] |
bdwgc
|
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
|
3.3879061896661915e+37
| 17 |
Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now).
| 1 |
process_multipart_headers (struct MHD_PostProcessor *pp,
size_t *ioffptr,
enum PP_State next_state)
{
char *buf = (char *) &pp[1];
size_t newline;
newline = 0;
while ( (newline < pp->buffer_pos) &&
(buf[newline] != '\r') &&
(buf[newline] != '\n') )
newline++;
if (newline == pp->buffer_size)
{
pp->state = PP_Error;
return MHD_NO; /* out of memory */
}
if (newline == pp->buffer_pos)
return MHD_NO; /* will need more data */
if (0 == newline)
{
/* empty line - end of headers */
pp->skip_rn = RN_Full;
pp->state = next_state;
return MHD_YES;
}
/* got an actual header */
if (buf[newline] == '\r')
pp->skip_rn = RN_OptN;
buf[newline] = '\0';
if (MHD_str_equal_caseless_n_ ("Content-disposition: ",
buf,
MHD_STATICSTR_LEN_ ("Content-disposition: ")))
{
try_get_value (&buf[MHD_STATICSTR_LEN_ ("Content-disposition: ")],
"name",
&pp->content_name);
try_get_value (&buf[MHD_STATICSTR_LEN_ ("Content-disposition: ")],
"filename",
&pp->content_filename);
}
else
{
try_match_header ("Content-type: ",
MHD_STATICSTR_LEN_ ("Content-type: "),
buf,
&pp->content_type);
try_match_header ("Content-Transfer-Encoding: ",
MHD_STATICSTR_LEN_ ("Content-Transfer-Encoding: "),
buf,
&pp->content_transfer_encoding);
}
(*ioffptr) += newline + 1;
return MHD_YES;
}
|
Safe
|
[
"CWE-120"
] |
libmicrohttpd
|
a110ae6276660bee3caab30e9ff3f12f85cf3241
|
2.296992262389598e+38
| 55 |
fix buffer overflow and add test
| 0 |
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
int rc, entries;
int cqe = attr->cqe;
struct bnxt_qplib_nq *nq = NULL;
unsigned int nq_alloc_cnt;
/* Validate CQ fields */
if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
return -EINVAL;
}
cq->rdev = rdev;
cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
entries = roundup_pow_of_two(cqe + 1);
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;
if (udata) {
struct bnxt_re_cq_req req;
struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
udata, struct bnxt_re_ucontext, ib_uctx);
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
rc = -EFAULT;
goto fail;
}
cq->umem = ib_umem_get(udata, req.cq_va,
entries * sizeof(struct cq_base),
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(cq->umem)) {
rc = PTR_ERR(cq->umem);
goto fail;
}
cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
cq->qplib_cq.dpi = &uctx->dpi;
} else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
GFP_KERNEL);
if (!cq->cql) {
rc = -ENOMEM;
goto fail;
}
cq->qplib_cq.dpi = &rdev->dpi_privileged;
}
/*
* Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
* used for getting the NQ index.
*/
nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
cq->qplib_cq.max_wqe = entries;
cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
cq->qplib_cq.nq = nq;
rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
goto fail;
}
cq->ib_cq.cqe = entries;
cq->cq_period = cq->qplib_cq.period;
nq->budget++;
atomic_inc(&rdev->cq_count);
spin_lock_init(&cq->cq_lock);
if (udata) {
struct bnxt_re_cq_resp resp;
resp.cqid = cq->qplib_cq.id;
resp.tail = cq->qplib_cq.hwq.cons;
resp.phase = cq->qplib_cq.period;
resp.rsvd = 0;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
goto c2fail;
}
}
return 0;
c2fail:
ib_umem_release(cq->umem);
fail:
kfree(cq->cql);
return rc;
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
4a9d46a9fe14401f21df69cea97c62396d5fb053
|
2.1737912048272334e+37
| 101 |
RDMA: Fix goto target to release the allocated memory
In bnxt_re_create_srq(), when ib_copy_to_udata() fails allocated memory
should be released by goto fail.
Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
Link: https://lore.kernel.org/r/20190910222120.16517-1-navid.emamdoost@gmail.com
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
| 0 |
compilePassOpcode (FileInfo * nested, TranslationTableOpcode opcode)
{
/*Compile the operands of a pass opcode */
TranslationTableCharacterAttributes after = 0;
TranslationTableCharacterAttributes before = 0;
widechar passSubOp;
const struct CharacterClass *class;
TranslationTableOffset ruleOffset = 0;
TranslationTableRule *rule = NULL;
int k;
int kk = 0;
pass_Codes passCode;
int endTest = 0;
int isScript = 1;
passInstructions = passRuleDots.chars;
passIC = 0; /*Instruction counter */
passRuleChars.length = 0;
passNested = nested;
passOpcode = opcode;
/* passHoldString and passLine are static variables declared
* previously.*/
passLinepos = 0;
passHoldString.length = 0;
for (k = nested->linepos; k < nested->linelen; k++)
passHoldString.chars[passHoldString.length++] = nested->line[k];
if (!eqasc2uni ((unsigned char *) "script", passHoldString.chars, 6))
{
isScript = 0;
#define SEPCHAR 0x0001
for (k = 0; k < passHoldString.length && passHoldString.chars[k] > 32;
k++);
if (k < passHoldString.length)
passHoldString.chars[k] = SEPCHAR;
else
{
compileError (passNested, "Invalid multipass operands");
return 0;
}
}
parseChars (passNested, &passLine, &passHoldString);
if (isScript)
{
int more = 1;
passCode = passGetScriptToken ();
if (passCode != pass_script)
{
compileError (passNested, "Invalid multipass statement");
return 0;
}
/* Declaratives */
while (more)
{
passCode = passGetScriptToken ();
switch (passCode)
{
case pass_define:
if (!passIsLeftParen ())
return 0;
if (!passIsName ())
return 0;
if (!passIsComma ())
return 0;
if (!passIsNumber ())
return 0;
if (!passIsRightParen ())
return 0;
passAddName (&passHoldString, passHoldNumber);
break;
case pass_if:
more = 0;
break;
default:
compileError (passNested,
"invalid definition in declarative part");
return 0;
}
}
/* if part */
more = 1;
while (more)
{
passCode = passGetScriptToken ();
passSubOp = passCode;
switch (passCode)
{
case pass_not:
passInstructions[passIC++] = pass_not;
break;
case pass_first:
passInstructions[passIC++] = pass_first;
break;
case pass_last:
passInstructions[passIC++] = pass_last;
break;
case pass_search:
passInstructions[passIC++] = pass_search;
break;
case pass_string:
if (opcode != CTO_Context && opcode != CTO_Correct)
{
compileError (passNested,
"Character strings can only be used with the context and correct opcodes.");
return 0;
}
passInstructions[passIC++] = pass_string;
goto ifDoCharsDots;
case pass_dots:
if (passOpcode == CTO_Correct || passOpcode == CTO_Context)
{
compileError (passNested,
"dot patterns cannot be specified in the if part\
of the correct or context opcodes");
return 0;
}
passInstructions[passIC++] = pass_dots;
ifDoCharsDots:
passInstructions[passIC++] = passHoldString.length;
for (kk = 0; kk < passHoldString.length; kk++)
passInstructions[passIC++] = passHoldString.chars[kk];
break;
case pass_attributes:
if (!passIsLeftParen ())
return 0;
if (!passGetAttributes ())
return 0;
if (!passInsertAttributes ())
return 0;
break;
case pass_emphasis:
if (!passIsLeftParen ())
return 0;
if (!passGetEmphasis ())
return 0;
/*Right parenthis handled by subfunctiion */
break;
case pass_lookback:
passInstructions[passIC++] = pass_lookback;
passCode = passGetScriptToken ();
if (passCode != pass_leftParen)
{
passInstructions[passIC++] = 1;
passLinepos = passPrevLinepos;
break;
}
if (!passIsNumber ())
return 0;
if (!passIsRightParen ())
return 0;
passInstructions[passIC] = passHoldNumber;
break;
case pass_group:
if (!passIsLeftParen ())
return 0;
break;
case pass_mark:
passInstructions[passIC++] = pass_startReplace;
passInstructions[passIC++] = pass_endReplace;
break;
case pass_replace:
passInstructions[passIC++] = pass_startReplace;
if (!passIsLeftParen ())
return 0;
break;
case pass_rightParen:
passInstructions[passIC++] = pass_endReplace;
break;
case pass_groupstart:
case pass_groupend:
if (!passIsLeftParen ())
return 0;
if (!passGetName ())
return 0;
if (!passIsRightParen ())
return 0;
ruleOffset = findRuleName (&passHoldString);
if (ruleOffset)
rule = (TranslationTableRule *) & table->ruleArea[ruleOffset];
if (rule && rule->opcode == CTO_Grouping)
{
passInstructions[passIC++] = passSubOp;
passInstructions[passIC++] = ruleOffset >> 16;
passInstructions[passIC++] = ruleOffset & 0xffff;
break;
}
else
{
compileError (passNested, "%s is not a grouping name",
showString (&passHoldString.chars[0],
passHoldString.length));
return 0;
}
break;
case pass_class:
if (!passIsLeftParen ())
return 0;
if (!passGetName ())
return 0;
if (!passIsRightParen ())
return 0;
if (!(class = findCharacterClass (&passHoldString)))
return 0;
passAttributes = class->attribute;
passInsertAttributes ();
break;
case pass_swap:
ruleOffset = findRuleName (&passHoldString);
if (!passIsLeftParen ())
return 0;
if (!passGetName ())
return 0;
if (!passIsRightParen ())
return 0;
ruleOffset = findRuleName (&passHoldString);
if (ruleOffset)
rule = (TranslationTableRule *) & table->ruleArea[ruleOffset];
if (rule
&& (rule->opcode == CTO_SwapCc || rule->opcode == CTO_SwapCd
|| rule->opcode == CTO_SwapDd))
{
passInstructions[passIC++] = pass_swap;
passInstructions[passIC++] = ruleOffset >> 16;
passInstructions[passIC++] = ruleOffset & 0xffff;
if (!passGetRange ())
return 0;
break;
}
compileError (passNested,
"%s is not a swap name.",
showString (&passHoldString.chars[0],
passHoldString.length));
return 0;
case pass_nameFound:
passHoldNumber = passFindName (&passHoldString);
passCode = passGetScriptToken ();
if (!(passCode == pass_eq || passCode == pass_lt || passCode
== pass_gt || passCode == pass_noteq || passCode ==
pass_lteq || passCode == pass_gteq))
{
compileError (nested,
"invalid comparison operator in if part");
return 0;
}
passInstructions[passIC++] = passCode;
passInstructions[passIC++] = passHoldNumber;
if (!passIsNumber ())
return 0;
passInstructions[passIC++] = passHoldNumber;
break;
case pass_then:
passInstructions[passIC++] = pass_endTest;
more = 0;
break;
default:
compileError (passNested, "invalid choice in if part");
return 0;
}
}
/* then part */
more = 1;
while (more)
{
passCode = passGetScriptToken ();
passSubOp = passCode;
switch (passCode)
{
case pass_string:
if (opcode != CTO_Correct)
{
compileError (passNested,
"Character strings can only be used in the then part with the correct opcode.");
return 0;
}
passInstructions[passIC++] = pass_string;
goto thenDoCharsDots;
case pass_dots:
if (opcode == CTO_Correct)
{
compileError (passNested,
"Dot patterns cannot be used with the correct opcode.");
return 0;
}
passInstructions[passIC++] = pass_dots;
thenDoCharsDots:
passInstructions[passIC++] = passHoldString.length;
for (kk = 0; kk < passHoldString.length; kk++)
passInstructions[passIC++] = passHoldString.chars[kk];
break;
case pass_nameFound:
passHoldNumber = passFindName (&passHoldString);
passCode = passGetScriptToken ();
if (!(passCode == pass_plus || passCode == pass_hyphen
|| passCode == pass_eq))
{
compileError (nested,
"Invalid variable operator in then part");
return 0;
}
passInstructions[passIC++] = passCode;
passInstructions[passIC++] = passHoldNumber;
if (!passIsNumber ())
return 0;
passInstructions[passIC++] = passHoldNumber;
break;
case pass_copy:
passInstructions[passIC++] = pass_copy;
break;
case pass_omit:
passInstructions[passIC++] = pass_omit;
break;
case pass_swap:
ruleOffset = findRuleName (&passHoldString);
if (!passIsLeftParen ())
return 0;
if (!passGetName ())
return 0;
if (!passIsRightParen ())
return 0;
ruleOffset = findRuleName (&passHoldString);
if (ruleOffset)
rule = (TranslationTableRule *) & table->ruleArea[ruleOffset];
if (rule
&& (rule->opcode == CTO_SwapCc || rule->opcode == CTO_SwapCd
|| rule->opcode == CTO_SwapDd))
{
passInstructions[passIC++] = pass_swap;
passInstructions[passIC++] = ruleOffset >> 16;
passInstructions[passIC++] = ruleOffset & 0xffff;
if (!passGetRange ())
return 0;
break;
}
compileError (passNested,
"%s is not a swap name.",
showString (&passHoldString.chars[0],
passHoldString.length));
return 0;
case pass_noMoreTokens:
more = 0;
break;
default:
compileError (passNested, "invalid action in then part");
return 0;
}
}
}
else
{
/* Older machine-language-like "assembler". */
/*Compile test part */
for (k = 0; k < passLine.length && passLine.chars[k] != SEPCHAR; k++);
endTest = k;
passLine.chars[endTest] = pass_endTest;
passLinepos = 0;
while (passLinepos <= endTest)
{
switch ((passSubOp = passLine.chars[passLinepos]))
{
case pass_lookback:
passInstructions[passIC++] = pass_lookback;
passLinepos++;
passGetNumber ();
if (passHoldNumber == 0)
passHoldNumber = 1;
passInstructions[passIC++] = passHoldNumber;
break;
case pass_not:
passInstructions[passIC++] = pass_not;
passLinepos++;
break;
case pass_first:
passInstructions[passIC++] = pass_first;
passLinepos++;
break;
case pass_last:
passInstructions[passIC++] = pass_last;
passLinepos++;
break;
case pass_search:
passInstructions[passIC++] = pass_search;
passLinepos++;
break;
case pass_string:
if (opcode != CTO_Context && opcode != CTO_Correct)
{
compileError (passNested,
"Character strings can only be used with the context and correct opcodes.");
return 0;
}
passLinepos++;
passInstructions[passIC++] = pass_string;
passGetString ();
goto testDoCharsDots;
case pass_dots:
passLinepos++;
passInstructions[passIC++] = pass_dots;
passGetDots ();
testDoCharsDots:
if (passHoldString.length == 0)
return 0;
passInstructions[passIC++] = passHoldString.length;
for (kk = 0; kk < passHoldString.length; kk++)
passInstructions[passIC++] = passHoldString.chars[kk];
break;
case pass_startReplace:
passInstructions[passIC++] = pass_startReplace;
passLinepos++;
break;
case pass_endReplace:
passInstructions[passIC++] = pass_endReplace;
passLinepos++;
break;
case pass_variable:
passLinepos++;
passGetNumber ();
switch (passLine.chars[passLinepos])
{
case pass_eq:
passInstructions[passIC++] = pass_eq;
goto doComp;
case pass_lt:
if (passLine.chars[passLinepos + 1] == pass_eq)
{
passLinepos++;
passInstructions[passIC++] = pass_lteq;
}
else
passInstructions[passIC++] = pass_lt;
goto doComp;
case pass_gt:
if (passLine.chars[passLinepos + 1] == pass_eq)
{
passLinepos++;
passInstructions[passIC++] = pass_gteq;
}
else
passInstructions[passIC++] = pass_gt;
doComp:
passInstructions[passIC++] = passHoldNumber;
passLinepos++;
passGetNumber ();
passInstructions[passIC++] = passHoldNumber;
break;
default:
compileError (passNested, "incorrect comparison operator");
return 0;
}
break;
case pass_attributes:
passLinepos++;
passGetAttributes ();
insertAttributes:
passInstructions[passIC++] = pass_attributes;
passInstructions[passIC++] = passAttributes >> 16;
passInstructions[passIC++] = passAttributes & 0xffff;
getRange:
if (passLine.chars[passLinepos] == pass_until)
{
passLinepos++;
passInstructions[passIC++] = 1;
passInstructions[passIC++] = 0xffff;
break;
}
passGetNumber ();
if (passHoldNumber == 0)
{
passHoldNumber = passInstructions[passIC++] = 1;
passInstructions[passIC++] = 1; /*This is not an error */
break;
}
passInstructions[passIC++] = passHoldNumber;
if (passLine.chars[passLinepos] != pass_hyphen)
{
passInstructions[passIC++] = passHoldNumber;
break;
}
passLinepos++;
passGetNumber ();
if (passHoldNumber == 0)
{
compileError (passNested, "invalid range");
return 0;
}
passInstructions[passIC++] = passHoldNumber;
break;
case pass_groupstart:
case pass_groupend:
passLinepos++;
passGetName ();
ruleOffset = findRuleName (&passHoldString);
if (ruleOffset)
rule = (TranslationTableRule *) & table->ruleArea[ruleOffset];
if (rule && rule->opcode == CTO_Grouping)
{
passInstructions[passIC++] = passSubOp;
passInstructions[passIC++] = ruleOffset >> 16;
passInstructions[passIC++] = ruleOffset & 0xffff;
break;
}
else
{
compileError (passNested, "%s is not a grouping name",
showString (&passHoldString.chars[0],
passHoldString.length));
return 0;
}
break;
case pass_swap:
passGetName ();
if ((class = findCharacterClass (&passHoldString)))
{
passAttributes = class->attribute;
goto insertAttributes;
}
ruleOffset = findRuleName (&passHoldString);
if (ruleOffset)
rule = (TranslationTableRule *) & table->ruleArea[ruleOffset];
if (rule
&& (rule->opcode == CTO_SwapCc || rule->opcode == CTO_SwapCd
|| rule->opcode == CTO_SwapDd))
{
passInstructions[passIC++] = pass_swap;
passInstructions[passIC++] = ruleOffset >> 16;
passInstructions[passIC++] = ruleOffset & 0xffff;
goto getRange;
}
compileError (passNested,
"%s is neither a class name nor a swap name.",
showString (&passHoldString.chars[0],
passHoldString.length));
return 0;
case pass_endTest:
passInstructions[passIC++] = pass_endTest;
passLinepos++;
break;
default:
compileError (passNested,
"incorrect operator '%c ' in test part",
passLine.chars[passLinepos]);
return 0;
}
} /*Compile action part */
/* Compile action part */
while (passLinepos < passLine.length &&
passLine.chars[passLinepos] <= 32)
passLinepos++;
while (passLinepos < passLine.length &&
passLine.chars[passLinepos] > 32)
{
switch ((passSubOp = passLine.chars[passLinepos]))
{
case pass_string:
if (opcode != CTO_Correct)
{
compileError (passNested,
"Character strings can only be used with the ccorrect opcode.");
return 0;
}
passLinepos++;
passInstructions[passIC++] = pass_string;
passGetString ();
goto actionDoCharsDots;
case pass_dots:
if (opcode == CTO_Correct)
{
compileError (passNested,
"Dot patterns cannot be used with the correct opcode.");
return 0;
}
passLinepos++;
passGetDots ();
passInstructions[passIC++] = pass_dots;
actionDoCharsDots:
if (passHoldString.length == 0)
return 0;
passInstructions[passIC++] = passHoldString.length;
for (kk = 0; kk < passHoldString.length; kk++)
passInstructions[passIC++] = passHoldString.chars[kk];
break;
case pass_variable:
passLinepos++;
passGetNumber ();
switch (passLine.chars[passLinepos])
{
case pass_eq:
passInstructions[passIC++] = pass_eq;
passInstructions[passIC++] = passHoldNumber;
passLinepos++;
passGetNumber ();
passInstructions[passIC++] = passHoldNumber;
break;
case pass_plus:
case pass_hyphen:
passInstructions[passIC++] = passLine.chars[passLinepos];
passInstructions[passIC++] = passHoldNumber;
break;
default:
compileError (passNested,
"incorrect variable operator in action part");
return 0;
}
break;
case pass_copy:
passInstructions[passIC++] = pass_copy;
passLinepos++;
break;
case pass_omit:
passInstructions[passIC++] = pass_omit;
passLinepos++;
break;
case pass_groupreplace:
case pass_groupstart:
case pass_groupend:
passLinepos++;
passGetName ();
ruleOffset = findRuleName (&passHoldString);
if (ruleOffset)
rule = (TranslationTableRule *) & table->ruleArea[ruleOffset];
if (rule && rule->opcode == CTO_Grouping)
{
passInstructions[passIC++] = passSubOp;
passInstructions[passIC++] = ruleOffset >> 16;
passInstructions[passIC++] = ruleOffset & 0xffff;
break;
}
compileError (passNested, "%s is not a grouping name",
showString (&passHoldString.chars[0],
passHoldString.length));
return 0;
case pass_swap:
passLinepos++;
passGetName ();
ruleOffset = findRuleName (&passHoldString);
if (ruleOffset)
rule = (TranslationTableRule *) & table->ruleArea[ruleOffset];
if (rule
&& (rule->opcode == CTO_SwapCc || rule->opcode == CTO_SwapCd
|| rule->opcode == CTO_SwapDd))
{
passInstructions[passIC++] = pass_swap;
passInstructions[passIC++] = ruleOffset >> 16;
passInstructions[passIC++] = ruleOffset & 0xffff;
break;
}
compileError (passNested, "%s is not a swap name.",
showString (&passHoldString.chars[0],
passHoldString.length));
return 0;
break;
default:
compileError (passNested, "incorrect operator in action part");
return 0;
}
}
}
/*Analyze and add rule */
passRuleDots.length = passIC;
passIC = 0;
while (passIC < passRuleDots.length)
{
int start = 0;
switch (passInstructions[passIC])
{
case pass_string:
case pass_dots:
case pass_attributes:
case pass_swap:
start = 1;
break;
case pass_groupstart:
case pass_groupend:
start = 1;
break;
case pass_eq:
case pass_lt:
case pass_gt:
case pass_lteq:
case pass_gteq:
passIC += 3;
break;
case pass_lookback:
passIC += 2;
break;
case pass_not:
case pass_startReplace:
case pass_endReplace:
case pass_first:
passIC++;
break;
default:
compileError (passNested,
"Test/if part must contain characters, dots, attributes or class \
swap.");
return 0;
}
if (start)
break;
}
switch (passInstructions[passIC])
{
case pass_string:
case pass_dots:
for (k = 0; k < passInstructions[passIC + 1]; k++)
passRuleChars.chars[k] = passInstructions[passIC + 2 + k];
passRuleChars.length = k;
after = before = 0;
break;
case pass_attributes:
case pass_groupstart:
case pass_groupend:
case pass_swap:
after = passRuleDots.length;
before = 0;
break;
default:
break;
}
if (!addRule (passNested, opcode, &passRuleChars, &passRuleDots,
after, before))
return 0;
return 1;
}
|
Safe
|
[] |
liblouis
|
dc97ef791a4fae9da11592c79f9f79e010596e0c
|
1.6495708142260518e+37
| 727 |
Merge branch 'table_resolver'
| 0 |
/* Exception::__toString() implementation (PHP engine, pre-PHP7 API).
 * Walks the exception chain via the "previous" property, building a
 * human-readable description (class, message, file:line, stack trace)
 * for each link; older exceptions are appended after "Next ".
 * The final string is cached in the private "string" property so
 * uncaught-exception handlers can reuse it without leaking. */
ZEND_METHOD(exception, __toString)
{
zval message, file, line, *trace, *exception;
char *str, *prev_str;
int len = 0;
zend_fcall_info fci;
zval fname;
DEFAULT_0_PARAMS;
/* Start with an empty heap string; each loop pass re-allocates str and
 * frees the previous one (prev_str). */
str = estrndup("", 0);
exception = getThis();
/* Method name used to fetch the formatted trace from each exception. */
ZVAL_STRINGL(&fname, "gettraceasstring", sizeof("gettraceasstring")-1, 1);
/* Follow the "previous" chain while each link is a real Exception object. */
while (exception && Z_TYPE_P(exception) == IS_OBJECT && instanceof_function(Z_OBJCE_P(exception), default_exception_ce TSRMLS_CC)) {
prev_str = str;
_default_exception_get_entry(exception, "message", sizeof("message")-1, &message TSRMLS_CC);
_default_exception_get_entry(exception, "file", sizeof("file")-1, &file TSRMLS_CC);
_default_exception_get_entry(exception, "line", sizeof("line")-1, &line TSRMLS_CC);
convert_to_string(&message);
convert_to_string(&file);
convert_to_long(&line);
/* Call $exception->getTraceAsString() through the fcall API. */
trace = NULL;
fci.size = sizeof(fci);
fci.function_table = &Z_OBJCE_P(exception)->function_table;
fci.function_name = &fname;
fci.symbol_table = NULL;
fci.object_ptr = exception;
fci.retval_ptr_ptr = &trace;
fci.param_count = 0;
fci.params = NULL;
fci.no_separation = 1;
zend_call_function(&fci, NULL TSRMLS_CC);
/* Discard a non-string trace (e.g. overridden getTraceAsString). */
if (trace && Z_TYPE_P(trace) != IS_STRING) {
zval_ptr_dtor(&trace);
trace = NULL;
}
/* Two formats: with and without a message. "len" is non-zero on the
 * second and later iterations, triggering the "\n\nNext " separator
 * before the previously built text (prev_str). */
if (Z_STRLEN(message) > 0) {
len = zend_spprintf(&str, 0, "exception '%s' with message '%s' in %s:%ld\nStack trace:\n%s%s%s",
Z_OBJCE_P(exception)->name, Z_STRVAL(message), Z_STRVAL(file), Z_LVAL(line),
(trace && Z_STRLEN_P(trace)) ? Z_STRVAL_P(trace) : "#0 {main}\n",
len ? "\n\nNext " : "", prev_str);
} else {
len = zend_spprintf(&str, 0, "exception '%s' in %s:%ld\nStack trace:\n%s%s%s",
Z_OBJCE_P(exception)->name, Z_STRVAL(file), Z_LVAL(line),
(trace && Z_STRLEN_P(trace)) ? Z_STRVAL_P(trace) : "#0 {main}\n",
len ? "\n\nNext " : "", prev_str);
}
/* Release per-iteration temporaries before moving to the next link. */
efree(prev_str);
zval_dtor(&message);
zval_dtor(&file);
zval_dtor(&line);
exception = zend_read_property(default_exception_ce, exception, "previous", sizeof("previous")-1, 0 TSRMLS_CC);
if (trace) {
zval_ptr_dtor(&trace);
}
}
zval_dtor(&fname);
/* We store the result in the private property string so we can access
* the result in uncaught exception handlers without memleaks. */
zend_update_property_string(default_exception_ce, getThis(), "string", sizeof("string")-1, str TSRMLS_CC);
RETURN_STRINGL(str, len, 0);
}
|
Safe
|
[
"CWE-20"
] |
php-src
|
4d2278143a08b7522de9471d0f014d7357c28fea
|
3.178586945246761e+38
| 74 |
Fix #69793 - limit what we accept when unserializing exception
| 0 |
/* xml_parser_free(resource $parser): bool
 * Releases an XML parser resource. Refuses (returns false) while the
 * parser is in the middle of a parse, since freeing it then would pull
 * state out from under the active callbacks. */
PHP_FUNCTION(xml_parser_free)
{
zval *pind;
xml_parser *parser;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &pind) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(parser,xml_parser *, &pind, -1, "XML Parser", le_xml_parser);
/* Cannot destroy a parser that is currently executing callbacks. */
if (parser->isparsing == 1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Parser cannot be freed while it is parsing.");
RETURN_FALSE;
}
/* Deleting the resource list entry runs the parser's destructor. */
if (zend_list_delete(parser->index) == FAILURE) {
RETURN_FALSE;
}
RETVAL_TRUE;
}
|
Safe
|
[
"CWE-787"
] |
php-src
|
7d163e8a0880ae8af2dd869071393e5dc07ef271
|
1.414244147586592e+38
| 22 |
truncate results at depth of 255 to prevent corruption
| 0 |
/*
 * Install or tear down the eventfd used to signal MSI interrupts for a vGPU.
 *
 * DATA_EVENTFD: *data holds an eventfd fd supplied by userspace; a ctx
 * reference is taken via eventfd_ctx_fdget() and stored in the vdev.
 * DATA_NONE with count == 0: disable the trigger.
 *
 * Returns 0 on success or a negative errno from eventfd_ctx_fdget().
 */
static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		/* Drop any previously installed context first; overwriting
		 * the pointer directly would leak its eventfd reference when
		 * userspace re-arms the trigger. */
		intel_vgpu_release_msi_eventfd_ctx(vgpu);
		vgpu->vdev.msi_trigger = trigger;
	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
		intel_vgpu_release_msi_eventfd_ctx(vgpu);

	return 0;
}
|
Safe
|
[
"CWE-20"
] |
linux
|
51b00d8509dc69c98740da2ad07308b630d3eb7d
|
2.4497854034150933e+37
| 20 |
drm/i915/gvt: Fix mmap range check
This is to fix missed mmap range check on vGPU bar2 region
and only allow to map vGPU allocated GMADDR range, which means
user space should support sparse mmap to get proper offset for
mmap vGPU aperture. And this takes care of actual pgoff in mmap
request as original code always does from beginning of vGPU
aperture.
Fixes: 659643f7d814 ("drm/i915/gvt/kvmgt: add vfio/mdev support to KVMGT")
Cc: "Monroy, Rodrigo Axel" <rodrigo.axel.monroy@intel.com>
Cc: "Orrala Contreras, Alfredo" <alfredo.orrala.contreras@intel.com>
Cc: stable@vger.kernel.org # v4.10+
Reviewed-by: Hang Yuan <hang.yuan@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
| 0 |
// GatherNd over an int8 params tensor of shape {3,2,3} with int32 indices
// of shape {2,2}: each index row (i,j) selects params[i][j], an innermost
// vector of 3 elements, so the output is {-2,2,2} ++ {3,3,-3}.
TEST(GatherNdOpTest, Int8Int32) {
GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int8_t>({1, -1, 1, -2, 2, 2, //
3, 3, -3, -4, -4, 4, //
5, -5, 5, 6, -6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
|
Safe
|
[
"CWE-125"
] |
tensorflow
|
595a65a3e224a0362d7e68c2213acfc2b499a196
|
1.2777962222781011e+38
| 10 |
Return a TFLite error if gather_nd will result in reading invalid memory
PiperOrigin-RevId: 463054033
| 0 |
/* No-op LSM hook stub used when CONFIG_SECURITY is disabled:
 * always permits reading another task's process group ID. */
static inline int security_task_getpgid(struct task_struct *p)
{
return 0;
}
|
Safe
|
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
|
2.862469439722278e+38
| 4 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
| 0 |
/* Validate and install a guest write to CR0.
 * Returns 0 on success, 1 to signal the caller to inject #GP.
 * Checks, in order: upper-32-bit garbage (64-bit only), reserved bits,
 * the illegal NW-without-CD combination, PG-without-PE, and the long-mode
 * / PAE constraints when paging is being enabled. Resets the MMU context
 * when any paging/cache-relevant bit actually changes. */
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
unsigned long old_cr0 = kvm_read_cr0(vcpu);
/* Bits whose change requires rebuilding the shadow/NPT MMU state. */
unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
X86_CR0_CD | X86_CR0_NW;
/* ET is hardwired to 1 on modern CPUs. */
cr0 |= X86_CR0_ET;
#ifdef CONFIG_X86_64
if (cr0 & 0xffffffff00000000UL)
return 1;
#endif
cr0 &= ~CR0_RESERVED_BITS;
/* NW set with CD clear is architecturally invalid. */
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
return 1;
/* Paging requires protected mode. */
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
return 1;
/* Transition from non-paged to paged: enforce long-mode/PAE rules. */
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
if ((vcpu->arch.efer & EFER_LME)) {
int cs_db, cs_l;
/* Entering IA-32e mode requires PAE and a non-64-bit CS. */
if (!is_pae(vcpu))
return 1;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l)
return 1;
} else
#endif
/* PAE paging: the PDPTEs at CR3 must be loadable and valid. */
if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
vcpu->arch.cr3))
return 1;
}
kvm_x86_ops->set_cr0(vcpu, cr0);
if ((cr0 ^ old_cr0) & update_bits)
kvm_mmu_reset_context(vcpu);
return 0;
}
|
Safe
|
[
"CWE-200"
] |
kvm
|
831d9d02f9522e739825a51a11e3bc5aa531a905
|
1.6660555836107285e+38
| 44 |
KVM: x86: fix information leak to userland
Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields unitialized. It leads to leaking of contents of kernel stack
memory. We have to initialize them to zero.
In patch v1 Jan Kiszka suggested to fill reserved fields with zeros
instead of memset'ting the whole struct. It makes sense as these
fields are explicitly marked as padding. No more fields need zeroing.
KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
| 0 |
TinyIntVal() : val(0) {} // default constructor: zero-initialize the wrapped value
|
Safe
|
[
"CWE-200"
] |
incubator-doris
|
246ac4e37aa4da6836b7850cb990f02d1c3725a3
|
1.9266333738486913e+38
| 1 |
[fix] fix a bug of encryption function with iv may return wrong result (#8277)
| 0 |
/* Build the status-line text for a link: "[title] url", fitted to the
 * terminal width (COLS - 1). Control/space characters in the title are
 * normalized to spaces. If the URL does not fit, its middle is elided
 * with ".." ; with multibyte support (USE_M17N) the cut points are
 * adjusted so a wide character is never split. Returns a Str, or NULL
 * when both title and url are absent. */
make_lastline_link(Buffer *buf, char *title, char *url)
{
Str s = NULL, u;
#ifdef USE_M17N
Lineprop *pr;
#endif
ParsedURL pu;
char *p;
int l = COLS - 1, i;
if (title && *title) {
s = Strnew_m_charp("[", title, "]", NULL);
/* Sanitize: the status line must stay on one row. */
for (p = s->ptr; *p; p++) {
if (IS_CNTRL(*p) || IS_SPACE(*p))
*p = ' ';
}
if (url)
Strcat_charp(s, " ");
/* Remaining width available for the URL part. */
l -= get_Str_strwidth(s);
if (l <= 0)
return s;
}
if (!url)
return s;
/* Resolve relative URLs against the buffer's base before display. */
parseURL2(url, &pu, baseURL(buf));
u = parsedURL2Str(&pu);
if (DecodeURL)
u = Strnew_charp(url_decode2(u->ptr, buf));
#ifdef USE_M17N
u = checkType(u, &pr, NULL);
#endif
/* Fits as-is (or too narrow to elide sensibly): append whole URL. */
if (l <= 4 || l >= get_Str_strwidth(u)) {
if (!s)
return u;
Strcat(s, u);
return s;
}
if (!s)
s = Strnew_size(COLS);
/* Keep roughly the first half of the available width ... */
i = (l - 2) / 2;
#ifdef USE_M17N
/* ... backing up so we don't cut inside a wide character. */
while (i && pr[i] & PC_WCHAR2)
i--;
#endif
Strcat_charp_n(s, u->ptr, i);
Strcat_charp(s, "..");
/* ... then the tail that fills the rest of the line. */
i = get_Str_strwidth(u) - (COLS - 1 - get_Str_strwidth(s));
#ifdef USE_M17N
while (i < u->length && pr[i] & PC_WCHAR2)
i++;
#endif
Strcat_charp(s, &u->ptr[i]);
return s;
}
|
Safe
|
[
"CWE-119"
] |
w3m
|
0c3f5d0e0d9269ad47b8f4b061d7818993913189
|
1.9137078194956012e+38
| 54 |
Prevent array index out of bounds for symbol
Bug-Debian: https://github.com/tats/w3m/issues/38
| 0 |
/* Emit the Swift response for an account-metadata PUT.
 * Metadata-specific failures take precedence; an otherwise-successful
 * operation is reported as 204 No Content. */
void RGWPutMetadataAccount_ObjStore_SWIFT::send_response()
{
  const auto meta_ret = handle_metadata_errors(s, op_ret);
  if (meta_ret == op_ret) {
    /* No metadata error mapped: success becomes 204 No Content. */
    if (op_ret == 0) {
      op_ret = STATUS_NO_CONTENT;
    }
    set_req_state_err(s, op_ret);
  } else {
    op_ret = meta_ret;
  }

  dump_errno(s);
  end_header(s, this);
  rgw_flush_formatter_and_reset(s, s->formatter);
}
|
Safe
|
[
"CWE-617"
] |
ceph
|
f44a8ae8aa27ecef69528db9aec220f12492810e
|
1.962576750231955e+38
| 16 |
rgw: RGWSwiftWebsiteHandler::is_web_dir checks empty subdir_name
checking for empty name avoids later assertion in RGWObjectCtx::set_atomic
Fixes: CVE-2021-3531
Reviewed-by: Casey Bodley <cbodley@redhat.com>
Signed-off-by: Casey Bodley <cbodley@redhat.com>
(cherry picked from commit 7196a469b4470f3c8628489df9a41ec8b00a5610)
| 0 |
/* Parse an XML declaration: <?xml VersionInfo EncodingDecl? SDDecl? ?>
 * Caller guarantees '<?xml' is at the current input position.
 * Fills in ctxt->version, may switch the input encoding via
 * xmlParseEncodingDecl(), and records the standalone status in
 * ctxt->input->standalone. Errors are reported through the ctxt
 * error callbacks; parsing of the declaration continues past most
 * recoverable errors. */
xmlParseXMLDecl(xmlParserCtxtPtr ctxt) {
xmlChar *version;
/*
* This value for standalone indicates that the document has an
* XML declaration but it does not have a standalone attribute.
* It will be overwritten later if a standalone attribute is found.
*/
ctxt->input->standalone = -2;
/*
* We know that '<?xml' is here.
*/
SKIP(5);
if (!IS_BLANK_CH(RAW)) {
xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED,
"Blank needed after '<?xml'\n");
}
SKIP_BLANKS;
/*
* We must have the VersionInfo here.
*/
version = xmlParseVersionInfo(ctxt);
if (version == NULL) {
xmlFatalErr(ctxt, XML_ERR_VERSION_MISSING, NULL);
} else {
if (!xmlStrEqual(version, (const xmlChar *) XML_DEFAULT_VERSION)) {
/*
* Changed here for XML-1.0 5th edition
*/
if (ctxt->options & XML_PARSE_OLD10) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNKNOWN_VERSION,
"Unsupported version '%s'\n",
version);
} else {
/* 5th edition: any "1.x" version is accepted with a warning. */
if ((version[0] == '1') && ((version[1] == '.'))) {
xmlWarningMsg(ctxt, XML_WAR_UNKNOWN_VERSION,
"Unsupported version '%s'\n",
version, NULL);
} else {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNKNOWN_VERSION,
"Unsupported version '%s'\n",
version);
}
}
}
/* Take ownership of the version string on the context. */
if (ctxt->version != NULL)
xmlFree((void *) ctxt->version);
ctxt->version = version;
}
/*
* We may have the encoding declaration
*/
if (!IS_BLANK_CH(RAW)) {
if ((RAW == '?') && (NXT(1) == '>')) {
SKIP(2);
return;
}
xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED, "Blank needed here\n");
}
xmlParseEncodingDecl(ctxt);
if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) {
/*
* The XML REC instructs us to stop parsing right here
*/
/* NOTE(review): only XML_ERR_UNSUPPORTED_ENCODING aborts here;
* other encoding-conversion failures fall through — confirm that
* continuing on corrupt input is intended. */
return;
}
/*
* We may have the standalone status.
*/
if ((ctxt->input->encoding != NULL) && (!IS_BLANK_CH(RAW))) {
if ((RAW == '?') && (NXT(1) == '>')) {
SKIP(2);
return;
}
xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED, "Blank needed here\n");
}
/*
* We can grow the input buffer freely at that point
*/
GROW;
SKIP_BLANKS;
ctxt->input->standalone = xmlParseSDDecl(ctxt);
SKIP_BLANKS;
if ((RAW == '?') && (NXT(1) == '>')) {
SKIP(2);
} else if (RAW == '>') {
/* Deprecated old WD ... */
xmlFatalErr(ctxt, XML_ERR_XMLDECL_NOT_FINISHED, NULL);
NEXT;
} else {
/* Malformed terminator: resynchronize at the next '>'. */
xmlFatalErr(ctxt, XML_ERR_XMLDECL_NOT_FINISHED, NULL);
MOVETO_ENDTAG(CUR_PTR);
NEXT;
}
}
|
Vulnerable
|
[
"CWE-119"
] |
libxml2
|
afd27c21f6b36e22682b7da20d726bce2dcb2f43
|
2.7085476062770717e+38
| 103 |
Avoid processing entities after encoding conversion failures
For https://bugzilla.gnome.org/show_bug.cgi?id=756527
and was also raised by Chromium team in the past
When we hit a convwersion failure when switching encoding
it is bestter to stop parsing there, this was treated as a
fatal error but the parser was continuing to process to extract
more errors, unfortunately that makes little sense as the data
is obviously corrupt and can potentially lead to unexpected behaviour.
| 1 |
/* Check whether a device of the given overlay type exists for an OF path.
 * Returns the result of the type-specific existence check, or 0 for an
 * unrecognized overlay type. */
static int of_path_device_type_exists(const char *path,
				      enum overlay_type ovtype)
{
	if (ovtype == PDEV_OVERLAY)
		return of_path_platform_device_exists(path);
	if (ovtype == I2C_OVERLAY)
		return of_path_i2c_client_exists(path);
	return 0;
}
|
Safe
|
[
"CWE-401"
] |
linux
|
e13de8fe0d6a51341671bbe384826d527afe8d44
|
8.041646512181707e+37
| 11 |
of: unittest: fix memory leak in unittest_data_add
In unittest_data_add, a copy buffer is created via kmemdup. This buffer
is leaked if of_fdt_unflatten_tree fails. The release for the
unittest_data buffer is added.
Fixes: b951f9dc7f25 ("Enabling OF selftest to run without machine's devicetree")
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Reviewed-by: Frank Rowand <frowand.list@gmail.com>
Signed-off-by: Rob Herring <robh@kernel.org>
| 0 |
/* ERTM receive entry point: dispatch an incoming frame/event to the
 * handler for the channel's current RX state. A frame whose reqseq is
 * outside the valid window indicates a broken peer, so the connection
 * is torn down with ECONNRESET. Returns the handler's result, or 0. */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
{
int err = 0;
BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
control, skb, event, chan->rx_state);
if (__valid_reqseq(chan, control->reqseq)) {
switch (chan->rx_state) {
case L2CAP_RX_STATE_RECV:
err = l2cap_rx_state_recv(chan, control, skb, event);
break;
case L2CAP_RX_STATE_SREJ_SENT:
err = l2cap_rx_state_srej_sent(chan, control, skb,
event);
break;
case L2CAP_RX_STATE_WAIT_P:
err = l2cap_rx_state_wait_p(chan, control, skb, event);
break;
case L2CAP_RX_STATE_WAIT_F:
err = l2cap_rx_state_wait_f(chan, control, skb, event);
break;
default:
/* shut it down */
break;
}
} else {
BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
control->reqseq, chan->next_tx_seq,
chan->expected_ack_seq);
l2cap_send_disconn_req(chan, ECONNRESET);
}
return err;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
|
1.091175037589205e+38
| 36 |
Bluetooth: Properly check L2CAP config option output buffer length
Validate the output buffer length for L2CAP config requests and responses
to avoid overflowing the stack buffer used for building the option blocks.
Cc: stable@vger.kernel.org
Signed-off-by: Ben Seri <ben@armis.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
/* CPU-online hook: snapshot the current hardware values of all
 * user-return MSRs for this CPU, seeding both the saved host value
 * and the cached current value so later restores are correct. */
static void kvm_user_return_msr_cpu_online(void)
{
unsigned int cpu = smp_processor_id();
struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
u64 value;
int i;
for (i = 0; i < kvm_nr_uret_msrs; ++i) {
/* rdmsrl_safe: tolerate MSRs the CPU doesn't implement. */
rdmsrl_safe(kvm_uret_msrs_list[i], &value);
msrs->values[i].host = value;
msrs->values[i].curr = value;
}
}
|
Safe
|
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
|
1.6007463352811246e+38
| 13 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20211210163625.2886-6-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/* Free a whole namespace list: walks the sibling chain starting at
 * cur, releasing each node with xmlFreeNs(). NULL input is a no-op
 * (optionally reported when DEBUG_TREE is enabled). */
xmlFreeNsList(xmlNsPtr cur) {
    xmlNsPtr next;

    if (cur == NULL) {
#ifdef DEBUG_TREE
        xmlGenericError(xmlGenericErrorContext,
		"xmlFreeNsList : ns == NULL\n");
#endif
	return;
    }
    /* Capture the successor before freeing the current node. */
    for (; cur != NULL; cur = next) {
        next = cur->next;
        xmlFreeNs(cur);
    }
}
|
Safe
|
[
"CWE-20"
] |
libxml2
|
bdd66182ef53fe1f7209ab6535fda56366bd7ac9
|
2.1836617178364096e+37
| 15 |
Avoid building recursive entities
For https://bugzilla.gnome.org/show_bug.cgi?id=762100
When we detect a recusive entity we should really not
build the associated data, moreover if someone bypass
libxml2 fatal errors and still tries to serialize a broken
entity make sure we don't risk to get ito a recursion
* parser.c: xmlParserEntityCheck() don't build if entity loop
were found and remove the associated text content
* tree.c: xmlStringGetNodeList() avoid a potential recursion
| 0 |
/* Format a timestamp with strftime().
 * secs       - seconds since the epoch; 0 yields "".
 * format     - strftime() format string.
 * local_time - non-zero formats in local time, zero in UTC.
 * Returns a pointer to a static buffer (not reentrant/thread-safe),
 * or "" when secs is 0, the time conversion fails, or the formatted
 * result would not fit in the buffer. */
static const char *fmt_date(time_t secs, const char *format, int local_time)
{
	static char buf[64];
	struct tm *tm;

	if (!secs)
		return "";

	tm = local_time ? localtime(&secs) : gmtime(&secs);
	/* localtime()/gmtime() return NULL for unrepresentable times;
	 * the original code would have passed NULL to strftime(). */
	if (!tm)
		return "";

	/* strftime()'s size argument includes room for the terminator;
	 * on overflow it returns 0 and leaves buf indeterminate, so
	 * reset to an empty string rather than returning garbage. */
	if (!strftime(buf, sizeof(buf), format, tm))
		buf[0] = '\0';
	return buf;
}
|
Safe
|
[] |
cgit
|
513b3863d999f91b47d7e9f26710390db55f9463
|
3.071092293364268e+37
| 14 |
ui-shared: prevent malicious filename from injecting headers
| 0 |
/* Translate a sequence of 32-bit-compat ebtables match/watcher/target
 * entries into native layout, appending to the kernel-side buffer held
 * in *state. Returns the total growth in bytes of the translated data
 * relative to the compat input, or a negative errno.
 * base is the start of the userland blob, used for offset validation
 * by compat_mtw_from_user(). */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
unsigned int size_left, enum compat_mwt type,
struct ebt_entries_buf_state *state, const void *base)
{
int growth = 0;
char *buf;
if (size_left == 0)
return 0;
buf = (char *) match32;
while (size_left >= sizeof(*match32)) {
struct ebt_entry_match *match_kern;
int ret;
/* On the sizing pass buf_kern_start is NULL; on the copy pass,
* match_kern points at the native header we are about to emit so
* its match_size can be patched after translation. */
match_kern = (struct ebt_entry_match *) state->buf_kern_start;
if (match_kern) {
char *tmp;
tmp = state->buf_kern_start + state->buf_kern_offset;
match_kern = (struct ebt_entry_match *) tmp;
}
ret = ebt_buf_add(state, buf, sizeof(*match32));
if (ret < 0)
return ret;
size_left -= sizeof(*match32);
/* add padding before match->data (if any) */
ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
if (ret < 0)
return ret;
/* Userland-supplied length must fit in the remaining blob. */
if (match32->match_size > size_left)
return -EINVAL;
size_left -= match32->match_size;
ret = compat_mtw_from_user(match32, type, state, base);
if (ret < 0)
return ret;
/* Native size can only grow relative to the compat size. */
if (WARN_ON(ret < match32->match_size))
return -EINVAL;
growth += ret - match32->match_size;
growth += ebt_compat_entry_padsize();
/* Advance past this compat entry (header + payload). */
buf += sizeof(*match32);
buf += match32->match_size;
if (match_kern)
match_kern->match_size = ret;
/* A target must be the last entry in its block. */
if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
return -EINVAL;
match32 = (struct compat_ebt_entry_mwt *) buf;
}
return growth;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
b71812168571fa55e44cdd0254471331b9c4c4c6
|
1.4132768321681073e+38
| 60 |
netfilter: ebtables: CONFIG_COMPAT: don't trust userland offsets
We need to make sure the offsets are not out of range of the
total size.
Also check that they are in ascending order.
The WARN_ON triggered by syzkaller (it sets panic_on_warn) is
changed to also bail out, no point in continuing parsing.
Briefly tested with simple ruleset of
-A INPUT --limit 1/s' --log
plus jump to custom chains using 32bit ebtables binary.
Reported-by: <syzbot+845a53d13171abf8bf29@syzkaller.appspotmail.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
| 0 |
/* iterate_fd() callback: returns fd + 1 (a non-zero "match") for the
 * first file the SELinux permission check denies, 0 to keep iterating.
 * p is the opaque cred/sid argument forwarded to file_has_perm(). */
static int match_file(const void *p, struct file *file, unsigned fd)
{
return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0;
}
|
Safe
|
[
"CWE-264"
] |
linux
|
7b0d0b40cd78cadb525df760ee4cac151533c2b5
|
6.347361794607345e+37
| 4 |
selinux: Permit bounded transitions under NO_NEW_PRIVS or NOSUID.
If the callee SID is bounded by the caller SID, then allowing
the transition to occur poses no risk of privilege escalation and we can
therefore safely allow the transition to occur. Add this exemption
for both the case where a transition was explicitly requested by the
application and the case where an automatic transition is defined in
policy.
Signed-off-by: Stephen Smalley <sds@tycho.nsa.gov>
Reviewed-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Paul Moore <pmoore@redhat.com>
| 0 |
/* Re-fix the session-dependent parts of all virtual-column, default-value
 * and check-constraint expressions after the TABLE is reused from the
 * table cache. Returns true on error, false on success.
 * Skipped for placeholder table refs, tables whose vcols need no
 * refixing, or read-only lock types. */
bool TABLE::vcol_fix_exprs(THD *thd)
{
if (pos_in_table_list->placeholder() || !s->vcols_need_refixing ||
pos_in_table_list->lock_type < TL_WRITE_ALLOW_WRITE)
return false;
DBUG_ASSERT(pos_in_table_list != thd->lex->first_not_own_table());
bool result= true;
/* Save session state that is temporarily swapped for the refix:
 * items created during fixing must live on expr_arena, and the
 * security context must be the one attached to this table ref. */
Security_context *save_security_ctx= thd->security_ctx;
Query_arena *stmt_backup= thd->stmt_arena;
if (thd->stmt_arena->is_conventional())
thd->stmt_arena= expr_arena;
if (pos_in_table_list->security_ctx)
thd->security_ctx= pos_in_table_list->security_ctx;
/* NOTE(review): sql_mode is not sanitized here; upstream later moved
 * this setup into a dedicated Vcol_expr_context — confirm modes such
 * as NO_BACKSLASH_ESCAPES cannot make fix_session_expr() fail. */
for (Field **vf= vfield; vf && *vf; vf++)
if ((*vf)->vcol_info->fix_session_expr(thd))
goto end;
for (Field **df= default_field; df && *df; df++)
if ((*df)->default_value &&
(*df)->default_value->fix_session_expr(thd))
goto end;
for (Virtual_column_info **cc= check_constraints; cc && *cc; cc++)
if ((*cc)->fix_session_expr(thd))
goto end;
result= false;
end:
/* Always restore the swapped session state, success or failure. */
thd->security_ctx= save_security_ctx;
thd->stmt_arena= stmt_backup;
return result;
}
|
Vulnerable
|
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
|
2.6549573371115897e+38
| 39 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <serg@mariadb.org>
| 1 |
/* GHashFunc for PolkitSystemBusName subjects: hashes the unique D-Bus
 * name string. NOTE(review): assumes system_bus_name->name is non-NULL;
 * g_str_hash() on NULL is undefined — confirm construction guarantees it. */
polkit_system_bus_name_hash (PolkitSubject *subject)
{
PolkitSystemBusName *system_bus_name = POLKIT_SYSTEM_BUS_NAME (subject);
return g_str_hash (system_bus_name->name);
}
|
Safe
|
[
"CWE-754"
] |
polkit
|
a04d13affe0fa53ff618e07aa8f57f4c0e3b9b81
|
1.7304927084829033e+38
| 6 |
GHSL-2021-074: authentication bypass vulnerability in polkit
initial values returned if error caught
| 0 |
/* Undo the PNG "Sub" filter in place: add back to each byte the value
 * of the byte one pixel (bpp bytes) to its left, modulo 256. The first
 * bpp bytes of the row are already unfiltered; prev_row is unused by
 * this filter type. */
png_read_filter_row_sub(png_row_infop row_info, png_bytep row,
    png_const_bytep prev_row)
{
   unsigned int bpp = (row_info->pixel_depth + 7) >> 3;
   png_size_t istop = row_info->rowbytes;
   png_size_t i;

   PNG_UNUSED(prev_row)

   for (i = bpp; i < istop; i++)
      row[i] = (png_byte)(((int)row[i] + (int)row[i - bpp]) & 0xff);
}
|
Safe
|
[
"CWE-369"
] |
libpng
|
2dca15686fadb1b8951cb29b02bad4cae73448da
|
2.3796391258865614e+38
| 16 |
[libpng16] Moved chunk-length check into a png_check_chunk_length() private
function (Suggested by Max Stepin).
| 0 |
/* E-MU USB quirk: program the device sample rate to match the selected
 * audio format and adjust the per-packet offset for high sample rates.
 * Skipped for a playback substream while capture is active on the same
 * interface, so playback cannot change the rate under capture's feet. */
static void set_format_emu_quirk(struct snd_usb_substream *subs,
				 struct audioformat *fmt)
{
	unsigned char rate_id;

	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK &&
	    subs->stream->substream[SNDRV_PCM_STREAM_CAPTURE].interface != -1)
		return;

	/* Map the format's minimum rate to the quirk's rate id;
	 * anything unrecognized falls back to 44.1 kHz. */
	if (fmt->rate_min == 48000)
		rate_id = EMU_QUIRK_SR_48000HZ;
	else if (fmt->rate_min == 88200)
		rate_id = EMU_QUIRK_SR_88200HZ;
	else if (fmt->rate_min == 96000)
		rate_id = EMU_QUIRK_SR_96000HZ;
	else if (fmt->rate_min == 176400)
		rate_id = EMU_QUIRK_SR_176400HZ;
	else if (fmt->rate_min == 192000)
		rate_id = EMU_QUIRK_SR_192000HZ;
	else
		rate_id = EMU_QUIRK_SR_44100HZ;

	snd_emuusb_set_samplerate(subs->stream->chip, rate_id);
	/* >= 176.4 kHz streams carry a 4-byte packet header. */
	subs->pkt_offset_adj = (rate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
}
|
Safe
|
[] |
sound
|
0f886ca12765d20124bd06291c82951fd49a33be
|
1.6404241601380005e+38
| 37 |
ALSA: usb-audio: Fix NULL dereference in create_fixed_stream_quirk()
create_fixed_stream_quirk() may cause a NULL-pointer dereference by
accessing the non-existing endpoint when a USB device with a malformed
USB descriptor is used.
This patch avoids it simply by adding a sanity check of bNumEndpoints
before the accesses.
Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
| 0 |
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
mchunkptr old_top; /* incoming value of av->top */
INTERNAL_SIZE_T old_size; /* its size */
char *old_end; /* its end address */
long size; /* arg to first MORECORE or mmap call */
char *brk; /* return value from MORECORE */
long correction; /* arg to 2nd MORECORE call */
char *snd_brk; /* 2nd return val */
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
char *aligned_brk; /* aligned offset into brk */
mchunkptr p; /* the allocated/returned chunk */
mchunkptr remainder; /* remainder from allocation */
unsigned long remainder_size; /* its size */
size_t pagesize = GLRO (dl_pagesize);
bool tried_mmap = false;
/*
If have mmap, and the request size meets the mmap threshold, and
the system supports mmap, and there are few enough currently
allocated mmapped regions, try to directly map this request
rather than expanding top.
*/
if (av == NULL
|| ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
&& (mp_.n_mmaps < mp_.n_mmaps_max)))
{
char *mm; /* return value from mmap call*/
try_mmap:
/*
Round up size to nearest page. For mmapped chunks, the overhead
is one SIZE_SZ unit larger than for normal chunks, because there
is no following chunk whose prev_size field could be used.
See the front_misalign handling below, for glibc there is no
need for further alignments unless we have have high alignment.
*/
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
size = ALIGN_UP (nb + SIZE_SZ, pagesize);
else
size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
tried_mmap = true;
/* Don't try if size wraps around 0 */
if ((unsigned long) (size) > (unsigned long) (nb))
{
mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
if (mm != MAP_FAILED)
{
/*
The offset to the start of the mmapped region is stored
in the prev_size field of the chunk. This allows us to adjust
returned start address to meet alignment requirements here
and in memalign(), and still be able to compute proper
address argument for later munmap in free() and realloc().
*/
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
{
/* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
front_misalign = 0;
}
else
front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
{
correction = MALLOC_ALIGNMENT - front_misalign;
p = (mchunkptr) (mm + correction);
set_prev_size (p, correction);
set_head (p, (size - correction) | IS_MMAPPED);
}
else
{
p = (mchunkptr) mm;
set_prev_size (p, 0);
set_head (p, size | IS_MMAPPED);
}
/* update statistics */
int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
atomic_max (&mp_.max_n_mmaps, new);
unsigned long sum;
sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
atomic_max (&mp_.max_mmapped_mem, sum);
check_chunk (av, p);
return chunk2mem (p);
}
}
}
/* There are no usable arenas and mmap also failed. */
if (av == NULL)
return 0;
/* Record incoming configuration of top */
old_top = av->top;
old_size = chunksize (old_top);
old_end = (char *) (chunk_at_offset (old_top, old_size));
brk = snd_brk = (char *) (MORECORE_FAILURE);
/*
If not the first time through, we require old_size to be
at least MINSIZE and to have prev_inuse set.
*/
assert ((old_top == initial_top (av) && old_size == 0) ||
((unsigned long) (old_size) >= MINSIZE &&
prev_inuse (old_top) &&
((unsigned long) old_end & (pagesize - 1)) == 0));
/* Precondition: not enough current space to satisfy nb request */
assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
if (av != &main_arena)
{
heap_info *old_heap, *heap;
size_t old_heap_size;
/* First try to extend the current heap. */
old_heap = heap_for_ptr (old_top);
old_heap_size = old_heap->size;
if ((long) (MINSIZE + nb - old_size) > 0
&& grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
{
av->system_mem += old_heap->size - old_heap_size;
set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
| PREV_INUSE);
}
else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
{
/* Use a newly allocated heap. */
heap->ar_ptr = av;
heap->prev = old_heap;
av->system_mem += heap->size;
/* Set up the new top. */
top (av) = chunk_at_offset (heap, sizeof (*heap));
set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
/* Setup fencepost and free the old top chunk with a multiple of
MALLOC_ALIGNMENT in size. */
/* The fencepost takes at least MINSIZE bytes, because it might
become the top chunk again later. Note that a footer is set
up, too, although the chunk is marked in use. */
old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
if (old_size >= MINSIZE)
{
set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
_int_free (av, old_top, 1);
}
else
{
set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
set_foot (old_top, (old_size + 2 * SIZE_SZ));
}
}
else if (!tried_mmap)
/* We can at least try to use to mmap memory. */
goto try_mmap;
}
else /* av == main_arena */
{ /* Request enough space for nb + pad + overhead */
size = nb + mp_.top_pad + MINSIZE;
/*
If contiguous, we can subtract out existing space that we hope to
combine with new space. We add it back later only if
we don't actually get contiguous space.
*/
if (contiguous (av))
size -= old_size;
/*
Round to a multiple of page size.
If MORECORE is not contiguous, this ensures that we only call it
with whole-page arguments. And if MORECORE is contiguous and
this is not first time through, this preserves page-alignment of
previous calls. Otherwise, we correct to page-align below.
*/
size = ALIGN_UP (size, pagesize);
/*
Don't try to call MORECORE if argument is so big as to appear
negative. Note that since mmap takes size_t arg, it may succeed
below even if we cannot call MORECORE.
*/
if (size > 0)
{
brk = (char *) (MORECORE (size));
LIBC_PROBE (memory_sbrk_more, 2, brk, size);
}
if (brk != (char *) (MORECORE_FAILURE))
{
/* Call the `morecore' hook if necessary. */
void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
if (__builtin_expect (hook != NULL, 0))
(*hook)();
}
else
{
/*
If have mmap, try using it as a backup when MORECORE fails or
cannot be used. This is worth doing on systems that have "holes" in
address space, so sbrk cannot extend to give contiguous space, but
space is available elsewhere. Note that we ignore mmap max count
and threshold limits, since the space will not be used as a
segregated mmap region.
*/
/* Cannot merge with old top, so add its size back in */
if (contiguous (av))
size = ALIGN_UP (size + old_size, pagesize);
/* If we are relying on mmap as backup, then use larger units */
if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
size = MMAP_AS_MORECORE_SIZE;
/* Don't try if size wraps around 0 */
if ((unsigned long) (size) > (unsigned long) (nb))
{
char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
if (mbrk != MAP_FAILED)
{
/* We do not need, and cannot use, another sbrk call to find end */
brk = mbrk;
snd_brk = brk + size;
/*
Record that we no longer have a contiguous sbrk region.
After the first time mmap is used as backup, we do not
ever rely on contiguous space since this could incorrectly
bridge regions.
*/
set_noncontiguous (av);
}
}
}
if (brk != (char *) (MORECORE_FAILURE))
{
if (mp_.sbrk_base == 0)
mp_.sbrk_base = brk;
av->system_mem += size;
/*
If MORECORE extends previous space, we can likewise extend top size.
*/
if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
set_head (old_top, (size + old_size) | PREV_INUSE);
else if (contiguous (av) && old_size && brk < old_end)
/* Oops! Someone else killed our space.. Can't touch anything. */
malloc_printerr ("break adjusted to free malloc space");
/*
Otherwise, make adjustments:
* If the first time through or noncontiguous, we need to call sbrk
just to find out where the end of memory lies.
* We need to ensure that all returned chunks from malloc will meet
MALLOC_ALIGNMENT
* If there was an intervening foreign sbrk, we need to adjust sbrk
request size to account for fact that we will not be able to
combine new space with existing space in old_top.
* Almost all systems internally allocate whole pages at a time, in
which case we might as well use the whole last page of request.
So we allocate enough more memory to hit a page boundary now,
which in turn causes future contiguous calls to page-align.
*/
else
{
front_misalign = 0;
end_misalign = 0;
correction = 0;
aligned_brk = brk;
/* handle contiguous cases */
if (contiguous (av))
{
/* Count foreign sbrk as system_mem. */
if (old_size)
av->system_mem += brk - old_end;
/* Guarantee alignment of first new chunk made from this space */
front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
{
/*
Skip over some bytes to arrive at an aligned position.
We don't need to specially mark these wasted front bytes.
They will never be accessed anyway because
prev_inuse of av->top (and any chunk created from its start)
is always true after initialization.
*/
correction = MALLOC_ALIGNMENT - front_misalign;
aligned_brk += correction;
}
/*
If this isn't adjacent to existing space, then we will not
be able to merge with old_top space, so must add to 2nd request.
*/
correction += old_size;
/* Extend the end address to hit a page boundary */
end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
assert (correction >= 0);
snd_brk = (char *) (MORECORE (correction));
/*
If can't allocate correction, try to at least find out current
brk. It might be enough to proceed without failing.
Note that if second sbrk did NOT fail, we assume that space
is contiguous with first sbrk. This is a safe assumption unless
program is multithreaded but doesn't use locks and a foreign sbrk
occurred between our first and second calls.
*/
if (snd_brk == (char *) (MORECORE_FAILURE))
{
correction = 0;
snd_brk = (char *) (MORECORE (0));
}
else
{
/* Call the `morecore' hook if necessary. */
void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
if (__builtin_expect (hook != NULL, 0))
(*hook)();
}
}
/* handle non-contiguous cases */
else
{
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
/* MORECORE/mmap must correctly align */
assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
else
{
front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
{
/*
Skip over some bytes to arrive at an aligned position.
We don't need to specially mark these wasted front bytes.
They will never be accessed anyway because
prev_inuse of av->top (and any chunk created from its start)
is always true after initialization.
*/
aligned_brk += MALLOC_ALIGNMENT - front_misalign;
}
}
/* Find out current end of memory */
if (snd_brk == (char *) (MORECORE_FAILURE))
{
snd_brk = (char *) (MORECORE (0));
}
}
/* Adjust top based on results of second sbrk */
if (snd_brk != (char *) (MORECORE_FAILURE))
{
av->top = (mchunkptr) aligned_brk;
set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
av->system_mem += correction;
/*
If not the first time through, we either have a
gap due to foreign sbrk or a non-contiguous region. Insert a
double fencepost at old_top to prevent consolidation with space
we don't own. These fenceposts are artificial chunks that are
marked as inuse and are in any case too small to use. We need
two to make sizes and alignments work out.
*/
if (old_size != 0)
{
/*
Shrink old_top to insert fenceposts, keeping size a
multiple of MALLOC_ALIGNMENT. We know there is at least
enough space in old_top to do this.
*/
old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
set_head (old_top, old_size | PREV_INUSE);
/*
Note that the following assignments completely overwrite
old_top when old_size was previously MINSIZE. This is
intentional. We need the fencepost, even if old_top otherwise gets
lost.
*/
set_head (chunk_at_offset (old_top, old_size),
(2 * SIZE_SZ) | PREV_INUSE);
set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
(2 * SIZE_SZ) | PREV_INUSE);
/* If possible, release the rest. */
if (old_size >= MINSIZE)
{
_int_free (av, old_top, 1);
}
}
}
}
}
} /* if (av != &main_arena) */
if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
av->max_system_mem = av->system_mem;
check_malloc_state (av);
/* finally, do the allocation */
p = av->top;
size = chunksize (p);
/* check that one of the above allocation paths succeeded */
if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
{
remainder_size = size - nb;
remainder = chunk_at_offset (p, nb);
av->top = remainder;
set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
set_head (remainder, remainder_size | PREV_INUSE);
check_malloced_chunk (av, p, nb);
return chunk2mem (p);
}
/* catch all failure paths */
__set_errno (ENOMEM);
return 0;
}
|
Safe
|
[
"CWE-787"
] |
glibc
|
d6db68e66dff25d12c3bc5641b60cbd7fb6ab44f
|
1.2659970862563668e+38
| 475 |
malloc: Mitigate null-byte overflow attacks
* malloc/malloc.c (_int_free): Check for corrupt prev_size vs size.
(malloc_consolidate): Likewise.
| 0 |
cockpit_auth_new (gboolean login_loopback,
                  CockpitAuthFlags flags)
{
  CockpitAuth *self = g_object_new (COCKPIT_TYPE_AUTH, NULL);
  const gchar *spec;
  gint parsed = 0;

  self->flags = flags;
  self->login_loopback = login_loopback;

  /* A command-line override takes precedence over the config file. */
  if (cockpit_ws_max_startups != NULL)
    spec = cockpit_ws_max_startups;
  else
    spec = cockpit_conf_string ("WebService", "MaxStartups");

  /* Start from the built-in defaults; a valid spec overrides them below. */
  self->max_startups = max_startups;
  self->max_startups_begin = max_startups;
  self->max_startups_rate = 100;

  if (spec)
    {
      parsed = sscanf (spec, "%u:%u:%u",
                       &self->max_startups_begin,
                       &self->max_startups_rate,
                       &self->max_startups);

      /* If fewer than three numbers were given, treat the
       * first as a plain hard limit */
      if (parsed == 1 || parsed == 2)
        {
          self->max_startups = self->max_startups_begin;
          self->max_startups_rate = 100;
        }

      if (parsed < 1 || parsed > 3 ||
          self->max_startups_begin > self->max_startups ||
          self->max_startups_rate > 100 || self->max_startups_rate < 1)
        {
          g_warning ("Illegal MaxStartups spec: %s. Reverting to defaults", spec);
          self->max_startups = max_startups;
          self->max_startups_begin = max_startups;
          self->max_startups_rate = 100;
        }
    }

  return self;
}
|
Safe
|
[
"CWE-1021"
] |
cockpit
|
46f6839d1af4e662648a85f3e54bba2d57f39f0e
|
2.8412557950381738e+38
| 47 |
ws: Restrict our cookie to the login host only
Mark our cookie as `SameSite: Strict` [1]. The current `None` default
will soon be moved to `Lax` by Firefox and Chromium, and recent versions
started to throw a warning about it.
[1] https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite
https://bugzilla.redhat.com/show_bug.cgi?id=1891944
| 0 |
TEST(IndexBoundsBuilderTest, TranslateLteNegativeInfinity) {
    auto entry = buildSimpleIndexEntry();
    BSONObj query = fromjson("{a: {$lte: -Infinity}}");
    auto matchExpr = parseMatchExpression(query);
    BSONElement firstElt = query.firstElement();
    OrderedIntervalList bounds;
    IndexBoundsBuilder::BoundsTightness tightness;
    IndexBoundsBuilder::translate(matchExpr.get(), firstElt, entry, &bounds, &tightness);

    // {$lte: -Infinity} can only ever match -Infinity itself, so the bounds
    // must collapse to the single closed point interval [-Inf, -Inf].
    ASSERT_EQUALS(bounds.name, "a");
    ASSERT_EQUALS(bounds.intervals.size(), 1U);
    ASSERT_EQUALS(
        Interval::INTERVAL_EQUALS,
        bounds.intervals[0].compare(
            Interval(fromjson("{'': -Infinity, '': -Infinity}"), true, true)));
    ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
}
|
Safe
|
[
"CWE-754"
] |
mongo
|
f8f55e1825ee5c7bdb3208fc7c5b54321d172732
|
5.5087984731588e+37
| 15 |
SERVER-44377 generate correct plan for indexed inequalities to null
| 0 |
}
/* Parse the value of the sdtp import option.
 * Maps the string argument onto the file-scope sdtp_in_traf mode:
 *   "both" -> 2, "sdtp" -> 1, anything else -> 0.
 * Comparison is case-insensitive (stricmp).
 * 'opt' is unused by this parser.  Always returns 0 (success). */
u32 parse_sdtp(char *arg_val, u32 opt)
{
if (!stricmp(arg_val, "both")) sdtp_in_traf = 2;
else if (!stricmp(arg_val, "sdtp")) sdtp_in_traf = 1;
else sdtp_in_traf = 0;
return 0;
|
Safe
|
[
"CWE-787"
] |
gpac
|
4e56ad72ac1afb4e049a10f2d99e7512d7141f9d
|
2.3705059505720593e+38
| 7 |
fixed #2216
| 0 |
// Inequality: true when the wrapped lower-cased strings differ.
bool operator!=(const LowerCaseString& rhs) const { return rhs.string_ != string_; }
|
Safe
|
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
|
1.9535025312875477e+37
| 1 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similiarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <mklein@lyft.com>
| 0 |
/* Assemble an x86 "xchg" instruction into 'data'.
 * Handles three forms: xchg r/m,reg (0x87 + ModRM [+ disp8/disp32]),
 * the short xchg eax,reg form (0x90+r), and xchg reg,reg (0x87, mod=3).
 * Returns the number of bytes emitted, or -1 if the operand combination
 * cannot be encoded. */
static int opxchg(RAsm *a, ut8 *data, const Opcode *op) {
	int l = 0;
	int mod_byte = 0;
	int reg = 0;
	int rm = 0;
	st32 offset = 0;

	if (op->operands[0].type & OT_MEMORY || op->operands[1].type & OT_MEMORY) {
		data[l++] = 0x87;
		if (op->operands[0].type & OT_MEMORY) {
			rm = op->operands[0].regs[0];
			offset = op->operands[0].offset * op->operands[0].offset_sign;
			reg = op->operands[1].reg;
		} else if (op->operands[1].type & OT_MEMORY) {
			rm = op->operands[1].regs[0];
			offset = op->operands[1].offset * op->operands[1].offset_sign;
			reg = op->operands[0].reg;
		}
		/* Reject malformed operands (unparsed registers are -1); a
		 * negative value here would corrupt the ModRM byte below. */
		if (reg < 0 || rm < 0) {
			return -1;
		}
		if (offset) {
			mod_byte = 1;
			if (offset < ST8_MIN || offset > ST8_MAX) {
				mod_byte = 2;
			}
		}
	} else {
		if (op->operands[0].reg == X86R_EAX &&
		    op->operands[1].type & OT_GPREG) {
			/* Short form: xchg eax, r32 -> 0x90+r */
			data[l++] = 0x90 + op->operands[1].reg;
			return l;
		} else if (op->operands[1].reg == X86R_EAX &&
			   op->operands[0].type & OT_GPREG) {
			data[l++] = 0x90 + op->operands[0].reg;
			return l;
		} else if (op->operands[0].type & OT_GPREG &&
			   op->operands[1].type & OT_GPREG) {
			mod_byte = 3;
			data[l++] = 0x87;
			reg = op->operands[1].reg;
			rm = op->operands[0].reg;
		} else {
			/* No encodable form matched: fail instead of emitting a
			 * lone ModRM byte with no opcode (previously produced
			 * a bogus one-byte encoding for invalid input). */
			return -1;
		}
	}
	data[l++] = mod_byte << 6 | reg << 3 | rm;
	if (mod_byte > 0 && mod_byte < 3) {
		data[l++] = offset;
		if (mod_byte == 2) {
			data[l++] = offset >> 8;
			data[l++] = offset >> 16;
			data[l++] = offset >> 24;
		}
	}
	return l;
}
|
Safe
|
[
"CWE-119",
"CWE-125",
"CWE-787"
] |
radare2
|
9b46d38dd3c4de6048a488b655c7319f845af185
|
1.3365830088522608e+38
| 52 |
Fix #12372 and #12373 - Crash in x86 assembler (#12380)
0 ,0,[bP-bL-bP-bL-bL-r-bL-bP-bL-bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
leA ,0,[bP-bL-bL-bP-bL-bP-bL-60@bL-
leA ,0,[bP-bL-r-bP-bL-bP-bL-60@bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
| 0 |
// Lightweight reader over a ragged feature: flat values plus row-split
// offsets.  Stores Eigen flat views into the given tensors, so both
// tensors must outlive this object (no data is copied).
RaggedFeatureReader(const Tensor& values, const Tensor& row_splits)
: values_(values.flat<ValuesType>()),
row_splits_(row_splits.flat<SplitsType>()) {}
|
Safe
|
[
"CWE-125",
"CWE-369"
] |
tensorflow
|
44b7f486c0143f68b56c34e2d01e146ee445134a
|
1.1949083941088983e+38
| 3 |
Fix out of bounds read in `ragged_cross_op.cc`.
PiperOrigin-RevId: 369757702
Change-Id: Ie6e5d2c21513a8d56bf41fcf35960caf76e890f9
| 0 |
/* Return 1 when the table looks like a kernel mount tree: its last entry
 * is a kernel filesystem with a root field set.  Returns 0 otherwise
 * (including when the iterator cannot be allocated). */
static int tab_is_tree(struct libmnt_table *tb)
{
	struct libmnt_iter *itr = mnt_new_iter(MNT_ITER_BACKWARD);
	struct libmnt_fs *fs = NULL;
	int is_tree = 0;

	if (!itr)
		return 0;

	if (mnt_table_next_fs(tb, itr, &fs) == 0
	    && mnt_fs_is_kernel(fs)
	    && mnt_fs_get_root(fs))
		is_tree = 1;

	mnt_free_iter(itr);
	return is_tree;
}
|
Safe
|
[
"CWE-552",
"CWE-703"
] |
util-linux
|
166e87368ae88bf31112a30e078cceae637f4cdb
|
8.729418132441363e+37
| 17 |
libmount: remove support for deleted mount table entries
The "(deleted)" suffix has been originally used by kernel for deleted
mountpoints. Since kernel commit 9d4d65748a5ca26ea8650e50ba521295549bf4e3
(Dec 2014) kernel does not use this suffix for mount stuff in /proc at
all. Let's remove this support from libmount too.
Signed-off-by: Karel Zak <kzak@redhat.com>
| 0 |
QPDFObjectHandle::getKey(std::string const& key)
{
    // Fast path: real dictionaries delegate directly.
    if (isDictionary())
    {
        return dynamic_cast<QPDF_Dictionary*>(
            m->obj.getPointer())->getKey(key);
    }

    // Type error: warn and return null, carrying a description of where
    // the bogus lookup happened when one is available.
    typeWarning(
        "dictionary", "returning null for attempted key retrieval");
    QTC::TC("qpdf", "QPDFObjectHandle dictionary null for getKey");
    QPDFObjectHandle result = newNull();
    QPDF* qpdf = 0;
    std::string description;
    if (this->m->obj->getDescription(qpdf, description))
    {
        result.setObjectDescription(
            qpdf,
            description +
            " -> null returned from getting key " +
            key + " from non-Dictionary");
    }
    return result;
}
|
Safe
|
[
"CWE-399",
"CWE-674"
] |
qpdf
|
b4d6cf6836ce025ba1811b7bbec52680c7204223
|
2.698765293173815e+38
| 27 |
Limit depth of nesting in direct objects (fixes #202)
This fixes CVE-2018-9918.
| 0 |
// Round each lane of a float32x4 to the nearest integer and convert to
// int32x4.
inline int32x4_t RoundToNearest(const float32x4_t input) {
#if defined(__aarch64__) || defined(__SSSE3__)
// Note: vcvtnq_s32_f32 is not available in ARMv7
return vcvtnq_s32_f32(input);
#else
// ARMv7 fallback: vcvtq_s32_f32 truncates toward zero, so bias each lane
// by +0.5 (or -0.5 for negative lanes) before the truncating conversion.
// NOTE(review): this rounds halfway cases away from zero, while vcvtnq
// rounds to nearest-even — the two paths can differ on exact .5 inputs.
static const float32x4_t zero_val_dup = vdupq_n_f32(0.0f);
static const float32x4_t point5_val_dup = vdupq_n_f32(0.5f);
static const float32x4_t minus_point5_val_dup = vdupq_n_f32(-0.5f);
// Per-lane mask: true where the input lane is negative.
const uint32x4_t mask = vcltq_f32(input, zero_val_dup);
const float32x4_t round =
vbslq_f32(mask, minus_point5_val_dup, point5_val_dup);
return vcvtq_s32_f32(vaddq_f32(input, round));
#endif // defined(__aarch64__) || defined(__SSSE3__)
}
|
Safe
|
[
"CWE-476",
"CWE-369"
] |
tensorflow
|
15691e456c7dc9bd6be203b09765b063bf4a380c
|
2.0698703378354146e+37
| 15 |
Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9
| 0 |
/* True when DHCPv6 is configured for this link.  Loopback devices and
 * links without a .network file never run DHCPv6. */
bool link_dhcp6_enabled(Link *link) {
        if (link->flags & IFF_LOOPBACK)
                return false;

        return link->network && (link->network->dhcp & ADDRESS_FAMILY_IPV6);
}
|
Safe
|
[
"CWE-120"
] |
systemd
|
f5a8c43f39937d97c9ed75e3fe8621945b42b0db
|
7.551479434787745e+37
| 9 |
networkd: IPv6 router discovery - follow IPv6AcceptRouterAdvertisemnt=
The previous behavior:
When DHCPv6 was enabled, router discover was performed first, and then DHCPv6 was
enabled only if the relevant flags were passed in the Router Advertisement message.
Moreover, router discovery was performed even if AcceptRouterAdvertisements=false,
moreover, even if router advertisements were accepted (by the kernel) the flags
indicating that DHCPv6 should be performed were ignored.
New behavior:
If RouterAdvertisements are accepted, and either no routers are found, or an
advertisement is received indicating DHCPv6 should be performed, the DHCPv6
client is started. Moreover, the DHCP option now truly enables the DHCPv6
client regardless of router discovery (though it will probably not be
very useful to get a lease withotu any routes, this seems the more consistent
approach).
The recommended default setting should be to set DHCP=ipv4 and to leave
IPv6AcceptRouterAdvertisements unset.
| 0 |
/* Migrate a node page during garbage collection.
 *
 * FG_GC (foreground): write the page out synchronously so its block can be
 * reclaimed immediately.  Otherwise (background GC): just mark the page
 * dirty and let regular writeback handle it later.
 *
 * Consumes one reference on node_page: the page is unlocked and put before
 * returning on every path. */
void move_node_page(struct page *node_page, int gc_type)
{
if (gc_type == FG_GC) {
struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 1,
.for_reclaim = 0,
};
/* Redirty, then wait out any in-flight writeback before writing. */
set_page_dirty(node_page);
f2fs_wait_on_page_writeback(node_page, NODE, true);
f2fs_bug_on(sbi, PageWriteback(node_page));
if (!clear_page_dirty_for_io(node_page))
goto out_page;
/* NOTE(review): on writepage error we unlock here — assumes the error
 * path leaves the page locked while success unlocks it; confirm against
 * the a_ops->writepage contract for this mapping. */
if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
unlock_page(node_page);
goto release_page;
} else {
/* set page dirty and write it */
if (!PageWriteback(node_page))
set_page_dirty(node_page);
}
out_page:
unlock_page(node_page);
release_page:
f2fs_put_page(node_page, 0);
}
|
Safe
|
[
"CWE-200",
"CWE-362"
] |
linux
|
30a61ddf8117c26ac5b295e1233eaa9629a94ca3
|
8.3361004035967565e+37
| 30 |
f2fs: fix race condition in between free nid allocator/initializer
In below concurrent case, allocated nid can be loaded into free nid cache
and be allocated again.
Thread A Thread B
- f2fs_create
- f2fs_new_inode
- alloc_nid
- __insert_nid_to_list(ALLOC_NID_LIST)
- f2fs_balance_fs_bg
- build_free_nids
- __build_free_nids
- scan_nat_page
- add_free_nid
- __lookup_nat_cache
- f2fs_add_link
- init_inode_metadata
- new_inode_page
- new_node_page
- set_node_addr
- alloc_nid_done
- __remove_nid_from_list(ALLOC_NID_LIST)
- __insert_nid_to_list(FREE_NID_LIST)
This patch makes nat cache lookup and free nid list operation being atomical
to avoid this race condition.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
| 0 |
xmlRelaxNGNewGrammar(xmlRelaxNGParserCtxtPtr ctxt)
{
    xmlRelaxNGGrammarPtr grammar;

    /* Allocate and zero-initialize a fresh grammar structure; report an
     * out-of-memory condition through the parser context on failure. */
    grammar = (xmlRelaxNGGrammarPtr) xmlMalloc(sizeof(*grammar));
    if (grammar == NULL) {
        xmlRngPErrMemory(ctxt, NULL);
        return (NULL);
    }
    memset(grammar, 0, sizeof(*grammar));

    return (grammar);
}
|
Safe
|
[
"CWE-134"
] |
libxml2
|
502f6a6d08b08c04b3ddfb1cd21b2f699c1b7f5b
|
3.354076216670089e+38
| 13 |
More format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
adds a new xmlEscapeFormatString() function to escape composed format
strings
| 0 |
set_client_comp_method(gnutls_session_t session,
		       uint8_t comp_method)
{
	uint8_t methods[MAX_ALGOS];
	int num, i;
	int found = 0;
	int id = _gnutls_compression_get_id(comp_method);

	_gnutls_handshake_log
	    ("HSK[%p]: Selected compression method: %s (%d)\n", session,
	     gnutls_compression_get_name(id), (int) comp_method);

	/* Collect the compression methods we advertised for this session. */
	num = _gnutls_supported_compression_methods(session, methods,
						    MAX_ALGOS);
	if (num < 0) {
		gnutls_assert();
		return num;
	}

	/* The server's choice must be one of the methods we offered. */
	for (i = 0; i < num; i++) {
		if (methods[i] == comp_method) {
			found = 1;
			break;
		}
	}

	if (!found) {
		gnutls_assert();
		return GNUTLS_E_UNKNOWN_COMPRESSION_ALGORITHM;
	}

	session->security_parameters.compression_method = id;
	_gnutls_epoch_set_compression(session, EPOCH_NEXT, id);

	return 0;
}
|
Safe
|
[
"CWE-310"
] |
gnutls
|
db9a7d810f9ee4c9cc49731f5fd9bdeae68d7eaa
|
3.313962052038929e+38
| 37 |
handshake: check for TLS_FALLBACK_SCSV
If TLS_FALLBACK_SCSV was sent by the client during the handshake, and
the advertised protocol version is lower than GNUTLS_TLS_VERSION_MAX,
send the "Inappropriate fallback" fatal alert and abort the handshake.
This mechanism was defined in RFC7507.
| 0 |
// End-of-data check: with an IO cache, compare the current row offset
// against the cache's end; otherwise compare in-memory cache cursors.
bool at_eof()
{
    return io_cache ? rownum * ref_length >= io_cache->end_of_file
                    : cache_pos == cache_end;
}
|
Safe
|
[] |
server
|
ba4927e520190bbad763bb5260ae154f29a61231
|
2.775312591062792e+38
| 9 |
MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ...
Window Functions code tries to minimize the number of times it
needs to sort the select's resultset by finding "compatible"
OVER (PARTITION BY ... ORDER BY ...) clauses.
This employs compare_order_elements(). That function assumed that
the order expressions are Item_field-derived objects (that refer
to a temp.table). But this is not always the case: one can
construct queries order expressions are arbitrary item expressions.
Add handling for such expressions: sort them according to the window
specification they appeared in.
This means we cannot detect that two compatible PARTITION BY clauses
that use expressions can share the sorting step.
But at least we won't crash.
| 0 |
/* Clear a text sample: release its text buffer, zero its length, and
 * reset its style records.  Returns GF_BAD_PARAM on a NULL sample,
 * otherwise the result of resetting the styles. */
GF_Err gf_isom_text_reset(GF_TextSample *samp)
{
	if (samp == NULL)
		return GF_BAD_PARAM;

	if (samp->text != NULL) {
		gf_free(samp->text);
		samp->text = NULL;
	}
	samp->len = 0;

	return gf_isom_text_reset_styles(samp);
}
|
Safe
|
[
"CWE-476"
] |
gpac
|
d527325a9b72218612455a534a508f9e1753f76e
|
1.088147763683045e+38
| 8 |
fixed #1768
| 0 |
/**
 * kblockd_schedule_work_on - queue block-layer work on a specific CPU
 * @cpu:  CPU the work should run on
 * @work: work item to queue
 *
 * Thin wrapper that queues @work on the kblockd workqueue pinned to @cpu.
 * Returns the result of queue_work_on() (nonzero if the work was queued,
 * zero if it was already pending).
 */
int kblockd_schedule_work_on(int cpu, struct work_struct *work)
{
return queue_work_on(cpu, kblockd_workqueue, work);
}
|
Safe
|
[
"CWE-416",
"CWE-703"
] |
linux
|
54648cf1ec2d7f4b6a71767799c45676a138ca24
|
1.9254043454654435e+38
| 4 |
block: blk_init_allocated_queue() set q->fq as NULL in the fail case
We find the memory use-after-free issue in __blk_drain_queue()
on the kernel 4.14. After read the latest kernel 4.18-rc6 we
think it has the same problem.
Memory is allocated for q->fq in the blk_init_allocated_queue().
If the elevator init function called with error return, it will
run into the fail case to free the q->fq.
Then the __blk_drain_queue() uses the same memory after the free
of the q->fq, it will lead to the unpredictable event.
The patch is to set q->fq as NULL in the fail case of
blk_init_allocated_queue().
Fixes: commit 7c94e1c157a2 ("block: introduce blk_flush_queue to drive flush machinery")
Cc: <stable@vger.kernel.org>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: xiao jin <jin.xiao@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.