project (string, 633 classes) | commit_id (string, 7-81 chars) | target (int64, 0-1) | func (string, 5-484k chars) | cwe (string, 131 classes) | big_vul_idx (float64, 0-189k, nullable) | idx (int64, 0-522k) | hash (string, 34-39 chars) | size (float64, 1-24k, nullable) | message (string, 0-11.5k chars, nullable) | dataset (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
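The columns above follow what looks like a Hugging Face dataset-viewer export of a vulnerability-detection corpus: each row pairs a C function (`func`) with a binary label (`target`; in the rows shown here it is always 1, which appears to mark the vulnerable version) and, where available, a CWE tag (`cwe`). As a minimal sketch of how a table with this schema is typically consumed, the snippet below uses the `datasets` library; the repository id `example-org/vul-detect-corpus` is a placeholder, since this excerpt does not name the actual dataset.

```python
# Minimal sketch, assuming a placeholder repository id (not the real one).
from collections import Counter

from datasets import load_dataset

ds = load_dataset("example-org/vul-detect-corpus", split="train")  # hypothetical id

# Keep vulnerable samples (target == 1) that carry a CWE label.
vuln = ds.filter(lambda row: row["target"] == 1 and row["cwe"] is not None)

# Tally samples per CWE class, mirroring the `cwe` column above.
print(Counter(row["cwe"] for row in vuln).most_common(5))
```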
linux | 92f28d973cce45ef5823209aab3138eb45d8b349 | 1 |
static __inline__ int scm_check_creds(struct ucred *creds)
{
const struct cred *cred = current_cred();
kuid_t uid = make_kuid(cred->user_ns, creds->uid);
kgid_t gid = make_kgid(cred->user_ns, creds->gid);
if (!uid_valid(uid) || !gid_valid(gid))
return -EINVAL;
if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) &&
((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
gid_eq(gid, cred->sgid)) || nsown_capable(CAP_SETGID))) {
return 0;
}
return -EPERM;
}
| CWE-264 | 179,265 | 1,050 | 265477273618585724017415837621967247884 | null | null | null |
linux | 132c94e31b8bca8ea921f9f96a57d684fa4ae0a9 | 1 |
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
struct super_block *sb = old->mnt.mnt_sb;
struct mount *mnt;
int err;
mnt = alloc_vfsmnt(old->mnt_devname);
if (!mnt)
return ERR_PTR(-ENOMEM);
if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
mnt->mnt_group_id = 0; /* not a peer of original */
else
mnt->mnt_group_id = old->mnt_group_id;
if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
err = mnt_alloc_group_id(mnt);
if (err)
goto out_free;
}
mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
atomic_inc(&sb->s_active);
mnt->mnt.mnt_sb = sb;
mnt->mnt.mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
br_write_lock(&vfsmount_lock);
list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
br_write_unlock(&vfsmount_lock);
if ((flag & CL_SLAVE) ||
((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
list_add(&mnt->mnt_slave, &old->mnt_slave_list);
mnt->mnt_master = old;
CLEAR_MNT_SHARED(mnt);
} else if (!(flag & CL_PRIVATE)) {
if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
list_add(&mnt->mnt_share, &old->mnt_share);
if (IS_MNT_SLAVE(old))
list_add(&mnt->mnt_slave, &old->mnt_slave);
mnt->mnt_master = old->mnt_master;
}
if (flag & CL_MAKE_SHARED)
set_mnt_shared(mnt);
/* stick the duplicate mount on the same expiry list
* as the original if that was on one */
if (flag & CL_EXPIRE) {
if (!list_empty(&old->mnt_expire))
list_add(&mnt->mnt_expire, &old->mnt_expire);
}
return mnt;
out_free:
free_vfsmnt(mnt);
return ERR_PTR(err);
}
| CWE-264 | 179,266 | 1,051 | 333226396239700821421685271003259941488 | null | null | null |
linux | 3151527ee007b73a0ebd296010f1c0454a919c7d | 1 |
int create_user_ns(struct cred *new)
{
struct user_namespace *ns, *parent_ns = new->user_ns;
kuid_t owner = new->euid;
kgid_t group = new->egid;
int ret;
/* The creator needs a mapping in the parent user namespace
* or else we won't be able to reasonably tell userspace who
* created a user_namespace.
*/
if (!kuid_has_mapping(parent_ns, owner) ||
!kgid_has_mapping(parent_ns, group))
return -EPERM;
ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
if (!ns)
return -ENOMEM;
ret = proc_alloc_inum(&ns->proc_inum);
if (ret) {
kmem_cache_free(user_ns_cachep, ns);
return ret;
}
atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
ns->owner = owner;
ns->group = group;
set_cred_user_ns(new, ns);
return 0;
}
| CWE-264 | 179,269 | 1,054 | 62769202323779361493560203769736579979 | null | null | null |
curl | 2eb8dcf26cb37f09cffe26909a646e702dbcab66 | 1 |
static bool tailmatch(const char *little, const char *bigone)
{
size_t littlelen = strlen(little);
size_t biglen = strlen(bigone);
if(littlelen > biglen)
return FALSE;
return Curl_raw_equal(little, bigone+biglen-littlelen) ? TRUE : FALSE;
}
| CWE-200 | 179,270 | 1,055 | 325637020867287521959143082960787130430 | null | null | null |
linux | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 1 |
static void tg3_read_vpd(struct tg3 *tp)
{
u8 *vpd_data;
unsigned int block_end, rosize, len;
u32 vpdlen;
int j, i = 0;
vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
if (!vpd_data)
goto out_no_vpd;
i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
rosize = pci_vpd_lrdt_size(&vpd_data[i]);
block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
i += PCI_VPD_LRDT_TAG_SIZE;
if (block_end > vpdlen)
goto out_not_found;
j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
PCI_VPD_RO_KEYWORD_MFR_ID);
if (j > 0) {
len = pci_vpd_info_field_size(&vpd_data[j]);
j += PCI_VPD_INFO_FLD_HDR_SIZE;
if (j + len > block_end || len != 4 ||
memcmp(&vpd_data[j], "1028", 4))
goto partno;
j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
PCI_VPD_RO_KEYWORD_VENDOR0);
if (j < 0)
goto partno;
len = pci_vpd_info_field_size(&vpd_data[j]);
j += PCI_VPD_INFO_FLD_HDR_SIZE;
if (j + len > block_end)
goto partno;
memcpy(tp->fw_ver, &vpd_data[j], len);
strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
}
partno:
i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
PCI_VPD_RO_KEYWORD_PARTNO);
if (i < 0)
goto out_not_found;
len = pci_vpd_info_field_size(&vpd_data[i]);
i += PCI_VPD_INFO_FLD_HDR_SIZE;
if (len > TG3_BPN_SIZE ||
(len + i) > vpdlen)
goto out_not_found;
memcpy(tp->board_part_number, &vpd_data[i], len);
out_not_found:
kfree(vpd_data);
if (tp->board_part_number[0])
return;
out_no_vpd:
if (tg3_asic_rev(tp) == ASIC_REV_5717) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
strcpy(tp->board_part_number, "BCM5717");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
strcpy(tp->board_part_number, "BCM5718");
else
goto nomatch;
} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
strcpy(tp->board_part_number, "BCM57780");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
strcpy(tp->board_part_number, "BCM57760");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
strcpy(tp->board_part_number, "BCM57790");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
strcpy(tp->board_part_number, "BCM57788");
else
goto nomatch;
} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
strcpy(tp->board_part_number, "BCM57761");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
strcpy(tp->board_part_number, "BCM57765");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
strcpy(tp->board_part_number, "BCM57781");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
strcpy(tp->board_part_number, "BCM57785");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
strcpy(tp->board_part_number, "BCM57791");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
strcpy(tp->board_part_number, "BCM57762");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
strcpy(tp->board_part_number, "BCM57766");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
strcpy(tp->board_part_number, "BCM57782");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
strcpy(tp->board_part_number, "BCM57786");
else
goto nomatch;
} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
nomatch:
strcpy(tp->board_part_number, "none");
}
}
| CWE-119 | 179,273 | 1,056 | 112924613750397927048878971722801062246 | null | null | null |
linux | 12176503366885edd542389eed3aaf94be163fdb | 1 |
static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
struct compat_video_spu_palette __user *up)
{
struct video_spu_palette __user *up_native;
compat_uptr_t palp;
int length, err;
err = get_user(palp, &up->palette);
err |= get_user(length, &up->length);
up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
err = put_user(compat_ptr(palp), &up_native->palette);
err |= put_user(length, &up_native->length);
if (err)
return -EFAULT;
err = sys_ioctl(fd, cmd, (unsigned long) up_native);
return err;
}
| CWE-200 | 179,274 | 1,057 | 316505203133565640195168358797398379359 | null | null | null |
linux | c0f5ecee4e741667b2493c742b60b6218d40b3aa | 1 |
static void wdm_in_callback(struct urb *urb)
{
struct wdm_device *desc = urb->context;
int status = urb->status;
spin_lock(&desc->iuspin);
clear_bit(WDM_RESPONDING, &desc->flags);
if (status) {
switch (status) {
case -ENOENT:
dev_dbg(&desc->intf->dev,
"nonzero urb status received: -ENOENT");
goto skip_error;
case -ECONNRESET:
dev_dbg(&desc->intf->dev,
"nonzero urb status received: -ECONNRESET");
goto skip_error;
case -ESHUTDOWN:
dev_dbg(&desc->intf->dev,
"nonzero urb status received: -ESHUTDOWN");
goto skip_error;
case -EPIPE:
dev_err(&desc->intf->dev,
"nonzero urb status received: -EPIPE\n");
break;
default:
dev_err(&desc->intf->dev,
"Unexpected error %d\n", status);
break;
}
}
desc->rerr = status;
desc->reslength = urb->actual_length;
memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength);
desc->length += desc->reslength;
skip_error:
wake_up(&desc->wait);
set_bit(WDM_READ, &desc->flags);
spin_unlock(&desc->iuspin);
}
| CWE-119 | 179,275 | 1,058 | 263555284317390403116045523741699148008 | null | null | null |
linux | 726bc6b092da4c093eb74d13c07184b18c1af0f1 | 1 |
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_stats sas;
struct sctp_association *asoc = NULL;
/* User must provide at least the assoc id */
if (len < sizeof(sctp_assoc_t))
return -EINVAL;
if (copy_from_user(&sas, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
if (!asoc)
return -EINVAL;
sas.sas_rtxchunks = asoc->stats.rtxchunks;
sas.sas_gapcnt = asoc->stats.gapcnt;
sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
sas.sas_osacks = asoc->stats.osacks;
sas.sas_isacks = asoc->stats.isacks;
sas.sas_octrlchunks = asoc->stats.octrlchunks;
sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
sas.sas_oodchunks = asoc->stats.oodchunks;
sas.sas_iodchunks = asoc->stats.iodchunks;
sas.sas_ouodchunks = asoc->stats.ouodchunks;
sas.sas_iuodchunks = asoc->stats.iuodchunks;
sas.sas_idupchunks = asoc->stats.idupchunks;
sas.sas_opackets = asoc->stats.opackets;
sas.sas_ipackets = asoc->stats.ipackets;
/* New high max rto observed, will return 0 if not a single
* RTO update took place. obs_rto_ipaddr will be bogus
* in such a case
*/
sas.sas_maxrto = asoc->stats.max_obs_rto;
memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
sizeof(struct sockaddr_storage));
/* Mark beginning of a new observation period */
asoc->stats.max_obs_rto = asoc->rto_min;
/* Allow the struct to grow and fill in as much as possible */
len = min_t(size_t, len, sizeof(sas));
if (put_user(len, optlen))
return -EFAULT;
SCTP_DEBUG_PRINTK("sctp_getsockopt_assoc_stat(%d): %d\n",
len, sas.sas_assoc_id);
if (copy_to_user(optval, &sas, len))
return -EFAULT;
return 0;
}
| CWE-20 | 179,283 | 1,065 | 170963965115843549721398736237152348844 | null | null | null |
linux | 864745d291b5ba80ea0bd0edcbe67273de368836 | 1 |
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
struct xfrm_state *x, u32 seq)
{
struct xfrm_dump_info info;
struct sk_buff *skb;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return ERR_PTR(-ENOMEM);
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
if (dump_one_state(x, 0, &info)) {
kfree_skb(skb);
return NULL;
}
return skb;
}
| null | 179,284 | 1,066 | 324377055092562535340765358029287961768 | null | null | null |
linux | eb178619f930fa2ba2348de332a1ff1c66a31424 | 1 |
_xfs_buf_find(
struct xfs_buftarg *btp,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
xfs_buf_t *new_bp)
{
size_t numbytes;
struct xfs_perag *pag;
struct rb_node **rbp;
struct rb_node *parent;
xfs_buf_t *bp;
xfs_daddr_t blkno = map[0].bm_bn;
int numblks = 0;
int i;
for (i = 0; i < nmaps; i++)
numblks += map[i].bm_len;
numbytes = BBTOB(numblks);
/* Check for IOs smaller than the sector size / not sector aligned */
ASSERT(!(numbytes < (1 << btp->bt_sshift)));
ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
/* get tree root */
pag = xfs_perag_get(btp->bt_mount,
xfs_daddr_to_agno(btp->bt_mount, blkno));
/* walk tree */
spin_lock(&pag->pag_buf_lock);
rbp = &pag->pag_buf_tree.rb_node;
parent = NULL;
bp = NULL;
while (*rbp) {
parent = *rbp;
bp = rb_entry(parent, struct xfs_buf, b_rbnode);
if (blkno < bp->b_bn)
rbp = &(*rbp)->rb_left;
else if (blkno > bp->b_bn)
rbp = &(*rbp)->rb_right;
else {
/*
* found a block number match. If the range doesn't
* match, the only way this is allowed is if the buffer
* in the cache is stale and the transaction that made
* it stale has not yet committed. i.e. we are
* reallocating a busy extent. Skip this buffer and
* continue searching to the right for an exact match.
*/
if (bp->b_length != numblks) {
ASSERT(bp->b_flags & XBF_STALE);
rbp = &(*rbp)->rb_right;
continue;
}
atomic_inc(&bp->b_hold);
goto found;
}
}
/* No match found */
if (new_bp) {
rb_link_node(&new_bp->b_rbnode, parent, rbp);
rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
/* the buffer keeps the perag reference until it is freed */
new_bp->b_pag = pag;
spin_unlock(&pag->pag_buf_lock);
} else {
XFS_STATS_INC(xb_miss_locked);
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);
}
return new_bp;
found:
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);
if (!xfs_buf_trylock(bp)) {
if (flags & XBF_TRYLOCK) {
xfs_buf_rele(bp);
XFS_STATS_INC(xb_busy_locked);
return NULL;
}
xfs_buf_lock(bp);
XFS_STATS_INC(xb_get_locked_waited);
}
/*
* if the buffer is stale, clear all the external state associated with
* it. We need to keep flags such as how we allocated the buffer memory
* intact here.
*/
if (bp->b_flags & XBF_STALE) {
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
ASSERT(bp->b_iodone == NULL);
bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
bp->b_ops = NULL;
}
trace_xfs_buf_find(bp, flags, _RET_IP_);
XFS_STATS_INC(xb_get_locked);
return bp;
}
| CWE-20 | 179,285 | 1,067 | 91424937264814556580236710533933601112 | null | null | null |
linux | a2c118bfab8bc6b8bb213abfc35201e441693d55 | 1 |
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
unsigned long addr,
unsigned long length)
{
unsigned long result = 0;
switch (ioapic->ioregsel) {
case IOAPIC_REG_VERSION:
result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
| (IOAPIC_VERSION_ID & 0xff));
break;
case IOAPIC_REG_APIC_ID:
case IOAPIC_REG_ARB_ID:
result = ((ioapic->id & 0xf) << 24);
break;
default:
{
u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
u64 redir_content;
ASSERT(redir_index < IOAPIC_NUM_PINS);
redir_content = ioapic->redirtbl[redir_index].bits;
result = (ioapic->ioregsel & 0x1) ?
(redir_content >> 32) & 0xffffffff :
redir_content & 0xffffffff;
break;
}
}
return result;
}
| CWE-20 | 179,286 | 1,068 | 68178098106123347133017883923942382578 | null | null | null |
linux | 0b79459b482e85cb7426aa7da683a9f2c97aeae1 | 1 |
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
if (!(data & 1))
break;
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
if (vcpu->arch.time_offset &
(sizeof(struct pvclock_vcpu_time_info) - 1))
break;
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
if (is_error_page(vcpu->arch.time_page))
vcpu->arch.time_page = NULL;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
preempt_disable();
accumulate_steal_time(vcpu);
preempt_enable();
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return set_msr_mce(vcpu, msr, data);
/* Performance counters are not protected by a CPUID bit,
* so we should check all of them in the generic path for the sake of
* cross vendor migration.
* Writing a zero into the event select MSRs disables them,
* which we perfectly emulate ;-). Any other value should be at least
* reported, some guests depend on them.
*/
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
if (data != 0)
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
*/
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
pr = true;
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
* AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = set_msr_hyperv_pw(vcpu, msr, data);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return set_msr_hyperv(vcpu, msr, data);
break;
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
}
| CWE-399 | 179,290 | 1,071 | 148215412997884658749636479368404361217 | null | null | null |
linux | c300aa64ddf57d9c5d9c898a64b36877345dd4a9 | 1 |
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
if (!(data & 1))
break;
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
if (is_error_page(vcpu->arch.time_page))
vcpu->arch.time_page = NULL;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
preempt_disable();
accumulate_steal_time(vcpu);
preempt_enable();
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return set_msr_mce(vcpu, msr, data);
/* Performance counters are not protected by a CPUID bit,
* so we should check all of them in the generic path for the sake of
* cross vendor migration.
* Writing a zero into the event select MSRs disables them,
* which we perfectly emulate ;-). Any other value should be at least
* reported, some guests depend on them.
*/
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
if (data != 0)
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
*/
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
pr = true;
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
* AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = set_msr_hyperv_pw(vcpu, msr, data);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return set_msr_hyperv(vcpu, msr, data);
break;
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
}
| CWE-119 | 179,292 | 1,073 | 283608099968125207964532142865153428743 | null | null | null |
linux | 0da9dfdd2cd9889201bc6f6f43580c99165cd087 | 1 |
int install_user_keyrings(void)
{
struct user_struct *user;
const struct cred *cred;
struct key *uid_keyring, *session_keyring;
key_perm_t user_keyring_perm;
char buf[20];
int ret;
uid_t uid;
user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
cred = current_cred();
user = cred->user;
uid = from_kuid(cred->user_ns, user->uid);
kenter("%p{%u}", user, uid);
if (user->uid_keyring) {
kleave(" = 0 [exist]");
return 0;
}
mutex_lock(&key_user_keyring_mutex);
ret = 0;
if (!user->uid_keyring) {
/* get the UID-specific keyring
* - there may be one in existence already as it may have been
* pinned by a session, but the user_struct pointing to it
* may have been destroyed by setuid */
sprintf(buf, "_uid.%u", uid);
uid_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(uid_keyring)) {
uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
goto error;
}
}
/* get a default session keyring (which might also exist
* already) */
sprintf(buf, "_uid_ses.%u", uid);
session_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(session_keyring)) {
session_keyring =
keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_release;
}
/* we install a link from the user session keyring to
* the user keyring */
ret = key_link(session_keyring, uid_keyring);
if (ret < 0)
goto error_release_both;
}
/* install the keyrings */
user->uid_keyring = uid_keyring;
user->session_keyring = session_keyring;
}
mutex_unlock(&key_user_keyring_mutex);
kleave(" = 0");
return 0;
error_release_both:
key_put(session_keyring);
error_release:
key_put(uid_keyring);
error:
mutex_unlock(&key_user_keyring_mutex);
kleave(" = %d", ret);
return ret;
}
| CWE-362 | 179,293 | 1,074 | 150644529088062988217803795766094771664 | null | null | null |
linux | 1ee0a224bc9aad1de496c795f96bc6ba2c394811 | 1 |
static void chase_port(struct edgeport_port *port, unsigned long timeout,
int flush)
{
int baud_rate;
struct tty_struct *tty = tty_port_tty_get(&port->port->port);
struct usb_serial *serial = port->port->serial;
wait_queue_t wait;
unsigned long flags;
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
/* wait for data to drain from the buffer */
spin_lock_irqsave(&port->ep_lock, flags);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tty->write_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (kfifo_len(&port->write_fifo) == 0
|| timeout == 0 || signal_pending(current)
|| serial->disconnected)
/* disconnect */
break;
spin_unlock_irqrestore(&port->ep_lock, flags);
timeout = schedule_timeout(timeout);
spin_lock_irqsave(&port->ep_lock, flags);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (flush)
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->ep_lock, flags);
tty_kref_put(tty);
/* wait for data to drain from the device */
timeout += jiffies;
while ((long)(jiffies - timeout) < 0 && !signal_pending(current)
&& !serial->disconnected) {
/* not disconnected */
if (!tx_active(port))
break;
msleep(10);
}
/* disconnected */
if (serial->disconnected)
return;
/* wait one more character time, based on baud rate */
/* (tx_active doesn't seem to wait for the last byte) */
baud_rate = port->baud_rate;
if (baud_rate == 0)
baud_rate = 50;
msleep(max(1, DIV_ROUND_UP(10000, baud_rate)));
}
| CWE-264 | 179,294 | 1,075 | 45678875671616345348570222539533714638 | null | null | null |
linux | ce0030c00f95cf9110d9cdcd41e901e1fb814417 | 1 |
static void call_console_drivers(unsigned start, unsigned end)
{
unsigned cur_index, start_print;
static int msg_level = -1;
BUG_ON(((int)(start - end)) > 0);
cur_index = start;
start_print = start;
while (cur_index != end) {
if (msg_level < 0 && ((end - cur_index) > 2)) {
/* strip log prefix */
cur_index += log_prefix(&LOG_BUF(cur_index), &msg_level, NULL);
start_print = cur_index;
}
while (cur_index != end) {
char c = LOG_BUF(cur_index);
cur_index++;
if (c == '\n') {
if (msg_level < 0) {
/*
* printk() has already given us loglevel tags in
* the buffer. This code is here in case the
* log buffer has wrapped right round and scribbled
* on those tags
*/
msg_level = default_message_loglevel;
}
_call_console_drivers(start_print, cur_index, msg_level);
msg_level = -1;
start_print = cur_index;
break;
}
}
}
_call_console_drivers(start_print, end, msg_level);
}
| CWE-119 | 179,298 | 1,079 | 150497347424301985399622499839907794574 | null | null | null |
tinc | 17a33dfd95b1a29e90db76414eb9622df9632320 | 1 |
void receive_tcppacket(connection_t *c, const char *buffer, int len) {
vpn_packet_t outpkt;
outpkt.len = len;
if(c->options & OPTION_TCPONLY)
outpkt.priority = 0;
else
outpkt.priority = -1;
memcpy(outpkt.data, buffer, len);
receive_packet(c->node, &outpkt);
}
| CWE-119 | 179,301 | 1,080 | 130046906967549597758760447336187151115 | null | null | null |
krb5 | 8ee70ec63931d1e38567905387ab9b1d45734d81 | 1 |
prep_reprocess_req(krb5_kdc_req *request, krb5_principal *krbtgt_princ)
{
krb5_error_code retval = KRB5KRB_AP_ERR_BADMATCH;
char **realms, **cpp, *temp_buf=NULL;
krb5_data *comp1 = NULL, *comp2 = NULL;
char *comp1_str = NULL;
/* By now we know that server principal name is unknown.
* If CANONICALIZE flag is set in the request
* If req is not U2U authn. req
* the requested server princ. has exactly two components
* either
* the name type is NT-SRV-HST
* or name type is NT-UNKNOWN and
* the 1st component is listed in conf file under host_based_services
* the 1st component is not in a list in conf under "no_host_referral"
* the 2d component looks like fully-qualified domain name (FQDN)
* If all of these conditions are satisfied - try mapping the FQDN and
* re-process the request as if client had asked for cross-realm TGT.
*/
if (isflagset(request->kdc_options, KDC_OPT_CANONICALIZE) &&
!isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY) &&
krb5_princ_size(kdc_context, request->server) == 2) {
comp1 = krb5_princ_component(kdc_context, request->server, 0);
comp2 = krb5_princ_component(kdc_context, request->server, 1);
comp1_str = calloc(1,comp1->length+1);
if (!comp1_str) {
retval = ENOMEM;
goto cleanup;
}
strlcpy(comp1_str,comp1->data,comp1->length+1);
if ((krb5_princ_type(kdc_context, request->server) == KRB5_NT_SRV_HST ||
krb5_princ_type(kdc_context, request->server) == KRB5_NT_SRV_INST ||
(krb5_princ_type(kdc_context, request->server) == KRB5_NT_UNKNOWN &&
kdc_active_realm->realm_host_based_services != NULL &&
(krb5_match_config_pattern(kdc_active_realm->realm_host_based_services,
comp1_str) == TRUE ||
krb5_match_config_pattern(kdc_active_realm->realm_host_based_services,
KRB5_CONF_ASTERISK) == TRUE))) &&
(kdc_active_realm->realm_no_host_referral == NULL ||
(krb5_match_config_pattern(kdc_active_realm->realm_no_host_referral,
KRB5_CONF_ASTERISK) == FALSE &&
krb5_match_config_pattern(kdc_active_realm->realm_no_host_referral,
comp1_str) == FALSE))) {
if (memchr(comp2->data, '.', comp2->length) == NULL)
goto cleanup;
temp_buf = calloc(1, comp2->length+1);
if (!temp_buf) {
retval = ENOMEM;
goto cleanup;
}
strlcpy(temp_buf, comp2->data,comp2->length+1);
retval = krb5int_get_domain_realm_mapping(kdc_context, temp_buf, &realms);
free(temp_buf);
if (retval) {
/* no match found */
kdc_err(kdc_context, retval, "unable to find realm of host");
goto cleanup;
}
if (realms == 0) {
retval = KRB5KRB_AP_ERR_BADMATCH;
goto cleanup;
}
/* Don't return a referral to the null realm or the service
* realm. */
if (realms[0] == 0 ||
data_eq_string(request->server->realm, realms[0])) {
free(realms[0]);
free(realms);
retval = KRB5KRB_AP_ERR_BADMATCH;
goto cleanup;
}
/* Modify request.
* Construct cross-realm tgt : krbtgt/REMOTE_REALM@LOCAL_REALM
* and use it as a principal in this req.
*/
retval = krb5_build_principal(kdc_context, krbtgt_princ,
(*request->server).realm.length,
(*request->server).realm.data,
"krbtgt", realms[0], (char *)0);
for (cpp = realms; *cpp; cpp++)
free(*cpp);
}
}
cleanup:
free(comp1_str);
return retval;
}
| CWE-119 | 179,304 | 1,083 | 26445768461799658627820158469477432629 | null | null | null |
krb5 | f249555301940c6df3a2cdda13b56b5674eebc2e | 1 |
pkinit_check_kdc_pkid(krb5_context context,
pkinit_plg_crypto_context plg_cryptoctx,
pkinit_req_crypto_context req_cryptoctx,
pkinit_identity_crypto_context id_cryptoctx,
unsigned char *pdid_buf,
unsigned int pkid_len,
int *valid_kdcPkId)
{
krb5_error_code retval = KRB5KDC_ERR_PREAUTH_FAILED;
PKCS7_ISSUER_AND_SERIAL *is = NULL;
const unsigned char *p = pdid_buf;
int status = 1;
X509 *kdc_cert = sk_X509_value(id_cryptoctx->my_certs, id_cryptoctx->cert_index);
*valid_kdcPkId = 0;
pkiDebug("found kdcPkId in AS REQ\n");
is = d2i_PKCS7_ISSUER_AND_SERIAL(NULL, &p, (int)pkid_len);
if (is == NULL)
goto cleanup;
status = X509_NAME_cmp(X509_get_issuer_name(kdc_cert), is->issuer);
if (!status) {
status = ASN1_INTEGER_cmp(X509_get_serialNumber(kdc_cert), is->serial);
if (!status)
*valid_kdcPkId = 1;
}
retval = 0;
cleanup:
X509_NAME_free(is->issuer);
ASN1_INTEGER_free(is->serial);
free(is);
return retval;
}
| null | 179,305 | 1,084 | 113776654271026499150762592325156046711 | null | null | null |
linux | 2ca39528c01a933f6689cd6505ce65bd6d68a530 | 1 |
flush_signal_handlers(struct task_struct *t, int force_default)
{
int i;
struct k_sigaction *ka = &t->sighand->action[0];
for (i = _NSIG ; i != 0 ; i--) {
if (force_default || ka->sa.sa_handler != SIG_IGN)
ka->sa.sa_handler = SIG_DFL;
ka->sa.sa_flags = 0;
sigemptyset(&ka->sa.sa_mask);
ka++;
}
}
| CWE-264 | 179,306 | 1,085 | 300076951675769576789330637334839523682 | null | null | null |
linux | 0a9ab9bdb3e891762553f667066190c1d22ad62b | 1 |
static int hidp_setup_hid(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct hid_device *hid;
int err;
session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
if (!session->rd_data)
return -ENOMEM;
if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
err = -EFAULT;
goto fault;
}
session->rd_size = req->rd_size;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
err = PTR_ERR(hid);
goto fault;
}
session->hid = hid;
hid->driver_data = session;
hid->bus = BUS_BLUETOOTH;
hid->vendor = req->vendor;
hid->product = req->product;
hid->version = req->version;
hid->country = req->country;
strncpy(hid->name, req->name, 128);
snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&bt_sk(session->ctrl_sock->sk)->src);
snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
&bt_sk(session->ctrl_sock->sk)->dst);
hid->dev.parent = &session->conn->dev;
hid->ll_driver = &hidp_hid_driver;
hid->hid_get_raw_report = hidp_get_raw_report;
hid->hid_output_raw_report = hidp_output_raw_report;
/* True if device is blacklisted in drivers/hid/hid-core.c */
if (hid_ignore(hid)) {
hid_destroy_device(session->hid);
session->hid = NULL;
return -ENODEV;
}
return 0;
fault:
kfree(session->rd_data);
session->rd_data = NULL;
return err;
}
| CWE-200 | 179,312 | 1,089 | 293512902537004236220512847055814585434 | null | null | null |
linux | a67adb997419fb53540d4a4f79c6471c60bc69b6 | 1 |
int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
const char *xattr_value, size_t xattr_value_len)
{
struct inode *inode = dentry->d_inode;
struct evm_ima_xattr_data xattr_data;
int rc = 0;
rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
xattr_value_len, xattr_data.digest);
if (rc == 0) {
xattr_data.type = EVM_XATTR_HMAC;
rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
&xattr_data,
sizeof(xattr_data), 0);
}
else if (rc == -ENODATA)
rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
return rc;
}
| null | 179,313 | 1,090 | 305997047583974474229271303979449827597 | null | null | null |
linux | bd97120fc3d1a11f3124c7c9ba1d91f51829eb85 | 1 |
static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
struct iovec iov[], int iov_size)
{
const struct vhost_memory_region *reg;
struct vhost_memory *mem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
rcu_read_lock();
mem = rcu_dereference(dev->memory);
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
reg = find_region(mem, addr, len);
if (unlikely(!reg)) {
ret = -EFAULT;
break;
}
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
_iov->iov_len = min((u64)len, size);
_iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
addr += size;
++ret;
}
rcu_read_unlock();
return ret;
}
| null | 179,314 | 1,091 | 36058613995489191804912663384715370298 | null | null | null |
linux | 89d7ae34cdda4195809a5a987f697a517a2a3177 | 1 |
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
unsigned char *opt = *option;
unsigned char *tag;
unsigned char opt_iter;
unsigned char err_offset = 0;
u8 opt_len;
u8 tag_len;
struct cipso_v4_doi *doi_def = NULL;
u32 tag_iter;
/* caller already checks for length values that are too large */
opt_len = opt[1];
if (opt_len < 8) {
err_offset = 1;
goto validate_return;
}
rcu_read_lock();
doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
if (doi_def == NULL) {
err_offset = 2;
goto validate_return_locked;
}
opt_iter = CIPSO_V4_HDR_LEN;
tag = opt + opt_iter;
while (opt_iter < opt_len) {
for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
++tag_iter == CIPSO_V4_TAG_MAXCNT) {
err_offset = opt_iter;
goto validate_return_locked;
}
tag_len = tag[1];
if (tag_len > (opt_len - opt_iter)) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
switch (tag[0]) {
case CIPSO_V4_TAG_RBITMAP:
if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
/* We are already going to do all the verification
* necessary at the socket layer so from our point of
* view it is safe to turn these checks off (and less
* work), however, the CIPSO draft says we should do
* all the CIPSO validations here but it doesn't
* really specify _exactly_ what we need to validate
* ... so, just make it a sysctl tunable. */
if (cipso_v4_rbm_strictvalid) {
if (cipso_v4_map_lvl_valid(doi_def,
tag[3]) < 0) {
err_offset = opt_iter + 3;
goto validate_return_locked;
}
if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
cipso_v4_map_cat_rbm_valid(doi_def,
&tag[4],
tag_len - 4) < 0) {
err_offset = opt_iter + 4;
goto validate_return_locked;
}
}
break;
case CIPSO_V4_TAG_ENUM:
if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
if (cipso_v4_map_lvl_valid(doi_def,
tag[3]) < 0) {
err_offset = opt_iter + 3;
goto validate_return_locked;
}
if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
cipso_v4_map_cat_enum_valid(doi_def,
&tag[4],
tag_len - 4) < 0) {
err_offset = opt_iter + 4;
goto validate_return_locked;
}
break;
case CIPSO_V4_TAG_RANGE:
if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
if (cipso_v4_map_lvl_valid(doi_def,
tag[3]) < 0) {
err_offset = opt_iter + 3;
goto validate_return_locked;
}
if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
cipso_v4_map_cat_rng_valid(doi_def,
&tag[4],
tag_len - 4) < 0) {
err_offset = opt_iter + 4;
goto validate_return_locked;
}
break;
case CIPSO_V4_TAG_LOCAL:
/* This is a non-standard tag that we only allow for
* local connections, so if the incoming interface is
* not the loopback device drop the packet. */
if (!(skb->dev->flags & IFF_LOOPBACK)) {
err_offset = opt_iter;
goto validate_return_locked;
}
if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
break;
default:
err_offset = opt_iter;
goto validate_return_locked;
}
tag += tag_len;
opt_iter += tag_len;
}
validate_return_locked:
rcu_read_unlock();
validate_return:
*option = opt + err_offset;
return err_offset;
}
| CWE-119 | 179,315 | 1,092 | 254520361513953381472265258043035062652 | null | null | null |
linux | 77c1090f94d1b0b5186fb13a1b71b47b1343f87f | 1 |
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
int *peeked, int *off, int *err)
{
struct sk_buff *skb;
long timeo;
/*
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
*/
int error = sock_error(sk);
if (error)
goto no_packet;
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
/* Again only user level code calls this function, so nothing
* interrupt level will suddenly eat the receive_queue.
*
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
unsigned long cpu_flags;
struct sk_buff_head *queue = &sk->sk_receive_queue;
spin_lock_irqsave(&queue->lock, cpu_flags);
skb_queue_walk(queue, skb) {
*peeked = skb->peeked;
if (flags & MSG_PEEK) {
if (*off >= skb->len) {
*off -= skb->len;
continue;
}
skb->peeked = 1;
atomic_inc(&skb->users);
} else
__skb_unlink(skb, queue);
spin_unlock_irqrestore(&queue->lock, cpu_flags);
return skb;
}
spin_unlock_irqrestore(&queue->lock, cpu_flags);
/* User doesn't want to wait */
error = -EAGAIN;
if (!timeo)
goto no_packet;
} while (!wait_for_packet(sk, err, &timeo));
return NULL;
no_packet:
*err = error;
return NULL;
}
| CWE-20 | 179,316 | 1,093 | 89456193869845556775746278518779936780 | null | null | null |
linux | c903f0456bc69176912dee6dd25c6a66ee1aed00 | 1 |
static int msr_open(struct inode *inode, struct file *file)
{
unsigned int cpu;
struct cpuinfo_x86 *c;
cpu = iminor(file->f_path.dentry->d_inode);
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
return -ENXIO; /* No such CPU */
c = &cpu_data(cpu);
if (!cpu_has(c, X86_FEATURE_MSR))
return -EIO; /* MSR not supported */
return 0;
}
| CWE-264 | 179,338 | 1,115 | 134954684690763782108810687579849886489 | null | null | null |
libarchive | 22531545514043e04633e1c015c7540b9de9dbe4 | 1 |
_archive_write_data(struct archive *_a, const void *buff, size_t s)
{
struct archive_write *a = (struct archive_write *)_a;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_DATA, "archive_write_data");
archive_clear_error(&a->archive);
return ((a->format_write_data)(a, buff, s));
}
| CWE-189 | 179,348 | 1,123 | 130653882621145290070878332961230320693 | null | null | null |
linux | fe685aabf7c8c9f138e5ea900954d295bf229175 | 1 |
isofs_export_encode_fh(struct inode *inode,
__u32 *fh32,
int *max_len,
struct inode *parent)
{
struct iso_inode_info * ei = ISOFS_I(inode);
int len = *max_len;
int type = 1;
__u16 *fh16 = (__u16*)fh32;
/*
* WARNING: max_len is 5 for NFSv2. Because of this
* limitation, we use the lower 16 bits of fh32[1] to hold the
* offset of the inode and the upper 16 bits of fh32[1] to
* hold the offset of the parent.
*/
if (parent && (len < 5)) {
*max_len = 5;
return 255;
} else if (len < 3) {
*max_len = 3;
return 255;
}
len = 3;
fh32[0] = ei->i_iget5_block;
fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
fh32[2] = inode->i_generation;
if (parent) {
struct iso_inode_info *eparent;
eparent = ISOFS_I(parent);
fh32[3] = eparent->i_iget5_block;
fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */
fh32[4] = parent->i_generation;
len = 5;
type = 2;
}
*max_len = len;
return type;
}
| CWE-200 | 179,349 | 1,124 | 175765004232940692877706715077624484230 | null | null | null |
linux | 0143fc5e9f6f5aad4764801015bc8d4b4a278200 | 1 |
static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
struct inode *parent)
{
int len = *lenp;
struct kernel_lb_addr location = UDF_I(inode)->i_location;
struct fid *fid = (struct fid *)fh;
int type = FILEID_UDF_WITHOUT_PARENT;
if (parent && (len < 5)) {
*lenp = 5;
return 255;
} else if (len < 3) {
*lenp = 3;
return 255;
}
*lenp = 3;
fid->udf.block = location.logicalBlockNum;
fid->udf.partref = location.partitionReferenceNum;
fid->udf.generation = inode->i_generation;
if (parent) {
location = UDF_I(parent)->i_location;
fid->udf.parent_block = location.logicalBlockNum;
fid->udf.parent_partref = location.partitionReferenceNum;
fid->udf.parent_generation = inode->i_generation;
*lenp = 5;
type = FILEID_UDF_WITH_PARENT;
}
return type;
}
| CWE-200 | 179,350 | 1,125 | 271056274363302483785723168483520524098 | null | null | null |
linux | a117dacde0288f3ec60b6e5bcedae8fa37ee0dfc | 1 |
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
struct sock_fprog fprog;
struct ifreq ifr;
int sndbuf;
int vnet_hdr_sz;
int ret;
if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
if (cmd == TUNGETFEATURES) {
/* Currently this just means: "what IFF flags are valid?".
* This is needed because we never checked for invalid flags on
* TUNSETIFF. */
return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
IFF_VNET_HDR,
(unsigned int __user*)argp);
}
rtnl_lock();
tun = __tun_get(tfile);
if (cmd == TUNSETIFF && !tun) {
ifr.ifr_name[IFNAMSIZ-1] = '\0';
ret = tun_set_iff(tfile->net, file, &ifr);
if (ret)
goto unlock;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
goto unlock;
}
ret = -EBADFD;
if (!tun)
goto unlock;
tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
ret = 0;
switch (cmd) {
case TUNGETIFF:
ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
if (ret)
break;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
case TUNSETNOCSUM:
/* Disable/Enable checksum */
/* [unimplemented] */
tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
arg ? "disabled" : "enabled");
break;
case TUNSETPERSIST:
/* Disable/Enable persist mode */
if (arg)
tun->flags |= TUN_PERSIST;
else
tun->flags &= ~TUN_PERSIST;
tun_debug(KERN_INFO, tun, "persist %s\n",
arg ? "enabled" : "disabled");
break;
case TUNSETOWNER:
/* Set owner of the device */
tun->owner = (uid_t) arg;
tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
break;
case TUNSETGROUP:
/* Set group of the device */
tun->group= (gid_t) arg;
tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
break;
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
tun_debug(KERN_INFO, tun,
"Linktype set failed because interface is up\n");
ret = -EBUSY;
} else {
tun->dev->type = (int) arg;
tun_debug(KERN_INFO, tun, "linktype set to %d\n",
tun->dev->type);
ret = 0;
}
break;
#ifdef TUN_DEBUG
case TUNSETDEBUG:
tun->debug = arg;
break;
#endif
case TUNSETOFFLOAD:
ret = set_offload(tun, arg);
break;
case TUNSETTXFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
ret = update_filter(&tun->txflt, (void __user *)arg);
break;
case SIOCGIFHWADDR:
/* Get hw address */
memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
ifr.ifr_hwaddr.sa_family = tun->dev->type;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
case SIOCSIFHWADDR:
/* Set hw address */
tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
ifr.ifr_hwaddr.sa_data);
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
break;
case TUNGETSNDBUF:
sndbuf = tun->socket.sk->sk_sndbuf;
if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
ret = -EFAULT;
break;
case TUNSETSNDBUF:
if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
ret = -EFAULT;
break;
}
tun->socket.sk->sk_sndbuf = sndbuf;
break;
case TUNGETVNETHDRSZ:
vnet_hdr_sz = tun->vnet_hdr_sz;
if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
ret = -EFAULT;
break;
case TUNSETVNETHDRSZ:
if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
ret = -EFAULT;
break;
}
if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
ret = -EINVAL;
break;
}
tun->vnet_hdr_sz = vnet_hdr_sz;
break;
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
ret = -EFAULT;
if (copy_from_user(&fprog, argp, sizeof(fprog)))
break;
ret = sk_attach_filter(&fprog, tun->socket.sk);
break;
case TUNDETACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
ret = sk_detach_filter(tun->socket.sk);
break;
default:
ret = -EINVAL;
break;
}
unlock:
rtnl_unlock();
if (tun)
tun_put(tun);
return ret;
}
| CWE-200 | 179,351 | 1,126 | 35869627182032716346589885505053331464 | null | null | null |
linux | e862f1a9b7df4e8196ebec45ac62295138aa3fc2 | 1 |
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct atm_vcc *vcc;
int len;
if (get_user(len, optlen))
return -EFAULT;
if (__SO_LEVEL_MATCH(optname, level) && len != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EINVAL;
return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
? -EFAULT : 0;
case SO_SETCLP:
return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
(unsigned long __user *)optval) ? -EFAULT : 0;
case SO_ATMPVC:
{
struct sockaddr_atmpvc pvc;
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = vcc->dev->number;
pvc.sap_addr.vpi = vcc->vpi;
pvc.sap_addr.vci = vcc->vci;
return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
}
default:
if (level == SOL_SOCKET)
return -EINVAL;
break;
}
if (!vcc->dev || !vcc->dev->ops->getsockopt)
return -EINVAL;
return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
}
| CWE-200 | 179,352 | 1,127 | 133848982303383403195265161712061518840 | null | null | null |
linux | 9344a972961d1a6d2c04d9008b13617bcb6ec2ef | 1 |
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
}
| CWE-200 | 179,353 | 1,128 | 94097099797321759376102906975104746907 | null | null | null |
linux | e15ca9a0ef9a86f0477530b0f44a725d67f889ee | 1 |
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;
int len, opt, err = 0;
BT_DBG("sk %p, opt %d", sk, optname);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
err = -EINVAL;
goto done;
}
switch (optname) {
case HCI_DATA_DIR:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
opt = 1;
else
opt = 0;
if (put_user(opt, optval))
err = -EFAULT;
break;
case HCI_TIME_STAMP:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
opt = 1;
else
opt = 0;
if (put_user(opt, optval))
err = -EFAULT;
break;
case HCI_FILTER:
{
struct hci_filter *f = &hci_pi(sk)->filter;
uf.type_mask = f->type_mask;
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
uf.event_mask[1] = *((u32 *) f->event_mask + 1);
}
len = min_t(unsigned int, len, sizeof(uf));
if (copy_to_user(optval, &uf, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
done:
release_sock(sk);
return err;
}
| CWE-200 | 179,354 | 1,129 | 274243474172140942840266275074793223211 | null | null | null |
linux | 04d4fbca1017c11381e7d82acea21dd741e748bc | 1 |
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
struct sock *sk = sock->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
lsa->l2tp_family = AF_INET6;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
if (peer) {
if (!lsk->peer_conn_id)
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
lsa->l2tp_addr = np->daddr;
if (np->sndflow)
lsa->l2tp_flowinfo = np->flow_label;
} else {
if (ipv6_addr_any(&np->rcv_saddr))
lsa->l2tp_addr = np->saddr;
else
lsa->l2tp_addr = np->rcv_saddr;
lsa->l2tp_conn_id = lsk->conn_id;
}
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = sk->sk_bound_dev_if;
*uaddr_len = sizeof(*lsa);
return 0;
}
| CWE-200 | 179,355 | 1,130 | 85210193838679965766514615800467640408 | null | null | null |
linux | 3592aaeb80290bda0f2cf0b5456c97bfc638b192 | 1 |
static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddrlen, int peer)
{
struct sockaddr_llc sllc;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
int rc = 0;
memset(&sllc, 0, sizeof(sllc));
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
*uaddrlen = sizeof(sllc);
memset(uaddr, 0, *uaddrlen);
if (peer) {
rc = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if(llc->dev)
sllc.sllc_arphrd = llc->dev->type;
sllc.sllc_sap = llc->daddr.lsap;
memcpy(&sllc.sllc_mac, &llc->daddr.mac, IFHWADDRLEN);
} else {
rc = -EINVAL;
if (!llc->sap)
goto out;
sllc.sllc_sap = llc->sap->laddr.lsap;
if (llc->dev) {
sllc.sllc_arphrd = llc->dev->type;
memcpy(&sllc.sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
}
}
rc = 0;
sllc.sllc_family = AF_LLC;
memcpy(uaddr, &sllc, sizeof(sllc));
out:
release_sock(sk);
return rc;
}
| CWE-200 | 179,356 | 1,131 | 39906440321854088454720219629246777862 | null | null | null |
linux | 7b07f8eb75aa3097cdfd4f6eac3da49db787381d | 1 |
static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
u32 __user *optval, int __user *optlen)
{
const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
struct tfrc_tx_info tfrc;
const void *val;
switch (optname) {
case DCCP_SOCKOPT_CCID_TX_INFO:
if (len < sizeof(tfrc))
return -EINVAL;
tfrc.tfrctx_x = hc->tx_x;
tfrc.tfrctx_x_recv = hc->tx_x_recv;
tfrc.tfrctx_x_calc = hc->tx_x_calc;
tfrc.tfrctx_rtt = hc->tx_rtt;
tfrc.tfrctx_p = hc->tx_p;
tfrc.tfrctx_rto = hc->tx_t_rto;
tfrc.tfrctx_ipi = hc->tx_t_ipi;
len = sizeof(tfrc);
val = &tfrc;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen) || copy_to_user(optval, val, len))
return -EFAULT;
return 0;
}
| CWE-200 | 179,357 | 1,132 | 8536137024975475585022210917155420368 | null | null | null |
linux | 2d8a041b7bfe1097af21441cb77d6af95f4f4680 | 1 |
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
unsigned int copylen;
struct net *net = sock_net(sk);
struct netns_ipvs *ipvs = net_ipvs(net);
BUG_ON(!net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
return -EINVAL;
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
copylen = get_arglen[GET_CMDID(cmd)];
if (copylen > 128)
return -EINVAL;
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
/*
* Handle daemons first since it has its own locking
*/
if (cmd == IP_VS_SO_GET_DAEMON) {
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
if (mutex_lock_interruptible(&ipvs->sync_mutex))
return -ERESTARTSYS;
if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
sizeof(d[0].mcast_ifn));
d[0].syncid = ipvs->master_syncid;
}
if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
sizeof(d[1].mcast_ifn));
d[1].syncid = ipvs->backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
mutex_unlock(&ipvs->sync_mutex);
return ret;
}
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = ip_vs_conn_tab_size;
info.num_services = ipvs->num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(net, get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
union nf_inet_addr addr;
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
else
svc = __ip_vs_service_find(net, AF_INET,
entry->protocol, &addr,
entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(net, get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
|
CWE-200
| 179,358 | 1,133 |
267941661112975051742367659362771086875
| null | null | null |
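Editor's note: in do_ip_vs_get_ctl() the IP_VS_SO_GET_TIMEOUT reply is built in an uninitialized stack struct; any member or padding byte left untouched by __ip_vs_get_timeouts() goes to userspace. Hedged sketch:

	case IP_VS_SO_GET_TIMEOUT:
	{
		struct ip_vs_timeout_user t;

		memset(&t, 0, sizeof(t));	/* no stale stack bytes */
		__ip_vs_get_timeouts(net, &t);
		if (copy_to_user(user, &t, sizeof(t)) != 0)
			ret = -EFAULT;
	}
	break;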
linux
|
f778a636713a435d3a922c60b1622a91136560c1
| 1 |
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
memcpy(&p->id, &x->id, sizeof(p->id));
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
memcpy(&p->stats, &x->stats, sizeof(p->stats));
memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
p->mode = x->props.mode;
p->replay_window = x->props.replay_window;
p->reqid = x->props.reqid;
p->family = x->props.family;
p->flags = x->props.flags;
p->seq = x->km.seq;
}
|
CWE-200
| 179,361 | 1,134 |
144112786017541915774880277836606119070
| null | null | null |
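Editor's note: copy_to_user_state() assembles a userspace-bound structure member-by-member, and the holes between members are never written. A hedged one-line remedy consistent with the CWE-200 tag; the upstream change may be shaped differently:

	memset(p, 0, sizeof(*p));	/* clear holes before filling fields */
	memcpy(&p->id, &x->id, sizeof(p->id));
	/* ...rest of the copies as above... */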
linux
|
b66c5984017533316fd1951770302649baf1aa33
| 1 |
static int load_script(struct linux_binprm *bprm)
{
const char *i_arg, *i_name;
char *cp;
struct file *file;
char interp[BINPRM_BUF_SIZE];
int retval;
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
/*
* This section does the #! interpretation.
* Sorta complicated, but hopefully it will work. -TYT
*/
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
if ((cp = strchr(bprm->buf, '\n')) == NULL)
cp = bprm->buf+BINPRM_BUF_SIZE-1;
*cp = '\0';
while (cp > bprm->buf) {
cp--;
if ((*cp == ' ') || (*cp == '\t'))
*cp = '\0';
else
break;
}
for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++);
if (*cp == '\0')
return -ENOEXEC; /* No interpreter name found */
i_name = cp;
i_arg = NULL;
for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++)
/* nothing */ ;
while ((*cp == ' ') || (*cp == '\t'))
*cp++ = '\0';
if (*cp)
i_arg = cp;
strcpy (interp, i_name);
/*
* OK, we've parsed out the interpreter name and
* (optional) argument.
* Splice in (1) the interpreter's name for argv[0]
* (2) (optional) argument to interpreter
* (3) filename of shell script (replace argv[0])
*
* This is done in reverse order, because of how the
* user environment and arguments are stored.
*/
retval = remove_arg_zero(bprm);
if (retval)
return retval;
retval = copy_strings_kernel(1, &bprm->interp, bprm);
if (retval < 0) return retval;
bprm->argc++;
if (i_arg) {
retval = copy_strings_kernel(1, &i_arg, bprm);
if (retval < 0) return retval;
bprm->argc++;
}
retval = copy_strings_kernel(1, &i_name, bprm);
if (retval) return retval;
bprm->argc++;
bprm->interp = interp;
/*
* OK, now restart the process with the interpreter's dentry.
*/
file = open_exec(interp);
if (IS_ERR(file))
return PTR_ERR(file);
bprm->file = file;
retval = prepare_binprm(bprm);
if (retval < 0)
return retval;
return search_binary_handler(bprm);
}
|
CWE-200
| 179,370 | 1,142 |
328383916190717074167517185933046956222
| null | null | null |
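Editor's note: load_script() restarts the exec with the interpreter, and nothing in the version above bounds how many times binfmt handlers may re-enter one another, so nested #! scripts can recurse deeply. A hedged sketch of a recursion cap near the top of the function (bprm->recursion_depth is a real field of struct linux_binprm; the limit shown is illustrative):

	if (bprm->recursion_depth > 5)	/* illustrative cap; upstream may differ */
		return -ELOOP;		/* refuse runaway interpreter chains */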
krb5
|
db64ca25d661a47b996b4e2645998b5d7f0eb52c
| 1 |
pkinit_server_return_padata(krb5_context context,
krb5_pa_data * padata,
krb5_data *req_pkt,
krb5_kdc_req * request,
krb5_kdc_rep * reply,
krb5_keyblock * encrypting_key,
krb5_pa_data ** send_pa,
krb5_kdcpreauth_callbacks cb,
krb5_kdcpreauth_rock rock,
krb5_kdcpreauth_moddata moddata,
krb5_kdcpreauth_modreq modreq)
{
krb5_error_code retval = 0;
krb5_data scratch = {0, 0, NULL};
krb5_pa_pk_as_req *reqp = NULL;
krb5_pa_pk_as_req_draft9 *reqp9 = NULL;
int i = 0;
unsigned char *subjectPublicKey = NULL;
unsigned char *dh_pubkey = NULL, *server_key = NULL;
unsigned int subjectPublicKey_len = 0;
unsigned int server_key_len = 0, dh_pubkey_len = 0;
krb5_kdc_dh_key_info dhkey_info;
krb5_data *encoded_dhkey_info = NULL;
krb5_pa_pk_as_rep *rep = NULL;
krb5_pa_pk_as_rep_draft9 *rep9 = NULL;
krb5_data *out_data = NULL;
krb5_octet_data secret;
krb5_enctype enctype = -1;
krb5_reply_key_pack *key_pack = NULL;
krb5_reply_key_pack_draft9 *key_pack9 = NULL;
krb5_data *encoded_key_pack = NULL;
pkinit_kdc_context plgctx;
pkinit_kdc_req_context reqctx;
int fixed_keypack = 0;
*send_pa = NULL;
if (padata->pa_type == KRB5_PADATA_PKINIT_KX) {
return return_pkinit_kx(context, request, reply,
encrypting_key, send_pa);
}
if (padata->length <= 0 || padata->contents == NULL)
return 0;
if (modreq == NULL) {
pkiDebug("missing request context \n");
return EINVAL;
}
plgctx = pkinit_find_realm_context(context, moddata, request->server);
if (plgctx == NULL) {
pkiDebug("Unable to locate correct realm context\n");
return ENOENT;
}
pkiDebug("pkinit_return_padata: entered!\n");
reqctx = (pkinit_kdc_req_context)modreq;
if (encrypting_key->contents) {
free(encrypting_key->contents);
encrypting_key->length = 0;
encrypting_key->contents = NULL;
}
for(i = 0; i < request->nktypes; i++) {
enctype = request->ktype[i];
if (!krb5_c_valid_enctype(enctype))
continue;
else {
pkiDebug("KDC picked etype = %d\n", enctype);
break;
}
}
if (i == request->nktypes) {
retval = KRB5KDC_ERR_ETYPE_NOSUPP;
goto cleanup;
}
switch((int)reqctx->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
init_krb5_pa_pk_as_rep(&rep);
if (rep == NULL) {
retval = ENOMEM;
goto cleanup;
}
/* let's assume it's RSA. we'll reset it to DH if needed */
rep->choice = choice_pa_pk_as_rep_encKeyPack;
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
init_krb5_pa_pk_as_rep_draft9(&rep9);
if (rep9 == NULL) {
retval = ENOMEM;
goto cleanup;
}
rep9->choice = choice_pa_pk_as_rep_draft9_encKeyPack;
break;
default:
retval = KRB5KDC_ERR_PREAUTH_FAILED;
goto cleanup;
}
if (reqctx->rcv_auth_pack != NULL &&
reqctx->rcv_auth_pack->clientPublicValue != NULL) {
subjectPublicKey =
reqctx->rcv_auth_pack->clientPublicValue->subjectPublicKey.data;
subjectPublicKey_len =
reqctx->rcv_auth_pack->clientPublicValue->subjectPublicKey.length;
rep->choice = choice_pa_pk_as_rep_dhInfo;
} else if (reqctx->rcv_auth_pack9 != NULL &&
reqctx->rcv_auth_pack9->clientPublicValue != NULL) {
subjectPublicKey =
reqctx->rcv_auth_pack9->clientPublicValue->subjectPublicKey.data;
subjectPublicKey_len =
reqctx->rcv_auth_pack9->clientPublicValue->subjectPublicKey.length;
rep9->choice = choice_pa_pk_as_rep_draft9_dhSignedData;
}
/* if this DH, then process finish computing DH key */
if (rep != NULL && (rep->choice == choice_pa_pk_as_rep_dhInfo ||
rep->choice == choice_pa_pk_as_rep_draft9_dhSignedData)) {
pkiDebug("received DH key delivery AS REQ\n");
retval = server_process_dh(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, subjectPublicKey,
subjectPublicKey_len, &dh_pubkey, &dh_pubkey_len,
&server_key, &server_key_len);
if (retval) {
pkiDebug("failed to process/create dh paramters\n");
goto cleanup;
}
}
if ((rep9 != NULL &&
rep9->choice == choice_pa_pk_as_rep_draft9_dhSignedData) ||
(rep != NULL && rep->choice == choice_pa_pk_as_rep_dhInfo)) {
/*
* This is DH, so don't generate the key until after we
* encode the reply, because the encoded reply is needed
* to generate the key in some cases.
*/
dhkey_info.subjectPublicKey.length = dh_pubkey_len;
dhkey_info.subjectPublicKey.data = dh_pubkey;
dhkey_info.nonce = request->nonce;
dhkey_info.dhKeyExpiration = 0;
retval = k5int_encode_krb5_kdc_dh_key_info(&dhkey_info,
&encoded_dhkey_info);
if (retval) {
pkiDebug("encode_krb5_kdc_dh_key_info failed\n");
goto cleanup;
}
#ifdef DEBUG_ASN1
print_buffer_bin((unsigned char *)encoded_dhkey_info->data,
encoded_dhkey_info->length,
"/tmp/kdc_dh_key_info");
#endif
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
retval = cms_signeddata_create(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_SERVER, 1,
(unsigned char *)encoded_dhkey_info->data,
encoded_dhkey_info->length,
&rep->u.dh_Info.dhSignedData.data,
&rep->u.dh_Info.dhSignedData.length);
if (retval) {
pkiDebug("failed to create pkcs7 signed data\n");
goto cleanup;
}
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
retval = cms_signeddata_create(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_DRAFT9, 1,
(unsigned char *)encoded_dhkey_info->data,
encoded_dhkey_info->length,
&rep9->u.dhSignedData.data,
&rep9->u.dhSignedData.length);
if (retval) {
pkiDebug("failed to create pkcs7 signed data\n");
goto cleanup;
}
break;
}
} else {
pkiDebug("received RSA key delivery AS REQ\n");
retval = krb5_c_make_random_key(context, enctype, encrypting_key);
if (retval) {
pkiDebug("unable to make a session key\n");
goto cleanup;
}
/* check if PA_TYPE of 132 is present which means the client is
* requesting that a checksum is sent back instead of the nonce
*/
for (i = 0; request->padata[i] != NULL; i++) {
pkiDebug("%s: Checking pa_type 0x%08x\n",
__FUNCTION__, request->padata[i]->pa_type);
if (request->padata[i]->pa_type == 132)
fixed_keypack = 1;
}
pkiDebug("%s: return checksum instead of nonce = %d\n",
__FUNCTION__, fixed_keypack);
/* if this is an RFC reply or draft9 client requested a checksum
* in the reply instead of the nonce, create an RFC-style keypack
*/
if ((int)padata->pa_type == KRB5_PADATA_PK_AS_REQ || fixed_keypack) {
init_krb5_reply_key_pack(&key_pack);
if (key_pack == NULL) {
retval = ENOMEM;
goto cleanup;
}
retval = krb5_c_make_checksum(context, 0,
encrypting_key, KRB5_KEYUSAGE_TGS_REQ_AUTH_CKSUM,
req_pkt, &key_pack->asChecksum);
if (retval) {
pkiDebug("unable to calculate AS REQ checksum\n");
goto cleanup;
}
#ifdef DEBUG_CKSUM
pkiDebug("calculating checksum on buf size = %d\n", req_pkt->length);
print_buffer(req_pkt->data, req_pkt->length);
pkiDebug("checksum size = %d\n", key_pack->asChecksum.length);
print_buffer(key_pack->asChecksum.contents,
key_pack->asChecksum.length);
pkiDebug("encrypting key (%d)\n", encrypting_key->length);
print_buffer(encrypting_key->contents, encrypting_key->length);
#endif
krb5_copy_keyblock_contents(context, encrypting_key,
&key_pack->replyKey);
retval = k5int_encode_krb5_reply_key_pack(key_pack,
&encoded_key_pack);
if (retval) {
pkiDebug("failed to encode reply_key_pack\n");
goto cleanup;
}
}
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
rep->choice = choice_pa_pk_as_rep_encKeyPack;
retval = cms_envelopeddata_create(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, padata->pa_type, 1,
(unsigned char *)encoded_key_pack->data,
encoded_key_pack->length,
&rep->u.encKeyPack.data, &rep->u.encKeyPack.length);
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
/* if the request is from the broken draft9 client that
* expects back a nonce, create it now
*/
if (!fixed_keypack) {
init_krb5_reply_key_pack_draft9(&key_pack9);
if (key_pack9 == NULL) {
retval = ENOMEM;
goto cleanup;
}
key_pack9->nonce = reqctx->rcv_auth_pack9->pkAuthenticator.nonce;
krb5_copy_keyblock_contents(context, encrypting_key,
&key_pack9->replyKey);
retval = k5int_encode_krb5_reply_key_pack_draft9(key_pack9,
&encoded_key_pack);
if (retval) {
pkiDebug("failed to encode reply_key_pack\n");
goto cleanup;
}
}
rep9->choice = choice_pa_pk_as_rep_draft9_encKeyPack;
retval = cms_envelopeddata_create(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, padata->pa_type, 1,
(unsigned char *)encoded_key_pack->data,
encoded_key_pack->length,
&rep9->u.encKeyPack.data, &rep9->u.encKeyPack.length);
break;
}
if (retval) {
pkiDebug("failed to create pkcs7 enveloped data: %s\n",
error_message(retval));
goto cleanup;
}
#ifdef DEBUG_ASN1
print_buffer_bin((unsigned char *)encoded_key_pack->data,
encoded_key_pack->length,
"/tmp/kdc_key_pack");
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
print_buffer_bin(rep->u.encKeyPack.data,
rep->u.encKeyPack.length,
"/tmp/kdc_enc_key_pack");
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
print_buffer_bin(rep9->u.encKeyPack.data,
rep9->u.encKeyPack.length,
"/tmp/kdc_enc_key_pack");
break;
}
#endif
}
if ((rep != NULL && rep->choice == choice_pa_pk_as_rep_dhInfo) &&
((reqctx->rcv_auth_pack != NULL &&
reqctx->rcv_auth_pack->supportedKDFs != NULL))) {
/* If using the alg-agility KDF, put the algorithm in the reply
* before encoding it.
*/
if (reqctx->rcv_auth_pack != NULL &&
reqctx->rcv_auth_pack->supportedKDFs != NULL) {
retval = pkinit_pick_kdf_alg(context, reqctx->rcv_auth_pack->supportedKDFs,
&(rep->u.dh_Info.kdfID));
if (retval) {
pkiDebug("pkinit_pick_kdf_alg failed: %s\n",
error_message(retval));
goto cleanup;
}
}
}
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
retval = k5int_encode_krb5_pa_pk_as_rep(rep, &out_data);
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
retval = k5int_encode_krb5_pa_pk_as_rep_draft9(rep9, &out_data);
break;
}
if (retval) {
pkiDebug("failed to encode AS_REP\n");
goto cleanup;
}
#ifdef DEBUG_ASN1
if (out_data != NULL)
print_buffer_bin((unsigned char *)out_data->data, out_data->length,
"/tmp/kdc_as_rep");
#endif
/* If this is DH, we haven't computed the key yet, so do it now. */
if ((rep9 != NULL &&
rep9->choice == choice_pa_pk_as_rep_draft9_dhSignedData) ||
(rep != NULL && rep->choice == choice_pa_pk_as_rep_dhInfo)) {
/* If mutually supported KDFs were found, use the alg agility KDF */
if (rep->u.dh_Info.kdfID) {
secret.data = server_key;
secret.length = server_key_len;
retval = pkinit_alg_agility_kdf(context, &secret,
rep->u.dh_Info.kdfID,
request->client, request->server,
enctype,
(krb5_octet_data *)req_pkt,
(krb5_octet_data *)out_data,
encrypting_key);
if (retval) {
pkiDebug("pkinit_alg_agility_kdf failed: %s\n",
error_message(retval));
goto cleanup;
}
/* Otherwise, use the older octetstring2key() function */
} else {
retval = pkinit_octetstring2key(context, enctype, server_key,
server_key_len, encrypting_key);
if (retval) {
pkiDebug("pkinit_octetstring2key failed: %s\n",
error_message(retval));
goto cleanup;
}
}
}
*send_pa = malloc(sizeof(krb5_pa_data));
if (*send_pa == NULL) {
retval = ENOMEM;
free(out_data->data);
free(out_data);
out_data = NULL;
goto cleanup;
}
(*send_pa)->magic = KV5M_PA_DATA;
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
(*send_pa)->pa_type = KRB5_PADATA_PK_AS_REP;
break;
case KRB5_PADATA_PK_AS_REQ_OLD:
case KRB5_PADATA_PK_AS_REP_OLD:
(*send_pa)->pa_type = KRB5_PADATA_PK_AS_REP_OLD;
break;
}
(*send_pa)->length = out_data->length;
(*send_pa)->contents = (krb5_octet *) out_data->data;
cleanup:
pkinit_fini_kdc_req_context(context, reqctx);
free(scratch.data);
free(out_data);
if (encoded_dhkey_info != NULL)
krb5_free_data(context, encoded_dhkey_info);
if (encoded_key_pack != NULL)
krb5_free_data(context, encoded_key_pack);
free(dh_pubkey);
free(server_key);
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
free_krb5_pa_pk_as_req(&reqp);
free_krb5_pa_pk_as_rep(&rep);
free_krb5_reply_key_pack(&key_pack);
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
free_krb5_pa_pk_as_req_draft9(&reqp9);
free_krb5_pa_pk_as_rep_draft9(&rep9);
if (!fixed_keypack)
free_krb5_reply_key_pack_draft9(&key_pack9);
else
free_krb5_reply_key_pack(&key_pack);
break;
}
if (retval)
pkiDebug("pkinit_verify_padata failure");
return retval;
}
| 179,378 | 1,150 |
76503992088312027840262291132360097655
| null | null | null |
|
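Editor's note: in pkinit_server_return_padata() above, the final DH key-derivation block is entered for draft9 requests (rep9 != NULL) yet immediately reads rep->u.dh_Info.kdfID, and rep is NULL on that path, a kernel-of-the-bug NULL dereference visible in the excerpt itself. Hedged sketch of the expected guard:

	/* Only the RFC 4556 reply carries a kdfID; guard the rep deref. */
	if (rep != NULL && rep->u.dh_Info.kdfID) {
		/* ...alg-agility KDF path as above... */
	} else {
		/* ...pkinit_octetstring2key() fallback as above... */
	}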
linux
|
b5a1eeef04cc7859f34dec9b72ea1b28e4aba07c
| 1 |
static ssize_t bat_socket_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct socket_client *socket_client = file->private_data;
struct socket_packet *socket_packet;
size_t packet_len;
int error;
if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
return -EAGAIN;
if ((!buf) || (count < sizeof(struct icmp_packet)))
return -EINVAL;
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
error = wait_event_interruptible(socket_client->queue_wait,
socket_client->queue_len);
if (error)
return error;
spin_lock_bh(&socket_client->lock);
socket_packet = list_first_entry(&socket_client->queue_list,
struct socket_packet, list);
list_del(&socket_packet->list);
socket_client->queue_len--;
spin_unlock_bh(&socket_client->lock);
error = copy_to_user(buf, &socket_packet->icmp_packet,
socket_packet->icmp_len);
packet_len = socket_packet->icmp_len;
kfree(socket_packet);
if (error)
return -EFAULT;
return packet_len;
}
|
CWE-119
| 179,379 | 1,151 |
85869577016758360857678073749575889128
| null | null | null |
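Editor's note: bat_socket_read() checks that count can hold a struct icmp_packet, but then copies socket_packet->icmp_len bytes, which a larger queued packet can push past the end of the user buffer (CWE-119). Hedged sketch of the clamp:

	packet_len = min_t(size_t, socket_packet->icmp_len, count);
	error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);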
linux
|
ae53b5bd77719fed58086c5be60ce4f22bffe1c6
| 1 |
int sctp_rcv(struct sk_buff *skb)
{
struct sock *sk;
struct sctp_association *asoc;
struct sctp_endpoint *ep = NULL;
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
struct sctp_af *af;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
if (skb_linearize(skb))
goto discard_it;
sh = sctp_hdr(skb);
/* Pull up the IP and SCTP headers. */
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
goto discard_it;
if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0)
goto discard_it;
skb_pull(skb, sizeof(struct sctphdr));
/* Make sure we at least have chunk headers worth of data left. */
if (skb->len < sizeof(struct sctp_chunkhdr))
goto discard_it;
family = ipver2af(ip_hdr(skb)->version);
af = sctp_get_af_specific(family);
if (unlikely(!af))
goto discard_it;
/* Initialize local addresses for lookups. */
af->from_skb(&src, skb, 1);
af->from_skb(&dest, skb, 0);
/* If the packet is to or from a non-unicast address,
* silently discard the packet.
*
* This is not clearly defined in the RFC except in section
* 8.4 - OOTB handling. However, based on the book "Stream Control
* Transmission Protocol" 2.1, "It is important to note that the
* IP address of an SCTP transport address must be a routable
* unicast address. In other words, IP multicast addresses and
* IP broadcast addresses cannot be used in an SCTP transport
* address."
*/
if (!af->addr_valid(&src, NULL, skb) ||
!af->addr_valid(&dest, NULL, skb))
goto discard_it;
asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
if (!asoc)
ep = __sctp_rcv_lookup_endpoint(&dest);
/* Retrieve the common input handling substructure. */
rcvr = asoc ? &asoc->base : &ep->base;
sk = rcvr->sk;
/*
* If a frame arrives on an interface and the receiving socket is
* bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
{
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
} else {
sctp_endpoint_put(ep);
ep = NULL;
}
sk = sctp_get_ctl_sock();
ep = sctp_sk(sk)->ep;
sctp_endpoint_hold(ep);
rcvr = &ep->base;
}
/*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets.
* An SCTP packet is called an "out of the blue" (OOTB)
* packet if it is correctly formed, i.e., passed the
* receiver's checksum check, but the receiver is not
* able to identify the association to which this
* packet belongs.
*/
if (!asoc) {
if (sctp_rcv_ootb(skb)) {
SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
goto discard_release;
}
}
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
goto discard_release;
nf_reset(skb);
if (sk_filter(sk, skb))
goto discard_release;
/* Create an SCTP packet structure. */
chunk = sctp_chunkify(skb, asoc, sk);
if (!chunk)
goto discard_release;
SCTP_INPUT_CB(skb)->chunk = chunk;
/* Remember what endpoint is to handle this packet. */
chunk->rcvr = rcvr;
/* Remember the SCTP header. */
chunk->sctp_hdr = sh;
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
/* Remember where we came from. */
chunk->transport = transport;
/* Acquire access to the sock lock. Note: We are safe from other
* bottom halves on this lock, but a user may be in the lock too,
* so check if it is busy.
*/
sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
sctp_add_backlog(sk, skb);
} else {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
sctp_inq_push(&chunk->rcvr->inqueue, chunk);
}
sctp_bh_unlock_sock(sk);
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
return 0;
discard_it:
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
kfree_skb(skb);
return 0;
discard_release:
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
goto discard_it;
}
|
CWE-362
| 179,380 | 1,152 |
173020335186799730748611767119463332665
| null | null | null |
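Editor's note: the CWE-362 tag on this sctp record points at a race between asynchronous handlers (e.g. ICMP errors) and user-context socket operations; the pattern already visible above, take the bottom-half socket lock and defer to the backlog when user context owns the sock, is the shape of the fix on the handler side as well. A hedged, purely illustrative fragment:

	/* Handler side (not shown in this record), hedged sketch: */
	sctp_bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* user context owns the lock: defer instead of racing it */
	} else {
		/* safe to act on the association state here */
	}
	sctp_bh_unlock_sock(sk);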
linux
|
c4e7f9022e506c6635a5037713c37118e23193e4
| 1 |
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
{
int r = 0, idx;
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;
if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
return -EINVAL;
mutex_lock(&kvm->lock);
idx = srcu_read_lock(&kvm->srcu);
match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
assigned_dev->assigned_dev_id);
if (match) {
/* device already assigned */
r = -EEXIST;
goto out;
}
match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
if (match == NULL) {
printk(KERN_INFO "%s: Couldn't allocate memory\n",
__func__);
r = -ENOMEM;
goto out;
}
dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
assigned_dev->busnr,
assigned_dev->devfn);
if (!dev) {
printk(KERN_INFO "%s: host device not found\n", __func__);
r = -EINVAL;
goto out_free;
}
if (pci_enable_device(dev)) {
printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
r = -EBUSY;
goto out_put;
}
r = pci_request_regions(dev, "kvm_assigned_device");
if (r) {
printk(KERN_INFO "%s: Could not get access to device regions\n",
__func__);
goto out_disable;
}
pci_reset_function(dev);
pci_save_state(dev);
match->pci_saved_state = pci_store_saved_state(dev);
if (!match->pci_saved_state)
printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
__func__, dev_name(&dev->dev));
match->assigned_dev_id = assigned_dev->assigned_dev_id;
match->host_segnr = assigned_dev->segnr;
match->host_busnr = assigned_dev->busnr;
match->host_devfn = assigned_dev->devfn;
match->flags = assigned_dev->flags;
match->dev = dev;
spin_lock_init(&match->intx_lock);
match->irq_source_id = -1;
match->kvm = kvm;
match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
list_add(&match->list, &kvm->arch.assigned_dev_head);
if (!kvm->arch.iommu_domain) {
r = kvm_iommu_map_guest(kvm);
if (r)
goto out_list_del;
}
r = kvm_assign_device(kvm, match);
if (r)
goto out_list_del;
out:
srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
out_list_del:
if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
__func__, dev_name(&dev->dev));
list_del(&match->list);
pci_release_regions(dev);
out_disable:
pci_disable_device(dev);
out_put:
pci_dev_put(dev);
out_free:
kfree(match);
srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
}
|
CWE-264
| 179,381 | 1,153 |
272600557570379830235938521554631335981
| null | null | null |
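Editor's note: kvm_vm_ioctl_assign_device() hands a host PCI device to a guest without checking that the calling process may touch that device at all, which fits the CWE-264 tag. The fix class adds a permission probe before assignment; hedged sketch, with an illustrative helper name:

	/* Illustrative: verify the caller could access the device's sysfs
	 * resource files before granting assignment. */
	r = probe_sysfs_permissions(dev);
	if (r)
		goto out_put;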
linux
|
f8e9881c2aef1e982e5abc25c046820cd0b7cf64
| 1 |
static int br_parse_ip_options(struct sk_buff *skb)
{
struct ip_options *opt;
struct iphdr *iph;
struct net_device *dev = skb->dev;
u32 len;
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);
/* Basic sanity checks */
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto inhdr_error;
len = ntohs(iph->tot_len);
if (skb->len < len) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
} else if (len < (iph->ihl*4))
goto inhdr_error;
if (pskb_trim_rcsum(skb, len)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
/* Zero out the CB buffer if no options present */
if (iph->ihl == 5) {
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
return 0;
}
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
goto inhdr_error;
/* Check correct handling of SRR option */
if (unlikely(opt->srr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
goto drop;
if (ip_options_rcv_srr(skb))
goto drop;
}
return 0;
inhdr_error:
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
return -1;
}
|
CWE-399
| 179,388 | 1,160 |
126955592485195450458871228478571792967
| null | null | null |
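Editor's note: br_parse_ip_options() zeroes IPCB(skb) only on the no-options path, so option-bearing packets run ip_options_compile() over whatever stale control-block state the skb carried in. Hedged sketch of the unconditional reset:

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));	/* always reset */
	if (iph->ihl == 5)
		return 0;

	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
	if (ip_options_compile(dev_net(dev), opt, skb))
		goto inhdr_error;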
linux
|
a5b2c5b2ad5853591a6cac6134cd0f599a720865
| 1 |
static int apparmor_setprocattr(struct task_struct *task, char *name,
void *value, size_t size)
{
char *command, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
/* args points to a PAGE_SIZE buffer, AppArmor requires that
* the buffer must be null terminated or have size <= PAGE_SIZE -1
* so that AppArmor can null terminate them
*/
if (args[size - 1] != '\0') {
if (size == PAGE_SIZE)
return -EINVAL;
args[size] = '\0';
}
/* task can only write its own attributes */
if (current != task)
return -EACCES;
args = value;
args = strim(args);
command = strsep(&args, " ");
if (!args)
return -EINVAL;
args = skip_spaces(args);
if (!*args)
return -EINVAL;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
!AA_DO_TEST);
} else if (strcmp(command, "permhat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_DO_TEST);
} else if (strcmp(command, "changeprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
!AA_DO_TEST);
} else if (strcmp(command, "permprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
AA_DO_TEST);
} else if (strcmp(command, "permipc") == 0) {
error = aa_setprocattr_permipc(args);
} else {
struct common_audit_data sa;
COMMON_AUDIT_DATA_INIT(&sa, NONE);
sa.aad.op = OP_SETPROCATTR;
sa.aad.info = name;
sa.aad.error = -EINVAL;
return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL,
&sa, NULL);
}
} else if (strcmp(name, "exec") == 0) {
error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
!AA_DO_TEST);
} else {
/* only support the "current" and "exec" process attributes */
return -EINVAL;
}
if (!error)
error = size;
return error;
}
|
CWE-20
| 179,391 | 1,163 |
198478101125261503167728423542978062883
| null | null | null |
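Editor's note: the CWE-20 tag on apparmor_setprocattr() concerns trusting the layout of the userspace-supplied buffer; one defensive shape is to parse a bounded, NUL-terminated private copy so the string helpers can never run past the caller's page. Hedged, illustrative sketch (kmemdup_nul() is a real kernel helper, though from a later era than this code):

	char *kval = kmemdup_nul(value, size, GFP_KERNEL);	/* bounded copy */
	if (!kval)
		return -ENOMEM;
	/* ...run strim()/strsep() on kval instead of the raw buffer... */
	kfree(kval);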
linux
|
aba8d056078e47350d85b06a9cabd5afcc4b72ea
| 1 |
int perf_config(config_fn_t fn, void *data)
{
int ret = 0, found = 0;
char *repo_config = NULL;
const char *home = NULL;
/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
if (config_exclusive_filename)
return perf_config_from_file(fn, config_exclusive_filename, data);
if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
ret += perf_config_from_file(fn, perf_etc_perfconfig(),
data);
found += 1;
}
home = getenv("HOME");
if (perf_config_global() && home) {
char *user_config = strdup(mkpath("%s/.perfconfig", home));
if (!access(user_config, R_OK)) {
ret += perf_config_from_file(fn, user_config, data);
found += 1;
}
free(user_config);
}
repo_config = perf_pathdup("config");
if (!access(repo_config, R_OK)) {
ret += perf_config_from_file(fn, repo_config, data);
found += 1;
}
free(repo_config);
if (found == 0)
return -1;
return ret;
}
| 179,392 | 1,164 |
291567066678415769190519908516115264132
| null | null | null |
|
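Editor's note: perf_config() above reads ./config from whatever directory perf happens to be run in, letting an untrusted tree inject configuration. One hedged hardening sketch, an ownership check before trusting the file; the actual upstream change may simply have dropped the lookup:

	/* needs <sys/stat.h> and <unistd.h> */
	struct stat st;

	repo_config = perf_pathdup("config");
	if (!stat(repo_config, &st) && st.st_uid == geteuid() &&
	    !access(repo_config, R_OK)) {	/* only trust our own file */
		ret += perf_config_from_file(fn, repo_config, data);
		found += 1;
	}
	free(repo_config);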
linux
|
ea2bc483ff5caada7c4aa0d5fbf87d3a6590273d
| 1 |
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
struct sctp_association *assoc,
sctp_socket_type_t type)
{
struct sctp_sock *oldsp = sctp_sk(oldsk);
struct sctp_sock *newsp = sctp_sk(newsk);
struct sctp_bind_bucket *pp; /* hash list port iterator */
struct sctp_endpoint *newep = newsp->ep;
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
int flags = 0;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
*/
newsk->sk_sndbuf = oldsk->sk_sndbuf;
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
/* Brute force copy old sctp opt. */
inet_sk_copy_descendant(newsk, oldsk);
/* Restore the ep value that was overwritten with the above structure
* copy.
*/
newsp->ep = newep;
newsp->hmac = NULL;
/* Hook this new socket in to the bind_hash list. */
pp = sctp_sk(oldsk)->bind_hash;
sk_add_bind_node(newsk, &pp->owner);
sctp_sk(newsk)->bind_hash = pp;
inet_sk(newsk)->num = inet_sk(oldsk)->num;
/* Copy the bind_addr list from the original endpoint to the new
* endpoint so that we can handle restarts properly
*/
if (PF_INET6 == assoc->base.sk->sk_family)
flags = SCTP_ADDR6_ALLOWED;
if (assoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (assoc->peer.ipv6_address)
flags |= SCTP_ADDR6_PEERSUPP;
sctp_bind_addr_copy(&newsp->ep->base.bind_addr,
&oldsp->ep->base.bind_addr,
SCTP_SCOPE_GLOBAL, GFP_KERNEL, flags);
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
*/
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
sctp_sock_rfree(skb);
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
sctp_skb_set_owner_r(skb, newsk);
}
}
/* Clean up any messages pending delivery due to partial
* delivery. Three cases:
* 1) No partial deliver; no work.
* 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
* 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
*/
skb_queue_head_init(&newsp->pd_lobby);
sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode;
if (sctp_sk(oldsk)->pd_mode) {
struct sk_buff_head *queue;
/* Decide which queue to move pd_lobby skbs to. */
if (assoc->ulpq.pd_mode) {
queue = &newsp->pd_lobby;
} else
queue = &newsk->sk_receive_queue;
/* Walk through the pd_lobby, looking for skbs that
* need moved to the new socket.
*/
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
sctp_sock_rfree(skb);
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
sctp_skb_set_owner_r(skb, newsk);
}
}
/* Clear up any skbs waiting for the partial
* delivery to finish.
*/
if (assoc->ulpq.pd_mode)
sctp_clear_pd(oldsk);
}
/* Set the type of socket to indicate that it is peeled off from the
* original UDP-style socket or created with the accept() call on a
* TCP-style socket.
*/
newsp->type = type;
/* Mark the new socket "in-use" by the user so that any packets
* that may arrive on the association after we've moved it are
* queued to the backlog. This prevents a potential race between
* backlog processing on the old socket and new-packet processing
* on the new socket.
*/
sctp_lock_sock(newsk);
sctp_assoc_migrate(assoc, newsk);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
*/
if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP))
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
sctp_release_sock(newsk);
}
| 179,396 | 1,167 |
245295386150735555711105037144149537552
| null | null | null |
|
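Editor's note: sctp_sock_migrate() re-owns each skb it moves (drop oldsk's receive-memory charge, charge newsk), but one plausible reading of this record is that events still held in the association's reassembly/ordering queues kept their old owner, leaving oldsk's accounting unbalanced. The per-skb pattern, for reference, with a hedged note:

	sctp_sock_rfree(skb);			/* release oldsk's rmem charge */
	__skb_unlink(skb, &oldsk->sk_receive_queue);
	__skb_queue_tail(&newsk->sk_receive_queue, skb);
	sctp_skb_set_owner_r(skb, newsk);	/* charge newsk instead */
	/* Hedged: the fix presumably applies the same re-owning to events
	 * still queued inside assoc->ulpq, not just the socket queues. */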
linux
|
4ff67b720c02c36e54d55b88c2931879b7db1cd2
| 1 |
cifs_find_smb_ses(struct TCP_Server_Info *server, char *username)
{
struct list_head *tmp;
struct cifsSesInfo *ses;
write_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
if (strncmp(ses->userName, username, MAX_USERNAME_SIZE))
continue;
++ses->ses_count;
write_unlock(&cifs_tcp_ses_lock);
return ses;
}
write_unlock(&cifs_tcp_ses_lock);
return NULL;
}
|
CWE-264
| 179,401 | 1,172 |
174415870851944486620971874274594890925
| null | null | null |
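Editor's note: cifs_find_smb_ses() matches an existing session by username alone, so a second mount under the same name, wrong password included, can ride an already-authenticated session (CWE-264). Hedged sketch of a fuller match; the password-comparison helper is illustrative:

	if (strncmp(ses->userName, username, MAX_USERNAME_SIZE))
		continue;
	if (!cifs_session_password_matches(ses, password))	/* illustrative */
		continue;	/* same name, different credentials: no reuse */
	++ses->ses_count;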
linux
|
d370af0ef7951188daeb15bae75db7ba57c67846
| 1 |
static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
struct sk_buff *skb)
{
struct ias_object *obj;
struct ias_attrib *attrib;
int name_len;
int attr_len;
char name[IAS_MAX_CLASSNAME + 1]; /* 60 bytes */
char attr[IAS_MAX_ATTRIBNAME + 1]; /* 60 bytes */
__u8 *fp;
int n;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
fp = skb->data;
n = 1;
name_len = fp[n++];
memcpy(name, fp+n, name_len); n+=name_len;
name[name_len] = '\0';
attr_len = fp[n++];
memcpy(attr, fp+n, attr_len); n+=attr_len;
attr[attr_len] = '\0';
IRDA_DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr);
obj = irias_find_object(name);
if (obj == NULL) {
IRDA_DEBUG(2, "LM-IAS: Object %s not found\n", name);
iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN,
&irias_missing);
return;
}
IRDA_DEBUG(4, "LM-IAS: found %s, id=%d\n", obj->name, obj->id);
attrib = irias_find_attrib(obj, attr);
if (attrib == NULL) {
IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr);
iriap_getvaluebyclass_response(self, obj->id,
IAS_ATTRIB_UNKNOWN,
&irias_missing);
return;
}
/* We have a match; send the value. */
iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS,
attrib->value);
}
|
CWE-119
| 179,405 | 1,176 |
289039370717523292092463182568935459370
| null | null | null |
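Editor's note: iriap_getvaluebyclass_indication() copies attacker-controlled name_len/attr_len bytes into 61-byte stack arrays with no bounds check, a textbook CWE-119 stack overflow. Hedged sketch of the clamping fix:

	name_len = fp[n++];
	if (name_len > IAS_MAX_CLASSNAME)	/* reject oversized names */
		return;
	memcpy(name, fp+n, name_len); n += name_len;
	name[name_len] = '\0';

	attr_len = fp[n++];
	if (attr_len > IAS_MAX_ATTRIBNAME)	/* and oversized attributes */
		return;
	memcpy(attr, fp+n, attr_len); n += attr_len;
	attr[attr_len] = '\0';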
linux
|
8909c9ad8ff03611c9c96c9a92656213e4bb495b
| 1 |
void dev_load(struct net *net, const char *name)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
rcu_read_unlock();
if (!dev && capable(CAP_NET_ADMIN))
request_module("%s", name);
}
|
CWE-264
| 179,406 | 1,177 |
265834847794592046608322157600854064030
| null | null | null |
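Editor's note: dev_load() lets CAP_NET_ADMIN feed an arbitrary string to request_module(), i.e. load any kernel module by naming an interface after it. Hedged sketch of the known mitigation shape: try a "netdev-" module alias first, and require CAP_SYS_MODULE for a raw module name:

	int no_module;

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);	/* alias only */
	if (no_module && capable(CAP_SYS_MODULE))
		request_module("%s", name);	/* raw name needs module cap */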
krb5
|
cf1a0c411b2668c57c41e9c4efd15ba17b6b322c
| 1 |
process_chpw_request(krb5_context context, void *server_handle, char *realm,
krb5_keytab keytab, const krb5_fulladdr *local_faddr,
const krb5_fulladdr *remote_faddr, krb5_data *req,
krb5_data *rep)
{
krb5_error_code ret;
char *ptr;
unsigned int plen, vno;
krb5_data ap_req, ap_rep = empty_data();
krb5_data cipher = empty_data(), clear = empty_data();
krb5_auth_context auth_context = NULL;
krb5_principal changepw = NULL;
krb5_principal client, target = NULL;
krb5_ticket *ticket = NULL;
krb5_replay_data replay;
krb5_error krberror;
int numresult;
char strresult[1024];
char *clientstr = NULL, *targetstr = NULL;
const char *errmsg = NULL;
size_t clen;
char *cdots;
struct sockaddr_storage ss;
socklen_t salen;
char addrbuf[100];
krb5_address *addr = remote_faddr->address;
*rep = empty_data();
if (req->length < 4) {
/* either this, or the server is printing bad messages,
or the caller passed in garbage */
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated", sizeof(strresult));
goto chpwfail;
}
ptr = req->data;
/* verify length */
plen = (*ptr++ & 0xff);
plen = (plen<<8) | (*ptr++ & 0xff);
if (plen != req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request length was inconsistent",
sizeof(strresult));
goto chpwfail;
}
/* verify version number */
vno = (*ptr++ & 0xff) ;
vno = (vno<<8) | (*ptr++ & 0xff);
if (vno != 1 && vno != RFC3244_VERSION) {
ret = KRB5KDC_ERR_BAD_PVNO;
numresult = KRB5_KPASSWD_BAD_VERSION;
snprintf(strresult, sizeof(strresult),
"Request contained unknown protocol version number %d", vno);
goto chpwfail;
}
/* read, check ap-req length */
ap_req.length = (*ptr++ & 0xff);
ap_req.length = (ap_req.length<<8) | (*ptr++ & 0xff);
if (ptr + ap_req.length >= req->data + req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated in AP-REQ",
sizeof(strresult));
goto chpwfail;
}
/* verify ap_req */
ap_req.data = ptr;
ptr += ap_req.length;
ret = krb5_auth_con_init(context, &auth_context);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_auth_con_setflags(context, auth_context,
KRB5_AUTH_CONTEXT_DO_SEQUENCE);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_build_principal(context, &changepw, strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed building kadmin/changepw principal",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_rd_req(context, &auth_context, &ap_req, changepw, keytab,
NULL, &ticket);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed reading application request",
sizeof(strresult));
goto chpwfail;
}
/* construct the ap-rep */
ret = krb5_mk_rep(context, auth_context, &ap_rep);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed replying to application request",
sizeof(strresult));
goto chpwfail;
}
/* decrypt the ChangePasswdData */
cipher.length = (req->data + req->length) - ptr;
cipher.data = ptr;
/*
* Don't set a remote address in auth_context before calling krb5_rd_priv,
* so that we can work against clients behind a NAT. Reflection attacks
* aren't a concern since we use sequence numbers and since our requests
* don't look anything like our responses. Also don't set a local address,
* since we don't know what interface the request was received on.
*/
ret = krb5_rd_priv(context, auth_context, &cipher, &clear, &replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed decrypting request", sizeof(strresult));
goto chpwfail;
}
client = ticket->enc_part2->client;
/* decode ChangePasswdData for setpw requests */
if (vno == RFC3244_VERSION) {
krb5_data *clear_data;
ret = decode_krb5_setpw_req(&clear, &clear_data, &target);
if (ret != 0) {
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Failed decoding ChangePasswdData",
sizeof(strresult));
goto chpwfail;
}
zapfree(clear.data, clear.length);
clear = *clear_data;
free(clear_data);
if (target != NULL) {
ret = krb5_unparse_name(context, target, &targetstr);
if (ret != 0) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing target name for log",
sizeof(strresult));
goto chpwfail;
}
}
}
ret = krb5_unparse_name(context, client, &clientstr);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing client name for log",
sizeof(strresult));
goto chpwfail;
}
/* for cpw, verify that this is an AS_REQ ticket */
if (vno == 1 &&
(ticket->enc_part2->flags & TKT_FLG_INITIAL) == 0) {
numresult = KRB5_KPASSWD_INITIAL_FLAG_NEEDED;
strlcpy(strresult, "Ticket must be derived from a password",
sizeof(strresult));
goto chpwfail;
}
/* change the password */
ptr = k5memdup0(clear.data, clear.length, &ret);
ret = schpw_util_wrapper(server_handle, client, target,
(ticket->enc_part2->flags & TKT_FLG_INITIAL) != 0,
ptr, NULL, strresult, sizeof(strresult));
if (ret)
errmsg = krb5_get_error_message(context, ret);
/* zap the password */
zapfree(clear.data, clear.length);
zapfree(ptr, clear.length);
clear = empty_data();
clen = strlen(clientstr);
trunc_name(&clen, &cdots);
switch (addr->addrtype) {
case ADDRTYPE_INET: {
struct sockaddr_in *sin = ss2sin(&ss);
sin->sin_family = AF_INET;
memcpy(&sin->sin_addr, addr->contents, addr->length);
sin->sin_port = htons(remote_faddr->port);
salen = sizeof(*sin);
break;
}
case ADDRTYPE_INET6: {
struct sockaddr_in6 *sin6 = ss2sin6(&ss);
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr, addr->contents, addr->length);
sin6->sin6_port = htons(remote_faddr->port);
salen = sizeof(*sin6);
break;
}
default: {
struct sockaddr *sa = ss2sa(&ss);
sa->sa_family = AF_UNSPEC;
salen = sizeof(*sa);
break;
}
}
if (getnameinfo(ss2sa(&ss), salen,
addrbuf, sizeof(addrbuf), NULL, 0,
NI_NUMERICHOST | NI_NUMERICSERV) != 0)
strlcpy(addrbuf, "<unprintable>", sizeof(addrbuf));
if (vno == RFC3244_VERSION) {
size_t tlen;
char *tdots;
const char *targetp;
if (target == NULL) {
tlen = clen;
tdots = cdots;
targetp = targetstr;
} else {
tlen = strlen(targetstr);
trunc_name(&tlen, &tdots);
targetp = clientstr;
}
krb5_klog_syslog(LOG_NOTICE, _("setpw request from %s by %.*s%s for "
"%.*s%s: %s"), addrbuf, (int) clen,
clientstr, cdots, (int) tlen, targetp, tdots,
errmsg ? errmsg : "success");
} else {
krb5_klog_syslog(LOG_NOTICE, _("chpw request from %s for %.*s%s: %s"),
addrbuf, (int) clen, clientstr, cdots,
errmsg ? errmsg : "success");
}
switch (ret) {
case KADM5_AUTH_CHANGEPW:
numresult = KRB5_KPASSWD_ACCESSDENIED;
break;
case KADM5_PASS_Q_TOOSHORT:
case KADM5_PASS_REUSE:
case KADM5_PASS_Q_CLASS:
case KADM5_PASS_Q_DICT:
case KADM5_PASS_Q_GENERIC:
case KADM5_PASS_TOOSOON:
numresult = KRB5_KPASSWD_SOFTERROR;
break;
case 0:
numresult = KRB5_KPASSWD_SUCCESS;
strlcpy(strresult, "", sizeof(strresult));
break;
default:
numresult = KRB5_KPASSWD_HARDERROR;
break;
}
chpwfail:
clear.length = 2 + strlen(strresult);
clear.data = (char *) malloc(clear.length);
ptr = clear.data;
*ptr++ = (numresult>>8) & 0xff;
*ptr++ = numresult & 0xff;
memcpy(ptr, strresult, strlen(strresult));
cipher = empty_data();
if (ap_rep.length) {
ret = krb5_auth_con_setaddrs(context, auth_context,
local_faddr->address, NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult,
"Failed storing client and server internet addresses",
sizeof(strresult));
} else {
ret = krb5_mk_priv(context, auth_context, &clear, &cipher,
&replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed encrypting reply",
sizeof(strresult));
}
}
}
/* if no KRB-PRIV was constructed, then we need a KRB-ERROR.
if this fails, just bail. there's nothing else we can do. */
if (cipher.length == 0) {
/* clear out ap_rep now, so that it won't be inserted in the
reply */
if (ap_rep.length) {
free(ap_rep.data);
ap_rep = empty_data();
}
krberror.ctime = 0;
krberror.cusec = 0;
krberror.susec = 0;
ret = krb5_timeofday(context, &krberror.stime);
if (ret)
goto bailout;
/* this is really icky. but it's what all the other callers
to mk_error do. */
krberror.error = ret;
krberror.error -= ERROR_TABLE_BASE_krb5;
if (krberror.error < 0 || krberror.error > 128)
krberror.error = KRB_ERR_GENERIC;
krberror.client = NULL;
ret = krb5_build_principal(context, &krberror.server,
strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret)
goto bailout;
krberror.text.length = 0;
krberror.e_data = clear;
ret = krb5_mk_error(context, &krberror, &cipher);
krb5_free_principal(context, krberror.server);
if (ret)
goto bailout;
}
/* construct the reply */
ret = alloc_data(rep, 6 + ap_rep.length + cipher.length);
if (ret)
goto bailout;
ptr = rep->data;
/* length */
*ptr++ = (rep->length>>8) & 0xff;
*ptr++ = rep->length & 0xff;
/* version == 0x0001 big-endian */
*ptr++ = 0;
*ptr++ = 1;
/* ap_rep length, big-endian */
*ptr++ = (ap_rep.length>>8) & 0xff;
*ptr++ = ap_rep.length & 0xff;
/* ap-rep data */
if (ap_rep.length) {
memcpy(ptr, ap_rep.data, ap_rep.length);
ptr += ap_rep.length;
}
/* krb-priv or krb-error */
memcpy(ptr, cipher.data, cipher.length);
bailout:
krb5_auth_con_free(context, auth_context);
krb5_free_principal(context, changepw);
krb5_free_ticket(context, ticket);
free(ap_rep.data);
free(clear.data);
free(cipher.data);
krb5_free_principal(context, target);
krb5_free_unparsed_name(context, targetstr);
krb5_free_unparsed_name(context, clientstr);
krb5_free_error_message(context, errmsg);
return ret;
}
|
CWE-20
| 179,407 | 1,178 |
151952841528932102997447826449546146841
| null | null | null |
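Editor's note: every validation failure in process_chpw_request() above still builds and sends a reply, which is what lets two kpasswd servers be pointed at each other and ping-pong error packets indefinitely (the classic UDP ping-pong; CWE-20 here). Hedged sketch: drop, rather than answer, packets that fail the cheap sanity checks:

	if (req->length < 4) {
		ret = KRB5KRB_AP_ERR_MODIFIED;
		goto bailout;	/* hedged: bail without constructing a reply */
	}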
linux
|
f54e18f1b831c92f6512d2eedb224cd63d607d3d
| 1 |
static int rock_continue(struct rock_state *rs)
{
int ret = 1;
int blocksize = 1 << rs->inode->i_blkbits;
const int min_de_size = offsetof(struct rock_ridge, u);
kfree(rs->buffer);
rs->buffer = NULL;
if ((unsigned)rs->cont_offset > blocksize - min_de_size ||
(unsigned)rs->cont_size > blocksize ||
(unsigned)(rs->cont_offset + rs->cont_size) > blocksize) {
printk(KERN_NOTICE "rock: corrupted directory entry. "
"extent=%d, offset=%d, size=%d\n",
rs->cont_extent, rs->cont_offset, rs->cont_size);
ret = -EIO;
goto out;
}
if (rs->cont_extent) {
struct buffer_head *bh;
rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL);
if (!rs->buffer) {
ret = -ENOMEM;
goto out;
}
ret = -EIO;
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
if (bh) {
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
rs->cont_size);
put_bh(bh);
rs->chr = rs->buffer;
rs->len = rs->cont_size;
rs->cont_extent = 0;
rs->cont_size = 0;
rs->cont_offset = 0;
return 0;
}
printk("Unable to read rock-ridge attributes\n");
}
out:
kfree(rs->buffer);
rs->buffer = NULL;
return ret;
}
|
CWE-399
| 179,408 | 1,179 |
37972843024361272794052624541015761601
| null | null | null |
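Editor's note: rock_continue() validates each continuation entry but never bounds how many CE entries it will chase, so a looping chain in a crafted image spins forever (CWE-399). Hedged sketch of a loop cap; the counter field is illustrative, added to struct rock_state:

#define RR_MAX_CE_ENTRIES 32	/* illustrative bound */

	if (rs->cont_extent) {
		if (++rs->cont_loops > RR_MAX_CE_ENTRIES)
			goto out;	/* refuse unbounded CE chains */
		/* ...read the continuation block as above... */
	}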
linux
|
f647d7c155f069c1a068030255c300663516420e
| 1 |
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread;
struct thread_struct *next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
unsigned fsindex, gsindex;
fpu_switch_t fpu;
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
/*
* Reload esp0, LDT and the page table pointer:
*/
load_sp0(tss, next);
/*
* Switch DS and ES.
* This won't pick up thread selector changes, but I guess that is ok.
*/
savesegment(es, prev->es);
if (unlikely(next->es | prev->es))
loadsegment(es, next->es);
savesegment(ds, prev->ds);
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
*
* (e.g. xen_load_tls())
*/
savesegment(fs, fsindex);
savesegment(gs, gsindex);
load_TLS(next, cpu);
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
* the GDT and LDT are properly updated, and must be
* done before math_state_restore, so the TS bit is up
* to date.
*/
arch_end_context_switch(next_p);
/*
* Switch FS and GS.
*
* Segment register != 0 always requires a reload. Also
* reload when it has changed. When prev process used 64bit
* base always reload to avoid an information leak.
*/
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
/*
* Check if the user used a selector != 0; if yes
* clear 64bit base, since overloaded base is always
* mapped to the Null selector
*/
if (fsindex)
prev->fs = 0;
}
/* when next process has a 64bit base use it */
if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
if (gsindex)
prev->gs = 0;
}
if (next->gs)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
switch_fpu_finish(next_p, fpu);
/*
* Switch the PDA and FPU contexts.
*/
prev->usersp = this_cpu_read(old_rsp);
this_cpu_write(old_rsp, next->usersp);
this_cpu_write(current_task, next_p);
/*
* If it were not for PREEMPT_ACTIVE we could guarantee that the
* preempt_count of all tasks was equal here and this would not be
* needed.
*/
task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
this_cpu_write(kernel_stack,
(unsigned long)task_stack_page(next_p) +
THREAD_SIZE - KERNEL_STACK_OFFSET);
/*
* Now maybe reload the debug registers and handle I/O bitmaps
*/
if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
return prev_p;
}
|
CWE-200
| 179,409 | 1,180 |
205391279388742041103241912506458605751
| null | null | null |
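Editor's note: the CWE-200 tag on __switch_to() lines up with the es/ds reloads happening before load_TLS(): a selector referring to a TLS slot can be loaded while the previous task's descriptor is still live in the GDT. Hedged sketch of the reordering:

	savesegment(es, prev->es);
	savesegment(ds, prev->ds);
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);		/* install next's TLS descriptors first */

	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);	/* ...then switch es/ds */
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);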
tcpdump
|
0f95d441e4b5d7512cc5c326c8668a120e048eda
| 1 |
ppp_hdlc(netdissect_options *ndo,
const u_char *p, int length)
{
u_char *b, *s, *t, c;
int i, proto;
const void *se;
if (length <= 0)
return;
b = (uint8_t *)malloc(length);
if (b == NULL)
return;
/*
* Unescape all the data into a temporary, private, buffer.
* Do this so that we don't overwrite the original packet
* contents.
*/
for (s = (u_char *)p, t = b, i = length; i > 0; i--) {
c = *s++;
if (c == 0x7d) {
if (i > 1) {
i--;
c = *s++ ^ 0x20;
} else
continue;
}
*t++ = c;
}
se = ndo->ndo_snapend;
ndo->ndo_snapend = t;
length = t - b;
/* now lets guess about the payload codepoint format */
if (length < 1)
goto trunc;
proto = *b; /* start with a one-octet codepoint guess */
switch (proto) {
case PPP_IP:
ip_print(ndo, b + 1, length - 1);
goto cleanup;
case PPP_IPV6:
ip6_print(ndo, b + 1, length - 1);
goto cleanup;
default: /* no luck - try next guess */
break;
}
if (length < 2)
goto trunc;
proto = EXTRACT_16BITS(b); /* next guess - load two octets */
switch (proto) {
case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */
if (length < 4)
goto trunc;
proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */
handle_ppp(ndo, proto, b + 4, length - 4);
break;
default: /* last guess - proto must be a PPP proto-id */
handle_ppp(ndo, proto, b + 2, length - 2);
break;
}
cleanup:
ndo->ndo_snapend = se;
free(b);
return;
trunc:
ndo->ndo_snapend = se;
free(b);
ND_PRINT((ndo, "[|ppp]"));
}
|
CWE-119
| 179,412 | 1,183 |
163830884355634389581814561024512585440
| null | null | null |
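Editor's note: ppp_hdlc() sizes its scratch buffer from the on-wire length, but the unescape loop reads from the captured bytes, so a packet longer than the capture walks past the end of the available data (CWE-119). Hedged sketch: operate only on bytes actually captured:

	u_int caplen = ndo->ndo_snapend - p;	/* bytes really available */

	if (caplen > (u_int)length)
		caplen = length;
	b = (u_char *)malloc(caplen);
	if (b == NULL)
		return;
	for (s = (u_char *)p, t = b, i = caplen; i > 0; i--) {
		/* ...unescape as above, bounded by caplen... */
	}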
linux
|
f2e323ec96077642d397bb1c355def536d489d16
| 1 |
static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd)
{
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
u8 b[] = { 0x00, 0xff, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00 };
memcpy(&b[4], cmd->msg, cmd->msg_len);
state->config->send_command(fe, 0x72,
sizeof(b) - (6 - cmd->msg_len), b,
NULL, NULL);
return 0;
}
|
CWE-119
| 179,413 | 1,184 |
192608949045328579601369441585250086987
| null | null | null |
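Editor's note: the DiSEqC command buffer b[] above has room for only 6 message bytes after its 4-byte header, yet cmd->msg_len arrives from an ioctl unchecked, so memcpy can overrun b (CWE-119). Hedged sketch of the bound:

	if (cmd->msg_len > sizeof(b) - 4)	/* only 6 payload bytes fit */
		return -EINVAL;
	memcpy(&b[4], cmd->msg, cmd->msg_len);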
linux
|
338f977f4eb441e69bb9a46eaa0ac715c931a67f
| 1 |
static int ieee80211_fragment(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int hdrlen,
int frag_threshold)
{
struct ieee80211_local *local = tx->local;
struct ieee80211_tx_info *info;
struct sk_buff *tmp;
int per_fragm = frag_threshold - hdrlen - FCS_LEN;
int pos = hdrlen + per_fragm;
int rem = skb->len - hdrlen - per_fragm;
if (WARN_ON(rem < 0))
return -EINVAL;
/* first fragment was already added to queue by caller */
while (rem) {
int fraglen = per_fragm;
if (fraglen > rem)
fraglen = rem;
rem -= fraglen;
tmp = dev_alloc_skb(local->tx_headroom +
frag_threshold +
tx->sdata->encrypt_headroom +
IEEE80211_ENCRYPT_TAILROOM);
if (!tmp)
return -ENOMEM;
__skb_queue_tail(&tx->skbs, tmp);
skb_reserve(tmp,
local->tx_headroom + tx->sdata->encrypt_headroom);
/* copy control information */
memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
info = IEEE80211_SKB_CB(tmp);
info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_FIRST_FRAGMENT);
if (rem)
info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
skb_copy_queue_mapping(tmp, skb);
tmp->priority = skb->priority;
tmp->dev = skb->dev;
/* copy header and data */
memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
pos += fraglen;
}
/* adjust first fragment's length */
skb->len = hdrlen + per_fragm;
return 0;
}
|
CWE-200
| 179,414 | 1,185 |
252627261483521544309005842789154299812
| null | null | null |
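Editor's note: ieee80211_fragment() shortens the first fragment by assigning skb->len directly, leaving the tail pointer and the bytes beyond the new length untouched; one plausible reading of the CWE-200 tag is that later encryption/tailroom handling then operates over that stale data. Hedged sketch using the trim helper instead:

	/* adjust first fragment's length */
	skb_trim(skb, hdrlen + per_fragm);	/* keeps skb->tail consistent */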
linux
|
a430c9166312e1aa3d80bce32374233bdbfeba32
| 1 |
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 ||
(mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
done:
if (ctxt->rip_relative)
ctxt->memopp->addr.mem.ea += ctxt->_eip;
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
|
CWE-399
| 179,415 | 1,186 |
201743535793806938662424126490716359872
| null | null | null |
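Note on the record above: the done: label applies the rip-relative fix-up even when an early insn_fetch bails out, at which point ctxt->memopp may never have been assigned. Below is a minimal, self-contained sketch of that hazard pattern; all names are invented for illustration, and this is a reading of the code's shape, not the recorded fix.

#include <stddef.h>
#include <stdio.h>

struct operand { long ea; };

/* Sketch: cleanup code after a shared exit label dereferences a pointer
 * that is only assigned on the success path. If decode fails before
 * memopp is set, the fix-up below dereferences NULL; a guard of the form
 * "if (rip_rel && memopp)" closes the hole. */
static int decode(int fail_early, int rip_rel)
{
    struct operand op = { 0 };
    struct operand *memopp = NULL;
    int rc = fail_early ? -1 : 0;

    if (rc == 0)
        memopp = &op;            /* only reached on success */

    if (rip_rel)                 /* unguarded: crashes when rc != 0 */
        memopp->ea += 0x1000;
    return rc;
}

int main(void)
{
    printf("%d\n", decode(0, 1)); /* ok; decode(1, 1) would crash */
    return 0;
}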
linux
|
3d32e4dbe71374a6780eaf51d719d76f9a9bf22f
| 1 |
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
gfn_t gfn, end_gfn;
pfn_t pfn;
int r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int flags;
/* check if iommu exists and in use */
if (!domain)
return 0;
gfn = slot->base_gfn;
end_gfn = gfn + slot->npages;
flags = IOMMU_READ;
if (!(slot->flags & KVM_MEM_READONLY))
flags |= IOMMU_WRITE;
if (!kvm->arch.iommu_noncoherent)
flags |= IOMMU_CACHE;
while (gfn < end_gfn) {
unsigned long page_size;
/* Check if already mapped */
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
gfn += 1;
continue;
}
/* Get the page size we could use to map */
page_size = kvm_host_page_size(kvm, gfn);
/* Make sure the page_size does not exceed the memslot */
while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
page_size >>= 1;
/* Make sure gfn is aligned to the page size we want to map */
while ((gfn << PAGE_SHIFT) & (page_size - 1))
page_size >>= 1;
/* Make sure hva is aligned to the page size we want to map */
while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
page_size >>= 1;
/*
* Pin all pages we are about to map in memory. This is
* important because we unmap and unpin in 4kb steps later.
*/
pfn = kvm_pin_pages(slot, gfn, page_size);
if (is_error_noslot_pfn(pfn)) {
gfn += 1;
continue;
}
/* Map into IO address space */
r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
page_size, flags);
if (r) {
printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%llx\n", pfn);
kvm_unpin_pages(kvm, pfn, page_size);
goto unmap_pages;
}
gfn += page_size >> PAGE_SHIFT;
}
return 0;
unmap_pages:
kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
return r;
}
|
CWE-189
| 179,416 | 1,187 |
40650788508994539050513510667633304622
| null | null | null |
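Note on the record above: on the iommu_map() failure path, kvm_unpin_pages() receives page_size, which is in bytes. If that helper counts pages, as its later correction suggests, the error path releases far too many references. A hedged sketch of the bytes-versus-pages mismatch, with hypothetical helpers:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Sketch: an API that pins/unpins in PAGES, called with BYTES.
 * unpin(pfn, page_size) releases PAGE_SIZE times too many references;
 * the matching call is unpin(pfn, page_size >> PAGE_SHIFT). */
static unsigned long pinned;

static void pin(unsigned long npages)   { pinned += npages; }
static void unpin(unsigned long npages) { pinned -= npages; }

int main(void)
{
    unsigned long page_size = 512 * PAGE_SIZE;   /* one 2 MiB mapping */

    pin(page_size >> PAGE_SHIFT);                /* 512 pages pinned */
    unpin(page_size);                            /* unit bug: 2097152 "pages" */
    printf("pinned after error path: %ld\n", (long)pinned); /* negative */
    return 0;
}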
linux
|
a2b9e6c1a35afcc0973acb72e591c714e78885ff
| 1 |
static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
int r = EMULATE_DONE;
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
if (!is_guest_mode(vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
r = EMULATE_FAIL;
}
kvm_queue_exception(vcpu, UD_VECTOR);
return r;
}
|
CWE-362
| 179,424 | 1,191 |
44326647696150177215031281260553621216
| null | null | null |
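Note on the record above: the handler reports every emulation failure to host userspace and queues #UD regardless of where the failure occurred. One containment policy for such a handler is to choose the disposition by privilege level; the sketch below uses hypothetical names and is not a claim about the recorded patch.

#include <stdio.h>

enum { EMULATE_DONE, EMULATE_FAIL };

/* Sketch: reporting every failure to the host process lets unprivileged
 * guest code take down its own VM; injecting #UD for CPL > 0 keeps the
 * blast radius inside the guest. */
static int handle_failure(int guest_mode, int cpl)
{
    if (!guest_mode && cpl == 0) {
        puts("report internal error to userspace");
        return EMULATE_FAIL;
    }
    puts("queue #UD into the guest");
    return EMULATE_DONE;
}

int main(void)
{
    handle_failure(0, 0);  /* kernel-mode failure: surface it */
    handle_failure(0, 3);  /* user-mode failure: contain it */
    return 0;
}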
linux
|
e40607cbe270a9e8360907cb1e62ddf0736e4864
| 1 |
static int sctp_process_param(struct sctp_association *asoc,
union sctp_params param,
const union sctp_addr *peer_addr,
gfp_t gfp)
{
struct net *net = sock_net(asoc->base.sk);
union sctp_addr addr;
int i;
__u16 sat;
int retval = 1;
sctp_scope_t scope;
time_t stale;
struct sctp_af *af;
union sctp_addr_param *addr_param;
struct sctp_transport *t;
struct sctp_endpoint *ep = asoc->ep;
/* We maintain all INIT parameters in network byte order all the
* time. This allows us to not worry about whether the parameters
* came from a fresh INIT, and INIT ACK, or were stored in a cookie.
*/
switch (param.p->type) {
case SCTP_PARAM_IPV6_ADDRESS:
if (PF_INET6 != asoc->base.sk->sk_family)
break;
goto do_addr_param;
case SCTP_PARAM_IPV4_ADDRESS:
/* v4 addresses are not allowed on v6-only socket */
if (ipv6_only_sock(asoc->base.sk))
break;
do_addr_param:
af = sctp_get_af_specific(param_type2af(param.p->type));
af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
scope = sctp_scope(peer_addr);
if (sctp_in_scope(net, &addr, scope))
if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
return 0;
break;
case SCTP_PARAM_COOKIE_PRESERVATIVE:
if (!net->sctp.cookie_preserve_enable)
break;
stale = ntohl(param.life->lifespan_increment);
/* Suggested Cookie Life span increment's unit is msec,
* (1/1000sec).
*/
asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale);
break;
case SCTP_PARAM_HOST_NAME_ADDRESS:
pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__);
break;
case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
/* Turn off the default values first so we'll know which
* ones are really set by the peer.
*/
asoc->peer.ipv4_address = 0;
asoc->peer.ipv6_address = 0;
/* Assume that peer supports the address family
* by which it sends a packet.
*/
if (peer_addr->sa.sa_family == AF_INET6)
asoc->peer.ipv6_address = 1;
else if (peer_addr->sa.sa_family == AF_INET)
asoc->peer.ipv4_address = 1;
/* Cycle through address types; avoid divide by 0. */
sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
if (sat)
sat /= sizeof(__u16);
for (i = 0; i < sat; ++i) {
switch (param.sat->types[i]) {
case SCTP_PARAM_IPV4_ADDRESS:
asoc->peer.ipv4_address = 1;
break;
case SCTP_PARAM_IPV6_ADDRESS:
if (PF_INET6 == asoc->base.sk->sk_family)
asoc->peer.ipv6_address = 1;
break;
case SCTP_PARAM_HOST_NAME_ADDRESS:
asoc->peer.hostname_address = 1;
break;
default: /* Just ignore anything else. */
break;
}
}
break;
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
asoc->peer.cookie = param.cookie->body;
break;
case SCTP_PARAM_HEARTBEAT_INFO:
/* Would be odd to receive, but it causes no problems. */
break;
case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
/* Rejected during verify stage. */
break;
case SCTP_PARAM_ECN_CAPABLE:
asoc->peer.ecn_capable = 1;
break;
case SCTP_PARAM_ADAPTATION_LAYER_IND:
asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind);
break;
case SCTP_PARAM_SET_PRIMARY:
if (!net->sctp.addip_enable)
goto fall_through;
addr_param = param.v + sizeof(sctp_addip_param_t);
af = sctp_get_af_specific(param_type2af(param.p->type));
af->from_addr_param(&addr, addr_param,
htons(asoc->peer.port), 0);
/* if the address is invalid, we can't process it.
* XXX: see spec for what to do.
*/
if (!af->addr_valid(&addr, NULL, NULL))
break;
t = sctp_assoc_lookup_paddr(asoc, &addr);
if (!t)
break;
sctp_assoc_set_primary(asoc, t);
break;
case SCTP_PARAM_SUPPORTED_EXT:
sctp_process_ext_param(asoc, param);
break;
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (net->sctp.prsctp_enable) {
asoc->peer.prsctp_capable = 1;
break;
}
/* Fall Through */
goto fall_through;
case SCTP_PARAM_RANDOM:
if (!ep->auth_enable)
goto fall_through;
/* Save peer's random parameter */
asoc->peer.peer_random = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_random) {
retval = 0;
break;
}
break;
case SCTP_PARAM_HMAC_ALGO:
if (!ep->auth_enable)
goto fall_through;
/* Save peer's HMAC list */
asoc->peer.peer_hmacs = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_hmacs) {
retval = 0;
break;
}
/* Set the default HMAC the peer requested*/
sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo);
break;
case SCTP_PARAM_CHUNKS:
if (!ep->auth_enable)
goto fall_through;
asoc->peer.peer_chunks = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_chunks)
retval = 0;
break;
fall_through:
default:
/* Any unrecognized parameters should have been caught
* and handled by sctp_verify_param() which should be
* called prior to this routine. Simply log the error
* here.
*/
pr_debug("%s: ignoring param:%d for association:%p.\n",
__func__, ntohs(param.p->type), asoc);
break;
}
return retval;
}
|
CWE-399
| 179,425 | 1,192 |
233310274121482160323868526280614090729
| null | null | null |
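Note on the record above: do_addr_param calls af->from_addr_param() straight after the table lookup, and for a parameter whose type maps to no address family the lookup can plausibly return NULL. A sketch of the guard such a dispatch needs, with invented types:

#include <stdio.h>
#include <stddef.h>

struct af_ops { void (*from_addr_param)(void); };

static void v4_parse(void) { puts("parse IPv4 param"); }

/* Sketch: a family table indexed by a wire-supplied type. An unexpected
 * or malformed parameter yields NULL, and an unchecked
 * af->from_addr_param() call is a NULL function-pointer dereference. */
static struct af_ops *get_af(int type)
{
    static struct af_ops v4 = { v4_parse };
    return type == 5 ? &v4 : NULL;   /* 5: stand-in for the IPv4 type */
}

static int process_param(int wire_type)
{
    struct af_ops *af = get_af(wire_type);

    if (!af)                 /* the guard the parser needs */
        return -1;
    af->from_addr_param();
    return 0;
}

int main(void)
{
    process_param(5);        /* ok */
    process_param(0x4242);   /* malformed type: rejected, not crashed */
    return 0;
}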
linux
|
c88547a8119e3b581318ab65e9b72f27f23e641d
| 1 |
xfs_da3_fixhashpath(
struct xfs_da_state *state,
struct xfs_da_state_path *path)
{
struct xfs_da_state_blk *blk;
struct xfs_da_intnode *node;
struct xfs_da_node_entry *btree;
xfs_dahash_t lasthash=0;
int level;
int count;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_fixhashpath(state->args);
level = path->active-1;
blk = &path->blk[ level ];
switch (blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DIR2_LEAFN_MAGIC:
lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DA_NODE_MAGIC:
lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
}
for (blk--, level--; level >= 0; blk--, level--) {
struct xfs_da3_icnode_hdr nodehdr;
node = blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
if (be32_to_cpu(btree->hashval) == lasthash)
break;
blk->hashval = lasthash;
btree[blk->index].hashval = cpu_to_be32(lasthash);
xfs_trans_log_buf(state->args->trans, blk->bp,
XFS_DA_LOGRANGE(node, &btree[blk->index],
sizeof(*btree)));
lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
}
|
CWE-399
| 179,432 | 1,199 |
190462110703120600634107071844827289206
| null | null | null |
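Note on the record above: in the propagation loop, the early-exit test reads btree->hashval, that is, entry 0, while the update writes btree[blk->index]. Whether that is the defect the recorded commit addressed is an inference from the code's shape; a sketch of the indexing mismatch:

#include <stdio.h>

/* Sketch: the early-exit test reads node[0] while the update writes
 * node[idx]; unless idx == 0 the comparison tests the wrong entry and
 * the hash fix-up can stop too early, leaving ancestors stale. */
static void fixhash(unsigned *node, int idx, unsigned lasthash)
{
    if (node[0] == lasthash)      /* suspect: should be node[idx] */
        return;
    node[idx] = lasthash;
}

int main(void)
{
    unsigned node[4] = { 7, 1, 2, 3 };

    fixhash(node, 2, 7);          /* exits early: node[2] stays 2 */
    printf("node[2] = %u (expected 7)\n", node[2]);
    return 0;
}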
linux
|
18f39e7be0121317550d03e267e3ebd4dbfbb3ce
| 1 |
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct cifs_tcon *tcon, const struct nls_table *cp)
{
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int resp_buftype;
int unc_path_len;
struct TCP_Server_Info *server;
__le16 *unc_path = NULL;
cifs_dbg(FYI, "TCON\n");
if ((ses->server) && tree)
server = ses->server;
else
return -EIO;
if (tcon && tcon->bad_network_name)
return -ENOENT;
unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
if (unc_path == NULL)
return -ENOMEM;
unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
unc_path_len *= 2;
if (unc_path_len < 2) {
kfree(unc_path);
return -EINVAL;
}
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
return rc;
}
if (tcon == NULL) {
/* since no tcon, smb2_init can not do this, so do here */
req->hdr.SessionId = ses->Suid;
/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
}
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and 1 for pad */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
req->PathLength = cpu_to_le16(unc_path_len - 2);
iov[1].iov_base = unc_path;
iov[1].iov_len = unc_path_len;
inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
if (rc != 0) {
if (tcon) {
cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
tcon->need_reconnect = true;
}
goto tcon_error_exit;
}
if (tcon == NULL) {
ses->ipc_tid = rsp->hdr.TreeId;
goto tcon_exit;
}
if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
cifs_dbg(FYI, "connection to disk share\n");
else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
tcon->ipc = true;
cifs_dbg(FYI, "connection to pipe share\n");
} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
tcon->print = true;
cifs_dbg(FYI, "connection to printer\n");
} else {
cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
}
tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = rsp->hdr.TreeId;
strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
init_copy_chunk_defaults(tcon);
if (tcon->ses->server->ops->validate_negotiate)
rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
free_rsp_buf(resp_buftype, rsp);
kfree(unc_path);
return rc;
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
tcon->bad_network_name = true;
}
goto tcon_exit;
}
|
CWE-399
| 179,433 | 1,200 |
60644300295276330698708639327961395692
| null | null | null |
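Note on the record above: tcon_error_exit writes tcon->bad_network_name, yet the function explicitly supports tcon == NULL for the IPC connect path that shares the same label. A sketch of the guard that error label needs, with invented types:

#include <stdio.h>
#include <stddef.h>

struct tcon { int bad_network_name; };

/* Sketch: an error label shared by paths where tcon may legally be NULL.
 * Writing tcon->bad_network_name without "if (tcon)" turns a server
 * error status into a NULL dereference. */
static int tree_connect(struct tcon *tcon, int server_status)
{
    if (server_status != 0)
        goto error;
    return 0;
error:
    if (server_status == 0xBAD && tcon)   /* NULL check the label needs */
        tcon->bad_network_name = 1;
    return -1;
}

int main(void)
{
    printf("%d\n", tree_connect(NULL, 0xBAD)); /* safe with the guard */
    return 0;
}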
linux
|
c03aa9f6e1f938618e6db2e23afef0574efeeb65
| 1 |
static void __udf_read_inode(struct inode *inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
unsigned int link_count;
/*
* Set defaults, but the inode is still incomplete!
* Note: get_new_inode() sets the following on a new inode:
* i_sb = sb
* i_no = ino
* i_flags = sb->s_flags
* i_state = 0
* clean_inode(): zero fills and sets
* i_count = 1
* i_nlink = 1
* i_op = NULL;
*/
bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
make_bad_inode(inode);
return;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
inode->i_ino, ident);
brelse(bh);
make_bad_inode(inode);
return;
}
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
&ident);
if (ident == TAG_IDENT_IE && ibh) {
struct buffer_head *nbh = NULL;
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength &&
(nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
&ident))) {
if (ident == TAG_IDENT_FE ||
ident == TAG_IDENT_EFE) {
memcpy(&iinfo->i_location,
&loc,
sizeof(struct kernel_lb_addr));
brelse(bh);
brelse(ibh);
brelse(nbh);
__udf_read_inode(inode);
return;
}
brelse(nbh);
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
brelse(bh);
make_bad_inode(inode);
return;
}
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
iinfo->i_strat4096 = 1;
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_next_alloc_block = 0;
iinfo->i_next_alloc_goal = 0;
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry))) {
make_bad_inode(inode);
return;
}
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct fileEntry))) {
make_bad_inode(inode);
return;
}
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry))) {
make_bad_inode(inode);
return;
}
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
return;
}
read_lock(&sbi->s_cred_lock);
i_uid_write(inode, le32_to_cpu(fe->uid));
if (!uid_valid(inode->i_uid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
i_gid_write(inode, le32_to_cpu(fe->gid));
if (!gid_valid(inode->i_gid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_fmode;
else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_dmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_dmode;
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
if (!link_count)
link_count = 1;
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenExtents = inode->i_size;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
fe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
efe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
iinfo->i_crtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
inode->i_mode |= S_IFDIR;
inc_nlink(inode);
break;
case ICBTAG_FILE_TYPE_REALTIME:
case ICBTAG_FILE_TYPE_REGULAR:
case ICBTAG_FILE_TYPE_UNDEF:
case ICBTAG_FILE_TYPE_VAT20:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
inode->i_mode |= S_IFREG;
break;
case ICBTAG_FILE_TYPE_BLOCK:
inode->i_mode |= S_IFBLK;
break;
case ICBTAG_FILE_TYPE_CHAR:
inode->i_mode |= S_IFCHR;
break;
case ICBTAG_FILE_TYPE_FIFO:
init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
break;
case ICBTAG_FILE_TYPE_SOCKET:
init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
break;
case ICBTAG_FILE_TYPE_SYMLINK:
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode->i_mode = S_IFLNK | S_IRWXUGO;
break;
case ICBTAG_FILE_TYPE_MAIN:
udf_debug("METADATA FILE-----\n");
break;
case ICBTAG_FILE_TYPE_MIRROR:
udf_debug("METADATA MIRROR FILE-----\n");
break;
case ICBTAG_FILE_TYPE_BITMAP:
udf_debug("METADATA BITMAP FILE-----\n");
break;
default:
udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
inode->i_ino, fe->icbTag.fileType);
make_bad_inode(inode);
return;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (dsea) {
init_special_inode(inode, inode->i_mode,
MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
make_bad_inode(inode);
}
brelse(bh);
}
|
CWE-399
| 179,438 | 1,204 |
121718855555196308354887151663402502923
| null | null | null |
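Note on the record above: __udf_read_inode() re-invokes itself whenever a strategy-4096 indirect entry points onward, so recursion depth is controlled by on-disk data and crafted media can exhaust the stack. A sketch of a bounded walk that removes that control; the names and the hop limit are illustrative:

#include <stdio.h>

/* Sketch: following an on-disk "indirect" pointer by self-recursion lets
 * crafted media drive unbounded stack growth. Bounding the walk (or
 * iterating, as here) caps attacker control over the depth. */
#define MAX_ICB_HOPS 32

static int read_inode(int (*next_hop)(int), int loc)
{
    int hops;

    for (hops = 0; hops < MAX_ICB_HOPS; hops++) {
        int next = next_hop(loc);
        if (next < 0)
            return 0;        /* terminal entry: done */
        loc = next;          /* was: recursive call on next */
    }
    return -1;               /* too many hops: treat as corrupt */
}

static int always_onward(int loc) { return loc + 1; } /* hostile image */

int main(void)
{
    printf("%d\n", read_inode(always_onward, 0)); /* -1, not a crash */
    return 0;
}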
ettercap
|
e3abe7d7585ecc420a7cab73313216613aadad5a
| 1 |
FUNC_DECODER(dissector_postgresql)
{
DECLARE_DISP_PTR(ptr);
struct ec_session *s = NULL;
void *ident = NULL;
char tmp[MAX_ASCII_ADDR_LEN];
struct postgresql_status *conn_status;
/* don't complain about unused var */
(void) DECODE_DATA;
(void) DECODE_DATALEN;
(void) DECODED_LEN;
if (FROM_CLIENT("postgresql", PACKET)) {
if (PACKET->DATA.len < 4)
return NULL;
dissect_create_ident(&ident, PACKET, DISSECT_CODE(dissector_postgresql));
/* if the session does not exist... */
if (session_get(&s, ident, DISSECT_IDENT_LEN) == -ENOTFOUND) {
/* search for user and database strings, look for StartupMessage */
unsigned char *u = memmem(ptr, PACKET->DATA.len, "user", 4);
unsigned char *d = memmem(ptr, PACKET->DATA.len, "database", 8);
if (!memcmp(ptr + 4, "\x00\x03\x00\x00", 4) && u && d) {
/* create the new session */
dissect_create_session(&s, PACKET, DISSECT_CODE(dissector_postgresql));
/* remember the state (used later) */
SAFE_CALLOC(s->data, 1, sizeof(struct postgresql_status));
conn_status = (struct postgresql_status *) s->data;
conn_status->status = WAIT_AUTH;
/* user is always null-terminated */
strncpy((char*)conn_status->user, (char*)(u + 5), 65);
conn_status->user[64] = 0;
/* database is always null-terminated */
strncpy((char*)conn_status->database, (char*)(d + 9), 65);
conn_status->database[64] = 0;
/* save the session */
session_put(s);
}
} else {
conn_status = (struct postgresql_status *) s->data;
if (conn_status->status == WAIT_RESPONSE) {
/* check for PasswordMessage packet */
if (ptr[0] == 'p' && conn_status->type == MD5) {
DEBUG_MSG("\tDissector_postgresql RESPONSE type is MD5");
if(memcmp(ptr + 1, "\x00\x00\x00\x28", 4)) {
DEBUG_MSG("\tDissector_postgresql BUG, expected length is 40");
return NULL;
}
if (PACKET->DATA.len < 40) {
DEBUG_MSG("\tDissector_postgresql BUG, expected length is 40");
return NULL;
}
memcpy(conn_status->hash, ptr + 5 + 3, 32);
conn_status->hash[32] = 0;
DISSECT_MSG("%s:$postgres$%s*%s*%s:%s:%d\n", conn_status->user, conn_status->user, conn_status->salt, conn_status->hash, ip_addr_ntoa(&PACKET->L3.dst, tmp), ntohs(PACKET->L4.dst));
dissect_wipe_session(PACKET, DISSECT_CODE(dissector_postgresql));
}
else if (ptr[0] == 'p' && conn_status->type == CT) {
int length;
DEBUG_MSG("\tDissector_postgresql RESPONSE type is clear-text!");
GET_ULONG_BE(length, ptr, 1);
strncpy((char*)conn_status->password, (char*)(ptr + 5), length - 4);
conn_status->password[length - 4] = 0;
DISSECT_MSG("PostgreSQL credentials:%s-%d:%s:%s\n", ip_addr_ntoa(&PACKET->L3.dst, tmp), ntohs(PACKET->L4.dst), conn_status->user, conn_status->password);
dissect_wipe_session(PACKET, DISSECT_CODE(dissector_postgresql));
}
}
}
} else { /* Packets coming from the server */
if (PACKET->DATA.len < 9)
return NULL;
dissect_create_ident(&ident, PACKET, DISSECT_CODE(dissector_postgresql));
if (session_get(&s, ident, DISSECT_IDENT_LEN) == ESUCCESS) {
conn_status = (struct postgresql_status *) s->data;
if (conn_status->status == WAIT_AUTH &&
ptr[0] == 'R' && !memcmp(ptr + 1, "\x00\x00\x00\x0c", 4) &&
!memcmp(ptr + 5, "\x00\x00\x00\x05", 4)) {
conn_status->status = WAIT_RESPONSE;
conn_status->type = MD5;
DEBUG_MSG("\tDissector_postgresql AUTH type is MD5");
hex_encode(ptr + 9, 4, conn_status->salt); /* save salt */
}
else if (conn_status->status == WAIT_AUTH &&
ptr[0] == 'R' && !memcmp(ptr + 1, "\x00\x00\x00\x08", 4) &&
!memcmp(ptr + 5, "\x00\x00\x00\x03", 4)) {
conn_status->status = WAIT_RESPONSE;
conn_status->type = CT;
DEBUG_MSG("\tDissector_postgresql AUTH type is clear-text!");
}
}
}
SAFE_FREE(ident);
return NULL;
}
|
CWE-119
| 179,439 | 1,205 |
113723108844572007792416732313477179140
| null | null | null |
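Note on the record above: in the clear-text branch, length comes straight off the wire and drives strncpy(conn_status->password, ptr + 5, length - 4) with no lower bound (length < 4 wraps the size) and no upper bound against the destination. A sketch of the clamped copy, with a hypothetical signature:

#include <stdio.h>
#include <string.h>

/* Sketch: a wire-supplied 32-bit length drives a copy into a fixed
 * buffer. length < 4 underflows to a huge size_t; length - 4 larger than
 * the destination overflows it. Clamp both ways before copying. */
static int copy_password(char *dst, size_t dstsz,
                         const unsigned char *pkt, unsigned length)
{
    size_t n;

    if (length < 4)               /* underflow guard */
        return -1;
    n = length - 4;
    if (n >= dstsz)               /* overflow guard */
        n = dstsz - 1;
    memcpy(dst, pkt + 5, n);
    dst[n] = '\0';
    return 0;
}

int main(void)
{
    unsigned char pkt[64] = "pxxxxsecret";
    char pw[16];

    if (copy_password(pw, sizeof(pw), pkt, 4 + 6) == 0)
        printf("password: %s\n", pw);
    return 0;
}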
linux
|
410dd3cf4c9b36f27ed4542ee18b1af5e68645a4
| 1 |
parse_rock_ridge_inode_internal(struct iso_directory_record *de,
struct inode *inode, int regard_xa)
{
int symlink_len = 0;
int cnt, sig;
struct inode *reloc;
struct rock_ridge *rr;
int rootflag;
struct rock_state rs;
int ret = 0;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
if (regard_xa) {
rs.chr += 14;
rs.len -= 14;
if (rs.len < 0)
rs.len = 0;
}
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
#ifndef CONFIG_ZISOFS /* No flag for SF or ZF */
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] &
(RR_PX | RR_TF | RR_SL | RR_CL)) == 0)
goto out;
break;
#endif
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
int p;
for (p = 0; p < rr->u.ER.len_id; p++)
printk("%c", rr->u.ER.data[p]);
}
printk("\n");
break;
case SIG('P', 'X'):
inode->i_mode = isonum_733(rr->u.PX.mode);
set_nlink(inode, isonum_733(rr->u.PX.n_links));
i_uid_write(inode, isonum_733(rr->u.PX.uid));
i_gid_write(inode, isonum_733(rr->u.PX.gid));
break;
case SIG('P', 'N'):
{
int high, low;
high = isonum_733(rr->u.PN.dev_high);
low = isonum_733(rr->u.PN.dev_low);
/*
* The Rock Ridge standard specifies that if
* sizeof(dev_t) <= 4, then the high field is
* unused, and the device number is completely
* stored in the low field. Some writers may
* ignore this subtlety,
* and as a result we test to see if the entire
* device number is
* stored in the low field, and use that.
*/
if ((low & ~0xff) && high == 0) {
inode->i_rdev =
MKDEV(low >> 8, low & 0xff);
} else {
inode->i_rdev =
MKDEV(high, low);
}
}
break;
case SIG('T', 'F'):
/*
* Some RRIP writers incorrectly place ctime in the
* TF_CREATE field. Try to handle this correctly for
* either case.
*/
/* Rock ridge never appears on a High Sierra disk */
cnt = 0;
if (rr->u.TF.flags & TF_CREATE) {
inode->i_ctime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_ctime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_MODIFY) {
inode->i_mtime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_mtime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ACCESS) {
inode->i_atime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_atime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ATTRIBUTES) {
inode->i_ctime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_ctime.tv_nsec = 0;
}
break;
case SIG('S', 'L'):
{
int slen;
struct SL_component *slp;
struct SL_component *oldslp;
slen = rr->len - 5;
slp = &rr->u.SL.link;
inode->i_size = symlink_len;
while (slen > 1) {
rootflag = 0;
switch (slp->flags & ~1) {
case 0:
inode->i_size +=
slp->len;
break;
case 2:
inode->i_size += 1;
break;
case 4:
inode->i_size += 2;
break;
case 8:
rootflag = 1;
inode->i_size += 1;
break;
default:
printk("Symlink component flag "
"not implemented\n");
}
slen -= slp->len + 2;
oldslp = slp;
slp = (struct SL_component *)
(((char *)slp) + slp->len + 2);
if (slen < 2) {
if (((rr->u.SL.
flags & 1) != 0)
&&
((oldslp->
flags & 1) == 0))
inode->i_size +=
1;
break;
}
/*
* If this component record isn't
* continued, then append a '/'.
*/
if (!rootflag
&& (oldslp->flags & 1) == 0)
inode->i_size += 1;
}
}
symlink_len = inode->i_size;
break;
case SIG('R', 'E'):
printk(KERN_WARNING "Attempt to read inode for "
"relocated directory\n");
goto out;
case SIG('C', 'L'):
ISOFS_I(inode)->i_first_extent =
isonum_733(rr->u.CL.location);
reloc =
isofs_iget(inode->i_sb,
ISOFS_I(inode)->i_first_extent,
0);
if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc);
goto out;
}
inode->i_mode = reloc->i_mode;
set_nlink(inode, reloc->i_nlink);
inode->i_uid = reloc->i_uid;
inode->i_gid = reloc->i_gid;
inode->i_rdev = reloc->i_rdev;
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
inode->i_atime = reloc->i_atime;
inode->i_ctime = reloc->i_ctime;
inode->i_mtime = reloc->i_mtime;
iput(reloc);
break;
#ifdef CONFIG_ZISOFS
case SIG('Z', 'F'): {
int algo;
if (ISOFS_SB(inode->i_sb)->s_nocompress)
break;
algo = isonum_721(rr->u.ZF.algorithm);
if (algo == SIG('p', 'z')) {
int block_shift =
isonum_711(&rr->u.ZF.parms[1]);
if (block_shift > 17) {
printk(KERN_WARNING "isofs: "
"Can't handle ZF block "
"size of 2^%d\n",
block_shift);
} else {
/*
* Note: we don't change
* i_blocks here
*/
ISOFS_I(inode)->i_file_format =
isofs_file_compressed;
/*
* Parameters to compression
* algorithm (header size,
* block size)
*/
ISOFS_I(inode)->i_format_parm[0] =
isonum_711(&rr->u.ZF.parms[0]);
ISOFS_I(inode)->i_format_parm[1] =
isonum_711(&rr->u.ZF.parms[1]);
inode->i_size =
isonum_733(rr->u.ZF.
real_size);
}
} else {
printk(KERN_WARNING
"isofs: Unknown ZF compression "
"algorithm: %c%c\n",
rr->u.ZF.algorithm[0],
rr->u.ZF.algorithm[1]);
}
break;
}
#endif
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
ret = 0;
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
|
CWE-20
| 179,443 | 1,209 |
46643672205519704191602265138531527274
| null | null | null |
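Note on the record above: the CL case loads another inode with isofs_iget(), which re-enters this parser, so a crafted image can chain relocations into unbounded recursion. A sketch of a depth flag that breaks such a cycle, with illustrative names:

#include <stdio.h>

/* Sketch: a CL (child link) entry re-enters inode setup on another
 * extent, which can itself carry a CL entry back to the first. Carrying
 * a relocation depth and refusing nested relocations breaks the cycle a
 * crafted image can build. */
static int parse_inode(int extent, int relocated_depth)
{
    if (relocated_depth > 1)
        return -1;                 /* nested relocation: treat as corrupt */
    if (extent < 2)                /* pretend: this extent has a CL entry */
        return parse_inode(extent + 1, relocated_depth + 1);
    return 0;
}

int main(void)
{
    printf("%d\n", parse_inode(0, 0)); /* -1: cycle rejected, stack bounded */
    return 0;
}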
krb5
|
af0ed4df4dfae762ab5fb605f5a0c8f59cb4f6ca
| 1 |
kadm5_randkey_principal_3(void *server_handle,
krb5_principal principal,
krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
krb5_keyblock **keyblocks,
int *n_keys)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
int ret, last_pwd;
krb5_boolean have_pol = FALSE;
kadm5_server_handle_t handle = server_handle;
krb5_keyblock *act_mkey;
krb5_kvno act_kvno;
int new_n_ks_tuple = 0;
krb5_key_salt_tuple *new_ks_tuple = NULL;
if (keyblocks)
*keyblocks = NULL;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL)
return EINVAL;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
ret = apply_keysalt_policy(handle, adb.policy, n_ks_tuple, ks_tuple,
&new_n_ks_tuple, &new_ks_tuple);
if (ret)
goto done;
if (krb5_principal_compare(handle->context, principal, hist_princ)) {
/* If changing the history entry, the new entry must have exactly one
* key. */
if (keepold)
return KADM5_PROTECT_PRINCIPAL;
new_n_ks_tuple = 1;
}
ret = kdb_get_active_mkey(handle, &act_kvno, &act_mkey);
if (ret)
goto done;
ret = krb5_dbe_crk(handle->context, act_mkey, new_ks_tuple, new_n_ks_tuple,
keepold, kdb);
if (ret)
goto done;
ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno);
if (ret)
goto done;
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
ret = krb5_timeofday(handle->context, &now);
if (ret)
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
ret = get_policy(handle, adb.policy, &pol, &have_pol);
if (ret)
goto done;
}
if (have_pol) {
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb, &last_pwd);
if (ret)
goto done;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now);
if (ret)
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if (keyblocks) {
ret = decrypt_key_data(handle->context,
kdb->n_key_data, kdb->key_data,
keyblocks, n_keys);
if (ret)
goto done;
}
/* key data changed, let the database provider know */
kdb->mask = KADM5_KEY_DATA | KADM5_FAIL_AUTH_COUNT;
/* | KADM5_RANDKEY_USED */;
ret = k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, principal, keepold,
new_n_ks_tuple, new_ks_tuple, NULL);
if (ret)
goto done;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
(void) k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, principal,
keepold, new_n_ks_tuple, new_ks_tuple, NULL);
ret = KADM5_OK;
done:
free(new_ks_tuple);
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
|
CWE-255
| 179,447 | 1,211 |
147608648129973756964624859682015241078
| null | null | null |
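Note on the record above: with keepold set, krb5_dbe_crk() retains the previous keys, and decrypt_key_data() then decrypts all n_key_data entries for the caller, so retired key material can travel back over the admin protocol. A sketch of filtering the returned set down to the new kvno; hypothetical structures, not the kadm5 API:

#include <stdio.h>

struct key { int kvno; int secret; };

/* Sketch: after a rekey with keepold, the principal holds old and new
 * keys. Returning every decrypted key leaks retired material; filter on
 * the newly assigned kvno instead. */
static int collect_new_keys(const struct key *keys, int n, int new_kvno,
                            struct key *out)
{
    int i, n_out = 0;

    for (i = 0; i < n; i++)
        if (keys[i].kvno == new_kvno)   /* skip keepold remnants */
            out[n_out++] = keys[i];
    return n_out;
}

int main(void)
{
    struct key keys[] = { {1, 111}, {2, 222} }, out[2];
    int n = collect_new_keys(keys, 2, 2, out);

    printf("returned %d key(s), kvno %d\n", n, out[0].kvno);
    return 0;
}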
linux
|
9566d6742852c527bf5af38af5cbb878dad75705
| 1 |
static int do_remount(struct path *path, int flags, int mnt_flags,
void *data)
{
int err;
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
if (!check_mnt(mnt))
return -EINVAL;
if (path->dentry != path->mnt->mnt_root)
return -EINVAL;
/* Don't allow changing of locked mnt flags.
*
* No locks need to be held here while testing the various
* MNT_LOCK flags because those flags can never be cleared
* once they are set.
*/
if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
!(mnt_flags & MNT_READONLY)) {
return -EPERM;
}
err = security_sb_remount(sb, data);
if (err)
return err;
down_write(&sb->s_umount);
if (flags & MS_BIND)
err = change_mount_flags(path->mnt, flags);
else if (!capable(CAP_SYS_ADMIN))
err = -EPERM;
else
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
lock_mount_hash();
mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
mnt->mnt.mnt_flags = mnt_flags;
touch_mnt_namespace(mnt->mnt_ns);
unlock_mount_hash();
}
up_write(&sb->s_umount);
return err;
}
|
CWE-264
| 179,454 | 1,217 |
67643344520913295248862751136888291968
| null | null | null |
linux
|
a6138db815df5ee542d848318e5dae681590fccd
| 1 |
static int do_remount(struct path *path, int flags, int mnt_flags,
void *data)
{
int err;
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
if (!check_mnt(mnt))
return -EINVAL;
if (path->dentry != path->mnt->mnt_root)
return -EINVAL;
err = security_sb_remount(sb, data);
if (err)
return err;
down_write(&sb->s_umount);
if (flags & MS_BIND)
err = change_mount_flags(path->mnt, flags);
else if (!capable(CAP_SYS_ADMIN))
err = -EPERM;
else
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
lock_mount_hash();
mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
mnt->mnt.mnt_flags = mnt_flags;
touch_mnt_namespace(mnt->mnt_ns);
unlock_mount_hash();
}
up_write(&sb->s_umount);
return err;
}
|
CWE-264
| 179,455 | 1,218 |
251245953508821352743020493309298185926
| null | null | null |
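Note on the two do_remount() records above: they differ in which existing flags survive a remount (~MNT_USER_SETTABLE_MASK versus MNT_PROPAGATION_MASK) and in whether locked bits are checked at all. The invariant both circle around is that a remount must not clear flags that were locked when the mount entered a less privileged namespace. A sketch with illustrative mask values:

#include <stdio.h>

#define MNT_READONLY       0x01
#define MNT_NOSUID         0x02
#define MNT_LOCK_READONLY  0x10   /* locked when propagated into a userns */

/* Sketch: a remount supplies a fresh flag word. If locked bits are
 * neither re-checked nor carried over from the existing mount, an
 * unprivileged remount can clear read-only protection. */
static int remount(unsigned *mnt_flags, unsigned requested)
{
    if ((*mnt_flags & MNT_LOCK_READONLY) && !(requested & MNT_READONLY))
        return -1;                         /* EPERM: locked bit cleared */
    /* preserve non-user-settable state instead of overwriting it */
    requested |= *mnt_flags & MNT_LOCK_READONLY;
    *mnt_flags = requested;
    return 0;
}

int main(void)
{
    unsigned flags = MNT_READONLY | MNT_LOCK_READONLY;

    printf("%d\n", remount(&flags, MNT_NOSUID));                /* -1 */
    printf("%d\n", remount(&flags, MNT_READONLY | MNT_NOSUID)); /* 0  */
    return 0;
}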
linux
|
1be9a950c646c9092fb3618197f7b6bfb50e82aa
| 1 |
void sctp_assoc_update(struct sctp_association *asoc,
struct sctp_association *new)
{
struct sctp_transport *trans;
struct list_head *pos, *temp;
/* Copy in new parameters of peer. */
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
asoc->peer.sack_needed = new->peer.sack_needed;
asoc->peer.i = new->peer.i;
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
asoc->peer.i.initial_tsn, GFP_ATOMIC);
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
sctp_assoc_rm_peer(asoc, trans);
continue;
}
if (asoc->state >= SCTP_STATE_ESTABLISHED)
sctp_transport_reset(trans);
}
/* If the case is A (association restart), use
* initial_tsn as next_tsn. If the case is B, use
* current next_tsn in case data sent to peer
* has been discarded and needs retransmission.
*/
if (asoc->state >= SCTP_STATE_ESTABLISHED) {
asoc->next_tsn = new->next_tsn;
asoc->ctsn_ack_point = new->ctsn_ack_point;
asoc->adv_peer_ack_point = new->adv_peer_ack_point;
/* Reinitialize SSN for both local streams
* and peer's streams.
*/
sctp_ssnmap_clear(asoc->ssnmap);
/* Flush the ULP reassembly and ordered queue.
* Any data there will now be stale and will
* cause problems.
*/
sctp_ulpq_flush(&asoc->ulpq);
/* reset the overall association error count so
* that the restarted association doesn't get torn
* down on the next retransmission timer.
*/
asoc->overall_error_count = 0;
} else {
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports) {
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state);
}
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
if (!asoc->ssnmap) {
/* Move the ssnmap. */
asoc->ssnmap = new->ssnmap;
new->ssnmap = NULL;
}
if (!asoc->assoc_id) {
/* get a new association id since we don't have one
* yet.
*/
sctp_assoc_set_id(asoc, GFP_ATOMIC);
}
}
/* SCTP-AUTH: Save the peer parameters from the new associations
* and also move the association shared keys over
*/
kfree(asoc->peer.peer_random);
asoc->peer.peer_random = new->peer.peer_random;
new->peer.peer_random = NULL;
kfree(asoc->peer.peer_chunks);
asoc->peer.peer_chunks = new->peer.peer_chunks;
new->peer.peer_chunks = NULL;
kfree(asoc->peer.peer_hmacs);
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;
sctp_auth_key_put(asoc->asoc_shared_key);
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
|
| 179,456 | 1,219 |
268873015146939465230572027615388419429
| null | null | null |
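Note on the record above: the tail of sctp_assoc_update() moves peer_random, peer_chunks and peer_hmacs over from new and immediately recomputes the active key; if the colliding association never negotiated AUTH, those lists are NULL. A sketch of the capability check such a recompute needs, with invented types:

#include <stdio.h>
#include <stddef.h>

struct hmac_list { int algo; };

/* Sketch: an association restart copies peer AUTH state from the
 * colliding association. If that peer never sent AUTH parameters the
 * list is NULL, and recomputing the active key from it crashes unless
 * the capability is checked (or inherited) first. */
static int init_active_key(const struct hmac_list *hmacs, int auth_capable)
{
    if (!auth_capable)
        return 0;                   /* nothing to compute */
    if (!hmacs)
        return -1;                  /* defensive: inconsistent state */
    printf("keying with algo %d\n", hmacs->algo);
    return 0;
}

int main(void)
{
    printf("%d\n", init_active_key(NULL, 0));  /* restart w/o AUTH: ok */
    return 0;
}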
linux
|
295dc39d941dc2ae53d5c170365af4c9d5c16212
| 1 |
mountpoint_last(struct nameidata *nd, struct path *path)
{
int error = 0;
struct dentry *dentry;
struct dentry *dir = nd->path.dentry;
/* If we're in rcuwalk, drop out of it to handle last component */
if (nd->flags & LOOKUP_RCU) {
if (unlazy_walk(nd, NULL)) {
error = -ECHILD;
goto out;
}
}
nd->flags &= ~LOOKUP_PARENT;
if (unlikely(nd->last_type != LAST_NORM)) {
error = handle_dots(nd, nd->last_type);
if (error)
goto out;
dentry = dget(nd->path.dentry);
goto done;
}
mutex_lock(&dir->d_inode->i_mutex);
dentry = d_lookup(dir, &nd->last);
if (!dentry) {
/*
* No cached dentry. Mounted dentries are pinned in the cache,
* so that means that this dentry is probably a symlink or the
* path doesn't actually point to a mounted dentry.
*/
dentry = d_alloc(dir, &nd->last);
if (!dentry) {
error = -ENOMEM;
mutex_unlock(&dir->d_inode->i_mutex);
goto out;
}
dentry = lookup_real(dir->d_inode, dentry, nd->flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry)) {
mutex_unlock(&dir->d_inode->i_mutex);
goto out;
}
}
mutex_unlock(&dir->d_inode->i_mutex);
done:
if (!dentry->d_inode || d_is_negative(dentry)) {
error = -ENOENT;
dput(dentry);
goto out;
}
path->dentry = dentry;
path->mnt = mntget(nd->path.mnt);
if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
return 1;
follow_mount(path);
error = 0;
out:
terminate_walk(nd);
return error;
}
|
CWE-59
| 179,457 | 1,220 |
289527718022341317804957843288422047472
| null | null | null |
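Note on the record above: mountpoint_last() takes mntget(nd->path.mnt) before deciding, via should_follow_link(), to return 1 for a trailing symlink. The recorded issue class concerns unmount behaviour on symlinks; one way such a lookup goes wrong is an output reference that no caller ever takes ownership of. A generic sketch of that pattern, not a claim about the recorded fix:

#include <stdio.h>

/* Sketch: the lookup takes a mount reference meant for the output path,
 * then returns early for a trailing symlink. If no consumer ever owns
 * that reference, each lookup of the symlink pins the mount and a later
 * umount sees a busy count. */
static int mnt_count;

static void mntget(void) { mnt_count++; }
static void mntput(void) { mnt_count--; }

static int mountpoint_lookup(int is_symlink, int *consumer_owns)
{
    mntget();                    /* reference for the output path */
    if (is_symlink)
        return 1;                /* early return: reference orphaned */
    *consumer_owns = 1;          /* caller will mntput() when done */
    return 0;
}

int main(void)
{
    int owns = 0;

    mountpoint_lookup(1, &owns); /* symlink target */
    if (owns)
        mntput();
    printf("leaked mount references: %d\n", mnt_count);   /* 1 */
    return 0;
}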
linux
|
d3217b15a19a4779c39b212358a5c71d725822ee
| 1 |
void sctp_association_free(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
struct sctp_transport *transport;
struct list_head *pos, *temp;
int i;
/* Only real associations count against the endpoint, so
* don't bother for if this is a temporary association.
*/
if (!asoc->temp) {
list_del(&asoc->asocs);
/* Decrement the backlog value for a TCP-style listening
* socket.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk->sk_ack_backlog--;
}
/* Mark as dead, so other users can know this structure is
* going away.
*/
asoc->base.dead = true;
/* Dispose of any data lying around in the outqueue. */
sctp_outq_free(&asoc->outqueue);
/* Dispose of any pending messages for the upper layer. */
sctp_ulpq_free(&asoc->ulpq);
/* Dispose of any pending chunks on the inqueue. */
sctp_inq_free(&asoc->base.inqueue);
sctp_tsnmap_free(&asoc->peer.tsn_map);
/* Free ssnmap storage. */
sctp_ssnmap_free(asoc->ssnmap);
/* Clean up the bound address list. */
sctp_bind_addr_free(&asoc->base.bind_addr);
/* Do we need to go through all of our timers and
* delete them? To be safe we will try to delete all, but we
* should be able to go through and make a guess based
* on our state.
*/
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
if (del_timer(&asoc->timers[i]))
sctp_association_put(asoc);
}
/* Free peer's cached cookie. */
kfree(asoc->peer.cookie);
kfree(asoc->peer.peer_random);
kfree(asoc->peer.peer_chunks);
kfree(asoc->peer.peer_hmacs);
/* Release the transport structures. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
list_del_rcu(pos);
sctp_transport_free(transport);
}
asoc->peer.transport_count = 0;
sctp_asconf_queue_teardown(asoc);
/* Free pending address space being deleted */
if (asoc->asconf_addr_del_pending != NULL)
kfree(asoc->asconf_addr_del_pending);
/* AUTH - Free the endpoint shared keys */
sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
/* AUTH - Free the association shared key */
sctp_auth_key_put(asoc->asoc_shared_key);
sctp_association_put(asoc);
}
|
CWE-20
| 179,461 | 1,223 |
307155962389758075381537374371474956716
| null | null | null |
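Note on the record above: the function decrements sk_ack_backlog for TCP-style listening sockets on every non-temporary free; a decrement that was never matched by an increment wraps the unsigned counter and makes the listener look permanently full. A sketch of the wrap, with an illustrative field width:

#include <stdio.h>

/* Sketch: an unsigned backlog counter decremented on every free. If a
 * free runs for an association that never incremented it (for example
 * one already accepted and migrated away), the counter wraps to a huge
 * value and new connections are refused. */
int main(void)
{
    unsigned short ack_backlog = 0, max_backlog = 128;

    ack_backlog--;                        /* unmatched decrement */
    printf("backlog %u, limit %u: %s\n", (unsigned)ack_backlog,
           (unsigned)max_backlog,
           ack_backlog > max_backlog ? "listener wedged" : "ok");
    return 0;
}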
linux
|
ac902c112d90a89e59916f751c2745f4dbdbb4bd
| 1 |
static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
unsigned int count)
{
struct snd_kcontrol *kctl;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid < card->last_numid + 1 + count &&
kctl->id.numid + kctl->count > card->last_numid + 1) {
card->last_numid = kctl->id.numid + kctl->count - 1;
return true;
}
}
return false;
}
|
CWE-189
| 179,462 | 1,224 |
82969020191262428727479609436945294087
| null | null | null |
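Note on the record above: both comparisons add user-influenced counts to 32-bit numids, so either sum can wrap and the conflict test can pass for an overlapping range. A wrap-safe formulation, as a sketch:

#include <stdio.h>
#include <limits.h>

/* Sketch: overlap test on unsigned ids. With numid near UINT_MAX,
 * "numid + count" wraps and the conflict check wrongly passes; rejecting
 * any operand combination that would wrap avoids that. */
static int conflicts(unsigned numid, unsigned count,
                     unsigned last_numid, unsigned new_count)
{
    /* wrap-safe form of: numid < last_numid + 1 + new_count &&
     *                    numid + count > last_numid + 1        */
    if (count > UINT_MAX - numid ||
        new_count > UINT_MAX - 1 - last_numid)
        return 1;                     /* treat overflow as a conflict */
    return numid < last_numid + 1 + new_count &&
           numid + count > last_numid + 1;
}

int main(void)
{
    printf("%d\n", conflicts(UINT_MAX - 1, 3, 10, 2)); /* 1: rejected */
    return 0;
}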
linux
|
82262a46627bebb0febcc26664746c25cef08563
| 1 |
static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
struct snd_card *card = file->card;
struct snd_kcontrol kctl, *_kctl;
unsigned int access;
long private_size;
struct user_element *ue;
int idx, err;
if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
down_write(&card->controls_rwsem);
_kctl = snd_ctl_find_id(card, &info->id);
err = 0;
if (_kctl) {
if (replace)
err = snd_ctl_remove(card, _kctl);
else
err = -EBUSY;
} else {
if (replace)
err = -ENOENT;
}
up_write(&card->controls_rwsem);
if (err < 0)
return err;
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
kctl.info = snd_ctl_elem_user_enum_info;
else
kctl.info = snd_ctl_elem_user_info;
if (access & SNDRV_CTL_ELEM_ACCESS_READ)
kctl.get = snd_ctl_elem_user_get;
if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
kctl.put = snd_ctl_elem_user_put;
if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
kctl.tlv.c = snd_ctl_elem_user_tlv;
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
private_size = sizeof(long);
if (info->count > 128)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
private_size = sizeof(long long);
if (info->count > 64)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
private_size = sizeof(unsigned int);
if (info->count > 128 || info->value.enumerated.items == 0)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
private_size = sizeof(unsigned char);
if (info->count > 512)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
private_size = sizeof(struct snd_aes_iec958);
if (info->count != 1)
return -EINVAL;
break;
default:
return -EINVAL;
}
private_size *= info->count;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
ue->elem_data_size = private_size;
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
kfree(ue);
return err;
}
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
kfree(ue->priv_data);
kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
if (err < 0)
return err;
down_write(&card->controls_rwsem);
card->user_ctl_count++;
up_write(&card->controls_rwsem);
return 0;
}
|
CWE-189
| 179,463 | 1,225 |
104448511318866041989201295871870924377
| null | null | null |
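Note on the record above: the MAX_USER_CONTROLS test is skipped for replace, and user_ctl_count is incremented on every successful add, with no matching decrement when the old control is removed, so repeated replaces drift the counter. A sketch of that bookkeeping drift:

#include <stdio.h>

#define MAX_USER_CONTROLS 4

/* Sketch: "replace" removes the old user control, but the counter is
 * incremented unconditionally on every successful add, so each replace
 * nets +1 for a single real control. The remove path must give back
 * what the add path counted. */
static int user_ctl_count;

static int elem_add(int replace)
{
    if (!replace && user_ctl_count >= MAX_USER_CONTROLS)
        return -1;
    if (replace) {
        /* old element freed here, but its count is never given back */
    }
    user_ctl_count++;
    return 0;
}

int main(void)
{
    int i;

    elem_add(0);                       /* create once */
    for (i = 0; i < 5; i++)
        elem_add(1);                   /* replace the same control */
    printf("count = %d for 1 real control (limit %d)\n",
           user_ctl_count, MAX_USER_CONTROLS);
    return 0;
}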
linux
|
206204a1162b995e2185275167b22468c00d6b36
| 1 |
static int lz4_uncompress(const char *source, char *dest, int osize)
{
const BYTE *ip = (const BYTE *) source;
const BYTE *ref;
BYTE *op = (BYTE *) dest;
BYTE * const oend = op + osize;
BYTE *cpy;
unsigned token;
size_t length;
size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif
while (1) {
/* get runlength */
token = *ip++;
length = (token >> ML_BITS);
if (length == RUN_MASK) {
size_t len;
len = *ip++;
for (; len == 255; length += 255)
len = *ip++;
length += len;
}
/* copy literals */
cpy = op + length;
if (unlikely(cpy > oend - COPYLENGTH)) {
/*
* Error: not enough place for another match
* (min 4) + 5 literals
*/
if (cpy != oend)
goto _output_error;
memcpy(op, ip, length);
ip += length;
break; /* EOF */
}
LZ4_WILDCOPY(ip, op, cpy);
ip -= (op - cpy);
op = cpy;
/* get offset */
LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
ip += 2;
/* Error: offset create reference outside destination buffer */
if (unlikely(ref < (BYTE *const) dest))
goto _output_error;
/* get matchlength */
length = token & ML_MASK;
if (length == ML_MASK) {
for (; *ip == 255; length += 255)
ip++;
length += *ip++;
}
/* copy repeated sequence */
if (unlikely((op - ref) < STEPSIZE)) {
#if LZ4_ARCH64
size_t dec64 = dec64table[op - ref];
#else
const int dec64 = 0;
#endif
op[0] = ref[0];
op[1] = ref[1];
op[2] = ref[2];
op[3] = ref[3];
op += 4;
ref += 4;
ref -= dec32table[op-ref];
PUT4(ref, op);
op += STEPSIZE - 4;
ref -= dec64;
} else {
LZ4_COPYSTEP(ref, op);
}
cpy = op + length - (STEPSIZE - 4);
if (cpy > (oend - COPYLENGTH)) {
/* Error: request to write beyond destination buffer */
if (cpy > oend)
goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
op = cpy;
/*
* Check EOF (should never happen, since last 5 bytes
* are supposed to be literals)
*/
if (op == oend)
goto _output_error;
continue;
}
LZ4_SECURECOPY(ref, op, cpy);
op = cpy; /* correction */
}
/* end of decoding */
return (int) (((char *)ip) - source);
/* write overflow error detected */
_output_error:
return (int) (-(((char *)ip) - source));
}
|
CWE-20
| 179,473 | 1,233 |
88608013649608299301297380118665587051
| null | null | null |
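Note on the record above: length is accumulated 255 at a time from the input, and cpy = op + length can wrap before the "cpy > oend - COPYLENGTH" test runs, so the bound compares a wrapped pointer. A sketch of the wrap-free check:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Sketch: a run length decoded as a sum of 255s can exceed the distance
 * to the end of the buffer by enough that "op + length" wraps and
 * compares as in-bounds. Checking the length against the remaining
 * space, without forming the pointer, cannot wrap. */
static int check_run(const unsigned char *op, const unsigned char *oend,
                     size_t length)
{
    if (length > (size_t)(oend - op))
        return -1;
    return 0;
}

int main(void)
{
    unsigned char buf[64];

    printf("%d\n", check_run(buf, buf + sizeof(buf), 32));        /* 0  */
    printf("%d\n", check_run(buf, buf + sizeof(buf), SIZE_MAX));  /* -1 */
    return 0;
}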
linux
|
206a81c18401c0cde6e579164f752c4b147324ce
| 1 |
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
{
unsigned char *op;
const unsigned char *ip;
size_t t, next;
size_t state = 0;
const unsigned char *m_pos;
const unsigned char * const ip_end = in + in_len;
unsigned char * const op_end = out + *out_len;
op = out;
ip = in;
if (unlikely(in_len < 3))
goto input_overrun;
if (*ip > 17) {
t = *ip++ - 17;
if (t < 4) {
next = t;
goto match_next;
}
goto copy_literal_run;
}
for (;;) {
t = *ip++;
if (t < 16) {
if (likely(state == 0)) {
if (unlikely(t == 0)) {
while (unlikely(*ip == 0)) {
t += 255;
ip++;
NEED_IP(1);
}
t += 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
COPY8(op, ip);
op += 8;
ip += 8;
COPY8(op, ip);
op += 8;
ip += 8;
} while (ip < ie);
ip = ie;
op = oe;
} else
#endif
{
NEED_OP(t);
NEED_IP(t + 3);
do {
*op++ = *ip++;
} while (--t > 0);
}
state = 4;
continue;
} else if (state != 4) {
next = t & 3;
m_pos = op - 1;
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
NEED_OP(2);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
goto match_next;
} else {
next = t & 3;
m_pos = op - (1 + M2_MAX_OFFSET);
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
t = 3;
}
} else if (t >= 64) {
next = t & 3;
m_pos = op - 1;
m_pos -= (t >> 2) & 7;
m_pos -= *ip++ << 3;
t = (t >> 5) - 1 + (3 - 1);
} else if (t >= 32) {
t = (t & 31) + (3 - 1);
if (unlikely(t == 2)) {
while (unlikely(*ip == 0)) {
t += 255;
ip++;
NEED_IP(1);
}
t += 31 + *ip++;
NEED_IP(2);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
ip += 2;
m_pos -= next >> 2;
next &= 3;
} else {
m_pos = op;
m_pos -= (t & 8) << 11;
t = (t & 7) + (3 - 1);
if (unlikely(t == 2)) {
while (unlikely(*ip == 0)) {
t += 255;
ip++;
NEED_IP(1);
}
t += 7 + *ip++;
NEED_IP(2);
}
next = get_unaligned_le16(ip);
ip += 2;
m_pos -= next >> 2;
next &= 3;
if (m_pos == op)
goto eof_found;
m_pos -= 0x4000;
}
TEST_LB(m_pos);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
if (likely(HAVE_OP(t + 15))) {
do {
COPY8(op, m_pos);
op += 8;
m_pos += 8;
COPY8(op, m_pos);
op += 8;
m_pos += 8;
} while (op < oe);
op = oe;
if (HAVE_IP(6)) {
state = next;
COPY4(op, ip);
op += next;
ip += next;
continue;
}
} else {
NEED_OP(t);
do {
*op++ = *m_pos++;
} while (op < oe);
}
} else
#endif
{
unsigned char *oe = op + t;
NEED_OP(t);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
m_pos += 2;
do {
*op++ = *m_pos++;
} while (op < oe);
}
match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (likely(HAVE_IP(6) && HAVE_OP(4))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
NEED_IP(t + 3);
NEED_OP(t);
while (t > 0) {
*op++ = *ip++;
t--;
}
}
}
eof_found:
*out_len = op - out;
return (t != 3 ? LZO_E_ERROR :
ip == ip_end ? LZO_E_OK :
ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN);
input_overrun:
*out_len = op - out;
return LZO_E_INPUT_OVERRUN;
output_overrun:
*out_len = op - out;
return LZO_E_OUTPUT_OVERRUN;
lookbehind_overrun:
*out_len = op - out;
return LZO_E_LOOKBEHIND_OVERRUN;
}
|
CWE-119
| 179,474 | 1,234 |
274406254790435141604534304938072258803
| null | null | null |
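Note on the record above: guards in the HAVE_IP(t + 15) / HAVE_OP(t + 15) style add a constant to a stream-derived t, and that addition can itself wrap so the guard passes for a copy that cannot fit. A wrap-free shape for such a guard, as a sketch:

#include <stdio.h>
#include <stddef.h>

/* Sketch: compare t and the slack against the remaining space
 * separately, so no sum involving attacker-derived t is ever formed. */
static int have_op(size_t remaining, size_t t, size_t slack)
{
    return t <= remaining && slack <= remaining - t;
}

int main(void)
{
    printf("%d\n", have_op(64, 40, 15));            /* 1: fits    */
    printf("%d\n", have_op(64, (size_t)-8, 15));    /* 0: rejects */
    return 0;
}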
sgminer
|
910c36089940e81fb85c65b8e63dcd2fac71470c
| 1 |
static bool parse_notify(struct pool *pool, json_t *val)
{
char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit,
*ntime, *header;
size_t cb1_len, cb2_len, alloc_len;
unsigned char *cb1, *cb2;
bool clean, ret = false;
int merkles, i;
json_t *arr;
arr = json_array_get(val, 4);
if (!arr || !json_is_array(arr))
goto out;
merkles = json_array_size(arr);
job_id = json_array_string(val, 0);
prev_hash = json_array_string(val, 1);
coinbase1 = json_array_string(val, 2);
coinbase2 = json_array_string(val, 3);
bbversion = json_array_string(val, 5);
nbit = json_array_string(val, 6);
ntime = json_array_string(val, 7);
clean = json_is_true(json_array_get(val, 8));
if (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) {
/* Annoying but we must not leak memory */
if (job_id)
free(job_id);
if (prev_hash)
free(prev_hash);
if (coinbase1)
free(coinbase1);
if (coinbase2)
free(coinbase2);
if (bbversion)
free(bbversion);
if (nbit)
free(nbit);
if (ntime)
free(ntime);
goto out;
}
cg_wlock(&pool->data_lock);
free(pool->swork.job_id);
free(pool->swork.prev_hash);
free(pool->swork.bbversion);
free(pool->swork.nbit);
free(pool->swork.ntime);
pool->swork.job_id = job_id;
pool->swork.prev_hash = prev_hash;
cb1_len = strlen(coinbase1) / 2;
cb2_len = strlen(coinbase2) / 2;
pool->swork.bbversion = bbversion;
pool->swork.nbit = nbit;
pool->swork.ntime = ntime;
pool->swork.clean = clean;
alloc_len = pool->swork.cb_len = cb1_len + pool->n1_len + pool->n2size + cb2_len;
pool->nonce2_offset = cb1_len + pool->n1_len;
for (i = 0; i < pool->swork.merkles; i++)
free(pool->swork.merkle_bin[i]);
if (merkles) {
pool->swork.merkle_bin = (unsigned char **)realloc(pool->swork.merkle_bin,
sizeof(char *) * merkles + 1);
for (i = 0; i < merkles; i++) {
char *merkle = json_array_string(arr, i);
pool->swork.merkle_bin[i] = (unsigned char *)malloc(32);
if (unlikely(!pool->swork.merkle_bin[i]))
quit(1, "Failed to malloc pool swork merkle_bin");
hex2bin(pool->swork.merkle_bin[i], merkle, 32);
free(merkle);
}
}
pool->swork.merkles = merkles;
if (clean)
pool->nonce2 = 0;
pool->merkle_offset = strlen(pool->swork.bbversion) +
strlen(pool->swork.prev_hash);
pool->swork.header_len = pool->merkle_offset +
/* merkle_hash */ 32 +
strlen(pool->swork.ntime) +
strlen(pool->swork.nbit) +
/* nonce */ 8 +
/* workpadding */ 96;
pool->merkle_offset /= 2;
pool->swork.header_len = pool->swork.header_len * 2 + 1;
align_len(&pool->swork.header_len);
header = (char *)alloca(pool->swork.header_len);
snprintf(header, pool->swork.header_len,
"%s%s%s%s%s%s%s",
pool->swork.bbversion,
pool->swork.prev_hash,
blank_merkel,
pool->swork.ntime,
pool->swork.nbit,
"00000000", /* nonce */
workpadding);
if (unlikely(!hex2bin(pool->header_bin, header, 128)))
quit(1, "Failed to convert header to header_bin in parse_notify");
cb1 = (unsigned char *)calloc(cb1_len, 1);
if (unlikely(!cb1))
quithere(1, "Failed to calloc cb1 in parse_notify");
hex2bin(cb1, coinbase1, cb1_len);
cb2 = (unsigned char *)calloc(cb2_len, 1);
if (unlikely(!cb2))
quithere(1, "Failed to calloc cb2 in parse_notify");
hex2bin(cb2, coinbase2, cb2_len);
free(pool->coinbase);
align_len(&alloc_len);
pool->coinbase = (unsigned char *)calloc(alloc_len, 1);
if (unlikely(!pool->coinbase))
quit(1, "Failed to calloc pool coinbase in parse_notify");
memcpy(pool->coinbase, cb1, cb1_len);
memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len);
memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, cb2_len);
cg_wunlock(&pool->data_lock);
if (opt_protocol) {
applog(LOG_DEBUG, "job_id: %s", job_id);
applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
applog(LOG_DEBUG, "bbversion: %s", bbversion);
applog(LOG_DEBUG, "nbit: %s", nbit);
applog(LOG_DEBUG, "ntime: %s", ntime);
applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
}
free(coinbase1);
free(coinbase2);
free(cb1);
free(cb2);
/* A notify message is the closest stratum gets to a getwork */
pool->getwork_requested++;
total_getworks++;
ret = true;
if (pool == current_pool())
opt_work_update = true;
out:
return ret;
}
|
CWE-20
| 179,475 | 1,235 |
317403549402048861780792420180268215135
| null | null | null |
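Note on the record above: two patterns stand out. First, hex2bin(pool->swork.merkle_bin[i], merkle, 32) consumes 64 hex digits from a pool-supplied string whose length is never checked. Second, in "sizeof(char *) * merkles + 1" the + 1 adds one byte rather than one slot, a precedence slip. A sketch of a checked conversion and the corrected sizing; hex2bin_checked is a hypothetical stand-in, not the project's helper:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Sketch: validate that the remote hex string has exactly 2 * outlen
 * digits before converting, instead of reading past a short string. */
static int hex2bin_checked(unsigned char *dst, const char *src, size_t outlen)
{
    size_t i;

    if (strlen(src) != 2 * outlen)
        return -1;
    for (i = 0; i < outlen; i++)
        if (sscanf(src + 2 * i, "%2hhx", &dst[i]) != 1)
            return -1;
    return 0;
}

int main(void)
{
    unsigned char bin[4];
    char **merkles = malloc(sizeof(char *) * (3 + 1));  /* slots, not bytes */

    printf("%d\n", hex2bin_checked(bin, "deadbeef", 4));  /* 0  */
    printf("%d\n", hex2bin_checked(bin, "dead", 4));      /* -1 */
    free(merkles);
    return 0;
}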
cgminer
|
e1c5050734123973b99d181c45e74b2cbb00272e
| 1 |
bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
char url_address[256], port[6];
int url_len, port_len = 0;
*sockaddr_url = url;
url_begin = strstr(url, "//");
if (!url_begin)
url_begin = url;
else
url_begin += 2;
/* Look for numeric ipv6 entries */
ipv6_begin = strstr(url_begin, "[");
ipv6_end = strstr(url_begin, "]");
if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
url_end = strstr(ipv6_end, ":");
else
url_end = strstr(url_begin, ":");
if (url_end) {
url_len = url_end - url_begin;
port_len = strlen(url_begin) - url_len - 1;
if (port_len < 1)
return false;
port_start = url_end + 1;
} else
url_len = strlen(url_begin);
if (url_len < 1)
return false;
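	/* Hedged note (CWE-119 per dataset): url_len is bounded only by
	 * strlen(url), so this sprintf can write past url_address[256]
	 * when handed an oversized URL. */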
sprintf(url_address, "%.*s", url_len, url_begin);
if (port_len) {
char *slash;
snprintf(port, 6, "%.*s", port_len, port_start);
slash = strchr(port, '/');
if (slash)
*slash = '\0';
} else
strcpy(port, "80");
*sockaddr_port = strdup(port);
*sockaddr_url = strdup(url_address);
return true;
}
|
CWE-119
| 179,476 | 1,236 |
268425937881460227077617707628391335121
| null | null | null |
cgminer
|
e1c5050734123973b99d181c45e74b2cbb00272e
| 1 |
static bool parse_notify(struct pool *pool, json_t *val)
{
char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit,
*ntime, header[228];
unsigned char *cb1 = NULL, *cb2 = NULL;
size_t cb1_len, cb2_len, alloc_len;
bool clean, ret = false;
int merkles, i;
json_t *arr;
arr = json_array_get(val, 4);
if (!arr || !json_is_array(arr))
goto out;
merkles = json_array_size(arr);
job_id = json_array_string(val, 0);
prev_hash = __json_array_string(val, 1);
coinbase1 = json_array_string(val, 2);
coinbase2 = json_array_string(val, 3);
bbversion = __json_array_string(val, 5);
nbit = __json_array_string(val, 6);
ntime = __json_array_string(val, 7);
clean = json_is_true(json_array_get(val, 8));
if (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) {
/* Annoying but we must not leak memory */
if (job_id)
free(job_id);
if (coinbase1)
free(coinbase1);
if (coinbase2)
free(coinbase2);
goto out;
}
cg_wlock(&pool->data_lock);
free(pool->swork.job_id);
pool->swork.job_id = job_id;
snprintf(pool->prev_hash, 65, "%s", prev_hash);
cb1_len = strlen(coinbase1) / 2;
cb2_len = strlen(coinbase2) / 2;
snprintf(pool->bbversion, 9, "%s", bbversion);
snprintf(pool->nbit, 9, "%s", nbit);
snprintf(pool->ntime, 9, "%s", ntime);
pool->swork.clean = clean;
alloc_len = pool->coinbase_len = cb1_len + pool->n1_len + pool->n2size + cb2_len;
pool->nonce2_offset = cb1_len + pool->n1_len;
for (i = 0; i < pool->merkles; i++)
free(pool->swork.merkle_bin[i]);
if (merkles) {
pool->swork.merkle_bin = realloc(pool->swork.merkle_bin,
sizeof(char *) * merkles + 1);
for (i = 0; i < merkles; i++) {
char *merkle = json_array_string(arr, i);
pool->swork.merkle_bin[i] = malloc(32);
if (unlikely(!pool->swork.merkle_bin[i]))
quit(1, "Failed to malloc pool swork merkle_bin");
if (opt_protocol)
applog(LOG_DEBUG, "merkle %d: %s", i, merkle);
ret = hex2bin(pool->swork.merkle_bin[i], merkle, 32);
free(merkle);
if (unlikely(!ret)) {
applog(LOG_ERR, "Failed to convert merkle to merkle_bin in parse_notify");
goto out_unlock;
}
}
}
pool->merkles = merkles;
if (clean)
pool->nonce2 = 0;
#if 0
header_len = strlen(pool->bbversion) +
strlen(pool->prev_hash);
/* merkle_hash */ 32 +
strlen(pool->ntime) +
strlen(pool->nbit) +
/* nonce */ 8 +
/* workpadding */ 96;
#endif
snprintf(header, 225,
"%s%s%s%s%s%s%s",
pool->bbversion,
pool->prev_hash,
blank_merkle,
pool->ntime,
pool->nbit,
"00000000", /* nonce */
workpadding);
ret = hex2bin(pool->header_bin, header, 112);
if (unlikely(!ret)) {
applog(LOG_ERR, "Failed to convert header to header_bin in parse_notify");
goto out_unlock;
}
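	/* Hedged note (CWE-119 per dataset): cb1_len derives from the length
	 * of the attacker-supplied coinbase1 string, so this alloca (and the
	 * cb2 one below) can grow the stack by an arbitrary amount. */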
cb1 = alloca(cb1_len);
ret = hex2bin(cb1, coinbase1, cb1_len);
if (unlikely(!ret)) {
applog(LOG_ERR, "Failed to convert cb1 to cb1_bin in parse_notify");
goto out_unlock;
}
cb2 = alloca(cb2_len);
ret = hex2bin(cb2, coinbase2, cb2_len);
if (unlikely(!ret)) {
applog(LOG_ERR, "Failed to convert cb2 to cb2_bin in parse_notify");
goto out_unlock;
}
free(pool->coinbase);
align_len(&alloc_len);
pool->coinbase = calloc(alloc_len, 1);
if (unlikely(!pool->coinbase))
quit(1, "Failed to calloc pool coinbase in parse_notify");
memcpy(pool->coinbase, cb1, cb1_len);
memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len);
memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, cb2_len);
if (opt_debug) {
char *cb = bin2hex(pool->coinbase, pool->coinbase_len);
applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb);
free(cb);
}
out_unlock:
cg_wunlock(&pool->data_lock);
if (opt_protocol) {
applog(LOG_DEBUG, "job_id: %s", job_id);
applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
applog(LOG_DEBUG, "bbversion: %s", bbversion);
applog(LOG_DEBUG, "nbit: %s", nbit);
applog(LOG_DEBUG, "ntime: %s", ntime);
applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
}
free(coinbase1);
free(coinbase2);
/* A notify message is the closest stratum gets to a getwork */
pool->getwork_requested++;
total_getworks++;
if (pool == current_pool())
opt_work_update = true;
out:
return ret;
}
|
CWE-119
| 179,478 | 1,237 |
84334959585750243556126605758413185403
| null | null | null |
cgminer
|
e1c5050734123973b99d181c45e74b2cbb00272e
| 1 |
static bool parse_reconnect(struct pool *pool, json_t *val)
{
char *sockaddr_url, *stratum_port, *tmp;
char *url, *port, address[256];
memset(address, 0, 255);
url = (char *)json_string_value(json_array_get(val, 0));
if (!url)
url = pool->sockaddr_url;
else {
char *dot_pool, *dot_reconnect;
dot_pool = strchr(pool->sockaddr_url, '.');
if (!dot_pool) {
applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'",
pool->sockaddr_url);
return false;
}
dot_reconnect = strchr(url, '.');
if (!dot_reconnect) {
applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'",
url);
return false;
}
if (strcmp(dot_pool, dot_reconnect)) {
applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'",
pool->sockaddr_url);
return false;
}
}
port = (char *)json_string_value(json_array_get(val, 1));
if (!port)
port = pool->stratum_port;
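	/* Hedged note (CWE-119 per dataset): url and port come from the remote
	 * pool; this unbounded sprintf can overflow address[256]. */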
sprintf(address, "%s:%s", url, port);
if (!extract_sockaddr(address, &sockaddr_url, &stratum_port))
return false;
applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address);
clear_pool_work(pool);
mutex_lock(&pool->stratum_lock);
__suspend_stratum(pool);
tmp = pool->sockaddr_url;
pool->sockaddr_url = sockaddr_url;
pool->stratum_url = pool->sockaddr_url;
free(tmp);
tmp = pool->stratum_port;
pool->stratum_port = stratum_port;
free(tmp);
mutex_unlock(&pool->stratum_lock);
if (!restart_stratum(pool)) {
pool_failed(pool);
return false;
}
return true;
}
|
CWE-119
| 179,479 | 1,238 |
137361347459447412875947908752625113506
| null | null | null |
krb5
|
a7886f0ed1277c69142b14a2c6629175a6331edc
| 1 |
acc_ctx_cont(OM_uint32 *minstat,
gss_buffer_t buf,
gss_ctx_id_t *ctx,
gss_buffer_t *responseToken,
gss_buffer_t *mechListMIC,
OM_uint32 *negState,
send_token_flag *return_token)
{
OM_uint32 ret, tmpmin;
gss_OID supportedMech;
spnego_gss_ctx_id_t sc;
unsigned int len;
unsigned char *ptr, *bufstart;
sc = (spnego_gss_ctx_id_t)*ctx;
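	/* Hedged note (CWE-476 per dataset): *ctx may still be NULL when a
	 * malformed continuation token arrives before a context exists, yet
	 * sc is dereferenced unconditionally at "sc->firstpass = 0" below. */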
ret = GSS_S_DEFECTIVE_TOKEN;
*negState = REJECT;
*minstat = 0;
supportedMech = GSS_C_NO_OID;
*return_token = ERROR_TOKEN_SEND;
*responseToken = *mechListMIC = GSS_C_NO_BUFFER;
ptr = bufstart = buf->value;
#define REMAIN (buf->length - (ptr - bufstart))
if (REMAIN > INT_MAX)
return GSS_S_DEFECTIVE_TOKEN;
/*
* Attempt to work with old Sun SPNEGO.
*/
if (*ptr == HEADER_ID) {
ret = g_verify_token_header(gss_mech_spnego,
&len, &ptr, 0, REMAIN);
if (ret) {
*minstat = ret;
return GSS_S_DEFECTIVE_TOKEN;
}
}
if (*ptr != (CONTEXT | 0x01)) {
return GSS_S_DEFECTIVE_TOKEN;
}
ret = get_negTokenResp(minstat, ptr, REMAIN,
negState, &supportedMech,
responseToken, mechListMIC);
if (ret != GSS_S_COMPLETE)
goto cleanup;
if (*responseToken == GSS_C_NO_BUFFER &&
*mechListMIC == GSS_C_NO_BUFFER) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto cleanup;
}
if (supportedMech != GSS_C_NO_OID) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto cleanup;
}
sc->firstpass = 0;
*negState = ACCEPT_INCOMPLETE;
*return_token = CONT_TOKEN_SEND;
cleanup:
if (supportedMech != GSS_C_NO_OID) {
generic_gss_release_oid(&tmpmin, &supportedMech);
}
return ret;
#undef REMAIN
}
|
CWE-476
| 179,482 | 1,239 |
111177546391845933490047709357450343244
| null | null | null |
krb5
|
f18ddf5d82de0ab7591a36e465bc24225776940f
| 1 |
init_ctx_reselect(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
OM_uint32 acc_negState, gss_OID supportedMech,
gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
OM_uint32 *negState, send_token_flag *tokflag)
{
OM_uint32 tmpmin;
size_t i;
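	/* Hedged note (CWE-415 per dataset): after an earlier reselect,
	 * internal_mech aliases an element of sc->mech_set (see the
	 * assignment below); releasing it here frees storage that is freed
	 * again when mech_set itself is destroyed. */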
generic_gss_release_oid(&tmpmin, &sc->internal_mech);
gss_delete_sec_context(&tmpmin, &sc->ctx_handle,
GSS_C_NO_BUFFER);
/* Find supportedMech in sc->mech_set. */
for (i = 0; i < sc->mech_set->count; i++) {
if (g_OID_equal(supportedMech, &sc->mech_set->elements[i]))
break;
}
if (i == sc->mech_set->count)
return GSS_S_DEFECTIVE_TOKEN;
sc->internal_mech = &sc->mech_set->elements[i];
/*
* Windows 2003 and earlier don't correctly send a
* negState of request-mic when counter-proposing a
* mechanism. They probably don't handle mechListMICs
* properly either.
*/
if (acc_negState != REQUEST_MIC)
return GSS_S_DEFECTIVE_TOKEN;
sc->mech_complete = 0;
sc->mic_reqd = 1;
*negState = REQUEST_MIC;
*tokflag = CONT_TOKEN_SEND;
return GSS_S_CONTINUE_NEEDED;
}
|
CWE-415
| 179,483 | 1,240 |
116847216199976585060972233215007848831
| null | null | null |
php-src
|
b34d7849ed90ced9345f8ea1c59bc8d101c18468
| 1 |
static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)
{
u_short type, class, dlen;
u_long ttl;
long n, i;
u_short s;
u_char *tp, *p;
char name[MAXHOSTNAMELEN];
int have_v6_break = 0, in_v6_break = 0;
*subarray = NULL;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
GETSHORT(type, cp);
GETSHORT(class, cp);
GETLONG(ttl, cp);
GETSHORT(dlen, cp);
if (type_to_fetch != T_ANY && type != type_to_fetch) {
cp += dlen;
return cp;
}
if (!store) {
cp += dlen;
return cp;
}
ALLOC_INIT_ZVAL(*subarray);
array_init(*subarray);
add_assoc_string(*subarray, "host", name, 1);
add_assoc_string(*subarray, "class", "IN", 1);
add_assoc_long(*subarray, "ttl", ttl);
if (raw) {
add_assoc_long(*subarray, "type", type);
add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1);
cp += dlen;
return cp;
}
switch (type) {
case DNS_T_A:
add_assoc_string(*subarray, "type", "A", 1);
snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]);
add_assoc_string(*subarray, "ip", name, 1);
cp += dlen;
break;
case DNS_T_MX:
add_assoc_string(*subarray, "type", "MX", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
/* no break; */
case DNS_T_CNAME:
if (type == DNS_T_CNAME) {
add_assoc_string(*subarray, "type", "CNAME", 1);
}
/* no break; */
case DNS_T_NS:
if (type == DNS_T_NS) {
add_assoc_string(*subarray, "type", "NS", 1);
}
/* no break; */
case DNS_T_PTR:
if (type == DNS_T_PTR) {
add_assoc_string(*subarray, "type", "PTR", 1);
}
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_HINFO:
/* See RFC 1010 for values */
add_assoc_string(*subarray, "type", "HINFO", 1);
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1);
cp += n;
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "os", (char*)cp, n, 1);
cp += n;
break;
case DNS_T_TXT:
{
int ll = 0;
zval *entries = NULL;
add_assoc_string(*subarray, "type", "TXT", 1);
tp = emalloc(dlen + 1);
MAKE_STD_ZVAL(entries);
array_init(entries);
while (ll < dlen) {
n = cp[ll];
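				/* Hedged note (CWE-119 per dataset): n comes
				 * straight from the packet and is not checked
				 * against dlen, so this copy can read past
				 * the end of the record. */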
memcpy(tp + ll , cp + ll + 1, n);
add_next_index_stringl(entries, cp + ll + 1, n, 1);
ll = ll + n + 1;
}
tp[dlen] = '\0';
cp += dlen;
add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0);
add_assoc_zval(*subarray, "entries", entries);
}
break;
case DNS_T_SOA:
add_assoc_string(*subarray, "type", "SOA", 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "mname", name, 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "rname", name, 1);
GETLONG(n, cp);
add_assoc_long(*subarray, "serial", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "refresh", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "retry", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "expire", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "minimum-ttl", n);
break;
case DNS_T_AAAA:
tp = (u_char*)name;
for(i=0; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "type", "AAAA", 1);
add_assoc_string(*subarray, "ipv6", name, 1);
break;
case DNS_T_A6:
p = cp;
add_assoc_string(*subarray, "type", "A6", 1);
n = ((int)cp[0]) & 0xFF;
cp++;
add_assoc_long(*subarray, "masklen", n);
tp = (u_char*)name;
if (n > 15) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
}
if (n % 16 > 8) {
/* Partial short */
if (cp[0] != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
sprintf((char*)tp, "%x", cp[0] & 0xFF);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
cp++;
}
for (i = (n + 8) / 16; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "ipv6", name, 1);
if (cp < p + dlen) {
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "chain", name, 1);
}
break;
case DNS_T_SRV:
add_assoc_string(*subarray, "type", "SRV", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "weight", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "port", n);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_NAPTR:
add_assoc_string(*subarray, "type", "NAPTR", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "order", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pref", n);
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1);
cp += n;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "replacement", name, 1);
break;
default:
zval_ptr_dtor(subarray);
*subarray = NULL;
cp += dlen;
break;
}
return cp;
}
|
CWE-119
| 179,486 | 1,242 |
61555348514619221447360707443484627048
| null | null | null |
linux
|
4442dc8a92b8f9ad8ee9e7f8438f4c04c03a22dc
| 1 |
static int rd_build_device_space(struct rd_dev *rd_dev)
{
u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
/* Don't need backing pages for NULLIO */
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
total_sg_needed = rd_dev->rd_page_count;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!sg_table) {
pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
rd_dev->sg_table_array = sg_table;
rd_dev->sg_table_count = sg_tables;
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
if (!sg) {
pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
sg_table[i].page_start_offset = page_offset;
sg_table[i++].page_end_offset = (page_offset + sg_per_table)
- 1;
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
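			/* Hedged note (CWE-264 per dataset): the pages are
			 * not allocated with __GFP_ZERO, so stale kernel
			 * memory can be exposed through the ramdisk
			 * backstore. */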
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
return 0;
}
|
CWE-264
| 179,487 | 1,243 |
231707640209773852599856573451469871968
| null | null | null |
miniupnp
|
3a87aa2f10bd7f1408e1849bdb59c41dd63a9fe9
| 1 |
getHTTPResponse(int s, int * size)
{
char buf[2048];
int n;
int endofheaders = 0;
int chunked = 0;
int content_length = -1;
unsigned int chunksize = 0;
unsigned int bytestocopy = 0;
/* buffers : */
char * header_buf;
unsigned int header_buf_len = 2048;
unsigned int header_buf_used = 0;
char * content_buf;
unsigned int content_buf_len = 2048;
unsigned int content_buf_used = 0;
char chunksize_buf[32];
unsigned int chunksize_buf_index;
header_buf = malloc(header_buf_len);
content_buf = malloc(content_buf_len);
chunksize_buf[0] = '\0';
chunksize_buf_index = 0;
while((n = receivedata(s, buf, 2048, 5000, NULL)) > 0)
{
if(endofheaders == 0)
{
int i;
int linestart=0;
int colon=0;
int valuestart=0;
if(header_buf_used + n > header_buf_len) {
header_buf = realloc(header_buf, header_buf_used + n);
header_buf_len = header_buf_used + n;
}
memcpy(header_buf + header_buf_used, buf, n);
header_buf_used += n;
/* search for CR LF CR LF (end of headers)
* recognize also LF LF */
i = 0;
while(i < ((int)header_buf_used-1) && (endofheaders == 0)) {
if(header_buf[i] == '\r') {
i++;
if(header_buf[i] == '\n') {
i++;
if(i < (int)header_buf_used && header_buf[i] == '\r') {
i++;
if(i < (int)header_buf_used && header_buf[i] == '\n') {
endofheaders = i+1;
}
}
}
} else if(header_buf[i] == '\n') {
i++;
if(header_buf[i] == '\n') {
endofheaders = i+1;
}
}
i++;
}
if(endofheaders == 0)
continue;
/* parse header lines */
for(i = 0; i < endofheaders - 1; i++) {
if(colon <= linestart && header_buf[i]==':')
{
colon = i;
while(i < (endofheaders-1)
&& (header_buf[i+1] == ' ' || header_buf[i+1] == '\t'))
i++;
valuestart = i + 1;
}
/* detecting end of line */
else if(header_buf[i]=='\r' || header_buf[i]=='\n')
{
if(colon > linestart && valuestart > colon)
{
#ifdef DEBUG
printf("header='%.*s', value='%.*s'\n",
colon-linestart, header_buf+linestart,
i-valuestart, header_buf+valuestart);
#endif
if(0==strncasecmp(header_buf+linestart, "content-length", colon-linestart))
{
content_length = atoi(header_buf+valuestart);
#ifdef DEBUG
printf("Content-Length: %d\n", content_length);
#endif
}
else if(0==strncasecmp(header_buf+linestart, "transfer-encoding", colon-linestart)
&& 0==strncasecmp(header_buf+valuestart, "chunked", 7))
{
#ifdef DEBUG
printf("chunked transfer-encoding!\n");
#endif
chunked = 1;
}
}
while(header_buf[i]=='\r' || header_buf[i] == '\n')
i++;
linestart = i;
colon = linestart;
valuestart = 0;
}
}
/* copy the remaining of the received data back to buf */
n = header_buf_used - endofheaders;
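		/* Hedged note (CWE-119 per dataset): once header_buf has
		 * grown past sizeof(buf), n can exceed 2048 and this memcpy
		 * can overflow the stack buffer. */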
memcpy(buf, header_buf + endofheaders, n);
/* if(headers) */
}
if(endofheaders)
{
/* content */
if(chunked)
{
int i = 0;
while(i < n)
{
if(chunksize == 0)
{
/* reading chunk size */
if(chunksize_buf_index == 0) {
/* skipping any leading CR LF */
if(i<n && buf[i] == '\r') i++;
if(i<n && buf[i] == '\n') i++;
}
while(i<n && isxdigit(buf[i])
&& chunksize_buf_index < (sizeof(chunksize_buf)-1))
{
chunksize_buf[chunksize_buf_index++] = buf[i];
chunksize_buf[chunksize_buf_index] = '\0';
i++;
}
while(i<n && buf[i] != '\r' && buf[i] != '\n')
i++; /* discarding chunk-extension */
if(i<n && buf[i] == '\r') i++;
if(i<n && buf[i] == '\n') {
unsigned int j;
for(j = 0; j < chunksize_buf_index; j++) {
if(chunksize_buf[j] >= '0'
&& chunksize_buf[j] <= '9')
chunksize = (chunksize << 4) + (chunksize_buf[j] - '0');
else
chunksize = (chunksize << 4) + ((chunksize_buf[j] | 32) - 'a' + 10);
}
chunksize_buf[0] = '\0';
chunksize_buf_index = 0;
i++;
} else {
/* not finished to get chunksize */
continue;
}
#ifdef DEBUG
printf("chunksize = %u (%x)\n", chunksize, chunksize);
#endif
if(chunksize == 0)
{
#ifdef DEBUG
printf("end of HTTP content - %d %d\n", i, n);
/*printf("'%.*s'\n", n-i, buf+i);*/
#endif
goto end_of_stream;
}
}
bytestocopy = ((int)chunksize < (n - i))?chunksize:(unsigned int)(n - i);
if((content_buf_used + bytestocopy) > content_buf_len)
{
if(content_length >= (int)(content_buf_used + bytestocopy)) {
content_buf_len = content_length;
} else {
content_buf_len = content_buf_used + bytestocopy;
}
content_buf = (char *)realloc((void *)content_buf,
content_buf_len);
}
memcpy(content_buf + content_buf_used, buf + i, bytestocopy);
content_buf_used += bytestocopy;
i += bytestocopy;
chunksize -= bytestocopy;
}
}
else
{
/* not chunked */
if(content_length > 0
&& (int)(content_buf_used + n) > content_length) {
/* skipping additional bytes */
n = content_length - content_buf_used;
}
if(content_buf_used + n > content_buf_len)
{
if(content_length >= (int)(content_buf_used + n)) {
content_buf_len = content_length;
} else {
content_buf_len = content_buf_used + n;
}
content_buf = (char *)realloc((void *)content_buf,
content_buf_len);
}
memcpy(content_buf + content_buf_used, buf, n);
content_buf_used += n;
}
}
/* use the Content-Length header value if available */
if(content_length > 0 && (int)content_buf_used >= content_length)
{
#ifdef DEBUG
printf("End of HTTP content\n");
#endif
break;
}
}
end_of_stream:
free(header_buf); header_buf = NULL;
*size = content_buf_used;
if(content_buf_used == 0)
{
free(content_buf);
content_buf = NULL;
}
return content_buf;
}
|
CWE-119
| 179,496 | 1,251 |
306297354977511070065482433729517457700
| null | null | null |
file
|
39c7ac1106be844a5296d3eb5971946cc09ffda0
| 1 |
donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size,
int clazz, int swap, size_t align, int *flags)
{
Elf32_Nhdr nh32;
Elf64_Nhdr nh64;
size_t noff, doff;
#ifdef ELFCORE
int os_style = -1;
#endif
uint32_t namesz, descsz;
unsigned char *nbuf = CAST(unsigned char *, vbuf);
(void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof);
offset += xnh_sizeof;
namesz = xnh_namesz;
descsz = xnh_descsz;
if ((namesz == 0) && (descsz == 0)) {
/*
* We're out of note headers.
*/
return (offset >= size) ? offset : size;
}
if (namesz & 0x80000000) {
(void)file_printf(ms, ", bad note name size 0x%lx",
(unsigned long)namesz);
return offset;
}
if (descsz & 0x80000000) {
(void)file_printf(ms, ", bad note description size 0x%lx",
(unsigned long)descsz);
return offset;
}
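	/* Hedged note (CWE-20 per dataset): beyond the sign-bit tests above,
	 * note sizes and counts are not otherwise limited, so a crafted ELF
	 * can keep this parser churning through note records. */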
noff = offset;
doff = ELF_ALIGN(offset + namesz);
if (offset + namesz > size) {
/*
* We're past the end of the buffer.
*/
return doff;
}
offset = ELF_ALIGN(doff + descsz);
if (doff + descsz > size) {
/*
* We're past the end of the buffer.
*/
return (offset >= size) ? offset : size;
}
if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) ==
(FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID))
goto core;
if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 &&
xnh_type == NT_GNU_VERSION && descsz == 2) {
file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]);
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
xnh_type == NT_GNU_VERSION && descsz == 16) {
uint32_t desc[4];
(void)memcpy(desc, &nbuf[doff], sizeof(desc));
if (file_printf(ms, ", for GNU/") == -1)
return size;
switch (elf_getu32(swap, desc[0])) {
case GNU_OS_LINUX:
if (file_printf(ms, "Linux") == -1)
return size;
break;
case GNU_OS_HURD:
if (file_printf(ms, "Hurd") == -1)
return size;
break;
case GNU_OS_SOLARIS:
if (file_printf(ms, "Solaris") == -1)
return size;
break;
case GNU_OS_KFREEBSD:
if (file_printf(ms, "kFreeBSD") == -1)
return size;
break;
case GNU_OS_KNETBSD:
if (file_printf(ms, "kNetBSD") == -1)
return size;
break;
default:
if (file_printf(ms, "<unknown>") == -1)
return size;
}
if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]),
elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1)
return size;
*flags |= FLAGS_DID_NOTE;
return size;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) {
uint8_t desc[20];
uint32_t i;
if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? "md5/uuid" :
"sha1") == -1)
return size;
(void)memcpy(desc, &nbuf[doff], descsz);
for (i = 0; i < descsz; i++)
if (file_printf(ms, "%02x", desc[i]) == -1)
return size;
*flags |= FLAGS_DID_BUILD_ID;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 &&
xnh_type == NT_NETBSD_PAX && descsz == 4) {
static const char *pax[] = {
"+mprotect",
"-mprotect",
"+segvguard",
"-segvguard",
"+ASLR",
"-ASLR",
};
uint32_t desc;
size_t i;
int did = 0;
(void)memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (desc && file_printf(ms, ", PaX: ") == -1)
return size;
for (i = 0; i < __arraycount(pax); i++) {
if (((1 << i) & desc) == 0)
continue;
if (file_printf(ms, "%s%s", did++ ? "," : "",
pax[i]) == -1)
return size;
}
}
if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) {
switch (xnh_type) {
case NT_NETBSD_VERSION:
if (descsz == 4) {
do_note_netbsd_version(ms, swap, &nbuf[doff]);
*flags |= FLAGS_DID_NOTE;
return size;
}
break;
case NT_NETBSD_MARCH:
if (file_printf(ms, ", compiled for: %.*s", (int)descsz,
(const char *)&nbuf[doff]) == -1)
return size;
break;
case NT_NETBSD_CMODEL:
if (file_printf(ms, ", compiler model: %.*s",
(int)descsz, (const char *)&nbuf[doff]) == -1)
return size;
break;
default:
if (file_printf(ms, ", note=%u", xnh_type) == -1)
return size;
break;
}
return size;
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) {
if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) {
do_note_freebsd_version(ms, swap, &nbuf[doff]);
*flags |= FLAGS_DID_NOTE;
return size;
}
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 &&
xnh_type == NT_OPENBSD_VERSION && descsz == 4) {
if (file_printf(ms, ", for OpenBSD") == -1)
return size;
/* Content of note is always 0 */
*flags |= FLAGS_DID_NOTE;
return size;
}
if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 &&
xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) {
uint32_t desc;
if (file_printf(ms, ", for DragonFly") == -1)
return size;
(void)memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, " %d.%d.%d", desc / 100000,
desc / 10000 % 10, desc % 10000) == -1)
return size;
*flags |= FLAGS_DID_NOTE;
return size;
}
core:
/*
* Sigh. The 2.0.36 kernel in Debian 2.1, at
* least, doesn't correctly implement name
* sections, in core dumps, as specified by
* the "Program Linking" section of "UNIX(R) System
* V Release 4 Programmer's Guide: ANSI C and
* Programming Support Tools", because my copy
* clearly says "The first 'namesz' bytes in 'name'
* contain a *null-terminated* [emphasis mine]
* character representation of the entry's owner
* or originator", but the 2.0.36 kernel code
* doesn't include the terminating null in the
* name....
*/
if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) ||
(namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) {
os_style = OS_STYLE_SVR4;
}
if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) {
os_style = OS_STYLE_FREEBSD;
}
if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11)
== 0)) {
os_style = OS_STYLE_NETBSD;
}
#ifdef ELFCORE
if ((*flags & FLAGS_DID_CORE) != 0)
return size;
if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) {
if (file_printf(ms, ", %s-style", os_style_names[os_style])
== -1)
return size;
*flags |= FLAGS_DID_CORE_STYLE;
}
switch (os_style) {
case OS_STYLE_NETBSD:
if (xnh_type == NT_NETBSD_CORE_PROCINFO) {
uint32_t signo;
/*
* Extract the program name. It is at
* offset 0x7c, and is up to 32-bytes,
* including the terminating NUL.
*/
if (file_printf(ms, ", from '%.31s'",
&nbuf[doff + 0x7c]) == -1)
return size;
/*
* Extract the signal number. It is at
* offset 0x08.
*/
(void)memcpy(&signo, &nbuf[doff + 0x08],
sizeof(signo));
if (file_printf(ms, " (signal %u)",
elf_getu32(swap, signo)) == -1)
return size;
*flags |= FLAGS_DID_CORE;
return size;
}
break;
default:
if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) {
size_t i, j;
unsigned char c;
/*
* Extract the program name. We assume
* it to be 16 characters (that's what it
* is in SunOS 5.x and Linux).
*
* Unfortunately, it's at a different offset
* in various OSes, so try multiple offsets.
* If the characters aren't all printable,
* reject it.
*/
for (i = 0; i < NOFFSETS; i++) {
unsigned char *cname, *cp;
size_t reloffset = prpsoffsets(i);
size_t noffset = doff + reloffset;
size_t k;
for (j = 0; j < 16; j++, noffset++,
reloffset++) {
/*
* Make sure we're not past
* the end of the buffer; if
* we are, just give up.
*/
if (noffset >= size)
goto tryanother;
/*
* Make sure we're not past
* the end of the contents;
* if we are, this obviously
* isn't the right offset.
*/
if (reloffset >= descsz)
goto tryanother;
c = nbuf[noffset];
if (c == '\0') {
/*
* A '\0' at the
* beginning is
* obviously wrong.
* Any other '\0'
* means we're done.
*/
if (j == 0)
goto tryanother;
else
break;
} else {
/*
* A nonprintable
* character is also
* wrong.
*/
if (!isprint(c) || isquote(c))
goto tryanother;
}
}
/*
* Well, that worked.
*/
/*
* Try next offsets, in case this match is
* in the middle of a string.
*/
for (k = i + 1 ; k < NOFFSETS ; k++) {
size_t no;
int adjust = 1;
if (prpsoffsets(k) >= prpsoffsets(i))
continue;
for (no = doff + prpsoffsets(k);
no < doff + prpsoffsets(i); no++)
adjust = adjust
&& isprint(nbuf[no]);
if (adjust)
i = k;
}
cname = (unsigned char *)
&nbuf[doff + prpsoffsets(i)];
for (cp = cname; *cp && isprint(*cp); cp++)
continue;
/*
* Linux apparently appends a space at the end
* of the command line: remove it.
*/
while (cp > cname && isspace(cp[-1]))
cp--;
if (file_printf(ms, ", from '%.*s'",
(int)(cp - cname), cname) == -1)
return size;
*flags |= FLAGS_DID_CORE;
return size;
tryanother:
;
}
}
break;
}
#endif
return offset;
}
|
CWE-20
| 179,499 | 1,254 |
134716201177067834981482985636296733745
| null | null | null |
linux
|
26b87c7881006311828bb0ab271a551a62dcceb4
| 1 |
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
struct sctp_chunk *chunk;
sctp_chunkhdr_t *ch = NULL;
/* The assumption is that we are safe to process the chunks
* at this time.
*/
if ((chunk = queue->in_progress)) {
/* There is a packet that we have been working on.
* Any post processing work to do before we move on?
*/
if (chunk->singleton ||
chunk->end_of_packet ||
chunk->pdiscard) {
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
} else {
/* Nothing to do. Next chunk in the packet, please. */
ch = (sctp_chunkhdr_t *) chunk->chunk_end;
/* Force chunk->skb->data to chunk->chunk_end. */
skb_pull(chunk->skb,
chunk->chunk_end - chunk->skb->data);
/* Verify that we have at least chunk headers
* worth of buffer left.
*/
if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
}
}
}
/* Do we need to take the next packet out of the queue to process? */
if (!chunk) {
struct list_head *entry;
/* Is the queue empty? */
if (list_empty(&queue->in_chunk_list))
return NULL;
entry = queue->in_chunk_list.next;
chunk = queue->in_progress =
list_entry(entry, struct sctp_chunk, list);
list_del_init(entry);
/* This is the first chunk in the packet. */
chunk->singleton = 1;
ch = (sctp_chunkhdr_t *) chunk->skb->data;
chunk->data_accepted = 0;
}
chunk->chunk_hdr = ch;
chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
/* In the unlikely case of an IP reassembly, the skb could be
* non-linear. If so, update chunk_end so that it doesn't go past
* the skb->tail.
*/
if (unlikely(skb_is_nonlinear(chunk->skb))) {
if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
chunk->chunk_end = skb_tail_pointer(chunk->skb);
}
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
/* RFC 2960, Section 6.10 Bundling
*
* Partial chunks MUST NOT be placed in an SCTP packet.
* If the receiver detects a partial chunk, it MUST drop
* the chunk.
*
* Since the end of the chunk is past the end of our buffer
	 * (which contains the whole packet), we can freely discard
* the whole packet.
*/
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
return NULL;
} else {
/* We are at the end of the packet, so mark the chunk
* in case we need to send a SACK.
*/
chunk->end_of_packet = 1;
}
pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
ntohs(chunk->chunk_hdr->length), chunk->skb->len);
return chunk;
}
|
CWE-399
| 179,502 | 1,257 |
146497261003956651827191971093959685854
| null | null | null |
linux
|
26b87c7881006311828bb0ab271a551a62dcceb4
| 1 |
sctp_chunk_length_valid(struct sctp_chunk *chunk,
__u16 required_length)
{
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
if (unlikely(chunk_length < required_length))
return 0;
return 1;
}
|
CWE-399
| 179,503 | 1,258 |
256102226120872395569526101840031059938
| null | null | null |
linux
|
b69040d8e39f20d5215a03502a8e8b4c6ab78395
| 1 |
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
const struct sctp_association *asoc,
__be32 serial)
{
struct sctp_chunk *ack;
/* Walk through the list of cached ASCONF-ACKs and find the
* ack chunk whose serial number matches that of the request.
*/
list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
if (ack->subh.addip_hdr->serial == serial) {
sctp_chunk_hold(ack);
return ack;
}
}
return NULL;
}
|
CWE-399
| 179,504 | 1,259 |
285652383158838014592938696821462414726
| null | null | null |
linux
|
bfd0a56b90005f8c8a004baf407ad90045c2b11e
| 1 |
static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
{
u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 exit_reason = vmx->exit_reason;
if (vmx->nested.nested_run_pending)
return 0;
if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR));
return 1;
}
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info))
return 0;
else if (is_page_fault(intr_info))
return enable_ept;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
return 0;
case EXIT_REASON_TRIPLE_FAULT:
return 1;
case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH:
return 1;
case EXIT_REASON_CPUID:
return 1;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD:
return 1;
case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
case EXIT_REASON_RDTSC:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
/*
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
*/
return 1;
case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS:
return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
case EXIT_REASON_IO_INSTRUCTION:
return nested_vmx_exit_handled_io(vcpu, vmcs12);
case EXIT_REASON_MSR_READ:
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
return 1;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
case EXIT_REASON_PAUSE_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY:
return 0;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
return 1;
case EXIT_REASON_APIC_ACCESS:
return nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
case EXIT_REASON_EPT_VIOLATION:
case EXIT_REASON_EPT_MISCONFIG:
return 0;
case EXIT_REASON_PREEMPTION_TIMER:
return vmcs12->pin_based_vm_exec_control &
PIN_BASED_VMX_PREEMPTION_TIMER;
case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
return 1;
default:
return 1;
}
}
|
CWE-20
| 179,517 | 1,269 |
74454302223687578052101214034107124660
| null | null | null |
linux
|
95389b08d93d5c06ec63ab49bd732b0069b7c35e
| 1 |
int assoc_array_gc(struct assoc_array *array,
const struct assoc_array_ops *ops,
bool (*iterator)(void *object, void *iterator_data),
void *iterator_data)
{
struct assoc_array_shortcut *shortcut, *new_s;
struct assoc_array_node *node, *new_n;
struct assoc_array_edit *edit;
struct assoc_array_ptr *cursor, *ptr;
struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
unsigned long nr_leaves_on_tree;
int keylen, slot, nr_free, next_slot, i;
pr_devel("-->%s()\n", __func__);
if (!array->root)
return 0;
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
if (!edit)
return -ENOMEM;
edit->array = array;
edit->ops = ops;
edit->ops_for_excised_subtree = ops;
edit->set[0].ptr = &array->root;
edit->excised_subtree = array->root;
new_root = new_parent = NULL;
new_ptr_pp = &new_root;
cursor = array->root;
descend:
/* If this point is a shortcut, then we need to duplicate it and
* advance the target cursor.
*/
if (assoc_array_ptr_is_shortcut(cursor)) {
shortcut = assoc_array_ptr_to_shortcut(cursor);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
if (!new_s)
goto enomem;
pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long)));
new_s->back_pointer = new_parent;
new_s->parent_slot = shortcut->parent_slot;
*new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
new_ptr_pp = &new_s->next_node;
cursor = shortcut->next_node;
}
/* Duplicate the node at this position */
node = assoc_array_ptr_to_node(cursor);
new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n)
goto enomem;
pr_devel("dup node %p -> %p\n", node, new_n);
new_n->back_pointer = new_parent;
new_n->parent_slot = node->parent_slot;
*new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
new_ptr_pp = NULL;
slot = 0;
continue_node:
/* Filter across any leaves and gc any subtrees */
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = node->slots[slot];
if (!ptr)
continue;
if (assoc_array_ptr_is_leaf(ptr)) {
if (iterator(assoc_array_ptr_to_leaf(ptr),
iterator_data))
/* The iterator will have done any reference
* counting on the object for us.
*/
new_n->slots[slot] = ptr;
continue;
}
new_ptr_pp = &new_n->slots[slot];
cursor = ptr;
goto descend;
}
pr_devel("-- compress node %p --\n", new_n);
/* Count up the number of empty slots in this node and work out the
* subtree leaf count.
*/
new_n->nr_leaves_on_branch = 0;
nr_free = 0;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = new_n->slots[slot];
if (!ptr)
nr_free++;
else if (assoc_array_ptr_is_leaf(ptr))
new_n->nr_leaves_on_branch++;
}
pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
/* See what we can fold in */
next_slot = 0;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
struct assoc_array_shortcut *s;
struct assoc_array_node *child;
ptr = new_n->slots[slot];
if (!ptr || assoc_array_ptr_is_leaf(ptr))
continue;
s = NULL;
if (assoc_array_ptr_is_shortcut(ptr)) {
s = assoc_array_ptr_to_shortcut(ptr);
ptr = s->next_node;
}
child = assoc_array_ptr_to_node(ptr);
new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
if (child->nr_leaves_on_branch <= nr_free + 1) {
/* Fold the child node into this one */
pr_devel("[%d] fold node %lu/%d [nx %d]\n",
slot, child->nr_leaves_on_branch, nr_free + 1,
next_slot);
/* We would already have reaped an intervening shortcut
* on the way back up the tree.
*/
BUG_ON(s);
new_n->slots[slot] = NULL;
nr_free++;
if (slot < next_slot)
next_slot = slot;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
struct assoc_array_ptr *p = child->slots[i];
if (!p)
continue;
BUG_ON(assoc_array_ptr_is_meta(p));
while (new_n->slots[next_slot])
next_slot++;
BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
new_n->slots[next_slot++] = p;
nr_free--;
}
kfree(child);
} else {
pr_devel("[%d] retain node %lu/%d [nx %d]\n",
slot, child->nr_leaves_on_branch, nr_free + 1,
next_slot);
}
}
pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
nr_leaves_on_tree = new_n->nr_leaves_on_branch;
/* Excise this node if it is singly occupied by a shortcut */
if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
if ((ptr = new_n->slots[slot]))
break;
if (assoc_array_ptr_is_meta(ptr) &&
assoc_array_ptr_is_shortcut(ptr)) {
pr_devel("excise node %p with 1 shortcut\n", new_n);
new_s = assoc_array_ptr_to_shortcut(ptr);
new_parent = new_n->back_pointer;
slot = new_n->parent_slot;
kfree(new_n);
if (!new_parent) {
new_s->back_pointer = NULL;
new_s->parent_slot = 0;
new_root = ptr;
goto gc_complete;
}
if (assoc_array_ptr_is_shortcut(new_parent)) {
/* We can discard any preceding shortcut also */
struct assoc_array_shortcut *s =
assoc_array_ptr_to_shortcut(new_parent);
pr_devel("excise preceding shortcut\n");
new_parent = new_s->back_pointer = s->back_pointer;
slot = new_s->parent_slot = s->parent_slot;
kfree(s);
if (!new_parent) {
new_s->back_pointer = NULL;
new_s->parent_slot = 0;
new_root = ptr;
goto gc_complete;
}
}
new_s->back_pointer = new_parent;
new_s->parent_slot = slot;
new_n = assoc_array_ptr_to_node(new_parent);
new_n->slots[slot] = ptr;
goto ascend_old_tree;
}
}
/* Excise any shortcuts we might encounter that point to nodes that
* only contain leaves.
*/
ptr = new_n->back_pointer;
if (!ptr)
goto gc_complete;
if (assoc_array_ptr_is_shortcut(ptr)) {
new_s = assoc_array_ptr_to_shortcut(ptr);
new_parent = new_s->back_pointer;
slot = new_s->parent_slot;
if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
struct assoc_array_node *n;
pr_devel("excise shortcut\n");
new_n->back_pointer = new_parent;
new_n->parent_slot = slot;
kfree(new_s);
if (!new_parent) {
new_root = assoc_array_node_to_ptr(new_n);
goto gc_complete;
}
n = assoc_array_ptr_to_node(new_parent);
n->slots[slot] = assoc_array_node_to_ptr(new_n);
}
} else {
new_parent = ptr;
}
new_n = assoc_array_ptr_to_node(new_parent);
ascend_old_tree:
ptr = node->back_pointer;
if (assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
} else {
slot = node->parent_slot;
cursor = ptr;
}
BUG_ON(!ptr);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:
pr_devel("enomem\n");
assoc_array_destroy_subtree(new_root, edit->ops);
kfree(edit);
return -ENOMEM;
}
| 179,518 | 1,270 |
32773475763762926162159414643734330465
| null | null | null |
|
linux
|
2febc839133280d5a5e8e1179c94ea674489dae2
| 1 |
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
struct hrtimer *timer;
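	/* Hedged note (CWE-362 per dataset): pit_state.timer is inspected and
	 * restarted below without holding the PIT state lock, racing with
	 * concurrent reprogramming of the timer. */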
if (!kvm_vcpu_is_bsp(vcpu) || !pit)
return;
timer = &pit->pit_state.timer;
if (hrtimer_cancel(timer))
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
|
CWE-362
| 179,519 | 1,271 |
119883143059227327169610043313158814661
| null | null | null |
linux
|
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
| 1 |
static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
struct msr_data msr;
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
msr.data = data;
msr.index = ecx;
msr.host_initiated = false;
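	/* Hedged note (CWE-264 per dataset): the guest-controlled value is
	 * forwarded without checking for non-canonical addresses in MSRs that
	 * hold linear addresses, which the host-side write may fault on. */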
if (vmx_set_msr(vcpu, &msr) != 0) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(vcpu, 0);
return 1;
}
trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(vcpu);
return 1;
}
|
CWE-264
| 179,521 | 1,272 |
236270974209097227870081475907257735246
| null | null | null |
linux
|
350b8bdd689cd2ab2c67c8a86a0be86cfa0751a7
| 1 |
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
gfn_t gfn, end_gfn;
pfn_t pfn;
int r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int flags;
/* check if iommu exists and in use */
if (!domain)
return 0;
gfn = slot->base_gfn;
end_gfn = gfn + slot->npages;
flags = IOMMU_READ;
if (!(slot->flags & KVM_MEM_READONLY))
flags |= IOMMU_WRITE;
if (!kvm->arch.iommu_noncoherent)
flags |= IOMMU_CACHE;
while (gfn < end_gfn) {
unsigned long page_size;
/* Check if already mapped */
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
gfn += 1;
continue;
}
/* Get the page size we could use to map */
page_size = kvm_host_page_size(kvm, gfn);
/* Make sure the page_size does not exceed the memslot */
while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
page_size >>= 1;
/* Make sure gfn is aligned to the page size we want to map */
while ((gfn << PAGE_SHIFT) & (page_size - 1))
page_size >>= 1;
/* Make sure hva is aligned to the page size we want to map */
while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
page_size >>= 1;
/*
* Pin all pages we are about to map in memory. This is
* important because we unmap and unpin in 4kb steps later.
*/
pfn = kvm_pin_pages(slot, gfn, page_size);
if (is_error_noslot_pfn(pfn)) {
gfn += 1;
continue;
}
/* Map into IO address space */
r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
page_size, flags);
if (r) {
printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%llx\n", pfn);
goto unmap_pages;
}
gfn += page_size >> PAGE_SHIFT;
}
return 0;
unmap_pages:
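	/* Hedged note (CWE-189 per dataset): the third argument below is the
	 * absolute gfn reached so far, but kvm_iommu_put_pages expects a page
	 * count, so far too many pages can be unpinned on this error path. */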
kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
return r;
}
|
CWE-189
| 179,523 | 1,274 |
150168140098793637323703932111029602132
| null | null | null |
php-src
|
2fefae47716d501aec41c1102f3fd4531f070b05
| 1 |
static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)
{
u_short type, class, dlen;
u_long ttl;
long n, i;
u_short s;
u_char *tp, *p;
char name[MAXHOSTNAMELEN];
int have_v6_break = 0, in_v6_break = 0;
*subarray = NULL;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
GETSHORT(type, cp);
GETSHORT(class, cp);
GETLONG(ttl, cp);
GETSHORT(dlen, cp);
if (type_to_fetch != T_ANY && type != type_to_fetch) {
cp += dlen;
return cp;
}
if (!store) {
cp += dlen;
return cp;
}
ALLOC_INIT_ZVAL(*subarray);
array_init(*subarray);
add_assoc_string(*subarray, "host", name, 1);
add_assoc_string(*subarray, "class", "IN", 1);
add_assoc_long(*subarray, "ttl", ttl);
if (raw) {
add_assoc_long(*subarray, "type", type);
add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1);
cp += dlen;
return cp;
}
switch (type) {
case DNS_T_A:
add_assoc_string(*subarray, "type", "A", 1);
snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]);
add_assoc_string(*subarray, "ip", name, 1);
cp += dlen;
break;
case DNS_T_MX:
add_assoc_string(*subarray, "type", "MX", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
/* no break; */
case DNS_T_CNAME:
if (type == DNS_T_CNAME) {
add_assoc_string(*subarray, "type", "CNAME", 1);
}
/* no break; */
case DNS_T_NS:
if (type == DNS_T_NS) {
add_assoc_string(*subarray, "type", "NS", 1);
}
/* no break; */
case DNS_T_PTR:
if (type == DNS_T_PTR) {
add_assoc_string(*subarray, "type", "PTR", 1);
}
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_HINFO:
/* See RFC 1010 for values */
add_assoc_string(*subarray, "type", "HINFO", 1);
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1);
cp += n;
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "os", (char*)cp, n, 1);
cp += n;
break;
case DNS_T_TXT:
{
int ll = 0;
zval *entries = NULL;
add_assoc_string(*subarray, "type", "TXT", 1);
tp = emalloc(dlen + 1);
MAKE_STD_ZVAL(entries);
array_init(entries);
while (ll < dlen) {
n = cp[ll];
if ((ll + n) >= dlen) {
n = dlen - (ll + 1);
}
memcpy(tp + ll , cp + ll + 1, n);
add_next_index_stringl(entries, cp + ll + 1, n, 1);
ll = ll + n + 1;
}
tp[dlen] = '\0';
cp += dlen;
add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0);
add_assoc_zval(*subarray, "entries", entries);
}
break;
case DNS_T_SOA:
add_assoc_string(*subarray, "type", "SOA", 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "mname", name, 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "rname", name, 1);
GETLONG(n, cp);
add_assoc_long(*subarray, "serial", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "refresh", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "retry", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "expire", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "minimum-ttl", n);
break;
case DNS_T_AAAA:
tp = (u_char*)name;
for(i=0; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "type", "AAAA", 1);
add_assoc_string(*subarray, "ipv6", name, 1);
break;
case DNS_T_A6:
p = cp;
add_assoc_string(*subarray, "type", "A6", 1);
n = ((int)cp[0]) & 0xFF;
cp++;
add_assoc_long(*subarray, "masklen", n);
tp = (u_char*)name;
if (n > 15) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
}
if (n % 16 > 8) {
/* Partial short */
if (cp[0] != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
sprintf((char*)tp, "%x", cp[0] & 0xFF);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
cp++;
}
for (i = (n + 8) / 16; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "ipv6", name, 1);
if (cp < p + dlen) {
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "chain", name, 1);
}
break;
case DNS_T_SRV:
add_assoc_string(*subarray, "type", "SRV", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "weight", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "port", n);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_NAPTR:
add_assoc_string(*subarray, "type", "NAPTR", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "order", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pref", n);
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1);
cp += n;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "replacement", name, 1);
break;
default:
zval_ptr_dtor(subarray);
*subarray = NULL;
cp += dlen;
break;
}
return cp;
}
|
CWE-119
| 179,526 | 1,277 |
192550270728735179499592366180467891975
| null | null | null |
file
|
93e063ee374b6a75729df9e7201fb511e47e259d
| 1 |
cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
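		/* Hedged note (CWE-20 per dataset): ofs is untrusted and only
		 * the upper bound (q > e) is checked below; an underflowing
		 * offset can leave q pointing before p. */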
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
| CWE-20 | 179,536 | 1,285 | 39554699879590961013357878814695740567 | null | null | null |
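In cdf_read_property_info() above, every fixed-size read such as (void)memcpy(&s16, &q[o4], sizeof(s16)) trusts the file-controlled offset o4 without comparing it to the section end e, and q itself comes from an unchecked 32-bit offset whose subtraction can wrap. The fix commit recorded for this row tightens this validation; the helper below is an illustrative bounds guard under those assumptions, not the verbatim patch:

```c
#include <stddef.h>
#include <stdint.h>

/* Hedged sketch: return non-zero only if `len` bytes at q[off] lie
 * entirely inside the section ending at `e`. Structured to avoid the
 * pointer-arithmetic wraparound the vulnerable code is exposed to. */
static int cdf_prop_in_bounds(const uint8_t *q, const uint8_t *e,
                              size_t off, size_t len)
{
    if (q > e)                           /* already past the end */
        return 0;
    if (off > (size_t)(e - q))           /* offset alone overruns */
        return 0;
    return len <= (size_t)(e - q) - off; /* the read itself fits */
}
```

Each memcpy in the type switch would then be gated on cdf_prop_in_bounds(q, e, o4, sizeof(...)), jumping to out on failure.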
file | 40bade80cbe2af1d0b2cd0420cebd5d5905a2382 | 1 |
cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
{
size_t i, j;
cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size);
DPRINTF(("Chain:"));
for (j = i = 0; sid >= 0; i++, j++) {
DPRINTF((" %d", sid));
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Counting chain loop limit"));
errno = EFTYPE;
return (size_t)-1;
}
if (sid > maxsector) {
DPRINTF(("Sector %d > %d\n", sid, maxsector));
errno = EFTYPE;
return (size_t)-1;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (i == 0) {
DPRINTF((" none, sid: %d\n", sid));
return (size_t)-1;
}
DPRINTF(("\n"));
return i;
}
| CWE-20 | 179,537 | 1,286 | 128863893313877960752074141895244297013 | null | null | null |
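The CWE-20 defect in cdf_count_chain() is the maxsector bound: sat->sat_len * size counts bytes, while sid indexes 4-byte sat_tab entries, so `sid > maxsector` admits indices up to four times past the table (and the boundary value itself passes the strict comparison). The upstream fix divides by the entry size; the standalone validator below sketches that correction:

```c
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#ifndef EFTYPE
#define EFTYPE EINVAL           /* cdf.c carries the same fallback */
#endif

typedef int32_t cdf_secid_t;

/* Hedged sketch: validate a SAT chain index against the number of
 * 4-byte entries in the table, rejecting the boundary value too. */
static int sid_in_range(cdf_secid_t sid, size_t sat_len, size_t sec_size)
{
    size_t maxsector = (sat_len * sec_size) / sizeof(cdf_secid_t);

    if (sid < 0 || (size_t)sid >= maxsector) {
        errno = EFTYPE;
        return 0;
    }
    return 1;
}
```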
file | 36fadd29849b8087af9f4586f89dbf74ea45be67 | 1 |
cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
const void *p, size_t tail, int line)
{
const char *b = (const char *)sst->sst_tab;
const char *e = ((const char *)p) + tail;
(void)&line;
if (e >= b && (size_t)(e - b) <= CDF_SEC_SIZE(h) * sst->sst_len)
return 0;
DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
" > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len));
errno = EFTYPE;
return -1;
}
| CWE-189 | 179,538 | 1,287 | 126331237157471830789816841088958865442 | null | null | null |
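The CWE-189 tag matches the unchecked arithmetic in cdf_check_stream_offset(): CDF_SEC_SIZE(h) * sst->sst_len can overflow size_t, and ((const char *)p) + tail can wrap, either of which lets the comparison pass for pointers outside the stream. The sketch below illustrates one piece of the numeric hardening — an overflow-safe product — and is not claimed to be the verbatim patch:

```c
#include <stddef.h>
#include <stdint.h>

/* Hedged sketch: compute sec_size * sst_len, failing instead of
 * silently wrapping when the product exceeds SIZE_MAX. */
static int stream_size_checked(size_t sec_size, size_t sst_len,
                               size_t *out)
{
    if (sst_len != 0 && sec_size > SIZE_MAX / sst_len)
        return -1;              /* product would overflow size_t */
    *out = sec_size * sst_len;
    return 0;
}
```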
linux | 844817e47eef14141cf59b8d5ac08dd11c0a9189 | 1 |
static int picolcd_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *raw_data, int size)
{
struct picolcd_data *data = hid_get_drvdata(hdev);
unsigned long flags;
int ret = 0;
if (!data)
return 1;
if (report->id == REPORT_KEY_STATE) {
if (data->input_keys)
ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
} else if (report->id == REPORT_IR_DATA) {
ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
} else {
spin_lock_irqsave(&data->lock, flags);
/*
* We let the caller of picolcd_send_and_wait() check if the
* report we got is one of the expected ones or not.
*/
if (data->pending) {
memcpy(data->pending->raw_data, raw_data+1, size-1);
data->pending->raw_size = size-1;
data->pending->in_report = report;
complete(&data->pending->ready);
}
spin_unlock_irqrestore(&data->lock, flags);
}
picolcd_debug_raw_event(data, hdev, report, raw_data, size);
return 1;
}
| CWE-119 | 179,540 | 1,288 | 336846250312050028232066323392072383213 | null | null | null |
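In picolcd_raw_event(), size arrives straight from the device, yet memcpy(data->pending->raw_data, raw_data+1, size-1) writes into a fixed pending buffer, and size == 0 would wrap size-1 around. The upstream fix rejects oversized reports up front; the sketch below mirrors that idea, with the 64-byte limit (assumed to match pending->raw_data) carried over as an assumption:

```c
#include <string.h>

#define PICOLCD_RAW_MAX 64      /* assumed size of pending->raw_data */

/* Hedged sketch: copy a device report's payload only after validating
 * the device-supplied size against the destination buffer. */
static int copy_report_payload(unsigned char *dst,
                               const unsigned char *raw_data, int size)
{
    if (size < 1 || size > PICOLCD_RAW_MAX)
        return -1;              /* never trust a device-reported size */
    memcpy(dst, raw_data + 1, size - 1);  /* strip the report ID byte */
    return size - 1;
}
```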
linux | 6817ae225cd650fb1c3295d769298c38b1eba818 | 1 |
static void command_port_read_callback(struct urb *urb)
{
struct usb_serial_port *command_port = urb->context;
struct whiteheat_command_private *command_info;
int status = urb->status;
unsigned char *data = urb->transfer_buffer;
int result;
command_info = usb_get_serial_port_data(command_port);
if (!command_info) {
dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
return;
}
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
if (status != -ENOENT)
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
wake_up(&command_info->wait_command);
return;
}
usb_serial_debug_data(&command_port->dev, __func__, urb->actual_length, data);
if (data[0] == WHITEHEAT_CMD_COMPLETE) {
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_CMD_FAILURE) {
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_EVENT) {
/* These are unsolicited reports from the firmware, hence no
waiting command to wakeup */
dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
} else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
memcpy(command_info->result_buffer, &data[1],
urb->actual_length - 1);
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
wake_up(&command_info->wait_command);
} else
dev_dbg(&urb->dev->dev, "%s - bad reply from firmware\n", __func__);
/* Continue trying to always read */
result = usb_submit_urb(command_port->read_urb, GFP_ATOMIC);
if (result)
dev_dbg(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n",
__func__, result);
}
| CWE-119 | 179,541 | 1,289 | 11995144287059041052849383480810782444 | null | null | null |
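In the WHITEHEAT_GET_DTR_RTS branch above, urb->actual_length is device-controlled, so the memcpy can overrun the fixed result_buffer, and actual_length == 0 underflows the copy length. The recorded fix adds a length check on that branch; the sketch below shows the shape of the guard, with WHITEHEAT_RESULT_LEN an assumed buffer size:

```c
#include <string.h>

#define WHITEHEAT_RESULT_LEN 64 /* assumed sizeof(result_buffer) */

/* Hedged sketch: accept a GET_DTR_RTS reply only when its payload
 * fits the fixed-size result buffer. */
static int store_dtr_rts_result(unsigned char *result_buffer,
                                const unsigned char *data,
                                unsigned int actual_length)
{
    if (actual_length < 1 ||
        actual_length - 1 > WHITEHEAT_RESULT_LEN)
        return -1;              /* malformed or oversized reply */
    memcpy(result_buffer, &data[1], actual_length - 1);
    return 0;
}
```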
linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | 1 |
static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
rdesc[11] = rdesc[16] = 0xff;
rdesc[12] = rdesc[17] = 0x03;
}
return rdesc;
}
| CWE-119 | 179,542 | 1,290 | 45538586871323178624538960674889390064 | null | null | null |
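This last record is a textbook off-by-one: the guard *rsize >= 17 only proves indices up to 16 are valid, yet the body writes rdesc[17]. The fix commit recorded above ("fix off by one error in various _report_fixup routines") raises the threshold to 18; restated as a standalone sketch:

```c
#include <stdint.h>

/* Corrected bounds check: indices 16 and 17 are written, so the
 * descriptor must be at least 18 bytes long (>= 18, not >= 17). */
static void cymotion_fixup(uint8_t *rdesc, unsigned int rsize)
{
    if (rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
        rdesc[11] = rdesc[16] = 0xff;
        rdesc[12] = rdesc[17] = 0x03;
    }
}
```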