Columns: `project` (string, 633 classes), `commit_id` (string), `target` (int64, 0 or 1), `func` (string, the full function body), `cwe` (string, 131 classes, nullable), `big_vul_idx` (float64, nullable), `idx` (int64), `hash` (string), `size` (float64, nullable), `message` (string, nullable), `dataset` (string, single value).

project | commit_id | target | func | cwe | big_vul_idx | idx | hash | size | message | dataset |
---|---|---|---|---|---|---|---|---|---|---|
linux
|
70945643722ffeac779d2529a348f99567fa5c33
| 1 |
cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
char *mount_data_global, const char *devname)
{
int rc;
int xid;
struct smb_vol *volume_info;
struct cifsSesInfo *pSesInfo;
struct cifsTconInfo *tcon;
struct TCP_Server_Info *srvTcp;
char *full_path;
char *mount_data = mount_data_global;
struct tcon_link *tlink;
#ifdef CONFIG_CIFS_DFS_UPCALL
struct dfs_info3_param *referrals = NULL;
unsigned int num_referrals = 0;
int referral_walks_count = 0;
try_mount_again:
#endif
rc = 0;
tcon = NULL;
pSesInfo = NULL;
srvTcp = NULL;
full_path = NULL;
tlink = NULL;
xid = GetXid();
volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
if (!volume_info) {
rc = -ENOMEM;
goto out;
}
if (cifs_parse_mount_options(mount_data, devname, volume_info)) {
rc = -EINVAL;
goto out;
}
if (volume_info->nullauth) {
cFYI(1, "null user");
volume_info->username = "";
} else if (volume_info->username) {
/* BB fixme parse for domain name here */
cFYI(1, "Username: %s", volume_info->username);
} else {
cifserror("No username specified");
/* In userspace mount helper we can get user name from alternate
locations such as env variables and files on disk */
rc = -EINVAL;
goto out;
}
/* this is needed for ASCII cp to Unicode converts */
if (volume_info->iocharset == NULL) {
/* load_nls_default cannot return null */
volume_info->local_nls = load_nls_default();
} else {
volume_info->local_nls = load_nls(volume_info->iocharset);
if (volume_info->local_nls == NULL) {
cERROR(1, "CIFS mount error: iocharset %s not found",
volume_info->iocharset);
rc = -ELIBACC;
goto out;
}
}
cifs_sb->local_nls = volume_info->local_nls;
/* get a reference to a tcp session */
srvTcp = cifs_get_tcp_session(volume_info);
if (IS_ERR(srvTcp)) {
rc = PTR_ERR(srvTcp);
goto out;
}
/* get a reference to a SMB session */
pSesInfo = cifs_get_smb_ses(srvTcp, volume_info);
if (IS_ERR(pSesInfo)) {
rc = PTR_ERR(pSesInfo);
pSesInfo = NULL;
goto mount_fail_check;
}
setup_cifs_sb(volume_info, cifs_sb);
if (pSesInfo->capabilities & CAP_LARGE_FILES)
sb->s_maxbytes = MAX_LFS_FILESIZE;
else
sb->s_maxbytes = MAX_NON_LFS;
/* BB FIXME fix time_gran to be larger for LANMAN sessions */
sb->s_time_gran = 100;
/* search for existing tcon to this server share */
tcon = cifs_get_tcon(pSesInfo, volume_info);
if (IS_ERR(tcon)) {
rc = PTR_ERR(tcon);
tcon = NULL;
goto remote_path_check;
}
/* do not care if following two calls succeed - informational */
if (!tcon->ipc) {
CIFSSMBQFSDeviceInfo(xid, tcon);
CIFSSMBQFSAttributeInfo(xid, tcon);
}
/* tell server which Unix caps we support */
if (tcon->ses->capabilities & CAP_UNIX)
/* reset of caps checks mount to see if unix extensions
disabled for just this mount */
reset_cifs_unix_caps(xid, tcon, sb, volume_info);
else
tcon->unix_ext = 0; /* server does not support them */
/* convert forward to back slashes in prepath here if needed */
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
cifs_sb->rsize = 1024 * 127;
cFYI(DBG2, "no very large read support, rsize now 127K");
}
if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
cifs_sb->wsize = min(cifs_sb->wsize,
(tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
cifs_sb->rsize = min(cifs_sb->rsize,
(tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
remote_path_check:
/* check if a whole path (including prepath) is not remote */
if (!rc && cifs_sb->prepathlen && tcon) {
/* build_path_to_root works only when we have a valid tcon */
full_path = cifs_build_path_to_root(cifs_sb, tcon);
if (full_path == NULL) {
rc = -ENOMEM;
goto mount_fail_check;
}
rc = is_path_accessible(xid, tcon, cifs_sb, full_path);
if (rc != 0 && rc != -EREMOTE) {
kfree(full_path);
goto mount_fail_check;
}
kfree(full_path);
}
/* get referral if needed */
if (rc == -EREMOTE) {
#ifdef CONFIG_CIFS_DFS_UPCALL
if (referral_walks_count > MAX_NESTED_LINKS) {
/*
* BB: when we implement proper loop detection,
* we will remove this check. But now we need it
* to prevent an indefinite loop if 'DFS tree' is
* misconfigured (i.e. has loops).
*/
rc = -ELOOP;
goto mount_fail_check;
}
/* convert forward to back slashes in prepath here if needed */
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
convert_delimiter(cifs_sb->prepath,
CIFS_DIR_SEP(cifs_sb));
full_path = build_unc_path_to_root(volume_info, cifs_sb);
if (IS_ERR(full_path)) {
rc = PTR_ERR(full_path);
goto mount_fail_check;
}
cFYI(1, "Getting referral for: %s", full_path);
rc = get_dfs_path(xid, pSesInfo , full_path + 1,
cifs_sb->local_nls, &num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (!rc && num_referrals > 0) {
char *fake_devname = NULL;
if (mount_data != mount_data_global)
kfree(mount_data);
mount_data = cifs_compose_mount_options(
cifs_sb->mountdata, full_path + 1,
referrals, &fake_devname);
free_dfs_info_array(referrals, num_referrals);
kfree(fake_devname);
kfree(full_path);
if (IS_ERR(mount_data)) {
rc = PTR_ERR(mount_data);
mount_data = NULL;
goto mount_fail_check;
}
if (tcon)
cifs_put_tcon(tcon);
else if (pSesInfo)
cifs_put_smb_ses(pSesInfo);
cleanup_volume_info(&volume_info);
referral_walks_count++;
FreeXid(xid);
goto try_mount_again;
}
#else /* No DFS support, return error on mount */
rc = -EOPNOTSUPP;
#endif
}
if (rc)
goto mount_fail_check;
/* now, hang the tcon off of the superblock */
tlink = kzalloc(sizeof *tlink, GFP_KERNEL);
if (tlink == NULL) {
rc = -ENOMEM;
goto mount_fail_check;
}
tlink->tl_uid = pSesInfo->linux_uid;
tlink->tl_tcon = tcon;
tlink->tl_time = jiffies;
set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
cifs_sb->master_tlink = tlink;
spin_lock(&cifs_sb->tlink_tree_lock);
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
mount_fail_check:
/* on error free sesinfo and tcon struct if needed */
if (rc) {
if (mount_data != mount_data_global)
kfree(mount_data);
/* If find_unc succeeded then rc == 0 so we can not end */
/* up accidentally freeing someone elses tcon struct */
if (tcon)
cifs_put_tcon(tcon);
else if (pSesInfo)
cifs_put_smb_ses(pSesInfo);
else
cifs_put_tcp_session(srvTcp);
goto out;
}
/* volume_info->password is freed above when existing session found
(in which case it is not needed anymore) but when new sesion is created
the password ptr is put in the new session structure (in which case the
password will be freed at unmount time) */
out:
/* zero out password before freeing */
cleanup_volume_info(&volume_info);
FreeXid(xid);
return rc;
}
|
CWE-20
| 178,917 | 757 |
325540286319854190045703657104874730519
| null | null | null |
linux
|
c85ce65ecac078ab1a1835c87c4a6319cf74660a
| 1 |
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
const struct b43_dma_ops *ops = ring->ops;
struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
struct b43_rxhdr_fw4 *rxhdr;
struct sk_buff *skb;
u16 len;
int err;
dma_addr_t dmaaddr;
desc = ops->idx2desc(ring, *slot, &meta);
sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
skb = meta->skb;
rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
len = le16_to_cpu(rxhdr->frame_len);
if (len == 0) {
int i = 0;
do {
udelay(2);
barrier();
len = le16_to_cpu(rxhdr->frame_len);
} while (len == 0 && i++ < 5);
if (unlikely(len == 0)) {
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
}
if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
/* Something went wrong with the DMA.
* The device did not touch the buffer and did not overwrite the poison. */
b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
if (unlikely(len > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
* This should never happen, as we try to allocate buffers
* big enough. So simply ignore this packet.
*/
int cnt = 0;
s32 tmp = len;
while (1) {
desc = ops->idx2desc(ring, *slot, &meta);
/* recycle the descriptor buffer. */
b43_poison_rx_buffer(ring, meta->skb);
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
*slot = next_slot(ring, *slot);
cnt++;
tmp -= ring->rx_buffersize;
if (tmp <= 0)
break;
}
b43err(ring->dev->wl, "DMA RX buffer too small "
"(len: %u, buffer: %u, nr-dropped: %d)\n",
len, ring->rx_buffersize, cnt);
goto drop;
}
dmaaddr = meta->dmaaddr;
err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
if (unlikely(err)) {
b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
goto drop_recycle_buffer;
}
unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
skb_put(skb, len + ring->frameoffset);
skb_pull(skb, ring->frameoffset);
b43_rx(ring->dev, skb, rxhdr);
drop:
return;
drop_recycle_buffer:
/* Poison and recycle the RX buffer. */
b43_poison_rx_buffer(ring, skb);
sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
|
CWE-119
| 178,918 | 758 |
104346933262330900978913399332634585939
| null | null | null |
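Note on the CWE-119 row above: the length check compares `len` against `ring->rx_buffersize` alone, yet the buffer is later filled via `skb_put(skb, len + ring->frameoffset)`. A minimal sketch of an offset-aware check follows; the names are hypothetical and this is not the b43 fix itself:

```c
#include <stdbool.h>
#include <stddef.h>

/* Offset-aware length check: data stored `frameoffset` bytes into a
 * buffer of `buffersize` bytes must satisfy offset + len <= buffersize. */
static bool rx_len_fits(size_t len, size_t frameoffset, size_t buffersize)
{
    return frameoffset <= buffersize && len <= buffersize - frameoffset;
}
```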
linux
|
c2183d1e9b3f313dd8ba2b1b0197c8d9fb86a7ae
| 1 |
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
int err = -ENOMEM;
char *buf;
struct qstr name;
buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
if (!buf)
goto err;
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
goto err;
err = -ENAMETOOLONG;
if (outarg.namelen > FUSE_NAME_MAX)
goto err;
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
if (err)
goto err;
fuse_copy_finish(cs);
buf[outarg.namelen] = 0;
name.hash = full_name_hash(name.name, name.len);
down_read(&fc->killsb);
err = -ENOENT;
if (fc->sb)
err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
up_read(&fc->killsb);
kfree(buf);
return err;
err:
kfree(buf);
fuse_copy_finish(cs);
return err;
}
|
CWE-119
| 178,919 | 759 |
64929278621445786641724855732949782650
| null | null | null |
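In the FUSE handler above, `outarg.namelen` is capped at `FUSE_NAME_MAX` but never checked against the remaining message size before `fuse_copy_one(cs, buf, outarg.namelen + 1)`. A hedged sketch of the missing consistency check (names illustrative, not the FUSE wire format):

```c
#include <stdbool.h>
#include <stddef.h>

/* A declared name length is trustworthy only if header + name + NUL
 * exactly accounts for the bytes the message actually carries. */
static bool namelen_consistent(size_t msg_size, size_t header_size,
                               size_t namelen)
{
    return msg_size > header_size &&
           msg_size - header_size == namelen + 1;
}
```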
linux
|
0d0138ebe24b94065580bd2601f8bb7eb6152f56
| 1 |
int ptrace_setxregs(struct task_struct *child, void __user *uregs)
{
struct thread_info *ti = task_thread_info(child);
struct pt_regs *regs = task_pt_regs(child);
elf_xtregs_t *xtregs = uregs;
int ret = 0;
#if XTENSA_HAVE_COPROCESSORS
/* Flush all coprocessors before we overwrite them. */
coprocessor_flush_all(ti);
coprocessor_release_all(ti);
ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
sizeof(xtregs_coprocessor_t));
#endif
ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
sizeof(xtregs->opt));
ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
sizeof(xtregs->user));
return ret ? -EFAULT : 0;
}
|
CWE-20
| 179,021 | 840 |
313347442751777676104527709698435827373
| null | null | null |
linux
|
2b472611a32a72f4a118c069c2d62a1a3f087afd
| 1 |
static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
struct mm_struct *mm;
struct mm_slot *slot;
struct vm_area_struct *vma;
struct rmap_item *rmap_item;
if (list_empty(&ksm_mm_head.mm_list))
return NULL;
slot = ksm_scan.mm_slot;
if (slot == &ksm_mm_head) {
/*
* A number of pages can hang around indefinitely on per-cpu
* pagevecs, raised page count preventing write_protect_page
* from merging them. Though it doesn't really matter much,
* it is puzzling to see some stuck in pages_volatile until
* other activity jostles them out, and they also prevented
* LTP's KSM test from succeeding deterministically; so drain
* them here (here rather than on entry to ksm_do_scan(),
* so we don't IPI too often when pages_to_scan is set low).
*/
lru_add_drain_all();
root_unstable_tree = RB_ROOT;
spin_lock(&ksm_mmlist_lock);
slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
ksm_scan.mm_slot = slot;
spin_unlock(&ksm_mmlist_lock);
next_mm:
ksm_scan.address = 0;
ksm_scan.rmap_list = &slot->rmap_list;
}
mm = slot->mm;
down_read(&mm->mmap_sem);
if (ksm_test_exit(mm))
vma = NULL;
else
vma = find_vma(mm, ksm_scan.address);
for (; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_MERGEABLE))
continue;
if (ksm_scan.address < vma->vm_start)
ksm_scan.address = vma->vm_start;
if (!vma->anon_vma)
ksm_scan.address = vma->vm_end;
while (ksm_scan.address < vma->vm_end) {
if (ksm_test_exit(mm))
break;
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
if (IS_ERR_OR_NULL(*page)) {
ksm_scan.address += PAGE_SIZE;
cond_resched();
continue;
}
if (PageAnon(*page) ||
page_trans_compound_anon(*page)) {
flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page);
rmap_item = get_next_rmap_item(slot,
ksm_scan.rmap_list, ksm_scan.address);
if (rmap_item) {
ksm_scan.rmap_list =
&rmap_item->rmap_list;
ksm_scan.address += PAGE_SIZE;
} else
put_page(*page);
up_read(&mm->mmap_sem);
return rmap_item;
}
put_page(*page);
ksm_scan.address += PAGE_SIZE;
cond_resched();
}
}
if (ksm_test_exit(mm)) {
ksm_scan.address = 0;
ksm_scan.rmap_list = &slot->rmap_list;
}
/*
* Nuke all the rmap_items that are above this current rmap:
* because there were no VM_MERGEABLE vmas with such addresses.
*/
remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = list_entry(slot->mm_list.next,
struct mm_slot, mm_list);
if (ksm_scan.address == 0) {
/*
* We've completed a full scan of all vmas, holding mmap_sem
* throughout, and found no VM_MERGEABLE: so do the same as
* __ksm_exit does to remove this mm from all our lists now.
* This applies either when cleaning up after __ksm_exit
* (but beware: we can reach here even before __ksm_exit),
* or when all VM_MERGEABLE areas have been unmapped (and
* mmap_sem then protects against race with MADV_MERGEABLE).
*/
hlist_del(&slot->link);
list_del(&slot->mm_list);
spin_unlock(&ksm_mmlist_lock);
free_mm_slot(slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
up_read(&mm->mmap_sem);
mmdrop(mm);
} else {
spin_unlock(&ksm_mmlist_lock);
up_read(&mm->mmap_sem);
}
/* Repeat until we've completed scanning the whole list */
slot = ksm_scan.mm_slot;
if (slot != &ksm_mm_head)
goto next_mm;
ksm_scan.seqnr++;
return NULL;
}
|
CWE-362
| 179,043 | 853 |
257503113012619682041170750752251595007
| null | null | null |
linux
|
cae13fe4cc3f24820ffb990c09110626837e85d4
| 1 |
static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
{
struct frag *f;
struct list_head *item;
int rec, num, group;
BUG_ON (!data || !frags);
if (size < 2 * VBLK_SIZE_HEAD) {
ldm_error("Value of size is to small.");
return false;
}
group = get_unaligned_be32(data + 0x08);
rec = get_unaligned_be16(data + 0x0C);
num = get_unaligned_be16(data + 0x0E);
if ((num < 1) || (num > 4)) {
ldm_error ("A VBLK claims to have %d parts.", num);
return false;
}
if (rec >= num) {
ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
return false;
}
list_for_each (item, frags) {
f = list_entry (item, struct frag, list);
if (f->group == group)
goto found;
}
f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
if (!f) {
ldm_crit ("Out of memory.");
return false;
}
f->group = group;
f->num = num;
f->rec = rec;
f->map = 0xFF << num;
list_add_tail (&f->list, frags);
found:
if (f->map & (1 << rec)) {
ldm_error ("Duplicate VBLK, part %d.", rec);
f->map &= 0x7F; /* Mark the group as broken */
return false;
}
f->map |= (1 << rec);
data += VBLK_SIZE_HEAD;
size -= VBLK_SIZE_HEAD;
memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
return true;
}
|
CWE-119
| 179,044 | 854 |
164142535356774616118975210091758939882
| null | null | null |
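The reassembly buffer above is sized from the first fragment's `size` (`kmalloc(sizeof(*f) + size*num)`), so a later fragment with a different size can write past it in the final `memcpy`. A generic bounds-checked fragment copy, illustrative only:

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Copy a fragment into a reassembly buffer only when the target slice
 * [offset, offset + len) lies entirely inside the buffer's capacity. */
static bool frag_copy_checked(unsigned char *buf, size_t cap,
                              size_t offset, const void *data, size_t len)
{
    if (offset > cap || len > cap - offset)
        return false;
    memcpy(buf + offset, data, len);
    return true;
}
```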
linux
|
64f3b9e203bd06855072e295557dca1485a2ecba
| 1 |
static void ip_expire(unsigned long arg)
{
struct ipq *qp;
struct net *net;
qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
net = container_of(qp->q.net, struct net, ipv4.frags);
spin_lock(&qp->q.lock);
if (qp->q.last_in & INET_FRAG_COMPLETE)
goto out;
ipq_kill(qp);
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
struct sk_buff *head = qp->q.fragments;
rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out_rcu_unlock;
/*
* Only search router table for the head fragment,
* when defraging timeout at PRE_ROUTING HOOK.
*/
if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
const struct iphdr *iph = ip_hdr(head);
int err = ip_route_input(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
if (unlikely(err))
goto out_rcu_unlock;
/*
* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
if (skb_rtable(head)->rt_type != RTN_LOCAL)
goto out_rcu_unlock;
}
/* Send an ICMP "Fragment Reassembly Timeout" message. */
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
rcu_read_unlock();
}
out:
spin_unlock(&qp->q.lock);
ipq_put(qp);
}
| 179,045 | 855 |
133776853164301156762765270288111357500
| null | null | null |
|
linux
|
764355487ea220fdc2faf128d577d7f679b91f97
| 1 |
static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
struct super_block *s;
struct ecryptfs_sb_info *sbi;
struct ecryptfs_dentry_info *root_info;
const char *err = "Getting sb failed";
struct inode *inode;
struct path path;
int rc;
sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
if (!sbi) {
rc = -ENOMEM;
goto out;
}
rc = ecryptfs_parse_options(sbi, raw_data);
if (rc) {
err = "Error parsing options";
goto out;
}
s = sget(fs_type, NULL, set_anon_super, NULL);
if (IS_ERR(s)) {
rc = PTR_ERR(s);
goto out;
}
s->s_flags = flags;
rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
if (rc)
goto out1;
ecryptfs_set_superblock_private(s, sbi);
s->s_bdi = &sbi->bdi;
/* ->kill_sb() will take care of sbi after that point */
sbi = NULL;
s->s_op = &ecryptfs_sops;
s->s_d_op = &ecryptfs_dops;
err = "Reading sb failed";
rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
if (rc) {
ecryptfs_printk(KERN_WARNING, "kern_path() failed\n");
goto out1;
}
if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
rc = -EINVAL;
printk(KERN_ERR "Mount on filesystem of type "
"eCryptfs explicitly disallowed due to "
"known incompatibilities\n");
goto out_free;
}
ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
s->s_blocksize = path.dentry->d_sb->s_blocksize;
s->s_magic = ECRYPTFS_SUPER_MAGIC;
inode = ecryptfs_get_inode(path.dentry->d_inode, s);
rc = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free;
s->s_root = d_alloc_root(inode);
if (!s->s_root) {
iput(inode);
rc = -ENOMEM;
goto out_free;
}
rc = -ENOMEM;
root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
if (!root_info)
goto out_free;
/* ->kill_sb() will take care of root_info */
ecryptfs_set_dentry_private(s->s_root, root_info);
ecryptfs_set_dentry_lower(s->s_root, path.dentry);
ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
s->s_flags |= MS_ACTIVE;
return dget(s->s_root);
out_free:
path_put(&path);
out1:
deactivate_locked_super(s);
out:
if (sbi) {
ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
kmem_cache_free(ecryptfs_sb_info_cache, sbi);
}
printk(KERN_ERR "%s; rc = [%d]\n", err, rc);
return ERR_PTR(rc);
}
|
CWE-264
| 179,046 | 856 |
178146217980549522525269635608062302430
| null | null | null |
linux
|
0f22072ab50cac7983f9660d33974b45184da4f9
| 1 |
asmlinkage long sys_oabi_semtimedop(int semid,
struct oabi_sembuf __user *tsops,
unsigned nsops,
const struct timespec __user *timeout)
{
struct sembuf *sops;
struct timespec local_timeout;
long err;
int i;
if (nsops < 1)
return -EINVAL;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
__get_user_error(sops[i].sem_num, &tsops->sem_num, err);
__get_user_error(sops[i].sem_op, &tsops->sem_op, err);
__get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
tsops++;
}
if (timeout) {
/* copy this as well before changing domain protection */
err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout));
timeout = &local_timeout;
}
if (err) {
err = -EFAULT;
} else {
mm_segment_t fs = get_fs();
set_fs(KERNEL_DS);
err = sys_semtimedop(semid, sops, nsops, timeout);
set_fs(fs);
}
kfree(sops);
return err;
}
|
CWE-189
| 179,057 | 867 |
110952072524385946398682995361474109587
| null | null | null |
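The CWE-189 row above multiplies an unchecked user-supplied `nsops` into a `kmalloc` size, so a large count can wrap the allocation while the copy loop still runs `nsops` times. A sketch of an overflow-proof array allocation; `MAX_OPS` is a hypothetical cap playing the role a bound like the kernel's `SEMOPM` would play:

```c
#include <stdint.h>
#include <stdlib.h>

#define MAX_OPS 500  /* hypothetical upper bound on the element count */

/* Allocate nmemb elements of `size` bytes (size > 0), refusing counts
 * that would wrap the nmemb * size multiplication or exceed the cap. */
static void *alloc_ops(size_t nmemb, size_t size)
{
    if (nmemb < 1 || nmemb > MAX_OPS || nmemb > SIZE_MAX / size)
        return NULL;
    return malloc(nmemb * size);
}
```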
linux
|
d846f71195d57b0bbb143382647c2c6638b04c5a
| 1 |
static int do_replace(struct net *net, const void __user *user,
unsigned int len)
{
int ret, countersize;
struct ebt_table_info *newinfo;
struct ebt_replace tmp;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
if (len != sizeof(tmp) + tmp.entries_size) {
BUGPRINT("Wrong len argument\n");
return -EINVAL;
}
if (tmp.entries_size == 0) {
BUGPRINT("Entries_size never zero\n");
return -EINVAL;
}
/* overflow check */
if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
newinfo = vmalloc(sizeof(*newinfo) + countersize);
if (!newinfo)
return -ENOMEM;
if (countersize)
memset(newinfo->counters, 0, countersize);
newinfo->entries = vmalloc(tmp.entries_size);
if (!newinfo->entries) {
ret = -ENOMEM;
goto free_newinfo;
}
if (copy_from_user(
newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
BUGPRINT("Couldn't copy entries from userspace\n");
ret = -EFAULT;
goto free_entries;
}
ret = do_replace_finish(net, &tmp, newinfo);
if (ret == 0)
return ret;
free_entries:
vfree(newinfo->entries);
free_newinfo:
vfree(newinfo);
return ret;
}
|
CWE-20
| 179,068 | 877 |
1079727230191121500325975239533734205
| null | null | null |
linux
|
43629f8f5ea32a998d06d1bb41eefa0e821ff573
| 1 |
static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct bnep_connlist_req cl;
struct bnep_connadd_req ca;
struct bnep_conndel_req cd;
struct bnep_conninfo ci;
struct socket *nsock;
void __user *argp = (void __user *)arg;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
switch (cmd) {
case BNEPCONNADD:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
nsock = sockfd_lookup(ca.sock, &err);
if (!nsock)
return err;
if (nsock->sk->sk_state != BT_CONNECTED) {
sockfd_put(nsock);
return -EBADFD;
}
err = bnep_add_connection(&ca, nsock);
if (!err) {
if (copy_to_user(argp, &ca, sizeof(ca)))
err = -EFAULT;
} else
sockfd_put(nsock);
return err;
case BNEPCONNDEL:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
return bnep_del_connection(&cd);
case BNEPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
return -EFAULT;
if (cl.cnum <= 0)
return -EINVAL;
err = bnep_get_connlist(&cl);
if (!err && copy_to_user(argp, &cl, sizeof(cl)))
return -EFAULT;
return err;
case BNEPGETCONNINFO:
if (copy_from_user(&ci, argp, sizeof(ci)))
return -EFAULT;
err = bnep_get_conninfo(&ci);
if (!err && copy_to_user(argp, &ci, sizeof(ci)))
return -EFAULT;
return err;
default:
return -EINVAL;
}
return 0;
}
|
CWE-20
| 179,069 | 878 |
245883328951772850994795045080647916116
| null | null | null |
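The ioctl handler above copies fixed-size request structs straight from userspace (e.g. the `bnep_connadd_req` used for `BNEPCONNADD`) and nothing in the handler forces a NUL terminator on any embedded name string before the kernel reads it. A minimal sketch of the usual hardening step, with a hypothetical stand-in struct:

```c
#include <string.h>

struct conn_add_req {          /* illustrative stand-in for the ioctl arg */
    int  sock;
    char device[16];           /* fixed-size name copied from userspace */
};

/* Userspace controls every byte of the struct, so pin the terminator
 * before any in-kernel string function touches `device`. */
static void sanitize_req(struct conn_add_req *req)
{
    req->device[sizeof(req->device) - 1] = '\0';
}
```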
linux
|
6b0d6a9b4296fa16a28d10d416db7a770fc03287
| 1 |
static int br_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *group)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
unsigned long now = jiffies;
int err;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
goto out;
mp = br_multicast_new_group(br, port, group);
err = PTR_ERR(mp);
if (IS_ERR(mp))
goto err;
if (!port) {
hlist_add_head(&mp->mglist, &br->mglist);
mod_timer(&mp->timer, now + br->multicast_membership_interval);
goto out;
}
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->port == port)
goto found;
if ((unsigned long)p->port < (unsigned long)port)
break;
}
p = kzalloc(sizeof(*p), GFP_ATOMIC);
err = -ENOMEM;
if (unlikely(!p))
goto err;
p->addr = *group;
p->port = port;
p->next = *pp;
hlist_add_head(&p->mglist, &port->mglist);
setup_timer(&p->timer, br_multicast_port_group_expired,
(unsigned long)p);
setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
(unsigned long)p);
rcu_assign_pointer(*pp, p);
found:
mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
err = 0;
err:
spin_unlock(&br->multicast_lock);
return err;
}
|
CWE-399
| 179,076 | 883 |
126154348126534696648893961109982181130
| null | null | null |
linux
|
867c20265459d30a01b021a9c1e81fb4c5832aa9
| 1 |
static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
char *args, int lsm_rule, int audit_type)
{
int result;
if (entry->lsm[lsm_rule].rule)
return -EINVAL;
entry->lsm[lsm_rule].type = audit_type;
result = security_filter_rule_init(entry->lsm[lsm_rule].type,
Audit_equal, args,
&entry->lsm[lsm_rule].rule);
return result;
}
|
CWE-264
| 179,077 | 884 |
289210770505321273503318366769890960438
| null | null | null |
linux
|
7572777eef78ebdee1ecb7c258c0ef94d35bad16
| 1 |
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
unsigned int flags)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
struct fuse_ioctl_in inarg = {
.fh = ff->fh,
.cmd = cmd,
.arg = arg,
.flags = flags
};
struct fuse_ioctl_out outarg;
struct fuse_req *req = NULL;
struct page **pages = NULL;
struct page *iov_page = NULL;
struct iovec *in_iov = NULL, *out_iov = NULL;
unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
size_t in_size, out_size, transferred;
int err;
/* assume all the iovs returned by client always fits in a page */
BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
err = -ENOMEM;
pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
iov_page = alloc_page(GFP_KERNEL);
if (!pages || !iov_page)
goto out;
/*
* If restricted, initialize IO parameters as encoded in @cmd.
* RETRY from server is not allowed.
*/
if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
struct iovec *iov = page_address(iov_page);
iov->iov_base = (void __user *)arg;
iov->iov_len = _IOC_SIZE(cmd);
if (_IOC_DIR(cmd) & _IOC_WRITE) {
in_iov = iov;
in_iovs = 1;
}
if (_IOC_DIR(cmd) & _IOC_READ) {
out_iov = iov;
out_iovs = 1;
}
}
retry:
inarg.in_size = in_size = iov_length(in_iov, in_iovs);
inarg.out_size = out_size = iov_length(out_iov, out_iovs);
/*
* Out data can be used either for actual out data or iovs,
* make sure there always is at least one page.
*/
out_size = max_t(size_t, out_size, PAGE_SIZE);
max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
/* make sure there are enough buffer pages and init request with them */
err = -ENOMEM;
if (max_pages > FUSE_MAX_PAGES_PER_REQ)
goto out;
while (num_pages < max_pages) {
pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
if (!pages[num_pages])
goto out;
num_pages++;
}
req = fuse_get_req(fc);
if (IS_ERR(req)) {
err = PTR_ERR(req);
req = NULL;
goto out;
}
memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
req->num_pages = num_pages;
/* okay, let's send it to the client */
req->in.h.opcode = FUSE_IOCTL;
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
if (in_size) {
req->in.numargs++;
req->in.args[1].size = in_size;
req->in.argpages = 1;
err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
false);
if (err)
goto out;
}
req->out.numargs = 2;
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
req->out.args[1].size = out_size;
req->out.argpages = 1;
req->out.argvar = 1;
fuse_request_send(fc, req);
err = req->out.h.error;
transferred = req->out.args[1].size;
fuse_put_request(fc, req);
req = NULL;
if (err)
goto out;
/* did it ask for retry? */
if (outarg.flags & FUSE_IOCTL_RETRY) {
char *vaddr;
/* no retry if in restricted mode */
err = -EIO;
if (!(flags & FUSE_IOCTL_UNRESTRICTED))
goto out;
in_iovs = outarg.in_iovs;
out_iovs = outarg.out_iovs;
/*
* Make sure things are in boundary, separate checks
* are to protect against overflow.
*/
err = -ENOMEM;
if (in_iovs > FUSE_IOCTL_MAX_IOV ||
out_iovs > FUSE_IOCTL_MAX_IOV ||
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
vaddr = kmap_atomic(pages[0], KM_USER0);
err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr,
transferred, in_iovs + out_iovs,
(flags & FUSE_IOCTL_COMPAT) != 0);
kunmap_atomic(vaddr, KM_USER0);
if (err)
goto out;
in_iov = page_address(iov_page);
out_iov = in_iov + in_iovs;
goto retry;
}
err = -EIO;
if (transferred > inarg.out_size)
goto out;
err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
out:
if (req)
fuse_put_request(fc, req);
if (iov_page)
__free_page(iov_page);
while (num_pages)
__free_page(pages[--num_pages]);
kfree(pages);
return err ? err : outarg.result;
}
|
CWE-119
| 179,078 | 885 |
265577904395187415417640424029624842814
| null | null | null |
linux
|
0a54917c3fc295cb61f3fb52373c173fd3b69f48
| 1 |
static int orinoco_ioctl_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
hermes_t *hw = &priv->hw;
struct iw_param *param = &wrqu->param;
unsigned long flags;
int ret = -EINPROGRESS;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
case IW_AUTH_PRIVACY_INVOKED:
case IW_AUTH_DROP_UNENCRYPTED:
/*
* orinoco does not use these parameters
*/
break;
case IW_AUTH_KEY_MGMT:
/* wl_lkm implies value 2 == PSK for Hermes I
* which ties in with WEXT
* no other hints tho :(
*/
priv->key_mgmt = param->value;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
/* When countermeasures are enabled, shut down the
* card; when disabled, re-enable the card. This must
* take effect immediately.
*
* TODO: Make sure that the EAPOL message is getting
* out before card disabled
*/
if (param->value) {
priv->tkip_cm_active = 1;
ret = hermes_enable_port(hw, 0);
} else {
priv->tkip_cm_active = 0;
ret = hermes_disable_port(hw, 0);
}
break;
case IW_AUTH_80211_AUTH_ALG:
if (param->value & IW_AUTH_ALG_SHARED_KEY)
priv->wep_restrict = 1;
else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
priv->wep_restrict = 0;
else
ret = -EINVAL;
break;
case IW_AUTH_WPA_ENABLED:
if (priv->has_wpa) {
priv->wpa_enabled = param->value ? 1 : 0;
} else {
if (param->value)
ret = -EOPNOTSUPP;
/* else silently accept disable of WPA */
priv->wpa_enabled = 0;
}
break;
default:
ret = -EOPNOTSUPP;
}
orinoco_unlock(priv, &flags);
return ret;
}
| 179,079 | 886 |
98556305873420674058746144199000595109
| null | null | null |
|
linux
|
a2ae4cc9a16e211c8a128ba10d22a85431f093ab
| 1 |
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
struct user_struct *user;
int ret;
/* Check the IN_* constants for consistency. */
BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
user = get_current_user();
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
ret = -EMFILE;
goto out_free_uid;
}
/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
group = inotify_new_group(user, inotify_max_queued_events);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_free_uid;
}
atomic_inc(&user->inotify_devs);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret >= 0)
return ret;
atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
return ret;
}
|
CWE-399
| 179,080 | 887 |
321635802954137778161647104660914660755
| null | null | null |
linux
|
f5563318ff1bde15b10e736e97ffce13be08bc1a
| 1 |
int ieee80211_radiotap_iterator_init(
struct ieee80211_radiotap_iterator *iterator,
struct ieee80211_radiotap_header *radiotap_header,
int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
{
/* Linux only supports version 0 radiotap format */
if (radiotap_header->it_version)
return -EINVAL;
/* sanity check for allowed length and radiotap length field */
if (max_length < get_unaligned_le16(&radiotap_header->it_len))
return -EINVAL;
iterator->_rtheader = radiotap_header;
iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);
iterator->_arg_index = 0;
iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);
iterator->_reset_on_ext = 0;
iterator->_next_bitmap = &radiotap_header->it_present;
iterator->_next_bitmap++;
iterator->_vns = vns;
iterator->current_namespace = &radiotap_ns;
iterator->is_radiotap_ns = 1;
/* find payload start allowing for extended bitmap(s) */
if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
while (get_unaligned_le32(iterator->_arg) &
(1 << IEEE80211_RADIOTAP_EXT)) {
iterator->_arg += sizeof(uint32_t);
/*
* check for insanity where the present bitmaps
* keep claiming to extend up to or even beyond the
* stated radiotap header length
*/
if ((unsigned long)iterator->_arg -
(unsigned long)iterator->_rtheader >
(unsigned long)iterator->_max_length)
return -EINVAL;
}
iterator->_arg += sizeof(uint32_t);
/*
* no need to check again for blowing past stated radiotap
* header length, because ieee80211_radiotap_iterator_next
* checks it before it is dereferenced
*/
}
iterator->this_arg = iterator->_arg;
/* we are all initialized happily */
return 0;
}
|
CWE-119
| 179,081 | 888 |
202083656134031078857371790539548342481
| null | null | null |
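In the iterator init above, the extended-bitmap loop dereferences `iterator->_arg` with `get_unaligned_le32()` at the top of each iteration and only bounds-checks after advancing, so a crafted header can trigger one read past the buffer. A sketch of checking the remaining bytes before each read:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Read the next little-endian 32-bit bitmap word only when at least
 * four bytes remain between the cursor and the end of the header. */
static bool next_bitmap_word(const uint8_t **p, const uint8_t *end,
                             uint32_t *word)
{
    if (end - *p < (ptrdiff_t)sizeof(uint32_t))
        return false;
    *word = (uint32_t)(*p)[0]       | (uint32_t)(*p)[1] << 8 |
            (uint32_t)(*p)[2] << 16 | (uint32_t)(*p)[3] << 24;
    *p += sizeof(uint32_t);
    return true;
}
```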
FFmpeg
|
fe448cd28d674c3eff3072552eae366d0b659ce9
| 1 |
static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
AVFrame *picture)
{
int compno, reslevelno, bandno;
int x, y;
uint8_t *line;
Jpeg2000T1Context t1;
/* Loop on tile components */
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
/* Loop on resolution levels */
for (reslevelno = 0; reslevelno < codsty->nreslevels2decode; reslevelno++) {
Jpeg2000ResLevel *rlevel = comp->reslevel + reslevelno;
/* Loop on bands */
for (bandno = 0; bandno < rlevel->nbands; bandno++) {
int nb_precincts, precno;
Jpeg2000Band *band = rlevel->band + bandno;
int cblkno = 0, bandpos;
bandpos = bandno + (reslevelno > 0);
if (band->coord[0][0] == band->coord[0][1] ||
band->coord[1][0] == band->coord[1][1])
continue;
nb_precincts = rlevel->num_precincts_x * rlevel->num_precincts_y;
/* Loop on precincts */
for (precno = 0; precno < nb_precincts; precno++) {
Jpeg2000Prec *prec = band->prec + precno;
/* Loop on codeblocks */
for (cblkno = 0; cblkno < prec->nb_codeblocks_width * prec->nb_codeblocks_height; cblkno++) {
int x, y;
Jpeg2000Cblk *cblk = prec->cblk + cblkno;
decode_cblk(s, codsty, &t1, cblk,
cblk->coord[0][1] - cblk->coord[0][0],
cblk->coord[1][1] - cblk->coord[1][0],
bandpos);
x = cblk->coord[0][0];
y = cblk->coord[1][0];
if (codsty->transform == FF_DWT97)
dequantization_float(x, y, cblk, comp, &t1, band);
else
dequantization_int(x, y, cblk, comp, &t1, band);
} /* end cblk */
} /*end prec */
} /* end band */
} /* end reslevel */
/* inverse DWT */
ff_dwt_decode(&comp->dwt, codsty->transform == FF_DWT97 ? (void*)comp->f_data : (void*)comp->i_data);
} /*end comp */
/* inverse MCT transformation */
if (tile->codsty[0].mct)
mct_decode(s, tile);
if (s->cdef[0] < 0) {
for (x = 0; x < s->ncomponents; x++)
s->cdef[x] = x + 1;
if ((s->ncomponents & 1) == 0)
s->cdef[s->ncomponents-1] = 0;
}
if (s->precision <= 8) {
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
float *datap = comp->f_data;
int32_t *i_datap = comp->i_data;
int cbps = s->cbps[compno];
int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
int planar = !!picture->data[2];
int pixelsize = planar ? 1 : s->ncomponents;
int plane = 0;
if (planar)
plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);
y = tile->comp[compno].coord[1][0] - s->image_offset_y;
line = picture->data[plane] + y * picture->linesize[plane];
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
uint8_t *dst;
x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = line + x * pixelsize + compno*!planar;
if (codsty->transform == FF_DWT97) {
for (; x < w; x += s->cdx[compno]) {
int val = lrintf(*datap) + (1 << (cbps - 1));
/* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
val = av_clip(val, 0, (1 << cbps) - 1);
*dst = val << (8 - cbps);
datap++;
dst += pixelsize;
}
} else {
for (; x < w; x += s->cdx[compno]) {
int val = *i_datap + (1 << (cbps - 1));
/* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
val = av_clip(val, 0, (1 << cbps) - 1);
*dst = val << (8 - cbps);
i_datap++;
dst += pixelsize;
}
}
line += picture->linesize[plane];
}
}
} else {
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
float *datap = comp->f_data;
int32_t *i_datap = comp->i_data;
uint16_t *linel;
int cbps = s->cbps[compno];
int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
int planar = !!picture->data[2];
int pixelsize = planar ? 1 : s->ncomponents;
int plane = 0;
if (planar)
plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);
y = tile->comp[compno].coord[1][0] - s->image_offset_y;
linel = (uint16_t *)picture->data[plane] + y * (picture->linesize[plane] >> 1);
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
uint16_t *dst;
x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = linel + (x * pixelsize + compno*!planar);
if (codsty->transform == FF_DWT97) {
for (; x < w; x += s-> cdx[compno]) {
int val = lrintf(*datap) + (1 << (cbps - 1));
/* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
val = av_clip(val, 0, (1 << cbps) - 1);
/* align 12 bit values in little-endian mode */
*dst = val << (16 - cbps);
datap++;
dst += pixelsize;
}
} else {
for (; x < w; x += s-> cdx[compno]) {
int val = *i_datap + (1 << (cbps - 1));
/* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
val = av_clip(val, 0, (1 << cbps) - 1);
/* align 12 bit values in little-endian mode */
*dst = val << (16 - cbps);
i_datap++;
dst += pixelsize;
}
}
linel += picture->linesize[plane] >> 1;
}
}
}
return 0;
}
|
CWE-119
| 179,085 | 892 |
128636664101031794987394119785499829755
| null | null | null |
FFmpeg
|
f31011e9abfb2ae75bb32bc44e2c34194c8dc40a
| 1 |
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
{
if(pc->overread){
av_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
pc->overread, pc->state, next, pc->index, pc->overread_index);
av_dlog(NULL, "%X %X %X %X\n", (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
}
/* Copy overread bytes from last frame into buffer. */
for(; pc->overread>0; pc->overread--){
pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
}
/* flush remaining if EOF */
if(!*buf_size && next == END_NOT_FOUND){
next= 0;
}
pc->last_index= pc->index;
/* copy into buffer end return */
if(next == END_NOT_FOUND){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
if(!new_buffer)
return AVERROR(ENOMEM);
pc->buffer = new_buffer;
memcpy(&pc->buffer[pc->index], *buf, *buf_size);
pc->index += *buf_size;
return -1;
}
*buf_size=
pc->overread_index= pc->index + next;
/* append to buffer */
if(pc->index){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
if(!new_buffer)
return AVERROR(ENOMEM);
pc->buffer = new_buffer;
if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
memcpy(&pc->buffer[pc->index], *buf,
next + FF_INPUT_BUFFER_PADDING_SIZE);
pc->index = 0;
*buf= pc->buffer;
}
/* store overread bytes */
for(;next < 0; next++){
pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
pc->state64 = (pc->state64<<8) | pc->buffer[pc->last_index + next];
pc->overread++;
}
if(pc->overread){
av_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
pc->overread, pc->state, next, pc->index, pc->overread_index);
av_dlog(NULL, "%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
}
return 0;
}
|
CWE-119
| 179,086 | 893 |
222309056610865204382267494971212653031
| null | null | null |
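When `av_fast_realloc` fails above, the function returns `ENOMEM` but leaves `pc->index` and `pc->overread` describing data the unchanged buffer may not hold; the hazard appears to be this stale bookkeeping. A generic sketch of resetting state on a failed grow, with a hypothetical struct rather than FFmpeg's `ParseContext`:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct {               /* illustrative parse-buffer state */
    uint8_t *buf;
    size_t   cap;
    size_t   index;            /* bytes currently buffered */
} ParseBuf;

/* Grow the buffer; on failure also clear the index so callers cannot
 * later write at an offset the old allocation never covered. */
static int parse_buf_grow(ParseBuf *pc, size_t need)
{
    uint8_t *nb;
    if (need <= pc->cap)
        return 0;
    nb = realloc(pc->buf, need);
    if (!nb) {
        pc->index = 0;
        return -1;
    }
    pc->buf = nb;
    pc->cap = need;
    return 0;
}
```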
FFmpeg
|
e07ac727c1cc9eed39e7f9117c97006f719864bd
| 1 |
static int g2m_init_buffers(G2MContext *c)
{
int aligned_height;
if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
c->framebuf_stride = FFALIGN(c->width * 3, 16);
aligned_height = FFALIGN(c->height, 16);
av_free(c->framebuf);
c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
if (!c->framebuf)
return AVERROR(ENOMEM);
}
if (!c->synth_tile || !c->jpeg_tile ||
c->old_tile_w < c->tile_width ||
c->old_tile_h < c->tile_height) {
c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
aligned_height = FFALIGN(c->tile_height, 16);
av_free(c->synth_tile);
av_free(c->jpeg_tile);
av_free(c->kempf_buf);
av_free(c->kempf_flags);
c->synth_tile = av_mallocz(c->tile_stride * aligned_height);
c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height);
c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height
+ FF_INPUT_BUFFER_PADDING_SIZE);
c->kempf_flags = av_mallocz( c->tile_width * aligned_height);
if (!c->synth_tile || !c->jpeg_tile ||
!c->kempf_buf || !c->kempf_flags)
return AVERROR(ENOMEM);
}
return 0;
}
|
CWE-119
| 179,087 | 894 |
255877059988693141105119960061023166155
| null | null | null |
FFmpeg
|
cdd5df8189ff1537f7abe8defe971f80602cc2d2
| 1 |
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
FPSContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int64_t delta;
int i, ret;
s->frames_in++;
/* discard frames until we get the first timestamp */
if (s->pts == AV_NOPTS_VALUE) {
if (buf->pts != AV_NOPTS_VALUE) {
ret = write_to_fifo(s->fifo, buf);
if (ret < 0)
return ret;
if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) {
double first_pts = s->start_time * AV_TIME_BASE;
first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX);
s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
inlink->time_base);
av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n",
s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q,
outlink->time_base));
} else {
s->first_pts = s->pts = buf->pts;
}
} else {
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
"timestamp.\n");
av_frame_free(&buf);
s->drop++;
}
return 0;
}
/* now wait for the next timestamp */
if (buf->pts == AV_NOPTS_VALUE) {
return write_to_fifo(s->fifo, buf);
}
/* number of output frames */
delta = av_rescale_q_rnd(buf->pts - s->pts, inlink->time_base,
outlink->time_base, s->rounding);
if (delta < 1) {
/* drop the frame and everything buffered except the first */
AVFrame *tmp;
int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*);
av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop);
s->drop += drop;
av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL);
flush_fifo(s->fifo);
ret = write_to_fifo(s->fifo, tmp);
av_frame_free(&buf);
return ret;
}
/* can output >= 1 frames */
for (i = 0; i < delta; i++) {
AVFrame *buf_out;
av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL);
/* duplicate the frame if needed */
if (!av_fifo_size(s->fifo) && i < delta - 1) {
AVFrame *dup = av_frame_clone(buf_out);
av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n");
if (dup)
ret = write_to_fifo(s->fifo, dup);
else
ret = AVERROR(ENOMEM);
if (ret < 0) {
av_frame_free(&buf_out);
av_frame_free(&buf);
return ret;
}
s->dup++;
}
buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base,
outlink->time_base) + s->frames_out;
if ((ret = ff_filter_frame(outlink, buf_out)) < 0) {
av_frame_free(&buf);
return ret;
}
s->frames_out++;
}
flush_fifo(s->fifo);
ret = write_to_fifo(s->fifo, buf);
s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base);
return ret;
}
|
CWE-399
| 179,088 | 895 |
40209124885848565360625924329087433885
| null | null | null |
FFmpeg
|
b05cd1ea7e45a836f7f6071a716c38bb30326e0f
| 1 |
static int read_header(FFV1Context *f)
{
uint8_t state[CONTEXT_SIZE];
int i, j, context_count = -1; //-1 to avoid warning
RangeCoder *const c = &f->slice_context[0]->c;
memset(state, 128, sizeof(state));
if (f->version < 2) {
int chroma_planes, chroma_h_shift, chroma_v_shift, transparency;
unsigned v= get_symbol(c, state, 0);
if (v >= 2) {
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
return AVERROR_INVALIDDATA;
}
f->version = v;
f->ac = f->avctx->coder_type = get_symbol(c, state, 0);
if (f->ac > 1) {
for (i = 1; i < 256; i++)
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
}
f->colorspace = get_symbol(c, state, 0); //YUV cs type
if (f->version > 0)
f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
chroma_planes = get_rac(c, state);
chroma_h_shift = get_symbol(c, state, 0);
chroma_v_shift = get_symbol(c, state, 0);
transparency = get_rac(c, state);
if (f->plane_count) {
if ( chroma_planes != f->chroma_planes
|| chroma_h_shift!= f->chroma_h_shift
|| chroma_v_shift!= f->chroma_v_shift
|| transparency != f->transparency) {
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
return AVERROR_INVALIDDATA;
}
}
f->chroma_planes = chroma_planes;
f->chroma_h_shift = chroma_h_shift;
f->chroma_v_shift = chroma_v_shift;
f->transparency = transparency;
f->plane_count = 2 + f->transparency;
}
if (f->colorspace == 0) {
if (!f->transparency && !f->chroma_planes) {
if (f->avctx->bits_per_raw_sample <= 8)
f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
else
f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
} else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 9) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 10) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
}
} else if (f->colorspace == 1) {
if (f->chroma_h_shift || f->chroma_v_shift) {
av_log(f->avctx, AV_LOG_ERROR,
"chroma subsampling not supported in this colorspace\n");
return AVERROR(ENOSYS);
}
if ( f->avctx->bits_per_raw_sample == 9)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
else if (f->avctx->bits_per_raw_sample == 10)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
else if (f->avctx->bits_per_raw_sample == 12)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
else if (f->avctx->bits_per_raw_sample == 14)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
else
if (f->transparency) f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
else f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
} else {
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
return AVERROR(ENOSYS);
}
av_dlog(f->avctx, "%d %d %d\n",
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
if (f->version < 2) {
context_count = read_quant_tables(c, f->quant_table);
if (context_count < 0) {
av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
return AVERROR_INVALIDDATA;
}
} else if (f->version < 3) {
f->slice_count = get_symbol(c, state, 0);
} else {
const uint8_t *p = c->bytestream_end;
for (f->slice_count = 0;
f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start;
f->slice_count++) {
int trailer = 3 + 5*!!f->ec;
int size = AV_RB24(p-trailer);
if (size + trailer > p - c->bytestream_start)
break;
p -= size + trailer;
}
}
if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0) {
av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
return AVERROR_INVALIDDATA;
}
for (j = 0; j < f->slice_count; j++) {
FFV1Context *fs = f->slice_context[j];
fs->ac = f->ac;
fs->packed_at_lsb = f->packed_at_lsb;
fs->slice_damaged = 0;
if (f->version == 2) {
fs->slice_x = get_symbol(c, state, 0) * f->width ;
fs->slice_y = get_symbol(c, state, 0) * f->height;
fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
fs->slice_x /= f->num_h_slices;
fs->slice_y /= f->num_v_slices;
fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x;
fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
if ((unsigned)fs->slice_width > f->width ||
(unsigned)fs->slice_height > f->height)
return AVERROR_INVALIDDATA;
if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
|| (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
return AVERROR_INVALIDDATA;
}
for (i = 0; i < f->plane_count; i++) {
PlaneContext *const p = &fs->plane[i];
if (f->version == 2) {
int idx = get_symbol(c, state, 0);
if (idx > (unsigned)f->quant_table_count) {
av_log(f->avctx, AV_LOG_ERROR,
"quant_table_index out of range\n");
return AVERROR_INVALIDDATA;
}
p->quant_table_index = idx;
memcpy(p->quant_table, f->quant_tables[idx],
sizeof(p->quant_table));
context_count = f->context_count[idx];
} else {
memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
}
if (f->version <= 2) {
av_assert0(context_count >= 0);
if (p->context_count < context_count) {
av_freep(&p->state);
av_freep(&p->vlc_state);
}
p->context_count = context_count;
}
}
}
return 0;
}
|
CWE-119
| 179,089 | 896 |
145906591874544820355065071924399305170
| null | null | null |
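One visible hazard in the slice loop above: `if (idx > (unsigned)f->quant_table_count)` accepts `idx == quant_table_count`, one past the last valid table. The correct guard for a `count`-element array, as a tiny sketch:

```c
#include <stdbool.h>

/* Valid indices into a count-element array are 0 .. count-1, so the
 * rejection test must be `idx >= count`, not `idx > count`. */
static bool table_index_valid(unsigned idx, unsigned count)
{
    return idx < count;
}
```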
FFmpeg
|
a1b9004b768bef606ee98d417bceb9392ceb788d
| 1 |
static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c)
{
uint8_t byte;
if (bytestream2_get_bytes_left(&s->g) < 5)
return AVERROR_INVALIDDATA;
/* nreslevels = number of resolution levels
= number of decomposition level +1 */
c->nreslevels = bytestream2_get_byteu(&s->g) + 1;
if (c->nreslevels >= JPEG2000_MAX_RESLEVELS) {
av_log(s->avctx, AV_LOG_ERROR, "nreslevels %d is invalid\n", c->nreslevels);
return AVERROR_INVALIDDATA;
}
/* compute number of resolution levels to decode */
if (c->nreslevels < s->reduction_factor)
c->nreslevels2decode = 1;
else
c->nreslevels2decode = c->nreslevels - s->reduction_factor;
c->log2_cblk_width = (bytestream2_get_byteu(&s->g) & 15) + 2; // cblk width
c->log2_cblk_height = (bytestream2_get_byteu(&s->g) & 15) + 2; // cblk height
if (c->log2_cblk_width > 10 || c->log2_cblk_height > 10 ||
c->log2_cblk_width + c->log2_cblk_height > 12) {
av_log(s->avctx, AV_LOG_ERROR, "cblk size invalid\n");
return AVERROR_INVALIDDATA;
}
if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
avpriv_request_sample(s->avctx, "cblk size > 64");
return AVERROR_PATCHWELCOME;
}
c->cblk_style = bytestream2_get_byteu(&s->g);
if (c->cblk_style != 0) { // cblk style
av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);
}
c->transform = bytestream2_get_byteu(&s->g); // DWT transformation type
/* set integer 9/7 DWT in case of BITEXACT flag */
if ((s->avctx->flags & CODEC_FLAG_BITEXACT) && (c->transform == FF_DWT97))
c->transform = FF_DWT97_INT;
if (c->csty & JPEG2000_CSTY_PREC) {
int i;
for (i = 0; i < c->nreslevels; i++) {
byte = bytestream2_get_byte(&s->g);
c->log2_prec_widths[i] = byte & 0x0F; // precinct PPx
c->log2_prec_heights[i] = (byte >> 4) & 0x0F; // precinct PPy
}
} else {
memset(c->log2_prec_widths , 15, sizeof(c->log2_prec_widths ));
memset(c->log2_prec_heights, 15, sizeof(c->log2_prec_heights));
}
return 0;
}
|
CWE-20
| 179,090 | 897 |
284258088157399619618022794648070491540
| null | null | null |
FFmpeg
|
8bb11c3ca77b52e05a9ed1496a65f8a76e6e2d8f
| 1 |
static int get_siz(Jpeg2000DecoderContext *s)
{
int i;
int ncomponents;
uint32_t log2_chroma_wh = 0;
const enum AVPixelFormat *possible_fmts = NULL;
int possible_fmts_nb = 0;
if (bytestream2_get_bytes_left(&s->g) < 36)
return AVERROR_INVALIDDATA;
s->avctx->profile = bytestream2_get_be16u(&s->g); // Rsiz
s->width = bytestream2_get_be32u(&s->g); // Width
s->height = bytestream2_get_be32u(&s->g); // Height
s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz
s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz
s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz
s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz
s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz
s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
ncomponents = bytestream2_get_be16u(&s->g); // CSiz
if (ncomponents <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
s->ncomponents);
return AVERROR_INVALIDDATA;
}
if (ncomponents > 4) {
avpriv_request_sample(s->avctx, "Support for %d components",
s->ncomponents);
return AVERROR_PATCHWELCOME;
}
s->ncomponents = ncomponents;
if (s->tile_width <= 0 || s->tile_height <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid tile dimension %dx%d.\n",
s->tile_width, s->tile_height);
return AVERROR_INVALIDDATA;
}
if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents)
return AVERROR_INVALIDDATA;
for (i = 0; i < s->ncomponents; i++) { // Ssiz_i XRsiz_i, YRsiz_i
uint8_t x = bytestream2_get_byteu(&s->g);
s->cbps[i] = (x & 0x7f) + 1;
s->precision = FFMAX(s->cbps[i], s->precision);
s->sgnd[i] = !!(x & 0x80);
s->cdx[i] = bytestream2_get_byteu(&s->g);
s->cdy[i] = bytestream2_get_byteu(&s->g);
if (!s->cdx[i] || !s->cdy[i]) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid sample seperation\n");
return AVERROR_INVALIDDATA;
}
log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2;
}
s->numXtiles = ff_jpeg2000_ceildiv(s->width - s->tile_offset_x, s->tile_width);
s->numYtiles = ff_jpeg2000_ceildiv(s->height - s->tile_offset_y, s->tile_height);
if (s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(*s->tile)) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(EINVAL);
}
s->tile = av_mallocz_array(s->numXtiles * s->numYtiles, sizeof(*s->tile));
if (!s->tile) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(ENOMEM);
}
for (i = 0; i < s->numXtiles * s->numYtiles; i++) {
Jpeg2000Tile *tile = s->tile + i;
tile->comp = av_mallocz(s->ncomponents * sizeof(*tile->comp));
if (!tile->comp)
return AVERROR(ENOMEM);
}
/* compute image size with reduction factor */
s->avctx->width = ff_jpeg2000_ceildivpow2(s->width - s->image_offset_x,
s->reduction_factor);
s->avctx->height = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y,
s->reduction_factor);
if (s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_2K ||
s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_4K) {
possible_fmts = xyz_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(xyz_pix_fmts);
} else {
switch (s->colour_space) {
case 16:
possible_fmts = rgb_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(rgb_pix_fmts);
break;
case 17:
possible_fmts = gray_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(gray_pix_fmts);
break;
case 18:
possible_fmts = yuv_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(yuv_pix_fmts);
break;
default:
possible_fmts = all_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(all_pix_fmts);
break;
}
}
for (i = 0; i < possible_fmts_nb; ++i) {
if (pix_fmt_match(possible_fmts[i], ncomponents, s->precision, log2_chroma_wh, s->pal8)) {
s->avctx->pix_fmt = possible_fmts[i];
break;
}
}
if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
av_log(s->avctx, AV_LOG_ERROR,
"Unknown pix_fmt, profile: %d, colour_space: %d, "
"components: %d, precision: %d, "
"cdx[1]: %d, cdy[1]: %d, cdx[2]: %d, cdy[2]: %d\n",
s->avctx->profile, s->colour_space, ncomponents, s->precision,
ncomponents > 2 ? s->cdx[1] : 0,
ncomponents > 2 ? s->cdy[1] : 0,
ncomponents > 2 ? s->cdx[2] : 0,
ncomponents > 2 ? s->cdy[2] : 0);
}
return 0;
}
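A note on the CWE-119 label on the row below: cdx/cdy are rejected only when zero, yet `cdx >> 1` and `cdy >> 1` are packed into 2-bit slots of log2_chroma_wh, and a later record in this file (the same function at commit 780669ef7c23) restricts them to 1, 2 and 4. The standalone program below is my own illustration of the slot overflow, not FFmpeg code; how pix_fmt_match() consumes log2_chroma_wh is an assumption.

/* Standalone illustration (not FFmpeg code): cdx/cdy values other than
 * 1, 2 or 4 overflow their 2-bit slot in log2_chroma_wh and corrupt the
 * fields of neighbouring components. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t log2_chroma_wh = 0;
    uint8_t cdx[4] = { 1, 255, 1, 1 };  /* component 1 carries a hostile XRsiz */
    uint8_t cdy[4] = { 1, 1,   1, 1 };

    for (int i = 0; i < 4; i++)
        log2_chroma_wh |= cdy[i] >> 1 << i * 4 | cdx[i] >> 1 << (i * 4 + 2);

    /* 255 >> 1 == 127 needs 7 bits but has only 2: bits belonging to
     * components 2 and 3 are now set too. */
    printf("log2_chroma_wh = 0x%08x\n", log2_chroma_wh);
    return 0;
}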
| CWE-119 | 179,095 | 901 | 101435318398182137895811640160490132866 | null | null | null | FFmpeg | 880c73cd76109697447fbfbaa8e5ee5683309446 | 1 |
static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
int buf_size = avpkt->size;
FlashSVContext *s = avctx->priv_data;
int h_blocks, v_blocks, h_part, v_part, i, j, ret;
GetBitContext gb;
int last_blockwidth = s->block_width;
int last_blockheight= s->block_height;
/* no supplementary picture */
if (buf_size == 0)
return 0;
if (buf_size < 4)
return -1;
init_get_bits(&gb, avpkt->data, buf_size * 8);
/* start to parse the bitstream */
s->block_width = 16 * (get_bits(&gb, 4) + 1);
s->image_width = get_bits(&gb, 12);
s->block_height = 16 * (get_bits(&gb, 4) + 1);
s->image_height = get_bits(&gb, 12);
if ( last_blockwidth != s->block_width
|| last_blockheight!= s->block_height)
av_freep(&s->blocks);
if (s->ver == 2) {
skip_bits(&gb, 6);
if (get_bits1(&gb)) {
avpriv_request_sample(avctx, "iframe");
return AVERROR_PATCHWELCOME;
}
if (get_bits1(&gb)) {
avpriv_request_sample(avctx, "Custom palette");
return AVERROR_PATCHWELCOME;
}
}
/* calculate number of blocks and size of border (partial) blocks */
h_blocks = s->image_width / s->block_width;
h_part = s->image_width % s->block_width;
v_blocks = s->image_height / s->block_height;
v_part = s->image_height % s->block_height;
/* the block size could change between frames, make sure the buffer
* is large enough, if not, get a larger one */
if (s->block_size < s->block_width * s->block_height) {
int tmpblock_size = 3 * s->block_width * s->block_height;
s->tmpblock = av_realloc(s->tmpblock, tmpblock_size);
if (!s->tmpblock) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
return AVERROR(ENOMEM);
}
if (s->ver == 2) {
s->deflate_block_size = calc_deflate_block_size(tmpblock_size);
if (s->deflate_block_size <= 0) {
av_log(avctx, AV_LOG_ERROR, "Can't determine deflate buffer size.\n");
return -1;
}
s->deflate_block = av_realloc(s->deflate_block, s->deflate_block_size);
if (!s->deflate_block) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate deflate buffer.\n");
return AVERROR(ENOMEM);
}
}
}
s->block_size = s->block_width * s->block_height;
/* initialize the image size once */
if (avctx->width == 0 && avctx->height == 0) {
avcodec_set_dimensions(avctx, s->image_width, s->image_height);
}
/* check for changes of image width and image height */
if (avctx->width != s->image_width || avctx->height != s->image_height) {
av_log(avctx, AV_LOG_ERROR,
"Frame width or height differs from first frame!\n");
av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n",
avctx->height, avctx->width, s->image_height, s->image_width);
return AVERROR_INVALIDDATA;
}
/* we care for keyframes only in Screen Video v2 */
s->is_keyframe = (avpkt->flags & AV_PKT_FLAG_KEY) && (s->ver == 2);
if (s->is_keyframe) {
s->keyframedata = av_realloc(s->keyframedata, avpkt->size);
memcpy(s->keyframedata, avpkt->data, avpkt->size);
}
if(s->ver == 2 && !s->blocks)
s->blocks = av_mallocz((v_blocks + !!v_part) * (h_blocks + !!h_part)
* sizeof(s->blocks[0]));
av_dlog(avctx, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n",
s->image_width, s->image_height, s->block_width, s->block_height,
h_blocks, v_blocks, h_part, v_part);
if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
return ret;
/* loop over all block columns */
for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) {
int y_pos = j * s->block_height; // vertical position in frame
int cur_blk_height = (j < v_blocks) ? s->block_height : v_part;
/* loop over all block rows */
for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) {
int x_pos = i * s->block_width; // horizontal position in frame
int cur_blk_width = (i < h_blocks) ? s->block_width : h_part;
int has_diff = 0;
/* get the size of the compressed zlib chunk */
int size = get_bits(&gb, 16);
s->color_depth = 0;
s->zlibprime_curr = 0;
s->zlibprime_prev = 0;
s->diff_start = 0;
s->diff_height = cur_blk_height;
if (8 * size > get_bits_left(&gb)) {
av_frame_unref(&s->frame);
return AVERROR_INVALIDDATA;
}
if (s->ver == 2 && size) {
skip_bits(&gb, 3);
s->color_depth = get_bits(&gb, 2);
has_diff = get_bits1(&gb);
s->zlibprime_curr = get_bits1(&gb);
s->zlibprime_prev = get_bits1(&gb);
if (s->color_depth != 0 && s->color_depth != 2) {
av_log(avctx, AV_LOG_ERROR,
"%dx%d invalid color depth %d\n", i, j, s->color_depth);
return AVERROR_INVALIDDATA;
}
if (has_diff) {
if (!s->keyframe) {
av_log(avctx, AV_LOG_ERROR,
"inter frame without keyframe\n");
return AVERROR_INVALIDDATA;
}
s->diff_start = get_bits(&gb, 8);
s->diff_height = get_bits(&gb, 8);
av_log(avctx, AV_LOG_DEBUG,
"%dx%d diff start %d height %d\n",
i, j, s->diff_start, s->diff_height);
size -= 2;
}
if (s->zlibprime_prev)
av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_prev\n", i, j);
if (s->zlibprime_curr) {
int col = get_bits(&gb, 8);
int row = get_bits(&gb, 8);
av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_curr %dx%d\n", i, j, col, row);
size -= 2;
avpriv_request_sample(avctx, "zlibprime_curr");
return AVERROR_PATCHWELCOME;
}
if (!s->blocks && (s->zlibprime_curr || s->zlibprime_prev)) {
av_log(avctx, AV_LOG_ERROR, "no data available for zlib "
"priming\n");
return AVERROR_INVALIDDATA;
}
size--; // account for flags byte
}
if (has_diff) {
int k;
int off = (s->image_height - y_pos - 1) * s->frame.linesize[0];
for (k = 0; k < cur_blk_height; k++)
memcpy(s->frame.data[0] + off - k*s->frame.linesize[0] + x_pos*3,
s->keyframe + off - k*s->frame.linesize[0] + x_pos*3,
cur_blk_width * 3);
}
/* skip unchanged blocks, which have size 0 */
if (size) {
if (flashsv_decode_block(avctx, avpkt, &gb, size,
cur_blk_width, cur_blk_height,
x_pos, y_pos,
i + j * (h_blocks + !!h_part)))
av_log(avctx, AV_LOG_ERROR,
"error in decompression of block %dx%d\n", i, j);
}
}
}
if (s->is_keyframe && s->ver == 2) {
if (!s->keyframe) {
s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height);
if (!s->keyframe) {
av_log(avctx, AV_LOG_ERROR, "Cannot allocate image data\n");
return AVERROR(ENOMEM);
}
}
memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
}
if ((ret = av_frame_ref(data, &s->frame)) < 0)
return ret;
*got_frame = 1;
if ((get_bits_count(&gb) / 8) != buf_size)
av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
buf_size, (get_bits_count(&gb) / 8));
/* report that the buffer was completely consumed */
return buf_size;
}
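A note on the CWE-20 row below: diff_start and diff_height are raw bytes from the bitstream and later bound row addressing inside flashsv_decode_block(), but this version never checks them against the current block. A hedged sketch of the kind of validation that closes this, placed right after the two get_bits() calls (my reconstruction, not the verbatim upstream patch):

if (s->diff_start + s->diff_height > cur_blk_height) {
    av_log(avctx, AV_LOG_ERROR,
           "Invalid diff area %d+%d in a block %d rows tall\n",
           s->diff_start, s->diff_height, cur_blk_height);
    return AVERROR_INVALIDDATA;
}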
| CWE-20 | 179,096 | 902 | 269767046134726614373351448788115704200 | null | null | null | FFmpeg | 86736f59d6a527d8bc807d09b93f971c0fe0bb07 | 1 |
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
long i;
for (i = 0; i <= w - sizeof(long); i += sizeof(long)) {
long a = *(long *)(src1 + i);
long b = *(long *)(src2 + i);
*(long *)(dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
}
for (; i < w; i++)
dst[i] = src1[i] + src2[i];
}
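The CWE-189 row below matches a hazard visible in the loop header: `w - sizeof(long)` is computed in the unsigned type of size_t, so any w smaller than sizeof(long) wraps to a huge bound and the word-sized loads run past the buffers. A standalone demonstration (mine, not FFmpeg's):

#include <stdio.h>

int main(void)
{
    int  w = 2;   /* a width smaller than one long */
    long i = 0;

    /* Same comparison as the loop header above: w is converted to the
     * unsigned type of sizeof(long), so 2 - 8 wraps around. */
    printf("w - sizeof(long) = %zu\n", w - sizeof(long));
    if (i <= w - sizeof(long))
        printf("loop body runs: sizeof(long)-byte loads from a 2-byte buffer\n");
    return 0;
}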
| CWE-189 | 179,097 | 903 | 13327716502808838951480419970082341279 | null | null | null | FFmpeg | 821a5938d100458f4d09d634041b05c860554ce0 | 1 |
static int g2m_init_buffers(G2MContext *c)
{
int aligned_height;
if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
c->framebuf_stride = FFALIGN(c->width * 3, 16);
aligned_height = FFALIGN(c->height, 16);
av_free(c->framebuf);
c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
if (!c->framebuf)
return AVERROR(ENOMEM);
}
if (!c->synth_tile || !c->jpeg_tile ||
c->old_tile_w < c->tile_width ||
c->old_tile_h < c->tile_height) {
c->tile_stride = FFALIGN(c->tile_width * 3, 16);
aligned_height = FFALIGN(c->tile_height, 16);
av_free(c->synth_tile);
av_free(c->jpeg_tile);
av_free(c->kempf_buf);
av_free(c->kempf_flags);
c->synth_tile = av_mallocz(c->tile_stride * aligned_height);
c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height);
c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height
+ FF_INPUT_BUFFER_PADDING_SIZE);
c->kempf_flags = av_mallocz( c->tile_width * aligned_height);
if (!c->synth_tile || !c->jpeg_tile ||
!c->kempf_buf || !c->kempf_flags)
return AVERROR(ENOMEM);
}
return 0;
}
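For the CWE-189 row below, the suspect arithmetic is visible: framebuf_stride, tile_stride and the aligned heights are plain ints, and products such as `c->framebuf_stride * aligned_height` feed av_mallocz() with no overflow check, so large declared dimensions yield an undersized buffer. A standalone illustration with made-up dimensions (the values are mine, not from any sample):

#include <stdio.h>

#define FFALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    int width = 30000, height = 30000;       /* hostile but representable */
    int stride         = FFALIGN(width * 3, 16);
    int aligned_height = FFALIGN(height, 16);

    long long needed   = (long long)stride * aligned_height;
    int       computed = (int)needed;        /* what a 32-bit product yields */

    printf("needed %lld bytes, 32-bit product sees %d\n", needed, computed);
    return 0;
}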
| CWE-189 | 179,098 | 904 | 230706319375448150360832482995209905440 | null | null | null | FFmpeg | 780669ef7c23c00836a24921fcc6b03be2b8ca4a | 1 |
static int get_siz(Jpeg2000DecoderContext *s)
{
int i;
int ncomponents;
uint32_t log2_chroma_wh = 0;
const enum AVPixelFormat *possible_fmts = NULL;
int possible_fmts_nb = 0;
if (bytestream2_get_bytes_left(&s->g) < 36)
return AVERROR_INVALIDDATA;
s->avctx->profile = bytestream2_get_be16u(&s->g); // Rsiz
s->width = bytestream2_get_be32u(&s->g); // Width
s->height = bytestream2_get_be32u(&s->g); // Height
s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz
s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz
s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz
s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz
s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz
s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
ncomponents = bytestream2_get_be16u(&s->g); // CSiz
if (ncomponents <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
s->ncomponents);
return AVERROR_INVALIDDATA;
}
if (ncomponents > 4) {
avpriv_request_sample(s->avctx, "Support for %d components",
s->ncomponents);
return AVERROR_PATCHWELCOME;
}
s->ncomponents = ncomponents;
if (s->tile_width <= 0 || s->tile_height <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid tile dimension %dx%d.\n",
s->tile_width, s->tile_height);
return AVERROR_INVALIDDATA;
}
if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents)
return AVERROR_INVALIDDATA;
for (i = 0; i < s->ncomponents; i++) { // Ssiz_i XRsiz_i, YRsiz_i
uint8_t x = bytestream2_get_byteu(&s->g);
s->cbps[i] = (x & 0x7f) + 1;
s->precision = FFMAX(s->cbps[i], s->precision);
s->sgnd[i] = !!(x & 0x80);
s->cdx[i] = bytestream2_get_byteu(&s->g);
s->cdy[i] = bytestream2_get_byteu(&s->g);
if ( !s->cdx[i] || s->cdx[i] == 3 || s->cdx[i] > 4
|| !s->cdy[i] || s->cdy[i] == 3 || s->cdy[i] > 4) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid sample separation %d/%d\n", s->cdx[i], s->cdy[i]);
return AVERROR_INVALIDDATA;
}
log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2;
}
s->numXtiles = ff_jpeg2000_ceildiv(s->width - s->tile_offset_x, s->tile_width);
s->numYtiles = ff_jpeg2000_ceildiv(s->height - s->tile_offset_y, s->tile_height);
if (s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(*s->tile)) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(EINVAL);
}
s->tile = av_mallocz_array(s->numXtiles * s->numYtiles, sizeof(*s->tile));
if (!s->tile) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(ENOMEM);
}
for (i = 0; i < s->numXtiles * s->numYtiles; i++) {
Jpeg2000Tile *tile = s->tile + i;
tile->comp = av_mallocz(s->ncomponents * sizeof(*tile->comp));
if (!tile->comp)
return AVERROR(ENOMEM);
}
/* compute image size with reduction factor */
s->avctx->width = ff_jpeg2000_ceildivpow2(s->width - s->image_offset_x,
s->reduction_factor);
s->avctx->height = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y,
s->reduction_factor);
if (s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_2K ||
s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_4K) {
possible_fmts = xyz_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(xyz_pix_fmts);
} else {
switch (s->colour_space) {
case 16:
possible_fmts = rgb_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(rgb_pix_fmts);
break;
case 17:
possible_fmts = gray_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(gray_pix_fmts);
break;
case 18:
possible_fmts = yuv_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(yuv_pix_fmts);
break;
default:
possible_fmts = all_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(all_pix_fmts);
break;
}
}
for (i = 0; i < possible_fmts_nb; ++i) {
if (pix_fmt_match(possible_fmts[i], ncomponents, s->precision, log2_chroma_wh, s->pal8)) {
s->avctx->pix_fmt = possible_fmts[i];
break;
}
}
if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
av_log(s->avctx, AV_LOG_ERROR,
"Unknown pix_fmt, profile: %d, colour_space: %d, "
"components: %d, precision: %d, "
"cdx[1]: %d, cdy[1]: %d, cdx[2]: %d, cdy[2]: %d\n",
s->avctx->profile, s->colour_space, ncomponents, s->precision,
ncomponents > 2 ? s->cdx[1] : 0,
ncomponents > 2 ? s->cdy[1] : 0,
ncomponents > 2 ? s->cdx[2] : 0,
ncomponents > 2 ? s->cdy[2] : 0);
}
s->avctx->bits_per_raw_sample = s->precision;
return 0;
}
| CWE-119 | 179,099 | 905 | 85678138236663394668367920105380330110 | null | null | null | FFmpeg | 547d690d676064069d44703a1917e0dab7e33445 | 1 |
static int read_header(FFV1Context *f)
{
uint8_t state[CONTEXT_SIZE];
int i, j, context_count = -1; //-1 to avoid warning
RangeCoder *const c = &f->slice_context[0]->c;
memset(state, 128, sizeof(state));
if (f->version < 2) {
unsigned v= get_symbol(c, state, 0);
if (v >= 2) {
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
return AVERROR_INVALIDDATA;
}
f->version = v;
f->ac = f->avctx->coder_type = get_symbol(c, state, 0);
if (f->ac > 1) {
for (i = 1; i < 256; i++)
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
}
f->colorspace = get_symbol(c, state, 0); //YUV cs type
if (f->version > 0)
f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
f->chroma_planes = get_rac(c, state);
f->chroma_h_shift = get_symbol(c, state, 0);
f->chroma_v_shift = get_symbol(c, state, 0);
f->transparency = get_rac(c, state);
f->plane_count = 2 + f->transparency;
}
if (f->colorspace == 0) {
if (!f->transparency && !f->chroma_planes) {
if (f->avctx->bits_per_raw_sample <= 8)
f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
else
f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
} else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 9) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 10) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
}
} else if (f->colorspace == 1) {
if (f->chroma_h_shift || f->chroma_v_shift) {
av_log(f->avctx, AV_LOG_ERROR,
"chroma subsampling not supported in this colorspace\n");
return AVERROR(ENOSYS);
}
if ( f->avctx->bits_per_raw_sample == 9)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
else if (f->avctx->bits_per_raw_sample == 10)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
else if (f->avctx->bits_per_raw_sample == 12)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
else if (f->avctx->bits_per_raw_sample == 14)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
else
if (f->transparency) f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
else f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
} else {
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
return AVERROR(ENOSYS);
}
av_dlog(f->avctx, "%d %d %d\n",
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
if (f->version < 2) {
context_count = read_quant_tables(c, f->quant_table);
if (context_count < 0) {
av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
return AVERROR_INVALIDDATA;
}
} else if (f->version < 3) {
f->slice_count = get_symbol(c, state, 0);
} else {
const uint8_t *p = c->bytestream_end;
for (f->slice_count = 0;
f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start;
f->slice_count++) {
int trailer = 3 + 5*!!f->ec;
int size = AV_RB24(p-trailer);
if (size + trailer > p - c->bytestream_start)
break;
p -= size + trailer;
}
}
if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0) {
av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
return AVERROR_INVALIDDATA;
}
for (j = 0; j < f->slice_count; j++) {
FFV1Context *fs = f->slice_context[j];
fs->ac = f->ac;
fs->packed_at_lsb = f->packed_at_lsb;
fs->slice_damaged = 0;
if (f->version == 2) {
fs->slice_x = get_symbol(c, state, 0) * f->width ;
fs->slice_y = get_symbol(c, state, 0) * f->height;
fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
fs->slice_x /= f->num_h_slices;
fs->slice_y /= f->num_v_slices;
fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x;
fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
if ((unsigned)fs->slice_width > f->width ||
(unsigned)fs->slice_height > f->height)
return AVERROR_INVALIDDATA;
if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
|| (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
return AVERROR_INVALIDDATA;
}
for (i = 0; i < f->plane_count; i++) {
PlaneContext *const p = &fs->plane[i];
if (f->version == 2) {
int idx = get_symbol(c, state, 0);
if (idx > (unsigned)f->quant_table_count) {
av_log(f->avctx, AV_LOG_ERROR,
"quant_table_index out of range\n");
return AVERROR_INVALIDDATA;
}
p->quant_table_index = idx;
memcpy(p->quant_table, f->quant_tables[idx],
sizeof(p->quant_table));
context_count = f->context_count[idx];
} else {
memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
}
if (f->version <= 2) {
av_assert0(context_count >= 0);
if (p->context_count < context_count) {
av_freep(&p->state);
av_freep(&p->vlc_state);
}
p->context_count = context_count;
}
}
}
return 0;
}
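One defect consistent with the CWE-119 row below is visible at the quant_table_index check: valid indices into f->quant_tables[] are 0 .. quant_table_count-1, yet the guard uses `>` and so admits idx == quant_table_count, after which the memcpy reads one whole table past the array. The one-character sketch of the tightened guard (my reading, not necessarily the complete upstream fix):

if (idx >= (unsigned)f->quant_table_count) {
    av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
    return AVERROR_INVALIDDATA;
}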
| CWE-119 | 179,100 | 906 | 255289524855579606545986031138694050751 | null | null | null | FFmpeg | 3819db745da2ac7fb3faacb116788c32f4753f34 | 1 |
static void rpza_decode_stream(RpzaContext *s)
{
int width = s->avctx->width;
int stride = s->frame.linesize[0] / 2;
int row_inc = stride - 4;
int stream_ptr = 0;
int chunk_size;
unsigned char opcode;
int n_blocks;
unsigned short colorA = 0, colorB;
unsigned short color4[4];
unsigned char index, idx;
unsigned short ta, tb;
unsigned short *pixels = (unsigned short *)s->frame.data[0];
int row_ptr = 0;
int pixel_ptr = 0;
int block_ptr;
int pixel_x, pixel_y;
int total_blocks;
/* First byte is always 0xe1. Warn if it's different */
if (s->buf[stream_ptr] != 0xe1)
av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0xe1\n",
s->buf[stream_ptr]);
    /* Get chunk size, ignoring first byte */
chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
stream_ptr += 4;
/* If length mismatch use size from MOV file and try to decode anyway */
if (chunk_size != s->size)
av_log(s->avctx, AV_LOG_ERROR, "MOV chunk size != encoded chunk size; using MOV chunk size\n");
chunk_size = s->size;
/* Number of 4x4 blocks in frame. */
total_blocks = ((s->avctx->width + 3) / 4) * ((s->avctx->height + 3) / 4);
/* Process chunk data */
while (stream_ptr < chunk_size) {
opcode = s->buf[stream_ptr++]; /* Get opcode */
n_blocks = (opcode & 0x1f) + 1; /* Extract block counter from opcode */
/* If opcode MSbit is 0, we need more data to decide what to do */
if ((opcode & 0x80) == 0) {
colorA = (opcode << 8) | (s->buf[stream_ptr++]);
opcode = 0;
if ((s->buf[stream_ptr] & 0x80) != 0) {
/* Must behave as opcode 110xxxxx, using colorA computed
* above. Use fake opcode 0x20 to enter switch block at
* the right place */
opcode = 0x20;
n_blocks = 1;
}
}
switch (opcode & 0xe0) {
/* Skip blocks */
case 0x80:
while (n_blocks--) {
ADVANCE_BLOCK();
}
break;
/* Fill blocks with one color */
case 0xa0:
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
while (n_blocks--) {
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){
pixels[block_ptr] = colorA;
block_ptr++;
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
}
break;
/* Fill blocks with 4 colors */
case 0xc0:
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
case 0x20:
colorB = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
/* sort out the colors */
color4[0] = colorB;
color4[1] = 0;
color4[2] = 0;
color4[3] = colorA;
/* red components */
ta = (colorA >> 10) & 0x1F;
tb = (colorB >> 10) & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5) << 10;
color4[2] |= ((21 * ta + 11 * tb) >> 5) << 10;
/* green components */
ta = (colorA >> 5) & 0x1F;
tb = (colorB >> 5) & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5) << 5;
color4[2] |= ((21 * ta + 11 * tb) >> 5) << 5;
/* blue components */
ta = colorA & 0x1F;
tb = colorB & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5);
color4[2] |= ((21 * ta + 11 * tb) >> 5);
if (s->size - stream_ptr < n_blocks * 4)
return;
while (n_blocks--) {
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
index = s->buf[stream_ptr++];
for (pixel_x = 0; pixel_x < 4; pixel_x++){
idx = (index >> (2 * (3 - pixel_x))) & 0x03;
pixels[block_ptr] = color4[idx];
block_ptr++;
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
}
break;
/* Fill block with 16 colors */
case 0x00:
if (s->size - stream_ptr < 16)
return;
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){
/* We already have color of upper left pixel */
if ((pixel_y != 0) || (pixel_x !=0)) {
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
}
pixels[block_ptr] = colorA;
block_ptr++;
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
break;
/* Unknown opcode */
default:
av_log(s->avctx, AV_LOG_ERROR, "Unknown opcode %d in rpza chunk."
" Skip remaining %d bytes of chunk data.\n", opcode,
chunk_size - stream_ptr);
return;
} /* Opcode switch */
}
}
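total_blocks is computed above and then never consulted, which lines up with the CWE-119 row below: opcodes can promise more blocks than the frame holds and ADVANCE_BLOCK() walks row_ptr/pixel_ptr past the image. A hedged sketch of a clamp that each opcode's loop would need (my reconstruction; the exact upstream check may differ):

if (total_blocks < n_blocks) {
    av_log(s->avctx, AV_LOG_ERROR, "too many blocks in chunk\n");
    return;                     /* this decoder returns void here */
}
total_blocks -= n_blocks;       /* charge the blocks this opcode consumes */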
| CWE-119 | 179,103 | 909 | 269902084834802765407826520872887191624 | null | null | null | krb5 | c2ccf4197f697c4ff143b8a786acdd875e70a89d | 1 |
setup_server_realm(krb5_principal sprinc)
{
krb5_error_code kret;
kdc_realm_t *newrealm;
kret = 0;
if (kdc_numrealms > 1) {
if (!(newrealm = find_realm_data(sprinc->realm.data,
(krb5_ui_4) sprinc->realm.length)))
kret = ENOENT;
else
kdc_active_realm = newrealm;
}
else
kdc_active_realm = kdc_realmlist[0];
return(kret);
}
| 179,105 | 910 | 321135329376345057543090650245082995208 | null | null | null | | linux | 7314e613d5ff9f0934f7a0f74ed7973b903315d1 | 1 |
static int uio_mmap_physical(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
int mi = uio_find_mem_index(vma);
if (mi < 0)
return -EINVAL;
vma->vm_ops = &uio_physical_vm_ops;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma,
vma->vm_start,
idev->info->mem[mi].addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
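Nothing above compares the requested mapping against the region being mapped, which matches the CWE-119 row below: userspace controls the vma's size, so remap_pfn_range() can expose memory far past mem[mi]. A minimal sketch of the missing bound, assuming the usual struct uio_mem fields (my reconstruction, not the verbatim patch):

struct uio_mem *mem = idev->info->mem + mi;

if (vma->vm_end - vma->vm_start > mem->size)
    return -EINVAL;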
| CWE-119 | 179,106 | 911 | 124769329517591234580599514826095154999 | null | null | null | linux | 7314e613d5ff9f0934f7a0f74ed7973b903315d1 | 1 |
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev;
unsigned int len;
unsigned long start=0, off;
fbdev = to_au1100fb_device(fbi);
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) {
return -EAGAIN;
}
return 0;
}
| CWE-119 | 179,107 | 912 | 177273139877749557163219661985792046530 | null | null | null | linux | 7314e613d5f
f9f0934f7a0f74ed7973b903315d1 | 1 |
static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned int len;
unsigned long start=0, off;
struct au1200fb_device *fbdev = info->par;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
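This handler and the preceding au1100fb one carry the same CWE-119 label and the same open-coded pgoff/length arithmetic, which is easy to get wrong. The kernel later grew a vm_iomap_memory() helper for exactly this pattern; a hedged sketch of what the body collapses to with it (my adaptation, not the driver's actual patch):

static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
    struct au1200fb_device *fbdev = info->par;

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */

    /* vm_iomap_memory() centralizes the offset/size validation that the
     * open-coded version above performs by hand. */
    return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}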
| CWE-119 | 179,108 | 913 | 67752722467221074330046327748380108876 | null | null | null | linux | cf970c002d270c36202bd5b9c2804d3097a52da0 | 1 |
int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *isk = inet_sk(sk);
int family = sk->sk_family;
struct sk_buff *skb;
int copied, err;
pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
err = -EOPNOTSUPP;
if (flags & MSG_OOB)
goto out;
if (flags & MSG_ERRQUEUE) {
if (family == AF_INET) {
return ip_recv_error(sk, msg, len);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
return pingv6_ops.ipv6_recv_error(sk, msg, len);
#endif
}
}
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
/* Don't bother checking the checksum */
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address and add cmsg data. */
if (family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
sin->sin_family = AF_INET;
sin->sin_port = 0 /* skb->h.uh->source */;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
if (isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *ip6 = ipv6_hdr(skb);
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)msg->msg_name;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ip6->saddr;
sin6->sin6_flowinfo = 0;
if (np->sndflow)
sin6->sin6_flowinfo = ip6_flowinfo(ip6);
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
*addr_len = sizeof(*sin6);
if (inet6_sk(sk)->rxopt.all)
pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
#endif
} else {
BUG();
}
err = copied;
done:
skb_free_datagram(sk, skb);
out:
pr_debug("ping_recvmsg -> %d\n", err);
return err;
}
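recvmsg() callers may legitimately pass msg_name == NULL (plain recv()), yet the address fill-in above writes through it unconditionally, a plausible reading of why this record is in the set. A sketch of the guarded AF_INET arm (my reconstruction of the likely fix):

if (msg->msg_name) {
    struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

    sin->sin_family      = AF_INET;
    sin->sin_port        = 0;
    sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
    memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
    *addr_len = sizeof(*sin);
}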
| 179,109 | 914 | 155904653362942953286680221498255008251 | null | null | null | | linux | f856567b930dfcdbc3323261bf77240ccdde01f5 | 1 |
static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
}
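The CWE-264 row below fits what is visible: this compat wrapper forwards straight to aac_compat_do_ioctl() with no privilege check, while raw adapter ioctls are normally gated on CAP_SYS_RAWIO. A sketch of the guard (my reconstruction, not necessarily the verbatim patch):

static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
    struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;

    if (!capable(CAP_SYS_RAWIO))
        return -EPERM;
    return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
}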
| CWE-264 | 179,111 | 915 | 240252914944451615354632176694423264734 | null | null | null | linux | 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 | 1 |
int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
struct qeth_snmp_ureq *ureq;
int req_len;
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
QETH_CARD_TEXT(card, 3, "snmpcmd");
if (card->info.guestlan)
return -EOPNOTSUPP;
if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
(!card->options.layer2)) {
return -EOPNOTSUPP;
}
/* skip 4 bytes (data_len struct member) to get req_len */
if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
return -EFAULT;
ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
if (IS_ERR(ureq)) {
QETH_CARD_TEXT(card, 2, "snmpnome");
return PTR_ERR(ureq);
}
qinfo.udata_len = ureq->hdr.data_len;
qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
if (!qinfo.udata) {
kfree(ureq);
return -ENOMEM;
}
qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
QETH_SNMP_SETADP_CMDLENGTH + req_len);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
qeth_snmp_command_cb, (void *)&qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
QETH_CARD_IFNAME(card), rc);
else {
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
kfree(ureq);
kfree(qinfo.udata);
return rc;
}
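req_len is copied from userspace and then sizes both memdup_user() and a command buffer of fixed length, with no upper bound in sight, which is consistent with the CWE-119 row below. A hedged sketch of the bound (struct and constant names are my assumption from the qeth headers, not verified against this tree; req_len is a plain int, so a negative value needs rejecting too):

if (req_len < 0 ||
    req_len > QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
              sizeof(struct qeth_ipacmd_hdr) -
              sizeof(struct qeth_ipacmd_setadpparms_hdr))
    return -EINVAL;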
| CWE-119 | 179,112 | 916 | 104798081715700399925516111463035480914 | null | null | null | linux | a497e47d4aec37aaf8f13509f3ef3d1f6a717d88 | 1 |
static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
size_t cnt, loff_t *ppos)
{
int r, i;
char *pdata;
char *p;
char *p0;
char *p1;
char *p2;
struct debug_data *d = f->private_data;
pdata = kmalloc(cnt, GFP_KERNEL);
if (pdata == NULL)
return 0;
if (copy_from_user(pdata, buf, cnt)) {
lbs_deb_debugfs("Copy from user failed\n");
kfree(pdata);
return 0;
}
p0 = pdata;
for (i = 0; i < num_of_items; i++) {
do {
p = strstr(p0, d[i].name);
if (p == NULL)
break;
p1 = strchr(p, '\n');
if (p1 == NULL)
break;
p0 = p1++;
p2 = strchr(p, '=');
if (!p2)
break;
p2++;
r = simple_strtoul(p2, NULL, 0);
if (d[i].size == 1)
*((u8 *) d[i].addr) = (u8) r;
else if (d[i].size == 2)
*((u16 *) d[i].addr) = (u16) r;
else if (d[i].size == 4)
*((u32 *) d[i].addr) = (u32) r;
else if (d[i].size == 8)
*((u64 *) d[i].addr) = (u64) r;
break;
} while (1);
}
kfree(pdata);
return (ssize_t)cnt;
}
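Two things stand out against the CWE-189 row below: cnt is caller-chosen, so cnt == 0 makes kmalloc() return the zero-size sentinel, and the buffer is handed to strstr()/strchr() without a terminating NUL, so the parsers read past it. A sketch of the entry hardening (my reconstruction):

if (cnt == 0)
    return 0;

pdata = kzalloc(cnt + 1, GFP_KERNEL);   /* extra byte keeps strstr() in bounds */
if (pdata == NULL)
    return 0;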
| CWE-189 | 179,114 | 917 | 75946839779695435397940604468788358223 | null | null | null | linux | 17d68b763f09a9ce824ae23eb62c9efc57b69271 | 1 |
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
mutex_lock(&kvm->arch.apic_map_lock);
if (!new)
goto out;
new->ldr_bits = 8;
/* flat mode is default */
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
u32 ldr;
if (!kvm_apic_present(vcpu))
continue;
/*
* All APICs have to be configured in the same mode by an OS.
	 * We take advantage of this while building logical id lookup
* table. After reset APICs are in xapic/flat mode, so if we
* find apic with different setting we assume this is the mode
* OS wants all apics to be in; build lookup table accordingly.
*/
if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
new->cid_mask = new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
new->cid_shift = 4;
new->cid_mask = 0xf;
new->lid_mask = 0xf;
}
new->phys_map[kvm_apic_id(apic)] = apic;
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
lid = apic_logical_id(new, ldr);
if (lid)
new->logical_map[cid][ffs(lid) - 1] = apic;
}
out:
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
kfree_rcu(old, rcu);
kvm_vcpu_request_scan_ioapic(kvm);
}
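cid comes out of a guest-writable LDR, and in x2apic mode cid_mask is 0xffff while logical_map[] is a small fixed table, so the store after the cluster lookup can land outside it; that reading matches the CWE-189 row below. Hedged sketch of a bound (my reconstruction; the upstream check may sit inside apic_cluster_id() instead):

if (lid && cid < ARRAY_SIZE(new->logical_map))
    new->logical_map[cid][ffs(lid) - 1] = apic;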
| CWE-189 | 179,115 | 918 | 90650423270339167732341881575553005428 | null | null | null | linux | b963a22e6d1a266a67e9eecc88134713fd54775c | 1 |
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
ktime_t remaining;
s64 ns;
u32 tmcct;
ASSERT(apic != NULL);
/* if initial count is 0, current count should also be 0 */
if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
return 0;
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
if (ktime_to_ns(remaining) < 0)
remaining = ktime_set(0, 0);
ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
tmcct = div64_u64(ns,
(APIC_BUS_CYCLE_NS * apic->divide_count));
return tmcct;
}
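mod_64() divides by lapic_timer.period, which crafted guest state can leave at zero even when TMICT is nonzero; guarding the period is the obvious cure and matches the CWE-189 row below (my reconstruction of the likely fix):

/* Bail out before mod_64() can divide by zero. */
if (apic->lapic_timer.period == 0)
    return 0;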
| CWE-189 | 179,123 | 926 | 154149295278805450375914164750424887763 | null | null | null | polarssl | 1922a4e6aade7b1d685af19d4d9339ddb5c02859 | 1 |
int ssl_parse_certificate( ssl_context *ssl )
{
int ret;
size_t i, n;
SSL_DEBUG_MSG( 2, ( "=> parse certificate" ) );
if( ssl->endpoint == SSL_IS_SERVER &&
ssl->authmode == SSL_VERIFY_NONE )
{
ssl->verify_result = BADCERT_SKIP_VERIFY;
SSL_DEBUG_MSG( 2, ( "<= skip parse certificate" ) );
ssl->state++;
return( 0 );
}
if( ( ret = ssl_read_record( ssl ) ) != 0 )
{
SSL_DEBUG_RET( 1, "ssl_read_record", ret );
return( ret );
}
ssl->state++;
/*
* Check if the client sent an empty certificate
*/
if( ssl->endpoint == SSL_IS_SERVER &&
ssl->minor_ver == SSL_MINOR_VERSION_0 )
{
if( ssl->in_msglen == 2 &&
ssl->in_msgtype == SSL_MSG_ALERT &&
ssl->in_msg[0] == SSL_ALERT_LEVEL_WARNING &&
ssl->in_msg[1] == SSL_ALERT_MSG_NO_CERT )
{
SSL_DEBUG_MSG( 1, ( "SSLv3 client has no certificate" ) );
ssl->verify_result = BADCERT_MISSING;
if( ssl->authmode == SSL_VERIFY_OPTIONAL )
return( 0 );
else
return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE );
}
}
if( ssl->endpoint == SSL_IS_SERVER &&
ssl->minor_ver != SSL_MINOR_VERSION_0 )
{
if( ssl->in_hslen == 7 &&
ssl->in_msgtype == SSL_MSG_HANDSHAKE &&
ssl->in_msg[0] == SSL_HS_CERTIFICATE &&
memcmp( ssl->in_msg + 4, "\0\0\0", 3 ) == 0 )
{
SSL_DEBUG_MSG( 1, ( "TLSv1 client has no certificate" ) );
ssl->verify_result = BADCERT_MISSING;
if( ssl->authmode == SSL_VERIFY_REQUIRED )
return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE );
else
return( 0 );
}
}
if( ssl->in_msgtype != SSL_MSG_HANDSHAKE )
{
SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE );
}
if( ssl->in_msg[0] != SSL_HS_CERTIFICATE || ssl->in_hslen < 10 )
{
SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
}
/*
* Same message structure as in ssl_write_certificate()
*/
n = ( ssl->in_msg[5] << 8 ) | ssl->in_msg[6];
if( ssl->in_msg[4] != 0 || ssl->in_hslen != 7 + n )
{
SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
}
if( ( ssl->session_negotiate->peer_cert = (x509_cert *) malloc(
sizeof( x509_cert ) ) ) == NULL )
{
SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed",
sizeof( x509_cert ) ) );
return( POLARSSL_ERR_SSL_MALLOC_FAILED );
}
memset( ssl->session_negotiate->peer_cert, 0, sizeof( x509_cert ) );
i = 7;
while( i < ssl->in_hslen )
{
if( ssl->in_msg[i] != 0 )
{
SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
}
n = ( (unsigned int) ssl->in_msg[i + 1] << 8 )
| (unsigned int) ssl->in_msg[i + 2];
i += 3;
if( n < 128 || i + n > ssl->in_hslen )
{
SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
}
ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
n );
if( ret != 0 )
{
SSL_DEBUG_RET( 1, " x509parse_crt", ret );
return( ret );
}
i += n;
}
SSL_DEBUG_CRT( 3, "peer certificate", ssl->session_negotiate->peer_cert );
if( ssl->authmode != SSL_VERIFY_NONE )
{
if( ssl->ca_chain == NULL )
{
SSL_DEBUG_MSG( 1, ( "got no CA chain" ) );
return( POLARSSL_ERR_SSL_CA_CHAIN_REQUIRED );
}
ret = x509parse_verify( ssl->session_negotiate->peer_cert,
ssl->ca_chain, ssl->ca_crl,
ssl->peer_cn, &ssl->verify_result,
ssl->f_vrfy, ssl->p_vrfy );
if( ret != 0 )
SSL_DEBUG_RET( 1, "x509_verify_cert", ret );
if( ssl->authmode != SSL_VERIFY_REQUIRED )
ret = 0;
}
SSL_DEBUG_MSG( 2, ( "<= parse certificate" ) );
return( ret );
}
| CWE-20 | 179,126 | 929 | 79433638809905907774865686970222643696 | null | null | null | linux | 7d3e91a89b7adbc2831334def9e494dd9892f9af | 1 |
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
int ret = -ENOMEM, i;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
if (npages > ARRAY_SIZE(pages))
return -ERANGE;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free;
}
/* for decoding across pages */
res.acl_scratch = alloc_page(GFP_KERNEL);
if (!res.acl_scratch)
goto out_free;
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
&msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
/* Handle the case where the passed-in buffer is too short */
if (res.acl_flags & NFS4_ACL_TRUNC) {
/* Did the user only issue a request for the acl length? */
if (buf == NULL)
goto out_ok;
ret = -ERANGE;
goto out_free;
}
nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
if (buf)
_copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
out_ok:
ret = res.acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
if (res.acl_scratch)
__free_page(res.acl_scratch);
return ret;
}
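res.acl_len is decoded from the server's reply and then drives nfs4_write_cached_acl() and _copy_from_pages(), with nothing visible tying it back to what was allocated, which fits the CWE-119 row below. A hedged sketch of the clamp, placed after the RPC returns (my reconstruction; the upstream check may be stricter):

if (res.acl_len + res.acl_data_offset > args.acl_len) {
    ret = -EINVAL;
    goto out_free;
}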
| CWE-119 | 179,128 | 930 | 318227710018918458338702188376392743449 | null | null | null | linux | 04bcef2a83f40c6db24222b27a52892cba39dffb | 1 |
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
int ret;
unsigned char arg[MAX_ARG_LEN];
struct ip_vs_service_user *usvc_compat;
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
struct ip_vs_dest_user *udest_compat;
struct ip_vs_dest_user_kern udest;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (len != set_arglen[SET_CMDID(cmd)]) {
pr_err("set_ctl: len %u != %u\n",
len, set_arglen[SET_CMDID(cmd)]);
return -EINVAL;
}
if (copy_from_user(arg, user, len) != 0)
return -EFAULT;
/* increase the module use count */
ip_vs_use_count_inc();
if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
ret = ip_vs_flush();
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
ret = stop_sync_thread(dm->state);
goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1);
/* We only use the new structs internally, so copy userspace compat
* structs to extended internal versions */
ip_vs_copy_usvc_compat(&usvc, usvc_compat);
ip_vs_copy_udest_compat(&udest, udest_compat);
if (cmd == IP_VS_SO_SET_ZERO) {
/* if no service address is set, zero counters in all */
if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
ret = ip_vs_zero_all();
goto out_unlock;
}
}
/* Check for valid protocol: TCP or UDP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) {
pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
usvc.protocol, &usvc.addr.ip,
ntohs(usvc.port), usvc.sched_name);
ret = -EFAULT;
goto out_unlock;
}
/* Lookup the exact service by <protocol, addr, port> or fwmark */
if (usvc.fwmark == 0)
svc = __ip_vs_service_get(usvc.af, usvc.protocol,
&usvc.addr, usvc.port);
else
svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
ret = -ESRCH;
goto out_unlock;
}
switch (cmd) {
case IP_VS_SO_SET_ADD:
if (svc != NULL)
ret = -EEXIST;
else
ret = ip_vs_add_service(&usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
ret = ip_vs_edit_service(svc, &usvc);
break;
case IP_VS_SO_SET_DEL:
ret = ip_vs_del_service(svc);
if (!ret)
goto out_unlock;
break;
case IP_VS_SO_SET_ZERO:
ret = ip_vs_zero_service(svc);
break;
case IP_VS_SO_SET_ADDDEST:
ret = ip_vs_add_dest(svc, &udest);
break;
case IP_VS_SO_SET_EDITDEST:
ret = ip_vs_edit_dest(svc, &udest);
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
break;
default:
ret = -EINVAL;
}
if (svc)
ip_vs_service_put(svc);
out_unlock:
mutex_unlock(&__ip_vs_mutex);
out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
return ret;
}
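cmd is caller-chosen and SET_CMDID(cmd) indexes set_arglen[] before anything validates it, so a hostile cmd both reads outside the table and lets an oversized len pass the equality test into the on-stack arg[MAX_ARG_LEN]; that fits the CWE-119 row below. A sketch of the entry checks (my reconstruction; the constant names are assumed from the ipvs uapi):

if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
    return -EINVAL;
if (len > MAX_ARG_LEN)
    return -EINVAL;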
| CWE-119 | 179,130 | 931 | 83673314409807091768576788287268941887 | null | null | null | linux | 338c7dbadd2671189cec7faf64c84d01071b3f96 | 1 |
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
int r;
struct kvm_vcpu *vcpu, *v;
vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
r = kvm_arch_vcpu_setup(vcpu);
if (r)
goto vcpu_destroy;
mutex_lock(&kvm->lock);
if (!kvm_vcpu_compatible(vcpu)) {
r = -EINVAL;
goto unlock_vcpu_destroy;
}
if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
r = -EINVAL;
goto unlock_vcpu_destroy;
}
kvm_for_each_vcpu(r, v, kvm)
if (v->vcpu_id == id) {
r = -EEXIST;
goto unlock_vcpu_destroy;
}
BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
/* Now it's all set up, let userspace reach it */
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
if (r < 0) {
kvm_put_kvm(kvm);
goto unlock_vcpu_destroy;
}
kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
smp_wmb();
atomic_inc(&kvm->online_vcpus);
mutex_unlock(&kvm->lock);
kvm_arch_vcpu_postcreate(vcpu);
return r;
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);
return r;
}
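id arrives raw from the ioctl and becomes the vcpu_id; the CWE-20 row below is consistent with it never being range-checked. Sketch of the likely guard at the top of the function (my reconstruction):

if (id >= KVM_MAX_VCPUS)
    return -EINVAL;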
| CWE-20 | 179,131 | 932 | 78071846559468948243618412598588559342 | null | null | null | linux | 0e033e04c2678dbbe74a46b23fffb7bb918c288e | 1 |
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
u8 *packet_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum;
int tnl_hlen;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
if (unlikely(type & ~(SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_GRE |
SKB_GSO_IPIP |
SKB_GSO_SIT |
SKB_GSO_MPLS) ||
!(type & (SKB_GSO_UDP))))
goto out;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
goto out;
}
if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
segs = skb_udp_tunnel_segment(skb, features);
else {
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments.
*/
offset = skb_checksum_start_offset(skb);
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
tnl_hlen = skb_tnl_header_len(skb);
if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
goto out;
}
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
unfrag_ip6hlen + tnl_hlen;
packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
*/
segs = skb_segment(skb, features);
}
out:
return segs;
}
| CWE-189 | 179,132 | 933 | 168959719558504354311532021332122220778 | null | null | null | linux | a8b33654b1e3b0c74d4a1fed041c9aae50b3c427 | 1 |
static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
{
struct serial_icounter_struct icount;
struct sb_uart_icount cnow;
struct sb_uart_port *port = state->port;
spin_lock_irq(&port->lock);
memcpy(&cnow, &port->icount, sizeof(struct sb_uart_icount));
spin_unlock_irq(&port->lock);
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
icount.dcd = cnow.dcd;
icount.rx = cnow.rx;
icount.tx = cnow.tx;
icount.frame = cnow.frame;
icount.overrun = cnow.overrun;
icount.parity = cnow.parity;
icount.brk = cnow.brk;
icount.buf_overrun = cnow.buf_overrun;
return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0;
}
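icount is a stack struct whose named fields are assigned one by one, so compiler padding (and any field the assignments miss) leaks to userspace through copy_to_user(); that is the textbook reading of the CWE-200 row below. The usual one-line cure, sketched:

memset(&icount, 0, sizeof(icount));   /* zero padding before copy_to_user() */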
| CWE-200 | 179,133 | 934 | 46385543661315144532345373974739295516 | null | null | null | linux | 8d1e72250c847fa96498ec029891de4dc638a5ba | 1 |
static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
struct bcm_tarang_data *pTarang = filp->private_data;
void __user *argp = (void __user *)arg;
struct bcm_mini_adapter *Adapter = pTarang->Adapter;
INT Status = STATUS_FAILURE;
int timeout = 0;
struct bcm_ioctl_buffer IoBuffer;
int bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
if (_IOC_TYPE(cmd) != BCM_IOCTL)
return -EFAULT;
if (_IOC_DIR(cmd) & _IOC_READ)
Status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
else if (_IOC_DIR(cmd) & _IOC_WRITE)
Status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE))
Status = STATUS_SUCCESS;
if (Status)
return -EFAULT;
if (Adapter->device_removed)
return -EFAULT;
if (FALSE == Adapter->fw_download_done) {
switch (cmd) {
case IOCTL_MAC_ADDR_REQ:
case IOCTL_LINK_REQ:
case IOCTL_CM_REQUEST:
case IOCTL_SS_INFO_REQ:
case IOCTL_SEND_CONTROL_MESSAGE:
case IOCTL_IDLE_REQ:
case IOCTL_BCM_GPIO_SET_REQUEST:
case IOCTL_BCM_GPIO_STATUS_REQUEST:
return -EACCES;
default:
break;
}
}
Status = vendorextnIoctl(Adapter, cmd, arg);
if (Status != CONTINUE_COMMON_PATH)
return Status;
switch (cmd) {
/* Rdms for Swin Idle... */
case IOCTL_BCM_REGISTER_READ_PRIVATE: {
struct bcm_rdm_buffer sRdmBuffer = {0};
PCHAR temp_buff;
UINT Bufflen;
u16 temp_value;
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(sRdmBuffer))
return -EINVAL;
if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
if (IoBuffer.OutputLength > USHRT_MAX ||
IoBuffer.OutputLength == 0) {
return -EINVAL;
}
Bufflen = IoBuffer.OutputLength;
temp_value = 4 - (Bufflen % 4);
Bufflen += temp_value % 4;
temp_buff = kmalloc(Bufflen, GFP_KERNEL);
if (!temp_buff)
return -ENOMEM;
bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
(PUINT)temp_buff, Bufflen);
if (bytes > 0) {
Status = STATUS_SUCCESS;
if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
kfree(temp_buff);
return -EFAULT;
}
} else {
Status = bytes;
}
kfree(temp_buff);
break;
}
case IOCTL_BCM_REGISTER_WRITE_PRIVATE: {
struct bcm_wrm_buffer sWrmBuffer = {0};
UINT uiTempVar = 0;
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(sWrmBuffer))
return -EINVAL;
/* Get WrmBuffer structure */
if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
((uiTempVar == EEPROM_REJECT_REG_1) ||
(uiTempVar == EEPROM_REJECT_REG_2) ||
(uiTempVar == EEPROM_REJECT_REG_3) ||
(uiTempVar == EEPROM_REJECT_REG_4))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
return -EFAULT;
}
Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
(PUINT)sWrmBuffer.Data, sizeof(ULONG));
if (Status == STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
Status = -EFAULT;
}
break;
}
case IOCTL_BCM_REGISTER_READ:
case IOCTL_BCM_EEPROM_REGISTER_READ: {
struct bcm_rdm_buffer sRdmBuffer = {0};
PCHAR temp_buff = NULL;
UINT uiTempVar = 0;
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n");
return -EACCES;
}
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(sRdmBuffer))
return -EINVAL;
if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
if (IoBuffer.OutputLength > USHRT_MAX ||
IoBuffer.OutputLength == 0) {
return -EINVAL;
}
temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
if (!temp_buff)
return STATUS_FAILURE;
if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
((ULONG)sRdmBuffer.Register & 0x3)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n",
(int)sRdmBuffer.Register);
kfree(temp_buff);
return -EINVAL;
}
uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
if (bytes > 0) {
Status = STATUS_SUCCESS;
if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
kfree(temp_buff);
return -EFAULT;
}
} else {
Status = bytes;
}
kfree(temp_buff);
break;
}
case IOCTL_BCM_REGISTER_WRITE:
case IOCTL_BCM_EEPROM_REGISTER_WRITE: {
struct bcm_wrm_buffer sWrmBuffer = {0};
UINT uiTempVar = 0;
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n");
return -EACCES;
}
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(sWrmBuffer))
return -EINVAL;
/* Get WrmBuffer structure */
if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
((ULONG)sWrmBuffer.Register & 0x3)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)sWrmBuffer.Register);
return -EINVAL;
}
uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
((uiTempVar == EEPROM_REJECT_REG_1) ||
(uiTempVar == EEPROM_REJECT_REG_2) ||
(uiTempVar == EEPROM_REJECT_REG_3) ||
(uiTempVar == EEPROM_REJECT_REG_4)) &&
(cmd == IOCTL_BCM_REGISTER_WRITE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
return -EFAULT;
}
Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
(PUINT)sWrmBuffer.Data, sWrmBuffer.Length);
if (Status == STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
Status = -EFAULT;
}
break;
}
case IOCTL_BCM_GPIO_SET_REQUEST: {
UCHAR ucResetValue[4];
UINT value = 0;
UINT uiBit = 0;
UINT uiOperation = 0;
struct bcm_gpio_info gpio_info = {0};
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode");
return -EACCES;
}
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(gpio_info))
return -EINVAL;
if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
uiBit = gpio_info.uiGpioNumber;
uiOperation = gpio_info.uiGpioValue;
value = (1<<uiBit);
if (IsReqGpioIsLedInNVM(Adapter, value) == FALSE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!", value);
Status = -EINVAL;
break;
}
/* Set - setting 1 */
if (uiOperation) {
/* Set the gpio output register */
Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, (PUINT)(&value), sizeof(UINT));
if (Status == STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to set the %dth GPIO\n", uiBit);
break;
}
} else {
/* Set the gpio output register */
Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)(&value), sizeof(UINT));
if (Status == STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to clear the %dth GPIO\n", uiBit);
break;
}
}
bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
if (bytes < 0) {
Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"GPIO_MODE_REGISTER read failed");
break;
} else {
Status = STATUS_SUCCESS;
}
/* Set the gpio mode register to output */
*(UINT *)ucResetValue |= (1<<uiBit);
Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER,
(PUINT)ucResetValue, sizeof(UINT));
if (Status == STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO to output Mode\n");
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to put GPIO in Output Mode\n");
break;
}
}
break;
case BCM_LED_THREAD_STATE_CHANGE_REQ: {
struct bcm_user_thread_req threadReq = {0};
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "User made LED thread InActive");
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode");
Status = -EACCES;
break;
}
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(threadReq))
return -EINVAL;
if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
/* if LED thread is running(Actively or Inactively) set it state to make inactive */
if (Adapter->LEDInfo.led_thread_running) {
if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Activating thread req");
Adapter->DriverState = LED_THREAD_ACTIVE;
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DeActivating Thread req.....");
Adapter->DriverState = LED_THREAD_INACTIVE;
}
/* signal thread. */
wake_up(&Adapter->LEDInfo.notify_led_event);
}
}
break;
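/*
 * IOCTL_BCM_GPIO_STATUS_REQUEST: as written, this reads
 * GPIO_PIN_STATE_REGISTER into ucRead but never copies the value back
 * to user space, so the caller only learns whether the read succeeded.
 */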
case IOCTL_BCM_GPIO_STATUS_REQUEST: {
ULONG uiBit = 0;
UCHAR ucRead[4];
struct bcm_gpio_info gpio_info = {0};
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE))
return -EACCES;
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(gpio_info))
return -EINVAL;
if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
uiBit = gpio_info.uiGpioNumber;
/* Read the gpio pin state register */
bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
(PUINT)ucRead, sizeof(UINT));
if (bytes < 0) {
Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n");
return Status;
} else {
Status = STATUS_SUCCESS;
}
}
break;
case IOCTL_BCM_GPIO_MULTI_REQUEST: {
UCHAR ucResetValue[4];
struct bcm_gpio_multi_info gpio_multi_info[MAX_IDX];
struct bcm_gpio_multi_info *pgpio_multi_info = (struct bcm_gpio_multi_info *)gpio_multi_info;
memset(pgpio_multi_info, 0, MAX_IDX * sizeof(struct bcm_gpio_multi_info));
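/*
 * Only IoBuffer.InputLength bytes of the user's gpio_multi_info are
 * copied in; the array was zeroed above, so a short request leaves the
 * remaining fields (and any missing index) as zeros.
 */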
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE))
return -EINVAL;
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(gpio_multi_info))
return -EINVAL;
if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == FALSE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
pgpio_multi_info[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
Status = -EINVAL;
break;
}
/* Set the gpio output register */
if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
(pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) {
/* Set 1's in GPIO OUTPUT REGISTER */
*(UINT *)ucResetValue = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
if (*(UINT *) ucResetValue)
Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG,
(PUINT)ucResetValue, sizeof(ULONG));
if (Status != STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
return Status;
}
/* Clear to 0's in GPIO OUTPUT REGISTER */
*(UINT *)ucResetValue = (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
(~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
if (*(UINT *) ucResetValue)
Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue, sizeof(ULONG));
if (Status != STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
return Status;
}
}
if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
if (bytes < 0) {
Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed.");
return Status;
} else {
Status = STATUS_SUCCESS;
}
pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
}
Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_info, IoBuffer.OutputLength);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Failed while copying Content to IOBufer for user space err:%d", Status);
return -EFAULT;
}
}
break;
case IOCTL_BCM_GPIO_MODE_REQUEST: {
UCHAR ucResetValue[4];
struct bcm_gpio_multi_mode gpio_multi_mode[MAX_IDX];
struct bcm_gpio_multi_mode *pgpio_multi_mode = (struct bcm_gpio_multi_mode *)gpio_multi_mode;
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE))
return -EINVAL;
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
return -EINVAL;
if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
if (bytes < 0) {
Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read of GPIO_MODE_REGISTER failed");
return Status;
} else {
Status = STATUS_SUCCESS;
}
/* Validating the request */
if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == FALSE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
pgpio_multi_mode[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
Status = -EINVAL;
break;
}
if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) {
/* write all OUT's (1's) */
*(UINT *) ucResetValue |= (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
/* write all IN's (0's) */
*(UINT *) ucResetValue &= ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
/* Currently implemented return the modes of all GPIO's
* else needs to bit AND with mask
*/
pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(ULONG));
if (Status == STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"WRM to GPIO_MODE_REGISTER Done");
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"WRM to GPIO_MODE_REGISTER Failed");
Status = -EFAULT;
break;
}
} else {
/* if uiGPIOMask is 0 then return mode register configuration */
pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
}
Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_mode, IoBuffer.OutputLength);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Failed while copying Content to IOBufer for user space err:%d", Status);
return -EFAULT;
}
}
break;
case IOCTL_MAC_ADDR_REQ:
case IOCTL_LINK_REQ:
case IOCTL_CM_REQUEST:
case IOCTL_SS_INFO_REQ:
case IOCTL_SEND_CONTROL_MESSAGE:
case IOCTL_IDLE_REQ: {
PVOID pvBuffer = NULL;
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength < sizeof(struct bcm_link_request))
return -EINVAL;
if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
return -EINVAL;
pvBuffer = memdup_user(IoBuffer.InputBuffer,
IoBuffer.InputLength);
if (IS_ERR(pvBuffer))
return PTR_ERR(pvBuffer);
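/*
 * The control-message length was bounded above to
 * [sizeof(struct bcm_link_request), MAX_CNTL_PKT_SIZE] before
 * memdup_user(), so the code below works on a fully validated
 * kernel copy of the user buffer.
 */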
down(&Adapter->LowPowerModeSync);
Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
!Adapter->bPreparingForLowPowerMode,
(1 * HZ));
if (Status == -ERESTARTSYS)
goto cntrlEnd;
if (Adapter->bPreparingForLowPowerMode) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"Preparing Idle Mode is still True - Hence Rejecting control message\n");
Status = STATUS_FAILURE;
goto cntrlEnd;
}
Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
cntrlEnd:
up(&Adapter->LowPowerModeSync);
kfree(pvBuffer);
break;
}
case IOCTL_BCM_BUFFER_DOWNLOAD_START: {
if (down_trylock(&Adapter->NVMRdmWrmLock)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
return -EACCES;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Starting the firmware download PID =0x%x!!!!\n", current->pid);
if (down_trylock(&Adapter->fw_download_sema))
return -EBUSY;
Adapter->bBinDownloaded = FALSE;
Adapter->fw_download_process_pid = current->pid;
Adapter->bCfgDownloaded = FALSE;
Adapter->fw_download_done = FALSE;
netif_carrier_off(Adapter->dev);
netif_stop_queue(Adapter->dev);
Status = reset_card_proc(Adapter);
if (Status) {
pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
return Status;
}
mdelay(10);
up(&Adapter->NVMRdmWrmLock);
return Status;
}
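/*
 * Firmware download protocol: DOWNLOAD_START takes fw_download_sema
 * and holds it across the whole sequence. BUFFER_DOWNLOAD below
 * therefore expects the semaphore to already be held (a successful
 * down_trylock means START was never issued), and DOWNLOAD_STOP is
 * what finally releases it.
 */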
case IOCTL_BCM_BUFFER_DOWNLOAD: {
struct bcm_firmware_info *psFwInfo = NULL;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
if (!down_trylock(&Adapter->fw_download_sema)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Invalid way to download buffer. Use Start and then call this!!!\n");
up(&Adapter->fw_download_sema);
Status = -EINVAL;
return Status;
}
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
up(&Adapter->fw_download_sema);
return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Length for FW DLD is : %lx\n", IoBuffer.InputLength);
if (IoBuffer.InputLength > sizeof(struct bcm_firmware_info)) {
up(&Adapter->fw_download_sema);
return -EINVAL;
}
psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
if (!psFwInfo) {
up(&Adapter->fw_download_sema);
return -ENOMEM;
}
if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
up(&Adapter->fw_download_sema);
kfree(psFwInfo);
return -EFAULT;
}
if (!psFwInfo->pvMappedFirmwareAddress ||
(psFwInfo->u32FirmwareLength == 0)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
psFwInfo->u32FirmwareLength);
up(&Adapter->fw_download_sema);
kfree(psFwInfo);
Status = -EINVAL;
return Status;
}
Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
if (Status != STATUS_SUCCESS) {
if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
else
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
/* up(&Adapter->fw_download_sema); */
if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = DRIVER_INIT;
Adapter->LEDInfo.bLedInitDone = FALSE;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
}
if (Status != STATUS_SUCCESS)
up(&Adapter->fw_download_sema);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
kfree(psFwInfo);
return Status;
}
case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: {
if (!down_trylock(&Adapter->fw_download_sema)) {
up(&Adapter->fw_download_sema);
return -EINVAL;
}
if (down_trylock(&Adapter->NVMRdmWrmLock)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"FW download blocked as EEPROM Read/Write is in progress\n");
up(&Adapter->fw_download_sema);
return -EACCES;
}
Adapter->bBinDownloaded = TRUE;
Adapter->bCfgDownloaded = TRUE;
atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
Adapter->CurrNumRecvDescs = 0;
Adapter->downloadDDR = 0;
/* setting the Mips to Run */
Status = run_card_proc(Adapter);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
return Status;
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
DBG_LVL_ALL, "Firm Download Over...\n");
}
mdelay(10);
/* Wait for MailBox Interrupt */
if (StartInterruptUrb((struct bcm_interface_adapter *)Adapter->pvInterfaceAdapter))
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
timeout = 5*HZ;
Adapter->waiting_to_fw_download_done = FALSE;
wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
Adapter->waiting_to_fw_download_done, timeout);
Adapter->fw_download_process_pid = INVALID_PID;
Adapter->fw_download_done = TRUE;
atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
Adapter->CurrNumRecvDescs = 0;
Adapter->PrevNumRecvDescs = 0;
atomic_set(&Adapter->cntrlpktCnt, 0);
Adapter->LinkUpStatus = 0;
Adapter->LinkStatus = 0;
if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = FW_DOWNLOAD_DONE;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
if (!timeout)
Status = -ENODEV;
up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
return Status;
}
case IOCTL_BE_BUCKET_SIZE:
Status = 0;
if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
Status = -EFAULT;
break;
case IOCTL_RTPS_BUCKET_SIZE:
Status = 0;
if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
Status = -EFAULT;
break;
case IOCTL_CHIP_RESET: {
INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
if (NVMAccess) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
return -EACCES;
}
down(&Adapter->RxAppControlQueuelock);
Status = reset_card_proc(Adapter);
flushAllAppQ();
up(&Adapter->RxAppControlQueuelock);
up(&Adapter->NVMRdmWrmLock);
ResetCounters(Adapter);
break;
}
case IOCTL_QOS_THRESHOLD: {
USHORT uiLoopIndex;
Status = 0;
for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
(unsigned long __user *)arg)) {
Status = -EFAULT;
break;
}
}
break;
}
case IOCTL_DUMP_PACKET_INFO:
DumpPackInfo(Adapter);
DumpPhsRules(&Adapter->stBCMPhsContext);
Status = STATUS_SUCCESS;
break;
case IOCTL_GET_PACK_INFO:
if (copy_to_user(argp, &Adapter->PackInfo, sizeof(struct bcm_packet_info)*NO_OF_QUEUES))
return -EFAULT;
Status = STATUS_SUCCESS;
break;
case IOCTL_BCM_SWITCH_TRANSFER_MODE: {
UINT uiData = 0;
if (copy_from_user(&uiData, argp, sizeof(UINT)))
return -EFAULT;
if (uiData) {
/* Allow All Packets */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
Adapter->TransferMode = ETH_PACKET_TUNNELING_MODE;
} else {
/* Allow IP only Packets */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
Adapter->TransferMode = IP_PACKET_ONLY_MODE;
}
Status = STATUS_SUCCESS;
break;
}
case IOCTL_BCM_GET_DRIVER_VERSION: {
ulong len;
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
len = min_t(ulong, IoBuffer.OutputLength, strlen(DRV_VERSION) + 1);
if (copy_to_user(IoBuffer.OutputBuffer, DRV_VERSION, len))
return -EFAULT;
Status = STATUS_SUCCESS;
break;
}
case IOCTL_BCM_GET_CURRENT_STATUS: {
struct bcm_link_state link_state;
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user failed..\n");
return -EFAULT;
}
if (IoBuffer.OutputLength != sizeof(link_state)) {
Status = -EINVAL;
break;
}
memset(&link_state, 0, sizeof(link_state));
link_state.bIdleMode = Adapter->IdleMode;
link_state.bShutdownMode = Adapter->bShutStatus;
link_state.ucLinkStatus = Adapter->LinkStatus;
if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t, sizeof(link_state), IoBuffer.OutputLength))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
return -EFAULT;
}
Status = STATUS_SUCCESS;
break;
}
case IOCTL_BCM_SET_MAC_TRACING: {
UINT tracing_flag;
/* copy ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (copy_from_user(&tracing_flag, IoBuffer.InputBuffer, sizeof(UINT)))
return -EFAULT;
if (tracing_flag)
Adapter->pTarangs->MacTracingEnabled = TRUE;
else
Adapter->pTarangs->MacTracingEnabled = FALSE;
break;
}
case IOCTL_BCM_GET_DSX_INDICATION: {
ULONG ulSFId = 0;
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.OutputLength < sizeof(struct bcm_add_indication_alt)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Mismatch req: %lx needed is =0x%zx!!!",
IoBuffer.OutputLength, sizeof(struct bcm_add_indication_alt));
return -EINVAL;
}
if (copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
return -EFAULT;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Get DSX Data SF ID is =%lx\n", ulSFId);
get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
Status = STATUS_SUCCESS;
}
break;
case IOCTL_BCM_GET_HOST_MIBS: {
PVOID temp_buff;
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.OutputLength != sizeof(struct bcm_host_stats_mibs)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Length Check failed %lu %zd\n",
IoBuffer.OutputLength, sizeof(struct bcm_host_stats_mibs));
return -EINVAL;
}
/* FIXME: HOST_STATS are too big for kmalloc (122048)! */
temp_buff = kzalloc(sizeof(struct bcm_host_stats_mibs), GFP_KERNEL);
if (!temp_buff)
return STATUS_FAILURE;
Status = ProcessGetHostMibs(Adapter, temp_buff);
GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
if (Status != STATUS_FAILURE)
if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(struct bcm_host_stats_mibs))) {
kfree(temp_buff);
return -EFAULT;
}
kfree(temp_buff);
break;
}
case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
if ((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode) && (TRUE == Adapter->IdleMode)) {
Adapter->usIdleModePattern = ABORT_IDLE_MODE;
Adapter->bWakeUpDevice = TRUE;
wake_up(&Adapter->process_rx_cntrlpkt);
}
Status = STATUS_SUCCESS;
break;
case IOCTL_BCM_BULK_WRM: {
struct bcm_bulk_wrm_buffer *pBulkBuffer;
UINT uiTempVar = 0;
PCHAR pvBuffer = NULL;
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle/Shutdown Mode, Blocking Wrms\n");
Status = -EACCES;
break;
}
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.InputLength < sizeof(ULONG) * 2)
return -EINVAL;
pvBuffer = memdup_user(IoBuffer.InputBuffer,
IoBuffer.InputLength);
if (IS_ERR(pvBuffer))
return PTR_ERR(pvBuffer);
pBulkBuffer = (struct bcm_bulk_wrm_buffer *)pvBuffer;
if (((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
((ULONG)pBulkBuffer->Register & 0x3)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM on invalid address: %x. Access denied.\n", (int)pBulkBuffer->Register);
kfree(pvBuffer);
Status = -EINVAL;
break;
}
uiTempVar = pBulkBuffer->Register & EEPROM_REJECT_MASK;
if (!((Adapter->pstargetparams->m_u32Customize)&VSG_MODE) &&
((uiTempVar == EEPROM_REJECT_REG_1) ||
(uiTempVar == EEPROM_REJECT_REG_2) ||
(uiTempVar == EEPROM_REJECT_REG_3) ||
(uiTempVar == EEPROM_REJECT_REG_4)) &&
(cmd == IOCTL_BCM_REGISTER_WRITE)) {
kfree(pvBuffer);
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
Status = -EFAULT;
break;
}
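/*
 * Note: cmd is IOCTL_BCM_BULK_WRM inside this case, so the
 * IOCTL_BCM_REGISTER_WRITE comparison above can never be true and the
 * EEPROM-reject branch is dead code as written.
 */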
if (pBulkBuffer->SwapEndian == FALSE)
Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register, (PCHAR)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
else
Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register, (PUINT)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
if (Status != STATUS_SUCCESS)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
kfree(pvBuffer);
break;
}
case IOCTL_BCM_GET_NVM_SIZE:
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH) {
if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize, sizeof(UINT)))
return -EFAULT;
}
Status = STATUS_SUCCESS;
break;
case IOCTL_BCM_CAL_INIT: {
UINT uiSectorSize = 0;
if (Adapter->eNVMType == NVM_FLASH) {
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer, sizeof(UINT)))
return -EFAULT;
if ((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) {
if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize,
sizeof(UINT)))
return -EFAULT;
} else {
if (IsFlash2x(Adapter)) {
if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize, sizeof(UINT)))
return -EFAULT;
} else {
if ((TRUE == Adapter->bShutStatus) || (TRUE == Adapter->IdleMode)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is in Idle/Shutdown Mode\n");
return -EACCES;
}
Adapter->uiSectorSize = uiSectorSize;
BcmUpdateSectorSize(Adapter, Adapter->uiSectorSize);
}
}
Status = STATUS_SUCCESS;
} else {
Status = STATUS_FAILURE;
}
}
break;
case IOCTL_BCM_SET_DEBUG:
#ifdef DEBUG
{
struct bcm_user_debug_state sUserDebugState;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n");
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(struct bcm_user_debug_state)))
return -EFAULT;
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
sUserDebugState.OnOff, sUserDebugState.Type);
/* sUserDebugState.Subtype <<= 1; */
sUserDebugState.Subtype = 1 << sUserDebugState.Subtype;
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "actual Subtype=0x%x\n", sUserDebugState.Subtype);
/* Update new 'DebugState' in the Adapter */
Adapter->stDebugState.type |= sUserDebugState.Type;
/* Subtype: A bitmap of 32 bits for Subtype per Type.
* Valid indexes in 'subtype' array: 1,2,4,8
* corresponding to valid Type values. Hence we can use the 'Type' field
* as the index value, ignoring the array entries 0,3,5,6,7 !
*/
if (sUserDebugState.OnOff)
Adapter->stDebugState.subtype[sUserDebugState.Type] |= sUserDebugState.Subtype;
else
Adapter->stDebugState.subtype[sUserDebugState.Type] &= ~sUserDebugState.Subtype;
BCM_SHOW_DEBUG_BITMAP(Adapter);
}
#endif
break;
case IOCTL_BCM_NVM_READ:
case IOCTL_BCM_NVM_WRITE: {
struct bcm_nvm_readwrite stNVMReadWrite;
PUCHAR pReadData = NULL;
ULONG ulDSDMagicNumInUsrBuff = 0;
struct timeval tv0, tv1;
memset(&tv0, 0, sizeof(struct timeval));
memset(&tv1, 0, sizeof(struct timeval));
if ((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
return -EFAULT;
}
if (IsFlash2x(Adapter)) {
if ((Adapter->eActiveDSD != DSD0) &&
(Adapter->eActiveDSD != DSD1) &&
(Adapter->eActiveDSD != DSD2)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No DSD is active, hence NVM command is blocked");
return STATUS_FAILURE;
}
}
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (copy_from_user(&stNVMReadWrite,
(IOCTL_BCM_NVM_READ == cmd) ? IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
sizeof(struct bcm_nvm_readwrite)))
return -EFAULT;
/*
* Deny the access if the offset crosses the cal area limit.
*/
if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
return STATUS_FAILURE;
if (stNVMReadWrite.uiOffset > Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes) {
/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
return STATUS_FAILURE;
}
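/*
 * The bounds checks above are ordered to avoid integer overflow:
 * uiNumBytes is first capped by the DSD size, then uiOffset is
 * compared against (size - uiNumBytes), which cannot wrap.
 */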
pReadData = memdup_user(stNVMReadWrite.pBuffer,
stNVMReadWrite.uiNumBytes);
if (IS_ERR(pReadData))
return PTR_ERR(pReadData);
do_gettimeofday(&tv0);
if (IOCTL_BCM_NVM_READ == cmd) {
down(&Adapter->NVMRdmWrmLock);
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
kfree(pReadData);
return -EACCES;
}
Status = BeceemNVMRead(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes);
up(&Adapter->NVMRdmWrmLock);
if (Status != STATUS_SUCCESS) {
kfree(pReadData);
return Status;
}
if (copy_to_user(stNVMReadWrite.pBuffer, pReadData, stNVMReadWrite.uiNumBytes)) {
kfree(pReadData);
return -EFAULT;
}
} else {
down(&Adapter->NVMRdmWrmLock);
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
kfree(pReadData);
return -EACCES;
}
Adapter->bHeaderChangeAllowed = TRUE;
if (IsFlash2x(Adapter)) {
/*
 * New requirement:
 * A DSD section update is allowed in two cases:
 * 1. The DSD signature is present in the DSD header, meaning the
 *    dongle is healthy and the update is worthwhile.
 * 2. If point 1 fails, the user buffer must itself carry the DSD
 *    signature. This ensures that if the dongle is corrupted, the
 *    user-space program first rewrites the DSD header with a valid
 *    signature, so that this and any further writes are worthwhile.
 *
 * This restriction assumes that if the DSD signature is corrupted,
 * the DSD data is not considered valid.
 */
Status = BcmFlash2xCorruptSig(Adapter, Adapter->eActiveDSD);
if (Status != STATUS_SUCCESS) {
if (((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) != Adapter->uiNVMDSDSize) ||
(stNVMReadWrite.uiNumBytes < SIGNATURE_SIZE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor in the user-provided input..");
up(&Adapter->NVMRdmWrmLock);
kfree(pReadData);
return Status;
}
ulDSDMagicNumInUsrBuff = ntohl(*(PUINT)(pReadData + stNVMReadWrite.uiNumBytes - SIGNATURE_SIZE));
if (ulDSDMagicNumInUsrBuff != DSD_IMAGE_MAGIC_NUMBER) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor in the user-provided input..");
up(&Adapter->NVMRdmWrmLock);
kfree(pReadData);
return Status;
}
}
}
Status = BeceemNVMWrite(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes, stNVMReadWrite.bVerify);
if (IsFlash2x(Adapter))
BcmFlash2xWriteSig(Adapter, Adapter->eActiveDSD);
Adapter->bHeaderChangeAllowed = FALSE;
up(&Adapter->NVMRdmWrmLock);
if (Status != STATUS_SUCCESS) {
kfree(pReadData);
return Status;
}
}
do_gettimeofday(&tv1);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n", (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
kfree(pReadData);
return STATUS_SUCCESS;
}
case IOCTL_BCM_FLASH2X_SECTION_READ: {
struct bcm_flash2x_readwrite sFlash2xRead = {0};
PUCHAR pReadBuff = NULL;
UINT NOB = 0;
UINT BuffSize = 0;
UINT ReadBytes = 0;
UINT ReadOffset = 0;
void __user *OutPutBuff;
if (IsFlash2x(Adapter) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
return -EINVAL;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
/* Reading FLASH 2.x READ structure */
if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_readwrite)))
return -EFAULT;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xRead.Section);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%x", sFlash2xRead.offset);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xRead.numOfBytes);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xRead.bVerify);
/* This was internal to the driver for raw reads; it has now been exposed to the user-space application. */
if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == FALSE)
return STATUS_FAILURE;
NOB = sFlash2xRead.numOfBytes;
if (NOB > Adapter->uiSectorSize)
BuffSize = Adapter->uiSectorSize;
else
BuffSize = NOB;
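/*
 * Reads are chunked at the flash sector size: the bounce buffer is
 * sized to min(numOfBytes, uiSectorSize) and the loop below walks the
 * request one sector (or less) at a time.
 */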
ReadOffset = sFlash2xRead.offset;
OutPutBuff = IoBuffer.OutputBuffer;
pReadBuff = kzalloc(BuffSize, GFP_KERNEL);
if (pReadBuff == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure");
return -ENOMEM;
}
down(&Adapter->NVMRdmWrmLock);
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
kfree(pReadBuff);
return -EACCES;
}
while (NOB) {
if (NOB > Adapter->uiSectorSize)
ReadBytes = Adapter->uiSectorSize;
else
ReadBytes = NOB;
/* Reading the data from Flash 2.x */
Status = BcmFlash2xBulkRead(Adapter, (PUINT)pReadBuff, sFlash2xRead.Section, ReadOffset, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Flash 2x read err with Status :%d", Status);
break;
}
BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes);
Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy to user failed with status :%d", Status);
up(&Adapter->NVMRdmWrmLock);
kfree(pReadBuff);
return -EFAULT;
}
NOB = NOB - ReadBytes;
if (NOB) {
ReadOffset = ReadOffset + ReadBytes;
OutPutBuff = OutPutBuff + ReadBytes;
}
}
up(&Adapter->NVMRdmWrmLock);
kfree(pReadBuff);
}
break;
case IOCTL_BCM_FLASH2X_SECTION_WRITE: {
struct bcm_flash2x_readwrite sFlash2xWrite = {0};
PUCHAR pWriteBuff;
void __user *InputAddr;
UINT NOB = 0;
UINT BuffSize = 0;
UINT WriteOffset = 0;
UINT WriteBytes = 0;
if (IsFlash2x(Adapter) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
return -EINVAL;
}
/* First make this False so that we can enable the Sector Permission Check in BeceemFlashBulkWrite */
Adapter->bAllDSDWriteAllow = FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
/* Reading FLASH 2.x READ structure */
if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_readwrite)))
return -EFAULT;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xWrite.Section :%x", sFlash2xWrite.Section);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xWrite.offset :%d", sFlash2xWrite.offset);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xWrite.numOfBytes :%x", sFlash2xWrite.numOfBytes);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xWrite.bVerify :%x\n", sFlash2xWrite.bVerify);
if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) && (sFlash2xWrite.Section != VSA2)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Only VSA write is allowed");
return -EINVAL;
}
if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == FALSE)
return STATUS_FAILURE;
InputAddr = sFlash2xWrite.pDataBuff;
WriteOffset = sFlash2xWrite.offset;
NOB = sFlash2xWrite.numOfBytes;
if (NOB > Adapter->uiSectorSize)
BuffSize = Adapter->uiSectorSize;
else
BuffSize = NOB;
pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
if (pWriteBuff == NULL)
return -ENOMEM;
/* extracting the remainder of the given offset. */
WriteBytes = Adapter->uiSectorSize;
if (WriteOffset % Adapter->uiSectorSize)
WriteBytes = Adapter->uiSectorSize - (WriteOffset % Adapter->uiSectorSize);
if (NOB < WriteBytes)
WriteBytes = NOB;
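/*
 * First-chunk alignment: if the write offset is not sector-aligned,
 * WriteBytes is trimmed so the first write ends exactly on a sector
 * boundary; later iterations then proceed in whole sectors.
 */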
down(&Adapter->NVMRdmWrmLock);
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
kfree(pWriteBuff);
return -EACCES;
}
BcmFlash2xCorruptSig(Adapter, sFlash2xWrite.Section);
do {
Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status);
up(&Adapter->NVMRdmWrmLock);
kfree(pWriteBuff);
return -EFAULT;
}
BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
/* Writing the data from Flash 2.x */
Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pWriteBuff, sFlash2xWrite.Section, WriteOffset, WriteBytes, sFlash2xWrite.bVerify);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status);
break;
}
NOB = NOB - WriteBytes;
if (NOB) {
WriteOffset = WriteOffset + WriteBytes;
InputAddr = InputAddr + WriteBytes;
if (NOB > Adapter->uiSectorSize)
WriteBytes = Adapter->uiSectorSize;
else
WriteBytes = NOB;
}
} while (NOB > 0);
BcmFlash2xWriteSig(Adapter, sFlash2xWrite.Section);
up(&Adapter->NVMRdmWrmLock);
kfree(pWriteBuff);
}
break;
case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP: {
struct bcm_flash2x_bitmap *psFlash2xBitMap;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.OutputLength != sizeof(struct bcm_flash2x_bitmap))
return -EINVAL;
psFlash2xBitMap = kzalloc(sizeof(struct bcm_flash2x_bitmap), GFP_KERNEL);
if (psFlash2xBitMap == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory is not available");
return -ENOMEM;
}
/* Reading the Flash section bitmap */
down(&Adapter->NVMRdmWrmLock);
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
kfree(psFlash2xBitMap);
return -EACCES;
}
BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
up(&Adapter->NVMRdmWrmLock);
if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(struct bcm_flash2x_bitmap))) {
kfree(psFlash2xBitMap);
return -EFAULT;
}
kfree(psFlash2xBitMap);
}
break;
case IOCTL_BCM_SET_ACTIVE_SECTION: {
enum bcm_flash2x_section_val eFlash2xSectionVal = 0;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SET_ACTIVE_SECTION Called");
if (IsFlash2x(Adapter) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
return -EINVAL;
}
Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
return -EFAULT;
}
Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
return -EFAULT;
}
down(&Adapter->NVMRdmWrmLock);
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
return -EACCES;
}
Status = BcmSetActiveSection(Adapter, eFlash2xSectionVal);
if (Status)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Failed to make its priority highest. Status %d", Status);
up(&Adapter->NVMRdmWrmLock);
}
break;
case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION: {
/* Right Now we are taking care of only DSD */
Adapter->bAllDSDWriteAllow = FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
Status = STATUS_SUCCESS;
}
break;
case IOCTL_BCM_COPY_SECTION: {
struct bcm_flash2x_copy_section sCopySectStrut = {0};
Status = STATUS_SUCCESS;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_COPY_SECTION Called");
Adapter->bAllDSDWriteAllow = FALSE;
if (IsFlash2x(Adapter) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
return -EINVAL;
}
Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
return -EFAULT;
}
Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_copy_section));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source Section :%x", sCopySectStrut.SrcSection);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Destination Section :%x", sCopySectStrut.DstSection);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "offset :%x", sCopySectStrut.offset);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes);
if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == FALSE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exist in Flash ", sCopySectStrut.SrcSection);
return -EINVAL;
}
if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == FALSE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destination Section<%x> does not exist in Flash ", sCopySectStrut.DstSection);
return -EINVAL;
}
if (sCopySectStrut.SrcSection == sCopySectStrut.DstSection) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source and Destination section should be different");
return -EINVAL;
}
down(&Adapter->NVMRdmWrmLock);
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
return -EACCES;
}
if (sCopySectStrut.SrcSection == ISO_IMAGE1 || sCopySectStrut.SrcSection == ISO_IMAGE2) {
if (IsNonCDLessDevice(Adapter)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is Non-CDLess hence won't have ISO !!");
Status = -EINVAL;
} else if (sCopySectStrut.numOfBytes == 0) {
Status = BcmCopyISO(Adapter, sCopySectStrut);
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Partial copy of ISO section is not allowed..");
Status = STATUS_FAILURE;
}
up(&Adapter->NVMRdmWrmLock);
return Status;
}
Status = BcmCopySection(Adapter, sCopySectStrut.SrcSection,
sCopySectStrut.DstSection, sCopySectStrut.offset, sCopySectStrut.numOfBytes);
up(&Adapter->NVMRdmWrmLock);
}
break;
case IOCTL_BCM_GET_FLASH_CS_INFO: {
Status = STATUS_SUCCESS;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_GET_FLASH_CS_INFO Called");
Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
return -EFAULT;
}
if (Adapter->eNVMType != NVM_FLASH) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Connected device does not have flash");
Status = -EINVAL;
break;
}
if (IsFlash2x(Adapter) == TRUE) {
if (IoBuffer.OutputLength < sizeof(struct bcm_flash2x_cs_info))
return -EINVAL;
if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(struct bcm_flash2x_cs_info)))
return -EFAULT;
} else {
if (IoBuffer.OutputLength < sizeof(struct bcm_flash_cs_info))
return -EINVAL;
if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(struct bcm_flash_cs_info)))
return -EFAULT;
}
}
break;
case IOCTL_BCM_SELECT_DSD: {
UINT SectOfset = 0;
enum bcm_flash2x_section_val eFlash2xSectionVal;
eFlash2xSectionVal = NO_SECTION_VAL;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SELECT_DSD Called");
if (IsFlash2x(Adapter) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
return -EINVAL;
}
Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
return -EFAULT;
}
Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read Section :%d", eFlash2xSectionVal);
if ((eFlash2xSectionVal != DSD0) &&
(eFlash2xSectionVal != DSD1) &&
(eFlash2xSectionVal != DSD2)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Passed section<%x> is not DSD section", eFlash2xSectionVal);
return STATUS_FAILURE;
}
SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
if (SectOfset == INVALID_OFFSET) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exist in Flash 2.x", eFlash2xSectionVal);
return -EINVAL;
}
Adapter->bAllDSDWriteAllow = TRUE;
Adapter->ulFlashCalStart = SectOfset;
Adapter->eActiveDSD = eFlash2xSectionVal;
}
Status = STATUS_SUCCESS;
break;
case IOCTL_BCM_NVM_RAW_READ: {
struct bcm_nvm_readwrite stNVMRead;
INT NOB;
INT BuffSize;
INT ReadOffset = 0;
UINT ReadBytes = 0;
PUCHAR pReadBuff;
void __user *OutPutBuff;
if (Adapter->eNVMType != NVM_FLASH) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "NVM TYPE is not Flash");
return -EINVAL;
}
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
return -EFAULT;
}
if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer, sizeof(struct bcm_nvm_readwrite)))
return -EFAULT;
NOB = stNVMRead.uiNumBytes;
/* In Raw-Read max Buff size : 64MB */
if (NOB > DEFAULT_BUFF_SIZE)
BuffSize = DEFAULT_BUFF_SIZE;
else
BuffSize = NOB;
ReadOffset = stNVMRead.uiOffset;
OutPutBuff = stNVMRead.pBuffer;
pReadBuff = kzalloc(BuffSize, GFP_KERNEL);
if (pReadBuff == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure");
Status = -ENOMEM;
break;
}
down(&Adapter->NVMRdmWrmLock);
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
kfree(pReadBuff);
up(&Adapter->NVMRdmWrmLock);
return -EACCES;
}
Adapter->bFlashRawRead = TRUE;
while (NOB) {
if (NOB > DEFAULT_BUFF_SIZE)
ReadBytes = DEFAULT_BUFF_SIZE;
else
ReadBytes = NOB;
/* Reading the data from Flash 2.x */
Status = BeceemNVMRead(Adapter, (PUINT)pReadBuff, ReadOffset, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status);
break;
}
BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes);
Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status);
up(&Adapter->NVMRdmWrmLock);
kfree(pReadBuff);
return -EFAULT;
}
NOB = NOB - ReadBytes;
if (NOB) {
ReadOffset = ReadOffset + ReadBytes;
OutPutBuff = OutPutBuff + ReadBytes;
}
}
Adapter->bFlashRawRead = FALSE;
up(&Adapter->NVMRdmWrmLock);
kfree(pReadBuff);
break;
}
case IOCTL_BCM_CNTRLMSG_MASK: {
ULONG RxCntrlMsgBitMask = 0;
/* Copy Ioctl Buffer structure */
Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of Ioctl buffer from user space failed");
return -EFAULT;
}
if (IoBuffer.InputLength != sizeof(unsigned long)) {
Status = -EINVAL;
break;
}
Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of control bit mask from user space failed");
return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
}
break;
case IOCTL_BCM_GET_DEVICE_DRIVER_INFO: {
struct bcm_driver_info DevInfo;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
DevInfo.MaxRDMBufferSize = BUFFER_4K;
DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
DevInfo.u32RxAlignmentCorrection = 0;
DevInfo.u32NVMType = Adapter->eNVMType;
DevInfo.u32InterfaceType = BCM_USB;
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.OutputLength < sizeof(DevInfo))
return -EINVAL;
if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
return -EFAULT;
}
break;
case IOCTL_BCM_TIME_SINCE_NET_ENTRY: {
struct bcm_time_elapsed stTimeElapsedSinceNetEntry = {0};
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
if (IoBuffer.OutputLength < sizeof(struct bcm_time_elapsed))
return -EINVAL;
stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = get_seconds() - Adapter->liTimeSinceLastNetEntry;
if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(struct bcm_time_elapsed)))
return -EFAULT;
}
break;
case IOCTL_CLOSE_NOTIFICATION:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_CLOSE_NOTIFICATION");
break;
default:
pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd);
Status = STATUS_FAILURE;
break;
}
return Status;
}
| CWE-200 | 179,134 | 935 | 15758140552209269066920347778856016720 | null | null | null |
linux | b5e2f339865fb443107e5b10603e53bbc92dc054 | 1 |
int wvlan_set_station_nickname(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
/*------------------------------------------------------------------------*/
DBG_FUNC("wvlan_set_station_nickname");
DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
memset(lp->StationName, 0, sizeof(lp->StationName));
memcpy(lp->StationName, extra, wrqu->data.length);
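/*
 * Note: the memcpy length above comes straight from the user-controlled
 * wrqu->data.length with no clamp to sizeof(lp->StationName); this is
 * the overflow this dataset row flags (CWE-119). A bounded copy might
 * look like the following (sketch, not necessarily the committed fix):
 *
 *   memcpy(lp->StationName, extra,
 *          min_t(size_t, wrqu->data.length, sizeof(lp->StationName) - 1));
 */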
/* Commit the adapter parameters */
wl_apply(lp);
wl_unlock(lp, &flags);
DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_set_station_nickname */
| CWE-119 | 179,135 | 936 | 62431484736668092588406280640396277708 | null | null | null |
linux | b5e2f339865fb443107e5b10603e53bbc92dc054 | 1 |
int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
ltv_t *pLtv;
bool_t ltvAllocated = FALSE;
ENCSTRCT sEncryption;
#ifdef USE_WDS
hcf_16 hcfPort = HCF_PORT_0;
#endif /* USE_WDS */
/*------------------------------------------------------------------------*/
DBG_FUNC("wvlan_uil_put_info");
DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
if ((urq->data != NULL) && (urq->len != 0)) {
/* Make sure that we have at least a command and length to send. */
if (urq->len < (sizeof(hcf_16) * 2)) {
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
DBG_ERROR(DbgInfo, "No Length/Type in LTV!!!\n");
DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
DBG_LEAVE(DbgInfo);
return result;
}
/* Verify the user buffer */
result = verify_area(VERIFY_READ, urq->data, urq->len);
if (result != 0) {
urq->result = UIL_FAILURE;
DBG_ERROR(DbgInfo, "verify_area(), VERIFY_READ FAILED\n");
DBG_LEAVE(DbgInfo);
return result;
}
/* Get only the command and length information. */
copy_from_user(&(lp->ltvRecord), urq->data, sizeof(hcf_16) * 2);
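/*
 * Note: the return values of this copy_from_user() and of the full
 * copy further below are ignored; a short copy would leave the LTV
 * record partially initialized.
 */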
/* Make sure the incoming LTV record length is within the bounds of the
IOCTL length */
if (((lp->ltvRecord.len + 1) * sizeof(hcf_16)) > urq->len) {
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
DBG_LEAVE(DbgInfo);
return result;
}
/* If the requested length is greater than the size of our local
LTV record, try to allocate a buffer from the kernel heap.
Otherwise, we just use our local LTV record. */
if (urq->len > sizeof(lp->ltvRecord)) {
pLtv = kmalloc(urq->len, GFP_KERNEL);
if (pLtv != NULL) {
ltvAllocated = TRUE;
} else {
DBG_ERROR(DbgInfo, "Alloc FAILED\n");
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
result = -ENOMEM;
DBG_LEAVE(DbgInfo);
return result;
}
} else {
pLtv = &(lp->ltvRecord);
}
/* Copy the data from the user's buffer into the local LTV
record data area. */
copy_from_user(pLtv, urq->data, urq->len);
/* We need to snoop the commands to see if there is anything we
need to store for the purposes of a reset or start/stop
sequence. Perform endian translation as needed */
switch (pLtv->typ) {
case CFG_CNF_PORT_TYPE:
lp->PortType = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_OWN_MAC_ADDR:
/* TODO: determine if we are going to store anything based on this */
break;
case CFG_CNF_OWN_CHANNEL:
lp->Channel = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
/* CFG_CNF_OWN_SSID currently same as CNF_DESIRED_SSID. Do we
need separate storage for this? */
/* case CFG_CNF_OWN_SSID: */
case CFG_CNF_OWN_ATIM_WINDOW:
lp->atimWindow = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_SYSTEM_SCALE:
lp->DistanceBetweenAPs = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
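/* Note: no break here in the original; CFG_CNF_SYSTEM_SCALE falls
 * through into CFG_CNF_MAX_DATA_LEN. */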
case CFG_CNF_MAX_DATA_LEN:
/* TODO: determine if we are going to store anything based
on this */
break;
case CFG_CNF_PM_ENABLED:
lp->PMEnabled = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_MCAST_RX:
lp->MulticastReceive = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_MAX_SLEEP_DURATION:
lp->MaxSleepDuration = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_HOLDOVER_DURATION:
lp->holdoverDuration = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_OWN_NAME:
memset(lp->StationName, 0, sizeof(lp->StationName));
memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
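/*
 * Note: the copy length pLtv->u.u16[0] is user-controlled and is not
 * clamped to sizeof(lp->StationName), so an oversized LTV can overrun
 * the buffer just zeroed above.
 */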
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_LOAD_BALANCING:
lp->loadBalancing = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_MEDIUM_DISTRIBUTION:
lp->mediumDistribution = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#ifdef WARP
case CFG_CNF_TX_POW_LVL:
lp->txPowLevel = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
/* case CFG_CNF_SHORT_RETRY_LIMIT: */ /* Short Retry Limit */
/* case 0xFC33: */ /* Long Retry Limit */
case CFG_SUPPORTED_RATE_SET_CNTL: /* Supported Rate Set Control */
lp->srsc[0] = pLtv->u.u16[0];
lp->srsc[1] = pLtv->u.u16[1];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
break;
case CFG_BASIC_RATE_SET_CNTL: /* Basic Rate Set Control */
lp->brsc[0] = pLtv->u.u16[0];
lp->brsc[1] = pLtv->u.u16[1];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
break;
case CFG_CNF_CONNECTION_CNTL:
lp->connectionControl = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
/* case CFG_PROBE_DATA_RATE: */
#endif /* HERMES25 */
#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
/* ;?should we restore this to allow smaller memory footprint */
case CFG_CNF_OWN_DTIM_PERIOD:
lp->DTIMPeriod = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#ifdef WARP
case CFG_CNF_OWN_BEACON_INTERVAL: /* Own Beacon Interval */
lp->ownBeaconInterval = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#endif /* WARP */
case CFG_COEXISTENSE_BEHAVIOUR: /* Coexistence behavior */
lp->coexistence = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#ifdef USE_WDS
case CFG_CNF_WDS_ADDR1:
memcpy(&lp->wds_port[0].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_1;
break;
case CFG_CNF_WDS_ADDR2:
memcpy(&lp->wds_port[1].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_2;
break;
case CFG_CNF_WDS_ADDR3:
memcpy(&lp->wds_port[2].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_3;
break;
case CFG_CNF_WDS_ADDR4:
memcpy(&lp->wds_port[3].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_4;
break;
case CFG_CNF_WDS_ADDR5:
memcpy(&lp->wds_port[4].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_5;
break;
case CFG_CNF_WDS_ADDR6:
memcpy(&lp->wds_port[5].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_6;
break;
#endif /* USE_WDS */
case CFG_CNF_MCAST_PM_BUF:
lp->multicastPMBuffering = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_REJECT_ANY:
lp->RejectAny = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#endif
case CFG_CNF_ENCRYPTION:
lp->EnableEncryption = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_AUTHENTICATION:
lp->authentication = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
/* ;?should we restore this to allow smaller memory footprint */
/* case CFG_CNF_EXCL_UNENCRYPTED:
lp->ExcludeUnencrypted = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break; */
case CFG_CNF_MCAST_RATE:
/* TODO: determine if we are going to store anything based on this */
break;
case CFG_CNF_INTRA_BSS_RELAY:
lp->intraBSSRelay = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#endif
case CFG_CNF_MICRO_WAVE:
/* TODO: determine if we are going to store anything based on this */
break;
/*case CFG_CNF_LOAD_BALANCING:*/
/* TODO: determine if we are going to store anything based on this */
/* break; */
/* case CFG_CNF_MEDIUM_DISTRIBUTION: */
/* TODO: determine if we are going to store anything based on this */
/* break; */
/* case CFG_CNF_RX_ALL_GROUP_ADDRESS: */
/* TODO: determine if we are going to store anything based on this */
/* break; */
/* case CFG_CNF_COUNTRY_INFO: */
/* TODO: determine if we are going to store anything based on this */
/* break; */
case CFG_CNF_OWN_SSID:
/* case CNF_DESIRED_SSID: */
case CFG_DESIRED_SSID:
memset(lp->NetworkName, 0, sizeof(lp->NetworkName));
memcpy((void *)lp->NetworkName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
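/*
 * Note: as with CFG_CNF_OWN_NAME above, the length pLtv->u.u16[0] is
 * user-controlled and not clamped to sizeof(lp->NetworkName).
 */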
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
/* take care of the special network name "ANY" case */
if ((strlen(&pLtv->u.u8[2]) == 0) ||
(strcmp(&pLtv->u.u8[2], "ANY") == 0) ||
(strcmp(&pLtv->u.u8[2], "any") == 0)) {
/* set the SSID_STRCT len field (u16[0]) to zero, and
effectively null the string u8[2] */
pLtv->u.u16[0] = 0;
pLtv->u.u8[2] = 0;
}
break;
case CFG_GROUP_ADDR:
/* TODO: determine if we are going to store anything based on this */
break;
case CFG_CREATE_IBSS:
lp->CreateIBSS = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_RTS_THRH:
lp->RTSThreshold = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_TX_RATE_CNTL:
lp->TxRateControl[0] = pLtv->u.u16[0];
lp->TxRateControl[1] = pLtv->u.u16[1];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
break;
case CFG_PROMISCUOUS_MODE:
/* TODO: determine if we are going to store anything based on this */
break;
/* case CFG_WAKE_ON_LAN: */
/* TODO: determine if we are going to store anything based on this */
/* break; */
#if 1 /* ;? #if (HCF_TYPE) & HCF_TYPE_AP */
/* ;?should we restore this to allow smaller memory footprint */
case CFG_RTS_THRH0:
lp->RTSThreshold = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_TX_RATE_CNTL0:
/*;?no idea what this should be, get going so comment it out lp->TxRateControl = pLtv->u.u16[0];*/
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#ifdef USE_WDS
case CFG_RTS_THRH1:
lp->wds_port[0].rtsThreshold = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_1;
break;
case CFG_RTS_THRH2:
lp->wds_port[1].rtsThreshold = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_2;
break;
case CFG_RTS_THRH3:
lp->wds_port[2].rtsThreshold = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_3;
break;
case CFG_RTS_THRH4:
lp->wds_port[3].rtsThreshold = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_4;
break;
case CFG_RTS_THRH5:
lp->wds_port[4].rtsThreshold = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_5;
break;
case CFG_RTS_THRH6:
lp->wds_port[5].rtsThreshold = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_6;
break;
case CFG_TX_RATE_CNTL1:
lp->wds_port[0].txRateCntl = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_1;
break;
case CFG_TX_RATE_CNTL2:
lp->wds_port[1].txRateCntl = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_2;
break;
case CFG_TX_RATE_CNTL3:
lp->wds_port[2].txRateCntl = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_3;
break;
case CFG_TX_RATE_CNTL4:
lp->wds_port[3].txRateCntl = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_4;
break;
case CFG_TX_RATE_CNTL5:
lp->wds_port[4].txRateCntl = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_5;
break;
case CFG_TX_RATE_CNTL6:
lp->wds_port[5].txRateCntl = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_6;
break;
#endif /* USE_WDS */
#endif /* (HCF_TYPE) & HCF_TYPE_AP */
case CFG_DEFAULT_KEYS:
{
CFG_DEFAULT_KEYS_STRCT *pKeys = (CFG_DEFAULT_KEYS_STRCT *)pLtv;
pKeys->key[0].len = CNV_INT_TO_LITTLE(pKeys->key[0].len);
pKeys->key[1].len = CNV_INT_TO_LITTLE(pKeys->key[1].len);
pKeys->key[2].len = CNV_INT_TO_LITTLE(pKeys->key[2].len);
pKeys->key[3].len = CNV_INT_TO_LITTLE(pKeys->key[3].len);
memcpy((void *)&(lp->DefaultKeys), (void *)pKeys,
sizeof(CFG_DEFAULT_KEYS_STRCT));
}
break;
case CFG_TX_KEY_ID:
lp->TransmitKeyID = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_SCAN_SSID:
/* TODO: determine if we are going to store anything based on this */
break;
case CFG_TICK_TIME:
/* TODO: determine if we are going to store anything based on this */
break;
/* these are Info RIDs; should they be allowed for puts? */
case CFG_MAX_LOAD_TIME:
case CFG_DL_BUF:
/* case CFG_HSI_SUP_RANGE: */
case CFG_NIC_SERIAL_NUMBER:
case CFG_NIC_IDENTITY:
case CFG_NIC_MFI_SUP_RANGE:
case CFG_NIC_CFI_SUP_RANGE:
case CFG_NIC_TEMP_TYPE:
case CFG_NIC_PROFILE:
case CFG_FW_IDENTITY:
case CFG_FW_SUP_RANGE:
case CFG_MFI_ACT_RANGES_STA:
case CFG_CFI_ACT_RANGES_STA:
case CFG_PORT_STAT:
case CFG_CUR_SSID:
case CFG_CUR_BSSID:
case CFG_COMMS_QUALITY:
case CFG_CUR_TX_RATE:
case CFG_CUR_BEACON_INTERVAL:
case CFG_CUR_SCALE_THRH:
case CFG_PROTOCOL_RSP_TIME:
case CFG_CUR_SHORT_RETRY_LIMIT:
case CFG_CUR_LONG_RETRY_LIMIT:
case CFG_MAX_TX_LIFETIME:
case CFG_MAX_RX_LIFETIME:
case CFG_CF_POLLABLE:
case CFG_AUTHENTICATION_ALGORITHMS:
case CFG_PRIVACY_OPT_IMPLEMENTED:
/* case CFG_CURRENT_REMOTE_RATES: */
/* case CFG_CURRENT_USED_RATES: */
/* case CFG_CURRENT_SYSTEM_SCALE: */
/* case CFG_CURRENT_TX_RATE1: */
/* case CFG_CURRENT_TX_RATE2: */
/* case CFG_CURRENT_TX_RATE3: */
/* case CFG_CURRENT_TX_RATE4: */
/* case CFG_CURRENT_TX_RATE5: */
/* case CFG_CURRENT_TX_RATE6: */
case CFG_NIC_MAC_ADDR:
case CFG_PCF_INFO:
/* case CFG_CURRENT_COUNTRY_INFO: */
case CFG_PHY_TYPE:
case CFG_CUR_CHANNEL:
/* case CFG_CURRENT_POWER_STATE: */
/* case CFG_CCAMODE: */
case CFG_SUPPORTED_DATA_RATES:
break;
case CFG_AP_MODE:
/*;? lp->DownloadFirmware = (pLtv->u.u16[0]) + 1; */
DBG_ERROR(DbgInfo, "set CFG_AP_MODE no longer supported\n");
break;
case CFG_ENCRYPT_STRING:
/* TODO: ENDIAN TRANSLATION HERE??? */
memset(lp->szEncryption, 0, sizeof(lp->szEncryption));
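/* note: likewise unbounded -- pLtv->len * sizeof(hcf_16) is never
   checked against sizeof(lp->szEncryption) before the copy below */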
memcpy((void *)lp->szEncryption, (void *)&pLtv->u.u8[0],
(pLtv->len * sizeof(hcf_16)));
wl_wep_decode(CRYPT_CODE, &sEncryption,
lp->szEncryption);
/* The Linux driver uses 1-4 for the key IDs and converts to 0-3
when sending to the card. The Windows code base used 0-3 in the
API DLL, which was ported to Linux. For the sake of the user
experience we keep 0-3 as the numbers used in the DLL and perform
the +1 conversion here. We could have converted the entire Linux
driver, but this is less obtrusive. Converting the whole driver
remains a "todo". */
lp->TransmitKeyID = sEncryption.wTxKeyID + 1;
lp->EnableEncryption = sEncryption.wEnabled;
memcpy(&lp->DefaultKeys, &sEncryption.EncStr,
sizeof(CFG_DEFAULT_KEYS_STRCT));
break;
/*case CFG_COUNTRY_STRING:
memset(lp->countryString, 0, sizeof(lp->countryString));
memcpy((void *)lp->countryString, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
break;
*/
case CFG_DRIVER_ENABLE:
lp->driverEnable = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_WOLAS_ENABLE:
lp->wolasEnable = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_SET_WPA_AUTH_KEY_MGMT_SUITE:
lp->AuthKeyMgmtSuite = pLtv->u.u16[0];
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_DISASSOCIATE_ADDR:
pLtv->u.u16[ETH_ALEN / 2] = CNV_INT_TO_LITTLE(pLtv->u.u16[ETH_ALEN / 2]);
break;
case CFG_ADD_TKIP_DEFAULT_KEY:
case CFG_REMOVE_TKIP_DEFAULT_KEY:
/* Endian convert the Tx Key Information */
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_ADD_TKIP_MAPPED_KEY:
break;
case CFG_REMOVE_TKIP_MAPPED_KEY:
break;
/* some RIDs just can't be put */
case CFG_MB_INFO:
case CFG_IFB:
default:
break;
}
/* This code will prevent Static Configuration Entities from
being sent to the card, as they require a call to
UIL_ACT_APPLY to take effect. Dynamic Entities will be sent
immediately */
switch (pLtv->typ) {
case CFG_CNF_PORT_TYPE:
case CFG_CNF_OWN_MAC_ADDR:
case CFG_CNF_OWN_CHANNEL:
case CFG_CNF_OWN_SSID:
case CFG_CNF_OWN_ATIM_WINDOW:
case CFG_CNF_SYSTEM_SCALE:
case CFG_CNF_MAX_DATA_LEN:
case CFG_CNF_PM_ENABLED:
case CFG_CNF_MCAST_RX:
case CFG_CNF_MAX_SLEEP_DURATION:
case CFG_CNF_HOLDOVER_DURATION:
case CFG_CNF_OWN_NAME:
case CFG_CNF_LOAD_BALANCING:
case CFG_CNF_MEDIUM_DISTRIBUTION:
#ifdef WARP
case CFG_CNF_TX_POW_LVL:
case CFG_CNF_CONNECTION_CNTL:
/*case CFG_PROBE_DATA_RATE: */
#endif /* WARP */
#if 1 /*;? (HCF_TYPE) & HCF_TYPE_AP */
/*;?should we restore this to allow smaller memory footprint */
case CFG_CNF_OWN_DTIM_PERIOD:
#ifdef WARP
case CFG_CNF_OWN_BEACON_INTERVAL: /* Own Beacon Interval */
#endif /* WARP */
#ifdef USE_WDS
case CFG_CNF_WDS_ADDR1:
case CFG_CNF_WDS_ADDR2:
case CFG_CNF_WDS_ADDR3:
case CFG_CNF_WDS_ADDR4:
case CFG_CNF_WDS_ADDR5:
case CFG_CNF_WDS_ADDR6:
#endif
case CFG_CNF_MCAST_PM_BUF:
case CFG_CNF_REJECT_ANY:
#endif
case CFG_CNF_ENCRYPTION:
case CFG_CNF_AUTHENTICATION:
#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
/* ;?should we restore this to allow smaller memory footprint */
case CFG_CNF_EXCL_UNENCRYPTED:
case CFG_CNF_MCAST_RATE:
case CFG_CNF_INTRA_BSS_RELAY:
#endif
case CFG_CNF_MICRO_WAVE:
/* case CFG_CNF_LOAD_BALANCING: */
/* case CFG_CNF_MEDIUM_DISTRIBUTION: */
/* case CFG_CNF_RX_ALL_GROUP_ADDRESS: */
/* case CFG_CNF_COUNTRY_INFO: */
/* case CFG_COUNTRY_STRING: */
case CFG_AP_MODE:
case CFG_ENCRYPT_STRING:
/* case CFG_DRIVER_ENABLE: */
case CFG_WOLAS_ENABLE:
case CFG_MB_INFO:
case CFG_IFB:
break;
/* Deal with this dynamic MSF RID, as it's required for WPA */
case CFG_DRIVER_ENABLE:
if (lp->driverEnable) {
hcf_cntl(&(lp->hcfCtx), HCF_CNTL_ENABLE | HCF_PORT_0);
hcf_cntl(&(lp->hcfCtx), HCF_CNTL_CONNECT);
} else {
hcf_cntl(&(lp->hcfCtx), HCF_CNTL_DISABLE | HCF_PORT_0);
hcf_cntl(&(lp->hcfCtx), HCF_CNTL_DISCONNECT);
}
break;
default:
wl_act_int_off(lp);
urq->result = hcf_put_info(&(lp->hcfCtx), (LTVP) pLtv);
wl_act_int_on(lp);
break;
}
if (ltvAllocated)
kfree(pLtv);
} else {
urq->result = UIL_FAILURE;
}
} else {
DBG_ERROR(DbgInfo, "EPERM\n");
urq->result = UIL_FAILURE;
result = -EPERM;
}
} else {
DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
urq->result = UIL_ERR_WRONG_IFB;
}
DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_put_info */
|
CWE-119
| 179,136 | 937 |
192300055996805061518087593122531999232
| null | null | null |
linux
|
c2c65cd2e14ada6de44cb527e7f1990bede24e15
| 1 |
static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
size_t count, loff_t *fpos)
{
struct oz_pd *pd;
struct oz_elt_buf *eb;
struct oz_elt_info *ei;
struct oz_elt *elt;
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd)
oz_pd_get(pd);
spin_unlock_bh(&g_cdev.lock);
if (pd == NULL)
return -ENXIO;
if (!(pd->state & OZ_PD_S_CONNECTED))
return -EAGAIN;
eb = &pd->elt_buff;
ei = oz_elt_info_alloc(eb);
if (ei == NULL) {
count = 0;
goto out;
}
elt = (struct oz_elt *)ei->data;
app_hdr = (struct oz_app_hdr *)(elt+1);
elt->length = sizeof(struct oz_app_hdr) + count;
elt->type = OZ_ELT_APP_DATA;
ei->app_id = OZ_APPID_SERIAL;
ei->length = elt->length + sizeof(struct oz_elt);
app_hdr->app_id = OZ_APPID_SERIAL;
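/* note: 'count' is user controlled and is never validated against the
   element buffer that backs ei->data, so the copy_from_user() below
   can write past it -- consistent with this record's CWE-119 tag */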
if (copy_from_user(app_hdr+1, buf, count))
goto out;
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
if (ctx) {
app_hdr->elt_seq_num = ctx->tx_seq_num++;
if (ctx->tx_seq_num == 0)
ctx->tx_seq_num = 1;
spin_lock(&eb->lock);
if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
ei = NULL;
spin_unlock(&eb->lock);
}
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
out:
if (ei) {
count = 0;
spin_lock_bh(&eb->lock);
oz_elt_info_free(eb, ei);
spin_unlock_bh(&eb->lock);
}
oz_pd_put(pd);
return count;
}
|
CWE-119
| 179,137 | 938 |
33191172042980294618689596958041133974
| null | null | null |
linux
|
6062a8dc0517bce23e3c2f7d2fea5e22411269a3
| 1 |
void ipc_rcu_putref(void *ptr)
{
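/* note: this open-coded, non-atomic refcount decrement can race with
   concurrent get/put callers (the fix commit converts it to atomic_t),
   matching this record's CWE-189 tag */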
if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
ipc_schedule_free);
} else {
kfree_rcu(container_of(ptr, struct ipc_rcu_grace, data), rcu);
}
}
|
CWE-189
| 179,157 | 956 |
147913652096999237317585629480667799082
| null | null | null |
linux
|
e93b7d748be887cd7639b113ba7d7ef792a7efb9
| 1 |
static inline int ip_ufo_append_data(struct sock *sk,
struct sk_buff_head *queue,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
int transhdrlen, int maxfraglen, unsigned int flags)
{
struct sk_buff *skb;
int err;
/* There is support for UDP fragmentation offload by network
* device, so create one single skb packet containing complete
* udp datagram
*/
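/* note: a tail skb peeked here is assumed to have been UFO-initialized
   already; per the fix commit, a non-UFO skb left on the queue skips
   the gso setup done in this branch before data is appended to it */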
if ((skb = skb_peek_tail(queue)) == NULL) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
if (skb == NULL)
return err;
/* reserve space for Hardware header */
skb_reserve(skb, hh_len);
/* create space for UDP/IP header */
skb_put(skb, fragheaderlen + transhdrlen);
/* initialize network header pointer */
skb_reset_network_header(skb);
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
/* specify the length of each IP datagram fragment */
skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
__skb_queue_tail(queue, skb);
}
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
}
|
CWE-264
| 179,158 | 957 |
119382236388357002252942235840226949267
| null | null | null |
linux
|
d661684cf6820331feae71146c35da83d794467e
| 1 |
static __inline__ int scm_check_creds(struct ucred *creds)
{
const struct cred *cred = current_cred();
kuid_t uid = make_kuid(cred->user_ns, creds->uid);
kgid_t gid = make_kgid(cred->user_ns, creds->gid);
if (!uid_valid(uid) || !gid_valid(gid))
return -EINVAL;
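/* note: per the fix commit, the capability check below consults
   current->nsproxy->pid_ns rather than the namespace the pid is
   actually resolved in by task_tgid_vnr() */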
if ((creds->pid == task_tgid_vnr(current) ||
ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
gid_eq(gid, cred->sgid)) || nsown_capable(CAP_SETGID))) {
return 0;
}
return -EPERM;
}
|
CWE-264
| 179,163 | 962 |
113094377788410965139613485088601104883
| null | null | null |
linux
|
2433c8f094a008895e66f25bd1773cdb01c91d01
| 1 |
static int net_ctl_permissions(struct ctl_table_header *head,
struct ctl_table *table)
{
struct net *net = container_of(head->set, struct net, sysctls);
kuid_t root_uid = make_kuid(net->user_ns, 0);
kgid_t root_gid = make_kgid(net->user_ns, 0);
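/* note: per the fix commit, the comparisons below use the real
   uid/gid where the effective ids should be tested, so a setuid
   process can be granted the wrong access level */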
/* Allow network administrator to have same access as root. */
if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
uid_eq(root_uid, current_uid())) {
int mode = (table->mode >> 6) & 7;
return (mode << 6) | (mode << 3) | mode;
}
/* Allow netns root group to have the same access as the root group */
if (gid_eq(root_gid, current_gid())) {
int mode = (table->mode >> 3) & 7;
return (mode << 3) | mode;
}
return table->mode;
}
|
CWE-20
| 179,166 | 964 |
4251532209852224313464912506999199087
| null | null | null |
FFmpeg
|
c94f9e854228e0ea00e1de8769d8d3f7cab84a55
| 1 |
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
{
void **ptrptr = ptr;
*ptrptr = av_realloc_f(*ptrptr, nmemb, size);
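/* note: the check below is inverted -- it reports ENOMEM only for the
   legitimate zero-sized case and returns success when the allocation
   actually failed, leaving *ptrptr NULL for the caller */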
if (!*ptrptr && !(nmemb && size))
return AVERROR(ENOMEM);
return 0;
}
| 179,167 | 965 |
104455358682340863259750507091028415905
| null | null | null |
|
linux
|
c95eb3184ea1a3a2551df57190c81da695e2144b
| 1 |
validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
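/* note: per the fix commit, a software event can reach this path, in
   which case the to_arm_pmu() cast above is bogus and the
   get_event_idx() call below operates on the wrong structure */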
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
return armpmu->get_event_idx(hw_events, event) >= 0;
}
|
CWE-20
| 179,181 | 977 |
214965441703397323222765547353775407610
| null | null | null |
linux
|
1fc29bacedeabb278080e31bb9c1ecb49f143c3b
| 1 |
build_unc_path_to_root(const struct smb_vol *vol,
const struct cifs_sb_info *cifs_sb)
{
char *full_path, *pos;
unsigned int pplen = vol->prepath ? strlen(vol->prepath) + 1 : 0;
unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
strncpy(full_path, vol->UNC, unc_len);
pos = full_path + unc_len;
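/* note: off by one -- pplen already counts a NUL, yet the writes below
   add a separator byte *and* a trailing NUL, one byte past the
   unc_len + pplen + 1 allocation above */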
if (pplen) {
*pos++ = CIFS_DIR_SEP(cifs_sb);
strncpy(pos, vol->prepath, pplen);
pos += pplen;
}
*pos = '\0'; /* add trailing null */
convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
return full_path;
}
|
CWE-189
| 179,182 | 978 |
8340581254640439328367740409140108158
| null | null | null |
libtiff
|
ce6841d9e41d621ba23cf18b190ee6a23b2cc833
| 1 |
process(register int code, unsigned char** fill)
{
int incode;
static unsigned char firstchar;
if (code == clear) {
codesize = datasize + 1;
codemask = (1 << codesize) - 1;
avail = clear + 2;
oldcode = -1;
return 1;
}
if (oldcode == -1) {
*(*fill)++ = suffix[code];
firstchar = oldcode = code;
return 1;
}
if (code > avail) {
fprintf(stderr, "code %d too large for %d\n", code, avail);
return 0;
}
incode = code;
if (code == avail) { /* the first code is always < avail */
*stackp++ = firstchar;
code = oldcode;
}
while (code > clear) {
*stackp++ = suffix[code];
code = prefix[code];
}
*stackp++ = firstchar = suffix[code];
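/* note: 'avail' grows without bound here; the (avail < 4096) guard
   below only gates codesize, so crafted input can push these writes
   past the ends of prefix[]/suffix[] (this record's CWE-119) */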
prefix[avail] = oldcode;
suffix[avail] = firstchar;
avail++;
if (((avail & codemask) == 0) && (avail < 4096)) {
codesize++;
codemask += avail;
}
oldcode = incode;
do {
*(*fill)++ = *--stackp;
} while (stackp > stack);
return 1;
}
|
CWE-119
| 179,183 | 979 |
49639759056433865662554059812328933578
| null | null | null |
linux
|
9955ac47f4ba1c95ecb6092aeaefb40a22e99268
| 1 |
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
handler[reason], esr);
die("Oops - bad mode", regs, 0);
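/* note: this path is unconditional, so even a bad ESR triggered from
   user space (EL0) takes the whole machine down via panic() below */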
local_irq_disable();
panic("bad mode");
}
| 179,184 | 980 |
172340806476661151675149696735670215989
| null | null | null |
|
linux
|
75a493e60ac4bbe2e977e7129d6d8cbb0dd236be
| 1 |
static void ip6_append_data_mtu(int *mtu,
int *maxfraglen,
unsigned int fragheaderlen,
struct sk_buff *skb,
struct rt6_info *rt)
{
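/* note: per the fix commit, the recomputation below ignores the
   caller's pmtudisc/frag_size constraints, so later fragments can be
   sized from the raw path MTU */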
if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
if (skb == NULL) {
/* first fragment, reserve header_len */
*mtu = *mtu - rt->dst.header_len;
} else {
/*
* this fragment is not first, the headers
* space is regarded as data space.
*/
*mtu = dst_mtu(rt->dst.path);
}
*maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ fragheaderlen - sizeof(struct frag_hdr);
}
}
|
CWE-399
| 179,187 | 982 |
249477262173279048030533198102446614258
| null | null | null |
linux
|
8822b64a0fa64a5dd1dfcf837c5b0be83f8c05d1
| 1 |
static int udp_v6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
struct udphdr *uh;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
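/* note: per the fix commit, the cork may actually hold AF_INET flow
   state on a dual-stack socket, in which case this union read (and
   the header built from it below) is bogus */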
int err = 0;
int is_udplite = IS_UDPLITE(sk);
__wsum csum = 0;
/* Grab the skbuff where UDP header space exists. */
if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = fl6->fl6_sport;
uh->dest = fl6->fl6_dport;
uh->len = htons(up->len);
uh->check = 0;
if (is_udplite)
csum = udplite_csum_outgoing(sk, skb);
else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
up->len);
goto send;
} else
csum = udp_csum_outgoing(sk, skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
up->len, fl6->flowi6_proto, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip6_push_pending_frames(sk);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
out:
up->len = 0;
up->pending = 0;
return err;
}
|
CWE-399
| 179,189 | 984 |
308814749542050957785836886257037971312
| null | null | null |
linux
|
c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1
| 1 |
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip ip;
int err = -EINVAL;
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
if (timer_pending(&br->multicast_querier_timer))
return -EBUSY;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
else
ip.u.ip6 = entry->addr.u.ip6;
#endif
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, &ip);
if (!mp)
goto unlock;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (!p->port || p->port->dev->ifindex != entry->ifindex)
continue;
if (p->port->state == BR_STATE_DISABLED)
goto unlock;
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
err = 0;
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
break;
}
unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
}
|
CWE-20
| 179,190 | 985 |
182535534055103098983025293091081731271
| null | null | null |
linux
|
c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1
| 1 |
static void br_multicast_del_pg(struct net_bridge *br,
struct net_bridge_port_group *pg)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, &pg->addr);
if (WARN_ON(!mp))
return;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p != pg)
continue;
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
return;
}
WARN_ON(1);
}
|
CWE-20
| 179,191 | 986 |
74664793578988533913040766638001812814
| null | null | null |
linux
|
dd7633ecd553a5e304d349aa6f8eb8a0417098c5
| 1 |
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
struct socket *sock, *oldsock;
struct vhost_virtqueue *vq;
struct vhost_net_virtqueue *nvq;
struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
int r;
mutex_lock(&n->dev.mutex);
r = vhost_dev_check_owner(&n->dev);
if (r)
goto err;
if (index >= VHOST_NET_VQ_MAX) {
r = -ENOBUFS;
goto err;
}
vq = &n->vqs[index].vq;
nvq = &n->vqs[index];
mutex_lock(&vq->mutex);
/* Verify that ring has been setup correctly. */
if (!vhost_vq_access_ok(vq)) {
r = -EFAULT;
goto err_vq;
}
sock = get_socket(fd);
if (IS_ERR(sock)) {
r = PTR_ERR(sock);
goto err_vq;
}
/* start polling new socket */
oldsock = rcu_dereference_protected(vq->private_data,
lockdep_is_held(&vq->mutex));
if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock));
if (IS_ERR(ubufs)) {
r = PTR_ERR(ubufs);
goto err_ubufs;
}
vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, sock);
r = vhost_init_used(vq);
if (r)
goto err_used;
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;
n->tx_packets = 0;
n->tx_zcopy_err = 0;
n->tx_flush = false;
}
mutex_unlock(&vq->mutex);
if (oldubufs) {
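/* note: per the fix commit, this wait/teardown ordering can race with
   an in-flight flush that still references the old ubufs (use after
   free), matching this record's CWE-399 tag */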
vhost_net_ubuf_put_and_wait(oldubufs);
mutex_lock(&vq->mutex);
vhost_zerocopy_signal_used(n, vq);
mutex_unlock(&vq->mutex);
}
if (oldsock) {
vhost_net_flush_vq(n, index);
fput(oldsock->file);
}
mutex_unlock(&n->dev.mutex);
return 0;
err_used:
rcu_assign_pointer(vq->private_data, oldsock);
vhost_net_enable_vq(n, vq);
if (ubufs)
vhost_net_ubuf_put_and_wait(ubufs);
err_ubufs:
fput(sock->file);
err_vq:
mutex_unlock(&vq->mutex);
err:
mutex_unlock(&n->dev.mutex);
return r;
}
|
CWE-399
| 179,192 | 987 |
241366389520784052576898155125538477612
| null | null | null |
linux
|
307f2fb95e9b96b3577916e73d92e104f8f26494
| 1 |
static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
struct nl_info *info)
{
struct rt6_info *iter = NULL;
struct rt6_info **ins;
int replace = (info->nlh &&
(info->nlh->nlmsg_flags & NLM_F_REPLACE));
int add = (!info->nlh ||
(info->nlh->nlmsg_flags & NLM_F_CREATE));
int found = 0;
ins = &fn->leaf;
for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
/*
* Search for duplicates
*/
if (iter->rt6i_metric == rt->rt6i_metric) {
/*
* Same priority level
*/
if (info->nlh &&
(info->nlh->nlmsg_flags & NLM_F_EXCL))
return -EEXIST;
if (replace) {
found++;
break;
}
if (iter->dst.dev == rt->dst.dev &&
iter->rt6i_idev == rt->rt6i_idev &&
ipv6_addr_equal(&iter->rt6i_gateway,
&rt->rt6i_gateway)) {
if (rt->rt6i_nsiblings)
rt->rt6i_nsiblings = 0;
if (!(iter->rt6i_flags & RTF_EXPIRES))
return -EEXIST;
if (!(rt->rt6i_flags & RTF_EXPIRES))
rt6_clean_expires(iter);
else
rt6_set_expires(iter, rt->dst.expires);
return -EEXIST;
}
/* If we have the same destination and the same metric,
* but not the same gateway, then the route we try to
* add is sibling to this route, increment our counter
* of siblings, and later we will add our route to the
* list.
* Only static routes (which don't have flag
* RTF_EXPIRES) are used for ECMPv6.
*
* To avoid a long list, we only add siblings if the
* route has a gateway.
*/
if (rt->rt6i_flags & RTF_GATEWAY &&
!(rt->rt6i_flags & RTF_EXPIRES) &&
!(iter->rt6i_flags & RTF_EXPIRES))
rt->rt6i_nsiblings++;
}
if (iter->rt6i_metric > rt->rt6i_metric)
break;
ins = &iter->dst.rt6_next;
}
/* Reset round-robin state, if necessary */
if (ins == &fn->leaf)
fn->rr_ptr = NULL;
/* Link this route to others same route. */
if (rt->rt6i_nsiblings) {
unsigned int rt6i_nsiblings;
struct rt6_info *sibling, *temp_sibling;
/* Find the first route that have the same metric */
sibling = fn->leaf;
while (sibling) {
if (sibling->rt6i_metric == rt->rt6i_metric) {
list_add_tail(&rt->rt6i_siblings,
&sibling->rt6i_siblings);
break;
}
sibling = sibling->dst.rt6_next;
}
/* For each sibling in the list, increment the counter of
* siblings. BUG() if the counters do not match; the list of
* siblings is broken!
*/
rt6i_nsiblings = 0;
list_for_each_entry_safe(sibling, temp_sibling,
&rt->rt6i_siblings, rt6i_siblings) {
sibling->rt6i_nsiblings++;
BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings);
rt6i_nsiblings++;
}
BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
}
/*
* insert node
*/
if (!replace) {
if (!add)
pr_warn("NLM_F_CREATE should be set when creating new route\n");
add:
rt->dst.rt6_next = iter;
*ins = rt;
rt->rt6i_node = fn;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
} else {
if (!found) {
if (add)
goto add;
pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
return -ENOENT;
}
*ins = rt;
rt->rt6i_node = fn;
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info);
rt6_release(iter);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
}
return 0;
}
|
CWE-399
| 179,194 | 988 |
27152909099956747115348762917341164799
| null | null | null |
linux
|
ea702b80e0bbb2448e201472127288beb82ca2fe
| 1 |
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
int rc;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
unsigned int i;
size_t total_len = 0, sent;
struct socket *ssocket = server->ssocket;
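/* note: ssocket is read without a NULL check; a reconnect racing with
   this send (this record's CWE-362 tag) leaves it NULL and the
   kernel_setsockopt() calls below dereference it */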
int val = 1;
cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
dump_smb(iov[0].iov_base, iov[0].iov_len);
/* cork the socket */
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
rc = smb_send_kvec(server, iov, n_vec, &sent);
if (rc < 0)
goto uncork;
total_len += sent;
/* now walk the page array and send each page in it */
for (i = 0; i < rqst->rq_npages; i++) {
struct kvec p_iov;
cifs_rqst_page_to_kvec(rqst, i, &p_iov);
rc = smb_send_kvec(server, &p_iov, 1, &sent);
kunmap(rqst->rq_pages[i]);
if (rc < 0)
break;
total_len += sent;
}
uncork:
/* uncork it */
val = 0;
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
"session", smb_buf_length + 4, total_len);
/*
* If we have only sent part of an SMB then the next SMB could
* be taken as the remainder of this one. We need to kill the
* socket so the server throws away the partial SMB
*/
server->tcpStatus = CifsNeedReconnect;
}
if (rc < 0 && rc != -EINTR)
cERROR(1, "Error %d sending data on socket to server", rc);
else
rc = 0;
return rc;
}
|
CWE-362
| 179,198 | 992 |
125179843386250264419280955990474562499
| null | null | null |
linux
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
| 1 |
ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
{
loff_t ret;
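/* note: per the fix commit, this handler was also wired up to files
   whose private_data is not a seq_file, so seq_lseek() below can
   dereference a NULL/foreign pointer */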
if (file->f_mode & FMODE_READ)
ret = seq_lseek(file, offset, whence);
else
file->f_pos = ret = 1;
return ret;
}
| 179,199 | 993 |
93795587963981605065524571558493539908
| null | null | null |
|
linux
|
d5e0d0f607a7a029c6563a0470d88255c89a8d11
| 1 |
vsock_stream_recvmsg(struct kiocb *kiocb,
struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk;
struct vsock_sock *vsk;
int err;
size_t target;
ssize_t copied;
long timeout;
struct vsock_transport_recv_notify_data recv_data;
DEFINE_WAIT(wait);
sk = sock->sk;
vsk = vsock_sk(sk);
err = 0;
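/* note: msg->msg_namelen is never set on any path out of this
   function, so recvfrom() reports uninitialized kernel stack as the
   address length (this record's CWE-200) */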
lock_sock(sk);
if (sk->sk_state != SS_CONNECTED) {
/* Recvmsg is supposed to return 0 if a peer performs an
* orderly shutdown. Differentiate between that case and when a
* peer has not connected or a local shutdown occurred with the
* SOCK_DONE flag.
*/
if (sock_flag(sk, SOCK_DONE))
err = 0;
else
err = -ENOTCONN;
goto out;
}
if (flags & MSG_OOB) {
err = -EOPNOTSUPP;
goto out;
}
/* We don't check peer_shutdown flag here since peer may actually shut
* down, but there can be data in the queue that a local socket can
* receive.
*/
if (sk->sk_shutdown & RCV_SHUTDOWN) {
err = 0;
goto out;
}
/* It is valid on Linux to pass in a zero-length receive buffer. This
* is not an error. We may as well bail out now.
*/
if (!len) {
err = 0;
goto out;
}
/* We must not copy less than target bytes into the user's buffer
* before returning successfully, so we wait for the consume queue to
* have that much data to consume before dequeueing. Note that this
* makes it impossible to handle cases where target is greater than the
* queue size.
*/
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (target >= transport->stream_rcvhiwat(vsk)) {
err = -ENOMEM;
goto out;
}
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
copied = 0;
err = transport->notify_recv_init(vsk, target, &recv_data);
if (err < 0)
goto out;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (1) {
s64 ready = vsock_stream_has_data(vsk);
if (ready < 0) {
/* Invalid queue pair content. XXX This should be
* changed to a connection reset in a later change.
*/
err = -ENOMEM;
goto out_wait;
} else if (ready > 0) {
ssize_t read;
err = transport->notify_recv_pre_dequeue(
vsk, target, &recv_data);
if (err < 0)
break;
read = transport->stream_dequeue(
vsk, msg->msg_iov,
len - copied, flags);
if (read < 0) {
err = -ENOMEM;
break;
}
copied += read;
err = transport->notify_recv_post_dequeue(
vsk, target, read,
!(flags & MSG_PEEK), &recv_data);
if (err < 0)
goto out_wait;
if (read >= target || flags & MSG_PEEK)
break;
target -= read;
} else {
if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
|| (vsk->peer_shutdown & SEND_SHUTDOWN)) {
break;
}
/* Don't wait for non-blocking sockets. */
if (timeout == 0) {
err = -EAGAIN;
break;
}
err = transport->notify_recv_pre_block(
vsk, target, &recv_data);
if (err < 0)
break;
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
break;
} else if (timeout == 0) {
err = -EAGAIN;
break;
}
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
}
if (sk->sk_err)
err = -sk->sk_err;
else if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
if (copied > 0) {
/* We only do these additional bookkeeping/notification steps
* if we actually copied something out of the queue pair
* instead of just peeking ahead.
*/
if (!(flags & MSG_PEEK)) {
/* If the other side has shutdown for sending and there
* is nothing more to read, then modify the socket
* state.
*/
if (vsk->peer_shutdown & SEND_SHUTDOWN) {
if (vsock_stream_has_data(vsk) <= 0) {
sk->sk_state = SS_UNCONNECTED;
sock_set_flag(sk, SOCK_DONE);
sk->sk_state_change(sk);
}
}
}
err = copied;
}
out_wait:
finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
}
|
CWE-200
| 179,200 | 994 |
314567100570998213294758795947569447860
| null | null | null |
linux
|
680d04e0ba7e926233e3b9cee59125ce181f66ba
| 1 |
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
struct vsock_sock *vsk,
struct msghdr *msg, size_t len,
int flags)
{
int err;
int noblock;
struct vmci_datagram *dg;
size_t payload_len;
struct sk_buff *skb;
noblock = flags & MSG_DONTWAIT;
if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
return -EOPNOTSUPP;
/* Retrieve the head sk_buff from the socket's receive queue. */
err = 0;
skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
if (err)
return err;
if (!skb)
return -EAGAIN;
dg = (struct vmci_datagram *)skb->data;
if (!dg)
/* err is 0, meaning we read zero bytes. */
goto out;
payload_len = dg->payload_size;
/* Ensure the sk_buff matches the payload size claimed in the packet. */
if (payload_len != skb->len - sizeof(*dg)) {
err = -EINVAL;
goto out;
}
if (payload_len > len) {
payload_len = len;
msg->msg_flags |= MSG_TRUNC;
}
/* Place the datagram payload in the user's iovec. */
err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
payload_len);
if (err)
goto out;
msg->msg_namelen = 0;
if (msg->msg_name) {
struct sockaddr_vm *vm_addr;
/* Provide the address of the sender. */
vm_addr = (struct sockaddr_vm *)msg->msg_name;
vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
msg->msg_namelen = sizeof(*vm_addr);
}
err = payload_len;
out:
skb_free_datagram(&vsk->sk, skb);
return err;
}
|
CWE-200
| 179,201 | 995 |
299433778323540956503556179829030220591
| null | null | null |
linux
|
4a184233f21645cf0b719366210ed445d1024d72
| 1 |
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
size_t copied;
unsigned char *asmptr;
struct sk_buff *skb;
int n, er, qbit;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
return er;
qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
skb_pull(skb, ROSE_MIN_LEN);
if (rose->qbitincl) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
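/* note: the sockaddr below is filled field by field with no memset,
   so struct padding and unwritten digipeater slots leak kernel stack
   to user space (this record's CWE-200) */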
if (srose != NULL) {
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
for (n = 0 ; n < rose->dest_ndigis ; n++)
full_srose->srose_digis[n] = rose->dest_digis[n];
msg->msg_namelen = sizeof(struct full_sockaddr_rose);
} else {
if (rose->dest_ndigis >= 1) {
srose->srose_ndigis = 1;
srose->srose_digi = rose->dest_digis[0];
}
msg->msg_namelen = sizeof(struct sockaddr_rose);
}
}
skb_free_datagram(sk, skb);
return copied;
}
|
CWE-200
| 179,205 | 999 |
8668028458710471572364550407621650238
| null | null | null |
linux
|
d26d6504f23e803824e8ebd14e52d4fc0a0b09cb
| 1 |
static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
unsigned int copied, rlen;
struct sk_buff *skb, *cskb;
int err = 0;
pr_debug("%p %zu\n", sk, len);
lock_sock(sk);
if (sk->sk_state == LLCP_CLOSED &&
skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
return 0;
}
release_sock(sk);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
pr_err("Recv datagram failed state %d %d %d",
sk->sk_state, err, sock_error(sk));
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
rlen = skb->len; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
cskb = skb;
if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
}
sock_recv_timestamp(msg, sk, skb);
if (sk->sk_type == SOCK_DGRAM && msg->msg_name) {
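/* note: no memset of the sockaddr below -- fields beyond the ones
   assigned here, plus padding, reach user space uninitialized (this
   record's CWE-200) */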
struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb);
struct sockaddr_nfc_llcp *sockaddr =
(struct sockaddr_nfc_llcp *) msg->msg_name;
msg->msg_namelen = sizeof(struct sockaddr_nfc_llcp);
pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
sockaddr->sa_family = AF_NFC;
sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
sockaddr->dsap = ui_cb->dsap;
sockaddr->ssap = ui_cb->ssap;
}
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
/* SOCK_STREAM: re-queue skb if it contains unreceived data */
if (sk->sk_type == SOCK_STREAM ||
sk->sk_type == SOCK_DGRAM ||
sk->sk_type == SOCK_RAW) {
skb_pull(skb, copied);
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
goto done;
}
}
kfree_skb(skb);
}
/* XXX Queue backlogged skbs */
done:
/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
copied = rlen;
return copied;
}
|
CWE-200
| 179,206 | 1,000 |
122362862671268240017611662503371115453
| null | null | null |
linux
|
3ce5efad47b62c57a4f5c54248347085a750ce0e
| 1 |
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
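/* note: sax is populated without a memset and msg_namelen is set
   unconditionally below, so padding and unset fields (e.g. the digi
   list) leak kernel stack to user space (this record's CWE-200) */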
if (sax != NULL) {
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
}
msg->msg_namelen = sizeof(*sax);
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
}
|
CWE-200
| 179,207 | 1,001 |
180984074270992013207621097845470989444
| null | null | null |
linux
|
c77a4b9cffb6215a15196ec499490d116dfad181
| 1 |
static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name;
const int nonblock = flags & MSG_DONTWAIT;
struct sk_buff *skb = NULL;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
u32 *seq;
unsigned long used;
int target; /* Read at least this many bytes */
long timeo;
lock_sock(sk);
copied = -ENOTCONN;
if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
goto out;
timeo = sock_rcvtimeo(sk, nonblock);
seq = &llc->copied_seq;
if (flags & MSG_PEEK) {
peek_seq = llc->copied_seq;
seq = &peek_seq;
}
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
copied = 0;
do {
u32 offset;
/*
* We need to check signals first, to get correct SIGURG
* handling. FIXME: Need to check this doesn't impact 1003.1g
* and move it down to the bottom of the loop
*/
if (signal_pending(current)) {
if (copied)
break;
copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
break;
}
/* Next get a buffer. */
skb = skb_peek(&sk->sk_receive_queue);
if (skb) {
offset = *seq;
goto found_ok_skb;
}
/* Well, if we have backlog, try to process it now yet. */
if (copied >= target && !sk->sk_backlog.tail)
break;
if (copied) {
if (sk->sk_err ||
sk->sk_state == TCP_CLOSE ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
!timeo ||
(flags & MSG_PEEK))
break;
} else {
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
copied = sock_error(sk);
break;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) {
if (!sock_flag(sk, SOCK_DONE)) {
/*
* This occurs when the user tries to read
* from a never-connected socket.
*/
copied = -ENOTCONN;
break;
}
break;
}
if (!timeo) {
copied = -EAGAIN;
break;
}
}
if (copied >= target) { /* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
} else
sk_wait_data(sk, &timeo);
if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
current->comm,
task_pid_nr(current));
peek_seq = llc->copied_seq;
}
continue;
found_ok_skb:
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
used = len;
if (!(flags & MSG_TRUNC)) {
int rc = skb_copy_datagram_iovec(skb, offset,
msg->msg_iov, used);
if (rc) {
/* Exception. Bailout! */
if (!copied)
copied = -EFAULT;
break;
}
}
*seq += used;
copied += used;
len -= used;
/* For non-stream protocols we get one packet per recvmsg call */
if (sk->sk_type != SOCK_STREAM)
goto copy_uaddr;
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
sk_eat_skb(sk, skb, false);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
/* Partial read */
if (used + offset < skb->len)
continue;
} while (len > 0);
out:
release_sock(sk);
return copied;
copy_uaddr:
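/* note: msg_namelen is only updated on this path; returns through
   'out' above leave it untouched, leaking an uninitialized address
   length to the caller (this record's CWE-200) */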
if (uaddr != NULL && skb != NULL) {
memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
msg->msg_namelen = sizeof(*uaddr);
}
if (llc_sk(sk)->cmsg_flags)
llc_cmsg_rcv(msg, skb);
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
sk_eat_skb(sk, skb, false);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
goto out;
}
|
CWE-200
| 179,208 | 1,002 |
63715411456460912940940241556670716923
| null | null | null |
linux
|
b860d3cc62877fad02863e2a08efff69a19382d2
| 1 |
static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*lsa);
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (lsa) {
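/* note: lsa->l2tp_conn_id is never written while the surrounding
   fields are, so those bytes reach user space uninitialized (this
   record's CWE-200) */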
lsa->l2tp_family = AF_INET6;
lsa->l2tp_unused = 0;
lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = IP6CB(skb)->iif;
}
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
return err ? err : copied;
}
|
CWE-200
| 179,209 | 1,003 |
153456865559495391283664871292336985954
| null | null | null |
linux
|
a5598bd9c087dc0efc250a5221e5d0e6f584ee88
| 1 |
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int copied, rlen;
struct sk_buff *skb, *rskb, *cskb;
int err = 0;
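/* note: msg->msg_namelen is never set anywhere in this function,
   leaking uninitialized stack via the reported address length (this
   record's CWE-200) */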
if ((sk->sk_state == IUCV_DISCONN) &&
skb_queue_empty(&iucv->backlog_skb_q) &&
skb_queue_empty(&sk->sk_receive_queue) &&
list_empty(&iucv->message_q.list))
return 0;
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
/* receive/dequeue next skb:
* the function understands MSG_PEEK and, thus, does not dequeue skb */
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
rlen = skb->len; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
if (!rlen)
sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
}
/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
if (sk->sk_type == SOCK_SEQPACKET) {
if (copied < rlen)
msg->msg_flags |= MSG_TRUNC;
/* each iucv message contains a complete record */
msg->msg_flags |= MSG_EOR;
}
/* create control message to store iucv msg target class:
* get the trgcls from the control buffer of the skb due to
* fragmentation of original iucv message. */
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
CB_TRGCLS_LEN, CB_TRGCLS(skb));
if (err) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return err;
}
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
/* SOCK_STREAM: re-queue skb if it contains unreceived data */
if (sk->sk_type == SOCK_STREAM) {
skb_pull(skb, copied);
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
goto done;
}
}
kfree_skb(skb);
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_recv);
if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
WARN_ON(1);
iucv_sock_close(sk);
return -EFAULT;
}
}
/* Queue backlog skbs */
spin_lock_bh(&iucv->message_q.lock);
rskb = skb_dequeue(&iucv->backlog_skb_q);
while (rskb) {
if (sock_queue_rcv_skb(sk, rskb)) {
skb_queue_head(&iucv->backlog_skb_q,
rskb);
break;
} else {
rskb = skb_dequeue(&iucv->backlog_skb_q);
}
}
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (!list_empty(&iucv->message_q.list))
iucv_process_message_q(sk);
if (atomic_read(&iucv->msg_recv) >=
iucv->msglimit / 2) {
err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
if (err) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
}
}
spin_unlock_bh(&iucv->message_q.lock);
}
done:
/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
copied = rlen;
return copied;
}
|
CWE-200
| 179,210 | 1,004 |
292835413067186272787791611262939477245
| null | null | null |
linux
|
5ae94c0d2f0bed41d6718be743985d61b7f5c47d
| 1 |
static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
struct sk_buff *skb;
size_t copied;
int err;
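/* note: msg->msg_namelen is never set on any path here, so the
   address length reported to user space is uninitialized stack (this
   record's CWE-200) */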
IRDA_DEBUG(4, "%s()\n", __func__);
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
__func__, copied, size);
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
skb_free_datagram(sk, skb);
/*
* Check if we have previously stopped IrTTP and we now
* have more free space in our rx_queue. If so tell IrTTP
* to start delivering frames again before our rx_queue gets
* empty
*/
if (self->rx_flow == FLOW_STOP) {
if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
}
}
return copied;
}
|
CWE-200
| 179,211 | 1,005 |
124022070218876064215658571670133482285
| null | null | null |
linux
|
2d6fbfe733f35c6b355c216644e08e149c61b271
| 1 |
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
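/* note: m->msg_namelen is never updated in this function -- the same
   msg_name info-leak pattern as the other recvmsg records here */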
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
}
|
CWE-200
| 179,212 | 1,006 |
338803166792488490555003114574224123799
| null | null | null |
linux
|
e11e0455c0d7d3d62276a0c55d9dfbc16779d691
| 1 |
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int len;
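/* note: the DEFER_SETUP early return below exits without updating
   msg_namelen, the leak this record's fix commit addresses */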
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
return 0;
}
len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
lock_sock(sk);
if (!(flags & MSG_PEEK) && len > 0)
atomic_sub(len, &sk->sk_rmem_alloc);
if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
release_sock(sk);
return len;
}
|
CWE-200
| 179,214 | 1,007 |
318172830474011822972223789938922002319
| null | null | null |
linux
|
4683f42fde3977bdb4e8a09622788cc8b5313778
| 1 |
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
size_t copied;
int err;
BT_DBG("sock %p sk %p len %zu", sock, sk, len);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
skb = skb_recv_datagram(sk, flags, noblock, &err);
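/* note: the shutdown path below returns 0 before msg_namelen is
   zeroed, so that early exit still reports an uninitialized address
   length (per the fix commit) */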
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err == 0)
sock_recv_ts_and_drops(msg, sk, skb);
skb_free_datagram(sk, skb);
return err ? : copied;
}
|
CWE-200
| 179,215 | 1,008 |
4403052444065820777651314476491855154
| null | null | null |
linux
|
ef3313e84acbf349caecae942ab3ab731471f1a1
| 1 |
static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied;
int err = 0;
lock_sock(sk);
/*
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
/* Now we can treat all alike */
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
if (!ax25_sk(sk)->pidincl)
skb_pull(skb, 1); /* Remove PID */
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (msg->msg_namelen != 0) {
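/* note: sax is filled without a memset while msg_namelen claims a
   full_sockaddr_ax25, so padding and unused digipeater entries leak
   kernel stack (this record's CWE-200) */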
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
ax25_digi digi;
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;
/* We set this correctly, even though we may not let the
application know the digi calls further down (because it
did NOT ask to know them). This could get political... **/
sax->sax25_ndigis = digi.ndigi;
sax->sax25_call = src;
if (sax->sax25_ndigis != 0) {
int ct;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;
for (ct = 0; ct < digi.ndigi; ct++)
fsa->fsa_digipeater[ct] = digi.calls[ct];
}
msg->msg_namelen = sizeof(struct full_sockaddr_ax25);
}
skb_free_datagram(sk, skb);
err = copied;
out:
release_sock(sk);
return err;
}
|
CWE-200
| 179,216 | 1,009 |
198649123377212608141537931049932965607
| null | null | null |
linux
|
9b3e617f3df53822345a8573b6d358f6b9e5ed87
| 1 |
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
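/* note: msg->msg_namelen is never set in this function -- the same
   msg_name stack leak as the other recvmsg records (per the fix
   commit, which updates it) */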
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
}
|
CWE-200
| 179,217 | 1,010 |
84284193461125217600399094486913333707
| null | null | null |
linux
|
72a763d805a48ac8c0bf48fdb510e84c12de51fe
| 1 |
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
unsigned long iovlen;
struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
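/* note: msg->msg_namelen is never set here either; per the fix
   commit, recvmsg on an algif socket reports an uninitialized
   address length */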
lock_sock(sk);
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
while (seglen) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (!sg->length)
sg++;
used = ctx->used;
if (!used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto unlock;
}
used = min_t(unsigned long, used, seglen);
used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
err = used;
if (err < 0)
goto unlock;
if (ctx->more || used < ctx->used)
used -= used % bs;
err = -EINVAL;
if (!used)
goto free;
ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
free:
af_alg_free_sg(&ctx->rsgl);
if (err)
goto unlock;
copied += used;
from += used;
seglen -= used;
skcipher_pull_sgl(sk, used);
}
}
err = 0;
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
return copied ?: err;
}
|
CWE-200
| 179,219 | 1,011 |
54729755431548461547794629583782960655
| null | null | null |
linux
|
12ae030d54ef250706da5642fc7697cc60ad0df7
| 1 |
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
struct perf_event *p_event)
{
/* The ftrace function trace is allowed only for root. */
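/* note: perf_paranoid_kernel() only trips at paranoid level > 1, so
   with the default level this root-only intent is not enforced (per
   the fix commit) */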
if (ftrace_event_is_function(tp_event) &&
perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EPERM;
/* No tracing, just counting, so no obvious leak */
if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
return 0;
/* Some events are ok to be traced by non-root users... */
if (p_event->attach_state == PERF_ATTACH_TASK) {
if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
return 0;
}
/*
* ...otherwise raw tracepoint data can be a severe data leak,
* only allow root to have these.
*/
if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
|
CWE-264
| 179,220 | 1,012 |
282170608950902628612724760199058575746
| null | null | null |
linux
|
d049f74f2dbe71354d43d393ac3a188947811348
| 1 |
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
/* May we inspect the given task?
* This check is used both for attaching with ptrace
* and for allowing access to sensitive information in /proc.
*
* ptrace_attach denies several cases that /proc allows
* because setting up the necessary parent/child relationship
* or halting the specified task is impossible.
*/
int dumpable = 0;
/* Don't let security modules deny introspection */
if (same_thread_group(task, current))
return 0;
rcu_read_lock();
tcred = __task_cred(task);
if (uid_eq(cred->uid, tcred->euid) &&
uid_eq(cred->uid, tcred->suid) &&
uid_eq(cred->uid, tcred->uid) &&
gid_eq(cred->gid, tcred->egid) &&
gid_eq(cred->gid, tcred->sgid) &&
gid_eq(cred->gid, tcred->gid))
goto ok;
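/* note: per the fix commit, capability in the target's user_ns is not
   sufficient on its own -- the target's uids/gids must also be mapped
   from the caller's namespace (this record's CWE-264) */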
if (ptrace_has_cap(tcred->user_ns, mode))
goto ok;
rcu_read_unlock();
return -EPERM;
ok:
rcu_read_unlock();
smp_rmb();
if (task->mm)
dumpable = get_dumpable(task->mm);
rcu_read_lock();
if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
rcu_read_unlock();
return -EPERM;
}
rcu_read_unlock();
return security_ptrace_access_check(task, mode);
}
|
CWE-264
| 179,221 | 1,013 |
145541299218287462727348997150264754367
| null | null | null |
linux
|
cea4dcfdad926a27a18e188720efe0f2c9403456
| 1 |
static int iscsi_add_notunderstood_response(
char *key,
char *value,
struct iscsi_param_list *param_list)
{
struct iscsi_extra_response *extra_response;
if (strlen(value) > VALUE_MAXLEN) {
pr_err("Value for notunderstood key \"%s\" exceeds %d,"
" protocol error.\n", key, VALUE_MAXLEN);
return -1;
}
extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
if (!extra_response) {
pr_err("Unable to allocate memory for"
" struct iscsi_extra_response.\n");
return -1;
}
INIT_LIST_HEAD(&extra_response->er_list);
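/* note: only 'value' was length-checked above; 'key' is copied below
   with strlen(key) + 1 into a fixed-size field, the heap overflow
   behind this record's CWE-119 tag */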
strncpy(extra_response->key, key, strlen(key) + 1);
strncpy(extra_response->value, NOTUNDERSTOOD,
strlen(NOTUNDERSTOOD) + 1);
list_add_tail(&extra_response->er_list,
¶m_list->extra_response_list);
return 0;
}
|
CWE-119
| 179,222 | 1,014 |
100762060795839649631362584880083269766
| null | null | null |
linux
|
84d73cd3fb142bf1298a8c13fd4ca50fd2432372
| 1 |
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask)
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats;
struct nlattr *attr, *af_spec;
struct rtnl_af_ops *af_ops;
struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
if (nlh == NULL)
return -EMSGSIZE;
ifm = nlmsg_data(nlh);
ifm->ifi_family = AF_UNSPEC;
ifm->__ifi_pad = 0;
ifm->ifi_type = dev->type;
ifm->ifi_index = dev->ifindex;
ifm->ifi_flags = dev_get_flags(dev);
ifm->ifi_change = change;
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
nla_put_u8(skb, IFLA_OPERSTATE,
netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
nla_put_u32(skb, IFLA_GROUP, dev->group) ||
nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
#ifdef CONFIG_RPS
nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
(dev->ifindex != dev->iflink &&
nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
(upper_dev &&
nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
(dev->qdisc &&
nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
(dev->ifalias &&
nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
goto nla_put_failure;
if (1) {
struct rtnl_link_ifmap map = {
.mem_start = dev->mem_start,
.mem_end = dev->mem_end,
.base_addr = dev->base_addr,
.irq = dev->irq,
.dma = dev->dma,
.port = dev->if_port,
};
if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
goto nla_put_failure;
}
if (dev->addr_len) {
if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
goto nla_put_failure;
}
attr = nla_reserve(skb, IFLA_STATS,
sizeof(struct rtnl_link_stats));
if (attr == NULL)
goto nla_put_failure;
stats = dev_get_stats(dev, &temp);
copy_rtnl_link_stats(nla_data(attr), stats);
attr = nla_reserve(skb, IFLA_STATS64,
sizeof(struct rtnl_link_stats64));
if (attr == NULL)
goto nla_put_failure;
copy_rtnl_link_stats64(nla_data(attr), stats);
if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
goto nla_put_failure;
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
&& (ext_filter_mask & RTEXT_FILTER_VF)) {
int i;
struct nlattr *vfinfo, *vf;
int num_vfs = dev_num_vf(dev->dev.parent);
vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
if (!vfinfo)
goto nla_put_failure;
for (i = 0; i < num_vfs; i++) {
struct ifla_vf_info ivi;
struct ifla_vf_mac vf_mac;
struct ifla_vf_vlan vf_vlan;
struct ifla_vf_tx_rate vf_tx_rate;
struct ifla_vf_spoofchk vf_spoofchk;
/*
* Not all SR-IOV capable drivers support the
* spoofcheck query. Preset to -1 so the user
* space tool can detect that the driver didn't
* report anything.
*/
ivi.spoofchk = -1;
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
vf_mac.vf =
vf_vlan.vf =
vf_tx_rate.vf =
vf_spoofchk.vf = ivi.vf;
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
vf_vlan.qos = ivi.qos;
vf_tx_rate.rate = ivi.tx_rate;
vf_spoofchk.setting = ivi.spoofchk;
vf = nla_nest_start(skb, IFLA_VF_INFO);
if (!vf) {
nla_nest_cancel(skb, vfinfo);
goto nla_put_failure;
}
if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
&vf_tx_rate) ||
nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
&vf_spoofchk))
goto nla_put_failure;
nla_nest_end(skb, vf);
}
nla_nest_end(skb, vfinfo);
}
if (rtnl_port_fill(skb, dev))
goto nla_put_failure;
if (dev->rtnl_link_ops) {
if (rtnl_link_fill(skb, dev) < 0)
goto nla_put_failure;
}
if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
goto nla_put_failure;
list_for_each_entry(af_ops, &rtnl_af_ops, list) {
if (af_ops->fill_link_af) {
struct nlattr *af;
int err;
if (!(af = nla_nest_start(skb, af_ops->family)))
goto nla_put_failure;
err = af_ops->fill_link_af(skb, dev);
/*
* Caller may return ENODATA to indicate that there
* was no data to be dumped. This is not an error, it
* means we should trim the attribute header and
* continue.
*/
if (err == -ENODATA)
nla_nest_cancel(skb, af);
else if (err < 0)
goto nla_put_failure;
nla_nest_end(skb, af);
}
}
nla_nest_end(skb, af_spec);
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
|
CWE-399
| 179,228 | 1,020 |
305591951297307640988468536828415934525
| null | null | null |
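In the rtnl_fill_ifinfo() record above, struct ifla_vf_info ivi is only partially filled: ndo_get_vf_config() implementations set some fields and ivi.spoofchk is preset, but the remaining stack bytes are copied to userspace through the IFLA_VF_* attributes. The fix named by commit 84d73cd3 ("rtnl: fix info leak on RTM_GETLINK request for VF devices") zeroes the structure first:

struct ifla_vf_info ivi;

memset(&ivi, 0, sizeof(ivi));   /* don't leak stack bytes via netlink */
ivi.spoofchk = -1;
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
	break;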
linux
|
fc9bbca8f650e5f738af8806317c0a041a48ae4a
| 1 |
fb_mmap(struct file *file, struct vm_area_struct * vma)
{
struct fb_info *info = file_fb_info(file);
struct fb_ops *fb;
unsigned long off;
unsigned long start;
u32 len;
if (!info)
return -ENODEV;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
off = vma->vm_pgoff << PAGE_SHIFT;
fb = info->fbops;
if (!fb)
return -ENODEV;
mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
res = fb->fb_mmap(info, vma);
mutex_unlock(&info->mm_lock);
return res;
}
/* frame buffer memory */
start = info->fix.smem_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
if (off >= len) {
/* memory mapped io */
off -= len;
if (info->var.accel_flags) {
mutex_unlock(&info->mm_lock);
return -EINVAL;
}
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
}
mutex_unlock(&info->mm_lock);
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
return -EINVAL;
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
fb_pgprotect(file, vma, off);
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
|
CWE-189
| 179,232 | 1,022 |
61607845425516357035166702793045732353
| null | null | null |
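In the fb_mmap() record above (CWE-189), the bounds check (vma->vm_end - vma->vm_start + off) > len can wrap around for a huge page offset, letting a mapping reach past the framebuffer. Commit fc9bbca8 converted this function to the vm_iomap_memory() helper, which performs the same validation with overflow-safe arithmetic; a sketch of the converted tail, after start and len are computed:

/* helper validates vm_pgoff and the mapping size without wrapping */
return vm_iomap_memory(vma, start, len);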
linux
|
9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
| 1 |
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
CRYPTO_MAX_ALG_NAME);
ualg->cru_flags = alg->cra_flags;
ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_report_larval rl;
snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
sizeof(struct crypto_report_larval), &rl))
goto nla_put_failure;
goto out;
}
if (alg->cra_type && alg->cra_type->report) {
if (alg->cra_type->report(skb, alg))
goto nla_put_failure;
goto out;
}
switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_COMPRESS:
if (crypto_report_comp(skb, alg))
goto nla_put_failure;
break;
}
out:
return 0;
nla_put_failure:
return -EMSGSIZE;
}
|
CWE-310
| 179,241 | 1,030 |
20209587001997521145887409055557500066
| null | null | null |
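In the crypto_report_one() record above, memcpy() copies the full sizeof() of each fixed-size name field, so whatever heap bytes follow the NUL terminator of cra_name and friends are sent to userspace. The fix in commit 9a5467bf ("crypto: user - fix info leaks in report API") switches to strncpy(), which zero-pads the rest of the destination:

strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
strncpy(ualg->cru_driver_name, alg->cra_driver_name,
	sizeof(ualg->cru_driver_name));
strncpy(ualg->cru_module_name, module_name(alg->cra_module),
	sizeof(ualg->cru_module_name));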
linux
|
85dfb745ee40232876663ae206cba35f24ab2a40
| 1 |
static int key_notify_policy_flush(const struct km_event *c)
{
struct sk_buff *skb_out;
struct sadb_msg *hdr;
skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb_out)
return -ENOBUFS;
hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
hdr->sadb_msg_type = SADB_X_SPDFLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
|
CWE-119
| 179,245 | 1,034 |
25111496152480302463340671723285260016
| null | null | null |
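In the key_notify_policy_flush() record above, sadb_msg_satype is never written, so one byte of the freshly allocated (uninitialized) skb payload is broadcast to every PF_KEY listener. The one-line fix is visible in the next record, which carries the same function post-patch:

hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;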
linux
|
a5cc68f3d63306d0d288f31edfc2ae6ef8ecd887
| 1 |
static int key_notify_policy_flush(const struct km_event *c)
{
struct sk_buff *skb_out;
struct sadb_msg *hdr;
skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb_out)
return -ENOBUFS;
hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
hdr->sadb_msg_type = SADB_X_SPDFLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
|
CWE-119
| 179,246 | 1,035 |
89775021181796415187010260408670555020
| null | null | null |
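This record is the same function after the satype fix above; it is flagged again because sadb_msg_reserved is still left uninitialized. See the sketch after the next record, which shares the same fixing commit (a5cc68f3).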
linux
|
a5cc68f3d63306d0d288f31edfc2ae6ef8ecd887
| 1 |
static int key_notify_sa_flush(const struct km_event *c)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb)
return -ENOBUFS;
hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
hdr->sadb_msg_type = SADB_FLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
|
CWE-119
| 179,247 | 1,036 |
39936032300218859141460935336518748381
| null | null | null |
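Both af_key records above labeled with commit a5cc68f3 still leak the sadb_msg_reserved field of the uninitialized skb payload to PF_KEY listeners. The likely one-line fix in each notify function:

hdr->sadb_msg_reserved = 0;   /* don't leak a heap byte to listeners */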
linux
|
a963a37d384d71ad43b3e9e79d68d42fbe0901f3
| 1 |
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
struct dst_entry *dst,
const struct flowi6 *fl6)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct rt6_info *rt = (struct rt6_info *)dst;
if (!dst)
goto out;
/* Yes, checking route validity in the unconnected
 * case is not very simple. Take into account
 * that we do not support routing by source, TOS,
 * or MSG_DONTROUTE --ANK (980726)
 *
 * 1. ip6_rt_check(): If the route was a host route,
 * check that the cached destination is current.
 * If it is a network route, we may still
 * check its validity using a saved pointer
 * to the last used address: daddr_cache.
 * We do not want to save the whole address now
 * (because the main consumer of this service
 * is TCP, which does not have this problem),
 * so the last trick works only on connected
 * sockets.
 * 2. The oif also should be the same.
*/
if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
(fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
dst_release(dst);
dst = NULL;
}
out:
return dst;
}
|
CWE-20
| 179,248 | 1,037 |
297614718685075805140610808856983032495
| null | null | null |
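In the ip6_sk_dst_check() record above (CWE-20), dst is cast to struct rt6_info before any type check; an AF_INET6 socket connected to a v4-mapped destination caches an IPv4 rtable, so the rt6i_dst/rt6i_src reads touch memory of the wrong type. A sketch of the fix shape suggested by commit a963a37d ("ipv6: ip6_sk_dst_check() must not assume ipv6 dst"):

struct rt6_info *rt;

if (!dst)
	goto out;

if (dst->ops->family != AF_INET6) {
	dst_release(dst);
	return NULL;
}

rt = (struct rt6_info *)dst;   /* now known to really be an rt6_info */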
linux
|
f2815633504b442ca0b0605c16bf3d88a3a0fcea
| 1 |
sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_disposition_t retval;
struct sctp_chunk *chunk = arg;
struct sctp_association *new_asoc;
int error = 0;
char action;
struct sctp_chunk *err_chk_p;
/* Make sure that the chunk has a valid length from the protocol
* perspective. In this case check to make sure we have at least
* enough for the chunk header. Cookie length verification is
* done later.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
/* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
* of a duplicate COOKIE ECHO match the Verification Tags of the
* current association, consider the State Cookie valid even if
* the lifespan is exceeded.
*/
new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
&err_chk_p);
/* FIXME:
* If the re-build failed, what is the proper error path
* from here?
*
* [We should abort the association. --piggy]
*/
if (!new_asoc) {
/* FIXME: Several errors are possible. A bad cookie should
* be silently discarded, but think about logging it too.
*/
switch (error) {
case -SCTP_IERROR_NOMEM:
goto nomem;
case -SCTP_IERROR_STALE_COOKIE:
sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
err_chk_p);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case -SCTP_IERROR_BAD_SIG:
default:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
/* Compare the tie_tag in cookie with the verification tag of
* current association.
*/
action = sctp_tietags_compare(new_asoc, asoc);
switch (action) {
case 'A': /* Association restart. */
retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'B': /* Collision case B. */
retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'C': /* Collision case C. */
retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'D': /* Collision case D. */
retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
new_asoc);
break;
default: /* Discard packet for all others. */
retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
break;
}
/* Delete the temporary new association. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
/* Restore association pointer to provide SCTP command interpreter
* with a valid context in case it needs to manipulate
* the queues */
sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
SCTP_ASOC((struct sctp_association *)asoc));
return retval;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
| 179,251 | 1,039 |
32908841536456586528306842066836880926
| null | null | null |
|
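In the sctp_sf_do_5_2_4_dupcook() record above, the temporary new_asoc is installed with SCTP_CMD_NEW_ASOC, which hashes the association and assigns it an id, only for SCTP_CMD_DELETE_TCB to tear it down immediately; in that window other code can look the doomed association up. The fix suggested by the commit subject ("Use correct sideffect command in duplicate cookie handling") swaps in the lighter command:

sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());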
curl
|
192c4f788d48f82c03e9cef40013f34370e90737
| 1 |
CURLcode Curl_urldecode(struct SessionHandle *data,
const char *string, size_t length,
char **ostring, size_t *olen,
bool reject_ctrl)
{
size_t alloc = (length?length:strlen(string))+1;
char *ns = malloc(alloc);
unsigned char in;
size_t strindex=0;
unsigned long hex;
CURLcode res;
if(!ns)
return CURLE_OUT_OF_MEMORY;
while(--alloc > 0) {
in = *string;
if(('%' == in) && ISXDIGIT(string[1]) && ISXDIGIT(string[2])) {
/* this is two hexadecimal digits following a '%' */
char hexstr[3];
char *ptr;
hexstr[0] = string[1];
hexstr[1] = string[2];
hexstr[2] = 0;
hex = strtoul(hexstr, &ptr, 16);
in = curlx_ultouc(hex); /* this long is never bigger than 255 anyway */
res = Curl_convert_from_network(data, &in, 1);
if(res) {
/* Curl_convert_from_network calls failf if unsuccessful */
free(ns);
return res;
}
string+=2;
alloc-=2;
}
if(reject_ctrl && (in < 0x20)) {
free(ns);
return CURLE_URL_MALFORMAT;
}
ns[strindex++] = in;
string++;
}
ns[strindex]=0; /* terminate it */
if(olen)
/* store output size */
*olen = strindex;
if(ostring)
/* store output string */
*ostring = ns;
return CURLE_OK;
}
|
CWE-119
| 179,252 | 1,040 |
85709973777579247011240190240020563804
| null | null | null |
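In the Curl_urldecode() record above, the '%XX' branch reads string[1] and string[2] without confirming that two more input bytes actually exist, so a trailing '%' makes the parser read past the buffer. The guard added by commit 192c4f78, as best as can be inferred:

if(('%' == in) && (alloc > 2) &&
   ISXDIGIT(string[1]) && ISXDIGIT(string[2])) {
	/* this is two hexadecimal digits following a '%' */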
linux
|
b9e146d8eb3b9ecae5086d373b50fa0c1f3e7f0f
| 1 |
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
struct siginfo info;
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_TKILL;
info.si_pid = task_tgid_vnr(current);
info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
return do_send_specific(tgid, pid, sig, &info);
}
|
CWE-399
| 179,254 | 1,041 |
92468020625669760156181034635618824508
| null | null | null |
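In the do_tkill() record above, struct siginfo is a stack variable and only a few fields are assigned; the padding and the unused union members reach the signaled task's userspace unchanged. The likely shape of the fix in commit b9e146d8:

struct siginfo info = {};   /* zero padding and unused union members */

info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_TKILL;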
linux
|
604c499cbbcc3d5fe5fb8d53306aa0fae1990109
| 1 |
static int dispatch_discard_io(struct xen_blkif *blkif,
struct blkif_request *req)
{
int err = 0;
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
unsigned long secure;
blkif->st_ds_req++;
xen_blkif_get(blkif);
secure = (blkif->vbd.discard_secure &&
(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
BLKDEV_DISCARD_SECURE : 0;
err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
req->u.discard.nr_sectors,
GFP_KERNEL, secure);
if (err == -EOPNOTSUPP) {
pr_debug(DRV_PFX "discard op failed, not supported\n");
status = BLKIF_RSP_EOPNOTSUPP;
} else if (err)
status = BLKIF_RSP_ERROR;
make_response(blkif, req->u.discard.id, req->operation, status);
xen_blkif_put(blkif);
return err;
}
|
CWE-20
| 179,255 | 1,042 |
326182960843919999499419294954174193791
| null | null | null |
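In the dispatch_discard_io() record above, the discard is issued without checking the backend device's permissions, so a guest with a read-only vbd can still destroy data. A heavily hedged sketch of the fix shape implied by the commit subject ("Check device permissions before allowing OP_DISCARD"), assuming the xen_vbd_translate() helper used by the normal read/write path and a fail_response label from the fixed function:

struct phys_req preq;

preq.sector_number = req->u.discard.sector_number;
preq.nr_sects      = req->u.discard.nr_sectors;

err = xen_vbd_translate(&preq, blkif, WRITE);
if (err) {
	status = BLKIF_RSP_ERROR;   /* read-only device: refuse the discard */
	goto fail_response;
}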
linux
|
baff42ab1494528907bf4d5870359e31711746ae
| 1 |
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor)
{
struct sk_buff *skb;
struct tcp_sock *tp = tcp_sk(sk);
u32 seq = tp->copied_seq;
u32 offset;
int copied = 0;
if (sk->sk_state == TCP_LISTEN)
return -ENOTCONN;
while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
if (offset < skb->len) {
int used;
size_t len;
len = skb->len - offset;
/* Stop reading if we hit a patch of urgent data */
if (tp->urg_data) {
u32 urg_offset = tp->urg_seq - seq;
if (urg_offset < len)
len = urg_offset;
if (!len)
break;
}
used = recv_actor(desc, skb, offset, len);
if (used < 0) {
if (!copied)
copied = used;
break;
} else if (used <= len) {
seq += used;
copied += used;
offset += used;
}
/*
* If recv_actor drops the lock (e.g. TCP splice
* receive) the skb pointer might be invalid when
* getting here: tcp_collapse might have deleted it
* while aggregating skbs from the socket queue.
*/
skb = tcp_recv_skb(sk, seq-1, &offset);
if (!skb || (offset+1 != skb->len))
break;
}
if (tcp_hdr(skb)->fin) {
sk_eat_skb(sk, skb, 0);
++seq;
break;
}
sk_eat_skb(sk, skb, 0);
if (!desc->count)
break;
}
tp->copied_seq = seq;
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
if (copied > 0)
tcp_cleanup_rbuf(sk, copied);
return copied;
}
|
CWE-119
| 179,256 | 1,043 |
199403613559602624417210486913245263722
| null | null | null |
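In the tcp_read_sock() record above, tp->copied_seq is only updated after the loop, but recv_actor (e.g. the splice path) may drop the socket lock mid-loop; tcp_collapse() then runs against a stale copied_seq and can free an skb still referenced here. The fix in commit baff42ab appears to keep copied_seq current inside the loop:

sk_eat_skb(sk, skb, 0);
if (!desc->count)
	break;
tp->copied_seq = seq;   /* stay consistent while the lock may be dropped */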
openvpn
|
11d21349a4e7e38a025849479b36ace7c2eec2ee
| 1 |
openvpn_decrypt (struct buffer *buf, struct buffer work,
const struct crypto_options *opt,
const struct frame* frame)
{
static const char error_prefix[] = "Authenticate/Decrypt packet error";
struct gc_arena gc;
gc_init (&gc);
if (buf->len > 0 && opt->key_ctx_bi)
{
struct key_ctx *ctx = &opt->key_ctx_bi->decrypt;
struct packet_id_net pin;
bool have_pin = false;
/* Verify the HMAC */
if (ctx->hmac)
{
int hmac_len;
uint8_t local_hmac[MAX_HMAC_KEY_LENGTH]; /* HMAC of ciphertext computed locally */
hmac_ctx_reset(ctx->hmac);
/* Assume the length of the input HMAC */
hmac_len = hmac_ctx_size (ctx->hmac);
/* Authentication fails if insufficient data in packet for HMAC */
if (buf->len < hmac_len)
CRYPT_ERROR ("missing authentication info");
hmac_ctx_update (ctx->hmac, BPTR (buf) + hmac_len, BLEN (buf) - hmac_len);
hmac_ctx_final (ctx->hmac, local_hmac);
/* Compare locally computed HMAC with packet HMAC */
if (memcmp (local_hmac, BPTR (buf), hmac_len))
CRYPT_ERROR ("packet HMAC authentication failed");
ASSERT (buf_advance (buf, hmac_len));
}
/* Decrypt packet ID + payload */
if (ctx->cipher)
{
const unsigned int mode = cipher_ctx_mode (ctx->cipher);
const int iv_size = cipher_ctx_iv_length (ctx->cipher);
uint8_t iv_buf[OPENVPN_MAX_IV_LENGTH];
int outlen;
/* initialize work buffer with FRAME_HEADROOM bytes of prepend capacity */
ASSERT (buf_init (&work, FRAME_HEADROOM_ADJ (frame, FRAME_HEADROOM_MARKER_DECRYPT)));
/* use IV if user requested it */
CLEAR (iv_buf);
if (opt->flags & CO_USE_IV)
{
if (buf->len < iv_size)
CRYPT_ERROR ("missing IV info");
memcpy (iv_buf, BPTR (buf), iv_size);
ASSERT (buf_advance (buf, iv_size));
}
/* show the IV's initial state */
if (opt->flags & CO_USE_IV)
dmsg (D_PACKET_CONTENT, "DECRYPT IV: %s", format_hex (iv_buf, iv_size, 0, &gc));
if (buf->len < 1)
CRYPT_ERROR ("missing payload");
/* ctx->cipher was already initialized with key & keylen */
if (!cipher_ctx_reset (ctx->cipher, iv_buf))
CRYPT_ERROR ("cipher init failed");
/* Buffer overflow check (should never happen) */
if (!buf_safe (&work, buf->len))
CRYPT_ERROR ("buffer overflow");
/* Decrypt packet ID, payload */
if (!cipher_ctx_update (ctx->cipher, BPTR (&work), &outlen, BPTR (buf), BLEN (buf)))
CRYPT_ERROR ("cipher update failed");
work.len += outlen;
/* Flush the decryption buffer */
if (!cipher_ctx_final (ctx->cipher, BPTR (&work) + outlen, &outlen))
CRYPT_ERROR ("cipher final failed");
work.len += outlen;
dmsg (D_PACKET_CONTENT, "DECRYPT TO: %s",
format_hex (BPTR (&work), BLEN (&work), 80, &gc));
/* Get packet ID from plaintext buffer or IV, depending on cipher mode */
{
if (mode == OPENVPN_MODE_CBC)
{
if (opt->packet_id)
{
if (!packet_id_read (&pin, &work, BOOL_CAST (opt->flags & CO_PACKET_ID_LONG_FORM)))
CRYPT_ERROR ("error reading CBC packet-id");
have_pin = true;
}
}
else if (mode == OPENVPN_MODE_CFB || mode == OPENVPN_MODE_OFB)
{
struct buffer b;
ASSERT (opt->flags & CO_USE_IV); /* IV and packet-ID required */
ASSERT (opt->packet_id); /* for this mode. */
buf_set_read (&b, iv_buf, iv_size);
if (!packet_id_read (&pin, &b, true))
CRYPT_ERROR ("error reading CFB/OFB packet-id");
have_pin = true;
}
else /* We only support CBC, CFB, or OFB modes right now */
{
ASSERT (0);
}
}
}
else
{
work = *buf;
if (opt->packet_id)
{
if (!packet_id_read (&pin, &work, BOOL_CAST (opt->flags & CO_PACKET_ID_LONG_FORM)))
CRYPT_ERROR ("error reading packet-id");
have_pin = !BOOL_CAST (opt->flags & CO_IGNORE_PACKET_ID);
}
}
if (have_pin)
{
packet_id_reap_test (&opt->packet_id->rec);
if (packet_id_test (&opt->packet_id->rec, &pin))
{
packet_id_add (&opt->packet_id->rec, &pin);
if (opt->pid_persist && (opt->flags & CO_PACKET_ID_LONG_FORM))
packet_id_persist_save_obj (opt->pid_persist, opt->packet_id);
}
else
{
if (!(opt->flags & CO_MUTE_REPLAY_WARNINGS))
msg (D_REPLAY_ERRORS, "%s: bad packet ID (may be a replay): %s -- see the man page entry for --no-replay and --replay-window for more info or silence this warning with --mute-replay-warnings",
error_prefix, packet_id_net_print (&pin, true, &gc));
goto error_exit;
}
}
*buf = work;
}
gc_free (&gc);
return true;
error_exit:
crypto_clear_error();
buf->len = 0;
gc_free (&gc);
return false;
}
|
CWE-200
| 179,258 | 1,044 |
289938016318508466240781488768836247822
| null | null | null |
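In the openvpn_decrypt() record above (CWE-200), memcmp() on the HMAC returns early at the first differing byte, giving an attacker a timing oracle against the authentication tag. Commit 11d21349 ("Use constant time memcmp when comparing HMACs") replaces it with a constant-time compare; the helper is small enough to sketch in full (types assumed from the surrounding tree):

static int
memcmp_constant_time (const void *a, const void *b, size_t size)
{
  const uint8_t *a1 = a;
  const uint8_t *b1 = b;
  int ret = 0;
  size_t i;

  /* accumulate differences instead of returning early */
  for (i = 0; i < size; i++)
    ret |= *a1++ ^ *b1++;

  return ret;
}

/* ... */
if (memcmp_constant_time (local_hmac, BPTR (buf), hmac_len))
  CRYPT_ERROR ("packet HMAC authentication failed");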
linux
|
929473ea05db455ad88cdc081f2adc556b8dc48f
| 1 |
static int host_start(struct ci13xxx *ci)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
int ret;
if (usb_disabled())
return -ENODEV;
hcd = usb_create_hcd(&ci_ehci_hc_driver, ci->dev, dev_name(ci->dev));
if (!hcd)
return -ENOMEM;
dev_set_drvdata(ci->dev, ci);
hcd->rsrc_start = ci->hw_bank.phys;
hcd->rsrc_len = ci->hw_bank.size;
hcd->regs = ci->hw_bank.abs;
hcd->has_tt = 1;
hcd->power_budget = ci->platdata->power_budget;
hcd->phy = ci->transceiver;
ehci = hcd_to_ehci(hcd);
ehci->caps = ci->hw_bank.cap;
ehci->has_hostpc = ci->hw_bank.lpm;
ret = usb_add_hcd(hcd, 0, 0);
if (ret)
usb_put_hcd(hcd);
else
ci->hcd = hcd;
return ret;
}
|
CWE-119
| 179,259 | 1,045 |
178357202326078378305249559512498247150
| null | null | null |
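The host_start() record above is tagged CWE-119 with commit 929473ea ("usb: chipidea: Allow disabling streaming not only in udc mode"). A low-confidence sketch of the addition that commit subject implies, assuming the CI13XXX_DISABLE_STREAMING flag and the hw_write() register helper from the same driver:

ret = usb_add_hcd(hcd, 0, 0);
if (ret)
	usb_put_hcd(hcd);
else
	ci->hcd = hcd;

/* honor the streaming-disable quirk in host mode too */
if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING)
	hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);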
linux
|
0e9a9a1ad619e7e987815d20262d36a2f95717ca
| 1 |
int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
struct list_head *prev;
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi;
__u32 ino_next;
struct ext4_iloc iloc;
int err = 0;
if (!EXT4_SB(inode->i_sb)->s_journal)
return 0;
mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
if (list_empty(&ei->i_orphan))
goto out;
ino_next = NEXT_ORPHAN(inode);
prev = ei->i_orphan.prev;
sbi = EXT4_SB(inode->i_sb);
jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
list_del_init(&ei->i_orphan);
/* If we're on an error path, we may not have a valid
* transaction handle with which to update the orphan list on
* disk, but we still need to remove the inode from the linked
* list in memory. */
if (!handle)
goto out;
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_err;
if (prev == &sbi->s_orphan) {
jbd_debug(4, "superblock will point to %u\n", ino_next);
BUFFER_TRACE(sbi->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
err = ext4_handle_dirty_super(handle, inode->i_sb);
} else {
struct ext4_iloc iloc2;
struct inode *i_prev =
&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
jbd_debug(4, "orphan inode %lu will point to %u\n",
i_prev->i_ino, ino_next);
err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
if (err)
goto out_brelse;
NEXT_ORPHAN(i_prev) = ino_next;
err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
}
if (err)
goto out_brelse;
NEXT_ORPHAN(inode) = 0;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
out_err:
ext4_std_error(inode->i_sb, err);
out:
mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
return err;
out_brelse:
brelse(iloc.bh);
goto out_err;
}
|
CWE-399
| 179,262 | 1,047 |
244588647148744642893305983706705044314
| null | null | null |
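In the ext4_orphan_del() record above, the early return for journal-less filesystems means a pre-existing on-disk orphan list is never unlinked on such mounts. The fix shape suggested by commit 0e9a9a1a ("ext4: avoid hang when mounting non-journal filesystems with an orphan list"), assuming the EXT4_ORPHAN_FS mount-state flag:

if ((!EXT4_SB(inode->i_sb)->s_journal) &&
    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
	return 0;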