project (stringclasses, 633 values) | commit_id (stringlengths, 7-81) | target (int64, 0-1) | func (stringlengths, 5-484k) | cwe (stringclasses, 131 values) | big_vul_idx (float64, 0-189k ⌀) | idx (int64, 0-522k) | hash (stringlengths, 34-39) | size (float64, 1-24k ⌀) | message (stringlengths, 0-11.5k ⌀) | dataset (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|
linux | cb3232138e37129e88240a98a1d2aba2187ff57c | 1 |
static int clie_5_attach(struct usb_serial *serial)
{
struct usb_serial_port *port;
unsigned int pipe;
int j;
/* TH55 registers 2 ports.
Communication in from the UX50/TH55 uses bulk_in_endpointAddress
from port 0. Communication out to the UX50/TH55 uses
bulk_out_endpointAddress from port 1
Lets do a quick and dirty mapping
*/
/* some sanity check */
if (serial->num_ports < 2)
return -1;
/* port 0 now uses the modified endpoint Address */
port = serial->port[0];
port->bulk_out_endpointAddress =
serial->port[1]->bulk_out_endpointAddress;
pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress);
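/* Note: this sample is flagged vulnerable (target = 1); nothing here verifies that the device actually exposes bulk-out endpoints, so the write_urbs entries touched below may be unallocated for a crafted device. */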
for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j)
port->write_urbs[j]->pipe = pipe;
return 0;
}
| 180,729 | 2,296 | 302546870172646938684143672016916521803 | null | null | null |
linux | b4a1b4f5047e4f54e194681125c74c0aa64d637d | 1 |
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
struct key *key;
key_ref_t key_ref;
long ret;
/* find the key first */
key_ref = lookup_user_key(keyid, 0, 0);
if (IS_ERR(key_ref)) {
ret = -ENOKEY;
goto error;
}
key = key_ref_to_ptr(key_ref);
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
goto can_read_key;
if (ret != -EACCES)
goto error;
/* we can't; see if it's searchable from this process's keyrings
* - we automatically take account of the fact that it may be
* dangling off an instantiation key
*/
if (!is_key_possessed(key_ref)) {
ret = -EACCES;
goto error2;
}
/* the key is probably readable - now try to read it */
can_read_key:
ret = key_validate(key);
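/* Note: labeled CWE-362; the key can be revoked between this validity check and the ->read() call below, which takes key->sem only around the read itself, so the payload may be freed concurrently. */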
if (ret == 0) {
ret = -EOPNOTSUPP;
if (key->type->read) {
/* read the data with the semaphore held (since we
* might sleep) */
down_read(&key->sem);
ret = key->type->read(key, buffer, buflen);
up_read(&key->sem);
}
}
error2:
key_put(key);
error:
return ret;
}
| CWE-362 | 180,730 | 2,297 | 182054607697593478153120067337500454875 | null | null | null |
linux | 8e20cf2bce122ce9262d6034ee5d5b76fbb92f96 | 1 |
aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *usbdev = interface_to_usbdev(intf);
struct usb_endpoint_descriptor *endpoint;
struct aiptek *aiptek;
struct input_dev *inputdev;
int i;
int speeds[] = { 0,
AIPTEK_PROGRAMMABLE_DELAY_50,
AIPTEK_PROGRAMMABLE_DELAY_400,
AIPTEK_PROGRAMMABLE_DELAY_25,
AIPTEK_PROGRAMMABLE_DELAY_100,
AIPTEK_PROGRAMMABLE_DELAY_200,
AIPTEK_PROGRAMMABLE_DELAY_300
};
int err = -ENOMEM;
/* programmableDelay is where the command-line specified
* delay is kept. We make it the first element of speeds[],
* so therefore, your override speed is tried first, then the
* remainder. Note that the default value of 400ms will be tried
* if you do not specify any command line parameter.
*/
speeds[0] = programmableDelay;
aiptek = kzalloc(sizeof(struct aiptek), GFP_KERNEL);
inputdev = input_allocate_device();
if (!aiptek || !inputdev) {
dev_warn(&intf->dev,
"cannot allocate memory or input device\n");
goto fail1;
}
aiptek->data = usb_alloc_coherent(usbdev, AIPTEK_PACKET_LENGTH,
GFP_ATOMIC, &aiptek->data_dma);
if (!aiptek->data) {
dev_warn(&intf->dev, "cannot allocate usb buffer\n");
goto fail1;
}
aiptek->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!aiptek->urb) {
dev_warn(&intf->dev, "cannot allocate urb\n");
goto fail2;
}
aiptek->inputdev = inputdev;
aiptek->usbdev = usbdev;
aiptek->intf = intf;
aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
aiptek->inDelay = 0;
aiptek->endDelay = 0;
aiptek->previousJitterable = 0;
aiptek->lastMacro = -1;
/* Set up the curSettings struct. Said struct contains the current
* programmable parameters. The newSetting struct contains changes
* the user makes to the settings via the sysfs interface. Those
* changes are not "committed" to curSettings until the user
* writes to the sysfs/.../execute file.
*/
aiptek->curSetting.pointerMode = AIPTEK_POINTER_EITHER_MODE;
aiptek->curSetting.coordinateMode = AIPTEK_COORDINATE_ABSOLUTE_MODE;
aiptek->curSetting.toolMode = AIPTEK_TOOL_BUTTON_PEN_MODE;
aiptek->curSetting.xTilt = AIPTEK_TILT_DISABLE;
aiptek->curSetting.yTilt = AIPTEK_TILT_DISABLE;
aiptek->curSetting.mouseButtonLeft = AIPTEK_MOUSE_LEFT_BUTTON;
aiptek->curSetting.mouseButtonMiddle = AIPTEK_MOUSE_MIDDLE_BUTTON;
aiptek->curSetting.mouseButtonRight = AIPTEK_MOUSE_RIGHT_BUTTON;
aiptek->curSetting.stylusButtonUpper = AIPTEK_STYLUS_UPPER_BUTTON;
aiptek->curSetting.stylusButtonLower = AIPTEK_STYLUS_LOWER_BUTTON;
aiptek->curSetting.jitterDelay = jitterDelay;
aiptek->curSetting.programmableDelay = programmableDelay;
/* Both structs should have equivalent settings
*/
aiptek->newSetting = aiptek->curSetting;
/* Determine the usb devices' physical path.
* Asketh not why we always pretend we're using "../input0",
* but I suspect this will have to be refactored one
* day if a single USB device can be a keyboard & a mouse
* & a tablet, and the inputX number actually will tell
* us something...
*/
usb_make_path(usbdev, aiptek->features.usbPath,
sizeof(aiptek->features.usbPath));
strlcat(aiptek->features.usbPath, "/input0",
sizeof(aiptek->features.usbPath));
/* Set up client data, pointers to open and close routines
* for the input device.
*/
inputdev->name = "Aiptek";
inputdev->phys = aiptek->features.usbPath;
usb_to_input_id(usbdev, &inputdev->id);
inputdev->dev.parent = &intf->dev;
input_set_drvdata(inputdev, aiptek);
inputdev->open = aiptek_open;
inputdev->close = aiptek_close;
/* Now program the capacities of the tablet, in terms of being
* an input device.
*/
for (i = 0; i < ARRAY_SIZE(eventTypes); ++i)
__set_bit(eventTypes[i], inputdev->evbit);
for (i = 0; i < ARRAY_SIZE(absEvents); ++i)
__set_bit(absEvents[i], inputdev->absbit);
for (i = 0; i < ARRAY_SIZE(relEvents); ++i)
__set_bit(relEvents[i], inputdev->relbit);
__set_bit(MSC_SERIAL, inputdev->mscbit);
/* Set up key and button codes */
for (i = 0; i < ARRAY_SIZE(buttonEvents); ++i)
__set_bit(buttonEvents[i], inputdev->keybit);
for (i = 0; i < ARRAY_SIZE(macroKeyEvents); ++i)
__set_bit(macroKeyEvents[i], inputdev->keybit);
/*
* Program the input device coordinate capacities. We do not yet
* know what maximum X, Y, and Z values are, so we're putting fake
* values in. Later, we'll ask the tablet to put in the correct
* values.
*/
input_set_abs_params(inputdev, ABS_X, 0, 2999, 0, 0);
input_set_abs_params(inputdev, ABS_Y, 0, 2249, 0, 0);
input_set_abs_params(inputdev, ABS_PRESSURE, 0, 511, 0, 0);
input_set_abs_params(inputdev, ABS_TILT_X, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
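/* Note: sample flagged vulnerable; the next line reads endpoint[0] without first checking bNumEndpoints, so a device reporting no endpoints leads to an out-of-bounds access. */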
endpoint = &intf->altsetting[0].endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.
*/
usb_fill_int_urb(aiptek->urb,
aiptek->usbdev,
usb_rcvintpipe(aiptek->usbdev,
endpoint->bEndpointAddress),
aiptek->data, 8, aiptek_irq, aiptek,
endpoint->bInterval);
aiptek->urb->transfer_dma = aiptek->data_dma;
aiptek->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* Program the tablet. This sets the tablet up in the mode
* specified in newSetting, and also queries the tablet's
* physical capacities.
*
* Sanity check: if a tablet doesn't like the slow programmatic
* delay, we often get sizes of 0x0. Let's use that as an indicator
* to try faster delays, up to 25 ms. If that logic fails, well, you'll
* have to explain to us how your tablet thinks it's 0x0, and yet that's
* not an error :-)
*/
for (i = 0; i < ARRAY_SIZE(speeds); ++i) {
aiptek->curSetting.programmableDelay = speeds[i];
(void)aiptek_program_tablet(aiptek);
if (input_abs_get_max(aiptek->inputdev, ABS_X) > 0) {
dev_info(&intf->dev,
"Aiptek using %d ms programming speed\n",
aiptek->curSetting.programmableDelay);
break;
}
}
/* Murphy says that some day someone will have a tablet that fails the
above test. That's you, Frederic Rodrigo */
if (i == ARRAY_SIZE(speeds)) {
dev_info(&intf->dev,
"Aiptek tried all speeds, no sane response\n");
goto fail3;
}
/* Associate this driver's struct with the usb interface.
*/
usb_set_intfdata(intf, aiptek);
/* Set up the sysfs files
*/
err = sysfs_create_group(&intf->dev.kobj, &aiptek_attribute_group);
if (err) {
dev_warn(&intf->dev, "cannot create sysfs group err: %d\n",
err);
goto fail3;
}
/* Register the tablet as an Input Device
*/
err = input_register_device(aiptek->inputdev);
if (err) {
dev_warn(&intf->dev,
"input_register_device returned err: %d\n", err);
goto fail4;
}
return 0;
fail4: sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
fail3: usb_free_urb(aiptek->urb);
fail2: usb_free_coherent(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
aiptek->data_dma);
fail1: usb_set_intfdata(intf, NULL);
input_free_device(inputdev);
kfree(aiptek);
return err;
}
| 180,731 | 2,298 | 116279422032136146703846531328372054860 | null | null | null |
linux | 0185604c2d82c560dab2f2933a18f797e74ab5a8 | 1 |
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
| 180,732 | 2,299 | 208694431467498242113447724435619413176 | null | null | null |
linux | 0185604c2d82c560dab2f2933a18f797e74ab5a8 | 1 |
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int start = 0;
u32 prev_legacy, cur_legacy;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
if (!prev_legacy && cur_legacy)
start = 1;
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
sizeof(kvm->arch.vpit->pit_state.channels));
kvm->arch.vpit->pit_state.flags = ps->flags;
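/* Editorial note: as in kvm_vm_ioctl_set_pit() above, only channel 0's counter is reloaded here; the referenced fix commit reportedly reloads the counters for all channels after restoring state. */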
kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
| 180,733 | 2,300 | 246557586103247983393786080333386558083 | null | null | null |
linux | cd4a40174b71acd021877341684d8bb1dc8ea4ae | 1 |
struct vfsmount *collect_mounts(struct path *path)
{
struct mount *tree;
namespace_lock();
tree = copy_tree(real_mount(path->mnt), path->dentry,
CL_COPY_ALL | CL_PRIVATE);
namespace_unlock();
if (IS_ERR(tree))
return ERR_CAST(tree);
return &tree->mnt;
}
| 180,735 | 2,301 | 19334538589789937197837029932069005485 | null | null | null |
linux | cf872776fc84128bb779ce2b83a37c884c3203ae | 1 |
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
long tmp = *old;
*old = atomic_long_cmpxchg(&sem->count, *old, new);
return *old == tmp;
}
| CWE-362 | 180,738 | 2,303 | 228392180986723328438684157890230086487 | null | null | null |
linux | a2f18db0c68fec96631c10cad9384c196e9008ac | 1 |
static int nft_flush_table(struct nft_ctx *ctx)
{
int err;
struct nft_chain *chain, *nc;
struct nft_set *set, *ns;
list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
ctx->chain = chain;
err = nft_delrule_by_chain(ctx);
if (err < 0)
goto out;
err = nft_delchain(ctx);
if (err < 0)
goto out;
}
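/* Editorial note: labeled CWE-19; each chain is flushed and deleted in the same pass above, so rules in later chains may still reference an already-deleted chain; the referenced fix reportedly removes all rules first and chains afterwards. */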
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
if (set->flags & NFT_SET_ANONYMOUS &&
!list_empty(&set->bindings))
continue;
err = nft_delset(ctx, set);
if (err < 0)
goto out;
}
err = nft_deltable(ctx);
out:
return err;
}
| CWE-19 | 180,744 | 2,308 | 176778942905014936695932937244012092184 | null | null | null |
linux | 2c5816b4beccc8ba709144539f6fdd764f8fa49c | 1 |
static int cuse_channel_release(struct inode *inode, struct file *file)
{
struct fuse_dev *fud = file->private_data;
struct cuse_conn *cc = fc_to_cc(fud->fc);
int rc;
/* remove from the conntbl, no more access from this point on */
mutex_lock(&cuse_lock);
list_del_init(&cc->list);
mutex_unlock(&cuse_lock);
/* remove device */
if (cc->dev)
device_unregister(cc->dev);
if (cc->cdev) {
unregister_chrdev_region(cc->cdev->dev, 1);
cdev_del(cc->cdev);
}
rc = fuse_dev_release(inode, file); /* puts the base reference */
return rc;
}
| CWE-399 | 180,745 | 2,309 | 206651427646572480350187731381483149808 | null | null | null |
linux | 6217e5ede23285ddfee10d2e4ba0cc2d4c046205 | 1 |
static int snd_compress_check_input(struct snd_compr_params *params)
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
params->buffer.fragments > SIZE_MAX / params->buffer.fragment_size)
return -EINVAL;
/* now codec parameters */
if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
return -EINVAL;
if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
return -EINVAL;
return 0;
}
| 180,746 | 2,310 | 77147876717907738792002602358531841658 | null | null | null |
linux | 4efbc454ba68def5ef285b26ebfcfdb605b52755 | 1 |
static int sched_read_attr(struct sched_attr __user *uattr,
struct sched_attr *attr,
unsigned int usize)
{
int ret;
if (!access_ok(VERIFY_WRITE, uattr, usize))
return -EFAULT;
/*
* If we're handed a smaller struct than we know of,
* ensure all the unknown bits are 0 - i.e. old
* user-space does not get uncomplete information.
*/
if (usize < sizeof(*attr)) {
unsigned char *addr;
unsigned char *end;
addr = (void *)attr + usize;
end = (void *)attr + sizeof(*attr);
for (; addr < end; addr++) {
if (*addr)
goto err_size;
}
attr->size = usize;
}
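/* Note: labeled CWE-200; when usize is larger than sizeof(*attr), the copy below reads past the kernel structure and leaks adjacent stack memory to user space. */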
ret = copy_to_user(uattr, attr, usize);
if (ret)
return -EFAULT;
out:
return ret;
err_size:
ret = -E2BIG;
goto out;
}
| CWE-200 | 180,747 | 2,311 | 339536197997710328363394448087627293018 | null | null | null |
linux | c88e739b1fad662240e99ecbd0bdaac871717987 | 1 |
static long __media_device_enum_links(struct media_device *mdev,
struct media_links_enum *links)
{
struct media_entity *entity;
entity = find_entity(mdev, links->entity);
if (entity == NULL)
return -EINVAL;
if (links->pads) {
unsigned int p;
for (p = 0; p < entity->num_pads; p++) {
struct media_pad_desc pad;
media_device_kpad_to_upad(&entity->pads[p], &pad);
if (copy_to_user(&links->pads[p], &pad, sizeof(pad)))
return -EFAULT;
}
}
if (links->links) {
struct media_link_desc __user *ulink;
unsigned int l;
for (l = 0, ulink = links->links; l < entity->num_links; l++) {
struct media_link_desc link;
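/* Note: labeled CWE-200; only source, sink and flags of this stack structure are set below, so its reserved/padding bytes reach user space uninitialized via copy_to_user(). */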
/* Ignore backlinks. */
if (entity->links[l].source->entity != entity)
continue;
media_device_kpad_to_upad(entity->links[l].source,
&link.source);
media_device_kpad_to_upad(entity->links[l].sink,
&link.sink);
link.flags = entity->links[l].flags;
if (copy_to_user(ulink, &link, sizeof(*ulink)))
return -EFAULT;
ulink++;
}
}
return 0;
}
| CWE-200 | 180,748 | 2,312 | 154830867138233729337898903656645655475 | null | null | null |
libgd | 4f65a3e4eedaffa1efcf9ee1eb08f0b504fbc31a | 1 |
static inline LineContribType *_gdContributionsCalc(unsigned int line_size, unsigned int src_size, double scale_d, const interpolation_method pFilter)
{
double width_d;
double scale_f_d = 1.0;
const double filter_width_d = DEFAULT_BOX_RADIUS;
int windows_size;
unsigned int u;
LineContribType *res;
if (scale_d < 1.0) {
width_d = filter_width_d / scale_d;
scale_f_d = scale_d;
} else {
width_d= filter_width_d;
}
windows_size = 2 * (int)ceil(width_d) + 1;
res = _gdContributionsAlloc(line_size, windows_size);
for (u = 0; u < line_size; u++) {
const double dCenter = (double)u / scale_d;
/* get the significant edge points affecting the pixel */
register int iLeft = MAX(0, (int)floor (dCenter - width_d));
int iRight = MIN((int)ceil(dCenter + width_d), (int)src_size - 1);
double dTotalWeight = 0.0;
int iSrc;
res->ContribRow[u].Left = iLeft;
res->ContribRow[u].Right = iRight;
/* Cut edge points to fit in filter window in case of spill-off */
if (iRight - iLeft + 1 > windows_size) {
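/* Note: "1 / 2" in the test below is integer division (0), so the comparison is effectively against (int)src_size; the intended check appears to be ((int)src_size - 1) / 2 (sample labeled CWE-125). */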
if (iLeft < ((int)src_size - 1 / 2)) {
iLeft++;
} else {
iRight--;
}
}
for (iSrc = iLeft; iSrc <= iRight; iSrc++) {
dTotalWeight += (res->ContribRow[u].Weights[iSrc-iLeft] = scale_f_d * (*pFilter)(scale_f_d * (dCenter - (double)iSrc)));
}
if (dTotalWeight < 0.0) {
_gdContributionsFree(res);
return NULL;
}
if (dTotalWeight > 0.0) {
for (iSrc = iLeft; iSrc <= iRight; iSrc++) {
res->ContribRow[u].Weights[iSrc-iLeft] /= dTotalWeight;
}
}
}
return res;
}
| CWE-125 | 180,763 | 2,323 | 41848651368514129223970481374051132044 | null | null | null |
Little-CMS | fefaaa43c382eee632ea3ad0cfa915335140e1db | 1 |
cmsPipeline* DefaultICCintents(cmsContext ContextID,
cmsUInt32Number nProfiles,
cmsUInt32Number TheIntents[],
cmsHPROFILE hProfiles[],
cmsBool BPC[],
cmsFloat64Number AdaptationStates[],
cmsUInt32Number dwFlags)
{
cmsPipeline* Lut = NULL;
cmsPipeline* Result;
cmsHPROFILE hProfile;
cmsMAT3 m;
cmsVEC3 off;
cmsColorSpaceSignature ColorSpaceIn, ColorSpaceOut, CurrentColorSpace;
cmsProfileClassSignature ClassSig;
cmsUInt32Number i, Intent;
if (nProfiles == 0) return NULL;
Result = cmsPipelineAlloc(ContextID, 0, 0);
if (Result == NULL) return NULL;
CurrentColorSpace = cmsGetColorSpace(hProfiles[0]);
for (i=0; i < nProfiles; i++) {
cmsBool lIsDeviceLink, lIsInput;
hProfile = hProfiles[i];
ClassSig = cmsGetDeviceClass(hProfile);
lIsDeviceLink = (ClassSig == cmsSigLinkClass || ClassSig == cmsSigAbstractClass );
if ((i == 0) && !lIsDeviceLink) {
lIsInput = TRUE;
}
else {
lIsInput = (CurrentColorSpace != cmsSigXYZData) &&
(CurrentColorSpace != cmsSigLabData);
}
Intent = TheIntents[i];
if (lIsInput || lIsDeviceLink) {
ColorSpaceIn = cmsGetColorSpace(hProfile);
ColorSpaceOut = cmsGetPCS(hProfile);
}
else {
ColorSpaceIn = cmsGetPCS(hProfile);
ColorSpaceOut = cmsGetColorSpace(hProfile);
}
if (!ColorSpaceIsCompatible(ColorSpaceIn, CurrentColorSpace)) {
cmsSignalError(ContextID, cmsERROR_COLORSPACE_CHECK, "ColorSpace mismatch");
goto Error;
}
if (lIsDeviceLink || ((ClassSig == cmsSigNamedColorClass) && (nProfiles == 1))) {
Lut = _cmsReadDevicelinkLUT(hProfile, Intent);
if (Lut == NULL) goto Error;
if (ClassSig == cmsSigAbstractClass && i > 0) {
if (!ComputeConversion(i, hProfiles, Intent, BPC[i], AdaptationStates[i], &m, &off)) goto Error;
}
else {
_cmsMAT3identity(&m);
_cmsVEC3init(&off, 0, 0, 0);
}
if (!AddConversion(Result, CurrentColorSpace, ColorSpaceIn, &m, &off)) goto Error;
}
else {
if (lIsInput) {
Lut = _cmsReadInputLUT(hProfile, Intent);
if (Lut == NULL) goto Error;
}
else {
Lut = _cmsReadOutputLUT(hProfile, Intent);
if (Lut == NULL) goto Error;
if (!ComputeConversion(i, hProfiles, Intent, BPC[i], AdaptationStates[i], &m, &off)) goto Error;
if (!AddConversion(Result, CurrentColorSpace, ColorSpaceIn, &m, &off)) goto Error;
}
}
if (!cmsPipelineCat(Result, Lut))
goto Error;
cmsPipelineFree(Lut);
CurrentColorSpace = ColorSpaceOut;
}
return Result;
Error:
cmsPipelineFree(Lut);
if (Result != NULL) cmsPipelineFree(Result);
return NULL;
cmsUNUSED_PARAMETER(dwFlags);
}
| 180,764 | 2,324 | 120973636847525123043991558312278084903 | null | null | null |
linux | 82981930125abfd39d7c8378a9cfdf5e1be2002b | 1 |
int sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int val;
int valbool;
struct linger ling;
int ret = 0;
/*
* Options without arguments
*/
if (optname == SO_BINDTODEVICE)
return sock_bindtodevice(sk, optval, optlen);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
valbool = val ? 1 : 0;
lock_sock(sk);
switch (optname) {
case SO_DEBUG:
if (val && !capable(CAP_NET_ADMIN))
ret = -EACCES;
else
sock_valbool_flag(sk, SOCK_DBG, valbool);
break;
case SO_REUSEADDR:
sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_TYPE:
case SO_PROTOCOL:
case SO_DOMAIN:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
break;
case SO_SNDBUF:
/* Don't error on this BSD doesn't and if you think
about it this is right. Otherwise apps have to
play 'guess the biggest size' games. RCVBUF/SNDBUF
are treated in BSD as hints */
if (val > sysctl_wmem_max)
val = sysctl_wmem_max;
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
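/* Note: labeled CWE-119; "val * 2" below is a signed int multiplication that can overflow for large values, leaving a negative sk_sndbuf. */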
if ((val * 2) < SOCK_MIN_SNDBUF)
sk->sk_sndbuf = SOCK_MIN_SNDBUF;
else
sk->sk_sndbuf = val * 2;
/*
* Wake up sending tasks if we
* upped the value.
*/
sk->sk_write_space(sk);
break;
case SO_SNDBUFFORCE:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
goto set_sndbuf;
case SO_RCVBUF:
/* Don't error on this BSD doesn't and if you think
about it this is right. Otherwise apps have to
play 'guess the biggest size' games. RCVBUF/SNDBUF
are treated in BSD as hints */
if (val > sysctl_rmem_max)
val = sysctl_rmem_max;
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
* We double it on the way in to account for
* "struct sk_buff" etc. overhead. Applications
* assume that the SO_RCVBUF setting they make will
* allow that much actual data to be received on that
* socket.
*
* Applications are unaware that "struct sk_buff" and
* other overheads allocate from the receive buffer
* during socket buffer allocation.
*
* And after considering the possible alternatives,
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
if ((val * 2) < SOCK_MIN_RCVBUF)
sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
else
sk->sk_rcvbuf = val * 2;
break;
case SO_RCVBUFFORCE:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
goto set_rcvbuf;
case SO_KEEPALIVE:
#ifdef CONFIG_INET
if (sk->sk_protocol == IPPROTO_TCP)
tcp_set_keepalive(sk, valbool);
#endif
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
break;
case SO_OOBINLINE:
sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
break;
case SO_NO_CHECK:
sk->sk_no_check = valbool;
break;
case SO_PRIORITY:
if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
sk->sk_priority = val;
else
ret = -EPERM;
break;
case SO_LINGER:
if (optlen < sizeof(ling)) {
ret = -EINVAL; /* 1003.1g */
break;
}
if (copy_from_user(&ling, optval, sizeof(ling))) {
ret = -EFAULT;
break;
}
if (!ling.l_onoff)
sock_reset_flag(sk, SOCK_LINGER);
else {
#if (BITS_PER_LONG == 32)
if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
else
#endif
sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
sock_set_flag(sk, SOCK_LINGER);
}
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("setsockopt");
break;
case SO_PASSCRED:
if (valbool)
set_bit(SOCK_PASSCRED, &sock->flags);
else
clear_bit(SOCK_PASSCRED, &sock->flags);
break;
case SO_TIMESTAMP:
case SO_TIMESTAMPNS:
if (valbool) {
if (optname == SO_TIMESTAMP)
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
else
sock_set_flag(sk, SOCK_RCVTSTAMPNS);
sock_set_flag(sk, SOCK_RCVTSTAMP);
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
} else {
sock_reset_flag(sk, SOCK_RCVTSTAMP);
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
}
break;
case SO_TIMESTAMPING:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
break;
}
sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
val & SOF_TIMESTAMPING_TX_HARDWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
val & SOF_TIMESTAMPING_TX_SOFTWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
val & SOF_TIMESTAMPING_RX_HARDWARE);
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
sock_enable_timestamp(sk,
SOCK_TIMESTAMPING_RX_SOFTWARE);
else
sock_disable_timestamp(sk,
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
val & SOF_TIMESTAMPING_SOFTWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
val & SOF_TIMESTAMPING_SYS_HARDWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
val & SOF_TIMESTAMPING_RAW_HARDWARE);
break;
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
sk->sk_rcvlowat = val ? : 1;
break;
case SO_RCVTIMEO:
ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
break;
case SO_SNDTIMEO:
ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
break;
case SO_ATTACH_FILTER:
ret = -EINVAL;
if (optlen == sizeof(struct sock_fprog)) {
struct sock_fprog fprog;
ret = -EFAULT;
if (copy_from_user(&fprog, optval, sizeof(fprog)))
break;
ret = sk_attach_filter(&fprog, sk);
}
break;
case SO_DETACH_FILTER:
ret = sk_detach_filter(sk);
break;
case SO_PASSSEC:
if (valbool)
set_bit(SOCK_PASSSEC, &sock->flags);
else
clear_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_MARK:
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
sk->sk_mark = val;
break;
/* We implement the SO_SNDLOWAT etc to
not be settable (1003.1g 5.3) */
case SO_RXQ_OVFL:
sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
case SO_WIFI_STATUS:
sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
break;
case SO_PEEK_OFF:
if (sock->ops->set_peek_off)
sock->ops->set_peek_off(sk, val);
else
ret = -EOPNOTSUPP;
break;
case SO_NOFCS:
sock_valbool_flag(sk, SOCK_NOFCS, valbool);
break;
default:
ret = -ENOPROTOOPT;
break;
}
release_sock(sk);
return ret;
}
| CWE-119 | 180,781 | 2,337 | 84799346879089591141142253594612624049 | null | null | null |
linux | b35cc8225845112a616e3a2266d2fde5ab13d3ab | 1 |
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
struct snd_compr_params *params)
{
unsigned int buffer_size;
void *buffer;
buffer_size = params->buffer.fragment_size * params->buffer.fragments;
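/* Note: fragment_size * fragments is an unchecked unsigned multiplication that can wrap, so the allocation below may be smaller than the sizes recorded in the runtime fields. */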
if (stream->ops->copy) {
buffer = NULL;
/* if copy is defined the driver will be required to copy
* the data from core
*/
} else {
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
}
stream->runtime->fragment_size = params->buffer.fragment_size;
stream->runtime->fragments = params->buffer.fragments;
stream->runtime->buffer = buffer;
stream->runtime->buffer_size = buffer_size;
return 0;
}
| 180,782 | 2,338 | 320223001183825300598401950795056162108 | null | null | null |
linux | 20e1db19db5d6b9e4e83021595eab0dc8f107bef | 1 |
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *addr = msg->msg_name;
u32 dst_pid;
u32 dst_group;
struct sk_buff *skb;
int err;
struct scm_cookie scm;
if (msg->msg_flags&MSG_OOB)
return -EOPNOTSUPP;
if (NULL == siocb->scm)
siocb->scm = &scm;
err = scm_send(sock, msg, siocb->scm, true);
if (err < 0)
return err;
if (msg->msg_namelen) {
err = -EINVAL;
if (addr->nl_family != AF_NETLINK)
goto out;
dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
goto out;
} else {
dst_pid = nlk->dst_pid;
dst_group = nlk->dst_group;
}
if (!nlk->pid) {
err = netlink_autobind(sock);
if (err)
goto out;
}
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
skb = alloc_skb(len, GFP_KERNEL);
if (skb == NULL)
goto out;
NETLINK_CB(skb).pid = nlk->pid;
NETLINK_CB(skb).dst_group = dst_group;
memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
err = -EFAULT;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
goto out;
}
err = security_netlink_send(sk, skb);
if (err) {
kfree_skb(skb);
goto out;
}
if (dst_group) {
atomic_inc(&skb->users);
netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
}
err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
out:
scm_destroy(siocb->scm);
return err;
}
| CWE-284 | 180,787 | 2,342 | 169493009832141549770812668255774168936 | null | null | null |
linux | c290f8358acaeffd8e0c551ddcc24d1206143376 | 1 |
static int tty_open(struct inode *inode, struct file *filp)
{
struct tty_struct *tty = NULL;
int noctty, retval;
struct tty_driver *driver;
int index;
dev_t device = inode->i_rdev;
unsigned saved_flags = filp->f_flags;
nonseekable_open(inode, filp);
retry_open:
noctty = filp->f_flags & O_NOCTTY;
index = -1;
retval = 0;
mutex_lock(&tty_mutex);
tty_lock();
if (device == MKDEV(TTYAUX_MAJOR, 0)) {
tty = get_current_tty();
if (!tty) {
tty_unlock();
mutex_unlock(&tty_mutex);
return -ENXIO;
}
driver = tty_driver_kref_get(tty->driver);
index = tty->index;
filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
/* noctty = 1; */
/* FIXME: Should we take a driver reference ? */
tty_kref_put(tty);
goto got_driver;
}
#ifdef CONFIG_VT
if (device == MKDEV(TTY_MAJOR, 0)) {
extern struct tty_driver *console_driver;
driver = tty_driver_kref_get(console_driver);
index = fg_console;
noctty = 1;
goto got_driver;
}
#endif
if (device == MKDEV(TTYAUX_MAJOR, 1)) {
struct tty_driver *console_driver = console_device(&index);
if (console_driver) {
driver = tty_driver_kref_get(console_driver);
if (driver) {
/* Don't let /dev/console block */
filp->f_flags |= O_NONBLOCK;
noctty = 1;
goto got_driver;
}
}
tty_unlock();
mutex_unlock(&tty_mutex);
return -ENODEV;
}
driver = get_tty_driver(device, &index);
if (!driver) {
tty_unlock();
mutex_unlock(&tty_mutex);
return -ENODEV;
}
got_driver:
if (!tty) {
/* check whether we're reopening an existing tty */
tty = tty_driver_lookup_tty(driver, inode, index);
if (IS_ERR(tty)) {
tty_unlock();
mutex_unlock(&tty_mutex);
return PTR_ERR(tty);
}
}
if (tty) {
retval = tty_reopen(tty);
if (retval)
tty = ERR_PTR(retval);
} else
tty = tty_init_dev(driver, index, 0);
mutex_unlock(&tty_mutex);
tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
tty_unlock();
return PTR_ERR(tty);
}
retval = tty_add_file(tty, filp);
if (retval) {
tty_unlock();
tty_release(inode, filp);
return retval;
}
check_tty_count(tty, "tty_open");
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
noctty = 1;
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "opening %s...", tty->name);
#endif
if (tty->ops->open)
retval = tty->ops->open(tty, filp);
else
retval = -ENODEV;
filp->f_flags = saved_flags;
if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
!capable(CAP_SYS_ADMIN))
retval = -EBUSY;
if (retval) {
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "error %d in opening %s...", retval,
tty->name);
#endif
tty_unlock(); /* need to call tty_release without BTM */
tty_release(inode, filp);
if (retval != -ERESTARTSYS)
return retval;
if (signal_pending(current))
return retval;
schedule();
/*
* Need to reset f_op in case a hangup happened.
*/
tty_lock();
if (filp->f_op == &hung_up_tty_fops)
filp->f_op = &tty_fops;
tty_unlock();
goto retry_open;
}
tty_unlock();
mutex_lock(&tty_mutex);
tty_lock();
spin_lock_irq(&current->sighand->siglock);
if (!noctty &&
current->signal->leader &&
!current->signal->tty &&
tty->session == NULL)
__proc_set_tty(current, tty);
spin_unlock_irq(&current->sighand->siglock);
tty_unlock();
mutex_unlock(&tty_mutex);
return 0;
}
| 180,788 | 2,343 | 2589888721444243111074919100510851960 | null | null | null |
linux | ba3021b2c79b2fa9114f92790a99deb27a65b728 | 1 |
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
}
| CWE-200 | 180,791 | 2,346 | 18027996916287278673301734704406442254 | null | null | null |
linux | 36ae3c0a36b7456432fedce38ae2f7bd3e01a563 | 1 |
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
return -EINVAL;
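/* Note: labeled CWE-20 (improper input validation); beyond the flags check above, args->gsi and args->fd are forwarded without any range checking at this level. */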
if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
return kvm_irqfd_deassign(kvm, args);
return kvm_irqfd_assign(kvm, args);
}
| CWE-20 | 180,792 | 2,347 | 83207294150061306602211255224566801067 | null | null | null |
file | 35c94dc6acc418f1ad7f6241a6680e5327495793 | 1 |
do_bid_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
int swap __attribute__((__unused__)), uint32_t namesz, uint32_t descsz,
size_t noff, size_t doff, int *flags)
{
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
type == NT_GNU_BUILD_ID && (descsz >= 4 || descsz <= 20)) {
uint8_t desc[20];
const char *btype;
uint32_t i;
*flags |= FLAGS_DID_BUILD_ID;
switch (descsz) {
case 8:
btype = "xxHash";
break;
case 16:
btype = "md5/uuid";
break;
case 20:
btype = "sha1";
break;
default:
btype = "unknown";
break;
}
if (file_printf(ms, ", BuildID[%s]=", btype) == -1)
return 1;
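/* Note: labeled CWE-119; the "(descsz >= 4 || descsz <= 20)" test above is always true, so a descsz larger than 20 overruns the 20-byte desc[] buffer in this memcpy. */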
(void)memcpy(desc, &nbuf[doff], descsz);
for (i = 0; i < descsz; i++)
if (file_printf(ms, "%02x", desc[i]) == -1)
return 1;
return 1;
}
return 0;
}
| CWE-119 | 180,800 | 2,354 | 125277570129377130993833351272884504631 | null | null | null |
lynx-snapshots | 280a61b300a1614f6037efc0902ff7ecf17146e9 | 1 |
void HTML_put_string(HTStructured * me, const char *s)
{
#ifdef USE_PRETTYSRC
char *translated_string = NULL;
#endif
if (s == NULL || (LYMapsOnly && me->sp[0].tag_number != HTML_OBJECT))
return;
#ifdef USE_PRETTYSRC
if (psrc_convert_string) {
StrAllocCopy(translated_string, s);
TRANSLATE_AND_UNESCAPE_ENTITIES(&translated_string, TRUE, FALSE);
s = (const char *) translated_string;
}
#endif
switch (me->sp[0].tag_number) {
case HTML_COMMENT:
break; /* Do Nothing */
case HTML_TITLE:
HTChunkPuts(&me->title, s);
break;
case HTML_STYLE:
HTChunkPuts(&me->style_block, s);
break;
case HTML_SCRIPT:
HTChunkPuts(&me->script, s);
break;
case HTML_PRE: /* Formatted text */
case HTML_LISTING: /* Literal text */
case HTML_XMP:
case HTML_PLAINTEXT:
/*
* We guarantee that the style is up-to-date in begin_litteral
*/
HText_appendText(me->text, s);
break;
case HTML_OBJECT:
HTChunkPuts(&me->object, s);
break;
case HTML_TEXTAREA:
HTChunkPuts(&me->textarea, s);
break;
case HTML_SELECT:
case HTML_OPTION:
HTChunkPuts(&me->option, s);
break;
case HTML_MATH:
HTChunkPuts(&me->math, s);
break;
default: /* Free format text? */
if (!me->sp->style->freeFormat) {
/*
* If we are within a preformatted text style not caught by the
* cases above (HTML_PRE or similar may not be the last element
* pushed on the style stack). - kw
*/
#ifdef USE_PRETTYSRC
if (psrc_view) {
/*
* We do this so that a raw '\r' in the string will not be
* interpreted as an internal request to break a line - passing
* '\r' to HText_appendText is treated by it as a request to
* insert a blank line - VH
*/
for (; *s; ++s)
HTML_put_character(me, *s);
} else
#endif
HText_appendText(me->text, s);
break;
} else {
const char *p = s;
char c;
if (me->style_change) {
for (; *p && ((*p == '\n') || (*p == '\r') ||
(*p == ' ') || (*p == '\t')); p++) ; /* Ignore leaders */
if (!*p)
break;
UPDATE_STYLE;
}
for (; *p; p++) {
if (*p == 13 && p[1] != 10) {
/*
* Treat any '\r' which is not followed by '\n' as '\n', to
* account for macintosh lineend in ALT attributes etc. -
* kw
*/
c = '\n';
} else {
c = *p;
}
if (me->style_change) {
if ((c == '\n') || (c == ' ') || (c == '\t'))
continue; /* Ignore it */
UPDATE_STYLE;
}
if (c == '\n') {
if (!FIX_JAPANESE_SPACES) {
if (me->in_word) {
if (HText_getLastChar(me->text) != ' ')
HText_appendCharacter(me->text, ' ');
me->in_word = NO;
}
}
} else if (c == ' ' || c == '\t') {
if (HText_getLastChar(me->text) != ' ')
HText_appendCharacter(me->text, ' ');
} else if (c == '\r') {
/* ignore */
} else {
HText_appendCharacter(me->text, c);
me->in_word = YES;
}
/* set the Last Character */
if (c == '\n' || c == '\t') {
/* set it to a generic separator */
HText_setLastChar(me->text, ' ');
} else if (c == '\r' &&
HText_getLastChar(me->text) == ' ') {
/*
* \r's are ignored. In order to keep collapsing spaces
* correctly, we must default back to the previous
* separator, if there was one. So we set LastChar to a
* generic separator.
*/
HText_setLastChar(me->text, ' ');
} else {
HText_setLastChar(me->text, c);
}
} /* for */
}
} /* end switch */
#ifdef USE_PRETTYSRC
if (psrc_convert_string) {
psrc_convert_string = FALSE;
FREE(translated_string);
}
#endif
}
| CWE-416 | 180,801 | 2,355 | 185287537479326979907025807825206483145 | null | null | null |
tcmu-runner | e2d953050766ac538615a811c64b34358614edce | 1 |
on_unregister_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gpointer user_data)
{
struct tcmur_handler *handler = find_handler_by_subtype(subtype);
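/* Note: labeled CWE-20; find_handler_by_subtype() can return NULL for an unknown subtype, yet the next line dereferences handler before the NULL check below. */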
struct dbus_info *info = handler->opaque;
if (!handler) {
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
"unknown subtype"));
return TRUE;
}
dbus_unexport_handler(handler);
tcmur_unregister_handler(handler);
g_bus_unwatch_name(info->watcher_id);
g_free(info);
g_free(handler);
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", TRUE, "succeeded"));
return TRUE;
}
| CWE-20 | 180,802 | 2,356 | 116020867554130668193424324893271725989 | null | null | null |
tcmu-runner | bb80e9c7a798f035768260ebdadffb6eb0786178 | 1 |
on_unregister_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gpointer user_data)
{
struct tcmur_handler *handler = find_handler_by_subtype(subtype);
struct dbus_info *info = handler ? handler->opaque : NULL;
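/* Note: labeled CWE-476; if handler->opaque is NULL, info is NULL here and the g_bus_unwatch_name(info->watcher_id) call below dereferences it. */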
if (!handler) {
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
"unknown subtype"));
return TRUE;
}
dbus_unexport_handler(handler);
tcmur_unregister_handler(handler);
g_bus_unwatch_name(info->watcher_id);
g_free(info);
g_free(handler);
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", TRUE, "succeeded"));
return TRUE;
}
| CWE-476 | 180,806 | 2,360 | 274977298780245980512070718535102508647 | null | null | null |
tcmu-runner | 61bd03e600d2abf309173e9186f4d465bb1b7157 | 1 |
static bool glfs_check_config(const char *cfgstring, char **reason)
{
char *path;
glfs_t *fs = NULL;
glfs_fd_t *gfd = NULL;
gluster_server *hosts = NULL; /* gluster server defination */
bool result = true;
path = strchr(cfgstring, '/');
if (!path) {
if (asprintf(reason, "No path found") == -1)
*reason = NULL;
result = false;
goto done;
}
path += 1; /* get past '/' */
fs = tcmu_create_glfs_object(path, &hosts);
if (!fs) {
tcmu_err("tcmu_create_glfs_object failed\n");
goto done;
}
gfd = glfs_open(fs, hosts->path, ALLOWED_BSOFLAGS);
if (!gfd) {
if (asprintf(reason, "glfs_open failed: %m") == -1)
*reason = NULL;
result = false;
goto unref;
}
if (glfs_access(fs, hosts->path, R_OK|W_OK) == -1) {
if (asprintf(reason, "glfs_access file not present, or not writable") == -1)
*reason = NULL;
result = false;
goto unref;
}
goto done;
unref:
gluster_cache_refresh(fs, path);
done:
if (gfd)
glfs_close(gfd);
gluster_free_server(&hosts);
return result;
}
| CWE-119 | 180,807 | 2,361 | 23917343011042042541314960786542795782 | null | null | null |
evince | 717df38fd8509bf883b70d680c9b1b3cf36732ee | 1 |
comics_check_decompress_command (gchar *mime_type,
ComicsDocument *comics_document,
GError **error)
{
gboolean success;
gchar *std_out, *std_err;
gint retval;
GError *err = NULL;
/* FIXME, use proper cbr/cbz mime types once they're
* included in shared-mime-info */
if (g_content_type_is_a (mime_type, "application/x-cbr") ||
g_content_type_is_a (mime_type, "application/x-rar")) {
/* The RARLAB provides a no-charge proprietary (freeware)
* decompress-only client for Linux called unrar. Another
* option is a GPLv2-licensed command-line tool developed by
* the Gna! project. Confusingly enough, the free software RAR
* decoder is also named unrar. For this reason we need to add
* some lines for disambiguation. Sorry for the added the
* complexity but it's life :)
* Finally, some distributions, like Debian, rename this free
* option as unrar-free.
* */
comics_document->selected_command =
g_find_program_in_path ("unrar");
if (comics_document->selected_command) {
/* We only use std_err to avoid printing useless error
* messages on the terminal */
success =
g_spawn_command_line_sync (
comics_document->selected_command,
&std_out, &std_err,
&retval, &err);
if (!success) {
g_propagate_error (error, err);
g_error_free (err);
return FALSE;
/* I don't check retval status because RARLAB unrar
* doesn't have a way to return 0 without involving an
* operation with a file*/
} else if (WIFEXITED (retval)) {
if (g_strrstr (std_out,"freeware") != NULL)
/* The RARLAB freeware client */
comics_document->command_usage = RARLABS;
else
/* The Gna! free software client */
comics_document->command_usage = GNAUNRAR;
g_free (std_out);
g_free (std_err);
return TRUE;
}
}
/* The Gna! free software client with Debian naming convention */
comics_document->selected_command =
g_find_program_in_path ("unrar-free");
if (comics_document->selected_command) {
comics_document->command_usage = GNAUNRAR;
return TRUE;
}
comics_document->selected_command =
g_find_program_in_path ("bsdtar");
if (comics_document->selected_command) {
comics_document->command_usage = TAR;
return TRUE;
}
} else if (g_content_type_is_a (mime_type, "application/x-cbz") ||
g_content_type_is_a (mime_type, "application/zip")) {
/* InfoZIP's unzip program */
comics_document->selected_command =
g_find_program_in_path ("unzip");
comics_document->alternative_command =
g_find_program_in_path ("zipnote");
if (comics_document->selected_command &&
comics_document->alternative_command) {
comics_document->command_usage = UNZIP;
return TRUE;
}
/* fallback mode using 7za and 7z from p7zip project */
comics_document->selected_command =
g_find_program_in_path ("7za");
if (comics_document->selected_command) {
comics_document->command_usage = P7ZIP;
return TRUE;
}
comics_document->selected_command =
g_find_program_in_path ("7z");
if (comics_document->selected_command) {
comics_document->command_usage = P7ZIP;
return TRUE;
}
comics_document->selected_command =
g_find_program_in_path ("bsdtar");
if (comics_document->selected_command) {
comics_document->command_usage = TAR;
return TRUE;
}
} else if (g_content_type_is_a (mime_type, "application/x-cb7") ||
g_content_type_is_a (mime_type, "application/x-7z-compressed")) {
/* 7zr, 7za and 7z are the commands from the p7zip project able
* to decompress .7z files */
comics_document->selected_command =
g_find_program_in_path ("7zr");
if (comics_document->selected_command) {
comics_document->command_usage = P7ZIP;
return TRUE;
}
comics_document->selected_command =
g_find_program_in_path ("7za");
if (comics_document->selected_command) {
comics_document->command_usage = P7ZIP;
return TRUE;
}
comics_document->selected_command =
g_find_program_in_path ("7z");
if (comics_document->selected_command) {
comics_document->command_usage = P7ZIP;
return TRUE;
}
comics_document->selected_command =
g_find_program_in_path ("bsdtar");
if (comics_document->selected_command) {
comics_document->command_usage = TAR;
return TRUE;
}
} else if (g_content_type_is_a (mime_type, "application/x-cbt") ||
g_content_type_is_a (mime_type, "application/x-tar")) {
/* tar utility (Tape ARchive) */
comics_document->selected_command =
g_find_program_in_path ("tar");
if (comics_document->selected_command) {
comics_document->command_usage = TAR;
return TRUE;
}
comics_document->selected_command =
g_find_program_in_path ("bsdtar");
if (comics_document->selected_command) {
comics_document->command_usage = TAR;
return TRUE;
}
} else {
g_set_error (error,
EV_DOCUMENT_ERROR,
EV_DOCUMENT_ERROR_INVALID,
_("Not a comic book MIME type: %s"),
mime_type);
return FALSE;
}
g_set_error_literal (error,
EV_DOCUMENT_ERROR,
EV_DOCUMENT_ERROR_INVALID,
_("Can’t find an appropriate command to "
"decompress this type of comic book"));
return FALSE;
}
| 180,808 | 2,362 | 102189984230626422995821779053396632474 | null | null | null |
linux | ea25f914dc164c8d56b36147ecc86bc65f83c469 | 1 |
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = state->regs;
int off, i, slot, spi;
if (regs[regno].type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(regs[regno]))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[regs[regno].type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
/* Only allow fixed-offset stack reads */
if (!tnum_is_const(regs[regno].var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
}
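/* Note: labeled CWE-119; unlike the other error paths in this function, the branch above falls through without returning -EACCES, so verification continues with a variable stack offset. */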
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
regno, off, access_size);
return -EACCES;
}
if (env->prog->aux->stack_depth < -off)
env->prog->aux->stack_depth = -off;
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
return 0;
}
for (i = 0; i < access_size; i++) {
slot = -(off + i) - 1;
spi = slot / BPF_REG_SIZE;
if (state->allocated_stack <= slot ||
state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
STACK_MISC) {
verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
}
}
return 0;
}
| CWE-119 | 180,812 | 2,366 | 100968769887024984694199990496086030146 | null | null | null |
linux | a5ec6ae161d72f01411169a938fa5f8baea16e8f | 1 |
static int check_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size)
{
bool strict = env->strict_alignment;
const char *pointer_desc = "";
switch (reg->type) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
/* Special case, because of NET_IP_ALIGN. Given metadata sits
* right in front, treat it the very same way.
*/
return check_pkt_ptr_alignment(env, reg, off, size, strict);
case PTR_TO_MAP_VALUE:
pointer_desc = "value ";
break;
case PTR_TO_CTX:
pointer_desc = "context ";
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
break;
default:
break;
}
return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
strict);
}
| CWE-119 | 180,813 | 2,367 | 132971837477051945919058697320929966878 | null | null | null |
linux | 179d1c5602997fef5a940c6ddcf31212cbfebd14 | 1 |
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
{
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
return true;
if (rold->type == NOT_INIT)
/* explored state can't have used this */
return true;
if (rcur->type == NOT_INIT)
return false;
switch (rold->type) {
case SCALAR_VALUE:
if (rcur->type == SCALAR_VALUE) {
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
/* if we knew anything about the old value, we're not
* equal, because we can't know anything about the
* scalar value of the pointer in the new value.
*/
return rold->umin_value == 0 &&
rold->umax_value == U64_MAX &&
rold->smin_value == S64_MIN &&
rold->smax_value == S64_MAX &&
tnum_is_unknown(rold->var_off);
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and
* everything else matches, we are OK.
* We don't care about the 'id' value, because nothing
* uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
*/
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_MAP_VALUE_OR_NULL:
/* a PTR_TO_MAP_VALUE could be safe to use as a
* PTR_TO_MAP_VALUE_OR_NULL into the same map.
* However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
* checked, doing so could have affected others with the same
* id, and we can't check for that because we lost the id when
* we converted to a PTR_TO_MAP_VALUE.
*/
if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
return false;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
return false;
/* Check our ids match any regs they're supposed to */
return check_ids(rold->id, rcur->id, idmap);
case PTR_TO_PACKET_META:
case PTR_TO_PACKET:
if (rcur->type != rold->type)
return false;
/* We must have at least as much range as the old ptr
* did, so that any accesses which were safe before are
* still safe. This is true even if old range < old off,
* since someone could have accessed through (ptr - k), or
* even done ptr -= k in a register, to get a safe access.
*/
if (rold->range > rcur->range)
return false;
/* If the offsets don't match, we can't trust our alignment;
* nor can we be sure that we won't fall out of range.
*/
if (rold->off != rcur->off)
return false;
/* id relations must be preserved */
if (rold->id && !check_ids(rold->id, rcur->id, idmap))
return false;
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
case PTR_TO_STACK:
case PTR_TO_PACKET_END:
/* Only valid matches are exact, which memcmp() above
* would have accepted
*/
default:
/* Don't know what's going on, just say it's not safe */
return false;
}
/* Shouldn't get here; if we do, say it's not safe */
WARN_ON_ONCE(1);
return false;
}
| CWE-119 | 180,814 | 2,368 | 312414542843290372624743195982646413593 | null | null | null |
linux | 4374f256ce8182019353c0c639bb8d0695b4c941 | 1 |
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state src_reg)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->64 */
coerce_reg_to_32(dst_reg);
coerce_reg_to_32(&src_reg);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
umax_val = src_reg.umax_value;
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value += smin_val;
dst_reg->smax_value += smax_val;
}
if (dst_reg->umin_value + umin_val < umin_val ||
dst_reg->umax_value + umax_val < umax_val) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value += umin_val;
dst_reg->umax_value += umax_val;
}
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value -= smax_val;
dst_reg->smax_value -= smin_val;
}
if (dst_reg->umin_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value -= umax_val;
dst_reg->umax_value -= umin_val;
}
dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
break;
case BPF_MUL:
dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
if (smin_val < 0 || dst_reg->smin_value < 0) {
/* Ain't nobody got time to multiply that sign */
__mark_reg_unbounded(dst_reg);
__update_reg_bounds(dst_reg);
break;
}
/* Both values are positive, so we can work with unsigned and
* copy the result to signed (unless it exceeds S64_MAX).
*/
if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
/* Potential overflow, we know nothing */
__mark_reg_unbounded(dst_reg);
/* (except what we can learn from the var_off) */
__update_reg_bounds(dst_reg);
break;
}
dst_reg->umin_value *= umin_val;
dst_reg->umax_value *= umax_val;
if (dst_reg->umax_value > S64_MAX) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
break;
case BPF_AND:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value &
src_reg.var_off.value);
break;
}
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = dst_reg->var_off.value;
dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ANDing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ANDing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_OR:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value |
src_reg.var_off.value);
break;
}
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
dst_reg->umax_value = dst_reg->var_off.value |
dst_reg->var_off.mask;
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ORing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ORing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* We lose all sign bit information (except what we can pick
* up from var_off)
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
/* If we might shift our top bit out, then we know nothing */
if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value <<= umin_val;
dst_reg->umax_value <<= umax_val;
}
if (src_known)
dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
else
dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift, so make the appropriate casts */
if (dst_reg->smin_value < 0) {
if (umin_val) {
/* Sign bit will be cleared */
dst_reg->smin_value = 0;
} else {
/* Lost sign bit information */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
}
} else {
dst_reg->smin_value =
(u64)(dst_reg->smin_value) >> umax_val;
}
if (src_known)
dst_reg->var_off = tnum_rshift(dst_reg->var_off,
umin_val);
else
dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
dst_reg->umin_value >>= umax_val;
dst_reg->umax_value >>= umin_val;
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
default:
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
}
| CWE-119 | 180,817 | 2,371 | 322783137603102715097131954466307591369 | null | null | null |
linux | 468f6eafa6c44cb2c5d8aad35e12f06c240a812a | 1 |
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state src_reg)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->64 */
coerce_reg_to_size(dst_reg, 4);
coerce_reg_to_size(&src_reg, 4);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
umax_val = src_reg.umax_value;
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value += smin_val;
dst_reg->smax_value += smax_val;
}
if (dst_reg->umin_value + umin_val < umin_val ||
dst_reg->umax_value + umax_val < umax_val) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value += umin_val;
dst_reg->umax_value += umax_val;
}
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value -= smax_val;
dst_reg->smax_value -= smin_val;
}
if (dst_reg->umin_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value -= umax_val;
dst_reg->umax_value -= umin_val;
}
dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
break;
case BPF_MUL:
dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
if (smin_val < 0 || dst_reg->smin_value < 0) {
/* Ain't nobody got time to multiply that sign */
__mark_reg_unbounded(dst_reg);
__update_reg_bounds(dst_reg);
break;
}
/* Both values are positive, so we can work with unsigned and
* copy the result to signed (unless it exceeds S64_MAX).
*/
if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
/* Potential overflow, we know nothing */
__mark_reg_unbounded(dst_reg);
/* (except what we can learn from the var_off) */
__update_reg_bounds(dst_reg);
break;
}
dst_reg->umin_value *= umin_val;
dst_reg->umax_value *= umax_val;
if (dst_reg->umax_value > S64_MAX) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
break;
case BPF_AND:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value &
src_reg.var_off.value);
break;
}
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = dst_reg->var_off.value;
dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ANDing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ANDing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_OR:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value |
src_reg.var_off.value);
break;
}
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
dst_reg->umax_value = dst_reg->var_off.value |
dst_reg->var_off.mask;
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ORing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ORing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* We lose all sign bit information (except what we can pick
* up from var_off)
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
/* If we might shift our top bit out, then we know nothing */
if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value <<= umin_val;
dst_reg->umax_value <<= umax_val;
}
if (src_known)
dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
else
dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
* be negative, then either:
* 1) src_reg might be zero, so the sign bit of the result is
* unknown, so we lose our signed bounds
* 2) it's known negative, thus the unsigned bounds capture the
* signed bounds
* 3) the signed bounds cross zero, so they tell us nothing
* about the result
* If the value in dst_reg is known nonnegative, then again the
		 * unsigned bounds capture the signed bounds.
* Thus, in all cases it suffices to blow away our signed bounds
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
if (src_known)
dst_reg->var_off = tnum_rshift(dst_reg->var_off,
umin_val);
else
dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
dst_reg->umin_value >>= umax_val;
dst_reg->umax_value >>= umin_val;
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
default:
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
}
| CWE-119 | 180,818 | 2,372 | 338721747458807656695851600634646855591 | null | null | null
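The BPF_RSH case in the record above deliberately throws away the signed bounds and shifts only the unsigned ones: umin by the largest possible shift, umax by the smallest. A brute-force user-space check of that invariant over made-up example ranges (a sketch for intuition, not verifier code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Arbitrary example range for the value and for the shift amount. */
    uint64_t umin = 100, umax = 5000;
    unsigned shift_min = 2, shift_max = 7;

    /* Bounds the verifier derives: shift the low bound by the largest
     * shift and the high bound by the smallest shift. */
    uint64_t new_umin = umin >> shift_max;
    uint64_t new_umax = umax >> shift_min;

    for (uint64_t x = umin; x <= umax; x++)
        for (unsigned s = shift_min; s <= shift_max; s++)
            if ((x >> s) < new_umin || (x >> s) > new_umax) {
                printf("violation at x=%llu s=%u\n",
                       (unsigned long long)x, s);
                return 1;
            }

    printf("[%llu, %llu] >> [%u, %u] stays within [%llu, %llu]\n",
           (unsigned long long)umin, (unsigned long long)umax,
           shift_min, shift_max,
           (unsigned long long)new_umin, (unsigned long long)new_umax);
    return 0;
}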
linux | 4dca6ea1d9432052afb06baf2e3ae78188a4410b | 1 |
static void construct_get_dest_keyring(struct key **_dest_keyring)
{
struct request_key_auth *rka;
const struct cred *cred = current_cred();
struct key *dest_keyring = *_dest_keyring, *authkey;
kenter("%p", dest_keyring);
/* find the appropriate keyring */
if (dest_keyring) {
/* the caller supplied one */
key_get(dest_keyring);
} else {
/* use a default keyring; falling through the cases until we
* find one that we actually have */
switch (cred->jit_keyring) {
case KEY_REQKEY_DEFL_DEFAULT:
case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
if (cred->request_key_auth) {
authkey = cred->request_key_auth;
down_read(&authkey->sem);
rka = authkey->payload.data[0];
if (!test_bit(KEY_FLAG_REVOKED,
&authkey->flags))
dest_keyring =
key_get(rka->dest_keyring);
up_read(&authkey->sem);
if (dest_keyring)
break;
}
case KEY_REQKEY_DEFL_THREAD_KEYRING:
dest_keyring = key_get(cred->thread_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
dest_keyring = key_get(cred->process_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_SESSION_KEYRING:
rcu_read_lock();
dest_keyring = key_get(
rcu_dereference(cred->session_keyring));
rcu_read_unlock();
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
dest_keyring =
key_get(cred->user->session_keyring);
break;
case KEY_REQKEY_DEFL_USER_KEYRING:
dest_keyring = key_get(cred->user->uid_keyring);
break;
case KEY_REQKEY_DEFL_GROUP_KEYRING:
default:
BUG();
}
}
*_dest_keyring = dest_keyring;
kleave(" [dk %d]", key_serial(dest_keyring));
return;
}
| CWE-862 | 180,819 | 2,373 | 275435868270037711146449830073903331943 | null | null | null
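construct_get_dest_keyring above works by switch fall-through: it enters at the preferred case and only breaks once a keyring was actually found, otherwise control drops into the next preference. A minimal sketch of that selection pattern; every name and value here is invented for the illustration.

#include <stdio.h>

enum pref { PREF_THREAD, PREF_PROCESS, PREF_SESSION, PREF_USER };

/* Pretend only the "session" and "user" keyrings exist in this example. */
static const char *thread_kr  = 0;
static const char *process_kr = 0;
static const char *session_kr = "session-keyring";
static const char *user_kr    = "user-keyring";

static const char *pick(enum pref start)
{
    const char *found = 0;

    switch (start) {
    case PREF_THREAD:
        found = thread_kr;
        if (found)
            break;
        /* fall through */
    case PREF_PROCESS:
        found = process_kr;
        if (found)
            break;
        /* fall through */
    case PREF_SESSION:
        found = session_kr;
        if (found)
            break;
        /* fall through */
    case PREF_USER:
        found = user_kr;
        break;
    }
    return found;
}

int main(void)
{
    /* Asking for the thread keyring falls through to the session one. */
    printf("%s\n", pick(PREF_THREAD));
    return 0;
}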
linux | af3ff8045bbf3e32f1a448542e73abb4c8ceb6f1 | 1 |
static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_alg *alg;
struct shash_alg *salg;
int err;
int ds;
int ss;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
return err;
salg = shash_attr_alg(tb[1], 0, 0);
if (IS_ERR(salg))
return PTR_ERR(salg);
err = -EINVAL;
ds = salg->digestsize;
ss = salg->statesize;
alg = &salg->base;
if (ds > alg->cra_blocksize ||
ss < alg->cra_blocksize)
goto out_put_alg;
inst = shash_alloc_instance("hmac", alg);
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg,
shash_crypto_instance(inst));
if (err)
goto out_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
ss = ALIGN(ss, alg->cra_alignmask + 1);
inst->alg.digestsize = ds;
inst->alg.statesize = ss;
inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
ALIGN(ss * 2, crypto_tfm_ctx_alignment());
inst->alg.base.cra_init = hmac_init_tfm;
inst->alg.base.cra_exit = hmac_exit_tfm;
inst->alg.init = hmac_init;
inst->alg.update = hmac_update;
inst->alg.final = hmac_final;
inst->alg.finup = hmac_finup;
inst->alg.export = hmac_export;
inst->alg.import = hmac_import;
inst->alg.setkey = hmac_setkey;
err = shash_register_instance(tmpl, inst);
if (err) {
out_free_inst:
shash_free_instance(shash_crypto_instance(inst));
}
out_put_alg:
crypto_mod_put(alg);
return err;
}
| CWE-787 | 180,821 | 2,375 | 288299611161677821609080915801291634897 | null | null | null
linux | ecaaab5649781c5a0effdaf298a925063020500e | 1 |
static int encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 64);
salsa20_ivsetup(ctx, walk.iv);
if (likely(walk.nbytes == nbytes))
{
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
walk.dst.virt.addr, nbytes);
return blkcipher_walk_done(desc, &walk, 0);
}
while (walk.nbytes >= 64) {
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
walk.dst.virt.addr,
walk.nbytes - (walk.nbytes % 64));
err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
}
if (walk.nbytes) {
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
walk.dst.virt.addr, walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
| CWE-20 | 180,823 | 2,377 | 328829554120722254631759373321712097039 | null | null | null
linux | ecaaab5649781c5a0effdaf298a925063020500e | 1 |
static int encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 64);
salsa20_ivsetup(ctx, walk.iv);
if (likely(walk.nbytes == nbytes))
{
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
walk.src.virt.addr, nbytes);
return blkcipher_walk_done(desc, &walk, 0);
}
while (walk.nbytes >= 64) {
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
walk.src.virt.addr,
walk.nbytes - (walk.nbytes % 64));
err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
}
if (walk.nbytes) {
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
| CWE-20 | 180,824 | 2,378 | 38637576161082233175356705377762150199 | null | null | null
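The two salsa20 records above differ only in the argument order passed to salsa20_encrypt_bytes; the surrounding loop is the same in both and processes the walk in whole 64-byte blocks, handing any remainder back for the next pass (walk.nbytes - (walk.nbytes % 64)). A standalone sketch of that chunking arithmetic, detached from the crypto walk API; the byte count is an arbitrary example:

#include <stdio.h>
#include <stddef.h>

#define BLOCK 64

int main(void)
{
    size_t total = 150;   /* pretend this many bytes are queued */
    size_t done = 0;

    while (total - done >= BLOCK) {
        size_t avail = total - done;
        size_t chunk = avail - (avail % BLOCK);  /* whole blocks only */
        /* a real cipher would transform chunk bytes here */
        done += chunk;
        printf("processed %zu bytes, %zu left\n", chunk, total - done);
    }
    if (total - done) {
        /* final partial block, like the trailing walk.nbytes case */
        printf("final partial block of %zu bytes\n", total - done);
    }
    return 0;
}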
linux | 8f659a03a0ba9289b9aeb9b4470e6fb263d6f483 | 1 |
static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
struct flowi4 fl4;
int free = 0;
__be32 daddr;
__be32 saddr;
u8 tos;
int err;
struct ip_options_data opt_copy;
struct raw_frag_vec rfv;
err = -EMSGSIZE;
if (len > 0xFFFF)
goto out;
/*
* Check the flags.
*/
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */
goto out; /* compatibility */
/*
* Get and verify the address.
*/
if (msg->msg_namelen) {
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
err = -EINVAL;
if (msg->msg_namelen < sizeof(*usin))
goto out;
if (usin->sin_family != AF_INET) {
pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n",
__func__, current->comm);
err = -EAFNOSUPPORT;
if (usin->sin_family)
goto out;
}
daddr = usin->sin_addr.s_addr;
/* ANK: I did not forget to get protocol from port field.
* I just do not know, who uses this weirdness.
* IP_HDRINCL is much more convenient.
*/
} else {
err = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->inet_daddr;
}
ipc.sockc.tsflags = sk->sk_tsflags;
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(sk, msg, &ipc, false);
if (unlikely(err)) {
kfree(ipc.opt);
goto out;
}
if (ipc.opt)
free = 1;
}
saddr = ipc.addr;
ipc.addr = daddr;
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
if (ipc.opt) {
err = -EINVAL;
/* Linux does not mangle headers on raw sockets,
* so that IP options + IP_HDRINCL is non-sense.
*/
if (inet->hdrincl)
goto done;
if (ipc.opt->opt.srr) {
if (!daddr)
goto done;
daddr = ipc.opt->opt.faddr;
}
}
tos = get_rtconn_flags(&ipc, sk);
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
} else if (!ipc.oif)
ipc.oif = inet->uc_index;
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk) |
(inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0, sk->sk_uid);
if (!inet->hdrincl) {
rfv.msg = msg;
rfv.hlen = 0;
err = raw_probe_proto_opt(&rfv, &fl4);
if (err)
goto done;
}
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
goto done;
}
err = -EACCES;
if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
goto done;
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = raw_send_hdrinc(sk, &fl4, msg, len,
&rt, msg->msg_flags, &ipc.sockc);
else {
sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
if (!ipc.addr)
ipc.addr = fl4.daddr;
lock_sock(sk);
err = ip_append_data(sk, &fl4, raw_getfrag,
&rfv, len, 0,
&ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE)) {
err = ip_push_pending_frames(sk, &fl4);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
}
release_sock(sk);
}
done:
if (free)
kfree(ipc.opt);
ip_rt_put(rt);
out:
if (err < 0)
return err;
return len;
do_confirm:
if (msg->msg_flags & MSG_PROBE)
dst_confirm_neigh(&rt->dst, &fl4.daddr);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
| CWE-362 | 180,825 | 2,379 | 148099251020650172474770082419741037035 | null | null | null
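For context on the inet->hdrincl branches in raw_sendmsg above: hdrincl is the per-socket flag that user space toggles with the IP_HDRINCL socket option, after which the application supplies the complete IPv4 header itself and the kernel takes the raw_send_hdrinc() path. A minimal user-space sketch of setting it (needs CAP_NET_RAW; error handling kept short):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
    if (fd < 0) {
        perror("socket");
        return 1;
    }

    int on = 1;
    /* After this, every payload handed to sendmsg() must start with a
     * complete IPv4 header; option parsing and srr handling are skipped. */
    if (setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)) < 0) {
        perror("setsockopt(IP_HDRINCL)");
        return 1;
    }
    printf("raw socket ready, IP_HDRINCL set\n");
    return 0;
}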
heimdal | 1a6a6e462dc2ac6111f9e02c6852ddec4849b887 | 1 |
_kdc_as_rep(kdc_request_t r,
krb5_data *reply,
const char *from,
struct sockaddr *from_addr,
int datagram_reply)
{
krb5_context context = r->context;
krb5_kdc_configuration *config = r->config;
KDC_REQ *req = &r->req;
KDC_REQ_BODY *b = NULL;
AS_REP rep;
KDCOptions f;
krb5_enctype setype;
krb5_error_code ret = 0;
Key *skey;
int found_pa = 0;
int i, flags = HDB_F_FOR_AS_REQ;
METHOD_DATA error_method;
const PA_DATA *pa;
memset(&rep, 0, sizeof(rep));
error_method.len = 0;
error_method.val = NULL;
/*
* Look for FAST armor and unwrap
*/
ret = _kdc_fast_unwrap_request(r);
if (ret) {
_kdc_r_log(r, 0, "FAST unwrap request from %s failed: %d", from, ret);
goto out;
}
b = &req->req_body;
f = b->kdc_options;
if (f.canonicalize)
flags |= HDB_F_CANON;
if(b->sname == NULL){
ret = KRB5KRB_ERR_GENERIC;
_kdc_set_e_text(r, "No server in request");
} else{
ret = _krb5_principalname2krb5_principal (context,
&r->server_princ,
*(b->sname),
b->realm);
if (ret == 0)
ret = krb5_unparse_name(context, r->server_princ, &r->server_name);
}
if (ret) {
kdc_log(context, config, 0,
"AS-REQ malformed server name from %s", from);
goto out;
}
if(b->cname == NULL){
ret = KRB5KRB_ERR_GENERIC;
_kdc_set_e_text(r, "No client in request");
} else {
ret = _krb5_principalname2krb5_principal (context,
&r->client_princ,
*(b->cname),
b->realm);
if (ret)
goto out;
ret = krb5_unparse_name(context, r->client_princ, &r->client_name);
}
if (ret) {
kdc_log(context, config, 0,
"AS-REQ malformed client name from %s", from);
goto out;
}
kdc_log(context, config, 0, "AS-REQ %s from %s for %s",
r->client_name, from, r->server_name);
/*
*
*/
if (_kdc_is_anonymous(context, r->client_princ)) {
if (!_kdc_is_anon_request(b)) {
kdc_log(context, config, 0, "Anonymous ticket w/o anonymous flag");
ret = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN;
goto out;
}
} else if (_kdc_is_anon_request(b)) {
kdc_log(context, config, 0,
"Request for a anonymous ticket with non "
"anonymous client name: %s", r->client_name);
ret = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN;
goto out;
}
/*
*
*/
ret = _kdc_db_fetch(context, config, r->client_princ,
HDB_F_GET_CLIENT | flags, NULL,
&r->clientdb, &r->client);
if(ret == HDB_ERR_NOT_FOUND_HERE) {
kdc_log(context, config, 5, "client %s does not have secrets at this KDC, need to proxy",
r->client_name);
goto out;
} else if (ret == HDB_ERR_WRONG_REALM) {
char *fixed_client_name = NULL;
ret = krb5_unparse_name(context, r->client->entry.principal,
&fixed_client_name);
if (ret) {
goto out;
}
kdc_log(context, config, 0, "WRONG_REALM - %s -> %s",
r->client_name, fixed_client_name);
free(fixed_client_name);
ret = _kdc_fast_mk_error(context, r,
&error_method,
r->armor_crypto,
&req->req_body,
KRB5_KDC_ERR_WRONG_REALM,
NULL,
r->server_princ,
NULL,
&r->client->entry.principal->realm,
NULL, NULL,
reply);
goto out;
} else if(ret){
const char *msg = krb5_get_error_message(context, ret);
kdc_log(context, config, 0, "UNKNOWN -- %s: %s", r->client_name, msg);
krb5_free_error_message(context, msg);
ret = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN;
goto out;
}
ret = _kdc_db_fetch(context, config, r->server_princ,
HDB_F_GET_SERVER|HDB_F_GET_KRBTGT | flags,
NULL, NULL, &r->server);
if(ret == HDB_ERR_NOT_FOUND_HERE) {
kdc_log(context, config, 5, "target %s does not have secrets at this KDC, need to proxy",
r->server_name);
goto out;
} else if(ret){
const char *msg = krb5_get_error_message(context, ret);
kdc_log(context, config, 0, "UNKNOWN -- %s: %s", r->server_name, msg);
krb5_free_error_message(context, msg);
ret = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto out;
}
/*
* Select a session enctype from the list of the crypto system
* supported enctypes that is supported by the client and is one of
     * the enctypes of the service (likely krbtgt).
*
* The latter is used as a hint of what enctypes all KDC support,
* to make sure a newer version of KDC won't generate a session
* enctype that an older version of a KDC in the same realm can't
* decrypt.
*/
ret = _kdc_find_etype(context,
krb5_principal_is_krbtgt(context, r->server_princ) ?
config->tgt_use_strongest_session_key :
config->svc_use_strongest_session_key, FALSE,
r->client, b->etype.val, b->etype.len, &r->sessionetype,
NULL);
if (ret) {
kdc_log(context, config, 0,
"Client (%s) from %s has no common enctypes with KDC "
"to use for the session key",
r->client_name, from);
goto out;
}
/*
* Pre-auth processing
*/
if(req->padata){
unsigned int n;
log_patypes(context, config, req->padata);
/* Check if preauth matching */
for (n = 0; !found_pa && n < sizeof(pat) / sizeof(pat[0]); n++) {
if (pat[n].validate == NULL)
continue;
if (r->armor_crypto == NULL && (pat[n].flags & PA_REQ_FAST))
continue;
kdc_log(context, config, 5,
"Looking for %s pa-data -- %s", pat[n].name, r->client_name);
i = 0;
pa = _kdc_find_padata(req, &i, pat[n].type);
if (pa) {
ret = pat[n].validate(r, pa);
if (ret != 0) {
goto out;
}
kdc_log(context, config, 0,
"%s pre-authentication succeeded -- %s",
pat[n].name, r->client_name);
found_pa = 1;
r->et.flags.pre_authent = 1;
}
}
}
if (found_pa == 0) {
Key *ckey = NULL;
size_t n;
for (n = 0; n < sizeof(pat) / sizeof(pat[0]); n++) {
if ((pat[n].flags & PA_ANNOUNCE) == 0)
continue;
ret = krb5_padata_add(context, &error_method,
pat[n].type, NULL, 0);
if (ret)
goto out;
}
/*
* If there is a client key, send ETYPE_INFO{,2}
*/
ret = _kdc_find_etype(context,
config->preauth_use_strongest_session_key, TRUE,
r->client, b->etype.val, b->etype.len, NULL, &ckey);
if (ret == 0) {
/*
* RFC4120 requires:
* - If the client only knows about old enctypes, then send
* both info replies (we send 'info' first in the list).
* - If the client is 'modern', because it knows about 'new'
* enctype types, then only send the 'info2' reply.
*
* Before we send the full list of etype-info data, we pick
* the client key we would have used anyway below, just pick
* that instead.
*/
if (older_enctype(ckey->key.keytype)) {
ret = get_pa_etype_info(context, config,
&error_method, ckey);
if (ret)
goto out;
}
ret = get_pa_etype_info2(context, config,
&error_method, ckey);
if (ret)
goto out;
}
/*
     * send require-preauth if it is required or anon is requested;
* anon is today only allowed via preauth mechanisms.
*/
if (require_preauth_p(r) || _kdc_is_anon_request(b)) {
ret = KRB5KDC_ERR_PREAUTH_REQUIRED;
_kdc_set_e_text(r, "Need to use PA-ENC-TIMESTAMP/PA-PK-AS-REQ");
goto out;
}
if (ckey == NULL) {
ret = KRB5KDC_ERR_CLIENT_NOTYET;
_kdc_set_e_text(r, "Doesn't have a client key available");
goto out;
}
krb5_free_keyblock_contents(r->context, &r->reply_key);
ret = krb5_copy_keyblock_contents(r->context, &ckey->key, &r->reply_key);
if (ret)
goto out;
}
if (r->clientdb->hdb_auth_status) {
r->clientdb->hdb_auth_status(context, r->clientdb, r->client,
HDB_AUTH_SUCCESS);
}
/*
* Verify flags after the user been required to prove its identity
* with in a preauth mech.
*/
ret = _kdc_check_access(context, config, r->client, r->client_name,
r->server, r->server_name,
req, &error_method);
if(ret)
goto out;
/*
* Select the best encryption type for the KDC with out regard to
* the client since the client never needs to read that data.
*/
ret = _kdc_get_preferred_key(context, config,
r->server, r->server_name,
&setype, &skey);
if(ret)
goto out;
if(f.renew || f.validate || f.proxy || f.forwarded || f.enc_tkt_in_skey
|| (_kdc_is_anon_request(b) && !config->allow_anonymous)) {
ret = KRB5KDC_ERR_BADOPTION;
_kdc_set_e_text(r, "Bad KDC options");
goto out;
}
/*
* Build reply
*/
rep.pvno = 5;
rep.msg_type = krb_as_rep;
if (_kdc_is_anonymous(context, r->client_princ)) {
Realm anon_realm=KRB5_ANON_REALM;
ret = copy_Realm(&anon_realm, &rep.crealm);
} else
ret = copy_Realm(&r->client->entry.principal->realm, &rep.crealm);
if (ret)
goto out;
ret = _krb5_principal2principalname(&rep.cname, r->client->entry.principal);
if (ret)
goto out;
rep.ticket.tkt_vno = 5;
ret = copy_Realm(&r->server->entry.principal->realm, &rep.ticket.realm);
if (ret)
goto out;
_krb5_principal2principalname(&rep.ticket.sname,
r->server->entry.principal);
/* java 1.6 expects the name to be the same type, lets allow that
* uncomplicated name-types. */
#define CNT(sp,t) (((sp)->sname->name_type) == KRB5_NT_##t)
if (CNT(b, UNKNOWN) || CNT(b, PRINCIPAL) || CNT(b, SRV_INST) || CNT(b, SRV_HST) || CNT(b, SRV_XHST))
rep.ticket.sname.name_type = b->sname->name_type;
#undef CNT
r->et.flags.initial = 1;
if(r->client->entry.flags.forwardable && r->server->entry.flags.forwardable)
r->et.flags.forwardable = f.forwardable;
else if (f.forwardable) {
_kdc_set_e_text(r, "Ticket may not be forwardable");
ret = KRB5KDC_ERR_POLICY;
goto out;
}
if(r->client->entry.flags.proxiable && r->server->entry.flags.proxiable)
r->et.flags.proxiable = f.proxiable;
else if (f.proxiable) {
_kdc_set_e_text(r, "Ticket may not be proxiable");
ret = KRB5KDC_ERR_POLICY;
goto out;
}
if(r->client->entry.flags.postdate && r->server->entry.flags.postdate)
r->et.flags.may_postdate = f.allow_postdate;
else if (f.allow_postdate){
_kdc_set_e_text(r, "Ticket may not be postdate");
ret = KRB5KDC_ERR_POLICY;
goto out;
}
/* check for valid set of addresses */
if(!_kdc_check_addresses(context, config, b->addresses, from_addr)) {
_kdc_set_e_text(r, "Bad address list in requested");
ret = KRB5KRB_AP_ERR_BADADDR;
goto out;
}
ret = copy_PrincipalName(&rep.cname, &r->et.cname);
if (ret)
goto out;
ret = copy_Realm(&rep.crealm, &r->et.crealm);
if (ret)
goto out;
{
time_t start;
time_t t;
start = r->et.authtime = kdc_time;
if(f.postdated && req->req_body.from){
ALLOC(r->et.starttime);
start = *r->et.starttime = *req->req_body.from;
r->et.flags.invalid = 1;
r->et.flags.postdated = 1; /* XXX ??? */
}
_kdc_fix_time(&b->till);
t = *b->till;
/* be careful not overflowing */
if(r->client->entry.max_life)
t = start + min(t - start, *r->client->entry.max_life);
if(r->server->entry.max_life)
t = start + min(t - start, *r->server->entry.max_life);
#if 0
t = min(t, start + realm->max_life);
#endif
r->et.endtime = t;
if(f.renewable_ok && r->et.endtime < *b->till){
f.renewable = 1;
if(b->rtime == NULL){
ALLOC(b->rtime);
*b->rtime = 0;
}
if(*b->rtime < *b->till)
*b->rtime = *b->till;
}
if(f.renewable && b->rtime){
t = *b->rtime;
if(t == 0)
t = MAX_TIME;
if(r->client->entry.max_renew)
t = start + min(t - start, *r->client->entry.max_renew);
if(r->server->entry.max_renew)
t = start + min(t - start, *r->server->entry.max_renew);
#if 0
t = min(t, start + realm->max_renew);
#endif
ALLOC(r->et.renew_till);
*r->et.renew_till = t;
r->et.flags.renewable = 1;
}
}
if (_kdc_is_anon_request(b))
r->et.flags.anonymous = 1;
if(b->addresses){
ALLOC(r->et.caddr);
copy_HostAddresses(b->addresses, r->et.caddr);
}
r->et.transited.tr_type = DOMAIN_X500_COMPRESS;
krb5_data_zero(&r->et.transited.contents);
/* The MIT ASN.1 library (obviously) doesn't tell lengths encoded
* as 0 and as 0x80 (meaning indefinite length) apart, and is thus
* incapable of correctly decoding SEQUENCE OF's of zero length.
*
* To fix this, always send at least one no-op last_req
*
* If there's a pw_end or valid_end we will use that,
* otherwise just a dummy lr.
*/
r->ek.last_req.val = malloc(2 * sizeof(*r->ek.last_req.val));
if (r->ek.last_req.val == NULL) {
ret = ENOMEM;
goto out;
}
r->ek.last_req.len = 0;
if (r->client->entry.pw_end
&& (config->kdc_warn_pwexpire == 0
|| kdc_time + config->kdc_warn_pwexpire >= *r->client->entry.pw_end)) {
r->ek.last_req.val[r->ek.last_req.len].lr_type = LR_PW_EXPTIME;
r->ek.last_req.val[r->ek.last_req.len].lr_value = *r->client->entry.pw_end;
++r->ek.last_req.len;
}
if (r->client->entry.valid_end) {
r->ek.last_req.val[r->ek.last_req.len].lr_type = LR_ACCT_EXPTIME;
r->ek.last_req.val[r->ek.last_req.len].lr_value = *r->client->entry.valid_end;
++r->ek.last_req.len;
}
if (r->ek.last_req.len == 0) {
r->ek.last_req.val[r->ek.last_req.len].lr_type = LR_NONE;
r->ek.last_req.val[r->ek.last_req.len].lr_value = 0;
++r->ek.last_req.len;
}
r->ek.nonce = b->nonce;
if (r->client->entry.valid_end || r->client->entry.pw_end) {
ALLOC(r->ek.key_expiration);
if (r->client->entry.valid_end) {
if (r->client->entry.pw_end)
*r->ek.key_expiration = min(*r->client->entry.valid_end,
*r->client->entry.pw_end);
else
*r->ek.key_expiration = *r->client->entry.valid_end;
} else
*r->ek.key_expiration = *r->client->entry.pw_end;
} else
r->ek.key_expiration = NULL;
r->ek.flags = r->et.flags;
r->ek.authtime = r->et.authtime;
if (r->et.starttime) {
ALLOC(r->ek.starttime);
*r->ek.starttime = *r->et.starttime;
}
r->ek.endtime = r->et.endtime;
if (r->et.renew_till) {
ALLOC(r->ek.renew_till);
*r->ek.renew_till = *r->et.renew_till;
}
ret = copy_Realm(&rep.ticket.realm, &r->ek.srealm);
if (ret)
goto out;
ret = copy_PrincipalName(&rep.ticket.sname, &r->ek.sname);
if (ret)
goto out;
if(r->et.caddr){
ALLOC(r->ek.caddr);
copy_HostAddresses(r->et.caddr, r->ek.caddr);
}
/*
* Check and session and reply keys
*/
if (r->session_key.keytype == ETYPE_NULL) {
ret = krb5_generate_random_keyblock(context, r->sessionetype, &r->session_key);
if (ret)
goto out;
}
if (r->reply_key.keytype == ETYPE_NULL) {
_kdc_set_e_text(r, "Client have no reply key");
ret = KRB5KDC_ERR_CLIENT_NOTYET;
goto out;
}
ret = copy_EncryptionKey(&r->session_key, &r->et.key);
if (ret)
goto out;
ret = copy_EncryptionKey(&r->session_key, &r->ek.key);
if (ret)
goto out;
if (r->outpadata.len) {
ALLOC(rep.padata);
if (rep.padata == NULL) {
ret = ENOMEM;
goto out;
}
ret = copy_METHOD_DATA(&r->outpadata, rep.padata);
if (ret)
goto out;
}
/* Add the PAC */
if (send_pac_p(context, req)) {
generate_pac(r, skey);
}
_kdc_log_timestamp(context, config, "AS-REQ", r->et.authtime, r->et.starttime,
r->et.endtime, r->et.renew_till);
/* do this as the last thing since this signs the EncTicketPart */
ret = _kdc_add_KRB5SignedPath(context,
config,
r->server,
setype,
r->client->entry.principal,
NULL,
NULL,
&r->et);
if (ret)
goto out;
log_as_req(context, config, r->reply_key.keytype, setype, b);
/*
* We always say we support FAST/enc-pa-rep
*/
r->et.flags.enc_pa_rep = r->ek.flags.enc_pa_rep = 1;
/*
* Add REQ_ENC_PA_REP if client supports it
*/
i = 0;
pa = _kdc_find_padata(req, &i, KRB5_PADATA_REQ_ENC_PA_REP);
if (pa) {
ret = add_enc_pa_rep(r);
if (ret) {
const char *msg = krb5_get_error_message(r->context, ret);
_kdc_r_log(r, 0, "add_enc_pa_rep failed: %s: %d", msg, ret);
krb5_free_error_message(r->context, msg);
goto out;
}
}
/*
*
*/
ret = _kdc_encode_reply(context, config,
r->armor_crypto, req->req_body.nonce,
&rep, &r->et, &r->ek, setype, r->server->entry.kvno,
&skey->key, r->client->entry.kvno,
&r->reply_key, 0, &r->e_text, reply);
if (ret)
goto out;
/*
* Check if message too large
*/
if (datagram_reply && reply->length > config->max_datagram_reply_length) {
krb5_data_free(reply);
ret = KRB5KRB_ERR_RESPONSE_TOO_BIG;
_kdc_set_e_text(r, "Reply packet too large");
}
out:
free_AS_REP(&rep);
/*
* In case of a non proxy error, build an error message.
*/
if(ret != 0 && ret != HDB_ERR_NOT_FOUND_HERE && reply->length == 0) {
ret = _kdc_fast_mk_error(context, r,
&error_method,
r->armor_crypto,
&req->req_body,
ret, r->e_text,
r->server_princ,
&r->client_princ->name,
&r->client_princ->realm,
NULL, NULL,
reply);
if (ret)
goto out2;
}
out2:
free_EncTicketPart(&r->et);
free_EncKDCRepPart(&r->ek);
free_KDCFastState(&r->fast);
if (error_method.len)
free_METHOD_DATA(&error_method);
if (r->outpadata.len)
free_METHOD_DATA(&r->outpadata);
if (r->client_princ) {
krb5_free_principal(context, r->client_princ);
r->client_princ = NULL;
}
if (r->client_name) {
free(r->client_name);
r->client_name = NULL;
}
if (r->server_princ){
krb5_free_principal(context, r->server_princ);
r->server_princ = NULL;
}
if (r->server_name) {
free(r->server_name);
r->server_name = NULL;
}
if (r->client)
_kdc_free_ent(context, r->client);
if (r->server)
_kdc_free_ent(context, r->server);
if (r->armor_crypto) {
krb5_crypto_destroy(r->context, r->armor_crypto);
r->armor_crypto = NULL;
}
krb5_free_keyblock_contents(r->context, &r->reply_key);
krb5_free_keyblock_contents(r->context, &r->session_key);
return ret;
}
| CWE-476 | 180,826 | 2,380 | 56768369760868757704328364106042737253 | null | null | null
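One piece of reasoning in the _kdc_as_rep record above is the ticket lifetime computation: the requested end time is clamped against both the client's and the server's max_life, always expressed as an offset from start so the arithmetic cannot overflow. A small standalone sketch of the same clamping with arbitrary example times (the helper name is invented here):

#include <stdio.h>
#include <time.h>

static time_t clamp_life(time_t start, time_t till,
                         const time_t *client_max, const time_t *server_max)
{
    time_t t = till;

    /* Mirror the pattern: t = start + min(t - start, *max_life). */
    if (client_max && (t - start) > *client_max)
        t = start + *client_max;
    if (server_max && (t - start) > *server_max)
        t = start + *server_max;
    return t;
}

int main(void)
{
    time_t start = 1000000;
    time_t till  = start + 36000;      /* client asks for 10 hours  */
    time_t cmax  = 8 * 3600;           /* client capped at 8 hours  */
    time_t smax  = 24 * 3600;          /* server allows 24 hours    */

    /* Prints 28800: the tighter of the two caps wins. */
    printf("endtime = start + %ld seconds\n",
           (long)(clamp_life(start, till, &cmax, &smax) - start));
    return 0;
}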
FFmpeg | 58cf31cee7a456057f337b3102a03206d833d5e8 | 1 |
static void gmc_mmx(uint8_t *dst, uint8_t *src,
int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy,
int shift, int r, int width, int height)
{
const int w = 8;
const int ix = ox >> (16 + shift);
const int iy = oy >> (16 + shift);
const int oxs = ox >> 4;
const int oys = oy >> 4;
const int dxxs = dxx >> 4;
const int dxys = dxy >> 4;
const int dyxs = dyx >> 4;
const int dyys = dyy >> 4;
const uint16_t r4[4] = { r, r, r, r };
const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
const uint64_t shift2 = 2 * shift;
#define MAX_STRIDE 4096U
#define MAX_H 8U
uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
int x, y;
const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
const int dxh = dxy * (h - 1);
const int dyw = dyx * (w - 1);
int need_emu = (unsigned) ix >= width - w ||
(unsigned) iy >= height - h;
if ( // non-constant fullpel offset (3% of blocks)
((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
(oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
(dxx | dxy | dyx | dyy) & 15 ||
(need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
shift, r, width, height);
return;
}
src += ix + iy * stride;
if (need_emu) {
ff_emulated_edge_mc_8(edge_buf, src, stride, stride, w + 1, h + 1, ix, iy, width, height);
src = edge_buf;
}
__asm__ volatile (
"movd %0, %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
:: "r" (1 << shift));
for (x = 0; x < w; x += 4) {
uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
oxs - dxys + dxxs * (x + 1),
oxs - dxys + dxxs * (x + 2),
oxs - dxys + dxxs * (x + 3) };
uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
oys - dyys + dyxs * (x + 1),
oys - dyys + dyxs * (x + 2),
oys - dyys + dyxs * (x + 3) };
for (y = 0; y < h; y++) {
__asm__ volatile (
"movq %0, %%mm4 \n\t"
"movq %1, %%mm5 \n\t"
"paddw %2, %%mm4 \n\t"
"paddw %3, %%mm5 \n\t"
"movq %%mm4, %0 \n\t"
"movq %%mm5, %1 \n\t"
"psrlw $12, %%mm4 \n\t"
"psrlw $12, %%mm5 \n\t"
: "+m" (*dx4), "+m" (*dy4)
: "m" (*dxy4), "m" (*dyy4));
__asm__ volatile (
"movq %%mm6, %%mm2 \n\t"
"movq %%mm6, %%mm1 \n\t"
"psubw %%mm4, %%mm2 \n\t"
"psubw %%mm5, %%mm1 \n\t"
"movq %%mm2, %%mm0 \n\t"
"movq %%mm4, %%mm3 \n\t"
"pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
"pmullw %%mm5, %%mm3 \n\t" // dx * dy
"pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
"pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
"movd %4, %%mm5 \n\t"
"movd %3, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
"pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
"movd %2, %%mm5 \n\t"
"movd %1, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
"pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
"paddw %5, %%mm1 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"psrlw %6, %%mm0 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"movd %%mm0, %0 \n\t"
: "=m" (dst[x + y * stride])
: "m" (src[0]), "m" (src[1]),
"m" (src[stride]), "m" (src[stride + 1]),
"m" (*r4), "m" (shift2));
src += stride;
}
src += 4 - h * stride;
}
}
| CWE-125 | 180,827 | 2,381 | 53843406165273077727141744678584227428 | null | null | null
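The need_emu test in gmc_mmx above folds two range checks into one by casting to unsigned: a negative ix wraps around to a huge value, so a single >= comparison catches both out-of-range directions at once. A tiny demonstration of the idiom with an arbitrary limit:

#include <stdio.h>

static int out_of_range(int ix, unsigned limit)
{
    /* Equivalent to (ix < 0 || ix >= (int)limit) when limit fits in int. */
    return (unsigned)ix >= limit;
}

int main(void)
{
    unsigned limit = 100;
    int samples[] = { -5, 0, 42, 99, 100, 1000 };

    for (int i = 0; i < 6; i++)
        printf("%5d -> %s\n", samples[i],
               out_of_range(samples[i], limit) ? "needs edge emulation"
                                               : "in range");
    return 0;
}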
linux | 95a762e2c8c942780948091f8f2a4f32fce1ac6f | 1 |
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode == BPF_END || opcode == BPF_NEG) {
if (opcode == BPF_NEG) {
if (BPF_SRC(insn->code) != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->off != 0 || insn->imm != 0) {
verbose(env, "BPF_NEG uses reserved fields\n");
return -EINVAL;
}
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
(insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
BPF_CLASS(insn->code) == BPF_ALU64) {
verbose(env, "BPF_END uses reserved fields\n");
return -EINVAL;
}
}
/* check src operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer arithmetic prohibited\n",
insn->dst_reg);
return -EACCES;
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
} else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
if (BPF_SRC(insn->code) == BPF_X) {
if (BPF_CLASS(insn->code) == BPF_ALU64) {
/* case: R1 = R2
* copy register state to dest reg
*/
regs[insn->dst_reg] = regs[insn->src_reg];
regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
verbose(env,
"R%d partial copy of pointer\n",
insn->src_reg);
return -EACCES;
}
mark_reg_unknown(env, regs, insn->dst_reg);
/* high 32 bits are known zero. */
regs[insn->dst_reg].var_off = tnum_cast(
regs[insn->dst_reg].var_off, 4);
				__update_reg_bounds(&regs[insn->dst_reg]);
}
} else {
/* case: R = imm
* remember the value we stored into this reg
*/
regs[insn->dst_reg].type = SCALAR_VALUE;
__mark_reg_known(regs + insn->dst_reg, insn->imm);
}
} else if (opcode > BPF_END) {
verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
return -EINVAL;
} else { /* all other ALU ops: and, sub, xor, add, ... */
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
verbose(env, "div by zero\n");
return -EINVAL;
}
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
if (insn->imm < 0 || insn->imm >= size) {
verbose(env, "invalid shift %d\n", insn->imm);
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
return adjust_reg_min_max_vals(env, insn);
}
return 0;
}
| CWE-119 | 180,832 | 2,385 | 195502195018736619584634757968225342872 | null | null | null
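check_alu_op above dispatches on fields packed into the one-byte opcode: the low three bits are the instruction class, bit 3 selects register versus immediate source, and the high nibble is the ALU/JMP operation. The masks below are meant to mirror the uapi definitions but are redeclared locally so the sketch stands alone; treat them as an illustration rather than the authoritative header.

#include <stdint.h>
#include <stdio.h>

/* Field extraction for ALU/JMP opcodes. */
#define XBPF_CLASS(code) ((code) & 0x07)
#define XBPF_SRC(code)   ((code) & 0x08)
#define XBPF_OP(code)    ((code) & 0xf0)

#define XBPF_ALU64 0x07
#define XBPF_X     0x08   /* operand comes from src_reg */
#define XBPF_ADD   0x00

int main(void)
{
    /* BPF_ALU64 | BPF_X | BPF_ADD: dst += src on 64 bits. */
    uint8_t code = XBPF_ALU64 | XBPF_X | XBPF_ADD;

    printf("class=0x%x src=%s op=0x%x\n",
           (unsigned)XBPF_CLASS(code),
           XBPF_SRC(code) == XBPF_X ? "reg" : "imm",
           (unsigned)XBPF_OP(code));
    return 0;
}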
linux | 373c4557d2aa362702c4c2d41288fb1e54990b7c | 1 |
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct hstate *h = hstate_vma(vma);
unsigned long next;
unsigned long hmask = huge_page_mask(h);
unsigned long sz = huge_page_size(h);
pte_t *pte;
int err = 0;
do {
next = hugetlb_entry_end(h, addr, end);
pte = huge_pte_offset(walk->mm, addr & hmask, sz);
if (pte && walk->hugetlb_entry)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
if (err)
break;
} while (addr = next, addr != end);
return err;
}
| CWE-200 | 180,833 | 2,386 | 27644966037369065190274352572076611686 | null | null | null
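walk_hugetlb_range above advances one huge page per iteration, ending each step at the next huge-page boundary or at end, whichever comes first. A generic sketch of that boundary arithmetic with an arbitrary 2 MiB page size (the size must be a power of two for the mask trick to work):

#include <stdio.h>

int main(void)
{
    unsigned long sz    = 0x200000UL;          /* 2 MiB "huge page"    */
    unsigned long hmask = ~(sz - 1);           /* like huge_page_mask  */
    unsigned long addr  = 0x00345000UL;        /* unaligned start      */
    unsigned long end   = 0x00a00000UL;
    unsigned long next;

    do {
        /* End of the current huge page, clamped to the range end. */
        next = (addr & hmask) + sz;
        if (next > end || next < addr)
            next = end;
        printf("entry covers [%#lx, %#lx)\n", addr, next);
        addr = next;
    } while (addr != end);
    return 0;
}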
collectd | d16c24542b2f96a194d43a73c2e5778822b9cb47 | 1 |
static int csnmp_read_table(host_definition_t *host, data_definition_t *data) {
struct snmp_pdu *req;
struct snmp_pdu *res = NULL;
struct variable_list *vb;
const data_set_t *ds;
size_t oid_list_len = data->values_len + 1;
/* Holds the last OID returned by the device. We use this in the GETNEXT
* request to proceed. */
oid_t oid_list[oid_list_len];
/* Set to false when an OID has left its subtree so we don't re-request it
* again. */
_Bool oid_list_todo[oid_list_len];
int status;
size_t i;
/* `value_list_head' and `value_list_tail' implement a linked list for each
* value. `instance_list_head' and `instance_list_tail' implement a linked
* list of
* instance names. This is used to jump gaps in the table. */
csnmp_list_instances_t *instance_list_head;
csnmp_list_instances_t *instance_list_tail;
csnmp_table_values_t **value_list_head;
csnmp_table_values_t **value_list_tail;
DEBUG("snmp plugin: csnmp_read_table (host = %s, data = %s)", host->name,
data->name);
if (host->sess_handle == NULL) {
DEBUG("snmp plugin: csnmp_read_table: host->sess_handle == NULL");
return (-1);
}
ds = plugin_get_ds(data->type);
if (!ds) {
ERROR("snmp plugin: DataSet `%s' not defined.", data->type);
return (-1);
}
if (ds->ds_num != data->values_len) {
ERROR("snmp plugin: DataSet `%s' requires %zu values, but config talks "
"about %zu",
data->type, ds->ds_num, data->values_len);
return (-1);
}
assert(data->values_len > 0);
/* We need a copy of all the OIDs, because GETNEXT will destroy them. */
memcpy(oid_list, data->values, data->values_len * sizeof(oid_t));
if (data->instance.oid.oid_len > 0)
memcpy(oid_list + data->values_len, &data->instance.oid, sizeof(oid_t));
else /* no InstanceFrom option specified. */
oid_list_len--;
for (i = 0; i < oid_list_len; i++)
oid_list_todo[i] = 1;
/* We're going to construct n linked lists, one for each "value".
* value_list_head will contain pointers to the heads of these linked lists,
* value_list_tail will contain pointers to the tail of the lists. */
value_list_head = calloc(data->values_len, sizeof(*value_list_head));
value_list_tail = calloc(data->values_len, sizeof(*value_list_tail));
if ((value_list_head == NULL) || (value_list_tail == NULL)) {
ERROR("snmp plugin: csnmp_read_table: calloc failed.");
sfree(value_list_head);
sfree(value_list_tail);
return (-1);
}
instance_list_head = NULL;
instance_list_tail = NULL;
status = 0;
while (status == 0) {
int oid_list_todo_num;
req = snmp_pdu_create(SNMP_MSG_GETNEXT);
if (req == NULL) {
ERROR("snmp plugin: snmp_pdu_create failed.");
status = -1;
break;
}
oid_list_todo_num = 0;
for (i = 0; i < oid_list_len; i++) {
/* Do not rerequest already finished OIDs */
if (!oid_list_todo[i])
continue;
oid_list_todo_num++;
snmp_add_null_var(req, oid_list[i].oid, oid_list[i].oid_len);
}
if (oid_list_todo_num == 0) {
/* The request is still empty - so we are finished */
DEBUG("snmp plugin: all variables have left their subtree");
status = 0;
break;
}
res = NULL;
status = snmp_sess_synch_response(host->sess_handle, req, &res);
if ((status != STAT_SUCCESS) || (res == NULL)) {
char *errstr = NULL;
snmp_sess_error(host->sess_handle, NULL, NULL, &errstr);
c_complain(LOG_ERR, &host->complaint,
"snmp plugin: host %s: snmp_sess_synch_response failed: %s",
host->name, (errstr == NULL) ? "Unknown problem" : errstr);
if (res != NULL)
snmp_free_pdu(res);
res = NULL;
/* snmp_synch_response already freed our PDU */
req = NULL;
sfree(errstr);
csnmp_host_close_session(host);
status = -1;
break;
}
status = 0;
assert(res != NULL);
c_release(LOG_INFO, &host->complaint,
"snmp plugin: host %s: snmp_sess_synch_response successful.",
host->name);
vb = res->variables;
if (vb == NULL) {
status = -1;
break;
}
for (vb = res->variables, i = 0; (vb != NULL);
vb = vb->next_variable, i++) {
/* Calculate value index from todo list */
while ((i < oid_list_len) && !oid_list_todo[i])
i++;
/* An instance is configured and the res variable we process is the
* instance value (last index) */
if ((data->instance.oid.oid_len > 0) && (i == data->values_len)) {
if ((vb->type == SNMP_ENDOFMIBVIEW) ||
(snmp_oid_ncompare(
data->instance.oid.oid, data->instance.oid.oid_len, vb->name,
vb->name_length, data->instance.oid.oid_len) != 0)) {
DEBUG("snmp plugin: host = %s; data = %s; Instance left its subtree.",
host->name, data->name);
oid_list_todo[i] = 0;
continue;
}
/* Allocate a new `csnmp_list_instances_t', insert the instance name and
* add it to the list */
if (csnmp_instance_list_add(&instance_list_head, &instance_list_tail,
res, host, data) != 0) {
ERROR("snmp plugin: host %s: csnmp_instance_list_add failed.",
host->name);
status = -1;
break;
}
} else /* The variable we are processing is a normal value */
{
csnmp_table_values_t *vt;
oid_t vb_name;
oid_t suffix;
int ret;
csnmp_oid_init(&vb_name, vb->name, vb->name_length);
/* Calculate the current suffix. This is later used to check that the
* suffix is increasing. This also checks if we left the subtree */
ret = csnmp_oid_suffix(&suffix, &vb_name, data->values + i);
if (ret != 0) {
DEBUG("snmp plugin: host = %s; data = %s; i = %zu; "
"Value probably left its subtree.",
host->name, data->name, i);
oid_list_todo[i] = 0;
continue;
}
/* Make sure the OIDs returned by the agent are increasing. Otherwise
* our
* table matching algorithm will get confused. */
if ((value_list_tail[i] != NULL) &&
(csnmp_oid_compare(&suffix, &value_list_tail[i]->suffix) <= 0)) {
DEBUG("snmp plugin: host = %s; data = %s; i = %zu; "
"Suffix is not increasing.",
host->name, data->name, i);
oid_list_todo[i] = 0;
continue;
}
vt = calloc(1, sizeof(*vt));
if (vt == NULL) {
ERROR("snmp plugin: calloc failed.");
status = -1;
break;
}
vt->value =
csnmp_value_list_to_value(vb, ds->ds[i].type, data->scale,
data->shift, host->name, data->name);
memcpy(&vt->suffix, &suffix, sizeof(vt->suffix));
vt->next = NULL;
if (value_list_tail[i] == NULL)
value_list_head[i] = vt;
else
value_list_tail[i]->next = vt;
value_list_tail[i] = vt;
}
/* Copy OID to oid_list[i] */
memcpy(oid_list[i].oid, vb->name, sizeof(oid) * vb->name_length);
oid_list[i].oid_len = vb->name_length;
} /* for (vb = res->variables ...) */
if (res != NULL)
snmp_free_pdu(res);
res = NULL;
} /* while (status == 0) */
if (res != NULL)
snmp_free_pdu(res);
res = NULL;
if (req != NULL)
snmp_free_pdu(req);
req = NULL;
if (status == 0)
csnmp_dispatch_table(host, data, instance_list_head, value_list_head);
/* Free all allocated variables here */
while (instance_list_head != NULL) {
csnmp_list_instances_t *next = instance_list_head->next;
sfree(instance_list_head);
instance_list_head = next;
}
for (i = 0; i < data->values_len; i++) {
while (value_list_head[i] != NULL) {
csnmp_table_values_t *next = value_list_head[i]->next;
sfree(value_list_head[i]);
value_list_head[i] = next;
}
}
sfree(value_list_head);
sfree(value_list_tail);
return (0);
} /* int csnmp_read_table */
| CWE-415 | 180,839 | 2,392 | 218389400164176202416425166145247212945 | null | null | null
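csnmp_read_table above builds one linked list per value column and keeps both a head and a tail pointer for each, so every append is constant time and the first element is handled by the tail == NULL case. A stripped-down sketch of that append pattern; the node type here is invented for the example.

#include <stdio.h>
#include <stdlib.h>

struct node { int value; struct node *next; };

static void append(struct node **head, struct node **tail, int value)
{
    struct node *n = calloc(1, sizeof(*n));
    if (n == NULL)
        return;                 /* a real caller would propagate ENOMEM */
    n->value = value;

    if (*tail == NULL)
        *head = n;              /* first element becomes the head */
    else
        (*tail)->next = n;      /* otherwise hang it off the old tail */
    *tail = n;
}

int main(void)
{
    struct node *head = NULL, *tail = NULL;

    for (int i = 1; i <= 3; i++)
        append(&head, &tail, i * 10);

    for (struct node *p = head; p != NULL; ) {
        struct node *next = p->next;
        printf("%d\n", p->value);
        free(p);
        p = next;
    }
    return 0;
}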
radare2 | 2ca9ab45891b6ae8e32b6c28c81eebca059cbe5d | 1 |
static const ut8 *r_bin_dwarf_parse_comp_unit(Sdb *s, const ut8 *obuf,
RBinDwarfCompUnit *cu, const RBinDwarfDebugAbbrev *da,
size_t offset, const ut8 *debug_str, size_t debug_str_len) {
const ut8 *buf = obuf, *buf_end = obuf + (cu->hdr.length - 7);
ut64 abbr_code;
size_t i;
if (cu->hdr.length > debug_str_len) {
return NULL;
}
while (buf && buf < buf_end && buf >= obuf) {
if (cu->length && cu->capacity == cu->length) {
r_bin_dwarf_expand_cu (cu);
}
buf = r_uleb128 (buf, buf_end - buf, &abbr_code);
if (abbr_code > da->length || !buf) {
return NULL;
}
r_bin_dwarf_init_die (&cu->dies[cu->length]);
if (!abbr_code) {
cu->dies[cu->length].abbrev_code = 0;
cu->length++;
buf++;
continue;
}
cu->dies[cu->length].abbrev_code = abbr_code;
cu->dies[cu->length].tag = da->decls[abbr_code - 1].tag;
abbr_code += offset;
if (da->capacity < abbr_code) {
return NULL;
}
for (i = 0; i < da->decls[abbr_code - 1].length; i++) {
if (cu->dies[cu->length].length == cu->dies[cu->length].capacity) {
r_bin_dwarf_expand_die (&cu->dies[cu->length]);
}
if (i >= cu->dies[cu->length].capacity || i >= da->decls[abbr_code - 1].capacity) {
eprintf ("Warning: malformed dwarf attribute capacity doesn't match length\n");
break;
}
memset (&cu->dies[cu->length].attr_values[i], 0, sizeof
(cu->dies[cu->length].attr_values[i]));
buf = r_bin_dwarf_parse_attr_value (buf, buf_end - buf,
&da->decls[abbr_code - 1].specs[i],
&cu->dies[cu->length].attr_values[i],
&cu->hdr, debug_str, debug_str_len);
if (cu->dies[cu->length].attr_values[i].name == DW_AT_comp_dir) {
const char *name = cu->dies[cu->length].attr_values[i].encoding.str_struct.string;
sdb_set (s, "DW_AT_comp_dir", name, 0);
}
cu->dies[cu->length].length++;
}
cu->length++;
}
return buf;
}
| CWE-125 | 180,842 | 2,395 | 96593607869001604723535959978510375078 | null | null | null
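The DWARF walker above reads abbreviation codes with r_uleb128. ULEB128 encodes an unsigned integer seven bits per byte, least-significant group first, with the top bit of each byte marking continuation. A self-contained decoder sketch using the classic 624485 test vector:

#include <stdint.h>
#include <stdio.h>

/* Decode one ULEB128 value; returns the pointer just past it, or NULL
 * if the buffer ends before the value is terminated. */
static const uint8_t *uleb128(const uint8_t *p, const uint8_t *end,
                              uint64_t *out)
{
    uint64_t value = 0;
    unsigned shift = 0;

    while (p < end) {
        uint8_t byte = *p++;
        value |= (uint64_t)(byte & 0x7f) << shift;
        if ((byte & 0x80) == 0) {
            *out = value;
            return p;
        }
        shift += 7;
        if (shift >= 64)
            return NULL;        /* value does not fit, treat as malformed */
    }
    return NULL;                /* ran off the end of the buffer */
}

int main(void)
{
    /* 624485 encodes as e5 8e 26 in ULEB128. */
    const uint8_t buf[] = { 0xe5, 0x8e, 0x26 };
    uint64_t v = 0;

    if (uleb128(buf, buf + sizeof(buf), &v))
        printf("%llu\n", (unsigned long long)v);
    return 0;
}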
libav | cd4663dc80323ba64989d0c103d51ad3ee0e9c2f | 1 |
static int smacker_decode_tree(BitstreamContext *bc, HuffContext *hc,
uint32_t prefix, int length)
{
if (!bitstream_read_bit(bc)) { // Leaf
if(hc->current >= 256){
av_log(NULL, AV_LOG_ERROR, "Tree size exceeded!\n");
return AVERROR_INVALIDDATA;
}
if(length){
hc->bits[hc->current] = prefix;
hc->lengths[hc->current] = length;
} else {
hc->bits[hc->current] = 0;
hc->lengths[hc->current] = 0;
}
hc->values[hc->current] = bitstream_read(bc, 8);
hc->current++;
if(hc->maxlength < length)
hc->maxlength = length;
return 0;
} else { //Node
int r;
length++;
r = smacker_decode_tree(bc, hc, prefix, length);
if(r)
return r;
return smacker_decode_tree(bc, hc, prefix | (1 << (length - 1)), length);
}
}
| CWE-119 | 180,843 | 2,396 | 123413656536164614730473477667931906703 | null | null | null
linux | a50829479f58416a013a4ccca791336af3c584c7 | 1 |
static void parse_hid_report_descriptor(struct gtco *device, char * report,
int length)
{
struct device *ddev = &device->intf->dev;
int x, i = 0;
/* Tag primitive vars */
__u8 prefix;
__u8 size;
__u8 tag;
__u8 type;
__u8 data = 0;
__u16 data16 = 0;
__u32 data32 = 0;
/* For parsing logic */
int inputnum = 0;
__u32 usage = 0;
/* Global Values, indexed by TAG */
__u32 globalval[TAG_GLOB_MAX];
__u32 oldval[TAG_GLOB_MAX];
/* Debug stuff */
char maintype = 'x';
char globtype[12];
int indent = 0;
char indentstr[10] = "";
dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n");
/* Walk this report and pull out the info we need */
while (i < length) {
prefix = report[i];
/* Skip over prefix */
i++;
/* Determine data size and save the data in the proper variable */
size = PREF_SIZE(prefix);
switch (size) {
case 1:
data = report[i];
break;
case 2:
data16 = get_unaligned_le16(&report[i]);
break;
case 3:
size = 4;
data32 = get_unaligned_le32(&report[i]);
break;
}
/* Skip size of data */
i += size;
/* What we do depends on the tag type */
tag = PREF_TAG(prefix);
type = PREF_TYPE(prefix);
switch (type) {
case TYPE_MAIN:
strcpy(globtype, "");
switch (tag) {
case TAG_MAIN_INPUT:
/*
* The INPUT MAIN tag signifies this is
* information from a report. We need to
* figure out what it is and store the
* min/max values
*/
maintype = 'I';
if (data == 2)
strcpy(globtype, "Variable");
else if (data == 3)
strcpy(globtype, "Var|Const");
dev_dbg(ddev, "::::: Saving Report: %d input #%d Max: 0x%X(%d) Min:0x%X(%d) of %d bits\n",
globalval[TAG_GLOB_REPORT_ID], inputnum,
globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MAX],
globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_LOG_MIN],
globalval[TAG_GLOB_REPORT_SZ] * globalval[TAG_GLOB_REPORT_CNT]);
/*
We can assume that the first two input items
are always the X and Y coordinates. After
that, we look for everything else by
local usage value
*/
switch (inputnum) {
case 0: /* X coord */
dev_dbg(ddev, "GER: X Usage: 0x%x\n", usage);
if (device->max_X == 0) {
device->max_X = globalval[TAG_GLOB_LOG_MAX];
device->min_X = globalval[TAG_GLOB_LOG_MIN];
}
break;
case 1: /* Y coord */
dev_dbg(ddev, "GER: Y Usage: 0x%x\n", usage);
if (device->max_Y == 0) {
device->max_Y = globalval[TAG_GLOB_LOG_MAX];
device->min_Y = globalval[TAG_GLOB_LOG_MIN];
}
break;
default:
/* Tilt X */
if (usage == DIGITIZER_USAGE_TILT_X) {
if (device->maxtilt_X == 0) {
device->maxtilt_X = globalval[TAG_GLOB_LOG_MAX];
device->mintilt_X = globalval[TAG_GLOB_LOG_MIN];
}
}
/* Tilt Y */
if (usage == DIGITIZER_USAGE_TILT_Y) {
if (device->maxtilt_Y == 0) {
device->maxtilt_Y = globalval[TAG_GLOB_LOG_MAX];
device->mintilt_Y = globalval[TAG_GLOB_LOG_MIN];
}
}
/* Pressure */
if (usage == DIGITIZER_USAGE_TIP_PRESSURE) {
if (device->maxpressure == 0) {
device->maxpressure = globalval[TAG_GLOB_LOG_MAX];
device->minpressure = globalval[TAG_GLOB_LOG_MIN];
}
}
break;
}
inputnum++;
break;
case TAG_MAIN_OUTPUT:
maintype = 'O';
break;
case TAG_MAIN_FEATURE:
maintype = 'F';
break;
case TAG_MAIN_COL_START:
maintype = 'S';
if (data == 0) {
dev_dbg(ddev, "======>>>>>> Physical\n");
strcpy(globtype, "Physical");
} else
dev_dbg(ddev, "======>>>>>>\n");
/* Indent the debug output */
indent++;
for (x = 0; x < indent; x++)
indentstr[x] = '-';
indentstr[x] = 0;
/* Save global tags */
for (x = 0; x < TAG_GLOB_MAX; x++)
oldval[x] = globalval[x];
break;
case TAG_MAIN_COL_END:
dev_dbg(ddev, "<<<<<<======\n");
maintype = 'E';
indent--;
for (x = 0; x < indent; x++)
indentstr[x] = '-';
indentstr[x] = 0;
/* Copy global tags back */
for (x = 0; x < TAG_GLOB_MAX; x++)
globalval[x] = oldval[x];
break;
}
switch (size) {
case 1:
dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n",
indentstr, tag, maintype, size, globtype, data);
break;
case 2:
dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n",
indentstr, tag, maintype, size, globtype, data16);
break;
case 4:
dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n",
indentstr, tag, maintype, size, globtype, data32);
break;
}
break;
case TYPE_GLOBAL:
switch (tag) {
case TAG_GLOB_USAGE:
/*
* First time we hit the global usage tag,
* it should tell us the type of device
*/
if (device->usage == 0)
device->usage = data;
strcpy(globtype, "USAGE");
break;
case TAG_GLOB_LOG_MIN:
strcpy(globtype, "LOG_MIN");
break;
case TAG_GLOB_LOG_MAX:
strcpy(globtype, "LOG_MAX");
break;
case TAG_GLOB_PHYS_MIN:
strcpy(globtype, "PHYS_MIN");
break;
case TAG_GLOB_PHYS_MAX:
strcpy(globtype, "PHYS_MAX");
break;
case TAG_GLOB_UNIT_EXP:
strcpy(globtype, "EXP");
break;
case TAG_GLOB_UNIT:
strcpy(globtype, "UNIT");
break;
case TAG_GLOB_REPORT_SZ:
strcpy(globtype, "REPORT_SZ");
break;
case TAG_GLOB_REPORT_ID:
strcpy(globtype, "REPORT_ID");
/* New report, restart numbering */
inputnum = 0;
break;
case TAG_GLOB_REPORT_CNT:
strcpy(globtype, "REPORT_CNT");
break;
case TAG_GLOB_PUSH:
strcpy(globtype, "PUSH");
break;
case TAG_GLOB_POP:
strcpy(globtype, "POP");
break;
}
/* Check to make sure we have a good tag number
so we don't overflow array */
if (tag < TAG_GLOB_MAX) {
switch (size) {
case 1:
dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n",
indentstr, globtype, tag, size, data);
globalval[tag] = data;
break;
case 2:
dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n",
indentstr, globtype, tag, size, data16);
globalval[tag] = data16;
break;
case 4:
dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n",
indentstr, globtype, tag, size, data32);
globalval[tag] = data32;
break;
}
} else {
dev_dbg(ddev, "%sGLOBALTAG: ILLEGAL TAG:%d SIZE: %d\n",
indentstr, tag, size);
}
break;
case TYPE_LOCAL:
switch (tag) {
case TAG_GLOB_USAGE:
strcpy(globtype, "USAGE");
/* Always 1 byte */
usage = data;
break;
case TAG_GLOB_LOG_MIN:
strcpy(globtype, "MIN");
break;
case TAG_GLOB_LOG_MAX:
strcpy(globtype, "MAX");
break;
default:
strcpy(globtype, "UNKNOWN");
break;
}
switch (size) {
case 1:
dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n",
indentstr, tag, globtype, size, data);
break;
case 2:
dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n",
indentstr, tag, globtype, size, data16);
break;
case 4:
dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n",
indentstr, tag, globtype, size, data32);
break;
}
break;
}
}
}
| CWE-125 | 180,845 | 2,397 | 179175241032229645743598866903702095995 | null | null | null
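parse_hid_report_descriptor above is driven by the PREF_SIZE/PREF_TAG/PREF_TYPE fields of each item's prefix byte. In the HID short-item format the low two bits give the payload size (with the value 3 meaning four bytes, the same special case the parser applies), bits 2-3 the item type, and the high nibble the tag. A standalone sketch of that split; the macro names are local to this example.

#include <stdint.h>
#include <stdio.h>

#define ITEM_SIZE(p) ((p) & 0x03)          /* 0, 1, 2 or "3" = 4 bytes      */
#define ITEM_TYPE(p) (((p) >> 2) & 0x03)   /* 0 main, 1 global, 2 local     */
#define ITEM_TAG(p)  (((p) >> 4) & 0x0f)

int main(void)
{
    /* 0x75 is "Report Size": global tag 0x7 with a one-byte payload. */
    uint8_t prefix = 0x75;
    unsigned size = ITEM_SIZE(prefix);

    if (size == 3)
        size = 4;               /* the same adjustment the parser makes */

    printf("tag=0x%x type=%u payload=%u byte(s)\n",
           (unsigned)ITEM_TAG(prefix), (unsigned)ITEM_TYPE(prefix), size);
    return 0;
}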
ImageMagick | 2130bf6f89ded32ef0c88a11694f107c52566c53 | 1 |
static Image *ReadWPGImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
typedef struct
{
size_t FileId;
MagickOffsetType DataOffset;
unsigned int ProductType;
unsigned int FileType;
unsigned char MajorVersion;
unsigned char MinorVersion;
unsigned int EncryptKey;
unsigned int Reserved;
} WPGHeader;
typedef struct
{
unsigned char RecType;
size_t RecordLength;
} WPGRecord;
typedef struct
{
unsigned char Class;
unsigned char RecType;
size_t Extension;
size_t RecordLength;
} WPG2Record;
typedef struct
{
unsigned HorizontalUnits;
unsigned VerticalUnits;
unsigned char PosSizePrecision;
} WPG2Start;
typedef struct
{
unsigned int Width;
unsigned int Height;
unsigned int Depth;
unsigned int HorzRes;
unsigned int VertRes;
} WPGBitmapType1;
typedef struct
{
unsigned int Width;
unsigned int Height;
unsigned char Depth;
unsigned char Compression;
} WPG2BitmapType1;
typedef struct
{
unsigned int RotAngle;
unsigned int LowLeftX;
unsigned int LowLeftY;
unsigned int UpRightX;
unsigned int UpRightY;
unsigned int Width;
unsigned int Height;
unsigned int Depth;
unsigned int HorzRes;
unsigned int VertRes;
} WPGBitmapType2;
typedef struct
{
unsigned int StartIndex;
unsigned int NumOfEntries;
} WPGColorMapRec;
/*
typedef struct {
size_t PS_unknown1;
unsigned int PS_unknown2;
unsigned int PS_unknown3;
} WPGPSl1Record;
*/
Image
*image;
unsigned int
status;
WPGHeader
Header;
WPGRecord
Rec;
WPG2Record
Rec2;
WPG2Start StartWPG;
WPGBitmapType1
BitmapHeader1;
WPG2BitmapType1
Bitmap2Header1;
WPGBitmapType2
BitmapHeader2;
WPGColorMapRec
WPG_Palette;
int
i,
bpp,
WPG2Flags;
ssize_t
ldblk;
size_t
one;
unsigned char
*BImgBuff;
tCTM CTM; /*current transform matrix*/
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
one=1;
image=AcquireImage(image_info,exception);
image->depth=8;
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read WPG image.
*/
Header.FileId=ReadBlobLSBLong(image);
Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image);
Header.ProductType=ReadBlobLSBShort(image);
Header.FileType=ReadBlobLSBShort(image);
Header.MajorVersion=ReadBlobByte(image);
Header.MinorVersion=ReadBlobByte(image);
Header.EncryptKey=ReadBlobLSBShort(image);
Header.Reserved=ReadBlobLSBShort(image);
if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (Header.EncryptKey!=0)
ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported");
image->columns = 1;
image->rows = 1;
image->colors = 0;
bpp=0;
BitmapHeader2.RotAngle=0;
Rec2.RecordLength=0;
switch(Header.FileType)
{
case 1: /* WPG level 1 */
while(!EOFBlob(image)) /* object parser loop */
{
(void) SeekBlob(image,Header.DataOffset,SEEK_SET);
if(EOFBlob(image))
break;
Rec.RecType=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rd_WP_DWORD(image,&Rec.RecordLength);
if (Rec.RecordLength > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if(EOFBlob(image))
break;
Header.DataOffset=TellBlob(image)+Rec.RecordLength;
switch(Rec.RecType)
{
case 0x0B: /* bitmap type 1 */
BitmapHeader1.Width=ReadBlobLSBShort(image);
BitmapHeader1.Height=ReadBlobLSBShort(image);
if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
BitmapHeader1.Depth=ReadBlobLSBShort(image);
BitmapHeader1.HorzRes=ReadBlobLSBShort(image);
BitmapHeader1.VertRes=ReadBlobLSBShort(image);
if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes)
{
image->units=PixelsPerCentimeterResolution;
image->resolution.x=BitmapHeader1.HorzRes/470.0;
image->resolution.y=BitmapHeader1.VertRes/470.0;
}
image->columns=BitmapHeader1.Width;
image->rows=BitmapHeader1.Height;
bpp=BitmapHeader1.Depth;
goto UnpackRaster;
case 0x0E: /*Color palette */
WPG_Palette.StartIndex=ReadBlobLSBShort(image);
WPG_Palette.NumOfEntries=ReadBlobLSBShort(image);
if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) >
(Rec2.RecordLength-2-2) / 3)
ThrowReaderException(CorruptImageError,"InvalidColormapIndex");
image->colors=WPG_Palette.NumOfEntries;
if (!AcquireImageColormap(image,image->colors,exception))
goto NoMemory;
for (i=WPG_Palette.StartIndex;
i < (int)WPG_Palette.NumOfEntries; i++)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
}
break;
case 0x11: /* Start PS l1 */
if(Rec.RecordLength > 8)
image=ExtractPostscript(image,image_info,
TellBlob(image)+8, /* skip PS header in the wpg */
(ssize_t) Rec.RecordLength-8,exception);
break;
case 0x14: /* bitmap type 2 */
BitmapHeader2.RotAngle=ReadBlobLSBShort(image);
BitmapHeader2.LowLeftX=ReadBlobLSBShort(image);
BitmapHeader2.LowLeftY=ReadBlobLSBShort(image);
BitmapHeader2.UpRightX=ReadBlobLSBShort(image);
BitmapHeader2.UpRightY=ReadBlobLSBShort(image);
BitmapHeader2.Width=ReadBlobLSBShort(image);
BitmapHeader2.Height=ReadBlobLSBShort(image);
if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
BitmapHeader2.Depth=ReadBlobLSBShort(image);
BitmapHeader2.HorzRes=ReadBlobLSBShort(image);
BitmapHeader2.VertRes=ReadBlobLSBShort(image);
image->units=PixelsPerCentimeterResolution;
image->page.width=(unsigned int)
((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0);
image->page.height=(unsigned int)
((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightY)/470.0);
image->page.x=(int) (BitmapHeader2.LowLeftX/470.0);
image->page.y=(int) (BitmapHeader2.LowLeftX/470.0);
if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes)
{
image->resolution.x=BitmapHeader2.HorzRes/470.0;
image->resolution.y=BitmapHeader2.VertRes/470.0;
}
image->columns=BitmapHeader2.Width;
image->rows=BitmapHeader2.Height;
bpp=BitmapHeader2.Depth;
UnpackRaster:
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
break;
if ((image->colors == 0) && (bpp <= 16))
{
image->colors=one << bpp;
if (!AcquireImageColormap(image,image->colors,exception))
{
NoMemory:
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
/* printf("Load default colormap \n"); */
for (i=0; (i < (int) image->colors) && (i < 256); i++)
{
image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red);
image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green);
image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue);
}
}
else
{
if (bpp < 24)
if ( (image->colors < (one << bpp)) && (bpp != 24) )
image->colormap=(PixelInfo *) ResizeQuantumMemory(
image->colormap,(size_t) (one << bpp),
sizeof(*image->colormap));
}
if (bpp == 1)
{
if(image->colormap[0].red==0 &&
image->colormap[0].green==0 &&
image->colormap[0].blue==0 &&
image->colormap[1].red==0 &&
image->colormap[1].green==0 &&
image->colormap[1].blue==0)
{ /* fix crippled monochrome palette */
image->colormap[1].red =
image->colormap[1].green =
image->colormap[1].blue = QuantumRange;
}
}
if(UnpackWPGRaster(image,bpp,exception) < 0)
/* The raster cannot be unpacked */
{
DecompressionFailed:
ThrowReaderException(CoderError,"UnableToDecompressImage");
}
if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping)
{
/* flop command */
if(BitmapHeader2.RotAngle & 0x8000)
{
Image
*flop_image;
flop_image = FlopImage(image, exception);
if (flop_image != (Image *) NULL) {
DuplicateBlob(flop_image,image);
ReplaceImageInList(&image,flop_image);
}
}
/* flip command */
if(BitmapHeader2.RotAngle & 0x2000)
{
Image
*flip_image;
flip_image = FlipImage(image, exception);
if (flip_image != (Image *) NULL) {
DuplicateBlob(flip_image,image);
ReplaceImageInList(&image,flip_image);
}
}
/* rotate command */
if(BitmapHeader2.RotAngle & 0x0FFF)
{
Image
*rotate_image;
rotate_image=RotateImage(image,(BitmapHeader2.RotAngle &
0x0FFF), exception);
if (rotate_image != (Image *) NULL) {
DuplicateBlob(rotate_image,image);
ReplaceImageInList(&image,rotate_image);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
image->depth=8;
if (image->next == (Image *) NULL)
goto Finish;
image=SyncNextImageInList(image);
image->columns=image->rows=1;
image->colors=0;
break;
case 0x1B: /* Postscript l2 */
if(Rec.RecordLength>0x3C)
image=ExtractPostscript(image,image_info,
TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */
(ssize_t) Rec.RecordLength-0x3C,exception);
break;
}
}
break;
case 2: /* WPG level 2 */
(void) memset(CTM,0,sizeof(CTM));
StartWPG.PosSizePrecision = 0;
while(!EOFBlob(image)) /* object parser loop */
{
(void) SeekBlob(image,Header.DataOffset,SEEK_SET);
if(EOFBlob(image))
break;
Rec2.Class=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rec2.RecType=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rd_WP_DWORD(image,&Rec2.Extension);
Rd_WP_DWORD(image,&Rec2.RecordLength);
if(EOFBlob(image))
break;
Header.DataOffset=TellBlob(image)+Rec2.RecordLength;
switch(Rec2.RecType)
{
case 1:
StartWPG.HorizontalUnits=ReadBlobLSBShort(image);
StartWPG.VerticalUnits=ReadBlobLSBShort(image);
StartWPG.PosSizePrecision=ReadBlobByte(image);
break;
case 0x0C: /* Color palette */
WPG_Palette.StartIndex=ReadBlobLSBShort(image);
WPG_Palette.NumOfEntries=ReadBlobLSBShort(image);
if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) >
(Rec2.RecordLength-2-2) / 3)
ThrowReaderException(CorruptImageError,"InvalidColormapIndex");
image->colors=WPG_Palette.NumOfEntries;
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
for (i=WPG_Palette.StartIndex;
i < (int)WPG_Palette.NumOfEntries; i++)
{
image->colormap[i].red=ScaleCharToQuantum((char)
ReadBlobByte(image));
image->colormap[i].green=ScaleCharToQuantum((char)
ReadBlobByte(image));
image->colormap[i].blue=ScaleCharToQuantum((char)
ReadBlobByte(image));
(void) ReadBlobByte(image); /*Opacity??*/
}
break;
case 0x0E:
Bitmap2Header1.Width=ReadBlobLSBShort(image);
Bitmap2Header1.Height=ReadBlobLSBShort(image);
if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
Bitmap2Header1.Depth=ReadBlobByte(image);
Bitmap2Header1.Compression=ReadBlobByte(image);
if(Bitmap2Header1.Compression > 1)
continue; /*Unknown compression method */
switch(Bitmap2Header1.Depth)
{
case 1:
bpp=1;
break;
case 2:
bpp=2;
break;
case 3:
bpp=4;
break;
case 4:
bpp=8;
break;
case 8:
bpp=24;
break;
default:
continue; /*Ignore raster with unknown depth*/
}
image->columns=Bitmap2Header1.Width;
image->rows=Bitmap2Header1.Height;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
break;
if ((image->colors == 0) && (bpp != 24))
{
image->colors=one << bpp;
if (!AcquireImageColormap(image,image->colors,exception))
goto NoMemory;
}
else
{
if(bpp < 24)
if( image->colors<(one << bpp) && bpp!=24 )
image->colormap=(PixelInfo *) ResizeQuantumMemory(
image->colormap,(size_t) (one << bpp),
sizeof(*image->colormap));
}
switch(Bitmap2Header1.Compression)
{
case 0: /*Uncompressed raster*/
{
ldblk=(ssize_t) ((bpp*image->columns+7)/8);
BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t)
ldblk+1,sizeof(*BImgBuff));
if (BImgBuff == (unsigned char *) NULL)
goto NoMemory;
for(i=0; i< (ssize_t) image->rows; i++)
{
(void) ReadBlob(image,ldblk,BImgBuff);
InsertRow(image,BImgBuff,i,bpp,exception);
}
if(BImgBuff)
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
break;
}
case 1: /*RLE for WPG2 */
{
if( UnpackWPG2Raster(image,bpp,exception) < 0)
goto DecompressionFailed;
break;
}
}
if(CTM[0][0]<0 && !image_info->ping)
{ /*?? RotAngle=360-RotAngle;*/
Image
*flop_image;
flop_image = FlopImage(image, exception);
if (flop_image != (Image *) NULL) {
DuplicateBlob(flop_image,image);
ReplaceImageInList(&image,flop_image);
}
/* Try to change CTM according to Flip - I am not sure, must be checked.
Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0;
Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0;
Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll);
Tx(1,2)=0; Tx(2,2)=1; */
}
if(CTM[1][1]<0 && !image_info->ping)
{ /*?? RotAngle=360-RotAngle;*/
Image
*flip_image;
flip_image = FlipImage(image, exception);
if (flip_image != (Image *) NULL) {
DuplicateBlob(flip_image,image);
ReplaceImageInList(&image,flip_image);
}
/* Try to change CTM according to Flip - I am not sure, must be checked.
float_matrix Tx(3,3);
Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0;
Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0;
Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll);
Tx(2,2)=1; */
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
image->depth=8;
if (image->next == (Image *) NULL)
goto Finish;
image=SyncNextImageInList(image);
image->columns=image->rows=1;
image->colors=0;
break;
case 0x12: /* Postscript WPG2*/
i=ReadBlobLSBShort(image);
if(Rec2.RecordLength > (unsigned int) i)
image=ExtractPostscript(image,image_info,
TellBlob(image)+i, /*skip PS header in the wpg2*/
(ssize_t) (Rec2.RecordLength-i-2),exception);
break;
case 0x1B: /*bitmap rectangle*/
WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM);
(void) WPG2Flags;
break;
}
}
break;
default:
{
ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported");
}
}
Finish:
(void) CloseBlob(image);
{
Image
*p;
ssize_t
scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers.
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=(size_t) scene++;
}
if (image == (Image *) NULL)
ThrowReaderException(CorruptImageError,
"ImageFileDoesNotContainAnyImageData");
return(image);
}
|
CWE-119
| 180,846 | 2,398 |
159163992560882621855337767657661558656
| null | null | null |
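One detail worth flagging in the WPG level 1 branch above: the palette guard in the case 0x0E record compares the entry count against Rec2.RecordLength even though level 1 records are described by Rec, and Rec2 is still zero-initialized at that point, so the bound being tested is not the length of the record that was just read. That mismatch, together with the bpp-driven colormap resizes, fits the CWE-119 label on this record. A small self-contained sketch of a palette count check tied to the actual record length (4 header bytes, 3 bytes per RGB entry); the names are illustrative rather than the coder's API.

#include <stddef.h>

/* A palette is only acceptable if its claimed entries fit inside the record
 * that carries them. */
static int palette_count_fits(size_t record_length, unsigned start, unsigned count)
{
    if (record_length < 4)                     /* start index + entry count header */
        return 0;
    if (count < start)
        return 0;
    return (size_t)(count - start) <= (record_length - 4) / 3;
}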
linux
|
1c0edc3633b56000e18d82fc241e3995ca18a69e
| 1 |
int usb_get_bos_descriptor(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
unsigned char *buffer;
int length, total_len, num, i;
int ret;
bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
if (!bos)
return -ENOMEM;
/* Get BOS descriptor */
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
if (ret < USB_DT_BOS_SIZE) {
dev_err(ddev, "unable to get BOS descriptor\n");
if (ret >= 0)
ret = -ENOMSG;
kfree(bos);
return ret;
}
length = bos->bLength;
total_len = le16_to_cpu(bos->wTotalLength);
num = bos->bNumDeviceCaps;
kfree(bos);
if (total_len < length)
return -EINVAL;
dev->bos = kzalloc(sizeof(struct usb_host_bos), GFP_KERNEL);
if (!dev->bos)
return -ENOMEM;
/* Now let's get the whole BOS descriptor set */
buffer = kzalloc(total_len, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto err;
}
dev->bos->desc = (struct usb_bos_descriptor *)buffer;
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
if (ret < total_len) {
dev_err(ddev, "unable to get BOS descriptor set\n");
if (ret >= 0)
ret = -ENOMSG;
goto err;
}
total_len -= length;
for (i = 0; i < num; i++) {
buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
length = cap->bLength;
if (total_len < length)
break;
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_warn(ddev, "descriptor type invalid, skip\n");
continue;
}
switch (cap->bDevCapabilityType) {
case USB_CAP_TYPE_WIRELESS_USB:
/* Wireless USB cap descriptor is handled by wusb */
break;
case USB_CAP_TYPE_EXT:
dev->bos->ext_cap =
(struct usb_ext_cap_descriptor *)buffer;
break;
case USB_SS_CAP_TYPE:
dev->bos->ss_cap =
(struct usb_ss_cap_descriptor *)buffer;
break;
case USB_SSP_CAP_TYPE:
dev->bos->ssp_cap =
(struct usb_ssp_cap_descriptor *)buffer;
break;
case CONTAINER_ID_TYPE:
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
break;
case USB_PTM_CAP_TYPE:
dev->bos->ptm_cap =
(struct usb_ptm_cap_descriptor *)buffer;
default:
break;
}
}
return 0;
err:
usb_release_bos_descriptor(dev);
return ret;
}
|
CWE-125
| 180,847 | 2,399 |
326295248605660377066265869143708055629
| null | null | null |
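In the capability walk above, each iteration advances by the previous descriptor's bLength and then dereferences cap->bLength and cap->bDescriptorType; total_len is only compared against the new length after that read, so a BOS set whose wTotalLength leaves fewer than three bytes before the last claimed capability makes the header read itself go out of bounds, which matches the CWE-125 tag. A minimal sketch of the remaining-length guard, using an illustrative mirror of the three-byte capability header:

#include <stddef.h>
#include <stdint.h>

struct cap_header {                            /* illustrative stand-in for usb_dev_cap_header */
    uint8_t bLength;
    uint8_t bDescriptorType;
    uint8_t bDevCapabilityType;
};

/* Stop walking when the remaining bytes cannot even hold a capability header. */
static const struct cap_header *next_cap(const uint8_t **p, size_t *remaining)
{
    if (*remaining < sizeof(struct cap_header))
        return NULL;                           /* header itself would be an out-of-bounds read */
    const struct cap_header *cap = (const struct cap_header *)*p;
    if (cap->bLength < sizeof(struct cap_header) || cap->bLength > *remaining)
        return NULL;                           /* claimed length is bogus or overruns the set */
    *p += cap->bLength;
    *remaining -= cap->bLength;
    return cap;
}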
linux
|
2e1c42391ff2556387b3cb6308b24f6f65619feb
| 1 |
int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
struct usb_interface *intf,
u8 *buffer,
int buflen)
{
/* duplicates are ignored */
struct usb_cdc_union_desc *union_header = NULL;
/* duplicates are not tolerated */
struct usb_cdc_header_desc *header = NULL;
struct usb_cdc_ether_desc *ether = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
struct usb_cdc_mdlm_desc *desc = NULL;
unsigned int elength;
int cnt = 0;
memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
hdr->phonet_magic_present = false;
while (buflen > 0) {
elength = buffer[0];
if (!elength) {
dev_err(&intf->dev, "skipping garbage byte\n");
elength = 1;
goto next_desc;
}
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
if (elength < sizeof(struct usb_cdc_union_desc))
goto next_desc;
if (union_header) {
dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
goto next_desc;
}
union_header = (struct usb_cdc_union_desc *)buffer;
break;
case USB_CDC_COUNTRY_TYPE:
if (elength < sizeof(struct usb_cdc_country_functional_desc))
goto next_desc;
hdr->usb_cdc_country_functional_desc =
(struct usb_cdc_country_functional_desc *)buffer;
break;
case USB_CDC_HEADER_TYPE:
if (elength != sizeof(struct usb_cdc_header_desc))
goto next_desc;
if (header)
return -EINVAL;
header = (struct usb_cdc_header_desc *)buffer;
break;
case USB_CDC_ACM_TYPE:
if (elength < sizeof(struct usb_cdc_acm_descriptor))
goto next_desc;
hdr->usb_cdc_acm_descriptor =
(struct usb_cdc_acm_descriptor *)buffer;
break;
case USB_CDC_ETHERNET_TYPE:
if (elength != sizeof(struct usb_cdc_ether_desc))
goto next_desc;
if (ether)
return -EINVAL;
ether = (struct usb_cdc_ether_desc *)buffer;
break;
case USB_CDC_CALL_MANAGEMENT_TYPE:
if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
goto next_desc;
hdr->usb_cdc_call_mgmt_descriptor =
(struct usb_cdc_call_mgmt_descriptor *)buffer;
break;
case USB_CDC_DMM_TYPE:
if (elength < sizeof(struct usb_cdc_dmm_desc))
goto next_desc;
hdr->usb_cdc_dmm_desc =
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_desc *))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
goto next_desc;
if (detail)
return -EINVAL;
detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
break;
case USB_CDC_NCM_TYPE:
if (elength < sizeof(struct usb_cdc_ncm_desc))
goto next_desc;
hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
break;
case USB_CDC_MBIM_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_desc))
goto next_desc;
hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
break;
case USB_CDC_MBIM_EXTENDED_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
break;
hdr->usb_cdc_mbim_extended_desc =
(struct usb_cdc_mbim_extended_desc *)buffer;
break;
case CDC_PHONET_MAGIC_NUMBER:
hdr->phonet_magic_present = true;
break;
default:
/*
* there are LOTS more CDC descriptors that
* could legitimately be found here.
*/
dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
buffer[2], elength);
goto next_desc;
}
cnt++;
next_desc:
buflen -= elength;
buffer += elength;
}
hdr->usb_cdc_union_desc = union_header;
hdr->usb_cdc_header_desc = header;
hdr->usb_cdc_mdlm_detail_desc = detail;
hdr->usb_cdc_mdlm_desc = desc;
hdr->usb_cdc_ether_desc = ether;
return cnt;
}
|
CWE-119
| 180,848 | 2,400 |
126505036785122092950930144116403378139
| null | null | null |
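The descriptor walk above trusts elength = buffer[0]: it reads buffer[1] and buffer[2] and then subtracts elength from buflen without confirming that elength, or even those first three bytes, still fit in what remains, so an element that claims to be longer than the remaining extra data steps the pointer past the end of the buffer (the CWE-119 tag on this record). A compact sketch of the bounded walk; handle() is a hypothetical placeholder for the per-type switch:

#include <stddef.h>
#include <stdint.h>

/* Walk length-prefixed class descriptors, refusing any element that does not
 * fit in the bytes that remain. */
static int walk_descriptors(const uint8_t *buf, size_t buflen,
                            void (*handle)(const uint8_t *desc, size_t len))
{
    while (buflen >= 3) {                      /* need bLength, bDescriptorType, bSubType */
        size_t elength = buf[0];
        if (elength < 3 || elength > buflen)
            return -1;                         /* claimed length runs past the buffer */
        handle(buf, elength);
        buf    += elength;
        buflen -= elength;
    }
    return 0;
}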
linux
|
f043bfc98c193c284e2cd768fefabe18ac2fed9b
| 1 |
static int usbhid_parse(struct hid_device *hid)
{
struct usb_interface *intf = to_usb_interface(hid->dev.parent);
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
int ret, n;
quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
if (quirks & HID_QUIRK_IGNORE)
return -ENODEV;
/* Many keyboards and mice don't like to be polled for reports,
* so we will always set the HID_QUIRK_NOGET flag for them. */
if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) {
if (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD ||
interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE)
quirks |= HID_QUIRK_NOGET;
}
if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
(!interface->desc.bNumEndpoints ||
usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
dbg_hid("class descriptor not present\n");
return -ENODEV;
}
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
for (n = 0; n < hdesc->bNumDescriptors; n++)
if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
dbg_hid("weird size of report descriptor (%u)\n", rsize);
return -EINVAL;
}
rdesc = kmalloc(rsize, GFP_KERNEL);
if (!rdesc)
return -ENOMEM;
hid_set_idle(dev, interface->desc.bInterfaceNumber, 0, 0);
ret = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber,
HID_DT_REPORT, rdesc, rsize);
if (ret < 0) {
dbg_hid("reading report descriptor failed\n");
kfree(rdesc);
goto err;
}
ret = hid_parse_report(hid, rdesc, rsize);
kfree(rdesc);
if (ret) {
dbg_hid("parsing report descriptor failed\n");
goto err;
}
hid->quirks |= quirks;
return 0;
err:
return ret;
}
|
CWE-125
| 180,849 | 2,401 |
185715477481380405991722673664044798446
| null | null | null |
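The report-size loop above indexes hdesc->desc[n] for every n below hdesc->bNumDescriptors, but nothing ties that count to the class descriptor's own bLength, so a short descriptor advertising a large count lets the loop read past it; that is the out-of-bounds read the CWE-125 tag records. A self-contained sketch of the count/length consistency check, with an illustrative mirror of the descriptor layout (the packed attribute and field sizes are assumptions for the sketch):

#include <stddef.h>
#include <stdint.h>

struct class_desc_entry {
    uint8_t  bDescriptorType;
    uint16_t wDescriptorLength;
} __attribute__((packed));

struct class_desc {                            /* illustrative mirror of a HID class descriptor */
    uint8_t  bLength;
    uint8_t  bDescriptorType;
    uint16_t bcdHID;
    uint8_t  bCountryCode;
    uint8_t  bNumDescriptors;
    struct class_desc_entry desc[1];           /* bNumDescriptors entries follow */
} __attribute__((packed));

/* The advertised entry count must fit inside the descriptor's own length. */
static int num_descriptors_ok(const struct class_desc *d)
{
    size_t fixed = offsetof(struct class_desc, desc);
    return d->bNumDescriptors >= 1 &&
           d->bLength >= fixed + d->bNumDescriptors * sizeof(struct class_desc_entry);
}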
linux
|
bd7a3fe770ebd8391d1c7d072ff88e9e76d063eb
| 1 |
static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
struct usb_host_config *config, unsigned char *buffer, int size)
{
struct device *ddev = &dev->dev;
unsigned char *buffer0 = buffer;
int cfgno;
int nintf, nintf_orig;
int i, j, n;
struct usb_interface_cache *intfc;
unsigned char *buffer2;
int size2;
struct usb_descriptor_header *header;
int len, retval;
u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES];
unsigned iad_num = 0;
memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
if (config->desc.bDescriptorType != USB_DT_CONFIG ||
config->desc.bLength < USB_DT_CONFIG_SIZE ||
config->desc.bLength > size) {
dev_err(ddev, "invalid descriptor for config index %d: "
"type = 0x%X, length = %d\n", cfgidx,
config->desc.bDescriptorType, config->desc.bLength);
return -EINVAL;
}
cfgno = config->desc.bConfigurationValue;
buffer += config->desc.bLength;
size -= config->desc.bLength;
nintf = nintf_orig = config->desc.bNumInterfaces;
if (nintf > USB_MAXINTERFACES) {
dev_warn(ddev, "config %d has too many interfaces: %d, "
"using maximum allowed: %d\n",
cfgno, nintf, USB_MAXINTERFACES);
nintf = USB_MAXINTERFACES;
}
/* Go through the descriptors, checking their length and counting the
* number of altsettings for each interface */
n = 0;
for ((buffer2 = buffer, size2 = size);
size2 > 0;
(buffer2 += header->bLength, size2 -= header->bLength)) {
if (size2 < sizeof(struct usb_descriptor_header)) {
dev_warn(ddev, "config %d descriptor has %d excess "
"byte%s, ignoring\n",
cfgno, size2, plural(size2));
break;
}
header = (struct usb_descriptor_header *) buffer2;
if ((header->bLength > size2) || (header->bLength < 2)) {
dev_warn(ddev, "config %d has an invalid descriptor "
"of length %d, skipping remainder of the config\n",
cfgno, header->bLength);
break;
}
if (header->bDescriptorType == USB_DT_INTERFACE) {
struct usb_interface_descriptor *d;
int inum;
d = (struct usb_interface_descriptor *) header;
if (d->bLength < USB_DT_INTERFACE_SIZE) {
dev_warn(ddev, "config %d has an invalid "
"interface descriptor of length %d, "
"skipping\n", cfgno, d->bLength);
continue;
}
inum = d->bInterfaceNumber;
if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
n >= nintf_orig) {
dev_warn(ddev, "config %d has more interface "
"descriptors, than it declares in "
"bNumInterfaces, ignoring interface "
"number: %d\n", cfgno, inum);
continue;
}
if (inum >= nintf_orig)
dev_warn(ddev, "config %d has an invalid "
"interface number: %d but max is %d\n",
cfgno, inum, nintf_orig - 1);
/* Have we already encountered this interface?
* Count its altsettings */
for (i = 0; i < n; ++i) {
if (inums[i] == inum)
break;
}
if (i < n) {
if (nalts[i] < 255)
++nalts[i];
} else if (n < USB_MAXINTERFACES) {
inums[n] = inum;
nalts[n] = 1;
++n;
}
} else if (header->bDescriptorType ==
USB_DT_INTERFACE_ASSOCIATION) {
if (iad_num == USB_MAXIADS) {
dev_warn(ddev, "found more Interface "
"Association Descriptors "
"than allocated for in "
"configuration %d\n", cfgno);
} else {
config->intf_assoc[iad_num] =
(struct usb_interface_assoc_descriptor
*)header;
iad_num++;
}
} else if (header->bDescriptorType == USB_DT_DEVICE ||
header->bDescriptorType == USB_DT_CONFIG)
dev_warn(ddev, "config %d contains an unexpected "
"descriptor of type 0x%X, skipping\n",
cfgno, header->bDescriptorType);
} /* for ((buffer2 = buffer, size2 = size); ...) */
size = buffer2 - buffer;
config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0);
if (n != nintf)
dev_warn(ddev, "config %d has %d interface%s, different from "
"the descriptor's value: %d\n",
cfgno, n, plural(n), nintf_orig);
else if (n == 0)
dev_warn(ddev, "config %d has no interfaces?\n", cfgno);
config->desc.bNumInterfaces = nintf = n;
/* Check for missing interface numbers */
for (i = 0; i < nintf; ++i) {
for (j = 0; j < nintf; ++j) {
if (inums[j] == i)
break;
}
if (j >= nintf)
dev_warn(ddev, "config %d has no interface number "
"%d\n", cfgno, i);
}
/* Allocate the usb_interface_caches and altsetting arrays */
for (i = 0; i < nintf; ++i) {
j = nalts[i];
if (j > USB_MAXALTSETTING) {
dev_warn(ddev, "too many alternate settings for "
"config %d interface %d: %d, "
"using maximum allowed: %d\n",
cfgno, inums[i], j, USB_MAXALTSETTING);
nalts[i] = j = USB_MAXALTSETTING;
}
len = sizeof(*intfc) + sizeof(struct usb_host_interface) * j;
config->intf_cache[i] = intfc = kzalloc(len, GFP_KERNEL);
if (!intfc)
return -ENOMEM;
kref_init(&intfc->ref);
}
/* FIXME: parse the BOS descriptor */
/* Skip over any Class Specific or Vendor Specific descriptors;
* find the first interface descriptor */
config->extra = buffer;
i = find_next_descriptor(buffer, size, USB_DT_INTERFACE,
USB_DT_INTERFACE, &n);
config->extralen = i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "configuration");
buffer += i;
size -= i;
/* Parse all the interface/altsetting descriptors */
while (size > 0) {
retval = usb_parse_interface(ddev, cfgno, config,
buffer, size, inums, nalts);
if (retval < 0)
return retval;
buffer += retval;
size -= retval;
}
/* Check for missing altsettings */
for (i = 0; i < nintf; ++i) {
intfc = config->intf_cache[i];
for (j = 0; j < intfc->num_altsetting; ++j) {
for (n = 0; n < intfc->num_altsetting; ++n) {
if (intfc->altsetting[n].desc.
bAlternateSetting == j)
break;
}
if (n >= intfc->num_altsetting)
dev_warn(ddev, "config %d interface %d has no "
"altsetting %d\n", cfgno, inums[i], j);
}
}
return 0;
}
|
CWE-119
| 180,851 | 2,402 |
240764745726812281413181367713856263063
| null | null | null |
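One asymmetry stands out in the scan above: interface descriptors are only accepted when bLength reaches USB_DT_INTERFACE_SIZE, but the USB_DT_INTERFACE_ASSOCIATION branch stores the raw header into config->intf_assoc[] with nothing beyond the generic bLength >= 2 test, so a truncated association descriptor is kept and its later fields are read out of bounds when the configuration is used (the CWE-119 tag). Sketch of a per-type minimum-length gate; the numeric minimums are illustrative constants, not quoted from the header files:

#include <stdint.h>

#define DT_INTERFACE        0x04
#define DT_IFACE_ASSOC      0x0B
#define INTERFACE_DESC_MIN  9                  /* assumed minimum sizes for illustration */
#define IFACE_ASSOC_MIN     8

/* A descriptor is only handed to type-specific code if it meets that type's
 * minimum length, not just the generic two-byte header rule. */
static int descriptor_usable(uint8_t type, uint8_t length)
{
    switch (type) {
    case DT_INTERFACE:   return length >= INTERFACE_DESC_MIN;
    case DT_IFACE_ASSOC: return length >= IFACE_ASSOC_MIN;
    default:             return length >= 2;
    }
}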
linux
|
bfc81a8bc18e3c4ba0cbaa7666ff76be2f998991
| 1 |
static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
{
struct usb_device *dev = chip->dev;
struct usb_host_interface *host_iface;
struct usb_interface_descriptor *altsd;
void *control_header;
int i, protocol;
/* find audiocontrol interface */
host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
control_header = snd_usb_find_csint_desc(host_iface->extra,
host_iface->extralen,
NULL, UAC_HEADER);
altsd = get_iface_desc(host_iface);
protocol = altsd->bInterfaceProtocol;
if (!control_header) {
dev_err(&dev->dev, "cannot find UAC_HEADER\n");
return -EINVAL;
}
switch (protocol) {
default:
dev_warn(&dev->dev,
"unknown interface protocol %#02x, assuming v1\n",
protocol);
/* fall through */
case UAC_VERSION_1: {
struct uac1_ac_header_descriptor *h1 = control_header;
if (!h1->bInCollection) {
dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
return -EINVAL;
}
if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
return -EINVAL;
}
for (i = 0; i < h1->bInCollection; i++)
snd_usb_create_stream(chip, ctrlif, h1->baInterfaceNr[i]);
break;
}
case UAC_VERSION_2: {
struct usb_interface_assoc_descriptor *assoc =
usb_ifnum_to_if(dev, ctrlif)->intf_assoc;
if (!assoc) {
/*
* Firmware writers cannot count to three. So to find
* the IAD on the NuForce UDH-100, also check the next
* interface.
*/
struct usb_interface *iface =
usb_ifnum_to_if(dev, ctrlif + 1);
if (iface &&
iface->intf_assoc &&
iface->intf_assoc->bFunctionClass == USB_CLASS_AUDIO &&
iface->intf_assoc->bFunctionProtocol == UAC_VERSION_2)
assoc = iface->intf_assoc;
}
if (!assoc) {
dev_err(&dev->dev, "Audio class v2 interfaces need an interface association\n");
return -EINVAL;
}
for (i = 0; i < assoc->bInterfaceCount; i++) {
int intf = assoc->bFirstInterface + i;
if (intf != ctrlif)
snd_usb_create_stream(chip, ctrlif, intf);
}
break;
}
}
return 0;
}
|
CWE-125
| 180,853 | 2,403 |
17984538902999569946368924950222980008
| null | null | null |
linux
|
fc27fe7e8deef2f37cba3f2be2d52b6ca5eb9d57
| 1 |
static int snd_seq_device_dev_free(struct snd_device *device)
{
struct snd_seq_device *dev = device->device_data;
put_device(&dev->dev);
return 0;
}
|
CWE-416
| 180,854 | 2,404 |
27591105700120663554036974683684216243
| null | null | null |
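The release hook above is only four lines, so the CWE-416 tag suggests a lifetime problem around this release path rather than in these lines themselves: if work queued elsewhere in the driver still holds a pointer to the device, dropping the last reference here lets that callback run against freed memory. In kernel code the usual cure is to cancel or flush the pending work (cancel_work_sync() and friends) before the final put_device(). A generic plain-C sketch of the ordering, with invented names since the driver's real work item is not shown in this record:

#include <stdlib.h>

struct widget {                                /* hypothetical object with deferred work */
    int refcount;
};

/* Make sure no deferred callback can still dereference the object before the
 * last reference is dropped and the memory is freed. */
static void widget_release(struct widget *w,
                           void (*cancel_pending_work)(struct widget *))
{
    cancel_pending_work(w);                    /* wait for or cancel queued work first */
    if (--w->refcount == 0)
        free(w);                               /* only now is the free safe */
}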
linux
|
299d7572e46f98534033a9e65973f13ad1ce9047
| 1 |
static int usb_console_setup(struct console *co, char *options)
{
struct usbcons_info *info = &usbcons_info;
int baud = 9600;
int bits = 8;
int parity = 'n';
int doflow = 0;
int cflag = CREAD | HUPCL | CLOCAL;
char *s;
struct usb_serial *serial;
struct usb_serial_port *port;
int retval;
struct tty_struct *tty = NULL;
struct ktermios dummy;
if (options) {
baud = simple_strtoul(options, NULL, 10);
s = options;
while (*s >= '0' && *s <= '9')
s++;
if (*s)
parity = *s++;
if (*s)
bits = *s++ - '0';
if (*s)
doflow = (*s++ == 'r');
}
/* Sane default */
if (baud == 0)
baud = 9600;
switch (bits) {
case 7:
cflag |= CS7;
break;
default:
case 8:
cflag |= CS8;
break;
}
switch (parity) {
case 'o': case 'O':
cflag |= PARODD;
break;
case 'e': case 'E':
cflag |= PARENB;
break;
}
co->cflag = cflag;
/*
* no need to check the index here: if the index is wrong, console
* code won't call us
*/
port = usb_serial_port_get_by_minor(co->index);
if (port == NULL) {
/* no device is connected yet, sorry :( */
pr_err("No USB device connected to ttyUSB%i\n", co->index);
return -ENODEV;
}
serial = port->serial;
retval = usb_autopm_get_interface(serial->interface);
if (retval)
goto error_get_interface;
tty_port_tty_set(&port->port, NULL);
info->port = port;
++port->port.count;
if (!tty_port_initialized(&port->port)) {
if (serial->type->set_termios) {
/*
* allocate a fake tty so the driver can initialize
* the termios structure, then later call set_termios to
* configure according to command line arguments
*/
tty = kzalloc(sizeof(*tty), GFP_KERNEL);
if (!tty) {
retval = -ENOMEM;
goto reset_open_count;
}
kref_init(&tty->kref);
tty->driver = usb_serial_tty_driver;
tty->index = co->index;
init_ldsem(&tty->ldisc_sem);
spin_lock_init(&tty->files_lock);
INIT_LIST_HEAD(&tty->tty_files);
kref_get(&tty->driver->kref);
__module_get(tty->driver->owner);
tty->ops = &usb_console_fake_tty_ops;
tty_init_termios(tty);
tty_port_tty_set(&port->port, tty);
}
/* only call the device specific open if this
* is the first time the port is opened */
retval = serial->type->open(NULL, port);
if (retval) {
dev_err(&port->dev, "could not open USB console port\n");
goto fail;
}
if (serial->type->set_termios) {
tty->termios.c_cflag = cflag;
tty_termios_encode_baud_rate(&tty->termios, baud, baud);
memset(&dummy, 0, sizeof(struct ktermios));
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
tty_kref_put(tty);
}
tty_port_set_initialized(&port->port, 1);
}
/* Now that any required fake tty operations are completed restore
* the tty port count */
--port->port.count;
/* The console is special in terms of closing the device so
* indicate this port is now acting as a system console. */
port->port.console = 1;
mutex_unlock(&serial->disc_mutex);
return retval;
fail:
tty_port_tty_set(&port->port, NULL);
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
usb_autopm_put_interface(serial->interface);
error_get_interface:
usb_serial_put(serial);
mutex_unlock(&serial->disc_mutex);
return retval;
}
|
CWE-416
| 180,859 | 2,409 |
315797150859318089009247205537515770557
| null | null | null |
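Worth noting about the error handling above: the fail and reset_open_count paths drop the fake tty, the open count and the serial reference, but info->port keeps pointing at the port whose setup just failed, so a later console write through the cached usbcons_info can chase stale state; that is the shape of the use-after-free the CWE-416 tag records. The back-out needs to forget the cached pointer as well; a generic sketch with stand-in names:

#include <stddef.h>

struct port;                                   /* opaque stand-in for the console port */
static struct port *cached_port;               /* stand-in for the global usbcons_info.port */

/* An error path that undoes the setup must also drop the cached pointer, or
 * later console output will follow a stale reference. */
static int setup_failed(struct port *p, void (*release)(struct port *))
{
    cached_port = NULL;                        /* forget the port before it can go away */
    release(p);
    return -1;
}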
radare2
|
62e39f34b2705131a2d08aff0c2e542c6a52cf0e
| 1 |
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
const char *section_name = "";
const char *link_section_name = "";
char *end = NULL;
Elf_(Shdr) *link_shdr = NULL;
ut8 dfs[sizeof (Elf_(Verdef))] = {0};
Sdb *sdb;
int cnt, i;
if (shdr->sh_link > bin->ehdr.e_shnum) {
return false;
}
link_shdr = &bin->shdr[shdr->sh_link];
if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) {
return false;
}
Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char));
if (!defs) {
return false;
}
if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
section_name = &bin->shstrtab[shdr->sh_name];
}
if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
link_section_name = &bin->shstrtab[link_shdr->sh_name];
}
if (!defs) {
bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n");
return NULL;
}
sdb = sdb_new0 ();
end = (char *)defs + shdr->sh_size;
sdb_set (sdb, "section_name", section_name, 0);
sdb_num_set (sdb, "entries", shdr->sh_info, 0);
sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
sdb_num_set (sdb, "link", shdr->sh_link, 0);
sdb_set (sdb, "link_section_name", link_section_name, 0);
for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) {
Sdb *sdb_verdef = sdb_new0 ();
char *vstart = ((char*)defs) + i;
char key[32] = {0};
Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart;
Elf_(Verdaux) aux = {0};
int j = 0;
int isum = 0;
r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef)));
verdef->vd_version = READ16 (dfs, j)
verdef->vd_flags = READ16 (dfs, j)
verdef->vd_ndx = READ16 (dfs, j)
verdef->vd_cnt = READ16 (dfs, j)
verdef->vd_hash = READ32 (dfs, j)
verdef->vd_aux = READ32 (dfs, j)
verdef->vd_next = READ32 (dfs, j)
int vdaux = verdef->vd_aux;
if (vdaux < 1) {
sdb_free (sdb_verdef);
goto out_error;
}
vstart += vdaux;
if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
goto out_error;
}
j = 0;
aux.vda_name = READ32 (vstart, j)
aux.vda_next = READ32 (vstart, j)
isum = i + verdef->vd_aux;
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
goto out_error;
}
sdb_num_set (sdb_verdef, "idx", i, 0);
sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0);
sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0);
sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0);
sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0);
sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0);
for (j = 1; j < verdef->vd_cnt; ++j) {
int k;
Sdb *sdb_parent = sdb_new0 ();
isum += aux.vda_next;
vstart += aux.vda_next;
if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
k = 0;
aux.vda_name = READ32 (vstart, k)
aux.vda_next = READ32 (vstart, k)
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
sdb_num_set (sdb_parent, "idx", isum, 0);
sdb_num_set (sdb_parent, "parent", j, 0);
sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0);
snprintf (key, sizeof (key), "parent%d", j - 1);
sdb_ns_set (sdb_verdef, key, sdb_parent);
}
snprintf (key, sizeof (key), "verdef%d", cnt);
sdb_ns_set (sdb, key, sdb_verdef);
if (!verdef->vd_next) {
sdb_free (sdb_verdef);
goto out_error;
}
if ((st32)verdef->vd_next < 1) {
eprintf ("Warning: Invalid vd_next in the ELF version\n");
break;
}
i += verdef->vd_next;
}
free (defs);
return sdb;
out_error:
free (defs);
sdb_free (sdb);
return NULL;
}
|
CWE-476
| 180,860 | 2,410 |
256945349868002668483144656008195023828
| null | null | null |
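Unlike the verneed parser recorded further down, which begins with if (!bin || !bin->dynstr) return NULL;, the verdef walker above indexes bin->dynstr[aux.vda_name] without ever checking that the dynamic string table was loaded; that missing precondition lines up with the CWE-476 tag, and the bound test using > dynstr_size (letting an index equal to the size through) is a related out-of-bounds hazard. A sketch of the missing preconditions with simplified types:

#include <stddef.h>

struct elf_obj {                               /* simplified stand-in for ELFOBJ */
    char  *dynstr;
    size_t dynstr_size;
};

/* Resolve a version-name offset only when a string table exists and the
 * offset is strictly inside it (index == size is already out of bounds). */
static const char *verdef_name(const struct elf_obj *bin, size_t vda_name)
{
    if (bin == NULL || bin->dynstr == NULL)
        return NULL;
    if (vda_name >= bin->dynstr_size)
        return NULL;
    return &bin->dynstr[vda_name];
}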
radare2
|
0b973e28166636e0ff1fad80baa0385c9c09c53a
| 1 |
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
const char *section_name = "";
const char *link_section_name = "";
char *end = NULL;
Elf_(Shdr) *link_shdr = NULL;
ut8 dfs[sizeof (Elf_(Verdef))] = {0};
Sdb *sdb;
int cnt, i;
if (shdr->sh_link > bin->ehdr.e_shnum) {
return false;
}
link_shdr = &bin->shdr[shdr->sh_link];
if (shdr->sh_size < 1) {
return false;
}
Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char));
if (!defs) {
return false;
}
if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
section_name = &bin->shstrtab[shdr->sh_name];
}
if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
link_section_name = &bin->shstrtab[link_shdr->sh_name];
}
if (!defs) {
bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n");
return NULL;
}
sdb = sdb_new0 ();
end = (char *)defs + shdr->sh_size;
sdb_set (sdb, "section_name", section_name, 0);
sdb_num_set (sdb, "entries", shdr->sh_info, 0);
sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
sdb_num_set (sdb, "link", shdr->sh_link, 0);
sdb_set (sdb, "link_section_name", link_section_name, 0);
for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) {
Sdb *sdb_verdef = sdb_new0 ();
char *vstart = ((char*)defs) + i;
char key[32] = {0};
Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart;
Elf_(Verdaux) aux = {0};
int j = 0;
int isum = 0;
r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef)));
verdef->vd_version = READ16 (dfs, j)
verdef->vd_flags = READ16 (dfs, j)
verdef->vd_ndx = READ16 (dfs, j)
verdef->vd_cnt = READ16 (dfs, j)
verdef->vd_hash = READ32 (dfs, j)
verdef->vd_aux = READ32 (dfs, j)
verdef->vd_next = READ32 (dfs, j)
int vdaux = verdef->vd_aux;
if (vdaux < 1) {
sdb_free (sdb_verdef);
goto out_error;
}
vstart += vdaux;
if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
goto out_error;
}
j = 0;
aux.vda_name = READ32 (vstart, j)
aux.vda_next = READ32 (vstart, j)
isum = i + verdef->vd_aux;
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
goto out_error;
}
sdb_num_set (sdb_verdef, "idx", i, 0);
sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0);
sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0);
sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0);
sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0);
sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0);
for (j = 1; j < verdef->vd_cnt; ++j) {
int k;
Sdb *sdb_parent = sdb_new0 ();
isum += aux.vda_next;
vstart += aux.vda_next;
if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
k = 0;
aux.vda_name = READ32 (vstart, k)
aux.vda_next = READ32 (vstart, k)
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
sdb_num_set (sdb_parent, "idx", isum, 0);
sdb_num_set (sdb_parent, "parent", j, 0);
sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0);
snprintf (key, sizeof (key), "parent%d", j - 1);
sdb_ns_set (sdb_verdef, key, sdb_parent);
}
snprintf (key, sizeof (key), "verdef%d", cnt);
sdb_ns_set (sdb, key, sdb_verdef);
if (!verdef->vd_next) {
sdb_free (sdb_verdef);
goto out_error;
}
if ((st32)verdef->vd_next < 1) {
eprintf ("Warning: Invalid vd_next in the ELF version\n");
break;
}
i += verdef->vd_next;
}
free (defs);
return sdb;
out_error:
free (defs);
sdb_free (sdb);
return NULL;
}
|
CWE-119
| 180,861 | 2,411 |
14868589566797490319741247466536626347
| null | null | null |
radare2
|
0b973e28166636e0ff1fad80baa0385c9c09c53a
| 1 |
static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
ut8 *end, *need = NULL;
const char *section_name = "";
Elf_(Shdr) *link_shdr = NULL;
const char *link_section_name = "";
Sdb *sdb_vernaux = NULL;
Sdb *sdb_version = NULL;
Sdb *sdb = NULL;
int i, cnt;
if (!bin || !bin->dynstr) {
return NULL;
}
if (shdr->sh_link > bin->ehdr.e_shnum) {
return NULL;
}
if (shdr->sh_size < 1) {
return NULL;
}
sdb = sdb_new0 ();
if (!sdb) {
return NULL;
}
link_shdr = &bin->shdr[shdr->sh_link];
if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
section_name = &bin->shstrtab[shdr->sh_name];
}
if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
link_section_name = &bin->shstrtab[link_shdr->sh_name];
}
if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) {
bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n");
goto beach;
}
end = need + shdr->sh_size;
sdb_set (sdb, "section_name", section_name, 0);
sdb_num_set (sdb, "num_entries", shdr->sh_info, 0);
sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
sdb_num_set (sdb, "link", shdr->sh_link, 0);
sdb_set (sdb, "link_section_name", link_section_name, 0);
if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) {
goto beach;
}
if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) {
goto beach;
}
i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size);
if (i < 0)
goto beach;
for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) {
int j, isum;
ut8 *vstart = need + i;
Elf_(Verneed) vvn = {0};
if (vstart + sizeof (Elf_(Verneed)) > end) {
goto beach;
}
Elf_(Verneed) *entry = &vvn;
char key[32] = {0};
sdb_version = sdb_new0 ();
if (!sdb_version) {
goto beach;
}
j = 0;
vvn.vn_version = READ16 (vstart, j)
vvn.vn_cnt = READ16 (vstart, j)
vvn.vn_file = READ32 (vstart, j)
vvn.vn_aux = READ32 (vstart, j)
vvn.vn_next = READ32 (vstart, j)
sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0);
sdb_num_set (sdb_version, "idx", i, 0);
if (entry->vn_file > bin->dynstr_size) {
goto beach;
}
{
char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16);
sdb_set (sdb_version, "file_name", s, 0);
free (s);
}
sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0);
st32 vnaux = entry->vn_aux;
if (vnaux < 1) {
goto beach;
}
vstart += vnaux;
for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) {
int k;
Elf_(Vernaux) * aux = NULL;
Elf_(Vernaux) vaux = {0};
sdb_vernaux = sdb_new0 ();
if (!sdb_vernaux) {
goto beach;
}
aux = (Elf_(Vernaux)*)&vaux;
k = 0;
vaux.vna_hash = READ32 (vstart, k)
vaux.vna_flags = READ16 (vstart, k)
vaux.vna_other = READ16 (vstart, k)
vaux.vna_name = READ32 (vstart, k)
vaux.vna_next = READ32 (vstart, k)
if (aux->vna_name > bin->dynstr_size) {
goto beach;
}
sdb_num_set (sdb_vernaux, "idx", isum, 0);
if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) {
char name [16];
strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1);
name[sizeof(name)-1] = 0;
sdb_set (sdb_vernaux, "name", name, 0);
}
sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0);
sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0);
isum += aux->vna_next;
vstart += aux->vna_next;
snprintf (key, sizeof (key), "vernaux%d", j);
sdb_ns_set (sdb_version, key, sdb_vernaux);
}
if ((int)entry->vn_next < 0) {
bprintf ("Invalid vn_next\n");
break;
}
i += entry->vn_next;
snprintf (key, sizeof (key), "version%d", cnt );
sdb_ns_set (sdb, key, sdb_version);
if (!entry->vn_next) {
break;
}
}
free (need);
return sdb;
beach:
free (need);
sdb_free (sdb_vernaux);
sdb_free (sdb_version);
sdb_free (sdb);
return NULL;
}
|
CWE-119
| 180,862 | 2,412 |
239473085722377722115401106929450483795
| null | null | null |
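Two bounds in the vernaux loop above deserve a second look: vna_name is rejected only when it is strictly greater than dynstr_size, so an offset equal to the size slips through, and the 15-byte strncpy of the name is guarded by vna_name + 8 < dynstr_size, which still allows the copy to read up to six bytes past the end of the table when no terminator intervenes; both are consistent with the CWE-119 tag. A small sketch of a name copy that can never read past the table, with simplified parameters:

#include <stddef.h>

/* Copy a version name out of the dynamic string table without ever reading
 * past the table, regardless of the requested copy width. */
static size_t copy_name(char *dst, size_t dstlen,
                        const char *dynstr, size_t dynstr_size, size_t off)
{
    if (off >= dynstr_size || dstlen == 0)
        return 0;
    size_t avail = dynstr_size - off;          /* bytes left in the table */
    size_t n = (dstlen - 1 < avail) ? dstlen - 1 : avail;
    size_t i;
    for (i = 0; i < n && dynstr[off + i] != '\0'; i++)
        dst[i] = dynstr[off + i];
    dst[i] = '\0';
    return i;
}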
radare2
|
44ded3ff35b8264f54b5a900cab32ec489d9e5b9
| 1 |
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
const char *section_name = "";
const char *link_section_name = "";
char *end = NULL;
Elf_(Shdr) *link_shdr = NULL;
ut8 dfs[sizeof (Elf_(Verdef))] = {0};
Sdb *sdb;
int cnt, i;
if (shdr->sh_link > bin->ehdr.e_shnum) {
return false;
}
link_shdr = &bin->shdr[shdr->sh_link];
if (shdr->sh_size < 1) {
return false;
}
Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char));
if (!defs) {
return false;
}
if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
section_name = &bin->shstrtab[shdr->sh_name];
}
if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
link_section_name = &bin->shstrtab[link_shdr->sh_name];
}
if (!defs) {
bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n");
return NULL;
}
sdb = sdb_new0 ();
end = (char *)defs + shdr->sh_size;
sdb_set (sdb, "section_name", section_name, 0);
sdb_num_set (sdb, "entries", shdr->sh_info, 0);
sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
sdb_num_set (sdb, "link", shdr->sh_link, 0);
sdb_set (sdb, "link_section_name", link_section_name, 0);
for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) {
Sdb *sdb_verdef = sdb_new0 ();
char *vstart = ((char*)defs) + i;
char key[32] = {0};
Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart;
Elf_(Verdaux) aux = {0};
int j = 0;
int isum = 0;
r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef)));
verdef->vd_version = READ16 (dfs, j)
verdef->vd_flags = READ16 (dfs, j)
verdef->vd_ndx = READ16 (dfs, j)
verdef->vd_cnt = READ16 (dfs, j)
verdef->vd_hash = READ32 (dfs, j)
verdef->vd_aux = READ32 (dfs, j)
verdef->vd_next = READ32 (dfs, j)
vstart += verdef->vd_aux;
if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
goto out_error;
}
j = 0;
aux.vda_name = READ32 (vstart, j)
aux.vda_next = READ32 (vstart, j)
isum = i + verdef->vd_aux;
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
goto out_error;
}
sdb_num_set (sdb_verdef, "idx", i, 0);
sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0);
sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0);
sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0);
sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0);
sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0);
for (j = 1; j < verdef->vd_cnt; ++j) {
int k;
Sdb *sdb_parent = sdb_new0 ();
isum += aux.vda_next;
vstart += aux.vda_next;
if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
k = 0;
aux.vda_name = READ32 (vstart, k)
aux.vda_next = READ32 (vstart, k)
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
sdb_num_set (sdb_parent, "idx", isum, 0);
sdb_num_set (sdb_parent, "parent", j, 0);
sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0);
snprintf (key, sizeof (key), "parent%d", j - 1);
sdb_ns_set (sdb_verdef, key, sdb_parent);
}
snprintf (key, sizeof (key), "verdef%d", cnt);
sdb_ns_set (sdb, key, sdb_verdef);
if (!verdef->vd_next) {
sdb_free (sdb_verdef);
goto out_error;
}
if ((st32)verdef->vd_next < 1) {
eprintf ("Warning: Invalid vd_next in the ELF version\n");
break;
}
i += verdef->vd_next;
}
free (defs);
return sdb;
out_error:
free (defs);
sdb_free (sdb);
return NULL;
}
|
CWE-125
| 180,883 | 2,428 |
289574171374133833673846841637084736705
| null | null | null |
shadowsocks-libev
|
c67d275803dc6ea22c558d06b1f7ba9f94cd8de3
| 1 |
build_config(char *prefix, struct server *server)
{
char *path = NULL;
int path_size = strlen(prefix) + strlen(server->port) + 20;
path = ss_malloc(path_size);
snprintf(path, path_size, "%s/.shadowsocks_%s.conf", prefix, server->port);
FILE *f = fopen(path, "w+");
if (f == NULL) {
if (verbose) {
LOGE("unable to open config file");
}
ss_free(path);
return;
}
fprintf(f, "{\n");
fprintf(f, "\"server_port\":%d,\n", atoi(server->port));
fprintf(f, "\"password\":\"%s\"", server->password);
if (server->fast_open[0]) fprintf(f, ",\n\"fast_open\": %s", server->fast_open);
if (server->mode) fprintf(f, ",\n\"mode\":\"%s\"", server->mode);
if (server->method) fprintf(f, ",\n\"method\":\"%s\"", server->method);
if (server->plugin) fprintf(f, ",\n\"plugin\":\"%s\"", server->plugin);
if (server->plugin_opts) fprintf(f, ",\n\"plugin_opts\":\"%s\"", server->plugin_opts);
fprintf(f, "\n}\n");
fclose(f);
ss_free(path);
}
|
CWE-78
| 180,885 | 2,429 |
329376205814019093954971990828971672620
| null | null | null |
shadowsocks-libev
|
c67d275803dc6ea22c558d06b1f7ba9f94cd8de3
| 1 |
construct_command_line(struct manager_ctx *manager, struct server *server)
{
static char cmd[BUF_SIZE];
char *method = manager->method;
int i;
build_config(working_dir, server);
if (server->method) method = server->method;
memset(cmd, 0, BUF_SIZE);
snprintf(cmd, BUF_SIZE,
"%s -m %s --manager-address %s -f %s/.shadowsocks_%s.pid -c %s/.shadowsocks_%s.conf",
executable, method, manager->manager_address,
working_dir, server->port, working_dir, server->port);
if (manager->acl != NULL) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " --acl %s", manager->acl);
}
if (manager->timeout != NULL) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -t %s", manager->timeout);
}
#ifdef HAVE_SETRLIMIT
if (manager->nofile) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -n %d", manager->nofile);
}
#endif
if (manager->user != NULL) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -a %s", manager->user);
}
if (manager->verbose) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -v");
}
if (server->mode == NULL && manager->mode == UDP_ONLY) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -U");
}
if (server->mode == NULL && manager->mode == TCP_AND_UDP) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -u");
}
if (server->fast_open[0] == 0 && manager->fast_open) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " --fast-open");
}
if (manager->ipv6first) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -6");
}
if (manager->mtu) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " --mtu %d", manager->mtu);
}
if (server->plugin == NULL && manager->plugin) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " --plugin \"%s\"", manager->plugin);
}
if (server->plugin_opts == NULL && manager->plugin_opts) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " --plugin-opts \"%s\"", manager->plugin_opts);
}
for (i = 0; i < manager->nameserver_num; i++) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -d %s", manager->nameservers[i]);
}
for (i = 0; i < manager->host_num; i++) {
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " -s %s", manager->hosts[i]);
}
{
int len = strlen(cmd);
snprintf(cmd + len, BUF_SIZE - len, " --reuse-port");
}
if (verbose) {
LOGI("cmd: %s", cmd);
}
return cmd;
}
|
CWE-78
| 180,886 | 2,430 |
56556845946959241723985189303974958310
| null | null | null |
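The two manager helpers above (the config writer and the command-line builder) splice caller-supplied fields such as port, method, plugin and plugin_opts straight into a config file and into a command string that is later executed, so any field carrying quotes, semicolons or backticks becomes command injection; that is what the CWE-78 tags on both records capture. A sketch of the kind of allow-list validation such fields need before interpolation. The accepted character set here is an illustrative choice, not the project's rule, and the port field in particular would warrant a digits-only check:

#include <stdbool.h>
#include <string.h>

/* Accept only characters that cannot break out of a quoted shell argument. */
static bool field_is_safe(const char *s, size_t maxlen)
{
    static const char ok[] =
        "abcdefghijklmnopqrstuvwxyz"
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "0123456789.:_-+=/";
    if (s == NULL)
        return false;
    size_t n = strlen(s);
    if (n == 0 || n > maxlen)
        return false;
    return strspn(s, ok) == n;
}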
src
|
a6981567e8e215acc1ef690c8dbb30f2d9b00a19
| 1 |
process_open(u_int32_t id)
{
u_int32_t pflags;
Attrib a;
char *name;
int r, handle, fd, flags, mode, status = SSH2_FX_FAILURE;
if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 ||
(r = sshbuf_get_u32(iqueue, &pflags)) != 0 || /* portable flags */
(r = decode_attrib(iqueue, &a)) != 0)
fatal("%s: buffer error: %s", __func__, ssh_err(r));
debug3("request %u: open flags %d", id, pflags);
flags = flags_from_portable(pflags);
mode = (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) ? a.perm : 0666;
logit("open \"%s\" flags %s mode 0%o",
name, string_from_portable(pflags), mode);
if (readonly &&
((flags & O_ACCMODE) == O_WRONLY ||
(flags & O_ACCMODE) == O_RDWR)) {
verbose("Refusing open request in read-only mode");
status = SSH2_FX_PERMISSION_DENIED;
} else {
fd = open(name, flags, mode);
if (fd < 0) {
status = errno_to_portable(errno);
} else {
handle = handle_new(HANDLE_FILE, name, fd, flags, NULL);
if (handle < 0) {
close(fd);
} else {
send_handle(id, handle);
status = SSH2_FX_OK;
}
}
}
if (status != SSH2_FX_OK)
send_status(id, status);
free(name);
}
|
CWE-269
| 180,887 | 2,431 |
7430065301267818646405159117640857338
| null | null | null |
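The read-only gate above only rejects requests whose access mode is O_WRONLY or O_RDWR, so a client can still combine O_RDONLY with O_CREAT or O_TRUNC and create or truncate files on a server that believes it is read-only; that gap is the privilege issue the CWE-269 tag records. The stricter condition is small; a sketch:

#include <fcntl.h>
#include <stdbool.h>

/* In read-only mode, refuse anything that can modify the filesystem, not just
 * write access modes. */
static bool open_allowed_readonly(int flags)
{
    if ((flags & O_ACCMODE) != O_RDONLY)
        return false;                          /* write or read-write access */
    if (flags & (O_CREAT | O_TRUNC))
        return false;                          /* creation or truncation still mutates */
    return true;
}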
linux
|
71bb99a02b32b4cc4265118e85f6035ca72923f0
| 1 |
int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
{
struct net_device *dev;
struct bnep_session *s, *ss;
u8 dst[ETH_ALEN], src[ETH_ALEN];
int err;
BT_DBG("");
baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
/* session struct allocated as private part of net_device */
dev = alloc_netdev(sizeof(struct bnep_session),
(*req->device) ? req->device : "bnep%d",
NET_NAME_UNKNOWN,
bnep_net_setup);
if (!dev)
return -ENOMEM;
down_write(&bnep_session_sem);
ss = __bnep_get_session(dst);
if (ss && ss->state == BT_CONNECTED) {
err = -EEXIST;
goto failed;
}
s = netdev_priv(dev);
/* This is rx header therefore addresses are swapped.
* ie. eh.h_dest is our local address. */
memcpy(s->eh.h_dest, &src, ETH_ALEN);
memcpy(s->eh.h_source, &dst, ETH_ALEN);
memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN);
s->dev = dev;
s->sock = sock;
s->role = req->role;
s->state = BT_CONNECTED;
s->msg.msg_flags = MSG_NOSIGNAL;
#ifdef CONFIG_BT_BNEP_MC_FILTER
/* Set default mc filter */
set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter);
#endif
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Set default protocol filter */
bnep_set_default_proto_filter(s);
#endif
SET_NETDEV_DEV(dev, bnep_get_device(s));
SET_NETDEV_DEVTYPE(dev, &bnep_type);
err = register_netdev(dev);
if (err)
goto failed;
__bnep_link_session(s);
__module_get(THIS_MODULE);
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
err = PTR_ERR(s->task);
goto failed;
}
up_write(&bnep_session_sem);
strcpy(req->device, dev->name);
return 0;
failed:
up_write(&bnep_session_sem);
free_netdev(dev);
return err;
}
|
CWE-20
| 180,888 | 2,432 |
246459063774981644660060171161081083217
| null | null | null |
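The session setup above calls l2cap_pi(sock->sk) on whatever socket the caller handed in, with no check that the socket really is an L2CAP socket in a usable state, so an unrelated socket's private data gets reinterpreted as l2cap_pinfo; that unvalidated input is what the CWE-20 tag flags. The Bluetooth layer exposes l2cap_is_socket() for exactly this kind of precondition. A generic sketch of the guard, with the constants treated as illustrative stand-ins:

#include <stdbool.h>
#include <stddef.h>

struct fake_sock { int protocol; int state; };  /* stand-in fields a guard would inspect */

#define PROTO_L2CAP     0                       /* illustrative values only */
#define STATE_CONNECTED 1

/* Verify protocol and state before casting a socket's private data to a
 * protocol-specific structure. */
static bool usable_l2cap_socket(const struct fake_sock *sk)
{
    return sk != NULL &&
           sk->protocol == PROTO_L2CAP &&
           sk->state == STATE_CONNECTED;
}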
linux
|
4971613c1639d8e5f102c4e797c3bf8f83a5a69e
| 1 |
static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
__be16 proto)
{
struct packet_sock *po = pkt_sk(sk);
struct net_device *dev_curr;
__be16 proto_curr;
bool need_rehook;
struct net_device *dev = NULL;
int ret = 0;
bool unlisted = false;
if (po->fanout)
return -EINVAL;
lock_sock(sk);
spin_lock(&po->bind_lock);
rcu_read_lock();
if (name) {
dev = dev_get_by_name_rcu(sock_net(sk), name);
if (!dev) {
ret = -ENODEV;
goto out_unlock;
}
} else if (ifindex) {
dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
if (!dev) {
ret = -ENODEV;
goto out_unlock;
}
}
if (dev)
dev_hold(dev);
proto_curr = po->prot_hook.type;
dev_curr = po->prot_hook.dev;
need_rehook = proto_curr != proto || dev_curr != dev;
if (need_rehook) {
if (po->running) {
rcu_read_unlock();
__unregister_prot_hook(sk, true);
rcu_read_lock();
dev_curr = po->prot_hook.dev;
if (dev)
unlisted = !dev_get_by_index_rcu(sock_net(sk),
dev->ifindex);
}
po->num = proto;
po->prot_hook.type = proto;
if (unlikely(unlisted)) {
dev_put(dev);
po->prot_hook.dev = NULL;
po->ifindex = -1;
packet_cached_dev_reset(po);
} else {
po->prot_hook.dev = dev;
po->ifindex = dev ? dev->ifindex : 0;
packet_cached_dev_assign(po, dev);
}
}
if (dev_curr)
dev_put(dev_curr);
if (proto == 0 || !need_rehook)
goto out_unlock;
if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
register_prot_hook(sk);
} else {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
}
out_unlock:
rcu_read_unlock();
spin_unlock(&po->bind_lock);
release_sock(sk);
return ret;
}
|
CWE-362
| 180,889 | 2,433 |
113256036162153374580649847624008527748
| null | null | null |
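In the bind path above, the po->fanout test runs before lock_sock() and the bind_lock are taken, so another thread can attach a fanout between that early check and the rebind, and the two paths then race over the protocol hook; that window is what the CWE-362 tag records. The shape of the repair is purely an ordering change, moving the check under the same lock the rebind holds; a plain-C sketch with stand-in locking:

#include <pthread.h>
#include <stdbool.h>

struct psock {
    pthread_mutex_t lock;                      /* stands in for the socket/bind locks */
    bool fanout;                               /* stands in for po->fanout */
    int  proto;                                /* stands in for po->num */
};

/* State that decides whether a rebind is legal must be read under the same
 * lock that the rebind itself holds, not before it. */
static int do_bind(struct psock *po, int new_proto)
{
    int ret = 0;
    pthread_mutex_lock(&po->lock);
    if (po->fanout) {                          /* checked only once the lock is held */
        ret = -1;
    } else {
        po->proto = new_proto;                 /* rebind while still holding the lock */
    }
    pthread_mutex_unlock(&po->lock);
    return ret;
}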
linux
|
814fb7bb7db5433757d76f4c4502c96fc53b0b5e
| 1 |
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct xregs_state *xsave;
int ret;
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return -ENODEV;
/*
* A whole standard-format XSAVE buffer is needed:
*/
if ((pos != 0) || (count < fpu_user_xstate_size))
return -EFAULT;
xsave = &fpu->state.xsave;
fpu__activate_fpstate_write(fpu);
if (boot_cpu_has(X86_FEATURE_XSAVES)) {
if (kbuf)
ret = copy_kernel_to_xstate(xsave, kbuf);
else
ret = copy_user_to_xstate(xsave, ubuf);
} else {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
}
/*
* In case of failure, mark all states as init:
*/
if (ret)
fpstate_init(&fpu->state);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
xsave->header.xfeatures &= xfeatures_mask;
/*
* These bits must be zero.
*/
memset(&xsave->header.reserved, 0, 48);
return ret;
}
|
CWE-200
| 180,890 | 2,434 |
29945451952694289181987195058974996252
| null | null | null |
linux
|
814fb7bb7db5433757d76f4c4502c96fc53b0b5e
| 1 |
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
int state_size = fpu_kernel_xstate_size;
u64 xfeatures = 0;
int fx_only = 0;
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
IS_ENABLED(CONFIG_IA32_EMULATION));
if (!buf) {
fpu__clear(fpu);
return 0;
}
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
fpu__activate_curr(fpu);
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(current, NULL,
0, sizeof(struct user_i387_ia32_struct),
NULL, buf) != 0;
if (use_xsave()) {
struct _fpx_sw_bytes fx_sw_user;
if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
/*
* Couldn't find the extended state information in the
* memory layout. Restore just the FP/SSE and init all
* the other extended state.
*/
state_size = sizeof(struct fxregs_state);
fx_only = 1;
trace_x86_fpu_xstate_check_failed(fpu);
} else {
state_size = fx_sw_user.xstate_size;
xfeatures = fx_sw_user.xfeatures;
}
}
if (ia32_fxstate) {
/*
* For 32-bit frames with fxstate, copy the user state to the
* thread's fpu state, reconstruct fxstate from the fsave
* header. Sanitize the copied state etc.
*/
struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;
/*
		 * Drop the current fpu, which clears fpu->fpstate_active. This
		 * ensures that a context switch during the copy of the new state
		 * cannot restore or save the intermediate state, so the newly
		 * restored state cannot be corrupted.
* We will be ready to restore/save the state only after
* fpu->fpstate_active is again set.
*/
fpu__drop(fpu);
if (using_compacted_format())
err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
else
err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
if (err || __copy_from_user(&env, buf, sizeof(env))) {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
err = -1;
} else {
sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
}
fpu->fpstate_active = 1;
preempt_disable();
fpu__restore(fpu);
preempt_enable();
return err;
} else {
/*
* For 64-bit frames and 32-bit fsave frames, restore the user
* state to the registers directly (with exceptions handled).
*/
user_fpu_begin();
if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
fpu__clear(fpu);
return -1;
}
}
return 0;
}
|
CWE-200
| 180,891 | 2,435 |
182906564625953418159746819992845637632
| null | null | null |
radare2
|
21a6f570ba33fa9f52f1bba87f07acc4e8c178f4
| 1 |
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
const char *section_name = "";
const char *link_section_name = "";
char *end = NULL;
Elf_(Shdr) *link_shdr = NULL;
ut8 dfs[sizeof (Elf_(Verdef))] = {0};
Sdb *sdb;
int cnt, i;
if (shdr->sh_link > bin->ehdr.e_shnum) {
return false;
}
link_shdr = &bin->shdr[shdr->sh_link];
if (shdr->sh_size < 1) {
return false;
}
Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char));
if (!defs) {
return false;
}
if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
section_name = &bin->shstrtab[shdr->sh_name];
}
if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
link_section_name = &bin->shstrtab[link_shdr->sh_name];
}
if (!defs) {
bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n");
return NULL;
}
sdb = sdb_new0 ();
end = (char *)defs + shdr->sh_size;
sdb_set (sdb, "section_name", section_name, 0);
sdb_num_set (sdb, "entries", shdr->sh_info, 0);
sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
sdb_num_set (sdb, "link", shdr->sh_link, 0);
sdb_set (sdb, "link_section_name", link_section_name, 0);
for (cnt = 0, i = 0; cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) {
Sdb *sdb_verdef = sdb_new0 ();
char *vstart = ((char*)defs) + i;
char key[32] = {0};
Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart;
Elf_(Verdaux) aux = {0};
int j = 0;
int isum = 0;
r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef)));
verdef->vd_version = READ16 (dfs, j)
verdef->vd_flags = READ16 (dfs, j)
verdef->vd_ndx = READ16 (dfs, j)
verdef->vd_cnt = READ16 (dfs, j)
verdef->vd_hash = READ32 (dfs, j)
verdef->vd_aux = READ32 (dfs, j)
verdef->vd_next = READ32 (dfs, j)
vstart += verdef->vd_aux;
if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
goto out_error;
}
j = 0;
aux.vda_name = READ32 (vstart, j)
aux.vda_next = READ32 (vstart, j)
isum = i + verdef->vd_aux;
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
goto out_error;
}
sdb_num_set (sdb_verdef, "idx", i, 0);
sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0);
sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0);
sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0);
sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0);
sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0);
for (j = 1; j < verdef->vd_cnt; ++j) {
int k;
Sdb *sdb_parent = sdb_new0 ();
isum += aux.vda_next;
vstart += aux.vda_next;
if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
k = 0;
aux.vda_name = READ32 (vstart, k)
aux.vda_next = READ32 (vstart, k)
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
sdb_num_set (sdb_parent, "idx", isum, 0);
sdb_num_set (sdb_parent, "parent", j, 0);
sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0);
snprintf (key, sizeof (key), "parent%d", j - 1);
sdb_ns_set (sdb_verdef, key, sdb_parent);
}
snprintf (key, sizeof (key), "verdef%d", cnt);
sdb_ns_set (sdb, key, sdb_verdef);
if (!verdef->vd_next) {
sdb_free (sdb_verdef);
goto out_error;
}
i += verdef->vd_next;
}
free (defs);
return sdb;
out_error:
free (defs);
sdb_free (sdb);
return NULL;
}
|
CWE-119
| 180,892 | 2,436 |
243866778000885346195425595121235671831
| null | null | null |
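For the CWE-119 label above, the recurring hazard when walking variable-length on-disk records is reading a fixed-size header or following an offset without first checking it against the end of the buffer. The sketch below uses illustrative names (struct verdef_hdr, walk_records) and is not radare2 API; it shows one bounded-cursor pattern under the assumption that records chain via a byte offset.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct verdef_hdr {            /* illustrative 8-byte record header */
    uint16_t version;
    uint16_t count;
    uint32_t next;             /* byte offset of the next record, 0 = last */
};

/* Walk records in buf[0..len), refusing any read past the end. */
static int walk_records(const uint8_t *buf, size_t len)
{
    size_t off = 0;
    while (off + sizeof(struct verdef_hdr) <= len) {
        struct verdef_hdr h;
        memcpy(&h, buf + off, sizeof(h));   /* bounded copy, no cast-and-deref */
        if (h.next == 0)
            return 0;                        /* clean end of chain */
        if (h.next > len - off)
            return -1;                       /* offset would escape the buffer */
        off += h.next;
    }
    return (off == len) ? 0 : -1;            /* truncated header is an error */
}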
suricata
|
47afc577ff763150f9b47f10331f5ef9eb847a57
| 1 |
int DetectEngineContentInspection(DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx,
const Signature *s, const SigMatchData *smd,
Flow *f,
uint8_t *buffer, uint32_t buffer_len,
uint32_t stream_start_offset,
uint8_t inspection_mode, void *data)
{
SCEnter();
KEYWORD_PROFILING_START;
det_ctx->inspection_recursion_counter++;
if (det_ctx->inspection_recursion_counter == de_ctx->inspection_recursion_limit) {
det_ctx->discontinue_matching = 1;
KEYWORD_PROFILING_END(det_ctx, smd->type, 0);
SCReturnInt(0);
}
if (smd == NULL || buffer_len == 0) {
KEYWORD_PROFILING_END(det_ctx, smd->type, 0);
SCReturnInt(0);
}
    /* \todo unify this; it is phase 2 of payload inspection unification */
if (smd->type == DETECT_CONTENT) {
DetectContentData *cd = (DetectContentData *)smd->ctx;
SCLogDebug("inspecting content %"PRIu32" buffer_len %"PRIu32, cd->id, buffer_len);
        /* we might already have this content matched by the mpm.
* (if there is any other reason why we'd want to avoid checking
* it here, please fill it in) */
/* rule parsers should take care of this */
#ifdef DEBUG
BUG_ON(cd->depth != 0 && cd->depth <= cd->offset);
#endif
/* search for our pattern, checking the matches recursively.
* if we match we look for the next SigMatch as well */
uint8_t *found = NULL;
uint32_t offset = 0;
uint32_t depth = buffer_len;
uint32_t prev_offset = 0; /**< used in recursive searching */
uint32_t prev_buffer_offset = det_ctx->buffer_offset;
do {
if ((cd->flags & DETECT_CONTENT_DISTANCE) ||
(cd->flags & DETECT_CONTENT_WITHIN)) {
SCLogDebug("det_ctx->buffer_offset %"PRIu32, det_ctx->buffer_offset);
offset = prev_buffer_offset;
depth = buffer_len;
int distance = cd->distance;
if (cd->flags & DETECT_CONTENT_DISTANCE) {
if (cd->flags & DETECT_CONTENT_DISTANCE_BE) {
distance = det_ctx->bj_values[cd->distance];
}
if (distance < 0 && (uint32_t)(abs(distance)) > offset)
offset = 0;
else
offset += distance;
SCLogDebug("cd->distance %"PRIi32", offset %"PRIu32", depth %"PRIu32,
distance, offset, depth);
}
if (cd->flags & DETECT_CONTENT_WITHIN) {
if (cd->flags & DETECT_CONTENT_WITHIN_BE) {
if ((int32_t)depth > (int32_t)(prev_buffer_offset + det_ctx->bj_values[cd->within] + distance)) {
depth = prev_buffer_offset + det_ctx->bj_values[cd->within] + distance;
}
} else {
if ((int32_t)depth > (int32_t)(prev_buffer_offset + cd->within + distance)) {
depth = prev_buffer_offset + cd->within + distance;
}
SCLogDebug("cd->within %"PRIi32", det_ctx->buffer_offset %"PRIu32", depth %"PRIu32,
cd->within, prev_buffer_offset, depth);
}
if (stream_start_offset != 0 && prev_buffer_offset == 0) {
if (depth <= stream_start_offset) {
goto no_match;
} else if (depth >= (stream_start_offset + buffer_len)) {
;
} else {
depth = depth - stream_start_offset;
}
}
}
if (cd->flags & DETECT_CONTENT_DEPTH_BE) {
if ((det_ctx->bj_values[cd->depth] + prev_buffer_offset) < depth) {
depth = prev_buffer_offset + det_ctx->bj_values[cd->depth];
}
} else {
if (cd->depth != 0) {
if ((cd->depth + prev_buffer_offset) < depth) {
depth = prev_buffer_offset + cd->depth;
}
SCLogDebug("cd->depth %"PRIu32", depth %"PRIu32, cd->depth, depth);
}
}
if (cd->flags & DETECT_CONTENT_OFFSET_BE) {
if (det_ctx->bj_values[cd->offset] > offset)
offset = det_ctx->bj_values[cd->offset];
} else {
if (cd->offset > offset) {
offset = cd->offset;
SCLogDebug("setting offset %"PRIu32, offset);
}
}
} else { /* implied no relative matches */
/* set depth */
if (cd->flags & DETECT_CONTENT_DEPTH_BE) {
depth = det_ctx->bj_values[cd->depth];
} else {
if (cd->depth != 0) {
depth = cd->depth;
}
}
if (stream_start_offset != 0 && cd->flags & DETECT_CONTENT_DEPTH) {
if (depth <= stream_start_offset) {
goto no_match;
} else if (depth >= (stream_start_offset + buffer_len)) {
;
} else {
depth = depth - stream_start_offset;
}
}
/* set offset */
if (cd->flags & DETECT_CONTENT_OFFSET_BE)
offset = det_ctx->bj_values[cd->offset];
else
offset = cd->offset;
prev_buffer_offset = 0;
}
/* update offset with prev_offset if we're searching for
             * matches after the first occurrence. */
SCLogDebug("offset %"PRIu32", prev_offset %"PRIu32, offset, prev_offset);
if (prev_offset != 0)
offset = prev_offset;
SCLogDebug("offset %"PRIu32", depth %"PRIu32, offset, depth);
if (depth > buffer_len)
depth = buffer_len;
/* if offset is bigger than depth we can never match on a pattern.
* We can however, "match" on a negated pattern. */
if (offset > depth || depth == 0) {
if (cd->flags & DETECT_CONTENT_NEGATED) {
goto match;
} else {
goto no_match;
}
}
uint8_t *sbuffer = buffer + offset;
uint32_t sbuffer_len = depth - offset;
uint32_t match_offset = 0;
SCLogDebug("sbuffer_len %"PRIu32, sbuffer_len);
#ifdef DEBUG
BUG_ON(sbuffer_len > buffer_len);
#endif
/* \todo Add another optimization here. If cd->content_len is
               * greater than sbuffer_len, found is NULL anyway */
/* do the actual search */
found = SpmScan(cd->spm_ctx, det_ctx->spm_thread_ctx, sbuffer,
sbuffer_len);
/* next we evaluate the result in combination with the
* negation flag. */
SCLogDebug("found %p cd negated %s", found, cd->flags & DETECT_CONTENT_NEGATED ? "true" : "false");
if (found == NULL && !(cd->flags & DETECT_CONTENT_NEGATED)) {
goto no_match;
} else if (found == NULL && (cd->flags & DETECT_CONTENT_NEGATED)) {
goto match;
} else if (found != NULL && (cd->flags & DETECT_CONTENT_NEGATED)) {
SCLogDebug("content %"PRIu32" matched at offset %"PRIu32", but negated so no match", cd->id, match_offset);
/* don't bother carrying recursive matches now, for preceding
* relative keywords */
if (DETECT_CONTENT_IS_SINGLE(cd))
det_ctx->discontinue_matching = 1;
goto no_match;
} else {
match_offset = (uint32_t)((found - buffer) + cd->content_len);
SCLogDebug("content %"PRIu32" matched at offset %"PRIu32"", cd->id, match_offset);
det_ctx->buffer_offset = match_offset;
/* Match branch, add replace to the list if needed */
if (cd->flags & DETECT_CONTENT_REPLACE) {
if (inspection_mode == DETECT_ENGINE_CONTENT_INSPECTION_MODE_PAYLOAD) {
/* we will need to replace content if match is confirmed */
det_ctx->replist = DetectReplaceAddToList(det_ctx->replist, found, cd);
} else {
SCLogWarning(SC_ERR_INVALID_VALUE, "Can't modify payload without packet");
}
}
if (!(cd->flags & DETECT_CONTENT_RELATIVE_NEXT)) {
SCLogDebug("no relative match coming up, so this is a match");
goto match;
}
/* bail out if we have no next match. Technically this is an
* error, as the current cd has the DETECT_CONTENT_RELATIVE_NEXT
* flag set. */
if (smd->is_last) {
goto no_match;
}
SCLogDebug("content %"PRIu32, cd->id);
KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
/* see if the next buffer keywords match. If not, we will
                 * search for another occurrence of this content and see
                 * if the others match, until we run out of matches */
int r = DetectEngineContentInspection(de_ctx, det_ctx, s, smd+1,
f, buffer, buffer_len, stream_start_offset, inspection_mode, data);
if (r == 1) {
SCReturnInt(1);
}
if (det_ctx->discontinue_matching)
goto no_match;
/* set the previous match offset to the start of this match + 1 */
prev_offset = (match_offset - (cd->content_len - 1));
SCLogDebug("trying to see if there is another match after prev_offset %"PRIu32, prev_offset);
}
} while(1);
} else if (smd->type == DETECT_ISDATAAT) {
SCLogDebug("inspecting isdataat");
DetectIsdataatData *id = (DetectIsdataatData *)smd->ctx;
if (id->flags & ISDATAAT_RELATIVE) {
if (det_ctx->buffer_offset + id->dataat > buffer_len) {
SCLogDebug("det_ctx->buffer_offset + id->dataat %"PRIu32" > %"PRIu32, det_ctx->buffer_offset + id->dataat, buffer_len);
if (id->flags & ISDATAAT_NEGATED)
goto match;
goto no_match;
} else {
SCLogDebug("relative isdataat match");
if (id->flags & ISDATAAT_NEGATED)
goto no_match;
goto match;
}
} else {
if (id->dataat < buffer_len) {
SCLogDebug("absolute isdataat match");
if (id->flags & ISDATAAT_NEGATED)
goto no_match;
goto match;
} else {
SCLogDebug("absolute isdataat mismatch, id->isdataat %"PRIu32", buffer_len %"PRIu32"", id->dataat, buffer_len);
if (id->flags & ISDATAAT_NEGATED)
goto match;
goto no_match;
}
}
} else if (smd->type == DETECT_PCRE) {
SCLogDebug("inspecting pcre");
DetectPcreData *pe = (DetectPcreData *)smd->ctx;
uint32_t prev_buffer_offset = det_ctx->buffer_offset;
uint32_t prev_offset = 0;
int r = 0;
det_ctx->pcre_match_start_offset = 0;
do {
Packet *p = NULL;
if (inspection_mode == DETECT_ENGINE_CONTENT_INSPECTION_MODE_PAYLOAD)
p = (Packet *)data;
r = DetectPcrePayloadMatch(det_ctx, s, smd, p, f,
buffer, buffer_len);
if (r == 0) {
goto no_match;
}
if (!(pe->flags & DETECT_PCRE_RELATIVE_NEXT)) {
SCLogDebug("no relative match coming up, so this is a match");
goto match;
}
KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
/* save it, in case we need to do a pcre match once again */
prev_offset = det_ctx->pcre_match_start_offset;
/* see if the next payload keywords match. If not, we will
             * search for another occurrence of this pcre and see
* if the others match, until we run out of matches */
r = DetectEngineContentInspection(de_ctx, det_ctx, s, smd+1,
f, buffer, buffer_len, stream_start_offset, inspection_mode, data);
if (r == 1) {
SCReturnInt(1);
}
if (det_ctx->discontinue_matching)
goto no_match;
det_ctx->buffer_offset = prev_buffer_offset;
det_ctx->pcre_match_start_offset = prev_offset;
} while (1);
} else if (smd->type == DETECT_BYTETEST) {
DetectBytetestData *btd = (DetectBytetestData *)smd->ctx;
uint8_t flags = btd->flags;
int32_t offset = btd->offset;
uint64_t value = btd->value;
if (flags & DETECT_BYTETEST_OFFSET_BE) {
offset = det_ctx->bj_values[offset];
}
if (flags & DETECT_BYTETEST_VALUE_BE) {
value = det_ctx->bj_values[value];
}
/* if we have dce enabled we will have to use the endianness
* specified by the dce header */
if (flags & DETECT_BYTETEST_DCE && data != NULL) {
DCERPCState *dcerpc_state = (DCERPCState *)data;
/* enable the endianness flag temporarily. once we are done
* processing we reset the flags to the original value*/
flags |= ((dcerpc_state->dcerpc.dcerpchdr.packed_drep[0] & 0x10) ?
DETECT_BYTETEST_LITTLE: 0);
}
if (DetectBytetestDoMatch(det_ctx, s, smd->ctx, buffer, buffer_len, flags,
offset, value) != 1) {
goto no_match;
}
goto match;
} else if (smd->type == DETECT_BYTEJUMP) {
DetectBytejumpData *bjd = (DetectBytejumpData *)smd->ctx;
uint8_t flags = bjd->flags;
int32_t offset = bjd->offset;
if (flags & DETECT_BYTEJUMP_OFFSET_BE) {
offset = det_ctx->bj_values[offset];
}
/* if we have dce enabled we will have to use the endianness
* specified by the dce header */
if (flags & DETECT_BYTEJUMP_DCE && data != NULL) {
DCERPCState *dcerpc_state = (DCERPCState *)data;
/* enable the endianness flag temporarily. once we are done
* processing we reset the flags to the original value*/
flags |= ((dcerpc_state->dcerpc.dcerpchdr.packed_drep[0] & 0x10) ?
DETECT_BYTEJUMP_LITTLE: 0);
}
if (DetectBytejumpDoMatch(det_ctx, s, smd->ctx, buffer, buffer_len,
flags, offset) != 1) {
goto no_match;
}
goto match;
} else if (smd->type == DETECT_BYTE_EXTRACT) {
DetectByteExtractData *bed = (DetectByteExtractData *)smd->ctx;
uint8_t endian = bed->endian;
/* if we have dce enabled we will have to use the endianness
* specified by the dce header */
if ((bed->flags & DETECT_BYTE_EXTRACT_FLAG_ENDIAN) &&
endian == DETECT_BYTE_EXTRACT_ENDIAN_DCE && data != NULL) {
DCERPCState *dcerpc_state = (DCERPCState *)data;
/* enable the endianness flag temporarily. once we are done
* processing we reset the flags to the original value*/
endian |= ((dcerpc_state->dcerpc.dcerpchdr.packed_drep[0] == 0x10) ?
DETECT_BYTE_EXTRACT_ENDIAN_LITTLE : DETECT_BYTE_EXTRACT_ENDIAN_BIG);
}
if (DetectByteExtractDoMatch(det_ctx, smd, s, buffer,
buffer_len,
&det_ctx->bj_values[bed->local_id],
endian) != 1) {
goto no_match;
}
goto match;
/* we should never get here, but bail out just in case */
} else if (smd->type == DETECT_AL_URILEN) {
SCLogDebug("inspecting uri len");
int r = 0;
DetectUrilenData *urilend = (DetectUrilenData *) smd->ctx;
switch (urilend->mode) {
case DETECT_URILEN_EQ:
if (buffer_len == urilend->urilen1)
r = 1;
break;
case DETECT_URILEN_LT:
if (buffer_len < urilend->urilen1)
r = 1;
break;
case DETECT_URILEN_GT:
if (buffer_len > urilend->urilen1)
r = 1;
break;
case DETECT_URILEN_RA:
if (buffer_len > urilend->urilen1 &&
buffer_len < urilend->urilen2) {
r = 1;
}
break;
}
if (r == 1) {
goto match;
}
det_ctx->discontinue_matching = 0;
goto no_match;
#ifdef HAVE_LUA
}
else if (smd->type == DETECT_LUA) {
SCLogDebug("lua starting");
if (DetectLuaMatchBuffer(det_ctx, s, smd, buffer, buffer_len,
det_ctx->buffer_offset, f) != 1)
{
SCLogDebug("lua no_match");
goto no_match;
}
SCLogDebug("lua match");
goto match;
#endif /* HAVE_LUA */
} else if (smd->type == DETECT_BASE64_DECODE) {
if (DetectBase64DecodeDoMatch(det_ctx, s, smd, buffer, buffer_len)) {
if (s->sm_arrays[DETECT_SM_LIST_BASE64_DATA] != NULL) {
KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
if (DetectBase64DataDoMatch(de_ctx, det_ctx, s, f)) {
/* Base64 is a terminal list. */
goto final_match;
}
}
}
} else {
SCLogDebug("sm->type %u", smd->type);
#ifdef DEBUG
BUG_ON(1);
#endif
}
no_match:
KEYWORD_PROFILING_END(det_ctx, smd->type, 0);
SCReturnInt(0);
match:
/* this sigmatch matched, inspect the next one. If it was the last,
* the buffer portion of the signature matched. */
if (!smd->is_last) {
KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
int r = DetectEngineContentInspection(de_ctx, det_ctx, s, smd+1,
f, buffer, buffer_len, stream_start_offset, inspection_mode, data);
SCReturnInt(r);
}
final_match:
KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
SCReturnInt(1);
}
| 180,893 | 2,437 |
254926862513297039733671762108012735541
| null | null | null |
|
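The inspection routine above caps its own recursion with a per-thread counter checked against a configured limit. As a hedged, generic sketch of that guard — illustrative names only, no Suricata API — every recursive step bumps a depth counter and gives up once the ceiling is reached, so crafted input cannot drive unbounded recursion.

#include <stddef.h>

#define MAX_RECURSION_DEPTH 3000   /* illustrative ceiling */

struct match_ctx {
    int depth;                      /* incremented on every recursive call */
};

/* Returns 1 on match, 0 on no match or when the depth ceiling is hit. */
static int inspect(struct match_ctx *ctx, const unsigned char *buf, size_t len)
{
    if (++ctx->depth >= MAX_RECURSION_DEPTH)
        return 0;                   /* refuse to recurse any deeper */
    if (len == 0)
        return 0;
    if (buf[0] == 0x90)             /* stand-in for a real content match */
        return 1;
    return inspect(ctx, buf + 1, len - 1);
}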
linux
|
ac64115a66c18c01745bbd3c47a36b124e5fd8c0
| 1 |
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
/* Assume we're using HV mode when the HV module is loaded */
int hv_enabled = kvmppc_hv_ops ? 1 : 0;
if (kvm) {
/*
* Hooray - we know which VM type we're running on. Depend on
* that rather than the guess above.
*/
hv_enabled = is_kvmppc_hv_enabled(kvm);
}
switch (ext) {
#ifdef CONFIG_BOOKE
case KVM_CAP_PPC_BOOKE_SREGS:
case KVM_CAP_PPC_BOOKE_WATCHDOG:
case KVM_CAP_PPC_EPR:
#else
case KVM_CAP_PPC_SEGSTATE:
case KVM_CAP_PPC_HIOR:
case KVM_CAP_PPC_PAPR:
#endif
case KVM_CAP_PPC_UNSET_IRQ:
case KVM_CAP_PPC_IRQ_LEVEL:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_ONE_REG:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_IMMEDIATE_EXIT:
r = 1;
break;
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB:
#endif
/* We support this only for PR */
r = !hv_enabled;
break;
#ifdef CONFIG_KVM_MPIC
case KVM_CAP_IRQ_MPIC:
r = 1;
break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_SPAPR_TCE_64:
/* fallthrough */
case KVM_CAP_SPAPR_TCE_VFIO:
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
case KVM_CAP_IRQ_XICS:
#endif
r = 1;
break;
case KVM_CAP_PPC_ALLOC_HTAB:
r = hv_enabled;
break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_SMT:
r = 0;
if (kvm) {
if (kvm->arch.emul_smt_mode > 1)
r = kvm->arch.emul_smt_mode;
else
r = kvm->arch.smt_mode;
} else if (hv_enabled) {
if (cpu_has_feature(CPU_FTR_ARCH_300))
r = 1;
else
r = threads_per_subcore;
}
break;
case KVM_CAP_PPC_SMT_POSSIBLE:
r = 1;
if (hv_enabled) {
if (!cpu_has_feature(CPU_FTR_ARCH_300))
r = ((threads_per_subcore << 1) - 1);
else
/* P9 can emulate dbells, so allow any mode */
r = 8 | 4 | 2 | 1;
}
break;
case KVM_CAP_PPC_RMA:
r = 0;
break;
case KVM_CAP_PPC_HWRNG:
r = kvmppc_hwrng_present();
break;
case KVM_CAP_PPC_MMU_RADIX:
r = !!(hv_enabled && radix_enabled());
break;
case KVM_CAP_PPC_MMU_HASH_V3:
r = !!(hv_enabled && !radix_enabled() &&
cpu_has_feature(CPU_FTR_ARCH_300));
break;
#endif
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
r = 1;
#else
r = 0;
#endif
break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_HTAB_FD:
r = hv_enabled;
break;
#endif
case KVM_CAP_NR_VCPUS:
/*
* Recommending a number of CPUs is somewhat arbitrary; we
* return the number of present CPUs for -HV (since a host
* will have secondary threads "offline"), and for other KVM
* implementations just count online CPUs.
*/
if (hv_enabled)
r = num_present_cpus();
else
r = num_online_cpus();
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_PPC_GET_SMMU_INFO:
r = 1;
break;
case KVM_CAP_SPAPR_MULTITCE:
r = 1;
break;
case KVM_CAP_SPAPR_RESIZE_HPT:
/* Disable this on POWER9 until code handles new HPTE format */
r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_FWNMI:
r = hv_enabled;
break;
#endif
case KVM_CAP_PPC_HTM:
r = cpu_has_feature(CPU_FTR_TM_COMP) &&
is_kvmppc_hv_enabled(kvm);
break;
default:
r = 0;
break;
}
return r;
}
|
CWE-476
| 180,896 | 2,438 |
78468150303237020119497747580724675131
| null | null | null |
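On the CWE-476 tag above: a capability query of this kind may run before any VM object exists, so a pointer that is legitimately NULL on that path must be guarded before use. The sketch below, with invented names (struct session, session_feature), shows the guard-and-default shape; it is not the actual KVM change.

#include <stddef.h>

struct session {
    int hv_mode;
};

/* Returns the feature value; tolerates being asked before a session exists. */
static int session_feature(const struct session *s)
{
    if (s == NULL)          /* the caller may probe capabilities with no session */
        return 0;           /* conservative default instead of a NULL dereference */
    return s->hv_mode ? 1 : 0;
}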
ImageMagick
|
9fd10cf630832b36a588c1545d8736539b2f1fb5
| 1 |
static Image *ReadGIFImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define BitSet(byte,bit) (((byte) & (bit)) == (bit))
#define LSBFirstOrder(x,y) (((y) << 8) | (x))
Image
*image,
*meta_image;
int
number_extensionss=0;
MagickBooleanType
status;
RectangleInfo
page;
register ssize_t
i;
register unsigned char
*p;
size_t
delay,
dispose,
duration,
global_colors,
image_count,
iterations,
one;
ssize_t
count,
opacity;
unsigned char
background,
c,
flag,
*global_colormap,
buffer[257];
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Determine if this a GIF file.
*/
count=ReadBlob(image,6,buffer);
if ((count != 6) || ((LocaleNCompare((char *) buffer,"GIF87",5) != 0) &&
(LocaleNCompare((char *) buffer,"GIF89",5) != 0)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
page.width=ReadBlobLSBShort(image);
page.height=ReadBlobLSBShort(image);
flag=(unsigned char) ReadBlobByte(image);
background=(unsigned char) ReadBlobByte(image);
c=(unsigned char) ReadBlobByte(image); /* reserved */
one=1;
global_colors=one << (((size_t) flag & 0x07)+1);
global_colormap=(unsigned char *) AcquireQuantumMemory((size_t)
MagickMax(global_colors,256),3UL*sizeof(*global_colormap));
if (global_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (BitSet((int) flag,0x80) != 0)
{
count=ReadBlob(image,(size_t) (3*global_colors),global_colormap);
if (count != (ssize_t) (3*global_colors))
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
}
delay=0;
dispose=0;
duration=0;
iterations=1;
opacity=(-1);
image_count=0;
meta_image=AcquireImage(image_info,exception); /* metadata container */
for ( ; ; )
{
count=ReadBlob(image,1,&c);
if (count != 1)
break;
if (c == (unsigned char) ';')
break; /* terminator */
if (c == (unsigned char) '!')
{
/*
GIF Extension block.
*/
count=ReadBlob(image,1,&c);
if (count != 1)
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
meta_image=DestroyImage(meta_image);
ThrowReaderException(CorruptImageError,
"UnableToReadExtensionBlock");
}
switch (c)
{
case 0xf9:
{
/*
Read graphics control extension.
*/
while (ReadBlobBlock(image,buffer) != 0) ;
dispose=(size_t) (buffer[0] >> 2);
delay=(size_t) ((buffer[2] << 8) | buffer[1]);
if ((ssize_t) (buffer[0] & 0x01) == 0x01)
opacity=(ssize_t) buffer[3];
break;
}
case 0xfe:
{
char
*comments;
size_t
length;
/*
Read comment extension.
*/
comments=AcquireString((char *) NULL);
for (length=0; ; length+=count)
{
count=(ssize_t) ReadBlobBlock(image,buffer);
if (count == 0)
break;
buffer[count]='\0';
(void) ConcatenateString(&comments,(const char *) buffer);
}
(void) SetImageProperty(meta_image,"comment",comments,exception);
comments=DestroyString(comments);
break;
}
case 0xff:
{
MagickBooleanType
loop;
/*
Read Netscape Loop extension.
*/
loop=MagickFalse;
if (ReadBlobBlock(image,buffer) != 0)
loop=LocaleNCompare((char *) buffer,"NETSCAPE2.0",11) == 0 ?
MagickTrue : MagickFalse;
if (loop != MagickFalse)
{
while (ReadBlobBlock(image,buffer) != 0)
iterations=(size_t) ((buffer[2] << 8) | buffer[1]);
break;
}
else
{
char
name[MagickPathExtent];
int
block_length,
info_length,
reserved_length;
MagickBooleanType
i8bim,
icc,
iptc,
magick;
StringInfo
*profile;
unsigned char
*info;
/*
Store GIF application extension as a generic profile.
*/
icc=LocaleNCompare((char *) buffer,"ICCRGBG1012",11) == 0 ?
MagickTrue : MagickFalse;
magick=LocaleNCompare((char *) buffer,"ImageMagick",11) == 0 ?
MagickTrue : MagickFalse;
i8bim=LocaleNCompare((char *) buffer,"MGK8BIM0000",11) == 0 ?
MagickTrue : MagickFalse;
iptc=LocaleNCompare((char *) buffer,"MGKIPTC0000",11) == 0 ?
MagickTrue : MagickFalse;
number_extensionss++;
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading GIF application extension");
info=(unsigned char *) AcquireQuantumMemory(255UL,
sizeof(*info));
if (info == (unsigned char *) NULL)
{
meta_image=DestroyImage(meta_image);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
reserved_length=255;
for (info_length=0; ; )
{
block_length=(int) ReadBlobBlock(image,&info[info_length]);
if (block_length == 0)
break;
info_length+=block_length;
if (info_length > (reserved_length-255))
{
reserved_length+=4096;
info=(unsigned char *) ResizeQuantumMemory(info,(size_t)
reserved_length,sizeof(*info));
if (info == (unsigned char *) NULL)
{
meta_image=DestroyImage(meta_image);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
}
}
profile=BlobToStringInfo(info,(size_t) info_length);
if (profile == (StringInfo *) NULL)
{
meta_image=DestroyImage(meta_image);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
if (i8bim != MagickFalse)
(void) CopyMagickString(name,"8bim",sizeof(name));
else if (icc != MagickFalse)
(void) CopyMagickString(name,"icc",sizeof(name));
else if (iptc != MagickFalse)
(void) CopyMagickString(name,"iptc",sizeof(name));
else if (magick != MagickFalse)
{
(void) CopyMagickString(name,"magick",sizeof(name));
meta_image->gamma=StringToDouble((char *) info+6,
(char **) NULL);
}
else
(void) FormatLocaleString(name,sizeof(name),"gif:%.11s",
buffer);
info=(unsigned char *) RelinquishMagickMemory(info);
if (magick == MagickFalse)
(void) SetImageProfile(meta_image,name,profile,exception);
profile=DestroyStringInfo(profile);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" profile name=%s",name);
}
break;
}
default:
{
while (ReadBlobBlock(image,buffer) != 0) ;
break;
}
}
}
if (c != (unsigned char) ',')
continue;
if (image_count != 0)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
}
image_count++;
/*
Read image attributes.
*/
meta_image->scene=image->scene;
(void) CloneImageProperties(image,meta_image);
DestroyImageProperties(meta_image);
(void) CloneImageProfiles(image,meta_image);
DestroyImageProfiles(meta_image);
image->storage_class=PseudoClass;
image->compression=LZWCompression;
page.x=(ssize_t) ReadBlobLSBShort(image);
page.y=(ssize_t) ReadBlobLSBShort(image);
image->columns=ReadBlobLSBShort(image);
image->rows=ReadBlobLSBShort(image);
image->depth=8;
flag=(unsigned char) ReadBlobByte(image);
image->interlace=BitSet((int) flag,0x40) != 0 ? GIFInterlace : NoInterlace;
image->colors=BitSet((int) flag,0x80) == 0 ? global_colors : one <<
((size_t) (flag & 0x07)+1);
if (opacity >= (ssize_t) image->colors)
opacity=(-1);
image->page.width=page.width;
image->page.height=page.height;
image->page.y=page.y;
image->page.x=page.x;
image->delay=delay;
image->ticks_per_second=100;
image->dispose=(DisposeType) dispose;
image->iterations=iterations;
image->alpha_trait=opacity >= 0 ? BlendPixelTrait : UndefinedPixelTrait;
delay=0;
dispose=0;
if ((image->columns == 0) || (image->rows == 0))
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
meta_image=DestroyImage(meta_image);
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
}
/*
      Initialize colormap.
*/
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
meta_image=DestroyImage(meta_image);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
if (BitSet((int) flag,0x80) == 0)
{
/*
Use global colormap.
*/
p=global_colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=(double) ScaleCharToQuantum(*p++);
image->colormap[i].green=(double) ScaleCharToQuantum(*p++);
image->colormap[i].blue=(double) ScaleCharToQuantum(*p++);
if (i == opacity)
{
image->colormap[i].alpha=(double) TransparentAlpha;
image->transparent_color=image->colormap[opacity];
}
}
image->background_color=image->colormap[MagickMin((ssize_t) background,
(ssize_t) image->colors-1)];
}
else
{
unsigned char
*colormap;
/*
Read local colormap.
*/
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,3*
sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
meta_image=DestroyImage(meta_image);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
count=ReadBlob(image,(3*image->colors)*sizeof(*colormap),colormap);
if (count != (ssize_t) (3*image->colors))
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
meta_image=DestroyImage(meta_image);
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
}
p=colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=(double) ScaleCharToQuantum(*p++);
image->colormap[i].green=(double) ScaleCharToQuantum(*p++);
image->colormap[i].blue=(double) ScaleCharToQuantum(*p++);
if (i == opacity)
image->colormap[i].alpha=(double) TransparentAlpha;
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
if (image->gamma == 1.0)
{
for (i=0; i < (ssize_t) image->colors; i++)
if (IsPixelInfoGray(image->colormap+i) == MagickFalse)
break;
(void) SetImageColorspace(image,i == (ssize_t) image->colors ?
GRAYColorspace : RGBColorspace,exception);
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Decode image.
*/
if (image_info->ping != MagickFalse)
status=PingGIFImage(image,exception);
else
status=DecodeImage(image,opacity,exception);
if ((image_info->ping == MagickFalse) && (status == MagickFalse))
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
meta_image=DestroyImage(meta_image);
ThrowReaderException(CorruptImageError,"CorruptImage");
}
duration+=image->delay*image->iterations;
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
opacity=(-1);
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) image->scene-
1,image->scene);
if (status == MagickFalse)
break;
}
image->duration=duration;
meta_image=DestroyImage(meta_image);
global_colormap=(unsigned char *) RelinquishMagickMemory(global_colormap);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
|
CWE-200
| 180,897 | 2,439 |
49173632362171761388319546200503138873
| null | null | null |
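A hedged note on the CWE-200 label above: when a decoder reads length-prefixed sub-blocks into a reusable scratch buffer, only the bytes actually read in the current block are meaningful, and leftovers from earlier blocks must not be interpreted. The sketch uses stdio and an invented name (read_block); it is not ImageMagick's blob API.

#include <stdio.h>
#include <string.h>

/*
 * Read one length-prefixed sub-block into buf (at least 256 bytes).
 * The scratch buffer is cleared first, so stale data from a previous
 * block can never be mistaken for freshly read content.
 */
static size_t read_block(FILE *f, unsigned char *buf)
{
    int len = fgetc(f);

    memset(buf, 0, 256);                 /* no leftovers from earlier blocks */
    if (len <= 0)
        return 0;                        /* EOF or terminator block */
    return fread(buf, 1, (size_t)len, f); /* caller trusts only the returned count */
}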
linux
|
71105998845fb012937332fe2e806d443c09e026
| 1 |
static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
{
struct snd_seq_port_info *info = arg;
struct snd_seq_client_port *port;
struct snd_seq_port_callback *callback;
	/* it is not allowed to create a port for another client */
if (info->addr.client != client->number)
return -EPERM;
port = snd_seq_create_port(client, (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info->addr.port : -1);
if (port == NULL)
return -ENOMEM;
if (client->type == USER_CLIENT && info->kernel) {
snd_seq_delete_port(client, port->addr.port);
return -EINVAL;
}
if (client->type == KERNEL_CLIENT) {
if ((callback = info->kernel) != NULL) {
if (callback->owner)
port->owner = callback->owner;
port->private_data = callback->private_data;
port->private_free = callback->private_free;
port->event_input = callback->event_input;
port->c_src.open = callback->subscribe;
port->c_src.close = callback->unsubscribe;
port->c_dest.open = callback->use;
port->c_dest.close = callback->unuse;
}
}
info->addr = port->addr;
snd_seq_set_port_info(port, info);
snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
return 0;
}
|
CWE-416
| 180,900 | 2,442 |
335970720208132724311839258626111729110
| null | null | null |
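For the CWE-416 tag above, one common shape of the bug is publishing an object to a shared structure before it is fully initialized, or tearing it down on an error path after other threads can already reach it. The sketch below (plain C with pthreads, illustrative names port_create and port_list) shows the initialize-then-publish ordering under a lock; it is not the ALSA sequencer fix.

#include <pthread.h>
#include <stdlib.h>

struct port {
    struct port *next;
    int number;
    void (*event_input)(void *);
};

static struct port *port_list;                 /* shared, guarded by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Finish every field of the new object before it becomes reachable from
 * the shared list; once published, other threads may use it immediately,
 * and an error path must not free what they can already see.
 */
static struct port *port_create(int number, void (*cb)(void *))
{
    struct port *p = calloc(1, sizeof(*p));
    if (!p)
        return NULL;
    p->number = number;
    p->event_input = cb;                        /* fully set up first ... */

    pthread_mutex_lock(&list_lock);
    p->next = port_list;                        /* ... then publish atomically */
    port_list = p;
    pthread_mutex_unlock(&list_lock);
    return p;
}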
linux
|
71105998845fb012937332fe2e806d443c09e026
| 1 |
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
int port)
{
unsigned long flags;
struct snd_seq_client_port *new_port, *p;
int num = -1;
/* sanity check */
if (snd_BUG_ON(!client))
return NULL;
if (client->num_ports >= SNDRV_SEQ_MAX_PORTS) {
pr_warn("ALSA: seq: too many ports for client %d\n", client->number);
return NULL;
}
/* create a new port */
new_port = kzalloc(sizeof(*new_port), GFP_KERNEL);
if (!new_port)
return NULL; /* failure, out of memory */
/* init port data */
new_port->addr.client = client->number;
new_port->addr.port = -1;
new_port->owner = THIS_MODULE;
sprintf(new_port->name, "port-%d", num);
snd_use_lock_init(&new_port->use_lock);
port_subs_info_init(&new_port->c_src);
port_subs_info_init(&new_port->c_dest);
num = port >= 0 ? port : 0;
mutex_lock(&client->ports_mutex);
write_lock_irqsave(&client->ports_lock, flags);
list_for_each_entry(p, &client->ports_list_head, list) {
if (p->addr.port > num)
break;
if (port < 0) /* auto-probe mode */
num = p->addr.port + 1;
}
/* insert the new port */
list_add_tail(&new_port->list, &p->list);
client->num_ports++;
new_port->addr.port = num; /* store the port number in the port */
write_unlock_irqrestore(&client->ports_lock, flags);
mutex_unlock(&client->ports_mutex);
sprintf(new_port->name, "port-%d", num);
return new_port;
}
|
CWE-416
| 180,901 | 2,443 |
153601357183929501043255702282709207630
| null | null | null |
linux
|
94f1bb15bed84ad6c893916b7e7b9db6f1d7eec6
| 1 |
static int rngapi_reset(struct crypto_rng *tfm, const u8 *seed,
unsigned int slen)
{
u8 *buf = NULL;
u8 *src = (u8 *)seed;
int err;
if (slen) {
buf = kmalloc(slen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, seed, slen);
src = buf;
}
err = crypto_old_rng_alg(tfm)->rng_reset(tfm, src, slen);
kzfree(buf);
return err;
}
|
CWE-476
| 180,906 | 2,448 |
261910434330675911398440622191523259704
| null | null | null |
linux
|
df80cd9b28b9ebaa284a41df611dbf3a2d05ca74
| 1 |
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
struct sctp_association *asoc = sctp_id2assoc(sk, id);
struct sctp_sock *sp = sctp_sk(sk);
struct socket *sock;
int err = 0;
if (!asoc)
return -EINVAL;
/* If there is a thread waiting on more sndbuf space for
* sending on this asoc, it cannot be peeled.
*/
if (waitqueue_active(&asoc->wait))
return -EBUSY;
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
if (!sctp_style(sk, UDP))
return -EINVAL;
/* Create a new socket. */
err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
if (err < 0)
return err;
sctp_copy_sock(sock->sk, sk, asoc);
/* Make peeled-off sockets more like 1-1 accepted sockets.
* Set the daddr and initialize id to something more random
*/
sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
/* Populate the fields of the newsk from the oldsk and migrate the
* asoc to the newsk.
*/
sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
*sockp = sock;
return err;
}
|
CWE-416
| 180,908 | 2,450 |
74326315358449763506207584151691485221
| null | null | null |
linux
|
2fae9e5a7babada041e2e161699ade2447a01989
| 1 |
static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id)
{
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
struct lego_usb_tower *dev = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor* endpoint;
struct tower_get_version_reply get_version_reply;
int i;
int retval = -ENOMEM;
int result;
/* allocate memory for our device state and initialize it */
dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->lock);
dev->udev = udev;
dev->open_count = 0;
dev->read_buffer = NULL;
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
spin_lock_init (&dev->read_buffer_lock);
dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
dev->read_last_arrival = jiffies;
init_waitqueue_head (&dev->read_wait);
init_waitqueue_head (&dev->write_wait);
dev->interrupt_in_buffer = NULL;
dev->interrupt_in_endpoint = NULL;
dev->interrupt_in_urb = NULL;
dev->interrupt_in_running = 0;
dev->interrupt_in_done = 0;
dev->interrupt_out_buffer = NULL;
dev->interrupt_out_endpoint = NULL;
dev->interrupt_out_urb = NULL;
dev->interrupt_out_busy = 0;
iface_desc = interface->cur_altsetting;
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (usb_endpoint_xfer_int(endpoint)) {
if (usb_endpoint_dir_in(endpoint))
dev->interrupt_in_endpoint = endpoint;
else
dev->interrupt_out_endpoint = endpoint;
}
}
if(dev->interrupt_in_endpoint == NULL) {
dev_err(idev, "interrupt in endpoint not found\n");
goto error;
}
if (dev->interrupt_out_endpoint == NULL) {
dev_err(idev, "interrupt out endpoint not found\n");
goto error;
}
dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
if (!dev->read_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb)
goto error;
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
/* we can register the device now, as it is ready */
usb_set_intfdata (interface, dev);
retval = usb_register_dev (interface, &tower_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
usb_set_intfdata (interface, NULL);
goto error;
}
dev->minor = interface->minor;
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
"%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
USB_MAJOR, dev->minor);
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
LEGO_USB_TOWER_REQUEST_GET_VERSION,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
&get_version_reply,
sizeof(get_version_reply),
1000);
if (result < 0) {
dev_err(idev, "LEGO USB Tower get version control request failed\n");
retval = result;
goto error;
}
dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
"build %d\n", get_version_reply.major,
get_version_reply.minor,
le16_to_cpu(get_version_reply.build_no));
exit:
return retval;
error:
tower_delete(dev);
return retval;
}
|
CWE-476
| 180,909 | 2,451 |
118264152329955412359409976629846966857
| null | null | null |
ImageMagick
|
ef8f40689ac452398026c07da41656a7c87e4683
| 1 |
static Image *ReadYUVImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*chroma_image,
*image,
*resize_image;
InterlaceType
interlace;
MagickBooleanType
status;
register const Quantum
*chroma_pixels;
register ssize_t
x;
register Quantum
*q;
register unsigned char
*p;
ssize_t
count,
horizontal_factor,
vertical_factor,
y;
size_t
length,
quantum;
unsigned char
*scanline;
/*
Allocate image structure.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
quantum=(ssize_t) (image->depth <= 8 ? 1 : 2);
interlace=image_info->interlace;
horizontal_factor=2;
vertical_factor=2;
if (image_info->sampling_factor != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(image_info->sampling_factor,&geometry_info);
horizontal_factor=(ssize_t) geometry_info.rho;
vertical_factor=(ssize_t) geometry_info.sigma;
if ((flags & SigmaValue) == 0)
vertical_factor=horizontal_factor;
if ((horizontal_factor != 1) && (horizontal_factor != 2) &&
(vertical_factor != 1) && (vertical_factor != 2))
ThrowReaderException(CorruptImageError,"UnexpectedSamplingFactor");
}
if ((interlace == UndefinedInterlace) ||
((interlace == NoInterlace) && (vertical_factor == 2)))
{
interlace=NoInterlace; /* CCIR 4:2:2 */
if (vertical_factor == 2)
interlace=PlaneInterlace; /* CCIR 4:1:1 */
}
if (interlace != PartitionInterlace)
{
/*
Open image file.
*/
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,(MagickSizeType) image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Allocate memory for a scanline.
*/
if (interlace == NoInterlace)
scanline=(unsigned char *) AcquireQuantumMemory((size_t) (2UL*
image->columns+2UL),(size_t) quantum*sizeof(*scanline));
else
scanline=(unsigned char *) AcquireQuantumMemory(image->columns,
(size_t) quantum*sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
status=MagickTrue;
do
{
chroma_image=CloneImage(image,(image->columns+horizontal_factor-1)/
horizontal_factor,(image->rows+vertical_factor-1)/vertical_factor,
MagickTrue,exception);
if (chroma_image == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Convert raster image to pixel packets.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
break;
if (interlace == PartitionInterlace)
{
AppendImageFormat("Y",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*chroma_pixels;
if (interlace == NoInterlace)
{
if ((y > 0) || (GetPreviousImageInList(image) == (Image *) NULL))
{
length=2*quantum*image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
p=scanline;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
chroma_pixels=QueueAuthenticPixels(chroma_image,0,y,
chroma_image->columns,1,exception);
if (chroma_pixels == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x+=2)
{
SetPixelRed(chroma_image,0,chroma_pixels);
if (quantum == 1)
SetPixelGreen(chroma_image,ScaleCharToQuantum(*p++),
chroma_pixels);
else
{
SetPixelGreen(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),chroma_pixels);
p+=2;
}
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
q+=GetPixelChannels(image);
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
if (quantum == 1)
SetPixelBlue(chroma_image,ScaleCharToQuantum(*p++),chroma_pixels);
else
{
SetPixelBlue(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),chroma_pixels);
p+=2;
}
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
chroma_pixels+=GetPixelChannels(chroma_image);
q+=GetPixelChannels(image);
}
}
else
{
if ((y > 0) || (GetPreviousImageInList(image) == (Image *) NULL))
{
length=quantum*image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
p=scanline;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
q+=GetPixelChannels(image);
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (interlace == NoInterlace)
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (interlace == PartitionInterlace)
{
(void) CloseBlob(image);
AppendImageFormat("U",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (interlace != NoInterlace)
{
for (y=0; y < (ssize_t) chroma_image->rows; y++)
{
length=quantum*chroma_image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
p=scanline;
q=QueueAuthenticPixels(chroma_image,0,y,chroma_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) chroma_image->columns; x++)
{
SetPixelRed(chroma_image,0,q);
if (quantum == 1)
SetPixelGreen(chroma_image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelGreen(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),q);
p+=2;
}
SetPixelBlue(chroma_image,0,q);
q+=GetPixelChannels(chroma_image);
}
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
}
if (interlace == PartitionInterlace)
{
(void) CloseBlob(image);
AppendImageFormat("V",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
}
for (y=0; y < (ssize_t) chroma_image->rows; y++)
{
length=quantum*chroma_image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
p=scanline;
q=GetAuthenticPixels(chroma_image,0,y,chroma_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) chroma_image->columns; x++)
{
if (quantum == 1)
SetPixelBlue(chroma_image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelBlue(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),q);
p+=2;
}
q+=GetPixelChannels(chroma_image);
}
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
}
}
/*
Scale image.
*/
resize_image=ResizeImage(chroma_image,image->columns,image->rows,
TriangleFilter,exception);
chroma_image=DestroyImage(chroma_image);
if (resize_image == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
chroma_pixels=GetVirtualPixels(resize_image,0,y,resize_image->columns,1,
exception);
if ((q == (Quantum *) NULL) ||
(chroma_pixels == (const Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(resize_image,chroma_pixels),q);
SetPixelBlue(image,GetPixelBlue(resize_image,chroma_pixels),q);
chroma_pixels+=GetPixelChannels(resize_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
resize_image=DestroyImage(resize_image);
if (SetImageColorspace(image,YCbCrColorspace,exception) == MagickFalse)
break;
if (interlace == PartitionInterlace)
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (interlace == NoInterlace)
count=ReadBlob(image,(size_t) (2*quantum*image->columns),scanline);
else
count=ReadBlob(image,(size_t) quantum*image->columns,scanline);
if (count != 0)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (count != 0);
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
|
CWE-772
| 180,910 | 2,452 |
147391551823955436154635701582525000724
| null | null | null |
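CWE-772 above denotes a missing release of allocated resources on some exit path. A minimal, hedged sketch of the usual C remedy — a single cleanup label that every path funnels through, with an invented name (process_frame) — follows; it is not the ImageMagick change itself.

#include <stdlib.h>
#include <string.h>

/* Every exit path runs the same cleanup, so no allocation can be leaked. */
static int process_frame(size_t columns, size_t rows)
{
    int rc = -1;
    unsigned char *scanline = NULL;
    unsigned char *chroma = NULL;

    scanline = malloc(columns * 2);
    if (scanline == NULL)
        goto out;
    chroma = malloc((columns / 2) * (rows / 2));
    if (chroma == NULL)
        goto out;

    memset(scanline, 0, columns * 2);           /* ... real decoding work ... */
    rc = 0;

out:
    free(chroma);                               /* free(NULL) is a no-op */
    free(scanline);
    return rc;
}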
ImageMagick
|
241988ca28139ad970c1d9717c419f41e360ddb0
| 1 |
static Image *ReadYCBCRImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const unsigned char
*pixels;
Image
*canvas_image,
*image;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register const Quantum
*p;
register ssize_t
i,
x;
register Quantum
*q;
size_t
length;
ssize_t
count,
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
SetImageColorspace(image,YCbCrColorspace,exception);
if (image_info->interlace != PartitionInterlace)
{
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Create virtual canvas to support cropping (i.e. image.rgb[100x100+10+20]).
*/
canvas_image=CloneImage(image,image->extract_info.width,1,MagickFalse,
exception);
(void) SetImageVirtualPixelMethod(canvas_image,BlackVirtualPixelMethod,
exception);
quantum_info=AcquireQuantumInfo(image_info,canvas_image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
quantum_type=RGBQuantum;
if (LocaleCompare(image_info->magick,"YCbCrA") == 0)
{
quantum_type=RGBAQuantum;
image->alpha_trait=BlendPixelTrait;
}
pixels=(const unsigned char *) NULL;
if (image_info->number_scenes != 0)
while (image->scene < image_info->scene)
{
/*
Skip to next image.
*/
image->scene++;
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
break;
}
}
count=0;
length=0;
scene=0;
do
{
/*
Read pixels to virtual canvas image then push to image.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(DestroyImageList(image));
}
SetImageColorspace(image,YCbCrColorspace,exception);
switch (image_info->interlace)
{
case NoInterlace:
default:
{
/*
No interlacing: YCbCrYCbCrYCbCrYCbCrYCbCrYCbCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=QueueAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
break;
}
case LineInterlace:
{
static QuantumType
quantum_types[4] =
{
RedQuantum,
GreenQuantum,
BlueQuantum,
OpacityQuantum
};
/*
Line interlacing: YYY...CbCbCb...CrCrCr...YYY...CbCbCb...CrCrCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
for (i=0; i < (image->alpha_trait != UndefinedPixelTrait ? 4 : 3); i++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
quantum_type=quantum_types[i];
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,
0,canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (quantum_type)
{
case RedQuantum:
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
break;
}
case GreenQuantum:
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
break;
}
case BlueQuantum:
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
break;
}
case OpacityQuantum:
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
break;
}
default:
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PlaneInterlace:
{
/*
Plane interlacing: YYYYYY...CbCbCbCbCbCb...CrCrCrCrCrCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,RedQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,GreenQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,AlphaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
case PartitionInterlace:
{
/*
Partition interlacing: YYYYYY..., CbCbCbCbCbCb..., CrCrCrCrCrCr...
*/
AppendImageFormat("Y",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,RedQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("Cb",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,GreenQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,GreenQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("Cr",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,BlueQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
(void) CloseBlob(image);
AppendImageFormat("A",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,AlphaQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
}
SetQuantumImageType(image,quantum_type);
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (count == (ssize_t) length)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
scene++;
} while (count == (ssize_t) length);
quantum_info=DestroyQuantumInfo(quantum_info);
canvas_image=DestroyImage(canvas_image);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
|
CWE-772
| 180,911 | 2,453 |
326645646455820724744297621007624025134
| null | null | null |
linux
|
3e0097499839e0fe3af380410eababe5a47c4cf9
| 1 |
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
void __user *p = (void __user *)arg;
int __user *ip = p;
int result, val, read_only;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
unsigned long iflags;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
switch (cmd_in) {
case SG_IO:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
(srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
if (atomic_read(&sdp->detaching))
return -ENODEV;
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
write_unlock_irq(&sfp->rq_list_lock);
result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
return (result < 0) ? result : 0;
}
srp->orphan = 1;
write_unlock_irq(&sfp->rq_list_lock);
return result; /* -ERESTARTSYS because signal hit process */
case SG_SET_TIMEOUT:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EIO;
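		/* val is in USER_HZ ticks; clamp it so the conversion to kernel
		 * jiffies below (val * HZ / USER_HZ) cannot overflow an int. */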
if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
INT_MAX);
sfp->timeout_user = val;
sfp->timeout = mult_frac(val, HZ, USER_HZ);
return 0;
case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
/* strange ..., for backward compatibility */
return sfp->timeout_user;
case SG_SET_FORCE_LOW_DMA:
/*
* N.B. This ioctl never worked properly, but failed to
		 * return an error value. So returning '0' to keep compatibility
* with legacy applications.
*/
return 0;
case SG_GET_LOW_DMA:
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
case SG_GET_SCSI_ID:
if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
return -EFAULT;
else {
sg_scsi_id_t __user *sg_idp = p;
if (atomic_read(&sdp->detaching))
return -ENODEV;
__put_user((int) sdp->device->host->host_no,
&sg_idp->host_no);
__put_user((int) sdp->device->channel,
&sg_idp->channel);
__put_user((int) sdp->device->id, &sg_idp->scsi_id);
__put_user((int) sdp->device->lun, &sg_idp->lun);
__put_user((int) sdp->device->type, &sg_idp->scsi_type);
__put_user((short) sdp->device->host->cmd_per_lun,
&sg_idp->h_cmd_per_lun);
__put_user((short) sdp->device->queue_depth,
&sg_idp->d_queue_depth);
__put_user(0, &sg_idp->unused[0]);
__put_user(0, &sg_idp->unused[1]);
return 0;
}
case SG_SET_FORCE_PACK_ID:
result = get_user(val, ip);
if (result)
return result;
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
__put_user(srp->header.pack_id, ip);
return 0;
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
__put_user(-1, ip);
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return put_user(val, ip);
case SG_GET_SG_TABLESIZE:
return put_user(sdp->sg_tablesize, ip);
case SG_SET_RESERVED_SIZE:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
if (sfp->mmap_called ||
sfp->res_in_use) {
mutex_unlock(&sfp->f_mutex);
return -EBUSY;
}
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
max_sectors_bytes(sdp->device->request_queue));
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
if (result)
return result;
sfp->cmd_q = val ? 1 : 0;
return 0;
case SG_GET_COMMAND_Q:
return put_user((int) sfp->cmd_q, ip);
case SG_SET_KEEP_ORPHAN:
result = get_user(val, ip);
if (result)
return result;
sfp->keep_orphan = val;
return 0;
case SG_GET_KEEP_ORPHAN:
return put_user((int) sfp->keep_orphan, ip);
case SG_NEXT_CMD_LEN:
result = get_user(val, ip);
if (result)
return result;
if (val > SG_MAX_CDB_SIZE)
return -ENOMEM;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
return put_user(sg_version_num, ip);
case SG_GET_ACCESS_COUNT:
/* faked - we don't have a real access count anymore */
val = (sdp->device ? 1 : 0);
return put_user(val, ip);
case SG_GET_REQUEST_TABLE:
if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
return -EFAULT;
else {
sg_req_info_t *rinfo;
rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
return result;
}
case SG_EMULATED_HOST:
if (atomic_read(&sdp->detaching))
return -ENODEV;
return put_user(sdp->device->host->hostt->emulated, ip);
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (read_only) {
unsigned char opcode = WRITE_6;
Scsi_Ioctl_Command __user *siocp = p;
if (copy_from_user(&opcode, siocp->data, 1))
return -EFAULT;
if (sg_allow_access(filp, &opcode))
return -EPERM;
}
return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
return result;
sdp->sgdebug = (char) val;
return 0;
case BLKSECTGET:
return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
sdp->disk->disk_name,
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
NULL, p);
case BLKTRACESTART:
return blk_trace_startstop(sdp->device->request_queue, 1);
case BLKTRACESTOP:
return blk_trace_startstop(sdp->device->request_queue, 0);
case BLKTRACETEARDOWN:
return blk_trace_remove(sdp->device->request_queue);
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_TRANSFORM:
case SG_SCSI_RESET:
if (atomic_read(&sdp->detaching))
return -ENODEV;
break;
default:
if (read_only)
return -EPERM; /* don't know so take safe approach */
break;
}
result = scsi_ioctl_block_when_processing_errors(sdp->device,
cmd_in, filp->f_flags & O_NDELAY);
if (result)
return result;
return scsi_ioctl(sdp->device, cmd_in, p);
}
|
CWE-200
| 180,913 | 2,455 |
130633992931209771359171915915220469283
| null | null | null |
weechat
|
f105c6f0b56fb5687b2d2aedf37cb1d1b434d556
| 1 |
logger_get_mask_expanded (struct t_gui_buffer *buffer, const char *mask)
{
char *mask2, *mask_decoded, *mask_decoded2, *mask_decoded3, *mask_decoded4;
char *mask_decoded5;
const char *dir_separator;
int length;
time_t seconds;
struct tm *date_tmp;
mask2 = NULL;
mask_decoded = NULL;
mask_decoded2 = NULL;
mask_decoded3 = NULL;
mask_decoded4 = NULL;
mask_decoded5 = NULL;
dir_separator = weechat_info_get ("dir_separator", "");
if (!dir_separator)
return NULL;
/*
* we first replace directory separator (commonly '/') by \01 because
* buffer mask can contain this char, and will be replaced by replacement
* char ('_' by default)
*/
mask2 = weechat_string_replace (mask, dir_separator, "\01");
if (!mask2)
goto end;
mask_decoded = weechat_buffer_string_replace_local_var (buffer, mask2);
if (!mask_decoded)
goto end;
mask_decoded2 = weechat_string_replace (mask_decoded,
dir_separator,
weechat_config_string (logger_config_file_replacement_char));
if (!mask_decoded2)
goto end;
#ifdef __CYGWIN__
mask_decoded3 = weechat_string_replace (mask_decoded2, "\\",
weechat_config_string (logger_config_file_replacement_char));
#else
mask_decoded3 = strdup (mask_decoded2);
#endif /* __CYGWIN__ */
if (!mask_decoded3)
goto end;
/* restore directory separator */
mask_decoded4 = weechat_string_replace (mask_decoded3,
"\01", dir_separator);
if (!mask_decoded4)
goto end;
/* replace date/time specifiers in mask */
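    /* allow 256 extra bytes for the strftime expansion of the specifiers */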
length = strlen (mask_decoded4) + 256 + 1;
mask_decoded5 = malloc (length);
if (!mask_decoded5)
goto end;
seconds = time (NULL);
date_tmp = localtime (&seconds);
mask_decoded5[0] = '\0';
strftime (mask_decoded5, length - 1, mask_decoded4, date_tmp);
/* convert to lower case? */
if (weechat_config_boolean (logger_config_file_name_lower_case))
weechat_string_tolower (mask_decoded5);
if (weechat_logger_plugin->debug)
{
weechat_printf_date_tags (NULL, 0, "no_log",
"%s: buffer = \"%s\", mask = \"%s\", "
"decoded mask = \"%s\"",
LOGGER_PLUGIN_NAME,
weechat_buffer_get_string (buffer, "name"),
mask, mask_decoded5);
}
end:
if (mask2)
free (mask2);
if (mask_decoded)
free (mask_decoded);
if (mask_decoded2)
free (mask_decoded2);
if (mask_decoded3)
free (mask_decoded3);
if (mask_decoded4)
free (mask_decoded4);
return mask_decoded5;
}
|
CWE-119
| 180,917 | 2,458 |
35719065491166298989169814145776903181
| null | null | null |
libarchive
|
5562545b5562f6d12a4ef991fae158bf4ccf92b6
| 1 |
read_header(struct archive_read *a, struct archive_entry *entry,
char head_type)
{
const void *h;
const char *p, *endp;
struct rar *rar;
struct rar_header rar_header;
struct rar_file_header file_header;
int64_t header_size;
unsigned filename_size, end;
char *filename;
char *strp;
char packed_size[8];
char unp_size[8];
int ttime;
struct archive_string_conv *sconv, *fn_sconv;
unsigned long crc32_val;
int ret = (ARCHIVE_OK), ret2;
rar = (struct rar *)(a->format->data);
  /* Set up a string conversion object for non-rar-unicode filenames. */
sconv = rar->opt_sconv;
if (sconv == NULL) {
if (!rar->init_default_conversion) {
rar->sconv_default =
archive_string_default_conversion_for_read(
&(a->archive));
rar->init_default_conversion = 1;
}
sconv = rar->sconv_default;
}
if ((h = __archive_read_ahead(a, 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
memcpy(&rar_header, p, sizeof(rar_header));
rar->file_flags = archive_le16dec(rar_header.flags);
header_size = archive_le16dec(rar_header.size);
if (header_size < (int64_t)sizeof(file_header) + 7) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
crc32_val = crc32(0, (const unsigned char *)p + 2, 7 - 2);
__archive_read_consume(a, 7);
if (!(rar->file_flags & FHD_SOLID))
{
rar->compression_method = 0;
rar->packed_size = 0;
rar->unp_size = 0;
rar->mtime = 0;
rar->ctime = 0;
rar->atime = 0;
rar->arctime = 0;
rar->mode = 0;
memset(&rar->salt, 0, sizeof(rar->salt));
rar->atime = 0;
rar->ansec = 0;
rar->ctime = 0;
rar->cnsec = 0;
rar->mtime = 0;
rar->mnsec = 0;
rar->arctime = 0;
rar->arcnsec = 0;
}
else
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR solid archive support unavailable.");
return (ARCHIVE_FATAL);
}
if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
/* File Header CRC check. */
crc32_val = crc32(crc32_val, h, (unsigned)(header_size - 7));
if ((crc32_val & 0xffff) != archive_le16dec(rar_header.crc)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Header CRC error");
return (ARCHIVE_FATAL);
}
/* If no CRC error, Go on parsing File Header. */
p = h;
endp = p + header_size - 7;
memcpy(&file_header, p, sizeof(file_header));
p += sizeof(file_header);
rar->compression_method = file_header.method;
ttime = archive_le32dec(file_header.file_time);
rar->mtime = get_time(ttime);
rar->file_crc = archive_le32dec(file_header.file_crc);
if (rar->file_flags & FHD_PASSWORD)
{
archive_entry_set_is_data_encrypted(entry, 1);
rar->has_encrypted_entries = 1;
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR encryption support unavailable.");
    /* Since it is only the data part itself that is encrypted, we can at least
       extract information about the currently processed entry and don't need
       to return ARCHIVE_FATAL here. */
/*return (ARCHIVE_FATAL);*/
}
if (rar->file_flags & FHD_LARGE)
{
memcpy(packed_size, file_header.pack_size, 4);
memcpy(packed_size + 4, p, 4); /* High pack size */
p += 4;
memcpy(unp_size, file_header.unp_size, 4);
memcpy(unp_size + 4, p, 4); /* High unpack size */
p += 4;
rar->packed_size = archive_le64dec(&packed_size);
rar->unp_size = archive_le64dec(&unp_size);
}
else
{
rar->packed_size = archive_le32dec(file_header.pack_size);
rar->unp_size = archive_le32dec(file_header.unp_size);
}
if (rar->packed_size < 0 || rar->unp_size < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid sizes specified.");
return (ARCHIVE_FATAL);
}
rar->bytes_remaining = rar->packed_size;
/* TODO: RARv3 subblocks contain comments. For now the complete block is
* consumed at the end.
*/
if (head_type == NEWSUB_HEAD) {
size_t distance = p - (const char *)h;
header_size += rar->packed_size;
/* Make sure we have the extended data. */
if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
endp = p + header_size - 7;
p += distance;
}
filename_size = archive_le16dec(file_header.name_size);
if (p + filename_size > endp) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid filename size");
return (ARCHIVE_FATAL);
}
if (rar->filename_allocated < filename_size * 2 + 2) {
char *newptr;
size_t newsize = filename_size * 2 + 2;
newptr = realloc(rar->filename, newsize);
if (newptr == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->filename = newptr;
rar->filename_allocated = newsize;
}
filename = rar->filename;
memcpy(filename, p, filename_size);
filename[filename_size] = '\0';
if (rar->file_flags & FHD_UNICODE)
{
if (filename_size != strlen(filename))
{
unsigned char highbyte, flagbits, flagbyte;
unsigned fn_end, offset;
end = filename_size;
fn_end = filename_size * 2;
filename_size = 0;
offset = (unsigned)strlen(filename) + 1;
highbyte = *(p + offset++);
flagbits = 0;
flagbyte = 0;
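      /* The Unicode name follows the ASCII name: each 2-bit field of flagbyte
       * selects how the next UTF-16BE code unit is built -- low byte only,
       * high byte taken from highbyte, a full 16-bit pair, or a run copied
       * from the already-decoded ASCII part with an optional correction byte. */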
while (offset < end && filename_size < fn_end)
{
if (!flagbits)
{
flagbyte = *(p + offset++);
flagbits = 8;
}
flagbits -= 2;
switch((flagbyte >> flagbits) & 3)
{
case 0:
filename[filename_size++] = '\0';
filename[filename_size++] = *(p + offset++);
break;
case 1:
filename[filename_size++] = highbyte;
filename[filename_size++] = *(p + offset++);
break;
case 2:
filename[filename_size++] = *(p + offset + 1);
filename[filename_size++] = *(p + offset);
offset += 2;
break;
case 3:
{
char extra, high;
uint8_t length = *(p + offset++);
if (length & 0x80) {
extra = *(p + offset++);
high = (char)highbyte;
} else
extra = high = 0;
length = (length & 0x7f) + 2;
while (length && filename_size < fn_end) {
unsigned cp = filename_size >> 1;
filename[filename_size++] = high;
filename[filename_size++] = p[cp] + extra;
length--;
}
}
break;
}
}
if (filename_size > fn_end) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid filename");
return (ARCHIVE_FATAL);
}
filename[filename_size++] = '\0';
filename[filename_size++] = '\0';
/* Decoded unicode form is UTF-16BE, so we have to update a string
* conversion object for it. */
if (rar->sconv_utf16be == NULL) {
rar->sconv_utf16be = archive_string_conversion_from_charset(
&a->archive, "UTF-16BE", 1);
if (rar->sconv_utf16be == NULL)
return (ARCHIVE_FATAL);
}
fn_sconv = rar->sconv_utf16be;
strp = filename;
while (memcmp(strp, "\x00\x00", 2))
{
if (!memcmp(strp, "\x00\\", 2))
*(strp + 1) = '/';
strp += 2;
}
p += offset;
} else {
/*
* If FHD_UNICODE is set but no unicode data, this file name form
* is UTF-8, so we have to update a string conversion object for
* it accordingly.
*/
if (rar->sconv_utf8 == NULL) {
rar->sconv_utf8 = archive_string_conversion_from_charset(
&a->archive, "UTF-8", 1);
if (rar->sconv_utf8 == NULL)
return (ARCHIVE_FATAL);
}
fn_sconv = rar->sconv_utf8;
while ((strp = strchr(filename, '\\')) != NULL)
*strp = '/';
p += filename_size;
}
}
else
{
fn_sconv = sconv;
while ((strp = strchr(filename, '\\')) != NULL)
*strp = '/';
p += filename_size;
}
/* Split file in multivolume RAR. No more need to process header. */
if (rar->filename_save &&
filename_size == rar->filename_save_size &&
!memcmp(rar->filename, rar->filename_save, filename_size + 1))
{
__archive_read_consume(a, header_size - 7);
rar->cursor++;
if (rar->cursor >= rar->nodes)
{
rar->nodes++;
if ((rar->dbo =
realloc(rar->dbo, sizeof(*rar->dbo) * rar->nodes)) == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->dbo[rar->cursor].header_size = header_size;
rar->dbo[rar->cursor].start_offset = -1;
rar->dbo[rar->cursor].end_offset = -1;
}
if (rar->dbo[rar->cursor].start_offset < 0)
{
rar->dbo[rar->cursor].start_offset = a->filter->position;
rar->dbo[rar->cursor].end_offset = rar->dbo[rar->cursor].start_offset +
rar->packed_size;
}
return ret;
}
rar->filename_save = (char*)realloc(rar->filename_save,
filename_size + 1);
memcpy(rar->filename_save, rar->filename, filename_size + 1);
rar->filename_save_size = filename_size;
/* Set info for seeking */
free(rar->dbo);
if ((rar->dbo = calloc(1, sizeof(*rar->dbo))) == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->dbo[0].header_size = header_size;
rar->dbo[0].start_offset = -1;
rar->dbo[0].end_offset = -1;
rar->cursor = 0;
rar->nodes = 1;
if (rar->file_flags & FHD_SALT)
{
if (p + 8 > endp) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
memcpy(rar->salt, p, 8);
p += 8;
}
if (rar->file_flags & FHD_EXTTIME) {
if (read_exttime(p, rar, endp) < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
}
__archive_read_consume(a, header_size - 7);
rar->dbo[0].start_offset = a->filter->position;
rar->dbo[0].end_offset = rar->dbo[0].start_offset + rar->packed_size;
switch(file_header.host_os)
{
case OS_MSDOS:
case OS_OS2:
case OS_WIN32:
rar->mode = archive_le32dec(file_header.file_attr);
if (rar->mode & FILE_ATTRIBUTE_DIRECTORY)
rar->mode = AE_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
else
rar->mode = AE_IFREG;
rar->mode |= S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
break;
case OS_UNIX:
case OS_MAC_OS:
case OS_BEOS:
rar->mode = archive_le32dec(file_header.file_attr);
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unknown file attributes from RAR file's host OS");
return (ARCHIVE_FATAL);
}
rar->bytes_uncopied = rar->bytes_unconsumed = 0;
rar->lzss.position = rar->offset = 0;
rar->offset_seek = 0;
rar->dictionary_size = 0;
rar->offset_outgoing = 0;
rar->br.cache_avail = 0;
rar->br.avail_in = 0;
rar->crc_calculated = 0;
rar->entry_eof = 0;
rar->valid = 1;
rar->is_ppmd_block = 0;
rar->start_new_table = 1;
free(rar->unp_buffer);
rar->unp_buffer = NULL;
rar->unp_offset = 0;
rar->unp_buffer_size = UNP_BUFFER_SIZE;
memset(rar->lengthtable, 0, sizeof(rar->lengthtable));
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc);
rar->ppmd_valid = rar->ppmd_eod = 0;
/* Don't set any archive entries for non-file header types */
if (head_type == NEWSUB_HEAD)
return ret;
archive_entry_set_mtime(entry, rar->mtime, rar->mnsec);
archive_entry_set_ctime(entry, rar->ctime, rar->cnsec);
archive_entry_set_atime(entry, rar->atime, rar->ansec);
archive_entry_set_size(entry, rar->unp_size);
archive_entry_set_mode(entry, rar->mode);
if (archive_entry_copy_pathname_l(entry, filename, filename_size, fn_sconv))
{
if (errno == ENOMEM)
{
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Pathname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(fn_sconv));
ret = (ARCHIVE_WARN);
}
if (((rar->mode) & AE_IFMT) == AE_IFLNK)
{
/* Make sure a symbolic-link file does not have its body. */
rar->bytes_remaining = 0;
archive_entry_set_size(entry, 0);
/* Read a symbolic-link name. */
if ((ret2 = read_symlink_stored(a, entry, sconv)) < (ARCHIVE_WARN))
return ret2;
if (ret > ret2)
ret = ret2;
}
if (rar->bytes_remaining == 0)
rar->entry_eof = 1;
return ret;
}
|
CWE-125
| 180,926 | 2,466 |
235065171922180775195963116640254758392
| null | null | null |
linux
|
edbd58be15a957f6a760c4a514cd475217eb97fd
| 1 |
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct packet_sock *po;
struct sockaddr_ll *sll;
union tpacket_uhdr h;
u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_USER;
unsigned short macoff, netoff, hdrlen;
struct sk_buff *copy_skb = NULL;
struct timespec ts;
__u32 ts_status;
bool is_drop_n_account = false;
/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
* We may add members to them until current aligned size without forcing
* userspace to call getsockopt(..., PACKET_HDRLEN, ...).
*/
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
sk = pt->af_packet_priv;
po = pkt_sk(sk);
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
if (dev->header_ops) {
if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb_mac_header(skb));
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
skb_pull(skb, skb_network_offset(skb));
}
}
snaplen = skb->len;
res = run_filter(skb, sk, snaplen);
if (!res)
goto drop_n_restore;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= TP_STATUS_CSUMNOTREADY;
else if (skb->pkt_type != PACKET_OUTGOING &&
(skb->ip_summed == CHECKSUM_COMPLETE ||
skb_csum_unnecessary(skb)))
status |= TP_STATUS_CSUM_VALID;
if (snaplen > res)
snaplen = res;
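	/* Compute netoff/macoff, the offsets of the network and link-layer
	 * headers inside the ring frame, leaving room for the tpacket header,
	 * the user-configured tp_reserve and, if enabled, a virtio_net_hdr. */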
if (sk->sk_type == SOCK_DGRAM) {
macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
po->tp_reserve;
} else {
unsigned int maclen = skb_network_offset(skb);
netoff = TPACKET_ALIGN(po->tp_hdrlen +
(maclen < 16 ? 16 : maclen)) +
po->tp_reserve;
if (po->has_vnet_hdr)
netoff += sizeof(struct virtio_net_hdr);
macoff = netoff - maclen;
}
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
copy_skb = skb_get(skb);
skb_head = skb->data;
}
if (copy_skb)
skb_set_owner_r(copy_skb, sk);
}
snaplen = po->rx_ring.frame_size - macoff;
if ((int)snaplen < 0)
snaplen = 0;
}
} else if (unlikely(macoff + snaplen >
GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
u32 nval;
nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
snaplen, nval, macoff);
snaplen = nval;
if (unlikely((int)snaplen < 0)) {
snaplen = 0;
macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
}
}
spin_lock(&sk->sk_receive_queue.lock);
h.raw = packet_current_rx_frame(po, skb,
TP_STATUS_KERNEL, (macoff+snaplen));
if (!h.raw)
goto drop_n_account;
if (po->tp_version <= TPACKET_V2) {
packet_increment_rx_head(po, &po->rx_ring);
/*
		 * LOSING will be reported until you read the stats,
		 * because it's COR - Clear On Read.
		 * Anyway, moving it for V1/V2 only as V3 doesn't need this
* at packet level.
*/
if (po->stats.stats1.tp_drops)
status |= TP_STATUS_LOSING;
}
po->stats.stats1.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
}
spin_unlock(&sk->sk_receive_queue.lock);
if (po->has_vnet_hdr) {
if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
vio_le(), true)) {
spin_lock(&sk->sk_receive_queue.lock);
goto drop_n_account;
}
}
skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
getnstimeofday(&ts);
status |= ts_status;
switch (po->tp_version) {
case TPACKET_V1:
h.h1->tp_len = skb->len;
h.h1->tp_snaplen = snaplen;
h.h1->tp_mac = macoff;
h.h1->tp_net = netoff;
h.h1->tp_sec = ts.tv_sec;
h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
hdrlen = sizeof(*h.h1);
break;
case TPACKET_V2:
h.h2->tp_len = skb->len;
h.h2->tp_snaplen = snaplen;
h.h2->tp_mac = macoff;
h.h2->tp_net = netoff;
h.h2->tp_sec = ts.tv_sec;
h.h2->tp_nsec = ts.tv_nsec;
if (skb_vlan_tag_present(skb)) {
h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
h.h2->tp_vlan_tci = 0;
h.h2->tp_vlan_tpid = 0;
}
memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
hdrlen = sizeof(*h.h2);
break;
case TPACKET_V3:
		/* tp_nxt_offset, vlan are already populated above,
		 * so DON'T clear those fields here
*/
h.h3->tp_status |= status;
h.h3->tp_len = skb->len;
h.h3->tp_snaplen = snaplen;
h.h3->tp_mac = macoff;
h.h3->tp_net = netoff;
h.h3->tp_sec = ts.tv_sec;
h.h3->tp_nsec = ts.tv_nsec;
memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
hdrlen = sizeof(*h.h3);
break;
default:
BUG();
}
sll = h.raw + TPACKET_ALIGN(hdrlen);
sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
sll->sll_family = AF_PACKET;
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(po->origdev))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
smp_mb();
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
if (po->tp_version <= TPACKET_V2) {
u8 *start, *end;
end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
macoff + snaplen);
for (start = h.raw; start < end; start += PAGE_SIZE)
flush_dcache_page(pgv_to_page(start));
}
smp_wmb();
#endif
if (po->tp_version <= TPACKET_V2) {
__packet_set_status(po, h.raw, status);
sk->sk_data_ready(sk);
} else {
prb_clear_blk_fill_status(&po->rx_ring);
}
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
skb->data = skb_head;
skb->len = skb_len;
}
drop:
if (!is_drop_n_account)
consume_skb(skb);
else
kfree_skb(skb);
return 0;
drop_n_account:
is_drop_n_account = true;
po->stats.stats1.tp_drops++;
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk);
kfree_skb(copy_skb);
goto drop_n_restore;
}
|
CWE-119
| 180,927 | 2,467 |
149788728253827179499794841470743763164
| null | null | null |
cyrus-imapd
|
6bd33275368edfa71ae117de895488584678ac79
| 1 |
static int mboxlist_do_find(struct find_rock *rock, const strarray_t *patterns)
{
const char *userid = rock->userid;
int isadmin = rock->isadmin;
int crossdomains = config_getswitch(IMAPOPT_CROSSDOMAINS);
char inbox[MAX_MAILBOX_BUFFER];
size_t inboxlen = 0;
size_t prefixlen, len;
size_t domainlen = 0;
size_t userlen = userid ? strlen(userid) : 0;
char domainpat[MAX_MAILBOX_BUFFER]; /* do intra-domain fetches only */
char commonpat[MAX_MAILBOX_BUFFER];
int r = 0;
int i;
const char *p;
if (patterns->count < 1) return 0; /* nothing to do */
for (i = 0; i < patterns->count; i++) {
glob *g = glob_init(strarray_nth(patterns, i), rock->namespace->hier_sep);
ptrarray_append(&rock->globs, g);
}
if (config_virtdomains && userid && (p = strchr(userid, '@'))) {
userlen = p - userid;
domainlen = strlen(p); /* includes separator */
snprintf(domainpat, sizeof(domainpat), "%s!", p+1);
}
else
domainpat[0] = '\0';
/* calculate the inbox (with trailing .INBOX. for later use) */
if (userid && (!(p = strchr(userid, rock->namespace->hier_sep)) ||
((p - userid) > (int)userlen)) &&
strlen(userid)+7 < MAX_MAILBOX_BUFFER) {
char *t, *tmpuser = NULL;
const char *inboxuser;
if (domainlen)
snprintf(inbox, sizeof(inbox), "%s!", userid+userlen+1);
if (rock->namespace->hier_sep == '/' && (p = strchr(userid, '.'))) {
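            /* With the '/' hierarchy separator, a literal '.' in the userid is
             * not a hierarchy delimiter, so encode it as '^' in the internal
             * mailbox name used as the lookup key. */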
tmpuser = xmalloc(userlen);
memcpy(tmpuser, userid, userlen);
t = tmpuser + (p - userid);
while(t < (tmpuser + userlen)) {
if (*t == '.')
*t = '^';
t++;
}
inboxuser = tmpuser;
} else
inboxuser = userid;
snprintf(inbox+domainlen, sizeof(inbox)-domainlen,
"user.%.*s.INBOX.", (int)userlen, inboxuser);
free(tmpuser);
inboxlen = strlen(inbox) - 7;
}
else {
userid = 0;
}
/* Find the common search prefix of all patterns */
const char *firstpat = strarray_nth(patterns, 0);
for (prefixlen = 0; firstpat[prefixlen]; prefixlen++) {
if (prefixlen >= MAX_MAILBOX_NAME) {
r = IMAP_MAILBOX_BADNAME;
goto done;
}
char c = firstpat[prefixlen];
for (i = 1; i < patterns->count; i++) {
const char *pat = strarray_nth(patterns, i);
if (pat[prefixlen] != c) break;
}
if (i < patterns->count) break;
if (c == '*' || c == '%' || c == '?') break;
commonpat[prefixlen] = c;
}
commonpat[prefixlen] = '\0';
if (patterns->count == 1) {
/* Skip pattern which matches shared namespace prefix */
if (!strcmp(firstpat+prefixlen, "%"))
rock->singlepercent = 2;
/* output prefix regardless */
if (!strcmp(firstpat+prefixlen, "*%"))
rock->singlepercent = 1;
}
/*
* Personal (INBOX) namespace (only if not admin)
*/
if (userid && !isadmin) {
/* first the INBOX */
rock->mb_category = MBNAME_INBOX;
r = cyrusdb_forone(rock->db, inbox, inboxlen, &find_p, &find_cb, rock, NULL);
if (r == CYRUSDB_DONE) r = 0;
if (r) goto done;
if (rock->namespace->isalt) {
/* do exact INBOX subs before resetting the namebuffer */
rock->mb_category = MBNAME_INBOXSUB;
r = cyrusdb_foreach(rock->db, inbox, inboxlen+7, &find_p, &find_cb, rock, NULL);
if (r == CYRUSDB_DONE) r = 0;
if (r) goto done;
            /* reset the namebuffer */
r = (*rock->proc)(NULL, rock->procrock);
if (r) goto done;
}
/* iterate through all the mailboxes under the user's inbox */
rock->mb_category = MBNAME_OWNER;
r = cyrusdb_foreach(rock->db, inbox, inboxlen+1, &find_p, &find_cb, rock, NULL);
if (r == CYRUSDB_DONE) r = 0;
if (r) goto done;
/* "Alt Prefix" folders */
if (rock->namespace->isalt) {
            /* reset the namebuffer */
r = (*rock->proc)(NULL, rock->procrock);
if (r) goto done;
rock->mb_category = MBNAME_ALTINBOX;
/* special case user.foo.INBOX. If we're singlepercent == 2, this could
return DONE, in which case we don't need to foreach the rest of the
altprefix space */
r = cyrusdb_forone(rock->db, inbox, inboxlen+6, &find_p, &find_cb, rock, NULL);
if (r == CYRUSDB_DONE) goto skipalt;
if (r) goto done;
/* special case any other altprefix stuff */
rock->mb_category = MBNAME_ALTPREFIX;
r = cyrusdb_foreach(rock->db, inbox, inboxlen+1, &find_p, &find_cb, rock, NULL);
skipalt: /* we got a done, so skip out of the foreach early */
if (r == CYRUSDB_DONE) r = 0;
if (r) goto done;
}
}
/*
* Other Users namespace
*
* If "Other Users*" can match pattern, search for those mailboxes next
*/
if (isadmin || rock->namespace->accessible[NAMESPACE_USER]) {
len = strlen(rock->namespace->prefix[NAMESPACE_USER]);
if (len) len--; // trailing separator
if (!strncmp(rock->namespace->prefix[NAMESPACE_USER], commonpat, MIN(len, prefixlen))) {
if (prefixlen < len) {
/* we match all users */
strlcpy(domainpat+domainlen, "user.", sizeof(domainpat)-domainlen);
}
else {
/* just those in this prefix */
strlcpy(domainpat+domainlen, "user.", sizeof(domainpat)-domainlen);
strlcpy(domainpat+domainlen+5, commonpat+len+1, sizeof(domainpat)-domainlen-5);
}
rock->mb_category = MBNAME_OTHERUSER;
/* because of how domains work, with crossdomains or admin you can't prefix at all :( */
size_t thislen = (isadmin || crossdomains) ? 0 : strlen(domainpat);
            /* reset the namebuffer */
r = (*rock->proc)(NULL, rock->procrock);
if (r) goto done;
r = mboxlist_find_category(rock, domainpat, thislen);
if (r) goto done;
}
}
/*
* Shared namespace
*
     * Search for all remaining mailboxes, but only bother looking at the
     * ones that share the common pattern prefix.
*/
if (isadmin || rock->namespace->accessible[NAMESPACE_SHARED]) {
len = strlen(rock->namespace->prefix[NAMESPACE_SHARED]);
if (len) len--; // trailing separator
if (!strncmp(rock->namespace->prefix[NAMESPACE_SHARED], commonpat, MIN(len, prefixlen))) {
rock->mb_category = MBNAME_SHARED;
            /* reset the namebuffer */
r = (*rock->proc)(NULL, rock->procrock);
if (r) goto done;
/* iterate through all the non-user folders on the server */
r = mboxlist_find_category(rock, domainpat, domainlen);
if (r) goto done;
}
}
/* finish with a reset call always */
r = (*rock->proc)(NULL, rock->procrock);
done:
for (i = 0; i < rock->globs.count; i++) {
glob *g = ptrarray_nth(&rock->globs, i);
glob_free(&g);
}
ptrarray_fini(&rock->globs);
return r;
}
|
CWE-20
| 180,929 | 2,468 |
31191443814716249258193312186143626475
| null | null | null |
FFmpeg
|
9cb4eb772839c5e1de2855d126bf74ff16d13382
| 1 |
static int read_tfra(MOVContext *mov, AVIOContext *f)
{
MOVFragmentIndex* index = NULL;
int version, fieldlength, i, j;
int64_t pos = avio_tell(f);
uint32_t size = avio_rb32(f);
void *tmp;
if (avio_rb32(f) != MKBETAG('t', 'f', 'r', 'a')) {
return 1;
}
av_log(mov->fc, AV_LOG_VERBOSE, "found tfra\n");
index = av_mallocz(sizeof(MOVFragmentIndex));
if (!index) {
return AVERROR(ENOMEM);
}
tmp = av_realloc_array(mov->fragment_index_data,
mov->fragment_index_count + 1,
sizeof(MOVFragmentIndex*));
if (!tmp) {
av_freep(&index);
return AVERROR(ENOMEM);
}
mov->fragment_index_data = tmp;
mov->fragment_index_data[mov->fragment_index_count++] = index;
version = avio_r8(f);
avio_rb24(f);
index->track_id = avio_rb32(f);
fieldlength = avio_rb32(f);
index->item_count = avio_rb32(f);
index->items = av_mallocz_array(
index->item_count, sizeof(MOVFragmentIndexItem));
if (!index->items) {
index->item_count = 0;
return AVERROR(ENOMEM);
}
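    /* Each tfra entry holds a presentation time and a moof offset (32- or
     * 64-bit depending on version), followed by traf/trun/sample numbers
     * whose byte widths are encoded in fieldlength; those are skipped. */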
for (i = 0; i < index->item_count; i++) {
int64_t time, offset;
if (version == 1) {
time = avio_rb64(f);
offset = avio_rb64(f);
} else {
time = avio_rb32(f);
offset = avio_rb32(f);
}
index->items[i].time = time;
index->items[i].moof_offset = offset;
for (j = 0; j < ((fieldlength >> 4) & 3) + 1; j++)
avio_r8(f);
for (j = 0; j < ((fieldlength >> 2) & 3) + 1; j++)
avio_r8(f);
for (j = 0; j < ((fieldlength >> 0) & 3) + 1; j++)
avio_r8(f);
}
avio_seek(f, pos + size, SEEK_SET);
return 0;
}
|
CWE-834
| 180,931 | 2,469 |
104151479220480277467624481026090813390
| null | null | null |
ImageMagick
|
f68a98a9d385838a1c73ec960a14102949940a64
| 1 |
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
j,
number_layers;
size=GetPSDSize(psd_info,image);
if (size == 0)
{
/*
Skip layers & masks.
*/
(void) ReadBlobLong(image);
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
status=MagickFalse;
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
return(MagickTrue);
else
{
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
size=GetPSDSize(psd_info,image);
else
return(MagickTrue);
}
}
status=MagickTrue;
if (size != 0)
{
layer_info=(LayerInfo *) NULL;
number_layers=(short) ReadBlobShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->matte=MagickTrue;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
x,
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
layer_info[i].page.y=ReadBlobSignedLong(image);
layer_info[i].page.x=ReadBlobSignedLong(image);
y=ReadBlobSignedLong(image);
x=ReadBlobSignedLong(image);
layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
(void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
ReversePSDString(image,layer_info[i].blendkey,4);
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=ReadBlobSignedLong(image);
layer_info[i].mask.page.x=ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
(double) layer_info[i].mask.page.height,(double)
((MagickOffsetType) length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
/*
We read it, but don't use it...
*/
for (j=0; j < (ssize_t) length; j+=8)
{
size_t blend_source=ReadBlobLong(image);
size_t blend_dest=ReadBlobLong(image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" source(%x), dest(%x)",(unsigned int)
blend_source,(unsigned int) blend_dest);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) ||
(layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping == MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=0; j < layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
number_layers);
if (status == MagickFalse)
break;
}
}
if (status != MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers > 0)
{
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
}
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
}
return(status);
}
|
CWE-834
| 180,932 | 2,470 |
271878235127622347145764433416134657647
| null | null | null |
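Note: the PSD layer-parsing record above is labeled CWE-834 (excessive iteration): counts and lengths decoded from the blob drive loops and skips without being tied to how many bytes actually remain in the file. Below is a minimal, hedged sketch of that defensive pattern; the helper name, entry size, and limits are illustrative assumptions, not ImageMagick's actual fix.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical check: a count decoded from the file cannot be honoured if
 * the claimed entries could not possibly fit in the bytes left to read. */
static int count_fits(uint64_t count, uint64_t entry_size, uint64_t bytes_left)
{
    if (entry_size == 0 || count > bytes_left / entry_size)
        return 0;                       /* corrupt header: caller should bail out */
    return 1;
}

int main(void)
{
    uint64_t bytes_left = 1024;         /* e.g. blob size minus current offset */
    uint64_t claimed_layers = 1000000;  /* value read from the stream */
    if (!count_fits(claimed_layers, 16, bytes_left))
        puts("reject: claimed layer count exceeds remaining data");
    return 0;
}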
ImageMagick
|
48bcf7c39302cdf9b0d9202ad03bf1b95152c44d
| 1 |
static Image *ReadTXTImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
colorspace[MaxTextExtent],
text[MaxTextExtent];
Image
*image;
IndexPacket
*indexes;
long
x_offset,
y_offset;
MagickBooleanType
status;
MagickPixelPacket
pixel;
QuantumAny
range;
register ssize_t
i,
x;
register PixelPacket
*q;
ssize_t
count,
type,
y;
unsigned long
depth,
height,
max_value,
width;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) ResetMagickMemory(text,0,sizeof(text));
(void) ReadBlobString(image,text);
if (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
do
{
width=0;
height=0;
max_value=0;
*colorspace='\0';
count=(ssize_t) sscanf(text+32,"%lu,%lu,%lu,%s",&width,&height,&max_value,
colorspace);
if ((count != 4) || (width == 0) || (height == 0) || (max_value == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
image->columns=width;
image->rows=height;
for (depth=1; (GetQuantumRange(depth)+1) < max_value; depth++)
if (depth >= 64)
break;
image->depth=depth;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
LocaleLower(colorspace);
i=(ssize_t) strlen(colorspace)-1;
image->matte=MagickFalse;
if ((i > 0) && (colorspace[i] == 'a'))
{
colorspace[i]='\0';
image->matte=MagickTrue;
}
type=ParseCommandOption(MagickColorspaceOptions,MagickFalse,colorspace);
if (type < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
image->colorspace=(ColorspaceType) type;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
(void) SetImageBackgroundColor(image);
range=GetQuantumRange(image->depth);
for (y=0; y < (ssize_t) image->rows; y++)
{
double
blue,
green,
index,
opacity,
red;
red=0.0;
green=0.0;
blue=0.0;
index=0.0;
opacity=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (ReadBlobString(image,text) == (char *) NULL)
break;
switch (image->colorspace)
{
case GRAYColorspace:
{
if (image->matte != MagickFalse)
{
(void) sscanf(text,"%ld,%ld: (%lf%*[%,]%lf%*[%,]",&x_offset,
&y_offset,&red,&opacity);
green=red;
blue=red;
break;
}
(void) sscanf(text,"%ld,%ld: (%lf%*[%,]",&x_offset,&y_offset,&red);
green=red;
blue=red;
break;
}
case CMYKColorspace:
{
if (image->matte != MagickFalse)
{
(void) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&green,&blue,&index,&opacity);
break;
}
(void) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",&x_offset,
&y_offset,&red,&green,&blue,&index);
break;
}
default:
{
if (image->matte != MagickFalse)
{
(void) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&green,&blue,&opacity);
break;
}
(void) sscanf(text,"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&green,&blue);
break;
}
}
if (strchr(text,'%') != (char *) NULL)
{
red*=0.01*range;
green*=0.01*range;
blue*=0.01*range;
index*=0.01*range;
opacity*=0.01*range;
}
if (image->colorspace == LabColorspace)
{
green+=(range+1)/2.0;
blue+=(range+1)/2.0;
}
pixel.red=(MagickRealType) ScaleAnyToQuantum((QuantumAny) (red+0.5),
range);
pixel.green=(MagickRealType) ScaleAnyToQuantum((QuantumAny) (green+0.5),
range);
pixel.blue=(MagickRealType) ScaleAnyToQuantum((QuantumAny) (blue+0.5),
range);
pixel.index=(MagickRealType) ScaleAnyToQuantum((QuantumAny) (index+0.5),
range);
pixel.opacity=(MagickRealType) ScaleAnyToQuantum((QuantumAny) (opacity+
0.5),range);
q=GetAuthenticPixels(image,(ssize_t) x_offset,(ssize_t) y_offset,1,1,
exception);
if (q == (PixelPacket *) NULL)
continue;
SetPixelRed(q,pixel.red);
SetPixelGreen(q,pixel.green);
SetPixelBlue(q,pixel.blue);
if (image->colorspace == CMYKColorspace)
{
indexes=GetAuthenticIndexQueue(image);
SetPixelIndex(indexes,pixel.index);
}
if (image->matte != MagickFalse)
SetPixelAlpha(q,pixel.opacity);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
(void) ReadBlobString(image,text);
if (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) == 0)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) == 0);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
|
CWE-190
| 180,933 | 2,471 |
274340999401530161051584456391486580030
| null | null | null |
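Note: ReadTXTImage above is tagged CWE-190 (integer overflow): width, height and max_value arrive straight from sscanf and later feed size computations. A small, hedged sketch of an overflow-checked pixel count follows; the SIZE_MAX idiom is a generic pattern, not the project's actual patch.

#include <stdint.h>
#include <stdio.h>

/* Store w*h in *out only when the product cannot overflow size_t. */
static int checked_area(size_t w, size_t h, size_t *out)
{
    if (w != 0 && h > SIZE_MAX / w)
        return -1;                      /* would overflow */
    *out = w * h;
    return 0;
}

int main(void)
{
    size_t area;
    if (checked_area(SIZE_MAX, 2, &area) < 0)
        puts("reject: width*height overflows");
    if (checked_area(640, 480, &area) == 0)
        printf("ok: %zu pixels\n", area);
    return 0;
}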
FFmpeg
|
900f39692ca0337a98a7cf047e4e2611071810c2
| 1 |
static int mxf_read_index_entry_array(AVIOContext *pb, MXFIndexTableSegment *segment)
{
int i, length;
segment->nb_index_entries = avio_rb32(pb);
length = avio_rb32(pb);
if (!(segment->temporal_offset_entries=av_calloc(segment->nb_index_entries, sizeof(*segment->temporal_offset_entries))) ||
!(segment->flag_entries = av_calloc(segment->nb_index_entries, sizeof(*segment->flag_entries))) ||
!(segment->stream_offset_entries = av_calloc(segment->nb_index_entries, sizeof(*segment->stream_offset_entries)))) {
av_freep(&segment->temporal_offset_entries);
av_freep(&segment->flag_entries);
return AVERROR(ENOMEM);
}
for (i = 0; i < segment->nb_index_entries; i++) {
segment->temporal_offset_entries[i] = avio_r8(pb);
avio_r8(pb); /* KeyFrameOffset */
segment->flag_entries[i] = avio_r8(pb);
segment->stream_offset_entries[i] = avio_rb64(pb);
avio_skip(pb, length - 11);
}
return 0;
}
|
CWE-834
| 180,937 | 2,473 |
286445280203623570598089851909985716539
| null | null | null |
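Note: in the MXF index record above (CWE-834), nb_index_entries and the per-entry length both come from the stream and drive an allocation plus a skip of length - 11 bytes per entry. A hedged sketch of the two checks such a parser typically wants; names and numbers are illustrative, not FFmpeg's actual patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical validation for a table of 'entries' records of 'entry_len'
 * bytes each, parsed out of 'avail' remaining input bytes. */
static int validate_index_table(uint32_t entries, uint32_t entry_len, uint64_t avail)
{
    if (entry_len < 11)                 /* at least 11 bytes are consumed per entry */
        return -1;
    if (entries > avail / entry_len)    /* table cannot fit in the input */
        return -1;
    return 0;
}

int main(void)
{
    if (validate_index_table(0xFFFFFFFFu, 11, 4096) < 0)
        puts("reject: index table larger than the segment");
    return 0;
}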
FFmpeg
|
9d00fb9d70ee8c0cc7002b89318c5be00f1bbdad
| 1 |
static int mxf_read_primer_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFContext *mxf = arg;
int item_num = avio_rb32(pb);
int item_len = avio_rb32(pb);
if (item_len != 18) {
avpriv_request_sample(pb, "Primer pack item length %d", item_len);
return AVERROR_PATCHWELCOME;
}
if (item_num > 65536) {
av_log(mxf->fc, AV_LOG_ERROR, "item_num %d is too large\n", item_num);
return AVERROR_INVALIDDATA;
}
if (mxf->local_tags)
av_log(mxf->fc, AV_LOG_VERBOSE, "Multiple primer packs\n");
av_free(mxf->local_tags);
mxf->local_tags_count = 0;
mxf->local_tags = av_calloc(item_num, item_len);
if (!mxf->local_tags)
return AVERROR(ENOMEM);
mxf->local_tags_count = item_num;
avio_read(pb, mxf->local_tags, item_num*item_len);
return 0;
}
|
CWE-20
| 180,938 | 2,474 |
249405188241146026680734084412863536752
| null | null | null |
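Note: mxf_read_primer_pack above is tagged CWE-20 (improper input validation): the avio_read of item_num*item_len bytes is never checked, so a short read leaves the tag table partly uninitialized. A hedged, self-contained sketch of treating short reads as hard errors; the file name and sizes are illustrative assumptions only.

#include <stdio.h>

/* Hypothetical wrapper: a short read is a hard error, so later code never
 * parses bytes that were never filled in. */
static int read_exact(FILE *f, void *buf, size_t len)
{
    return fread(buf, 1, len, f) == len ? 0 : -1;
}

int main(void)
{
    unsigned char tags[18];                 /* one 18-byte primer item */
    FILE *f = fopen("primer.bin", "rb");    /* illustrative input file */
    if (!f)
        return 1;
    if (read_exact(f, tags, sizeof(tags)) < 0)
        puts("reject: truncated primer pack");
    fclose(f);
    return 0;
}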
libarchive
|
fa7438a0ff4033e4741c807394a9af6207940d71
| 1 |
atol8(const char *p, size_t char_cnt)
{
int64_t l;
int digit;
l = 0;
while (char_cnt-- > 0) {
if (*p >= '0' && *p <= '7')
digit = *p - '0';
else
break;
p++;
l <<= 3;
l |= digit;
}
return (l);
}
|
CWE-125
| 180,940 | 2,476 |
223379998720734366501523041321530169470
| null | null | null |
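Note: atol8 above is labeled CWE-125 (out-of-bounds read): it walks char_cnt bytes on the caller's word that the buffer really is that long. Below is a hedged sketch of a parser that carries an explicit end bound and stops at the first non-octal byte; it is an illustrative pattern, not libarchive's actual patch.

#include <stdint.h>
#include <stdio.h>

/* Parse at most 'len' octal digits from [p, p+len); never touch bytes past
 * the caller-supplied end, and stop at the first non-octal byte. */
static int64_t parse_octal(const char *p, size_t len)
{
    int64_t v = 0;
    const char *end = p + len;
    while (p < end && *p >= '0' && *p <= '7') {
        v = (v << 3) | (int64_t)(*p - '0');
        p++;
    }
    return v;
}

int main(void)
{
    printf("%lld\n", (long long)parse_octal("0755", 4));   /* prints 493 */
    return 0;
}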
openjpeg
|
afb308b9ccbe129608c9205cf3bb39bbefad90b9
| 1 |
static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t *
p_code_block)
{
OPJ_UINT32 l_data_size;
/* The +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */
l_data_size = 1 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) *
(p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32));
if (l_data_size > p_code_block->data_size) {
if (p_code_block->data) {
/* We refer to data - 1 since below we incremented it */
opj_free(p_code_block->data - 1);
}
p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1);
if (! p_code_block->data) {
p_code_block->data_size = 0U;
return OPJ_FALSE;
}
p_code_block->data_size = l_data_size;
/* We reserve the initial byte as a fake byte to a non-FF value */
/* and increment the data pointer, so that opj_mqc_init_enc() */
/* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */
/* it. */
p_code_block->data[0] = 0;
p_code_block->data += 1; /*why +1 ?*/
}
return OPJ_TRUE;
}
|
CWE-119
| 180,941 | 2,477 |
311894405046094402134295855217155957334
| null | null | null |
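Note: the openjpeg record above carries CWE-119; the code-block data size is computed through OPJ_INT32 arithmetic that can wrap before the allocation. A hedged sketch of doing the same computation in 64 bits with an explicit overflow check; the function name and parameters are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

/* Compute (x1-x0)*(y1-y0)*elem in 64-bit arithmetic and refuse anything
 * that would not fit, instead of letting a signed 32-bit product wrap. */
static int code_block_bytes(uint32_t x0, uint32_t x1, uint32_t y0, uint32_t y1,
                            uint64_t elem, uint64_t *out)
{
    if (x1 < x0 || y1 < y0 || elem == 0)
        return -1;
    uint64_t w = x1 - x0, h = y1 - y0;          /* each fits in 32 bits */
    if (w != 0 && h > UINT64_MAX / w / elem)    /* w*h*elem must fit in 64 bits */
        return -1;
    *out = w * h * elem;
    return 0;
}

int main(void)
{
    uint64_t n;
    if (code_block_bytes(0, 64, 0, 64, 4, &n) == 0)
        printf("need %llu bytes\n", (unsigned long long)n);
    return 0;
}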
linux
|
197e7e521384a23b9e585178f3f11c9fa08274b9
| 1 |
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
const void __user * __user *, pages,
const int __user *, nodes,
int __user *, status, int, flags)
{
const struct cred *cred = current_cred(), *tcred;
struct task_struct *task;
struct mm_struct *mm;
int err;
nodemask_t task_nodes;
/* Check flags */
if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
return -EINVAL;
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
return -EPERM;
/* Find the mm_struct */
rcu_read_lock();
task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
rcu_read_unlock();
return -ESRCH;
}
get_task_struct(task);
/*
* Check if this process has the right to modify the specified
* process. The right exists if the process has administrative
* capabilities, superuser privileges or the same
* userid as the target process.
*/
tcred = __task_cred(task);
if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
!capable(CAP_SYS_NICE)) {
rcu_read_unlock();
err = -EPERM;
goto out;
}
rcu_read_unlock();
err = security_task_movememory(task);
if (err)
goto out;
task_nodes = cpuset_mems_allowed(task);
mm = get_task_mm(task);
put_task_struct(task);
if (!mm)
return -EINVAL;
if (nodes)
err = do_pages_move(mm, task_nodes, nr_pages, pages,
nodes, status, flags);
else
err = do_pages_stat(mm, nr_pages, pages, status);
mmput(mm);
return err;
out:
put_task_struct(task);
return err;
}
|
CWE-200
| 180,942 | 2,478 |
333020230899744479196439444734213940999
| null | null | null |
linux
|
499350a5a6e7512d9ed369ed63a4244b6536f4f8
| 1 |
int tcp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int err = 0;
int old_state = sk->sk_state;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
/* ABORT function of RFC793 */
if (old_state == TCP_LISTEN) {
inet_csk_listen_stop(sk);
} else if (unlikely(tp->repair)) {
sk->sk_err = ECONNABORTED;
} else if (tcp_need_reset(old_state) ||
(tp->snd_nxt != tp->write_seq &&
(1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
/* The last check adjusts for discrepancy of Linux wrt. RFC
* states
*/
tcp_send_active_reset(sk, gfp_any());
sk->sk_err = ECONNRESET;
} else if (old_state == TCP_SYN_SENT)
sk->sk_err = ECONNRESET;
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
skb_rbtree_purge(&tp->out_of_order_queue);
inet->inet_dport = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->srtt_us = 0;
tp->write_seq += tp->max_window + 2;
if (tp->write_seq == 0)
tp->write_seq = 1;
icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->packets_out = 0;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
tcp_saved_syn_free(tp);
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
inet->defer_connect = 0;
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
sk->sk_error_report(sk);
return err;
}
|
CWE-369
| 180,944 | 2,479 |
261705518300475045878184848177140313645
| null | null | null |
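Note: the tcp_disconnect record above is labeled CWE-369 (divide by zero): a field that later code divides by can be left at zero on this path. A tiny, hedged sketch of the fail-closed pattern for such divisors; it is a generic illustration, not the kernel's actual change.

#include <stdio.h>

/* Guarded division: a zero denominator becomes an explicit error path
 * instead of ever reaching the divide instruction. */
static int safe_div(long num, long den, long *out)
{
    if (den == 0)
        return -1;
    *out = num / den;
    return 0;
}

int main(void)
{
    long q;
    if (safe_div(100, 0, &q) < 0)
        puts("reject: zero divisor");
    return 0;
}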
FFmpeg
|
7e80b63ecd259d69d383623e75b318bf2bd491f6
| 1 |
static int cine_read_header(AVFormatContext *avctx)
{
AVIOContext *pb = avctx->pb;
AVStream *st;
unsigned int version, compression, offImageHeader, offSetup, offImageOffsets, biBitCount, length, CFA;
int vflip;
char *description;
uint64_t i;
st = avformat_new_stream(avctx, NULL);
if (!st)
return AVERROR(ENOMEM);
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codecpar->codec_tag = 0;
/* CINEFILEHEADER structure */
avio_skip(pb, 4); // Type, Headersize
compression = avio_rl16(pb);
version = avio_rl16(pb);
if (version != 1) {
avpriv_request_sample(avctx, "unknown version %i", version);
return AVERROR_INVALIDDATA;
}
avio_skip(pb, 12); // FirstMovieImage, TotalImageCount, FirstImageNumber
st->duration = avio_rl32(pb);
offImageHeader = avio_rl32(pb);
offSetup = avio_rl32(pb);
offImageOffsets = avio_rl32(pb);
avio_skip(pb, 8); // TriggerTime
/* BITMAPINFOHEADER structure */
avio_seek(pb, offImageHeader, SEEK_SET);
avio_skip(pb, 4); //biSize
st->codecpar->width = avio_rl32(pb);
st->codecpar->height = avio_rl32(pb);
if (avio_rl16(pb) != 1) // biPlanes
return AVERROR_INVALIDDATA;
biBitCount = avio_rl16(pb);
if (biBitCount != 8 && biBitCount != 16 && biBitCount != 24 && biBitCount != 48) {
avpriv_request_sample(avctx, "unsupported biBitCount %i", biBitCount);
return AVERROR_INVALIDDATA;
}
switch (avio_rl32(pb)) {
case BMP_RGB:
vflip = 0;
break;
case 0x100: /* BI_PACKED */
st->codecpar->codec_tag = MKTAG('B', 'I', 'T', 0);
vflip = 1;
break;
default:
avpriv_request_sample(avctx, "unknown bitmap compression");
return AVERROR_INVALIDDATA;
}
avio_skip(pb, 4); // biSizeImage
/* parse SETUP structure */
avio_seek(pb, offSetup, SEEK_SET);
    avio_skip(pb, 140); // FrameRate16 .. descriptionOld
if (avio_rl16(pb) != 0x5453)
return AVERROR_INVALIDDATA;
length = avio_rl16(pb);
if (length < 0x163C) {
avpriv_request_sample(avctx, "short SETUP header");
return AVERROR_INVALIDDATA;
}
avio_skip(pb, 616); // Binning .. bFlipH
if (!avio_rl32(pb) ^ vflip) {
st->codecpar->extradata = av_strdup("BottomUp");
st->codecpar->extradata_size = 9;
}
avio_skip(pb, 4); // Grid
avpriv_set_pts_info(st, 64, 1, avio_rl32(pb));
avio_skip(pb, 20); // Shutter .. bEnableColor
set_metadata_int(&st->metadata, "camera_version", avio_rl32(pb), 0);
set_metadata_int(&st->metadata, "firmware_version", avio_rl32(pb), 0);
set_metadata_int(&st->metadata, "software_version", avio_rl32(pb), 0);
set_metadata_int(&st->metadata, "recording_timezone", avio_rl32(pb), 0);
CFA = avio_rl32(pb);
set_metadata_int(&st->metadata, "brightness", avio_rl32(pb), 1);
set_metadata_int(&st->metadata, "contrast", avio_rl32(pb), 1);
set_metadata_int(&st->metadata, "gamma", avio_rl32(pb), 1);
avio_skip(pb, 12 + 16); // Reserved1 .. AutoExpRect
set_metadata_float(&st->metadata, "wbgain[0].r", av_int2float(avio_rl32(pb)), 1);
set_metadata_float(&st->metadata, "wbgain[0].b", av_int2float(avio_rl32(pb)), 1);
avio_skip(pb, 36); // WBGain[1].. WBView
st->codecpar->bits_per_coded_sample = avio_rl32(pb);
if (compression == CC_RGB) {
if (biBitCount == 8) {
st->codecpar->format = AV_PIX_FMT_GRAY8;
} else if (biBitCount == 16) {
st->codecpar->format = AV_PIX_FMT_GRAY16LE;
} else if (biBitCount == 24) {
st->codecpar->format = AV_PIX_FMT_BGR24;
} else if (biBitCount == 48) {
st->codecpar->format = AV_PIX_FMT_BGR48LE;
} else {
avpriv_request_sample(avctx, "unsupported biBitCount %i", biBitCount);
return AVERROR_INVALIDDATA;
}
} else if (compression == CC_UNINT) {
switch (CFA & 0xFFFFFF) {
case CFA_BAYER:
if (biBitCount == 8) {
st->codecpar->format = AV_PIX_FMT_BAYER_GBRG8;
} else if (biBitCount == 16) {
st->codecpar->format = AV_PIX_FMT_BAYER_GBRG16LE;
} else {
avpriv_request_sample(avctx, "unsupported biBitCount %i", biBitCount);
return AVERROR_INVALIDDATA;
}
break;
case CFA_BAYERFLIP:
if (biBitCount == 8) {
st->codecpar->format = AV_PIX_FMT_BAYER_RGGB8;
} else if (biBitCount == 16) {
st->codecpar->format = AV_PIX_FMT_BAYER_RGGB16LE;
} else {
avpriv_request_sample(avctx, "unsupported biBitCount %i", biBitCount);
return AVERROR_INVALIDDATA;
}
break;
default:
avpriv_request_sample(avctx, "unsupported Color Field Array (CFA) %i", CFA & 0xFFFFFF);
return AVERROR_INVALIDDATA;
}
} else { //CC_LEAD
avpriv_request_sample(avctx, "unsupported compression %i", compression);
return AVERROR_INVALIDDATA;
}
avio_skip(pb, 668); // Conv8Min ... Sensor
set_metadata_int(&st->metadata, "shutter_ns", avio_rl32(pb), 0);
avio_skip(pb, 24); // EDRShutterNs ... ImHeightAcq
#define DESCRIPTION_SIZE 4096
description = av_malloc(DESCRIPTION_SIZE + 1);
if (!description)
return AVERROR(ENOMEM);
i = avio_get_str(pb, DESCRIPTION_SIZE, description, DESCRIPTION_SIZE + 1);
if (i < DESCRIPTION_SIZE)
avio_skip(pb, DESCRIPTION_SIZE - i);
if (description[0])
av_dict_set(&st->metadata, "description", description, AV_DICT_DONT_STRDUP_VAL);
else
av_free(description);
avio_skip(pb, 1176); // RisingEdge ... cmUser
set_metadata_int(&st->metadata, "enable_crop", avio_rl32(pb), 1);
set_metadata_int(&st->metadata, "crop_left", avio_rl32(pb), 1);
set_metadata_int(&st->metadata, "crop_top", avio_rl32(pb), 1);
set_metadata_int(&st->metadata, "crop_right", avio_rl32(pb), 1);
set_metadata_int(&st->metadata, "crop_bottom", avio_rl32(pb), 1);
/* parse image offsets */
avio_seek(pb, offImageOffsets, SEEK_SET);
for (i = 0; i < st->duration; i++)
av_add_index_entry(st, avio_rl64(pb), i, 0, 0, AVINDEX_KEYFRAME);
return 0;
}
|
CWE-834
| 180,945 | 2,480 |
237724750006394289755951196084490226718
| null | null | null |
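Note: cine_read_header above (CWE-834) trusts the 32-bit frame count in the header and loops that many times reading 8-byte offsets. A hedged sketch of an index loop that stops as soon as the file runs out; the reader, table size, and file name are illustrative assumptions, not FFmpeg's actual fix.

#include <stdint.h>
#include <stdio.h>

/* Build an index from little-endian 8-byte offsets, but stop on a short
 * read instead of trusting the header's claimed frame count. */
static size_t read_offsets(FILE *f, uint64_t claimed, uint64_t *tbl, size_t cap)
{
    size_t n = 0;
    unsigned char raw[8];
    while (n < claimed && n < cap && fread(raw, 1, 8, f) == 8) {
        uint64_t off = 0;
        for (int i = 7; i >= 0; i--)
            off = (off << 8) | raw[i];      /* raw[0] is the least significant byte */
        tbl[n++] = off;
    }
    return n;                               /* may be smaller than 'claimed' */
}

int main(void)
{
    uint64_t tbl[16];
    FILE *f = fopen("movie.cine", "rb");    /* illustrative input, may not exist */
    if (!f)
        return 1;
    printf("indexed %zu frames\n", read_offsets(f, 1000000, tbl, 16));
    fclose(f);
    return 0;
}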
FFmpeg
|
7ec414892ddcad88313848494b6fc5f437c9ca4a
| 1 |
static int read_data(void *opaque, uint8_t *buf, int buf_size)
{
struct playlist *v = opaque;
HLSContext *c = v->parent->priv_data;
int ret, i;
int just_opened = 0;
restart:
if (!v->needed)
return AVERROR_EOF;
if (!v->input) {
int64_t reload_interval;
struct segment *seg;
/* Check that the playlist is still needed before opening a new
* segment. */
if (v->ctx && v->ctx->nb_streams) {
v->needed = 0;
for (i = 0; i < v->n_main_streams; i++) {
if (v->main_streams[i]->discard < AVDISCARD_ALL) {
v->needed = 1;
break;
}
}
}
if (!v->needed) {
av_log(v->parent, AV_LOG_INFO, "No longer receiving playlist %d\n",
v->index);
return AVERROR_EOF;
}
/* If this is a live stream and the reload interval has elapsed since
* the last playlist reload, reload the playlists now. */
reload_interval = default_reload_interval(v);
reload:
if (!v->finished &&
av_gettime_relative() - v->last_load_time >= reload_interval) {
if ((ret = parse_playlist(c, v->url, v, NULL)) < 0) {
av_log(v->parent, AV_LOG_WARNING, "Failed to reload playlist %d\n",
v->index);
return ret;
}
/* If we need to reload the playlist again below (if
* there's still no more segments), switch to a reload
* interval of half the target duration. */
reload_interval = v->target_duration / 2;
}
if (v->cur_seq_no < v->start_seq_no) {
av_log(NULL, AV_LOG_WARNING,
"skipping %d segments ahead, expired from playlists\n",
v->start_seq_no - v->cur_seq_no);
v->cur_seq_no = v->start_seq_no;
}
if (v->cur_seq_no >= v->start_seq_no + v->n_segments) {
if (v->finished)
return AVERROR_EOF;
while (av_gettime_relative() - v->last_load_time < reload_interval) {
if (ff_check_interrupt(c->interrupt_callback))
return AVERROR_EXIT;
av_usleep(100*1000);
}
/* Enough time has elapsed since the last reload */
goto reload;
}
seg = current_segment(v);
/* load/update Media Initialization Section, if any */
ret = update_init_section(v, seg);
if (ret)
return ret;
ret = open_input(c, v, seg);
if (ret < 0) {
if (ff_check_interrupt(c->interrupt_callback))
return AVERROR_EXIT;
av_log(v->parent, AV_LOG_WARNING, "Failed to open segment of playlist %d\n",
v->index);
v->cur_seq_no += 1;
goto reload;
}
just_opened = 1;
}
if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
/* Push init section out first before first actual segment */
int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
memcpy(buf, v->init_sec_buf, copy_size);
v->init_sec_buf_read_offset += copy_size;
return copy_size;
}
ret = read_from_url(v, current_segment(v), buf, buf_size, READ_NORMAL);
if (ret > 0) {
if (just_opened && v->is_id3_timestamped != 0) {
/* Intercept ID3 tags here, elementary audio streams are required
* to convey timestamps using them in the beginning of each segment. */
intercept_id3(v, buf, buf_size, &ret);
}
return ret;
}
ff_format_io_close(v->parent, &v->input);
v->cur_seq_no++;
c->cur_seq_no = v->cur_seq_no;
goto restart;
}
|
CWE-835
| 180,946 | 2,481 |
113402367066446233577404734885364428310
| null | null | null |
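Note: the HLS read_data record above is labeled CWE-835 (infinite loop): when a segment keeps failing to open, the reload path can spin forever. A hedged sketch of bounding such a retry loop; MAX_RETRIES and the fake try_open() are illustrative assumptions, not the project's actual change.

#include <stdio.h>

/* Bound a retry loop that would otherwise spin forever when every attempt
 * fails; the limit is a policy choice made explicit. */
static int try_open(int attempt) { return attempt >= 7 ? 0 : -1; }

int main(void)
{
    enum { MAX_RETRIES = 5 };
    int attempt;
    for (attempt = 0; attempt < MAX_RETRIES; attempt++)
        if (try_open(attempt) == 0)
            break;
    if (attempt == MAX_RETRIES)
        puts("giving up after bounded retries");
    return 0;
}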
FFmpeg
|
7f9ec5593e04827249e7aeb466da06a98a0d7329
| 1 |
static int asf_read_marker(AVFormatContext *s, int64_t size)
{
AVIOContext *pb = s->pb;
ASFContext *asf = s->priv_data;
int i, count, name_len, ret;
char name[1024];
avio_rl64(pb); // reserved 16 bytes
avio_rl64(pb); // ...
count = avio_rl32(pb); // markers count
avio_rl16(pb); // reserved 2 bytes
name_len = avio_rl16(pb); // name length
for (i = 0; i < name_len; i++)
avio_r8(pb); // skip the name
for (i = 0; i < count; i++) {
int64_t pres_time;
int name_len;
avio_rl64(pb); // offset, 8 bytes
pres_time = avio_rl64(pb); // presentation time
pres_time -= asf->hdr.preroll * 10000;
avio_rl16(pb); // entry length
avio_rl32(pb); // send time
avio_rl32(pb); // flags
name_len = avio_rl32(pb); // name length
if ((ret = avio_get_str16le(pb, name_len * 2, name,
sizeof(name))) < name_len)
avio_skip(pb, name_len - ret);
avpriv_new_chapter(s, i, (AVRational) { 1, 10000000 }, pres_time,
AV_NOPTS_VALUE, name);
}
return 0;
}
|
CWE-834
| 180,947 | 2,482 |
331068034171760210133845430622582820934
| null | null | null |
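Note: asf_read_marker above (CWE-834) loops over a marker count read straight from the stream. Since each marker record occupies at least a couple of dozen bytes, the count can be capped by the chunk size; the sketch below is illustrative, with an assumed minimum record size, not FFmpeg's actual patch.

#include <stdint.h>
#include <stdio.h>

/* Each marker needs at least MIN_MARKER bytes, so a chunk of 'size' bytes
 * cannot legitimately hold more than size / MIN_MARKER of them. */
#define MIN_MARKER 24u

static uint32_t bound_marker_count(uint32_t claimed, uint64_t size)
{
    uint64_t max = size / MIN_MARKER;
    return claimed < max ? claimed : (uint32_t)max;
}

int main(void)
{
    printf("%u\n", bound_marker_count(0xFFFFFFFFu, 4800));  /* prints 200 */
    return 0;
}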
FFmpeg
|
96f24d1bee7fe7bac08e2b7c74db1a046c9dc0de
| 1 |
static av_cold int rl2_read_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
AVStream *st;
unsigned int frame_count;
unsigned int audio_frame_counter = 0;
unsigned int video_frame_counter = 0;
unsigned int back_size;
unsigned short sound_rate;
unsigned short rate;
unsigned short channels;
unsigned short def_sound_size;
unsigned int signature;
unsigned int pts_den = 11025; /* video only case */
unsigned int pts_num = 1103;
unsigned int* chunk_offset = NULL;
int* chunk_size = NULL;
int* audio_size = NULL;
int i;
int ret = 0;
avio_skip(pb,4); /* skip FORM tag */
back_size = avio_rl32(pb); /**< get size of the background frame */
signature = avio_rb32(pb);
avio_skip(pb, 4); /* data size */
frame_count = avio_rl32(pb);
/* disallow back_sizes and frame_counts that may lead to overflows later */
if(back_size > INT_MAX/2 || frame_count > INT_MAX / sizeof(uint32_t))
return AVERROR_INVALIDDATA;
avio_skip(pb, 2); /* encoding method */
sound_rate = avio_rl16(pb);
rate = avio_rl16(pb);
channels = avio_rl16(pb);
def_sound_size = avio_rl16(pb);
/** setup video stream */
st = avformat_new_stream(s, NULL);
if(!st)
return AVERROR(ENOMEM);
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_RL2;
st->codecpar->codec_tag = 0; /* no fourcc */
st->codecpar->width = 320;
st->codecpar->height = 200;
/** allocate and fill extradata */
st->codecpar->extradata_size = EXTRADATA1_SIZE;
if(signature == RLV3_TAG && back_size > 0)
st->codecpar->extradata_size += back_size;
if(ff_get_extradata(s, st->codecpar, pb, st->codecpar->extradata_size) < 0)
return AVERROR(ENOMEM);
/** setup audio stream if present */
if(sound_rate){
if (!channels || channels > 42) {
av_log(s, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
return AVERROR_INVALIDDATA;
}
pts_num = def_sound_size;
pts_den = rate;
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_id = AV_CODEC_ID_PCM_U8;
st->codecpar->codec_tag = 1;
st->codecpar->channels = channels;
st->codecpar->bits_per_coded_sample = 8;
st->codecpar->sample_rate = rate;
st->codecpar->bit_rate = st->codecpar->channels * st->codecpar->sample_rate *
st->codecpar->bits_per_coded_sample;
st->codecpar->block_align = st->codecpar->channels *
st->codecpar->bits_per_coded_sample / 8;
avpriv_set_pts_info(st,32,1,rate);
}
avpriv_set_pts_info(s->streams[0], 32, pts_num, pts_den);
chunk_size = av_malloc(frame_count * sizeof(uint32_t));
audio_size = av_malloc(frame_count * sizeof(uint32_t));
chunk_offset = av_malloc(frame_count * sizeof(uint32_t));
if(!chunk_size || !audio_size || !chunk_offset){
av_free(chunk_size);
av_free(audio_size);
av_free(chunk_offset);
return AVERROR(ENOMEM);
}
/** read offset and size tables */
for(i=0; i < frame_count;i++)
chunk_size[i] = avio_rl32(pb);
for(i=0; i < frame_count;i++)
chunk_offset[i] = avio_rl32(pb);
for(i=0; i < frame_count;i++)
audio_size[i] = avio_rl32(pb) & 0xFFFF;
/** build the sample index */
for(i=0;i<frame_count;i++){
if(chunk_size[i] < 0 || audio_size[i] > chunk_size[i]){
ret = AVERROR_INVALIDDATA;
break;
}
if(sound_rate && audio_size[i]){
av_add_index_entry(s->streams[1], chunk_offset[i],
audio_frame_counter,audio_size[i], 0, AVINDEX_KEYFRAME);
audio_frame_counter += audio_size[i] / channels;
}
av_add_index_entry(s->streams[0], chunk_offset[i] + audio_size[i],
video_frame_counter,chunk_size[i]-audio_size[i],0,AVINDEX_KEYFRAME);
++video_frame_counter;
}
av_free(chunk_size);
av_free(audio_size);
av_free(chunk_offset);
return ret;
}
|
CWE-834
| 180,948 | 2,483 |
106964341680444639909267595463429633575
| null | null | null |
FFmpeg
|
4f05e2e2dc1a89f38cd9f0960a6561083d714f1e
| 1 |
static int mv_read_header(AVFormatContext *avctx)
{
MvContext *mv = avctx->priv_data;
AVIOContext *pb = avctx->pb;
AVStream *ast = NULL, *vst = NULL; //initialization to suppress warning
int version, i;
int ret;
avio_skip(pb, 4);
version = avio_rb16(pb);
if (version == 2) {
uint64_t timestamp;
int v;
avio_skip(pb, 22);
/* allocate audio track first to prevent unnecessary seeking
* (audio packet always precede video packet for a given frame) */
ast = avformat_new_stream(avctx, NULL);
if (!ast)
return AVERROR(ENOMEM);
vst = avformat_new_stream(avctx, NULL);
if (!vst)
return AVERROR(ENOMEM);
avpriv_set_pts_info(vst, 64, 1, 15);
vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
vst->avg_frame_rate = av_inv_q(vst->time_base);
vst->nb_frames = avio_rb32(pb);
v = avio_rb32(pb);
switch (v) {
case 1:
vst->codecpar->codec_id = AV_CODEC_ID_MVC1;
break;
case 2:
vst->codecpar->format = AV_PIX_FMT_ARGB;
vst->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
break;
default:
avpriv_request_sample(avctx, "Video compression %i", v);
break;
}
vst->codecpar->codec_tag = 0;
vst->codecpar->width = avio_rb32(pb);
vst->codecpar->height = avio_rb32(pb);
avio_skip(pb, 12);
ast->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
ast->nb_frames = vst->nb_frames;
ast->codecpar->sample_rate = avio_rb32(pb);
if (ast->codecpar->sample_rate <= 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid sample rate %d\n", ast->codecpar->sample_rate);
return AVERROR_INVALIDDATA;
}
avpriv_set_pts_info(ast, 33, 1, ast->codecpar->sample_rate);
if (set_channels(avctx, ast, avio_rb32(pb)) < 0)
return AVERROR_INVALIDDATA;
v = avio_rb32(pb);
if (v == AUDIO_FORMAT_SIGNED) {
ast->codecpar->codec_id = AV_CODEC_ID_PCM_S16BE;
} else {
avpriv_request_sample(avctx, "Audio compression (format %i)", v);
}
avio_skip(pb, 12);
var_read_metadata(avctx, "title", 0x80);
var_read_metadata(avctx, "comment", 0x100);
avio_skip(pb, 0x80);
timestamp = 0;
for (i = 0; i < vst->nb_frames; i++) {
uint32_t pos = avio_rb32(pb);
uint32_t asize = avio_rb32(pb);
uint32_t vsize = avio_rb32(pb);
avio_skip(pb, 8);
av_add_index_entry(ast, pos, timestamp, asize, 0, AVINDEX_KEYFRAME);
av_add_index_entry(vst, pos + asize, i, vsize, 0, AVINDEX_KEYFRAME);
timestamp += asize / (ast->codecpar->channels * 2);
}
} else if (!version && avio_rb16(pb) == 3) {
avio_skip(pb, 4);
if ((ret = read_table(avctx, NULL, parse_global_var)) < 0)
return ret;
if (mv->nb_audio_tracks > 1) {
avpriv_request_sample(avctx, "Multiple audio streams support");
return AVERROR_PATCHWELCOME;
} else if (mv->nb_audio_tracks) {
ast = avformat_new_stream(avctx, NULL);
if (!ast)
return AVERROR(ENOMEM);
ast->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
if ((read_table(avctx, ast, parse_audio_var)) < 0)
return ret;
if (mv->acompression == 100 &&
mv->aformat == AUDIO_FORMAT_SIGNED &&
ast->codecpar->bits_per_coded_sample == 16) {
ast->codecpar->codec_id = AV_CODEC_ID_PCM_S16BE;
} else {
avpriv_request_sample(avctx,
"Audio compression %i (format %i, sr %i)",
mv->acompression, mv->aformat,
ast->codecpar->bits_per_coded_sample);
ast->codecpar->codec_id = AV_CODEC_ID_NONE;
}
if (ast->codecpar->channels <= 0) {
av_log(avctx, AV_LOG_ERROR, "No valid channel count found.\n");
return AVERROR_INVALIDDATA;
}
}
if (mv->nb_video_tracks > 1) {
avpriv_request_sample(avctx, "Multiple video streams support");
return AVERROR_PATCHWELCOME;
} else if (mv->nb_video_tracks) {
vst = avformat_new_stream(avctx, NULL);
if (!vst)
return AVERROR(ENOMEM);
vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
if ((ret = read_table(avctx, vst, parse_video_var))<0)
return ret;
}
if (mv->nb_audio_tracks)
read_index(pb, ast);
if (mv->nb_video_tracks)
read_index(pb, vst);
} else {
avpriv_request_sample(avctx, "Version %i", version);
return AVERROR_PATCHWELCOME;
}
return 0;
}
|
CWE-834
| 180,949 | 2,484 |
61768510921436960692985129912388758664
| null | null | null |
FFmpeg
|
124eb202e70678539544f6268efc98131f19fa49
| 1 |
static int ivr_read_header(AVFormatContext *s)
{
unsigned tag, type, len, tlen, value;
int i, j, n, count, nb_streams = 0, ret;
uint8_t key[256], val[256];
AVIOContext *pb = s->pb;
AVStream *st;
int64_t pos, offset, temp;
pos = avio_tell(pb);
tag = avio_rl32(pb);
if (tag == MKTAG('.','R','1','M')) {
if (avio_rb16(pb) != 1)
return AVERROR_INVALIDDATA;
if (avio_r8(pb) != 1)
return AVERROR_INVALIDDATA;
len = avio_rb32(pb);
avio_skip(pb, len);
avio_skip(pb, 5);
temp = avio_rb64(pb);
while (!avio_feof(pb) && temp) {
offset = temp;
temp = avio_rb64(pb);
}
avio_skip(pb, offset - avio_tell(pb));
if (avio_r8(pb) != 1)
return AVERROR_INVALIDDATA;
len = avio_rb32(pb);
avio_skip(pb, len);
if (avio_r8(pb) != 2)
return AVERROR_INVALIDDATA;
avio_skip(pb, 16);
pos = avio_tell(pb);
tag = avio_rl32(pb);
}
if (tag != MKTAG('.','R','E','C'))
return AVERROR_INVALIDDATA;
if (avio_r8(pb) != 0)
return AVERROR_INVALIDDATA;
count = avio_rb32(pb);
for (i = 0; i < count; i++) {
if (avio_feof(pb))
return AVERROR_INVALIDDATA;
type = avio_r8(pb);
tlen = avio_rb32(pb);
avio_get_str(pb, tlen, key, sizeof(key));
len = avio_rb32(pb);
if (type == 5) {
avio_get_str(pb, len, val, sizeof(val));
av_log(s, AV_LOG_DEBUG, "%s = '%s'\n", key, val);
} else if (type == 4) {
av_log(s, AV_LOG_DEBUG, "%s = '0x", key);
for (j = 0; j < len; j++)
av_log(s, AV_LOG_DEBUG, "%X", avio_r8(pb));
av_log(s, AV_LOG_DEBUG, "'\n");
} else if (len == 4 && type == 3 && !strncmp(key, "StreamCount", tlen)) {
nb_streams = value = avio_rb32(pb);
} else if (len == 4 && type == 3) {
value = avio_rb32(pb);
av_log(s, AV_LOG_DEBUG, "%s = %d\n", key, value);
} else {
av_log(s, AV_LOG_DEBUG, "Skipping unsupported key: %s\n", key);
avio_skip(pb, len);
}
}
for (n = 0; n < nb_streams; n++) {
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
st->priv_data = ff_rm_alloc_rmstream();
if (!st->priv_data)
return AVERROR(ENOMEM);
if (avio_r8(pb) != 1)
return AVERROR_INVALIDDATA;
count = avio_rb32(pb);
for (i = 0; i < count; i++) {
if (avio_feof(pb))
return AVERROR_INVALIDDATA;
type = avio_r8(pb);
tlen = avio_rb32(pb);
avio_get_str(pb, tlen, key, sizeof(key));
len = avio_rb32(pb);
if (type == 5) {
avio_get_str(pb, len, val, sizeof(val));
av_log(s, AV_LOG_DEBUG, "%s = '%s'\n", key, val);
} else if (type == 4 && !strncmp(key, "OpaqueData", tlen)) {
ret = ffio_ensure_seekback(pb, 4);
if (ret < 0)
return ret;
if (avio_rb32(pb) == MKBETAG('M', 'L', 'T', 'I')) {
ret = rm_read_multi(s, pb, st, NULL);
} else {
avio_seek(pb, -4, SEEK_CUR);
ret = ff_rm_read_mdpr_codecdata(s, pb, st, st->priv_data, len, NULL);
}
if (ret < 0)
return ret;
} else if (type == 4) {
int j;
av_log(s, AV_LOG_DEBUG, "%s = '0x", key);
for (j = 0; j < len; j++)
av_log(s, AV_LOG_DEBUG, "%X", avio_r8(pb));
av_log(s, AV_LOG_DEBUG, "'\n");
} else if (len == 4 && type == 3 && !strncmp(key, "Duration", tlen)) {
st->duration = avio_rb32(pb);
} else if (len == 4 && type == 3) {
value = avio_rb32(pb);
av_log(s, AV_LOG_DEBUG, "%s = %d\n", key, value);
} else {
av_log(s, AV_LOG_DEBUG, "Skipping unsupported key: %s\n", key);
avio_skip(pb, len);
}
}
}
if (avio_r8(pb) != 6)
return AVERROR_INVALIDDATA;
avio_skip(pb, 12);
avio_skip(pb, avio_rb64(pb) + pos - avio_tell(s->pb));
if (avio_r8(pb) != 8)
return AVERROR_INVALIDDATA;
avio_skip(pb, 8);
return 0;
}
|
CWE-834
| 180,950 | 2,485 |
335515859715972202389525914039584515273
| null | null | null |
openjpeg
|
e5285319229a5d77bf316bb0d3a6cbd3cb8666d9
| 1 |
opj_image_t* pgxtoimage(const char *filename, opj_cparameters_t *parameters)
{
FILE *f = NULL;
int w, h, prec;
int i, numcomps, max;
OPJ_COLOR_SPACE color_space;
opj_image_cmptparm_t cmptparm; /* maximum of 1 component */
opj_image_t * image = NULL;
int adjustS, ushift, dshift, force8;
char endian1, endian2, sign;
char signtmp[32];
char temp[32];
int bigendian;
opj_image_comp_t *comp = NULL;
numcomps = 1;
color_space = OPJ_CLRSPC_GRAY;
memset(&cmptparm, 0, sizeof(opj_image_cmptparm_t));
max = 0;
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Failed to open %s for reading !\n", filename);
return NULL;
}
fseek(f, 0, SEEK_SET);
if (fscanf(f, "PG%[ \t]%c%c%[ \t+-]%d%[ \t]%d%[ \t]%d", temp, &endian1,
&endian2, signtmp, &prec, temp, &w, temp, &h) != 9) {
fclose(f);
fprintf(stderr,
"ERROR: Failed to read the right number of element from the fscanf() function!\n");
return NULL;
}
i = 0;
sign = '+';
while (signtmp[i] != '\0') {
if (signtmp[i] == '-') {
sign = '-';
}
i++;
}
fgetc(f);
if (endian1 == 'M' && endian2 == 'L') {
bigendian = 1;
} else if (endian2 == 'M' && endian1 == 'L') {
bigendian = 0;
} else {
fclose(f);
fprintf(stderr, "Bad pgx header, please check input file\n");
return NULL;
}
/* initialize image component */
cmptparm.x0 = (OPJ_UINT32)parameters->image_offset_x0;
cmptparm.y0 = (OPJ_UINT32)parameters->image_offset_y0;
cmptparm.w = !cmptparm.x0 ? (OPJ_UINT32)((w - 1) * parameters->subsampling_dx +
1) : cmptparm.x0 + (OPJ_UINT32)(w - 1) * (OPJ_UINT32)parameters->subsampling_dx
+ 1;
cmptparm.h = !cmptparm.y0 ? (OPJ_UINT32)((h - 1) * parameters->subsampling_dy +
1) : cmptparm.y0 + (OPJ_UINT32)(h - 1) * (OPJ_UINT32)parameters->subsampling_dy
+ 1;
if (sign == '-') {
cmptparm.sgnd = 1;
} else {
cmptparm.sgnd = 0;
}
if (prec < 8) {
force8 = 1;
ushift = 8 - prec;
dshift = prec - ushift;
if (cmptparm.sgnd) {
adjustS = (1 << (prec - 1));
} else {
adjustS = 0;
}
cmptparm.sgnd = 0;
prec = 8;
} else {
ushift = dshift = force8 = adjustS = 0;
}
cmptparm.prec = (OPJ_UINT32)prec;
cmptparm.bpp = (OPJ_UINT32)prec;
cmptparm.dx = (OPJ_UINT32)parameters->subsampling_dx;
cmptparm.dy = (OPJ_UINT32)parameters->subsampling_dy;
/* create the image */
image = opj_image_create((OPJ_UINT32)numcomps, &cmptparm, color_space);
if (!image) {
fclose(f);
return NULL;
}
/* set image offset and reference grid */
image->x0 = cmptparm.x0;
image->y0 = cmptparm.x0;
image->x1 = cmptparm.w;
image->y1 = cmptparm.h;
/* set image data */
comp = &image->comps[0];
for (i = 0; i < w * h; i++) {
int v;
if (force8) {
v = readuchar(f) + adjustS;
v = (v << ushift) + (v >> dshift);
comp->data[i] = (unsigned char)v;
if (v > max) {
max = v;
}
continue;
}
if (comp->prec == 8) {
if (!comp->sgnd) {
v = readuchar(f);
} else {
v = (char) readuchar(f);
}
} else if (comp->prec <= 16) {
if (!comp->sgnd) {
v = readushort(f, bigendian);
} else {
v = (short) readushort(f, bigendian);
}
} else {
if (!comp->sgnd) {
v = (int)readuint(f, bigendian);
} else {
v = (int) readuint(f, bigendian);
}
}
if (v > max) {
max = v;
}
comp->data[i] = v;
}
fclose(f);
comp->bpp = (OPJ_UINT32)int_floorlog2(max) + 1;
return image;
}
|
CWE-787
| 180,951 | 2,486 |
218417184881972252840459772900542074139
| null | null | null |
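Note: pgxtoimage above is tagged CWE-787 (out-of-bounds write): w and h come from fscanf and then size both the image and the pixel loop that writes comp->data[i]. A hedged sketch of validating parsed dimensions before any buffer is sized or written; the cap is an illustrative policy, not openjpeg's actual fix.

#include <stdio.h>
#include <stdlib.h>

/* Reject hostile header dimensions before they size a buffer that a pixel
 * loop will later write through. */
#define MAX_DIM 32768

static int *alloc_pixels(int w, int h)
{
    if (w <= 0 || h <= 0 || w > MAX_DIM || h > MAX_DIM)
        return NULL;                        /* reject before any write */
    return calloc((size_t)w * (size_t)h, sizeof(int));
}

int main(void)
{
    int *px = alloc_pixels(-3, 100);        /* a hostile header value */
    if (!px)
        puts("reject: bad dimensions");
    free(px);
    return 0;
}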
openjpeg
|
2cd30c2b06ce332dede81cccad8b334cde997281
| 1 |
opj_image_t* tgatoimage(const char *filename, opj_cparameters_t *parameters)
{
FILE *f;
opj_image_t *image;
unsigned int image_width, image_height, pixel_bit_depth;
unsigned int x, y;
int flip_image = 0;
opj_image_cmptparm_t cmptparm[4]; /* maximum 4 components */
int numcomps;
OPJ_COLOR_SPACE color_space;
OPJ_BOOL mono ;
OPJ_BOOL save_alpha;
int subsampling_dx, subsampling_dy;
int i;
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Failed to open %s for reading !!\n", filename);
return 0;
}
if (!tga_readheader(f, &pixel_bit_depth, &image_width, &image_height,
&flip_image)) {
fclose(f);
return NULL;
}
/* We currently only support 24 & 32 bit tga's ... */
if (!((pixel_bit_depth == 24) || (pixel_bit_depth == 32))) {
fclose(f);
return NULL;
}
/* initialize image components */
memset(&cmptparm[0], 0, 4 * sizeof(opj_image_cmptparm_t));
mono = (pixel_bit_depth == 8) ||
(pixel_bit_depth == 16); /* Mono with & without alpha. */
save_alpha = (pixel_bit_depth == 16) ||
(pixel_bit_depth == 32); /* Mono with alpha, or RGB with alpha */
if (mono) {
color_space = OPJ_CLRSPC_GRAY;
numcomps = save_alpha ? 2 : 1;
} else {
numcomps = save_alpha ? 4 : 3;
color_space = OPJ_CLRSPC_SRGB;
}
subsampling_dx = parameters->subsampling_dx;
subsampling_dy = parameters->subsampling_dy;
for (i = 0; i < numcomps; i++) {
cmptparm[i].prec = 8;
cmptparm[i].bpp = 8;
cmptparm[i].sgnd = 0;
cmptparm[i].dx = (OPJ_UINT32)subsampling_dx;
cmptparm[i].dy = (OPJ_UINT32)subsampling_dy;
cmptparm[i].w = image_width;
cmptparm[i].h = image_height;
}
/* create the image */
image = opj_image_create((OPJ_UINT32)numcomps, &cmptparm[0], color_space);
if (!image) {
fclose(f);
return NULL;
}
/* set image offset and reference grid */
image->x0 = (OPJ_UINT32)parameters->image_offset_x0;
image->y0 = (OPJ_UINT32)parameters->image_offset_y0;
image->x1 = !image->x0 ? (OPJ_UINT32)(image_width - 1) *
(OPJ_UINT32)subsampling_dx + 1 : image->x0 + (OPJ_UINT32)(image_width - 1) *
(OPJ_UINT32)subsampling_dx + 1;
image->y1 = !image->y0 ? (OPJ_UINT32)(image_height - 1) *
(OPJ_UINT32)subsampling_dy + 1 : image->y0 + (OPJ_UINT32)(image_height - 1) *
(OPJ_UINT32)subsampling_dy + 1;
/* set image data */
for (y = 0; y < image_height; y++) {
int index;
if (flip_image) {
index = (int)((image_height - y - 1) * image_width);
} else {
index = (int)(y * image_width);
}
if (numcomps == 3) {
for (x = 0; x < image_width; x++) {
unsigned char r, g, b;
if (!fread(&b, 1, 1, f)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
opj_image_destroy(image);
fclose(f);
return NULL;
}
if (!fread(&g, 1, 1, f)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
opj_image_destroy(image);
fclose(f);
return NULL;
}
if (!fread(&r, 1, 1, f)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
opj_image_destroy(image);
fclose(f);
return NULL;
}
image->comps[0].data[index] = r;
image->comps[1].data[index] = g;
image->comps[2].data[index] = b;
index++;
}
} else if (numcomps == 4) {
for (x = 0; x < image_width; x++) {
unsigned char r, g, b, a;
if (!fread(&b, 1, 1, f)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
opj_image_destroy(image);
fclose(f);
return NULL;
}
if (!fread(&g, 1, 1, f)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
opj_image_destroy(image);
fclose(f);
return NULL;
}
if (!fread(&r, 1, 1, f)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
opj_image_destroy(image);
fclose(f);
return NULL;
}
if (!fread(&a, 1, 1, f)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
opj_image_destroy(image);
fclose(f);
return NULL;
}
image->comps[0].data[index] = r;
image->comps[1].data[index] = g;
image->comps[2].data[index] = b;
image->comps[3].data[index] = a;
index++;
}
} else {
fprintf(stderr, "Currently unsupported bit depth : %s\n", filename);
}
}
fclose(f);
return image;
}
|
CWE-787
| 180,954 | 2,489 |
91259858852872254645568346681525420670
| null | null | null |
mbedtls
|
d15795acd5074e0b44e71f7ede8bdfe1b48591fc
| 1 |
int mbedtls_x509_crt_verify_with_profile( mbedtls_x509_crt *crt,
mbedtls_x509_crt *trust_ca,
mbedtls_x509_crl *ca_crl,
const mbedtls_x509_crt_profile *profile,
const char *cn, uint32_t *flags,
int (*f_vrfy)(void *, mbedtls_x509_crt *, int, uint32_t *),
void *p_vrfy )
{
size_t cn_len;
int ret;
int pathlen = 0, selfsigned = 0;
mbedtls_x509_crt *parent;
mbedtls_x509_name *name;
mbedtls_x509_sequence *cur = NULL;
mbedtls_pk_type_t pk_type;
if( profile == NULL )
return( MBEDTLS_ERR_X509_BAD_INPUT_DATA );
*flags = 0;
if( cn != NULL )
{
name = &crt->subject;
cn_len = strlen( cn );
if( crt->ext_types & MBEDTLS_X509_EXT_SUBJECT_ALT_NAME )
{
cur = &crt->subject_alt_names;
while( cur != NULL )
{
if( cur->buf.len == cn_len &&
x509_memcasecmp( cn, cur->buf.p, cn_len ) == 0 )
break;
if( cur->buf.len > 2 &&
memcmp( cur->buf.p, "*.", 2 ) == 0 &&
x509_check_wildcard( cn, &cur->buf ) == 0 )
{
break;
}
cur = cur->next;
}
if( cur == NULL )
*flags |= MBEDTLS_X509_BADCERT_CN_MISMATCH;
}
else
{
while( name != NULL )
{
if( MBEDTLS_OID_CMP( MBEDTLS_OID_AT_CN, &name->oid ) == 0 )
{
if( name->val.len == cn_len &&
x509_memcasecmp( name->val.p, cn, cn_len ) == 0 )
break;
if( name->val.len > 2 &&
memcmp( name->val.p, "*.", 2 ) == 0 &&
x509_check_wildcard( cn, &name->val ) == 0 )
break;
}
name = name->next;
}
if( name == NULL )
*flags |= MBEDTLS_X509_BADCERT_CN_MISMATCH;
}
}
/* Check the type and size of the key */
pk_type = mbedtls_pk_get_type( &crt->pk );
if( x509_profile_check_pk_alg( profile, pk_type ) != 0 )
*flags |= MBEDTLS_X509_BADCERT_BAD_PK;
if( x509_profile_check_key( profile, pk_type, &crt->pk ) != 0 )
*flags |= MBEDTLS_X509_BADCERT_BAD_KEY;
/* Look for a parent in trusted CAs */
for( parent = trust_ca; parent != NULL; parent = parent->next )
{
if( x509_crt_check_parent( crt, parent, 0, pathlen == 0 ) == 0 )
break;
}
if( parent != NULL )
{
ret = x509_crt_verify_top( crt, parent, ca_crl, profile,
pathlen, selfsigned, flags, f_vrfy, p_vrfy );
if( ret != 0 )
return( ret );
}
else
{
/* Look for a parent upwards the chain */
for( parent = crt->next; parent != NULL; parent = parent->next )
if( x509_crt_check_parent( crt, parent, 0, pathlen == 0 ) == 0 )
break;
/* Are we part of the chain or at the top? */
if( parent != NULL )
{
ret = x509_crt_verify_child( crt, parent, trust_ca, ca_crl, profile,
pathlen, selfsigned, flags, f_vrfy, p_vrfy );
if( ret != 0 )
return( ret );
}
else
{
ret = x509_crt_verify_top( crt, trust_ca, ca_crl, profile,
pathlen, selfsigned, flags, f_vrfy, p_vrfy );
if( ret != 0 )
return( ret );
}
}
if( *flags != 0 )
return( MBEDTLS_ERR_X509_CERT_VERIFY_FAILED );
return( 0 );
}
|
CWE-287
| 180,955 | 2,490 |
207213925191051561649166193472608984073
| null | null | null |
tcpdump
|
3c4d7c0ee30a30e5abff3d6d9586a3753101faf5
| 1 |
rt6_print(netdissect_options *ndo, register const u_char *bp, const u_char *bp2 _U_)
{
register const struct ip6_rthdr *dp;
register const struct ip6_rthdr0 *dp0;
register const u_char *ep;
int i, len;
register const struct in6_addr *addr;
dp = (const struct ip6_rthdr *)bp;
len = dp->ip6r_len;
/* 'ep' points to the end of available data. */
ep = ndo->ndo_snapend;
ND_TCHECK(dp->ip6r_segleft);
ND_PRINT((ndo, "srcrt (len=%d", dp->ip6r_len)); /*)*/
ND_PRINT((ndo, ", type=%d", dp->ip6r_type));
ND_PRINT((ndo, ", segleft=%d", dp->ip6r_segleft));
switch (dp->ip6r_type) {
case IPV6_RTHDR_TYPE_0:
case IPV6_RTHDR_TYPE_2: /* Mobile IPv6 ID-20 */
dp0 = (const struct ip6_rthdr0 *)dp;
ND_TCHECK(dp0->ip6r0_reserved);
if (dp0->ip6r0_reserved || ndo->ndo_vflag) {
ND_PRINT((ndo, ", rsv=0x%0x",
EXTRACT_32BITS(&dp0->ip6r0_reserved)));
}
if (len % 2 == 1)
goto trunc;
len >>= 1;
addr = &dp0->ip6r0_addr[0];
for (i = 0; i < len; i++) {
if ((const u_char *)(addr + 1) > ep)
goto trunc;
ND_PRINT((ndo, ", [%d]%s", i, ip6addr_string(ndo, addr)));
addr++;
}
/*(*/
ND_PRINT((ndo, ") "));
return((dp0->ip6r0_len + 1) << 3);
break;
default:
goto trunc;
break;
}
trunc:
ND_PRINT((ndo, "[|srcrt]"));
return -1;
}
|
CWE-125
| 180,956 | 2,491 |
311722369782944901497183929225399832994
| null | null | null |
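Note: rt6_print above is labeled CWE-125 (out-of-bounds read): the loop compares a derived pointer against the capture end, but the header length that drives it is not fully validated. A hedged sketch of re-checking the remaining capture before every fixed-size read; names and sizes are illustrative, not tcpdump's actual patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk 16-byte records, but re-check against the captured end before every
 * read; 'snapend' plays the role of the capture boundary. */
static void print_addrs(const uint8_t *p, const uint8_t *snapend, int count)
{
    for (int i = 0; i < count; i++) {
        if (snapend - p < 16) {             /* truncated capture: stop */
            puts("[|srcrt]");
            return;
        }
        uint8_t addr[16];
        memcpy(addr, p, 16);
        printf("addr[%d] first byte %02x\n", i, addr[0]);
        p += 16;
    }
}

int main(void)
{
    uint8_t pkt[40] = {0};
    print_addrs(pkt, pkt + sizeof(pkt), 8); /* header claims 8, only 2 fit */
    return 0;
}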
linux
|
a6e544b0a88b53114bfa5a57e21b7be7a8dfc9d0
| 1 |
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
void *data, __be16 proto, int nhoff, int hlen)
{
struct flow_dissector_key_control *key_control;
struct flow_dissector_key_basic *key_basic;
struct flow_dissector_key_addrs *key_addrs;
struct flow_dissector_key_ports *key_ports;
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_keyid *key_keyid;
u8 ip_proto = 0;
if (!data) {
data = skb->data;
proto = skb->protocol;
nhoff = skb_network_offset(skb);
hlen = skb_headlen(skb);
}
/* It is ensured by skb_flow_dissector_init() that control key will
* be always present.
*/
key_control = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_CONTROL,
target_container);
/* It is ensured by skb_flow_dissector_init() that basic key will
* be always present.
*/
key_basic = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct ethhdr *eth = eth_hdr(skb);
struct flow_dissector_key_eth_addrs *key_eth_addrs;
key_eth_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
}
again:
switch (proto) {
case htons(ETH_P_IP): {
const struct iphdr *iph;
struct iphdr _iph;
ip:
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph || iph->ihl < 5)
return false;
nhoff += iph->ihl * 4;
ip_proto = iph->protocol;
if (ip_is_fragment(iph))
ip_proto = 0;
if (!skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS))
break;
key_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container);
memcpy(&key_addrs->v4addrs, &iph->saddr,
sizeof(key_addrs->v4addrs));
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
break;
}
case htons(ETH_P_IPV6): {
const struct ipv6hdr *iph;
struct ipv6hdr _iph;
__be32 flow_label;
ipv6:
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph)
return false;
ip_proto = iph->nexthdr;
nhoff += sizeof(struct ipv6hdr);
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;
key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
target_container);
memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs));
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
flow_label = ip6_flowlabel(iph);
if (flow_label) {
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
key_tags = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL,
target_container);
key_tags->flow_label = ntohl(flow_label);
}
}
break;
}
case htons(ETH_P_8021AD):
case htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
struct vlan_hdr _vlan;
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
if (!vlan)
return false;
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_VLANID)) {
key_tags = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_VLANID,
target_container);
key_tags->vlan_id = skb_vlan_tag_get_id(skb);
}
proto = vlan->h_vlan_encapsulated_proto;
nhoff += sizeof(*vlan);
goto again;
}
case htons(ETH_P_PPP_SES): {
struct {
struct pppoe_hdr hdr;
__be16 proto;
} *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
proto = hdr->proto;
nhoff += PPPOE_SES_HLEN;
switch (proto) {
case htons(PPP_IP):
goto ip;
case htons(PPP_IPV6):
goto ipv6;
default:
return false;
}
}
case htons(ETH_P_TIPC): {
struct {
__be32 pre[3];
__be32 srcnode;
} *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
key_basic->n_proto = proto;
key_control->thoff = (u16)nhoff;
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
key_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_TIPC_ADDRS,
target_container);
key_addrs->tipcaddrs.srcnode = hdr->srcnode;
key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
}
return true;
}
case htons(ETH_P_MPLS_UC):
case htons(ETH_P_MPLS_MC): {
struct mpls_label *hdr, _hdr[2];
mpls:
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
hlen, &_hdr);
if (!hdr)
return false;
if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
key_keyid = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
target_container);
key_keyid->keyid = hdr[1].entry &
htonl(MPLS_LS_LABEL_MASK);
}
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
key_control->thoff = (u16)nhoff;
return true;
}
return true;
}
case htons(ETH_P_FCOE):
key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
/* fall through */
default:
return false;
}
ip_proto_again:
switch (ip_proto) {
case IPPROTO_GRE: {
struct gre_hdr {
__be16 flags;
__be16 proto;
} *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
/*
* Only look inside GRE if version zero and no
* routing
*/
if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
break;
proto = hdr->proto;
nhoff += 4;
if (hdr->flags & GRE_CSUM)
nhoff += 4;
if (hdr->flags & GRE_KEY) {
const __be32 *keyid;
__be32 _keyid;
keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
data, hlen, &_keyid);
if (!keyid)
return false;
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_GRE_KEYID)) {
key_keyid = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_GRE_KEYID,
target_container);
key_keyid->keyid = *keyid;
}
nhoff += 4;
}
if (hdr->flags & GRE_SEQ)
nhoff += 4;
if (proto == htons(ETH_P_TEB)) {
const struct ethhdr *eth;
struct ethhdr _eth;
eth = __skb_header_pointer(skb, nhoff,
sizeof(_eth),
data, hlen, &_eth);
if (!eth)
return false;
proto = eth->h_proto;
nhoff += sizeof(*eth);
}
goto again;
}
case NEXTHDR_HOP:
case NEXTHDR_ROUTING:
case NEXTHDR_DEST: {
u8 _opthdr[2], *opthdr;
if (proto != htons(ETH_P_IPV6))
break;
opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
data, hlen, &_opthdr);
if (!opthdr)
return false;
ip_proto = opthdr[0];
nhoff += (opthdr[1] + 1) << 3;
goto ip_proto_again;
}
case IPPROTO_IPIP:
proto = htons(ETH_P_IP);
goto ip;
case IPPROTO_IPV6:
proto = htons(ETH_P_IPV6);
goto ipv6;
case IPPROTO_MPLS:
proto = htons(ETH_P_MPLS_UC);
goto mpls;
default:
break;
}
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
key_control->thoff = (u16)nhoff;
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
data, hlen);
}
return true;
}
|
CWE-20
| 180,957 | 2,492 |
89513863866329190439338015798431515656
| null | null | null |
acpica
|
37f2c716f2c6ab14c3ba557a539c3ee3224931b5
| 1 |
AcpiNsEvaluate (
ACPI_EVALUATE_INFO *Info)
{
ACPI_STATUS Status;
ACPI_FUNCTION_TRACE (NsEvaluate);
if (!Info)
{
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
if (!Info->Node)
{
/*
* Get the actual namespace node for the target object if we
* need to. Handles these cases:
*
* 1) Null node, valid pathname from root (absolute path)
* 2) Node and valid pathname (path relative to Node)
* 3) Node, Null pathname
*/
Status = AcpiNsGetNode (Info->PrefixNode, Info->RelativePathname,
ACPI_NS_NO_UPSEARCH, &Info->Node);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
}
/*
* For a method alias, we must grab the actual method node so that
* proper scoping context will be established before execution.
*/
if (AcpiNsGetType (Info->Node) == ACPI_TYPE_LOCAL_METHOD_ALIAS)
{
Info->Node = ACPI_CAST_PTR (
ACPI_NAMESPACE_NODE, Info->Node->Object);
}
/* Complete the info block initialization */
Info->ReturnObject = NULL;
Info->NodeFlags = Info->Node->Flags;
Info->ObjDesc = AcpiNsGetAttachedObject (Info->Node);
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "%s [%p] Value %p\n",
Info->RelativePathname, Info->Node,
AcpiNsGetAttachedObject (Info->Node)));
/* Get info if we have a predefined name (_HID, etc.) */
Info->Predefined = AcpiUtMatchPredefinedMethod (Info->Node->Name.Ascii);
/* Get the full pathname to the object, for use in warning messages */
Info->FullPathname = AcpiNsGetNormalizedPathname (Info->Node, TRUE);
if (!Info->FullPathname)
{
return_ACPI_STATUS (AE_NO_MEMORY);
}
/* Count the number of arguments being passed in */
Info->ParamCount = 0;
if (Info->Parameters)
{
while (Info->Parameters[Info->ParamCount])
{
Info->ParamCount++;
}
/* Warn on impossible argument count */
if (Info->ParamCount > ACPI_METHOD_NUM_ARGS)
{
ACPI_WARN_PREDEFINED ((AE_INFO, Info->FullPathname, ACPI_WARN_ALWAYS,
"Excess arguments (%u) - using only %u",
Info->ParamCount, ACPI_METHOD_NUM_ARGS));
Info->ParamCount = ACPI_METHOD_NUM_ARGS;
}
}
/*
* For predefined names: Check that the declared argument count
* matches the ACPI spec -- otherwise this is a BIOS error.
*/
AcpiNsCheckAcpiCompliance (Info->FullPathname, Info->Node,
Info->Predefined);
/*
* For all names: Check that the incoming argument count for
* this method/object matches the actual ASL/AML definition.
*/
AcpiNsCheckArgumentCount (Info->FullPathname, Info->Node,
Info->ParamCount, Info->Predefined);
/* For predefined names: Typecheck all incoming arguments */
AcpiNsCheckArgumentTypes (Info);
/*
* Three major evaluation cases:
*
* 1) Object types that cannot be evaluated by definition
* 2) The object is a control method -- execute it
     * 3) The object is not a method -- just return its current value
*/
switch (AcpiNsGetType (Info->Node))
{
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_MUTEX:
case ACPI_TYPE_REGION:
case ACPI_TYPE_THERMAL:
case ACPI_TYPE_LOCAL_SCOPE:
/*
* 1) Disallow evaluation of certain object types. For these,
* object evaluation is undefined and not supported.
*/
ACPI_ERROR ((AE_INFO,
"%s: Evaluation of object type [%s] is not supported",
Info->FullPathname,
AcpiUtGetTypeName (Info->Node->Type)));
Status = AE_TYPE;
goto Cleanup;
case ACPI_TYPE_METHOD:
/*
* 2) Object is a control method - execute it
*/
/* Verify that there is a method object associated with this node */
if (!Info->ObjDesc)
{
ACPI_ERROR ((AE_INFO, "%s: Method has no attached sub-object",
Info->FullPathname));
Status = AE_NULL_OBJECT;
goto Cleanup;
}
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"**** Execute method [%s] at AML address %p length %X\n",
Info->FullPathname,
Info->ObjDesc->Method.AmlStart + 1,
Info->ObjDesc->Method.AmlLength - 1));
/*
* Any namespace deletion must acquire both the namespace and
* interpreter locks to ensure that no thread is using the portion of
* the namespace that is being deleted.
*
* Execute the method via the interpreter. The interpreter is locked
* here before calling into the AML parser
*/
AcpiExEnterInterpreter ();
Status = AcpiPsExecuteMethod (Info);
AcpiExExitInterpreter ();
break;
default:
/*
* 3) All other non-method objects -- get the current object value
*/
/*
* Some objects require additional resolution steps (e.g., the Node
* may be a field that must be read, etc.) -- we can't just grab
* the object out of the node.
*
* Use ResolveNodeToValue() to get the associated value.
*
* NOTE: we can get away with passing in NULL for a walk state because
* the Node is guaranteed to not be a reference to either a method
* local or a method argument (because this interface is never called
* from a running method.)
*
* Even though we do not directly invoke the interpreter for object
* resolution, we must lock it because we could access an OpRegion.
* The OpRegion access code assumes that the interpreter is locked.
*/
AcpiExEnterInterpreter ();
/* TBD: ResolveNodeToValue has a strange interface, fix */
Info->ReturnObject = ACPI_CAST_PTR (ACPI_OPERAND_OBJECT, Info->Node);
Status = AcpiExResolveNodeToValue (ACPI_CAST_INDIRECT_PTR (
ACPI_NAMESPACE_NODE, &Info->ReturnObject), NULL);
AcpiExExitInterpreter ();
if (ACPI_FAILURE (Status))
{
Info->ReturnObject = NULL;
goto Cleanup;
}
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Returned object %p [%s]\n",
Info->ReturnObject,
AcpiUtGetObjectTypeName (Info->ReturnObject)));
Status = AE_CTRL_RETURN_VALUE; /* Always has a "return value" */
break;
}
/*
* For predefined names, check the return value against the ACPI
* specification. Some incorrect return value types are repaired.
*/
(void) AcpiNsCheckReturnValue (Info->Node, Info, Info->ParamCount,
Status, &Info->ReturnObject);
/* Check if there is a return value that must be dealt with */
if (Status == AE_CTRL_RETURN_VALUE)
{
/* If caller does not want the return value, delete it */
if (Info->Flags & ACPI_IGNORE_RETURN_VALUE)
{
AcpiUtRemoveReference (Info->ReturnObject);
Info->ReturnObject = NULL;
}
/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
Status = AE_OK;
}
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES,
"*** Completed evaluation of object %s ***\n",
Info->RelativePathname));
Cleanup:
/*
* Namespace was unlocked by the handling AcpiNs* function, so we
* just free the pathname and return
*/
ACPI_FREE (Info->FullPathname);
Info->FullPathname = NULL;
return_ACPI_STATUS (Status);
}
|
CWE-200
| 180,958 | 2,493 |
336314678589934849627990055483348133351
| null | null | null |
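The AcpiNsEvaluate record above clamps the caller-supplied argument count to ACPI_METHOD_NUM_ARGS and routes every exit after the pathname allocation through the single Cleanup label so the allocation is always released. The fragment below is a stripped-down sketch of that clamp-and-cleanup shape in plain C; MAX_ARGS and eval_object() are made-up stand-ins, not ACPICA interfaces.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ARGS 7   /* stand-in for ACPI_METHOD_NUM_ARGS */

/* Hypothetical evaluator: counts the NULL-terminated argument list, clamps
   the count instead of trusting the caller, and releases the diagnostic
   pathname on every exit path via a single cleanup label. */
static int eval_object(const char *name, const char *const *args)
{
    size_t n = strlen(name) + 1;
    char *pathname = malloc(n);             /* used only for warning messages */
    size_t count = 0;
    int status;

    if (!pathname)
        return -1;
    memcpy(pathname, name, n);

    if (args) {
        while (args[count])
            count++;
        if (count > MAX_ARGS) {             /* warn, then clamp */
            fprintf(stderr, "%s: excess arguments (%zu), using only %d\n",
                    pathname, count, MAX_ARGS);
            count = MAX_ARGS;
        }
    }

    if (name[0] == '\0') {                  /* illustrative error path */
        status = -2;
        goto cleanup;                       /* still releases pathname below */
    }

    /* ... the real evaluation would run here ... */
    status = 0;

cleanup:
    free(pathname);                         /* single exit after the allocation, no leak */
    return status;
}

The clamp-before-use ordering is the point of the sketch: the warning reports the offending count, but nothing downstream ever indexes past the fixed-size argument table.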