| project | commit_id | target | func | cwe | big_vul_idx | idx | hash | size | message | dataset |
|---|---|---|---|---|---|---|---|---|---|---|
| stringclasses (633 values) | stringlengths (7–81) | int64 (0–1) | stringlengths (5–484k) | stringclasses (131 values) | float64 (0–189k, nullable) | int64 (0–522k) | stringlengths (34–39) | float64 (1–24k, nullable) | stringlengths (0–11.5k, nullable) | stringclasses (1 value) |

Each row below lists its fields in that column order: project | commit_id | target | func | cwe | big_vul_idx | idx | hash | size | message | dataset.
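A minimal sketch of consuming a dump with this schema via the Hugging Face `datasets` library; the dataset path and the `train` split name are assumptions, not the real location of this dump — adjust them to wherever it is actually hosted:

```python
# Sketch only: tally CWE labels among the vulnerable rows (target == 1).
# "path/to/this-dump" and the "train" split are placeholders, not the real location.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("path/to/this-dump", split="train")

cwe_counts = Counter(
    row["cwe"] for row in ds
    if row["target"] == 1 and row["cwe"] is not None
)

for cwe, n in cwe_counts.most_common(10):
    print(f"{cwe}: {n} vulnerable functions")
```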
linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | 1 |
static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
case USB_DEVICE_ID_KYE_ERGO_525V:
/* the fixups that need to be done:
* - change led usage page to button for extra buttons
* - report size 8 count 1 must be size 1 count 8 for button
* bitfield
* - change the button usage range to 4-7 for the extra
* buttons
*/
if (*rsize >= 74 &&
rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
rdesc[71] == 0x75 && rdesc[72] == 0x08 &&
rdesc[73] == 0x95 && rdesc[74] == 0x01) {
hid_info(hdev,
"fixing up Kye/Genius Ergo Mouse "
"report descriptor\n");
rdesc[62] = 0x09;
rdesc[64] = 0x04;
rdesc[66] = 0x07;
rdesc[72] = 0x01;
rdesc[74] = 0x08;
}
break;
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
if (*rsize == EASYPEN_I405X_RDESC_ORIG_SIZE) {
rdesc = easypen_i405x_rdesc_fixed;
*rsize = sizeof(easypen_i405x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
if (*rsize == EASYPEN_M610X_RDESC_ORIG_SIZE) {
rdesc = easypen_m610x_rdesc_fixed;
*rsize = sizeof(easypen_m610x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Gila Gaming Mouse");
break;
case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
"Genius Gx Imperator Keyboard");
break;
case USB_DEVICE_ID_GENIUS_MANTICORE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Manticore Keyboard");
break;
}
return rdesc;
}
| CWE-119 | 179,543 | 1,291 | 61701421720071808882903110031752917362 | null | null | null |

linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | 1 |
static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
struct usb_device_descriptor *udesc;
__u16 bcdDevice, rev_maj, rev_min;
if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
hid_info(hdev,
"fixing up Logitech keyboard report descriptor\n");
rdesc[84] = rdesc[89] = 0x4d;
rdesc[85] = rdesc[90] = 0x10;
}
if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
rdesc[49] == 0x81 && rdesc[50] == 0x06) {
hid_info(hdev,
"fixing up rel/abs in Logitech report descriptor\n");
rdesc[33] = rdesc[50] = 0x02;
}
switch (hdev->product) {
/* Several wheels report as this id when operating in emulation mode. */
case USB_DEVICE_ID_LOGITECH_WHEEL:
udesc = &(hid_to_usb_dev(hdev)->descriptor);
if (!udesc) {
hid_err(hdev, "NULL USB device descriptor\n");
break;
}
bcdDevice = le16_to_cpu(udesc->bcdDevice);
rev_maj = bcdDevice >> 8;
rev_min = bcdDevice & 0xff;
/* Update the report descriptor for only the Driving Force wheel */
if (rev_maj == 1 && rev_min == 2 &&
*rsize == DF_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Driving Force report descriptor\n");
rdesc = df_rdesc_fixed;
*rsize = sizeof(df_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
if (*rsize == MOMO_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Momo Force (Red) report descriptor\n");
rdesc = momo_rdesc_fixed;
*rsize = sizeof(momo_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Momo Racing Force (Black) report descriptor\n");
rdesc = momo2_rdesc_fixed;
*rsize = sizeof(momo2_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
if (*rsize == FV_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Formula Vibration report descriptor\n");
rdesc = fv_rdesc_fixed;
*rsize = sizeof(fv_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
if (*rsize == DFP_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Driving Force Pro report descriptor\n");
rdesc = dfp_rdesc_fixed;
*rsize = sizeof(dfp_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
rdesc[47] == 0x05 && rdesc[48] == 0x09) {
hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n");
rdesc[41] = 0x05;
rdesc[42] = 0x09;
rdesc[47] = 0x95;
rdesc[48] = 0x0B;
}
break;
}
return rdesc;
}
| CWE-119 | 179,544 | 1,292 | 279704939167865611128772246993871300695 | null | null | null |

linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | 1 |
static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
rdesc[30] = 0x0c;
}
return rdesc;
}
| CWE-119 | 179,545 | 1,293 | 85432680744687476998085220534657720665 | null | null | null |

linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | 1 |
static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
rdesc[60] = 0xfa;
rdesc[40] = 0xfa;
}
return rdesc;
}
| CWE-119 | 179,546 | 1,294 | 284095831257846902512408232275598352364 | null | null | null |

linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | 1 |
static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
rdesc[106] == 0x03) {
hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
rdesc[105] = rdesc[110] = 0x03;
rdesc[106] = rdesc[111] = 0x21;
}
return rdesc;
}
| CWE-119 | 179,547 | 1,295 | 187317300382672400256987721928336315454 | null | null | null |

linux | 51217e69697fba92a06e07e16f55c9a52d8e8945 | 1 |
static int logi_dj_ll_raw_request(struct hid_device *hid,
unsigned char reportnum, __u8 *buf,
size_t count, unsigned char report_type,
int reqtype)
{
struct dj_device *djdev = hid->driver_data;
struct dj_receiver_dev *djrcv_dev = djdev->dj_receiver_dev;
u8 *out_buf;
int ret;
if (buf[0] != REPORT_TYPE_LEDS)
return -EINVAL;
out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC);
if (!out_buf)
return -ENOMEM;
if (count < DJREPORT_SHORT_LENGTH - 2)
count = DJREPORT_SHORT_LENGTH - 2;
out_buf[0] = REPORT_ID_DJ_SHORT;
out_buf[1] = djdev->device_index;
memcpy(out_buf + 2, buf, count);
ret = hid_hw_raw_request(djrcv_dev->hdev, out_buf[0], out_buf,
DJREPORT_SHORT_LENGTH, report_type, reqtype);
kfree(out_buf);
return ret;
}
| CWE-119 | 179,548 | 1,296 | 256114427603892451142846818910237282617 | null | null | null |

linux | ad3e14d7c5268c2e24477c6ef54bbdf88add5d36 | 1 |
static int logi_dj_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data,
int size)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
struct dj_report *dj_report = (struct dj_report *) data;
unsigned long flags;
bool report_processed = false;
dbg_hid("%s, size:%d\n", __func__, size);
/* Here we receive all data coming from iface 2, there are 4 cases:
*
* 1) Data should continue its normal processing i.e. data does not
* come from the DJ collection, in which case we do nothing and
* return 0, so hid-core can continue normal processing (will forward
* to associated hidraw device)
*
* 2) Data is from DJ collection, and is intended for this driver i. e.
* data contains arrival, departure, etc notifications, in which case
* we queue them for delayed processing by the work queue. We return 1
* to hid-core as no further processing is required from it.
*
* 3) Data is from DJ collection, and informs a connection change,
* if the change means rf link loss, then we must send a null report
* to the upper layer to discard potentially pressed keys that may be
* repeated forever by the input layer. Return 1 to hid-core as no
* further processing is required.
*
* 4) Data is from DJ collection and is an actual input event from
* a paired DJ device in which case we forward it to the correct hid
* device (via hid_input_report() ) and return 1 so hid-core does not do
* anything else with it.
*/
spin_lock_irqsave(&djrcv_dev->lock, flags);
if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
switch (dj_report->report_type) {
case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
logi_dj_recv_queue_notification(djrcv_dev, dj_report);
break;
case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
STATUS_LINKLOSS) {
logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
}
break;
default:
logi_dj_recv_forward_report(djrcv_dev, dj_report);
}
report_processed = true;
}
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return report_processed;
}
| CWE-119 | 179,549 | 1,297 | 260936126132763383743728146298078027704 | null | null | null |

linux | c54def7bd64d7c0b6993336abcffb8444795bf38 | 1 |
static int magicmouse_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
struct input_dev *input = msc->input;
int x = 0, y = 0, ii, clicks = 0, npoints;
switch (data[0]) {
case TRACKPAD_REPORT_ID:
/* Expect four bytes of prefix, and N*9 bytes of touch data. */
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
npoints = (size - 4) / 9;
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
clicks = data[1];
/* The following bits provide a device specific timestamp. They
* are unused here.
*
* ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
*/
break;
case MOUSE_REPORT_ID:
/* Expect six bytes of prefix, and N*8 bytes of touch data. */
if (size < 6 || ((size - 6) % 8) != 0)
return 0;
npoints = (size - 6) / 8;
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
/* When emulating three-button mode, it is important
* to have the current touch information before
* generating a click event.
*/
x = (int)(((data[3] & 0x0c) << 28) | (data[1] << 22)) >> 22;
y = (int)(((data[3] & 0x30) << 26) | (data[2] << 22)) >> 22;
clicks = data[3];
/* The following bits provide a device specific timestamp. They
* are unused here.
*
* ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
*/
break;
case DOUBLE_REPORT_ID:
/* Sometimes the trackpad sends two touch reports in one
* packet.
*/
magicmouse_raw_event(hdev, report, data + 2, data[1]);
magicmouse_raw_event(hdev, report, data + 2 + data[1],
size - 2 - data[1]);
break;
default:
return 0;
}
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
input_mt_report_pointer_emulation(input, true);
}
input_sync(input);
return 1;
}
| CWE-119 | 179,551 | 1,299 | 281911557577455946265369695604494053571 | null | null | null |

ppp | 7658e8257183f062dc01f87969c140707c7e52cb | 1 |
getword(f, word, newlinep, filename)
FILE *f;
char *word;
int *newlinep;
char *filename;
{
int c, len, escape;
int quoted, comment;
int value, digit, got, n;
#define isoctal(c) ((c) >= '0' && (c) < '8')
*newlinep = 0;
len = 0;
escape = 0;
comment = 0;
quoted = 0;
/*
* First skip white-space and comments.
*/
for (;;) {
c = getc(f);
if (c == EOF)
break;
/*
* A newline means the end of a comment; backslash-newline
* is ignored. Note that we cannot have escape && comment.
*/
if (c == '\n') {
if (!escape) {
*newlinep = 1;
comment = 0;
} else
escape = 0;
continue;
}
/*
* Ignore characters other than newline in a comment.
*/
if (comment)
continue;
/*
* If this character is escaped, we have a word start.
*/
if (escape)
break;
/*
* If this is the escape character, look at the next character.
*/
if (c == '\\') {
escape = 1;
continue;
}
/*
* If this is the start of a comment, ignore the rest of the line.
*/
if (c == '#') {
comment = 1;
continue;
}
/*
* A non-whitespace character is the start of a word.
*/
if (!isspace(c))
break;
}
/*
* Process characters until the end of the word.
*/
while (c != EOF) {
if (escape) {
/*
* This character is escaped: backslash-newline is ignored,
* various other characters indicate particular values
* as for C backslash-escapes.
*/
escape = 0;
if (c == '\n') {
c = getc(f);
continue;
}
got = 0;
switch (c) {
case 'a':
value = '\a';
break;
case 'b':
value = '\b';
break;
case 'f':
value = '\f';
break;
case 'n':
value = '\n';
break;
case 'r':
value = '\r';
break;
case 's':
value = ' ';
break;
case 't':
value = '\t';
break;
default:
if (isoctal(c)) {
/*
* \ddd octal sequence
*/
value = 0;
for (n = 0; n < 3 && isoctal(c); ++n) {
value = (value << 3) + (c & 07);
c = getc(f);
}
got = 1;
break;
}
if (c == 'x') {
/*
* \x<hex_string> sequence
*/
value = 0;
c = getc(f);
for (n = 0; n < 2 && isxdigit(c); ++n) {
digit = toupper(c) - '0';
if (digit > 10)
digit += '0' + 10 - 'A';
value = (value << 4) + digit;
c = getc (f);
}
got = 1;
break;
}
/*
* Otherwise the character stands for itself.
*/
value = c;
break;
}
/*
* Store the resulting character for the escape sequence.
*/
if (len < MAXWORDLEN-1)
word[len] = value;
++len;
if (!got)
c = getc(f);
continue;
}
/*
* Backslash starts a new escape sequence.
*/
if (c == '\\') {
escape = 1;
c = getc(f);
continue;
}
/*
* Not escaped: check for the start or end of a quoted
* section and see if we've reached the end of the word.
*/
if (quoted) {
if (c == quoted) {
quoted = 0;
c = getc(f);
continue;
}
} else if (c == '"' || c == '\'') {
quoted = c;
c = getc(f);
continue;
} else if (isspace(c) || c == '#') {
ungetc (c, f);
break;
}
/*
* An ordinary character: store it in the word and get another.
*/
if (len < MAXWORDLEN-1)
word[len] = c;
++len;
c = getc(f);
}
/*
* End of the word: check for errors.
*/
if (c == EOF) {
if (ferror(f)) {
if (errno == 0)
errno = EIO;
option_error("Error reading %s: %m", filename);
die(1);
}
/*
* If len is zero, then we didn't find a word before the
* end of the file.
*/
if (len == 0)
return 0;
if (quoted)
option_error("warning: quoted word runs to end of file (%.20s...)",
filename, word);
}
/*
* Warn if the word was too long, and append a terminating null.
*/
if (len >= MAXWORDLEN) {
option_error("warning: word in file %s too long (%.20s...)",
filename, word);
len = MAXWORDLEN - 1;
}
word[len] = 0;
return 1;
#undef isoctal
}
| CWE-119 | 179,552 | 1,300 | 136171222581299573664976896080084416123 | null | null | null |

linux | e9c243a5a6de0be8e584c604d353412584b592f8 | 1 |
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
u32 __user *uaddr2, int nr_wake, int nr_requeue,
u32 *cmpval, int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
if (requeue_pi) {
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
if (refill_pi_state_cache())
return -ENOMEM;
/*
* requeue_pi must wake as many tasks as it can, up to nr_wake
* + nr_requeue, since it acquires the rt_mutex prior to
* returning to userspace, so as to not leave the rt_mutex with
* waiters and no owner. However, second and third wake-ups
* cannot be predicted as they involve race conditions with the
* first wake and a fault while looking up the pi_state. Both
* pthread_cond_signal() and pthread_cond_broadcast() should
* use nr_wake=1.
*/
if (nr_wake != 1)
return -EINVAL;
}
retry:
if (pi_state != NULL) {
/*
* We will have to lookup the pi_state again, so free this one
* to keep the accounting correct.
*/
free_pi_state(pi_state);
pi_state = NULL;
}
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
hb_waiters_inc(hb2);
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
ret = get_user(curval, uaddr1);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
* faults rather in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
* reference to it. If the lock was taken, ret contains the
* vpid of the top waiter task.
*/
if (ret > 0) {
WARN_ON(pi_state);
drop_count++;
task_count++;
/*
* If we acquired the lock, then the user
* space value of uaddr2 should be vpid. It
* cannot be changed by the top waiter as it
* is blocked on hb2 lock if it tries to do
* so. If something fiddled with it behind our
* back the pi state lookup might unearth
* it. So we rather use the known value than
* rereading and handing potential crap to
* lookup_pi_state.
*/
ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL);
}
switch (ret) {
case 0:
break;
case -EFAULT:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
case -EAGAIN:
/* The owner was exiting, try again. */
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
default:
goto out_unlock;
}
}
plist_for_each_entry_safe(this, next, &hb1->chain, list) {
if (task_count - nr_wake >= nr_requeue)
break;
if (!match_futex(&this->key, &key1))
continue;
/*
* FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*
* We should never be requeueing a futex_q with a pi_state,
* which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
(!requeue_pi && this->rt_waiter) ||
this->pi_state) {
ret = -EINVAL;
break;
}
/*
* Wake nr_wake waiters. For requeue_pi, if we acquired the
* lock, we already woke the top_waiter. If not, it will be
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
wake_futex(this);
continue;
}
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
ret = -EINVAL;
break;
}
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
*/
if (requeue_pi) {
/* Prepare the waiter to take the rt_mutex. */
atomic_inc(&pi_state->refcount);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task, 1);
if (ret == 1) {
/* We got the lock. */
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
free_pi_state(pi_state);
goto out_unlock;
}
}
requeue_futex(this, hb1, hb2, &key2);
drop_count++;
}
out_unlock:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
/*
* drop_futex_key_refs() must be called outside the spinlocks. During
* the requeue we moved futex_q's from the hash bucket at key1 to the
* one at key2 and updated their key pointer. We no longer need to
* hold the references to key1.
*/
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
if (pi_state != NULL)
free_pi_state(pi_state);
return ret ? ret : task_count;
}
| CWE-264 | 179,553 | 1,301 | 17926925629239018958604973870909084640 | null | null | null |

linux | 05ab8f2647e4221cbdb3856dd7d32bd5407316b3 | 1 |
static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = (struct nlattr *) &skb->data[A];
if (nla->nla_len > A - skb->len)
return 0;
nla = nla_find_nested(nla, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
| CWE-189 | 179,556 | 1,303 | 282497948287924023009519278415408669688 | null | null | null |

linux | 57e68e9cd65b4b8eb4045a1e0d0746458502554c | 1 |
static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
struct vm_area_struct *vma, struct page *check_page)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
struct page *page;
unsigned long address;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
unsigned long end;
int ret = SWAP_AGAIN;
int locked_vma = 0;
address = (vma->vm_start + cursor) & CLUSTER_MASK;
end = address + CLUSTER_SIZE;
if (address < vma->vm_start)
address = vma->vm_start;
if (end > vma->vm_end)
end = vma->vm_end;
pmd = mm_find_pmd(mm, address);
if (!pmd)
return ret;
mmun_start = address;
mmun_end = end;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
* If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
* keep the sem while scanning the cluster for mlocking pages.
*/
if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
locked_vma = (vma->vm_flags & VM_LOCKED);
if (!locked_vma)
up_read(&vma->vm_mm->mmap_sem); /* don't need it */
}
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
for (; address < end; pte++, address += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
page = vm_normal_page(vma, address, *pte);
BUG_ON(!page || PageAnon(page));
if (locked_vma) {
mlock_vma_page(page); /* no-op if already mlocked */
if (page == check_page)
ret = SWAP_MLOCK;
continue; /* don't unmap */
}
if (ptep_clear_flush_young_notify(vma, address, pte))
continue;
/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pte));
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
if (page->index != linear_page_index(vma, address)) {
pte_t ptfile = pgoff_to_pte(page->index);
if (pte_soft_dirty(pteval))
pte_file_mksoft_dirty(ptfile);
set_pte_at(mm, address, pte, ptfile);
}
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
set_page_dirty(page);
page_remove_rmap(page);
page_cache_release(page);
dec_mm_counter(mm, MM_FILEPAGES);
(*mapcount)--;
}
pte_unmap_unlock(pte - 1, ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (locked_vma)
up_read(&vma->vm_mm->mmap_sem);
return ret;
}
| CWE-264 | 179,559 | 1,305 | 1665666048709289334997649135817394011 | null | null | null |

linux | a03ffcf873fe0f2565386ca8ef832144c42e67fa | 1 |
void bpf_jit_compile(struct sk_filter *fp)
{
u8 temp[64];
u8 *prog;
unsigned int proglen, oldproglen = 0;
int ilen, i;
int t_offset, f_offset;
u8 t_op, f_op, seen = 0, pass;
u8 *image = NULL;
u8 *func;
int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
unsigned int cleanup_addr; /* epilogue code offset */
unsigned int *addrs;
const struct sock_filter *filter = fp->insns;
int flen = fp->len;
if (!bpf_jit_enable)
return;
addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
/* Before first pass, make a rough estimation of addrs[]
* each bpf instruction is translated to less than 64 bytes
*/
for (proglen = 0, i = 0; i < flen; i++) {
proglen += 64;
addrs[i] = proglen;
}
cleanup_addr = proglen; /* epilogue address */
for (pass = 0; pass < 10; pass++) {
/* no prologue/epilogue for trivial filters (RET something) */
proglen = 0;
prog = temp;
if (seen) {
EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
/* note : must save %rbx in case bpf_error is hit */
if (seen & (SEEN_XREG | SEEN_DATAREF))
EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
if (seen & SEEN_XREG)
CLEAR_X(); /* make sure we dont leek kernel memory */
/*
* If this filter needs to access skb data,
* loads r9 and r8 with :
* r9 = skb->len - skb->data_len
* r8 = skb->data
*/
if (seen & SEEN_DATAREF) {
if (offsetof(struct sk_buff, len) <= 127)
/* mov off8(%rdi),%r9d */
EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
else {
/* mov off32(%rdi),%r9d */
EMIT3(0x44, 0x8b, 0x8f);
EMIT(offsetof(struct sk_buff, len), 4);
}
if (is_imm8(offsetof(struct sk_buff, data_len)))
/* sub off8(%rdi),%r9d */
EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
else {
EMIT3(0x44, 0x2b, 0x8f);
EMIT(offsetof(struct sk_buff, data_len), 4);
}
if (is_imm8(offsetof(struct sk_buff, data)))
/* mov off8(%rdi),%r8 */
EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
else {
/* mov off32(%rdi),%r8 */
EMIT3(0x4c, 0x8b, 0x87);
EMIT(offsetof(struct sk_buff, data), 4);
}
}
}
switch (filter[0].code) {
case BPF_S_RET_K:
case BPF_S_LD_W_LEN:
case BPF_S_ANC_PROTOCOL:
case BPF_S_ANC_IFINDEX:
case BPF_S_ANC_MARK:
case BPF_S_ANC_RXHASH:
case BPF_S_ANC_CPU:
case BPF_S_ANC_QUEUE:
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
/* first instruction sets A register (or is RET 'constant') */
break;
default:
/* make sure we dont leak kernel information to user */
CLEAR_A(); /* A = 0 */
}
for (i = 0; i < flen; i++) {
unsigned int K = filter[i].k;
switch (filter[i].code) {
case BPF_S_ALU_ADD_X: /* A += X; */
seen |= SEEN_XREG;
EMIT2(0x01, 0xd8); /* add %ebx,%eax */
break;
case BPF_S_ALU_ADD_K: /* A += K; */
if (!K)
break;
if (is_imm8(K))
EMIT3(0x83, 0xc0, K); /* add imm8,%eax */
else
EMIT1_off32(0x05, K); /* add imm32,%eax */
break;
case BPF_S_ALU_SUB_X: /* A -= X; */
seen |= SEEN_XREG;
EMIT2(0x29, 0xd8); /* sub %ebx,%eax */
break;
case BPF_S_ALU_SUB_K: /* A -= K */
if (!K)
break;
if (is_imm8(K))
EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
else
EMIT1_off32(0x2d, K); /* sub imm32,%eax */
break;
case BPF_S_ALU_MUL_X: /* A *= X; */
seen |= SEEN_XREG;
EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */
break;
case BPF_S_ALU_MUL_K: /* A *= K */
if (is_imm8(K))
EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
else {
EMIT2(0x69, 0xc0); /* imul imm32,%eax */
EMIT(K, 4);
}
break;
case BPF_S_ALU_DIV_X: /* A /= X; */
seen |= SEEN_XREG;
EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
if (pc_ret0 != -1)
EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
else {
EMIT_COND_JMP(X86_JNE, 2 + 5);
CLEAR_A();
EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
}
EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
break;
case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
EMIT(K, 4);
EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
break;
case BPF_S_ALU_AND_X:
seen |= SEEN_XREG;
EMIT2(0x21, 0xd8); /* and %ebx,%eax */
break;
case BPF_S_ALU_AND_K:
if (K >= 0xFFFFFF00) {
EMIT2(0x24, K & 0xFF); /* and imm8,%al */
} else if (K >= 0xFFFF0000) {
EMIT2(0x66, 0x25); /* and imm16,%ax */
EMIT2(K, 2);
} else {
EMIT1_off32(0x25, K); /* and imm32,%eax */
}
break;
case BPF_S_ALU_OR_X:
seen |= SEEN_XREG;
EMIT2(0x09, 0xd8); /* or %ebx,%eax */
break;
case BPF_S_ALU_OR_K:
if (is_imm8(K))
EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
else
EMIT1_off32(0x0d, K); /* or imm32,%eax */
break;
case BPF_S_ALU_LSH_X: /* A <<= X; */
seen |= SEEN_XREG;
EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
break;
case BPF_S_ALU_LSH_K:
if (K == 0)
break;
else if (K == 1)
EMIT2(0xd1, 0xe0); /* shl %eax */
else
EMIT3(0xc1, 0xe0, K);
break;
case BPF_S_ALU_RSH_X: /* A >>= X; */
seen |= SEEN_XREG;
EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
break;
case BPF_S_ALU_RSH_K: /* A >>= K; */
if (K == 0)
break;
else if (K == 1)
EMIT2(0xd1, 0xe8); /* shr %eax */
else
EMIT3(0xc1, 0xe8, K);
break;
case BPF_S_ALU_NEG:
EMIT2(0xf7, 0xd8); /* neg %eax */
break;
case BPF_S_RET_K:
if (!K) {
if (pc_ret0 == -1)
pc_ret0 = i;
CLEAR_A();
} else {
EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
}
/* fallinto */
case BPF_S_RET_A:
if (seen) {
if (i != flen - 1) {
EMIT_JMP(cleanup_addr - addrs[i]);
break;
}
if (seen & SEEN_XREG)
EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
EMIT1(0xc9); /* leaveq */
}
EMIT1(0xc3); /* ret */
break;
case BPF_S_MISC_TAX: /* X = A */
seen |= SEEN_XREG;
EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
break;
case BPF_S_MISC_TXA: /* A = X */
seen |= SEEN_XREG;
EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
break;
case BPF_S_LD_IMM: /* A = K */
if (!K)
CLEAR_A();
else
EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
break;
case BPF_S_LDX_IMM: /* X = K */
seen |= SEEN_XREG;
if (!K)
CLEAR_X();
else
EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
break;
case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
seen |= SEEN_MEM;
EMIT3(0x8b, 0x45, 0xf0 - K*4);
break;
case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
seen |= SEEN_XREG | SEEN_MEM;
EMIT3(0x8b, 0x5d, 0xf0 - K*4);
break;
case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
seen |= SEEN_MEM;
EMIT3(0x89, 0x45, 0xf0 - K*4);
break;
case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
seen |= SEEN_XREG | SEEN_MEM;
EMIT3(0x89, 0x5d, 0xf0 - K*4);
break;
case BPF_S_LD_W_LEN: /* A = skb->len; */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
if (is_imm8(offsetof(struct sk_buff, len)))
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, len), 4);
}
break;
case BPF_S_LDX_W_LEN: /* X = skb->len; */
seen |= SEEN_XREG;
if (is_imm8(offsetof(struct sk_buff, len)))
/* mov off8(%rdi),%ebx */
EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
else {
EMIT2(0x8b, 0x9f);
EMIT(offsetof(struct sk_buff, len), 4);
}
break;
case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
if (is_imm8(offsetof(struct sk_buff, protocol))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
} else {
EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
EMIT(offsetof(struct sk_buff, protocol), 4);
}
EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
break;
case BPF_S_ANC_IFINDEX:
if (is_imm8(offsetof(struct sk_buff, dev))) {
/* movq off8(%rdi),%rax */
EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
} else {
EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
EMIT(offsetof(struct sk_buff, dev), 4);
}
EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
EMIT(offsetof(struct net_device, ifindex), 4);
break;
case BPF_S_ANC_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
if (is_imm8(offsetof(struct sk_buff, mark))) {
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
} else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, mark), 4);
}
break;
case BPF_S_ANC_RXHASH:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
if (is_imm8(offsetof(struct sk_buff, rxhash))) {
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
} else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, rxhash), 4);
}
break;
case BPF_S_ANC_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
} else {
EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
EMIT(offsetof(struct sk_buff, queue_mapping), 4);
}
break;
case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
#else
CLEAR_A();
#endif
break;
case BPF_S_LD_W_ABS:
func = sk_load_word;
common_load: seen |= SEEN_DATAREF;
if ((int)K < 0)
goto out;
t_offset = func - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call */
break;
case BPF_S_LD_H_ABS:
func = sk_load_half;
goto common_load;
case BPF_S_LD_B_ABS:
func = sk_load_byte;
goto common_load;
case BPF_S_LDX_B_MSH:
if ((int)K < 0) {
if (pc_ret0 != -1) {
EMIT_JMP(addrs[pc_ret0] - addrs[i]);
break;
}
CLEAR_A();
EMIT_JMP(cleanup_addr - addrs[i]);
break;
}
seen |= SEEN_DATAREF | SEEN_XREG;
t_offset = sk_load_byte_msh - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
break;
case BPF_S_LD_W_IND:
func = sk_load_word_ind;
common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
t_offset = func - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
break;
case BPF_S_LD_H_IND:
func = sk_load_half_ind;
goto common_load_ind;
case BPF_S_LD_B_IND:
func = sk_load_byte_ind;
goto common_load_ind;
case BPF_S_JMP_JA:
t_offset = addrs[i + K] - addrs[i];
EMIT_JMP(t_offset);
break;
COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
t_offset = addrs[i + filter[i].jt] - addrs[i];
/* same targets, can avoid doing the test :) */
if (filter[i].jt == filter[i].jf) {
EMIT_JMP(t_offset);
break;
}
switch (filter[i].code) {
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JEQ_X:
seen |= SEEN_XREG;
EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
break;
case BPF_S_JMP_JSET_X:
seen |= SEEN_XREG;
EMIT2(0x85, 0xd8); /* test %ebx,%eax */
break;
case BPF_S_JMP_JEQ_K:
if (K == 0) {
EMIT2(0x85, 0xc0); /* test %eax,%eax */
break;
}
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGE_K:
if (K <= 127)
EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
else
EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
break;
case BPF_S_JMP_JSET_K:
if (K <= 0xFF)
EMIT2(0xa8, K); /* test imm8,%al */
else if (!(K & 0xFFFF00FF))
EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
else if (K <= 0xFFFF) {
EMIT2(0x66, 0xa9); /* test imm16,%ax */
EMIT(K, 2);
} else {
EMIT1_off32(0xa9, K); /* test imm32,%eax */
}
break;
}
if (filter[i].jt != 0) {
if (filter[i].jf)
t_offset += is_near(f_offset) ? 2 : 6;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);
break;
}
EMIT_COND_JMP(f_op, f_offset);
break;
default:
/* hmm, too complex filter, give up with jit compiler */
goto out;
}
ilen = prog - temp;
if (image) {
if (unlikely(proglen + ilen > oldproglen)) {
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
module_free(NULL, image);
return;
}
memcpy(image + proglen, temp, ilen);
}
proglen += ilen;
addrs[i] = proglen;
prog = temp;
}
/* last bpf instruction is always a RET :
* use it to give the cleanup instruction(s) addr
*/
cleanup_addr = proglen - 1; /* ret */
if (seen)
cleanup_addr -= 1; /* leaveq */
if (seen & SEEN_XREG)
cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
if (image) {
WARN_ON(proglen != oldproglen);
break;
}
if (proglen == oldproglen) {
image = module_alloc(max_t(unsigned int,
proglen,
sizeof(struct work_struct)));
if (!image)
goto out;
}
oldproglen = proglen;
}
if (bpf_jit_enable > 1)
pr_err("flen=%d proglen=%u pass=%d image=%p\n",
flen, proglen, pass, image);
if (image) {
if (bpf_jit_enable > 1)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
16, 1, image, proglen, false);
bpf_flush_icache(image, image + proglen);
fp->bpf_func = (void *)image;
}
out:
kfree(addrs);
return;
}
| CWE-189 | 179,560 | 1,306 | 197306910127583663033501869105835713601 | null | null | null |

linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 1 |
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
int offset, ret;
u8 smac[ETH_ALEN];
u8 alt_smac[ETH_ALEN];
u8 *psmac = smac;
u8 *palt_smac = alt_smac;
int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
RDMA_TRANSPORT_IB) &&
(rdma_port_get_link_layer(cm_id->device,
ib_event->param.req_rcvd.port) ==
IB_LINK_LAYER_ETHERNET));
listen_id = cm_id->context;
if (!cma_check_req_qp_type(&listen_id->id, ib_event))
return -EINVAL;
if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
return -ECONNABORTED;
memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
conn_id = cma_new_udp_id(&listen_id->id, ib_event);
event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len =
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
} else {
conn_id = cma_new_conn_id(&listen_id->id, ib_event);
cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
ib_event->private_data, offset);
}
if (!conn_id) {
ret = -ENOMEM;
goto err1;
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
ret = cma_acquire_dev(conn_id, listen_id);
if (ret)
goto err2;
conn_id->cm_id.ib = cm_id;
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
/*
* Protect against the user destroying conn_id from another thread
* until we're done accessing it.
*/
atomic_inc(&conn_id->refcount);
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret)
goto err3;
if (is_iboe) {
if (ib_event->param.req_rcvd.primary_path != NULL)
rdma_addr_find_smac_by_sgid(
&ib_event->param.req_rcvd.primary_path->sgid,
psmac, NULL);
else
psmac = NULL;
if (ib_event->param.req_rcvd.alternate_path != NULL)
rdma_addr_find_smac_by_sgid(
&ib_event->param.req_rcvd.alternate_path->sgid,
palt_smac, NULL);
else
palt_smac = NULL;
}
/*
* Acquire mutex to prevent user executing rdma_destroy_id()
* while we're accessing the cm_id.
*/
mutex_lock(&lock);
if (is_iboe)
ib_update_cm_av(cm_id, psmac, palt_smac);
if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
(conn_id->id.qp_type != IB_QPT_UD))
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
return 0;
err3:
cma_deref_id(conn_id);
/* Destroy the CM ID by returning a non-zero value. */
conn_id->cm_id.ib = NULL;
err2:
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
err1:
mutex_unlock(&listen_id->handler_mutex);
if (conn_id)
rdma_destroy_id(&conn_id->id);
return ret;
}
| CWE-20 | 179,562 | 1,308 | 47485012312962957974123923967235310012 | null | null | null |

linux | 1d147bfa64293b2723c4fec50922168658e613ba | 1 |
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
struct sta_info *sta = tx->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_local *local = tx->local;
if (unlikely(!sta))
return TX_CONTINUE;
if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
!(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
int ac = skb_get_queue_mapping(tx->skb);
ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
sta->sta.addr, sta->sta.aid, ac);
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
ps_dbg(tx->sdata,
"STA %pM TX buffer for AC %d full - dropping oldest frame\n",
sta->sta.addr, ac);
ieee80211_free_txskb(&local->hw, old);
} else
tx->local->total_ps_buffered++;
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
round_jiffies(jiffies +
STA_INFO_CLEANUP_INTERVAL));
/*
* We queued up some frames, so the TIM bit might
* need to be set, recalculate it.
*/
sta_info_recalc_tim(sta);
return TX_QUEUED;
} else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
ps_dbg(tx->sdata,
"STA %pM in PS mode, but polling/in SP -> send frame\n",
sta->sta.addr);
}
return TX_CONTINUE;
}
| CWE-362 | 179,565 | 1,310 | 293546470110660309554071626143606986714 | null | null | null |

linux | 621b5060e823301d0cba4cb52a7ee3491922d291 | 1 |
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
flush_fp_to_thread(src);
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
*dst = *src;
clear_task_ebb(dst);
return 0;
}
| CWE-20 | 179,566 | 1,311 | 5326801381502655410317857099101449727 | null | null | null |

linux | 21f8aaee0c62708654988ce092838aa7df4d25d8 | 1 |
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_node *an)
{
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
struct ath_txq *txq;
bool buffered;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
if (!tid->sched)
continue;
ac = tid->ac;
txq = ac->txq;
ath_txq_lock(sc, txq);
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
list_del(&tid->list);
if (ac->sched) {
ac->sched = false;
list_del(&ac->list);
}
ath_txq_unlock(sc, txq);
ieee80211_sta_set_buffered(sta, tidno, buffered);
}
}
| CWE-362 | 179,567 | 1,312 | 27558184441602357892967054392522172781 | null | null | null |

linux | b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | 1 |
static int dccp_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo,
u_int8_t pf, unsigned int hooknum)
{
struct dccp_hdr _dh, *dh;
unsigned int dccp_len = skb->len - dataoff;
unsigned int cscov;
const char *msg;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
if (dh == NULL) {
msg = "nf_ct_dccp: short packet ";
goto out_invalid;
}
if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
dh->dccph_doff * 4 > dccp_len) {
msg = "nf_ct_dccp: truncated/malformed packet ";
goto out_invalid;
}
cscov = dccp_len;
if (dh->dccph_cscov) {
cscov = (dh->dccph_cscov - 1) * 4;
if (cscov > dccp_len) {
msg = "nf_ct_dccp: bad checksum coverage ";
goto out_invalid;
}
}
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP,
pf)) {
msg = "nf_ct_dccp: bad checksum ";
goto out_invalid;
}
if (dh->dccph_type >= DCCP_PKT_INVALID) {
msg = "nf_ct_dccp: reserved packet type ";
goto out_invalid;
}
return NF_ACCEPT;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
return -NF_ACCEPT;
}
| CWE-20 | 179,592 | 1,313 | 324117670837559905963400790338875637207 | null | null | null |

linux | b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | 1 |
static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
struct dccp_hdr _dh, *dh;
const char *msg;
u_int8_t state;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
dn = dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "nf_ct_dccp: not picking up existing connection ";
goto out_invalid;
}
case CT_DCCP_REQUEST:
break;
case CT_DCCP_INVALID:
msg = "nf_ct_dccp: invalid state transition ";
goto out_invalid;
}
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
NULL, "%s", msg);
return false;
}
| CWE-20 | 179,593 | 1,314 | 71271037971402571015617838228718316366 | null | null | null |

linux | b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | 1 |
static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, enum ip_conntrack_info ctinfo,
u_int8_t pf, unsigned int hooknum,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
BUG_ON(dh == NULL);
type = dh->dccph_type;
if (type == DCCP_PKT_RESET &&
!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
/* Tear down connection immediately if only reply is a RESET */
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
spin_lock_bh(&ct->lock);
role = ct->proto.dccp.role[dir];
old_state = ct->proto.dccp.state;
new_state = dccp_state_table[role][type][old_state];
switch (new_state) {
case CT_DCCP_REQUEST:
if (old_state == CT_DCCP_TIMEWAIT &&
role == CT_DCCP_ROLE_SERVER) {
/* Reincarnation in the reverse direction: reopen and
* reverse client/server roles. */
ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER;
}
break;
case CT_DCCP_RESPOND:
if (old_state == CT_DCCP_REQUEST)
ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
break;
case CT_DCCP_PARTOPEN:
if (old_state == CT_DCCP_RESPOND &&
type == DCCP_PKT_ACK &&
dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq)
set_bit(IPS_ASSURED_BIT, &ct->status);
break;
case CT_DCCP_IGNORE:
/*
* Connection tracking might be out of sync, so we ignore
* packets that might establish a new connection and resync
* if the server responds with a valid Response.
*/
if (ct->proto.dccp.last_dir == !dir &&
ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST &&
type == DCCP_PKT_RESPONSE) {
ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
new_state = CT_DCCP_RESPOND;
break;
}
ct->proto.dccp.last_dir = dir;
ct->proto.dccp.last_pkt = type;
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_dccp: invalid packet ignored ");
return NF_ACCEPT;
case CT_DCCP_INVALID:
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_dccp: invalid state transition ");
return -NF_ACCEPT;
}
ct->proto.dccp.last_dir = dir;
ct->proto.dccp.last_pkt = type;
ct->proto.dccp.state = new_state;
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
}
| CWE-20 | 179,594 | 1,315 | 192674082710633702297802617075410075604 | null | null | null |

linux | 263b4509ec4d47e0da3e753f85a39ea12d1eff24 | 1 |
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
if (file->f_flags & O_DSYNC)
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
(inode->i_flock->fl_start == 0 &&
inode->i_flock->fl_end == OFFSET_MAX &&
inode->i_flock->fl_type != F_RDLCK)))
return 1;
return 0;
}
| CWE-20 | 179,596 | 1,316 | 289835272320192960652245810503833865526 | null | null | null |

linux | e6a623460e5fc960ac3ee9f946d3106233fd28d8 | 1 |
static long media_device_enum_entities(struct media_device *mdev,
struct media_entity_desc __user *uent)
{
struct media_entity *ent;
struct media_entity_desc u_ent;
if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
return -EFAULT;
ent = find_entity(mdev, u_ent.id);
if (ent == NULL)
return -EINVAL;
u_ent.id = ent->id;
if (ent->name) {
strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
u_ent.name[sizeof(u_ent.name) - 1] = '\0';
} else {
memset(u_ent.name, 0, sizeof(u_ent.name));
}
u_ent.type = ent->type;
u_ent.revision = ent->revision;
u_ent.flags = ent->flags;
u_ent.group_id = ent->group_id;
u_ent.pads = ent->num_pads;
u_ent.links = ent->num_links - ent->num_backlinks;
memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
return -EFAULT;
return 0;
}
| CWE-200 | 179,605 | 1,323 | 291851125023156121046432945692712729103 | null | null | null |

linux | 2145e15e0557a01b9195d1c7199a1b92cb9be81f | 1 |
static int raw_cmd_copyout(int cmd, void __user *param,
struct floppy_raw_cmd *ptr)
{
int ret;
while (ptr) {
ret = copy_to_user(param, ptr, sizeof(*ptr));
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) {
if (ptr->length >= 0 &&
ptr->length <= ptr->buffer_length) {
long length = ptr->buffer_length - ptr->length;
ret = fd_copyout(ptr->data, ptr->kernel_data,
length);
if (ret)
return ret;
}
}
ptr = ptr->next;
}
return 0;
}
| CWE-264 | 179,606 | 1,324 | 339071363535589486337675166434921700616 | null | null | null |

linux | 2690d97ade05c5325cbf7c72b94b90d265659886 | 1 |
static unsigned int help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp)
{
char buffer[sizeof("4294967296 65635")];
u_int16_t port;
unsigned int ret;
/* Reply comes from server. */
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->dir = IP_CT_DIR_REPLY;
exp->expectfn = nf_nat_follow_master;
/* Try to get same port: if not, try to change it. */
for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
int ret;
exp->tuple.dst.u.tcp.port = htons(port);
ret = nf_ct_expect_related(exp);
if (ret == 0)
break;
else if (ret != -EBUSY) {
port = 0;
break;
}
}
if (port == 0) {
nf_ct_helper_log(skb, exp->master, "all ports in use");
return NF_DROP;
}
ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
protoff, matchoff, matchlen, buffer,
strlen(buffer));
if (ret != NF_ACCEPT) {
nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
nf_ct_unexpect_related(exp);
}
return ret;
}
| CWE-119 | 179,608 | 1,325 | 59351750141192127461878933880847518584 | null | null | null |

linux | 8e3fbf870481eb53b2d3a322d1fc395ad8b367ed | 1 |
static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct yam_port *yp = netdev_priv(dev);
struct yamdrv_ioctl_cfg yi;
struct yamdrv_ioctl_mcs *ym;
int ioctl_cmd;
if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int)))
return -EFAULT;
if (yp->magic != YAM_MAGIC)
return -EINVAL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd != SIOCDEVPRIVATE)
return -EINVAL;
switch (ioctl_cmd) {
case SIOCYAMRESERVED:
return -EINVAL; /* unused */
case SIOCYAMSMCS:
if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
return -ENOBUFS;
if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
kfree(ym);
return -EFAULT;
}
if (ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
/* setting predef as 0 for loading userdefined mcs data */
add_mcs(ym->bits, ym->bitrate, 0);
kfree(ym);
break;
case SIOCYAMSCFG:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if (yi.cfg.mask & YAM_IOBASE) {
yp->iobase = yi.cfg.iobase;
dev->base_addr = yi.cfg.iobase;
}
if (yi.cfg.mask & YAM_IRQ) {
if (yi.cfg.irq > 15)
return -EINVAL;
yp->irq = yi.cfg.irq;
dev->irq = yi.cfg.irq;
}
if (yi.cfg.mask & YAM_BITRATE) {
if (yi.cfg.bitrate > YAM_MAXBITRATE)
return -EINVAL;
yp->bitrate = yi.cfg.bitrate;
}
if (yi.cfg.mask & YAM_BAUDRATE) {
if (yi.cfg.baudrate > YAM_MAXBAUDRATE)
return -EINVAL;
yp->baudrate = yi.cfg.baudrate;
}
if (yi.cfg.mask & YAM_MODE) {
if (yi.cfg.mode > YAM_MAXMODE)
return -EINVAL;
yp->dupmode = yi.cfg.mode;
}
if (yi.cfg.mask & YAM_HOLDDLY) {
if (yi.cfg.holddly > YAM_MAXHOLDDLY)
return -EINVAL;
yp->holdd = yi.cfg.holddly;
}
if (yi.cfg.mask & YAM_TXDELAY) {
if (yi.cfg.txdelay > YAM_MAXTXDELAY)
return -EINVAL;
yp->txd = yi.cfg.txdelay;
}
if (yi.cfg.mask & YAM_TXTAIL) {
if (yi.cfg.txtail > YAM_MAXTXTAIL)
return -EINVAL;
yp->txtail = yi.cfg.txtail;
}
if (yi.cfg.mask & YAM_PERSIST) {
if (yi.cfg.persist > YAM_MAXPERSIST)
return -EINVAL;
yp->pers = yi.cfg.persist;
}
if (yi.cfg.mask & YAM_SLOTTIME) {
if (yi.cfg.slottime > YAM_MAXSLOTTIME)
return -EINVAL;
yp->slot = yi.cfg.slottime;
yp->slotcnt = yp->slot / 10;
}
break;
case SIOCYAMGCFG:
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
yi.cfg.bitrate = yp->bitrate;
yi.cfg.baudrate = yp->baudrate;
yi.cfg.mode = yp->dupmode;
yi.cfg.txdelay = yp->txd;
yi.cfg.holddly = yp->holdd;
yi.cfg.txtail = yp->txtail;
yi.cfg.persist = yp->pers;
yi.cfg.slottime = yp->slot;
if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
break;
default:
return -EINVAL;
}
return 0;
}
| CWE-399 | 179,609 | 1,326 | 11306301792634981723849234270353662823 | null | null | null |

linux | 2b13d06c9584b4eb773f1e80bbaedab9a1c344e1 | 1 |
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings line;
port_t *port = dev_to_port(dev);
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
if (ifr->ifr_settings.size < size) {
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
line.clock_type = get_status(port)->clocking;
line.clock_rate = 0;
line.loopback = 0;
if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
size))
return -EFAULT;
if (line.clock_type != CLOCK_EXT &&
line.clock_type != CLOCK_TXFROMRX)
return -EINVAL; /* No such clock setting */
if (line.loopback != 0)
return -EINVAL;
get_status(port)->clocking = line.clock_type;
return 0;
default:
return hdlc_ioctl(dev, ifr, cmd);
}
}
| CWE-399 | 179,610 | 1,327 | 289977000113180624110394930896450816184 | null | null | null |

linux | 96b340406724d87e4621284ebac5e059d67b2194 | 1 |
fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
struct ifreq *ifr)
{
sync_serial_settings sync;
int i;
/* First check what line type is set, we'll default to reporting X.21
* if nothing is set as IF_IFACE_SYNC_SERIAL implies it can't be
* changed
*/
switch (port->hwif) {
case E1:
ifr->ifr_settings.type = IF_IFACE_E1;
break;
case T1:
ifr->ifr_settings.type = IF_IFACE_T1;
break;
case V35:
ifr->ifr_settings.type = IF_IFACE_V35;
break;
case V24:
ifr->ifr_settings.type = IF_IFACE_V24;
break;
case X21D:
ifr->ifr_settings.type = IF_IFACE_X21D;
break;
case X21:
default:
ifr->ifr_settings.type = IF_IFACE_X21;
break;
}
if (ifr->ifr_settings.size == 0) {
return 0; /* only type requested */
}
if (ifr->ifr_settings.size < sizeof (sync)) {
return -ENOMEM;
}
i = port->index;
sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
/* Lucky card and linux use same encoding here */
sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
return -EFAULT;
}
ifr->ifr_settings.size = sizeof (sync);
return 0;
}
| CWE-399 | 179,611 | 1,328 | 123961501830758972382629971272408980959 | null | null | null |

FreeRDP | e2745807c4c3e0a590c0f69a9b655dc74ebaa03e | 1 |
BOOL license_read_scope_list(wStream* s, SCOPE_LIST* scopeList)
{
UINT32 i;
UINT32 scopeCount;
if (Stream_GetRemainingLength(s) < 4)
return FALSE;
Stream_Read_UINT32(s, scopeCount); /* ScopeCount (4 bytes) */
scopeList->count = scopeCount;
scopeList->array = (LICENSE_BLOB*) malloc(sizeof(LICENSE_BLOB) * scopeCount);
/* ScopeArray */
for (i = 0; i < scopeCount; i++)
{
scopeList->array[i].type = BB_SCOPE_BLOB;
if (!license_read_binary_blob(s, &scopeList->array[i]))
return FALSE;
}
return TRUE;
}
|
CWE-189
| 179,612 | 1,329 |
136158675070583433823164450078683715601
| null | null | null |
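In the FreeRDP row above, scopeCount comes straight off the wire and sizes both the malloc and the following loop, with no overflow check and no NULL check on the result — the kind of flaw the CWE-189 tag usually denotes. A hedged, standalone sketch of an overflow-checked array allocation (illustrative name, not FreeRDP's actual fix):

#include <stdint.h>
#include <stdlib.h>

/* Allocate count elements of size bytes, refusing counts whose product
 * would wrap around SIZE_MAX. Returns NULL on overflow or OOM. */
void *checked_array_alloc(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL;               /* count * size would overflow */
    return calloc(count, size);    /* calloc re-checks and zero-fills */
}

Callers still have to handle a NULL return before indexing the array, which the vulnerable loop above does not.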
file
|
f97486ef5dc3e8735440edc4fc8808c63e1a3ef0
| 1 |
cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements; j++, i++) {
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
|
CWE-119
| 179,614 | 1,330 |
203768158432833195128902306432175810308
| null | null | null |
file
|
b8acc83781d5a24cc5101e525d15efe0482c280d
| 1 |
cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t i, maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE2(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
for (i = 0; i < CDF_TOLE4(si->si_count); i++) {
if (i >= CDF_LOOP_LIMIT) {
DPRINTF(("Unpack summary info loop limit"));
errno = EFTYPE;
return -1;
}
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset),
info, count, &maxcount) == -1) {
return -1;
}
}
return 0;
}
|
CWE-399
| 179,615 | 1,331 |
256057506035934494872155526230950557498
| null | null | null |
file
|
6d209c1c489457397a5763bca4b28e43aac90391
| 1 |
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
}
|
CWE-119
| 179,616 | 1,332 |
17449660697413267711595090029120186354
| null | null | null |
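The bounds check in this cdf_read_short_sector row compares only the start offset pos against the stream size, so a len-byte memcpy that begins just under the limit can still run past the end. As an illustration of checking the end of the access instead (a sketch under that reading, not libmagic's actual patch):

#include <stddef.h>
#include <string.h>

/* Copy len bytes at offset pos from a stream of 'total' bytes, but only
 * if the *end* of the read stays inside the stream; the subtraction form
 * avoids overflowing pos + len. */
int copy_from_stream(char *dst, const char *stream, size_t total,
                     size_t pos, size_t len)
{
    if (pos > total || len > total - pos)
        return -1;                 /* read would run off the stream */
    memcpy(dst, stream + pos, len);
    return 0;
}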
linux
|
7ada876a8703f23befbb20a7465a702ee39b1704
| 1 |
static int futex_wait(u32 __user *uaddr, int fshared,
u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct restart_block *restart;
struct futex_hash_bucket *hb;
struct futex_q q;
int ret;
if (!bitset)
return -EINVAL;
q.pi_state = NULL;
q.bitset = bitset;
q.rt_waiter = NULL;
q.requeue_pi_key = NULL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
retry:
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out;
/* queue_me and wait for wakeup, timeout, or a signal. */
futex_wait_queue_me(hb, &q, to);
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
if (!unqueue_me(&q))
goto out_put_key;
ret = -ETIMEDOUT;
if (to && !to->task)
goto out_put_key;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
if (!signal_pending(current)) {
put_futex_key(fshared, &q.key);
goto retry;
}
ret = -ERESTARTSYS;
if (!abs_time)
goto out_put_key;
restart = &current_thread_block_info()->restart_block;
restart->fn = futex_wait_restart;
restart->futex.uaddr = (u32 *)uaddr;
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
restart->futex.flags = FLAGS_HAS_TIMEOUT;
if (fshared)
restart->futex.flags |= FLAGS_SHARED;
if (clockrt)
restart->futex.flags |= FLAGS_CLOCKRT;
ret = -ERESTART_RESTARTBLOCK;
out_put_key:
put_futex_key(fshared, &q.key);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
|
CWE-119
| 179,621 | 1,335 |
173721295716039508648389125595800340840
| null | null | null |
linux
|
86acdca1b63e6890540fa19495cfc708beff3d8b
| 1 |
static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd)
{
int error;
void *cookie;
struct dentry *dentry = path->dentry;
touch_atime(path->mnt, dentry);
nd_set_link(nd, NULL);
if (path->mnt != nd->path.mnt) {
path_to_nameidata(path, nd);
dget(dentry);
}
mntget(path->mnt);
cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(cookie);
if (!IS_ERR(cookie)) {
char *s = nd_get_link(nd);
error = 0;
if (s)
error = __vfs_follow_link(nd, s);
else if (nd->last_type == LAST_BIND) {
error = force_reval_path(&nd->path, nd);
if (error)
path_put(&nd->path);
}
if (dentry->d_inode->i_op->put_link)
dentry->d_inode->i_op->put_link(dentry, nd, cookie);
}
return error;
}
|
CWE-20
| 179,626 | 1,339 |
256736984301789387439648177571089396092
| null | null | null |
linux
|
4291086b1f081b869c6d79e5b7441633dc3ace00
| 1 |
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
const unsigned char *b = buf;
DECLARE_WAITQUEUE(wait, current);
int c;
ssize_t retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
retval = tty_check_change(tty);
if (retval)
return retval;
}
down_read(&tty->termios_rwsem);
/* Write out any echoed characters that are still pending */
process_echoes(tty);
add_wait_queue(&tty->write_wait, &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
retval = -EIO;
break;
}
if (O_OPOST(tty)) {
while (nr > 0) {
ssize_t num = process_output_block(tty, b, nr);
if (num < 0) {
if (num == -EAGAIN)
break;
retval = num;
goto break_out;
}
b += num;
nr -= num;
if (nr == 0)
break;
c = *b;
if (process_output(c, tty) < 0)
break;
b++; nr--;
}
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
} else {
while (nr > 0) {
c = tty->ops->write(tty, b, nr);
if (c < 0) {
retval = c;
goto break_out;
}
if (!c)
break;
b += c;
nr -= c;
}
}
if (!nr)
break;
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
break;
}
up_read(&tty->termios_rwsem);
schedule();
down_read(&tty->termios_rwsem);
}
break_out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (b - buf != nr && tty->fasync)
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
up_read(&tty->termios_rwsem);
return (b - buf) ? b - buf : retval;
}
|
CWE-362
| 179,628 | 1,341 |
177764399866911617439486199510054592235
| null | null | null |
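The CWE-362 tag on the n_tty row marks a race: the write loop and the echo processing it calls touch shared output state, and dropping termios_rwsem around schedule() widens the window. The kernel-side fix is out of scope here, but a minimal userspace sketch of the underlying discipline — every writer of a shared buffer takes the same lock around the check-and-append sequence — looks like this (pthreads, illustrative only):

#include <pthread.h>
#include <string.h>

static char out_buf[4096];
static size_t out_used;
static pthread_mutex_t out_lock = PTHREAD_MUTEX_INITIALIZER;

/* Append to the shared output buffer; the mutex makes the space check,
 * copy and index advance atomic with respect to every other writer, so
 * two threads cannot both pass the check and overrun the buffer. */
int buf_append(const char *data, size_t len)
{
    int rc = -1;
    pthread_mutex_lock(&out_lock);
    if (len <= sizeof(out_buf) - out_used) {
        memcpy(out_buf + out_used, data, len);
        out_used += len;
        rc = 0;
    }
    pthread_mutex_unlock(&out_lock);
    return rc;
}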
linux
|
d8316f3991d207fe32881a9ac20241be8fa2bad0
| 1 |
static void handle_rx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned uninitialized_var(in), log;
struct vhost_log *vq_log;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_control = NULL, /* FIXME: get and handle RX aux data. */
.msg_controllen = 0,
.msg_iov = vq->iov,
.msg_flags = MSG_DONTWAIT,
};
struct virtio_net_hdr_mrg_rxbuf hdr = {
.hdr.flags = 0,
.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
};
size_t total_len = 0;
int err, mergeable;
s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock;
mutex_lock(&vq->mutex);
sock = vq->private_data;
if (!sock)
goto out;
vhost_disable_notify(&net->dev, vq);
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
while ((sock_len = peek_head_len(sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads, vhost_len,
&in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
vhost_disable_notify(&net->dev, vq);
continue;
}
/* Nothing new? Wait for eventfd to tell us
* they refilled. */
break;
}
/* We don't need to be notified again. */
if (unlikely((vhost_hlen)))
/* Skip header. TODO: support TSO. */
move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
else
/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
* needed because recvmsg can modify msg_iov. */
copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
msg.msg_iovlen = in;
err = sock->ops->recvmsg(NULL, sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
/* Userspace might have consumed the packet meanwhile:
* it's not supposed to do this usually, but might be hard
* to prevent. Discard data we got (if any) and keep going. */
if (unlikely(err != sock_len)) {
pr_debug("Discarded rx packet: "
" len %d, expected %zd\n", err, sock_len);
vhost_discard_vq_desc(vq, headcount);
continue;
}
if (unlikely(vhost_hlen) &&
memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
vhost_hlen)) {
vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
vq->iov->iov_base);
break;
}
/* TODO: Should check and handle checksum. */
if (likely(mergeable) &&
memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
offsetof(typeof(hdr), num_buffers),
sizeof hdr.num_buffers)) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
break;
}
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
headcount);
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
break;
}
}
out:
mutex_unlock(&vq->mutex);
}
|
CWE-20
| 179,633 | 1,342 |
327050017452533912126917371575525804547
| null | null | null |
linux
|
5d81de8e8667da7135d3a32a964087c0faf5483f
| 1 |
cifs_iovec_write(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
unsigned long nr_pages, i;
size_t copied, len, cur_len;
ssize_t total_written = 0;
loff_t offset;
struct iov_iter it;
struct cifsFileInfo *open_file;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
struct cifs_writedata *wdata, *tmp;
struct list_head wdata_list;
int rc;
pid_t pid;
len = iov_length(iov, nr_segs);
if (!len)
return 0;
rc = generic_write_checks(file, poffset, &len, 0);
if (rc)
return rc;
INIT_LIST_HEAD(&wdata_list);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
open_file = file->private_data;
tcon = tlink_tcon(open_file->tlink);
if (!tcon->ses->server->ops->async_writev)
return -ENOSYS;
offset = *poffset;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
iov_iter_init(&it, iov, nr_segs, len, 0);
do {
size_t save_len;
nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
wdata = cifs_writedata_alloc(nr_pages,
cifs_uncached_writev_complete);
if (!wdata) {
rc = -ENOMEM;
break;
}
rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
if (rc) {
kfree(wdata);
break;
}
save_len = cur_len;
for (i = 0; i < nr_pages; i++) {
copied = min_t(const size_t, cur_len, PAGE_SIZE);
copied = iov_iter_copy_from_user(wdata->pages[i], &it,
0, copied);
cur_len -= copied;
iov_iter_advance(&it, copied);
}
cur_len = save_len - cur_len;
wdata->sync_mode = WB_SYNC_ALL;
wdata->nr_pages = nr_pages;
wdata->offset = (__u64)offset;
wdata->cfile = cifsFileInfo_get(open_file);
wdata->pid = pid;
wdata->bytes = cur_len;
wdata->pagesz = PAGE_SIZE;
wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
rc = cifs_uncached_retry_writev(wdata);
if (rc) {
kref_put(&wdata->refcount,
cifs_uncached_writedata_release);
break;
}
list_add_tail(&wdata->list, &wdata_list);
offset += cur_len;
len -= cur_len;
} while (len > 0);
/*
* If at least one write was successfully sent, then discard any rc
* value from the later writes. If the other write succeeds, then
* we'll end up returning whatever was written. If it fails, then
* we'll get a new rc value from that.
*/
if (!list_empty(&wdata_list))
rc = 0;
/*
* Wait for and collect replies for any successful sends in order of
* increasing offset. Once an error is hit or we get a fatal signal
* while waiting, then return without waiting for any more replies.
*/
restart_loop:
list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
if (!rc) {
/* FIXME: freezable too? */
rc = wait_for_completion_killable(&wdata->done);
if (rc)
rc = -EINTR;
else if (wdata->result)
rc = wdata->result;
else
total_written += wdata->bytes;
/* resend call if it's a retryable error */
if (rc == -EAGAIN) {
rc = cifs_uncached_retry_writev(wdata);
goto restart_loop;
}
}
list_del_init(&wdata->list);
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
if (total_written > 0)
*poffset += total_written;
cifs_stats_bytes_written(tcon, total_written);
return total_written ? total_written : (ssize_t)rc;
}
|
CWE-119
| 179,634 | 1,343 |
82332909522185401279552185166060953871
| null | null | null |
linux
|
a08d3b3b99efd509133946056531cdf8f3a0c09b
| 1 |
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_mmio_fragment *frag;
unsigned len;
BUG_ON(!vcpu->mmio_needed);
/* Complete previous fragment */
frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
len = min(8u, frag->len);
if (!vcpu->mmio_is_write)
memcpy(frag->data, run->mmio.data, len);
if (frag->len <= 8) {
/* Switch to the next fragment. */
frag++;
vcpu->mmio_cur_fragment++;
} else {
/* Go forward to the next mmio piece. */
frag->data += len;
frag->gpa += len;
frag->len -= len;
}
if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
/* FIXME: return into emulator if single-stepping. */
if (vcpu->mmio_is_write)
return 1;
vcpu->mmio_read_completed = 1;
return complete_emulated_io(vcpu);
}
run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = frag->gpa;
if (vcpu->mmio_is_write)
memcpy(run->mmio.data, frag->data, min(8u, frag->len));
run->mmio.len = min(8u, frag->len);
run->mmio.is_write = vcpu->mmio_is_write;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
return 0;
}
|
CWE-119
| 179,638 | 1,344 |
64111150231177249086660720102945909955
| null | null | null |
linux
|
2def2ef2ae5f3990aabdbe8a755911902707d268
| 1 |
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
struct compat_timespec __user *timeout)
{
int datagrams;
struct timespec ktspec;
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
if (COMPAT_USE_64BIT_TIME)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT,
(struct timespec *) timeout);
if (timeout == NULL)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL);
if (get_compat_timespec(&ktspec, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, &ktspec);
if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
datagrams = -EFAULT;
return datagrams;
}
|
CWE-20
| 179,639 | 1,345 |
89530709225040186143754865631195577925
| null | null | null |
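In the compat recvmmsg row, the COMPAT_USE_64BIT_TIME branch casts the user-supplied timeout pointer straight to a kernel struct timespec * instead of copying it in — that is my reading of the CWE-20 label, not the record's wording. The portable rule it violates: never hand a caller-controlled pointer to code that expects trusted storage; snapshot the bytes into a local, validate them, and work on the copy. A userspace analogue as a sketch (names illustrative):

#include <string.h>
#include <time.h>

/* Treat 'untrusted' as an opaque blob of at least sizeof(struct timespec)
 * bytes from an unprivileged caller: copy it into a local struct,
 * validate the fields, and only then let the rest of the code use it. */
int read_timeout(const void *untrusted, struct timespec *out)
{
    struct timespec local;

    memcpy(&local, untrusted, sizeof(local));   /* snapshot first */
    if (local.tv_sec < 0 || local.tv_nsec < 0 ||
        local.tv_nsec >= 1000000000L)
        return -1;                              /* reject bad values */
    *out = local;
    return 0;
}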
linux
|
d558023207e008a4476a3b7bb8706b2a2bf5d84f
| 1 |
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
struct mm_struct *mm = current->mm;
struct kioctx *ctx;
int err = -ENOMEM;
/*
* We keep track of the number of available ringbuffer slots, to prevent
* overflow (reqs_available), and we also use percpu counters for this.
*
* So since up to half the slots might be on other cpu's percpu counters
* and unavailable, double nr_events so userspace sees what they
* expected: additionally, we move req_batch slots to/from percpu
* counters at a time, so make sure that isn't 0:
*/
nr_events = max(nr_events, num_possible_cpus() * 4);
nr_events *= 2;
/* Prevent overflows */
if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
(nr_events > (0x10000000U / sizeof(struct kiocb)))) {
pr_debug("ENOMEM: nr_events too high\n");
return ERR_PTR(-EINVAL);
}
if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
return ERR_PTR(-EAGAIN);
ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->max_reqs = nr_events;
if (percpu_ref_init(&ctx->users, free_ioctx_users))
goto err;
if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
goto err;
spin_lock_init(&ctx->ctx_lock);
spin_lock_init(&ctx->completion_lock);
mutex_init(&ctx->ring_lock);
init_waitqueue_head(&ctx->wait);
INIT_LIST_HEAD(&ctx->active_reqs);
ctx->cpu = alloc_percpu(struct kioctx_cpu);
if (!ctx->cpu)
goto err;
if (aio_setup_ring(ctx) < 0)
goto err;
atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
if (ctx->req_batch < 1)
ctx->req_batch = 1;
/* limit the number of system wide aios */
spin_lock(&aio_nr_lock);
if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
aio_nr + nr_events < aio_nr) {
spin_unlock(&aio_nr_lock);
err = -EAGAIN;
goto err;
}
aio_nr += ctx->max_reqs;
spin_unlock(&aio_nr_lock);
percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
err = ioctx_add_table(ctx, mm);
if (err)
goto err_cleanup;
pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
ctx, ctx->user_id, mm, ctx->nr_events);
return ctx;
err_cleanup:
aio_nr_sub(ctx->max_reqs);
err:
aio_free_ring(ctx);
free_percpu(ctx->cpu);
free_percpu(ctx->reqs.pcpu_count);
free_percpu(ctx->users.pcpu_count);
kmem_cache_free(kioctx_cachep, ctx);
pr_debug("error allocating ioctx %d\n", err);
return ERR_PTR(err);
}
|
CWE-399
| 179,640 | 1,346 |
338752304203713818243017510963717993413
| null | null | null |
linux
|
c2349758acf1874e4c2b93fe41d072336f1a31d0
| 1 |
static int rds_ib_laddr_check(__be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
struct sockaddr_in sin;
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = addr;
/* rdma_bind_addr will only succeed for IB & iWARP devices */
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
&addr, ret,
cm_id->device ? cm_id->device->node_type : -1);
rdma_destroy_id(cm_id);
return ret;
}
|
CWE-399
| 179,641 | 1,347 |
264896393456312274791003613275417079684
| null | null | null |
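Note the asymmetry inside the rds_ib_laddr_check row: the rdsdebug() call guards cm_id->device with a ternary, while the if just above dereferences it unguarded once ret == 0. Together with the CWE-399 tag this suggests a NULL-device dereference (my inference, not the record's text). The placement of the NULL test in the short-circuit chain is the whole point; a self-contained sketch with illustrative types:

#include <stddef.h>

enum node_type { NODE_IB_CA = 1, NODE_OTHER = 2 };

struct device { enum node_type node_type; };

/* Put the NULL test before the field access so the short-circuit
 * evaluation of || protects the dereference. */
int usable_device(int bind_err, const struct device *dev)
{
    if (bind_err || dev == NULL || dev->node_type != NODE_IB_CA)
        return 0;   /* error, no device, or wrong transport */
    return 1;
}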
linux
|
bceaa90240b6019ed73b49965eac7d167610be69
| 1 |
static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len, int noblock, int flags,
int *addr_len)
{
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
struct sockaddr_ieee802154 *saddr;
saddr = (struct sockaddr_ieee802154 *)msg->msg_name;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
/* FIXME: skip headers if necessary ?! */
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_ts_and_drops(msg, sk, skb);
if (saddr) {
saddr->family = AF_IEEE802154;
saddr->addr = mac_cb(skb)->sa;
}
if (addr_len)
*addr_len = sizeof(*saddr);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err)
return err;
return copied;
}
|
CWE-200
| 179,648 | 1,348 |
189468429727011153460515106647418254109
| null | null | null |
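The ieee802154 row opens a long run of recvmsg handlers below that share the pattern the CWE-200 tags point at: the sockaddr handed back to user space is filled field by field (or not at all), yet a full-size length is still reported, so uninitialized kernel bytes can reach the caller. A hedged sketch of the safer shape — clear the whole address, fill it only when the caller supplied a buffer, and report a length only in that case (illustrative types, not the kernel API):

#include <string.h>

struct addr_out {
    unsigned short family;
    unsigned char  hwaddr[8];
    unsigned char  pad[6];      /* stands in for struct padding */
};

/* Fill the caller's address buffer, if any, without leaking stale bytes:
 * zero the whole struct, set the known fields, and return a nonzero
 * length only when an address was actually written. */
size_t report_source(struct addr_out *user_addr, const unsigned char hw[8])
{
    if (user_addr == NULL)
        return 0;                   /* caller did not ask for an address */
    memset(user_addr, 0, sizeof(*user_addr));
    user_addr->family = 0x0f;       /* illustrative family constant */
    memcpy(user_addr->hwaddr, hw, 8);
    return sizeof(*user_addr);
}

The same reading applies to the ping, l2tp, phonet, appletalk, ipx, netrom, packet and x25 rows that follow, so the sketch is not repeated for each of them.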
linux
|
bceaa90240b6019ed73b49965eac7d167610be69
| 1 |
int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *isk = inet_sk(sk);
int family = sk->sk_family;
struct sockaddr_in *sin;
struct sockaddr_in6 *sin6;
struct sk_buff *skb;
int copied, err;
pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
err = -EOPNOTSUPP;
if (flags & MSG_OOB)
goto out;
if (addr_len) {
if (family == AF_INET)
*addr_len = sizeof(*sin);
else if (family == AF_INET6 && addr_len)
*addr_len = sizeof(*sin6);
}
if (flags & MSG_ERRQUEUE) {
if (family == AF_INET) {
return ip_recv_error(sk, msg, len);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
return pingv6_ops.ipv6_recv_error(sk, msg, len);
#endif
}
}
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
/* Don't bother checking the checksum */
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address and add cmsg data. */
if (family == AF_INET) {
sin = (struct sockaddr_in *) msg->msg_name;
sin->sin_family = AF_INET;
sin->sin_port = 0 /* skb->h.uh->source */;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
if (isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *ip6 = ipv6_hdr(skb);
sin6 = (struct sockaddr_in6 *) msg->msg_name;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ip6->saddr;
sin6->sin6_flowinfo = 0;
if (np->sndflow)
sin6->sin6_flowinfo = ip6_flowinfo(ip6);
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
if (inet6_sk(sk)->rxopt.all)
pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
#endif
} else {
BUG();
}
err = copied;
done:
skb_free_datagram(sk, skb);
out:
pr_debug("ping_recvmsg -> %d\n", err);
return err;
}
|
CWE-200
| 179,649 | 1,349 |
121858951950899136235315640016930590326
| null | null | null |
linux
|
bceaa90240b6019ed73b49965eac7d167610be69
| 1 |
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*sin);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
return err ? err : copied;
}
|
CWE-200
| 179,654 | 1,354 |
263984241115725968551012753387756854041
| null | null | null |
linux
|
bceaa90240b6019ed73b49965eac7d167610be69
| 1 |
static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct sk_buff *skb = NULL;
struct sockaddr_pn sa;
int rval = -EOPNOTSUPP;
int copylen;
if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
MSG_CMSG_COMPAT))
goto out_nofree;
if (addr_len)
*addr_len = sizeof(sa);
skb = skb_recv_datagram(sk, flags, noblock, &rval);
if (skb == NULL)
goto out_nofree;
pn_skb_get_src_sockaddr(skb, &sa);
copylen = skb->len;
if (len < copylen) {
msg->msg_flags |= MSG_TRUNC;
copylen = len;
}
rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen);
if (rval) {
rval = -EFAULT;
goto out;
}
rval = (flags & MSG_TRUNC) ? skb->len : copylen;
if (msg->msg_name != NULL)
memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
out:
skb_free_datagram(sk, skb);
out_nofree:
return rval;
}
|
CWE-200
| 179,655 | 1,355 |
225152650498232849433170587680269303149
| null | null | null |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 1 |
mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
struct sockaddr_mISDN *maddr;
int copied, err;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n",
__func__, (int)len, flags, _pms(sk)->ch.nr,
sk->sk_protocol);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->sk_state == MISDN_CLOSED)
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
msg->msg_namelen = sizeof(struct sockaddr_mISDN);
maddr = (struct sockaddr_mISDN *)msg->msg_name;
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
(sk->sk_protocol == ISDN_P_LAPD_NT)) {
maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff;
maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff;
maddr->sapi = mISDN_HEAD_ID(skb) & 0xff;
} else {
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xFF;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
}
} else {
if (msg->msg_namelen)
printk(KERN_WARNING "%s: too small namelen %d\n",
__func__, msg->msg_namelen);
msg->msg_namelen = 0;
}
copied = skb->len + MISDN_HEADER_LEN;
if (len < copied) {
if (flags & MSG_PEEK)
atomic_dec(&skb->users);
else
skb_queue_head(&sk->sk_receive_queue, skb);
return -ENOSPC;
}
memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
MISDN_HEADER_LEN);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
mISDN_sock_cmsg(sk, msg, skb);
skb_free_datagram(sk, skb);
return err ? : copied;
}
|
CWE-20
| 179,658 | 1,357 |
25902652839543212427995890070594911137
| null | null | null |
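A different flavor of the recvmsg problem shows up in the mISDN row: msg->msg_namelen is read as an input ("if (msg->msg_namelen >= sizeof(...))"), but on the kernel side that field is output-only, so a caller-chosen value steers whether the address is written at all. The associated rework keyed the decision off whether msg_name is present. A compact sketch of that contract (illustrative names):

#include <stddef.h>

/* The length argument is write-only: ignore whatever the caller left in
 * *addrlen and overwrite it with the number of bytes actually produced.
 * Branch on addr != NULL, never on the incoming length. */
void finish_recv(const void *addr, size_t produced, size_t *addrlen)
{
    if (addrlen != NULL)
        *addrlen = (addr != NULL) ? produced : 0;
}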
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 1 |
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
struct ddpehdr *ddp;
int copied = 0;
int offset = 0;
int err = 0;
struct sk_buff *skb;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
lock_sock(sk);
if (!skb)
goto out;
/* FIXME: use skb->cb to be able to use shared skbs */
ddp = ddp_hdr(skb);
copied = ntohs(ddp->deh_len_hops) & 1023;
if (sk->sk_type != SOCK_RAW) {
offset = sizeof(*ddp);
copied -= offset;
}
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
if (!err) {
if (sat) {
sat->sat_family = AF_APPLETALK;
sat->sat_port = ddp->deh_sport;
sat->sat_addr.s_node = ddp->deh_snode;
sat->sat_addr.s_net = ddp->deh_snet;
}
msg->msg_namelen = sizeof(*sat);
}
skb_free_datagram(sk, skb); /* Free the datagram. */
out:
release_sock(sk);
return err ? : copied;
}
|
CWE-20
| 179,660 | 1,359 |
30146679912109747954249025598525145220
| null | null | null |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 1 |
static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct ipx_sock *ipxs = ipx_sk(sk);
struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name;
struct ipxhdr *ipx = NULL;
struct sk_buff *skb;
int copied, rc;
lock_sock(sk);
/* put the autobinding in */
if (!ipxs->port) {
struct sockaddr_ipx uaddr;
uaddr.sipx_port = 0;
uaddr.sipx_network = 0;
#ifdef CONFIG_IPX_INTERN
rc = -ENETDOWN;
if (!ipxs->intrfc)
goto out; /* Someone zonked the iface */
memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */
rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
if (rc)
goto out;
}
rc = -ENOTCONN;
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &rc);
if (!skb)
goto out;
ipx = ipx_hdr(skb);
copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov,
copied);
if (rc)
goto out_free;
if (skb->tstamp.tv64)
sk->sk_stamp = skb->tstamp;
msg->msg_namelen = sizeof(*sipx);
if (sipx) {
sipx->sipx_family = AF_IPX;
sipx->sipx_port = ipx->ipx_source.sock;
memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN);
sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
sipx->sipx_type = ipx->ipx_type;
sipx->sipx_zero = 0;
}
rc = copied;
out_free:
skb_free_datagram(sk, skb);
out:
release_sock(sk);
return rc;
}
|
CWE-20
| 179,672 | 1,370 |
112645242093379782512441768789141311187
| null | null | null |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 1 |
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
if (sax != NULL) {
memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
}
msg->msg_namelen = sizeof(*sax);
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
}
|
CWE-20
| 179,680 | 1,378 |
122095893171554321138890313628074994863
| null | null | null |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 1 |
static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
struct sockaddr_ll *sll;
int vnet_hdr_len = 0;
err = -EINVAL;
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
goto out;
#if 0
/* What error should we return now? EUNATTACH? */
if (pkt_sk(sk)->ifindex < 0)
return -ENODEV;
#endif
if (flags & MSG_ERRQUEUE) {
err = sock_recv_errqueue(sk, msg, len,
SOL_PACKET, PACKET_TX_TIMESTAMP);
goto out;
}
/*
* Call the generic datagram receiver. This handles all sorts
* of horrible races and re-entrancy so we can forget about it
* in the protocol layers.
*
* Now it will return ENETDOWN, if device have just gone down,
* but then it will block.
*/
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
/*
* An error occurred so return it. Because skb_recv_datagram()
* handles the blocking we don't see and worry about blocking
* retries.
*/
if (skb == NULL)
goto out;
if (pkt_sk(sk)->has_vnet_hdr) {
struct virtio_net_hdr vnet_hdr = { 0 };
err = -EINVAL;
vnet_hdr_len = sizeof(vnet_hdr);
if (len < vnet_hdr_len)
goto out_free;
len -= vnet_hdr_len;
if (skb_is_gso(skb)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
/* This is a hint as to how much should be linear. */
vnet_hdr.hdr_len = skb_headlen(skb);
vnet_hdr.gso_size = sinfo->gso_size;
if (sinfo->gso_type & SKB_GSO_TCPV4)
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
else if (sinfo->gso_type & SKB_GSO_UDP)
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else if (sinfo->gso_type & SKB_GSO_FCOE)
goto out_free;
else
BUG();
if (sinfo->gso_type & SKB_GSO_TCP_ECN)
vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
} else
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
vnet_hdr.csum_start = skb_checksum_start_offset(skb);
vnet_hdr.csum_offset = skb->csum_offset;
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
vnet_hdr_len);
if (err < 0)
goto out_free;
}
/*
* If the address length field is there to be filled in, we fill
* it in now.
*/
sll = &PACKET_SKB_CB(skb)->sa.ll;
if (sock->type == SOCK_PACKET)
msg->msg_namelen = sizeof(struct sockaddr_pkt);
else
msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
/*
* You lose any data beyond the buffer you gave. If it worries a
* user program they can ask the device for its MTU anyway.
*/
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto out_free;
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name)
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
msg->msg_namelen);
if (pkt_sk(sk)->auxdata) {
struct tpacket_auxdata aux;
aux.tp_status = TP_STATUS_USER;
if (skb->ip_summed == CHECKSUM_PARTIAL)
aux.tp_status |= TP_STATUS_CSUMNOTREADY;
aux.tp_len = PACKET_SKB_CB(skb)->origlen;
aux.tp_snaplen = skb->len;
aux.tp_mac = 0;
aux.tp_net = skb_network_offset(skb);
if (vlan_tx_tag_present(skb)) {
aux.tp_vlan_tci = vlan_tx_tag_get(skb);
aux.tp_status |= TP_STATUS_VLAN_VALID;
} else {
aux.tp_vlan_tci = 0;
}
aux.tp_padding = 0;
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
/*
* Free or return the buffer as appropriate. Again this
* hides all the races and re-entrancy issues from us.
*/
err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
out_free:
skb_free_datagram(sk, skb);
out:
return err;
}
|
CWE-20
| 179,683 | 1,381 |
264606051361135517167808491820057383720
| null | null | null |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 1 |
static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct x25_sock *x25 = x25_sk(sk);
struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
size_t copied;
int qbit, header_len;
struct sk_buff *skb;
unsigned char *asmptr;
int rc = -ENOTCONN;
lock_sock(sk);
if (x25->neighbour == NULL)
goto out;
header_len = x25->neighbour->extended ?
X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if (flags & MSG_OOB) {
rc = -EINVAL;
if (sock_flag(sk, SOCK_URGINLINE) ||
!skb_peek(&x25->interrupt_in_queue))
goto out;
skb = skb_dequeue(&x25->interrupt_in_queue);
if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
goto out_free_dgram;
skb_pull(skb, X25_STD_MIN_LEN);
/*
* No Q bit information on Interrupt data.
*/
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
asmptr = skb_push(skb, 1);
*asmptr = 0x00;
}
msg->msg_flags |= MSG_OOB;
} else {
/* Now we can treat all alike */
release_sock(sk);
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &rc);
lock_sock(sk);
if (!skb)
goto out;
if (!pskb_may_pull(skb, header_len))
goto out_free_dgram;
qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
skb_pull(skb, header_len);
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
/* Currently, each datagram always contains a complete record */
msg->msg_flags |= MSG_EOR;
rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (rc)
goto out_free_dgram;
if (sx25) {
sx25->sx25_family = AF_X25;
sx25->sx25_addr = x25->dest_addr;
}
msg->msg_namelen = sizeof(struct sockaddr_x25);
x25_check_rbuf(sk);
rc = copied;
out_free_dgram:
skb_free_datagram(sk, skb);
out:
release_sock(sk);
return rc;
}
|
CWE-20
| 179,696 | 1,394 |
87996953335419633392336015864465689987
| null | null | null |
mapserver
|
3a10f6b829297dae63492a8c63385044bc6953ed
| 1 |
int msPostGISLayerSetTimeFilter(layerObj *lp, const char *timestring, const char *timefield)
{
char **atimes, **aranges = NULL;
int numtimes=0,i=0,numranges=0;
size_t buffer_size = 512;
char buffer[512], bufferTmp[512];
buffer[0] = '\0';
bufferTmp[0] = '\0';
if (!lp || !timestring || !timefield)
return MS_FALSE;
/* discrete time */
if (strstr(timestring, ",") == NULL &&
strstr(timestring, "/") == NULL) { /* discrete time */
createPostgresTimeCompareSimple(timefield, timestring, buffer, buffer_size);
} else {
/* multiple times, or ranges */
atimes = msStringSplit (timestring, ',', &numtimes);
if (atimes == NULL || numtimes < 1)
return MS_FALSE;
strlcat(buffer, "(", buffer_size);
for(i=0; i<numtimes; i++) {
if(i!=0) {
strlcat(buffer, " OR ", buffer_size);
}
strlcat(buffer, "(", buffer_size);
aranges = msStringSplit(atimes[i], '/', &numranges);
if(!aranges) return MS_FALSE;
if(numranges == 1) {
/* we don't have range, just a simple time */
createPostgresTimeCompareSimple(timefield, atimes[i], bufferTmp, buffer_size);
strlcat(buffer, bufferTmp, buffer_size);
} else if(numranges == 2) {
/* we have a range */
createPostgresTimeCompareRange(timefield, aranges[0], aranges[1], bufferTmp, buffer_size);
strlcat(buffer, bufferTmp, buffer_size);
} else {
return MS_FALSE;
}
msFreeCharArray(aranges, numranges);
strlcat(buffer, ")", buffer_size);
}
strlcat(buffer, ")", buffer_size);
msFreeCharArray(atimes, numtimes);
}
if(!*buffer) {
return MS_FALSE;
}
if(lp->filteritem) free(lp->filteritem);
lp->filteritem = msStrdup(timefield);
if (&lp->filter) {
/* if the filter is set and it's a string type, concatenate it with
the time. If not just free it */
if (lp->filter.type == MS_EXPRESSION) {
snprintf(bufferTmp, buffer_size, "(%s) and %s", lp->filter.string, buffer);
loadExpressionString(&lp->filter, bufferTmp);
} else {
freeExpression(&lp->filter);
loadExpressionString(&lp->filter, buffer);
}
}
return MS_TRUE;
}
|
CWE-89
| 179,697 | 1,395 |
92160701703090280340745521839872003209
| null | null | null |
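The MapServer row concatenates timestring (ultimately request-controlled) into a SQL filter via the createPostgresTimeCompare* helpers, which is the injection the CWE-89 tag refers to. One common mitigation, independent of MapServer's actual patch, is to refuse any time token containing characters outside the small alphabet a timestamp needs before it ever reaches the SQL string. A sketch:

#include <string.h>

/* Accept only characters that can appear in an ISO-style timestamp
 * (digits, date/time punctuation, 'T' and 'Z'); anything else, such as
 * quotes, semicolons or comment markers, rejects the whole token. */
int time_token_is_safe(const char *token)
{
    static const char allowed[] = "0123456789-:/.TZ ";
    return token[0] != '\0' && strspn(token, allowed) == strlen(token);
}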
Little-CMS
|
91c2db7f2559be504211b283bc3a2c631d6f06d9
| 1 |
Curves16Data* CurvesAlloc(cmsContext ContextID, int nCurves, int nElements, cmsToneCurve** G)
{
int i, j;
Curves16Data* c16;
c16 = _cmsMallocZero(ContextID, sizeof(Curves16Data));
if (c16 == NULL) return NULL;
c16 ->nCurves = nCurves;
c16 ->nElements = nElements;
c16 ->Curves = _cmsCalloc(ContextID, nCurves, sizeof(cmsUInt16Number*));
if (c16 ->Curves == NULL) return NULL;
for (i=0; i < nCurves; i++) {
c16->Curves[i] = _cmsCalloc(ContextID, nElements, sizeof(cmsUInt16Number));
if (nElements == 256) {
for (j=0; j < nElements; j++) {
c16 ->Curves[i][j] = cmsEvalToneCurve16(G[i], FROM_8_TO_16(j));
}
}
else {
for (j=0; j < nElements; j++) {
c16 ->Curves[i][j] = cmsEvalToneCurve16(G[i], (cmsUInt16Number) j);
}
}
}
return c16;
}
| 179,716 | 1,412 |
147209237771980332211661461965374575301
| null | null | null |
|
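This Little-CMS row carries no CWE tag, but the pattern is visible in the code: the outer c16->Curves allocation is checked, while each per-curve _cmsCalloc inside the loop is used unchecked, and the early returns leak what was already allocated. A standalone sketch of check-and-unwind allocation (plain calloc/free, not the lcms context allocators):

#include <stdlib.h>

/* Allocate 'rows' buffers of 'cols' values each, checking every
 * allocation and releasing everything already obtained on failure, so
 * the caller never sees a half-built object. */
unsigned short **alloc_curve_table(size_t rows, size_t cols)
{
    unsigned short **t = calloc(rows, sizeof(*t));
    if (t == NULL)
        return NULL;
    for (size_t i = 0; i < rows; i++) {
        t[i] = calloc(cols, sizeof(**t));
        if (t[i] == NULL) {             /* unwind on first failure */
            while (i-- > 0)
                free(t[i]);
            free(t);
            return NULL;
        }
    }
    return t;
}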
monkey
|
15f72c1ee5e0afad20232bdf0fcecab8d62a5d89
| 1 |
int _mkp_stage_30(struct plugin *p,
struct client_session *cs,
struct session_request *sr)
{
mk_ptr_t referer;
(void) p;
(void) cs;
PLUGIN_TRACE("[FD %i] Mandril validating URL", cs->socket);
if (mk_security_check_url(sr->uri) < 0) {
PLUGIN_TRACE("[FD %i] Close connection, blocked URL", cs->socket);
mk_api->header_set_http_status(sr, MK_CLIENT_FORBIDDEN);
return MK_PLUGIN_RET_CLOSE_CONX;
}
PLUGIN_TRACE("[FD %d] Mandril validating hotlinking", cs->socket);
referer = mk_api->header_get(&sr->headers_toc, "Referer", strlen("Referer"));
if (mk_security_check_hotlink(sr->uri_processed, sr->host, referer) < 0) {
PLUGIN_TRACE("[FD %i] Close connection, deny hotlinking.", cs->socket);
mk_api->header_set_http_status(sr, MK_CLIENT_FORBIDDEN);
return MK_PLUGIN_RET_CLOSE_CONX;
}
return MK_PLUGIN_RET_NOT_ME;
}
|
CWE-264
| 179,717 | 1,413 |
131927884398444168269182244005517413817
| null | null | null |
corosync
|
b3f456a8ceefac6e9f2e9acc2ea0c159d412b595
| 1 |
static int init_nss_hash(struct crypto_instance *instance)
{
PK11SlotInfo* hash_slot = NULL;
SECItem hash_param;
if (!hash_to_nss[instance->crypto_hash_type]) {
return 0;
}
hash_param.type = siBuffer;
hash_param.data = 0;
hash_param.len = 0;
hash_slot = PK11_GetBestSlot(hash_to_nss[instance->crypto_hash_type], NULL);
if (hash_slot == NULL) {
log_printf(instance->log_level_security, "Unable to find security slot (err %d)",
PR_GetError());
return -1;
}
instance->nss_sym_key_sign = PK11_ImportSymKey(hash_slot,
hash_to_nss[instance->crypto_hash_type],
PK11_OriginUnwrap, CKA_SIGN,
&hash_param, NULL);
if (instance->nss_sym_key_sign == NULL) {
log_printf(instance->log_level_security, "Failure to import key into NSS (err %d)",
PR_GetError());
return -1;
}
PK11_FreeSlot(hash_slot);
return 0;
}
| 179,718 | 1,414 |
114430622650469532874355061363597288191
| null | null | null |
|
linux
|
3e10986d1d698140747fcfc2761ec9cb64c1d582
| 1 |
int sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int val;
int valbool;
struct linger ling;
int ret = 0;
/*
* Options without arguments
*/
if (optname == SO_BINDTODEVICE)
return sock_bindtodevice(sk, optval, optlen);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
valbool = val ? 1 : 0;
lock_sock(sk);
switch (optname) {
case SO_DEBUG:
if (val && !capable(CAP_NET_ADMIN))
ret = -EACCES;
else
sock_valbool_flag(sk, SOCK_DBG, valbool);
break;
case SO_REUSEADDR:
sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_TYPE:
case SO_PROTOCOL:
case SO_DOMAIN:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
break;
case SO_SNDBUF:
/* Don't error on this BSD doesn't and if you think
* about it this is right. Otherwise apps have to
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints
*/
val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
/* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
case SO_SNDBUFFORCE:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
goto set_sndbuf;
case SO_RCVBUF:
/* Don't error on this BSD doesn't and if you think
* about it this is right. Otherwise apps have to
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints
*/
val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
* We double it on the way in to account for
* "struct sk_buff" etc. overhead. Applications
* assume that the SO_RCVBUF setting they make will
* allow that much actual data to be received on that
* socket.
*
* Applications are unaware that "struct sk_buff" and
* other overheads allocate from the receive buffer
* during socket buffer allocation.
*
* And after considering the possible alternatives,
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_RCVBUFFORCE:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
goto set_rcvbuf;
case SO_KEEPALIVE:
#ifdef CONFIG_INET
if (sk->sk_protocol == IPPROTO_TCP)
tcp_set_keepalive(sk, valbool);
#endif
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
break;
case SO_OOBINLINE:
sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
break;
case SO_NO_CHECK:
sk->sk_no_check = valbool;
break;
case SO_PRIORITY:
if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
sk->sk_priority = val;
else
ret = -EPERM;
break;
case SO_LINGER:
if (optlen < sizeof(ling)) {
ret = -EINVAL; /* 1003.1g */
break;
}
if (copy_from_user(&ling, optval, sizeof(ling))) {
ret = -EFAULT;
break;
}
if (!ling.l_onoff)
sock_reset_flag(sk, SOCK_LINGER);
else {
#if (BITS_PER_LONG == 32)
if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
else
#endif
sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
sock_set_flag(sk, SOCK_LINGER);
}
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("setsockopt");
break;
case SO_PASSCRED:
if (valbool)
set_bit(SOCK_PASSCRED, &sock->flags);
else
clear_bit(SOCK_PASSCRED, &sock->flags);
break;
case SO_TIMESTAMP:
case SO_TIMESTAMPNS:
if (valbool) {
if (optname == SO_TIMESTAMP)
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
else
sock_set_flag(sk, SOCK_RCVTSTAMPNS);
sock_set_flag(sk, SOCK_RCVTSTAMP);
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
} else {
sock_reset_flag(sk, SOCK_RCVTSTAMP);
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
}
break;
case SO_TIMESTAMPING:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
break;
}
sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
val & SOF_TIMESTAMPING_TX_HARDWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
val & SOF_TIMESTAMPING_TX_SOFTWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
val & SOF_TIMESTAMPING_RX_HARDWARE);
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
sock_enable_timestamp(sk,
SOCK_TIMESTAMPING_RX_SOFTWARE);
else
sock_disable_timestamp(sk,
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
val & SOF_TIMESTAMPING_SOFTWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
val & SOF_TIMESTAMPING_SYS_HARDWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
val & SOF_TIMESTAMPING_RAW_HARDWARE);
break;
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
sk->sk_rcvlowat = val ? : 1;
break;
case SO_RCVTIMEO:
ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
break;
case SO_SNDTIMEO:
ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
break;
case SO_ATTACH_FILTER:
ret = -EINVAL;
if (optlen == sizeof(struct sock_fprog)) {
struct sock_fprog fprog;
ret = -EFAULT;
if (copy_from_user(&fprog, optval, sizeof(fprog)))
break;
ret = sk_attach_filter(&fprog, sk);
}
break;
case SO_DETACH_FILTER:
ret = sk_detach_filter(sk);
break;
case SO_PASSSEC:
if (valbool)
set_bit(SOCK_PASSSEC, &sock->flags);
else
clear_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_MARK:
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
sk->sk_mark = val;
break;
/* We implement the SO_SNDLOWAT etc to
not be settable (1003.1g 5.3) */
case SO_RXQ_OVFL:
sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
case SO_WIFI_STATUS:
sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
break;
case SO_PEEK_OFF:
if (sock->ops->set_peek_off)
sock->ops->set_peek_off(sk, val);
else
ret = -EOPNOTSUPP;
break;
case SO_NOFCS:
sock_valbool_flag(sk, SOCK_NOFCS, valbool);
break;
default:
ret = -ENOPROTOOPT;
break;
}
release_sock(sk);
return ret;
}
|
CWE-264
| 179,719 | 1,415 |
34524812571381986499397701453838832866
| null | null | null |
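For the sock_setsockopt row the record gives only CWE-264; one plausible reading (an assumption on my part, based on the guarded call being tcp_set_keepalive) is that testing sk_protocol == IPPROTO_TCP alone lets non-stream sockets that merely carry the TCP protocol number reach TCP-only code. The defensive shape is to require both the type and the protocol before calling protocol-specific handlers; a small runnable sketch with illustrative constants:

#include <stdio.h>

enum sock_type { SOCK_STREAM_T = 1, SOCK_RAW_T = 3 };

struct sock_id { enum sock_type type; int protocol; };

#define PROTO_TCP 6

/* Only treat the socket as TCP when both the type and the protocol
 * number agree; a raw socket opened with protocol 6 must not qualify. */
int is_real_tcp(const struct sock_id *sk)
{
    return sk->type == SOCK_STREAM_T && sk->protocol == PROTO_TCP;
}

int main(void)
{
    struct sock_id raw6 = { SOCK_RAW_T, PROTO_TCP };
    printf("raw socket with protocol 6 counts as TCP? %d\n",
           is_real_tcp(&raw6));
    return 0;
}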
linux
|
6f7b0a2a5c0fb03be7c25bd1745baa50582348ef
| 1 |
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 val, ktime_t *abs_time, u32 bitset,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/*
* Prepare to wait on uaddr. On success, increments q.key (key1) ref
* count.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out_key2;
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code. The
* futex_requeue dropped our key1 reference and incremented our key2
* reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
spin_unlock(q.lock_ptr);
}
} else {
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, &q, !ret);
/*
* If fixup_owner() returned an error, proprogate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
/*
* If fixup_pi_state_owner() faulted and was unable to handle the
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(&q.key);
out_key2:
put_futex_key(&key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
|
CWE-20
| 179,720 | 1,416 |
97517838480806150368587141619223723538
| null | null | null |
linux
|
fdf5af0daf8019cec2396cdef8fb042d80fe71fa
| 1 |
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int queued = 0;
int res;
tp->rx_opt.saw_tstamp = 0;
switch (sk->sk_state) {
case TCP_CLOSE:
goto discard;
case TCP_LISTEN:
if (th->ack)
return 1;
if (th->rst)
goto discard;
if (th->syn) {
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
/* Now we have several options: In theory there is
* nothing else in the frame. KA9Q has an option to
* send data with the syn, BSD accepts data with the
* syn up to the [to be] advertised window and
* Solaris 2.1 gives you a protocol error. For now
* we just ignore it, that fits the spec precisely
* and avoids incompatibilities. It would be nice in
* future to drop through and process the data.
*
* Now that TTCP is starting to be used we ought to
* queue this data.
* But, this leaves one open to an easy denial of
* service attack, and SYN cookies can't defend
* against this problem. So, we drop the data
* in the interest of security over speed unless
* it's still in use.
*/
kfree_skb(skb);
return 0;
}
goto discard;
case TCP_SYN_SENT:
queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
if (queued >= 0)
return queued;
/* Do step6 onward by hand. */
tcp_urg(sk, skb, th);
__kfree_skb(skb);
tcp_data_snd_check(sk);
return 0;
}
res = tcp_validate_incoming(sk, skb, th, 0);
if (res <= 0)
return -res;
/* step 5: check the ACK field */
if (th->ack) {
int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
switch (sk->sk_state) {
case TCP_SYN_RECV:
if (acceptable) {
tp->copied_seq = tp->rcv_nxt;
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
sk->sk_state_change(sk);
/* Note, that this wakeup is only for marginal
* crossed SYN case. Passively open sockets
* are not waked up, because sk->sk_sleep ==
* NULL and sk->sk_socket == NULL.
*/
if (sk->sk_socket)
sk_wake_async(sk,
SOCK_WAKE_IO, POLL_OUT);
tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) <<
tp->rx_opt.snd_wscale;
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
/* Make sure socket is routed, for
* correct metrics.
*/
icsk->icsk_af_ops->rebuild_header(sk);
tcp_init_metrics(sk);
tcp_init_congestion_control(sk);
/* Prevent spurious tcp_cwnd_restart() on
* first data packet.
*/
tp->lsndtime = tcp_time_stamp;
tcp_mtup_init(sk);
tcp_initialize_rcv_mss(sk);
tcp_init_buffer_space(sk);
tcp_fast_path_on(tp);
} else {
return 1;
}
break;
case TCP_FIN_WAIT1:
if (tp->snd_una == tp->write_seq) {
tcp_set_state(sk, TCP_FIN_WAIT2);
sk->sk_shutdown |= SEND_SHUTDOWN;
dst_confirm(__sk_dst_get(sk));
if (!sock_flag(sk, SOCK_DEAD))
/* Wake up lingering close() */
sk->sk_state_change(sk);
else {
int tmo;
if (tp->linger2 < 0 ||
(TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
tcp_done(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
return 1;
}
tmo = tcp_fin_time(sk);
if (tmo > TCP_TIMEWAIT_LEN) {
inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
} else if (th->fin || sock_owned_by_user(sk)) {
/* Bad case. We could lose such FIN otherwise.
* It is not a big problem, but it looks confusing
* and not so rare event. We still can lose it now,
* if it spins in bh_lock_sock(), but it is really
* marginal case.
*/
inet_csk_reset_keepalive_timer(sk, tmo);
} else {
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
goto discard;
}
}
}
break;
case TCP_CLOSING:
if (tp->snd_una == tp->write_seq) {
tcp_time_wait(sk, TCP_TIME_WAIT, 0);
goto discard;
}
break;
case TCP_LAST_ACK:
if (tp->snd_una == tp->write_seq) {
tcp_update_metrics(sk);
tcp_done(sk);
goto discard;
}
break;
}
} else
goto discard;
/* step 6: check the URG bit */
tcp_urg(sk, skb, th);
/* step 7: process the segment text */
switch (sk->sk_state) {
case TCP_CLOSE_WAIT:
case TCP_CLOSING:
case TCP_LAST_ACK:
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
/* RFC 793 says to queue data in these states,
* RFC 1122 says we MUST send a reset.
* BSD 4.4 also does reset.
*/
if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk);
return 1;
}
}
/* Fall through */
case TCP_ESTABLISHED:
tcp_data_queue(sk, skb);
queued = 1;
break;
}
/* tcp_data could move socket to TIME-WAIT */
if (sk->sk_state != TCP_CLOSE) {
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
}
if (!queued) {
discard:
__kfree_skb(skb);
}
return 0;
}
|
CWE-399
| 179,721 | 1,417 |
47732656845780375439785726749530593734
| null | null | null |
radvd
|
92e22ca23e52066da2258df8c76a2dca8a428bcc
| 1 |
set_interface_var(const char *iface,
const char *var, const char *name,
uint32_t val)
{
FILE *fp;
char spath[64+IFNAMSIZ]; /* XXX: magic constant */
if (snprintf(spath, sizeof(spath), var, iface) >= sizeof(spath))
return -1;
if (access(spath, F_OK) != 0)
return -1;
fp = fopen(spath, "w");
if (!fp) {
if (name)
flog(LOG_ERR, "failed to set %s (%u) for %s: %s",
name, val, iface, strerror(errno));
return -1;
}
fprintf(fp, "%u", val);
fclose(fp);
return 0;
}
|
CWE-22
| 179,722 | 1,418 |
281398530467193595028729310687988122409
| null | null | null |
linux
|
819cbb120eaec7e014e5abd029260db1ca8c5735
| 1 |
static int do_devinfo_ioctl(struct comedi_device *dev,
struct comedi_devinfo __user *arg,
struct file *file)
{
struct comedi_devinfo devinfo;
const unsigned minor = iminor(file->f_dentry->d_inode);
struct comedi_device_file_info *dev_file_info =
comedi_get_device_file_info(minor);
struct comedi_subdevice *read_subdev =
comedi_get_read_subdevice(dev_file_info);
struct comedi_subdevice *write_subdev =
comedi_get_write_subdevice(dev_file_info);
memset(&devinfo, 0, sizeof(devinfo));
/* fill devinfo structure */
devinfo.version_code = COMEDI_VERSION_CODE;
devinfo.n_subdevs = dev->n_subdevices;
memcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
memcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
if (read_subdev)
devinfo.read_subdevice = read_subdev - dev->subdevices;
else
devinfo.read_subdevice = -1;
if (write_subdev)
devinfo.write_subdevice = write_subdev - dev->subdevices;
else
devinfo.write_subdevice = -1;
if (copy_to_user(arg, &devinfo, sizeof(struct comedi_devinfo)))
return -EFAULT;
return 0;
}
|
CWE-200
| 179,729 | 1,419 |
274220629135533136075410211172435680368
| null | null | null |
linux
|
fc3a9157d3148ab91039c75423da8ef97be3e105
| 1 |
static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
kvm_queue_exception(vcpu, UD_VECTOR);
return EMULATE_FAIL;
}
|
CWE-362
| 179,730 | 1,420 |
279029284746417491104163201106928259226
| null | null | null |
linux
|
acff81ec2c79492b180fade3c2894425cd35a545
| 1 |
int ovl_setattr(struct dentry *dentry, struct iattr *attr)
{
int err;
struct dentry *upperdentry;
err = ovl_want_write(dentry);
if (err)
goto out;
upperdentry = ovl_dentry_upper(dentry);
if (upperdentry) {
mutex_lock(&upperdentry->d_inode->i_mutex);
err = notify_change(upperdentry, attr, NULL);
mutex_unlock(&upperdentry->d_inode->i_mutex);
} else {
err = ovl_copy_up_last(dentry, attr, false);
}
ovl_drop_write(dentry);
out:
return err;
}
|
CWE-264
| 179,731 | 1,421 |
126591340298897656122592864326890834329
| null | null | null |
linux
|
09ccfd238e5a0e670d8178cf50180ea81ae09ae1
| 1 |
static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
int sockaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
struct pptp_opt *opt = &po->proto.pptp;
int error = 0;
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
if (add_chan(po))
error = -EBUSY;
release_sock(sk);
return error;
}
|
CWE-200
| 179,732 | 1,422 |
83894244210840036668623252617423351489
| null | null | null |
linux
|
09ccfd238e5a0e670d8178cf50180ea81ae09ae1
| 1 |
static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
struct pptp_opt *opt = &po->proto.pptp;
struct rtable *rt;
struct flowi4 fl4;
int error = 0;
if (sp->sa_protocol != PX_PROTO_PPTP)
return -EINVAL;
if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
return -EALREADY;
lock_sock(sk);
/* Check for already bound sockets */
if (sk->sk_state & PPPOX_CONNECTED) {
error = -EBUSY;
goto end;
}
/* Check for already disconnected sockets, on attempts to disconnect */
if (sk->sk_state & PPPOX_DEAD) {
error = -EALREADY;
goto end;
}
if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
error = -EINVAL;
goto end;
}
po->chan.private = sk;
po->chan.ops = &pptp_chan_ops;
rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,
IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
if (IS_ERR(rt)) {
error = -EHOSTUNREACH;
goto end;
}
sk_setup_caps(sk, &rt->dst);
po->chan.mtu = dst_mtu(&rt->dst);
if (!po->chan.mtu)
po->chan.mtu = PPP_MRU;
ip_rt_put(rt);
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
error = ppp_register_channel(&po->chan);
if (error) {
pr_err("PPTP: failed to register PPP channel (%d)\n", error);
goto end;
}
opt->dst_addr = sp->sa_addr.pptp;
sk->sk_state = PPPOX_CONNECTED;
end:
release_sock(sk);
return error;
}
|
CWE-200
| 179,733 | 1,423 |
246845620878079571093212274674747373244
| null | null | null |
linux
|
8c7188b23474cca017b3ef354c4a58456f68303a
| 1 |
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
__be32 daddr;
__be16 dport;
struct rds_message *rm = NULL;
struct rds_connection *conn;
int ret = 0;
int queued = 0, allocated_mr = 0;
int nonblock = msg->msg_flags & MSG_DONTWAIT;
long timeo = sock_sndtimeo(sk, nonblock);
/* Mirror Linux UDP mirror of BSD error message compatibility */
/* XXX: Perhaps MSG_MORE someday */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
ret = -EOPNOTSUPP;
goto out;
}
if (msg->msg_namelen) {
/* XXX fail non-unicast destination IPs? */
if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
ret = -EINVAL;
goto out;
}
daddr = usin->sin_addr.s_addr;
dport = usin->sin_port;
} else {
/* We only care about consistency with ->connect() */
lock_sock(sk);
daddr = rs->rs_conn_addr;
dport = rs->rs_conn_port;
release_sock(sk);
}
/* racing with another thread binding seems ok here */
if (daddr == 0 || rs->rs_bound_addr == 0) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
if (payload_len > rds_sk_sndbuf(rs)) {
ret = -EMSGSIZE;
goto out;
}
/* size of rm including all sgs */
ret = rds_rm_size(msg, payload_len);
if (ret < 0)
goto out;
rm = rds_message_alloc(ret, GFP_KERNEL);
if (!rm) {
ret = -ENOMEM;
goto out;
}
/* Attach data to the rm */
if (payload_len) {
rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
if (!rm->data.op_sg) {
ret = -ENOMEM;
goto out;
}
ret = rds_message_copy_from_user(rm, &msg->msg_iter);
if (ret)
goto out;
}
rm->data.op_active = 1;
rm->m_daddr = daddr;
/* rds_conn_create has a spinlock that runs with IRQ off.
* Caching the conn in the socket helps a lot. */
if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
conn = rs->rs_conn;
else {
conn = rds_conn_create_outgoing(sock_net(sock->sk),
rs->rs_bound_addr, daddr,
rs->rs_transport,
sock->sk->sk_allocation);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
goto out;
}
rs->rs_conn = conn;
}
/* Parse any control messages the user may have included. */
ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
if (ret)
goto out;
if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
&rm->rdma, conn->c_trans->xmit_rdma);
ret = -EOPNOTSUPP;
goto out;
}
if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
&rm->atomic, conn->c_trans->xmit_atomic);
ret = -EOPNOTSUPP;
goto out;
}
rds_conn_connect_if_down(conn);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
if (ret) {
rs->rs_seen_congestion = 1;
goto out;
}
while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
dport, &queued)) {
rds_stats_inc(s_send_queue_full);
if (nonblock) {
ret = -EAGAIN;
goto out;
}
timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
rds_send_queue_rm(rs, conn, rm,
rs->rs_bound_port,
dport,
&queued),
timeo);
rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
continue;
ret = timeo;
if (ret == 0)
ret = -ETIMEDOUT;
goto out;
}
/*
* By now we've committed to the send. We reuse rds_send_worker()
* to retry sends in the rds thread if the transport asks us to.
*/
rds_stats_inc(s_send_queued);
ret = rds_send_xmit(conn);
if (ret == -ENOMEM || ret == -EAGAIN)
queue_delayed_work(rds_wq, &conn->c_send_w, 1);
rds_message_put(rm);
return payload_len;
out:
/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
* If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
* or in any other way, we need to destroy the MR again */
if (allocated_mr)
rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
if (rm)
rds_message_put(rm);
return ret;
}
|
CWE-362
| 179,745 | 1,433 |
63125872970388835555558596351943545304
| null | null | null |
linux
|
4b6184336ebb5c8dc1eae7f7ab46ee608a748b05
| 1 |
long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
unsigned long flags;
void __user *uarg = (void __user *)arg;
switch (cmd) {
case DIGI_GETDD:
{
/*
* This returns the total number of boards
* in the system, as well as driver version
* and has space for a reserved entry
*/
struct digi_dinfo ddi;
spin_lock_irqsave(&dgnc_global_lock, flags);
ddi.dinfo_nboards = dgnc_NumBoards;
sprintf(ddi.dinfo_version, "%s", DG_PART);
spin_unlock_irqrestore(&dgnc_global_lock, flags);
if (copy_to_user(uarg, &ddi, sizeof(ddi)))
return -EFAULT;
break;
}
case DIGI_GETBD:
{
int brd;
struct digi_info di;
if (copy_from_user(&brd, uarg, sizeof(int)))
return -EFAULT;
if (brd < 0 || brd >= dgnc_NumBoards)
return -ENODEV;
memset(&di, 0, sizeof(di));
di.info_bdnum = brd;
spin_lock_irqsave(&dgnc_Board[brd]->bd_lock, flags);
di.info_bdtype = dgnc_Board[brd]->dpatype;
di.info_bdstate = dgnc_Board[brd]->dpastatus;
di.info_ioport = 0;
di.info_physaddr = (ulong)dgnc_Board[brd]->membase;
di.info_physsize = (ulong)dgnc_Board[brd]->membase
- dgnc_Board[brd]->membase_end;
if (dgnc_Board[brd]->state != BOARD_FAILED)
di.info_nports = dgnc_Board[brd]->nasync;
else
di.info_nports = 0;
spin_unlock_irqrestore(&dgnc_Board[brd]->bd_lock, flags);
if (copy_to_user(uarg, &di, sizeof(di)))
return -EFAULT;
break;
}
case DIGI_GET_NI_INFO:
{
struct channel_t *ch;
struct ni_info ni;
unsigned char mstat = 0;
uint board = 0;
uint channel = 0;
if (copy_from_user(&ni, uarg, sizeof(ni)))
return -EFAULT;
board = ni.board;
channel = ni.channel;
/* Verify boundaries on board */
if (board >= dgnc_NumBoards)
return -ENODEV;
/* Verify boundaries on channel */
if (channel >= dgnc_Board[board]->nasync)
return -ENODEV;
ch = dgnc_Board[board]->channels[channel];
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return -ENODEV;
memset(&ni, 0, sizeof(ni));
ni.board = board;
ni.channel = channel;
spin_lock_irqsave(&ch->ch_lock, flags);
mstat = (ch->ch_mostat | ch->ch_mistat);
if (mstat & UART_MCR_DTR) {
ni.mstat |= TIOCM_DTR;
ni.dtr = TIOCM_DTR;
}
if (mstat & UART_MCR_RTS) {
ni.mstat |= TIOCM_RTS;
ni.rts = TIOCM_RTS;
}
if (mstat & UART_MSR_CTS) {
ni.mstat |= TIOCM_CTS;
ni.cts = TIOCM_CTS;
}
if (mstat & UART_MSR_RI) {
ni.mstat |= TIOCM_RI;
ni.ri = TIOCM_RI;
}
if (mstat & UART_MSR_DCD) {
ni.mstat |= TIOCM_CD;
ni.dcd = TIOCM_CD;
}
if (mstat & UART_MSR_DSR)
ni.mstat |= TIOCM_DSR;
ni.iflag = ch->ch_c_iflag;
ni.oflag = ch->ch_c_oflag;
ni.cflag = ch->ch_c_cflag;
ni.lflag = ch->ch_c_lflag;
if (ch->ch_digi.digi_flags & CTSPACE ||
ch->ch_c_cflag & CRTSCTS)
ni.hflow = 1;
else
ni.hflow = 0;
if ((ch->ch_flags & CH_STOPI) ||
(ch->ch_flags & CH_FORCED_STOPI))
ni.recv_stopped = 1;
else
ni.recv_stopped = 0;
if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP))
ni.xmit_stopped = 1;
else
ni.xmit_stopped = 0;
ni.curtx = ch->ch_txcount;
ni.currx = ch->ch_rxcount;
ni.baud = ch->ch_old_baud;
spin_unlock_irqrestore(&ch->ch_lock, flags);
if (copy_to_user(uarg, &ni, sizeof(ni)))
return -EFAULT;
break;
}
}
return 0;
}
|
CWE-200
| 179,746 | 1,434 |
193364450923515906739503853238777679552
| null | null | null |
linux
|
eda98796aff0d9bf41094b06811f5def3b4c333c
| 1 |
static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
{
struct vivid_dev *dev = (struct vivid_dev *)info->par;
switch (cmd) {
case FBIOGET_VBLANK: {
struct fb_vblank vblank;
vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
vblank.count = 0;
vblank.vcount = 0;
vblank.hcount = 0;
if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank)))
return -EFAULT;
return 0;
}
default:
dprintk(dev, 1, "Unknown ioctl %08x\n", cmd);
return -EINVAL;
}
return 0;
}
|
CWE-200
| 179,747 | 1,435 |
326103382253395957723824427348802620110
| null | null | null |
linux
|
ce1fad2740c648a4340f6f6c391a8a83769d2e8c
| 1 |
static noinline void key_gc_unused_keys(struct list_head *keys)
{
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
/* Throw away the key data */
if (key->type->destroy)
key->type->destroy(key);
security_key_free(key);
/* deal with the user's key tracking and quota */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
kfree(key->description);
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC_X;
#endif
kmem_cache_free(key_jar, key);
}
}
|
CWE-20
| 179,748 | 1,436 |
121918299636917930420350077736385938344
| null | null | null |
linux
|
b9a532277938798b53178d5a66af6e2915cb27cf
| 1 |
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
struct msg_queue *msq;
int id, retval;
key_t key = params->key;
int msgflg = params->flg;
msq = ipc_rcu_alloc(sizeof(*msq));
if (!msq)
return -ENOMEM;
msq->q_perm.mode = msgflg & S_IRWXUGO;
msq->q_perm.key = key;
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
ipc_rcu_putref(msq, ipc_rcu_free);
return retval;
}
/* ipc_addid() locks msq upon success. */
id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
if (id < 0) {
ipc_rcu_putref(msq, msg_rcu_free);
return id;
}
msq->q_stime = msq->q_rtime = 0;
msq->q_ctime = get_seconds();
msq->q_cbytes = msq->q_qnum = 0;
msq->q_qbytes = ns->msg_ctlmnb;
msq->q_lspid = msq->q_lrpid = 0;
INIT_LIST_HEAD(&msq->q_messages);
INIT_LIST_HEAD(&msq->q_receivers);
INIT_LIST_HEAD(&msq->q_senders);
ipc_unlock_object(&msq->q_perm);
rcu_read_unlock();
return msq->q_perm.id;
}
|
CWE-362
| 179,750 | 1,437 |
145137506070763415253920429573779377921
| null | null | null |
linux
|
b9a532277938798b53178d5a66af6e2915cb27cf
| 1 |
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
key_t key = params->key;
int shmflg = params->flg;
size_t size = params->u.size;
int error;
struct shmid_kernel *shp;
size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct file *file;
char name[13];
int id;
vm_flags_t acctflag = 0;
if (size < SHMMIN || size > ns->shm_ctlmax)
return -EINVAL;
if (numpages << PAGE_SHIFT < size)
return -ENOSPC;
if (ns->shm_tot + numpages < ns->shm_tot ||
ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
shp = ipc_rcu_alloc(sizeof(*shp));
if (!shp)
return -ENOMEM;
shp->shm_perm.key = key;
shp->shm_perm.mode = (shmflg & S_IRWXUGO);
shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
if (error) {
ipc_rcu_putref(shp, ipc_rcu_free);
return error;
}
sprintf(name, "SYSV%08x", key);
if (shmflg & SHM_HUGETLB) {
struct hstate *hs;
size_t hugesize;
hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
if (!hs) {
error = -EINVAL;
goto no_file;
}
hugesize = ALIGN(size, huge_page_size(hs));
/* hugetlb_file_setup applies strict accounting */
if (shmflg & SHM_NORESERVE)
acctflag = VM_NORESERVE;
file = hugetlb_file_setup(name, hugesize, acctflag,
&shp->mlock_user, HUGETLB_SHMFS_INODE,
(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
} else {
/*
* Do not allow no accounting for OVERCOMMIT_NEVER, even
* if it's asked for.
*/
if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER)
acctflag = VM_NORESERVE;
file = shmem_kernel_file_setup(name, size, acctflag);
}
error = PTR_ERR(file);
if (IS_ERR(file))
goto no_file;
id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
if (id < 0) {
error = id;
goto no_id;
}
shp->shm_cprid = task_tgid_vnr(current);
shp->shm_lprid = 0;
shp->shm_atim = shp->shm_dtim = 0;
shp->shm_ctim = get_seconds();
shp->shm_segsz = size;
shp->shm_nattch = 0;
shp->shm_file = file;
shp->shm_creator = current;
	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
/*
* shmid gets reported as "inode#" in /proc/pid/maps.
* proc-ps tools use this. Changing this will break them.
*/
file_inode(file)->i_ino = shp->shm_perm.id;
ns->shm_tot += numpages;
error = shp->shm_perm.id;
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
return error;
no_id:
if (is_file_hugepages(file) && shp->mlock_user)
user_shm_unlock(size, shp->mlock_user);
fput(file);
no_file:
ipc_rcu_putref(shp, shm_rcu_free);
return error;
}
|
CWE-362
| 179,751 | 1,438 |
280625932635716952209287726224303850755
| null | null | null |
linux
|
b9a532277938798b53178d5a66af6e2915cb27cf
| 1 |
int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
kuid_t euid;
kgid_t egid;
int id;
int next_id = ids->next_id;
if (size > IPCMNI)
size = IPCMNI;
if (ids->in_use >= size)
return -ENOSPC;
idr_preload(GFP_KERNEL);
spin_lock_init(&new->lock);
new->deleted = false;
rcu_read_lock();
spin_lock(&new->lock);
id = idr_alloc(&ids->ipcs_idr, new,
(next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
GFP_NOWAIT);
idr_preload_end();
if (id < 0) {
spin_unlock(&new->lock);
rcu_read_unlock();
return id;
}
ids->in_use++;
current_euid_egid(&euid, &egid);
new->cuid = new->uid = euid;
new->gid = new->cgid = egid;
if (next_id < 0) {
new->seq = ids->seq++;
if (ids->seq > IPCID_SEQ_MAX)
ids->seq = 0;
} else {
new->seq = ipcid_to_seqx(next_id);
ids->next_id = -1;
}
new->id = ipc_buildid(id, new->seq);
return id;
}
|
CWE-362
| 179,752 | 1,439 |
260213467137768854667641021198010622734
| null | null | null |
linux
|
74e98eb085889b0d2d4908f59f6e00026063014f
| 1 |
static struct rds_connection *__rds_conn_create(struct net *net,
__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp,
int is_outgoing)
{
struct rds_connection *conn, *parent = NULL;
struct hlist_head *head = rds_conn_bucket(laddr, faddr);
struct rds_transport *loop_trans;
unsigned long flags;
int ret;
struct rds_transport *otrans = trans;
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
goto new_conn;
rcu_read_lock();
conn = rds_conn_lookup(net, head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
laddr == faddr && !is_outgoing) {
/* This is a looped back IB connection, and we're
* called by the code handling the incoming connect.
* We need a second connection object into which we
* can stick the other QP. */
parent = conn;
conn = parent->c_passive;
}
rcu_read_unlock();
if (conn)
goto out;
new_conn:
conn = kmem_cache_zalloc(rds_conn_slab, gfp);
if (!conn) {
conn = ERR_PTR(-ENOMEM);
goto out;
}
INIT_HLIST_NODE(&conn->c_hash_node);
conn->c_laddr = laddr;
conn->c_faddr = faddr;
spin_lock_init(&conn->c_lock);
conn->c_next_tx_seq = 1;
rds_conn_net_set(conn, net);
init_waitqueue_head(&conn->c_waitq);
INIT_LIST_HEAD(&conn->c_send_queue);
INIT_LIST_HEAD(&conn->c_retrans);
ret = rds_cong_get_maps(conn);
if (ret) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
goto out;
}
/*
* This is where a connection becomes loopback. If *any* RDS sockets
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
loop_trans = rds_trans_get_preferred(net, faddr);
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
if (is_outgoing && trans->t_prefer_loopback) {
/* "outgoing" connection - and the transport
* says it wants the connection handled by the
* loopback transport. This is what TCP does.
*/
trans = &rds_loop_transport;
}
}
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
if (ret) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
goto out;
}
atomic_set(&conn->c_state, RDS_CONN_DOWN);
conn->c_send_gen = 0;
conn->c_reconnect_jiffies = 0;
INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
mutex_init(&conn->c_cm_lock);
conn->c_flags = 0;
rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
conn, &laddr, &faddr,
trans->t_name ? trans->t_name : "[unknown]",
is_outgoing ? "(outgoing)" : "");
/*
* Since we ran without holding the conn lock, someone could
* have created the same conn (either normal or passive) in the
* interim. We check while holding the lock. If we won, we complete
* init and return our conn. If we lost, we rollback and return the
* other one.
*/
spin_lock_irqsave(&rds_conn_lock, flags);
if (parent) {
/* Creating passive conn */
if (parent->c_passive) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
conn = parent->c_passive;
} else {
parent->c_passive = conn;
rds_cong_add_conn(conn);
rds_conn_count++;
}
} else {
/* Creating normal conn */
struct rds_connection *found;
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
found = NULL;
else
found = rds_conn_lookup(net, head, laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
conn = found;
} else {
if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
(otrans->t_type != RDS_TRANS_TCP)) {
/* Only the active side should be added to
* reconnect list for TCP.
*/
hlist_add_head_rcu(&conn->c_hash_node, head);
}
rds_cong_add_conn(conn);
rds_conn_count++;
}
}
spin_unlock_irqrestore(&rds_conn_lock, flags);
out:
return conn;
}
| 179,755 | 1,441 |
192605127811959814217274006624068539949
| null | null | null |
|
openssh-portable
|
5e75f5198769056089fb06c4d738ab0e5abc66f7
| 1 |
mm_answer_pam_free_ctx(int sock, Buffer *m)
{
debug3("%s", __func__);
(sshpam_device.free_ctx)(sshpam_ctxt);
buffer_clear(m);
mm_request_send(sock, MONITOR_ANS_PAM_FREE_CTX, m);
auth_method = "keyboard-interactive";
auth_submethod = "pam";
return (sshpam_authok == sshpam_ctxt);
}
|
CWE-264
| 179,756 | 1,442 |
205096995958077848280016956620484542801
| null | null | null |
openssh-portable
|
d4697fe9a28dab7255c60433e4dd23cf7fce8a8b
| 1 |
mm_answer_pam_init_ctx(int sock, Buffer *m)
{
debug3("%s", __func__);
authctxt->user = buffer_get_string(m, NULL);
sshpam_ctxt = (sshpam_device.init_ctx)(authctxt);
sshpam_authok = NULL;
buffer_clear(m);
if (sshpam_ctxt != NULL) {
monitor_permit(mon_dispatch, MONITOR_REQ_PAM_FREE_CTX, 1);
buffer_put_int(m, 1);
} else {
buffer_put_int(m, 0);
}
mm_request_send(sock, MONITOR_ANS_PAM_INIT_CTX, m);
return (0);
}
|
CWE-20
| 179,757 | 1,443 |
77335835840341275305105461731310067678
| null | null | null |
openssh-portable
|
d4697fe9a28dab7255c60433e4dd23cf7fce8a8b
| 1 |
mm_sshpam_init_ctx(Authctxt *authctxt)
{
Buffer m;
int success;
debug3("%s", __func__);
buffer_init(&m);
buffer_put_cstring(&m, authctxt->user);
mm_request_send(pmonitor->m_recvfd, MONITOR_REQ_PAM_INIT_CTX, &m);
debug3("%s: waiting for MONITOR_ANS_PAM_INIT_CTX", __func__);
mm_request_receive_expect(pmonitor->m_recvfd, MONITOR_ANS_PAM_INIT_CTX, &m);
success = buffer_get_int(&m);
if (success == 0) {
debug3("%s: pam_init_ctx failed", __func__);
buffer_free(&m);
return (NULL);
}
buffer_free(&m);
return (authctxt);
}
|
CWE-20
| 179,758 | 1,444 |
6266967114205273780762035496760258960
| null | null | null |
linux
|
9a5cbce421a283e6aea3c4007f141735bf9da8c3
| 1 |
static void perf_callchain_user_64(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
unsigned long lr;
long level = 0;
struct signal_frame_64 __user *sigframe;
unsigned long __user *fp, *uregs;
next_ip = perf_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
for (;;) {
fp = (unsigned long __user *) sp;
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
return;
if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
return;
/*
* Note: the next_sp - sp >= signal frame size check
* is true when next_sp < sp, which can happen when
* transitioning from an alternate signal stack to the
* normal stack.
*/
if (next_sp - sp >= sizeof(struct signal_frame_64) &&
(is_sigreturn_64_address(next_ip, sp) ||
(level <= 1 && is_sigreturn_64_address(lr, sp))) &&
sane_signal_64_frame(sp)) {
/*
			 * This looks like a signal frame
*/
sigframe = (struct signal_frame_64 __user *) sp;
uregs = sigframe->uc.uc_mcontext.gp_regs;
if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
read_user_stack_64(&uregs[PT_LNK], &lr) ||
read_user_stack_64(&uregs[PT_R1], &sp))
return;
level = 0;
perf_callchain_store(entry, PERF_CONTEXT_USER);
perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
}
|
CWE-399
| 179,759 | 1,445 |
223862452787017047608400156707358896884
| null | null | null |
linux
|
7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5
| 1 |
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
struct file *eventfp, *filep = NULL;
struct eventfd_ctx *ctx = NULL;
u64 p;
long r;
int i, fd;
/* If you are not the owner, you can become one */
if (ioctl == VHOST_SET_OWNER) {
r = vhost_dev_set_owner(d);
goto done;
}
/* You must be the owner to do anything else */
r = vhost_dev_check_owner(d);
if (r)
goto done;
switch (ioctl) {
case VHOST_SET_MEM_TABLE:
r = vhost_set_memory(d, argp);
break;
case VHOST_SET_LOG_BASE:
if (copy_from_user(&p, argp, sizeof p)) {
r = -EFAULT;
break;
}
if ((u64)(unsigned long)p != p) {
r = -EFAULT;
break;
}
for (i = 0; i < d->nvqs; ++i) {
struct vhost_virtqueue *vq;
void __user *base = (void __user *)(unsigned long)p;
vq = d->vqs[i];
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
if (vq->private_data && !vq_log_access_ok(vq, base))
r = -EFAULT;
else
vq->log_base = base;
mutex_unlock(&vq->mutex);
}
break;
case VHOST_SET_LOG_FD:
r = get_user(fd, (int __user *)argp);
if (r < 0)
break;
eventfp = fd == -1 ? NULL : eventfd_fget(fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
break;
}
if (eventfp != d->log_file) {
filep = d->log_file;
ctx = d->log_ctx;
d->log_ctx = eventfp ?
eventfd_ctx_fileget(eventfp) : NULL;
} else
filep = eventfp;
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
d->vqs[i]->log_ctx = d->log_ctx;
mutex_unlock(&d->vqs[i]->mutex);
}
if (ctx)
eventfd_ctx_put(ctx);
if (filep)
fput(filep);
break;
default:
r = -ENOIOCTLCMD;
break;
}
done:
return r;
}
|
CWE-399
| 179,763 | 1,449 |
259350127464630770495067684681191854272
| null | null | null |
miniupnp
|
79cca974a4c2ab1199786732a67ff6d898051b78
| 1 |
void IGDstartelt(void * d, const char * name, int l)
{
struct IGDdatas * datas = (struct IGDdatas *)d;
memcpy( datas->cureltname, name, l);
datas->cureltname[l] = '\0';
datas->level++;
if( (l==7) && !memcmp(name, "service", l) ) {
datas->tmp.controlurl[0] = '\0';
datas->tmp.eventsuburl[0] = '\0';
datas->tmp.scpdurl[0] = '\0';
datas->tmp.servicetype[0] = '\0';
}
}
|
CWE-119
| 179,764 | 1,450 |
81376760744966149651166535732174236788
| null | null | null |
linux
|
f15133df088ecadd141ea1907f2c96df67c729f0
| 1 |
static struct file *path_openat(int dfd, struct filename *pathname,
struct nameidata *nd, const struct open_flags *op, int flags)
{
struct file *file;
struct path path;
int opened = 0;
int error;
file = get_empty_filp();
if (IS_ERR(file))
return file;
file->f_flags = op->open_flag;
if (unlikely(file->f_flags & __O_TMPFILE)) {
error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
goto out;
}
error = path_init(dfd, pathname, flags, nd);
if (unlikely(error))
goto out;
error = do_last(nd, &path, file, op, &opened, pathname);
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
if (!(nd->flags & LOOKUP_FOLLOW)) {
path_put_conditional(&path, nd);
path_put(&nd->path);
error = -ELOOP;
break;
}
error = may_follow_link(&link, nd);
if (unlikely(error))
break;
nd->flags |= LOOKUP_PARENT;
nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
error = do_last(nd, &path, file, op, &opened, pathname);
put_link(nd, &link, cookie);
}
out:
path_cleanup(nd);
if (!(opened & FILE_OPENED)) {
BUG_ON(!error);
put_filp(file);
}
if (unlikely(error)) {
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
else
error = -ESTALE;
}
file = ERR_PTR(error);
}
return file;
}
| 179,766 | 1,451 |
75770052784686876193008885941555969798
| null | null | null |
|
linux
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
| 1 |
static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr;
int err;
file = kmalloc(sizeof(*file), GFP_NOIO);
if (!file)
return -ENOMEM;
err = 0;
spin_lock(&mddev->lock);
/* bitmap disabled, zero the first byte and copy out */
if (!mddev->bitmap_info.file)
file->pathname[0] = '\0';
else if ((ptr = file_path(mddev->bitmap_info.file,
file->pathname, sizeof(file->pathname))),
IS_ERR(ptr))
err = PTR_ERR(ptr);
else
memmove(file->pathname, ptr,
sizeof(file->pathname)-(ptr-file->pathname));
spin_unlock(&mddev->lock);
if (err == 0 &&
copy_to_user(arg, file, sizeof(*file)))
err = -EFAULT;
kfree(file);
return err;
}
|
CWE-200
| 179,767 | 1,452 |
233692859281768076508018721288768193640
| null | null | null |
linux
|
beb39db59d14990e401e235faf66a6b9b31240b0
| 1 |
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
bool slow;
if (flags & MSG_ERRQUEUE)
return ip_recv_error(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udp_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
goto out_free;
}
if (!peeked)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = udp_hdr(skb)->source;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
}
unlock_sock_fast(sk, slow);
if (noblock)
return -EAGAIN;
/* starting over for a new packet */
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
|
CWE-399
| 179,768 | 1,453 |
200373655058309955265776245217854688305
| null | null | null |
linux
|
beb39db59d14990e401e235faf66a6b9b31240b0
| 1 |
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
int is_udp4;
bool slow;
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udpv6_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
}
goto out_free;
}
if (!peeked) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
}
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
sin6->sin6_family = AF_INET6;
sin6->sin6_port = udp_hdr(skb)->source;
sin6->sin6_flowinfo = 0;
if (is_udp4) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin6->sin6_addr);
sin6->sin6_scope_id = 0;
} else {
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
inet6_iif(skb));
}
*addr_len = sizeof(*sin6);
}
if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
if (is_udp4) {
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
} else {
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
}
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
} else {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
}
unlock_sock_fast(sk, slow);
if (noblock)
return -EAGAIN;
/* starting over for a new packet */
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
|
CWE-399
| 179,769 | 1,454 |
122715536700597053461696048193427743841
| null | null | null |
linux
|
54a20552e1eae07aa240fa370a0293e006b5faed
| 1 |
static void init_vmcb(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
svm->vcpu.fpu_active = 1;
svm->vcpu.arch.hflags = 0;
set_cr_intercept(svm, INTERCEPT_CR0_READ);
set_cr_intercept(svm, INTERCEPT_CR3_READ);
set_cr_intercept(svm, INTERCEPT_CR4_READ);
set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
set_dr_intercepts(svm);
set_exception_intercept(svm, PF_VECTOR);
set_exception_intercept(svm, UD_VECTOR);
set_exception_intercept(svm, MC_VECTOR);
set_intercept(svm, INTERCEPT_INTR);
set_intercept(svm, INTERCEPT_NMI);
set_intercept(svm, INTERCEPT_SMI);
set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
set_intercept(svm, INTERCEPT_RDPMC);
set_intercept(svm, INTERCEPT_CPUID);
set_intercept(svm, INTERCEPT_INVD);
set_intercept(svm, INTERCEPT_HLT);
set_intercept(svm, INTERCEPT_INVLPG);
set_intercept(svm, INTERCEPT_INVLPGA);
set_intercept(svm, INTERCEPT_IOIO_PROT);
set_intercept(svm, INTERCEPT_MSR_PROT);
set_intercept(svm, INTERCEPT_TASK_SWITCH);
set_intercept(svm, INTERCEPT_SHUTDOWN);
set_intercept(svm, INTERCEPT_VMRUN);
set_intercept(svm, INTERCEPT_VMMCALL);
set_intercept(svm, INTERCEPT_VMLOAD);
set_intercept(svm, INTERCEPT_VMSAVE);
set_intercept(svm, INTERCEPT_STGI);
set_intercept(svm, INTERCEPT_CLGI);
set_intercept(svm, INTERCEPT_SKINIT);
set_intercept(svm, INTERCEPT_WBINVD);
set_intercept(svm, INTERCEPT_MONITOR);
set_intercept(svm, INTERCEPT_MWAIT);
set_intercept(svm, INTERCEPT_XSETBV);
control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __pa(svm->msrpm);
control->int_ctl = V_INTR_MASKING_MASK;
init_seg(&save->es);
init_seg(&save->ss);
init_seg(&save->ds);
init_seg(&save->fs);
init_seg(&save->gs);
save->cs.selector = 0xf000;
save->cs.base = 0xffff0000;
/* Executable/Readable Code Segment */
save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
save->cs.limit = 0xffff;
save->gdtr.limit = 0xffff;
save->idtr.limit = 0xffff;
init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
svm_set_efer(&svm->vcpu, 0);
save->dr6 = 0xffff0ff0;
kvm_set_rflags(&svm->vcpu, 2);
save->rip = 0x0000fff0;
svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
/*
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
* It also updates the guest-visible cr0 value.
*/
svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
kvm_mmu_reset_context(&svm->vcpu);
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
if (npt_enabled) {
/* Setup VMCB for Nested Paging */
control->nested_ctl = 1;
clr_intercept(svm, INTERCEPT_INVLPG);
clr_exception_intercept(svm, PF_VECTOR);
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = svm->vcpu.arch.pat;
save->cr3 = 0;
save->cr4 = 0;
}
svm->asid_generation = 0;
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;
if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
control->pause_filter_count = 3000;
set_intercept(svm, INTERCEPT_PAUSE);
}
mark_all_dirty(svm->vmcb);
enable_gif(svm);
}
|
CWE-399
| 179,770 | 1,455 |
279366401211128952055843103446982399570
| null | null | null |
abrt
|
3c1b60cfa62d39e5fff5a53a5bc53dae189e740e
| 1 |
int main(int argc, char** argv)
{
/* Kernel starts us with all fd's closed.
* But it's dangerous:
* fprintf(stderr) can dump messages into random fds, etc.
* Ensure that if any of fd 0,1,2 is closed, we open it to /dev/null.
*/
int fd = xopen("/dev/null", O_RDWR);
while (fd < 2)
fd = xdup(fd);
if (fd > 2)
close(fd);
int err = 1;
logmode = LOGMODE_JOURNAL;
/* Parse abrt.conf */
load_abrt_conf();
/* ... and plugins/CCpp.conf */
bool setting_MakeCompatCore;
bool setting_SaveBinaryImage;
bool setting_SaveFullCore;
bool setting_CreateCoreBacktrace;
bool setting_SaveContainerizedPackageData;
bool setting_StandaloneHook;
{
map_string_t *settings = new_map_string();
load_abrt_plugin_conf_file("CCpp.conf", settings);
const char *value;
value = get_map_string_item_or_NULL(settings, "MakeCompatCore");
setting_MakeCompatCore = value && string_to_bool(value);
value = get_map_string_item_or_NULL(settings, "SaveBinaryImage");
setting_SaveBinaryImage = value && string_to_bool(value);
value = get_map_string_item_or_NULL(settings, "SaveFullCore");
setting_SaveFullCore = value ? string_to_bool(value) : true;
value = get_map_string_item_or_NULL(settings, "CreateCoreBacktrace");
setting_CreateCoreBacktrace = value ? string_to_bool(value) : true;
value = get_map_string_item_or_NULL(settings, "SaveContainerizedPackageData");
setting_SaveContainerizedPackageData = value && string_to_bool(value);
/* Do not call abrt-action-save-package-data with process's root, if ExploreChroots is disabled. */
if (!g_settings_explorechroots)
{
if (setting_SaveContainerizedPackageData)
log_warning("Ignoring SaveContainerizedPackageData because ExploreChroots is disabled");
setting_SaveContainerizedPackageData = false;
}
value = get_map_string_item_or_NULL(settings, "StandaloneHook");
setting_StandaloneHook = value && string_to_bool(value);
value = get_map_string_item_or_NULL(settings, "VerboseLog");
if (value)
g_verbose = xatoi_positive(value);
free_map_string(settings);
}
if (argc == 2 && strcmp(argv[1], "--config-test"))
return test_configuration(setting_SaveFullCore, setting_CreateCoreBacktrace);
if (argc < 8)
{
/* percent specifier: %s %c %p %u %g %t %e %P %i*/
/* argv: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]*/
error_msg_and_die("Usage: %s SIGNO CORE_SIZE_LIMIT PID UID GID TIME BINARY_NAME GLOBAL_PID [TID]", argv[0]);
}
/* Not needed on 2.6.30.
* At least 2.6.18 has a bug where
* argv[1] = "SIGNO CORE_SIZE_LIMIT PID ..."
* argv[2] = "CORE_SIZE_LIMIT PID ..."
* and so on. Fixing it:
*/
if (strchr(argv[1], ' '))
{
int i;
for (i = 1; argv[i]; i++)
{
strchrnul(argv[i], ' ')[0] = '\0';
}
}
errno = 0;
const char* signal_str = argv[1];
int signal_no = xatoi_positive(signal_str);
off_t ulimit_c = strtoull(argv[2], NULL, 10);
if (ulimit_c < 0) /* unlimited? */
{
/* set to max possible >0 value */
ulimit_c = ~((off_t)1 << (sizeof(off_t)*8-1));
}
const char *pid_str = argv[3];
pid_t local_pid = xatoi_positive(argv[3]);
uid_t uid = xatoi_positive(argv[4]);
if (errno || local_pid <= 0)
{
perror_msg_and_die("PID '%s' or limit '%s' is bogus", argv[3], argv[2]);
}
{
char *s = xmalloc_fopen_fgetline_fclose(VAR_RUN"/abrt/saved_core_pattern");
/* If we have a saved pattern and it's not a "|PROG ARGS" thing... */
if (s && s[0] != '|')
core_basename = s;
else
free(s);
}
const char *global_pid_str = argv[8];
pid_t pid = xatoi_positive(argv[8]);
pid_t tid = -1;
const char *tid_str = argv[9];
if (tid_str)
{
tid = xatoi_positive(tid_str);
}
char path[PATH_MAX];
char *executable = get_executable(pid);
if (executable && strstr(executable, "/abrt-hook-ccpp"))
{
error_msg_and_die("PID %lu is '%s', not dumping it to avoid recursion",
(long)pid, executable);
}
user_pwd = get_cwd(pid); /* may be NULL on error */
log_notice("user_pwd:'%s'", user_pwd);
sprintf(path, "/proc/%lu/status", (long)pid);
char *proc_pid_status = xmalloc_xopen_read_close(path, /*maxsz:*/ NULL);
uid_t fsuid = uid;
uid_t tmp_fsuid = get_fsuid(proc_pid_status);
if (tmp_fsuid < 0)
perror_msg_and_die("Can't parse 'Uid: line' in /proc/%lu/status", (long)pid);
const int fsgid = get_fsgid(proc_pid_status);
if (fsgid < 0)
error_msg_and_die("Can't parse 'Gid: line' in /proc/%lu/status", (long)pid);
int suid_policy = dump_suid_policy();
if (tmp_fsuid != uid)
{
/* use root for suided apps unless it's explicitly set to UNSAFE */
fsuid = 0;
if (suid_policy == DUMP_SUID_UNSAFE)
fsuid = tmp_fsuid;
else
{
g_user_core_flags = O_EXCL;
g_need_nonrelative = 1;
}
}
/* Open a fd to compat coredump, if requested and is possible */
int user_core_fd = -1;
if (setting_MakeCompatCore && ulimit_c != 0)
/* note: checks "user_pwd == NULL" inside; updates core_basename */
user_core_fd = open_user_core(uid, fsuid, fsgid, pid, &argv[1]);
if (executable == NULL)
{
/* readlink on /proc/$PID/exe failed, don't create abrt dump dir */
error_msg("Can't read /proc/%lu/exe link", (long)pid);
return create_user_core(user_core_fd, pid, ulimit_c);
}
const char *signame = NULL;
if (!signal_is_fatal(signal_no, &signame))
return create_user_core(user_core_fd, pid, ulimit_c); // not a signal we care about
const int abrtd_running = daemon_is_ok();
if (!setting_StandaloneHook && !abrtd_running)
{
/* not an error, exit with exit code 0 */
log("abrtd is not running. If it crashed, "
"/proc/sys/kernel/core_pattern contains a stale value, "
"consider resetting it to 'core'"
);
return create_user_core(user_core_fd, pid, ulimit_c);
}
if (setting_StandaloneHook)
ensure_writable_dir(g_settings_dump_location, DEFAULT_DUMP_LOCATION_MODE, "abrt");
if (g_settings_nMaxCrashReportsSize > 0)
{
/* If free space is less than 1/4 of MaxCrashReportsSize... */
if (low_free_space(g_settings_nMaxCrashReportsSize, g_settings_dump_location))
return create_user_core(user_core_fd, pid, ulimit_c);
}
/* Check /var/tmp/abrt/last-ccpp marker, do not dump repeated crashes
* if they happen too often. Else, write new marker value.
*/
snprintf(path, sizeof(path), "%s/last-ccpp", g_settings_dump_location);
if (check_recent_crash_file(path, executable))
{
/* It is a repeating crash */
return create_user_core(user_core_fd, pid, ulimit_c);
}
const char *last_slash = strrchr(executable, '/');
if (last_slash && strncmp(++last_slash, "abrt", 4) == 0)
{
if (g_settings_debug_level == 0)
{
log_warning("Ignoring crash of %s (SIG%s).",
executable, signame ? signame : signal_str);
goto cleanup_and_exit;
}
/* If abrtd/abrt-foo crashes, we don't want to create a _directory_,
         * since that could cause a new copy of abrtd to process it,
* and maybe crash again...
* Unlike dirs, mere files are ignored by abrtd.
*/
if (snprintf(path, sizeof(path), "%s/%s-coredump", g_settings_dump_location, last_slash) >= sizeof(path))
error_msg_and_die("Error saving '%s': truncated long file path", path);
int abrt_core_fd = xopen3(path, O_WRONLY | O_CREAT | O_TRUNC, 0600);
off_t core_size = copyfd_eof(STDIN_FILENO, abrt_core_fd, COPYFD_SPARSE);
if (core_size < 0 || fsync(abrt_core_fd) != 0)
{
unlink(path);
/* copyfd_eof logs the error including errno string,
* but it does not log file name */
error_msg_and_die("Error saving '%s'", path);
}
log_notice("Saved core dump of pid %lu (%s) to %s (%llu bytes)", (long)pid, executable, path, (long long)core_size);
err = 0;
goto cleanup_and_exit;
}
unsigned path_len = snprintf(path, sizeof(path), "%s/ccpp-%s-%lu.new",
g_settings_dump_location, iso_date_string(NULL), (long)pid);
if (path_len >= (sizeof(path) - sizeof("/"FILENAME_COREDUMP)))
{
return create_user_core(user_core_fd, pid, ulimit_c);
}
/* If you don't want to have fs owner as root then:
*
* - use fsuid instead of uid for fs owner, so we don't expose any
* sensitive information of suided app in /var/(tmp|spool)/abrt
*
* - use dd_create_skeleton() and dd_reset_ownership(), when you finish
     * creating the new dump directory, to prevent the real owner from writing to
     * the directory until the hook is done (avoid race conditions and defend
     * against hard and symbolic link attacks)
*/
dd = dd_create(path, /*fs owner*/0, DEFAULT_DUMP_DIR_MODE);
if (dd)
{
char source_filename[sizeof("/proc/%lu/somewhat_long_name") + sizeof(long)*3];
int source_base_ofs = sprintf(source_filename, "/proc/%lu/root", (long)pid);
source_base_ofs -= strlen("root");
/* What's wrong on using /proc/[pid]/root every time ?*/
/* It creates os_info_in_root_dir for all crashes. */
char *rootdir = process_has_own_root(pid) ? get_rootdir(pid) : NULL;
/* Reading data from an arbitrary root directory is not secure. */
if (g_settings_explorechroots)
{
/* Yes, test 'rootdir' but use 'source_filename' because 'rootdir' can
* be '/' for a process with own namespace. 'source_filename' is /proc/[pid]/root. */
dd_create_basic_files(dd, fsuid, (rootdir != NULL) ? source_filename : NULL);
}
else
{
dd_create_basic_files(dd, fsuid, NULL);
}
char *dest_filename = concat_path_file(dd->dd_dirname, "also_somewhat_longish_name");
char *dest_base = strrchr(dest_filename, '/') + 1;
strcpy(source_filename + source_base_ofs, "maps");
dd_copy_file(dd, FILENAME_MAPS, source_filename);
strcpy(source_filename + source_base_ofs, "limits");
dd_copy_file(dd, FILENAME_LIMITS, source_filename);
strcpy(source_filename + source_base_ofs, "cgroup");
dd_copy_file(dd, FILENAME_CGROUP, source_filename);
strcpy(source_filename + source_base_ofs, "mountinfo");
dd_copy_file(dd, FILENAME_MOUNTINFO, source_filename);
strcpy(dest_base, FILENAME_OPEN_FDS);
strcpy(source_filename + source_base_ofs, "fd");
dump_fd_info_ext(dest_filename, source_filename, dd->dd_uid, dd->dd_gid);
strcpy(dest_base, FILENAME_NAMESPACES);
dump_namespace_diff_ext(dest_filename, 1, pid, dd->dd_uid, dd->dd_gid);
free(dest_filename);
char *tmp = NULL;
get_env_variable(pid, "container", &tmp);
if (tmp != NULL)
{
dd_save_text(dd, FILENAME_CONTAINER, tmp);
free(tmp);
tmp = NULL;
}
get_env_variable(pid, "container_uuid", &tmp);
if (tmp != NULL)
{
dd_save_text(dd, FILENAME_CONTAINER_UUID, tmp);
free(tmp);
}
/* There's no need to compare mount namespaces and search for '/' in
     * mountinfo. Comparison of inodes of '/proc/[pid]/root' and '/' works
* fine. If those inodes do not equal each other, we have to verify
* that '/proc/[pid]/root' is not a symlink to a chroot.
*/
const int containerized = (rootdir != NULL && strcmp(rootdir, "/") == 0);
if (containerized)
{
log_debug("Process %d is considered to be containerized", pid);
pid_t container_pid;
if (get_pid_of_container(pid, &container_pid) == 0)
{
char *container_cmdline = get_cmdline(container_pid);
dd_save_text(dd, FILENAME_CONTAINER_CMDLINE, container_cmdline);
free(container_cmdline);
}
}
dd_save_text(dd, FILENAME_ANALYZER, "abrt-ccpp");
dd_save_text(dd, FILENAME_TYPE, "CCpp");
dd_save_text(dd, FILENAME_EXECUTABLE, executable);
dd_save_text(dd, FILENAME_PID, pid_str);
dd_save_text(dd, FILENAME_GLOBAL_PID, global_pid_str);
dd_save_text(dd, FILENAME_PROC_PID_STATUS, proc_pid_status);
if (user_pwd)
dd_save_text(dd, FILENAME_PWD, user_pwd);
if (tid_str)
dd_save_text(dd, FILENAME_TID, tid_str);
if (rootdir)
{
if (strcmp(rootdir, "/") != 0)
dd_save_text(dd, FILENAME_ROOTDIR, rootdir);
}
free(rootdir);
char *reason = xasprintf("%s killed by SIG%s",
last_slash, signame ? signame : signal_str);
dd_save_text(dd, FILENAME_REASON, reason);
free(reason);
char *cmdline = get_cmdline(pid);
dd_save_text(dd, FILENAME_CMDLINE, cmdline ? : "");
free(cmdline);
char *environ = get_environ(pid);
dd_save_text(dd, FILENAME_ENVIRON, environ ? : "");
free(environ);
char *fips_enabled = xmalloc_fopen_fgetline_fclose("/proc/sys/crypto/fips_enabled");
if (fips_enabled)
{
if (strcmp(fips_enabled, "0") != 0)
dd_save_text(dd, "fips_enabled", fips_enabled);
free(fips_enabled);
}
dd_save_text(dd, FILENAME_ABRT_VERSION, VERSION);
/* In case of errors, treat the process as if it has locked memory */
long unsigned lck_bytes = ULONG_MAX;
const char *vmlck = strstr(proc_pid_status, "VmLck:");
if (vmlck == NULL)
error_msg("/proc/%s/status does not contain 'VmLck:' line", pid_str);
else if (1 != sscanf(vmlck + 6, "%lu kB\n", &lck_bytes))
error_msg("Failed to parse 'VmLck:' line in /proc/%s/status", pid_str);
if (lck_bytes)
{
log_notice("Process %s of user %lu has locked memory",
pid_str, (long unsigned)uid);
dd_mark_as_notreportable(dd, "The process had locked memory "
"which usually indicates efforts to protect sensitive "
"data (passwords) from being written to disk.\n"
"In order to avoid sensitive information leakages, "
"ABRT will not allow you to report this problem to "
"bug tracking tools");
}
if (setting_SaveBinaryImage)
{
if (save_crashing_binary(pid, dd))
{
error_msg("Error saving '%s'", path);
goto cleanup_and_exit;
}
}
off_t core_size = 0;
if (setting_SaveFullCore)
{
strcpy(path + path_len, "/"FILENAME_COREDUMP);
int abrt_core_fd = create_or_die(path, user_core_fd);
/* We write both coredumps at once.
* We can't write user coredump first, since it might be truncated
* and thus can't be copied and used as abrt coredump;
* and if we write abrt coredump first and then copy it as user one,
* then we have a race when process exits but coredump does not exist yet:
* $ echo -e '#include<signal.h>\nmain(){raise(SIGSEGV);}' | gcc -o test -x c -
* $ rm -f core*; ulimit -c unlimited; ./test; ls -l core*
* 21631 Segmentation fault (core dumped) ./test
* ls: cannot access core*: No such file or directory <=== BAD
*/
core_size = copyfd_sparse(STDIN_FILENO, abrt_core_fd, user_core_fd, ulimit_c);
close_user_core(user_core_fd, core_size);
if (fsync(abrt_core_fd) != 0 || close(abrt_core_fd) != 0 || core_size < 0)
{
unlink(path);
/* copyfd_sparse logs the error including errno string,
* but it does not log file name */
error_msg("Error writing '%s'", path);
goto cleanup_and_exit;
}
}
else
{
/* User core is created even if WriteFullCore is off. */
create_user_core(user_core_fd, pid, ulimit_c);
}
/* User core is either written or closed */
user_core_fd = -1;
/*
* ! No other errors should cause removal of the user core !
*/
/* Because of #1211835 and #1126850 */
#if 0
/* Save JVM crash log if it exists. (JVM's coredump per se
* is nearly useless for JVM developers)
*/
{
char *java_log = xasprintf("/tmp/jvm-%lu/hs_error.log", (long)pid);
int src_fd = open(java_log, O_RDONLY);
free(java_log);
/* If we couldn't open the error log in /tmp directory we can try to
* read the log from the current directory. It may produce AVC, it
* may produce some error log but all these are expected.
*/
if (src_fd < 0)
{
java_log = xasprintf("%s/hs_err_pid%lu.log", user_pwd, (long)pid);
src_fd = open(java_log, O_RDONLY);
free(java_log);
}
if (src_fd >= 0)
{
strcpy(path + path_len, "/hs_err.log");
int dst_fd = create_or_die(path, user_core_fd);
off_t sz = copyfd_eof(src_fd, dst_fd, COPYFD_SPARSE);
if (close(dst_fd) != 0 || sz < 0)
{
error_msg("Error saving '%s'", path);
goto cleanup_and_exit;
}
close(src_fd);
}
}
#endif
/* Perform crash-time unwind of the guilty thread. */
if (tid > 0 && setting_CreateCoreBacktrace)
create_core_backtrace(tid, executable, signal_no, dd);
/* We close dumpdir before we start catering for crash storm case.
* Otherwise, delete_dump_dir's from other concurrent
* CCpp's won't be able to delete our dump (their delete_dump_dir
* will wait for us), and we won't be able to delete their dumps.
* Classic deadlock.
*/
dd_close(dd);
dd = NULL;
path[path_len] = '\0'; /* path now contains only directory name */
if (abrtd_running && setting_SaveContainerizedPackageData && containerized)
{ /* Do we really need to run rpm from core_pattern hook? */
sprintf(source_filename, "/proc/%lu/root", (long)pid);
const char *cmd_args[6];
cmd_args[0] = BIN_DIR"/abrt-action-save-package-data";
cmd_args[1] = "-d";
cmd_args[2] = path;
cmd_args[3] = "-r";
cmd_args[4] = source_filename;
cmd_args[5] = NULL;
pid_t pid = fork_execv_on_steroids(0, (char **)cmd_args, NULL, NULL, path, 0);
int stat;
safe_waitpid(pid, &stat, 0);
}
char *newpath = xstrndup(path, path_len - (sizeof(".new")-1));
if (rename(path, newpath) == 0)
strcpy(path, newpath);
free(newpath);
if (core_size > 0)
log_notice("Saved core dump of pid %lu (%s) to %s (%llu bytes)",
(long)pid, executable, path, (long long)core_size);
if (abrtd_running)
notify_new_path(path);
/* rhbz#539551: "abrt going crazy when crashing process is respawned" */
if (g_settings_nMaxCrashReportsSize > 0)
{
/* x1.25 and round up to 64m: go a bit up, so that usual in-daemon trimming
* kicks in first, and we don't "fight" with it:
*/
unsigned maxsize = g_settings_nMaxCrashReportsSize + g_settings_nMaxCrashReportsSize / 4;
maxsize |= 63;
trim_problem_dirs(g_settings_dump_location, maxsize * (double)(1024*1024), path);
}
err = 0;
}
else
{
/* We didn't create abrt dump, but may need to create compat coredump */
return create_user_core(user_core_fd, pid, ulimit_c);
}
cleanup_and_exit:
if (dd)
dd_delete(dd);
if (user_core_fd >= 0)
unlinkat(dirfd(proc_cwd), core_basename, /*only files*/0);
if (proc_cwd != NULL)
closedir(proc_cwd);
return err;
}
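The hook above creates and renames files as a privileged process inside a dump directory, and the record below tags it CWE-59 (link following). The following is a minimal sketch of one defensive idiom for that class: create the file relative to an already-opened directory descriptor and refuse both pre-existing names and symlinks. The helper name, flags and mode are illustrative and are not taken from the hook.

/* Illustrative sketch, not abrt code: create a file inside dir_fd
 * without following a planted symlink and without reusing a file an
 * attacker created in advance. */
#include <fcntl.h>
#include <stdio.h>

static int create_dump_file(int dir_fd, const char *name)
{
    /* O_EXCL fails if the name already exists (for example a symlink
     * planted by the crashing user); O_NOFOLLOW refuses to traverse a
     * symlink at the final path component. */
    int fd = openat(dir_fd, name,
                    O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW | O_CLOEXEC,
                    0640);
    if (fd < 0)
        perror("openat");
    return fd;
}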
| CWE-59 | 179,776 | 1,461 | 287347886890565573834131591138153910367 | null | null | null |
linux | 8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4 | 1 |
static __init int sctp_init(void)
{
int i;
int status = -EINVAL;
unsigned long goal;
unsigned long limit;
int max_share;
int order;
sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
/* Allocate bind_bucket and chunk caches. */
status = -ENOBUFS;
sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
sizeof(struct sctp_bind_bucket),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!sctp_bucket_cachep)
goto out;
sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
sizeof(struct sctp_chunk),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!sctp_chunk_cachep)
goto err_chunk_cachep;
status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL);
if (status)
goto err_percpu_counter_init;
/* Implementation specific variables. */
/* Initialize default stream count setup information. */
sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
/* Initialize handle used for association ids. */
idr_init(&sctp_assocs_id);
limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_sctp_mem[0] = limit / 4 * 3;
sysctl_sctp_mem[1] = limit;
sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;
/* Set per-socket limits to no more than 1/128 the pressure threshold*/
limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7);
max_share = min(4UL*1024*1024, limit);
sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
sysctl_sctp_wmem[1] = 16*1024;
sysctl_sctp_wmem[2] = max(64*1024, max_share);
/* Size and allocate the association hash table.
* The methodology is similar to that of the tcp hash tables.
*/
if (totalram_pages >= (128 * 1024))
goal = totalram_pages >> (22 - PAGE_SHIFT);
else
goal = totalram_pages >> (24 - PAGE_SHIFT);
for (order = 0; (1UL << order) < goal; order++)
;
do {
sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
sizeof(struct sctp_hashbucket);
if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0)
continue;
sctp_assoc_hashtable = (struct sctp_hashbucket *)
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
} while (!sctp_assoc_hashtable && --order > 0);
if (!sctp_assoc_hashtable) {
pr_err("Failed association hash alloc\n");
status = -ENOMEM;
goto err_ahash_alloc;
}
for (i = 0; i < sctp_assoc_hashsize; i++) {
rwlock_init(&sctp_assoc_hashtable[i].lock);
INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain);
}
/* Allocate and initialize the endpoint hash table. */
sctp_ep_hashsize = 64;
sctp_ep_hashtable =
kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
if (!sctp_ep_hashtable) {
pr_err("Failed endpoint_hash alloc\n");
status = -ENOMEM;
goto err_ehash_alloc;
}
for (i = 0; i < sctp_ep_hashsize; i++) {
rwlock_init(&sctp_ep_hashtable[i].lock);
INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
}
/* Allocate and initialize the SCTP port hash table. */
do {
sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
sizeof(struct sctp_bind_hashbucket);
if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
continue;
sctp_port_hashtable = (struct sctp_bind_hashbucket *)
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
} while (!sctp_port_hashtable && --order > 0);
if (!sctp_port_hashtable) {
pr_err("Failed bind hash alloc\n");
status = -ENOMEM;
goto err_bhash_alloc;
}
for (i = 0; i < sctp_port_hashsize; i++) {
spin_lock_init(&sctp_port_hashtable[i].lock);
INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
}
pr_info("Hash tables configured (established %d bind %d)\n",
sctp_assoc_hashsize, sctp_port_hashsize);
sctp_sysctl_register();
INIT_LIST_HEAD(&sctp_address_families);
sctp_v4_pf_init();
sctp_v6_pf_init();
status = sctp_v4_protosw_init();
if (status)
goto err_protosw_init;
status = sctp_v6_protosw_init();
if (status)
goto err_v6_protosw_init;
status = register_pernet_subsys(&sctp_net_ops);
if (status)
goto err_register_pernet_subsys;
status = sctp_v4_add_protocol();
if (status)
goto err_add_protocol;
/* Register SCTP with inet6 layer. */
status = sctp_v6_add_protocol();
if (status)
goto err_v6_add_protocol;
out:
return status;
err_v6_add_protocol:
sctp_v4_del_protocol();
err_add_protocol:
unregister_pernet_subsys(&sctp_net_ops);
err_register_pernet_subsys:
sctp_v6_protosw_exit();
err_v6_protosw_init:
sctp_v4_protosw_exit();
err_protosw_init:
sctp_v4_pf_exit();
sctp_v6_pf_exit();
sctp_sysctl_unregister();
free_pages((unsigned long)sctp_port_hashtable,
get_order(sctp_port_hashsize *
sizeof(struct sctp_bind_hashbucket)));
err_bhash_alloc:
kfree(sctp_ep_hashtable);
err_ehash_alloc:
free_pages((unsigned long)sctp_assoc_hashtable,
get_order(sctp_assoc_hashsize *
sizeof(struct sctp_hashbucket)));
err_ahash_alloc:
percpu_counter_destroy(&sctp_sockets_allocated);
err_percpu_counter_init:
kmem_cache_destroy(sctp_chunk_cachep);
err_chunk_cachep:
kmem_cache_destroy(sctp_bucket_cachep);
goto out;
}
| CWE-119 | 179,778 | 1,463 | 167189804297399068515278735693731747529 | null | null | null |
abrt | 50ee8130fb4cd4ef1af7682a2c85dd99cb99424e | 1 |
int main(int argc, char **argv)
{
/* I18n */
setlocale(LC_ALL, "");
#if ENABLE_NLS
bindtextdomain(PACKAGE, LOCALEDIR);
textdomain(PACKAGE);
#endif
abrt_init(argv);
/* Can't keep these strings/structs static: _() doesn't support that */
const char *program_usage_string = _(
"& [-y] [-i BUILD_IDS_FILE|-i -] [-e PATH[:PATH]...]\n"
"\t[-r REPO]\n"
"\n"
"Installs debuginfo packages for all build-ids listed in BUILD_IDS_FILE to\n"
"ABRT system cache."
);
enum {
OPT_v = 1 << 0,
OPT_y = 1 << 1,
OPT_i = 1 << 2,
OPT_e = 1 << 3,
OPT_r = 1 << 4,
OPT_s = 1 << 5,
};
const char *build_ids = "build_ids";
const char *exact = NULL;
const char *repo = NULL;
const char *size_mb = NULL;
struct options program_options[] = {
OPT__VERBOSE(&g_verbose),
OPT_BOOL ('y', "yes", NULL, _("Noninteractive, assume 'Yes' to all questions")),
OPT_STRING('i', "ids", &build_ids, "BUILD_IDS_FILE", _("- means STDIN, default: build_ids")),
OPT_STRING('e', "exact", &exact, "EXACT", _("Download only specified files")),
OPT_STRING('r', "repo", &repo, "REPO", _("Pattern to use when searching for repos, default: *debug*")),
OPT_STRING('s', "size_mb", &size_mb, "SIZE_MB", _("Ignored option")),
OPT_END()
};
const unsigned opts = parse_opts(argc, argv, program_options, program_usage_string);
const gid_t egid = getegid();
const gid_t rgid = getgid();
const uid_t euid = geteuid();
const gid_t ruid = getuid();
/* We need to open the build ids file under the caller's UID/GID to avoid
* information disclosures when reading files with changed UID.
* Unfortunately, we cannot replace STDIN with the new fd because ABRT uses
* STDIN to communicate with the caller. So, the following code opens a
* dummy file descriptor to the build ids file and passes the new fd's proc
* path to the wrapped program in the ids argument.
* The new fd remains opened, the OS will close it for us. */
char *build_ids_self_fd = NULL;
if (strcmp("-", build_ids) != 0)
{
if (setregid(egid, rgid) < 0)
perror_msg_and_die("setregid(egid, rgid)");
if (setreuid(euid, ruid) < 0)
perror_msg_and_die("setreuid(euid, ruid)");
const int build_ids_fd = open(build_ids, O_RDONLY);
if (setregid(rgid, egid) < 0)
perror_msg_and_die("setregid(rgid, egid)");
if (setreuid(ruid, euid) < 0 )
perror_msg_and_die("setreuid(ruid, euid)");
if (build_ids_fd < 0)
perror_msg_and_die("Failed to open file '%s'", build_ids);
/* We are not going to free this memory. There is no place to do so. */
build_ids_self_fd = xasprintf("/proc/self/fd/%d", build_ids_fd);
}
/* name, -v, --ids, -, -y, -e, EXACT, -r, REPO, --, NULL */
const char *args[11];
{
const char *verbs[] = { "", "-v", "-vv", "-vvv" };
unsigned i = 0;
args[i++] = EXECUTABLE;
args[i++] = "--ids";
args[i++] = (build_ids_self_fd != NULL) ? build_ids_self_fd : "-";
if (g_verbose > 0)
args[i++] = verbs[g_verbose <= 3 ? g_verbose : 3];
if ((opts & OPT_y))
args[i++] = "-y";
if ((opts & OPT_e))
{
args[i++] = "--exact";
args[i++] = exact;
}
if ((opts & OPT_r))
{
args[i++] = "--repo";
args[i++] = repo;
}
args[i++] = "--";
args[i] = NULL;
}
/* Switch real user/group to effective ones.
* Otherwise yum library gets confused - gets EPERM (why??).
*/
/* do setregid only if we have to, to not upset selinux needlessly */
if (egid != rgid)
IGNORE_RESULT(setregid(egid, egid));
if (euid != ruid)
{
IGNORE_RESULT(setreuid(euid, euid));
/* We are suid'ed! */
/* Prevent malicious user from messing up with suid'ed process: */
#if 1
static const char *whitelist[] = {
"REPORT_CLIENT_SLAVE", // Check if the app is being run as a slave
"LANG",
};
const size_t wlsize = sizeof(whitelist)/sizeof(char*);
char *setlist[sizeof(whitelist)/sizeof(char*)] = { 0 };
char *p = NULL;
for (size_t i = 0; i < wlsize; i++)
if ((p = getenv(whitelist[i])) != NULL)
setlist[i] = xstrdup(p);
clearenv();
for (size_t i = 0; i < wlsize; i++)
if (setlist[i] != NULL)
{
xsetenv(whitelist[i], setlist[i]);
free(setlist[i]);
}
#else
/* Clear dangerous stuff from env */
static const char forbid[] =
"LD_LIBRARY_PATH" "\0"
"LD_PRELOAD" "\0"
"LD_TRACE_LOADED_OBJECTS" "\0"
"LD_BIND_NOW" "\0"
"LD_AOUT_LIBRARY_PATH" "\0"
"LD_AOUT_PRELOAD" "\0"
"LD_NOWARN" "\0"
"LD_KEEPDIR" "\0"
;
const char *p = forbid;
do {
unsetenv(p);
p += strlen(p) + 1;
} while (*p);
#endif
/* Set safe PATH */
char path_env[] = "PATH=/usr/sbin:/sbin:/usr/bin:/bin:"BIN_DIR":"SBIN_DIR;
if (euid != 0)
strcpy(path_env, "PATH=/usr/bin:/bin:"BIN_DIR);
putenv(path_env);
/* Use safe umask */
umask(0022);
}
execvp(EXECUTABLE, (char **)args);
error_msg_and_die("Can't execute %s", EXECUTABLE);
}
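The wrapper above opens the caller-supplied build-ids file with the caller's own IDs before escalating, and the record below tags it CWE-59. The sketch restates that idea as a standalone helper that temporarily takes the real IDs for the open and then restores the effective ones; it is illustrative only and simplifies the ID juggling done above.

/* Sketch: open a caller-supplied path subject to the caller's own
 * permissions inside a set-uid program, then hand back the descriptor
 * so later privileged code never re-resolves the path. Illustrative. */
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

static int open_as_caller(const char *path)
{
    uid_t saved_euid = geteuid();
    gid_t saved_egid = getegid();
    int fd = -1;

    /* Drop to the caller's real IDs (group first, then user). */
    if (setegid(getgid()) == 0 && seteuid(getuid()) == 0)
        fd = open(path, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);

    /* Regain the elevated IDs (user first, then group); the saved
     * set-user-ID makes this legal for a set-uid binary. */
    seteuid(saved_euid);
    setegid(saved_egid);
    return fd;   /* e.g. referenced later as /proc/self/fd/<fd> */
}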
| CWE-59 | 179,781 | 1,466 | 92934402516180269336018717421217665238 | null | null | null |
linux | 48900cb6af4282fa0fb6ff4d72a81aa3dadb5c39 | 1 |
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
if (!virtnet_validate_features(vdev))
return -EINVAL;
/* Find if host supports multiqueue virtio_net device */
err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
struct virtio_net_config,
max_virtqueue_pairs, &max_queue_pairs);
/* We need at least 2 queue's */
if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
max_queue_pairs = 1;
/* Allocate ourselves a network device with room for our info */
dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
if (!dev)
return -ENOMEM;
/* Set up network device as normal. */
dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
dev->ethtool_ops = &virtnet_ethtool_ops;
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
if (csum)
dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
/* Individual feature bits: what can host handle? */
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
dev->hw_features |= NETIF_F_TSO;
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
dev->hw_features |= NETIF_F_TSO6;
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
dev->hw_features |= NETIF_F_TSO_ECN;
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
dev->hw_features |= NETIF_F_UFO;
dev->features |= NETIF_F_GSO_ROBUST;
if (gso)
dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
/* (!csum && gso) case will be fixed by register_netdev() */
}
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
dev->features |= NETIF_F_RXCSUM;
dev->vlan_features = dev->features;
/* Configuration may specify what MAC to use. Otherwise random. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
virtio_cread_bytes(vdev,
offsetof(struct virtio_net_config, mac),
dev->dev_addr, dev->addr_len);
else
eth_hw_addr_random(dev);
/* Set up our device-specific information */
vi = netdev_priv(dev);
vi->dev = dev;
vi->vdev = vdev;
vdev->priv = vi;
vi->stats = alloc_percpu(struct virtnet_stats);
err = -ENOMEM;
if (vi->stats == NULL)
goto free;
for_each_possible_cpu(i) {
struct virtnet_stats *virtnet_stats;
virtnet_stats = per_cpu_ptr(vi->stats, i);
u64_stats_init(&virtnet_stats->tx_syncp);
u64_stats_init(&virtnet_stats->rx_syncp);
}
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
vi->hdr_len = sizeof(struct virtio_net_hdr);
if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->any_header_sg = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
/* Use single tx/rx queue pair as default */
vi->curr_queue_pairs = 1;
vi->max_queue_pairs = max_queue_pairs;
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
goto free_stats;
#ifdef CONFIG_SYSFS
if (vi->mergeable_rx_bufs)
dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
goto free_vqs;
}
virtio_device_ready(vdev);
/* Last of all, set up some receive buffers. */
for (i = 0; i < vi->curr_queue_pairs; i++) {
try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
/* If we didn't even get one input buffer, we're useless. */
if (vi->rq[i].vq->num_free ==
virtqueue_get_vring_size(vi->rq[i].vq)) {
free_unused_bufs(vi);
err = -ENOMEM;
goto free_recv_bufs;
}
}
vi->nb.notifier_call = &virtnet_cpu_callback;
err = register_hotcpu_notifier(&vi->nb);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
goto free_recv_bufs;
}
/* Assume link up if device can't report link status,
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
netif_carrier_off(dev);
schedule_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
netif_carrier_on(dev);
}
pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
dev->name, max_queue_pairs);
return 0;
free_recv_bufs:
vi->vdev->config->reset(vdev);
free_receive_bufs(vi);
unregister_netdev(dev);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
virtnet_del_vqs(vi);
free_stats:
free_percpu(vi->stats);
free:
free_netdev(dev);
return err;
}
| CWE-119 | 179,782 | 1,467 | 286743572691752184776898404441994023786 | null | null | null |
linux | 3f7352bf21f8fd7ba3e2fcef9488756f188e12be | 1 |
void bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header = NULL;
int proglen, oldproglen = 0;
struct jit_context ctx = {};
u8 *image = NULL;
int *addrs;
int pass;
int i;
if (!bpf_jit_enable)
return;
if (!prog || !prog->len)
return;
addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
if (!addrs)
return;
/* Before first pass, make a rough estimation of addrs[]
* each bpf instruction is translated to less than 64 bytes
*/
for (proglen = 0, i = 0; i < prog->len; i++) {
proglen += 64;
addrs[i] = proglen;
}
ctx.cleanup_addr = proglen;
for (pass = 0; pass < 10; pass++) {
proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
if (proglen <= 0) {
image = NULL;
if (header)
bpf_jit_binary_free(header);
goto out;
}
if (image) {
if (proglen != oldproglen) {
pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
proglen, oldproglen);
goto out;
}
break;
}
if (proglen == oldproglen) {
header = bpf_jit_binary_alloc(proglen, &image,
1, jit_fill_hole);
if (!header)
goto out;
}
oldproglen = proglen;
}
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, 0, image);
if (image) {
bpf_flush_icache(header, image + proglen);
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)image;
prog->jited = true;
}
out:
kfree(addrs);
}
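The JIT above sizes its output by re-running code generation until the emitted length stops changing, then allocates a buffer and emits into it, and the record below tags it CWE-17. The following is a generic, hedged sketch of that measure-then-emit loop in which any pass that wants to grow after the buffer exists is treated as a hard error instead of being written out. The emit_fn contract (measure only when buf is NULL, never write past cap) is an assumption of the sketch, not a kernel interface.

/* Generic sketch of a size-converging two-phase emitter. */
#include <stddef.h>
#include <stdlib.h>

typedef size_t (*emit_fn)(unsigned char *buf, size_t cap);

static unsigned char *emit_converged(emit_fn emit, size_t *out_len)
{
    unsigned char *buf = NULL;
    size_t cap = 0;

    for (int pass = 0; pass < 10; pass++) {
        size_t len = emit(buf, cap);

        if (buf) {
            if (len > cap) {            /* grew after allocation: refuse */
                free(buf);
                return NULL;
            }
            *out_len = len;
            return buf;
        }
        if (len == cap) {               /* size stable: allocate, emit again */
            buf = malloc(len ? len : 1);
            if (!buf)
                return NULL;
        }
        cap = len;
    }
    free(buf);                          /* never converged */
    return NULL;
}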
| CWE-17 | 179,783 | 1,468 | 286732731074854733591806138169679928315 | null | null | null |
libmspack | 18b6a2cc0b87536015bedd4f7763e6b02d5aa4f3 | 1 |
int lzxd_decompress(struct lzxd_stream *lzx, off_t out_bytes) {
/* bitstream and huffman reading variables */
register unsigned int bit_buffer;
register int bits_left, i=0;
unsigned char *i_ptr, *i_end;
register unsigned short sym;
int match_length, length_footer, extra, verbatim_bits, bytes_todo;
int this_run, main_element, aligned_bits, j;
unsigned char *window, *runsrc, *rundest, buf[12];
unsigned int frame_size=0, end_frame, match_offset, window_posn;
unsigned int R0, R1, R2;
/* easy answers */
if (!lzx || (out_bytes < 0)) return MSPACK_ERR_ARGS;
if (lzx->error) return lzx->error;
/* flush out any stored-up bytes before we begin */
i = lzx->o_end - lzx->o_ptr;
if ((off_t) i > out_bytes) i = (int) out_bytes;
if (i) {
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
}
if (out_bytes == 0) return MSPACK_ERR_OK;
/* restore local state */
RESTORE_BITS;
window = lzx->window;
window_posn = lzx->window_posn;
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
end_frame = (unsigned int)((lzx->offset + out_bytes) / LZX_FRAME_SIZE) + 1;
while (lzx->frame < end_frame) {
/* have we reached the reset interval? (if there is one?) */
if (lzx->reset_interval && ((lzx->frame % lzx->reset_interval) == 0)) {
if (lzx->block_remaining) {
D(("%d bytes remaining at reset interval", lzx->block_remaining))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-read the intel header and reset the huffman lengths */
lzxd_reset_state(lzx);
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
}
/* LZX DELTA format has chunk_size, not present in LZX format */
if (lzx->is_delta) {
ENSURE_BITS(16);
REMOVE_BITS(16);
}
/* read header if necessary */
if (!lzx->header_read) {
/* read 1 bit. if bit=0, intel filesize = 0.
* if bit=1, read intel filesize (32 bits) */
j = 0; READ_BITS(i, 1); if (i) { READ_BITS(i, 16); READ_BITS(j, 16); }
lzx->intel_filesize = (i << 16) | j;
lzx->header_read = 1;
}
/* calculate size of frame: all frames are 32k except the final frame
* which is 32kb or less. this can only be calculated when lzx->length
* has been filled in. */
frame_size = LZX_FRAME_SIZE;
if (lzx->length && (lzx->length - lzx->offset) < (off_t)frame_size) {
frame_size = lzx->length - lzx->offset;
}
/* decode until one more frame is available */
bytes_todo = lzx->frame_posn + frame_size - window_posn;
while (bytes_todo > 0) {
/* initialise new block, if one is needed */
if (lzx->block_remaining == 0) {
/* realign if previous block was an odd-sized UNCOMPRESSED block */
if ((lzx->block_type == LZX_BLOCKTYPE_UNCOMPRESSED) &&
(lzx->block_length & 1))
{
READ_IF_NEEDED;
i_ptr++;
}
/* read block type (3 bits) and block length (24 bits) */
READ_BITS(lzx->block_type, 3);
READ_BITS(i, 16); READ_BITS(j, 8);
lzx->block_remaining = lzx->block_length = (i << 8) | j;
/*D(("new block t%d len %u", lzx->block_type, lzx->block_length))*/
/* read individual block headers */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_ALIGNED:
/* read lengths of and build aligned huffman decoding tree */
for (i = 0; i < 8; i++) { READ_BITS(j, 3); lzx->ALIGNED_len[i] = j; }
BUILD_TABLE(ALIGNED);
/* no break -- rest of aligned header is same as verbatim */
case LZX_BLOCKTYPE_VERBATIM:
/* read lengths of and build main huffman decoding tree */
READ_LENGTHS(MAINTREE, 0, 256);
READ_LENGTHS(MAINTREE, 256, LZX_NUM_CHARS + lzx->num_offsets);
BUILD_TABLE(MAINTREE);
/* if the literal 0xE8 is anywhere in the block... */
if (lzx->MAINTREE_len[0xE8] != 0) lzx->intel_started = 1;
/* read lengths of and build lengths huffman decoding tree */
READ_LENGTHS(LENGTH, 0, LZX_NUM_SECONDARY_LENGTHS);
BUILD_TABLE_MAYBE_EMPTY(LENGTH);
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* because we can't assume otherwise */
lzx->intel_started = 1;
/* read 1-16 (not 0-15) bits to align to bytes */
ENSURE_BITS(16);
if (bits_left > 16) i_ptr -= 2;
bits_left = 0; bit_buffer = 0;
/* read 12 bytes of stored R0 / R1 / R2 values */
for (rundest = &buf[0], i = 0; i < 12; i++) {
READ_IF_NEEDED;
*rundest++ = *i_ptr++;
}
R0 = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
R1 = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24);
R2 = buf[8] | (buf[9] << 8) | (buf[10] << 16) | (buf[11] << 24);
break;
default:
D(("bad block type"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
}
/* decode more of the block:
* run = min(what's available, what's needed) */
this_run = lzx->block_remaining;
if (this_run > bytes_todo) this_run = bytes_todo;
/* assume we decode exactly this_run bytes, for now */
bytes_todo -= this_run;
lzx->block_remaining -= this_run;
/* decode at least this_run bytes */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_VERBATIM:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1=R0; R0 = match_offset; break;
case 2: match_offset = R2; R2=R0; R0 = match_offset; break;
case 3: match_offset = 1; R2=R1; R1=R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
READ_BITS(verbatim_bits, extra);
match_offset = position_base[match_offset] - 2 + verbatim_bits;
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_ALIGNED:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1 = R0; R0 = match_offset; break;
case 2: match_offset = R2; R2 = R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
match_offset = position_base[match_offset] - 2;
if (extra > 3) {
/* verbatim and aligned bits */
extra -= 3;
READ_BITS(verbatim_bits, extra);
match_offset += (verbatim_bits << 3);
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra == 3) {
/* aligned bits only */
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra > 0) { /* extra==1, extra==2 */
/* verbatim bits only */
READ_BITS(verbatim_bits, extra);
match_offset += verbatim_bits;
}
else /* extra == 0 */ {
/* ??? not defined in LZX specification! */
match_offset = 1;
}
/* update repeated offset LRU queue */
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* as this_run is limited not to wrap a frame, this also means it
* won't wrap the window (as the window is a multiple of 32k) */
rundest = &window[window_posn];
window_posn += this_run;
while (this_run > 0) {
if ((i = i_end - i_ptr) == 0) {
READ_IF_NEEDED;
}
else {
if (i > this_run) i = this_run;
lzx->sys->copy(i_ptr, rundest, (size_t) i);
rundest += i;
i_ptr += i;
this_run -= i;
}
}
break;
default:
return lzx->error = MSPACK_ERR_DECRUNCH; /* might as well */
}
/* did the final match overrun our desired this_run length? */
if (this_run < 0) {
if ((unsigned int)(-this_run) > lzx->block_remaining) {
D(("overrun went past end of block by %d (%d remaining)",
-this_run, lzx->block_remaining ))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
lzx->block_remaining -= -this_run;
}
} /* while (bytes_todo > 0) */
/* streams don't extend over frame boundaries */
if ((window_posn - lzx->frame_posn) != frame_size) {
D(("decode beyond output frame limits! %d != %d",
window_posn - lzx->frame_posn, frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-align input bitstream */
if (bits_left > 0) ENSURE_BITS(16);
if (bits_left & 15) REMOVE_BITS(bits_left & 15);
/* check that we've used all of the previous frame first */
if (lzx->o_ptr != lzx->o_end) {
D(("%ld avail bytes, new %d frame",
(long)(lzx->o_end - lzx->o_ptr), frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* does this intel block _really_ need decoding? */
if (lzx->intel_started && lzx->intel_filesize &&
(lzx->frame <= 32768) && (frame_size > 10))
{
unsigned char *data = &lzx->e8_buf[0];
unsigned char *dataend = &lzx->e8_buf[frame_size - 10];
signed int curpos = lzx->intel_curpos;
signed int filesize = lzx->intel_filesize;
signed int abs_off, rel_off;
/* copy e8 block to the e8 buffer and tweak if needed */
lzx->o_ptr = data;
lzx->sys->copy(&lzx->window[lzx->frame_posn], data, frame_size);
while (data < dataend) {
if (*data++ != 0xE8) { curpos++; continue; }
abs_off = data[0] | (data[1]<<8) | (data[2]<<16) | (data[3]<<24);
if ((abs_off >= -curpos) && (abs_off < filesize)) {
rel_off = (abs_off >= 0) ? abs_off - curpos : abs_off + filesize;
data[0] = (unsigned char) rel_off;
data[1] = (unsigned char) (rel_off >> 8);
data[2] = (unsigned char) (rel_off >> 16);
data[3] = (unsigned char) (rel_off >> 24);
}
data += 4;
curpos += 5;
}
lzx->intel_curpos += frame_size;
}
else {
lzx->o_ptr = &lzx->window[lzx->frame_posn];
if (lzx->intel_filesize) lzx->intel_curpos += frame_size;
}
lzx->o_end = &lzx->o_ptr[frame_size];
/* write a frame */
i = (out_bytes < (off_t)frame_size) ? (unsigned int)out_bytes : frame_size;
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
/* advance frame start position */
lzx->frame_posn += frame_size;
lzx->frame++;
/* wrap window / frame position pointers */
if (window_posn == lzx->window_size) window_posn = 0;
if (lzx->frame_posn == lzx->window_size) lzx->frame_posn = 0;
} /* while (lzx->frame < end_frame) */
if (out_bytes) {
D(("bytes left to output"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* store local state */
STORE_BITS;
lzx->window_posn = window_posn;
lzx->R0 = R0;
lzx->R1 = R1;
lzx->R2 = R2;
return MSPACK_ERR_OK;
}
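The decompressor above copies LZ77 matches out of a sliding window and has to reason about window wrap, stream offset and reference data size, and the record below tags it CWE-189. Below is a small generic sketch of the bounds checks such a match copy needs, including the requirement that the source offset must not reach behind data that was actually produced. It is not the libmspack routine; the names and the byte-wise copy are illustrative.

/* Generic sketch: bounds-checked LZ77 match copy in a circular window.
 * Assumes window_posn < window_size on entry. */
#include <stddef.h>

static int copy_match(unsigned char *window, size_t window_size,
                      size_t window_posn, size_t bytes_written,
                      size_t match_offset, size_t match_length)
{
    if (match_length > window_size - window_posn)
        return -1;                       /* run would pass the window end */
    if (match_offset == 0 || match_offset > window_size ||
        match_offset > bytes_written)
        return -1;                       /* offset points at nothing real */

    size_t src = (window_posn + window_size - match_offset) % window_size;
    for (size_t i = 0; i < match_length; i++) {
        window[window_posn + i] = window[src];
        src = (src + 1) % window_size;   /* source may wrap; dest may not */
    }
    return 0;
}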
| CWE-189 | 179,784 | 1,469 | 301703386296293359276381348380541750369 | null | null | null |
linux | 23b133bdc452aa441fcb9b82cbf6dd05cfd342d0 | 1 |
static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
struct kernel_lb_addr *iloc = &iinfo->i_location;
unsigned int link_count;
unsigned int indirections = 0;
int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
reread:
if (iloc->logicalBlockNum >=
sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
udf_debug("block=%d, partition=%d out of range\n",
iloc->logicalBlockNum, iloc->partitionReferenceNum);
return -EIO;
}
/*
* Set defaults, but the inode is still incomplete!
* Note: get_new_inode() sets the following on a new inode:
* i_sb = sb
* i_no = ino
* i_flags = sb->s_flags
* i_state = 0
* clean_inode(): zero fills and sets
* i_count = 1
* i_nlink = 1
* i_op = NULL;
*/
bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
return -EIO;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
inode->i_ino, ident);
goto out;
}
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
if (ident == TAG_IDENT_IE && ibh) {
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength) {
brelse(ibh);
memcpy(&iinfo->i_location, &loc,
sizeof(struct kernel_lb_addr));
if (++indirections > UDF_MAX_ICB_NESTING) {
udf_err(inode->i_sb,
"too many ICBs in ICB hierarchy"
" (max %d supported)\n",
UDF_MAX_ICB_NESTING);
goto out;
}
brelse(bh);
goto reread;
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
goto out;
}
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
iinfo->i_strat4096 = 1;
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_next_alloc_block = 0;
iinfo->i_next_alloc_goal = 0;
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, bs -
sizeof(struct extendedFileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
bs - sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
bs - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
ret = udf_alloc_i_data(inode, bs -
sizeof(struct unallocSpaceEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
bs - sizeof(struct unallocSpaceEntry));
return 0;
}
ret = -EIO;
read_lock(&sbi->s_cred_lock);
i_uid_write(inode, le32_to_cpu(fe->uid));
if (!uid_valid(inode->i_uid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
i_gid_write(inode, le32_to_cpu(fe->gid));
if (!gid_valid(inode->i_gid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_fmode;
else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_dmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_dmode;
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
if (!link_count) {
if (!hidden_inode) {
ret = -ESTALE;
goto out;
}
link_count = 1;
}
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenExtents = inode->i_size;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
fe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
efe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
iinfo->i_crtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}
inode->i_generation = iinfo->i_unique;
/* Sanity checks for files in ICB so that we don't get confused later */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
/*
* For file in ICB data is stored in allocation descriptor
* so sizes should match
*/
if (iinfo->i_lenAlloc != inode->i_size)
goto out;
/* File in ICB has to fit in there... */
if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
goto out;
}
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
inode->i_mode |= S_IFDIR;
inc_nlink(inode);
break;
case ICBTAG_FILE_TYPE_REALTIME:
case ICBTAG_FILE_TYPE_REGULAR:
case ICBTAG_FILE_TYPE_UNDEF:
case ICBTAG_FILE_TYPE_VAT20:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
inode->i_mode |= S_IFREG;
break;
case ICBTAG_FILE_TYPE_BLOCK:
inode->i_mode |= S_IFBLK;
break;
case ICBTAG_FILE_TYPE_CHAR:
inode->i_mode |= S_IFCHR;
break;
case ICBTAG_FILE_TYPE_FIFO:
init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
break;
case ICBTAG_FILE_TYPE_SOCKET:
init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
break;
case ICBTAG_FILE_TYPE_SYMLINK:
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode->i_mode = S_IFLNK | S_IRWXUGO;
break;
case ICBTAG_FILE_TYPE_MAIN:
udf_debug("METADATA FILE-----\n");
break;
case ICBTAG_FILE_TYPE_MIRROR:
udf_debug("METADATA MIRROR FILE-----\n");
break;
case ICBTAG_FILE_TYPE_BITMAP:
udf_debug("METADATA BITMAP FILE-----\n");
break;
default:
udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
inode->i_ino, fe->icbTag.fileType);
goto out;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (dsea) {
init_special_inode(inode, inode->i_mode,
MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
goto out;
}
ret = 0;
out:
brelse(bh);
return ret;
}
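udf_read_inode above copies on-disk length fields such as lengthExtendedAttr and lengthAllocDescs into the in-memory inode and later uses them as offsets, and the record below tags it CWE-189. A sketch of the kind of range check that keeps two untrusted length fields inside the block that carries them; the parameter names are illustrative and this is not the UDF driver's own check.

/* Sketch: validate two on-disk length fields against their block. */
#include <stddef.h>
#include <stdint.h>

static int check_entry_lengths(size_t block_size, size_t fixed_header,
                               uint32_t len_eattr, uint32_t len_alloc)
{
    if (fixed_header > block_size)
        return -1;

    size_t room = block_size - fixed_header;

    if (len_eattr > room || len_alloc > room - (size_t)len_eattr)
        return -1;                       /* fields overrun the block */
    return 0;
}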
| CWE-189 | 179,786 | 1,470 | 154377499296836303513685824320662860107 | null | null | null |
linux | 04bf464a5dfd9ade0dda918e44366c2c61fce80b | 1 |
static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_usb_hdr *usb_hdr, int len)
{
struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
switch (data_hdr->format) {
case OZ_DATA_F_MULTIPLE_FIXED: {
struct oz_multiple_fixed *body =
(struct oz_multiple_fixed *)data_hdr;
u8 *data = body->data;
int n = (len - sizeof(struct oz_multiple_fixed)+1)
/ body->unit_size;
while (n--) {
oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
data, body->unit_size);
data += body->unit_size;
}
}
break;
case OZ_DATA_F_ISOC_FIXED: {
struct oz_isoc_fixed *body =
(struct oz_isoc_fixed *)data_hdr;
int data_len = len-sizeof(struct oz_isoc_fixed)+1;
int unit_size = body->unit_size;
u8 *data = body->data;
int count;
int i;
if (!unit_size)
break;
count = data_len/unit_size;
for (i = 0; i < count; i++) {
oz_hcd_data_ind(usb_ctx->hport,
body->endpoint, data, unit_size);
data += unit_size;
}
}
break;
}
}
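The handler above computes an element count as len - sizeof(header) + 1 and then divides by a unit size taken from the packet, and the record below tags it CWE-189. The sketch below does the same derivation only after the length is known to cover the fixed header and the unit size is known to be nonzero, so the subtraction cannot wrap. The struct layout is illustrative, not the ozwpan wire format.

/* Sketch: derive an element count from an untrusted length field. */
#include <stddef.h>

struct fixed_hdr {
    unsigned char endpoint;
    unsigned char unit_size;
    unsigned char data[1];              /* payload starts here */
};

static int element_count(size_t len, size_t unit_size, size_t *count)
{
    size_t hdr = offsetof(struct fixed_hdr, data);

    if (unit_size == 0 || len < hdr)
        return -1;                      /* would divide by zero or wrap */
    *count = (len - hdr) / unit_size;
    return 0;
}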
| CWE-189 | 179,789 | 1,472 | 108503765742072033187979048520454976260 | null | null | null |
linux | d114b9fe78c8d6fc6e70808c2092aa307c36dc8e | 1 |
void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
{
struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1);
struct oz_usb_ctx *usb_ctx;
spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (usb_ctx->stopped)
goto done;
/* If sequence number is non-zero then check it is not a duplicate.
* Zero sequence numbers are always accepted.
*/
if (usb_hdr->elt_seq_num != 0) {
if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
/* Reject duplicate element. */
goto done;
}
usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
switch (usb_hdr->type) {
case OZ_GET_DESC_RSP: {
struct oz_get_desc_rsp *body =
(struct oz_get_desc_rsp *)usb_hdr;
int data_len = elt->length -
sizeof(struct oz_get_desc_rsp) + 1;
u16 offs = le16_to_cpu(get_unaligned(&body->offset));
u16 total_size =
le16_to_cpu(get_unaligned(&body->total_size));
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data,
data_len, offs, total_size);
}
break;
case OZ_SET_CONFIG_RSP: {
struct oz_set_config_rsp *body =
(struct oz_set_config_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
body->rcode, NULL, 0);
}
break;
case OZ_SET_INTERFACE_RSP: {
struct oz_set_interface_rsp *body =
(struct oz_set_interface_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport,
body->req_id, body->rcode, NULL, 0);
}
break;
case OZ_VENDOR_CLASS_RSP: {
struct oz_vendor_class_rsp *body =
(struct oz_vendor_class_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data, elt->length-
sizeof(struct oz_vendor_class_rsp)+1);
}
break;
case OZ_USB_ENDPOINT_DATA:
oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length);
break;
}
done:
oz_usb_put(usb_ctx);
}
| CWE-119 | 179,790 | 1,473 | 205444369337433040190709519014315012602 | null | null | null |
linux | b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c | 1 |
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
int length, int offset, int total_size)
{
struct oz_port *port = hport;
struct urb *urb;
int err = 0;
oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
length, offset, total_size);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb)
return;
if (status == 0) {
int copy_len;
int required_size = urb->transfer_buffer_length;
if (required_size > total_size)
required_size = total_size;
copy_len = required_size-offset;
if (length <= copy_len)
copy_len = length;
memcpy(urb->transfer_buffer+offset, desc, copy_len);
offset += copy_len;
if (offset < required_size) {
struct usb_ctrlrequest *setup =
(struct usb_ctrlrequest *)urb->setup_packet;
unsigned wvalue = le16_to_cpu(setup->wValue);
if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
err = -ENOMEM;
else if (oz_usb_get_desc_req(port->hpd, req_id,
setup->bRequestType, (u8)(wvalue>>8),
(u8)wvalue, setup->wIndex, offset,
required_size-offset)) {
oz_dequeue_ep_urb(port, 0, 0, urb);
err = -ENOMEM;
}
if (err == 0)
return;
}
}
urb->actual_length = total_size;
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
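The completion path above computes copy_len as required_size - offset with an offset supplied by the remote side, and the record below tags it CWE-189. Below is a sketch of clamping such an (offset, length) pair against the real destination size before the memcpy; the names are illustrative and the function is not the host-controller code.

/* Sketch: copy src into dst at a peer-supplied offset, never past dst. */
#include <stddef.h>
#include <string.h>

static size_t copy_at_offset(unsigned char *dst, size_t dst_size,
                             size_t offset,
                             const unsigned char *src, size_t src_len)
{
    if (offset >= dst_size)
        return 0;                        /* nothing can legally land here */

    size_t room = dst_size - offset;
    size_t n = src_len < room ? src_len : room;

    memcpy(dst + offset, src, n);
    return n;                            /* bytes actually copied */
}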
| CWE-189 | 179,791 | 1,474 | 320708195163871116363817901150537816688 | null | null | null |
rawstudio | 983bda1f0fa5fa86884381208274198a620f006e | 1 |
int CLASS ljpeg_start (struct jhead *jh, int info_only)
{
int c, tag, len;
uchar data[0x10000];
const uchar *dp;
memset (jh, 0, sizeof *jh);
jh->restart = INT_MAX;
fread (data, 2, 1, ifp);
if (data[1] != 0xd8) return 0;
do {
fread (data, 2, 2, ifp);
tag = data[0] << 8 | data[1];
len = (data[2] << 8 | data[3]) - 2;
if (tag <= 0xff00) return 0;
fread (data, 1, len, ifp);
switch (tag) {
case 0xffc3:
jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
case 0xffc0:
jh->bits = data[0];
jh->high = data[1] << 8 | data[2];
jh->wide = data[3] << 8 | data[4];
jh->clrs = data[5] + jh->sraw;
if (len == 9 && !dng_version) getc(ifp);
break;
case 0xffc4:
if (info_only) break;
for (dp = data; dp < data+len && (c = *dp++) < 4; )
jh->free[c] = jh->huff[c] = make_decoder_ref (&dp);
break;
case 0xffda:
jh->psv = data[1+data[0]*2];
jh->bits -= data[3+data[0]*2] & 15;
break;
case 0xffdd:
jh->restart = data[0] << 8 | data[1];
}
} while (tag != 0xffda);
if (info_only) return 1;
FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c];
if (jh->sraw) {
FORC(4) jh->huff[2+c] = jh->huff[1];
FORC(jh->sraw) jh->huff[1+c] = jh->huff[0];
}
jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4);
merror (jh->row, "ljpeg_start()");
return zero_after_ff = 1;
}
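The parser above takes a 16-bit segment length from the file, subtracts 2 and uses the result directly as a read size into a fixed scratch buffer, and the record below tags it CWE-189. A hedged sketch of reading such a marker segment defensively, rejecting lengths below 2 and lengths that would overrun the scratch buffer; it is not the dcraw parser and the helper name is invented.

/* Sketch: read one JPEG-style marker segment with length validation. */
#include <stdio.h>

static int read_segment(FILE *fp, unsigned char *buf, size_t bufsz,
                        unsigned *tag_out, size_t *len_out)
{
    unsigned char hdr[4];

    if (fread(hdr, 1, 4, fp) != 4)
        return -1;
    unsigned tag = (unsigned)hdr[0] << 8 | hdr[1];
    unsigned len = (unsigned)hdr[2] << 8 | hdr[3];

    if (len < 2 || (size_t)(len - 2) > bufsz)
        return -1;                       /* malformed or oversized segment */
    if (fread(buf, 1, len - 2, fp) != (size_t)(len - 2))
        return -1;
    *tag_out = tag;
    *len_out = len - 2;
    return 0;
}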
| CWE-189 | 179,794 | 1,477 | 291937476956930911673344322008402224120 | null | null | null |
linux | a134f083e79fb4c3d0a925691e732c56911b4326 | 1 |
void ping_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
if (sk_hashed(sk)) {
write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
write_unlock_bh(&ping_table.lock);
}
}
| 179,795 | 1,478 | 310891391724566178660846077299025608896 | null | null | null |
FFmpeg | e8714f6f93d1a32f4e4655209960afcf4c185214 | 1 |
void ff_h264_free_tables(H264Context *h, int free_rbsp)
{
int i;
H264Context *hx;
av_freep(&h->intra4x4_pred_mode);
av_freep(&h->chroma_pred_mode_table);
av_freep(&h->cbp_table);
av_freep(&h->mvd_table[0]);
av_freep(&h->mvd_table[1]);
av_freep(&h->direct_table);
av_freep(&h->non_zero_count);
av_freep(&h->slice_table_base);
h->slice_table = NULL;
av_freep(&h->list_counts);
av_freep(&h->mb2b_xy);
av_freep(&h->mb2br_xy);
av_buffer_pool_uninit(&h->qscale_table_pool);
av_buffer_pool_uninit(&h->mb_type_pool);
av_buffer_pool_uninit(&h->motion_val_pool);
av_buffer_pool_uninit(&h->ref_index_pool);
if (free_rbsp && h->DPB) {
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
ff_h264_unref_picture(h, &h->DPB[i]);
av_freep(&h->DPB);
} else if (h->DPB) {
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
h->DPB[i].needs_realloc = 1;
}
h->cur_pic_ptr = NULL;
for (i = 0; i < H264_MAX_THREADS; i++) {
hx = h->thread_context[i];
if (!hx)
continue;
av_freep(&hx->top_borders[1]);
av_freep(&hx->top_borders[0]);
av_freep(&hx->bipred_scratchpad);
av_freep(&hx->edge_emu_buffer);
av_freep(&hx->dc_val_base);
av_freep(&hx->er.mb_index2xy);
av_freep(&hx->er.error_status_table);
av_freep(&hx->er.er_temp_buffer);
av_freep(&hx->er.mbintra_table);
av_freep(&hx->er.mbskip_table);
if (free_rbsp) {
av_freep(&hx->rbsp_buffer[1]);
av_freep(&hx->rbsp_buffer[0]);
hx->rbsp_buffer_size[0] = 0;
hx->rbsp_buffer_size[1] = 0;
}
if (i)
av_freep(&h->thread_context[i]);
}
}
| 179,796 | 1,479 | 160444303816193315214962956231598166990 | null | null | null |
linux | 8b01fc86b9f425899f8a3a8fc1c47d73c2c20543 | 1 |
int prepare_binprm(struct linux_binprm *bprm)
{
struct inode *inode = file_inode(bprm->file);
umode_t mode = inode->i_mode;
int retval;
/* clear any previous set[ug]id data from a previous binary */
bprm->cred->euid = current_euid();
bprm->cred->egid = current_egid();
if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
!task_no_new_privs(current) &&
kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
/* Set-uid? */
if (mode & S_ISUID) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->euid = inode->i_uid;
}
/* Set-gid? */
/*
* If setgid is set but no group execute bit then this
* is a candidate for mandatory locking, not a setgid
* executable.
*/
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->egid = inode->i_gid;
}
}
/* fill in binprm security blob */
retval = security_bprm_set_creds(bprm);
if (retval)
return retval;
bprm->cred_prepared = 1;
memset(bprm->buf, 0, BINPRM_BUF_SIZE);
return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}
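prepare_binprm above reads mode, uid and gid from the inode while deciding whether to grant set-id credentials, and the record below tags it CWE-362 (race condition). The following is a generic user-space sketch of the snapshot-under-lock pattern that makes such multi-field reads atomic with respect to concurrent writers; it uses pthreads and invented types, not kernel locking, and only illustrates the pattern.

/* Sketch: copy every field feeding a privilege decision in one locked
 * section, so writers cannot interleave between the reads. */
#include <pthread.h>
#include <sys/types.h>

struct obj {
    pthread_mutex_t lock;
    mode_t mode;
    uid_t uid;
    gid_t gid;
};

struct cred_snapshot {
    mode_t mode;
    uid_t uid;
    gid_t gid;
};

static struct cred_snapshot snapshot(struct obj *o)
{
    struct cred_snapshot s;

    pthread_mutex_lock(&o->lock);
    s.mode = o->mode;                    /* all three fields are read    */
    s.uid  = o->uid;                     /* under the same lock a writer */
    s.gid  = o->gid;                     /* takes to change ownership    */
    pthread_mutex_unlock(&o->lock);
    return s;
}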
| CWE-362 | 179,797 | 1,480 | 335498349453083006685277225065572088694 | null | null | null |
linux | ccfe8c3f7e52ae83155cb038753f4c75b774ca8a | 1 |
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
unsigned long tempCipherLen = 0;
__be32 counter = cpu_to_be32(1);
int retval = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
u32 key_len = ctx->aes_key_expanded.key_length;
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv_and_authTag[32+AESNI_ALIGN];
u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
u8 *authTag = iv + 16;
struct scatter_walk src_sg_walk;
struct scatter_walk assoc_sg_walk;
struct scatter_walk dst_sg_walk;
unsigned int i;
if (unlikely((req->cryptlen < auth_tag_len) ||
(req->assoclen != 8 && req->assoclen != 12)))
return -EINVAL;
if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
return -EINVAL;
if (unlikely(key_len != AES_KEYSIZE_128 &&
key_len != AES_KEYSIZE_192 &&
key_len != AES_KEYSIZE_256))
return -EINVAL;
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length */
/* equal to 8 or 12 bytes */
tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
/* IV below built */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
scatterwalk_start(&assoc_sg_walk, req->assoc);
src = scatterwalk_map(&src_sg_walk);
assoc = scatterwalk_map(&assoc_sg_walk);
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
dst = scatterwalk_map(&dst_sg_walk);
}
} else {
/* Allocate memory for src, dst, assoc */
src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
if (!src)
return -ENOMEM;
assoc = (src + req->cryptlen + auth_tag_len);
scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
scatterwalk_map_and_copy(assoc, req->assoc, 0,
req->assoclen, 0);
dst = src;
}
aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
authTag, auth_tag_len);
/* Compare generated tag with passed in tag. */
retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
-EBADMSG : 0;
if (one_entry_in_sg) {
if (unlikely(req->src != req->dst)) {
scatterwalk_unmap(dst);
scatterwalk_done(&dst_sg_walk, 0, 0);
}
scatterwalk_unmap(src);
scatterwalk_unmap(assoc);
scatterwalk_done(&src_sg_walk, 0, 0);
scatterwalk_done(&assoc_sg_walk, 0, 0);
} else {
scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
kfree(src);
}
return retval;
}
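In the multi-entry scatterlist path above, the scratch allocation is sized with one expression (cryptlen + assoclen) while the assoc pointer is carved out of it with another (cryptlen + auth_tag_len), and the record below tags the function CWE-119. The sketch derives the allocation size and every interior pointer from a single layout so the size and the offsets cannot drift apart; it is an illustration of the idiom, not the aesni driver.

/* Sketch: one layout drives both the allocation and the offsets. */
#include <stddef.h>
#include <stdlib.h>

struct scratch {
    unsigned char *base;
    unsigned char *data;                 /* ciphertext / plaintext region */
    unsigned char *assoc;                /* associated-data region        */
};

static int scratch_alloc(struct scratch *s, size_t data_len, size_t assoc_len)
{
    size_t total = data_len + assoc_len; /* single source of truth */

    if (total < data_len)                /* addition overflow */
        return -1;
    s->base = malloc(total ? total : 1);
    if (!s->base)
        return -1;
    s->data  = s->base;
    s->assoc = s->base + data_len;       /* offset reuses the same term */
    return 0;
}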
| CWE-119 | 179,798 | 1,481 | 235636219205578534132022485991165745770 | null | null | null |
linux | 6fd99094de2b83d1d4c8457f2c83483b2828e75a | 1 |
static void ndisc_router_discovery(struct sk_buff *skb)
{
struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
struct neighbour *neigh = NULL;
struct inet6_dev *in6_dev;
struct rt6_info *rt = NULL;
int lifetime;
struct ndisc_options ndopts;
int optlen;
unsigned int pref = 0;
__u8 *opt = (__u8 *)(ra_msg + 1);
optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) -
sizeof(struct ra_msg);
ND_PRINTK(2, info,
"RA: %s, dev: %s\n",
__func__, skb->dev->name);
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
ND_PRINTK(2, warn, "RA: source address is not link-local\n");
return;
}
if (optlen < 0) {
ND_PRINTK(2, warn, "RA: packet too short\n");
return;
}
#ifdef CONFIG_IPV6_NDISC_NODETYPE
if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
ND_PRINTK(2, warn, "RA: from host or unauthorized router\n");
return;
}
#endif
/*
* set the RA_RECV flag in the interface
*/
in6_dev = __in6_dev_get(skb->dev);
if (in6_dev == NULL) {
ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
skb->dev->name);
return;
}
if (!ndisc_parse_options(opt, optlen, &ndopts)) {
ND_PRINTK(2, warn, "RA: invalid ND options\n");
return;
}
if (!ipv6_accept_ra(in6_dev)) {
ND_PRINTK(2, info,
"RA: %s, did not accept ra for dev: %s\n",
__func__, skb->dev->name);
goto skip_linkparms;
}
#ifdef CONFIG_IPV6_NDISC_NODETYPE
/* skip link-specific parameters from interior routers */
if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
ND_PRINTK(2, info,
"RA: %s, nodetype is NODEFAULT, dev: %s\n",
__func__, skb->dev->name);
goto skip_linkparms;
}
#endif
if (in6_dev->if_flags & IF_RS_SENT) {
/*
* flag that an RA was received after an RS was sent
* out on this interface.
*/
in6_dev->if_flags |= IF_RA_RCVD;
}
/*
* Remember the managed/otherconf flags from most recently
* received RA message (RFC 2462) -- yoshfuji
*/
in6_dev->if_flags = (in6_dev->if_flags & ~(IF_RA_MANAGED |
IF_RA_OTHERCONF)) |
(ra_msg->icmph.icmp6_addrconf_managed ?
IF_RA_MANAGED : 0) |
(ra_msg->icmph.icmp6_addrconf_other ?
IF_RA_OTHERCONF : 0);
if (!in6_dev->cnf.accept_ra_defrtr) {
ND_PRINTK(2, info,
"RA: %s, defrtr is false for dev: %s\n",
__func__, skb->dev->name);
goto skip_defrtr;
}
/* Do not accept RA with source-addr found on local machine unless
* accept_ra_from_local is set to true.
*/
if (!in6_dev->cnf.accept_ra_from_local &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
NULL, 0)) {
ND_PRINTK(2, info,
"RA from local address detected on dev: %s: default router ignored\n",
skb->dev->name);
goto skip_defrtr;
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
#ifdef CONFIG_IPV6_ROUTER_PREF
pref = ra_msg->icmph.icmp6_router_pref;
/* 10b is handled as if it were 00b (medium) */
if (pref == ICMPV6_ROUTER_PREF_INVALID ||
!in6_dev->cnf.accept_ra_rtr_pref)
pref = ICMPV6_ROUTER_PREF_MEDIUM;
#endif
rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
if (rt) {
neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
if (!neigh) {
ND_PRINTK(0, err,
"RA: %s got default router without neighbour\n",
__func__);
ip6_rt_put(rt);
return;
}
}
if (rt && lifetime == 0) {
ip6_del_rt(rt);
rt = NULL;
}
ND_PRINTK(3, info, "RA: rt: %p lifetime: %d, for dev: %s\n",
rt, lifetime, skb->dev->name);
if (rt == NULL && lifetime) {
ND_PRINTK(3, info, "RA: adding default router\n");
rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
if (rt == NULL) {
ND_PRINTK(0, err,
"RA: %s failed to add default route\n",
__func__);
return;
}
neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
if (neigh == NULL) {
ND_PRINTK(0, err,
"RA: %s got default router without neighbour\n",
__func__);
ip6_rt_put(rt);
return;
}
neigh->flags |= NTF_ROUTER;
} else if (rt) {
rt->rt6i_flags = (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
}
if (rt)
rt6_set_expires(rt, jiffies + (HZ * lifetime));
if (ra_msg->icmph.icmp6_hop_limit) {
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
if (rt)
dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
}
skip_defrtr:
/*
* Update Reachable Time and Retrans Timer
*/
if (in6_dev->nd_parms) {
unsigned long rtime = ntohl(ra_msg->retrans_timer);
if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/HZ) {
rtime = (rtime*HZ)/1000;
if (rtime < HZ/10)
rtime = HZ/10;
NEIGH_VAR_SET(in6_dev->nd_parms, RETRANS_TIME, rtime);
in6_dev->tstamp = jiffies;
inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
}
rtime = ntohl(ra_msg->reachable_time);
if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/(3*HZ)) {
rtime = (rtime*HZ)/1000;
if (rtime < HZ/10)
rtime = HZ/10;
if (rtime != NEIGH_VAR(in6_dev->nd_parms, BASE_REACHABLE_TIME)) {
NEIGH_VAR_SET(in6_dev->nd_parms,
BASE_REACHABLE_TIME, rtime);
NEIGH_VAR_SET(in6_dev->nd_parms,
GC_STALETIME, 3 * rtime);
in6_dev->nd_parms->reachable_time = neigh_rand_reach_time(rtime);
in6_dev->tstamp = jiffies;
inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
}
}
}
skip_linkparms:
/*
* Process options.
*/
if (!neigh)
neigh = __neigh_lookup(&nd_tbl, &ipv6_hdr(skb)->saddr,
skb->dev, 1);
if (neigh) {
u8 *lladdr = NULL;
if (ndopts.nd_opts_src_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr,
skb->dev);
if (!lladdr) {
ND_PRINTK(2, warn,
"RA: invalid link-layer address length\n");
goto out;
}
}
neigh_update(neigh, lladdr, NUD_STALE,
NEIGH_UPDATE_F_WEAK_OVERRIDE|
NEIGH_UPDATE_F_OVERRIDE|
NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
NEIGH_UPDATE_F_ISROUTER);
}
if (!ipv6_accept_ra(in6_dev)) {
ND_PRINTK(2, info,
"RA: %s, accept_ra is false for dev: %s\n",
__func__, skb->dev->name);
goto out;
}
#ifdef CONFIG_IPV6_ROUTE_INFO
if (!in6_dev->cnf.accept_ra_from_local &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
NULL, 0)) {
ND_PRINTK(2, info,
"RA from local address detected on dev: %s: router info ignored.\n",
skb->dev->name);
goto skip_routeinfo;
}
if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_ri;
p;
p = ndisc_next_option(p, ndopts.nd_opts_ri_end)) {
struct route_info *ri = (struct route_info *)p;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT &&
ri->prefix_len == 0)
continue;
#endif
if (ri->prefix_len == 0 &&
!in6_dev->cnf.accept_ra_defrtr)
continue;
if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
continue;
rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
&ipv6_hdr(skb)->saddr);
}
}
skip_routeinfo:
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
/* skip link-specific ndopts from interior routers */
if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
ND_PRINTK(2, info,
"RA: %s, nodetype is NODEFAULT (interior routes), dev: %s\n",
__func__, skb->dev->name);
goto out;
}
#endif
if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_pi;
p;
p = ndisc_next_option(p, ndopts.nd_opts_pi_end)) {
addrconf_prefix_rcv(skb->dev, (u8 *)p,
(p->nd_opt_len) << 3,
ndopts.nd_opts_src_lladdr != NULL);
}
}
if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) {
__be32 n;
u32 mtu;
memcpy(&n, ((u8 *)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu));
mtu = ntohl(n);
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
} else if (in6_dev->cnf.mtu6 != mtu) {
in6_dev->cnf.mtu6 = mtu;
if (rt)
dst_metric_set(&rt->dst, RTAX_MTU, mtu);
rt6_mtu_change(skb->dev, mtu);
}
}
if (ndopts.nd_useropts) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_useropts;
p;
p = ndisc_next_useropt(p, ndopts.nd_useropts_end)) {
ndisc_ra_useropt(skb, p);
}
}
if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
ND_PRINTK(2, warn, "RA: invalid RA options\n");
}
out:
ip6_rt_put(rt);
if (neigh)
neigh_release(neigh);
}
|
CWE-17
| 179,810 | 1,488 |
143930665896709238485982707360067832484
| null | null | null |
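Editor's note on the record above: the ndisc_router_discovery() excerpt appears to rely on ndisc_parse_options() having validated every option's length before the MTU, prefix-information, and route-information options are dereferenced. As a hedged, self-contained illustration of the RFC 4861 type/length/value layout those checks enforce (length counted in 8-octet units, zero-length options rejected), here is a minimal user-space walker; every name in it is hypothetical and it is not kernel code.

/* Editorial sketch (not from the dataset): walk RFC 4861 neighbour-discovery
 * options with explicit bounds checks, the kind of validation the router
 * advertisement handler above depends on before touching any option body. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ND_OPT_MTU 5

static int walk_nd_options(const uint8_t *opt, size_t optlen)
{
	size_t off = 0;

	while (off + 2 <= optlen) {
		uint8_t type = opt[off];
		size_t len = (size_t)opt[off + 1] * 8;   /* length is in 8-octet units */

		if (len == 0 || off + len > optlen)
			return -1;                       /* malformed option */

		if (type == ND_OPT_MTU && len == 8) {
			/* Type, Length, Reserved[2], MTU[4] (RFC 4861, section 4.6.4). */
			uint32_t mtu = (uint32_t)opt[off + 4] << 24 |
				       (uint32_t)opt[off + 5] << 16 |
				       (uint32_t)opt[off + 6] << 8  |
				       (uint32_t)opt[off + 7];
			printf("MTU option: %u\n", mtu);
		}
		off += len;
	}
	return off == optlen ? 0 : -1;
}

int main(void)
{
	/* A single MTU option advertising 1500 bytes. */
	const uint8_t ra_opts[] = { 5, 1, 0, 0, 0, 0, 0x05, 0xdc };

	return walk_nd_options(ra_opts, sizeof(ra_opts)) == 0 ? 0 : 1;
}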
das_watchdog
|
bd20bb02e75e2c0483832b52f2577253febfb690
| 1 |
static char *get_pid_environ_val(pid_t pid,char *val){
char temp[500];
int i=0;
int foundit=0;
FILE *fp;
sprintf(temp,"/proc/%d/environ",pid);
fp=fopen(temp,"r");
if(fp==NULL)
return NULL;
for(;;){
temp[i]=fgetc(fp);
if(foundit==1 && (temp[i]==0 || temp[i]=='\0' || temp[i]==EOF)){
char *ret;
temp[i]=0;
ret=malloc(strlen(temp)+10);
sprintf(ret,"%s",temp);
fclose(fp);
return ret;
}
switch(temp[i]){
case EOF:
fclose(fp);
return NULL;
case '=':
temp[i]=0;
if(!strcmp(temp,val)){
foundit=1;
}
i=0;
break;
case '\0':
i=0;
break;
default:
i++;
}
}
}
|
CWE-119
| 179,811 | 1,489 |
127977327279496993347708756622464012906
| null | null | null |
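Editor's note on the das_watchdog record above (CWE-119): the loop stores fgetc()'s int return value in the char array temp[500] and increments i without any bound, so a long enough /proc/<pid>/environ entry writes past the buffer, and comparing a char against EOF is unreliable where char is unsigned. As an illustration only, and not the upstream bd20bb02 patch, a bounded variant might look like the sketch below; it keeps the original's overall flow but truncates oversized entries instead of overflowing.

/* Editorial sketch, not the upstream das_watchdog fix: a bounded variant of
 * get_pid_environ_val() that never writes past the scratch buffer and keeps
 * fgetc()'s return value in an int so EOF is detected reliably. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

static char *get_pid_environ_val_bounded(pid_t pid, const char *val)
{
	char path[64];
	char temp[500];
	size_t i = 0;
	int c;
	int foundit = 0;
	FILE *fp;

	snprintf(path, sizeof(path), "/proc/%d/environ", (int)pid);
	fp = fopen(path, "r");
	if (fp == NULL)
		return NULL;

	for (;;) {
		c = fgetc(fp);

		if (foundit && (c == '\0' || c == EOF)) {
			temp[i] = '\0';
			fclose(fp);
			return strdup(temp);     /* caller frees; NULL on OOM */
		}
		if (c == EOF) {
			fclose(fp);
			return NULL;
		}
		if (c == '=' && !foundit) {
			temp[i] = '\0';
			if (!strcmp(temp, val))
				foundit = 1;
			i = 0;
		} else if (c == '\0') {
			i = 0;                   /* start of the next NAME=value entry */
		} else if (i + 1 < sizeof(temp)) {
			temp[i++] = (char)c;     /* entries longer than the buffer are truncated */
		}
	}
}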
krb5
|
3db8dfec1ef50ddd78d6ba9503185995876a39fd
| 1 |
iakerb_gss_export_sec_context(OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
gss_buffer_t interprocess_token)
{
OM_uint32 maj;
iakerb_ctx_id_t ctx = (iakerb_ctx_id_t)context_handle;
/* We don't currently support exporting partially established contexts. */
if (!ctx->established)
return GSS_S_UNAVAILABLE;
maj = krb5_gss_export_sec_context(minor_status, &ctx->gssc,
interprocess_token);
if (ctx->gssc == GSS_C_NO_CONTEXT) {
iakerb_release_context(ctx);
*context_handle = GSS_C_NO_CONTEXT;
}
return maj;
}
|
CWE-119
| 179,812 | 1,490 |
327336867353175950733306382456835293672
| null | null | null |
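Editor's note on the krb5 record above: in GSS-API a mechanism's export_sec_context receives a pointer to the caller's context handle so it can invalidate it, and the snippet casts that pointer itself to iakerb_ctx_id_t rather than dereferencing it first, which looks like the flaw this record captures. That reading is an inference from the snippet alone, not a restatement of the upstream 3db8dfec fix. The self-contained sketch below mimics the pattern with entirely hypothetical names.

/* Editorial sketch, hypothetical names only: a handle-exporting function must
 * dereference the pointer-to-handle before casting to the concrete context
 * type; casting the pointer itself would make every later field access read
 * the wrong object. */
#include <stdio.h>
#include <stdlib.h>

struct mech_ctx {
	int established;
};

typedef void *generic_handle;            /* stand-in for gss_ctx_id_t */

static int export_ctx(generic_handle *handle_ptr)
{
	/* Correct: dereference first, then cast to the mechanism-specific type. */
	struct mech_ctx *ctx = (struct mech_ctx *)*handle_ptr;

	if (!ctx->established)
		return -1;

	/* ... serialize ctx into an interprocess token here ... */
	free(ctx);
	*handle_ptr = NULL;              /* invalidate the caller's handle */
	return 0;
}

int main(void)
{
	struct mech_ctx *ctx = malloc(sizeof(*ctx));
	generic_handle handle;

	if (ctx == NULL)
		return 1;
	ctx->established = 1;
	handle = ctx;

	printf("export: %d, handle after export: %p\n",
	       export_ctx(&handle), handle);
	return 0;
}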