project
stringclasses 633
values | commit_id
stringlengths 7
81
| target
int64 0
1
| func
stringlengths 5
484k
| cwe
stringclasses 131
values | big_vul_idx
float64 0
189k
⌀ | idx
int64 0
522k
| hash
stringlengths 34
39
| size
float64 1
24k
⌀ | message
stringlengths 0
11.5k
⌀ | dataset
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|
openssl
|
c175308407858afff3fc8c2e5e085d94d12edc7d
| 1 |
int BN_hex2bn(BIGNUM **bn, const char *a)
{
BIGNUM *ret = NULL;
BN_ULONG l = 0;
int neg = 0, h, m, i, j, k, c;
int num;
if ((a == NULL) || (*a == '\0'))
return (0);
if (*a == '-') {
neg = 1;
a++;
a++;
}
for (i = 0; isxdigit((unsigned char)a[i]); i++) ;
num = i + neg;
if (bn == NULL)
return (0);
} else {
ret = *bn;
BN_zero(ret);
}
| 178,422 | 413 |
107282606395062247454119797341064318614
| null | null | null |
|
openssl
|
6c88c71b4e4825c7bc0489306d062d017634eb88
| 1 |
static int dsa_priv_decode(EVP_PKEY *pkey, PKCS8_PRIV_KEY_INFO *p8)
{
const unsigned char *p, *pm;
int pklen, pmlen;
int ptype;
void *pval;
ASN1_STRING *pstr;
X509_ALGOR *palg;
ASN1_INTEGER *privkey = NULL;
BN_CTX *ctx = NULL;
STACK_OF(ASN1_TYPE) *ndsa = NULL;
DSA *dsa = NULL;
if (!PKCS8_pkey_get0(NULL, &p, &pklen, &palg, p8))
return 0;
X509_ALGOR_get0(NULL, &ptype, &pval, palg);
if (*p == (V_ASN1_SEQUENCE | V_ASN1_CONSTRUCTED)) {
ASN1_TYPE *t1, *t2;
if (!(ndsa = d2i_ASN1_SEQUENCE_ANY(NULL, &p, pklen)))
goto decerr;
if (sk_ASN1_TYPE_num(ndsa) != 2)
goto decerr;
/*-
* Handle Two broken types:
* SEQUENCE {parameters, priv_key}
* SEQUENCE {pub_key, priv_key}
*/
t1 = sk_ASN1_TYPE_value(ndsa, 0);
t2 = sk_ASN1_TYPE_value(ndsa, 1);
if (t1->type == V_ASN1_SEQUENCE) {
p8->broken = PKCS8_EMBEDDED_PARAM;
pval = t1->value.ptr;
} else if (ptype == V_ASN1_SEQUENCE)
p8->broken = PKCS8_NS_DB;
else
goto decerr;
if (t2->type != V_ASN1_INTEGER)
goto decerr;
privkey = t2->value.integer;
} else {
const unsigned char *q = p;
if (!(privkey = d2i_ASN1_INTEGER(NULL, &p, pklen)))
goto decerr;
if (privkey->type == V_ASN1_NEG_INTEGER) {
p8->broken = PKCS8_NEG_PRIVKEY;
ASN1_STRING_clear_free(privkey);
if (!(privkey = d2i_ASN1_UINTEGER(NULL, &q, pklen)))
goto decerr;
}
if (ptype != V_ASN1_SEQUENCE)
goto decerr;
}
pstr = pval;
pm = pstr->data;
pmlen = pstr->length;
if (!(dsa = d2i_DSAparams(NULL, &pm, pmlen)))
goto decerr;
/* We have parameters now set private key */
if (!(dsa->priv_key = ASN1_INTEGER_to_BN(privkey, NULL))) {
DSAerr(DSA_F_DSA_PRIV_DECODE, DSA_R_BN_ERROR);
goto dsaerr;
}
/* Calculate public key */
if (!(dsa->pub_key = BN_new())) {
DSAerr(DSA_F_DSA_PRIV_DECODE, ERR_R_MALLOC_FAILURE);
goto dsaerr;
}
if (!(ctx = BN_CTX_new())) {
DSAerr(DSA_F_DSA_PRIV_DECODE, ERR_R_MALLOC_FAILURE);
goto dsaerr;
}
if (!BN_mod_exp(dsa->pub_key, dsa->g, dsa->priv_key, dsa->p, ctx)) {
DSAerr(DSA_F_DSA_PRIV_DECODE, DSA_R_BN_ERROR);
goto dsaerr;
}
}
| 178,425 | 414 |
188497284893162750346123661088669524089
| null | null | null |
|
openssl
|
708dc2f1291e104fe4eef810bb8ffc1fae5b19c1
| 1 |
static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top,
unsigned char *buf, int idx,
int width)
{
size_t i, j;
if (top > b->top)
top = b->top; /* this works because 'buf' is explicitly
* zeroed */
for (i = 0, j = idx; i < top * sizeof b->d[0]; i++, j += width) {
buf[j] = ((unsigned char *)b->d)[i];
}
return 1;
unsigned char *buf, int idx,
static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top,
unsigned char *buf, int idx,
int width)
{
size_t i, j;
if (bn_wexpand(b, top) == NULL)
return 0;
for (i = 0, j = idx; i < top * sizeof b->d[0]; i++, j += width) {
((unsigned char *)b->d)[i] = buf[j];
}
b->top = top;
if (!BN_is_odd(m)) {
BNerr(BN_F_BN_MOD_EXP_MONT_CONSTTIME, BN_R_CALLED_WITH_EVEN_MODULUS);
return (0);
}
top = m->top;
bits = BN_num_bits(p);
if (bits == 0) {
/* x**0 mod 1 is still zero. */
if (BN_is_one(m)) {
ret = 1;
BN_zero(rr);
} else {
ret = BN_one(rr);
}
return ret;
}
BN_CTX_start(ctx);
/*
* Allocate a montgomery context if it was not supplied by the caller. If
* this is not done, things will break in the montgomery part.
*/
if (in_mont != NULL)
mont = in_mont;
else {
if ((mont = BN_MONT_CTX_new()) == NULL)
goto err;
if (!BN_MONT_CTX_set(mont, m, ctx))
goto err;
}
#ifdef RSAZ_ENABLED
/*
* If the size of the operands allow it, perform the optimized
* RSAZ exponentiation. For further information see
* crypto/bn/rsaz_exp.c and accompanying assembly modules.
*/
if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024)
&& rsaz_avx2_eligible()) {
if (NULL == bn_wexpand(rr, 16))
goto err;
RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d,
mont->n0[0]);
rr->top = 16;
rr->neg = 0;
bn_correct_top(rr);
ret = 1;
goto err;
} else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) {
if (NULL == bn_wexpand(rr, 8))
goto err;
RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d);
rr->top = 8;
rr->neg = 0;
bn_correct_top(rr);
ret = 1;
goto err;
}
#endif
/* Get the window size to use with size of p. */
window = BN_window_bits_for_ctime_exponent_size(bits);
#if defined(SPARC_T4_MONT)
if (window >= 5 && (top & 15) == 0 && top <= 64 &&
(OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) ==
(CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0]))
window = 5;
else
#endif
#if defined(OPENSSL_BN_ASM_MONT5)
if (window >= 5) {
window = 5; /* ~5% improvement for RSA2048 sign, and even
* for RSA4096 */
if ((top & 7) == 0)
powerbufLen += 2 * top * sizeof(m->d[0]);
}
#endif
(void)0;
/*
* Allocate a buffer large enough to hold all of the pre-computed powers
* of am, am itself and tmp.
*/
numPowers = 1 << window;
powerbufLen += sizeof(m->d[0]) * (top * numPowers +
((2 * top) >
numPowers ? (2 * top) : numPowers));
#ifdef alloca
if (powerbufLen < 3072)
powerbufFree =
alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH);
else
#endif
if ((powerbufFree =
(unsigned char *)OPENSSL_malloc(powerbufLen +
MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH))
== NULL)
goto err;
powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree);
memset(powerbuf, 0, powerbufLen);
#ifdef alloca
if (powerbufLen < 3072)
powerbufFree = NULL;
#endif
/* lay down tmp and am right after powers table */
tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers);
am.d = tmp.d + top;
tmp.top = am.top = 0;
tmp.dmax = am.dmax = top;
tmp.neg = am.neg = 0;
tmp.flags = am.flags = BN_FLG_STATIC_DATA;
/* prepare a^0 in Montgomery domain */
#if 1 /* by Shay Gueron's suggestion */
if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
/* 2^(top*BN_BITS2) - m */
tmp.d[0] = (0 - m->d[0]) & BN_MASK2;
for (i = 1; i < top; i++)
tmp.d[i] = (~m->d[i]) & BN_MASK2;
tmp.top = top;
} else
#endif
if (!BN_to_montgomery(&tmp, BN_value_one(), mont, ctx))
goto err;
/* prepare a^1 in Montgomery domain */
if (a->neg || BN_ucmp(a, m) >= 0) {
if (!BN_mod(&am, a, m, ctx))
goto err;
if (!BN_to_montgomery(&am, &am, mont, ctx))
goto err;
} else if (!BN_to_montgomery(&am, a, mont, ctx))
goto err;
#if defined(SPARC_T4_MONT)
if (t4) {
typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np,
const BN_ULONG *n0, const void *table,
int power, int bits);
int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np,
const BN_ULONG *n0, const void *table,
int power, int bits);
int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np,
const BN_ULONG *n0, const void *table,
int power, int bits);
int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np,
const BN_ULONG *n0, const void *table,
int power, int bits);
int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np,
const BN_ULONG *n0, const void *table,
int power, int bits);
static const bn_pwr5_mont_f pwr5_funcs[4] = {
bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16,
bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32
};
bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1];
typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap,
const void *bp, const BN_ULONG *np,
const BN_ULONG *n0);
int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp,
const BN_ULONG *np, const BN_ULONG *n0);
int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap,
const void *bp, const BN_ULONG *np,
const BN_ULONG *n0);
int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap,
const void *bp, const BN_ULONG *np,
const BN_ULONG *n0);
int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap,
const void *bp, const BN_ULONG *np,
const BN_ULONG *n0);
static const bn_mul_mont_f mul_funcs[4] = {
bn_mul_mont_t4_8, bn_mul_mont_t4_16,
bn_mul_mont_t4_24, bn_mul_mont_t4_32
};
bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1];
void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap,
const void *bp, const BN_ULONG *np,
const BN_ULONG *n0, int num);
void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap,
const void *bp, const BN_ULONG *np,
const BN_ULONG *n0, int num);
void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap,
const void *table, const BN_ULONG *np,
const BN_ULONG *n0, int num, int power);
void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num,
void *table, size_t power);
void bn_gather5_t4(BN_ULONG *out, size_t num,
void *table, size_t power);
void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num);
BN_ULONG *np = mont->N.d, *n0 = mont->n0;
int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less
* than 32 */
/*
* BN_to_montgomery can contaminate words above .top [in
* BN_DEBUG[_DEBUG] build]...
*/
for (i = am.top; i < top; i++)
am.d[i] = 0;
for (i = tmp.top; i < top; i++)
tmp.d[i] = 0;
bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0);
bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1);
if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) &&
!(*mul_worker) (tmp.d, am.d, am.d, np, n0))
bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top);
bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2);
for (i = 3; i < 32; i++) {
/* Calculate a^i = a^(i-1) * a */
if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) &&
!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0))
bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top);
bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i);
}
/* switch to 64-bit domain */
np = alloca(top * sizeof(BN_ULONG));
top /= 2;
bn_flip_t4(np, mont->N.d, top);
bits--;
for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--)
wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
bn_gather5_t4(tmp.d, top, powerbuf, wvalue);
/*
* Scan the exponent one window at a time starting from the most
* significant bits.
*/
while (bits >= 0) {
if (bits < stride)
stride = bits + 1;
bits -= stride;
wvalue = bn_get_bits(p, bits + 1);
if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
continue;
/* retry once and fall back */
if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
continue;
bits += stride - 5;
wvalue >>= stride - 5;
wvalue &= 31;
bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top,
wvalue);
}
bn_flip_t4(tmp.d, tmp.d, top);
top *= 2;
/* back to 32-bit domain */
tmp.top = top;
bn_correct_top(&tmp);
OPENSSL_cleanse(np, top * sizeof(BN_ULONG));
} else
#endif
#if defined(OPENSSL_BN_ASM_MONT5)
if (window == 5 && top > 1) {
/*
* This optimization uses ideas from http://eprint.iacr.org/2011/239,
* specifically optimization of cache-timing attack countermeasures
* and pre-computation optimization.
*/
/*
* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
* 512-bit RSA is hardly relevant, we omit it to spare size...
*/
void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap,
const void *table, const BN_ULONG *np,
const BN_ULONG *n0, int num, int power);
void bn_scatter5(const BN_ULONG *inp, size_t num,
void *table, size_t power);
void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power);
void bn_power5(BN_ULONG *rp, const BN_ULONG *ap,
const void *table, const BN_ULONG *np,
const BN_ULONG *n0, int num, int power);
int bn_get_bits5(const BN_ULONG *ap, int off);
int bn_from_montgomery(BN_ULONG *rp, const BN_ULONG *ap,
const BN_ULONG *not_used, const BN_ULONG *np,
const BN_ULONG *n0, int num);
BN_ULONG *np = mont->N.d, *n0 = mont->n0, *np2;
/*
* BN_to_montgomery can contaminate words above .top [in
* BN_DEBUG[_DEBUG] build]...
*/
for (i = am.top; i < top; i++)
am.d[i] = 0;
for (i = tmp.top; i < top; i++)
tmp.d[i] = 0;
if (top & 7)
np2 = np;
else
for (np2 = am.d + top, i = 0; i < top; i++)
np2[2 * i] = np[i];
bn_scatter5(tmp.d, top, powerbuf, 0);
bn_scatter5(am.d, am.top, powerbuf, 1);
bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
bn_scatter5(tmp.d, top, powerbuf, 2);
# if 0
for (i = 3; i < 32; i++) {
/* Calculate a^i = a^(i-1) * a */
bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1);
bn_scatter5(tmp.d, top, powerbuf, i);
}
# else
/* same as above, but uses squaring for 1/2 of operations */
for (i = 4; i < 32; i *= 2) {
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_scatter5(tmp.d, top, powerbuf, i);
}
for (i = 3; i < 8; i += 2) {
int j;
bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1);
bn_scatter5(tmp.d, top, powerbuf, i);
for (j = 2 * i; j < 32; j *= 2) {
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_scatter5(tmp.d, top, powerbuf, j);
}
}
for (; i < 16; i += 2) {
bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1);
bn_scatter5(tmp.d, top, powerbuf, i);
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_scatter5(tmp.d, top, powerbuf, 2 * i);
}
for (; i < 32; i += 2) {
bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1);
bn_scatter5(tmp.d, top, powerbuf, i);
}
# endif
bits--;
for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--)
wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
bn_gather5(tmp.d, top, powerbuf, wvalue);
/*
* Scan the exponent one window at a time starting from the most
* significant bits.
*/
if (top & 7)
while (bits >= 0) {
for (wvalue = 0, i = 0; i < 5; i++, bits--)
wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top,
wvalue);
} else {
while (bits >= 0) {
wvalue = bn_get_bits5(p->d, bits - 4);
bits -= 5;
bn_power5(tmp.d, tmp.d, powerbuf, np2, n0, top, wvalue);
}
}
ret = bn_from_montgomery(tmp.d, tmp.d, NULL, np2, n0, top);
tmp.top = top;
bn_correct_top(&tmp);
if (ret) {
if (!BN_copy(rr, &tmp))
ret = 0;
goto err; /* non-zero ret means it's not error */
}
} else
#endif
{
if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers))
goto err;
if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, numPowers))
goto err;
/*
* If the window size is greater than 1, then calculate
* val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even
* powers could instead be computed as (a^(i/2))^2 to use the slight
* performance advantage of sqr over mul).
*/
if (window > 1) {
if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx))
goto err;
if (!MOD_EXP_CTIME_COPY_TO_PREBUF
(&tmp, top, powerbuf, 2, numPowers))
goto err;
for (i = 3; i < numPowers; i++) {
/* Calculate a^i = a^(i-1) * a */
if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx))
goto err;
if (!MOD_EXP_CTIME_COPY_TO_PREBUF
(&tmp, top, powerbuf, i, numPowers))
goto err;
}
}
bits--;
for (wvalue = 0, i = bits % window; i >= 0; i--, bits--)
wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
if (!MOD_EXP_CTIME_COPY_FROM_PREBUF
(&tmp, top, powerbuf, wvalue, numPowers))
goto err;
/*
* Scan the exponent one window at a time starting from the most
} else
#endif
{
if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers))
goto err;
if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, numPowers))
goto err;
/*
wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
}
/*
* Fetch the appropriate pre-computed value from the pre-buf
if (window > 1) {
if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx))
goto err;
if (!MOD_EXP_CTIME_COPY_TO_PREBUF
(&tmp, top, powerbuf, 2, numPowers))
goto err;
for (i = 3; i < numPowers; i++) {
/* Calculate a^i = a^(i-1) * a */
if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx))
goto err;
if (!MOD_EXP_CTIME_COPY_TO_PREBUF
(&tmp, top, powerbuf, i, numPowers))
goto err;
}
}
for (i = 1; i < top; i++)
bits--;
for (wvalue = 0, i = bits % window; i >= 0; i--, bits--)
wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
if (!MOD_EXP_CTIME_COPY_FROM_PREBUF
(&tmp, top, powerbuf, wvalue, numPowers))
goto err;
/*
err:
if ((in_mont == NULL) && (mont != NULL))
BN_MONT_CTX_free(mont);
if (powerbuf != NULL) {
OPENSSL_cleanse(powerbuf, powerbufLen);
if (powerbufFree)
OPENSSL_free(powerbufFree);
}
BN_CTX_end(ctx);
return (ret);
}
int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
/*
* Fetch the appropriate pre-computed value from the pre-buf
*/
if (!MOD_EXP_CTIME_COPY_FROM_PREBUF
(&am, top, powerbuf, wvalue, numPowers))
goto err;
/* Multiply the result into the intermediate result */
#define BN_MOD_MUL_WORD(r, w, m) \
(BN_mul_word(r, (w)) && \
(/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \
(BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1))))
/*
* BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is
* probably more overhead than always using BN_mod (which uses BN_copy if
* a similar test returns true).
*/
/*
* We can use BN_mod and do not need BN_nnmod because our accumulator is
* never negative (the result of BN_mod does not depend on the sign of
* the modulus).
*/
#define BN_TO_MONTGOMERY_WORD(r, w, mont) \
(BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx))
if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) {
/* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
BNerr(BN_F_BN_MOD_EXP_MONT_WORD, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
return -1;
}
bn_check_top(p);
bn_check_top(m);
if (!BN_is_odd(m)) {
BNerr(BN_F_BN_MOD_EXP_MONT_WORD, BN_R_CALLED_WITH_EVEN_MODULUS);
return (0);
}
if (m->top == 1)
a %= m->d[0]; /* make sure that 'a' is reduced */
bits = BN_num_bits(p);
if (bits == 0) {
/* x**0 mod 1 is still zero. */
if (BN_is_one(m)) {
ret = 1;
BN_zero(rr);
} else {
ret = BN_one(rr);
}
return ret;
}
if (a == 0) {
BN_zero(rr);
ret = 1;
return ret;
}
BN_CTX_start(ctx);
d = BN_CTX_get(ctx);
r = BN_CTX_get(ctx);
t = BN_CTX_get(ctx);
if (d == NULL || r == NULL || t == NULL)
goto err;
if (in_mont != NULL)
mont = in_mont;
else {
if ((mont = BN_MONT_CTX_new()) == NULL)
goto err;
if (!BN_MONT_CTX_set(mont, m, ctx))
goto err;
}
r_is_one = 1; /* except for Montgomery factor */
/* bits-1 >= 0 */
/* The result is accumulated in the product r*w. */
w = a; /* bit 'bits-1' of 'p' is always set */
for (b = bits - 2; b >= 0; b--) {
/* First, square r*w. */
next_w = w * w;
if ((next_w / w) != w) { /* overflow */
if (r_is_one) {
if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
goto err;
r_is_one = 0;
} else {
if (!BN_MOD_MUL_WORD(r, w, m))
goto err;
}
next_w = 1;
}
w = next_w;
if (!r_is_one) {
if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
goto err;
}
/* Second, multiply r*w by 'a' if exponent bit is set. */
if (BN_is_bit_set(p, b)) {
next_w = w * a;
if ((next_w / a) != w) { /* overflow */
if (r_is_one) {
if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
goto err;
r_is_one = 0;
} else {
if (!BN_MOD_MUL_WORD(r, w, m))
goto err;
}
next_w = a;
}
w = next_w;
}
}
/* Finally, set r:=r*w. */
if (w != 1) {
if (r_is_one) {
if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
goto err;
r_is_one = 0;
} else {
if (!BN_MOD_MUL_WORD(r, w, m))
goto err;
}
}
if (r_is_one) { /* can happen only if a == 1 */
if (!BN_one(rr))
goto err;
} else {
if (!BN_from_montgomery(rr, r, mont, ctx))
goto err;
}
ret = 1;
err:
if ((in_mont == NULL) && (mont != NULL))
BN_MONT_CTX_free(mont);
BN_CTX_end(ctx);
bn_check_top(rr);
return (ret);
}
|
CWE-200
| 178,426 | 415 |
267914895309637342114696368805738267665
| null | null | null |
openssl
|
878e2c5b13010329c203f309ed0c8f2113f85648
| 1 |
int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *ret)
{
int ok = 0;
BIGNUM *q = NULL;
*ret = 0;
q = BN_new();
if (q == NULL)
goto err;
BN_set_word(q, 1);
if (BN_cmp(pub_key, q) <= 0)
*ret |= DH_CHECK_PUBKEY_TOO_SMALL;
BN_copy(q, dh->p);
BN_sub_word(q, 1);
if (BN_cmp(pub_key, q) >= 0)
*ret |= DH_CHECK_PUBKEY_TOO_LARGE;
ok = 1;
err:
if (q != NULL)
BN_free(q);
return (ok);
}
|
CWE-200
| 178,430 | 418 |
335783302544795505763566150355301510519
| null | null | null |
openssl
|
af58be768ebb690f78530f796e92b8ae5c9a4401
| 1 |
int dtls1_read_bytes(SSL *s, int type, int *recvd_type, unsigned char *buf,
int len, int peek)
{
int al, i, j, ret;
unsigned int n;
SSL3_RECORD *rr;
void (*cb) (const SSL *ssl, int type2, int val) = NULL;
if (!SSL3_BUFFER_is_initialised(&s->rlayer.rbuf)) {
/* Not initialized yet */
if (!ssl3_setup_buffers(s))
return (-1);
}
if ((type && (type != SSL3_RT_APPLICATION_DATA) &&
(type != SSL3_RT_HANDSHAKE)) ||
(peek && (type != SSL3_RT_APPLICATION_DATA))) {
SSLerr(SSL_F_DTLS1_READ_BYTES, ERR_R_INTERNAL_ERROR);
return -1;
}
/*
* check whether there's a handshake message (client hello?) waiting
*/
if ((ret = have_handshake_fragment(s, type, buf, len)))
return ret;
/*
* Now s->rlayer.d->handshake_fragment_len == 0 if
* type == SSL3_RT_HANDSHAKE.
*/
#ifndef OPENSSL_NO_SCTP
/*
* Continue handshake if it had to be interrupted to read app data with
* SCTP.
*/
if ((!ossl_statem_get_in_handshake(s) && SSL_in_init(s)) ||
(BIO_dgram_is_sctp(SSL_get_rbio(s))
&& ossl_statem_in_sctp_read_sock(s)
&& s->s3->in_read_app_data != 2))
#else
if (!ossl_statem_get_in_handshake(s) && SSL_in_init(s))
#endif
{
/* type == SSL3_RT_APPLICATION_DATA */
i = s->handshake_func(s);
if (i < 0)
return (i);
if (i == 0) {
SSLerr(SSL_F_DTLS1_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE);
return (-1);
}
}
start:
s->rwstate = SSL_NOTHING;
/*-
* s->s3->rrec.type - is the type of record
* s->s3->rrec.data, - data
* s->s3->rrec.off, - offset into 'data' for next read
* s->s3->rrec.length, - number of bytes.
*/
rr = s->rlayer.rrec;
/*
* We are not handshaking and have no data yet, so process data buffered
* during the last handshake in advance, if any.
*/
if (SSL_is_init_finished(s) && SSL3_RECORD_get_length(rr) == 0) {
pitem *item;
item = pqueue_pop(s->rlayer.d->buffered_app_data.q);
if (item) {
#ifndef OPENSSL_NO_SCTP
/* Restore bio_dgram_sctp_rcvinfo struct */
if (BIO_dgram_is_sctp(SSL_get_rbio(s))) {
DTLS1_RECORD_DATA *rdata = (DTLS1_RECORD_DATA *)item->data;
BIO_ctrl(SSL_get_rbio(s), BIO_CTRL_DGRAM_SCTP_SET_RCVINFO,
sizeof(rdata->recordinfo), &rdata->recordinfo);
}
#endif
dtls1_copy_record(s, item);
OPENSSL_free(item->data);
pitem_free(item);
}
}
/* Check for timeout */
if (dtls1_handle_timeout(s) > 0)
goto start;
/* get new packet if necessary */
if ((SSL3_RECORD_get_length(rr) == 0)
|| (s->rlayer.rstate == SSL_ST_READ_BODY)) {
ret = dtls1_get_record(s);
if (ret <= 0) {
ret = dtls1_read_failed(s, ret);
/* anything other than a timeout is an error */
if (ret <= 0)
return (ret);
else
goto start;
}
}
/* we now have a packet which can be read and processed */
if (s->s3->change_cipher_spec /* set when we receive ChangeCipherSpec,
SSL3_RECORD_get_seq_num(rr)) < 0) {
SSLerr(SSL_F_DTLS1_READ_BYTES, ERR_R_INTERNAL_ERROR);
return -1;
}
SSL3_RECORD_set_length(rr, 0);
goto start;
}
|
CWE-400
| 178,434 | 421 |
226896650776946175366825036905210068548
| null | null | null |
ghostscript
|
f5c7555c30393e64ec1f5ab0dfae5b55b3b3fc78
| 1 |
zsethalftone5(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
uint count;
gs_halftone_component *phtc = 0;
gs_halftone_component *pc;
int code = 0;
int j;
bool have_default;
gs_halftone *pht = 0;
gx_device_halftone *pdht = 0;
ref sprocs[GS_CLIENT_COLOR_MAX_COMPONENTS + 1];
ref tprocs[GS_CLIENT_COLOR_MAX_COMPONENTS + 1];
gs_memory_t *mem;
uint edepth = ref_stack_count(&e_stack);
int npop = 2;
int dict_enum = dict_first(op);
ref rvalue[2];
int cname, colorant_number;
byte * pname;
uint name_size;
int halftonetype, type = 0;
gs_gstate *pgs = igs;
int space_index = r_space_index(op - 1);
mem = (gs_memory_t *) idmemory->spaces_indexed[space_index];
* the device color space, so we need to mark them
* with a different internal halftone type.
*/
code = dict_int_param(op - 1, "HalftoneType", 1, 100, 0, &type);
if (code < 0)
return code;
halftonetype = (type == 2 || type == 4)
? ht_type_multiple_colorscreen
: ht_type_multiple;
/* Count how many components that we will actually use. */
have_default = false;
for (count = 0; ;) {
/* Move to next element in the dictionary */
if ((dict_enum = dict_next(op, dict_enum, rvalue)) == -1)
break;
/*
* Verify that we have a valid component. We may have a
* /HalfToneType entry.
*/
if (!r_has_type(&rvalue[0], t_name))
continue;
if (!r_has_type(&rvalue[1], t_dictionary))
continue;
/* Get the name of the component verify that we will use it. */
cname = name_index(mem, &rvalue[0]);
code = gs_get_colorname_string(mem, cname, &pname, &name_size);
if (code < 0)
break;
colorant_number = gs_cname_to_colorant_number(pgs, pname, name_size,
halftonetype);
if (colorant_number < 0)
continue;
else if (colorant_number == GX_DEVICE_COLOR_MAX_COMPONENTS) {
/* If here then we have the "Default" component */
if (have_default)
return_error(gs_error_rangecheck);
have_default = true;
}
count++;
/*
* Check to see if we have already reached the legal number of
* components.
*/
if (count > GS_CLIENT_COLOR_MAX_COMPONENTS + 1) {
code = gs_note_error(gs_error_rangecheck);
break;
}
}
if (count == 0 || (halftonetype == ht_type_multiple && ! have_default))
code = gs_note_error(gs_error_rangecheck);
if (code >= 0) {
check_estack(5); /* for sampling Type 1 screens */
refset_null(sprocs, count);
refset_null(tprocs, count);
rc_alloc_struct_0(pht, gs_halftone, &st_halftone,
imemory, pht = 0, ".sethalftone5");
phtc = gs_alloc_struct_array(mem, count, gs_halftone_component,
&st_ht_component_element,
".sethalftone5");
rc_alloc_struct_0(pdht, gx_device_halftone, &st_device_halftone,
imemory, pdht = 0, ".sethalftone5");
if (pht == 0 || phtc == 0 || pdht == 0) {
j = 0; /* Quiet the compiler:
gs_note_error isn't necessarily identity,
so j could be left ununitialized. */
code = gs_note_error(gs_error_VMerror);
}
}
if (code >= 0) {
dict_enum = dict_first(op);
for (j = 0, pc = phtc; ;) {
int type;
/* Move to next element in the dictionary */
if ((dict_enum = dict_next(op, dict_enum, rvalue)) == -1)
break;
/*
* Verify that we have a valid component. We may have a
* /HalfToneType entry.
*/
if (!r_has_type(&rvalue[0], t_name))
continue;
if (!r_has_type(&rvalue[1], t_dictionary))
continue;
/* Get the name of the component */
cname = name_index(mem, &rvalue[0]);
code = gs_get_colorname_string(mem, cname, &pname, &name_size);
if (code < 0)
break;
colorant_number = gs_cname_to_colorant_number(pgs, pname, name_size,
halftonetype);
if (colorant_number < 0)
continue; /* Do not use this component */
pc->cname = cname;
pc->comp_number = colorant_number;
/* Now process the component dictionary */
check_dict_read(rvalue[1]);
if (dict_int_param(&rvalue[1], "HalftoneType", 1, 7, 0, &type) < 0) {
code = gs_note_error(gs_error_typecheck);
break;
}
switch (type) {
default:
code = gs_note_error(gs_error_rangecheck);
break;
case 1:
code = dict_spot_params(&rvalue[1], &pc->params.spot,
sprocs + j, tprocs + j, mem);
pc->params.spot.screen.spot_function = spot1_dummy;
pc->type = ht_type_spot;
break;
case 3:
code = dict_threshold_params(&rvalue[1], &pc->params.threshold,
tprocs + j);
pc->type = ht_type_threshold;
break;
case 7:
code = dict_threshold2_params(&rvalue[1], &pc->params.threshold2,
tprocs + j, imemory);
pc->type = ht_type_threshold2;
break;
}
if (code < 0)
break;
pc++;
j++;
}
}
if (code >= 0) {
pht->type = halftonetype;
pht->params.multiple.components = phtc;
pht->params.multiple.num_comp = j;
pht->params.multiple.get_colorname_string = gs_get_colorname_string;
code = gs_sethalftone_prepare(igs, pht, pdht);
}
if (code >= 0) {
/*
* Put the actual frequency and angle in the spot function component dictionaries.
*/
dict_enum = dict_first(op);
for (pc = phtc; ; ) {
/* Move to next element in the dictionary */
if ((dict_enum = dict_next(op, dict_enum, rvalue)) == -1)
break;
/* Verify that we have a valid component */
if (!r_has_type(&rvalue[0], t_name))
continue;
if (!r_has_type(&rvalue[1], t_dictionary))
continue;
/* Get the name of the component and verify that we will use it. */
cname = name_index(mem, &rvalue[0]);
code = gs_get_colorname_string(mem, cname, &pname, &name_size);
if (code < 0)
break;
colorant_number = gs_cname_to_colorant_number(pgs, pname, name_size,
halftonetype);
if (colorant_number < 0)
continue;
if (pc->type == ht_type_spot) {
code = dict_spot_results(i_ctx_p, &rvalue[1], &pc->params.spot);
if (code < 0)
break;
}
pc++;
}
}
if (code >= 0) {
/*
* Schedule the sampling of any Type 1 screens,
* and any (Type 1 or Type 3) TransferFunctions.
* Save the stack depths in case we have to back out.
*/
uint odepth = ref_stack_count(&o_stack);
ref odict, odict5;
odict = op[-1];
odict5 = *op;
pop(2);
op = osp;
esp += 5;
make_mark_estack(esp - 4, es_other, sethalftone_cleanup);
esp[-3] = odict;
make_istruct(esp - 2, 0, pht);
make_istruct(esp - 1, 0, pdht);
make_op_estack(esp, sethalftone_finish);
for (j = 0; j < count; j++) {
gx_ht_order *porder = NULL;
if (pdht->components == 0)
porder = &pdht->order;
else {
/* Find the component in pdht that matches component j in
the pht; gs_sethalftone_prepare() may permute these. */
int k;
int comp_number = phtc[j].comp_number;
for (k = 0; k < count; k++) {
if (pdht->components[k].comp_number == comp_number) {
porder = &pdht->components[k].corder;
break;
}
}
}
switch (phtc[j].type) {
case ht_type_spot:
code = zscreen_enum_init(i_ctx_p, porder,
&phtc[j].params.spot.screen,
&sprocs[j], 0, 0, space_index);
if (code < 0)
break;
/* falls through */
case ht_type_threshold:
if (!r_has_type(tprocs + j, t__invalid)) {
/* Schedule TransferFunction sampling. */
/****** check_xstack IS WRONG ******/
check_ostack(zcolor_remap_one_ostack);
check_estack(zcolor_remap_one_estack);
code = zcolor_remap_one(i_ctx_p, tprocs + j,
porder->transfer, igs,
zcolor_remap_one_finish);
op = osp;
}
break;
default: /* not possible here, but to keep */
/* the compilers happy.... */
;
}
if (code < 0) { /* Restore the stack. */
ref_stack_pop_to(&o_stack, odepth);
ref_stack_pop_to(&e_stack, edepth);
op = osp;
op[-1] = odict;
*op = odict5;
break;
}
npop = 0;
}
}
if (code < 0) {
gs_free_object(mem, pdht, ".sethalftone5");
gs_free_object(mem, phtc, ".sethalftone5");
gs_free_object(mem, pht, ".sethalftone5");
return code;
}
pop(npop);
return (ref_stack_count(&e_stack) > edepth ? o_push_estack : 0);
}
|
CWE-704
| 178,435 | 422 |
113189749909410079659988113313852115728
| null | null | null |
ghostscript
|
8abd22010eb4db0fb1b10e430d5f5d83e015ef70
| 1 |
lib_file_open(gs_file_path_ptr lib_path, const gs_memory_t *mem, i_ctx_t *i_ctx_p,
const char *fname, uint flen, char *buffer, int blen, uint *pclen, ref *pfile)
{ /* i_ctx_p is NULL running arg (@) files.
* lib_path and mem are never NULL
*/
bool starting_arg_file = (i_ctx_p == NULL) ? true : i_ctx_p->starting_arg_file;
bool search_with_no_combine = false;
bool search_with_combine = false;
char fmode[2] = { 'r', 0};
gx_io_device *iodev = iodev_default(mem);
gs_main_instance *minst = get_minst_from_memory(mem);
int code;
/* when starting arg files (@ files) iodev_default is not yet set */
if (iodev == 0)
iodev = (gx_io_device *)gx_io_device_table[0];
search_with_combine = false;
} else {
search_with_no_combine = starting_arg_file;
search_with_combine = true;
}
|
CWE-200
| 178,436 | 423 |
313778305350942767239068089640740094588
| null | null | null |
xserver
|
3f0d3f4d97bce75c1828635c322b6560a45a037f
| 1 |
validGlxScreen(ClientPtr client, int screen, __GLXscreen **pGlxScreen, int *err)
{
/*
** Check if screen exists.
*/
if (screen >= screenInfo.numScreens) {
client->errorValue = screen;
*err = BadValue;
return FALSE;
}
*pGlxScreen = glxGetScreen(screenInfo.screens[screen]);
return TRUE;
}
|
CWE-20
| 178,442 | 426 |
326310780415306211020981690928062594010
| null | null | null |
xserver
|
ec9c97c6bf70b523bc500bd3adf62176f1bb33a4
| 1 |
int __glXDisp_CreateContext(__GLXclientState *cl, GLbyte *pc)
{
xGLXCreateContextReq *req = (xGLXCreateContextReq *) pc;
__GLXconfig *config;
__GLXscreen *pGlxScreen;
int err;
if (!validGlxScreen(cl->client, req->screen, &pGlxScreen, &err))
return err;
if (!validGlxVisual(cl->client, pGlxScreen, req->visual, &config, &err))
config, pGlxScreen, req->isDirect);
}
|
CWE-20
| 178,443 | 427 |
157514919002916136363478838617068996789
| null | null | null |
libav
|
fb1473080223a634b8ac2cca48a632d037a0a69d
| 1 |
static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
int *need_next_header, int *new_frame_start)
{
GetBitContext bits;
AACADTSHeaderInfo hdr;
int size;
union {
uint64_t u64;
uint8_t u8[8];
} tmp;
tmp.u64 = av_be2ne64(state);
init_get_bits(&bits, tmp.u8+8-AAC_ADTS_HEADER_SIZE, AAC_ADTS_HEADER_SIZE * 8);
if ((size = avpriv_aac_parse_header(&bits, &hdr)) < 0)
return 0;
*need_next_header = 0;
*new_frame_start = 1;
hdr_info->sample_rate = hdr.sample_rate;
hdr_info->channels = ff_mpeg4audio_channels[hdr.chan_config];
hdr_info->samples = hdr.samples;
hdr_info->bit_rate = hdr.bit_rate;
return size;
}
|
CWE-125
| 178,450 | 428 |
174832307100128528531862305544810641609
| null | null | null |
mindrot
|
85bdcd7c92fe7ff133bbc4e10a65c91810f88755
| 1 |
do_setup_env(Session *s, const char *shell)
{
struct ssh *ssh = active_state; /* XXX */
char buf[256];
u_int i, envsize;
char **env, *laddr;
struct passwd *pw = s->pw;
#if !defined (HAVE_LOGIN_CAP) && !defined (HAVE_CYGWIN)
char *path = NULL;
#endif
/* Initialize the environment. */
envsize = 100;
env = xcalloc(envsize, sizeof(char *));
env[0] = NULL;
#ifdef HAVE_CYGWIN
/*
* The Windows environment contains some setting which are
* important for a running system. They must not be dropped.
*/
{
char **p;
p = fetch_windows_environment();
copy_environment(p, &env, &envsize);
free_windows_environment(p);
}
#endif
#ifdef GSSAPI
/* Allow any GSSAPI methods that we've used to alter
* the childs environment as they see fit
*/
ssh_gssapi_do_child(&env, &envsize);
#endif
if (!options.use_login) {
/* Set basic environment. */
for (i = 0; i < s->num_env; i++)
child_set_env(&env, &envsize, s->env[i].name,
s->env[i].val);
child_set_env(&env, &envsize, "USER", pw->pw_name);
child_set_env(&env, &envsize, "LOGNAME", pw->pw_name);
#ifdef _AIX
child_set_env(&env, &envsize, "LOGIN", pw->pw_name);
#endif
child_set_env(&env, &envsize, "HOME", pw->pw_dir);
#ifdef HAVE_LOGIN_CAP
if (setusercontext(lc, pw, pw->pw_uid, LOGIN_SETPATH) < 0)
child_set_env(&env, &envsize, "PATH", _PATH_STDPATH);
else
child_set_env(&env, &envsize, "PATH", getenv("PATH"));
#else /* HAVE_LOGIN_CAP */
# ifndef HAVE_CYGWIN
/*
* There's no standard path on Windows. The path contains
* important components pointing to the system directories,
* needed for loading shared libraries. So the path better
* remains intact here.
*/
# ifdef HAVE_ETC_DEFAULT_LOGIN
read_etc_default_login(&env, &envsize, pw->pw_uid);
path = child_get_env(env, "PATH");
# endif /* HAVE_ETC_DEFAULT_LOGIN */
if (path == NULL || *path == '\0') {
child_set_env(&env, &envsize, "PATH",
s->pw->pw_uid == 0 ?
SUPERUSER_PATH : _PATH_STDPATH);
}
# endif /* HAVE_CYGWIN */
#endif /* HAVE_LOGIN_CAP */
snprintf(buf, sizeof buf, "%.200s/%.50s",
_PATH_MAILDIR, pw->pw_name);
child_set_env(&env, &envsize, "MAIL", buf);
/* Normal systems set SHELL by default. */
child_set_env(&env, &envsize, "SHELL", shell);
}
if (getenv("TZ"))
child_set_env(&env, &envsize, "TZ", getenv("TZ"));
/* Set custom environment options from RSA authentication. */
if (!options.use_login) {
while (custom_environment) {
struct envstring *ce = custom_environment;
char *str = ce->s;
for (i = 0; str[i] != '=' && str[i]; i++)
;
if (str[i] == '=') {
str[i] = 0;
child_set_env(&env, &envsize, str, str + i + 1);
}
custom_environment = ce->next;
free(ce->s);
free(ce);
}
}
/* SSH_CLIENT deprecated */
snprintf(buf, sizeof buf, "%.50s %d %d",
ssh_remote_ipaddr(ssh), ssh_remote_port(ssh),
ssh_local_port(ssh));
child_set_env(&env, &envsize, "SSH_CLIENT", buf);
laddr = get_local_ipaddr(packet_get_connection_in());
snprintf(buf, sizeof buf, "%.50s %d %.50s %d",
ssh_remote_ipaddr(ssh), ssh_remote_port(ssh),
laddr, ssh_local_port(ssh));
free(laddr);
child_set_env(&env, &envsize, "SSH_CONNECTION", buf);
if (s->ttyfd != -1)
child_set_env(&env, &envsize, "SSH_TTY", s->tty);
if (s->term)
child_set_env(&env, &envsize, "TERM", s->term);
if (s->display)
child_set_env(&env, &envsize, "DISPLAY", s->display);
if (original_command)
child_set_env(&env, &envsize, "SSH_ORIGINAL_COMMAND",
original_command);
#ifdef _UNICOS
if (cray_tmpdir[0] != '\0')
child_set_env(&env, &envsize, "TMPDIR", cray_tmpdir);
#endif /* _UNICOS */
/*
* Since we clear KRB5CCNAME at startup, if it's set now then it
* must have been set by a native authentication method (eg AIX or
* SIA), so copy it to the child.
*/
{
char *cp;
if ((cp = getenv("KRB5CCNAME")) != NULL)
child_set_env(&env, &envsize, "KRB5CCNAME", cp);
}
#ifdef _AIX
{
char *cp;
if ((cp = getenv("AUTHSTATE")) != NULL)
child_set_env(&env, &envsize, "AUTHSTATE", cp);
read_environment_file(&env, &envsize, "/etc/environment");
}
#endif
#ifdef KRB5
if (s->authctxt->krb5_ccname)
child_set_env(&env, &envsize, "KRB5CCNAME",
s->authctxt->krb5_ccname);
#endif
#ifdef USE_PAM
/*
* Pull in any environment variables that may have
* been set by PAM.
*/
if (options.use_pam) {
char **p;
p = fetch_pam_child_environment();
copy_environment(p, &env, &envsize);
free_pam_environment(p);
p = fetch_pam_environment();
copy_environment(p, &env, &envsize);
free_pam_environment(p);
}
#endif /* USE_PAM */
if (auth_sock_name != NULL)
child_set_env(&env, &envsize, SSH_AUTHSOCKET_ENV_NAME,
auth_sock_name);
/* read $HOME/.ssh/environment. */
if (options.permit_user_env && !options.use_login) {
snprintf(buf, sizeof buf, "%.200s/.ssh/environment",
strcmp(pw->pw_dir, "/") ? pw->pw_dir : "");
read_environment_file(&env, &envsize, buf);
}
if (debug_flag) {
/* dump the environment */
fprintf(stderr, "Environment:\n");
for (i = 0; env[i]; i++)
fprintf(stderr, " %.200s\n", env[i]);
}
return env;
}
|
CWE-264
| 178,455 | 431 |
188520525865912700842267634957388443523
| null | null | null |
openssl
|
1632ef744872edc2aa2a53d487d3e79c965a4ad3
| 1 |
dtls1_reassemble_fragment(SSL *s, struct hm_header_st* msg_hdr, int *ok)
{
hm_fragment *frag = NULL;
pitem *item = NULL;
int i = -1, is_complete;
unsigned char seq64be[8];
unsigned long frag_len = msg_hdr->frag_len, max_len;
if ((msg_hdr->frag_off+frag_len) > msg_hdr->msg_len)
goto err;
/* Determine maximum allowed message size. Depends on (user set)
* maximum certificate length, but 16k is minimum.
*/
if (DTLS1_HM_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH < s->max_cert_list)
max_len = s->max_cert_list;
else
max_len = DTLS1_HM_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH;
if ((msg_hdr->frag_off+frag_len) > max_len)
goto err;
/* Try to find item in queue */
memset(seq64be,0,sizeof(seq64be));
seq64be[6] = (unsigned char) (msg_hdr->seq>>8);
seq64be[7] = (unsigned char) msg_hdr->seq;
item = pqueue_find(s->d1->buffered_messages, seq64be);
if (item == NULL)
{
frag = dtls1_hm_fragment_new(msg_hdr->msg_len, 1);
if ( frag == NULL)
goto err;
memcpy(&(frag->msg_header), msg_hdr, sizeof(*msg_hdr));
frag->msg_header.frag_len = frag->msg_header.msg_len;
frag->msg_header.frag_off = 0;
}
else
frag = (hm_fragment*) item->data;
/* If message is already reassembled, this must be a
* retransmit and can be dropped.
frag_len>sizeof(devnull)?sizeof(devnull):frag_len,0);
if (i<=0) goto err;
frag_len -= i;
}
|
CWE-119
| 178,456 | 432 |
277478106358864729741213619139959810391
| null | null | null |
polkit
|
bc7ffad53643a9c80231fc41f5582d6a8931c32c
| 1 |
polkit_backend_interactive_authority_check_authorization (PolkitBackendAuthority *authority,
PolkitSubject *caller,
PolkitSubject *subject,
const gchar *action_id,
PolkitDetails *details,
PolkitCheckAuthorizationFlags flags,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data)
{
PolkitBackendInteractiveAuthority *interactive_authority;
PolkitBackendInteractiveAuthorityPrivate *priv;
gchar *caller_str;
gchar *subject_str;
PolkitIdentity *user_of_caller;
PolkitIdentity *user_of_subject;
gchar *user_of_caller_str;
gchar *user_of_subject_str;
PolkitAuthorizationResult *result;
GError *error;
GSimpleAsyncResult *simple;
gboolean has_details;
gchar **detail_keys;
interactive_authority = POLKIT_BACKEND_INTERACTIVE_AUTHORITY (authority);
priv = POLKIT_BACKEND_INTERACTIVE_AUTHORITY_GET_PRIVATE (interactive_authority);
error = NULL;
caller_str = NULL;
subject_str = NULL;
user_of_caller = NULL;
user_of_subject = NULL;
user_of_caller_str = NULL;
user_of_subject_str = NULL;
result = NULL;
simple = g_simple_async_result_new (G_OBJECT (authority),
callback,
user_data,
polkit_backend_interactive_authority_check_authorization);
/* handle being called from ourselves */
if (caller == NULL)
{
/* TODO: this is kind of a hack */
GDBusConnection *system_bus;
system_bus = g_bus_get_sync (G_BUS_TYPE_SYSTEM, NULL, NULL);
caller = polkit_system_bus_name_new (g_dbus_connection_get_unique_name (system_bus));
g_object_unref (system_bus);
}
caller_str = polkit_subject_to_string (caller);
subject_str = polkit_subject_to_string (subject);
g_debug ("%s is inquiring whether %s is authorized for %s",
caller_str,
subject_str,
action_id);
action_id);
user_of_caller = polkit_backend_session_monitor_get_user_for_subject (priv->session_monitor,
caller,
&error);
if (error != NULL)
{
g_simple_async_result_complete (simple);
g_object_unref (simple);
g_error_free (error);
goto out;
}
user_of_caller_str = polkit_identity_to_string (user_of_caller);
g_debug (" user of caller is %s", user_of_caller_str);
g_debug (" user of caller is %s", user_of_caller_str);
user_of_subject = polkit_backend_session_monitor_get_user_for_subject (priv->session_monitor,
subject,
&error);
if (error != NULL)
{
g_simple_async_result_complete (simple);
g_object_unref (simple);
g_error_free (error);
goto out;
}
user_of_subject_str = polkit_identity_to_string (user_of_subject);
g_debug (" user of subject is %s", user_of_subject_str);
has_details = FALSE;
if (details != NULL)
{
detail_keys = polkit_details_get_keys (details);
if (detail_keys != NULL)
{
if (g_strv_length (detail_keys) > 0)
has_details = TRUE;
g_strfreev (detail_keys);
}
}
/* Not anyone is allowed to check that process XYZ is allowed to do ABC.
* We only allow this if, and only if,
* We only allow this if, and only if,
*
* - processes may check for another process owned by the *same* user but not
* if details are passed (otherwise you'd be able to spoof the dialog)
*
* - processes running as uid 0 may check anything and pass any details
*
if (!polkit_identity_equal (user_of_caller, user_of_subject) || has_details)
* then any uid referenced by that annotation is also allowed to check
* to check anything and pass any details
*/
if (!polkit_identity_equal (user_of_caller, user_of_subject) || has_details)
{
if (!may_identity_check_authorization (interactive_authority, action_id, user_of_caller))
{
"pass details");
}
else
{
g_simple_async_result_set_error (simple,
POLKIT_ERROR,
POLKIT_ERROR_NOT_AUTHORIZED,
"Only trusted callers (e.g. uid 0 or an action owner) can use CheckAuthorization() for "
"subjects belonging to other identities");
}
g_simple_async_result_complete (simple);
g_object_unref (simple);
goto out;
}
}
|
CWE-200
| 178,460 | 434 |
48230649110562249739254612123636705466
| null | null | null |
xserver
|
dc777c346d5d452a53b13b917c45f6a1bad2f20b
| 1 |
ProcPutImage(ClientPtr client)
{
GC *pGC;
DrawablePtr pDraw;
long length; /* length of scanline server padded */
long lengthProto; /* length of scanline protocol padded */
char *tmpImage;
REQUEST(xPutImageReq);
REQUEST_AT_LEAST_SIZE(xPutImageReq);
VALIDATE_DRAWABLE_AND_GC(stuff->drawable, pDraw, DixWriteAccess);
if (stuff->format == XYBitmap) {
if ((stuff->depth != 1) ||
(stuff->leftPad >= (unsigned int) screenInfo.bitmapScanlinePad))
return BadMatch;
length = BitmapBytePad(stuff->width + stuff->leftPad);
}
else if (stuff->format == XYPixmap) {
if ((pDraw->depth != stuff->depth) ||
(stuff->leftPad >= (unsigned int) screenInfo.bitmapScanlinePad))
return BadMatch;
length = BitmapBytePad(stuff->width + stuff->leftPad);
length *= stuff->depth;
}
else if (stuff->format == ZPixmap) {
if ((pDraw->depth != stuff->depth) || (stuff->leftPad != 0))
return BadMatch;
length = PixmapBytePad(stuff->width, stuff->depth);
}
else {
client->errorValue = stuff->format;
return BadValue;
}
tmpImage = (char *) &stuff[1];
lengthProto = length;
if (lengthProto >= (INT32_MAX / stuff->height))
return BadLength;
if ((bytes_to_int32(lengthProto * stuff->height) +
bytes_to_int32(sizeof(xPutImageReq))) != client->req_len)
return BadLength;
ReformatImage(tmpImage, lengthProto * stuff->height,
stuff->format == ZPixmap ? BitsPerPixel(stuff->depth) : 1,
ClientOrder(client));
(*pGC->ops->PutImage) (pDraw, pGC, stuff->depth, stuff->dstX, stuff->dstY,
stuff->width, stuff->height,
stuff->leftPad, stuff->format, tmpImage);
return Success;
}
|
CWE-369
| 178,480 | 436 |
182090194953869765765912928868079167338
| null | null | null |
openssl
|
d81a1600588b726c2bdccda7efad3cc7a87d6245
| 1 |
static int get_client_hello(SSL *s)
{
int i, n;
unsigned long len;
unsigned char *p;
STACK_OF(SSL_CIPHER) *cs; /* a stack of SSL_CIPHERS */
STACK_OF(SSL_CIPHER) *cl; /* the ones we want to use */
STACK_OF(SSL_CIPHER) *prio, *allow;
int z;
/*
* This is a bit of a hack to check for the correct packet type the first
* time round.
*/
if (s->state == SSL2_ST_GET_CLIENT_HELLO_A) {
s->first_packet = 1;
s->state = SSL2_ST_GET_CLIENT_HELLO_B;
}
p = (unsigned char *)s->init_buf->data;
if (s->state == SSL2_ST_GET_CLIENT_HELLO_B) {
i = ssl2_read(s, (char *)&(p[s->init_num]), 9 - s->init_num);
if (i < (9 - s->init_num))
return (ssl2_part_read(s, SSL_F_GET_CLIENT_HELLO, i));
s->init_num = 9;
if (*(p++) != SSL2_MT_CLIENT_HELLO) {
if (p[-1] != SSL2_MT_ERROR) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_READ_WRONG_PACKET_TYPE);
} else
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_PEER_ERROR);
return (-1);
}
n2s(p, i);
if (i < s->version)
s->version = i;
n2s(p, i);
s->s2->tmp.cipher_spec_length = i;
n2s(p, i);
s->s2->tmp.session_id_length = i;
if ((i < 0) || (i > SSL_MAX_SSL_SESSION_ID_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_LENGTH_MISMATCH);
return -1;
}
n2s(p, i);
s->s2->challenge_length = i;
if ((i < SSL2_MIN_CHALLENGE_LENGTH) ||
(i > SSL2_MAX_CHALLENGE_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_INVALID_CHALLENGE_LENGTH);
return (-1);
}
s->state = SSL2_ST_GET_CLIENT_HELLO_C;
}
/* SSL2_ST_GET_CLIENT_HELLO_C */
p = (unsigned char *)s->init_buf->data;
len =
9 + (unsigned long)s->s2->tmp.cipher_spec_length +
(unsigned long)s->s2->challenge_length +
(unsigned long)s->s2->tmp.session_id_length;
if (len > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_MESSAGE_TOO_LONG);
return -1;
}
n = (int)len - s->init_num;
i = ssl2_read(s, (char *)&(p[s->init_num]), n);
if (i != n)
return (ssl2_part_read(s, SSL_F_GET_CLIENT_HELLO, i));
if (s->msg_callback) {
/* CLIENT-HELLO */
s->msg_callback(0, s->version, 0, p, (size_t)len, s,
s->msg_callback_arg);
}
p += 9;
/*
* get session-id before cipher stuff so we can get out session structure
* if it is cached
*/
/* session-id */
if ((s->s2->tmp.session_id_length != 0) &&
(s->s2->tmp.session_id_length != SSL2_SSL_SESSION_ID_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_BAD_SSL_SESSION_ID_LENGTH);
return (-1);
}
if (s->s2->tmp.session_id_length == 0) {
if (!ssl_get_new_session(s, 1)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
}
} else {
i = ssl_get_prev_session(s, &(p[s->s2->tmp.cipher_spec_length]),
s->s2->tmp.session_id_length, NULL);
if (i == 1) { /* previous session */
s->hit = 1;
} else if (i == -1) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
} else {
if (s->cert == NULL) {
ssl2_return_error(s, SSL2_PE_NO_CERTIFICATE);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_NO_CERTIFICATE_SET);
return (-1);
}
if (!ssl_get_new_session(s, 1)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
}
}
}
if (!s->hit) {
cs = ssl_bytes_to_cipher_list(s, p, s->s2->tmp.cipher_spec_length,
&s->session->ciphers);
if (cs == NULL)
goto mem_err;
cl = SSL_get_ciphers(s);
if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) {
prio = sk_SSL_CIPHER_dup(cl);
if (prio == NULL)
goto mem_err;
allow = cs;
} else {
prio = cs;
allow = cl;
}
for (z = 0; z < sk_SSL_CIPHER_num(prio); z++) {
if (sk_SSL_CIPHER_find(allow, sk_SSL_CIPHER_value(prio, z)) < 0) {
(void)sk_SSL_CIPHER_delete(prio, z);
z--;
}
}
/*
sk_SSL_CIPHER_free(s->session->ciphers);
s->session->ciphers = prio;
}
/*
* s->session->ciphers should now have a list of ciphers that are on
* both the client and server. This list is ordered by the order the
if (s->s2->challenge_length > sizeof s->s2->challenge) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
return -1;
}
memcpy(s->s2->challenge, p, (unsigned int)s->s2->challenge_length);
return (1);
mem_err:
SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_MALLOC_FAILURE);
return (0);
}
|
CWE-310
| 178,493 | 437 |
85211151657027253379136284340889459663
| null | null | null |
openssl
|
d81a1600588b726c2bdccda7efad3cc7a87d6245
| 1 |
static int get_client_master_key(SSL *s)
{
int is_export, i, n, keya;
unsigned int num_encrypted_key_bytes, key_length;
unsigned long len;
unsigned char *p;
const SSL_CIPHER *cp;
const EVP_CIPHER *c;
const EVP_MD *md;
unsigned char rand_premaster_secret[SSL_MAX_MASTER_KEY_LENGTH];
unsigned char decrypt_good;
size_t j;
p = (unsigned char *)s->init_buf->data;
if (s->state == SSL2_ST_GET_CLIENT_MASTER_KEY_A) {
i = ssl2_read(s, (char *)&(p[s->init_num]), 10 - s->init_num);
if (i < (10 - s->init_num))
return (ssl2_part_read(s, SSL_F_GET_CLIENT_MASTER_KEY, i));
s->init_num = 10;
if (*(p++) != SSL2_MT_CLIENT_MASTER_KEY) {
if (p[-1] != SSL2_MT_ERROR) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY,
SSL_R_READ_WRONG_PACKET_TYPE);
} else
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_PEER_ERROR);
return (-1);
}
cp = ssl2_get_cipher_by_char(p);
if (cp == NULL) {
ssl2_return_error(s, SSL2_PE_NO_CIPHER);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_NO_CIPHER_MATCH);
return (-1);
}
s->session->cipher = cp;
p += 3;
n2s(p, i);
s->s2->tmp.clear = i;
n2s(p, i);
s->s2->tmp.enc = i;
n2s(p, i);
if (i > SSL_MAX_KEY_ARG_LENGTH) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_KEY_ARG_TOO_LONG);
return -1;
}
s->session->key_arg_length = i;
s->state = SSL2_ST_GET_CLIENT_MASTER_KEY_B;
}
/* SSL2_ST_GET_CLIENT_MASTER_KEY_B */
p = (unsigned char *)s->init_buf->data;
if (s->init_buf->length < SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, ERR_R_INTERNAL_ERROR);
return -1;
}
keya = s->session->key_arg_length;
len =
10 + (unsigned long)s->s2->tmp.clear + (unsigned long)s->s2->tmp.enc +
(unsigned long)keya;
if (len > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_MESSAGE_TOO_LONG);
return -1;
}
n = (int)len - s->init_num;
i = ssl2_read(s, (char *)&(p[s->init_num]), n);
if (i != n)
return (ssl2_part_read(s, SSL_F_GET_CLIENT_MASTER_KEY, i));
if (s->msg_callback) {
/* CLIENT-MASTER-KEY */
s->msg_callback(0, s->version, 0, p, (size_t)len, s,
s->msg_callback_arg);
}
p += 10;
memcpy(s->session->key_arg, &(p[s->s2->tmp.clear + s->s2->tmp.enc]),
(unsigned int)keya);
if (s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_NO_PRIVATEKEY);
return (-1);
}
is_export = SSL_C_IS_EXPORT(s->session->cipher);
if (!ssl_cipher_get_evp(s->session, &c, &md, NULL, NULL, NULL)) {
ssl2_return_error(s, SSL2_PE_NO_CIPHER);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY,
SSL_R_PROBLEMS_MAPPING_CIPHER_FUNCTIONS);
return (0);
}
/*
* The format of the CLIENT-MASTER-KEY message is
* 1 byte message type
* 3 bytes cipher
* 2-byte clear key length (stored in s->s2->tmp.clear)
* 2-byte encrypted key length (stored in s->s2->tmp.enc)
* 2-byte key args length (IV etc)
* clear key
* encrypted key
* key args
*
* If the cipher is an export cipher, then the encrypted key bytes
* are a fixed portion of the total key (5 or 8 bytes). The size of
* this portion is in |num_encrypted_key_bytes|. If the cipher is not an
* export cipher, then the entire key material is encrypted (i.e., clear
* key length must be zero).
*/
key_length = (unsigned int)EVP_CIPHER_key_length(c);
if (key_length > SSL_MAX_MASTER_KEY_LENGTH) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, ERR_R_INTERNAL_ERROR);
return -1;
}
if (s->session->cipher->algorithm2 & SSL2_CF_8_BYTE_ENC) {
is_export = 1;
num_encrypted_key_bytes = 8;
} else if (is_export) {
num_encrypted_key_bytes = 5;
} else {
num_encrypted_key_bytes = key_length;
}
if (s->s2->tmp.clear + num_encrypted_key_bytes != key_length) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY,SSL_R_BAD_LENGTH);
return -1;
}
/*
* The encrypted blob must decrypt to the encrypted portion of the key.
* Decryption can't be expanding, so if we don't have enough encrypted
* bytes to fit the key in the buffer, stop now.
*/
if (s->s2->tmp.enc < num_encrypted_key_bytes) {
ssl2_return_error(s,SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_MASTER_KEY,SSL_R_LENGTH_TOO_SHORT);
return -1;
}
/*
* We must not leak whether a decryption failure occurs because of
* Bleichenbacher's attack on PKCS #1 v1.5 RSA padding (see RFC 2246,
* section 7.4.7.1). The code follows that advice of the TLS RFC and
* generates a random premaster secret for the case that the decrypt
* fails. See https://tools.ietf.org/html/rfc5246#section-7.4.7.1
*/
/*
* should be RAND_bytes, but we cannot work around a failure.
*/
if (RAND_pseudo_bytes(rand_premaster_secret,
(int)num_encrypted_key_bytes) <= 0)
return 0;
i = ssl_rsa_private_decrypt(s->cert, s->s2->tmp.enc,
&(p[s->s2->tmp.clear]),
&(p[s->s2->tmp.clear]),
(s->s2->ssl2_rollback) ? RSA_SSLV23_PADDING :
RSA_PKCS1_PADDING);
ERR_clear_error();
/*
* If a bad decrypt, continue with protocol but with a random master
* secret (Bleichenbacher attack)
*/
decrypt_good = constant_time_eq_int_8(i, (int)num_encrypted_key_bytes);
for (j = 0; j < num_encrypted_key_bytes; j++) {
p[s->s2->tmp.clear + j] =
constant_time_select_8(decrypt_good, p[s->s2->tmp.clear + j],
rand_premaster_secret[j]);
}
s->session->master_key_length = (int)key_length;
memcpy(s->session->master_key, p, key_length);
OPENSSL_cleanse(p, key_length);
return 1;
}
|
CWE-310
| 178,494 | 438 |
233728387004968835396975914836958756635
| null | null | null |
openssl
|
6939eab03a6e23d2bd2c3f5e34fe1d48e542e787
| 1 |
static int rsa_builtin_keygen(RSA *rsa, int bits, BIGNUM *e_value,
BN_GENCB *cb)
{
BIGNUM *r0 = NULL, *r1 = NULL, *r2 = NULL, *r3 = NULL, *tmp;
int bitsp, bitsq, ok = -1, n = 0;
BN_CTX *ctx = NULL;
unsigned long error = 0;
/*
* When generating ridiculously small keys, we can get stuck
* continually regenerating the same prime values.
*/
if (bits < 16) {
ok = 0; /* we set our own err */
RSAerr(RSA_F_RSA_BUILTIN_KEYGEN, RSA_R_KEY_SIZE_TOO_SMALL);
goto err;
}
ctx = BN_CTX_new();
if (ctx == NULL)
goto err;
BN_CTX_start(ctx);
r0 = BN_CTX_get(ctx);
r1 = BN_CTX_get(ctx);
r2 = BN_CTX_get(ctx);
r3 = BN_CTX_get(ctx);
if (r3 == NULL)
goto err;
bitsp = (bits + 1) / 2;
bitsq = bits - bitsp;
/* We need the RSA components non-NULL */
if (!rsa->n && ((rsa->n = BN_new()) == NULL))
goto err;
if (!rsa->d && ((rsa->d = BN_secure_new()) == NULL))
goto err;
if (!rsa->e && ((rsa->e = BN_new()) == NULL))
goto err;
if (!rsa->p && ((rsa->p = BN_secure_new()) == NULL))
goto err;
if (!rsa->q && ((rsa->q = BN_secure_new()) == NULL))
goto err;
if (!rsa->dmp1 && ((rsa->dmp1 = BN_secure_new()) == NULL))
goto err;
if (!rsa->dmq1 && ((rsa->dmq1 = BN_secure_new()) == NULL))
goto err;
if (!rsa->iqmp && ((rsa->iqmp = BN_secure_new()) == NULL))
goto err;
if (BN_copy(rsa->e, e_value) == NULL)
goto err;
BN_set_flags(r2, BN_FLG_CONSTTIME);
/* generate p and q */
for (;;) {
if (!BN_sub(r2, rsa->p, BN_value_one()))
goto err;
ERR_set_mark();
if (BN_mod_inverse(r1, r2, rsa->e, ctx) != NULL) {
/* GCD == 1 since inverse exists */
break;
}
error = ERR_peek_last_error();
if (ERR_GET_LIB(error) == ERR_LIB_BN
&& ERR_GET_REASON(error) == BN_R_NO_INVERSE) {
/* GCD != 1 */
ERR_pop_to_mark();
} else {
goto err;
}
if (!BN_GENCB_call(cb, 2, n++))
goto err;
}
if (!BN_GENCB_call(cb, 3, 0))
goto err;
for (;;) {
do {
if (!BN_generate_prime_ex(rsa->q, bitsq, 0, NULL, NULL, cb))
goto err;
} while (BN_cmp(rsa->p, rsa->q) == 0);
if (!BN_sub(r2, rsa->q, BN_value_one()))
goto err;
ERR_set_mark();
if (BN_mod_inverse(r1, r2, rsa->e, ctx) != NULL) {
/* GCD == 1 since inverse exists */
break;
}
error = ERR_peek_last_error();
if (ERR_GET_LIB(error) == ERR_LIB_BN
&& ERR_GET_REASON(error) == BN_R_NO_INVERSE) {
/* GCD != 1 */
ERR_pop_to_mark();
} else {
goto err;
}
if (!BN_GENCB_call(cb, 2, n++))
goto err;
}
if (!BN_GENCB_call(cb, 3, 1))
goto err;
if (BN_cmp(rsa->p, rsa->q) < 0) {
tmp = rsa->p;
rsa->p = rsa->q;
rsa->q = tmp;
}
/* calculate n */
if (!BN_mul(rsa->n, rsa->p, rsa->q, ctx))
goto err;
/* calculate d */
if (!BN_sub(r1, rsa->p, BN_value_one()))
goto err; /* p-1 */
if (!BN_sub(r2, rsa->q, BN_value_one()))
goto err; /* q-1 */
if (!BN_mul(r0, r1, r2, ctx))
goto err; /* (p-1)(q-1) */
{
BIGNUM *pr0 = BN_new();
if (pr0 == NULL)
goto err;
BN_with_flags(pr0, r0, BN_FLG_CONSTTIME);
if (!BN_mod_inverse(rsa->d, rsa->e, pr0, ctx)) {
BN_free(pr0);
goto err; /* d */
}
/* We MUST free pr0 before any further use of r0 */
BN_free(pr0);
}
{
BIGNUM *d = BN_new();
if (d == NULL)
goto err;
BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
if ( /* calculate d mod (p-1) */
!BN_mod(rsa->dmp1, d, r1, ctx)
/* calculate d mod (q-1) */
|| !BN_mod(rsa->dmq1, d, r2, ctx)) {
BN_free(d);
goto err;
}
/* We MUST free d before any further use of rsa->d */
BN_free(d);
}
{
BIGNUM *p = BN_new();
if (p == NULL)
goto err;
BN_with_flags(p, rsa->p, BN_FLG_CONSTTIME);
/* calculate inverse of q mod p */
if (!BN_mod_inverse(rsa->iqmp, rsa->q, p, ctx)) {
BN_free(p);
goto err;
}
/* We MUST free p before any further use of rsa->p */
BN_free(p);
}
ok = 1;
err:
if (ok == -1) {
RSAerr(RSA_F_RSA_BUILTIN_KEYGEN, ERR_LIB_BN);
ok = 0;
}
if (ctx != NULL)
BN_CTX_end(ctx);
BN_CTX_free(ctx);
return ok;
}
|
CWE-327
| 178,499 | 442 |
225489912167581908057504415287947179952
| null | null | null |
openssl
|
349a41da1ad88ad87825414752a8ff5fdd6a6c3f
| 1 |
static int rsa_builtin_keygen(RSA *rsa, int bits, BIGNUM *e_value,
BN_GENCB *cb)
{
BIGNUM *r0 = NULL, *r1 = NULL, *r2 = NULL, *r3 = NULL, *tmp;
BIGNUM local_r0, local_d, local_p;
BIGNUM *pr0, *d, *p;
int bitsp, bitsq, ok = -1, n = 0;
BN_CTX *ctx = NULL;
unsigned long error = 0;
/*
* When generating ridiculously small keys, we can get stuck
* continually regenerating the same prime values.
*/
if (bits < 16) {
ok = 0; /* we set our own err */
RSAerr(RSA_F_RSA_BUILTIN_KEYGEN, RSA_R_KEY_SIZE_TOO_SMALL);
goto err;
}
ctx = BN_CTX_new();
if (ctx == NULL)
goto err;
BN_CTX_start(ctx);
r0 = BN_CTX_get(ctx);
r1 = BN_CTX_get(ctx);
r2 = BN_CTX_get(ctx);
r3 = BN_CTX_get(ctx);
if (r3 == NULL)
goto err;
bitsp = (bits + 1) / 2;
bitsq = bits - bitsp;
/* We need the RSA components non-NULL */
if (!rsa->n && ((rsa->n = BN_new()) == NULL))
goto err;
if (!rsa->d && ((rsa->d = BN_new()) == NULL))
goto err;
if (!rsa->e && ((rsa->e = BN_new()) == NULL))
goto err;
if (!rsa->p && ((rsa->p = BN_new()) == NULL))
goto err;
if (!rsa->q && ((rsa->q = BN_new()) == NULL))
goto err;
if (!rsa->dmp1 && ((rsa->dmp1 = BN_new()) == NULL))
goto err;
if (!rsa->dmq1 && ((rsa->dmq1 = BN_new()) == NULL))
goto err;
if (!rsa->iqmp && ((rsa->iqmp = BN_new()) == NULL))
goto err;
if (BN_copy(rsa->e, e_value) == NULL)
goto err;
BN_set_flags(r2, BN_FLG_CONSTTIME);
/* generate p and q */
for (;;) {
if (!BN_sub(r2, rsa->p, BN_value_one()))
goto err;
ERR_set_mark();
if (BN_mod_inverse(r1, r2, rsa->e, ctx) != NULL) {
/* GCD == 1 since inverse exists */
break;
}
error = ERR_peek_last_error();
if (ERR_GET_LIB(error) == ERR_LIB_BN
&& ERR_GET_REASON(error) == BN_R_NO_INVERSE) {
/* GCD != 1 */
ERR_pop_to_mark();
} else {
goto err;
}
if (!BN_GENCB_call(cb, 2, n++))
goto err;
}
if (!BN_GENCB_call(cb, 3, 0))
goto err;
for (;;) {
do {
if (!BN_generate_prime_ex(rsa->q, bitsq, 0, NULL, NULL, cb))
goto err;
} while (BN_cmp(rsa->p, rsa->q) == 0);
if (!BN_sub(r2, rsa->q, BN_value_one()))
goto err;
ERR_set_mark();
if (BN_mod_inverse(r1, r2, rsa->e, ctx) != NULL) {
/* GCD == 1 since inverse exists */
break;
}
error = ERR_peek_last_error();
if (ERR_GET_LIB(error) == ERR_LIB_BN
&& ERR_GET_REASON(error) == BN_R_NO_INVERSE) {
/* GCD != 1 */
ERR_pop_to_mark();
} else {
goto err;
}
if (!BN_GENCB_call(cb, 2, n++))
goto err;
}
if (!BN_GENCB_call(cb, 3, 1))
goto err;
if (BN_cmp(rsa->p, rsa->q) < 0) {
tmp = rsa->p;
rsa->p = rsa->q;
rsa->q = tmp;
}
/* calculate n */
if (!BN_mul(rsa->n, rsa->p, rsa->q, ctx))
goto err;
/* calculate d */
if (!BN_sub(r1, rsa->p, BN_value_one()))
goto err; /* p-1 */
if (!BN_sub(r2, rsa->q, BN_value_one()))
goto err; /* q-1 */
if (!BN_mul(r0, r1, r2, ctx))
goto err; /* (p-1)(q-1) */
if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
pr0 = &local_r0;
BN_with_flags(pr0, r0, BN_FLG_CONSTTIME);
} else
pr0 = r0;
if (!BN_mod_inverse(rsa->d, rsa->e, pr0, ctx))
goto err; /* d */
/* set up d for correct BN_FLG_CONSTTIME flag */
if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
d = &local_d;
BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
} else
d = rsa->d;
/* calculate d mod (p-1) */
if (!BN_mod(rsa->dmp1, d, r1, ctx))
goto err;
/* calculate d mod (q-1) */
if (!BN_mod(rsa->dmq1, d, r2, ctx))
goto err;
/* calculate inverse of q mod p */
if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
p = &local_p;
BN_with_flags(p, rsa->p, BN_FLG_CONSTTIME);
} else
p = rsa->p;
if (!BN_mod_inverse(rsa->iqmp, rsa->q, p, ctx))
goto err;
ok = 1;
err:
if (ok == -1) {
RSAerr(RSA_F_RSA_BUILTIN_KEYGEN, ERR_LIB_BN);
ok = 0;
}
if (ctx != NULL) {
BN_CTX_end(ctx);
BN_CTX_free(ctx);
}
return ok;
}
|
CWE-327
| 178,500 | 443 |
154866957840689228453067859214166162686
| null | null | null |
openssl
|
56fb454d281a023b3f950d969693553d3f3ceea1
| 1 |
static int ec_mul_consttime(const EC_GROUP *group, EC_POINT *r,
const BIGNUM *scalar, const EC_POINT *point,
BN_CTX *ctx)
{
int i, cardinality_bits, group_top, kbit, pbit, Z_is_one;
EC_POINT *s = NULL;
BIGNUM *k = NULL;
BIGNUM *lambda = NULL;
BIGNUM *cardinality = NULL;
BN_CTX *new_ctx = NULL;
int ret = 0;
if (ctx == NULL && (ctx = new_ctx = BN_CTX_secure_new()) == NULL)
return 0;
BN_CTX_start(ctx);
s = EC_POINT_new(group);
if (s == NULL)
goto err;
if (point == NULL) {
if (!EC_POINT_copy(s, group->generator))
goto err;
} else {
if (!EC_POINT_copy(s, point))
goto err;
}
EC_POINT_BN_set_flags(s, BN_FLG_CONSTTIME);
cardinality = BN_CTX_get(ctx);
lambda = BN_CTX_get(ctx);
k = BN_CTX_get(ctx);
if (k == NULL || !BN_mul(cardinality, group->order, group->cofactor, ctx))
goto err;
/*
* Group cardinalities are often on a word boundary.
* So when we pad the scalar, some timing diff might
* pop if it needs to be expanded due to carries.
* So expand ahead of time.
*/
cardinality_bits = BN_num_bits(cardinality);
group_top = bn_get_top(cardinality);
if ((bn_wexpand(k, group_top + 1) == NULL)
|| (bn_wexpand(lambda, group_top + 1) == NULL))
goto err;
if (!BN_copy(k, scalar))
goto err;
BN_set_flags(k, BN_FLG_CONSTTIME);
if ((BN_num_bits(k) > cardinality_bits) || (BN_is_negative(k))) {
/*-
* this is an unusual input, and we don't guarantee
* constant-timeness
*/
if (!BN_nnmod(k, k, cardinality, ctx))
goto err;
}
if (!BN_add(lambda, k, cardinality))
goto err;
BN_set_flags(lambda, BN_FLG_CONSTTIME);
if (!BN_add(k, lambda, cardinality))
goto err;
/*
* lambda := scalar + cardinality
* k := scalar + 2*cardinality
*/
kbit = BN_is_bit_set(lambda, cardinality_bits);
BN_consttime_swap(kbit, k, lambda, group_top + 1);
group_top = bn_get_top(group->field);
if ((bn_wexpand(s->X, group_top) == NULL)
|| (bn_wexpand(s->Y, group_top) == NULL)
|| (bn_wexpand(s->Z, group_top) == NULL)
|| (bn_wexpand(r->X, group_top) == NULL)
|| (bn_wexpand(r->Y, group_top) == NULL)
|| (bn_wexpand(r->Z, group_top) == NULL))
goto err;
/*-
* Apply coordinate blinding for EC_POINT.
*
* The underlying EC_METHOD can optionally implement this function:
* ec_point_blind_coordinates() returns 0 in case of errors or 1 on
* success or if coordinate blinding is not implemented for this
* group.
*/
if (!ec_point_blind_coordinates(group, s, ctx))
goto err;
/* top bit is a 1, in a fixed pos */
if (!EC_POINT_copy(r, s))
goto err;
EC_POINT_BN_set_flags(r, BN_FLG_CONSTTIME);
if (!EC_POINT_dbl(group, s, s, ctx))
goto err;
pbit = 0;
#define EC_POINT_CSWAP(c, a, b, w, t) do { \
BN_consttime_swap(c, (a)->X, (b)->X, w); \
BN_consttime_swap(c, (a)->Y, (b)->Y, w); \
BN_consttime_swap(c, (a)->Z, (b)->Z, w); \
t = ((a)->Z_is_one ^ (b)->Z_is_one) & (c); \
(a)->Z_is_one ^= (t); \
(b)->Z_is_one ^= (t); \
} while(0)
/*-
* The ladder step, with branches, is
*
* k[i] == 0: S = add(R, S), R = dbl(R)
* k[i] == 1: R = add(S, R), S = dbl(S)
*
* Swapping R, S conditionally on k[i] leaves you with state
*
* k[i] == 0: T, U = R, S
* k[i] == 1: T, U = S, R
*
* Then perform the ECC ops.
*
* U = add(T, U)
* T = dbl(T)
*
* Which leaves you with state
*
* k[i] == 0: U = add(R, S), T = dbl(R)
* k[i] == 1: U = add(S, R), T = dbl(S)
*
* Swapping T, U conditionally on k[i] leaves you with state
*
* k[i] == 0: R, S = T, U
* k[i] == 1: R, S = U, T
*
* Which leaves you with state
*
* k[i] == 0: S = add(R, S), R = dbl(R)
* k[i] == 1: R = add(S, R), S = dbl(S)
*
* So we get the same logic, but instead of a branch it's a
* conditional swap, followed by ECC ops, then another conditional swap.
*
* Optimization: The end of iteration i and start of i-1 looks like
*
* ...
* CSWAP(k[i], R, S)
* ECC
* CSWAP(k[i], R, S)
* (next iteration)
* CSWAP(k[i-1], R, S)
* ECC
* CSWAP(k[i-1], R, S)
* ...
*
* So instead of two contiguous swaps, you can merge the condition
* bits and do a single swap.
*
* k[i] k[i-1] Outcome
* 0 0 No Swap
* 0 1 Swap
* 1 0 Swap
* 1 1 No Swap
*
* This is XOR. pbit tracks the previous bit of k.
*/
for (i = cardinality_bits - 1; i >= 0; i--) {
kbit = BN_is_bit_set(k, i) ^ pbit;
EC_POINT_CSWAP(kbit, r, s, group_top, Z_is_one);
if (!EC_POINT_add(group, s, r, s, ctx))
goto err;
if (!EC_POINT_dbl(group, r, r, ctx))
goto err;
/*
* pbit logic merges this cswap with that of the
* next iteration
*/
pbit ^= kbit;
}
/* one final cswap to move the right value into r */
EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
#undef EC_POINT_CSWAP
ret = 1;
err:
EC_POINT_free(s);
BN_CTX_end(ctx);
BN_CTX_free(new_ctx);
return ret;
}
|
CWE-320
| 178,501 | 444 |
157541399300353632904420128039710031599
| null | null | null |
openssl
|
b1d6d55ece1c26fa2829e2b819b038d7b6d692b4
| 1 |
int ec_scalar_mul_ladder(const EC_GROUP *group, EC_POINT *r,
const BIGNUM *scalar, const EC_POINT *point,
BN_CTX *ctx)
{
int i, cardinality_bits, group_top, kbit, pbit, Z_is_one;
EC_POINT *p = NULL;
EC_POINT *s = NULL;
BIGNUM *k = NULL;
BIGNUM *lambda = NULL;
BIGNUM *cardinality = NULL;
int ret = 0;
/* early exit if the input point is the point at infinity */
if (point != NULL && EC_POINT_is_at_infinity(group, point))
return EC_POINT_set_to_infinity(group, r);
if (BN_is_zero(group->order)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_UNKNOWN_ORDER);
return 0;
}
if (BN_is_zero(group->cofactor)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_UNKNOWN_COFACTOR);
return 0;
}
BN_CTX_start(ctx);
if (((p = EC_POINT_new(group)) == NULL)
|| ((s = EC_POINT_new(group)) == NULL)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_MALLOC_FAILURE);
goto err;
}
if (point == NULL) {
if (!EC_POINT_copy(p, group->generator)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_EC_LIB);
goto err;
}
} else {
if (!EC_POINT_copy(p, point)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_EC_LIB);
goto err;
}
}
EC_POINT_BN_set_flags(p, BN_FLG_CONSTTIME);
EC_POINT_BN_set_flags(r, BN_FLG_CONSTTIME);
EC_POINT_BN_set_flags(s, BN_FLG_CONSTTIME);
cardinality = BN_CTX_get(ctx);
lambda = BN_CTX_get(ctx);
k = BN_CTX_get(ctx);
if (k == NULL) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_MALLOC_FAILURE);
goto err;
}
if (!BN_mul(cardinality, group->order, group->cofactor, ctx)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
goto err;
}
/*
* Group cardinalities are often on a word boundary.
* So when we pad the scalar, some timing diff might
* pop if it needs to be expanded due to carries.
* So expand ahead of time.
*/
cardinality_bits = BN_num_bits(cardinality);
group_top = bn_get_top(cardinality);
if ((bn_wexpand(k, group_top + 1) == NULL)
|| (bn_wexpand(lambda, group_top + 1) == NULL)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
goto err;
}
if (!BN_copy(k, scalar)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
goto err;
}
BN_set_flags(k, BN_FLG_CONSTTIME);
if ((BN_num_bits(k) > cardinality_bits) || (BN_is_negative(k))) {
/*-
* this is an unusual input, and we don't guarantee
* constant-timeness
*/
if (!BN_nnmod(k, k, cardinality, ctx)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
goto err;
}
}
if (!BN_add(lambda, k, cardinality)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
goto err;
}
BN_set_flags(lambda, BN_FLG_CONSTTIME);
if (!BN_add(k, lambda, cardinality)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
goto err;
}
/*
* lambda := scalar + cardinality
* k := scalar + 2*cardinality
*/
kbit = BN_is_bit_set(lambda, cardinality_bits);
BN_consttime_swap(kbit, k, lambda, group_top + 1);
group_top = bn_get_top(group->field);
if ((bn_wexpand(s->X, group_top) == NULL)
|| (bn_wexpand(s->Y, group_top) == NULL)
|| (bn_wexpand(s->Z, group_top) == NULL)
|| (bn_wexpand(r->X, group_top) == NULL)
|| (bn_wexpand(r->Y, group_top) == NULL)
|| (bn_wexpand(r->Z, group_top) == NULL)
|| (bn_wexpand(p->X, group_top) == NULL)
|| (bn_wexpand(p->Y, group_top) == NULL)
|| (bn_wexpand(p->Z, group_top) == NULL)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
goto err;
}
/*-
* Apply coordinate blinding for EC_POINT.
*
* The underlying EC_METHOD can optionally implement this function:
* ec_point_blind_coordinates() returns 0 in case of errors or 1 on
* success or if coordinate blinding is not implemented for this
* group.
*/
if (!ec_point_blind_coordinates(group, p, ctx)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_POINT_COORDINATES_BLIND_FAILURE);
goto err;
}
/* Initialize the Montgomery ladder */
if (!ec_point_ladder_pre(group, r, s, p, ctx)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_PRE_FAILURE);
goto err;
}
/* top bit is a 1, in a fixed pos */
pbit = 1;
#define EC_POINT_CSWAP(c, a, b, w, t) do { \
BN_consttime_swap(c, (a)->X, (b)->X, w); \
BN_consttime_swap(c, (a)->Y, (b)->Y, w); \
BN_consttime_swap(c, (a)->Z, (b)->Z, w); \
t = ((a)->Z_is_one ^ (b)->Z_is_one) & (c); \
(a)->Z_is_one ^= (t); \
(b)->Z_is_one ^= (t); \
} while(0)
/*-
* The ladder step, with branches, is
*
* k[i] == 0: S = add(R, S), R = dbl(R)
* k[i] == 1: R = add(S, R), S = dbl(S)
*
* Swapping R, S conditionally on k[i] leaves you with state
*
* k[i] == 0: T, U = R, S
* k[i] == 1: T, U = S, R
*
* Then perform the ECC ops.
*
* U = add(T, U)
* T = dbl(T)
*
* Which leaves you with state
*
* k[i] == 0: U = add(R, S), T = dbl(R)
* k[i] == 1: U = add(S, R), T = dbl(S)
*
* Swapping T, U conditionally on k[i] leaves you with state
*
* k[i] == 0: R, S = T, U
* k[i] == 1: R, S = U, T
*
* Which leaves you with state
*
* k[i] == 0: S = add(R, S), R = dbl(R)
* k[i] == 1: R = add(S, R), S = dbl(S)
*
* So we get the same logic, but instead of a branch it's a
* conditional swap, followed by ECC ops, then another conditional swap.
*
* Optimization: The end of iteration i and start of i-1 looks like
*
* ...
* CSWAP(k[i], R, S)
* ECC
* CSWAP(k[i], R, S)
* (next iteration)
* CSWAP(k[i-1], R, S)
* ECC
* CSWAP(k[i-1], R, S)
* ...
*
* So instead of two contiguous swaps, you can merge the condition
* bits and do a single swap.
*
* k[i] k[i-1] Outcome
* 0 0 No Swap
* 0 1 Swap
* 1 0 Swap
* 1 1 No Swap
*
* This is XOR. pbit tracks the previous bit of k.
*/
for (i = cardinality_bits - 1; i >= 0; i--) {
kbit = BN_is_bit_set(k, i) ^ pbit;
EC_POINT_CSWAP(kbit, r, s, group_top, Z_is_one);
/* Perform a single step of the Montgomery ladder */
if (!ec_point_ladder_step(group, r, s, p, ctx)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_STEP_FAILURE);
goto err;
}
/*
* pbit logic merges this cswap with that of the
* next iteration
*/
pbit ^= kbit;
}
/* one final cswap to move the right value into r */
EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
#undef EC_POINT_CSWAP
/* Finalize ladder (and recover full point coordinates) */
if (!ec_point_ladder_post(group, r, s, p, ctx)) {
ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_POST_FAILURE);
goto err;
}
ret = 1;
err:
EC_POINT_free(p);
EC_POINT_free(s);
BN_CTX_end(ctx);
return ret;
}
|
CWE-320
| 178,502 | 445 |
320901504703671988701424467508914396681
| null | null | null |
openssl
|
43e6a58d4991a451daf4891ff05a48735df871ac
| 1 |
static int dsa_sign_setup(DSA *dsa, BN_CTX *ctx_in, BIGNUM **kinvp,
BIGNUM **rp)
{
BN_CTX *ctx;
BIGNUM k, kq, *K, *kinv = NULL, *r = NULL;
BIGNUM l, m;
int ret = 0;
int q_bits;
if (!dsa->p || !dsa->q || !dsa->g) {
DSAerr(DSA_F_DSA_SIGN_SETUP, DSA_R_MISSING_PARAMETERS);
return 0;
}
BN_init(&k);
BN_init(&kq);
BN_init(&l);
BN_init(&m);
if (ctx_in == NULL) {
if ((ctx = BN_CTX_new()) == NULL)
goto err;
} else
ctx = ctx_in;
if ((r = BN_new()) == NULL)
goto err;
/* Preallocate space */
q_bits = BN_num_bits(dsa->q);
if (!BN_set_bit(&k, q_bits)
|| !BN_set_bit(&l, q_bits)
|| !BN_set_bit(&m, q_bits))
goto err;
/* Get random k */
do
if (!BN_rand_range(&k, dsa->q))
goto err;
while (BN_is_zero(&k));
if ((dsa->flags & DSA_FLAG_NO_EXP_CONSTTIME) == 0) {
BN_set_flags(&k, BN_FLG_CONSTTIME);
}
if (dsa->flags & DSA_FLAG_CACHE_MONT_P) {
if (!BN_MONT_CTX_set_locked(&dsa->method_mont_p,
CRYPTO_LOCK_DSA, dsa->p, ctx))
goto err;
}
/* Compute r = (g^k mod p) mod q */
if ((dsa->flags & DSA_FLAG_NO_EXP_CONSTTIME) == 0) {
/*
* We do not want timing information to leak the length of k, so we
* compute G^k using an equivalent scalar of fixed bit-length.
*
* We unconditionally perform both of these additions to prevent a
* small timing information leakage. We then choose the sum that is
* one bit longer than the modulus.
*
* TODO: revisit the BN_copy aiming for a memory access agnostic
* conditional copy.
*/
if (!BN_add(&l, &k, dsa->q)
|| !BN_add(&m, &l, dsa->q)
|| !BN_copy(&kq, BN_num_bits(&l) > q_bits ? &l : &m))
goto err;
BN_set_flags(&kq, BN_FLG_CONSTTIME);
K = &kq;
} else {
K = &k;
}
DSA_BN_MOD_EXP(goto err, dsa, r, dsa->g, K, dsa->p, ctx,
dsa->method_mont_p);
if (!BN_mod(r, r, dsa->q, ctx))
goto err;
/* Compute part of 's = inv(k) (m + xr) mod q' */
if ((kinv = BN_mod_inverse(NULL, &k, dsa->q, ctx)) == NULL)
goto err;
if (*kinvp != NULL)
BN_clear_free(*kinvp);
*kinvp = kinv;
kinv = NULL;
if (*rp != NULL)
BN_clear_free(*rp);
*rp = r;
ret = 1;
err:
if (!ret) {
DSAerr(DSA_F_DSA_SIGN_SETUP, ERR_R_BN_LIB);
if (r != NULL)
BN_clear_free(r);
}
if (ctx_in == NULL)
BN_CTX_free(ctx);
BN_clear_free(&k);
BN_clear_free(&kq);
BN_clear_free(&l);
BN_clear_free(&m);
return ret;
}
|
CWE-320
| 178,503 | 446 |
105221804155876490897003861168937089366
| null | null | null |
openssl
|
3984ef0b72831da8b3ece4745cac4f8575b19098
| 1 |
static int generate_key(DH *dh)
{
int ok = 0;
int generate_new_key = 0;
unsigned l;
BN_CTX *ctx;
BN_MONT_CTX *mont = NULL;
BIGNUM *pub_key = NULL, *priv_key = NULL;
ctx = BN_CTX_new();
if (ctx == NULL)
goto err;
generate_new_key = 1;
} else
|
CWE-320
| 178,504 | 447 |
163176954131253694890899988904396430434
| null | null | null |
libxfont
|
4d024ac10f964f6bd372ae0dd14f02772a6e5f63
| 1 |
bdfReadCharacters(FontFilePtr file, FontPtr pFont, bdfFileState *pState,
int bit, int byte, int glyph, int scan)
{
unsigned char *line;
register CharInfoPtr ci;
int i,
ndx,
nchars,
nignored;
unsigned int char_row, char_col;
int numEncodedGlyphs = 0;
CharInfoPtr *bdfEncoding[256];
BitmapFontPtr bitmapFont;
BitmapExtraPtr bitmapExtra;
CARD32 *bitmapsSizes;
unsigned char lineBuf[BDFLINELEN];
int nencoding;
bitmapFont = (BitmapFontPtr) pFont->fontPrivate;
bitmapExtra = (BitmapExtraPtr) bitmapFont->bitmapExtra;
if (bitmapExtra) {
bitmapsSizes = bitmapExtra->bitmapsSizes;
for (i = 0; i < GLYPHPADOPTIONS; i++)
bitmapsSizes[i] = 0;
} else
bitmapsSizes = NULL;
bzero(bdfEncoding, sizeof(bdfEncoding));
bitmapFont->metrics = NULL;
ndx = 0;
line = bdfGetLine(file, lineBuf, BDFLINELEN);
if ((!line) || (sscanf((char *) line, "CHARS %d", &nchars) != 1)) {
bdfError("bad 'CHARS' in bdf file\n");
return (FALSE);
}
if (nchars < 1) {
bdfError("invalid number of CHARS in BDF file\n");
return (FALSE);
}
if (nchars > INT32_MAX / sizeof(CharInfoRec)) {
bdfError("Couldn't allocate pCI (%d*%d)\n", nchars,
(int) sizeof(CharInfoRec));
goto BAILOUT;
}
ci = calloc(nchars, sizeof(CharInfoRec));
if (!ci) {
bdfError("Couldn't allocate pCI (%d*%d)\n", nchars,
(int) sizeof(CharInfoRec));
goto BAILOUT;
}
bitmapFont->metrics = ci;
if (bitmapExtra) {
bitmapExtra->glyphNames = malloc(nchars * sizeof(Atom));
if (!bitmapExtra->glyphNames) {
bdfError("Couldn't allocate glyphNames (%d*%d)\n",
nchars, (int) sizeof(Atom));
goto BAILOUT;
}
}
if (bitmapExtra) {
bitmapExtra->sWidths = malloc(nchars * sizeof(int));
if (!bitmapExtra->sWidths) {
bdfError("Couldn't allocate sWidth (%d *%d)\n",
nchars, (int) sizeof(int));
return FALSE;
}
}
line = bdfGetLine(file, lineBuf, BDFLINELEN);
pFont->info.firstRow = 256;
pFont->info.lastRow = 0;
pFont->info.firstCol = 256;
pFont->info.lastCol = 0;
nignored = 0;
for (ndx = 0; (ndx < nchars) && (line) && (bdfIsPrefix(line, "STARTCHAR"));) {
int t;
int wx; /* x component of width */
int wy; /* y component of width */
int bw; /* bounding-box width */
int bh; /* bounding-box height */
int bl; /* bounding-box left */
int bb; /* bounding-box bottom */
int enc,
enc2; /* encoding */
unsigned char *p; /* temp pointer into line */
char charName[100];
int ignore;
if (sscanf((char *) line, "STARTCHAR %s", charName) != 1) {
bdfError("bad character name in BDF file\n");
goto BAILOUT; /* bottom of function, free and return error */
}
if (bitmapExtra)
bitmapExtra->glyphNames[ndx] = bdfForceMakeAtom(charName, NULL);
line = bdfGetLine(file, lineBuf, BDFLINELEN);
if (!line || (t = sscanf((char *) line, "ENCODING %d %d", &enc, &enc2)) < 1) {
bdfError("bad 'ENCODING' in BDF file\n");
goto BAILOUT;
}
if (enc < -1 || (t == 2 && enc2 < -1)) {
bdfError("bad ENCODING value");
goto BAILOUT;
}
if (t == 2 && enc == -1)
enc = enc2;
ignore = 0;
if (enc == -1) {
if (!bitmapExtra) {
nignored++;
ignore = 1;
}
} else if (enc > MAXENCODING) {
bdfError("char '%s' has encoding too large (%d)\n",
charName, enc);
} else {
char_row = (enc >> 8) & 0xFF;
char_col = enc & 0xFF;
if (char_row < pFont->info.firstRow)
pFont->info.firstRow = char_row;
if (char_row > pFont->info.lastRow)
pFont->info.lastRow = char_row;
if (char_col < pFont->info.firstCol)
pFont->info.firstCol = char_col;
if (char_col > pFont->info.lastCol)
pFont->info.lastCol = char_col;
if (bdfEncoding[char_row] == (CharInfoPtr *) NULL) {
bdfEncoding[char_row] = malloc(256 * sizeof(CharInfoPtr));
if (!bdfEncoding[char_row]) {
bdfError("Couldn't allocate row %d of encoding (%d*%d)\n",
char_row, INDICES, (int) sizeof(CharInfoPtr));
goto BAILOUT;
}
for (i = 0; i < 256; i++)
bdfEncoding[char_row][i] = (CharInfoPtr) NULL;
}
if (bdfEncoding[char_row] != NULL) {
bdfEncoding[char_row][char_col] = ci;
numEncodedGlyphs++;
}
}
line = bdfGetLine(file, lineBuf, BDFLINELEN);
if ((!line) || (sscanf((char *) line, "SWIDTH %d %d", &wx, &wy) != 2)) {
bdfError("bad 'SWIDTH'\n");
goto BAILOUT;
}
if (wy != 0) {
bdfError("SWIDTH y value must be zero\n");
goto BAILOUT;
}
if (bitmapExtra)
bitmapExtra->sWidths[ndx] = wx;
/* 5/31/89 (ef) -- we should be able to ditch the character and recover */
/* from all of these. */
line = bdfGetLine(file, lineBuf, BDFLINELEN);
if ((!line) || (sscanf((char *) line, "DWIDTH %d %d", &wx, &wy) != 2)) {
bdfError("bad 'DWIDTH'\n");
goto BAILOUT;
}
if (wy != 0) {
bdfError("DWIDTH y value must be zero\n");
goto BAILOUT;
}
line = bdfGetLine(file, lineBuf, BDFLINELEN);
if ((!line) || (sscanf((char *) line, "BBX %d %d %d %d", &bw, &bh, &bl, &bb) != 4)) {
bdfError("bad 'BBX'\n");
goto BAILOUT;
}
if ((bh < 0) || (bw < 0)) {
bdfError("character '%s' has a negative sized bitmap, %dx%d\n",
charName, bw, bh);
goto BAILOUT;
}
line = bdfGetLine(file, lineBuf, BDFLINELEN);
if ((line) && (bdfIsPrefix(line, "ATTRIBUTES"))) {
for (p = line + strlen("ATTRIBUTES ");
(*p == ' ') || (*p == '\t');
p++)
/* empty for loop */ ;
ci->metrics.attributes = (bdfHexByte(p) << 8) + bdfHexByte(p + 2);
line = bdfGetLine(file, lineBuf, BDFLINELEN);
} else
ci->metrics.attributes = 0;
if (!line || !bdfIsPrefix(line, "BITMAP")) {
bdfError("missing 'BITMAP'\n");
goto BAILOUT;
}
/* collect data for generated properties */
if ((strlen(charName) == 1)) {
if ((charName[0] >= '0') && (charName[0] <= '9')) {
pState->digitWidths += wx;
pState->digitCount++;
} else if (charName[0] == 'x') {
pState->exHeight = (bh + bb) <= 0 ? bh : bh + bb;
}
}
if (!ignore) {
ci->metrics.leftSideBearing = bl;
ci->metrics.rightSideBearing = bl + bw;
ci->metrics.ascent = bh + bb;
ci->metrics.descent = -bb;
ci->metrics.characterWidth = wx;
ci->bits = NULL;
bdfReadBitmap(ci, file, bit, byte, glyph, scan, bitmapsSizes);
ci++;
ndx++;
} else
bdfSkipBitmap(file, bh);
line = bdfGetLine(file, lineBuf, BDFLINELEN); /* get STARTCHAR or
* ENDFONT */
}
if (ndx + nignored != nchars) {
bdfError("%d too few characters\n", nchars - (ndx + nignored));
goto BAILOUT;
}
nchars = ndx;
bitmapFont->num_chars = nchars;
if ((line) && (bdfIsPrefix(line, "STARTCHAR"))) {
bdfError("more characters than specified\n");
goto BAILOUT;
}
if ((!line) || (!bdfIsPrefix(line, "ENDFONT"))) {
bdfError("missing 'ENDFONT'\n");
goto BAILOUT;
}
if (numEncodedGlyphs == 0)
bdfWarning("No characters with valid encodings\n");
nencoding = (pFont->info.lastRow - pFont->info.firstRow + 1) *
(pFont->info.lastCol - pFont->info.firstCol + 1);
bitmapFont->encoding = calloc(NUM_SEGMENTS(nencoding),sizeof(CharInfoPtr*));
if (!bitmapFont->encoding) {
bdfError("Couldn't allocate ppCI (%d,%d)\n",
NUM_SEGMENTS(nencoding),
(int) sizeof(CharInfoPtr*));
goto BAILOUT;
}
pFont->info.allExist = TRUE;
i = 0;
for (char_row = pFont->info.firstRow;
char_row <= pFont->info.lastRow;
char_row++) {
if (bdfEncoding[char_row] == (CharInfoPtr *) NULL) {
pFont->info.allExist = FALSE;
i += pFont->info.lastCol - pFont->info.firstCol + 1;
} else {
for (char_col = pFont->info.firstCol;
char_col <= pFont->info.lastCol;
char_col++) {
if (!bdfEncoding[char_row][char_col])
pFont->info.allExist = FALSE;
else {
if (!bitmapFont->encoding[SEGMENT_MAJOR(i)]) {
bitmapFont->encoding[SEGMENT_MAJOR(i)]=
calloc(BITMAP_FONT_SEGMENT_SIZE,
sizeof(CharInfoPtr));
if (!bitmapFont->encoding[SEGMENT_MAJOR(i)])
goto BAILOUT;
}
ACCESSENCODINGL(bitmapFont->encoding,i) =
bdfEncoding[char_row][char_col];
}
i++;
}
}
}
for (i = 0; i < 256; i++)
if (bdfEncoding[i])
free(bdfEncoding[i]);
return (TRUE);
BAILOUT:
for (i = 0; i < 256; i++)
if (bdfEncoding[i])
free(bdfEncoding[i]);
/* bdfFreeFontBits will clean up the rest */
return (FALSE);
}
|
CWE-119
| 178,505 | 448 |
130454261969297534776999866382375298733
| null | null | null |
enlightment
|
1f9b0b32728803a1578e658cd0955df773e34f49
| 1 |
load(ImlibImage * im, ImlibProgressFunction progress, char progress_granularity,
char immediate_load)
{
static const int intoffset[] = { 0, 4, 2, 1 };
static const int intjump[] = { 8, 8, 4, 2 };
int rc;
DATA32 *ptr;
GifFileType *gif;
GifRowType *rows;
GifRecordType rec;
ColorMapObject *cmap;
int i, j, done, bg, r, g, b, w = 0, h = 0;
float per = 0.0, per_inc;
int last_per = 0, last_y = 0;
int transp;
int fd;
done = 0;
rows = NULL;
transp = -1;
/* if immediate_load is 1, then dont delay image laoding as below, or */
/* already data in this image - dont load it again */
if (im->data)
return 0;
fd = open(im->real_file, O_RDONLY);
if (fd < 0)
return 0;
#if GIFLIB_MAJOR >= 5
gif = DGifOpenFileHandle(fd, NULL);
#else
gif = DGifOpenFileHandle(fd);
#endif
if (!gif)
{
close(fd);
return 0;
}
rc = 0; /* Failure */
do
{
if (DGifGetRecordType(gif, &rec) == GIF_ERROR)
{
/* PrintGifError(); */
rec = TERMINATE_RECORD_TYPE;
}
if ((rec == IMAGE_DESC_RECORD_TYPE) && (!done))
{
if (DGifGetImageDesc(gif) == GIF_ERROR)
{
/* PrintGifError(); */
rec = TERMINATE_RECORD_TYPE;
break;
}
w = gif->Image.Width;
h = gif->Image.Height;
if (!IMAGE_DIMENSIONS_OK(w, h))
goto quit2;
rows = calloc(h, sizeof(GifRowType *));
if (!rows)
goto quit2;
for (i = 0; i < h; i++)
{
rows[i] = malloc(w * sizeof(GifPixelType));
if (!rows[i])
goto quit;
}
if (gif->Image.Interlace)
{
for (i = 0; i < 4; i++)
{
for (j = intoffset[i]; j < h; j += intjump[i])
{
if (DGifGetLine(gif, rows[i], w) == GIF_ERROR)
{
break;
}
}
}
}
else
{
for (i = 0; i < h; i++)
{
if (DGifGetLine(gif, rows[i], w) == GIF_ERROR)
{
break;
}
}
}
done = 1;
}
else if (rec == EXTENSION_RECORD_TYPE)
{
int ext_code;
GifByteType *ext;
ext = NULL;
DGifGetExtension(gif, &ext_code, &ext);
while (ext)
{
if ((ext_code == 0xf9) && (ext[1] & 1) && (transp < 0))
{
transp = (int)ext[4];
}
ext = NULL;
DGifGetExtensionNext(gif, &ext);
}
}
}
while (rec != TERMINATE_RECORD_TYPE);
if (transp >= 0)
{
SET_FLAG(im->flags, F_HAS_ALPHA);
}
else
{
UNSET_FLAG(im->flags, F_HAS_ALPHA);
}
/* set the format string member to the lower-case full extension */
/* name for the format - so example names would be: */
im->format = strdup("gif");
if (im->loader || immediate_load || progress)
{
bg = gif->SBackGroundColor;
cmap = (gif->Image.ColorMap ? gif->Image.ColorMap : gif->SColorMap);
im->data = (DATA32 *) malloc(sizeof(DATA32) * w * h);
if (!im->data)
goto quit;
if (!cmap)
{
/* No colormap? Now what?? Let's clear the image (and not segv) */
memset(im->data, 0, sizeof(DATA32) * w * h);
rc = 1;
goto finish;
}
ptr = im->data;
per_inc = 100.0 / (((float)w) * h);
for (i = 0; i < h; i++)
{
for (j = 0; j < w; j++)
{
if (rows[i][j] == transp)
{
r = cmap->Colors[bg].Red;
g = cmap->Colors[bg].Green;
b = cmap->Colors[bg].Blue;
*ptr++ = 0x00ffffff & ((r << 16) | (g << 8) | b);
}
else
{
r = cmap->Colors[rows[i][j]].Red;
g = cmap->Colors[rows[i][j]].Green;
b = cmap->Colors[rows[i][j]].Blue;
*ptr++ = (0xff << 24) | (r << 16) | (g << 8) | b;
}
per += per_inc;
if (progress && (((int)per) != last_per)
&& (((int)per) % progress_granularity == 0))
{
last_per = (int)per;
if (!(progress(im, (int)per, 0, last_y, w, i)))
{
rc = 2;
goto quit;
}
last_y = i;
}
}
}
finish:
if (progress)
progress(im, 100, 0, last_y, w, h);
}
rc = 1; /* Success */
quit:
for (i = 0; i < h; i++)
free(rows[i]);
free(rows);
quit2:
#if GIFLIB_MAJOR > 5 || (GIFLIB_MAJOR == 5 && GIFLIB_MINOR >= 1)
DGifCloseFile(gif, NULL);
#else
DGifCloseFile(gif);
#endif
return rc;
}
|
CWE-20
| 178,510 | 450 |
27541275052542240986111419310120810569
| null | null | null |
enlightment
|
c21beaf1780cf3ca291735ae7d58a3dde63277a2
| 1 |
load(ImlibImage * im, ImlibProgressFunction progress,
char progress_granularity, char immediate_load)
{
int rc;
char p = ' ', numbers = 3, count = 0;
int w = 0, h = 0, v = 255, c = 0;
char buf[256];
FILE *f = NULL;
if (im->data)
return 0;
f = fopen(im->real_file, "rb");
if (!f)
return 0;
/* can't use fgets(), because there might be
* binary data after the header and there
* needn't be a newline before the data, so
* no chance to distinguish between end of buffer
* and a binary 0.
*/
/* read the header info */
rc = 0; /* Error */
c = fgetc(f);
if (c != 'P')
goto quit;
p = fgetc(f);
if (p == '1' || p == '4')
numbers = 2; /* bitimages don't have max value */
if ((p < '1') || (p > '8'))
goto quit;
count = 0;
while (count < numbers)
{
c = fgetc(f);
if (c == EOF)
goto quit;
/* eat whitespace */
while (isspace(c))
c = fgetc(f);
/* if comment, eat that */
if (c == '#')
{
do
c = fgetc(f);
while (c != '\n' && c != EOF);
}
/* no comment -> proceed */
else
{
int i = 0;
/* read numbers */
while (c != EOF && !isspace(c) && (i < 255))
{
buf[i++] = c;
c = fgetc(f);
}
if (i)
{
buf[i] = 0;
count++;
switch (count)
{
/* width */
case 1:
w = atoi(buf);
break;
/* height */
case 2:
h = atoi(buf);
break;
/* max value, only for color and greyscale */
case 3:
v = atoi(buf);
break;
}
}
}
}
if ((v < 0) || (v > 255))
goto quit;
im->w = w;
im->h = h;
if (!IMAGE_DIMENSIONS_OK(w, h))
goto quit;
if (!im->format)
{
if (p == '8')
SET_FLAG(im->flags, F_HAS_ALPHA);
else
UNSET_FLAG(im->flags, F_HAS_ALPHA);
im->format = strdup("pnm");
}
rc = 1; /* Ok */
if (((!im->data) && (im->loader)) || (immediate_load) || (progress))
{
DATA8 *data = NULL; /* for the binary versions */
DATA8 *ptr = NULL;
int *idata = NULL; /* for the ASCII versions */
int *iptr;
char buf2[256];
DATA32 *ptr2;
int i, j, x, y, pl = 0;
char pper = 0;
/* must set the im->data member before callign progress function */
ptr2 = im->data = malloc(w * h * sizeof(DATA32));
if (!im->data)
goto quit_error;
/* start reading the data */
switch (p)
{
case '1': /* ASCII monochrome */
buf[0] = 0;
i = 0;
for (y = 0; y < h; y++)
{
x = 0;
while (x < w)
{
if (!buf[i]) /* fill buffer */
{
if (!fgets(buf, 255, f))
goto quit_error;
i = 0;
}
while (buf[i] && isspace(buf[i]))
i++;
if (buf[i])
{
if (buf[i] == '1')
*ptr2 = 0xff000000;
else if (buf[i] == '0')
*ptr2 = 0xffffffff;
else
goto quit_error;
ptr2++;
i++;
}
}
if (progress &&
do_progress(im, progress, progress_granularity,
&pper, &pl, y))
goto quit_progress;
}
break;
case '2': /* ASCII greyscale */
idata = malloc(sizeof(int) * w);
if (!idata)
goto quit_error;
buf[0] = 0;
i = 0;
j = 0;
for (y = 0; y < h; y++)
{
iptr = idata;
x = 0;
while (x < w)
{
int k;
/* check 4 chars ahead to see if we need to
* fill the buffer */
for (k = 0; k < 4; k++)
{
if (!buf[i + k]) /* fill buffer */
{
if (fseek(f, -k, SEEK_CUR) == -1 ||
!fgets(buf, 255, f))
goto quit_error;
i = 0;
break;
}
}
while (buf[i] && isspace(buf[i]))
i++;
while (buf[i] && !isspace(buf[i]))
buf2[j++] = buf[i++];
if (j)
{
buf2[j] = 0;
*(iptr++) = atoi(buf2);
j = 0;
x++;
}
}
iptr = idata;
if (v == 255)
{
for (x = 0; x < w; x++)
{
*ptr2 =
0xff000000 | (iptr[0] << 16) | (iptr[0] << 8)
| iptr[0];
ptr2++;
iptr++;
}
}
else
{
for (x = 0; x < w; x++)
{
*ptr2 =
0xff000000 |
(((iptr[0] * 255) / v) << 16) |
(((iptr[0] * 255) / v) << 8) |
((iptr[0] * 255) / v);
ptr2++;
iptr++;
}
}
if (progress &&
do_progress(im, progress, progress_granularity,
&pper, &pl, y))
goto quit_progress;
}
break;
case '3': /* ASCII RGB */
idata = malloc(3 * sizeof(int) * w);
if (!idata)
goto quit_error;
buf[0] = 0;
i = 0;
j = 0;
for (y = 0; y < h; y++)
{
int w3 = 3 * w;
iptr = idata;
x = 0;
while (x < w3)
{
int k;
/* check 4 chars ahead to see if we need to
* fill the buffer */
for (k = 0; k < 4; k++)
{
if (!buf[i + k]) /* fill buffer */
{
if (fseek(f, -k, SEEK_CUR) == -1 ||
!fgets(buf, 255, f))
goto quit_error;
i = 0;
break;
}
}
while (buf[i] && isspace(buf[i]))
i++;
while (buf[i] && !isspace(buf[i]))
buf2[j++] = buf[i++];
if (j)
{
buf2[j] = 0;
*(iptr++) = atoi(buf2);
j = 0;
x++;
}
}
iptr = idata;
if (v == 255)
{
for (x = 0; x < w; x++)
{
*ptr2 =
0xff000000 | (iptr[0] << 16) | (iptr[1] << 8)
| iptr[2];
ptr2++;
iptr += 3;
}
}
else
{
for (x = 0; x < w; x++)
{
*ptr2 =
0xff000000 |
(((iptr[0] * 255) / v) << 16) |
(((iptr[1] * 255) / v) << 8) |
((iptr[2] * 255) / v);
ptr2++;
iptr += 3;
}
}
if (progress &&
do_progress(im, progress, progress_granularity,
&pper, &pl, y))
goto quit_progress;
}
break;
case '4': /* binary 1bit monochrome */
data = malloc((w + 7) / 8 * sizeof(DATA8));
if (!data)
goto quit_error;
ptr2 = im->data;
for (y = 0; y < h; y++)
{
if (!fread(data, (w + 7) / 8, 1, f))
goto quit_error;
ptr = data;
for (x = 0; x < w; x += 8)
{
j = (w - x >= 8) ? 8 : w - x;
for (i = 0; i < j; i++)
{
if (ptr[0] & (0x80 >> i))
*ptr2 = 0xff000000;
else
*ptr2 = 0xffffffff;
ptr2++;
}
ptr++;
}
if (progress &&
do_progress(im, progress, progress_granularity,
&pper, &pl, y))
goto quit_progress;
}
break;
case '5': /* binary 8bit grayscale GGGGGGGG */
data = malloc(1 * sizeof(DATA8) * w);
if (!data)
goto quit_error;
ptr2 = im->data;
for (y = 0; y < h; y++)
{
if (!fread(data, w * 1, 1, f))
break;
ptr = data;
if (v == 255)
{
for (x = 0; x < w; x++)
{
*ptr2 =
0xff000000 | (ptr[0] << 16) | (ptr[0] << 8) |
ptr[0];
ptr2++;
ptr++;
}
}
else
{
for (x = 0; x < w; x++)
{
*ptr2 =
0xff000000 |
(((ptr[0] * 255) / v) << 16) |
(((ptr[0] * 255) / v) << 8) |
((ptr[0] * 255) / v);
ptr2++;
ptr++;
}
}
if (progress &&
do_progress(im, progress, progress_granularity,
&pper, &pl, y))
goto quit_progress;
}
break;
case '6': /* 24bit binary RGBRGBRGB */
data = malloc(3 * sizeof(DATA8) * w);
if (!data)
goto quit_error;
ptr2 = im->data;
for (y = 0; y < h; y++)
{
if (!fread(data, w * 3, 1, f))
break;
ptr = data;
if (v == 255)
{
for (x = 0; x < w; x++)
{
*ptr2 =
0xff000000 | (ptr[0] << 16) | (ptr[1] << 8) |
ptr[2];
ptr2++;
ptr += 3;
}
}
else
{
for (x = 0; x < w; x++)
{
*ptr2 =
0xff000000 |
(((ptr[0] * 255) / v) << 16) |
(((ptr[1] * 255) / v) << 8) |
((ptr[2] * 255) / v);
ptr2++;
ptr += 3;
}
}
if (progress &&
do_progress(im, progress, progress_granularity,
&pper, &pl, y))
goto quit_progress;
}
break;
case '7': /* XV's 8bit 332 format */
data = malloc(1 * sizeof(DATA8) * w);
if (!data)
goto quit_error;
ptr2 = im->data;
for (y = 0; y < h; y++)
{
if (!fread(data, w * 1, 1, f))
break;
ptr = data;
for (x = 0; x < w; x++)
{
int r, g, b;
r = (*ptr >> 5) & 0x7;
g = (*ptr >> 2) & 0x7;
b = (*ptr) & 0x3;
*ptr2 =
0xff000000 |
(((r << 21) | (r << 18) | (r << 15)) & 0xff0000) |
(((g << 13) | (g << 10) | (g << 7)) & 0xff00) |
((b << 6) | (b << 4) | (b << 2) | (b << 0));
ptr2++;
ptr++;
}
if (progress &&
do_progress(im, progress, progress_granularity,
&pper, &pl, y))
goto quit_progress;
}
break;
case '8': /* 24bit binary RGBARGBARGBA */
data = malloc(4 * sizeof(DATA8) * w);
if (!data)
goto quit_error;
ptr2 = im->data;
for (y = 0; y < h; y++)
{
if (!fread(data, w * 4, 1, f))
break;
ptr = data;
if (v == 255)
{
for (x = 0; x < w; x++)
{
*ptr2 =
(ptr[3] << 24) | (ptr[0] << 16) |
(ptr[1] << 8) | ptr[2];
ptr2++;
ptr += 4;
}
}
else
{
for (x = 0; x < w; x++)
{
*ptr2 =
(((ptr[3] * 255) / v) << 24) |
(((ptr[0] * 255) / v) << 16) |
(((ptr[1] * 255) / v) << 8) |
((ptr[2] * 255) / v);
ptr2++;
ptr += 4;
}
}
if (progress &&
do_progress(im, progress, progress_granularity,
&pper, &pl, y))
goto quit_progress;
}
break;
default:
quit_error:
rc = 0;
break;
quit_progress:
rc = 2;
break;
}
if (idata)
free(idata);
if (data)
free(data);
}
quit:
fclose(f);
return rc;
}
|
CWE-189
| 178,511 | 451 |
32746376758885450059100501942516345295
| null | null | null |
enlightment
|
39641e74a560982fbf93f29bf96b37d27803cb56
| 1 |
load(ImlibImage * im, ImlibProgressFunction progress, char progress_granularity,
char immediate_load)
{
static const int intoffset[] = { 0, 4, 2, 1 };
static const int intjump[] = { 8, 8, 4, 2 };
int rc;
DATA32 *ptr;
GifFileType *gif;
GifRowType *rows;
GifRecordType rec;
ColorMapObject *cmap;
int i, j, done, bg, r, g, b, w = 0, h = 0;
float per = 0.0, per_inc;
int last_per = 0, last_y = 0;
int transp;
int fd;
done = 0;
rows = NULL;
transp = -1;
/* if immediate_load is 1, then dont delay image laoding as below, or */
/* already data in this image - dont load it again */
if (im->data)
return 0;
fd = open(im->real_file, O_RDONLY);
if (fd < 0)
return 0;
#if GIFLIB_MAJOR >= 5
gif = DGifOpenFileHandle(fd, NULL);
#else
gif = DGifOpenFileHandle(fd);
#endif
if (!gif)
{
close(fd);
return 0;
}
rc = 0; /* Failure */
do
{
if (DGifGetRecordType(gif, &rec) == GIF_ERROR)
{
/* PrintGifError(); */
rec = TERMINATE_RECORD_TYPE;
}
if ((rec == IMAGE_DESC_RECORD_TYPE) && (!done))
{
if (DGifGetImageDesc(gif) == GIF_ERROR)
{
/* PrintGifError(); */
rec = TERMINATE_RECORD_TYPE;
}
w = gif->Image.Width;
h = gif->Image.Height;
if (!IMAGE_DIMENSIONS_OK(w, h))
goto quit2;
rows = calloc(h, sizeof(GifRowType *));
if (!rows)
goto quit2;
for (i = 0; i < h; i++)
{
rows[i] = malloc(w * sizeof(GifPixelType));
if (!rows[i])
goto quit;
}
if (gif->Image.Interlace)
{
for (i = 0; i < 4; i++)
{
for (j = intoffset[i]; j < h; j += intjump[i])
{
DGifGetLine(gif, rows[j], w);
}
}
}
else
{
for (i = 0; i < h; i++)
{
DGifGetLine(gif, rows[i], w);
}
}
done = 1;
}
else if (rec == EXTENSION_RECORD_TYPE)
{
int ext_code;
GifByteType *ext;
ext = NULL;
DGifGetExtension(gif, &ext_code, &ext);
while (ext)
{
if ((ext_code == 0xf9) && (ext[1] & 1) && (transp < 0))
{
transp = (int)ext[4];
}
ext = NULL;
DGifGetExtensionNext(gif, &ext);
}
}
}
while (rec != TERMINATE_RECORD_TYPE);
if (transp >= 0)
{
SET_FLAG(im->flags, F_HAS_ALPHA);
}
else
{
UNSET_FLAG(im->flags, F_HAS_ALPHA);
}
/* set the format string member to the lower-case full extension */
/* name for the format - so example names would be: */
/* "png", "jpeg", "tiff", "ppm", "pgm", "pbm", "gif", "xpm" ... */
im->w = w;
im->h = h;
if (!im->format)
im->format = strdup("gif");
if (im->loader || immediate_load || progress)
{
bg = gif->SBackGroundColor;
cmap = (gif->Image.ColorMap ? gif->Image.ColorMap : gif->SColorMap);
im->data = (DATA32 *) malloc(sizeof(DATA32) * w * h);
if (!im->data)
goto quit;
ptr = im->data;
per_inc = 100.0 / (((float)w) * h);
for (i = 0; i < h; i++)
*ptr++ = 0x00ffffff & ((r << 16) | (g << 8) | b);
}
else
{
r = cmap->Colors[rows[i][j]].Red;
g = cmap->Colors[rows[i][j]].Green;
b = cmap->Colors[rows[i][j]].Blue;
*ptr++ = (0xff << 24) | (r << 16) | (g << 8) | b;
}
per += per_inc;
if (progress && (((int)per) != last_per)
&& (((int)per) % progress_granularity == 0))
{
last_per = (int)per;
if (!(progress(im, (int)per, 0, last_y, w, i)))
{
rc = 2;
goto quit;
}
last_y = i;
}
}
|
CWE-20
| 178,512 | 452 |
248094330483319540643307520070744576919
| null | null | null |
savannah
|
8b281f83e8516535756f92dbf90940ac44bd45e1
| 1 |
cid_parse_font_matrix( CID_Face face,
CID_Parser* parser )
{
CID_FaceDict dict;
FT_Face root = (FT_Face)&face->root;
FT_Fixed temp[6];
FT_Fixed temp_scale;
if ( parser->num_dict >= 0 && parser->num_dict < face->cid.num_dicts )
{
FT_Matrix* matrix;
FT_Vector* offset;
dict = face->cid.font_dicts + parser->num_dict;
matrix = &dict->font_matrix;
offset = &dict->font_offset;
(void)cid_parser_to_fixed_array( parser, 6, temp, 3 );
temp_scale = FT_ABS( temp[3] );
/* Set Units per EM based on FontMatrix values. We set the value to */
/* 1000 / temp_scale, because temp_scale was already multiplied by */
/* 1000 (in t1_tofixed, from psobjs.c). */
temp[5] = FT_DivFix( temp[5], temp_scale );
temp[3] = 0x10000L;
}
matrix->xx = temp[0];
matrix->yx = temp[1];
matrix->xy = temp[2];
matrix->yy = temp[3];
temp[2] = FT_DivFix( temp[2], temp_scale );
temp[4] = FT_DivFix( temp[4], temp_scale );
temp[5] = FT_DivFix( temp[5], temp_scale );
temp[3] = 0x10000L;
}
|
CWE-20
| 178,513 | 453 |
228135000173489451605067622407611533397
| null | null | null |
savannah
|
8b281f83e8516535756f92dbf90940ac44bd45e1
| 1 |
t1_parse_font_matrix( T1_Face face,
T1_Loader loader )
{
T1_Parser parser = &loader->parser;
FT_Matrix* matrix = &face->type1.font_matrix;
FT_Vector* offset = &face->type1.font_offset;
FT_Face root = (FT_Face)&face->root;
FT_Fixed temp[6];
FT_Fixed temp_scale;
FT_Int result;
result = T1_ToFixedArray( parser, 6, temp, 3 );
if ( result < 0 )
{
parser->root.error = FT_THROW( Invalid_File_Format );
return;
}
temp_scale = FT_ABS( temp[3] );
if ( temp_scale == 0 )
{
FT_ERROR(( "t1_parse_font_matrix: invalid font matrix\n" ));
parser->root.error = FT_THROW( Invalid_File_Format );
return;
}
/* Set Units per EM based on FontMatrix values. We set the value to */
/* 1000 / temp_scale, because temp_scale was already multiplied by */
/* 1000 (in t1_tofixed, from psobjs.c). */
root->units_per_EM = (FT_UShort)FT_DivFix( 1000, temp_scale );
/* we need to scale the values by 1.0/temp_scale */
if ( temp_scale != 0x10000L )
{
temp[0] = FT_DivFix( temp[0], temp_scale );
temp[1] = FT_DivFix( temp[1], temp_scale );
temp[2] = FT_DivFix( temp[2], temp_scale );
temp[4] = FT_DivFix( temp[4], temp_scale );
temp[5] = FT_DivFix( temp[5], temp_scale );
temp[3] = temp[3] < 0 ? -0x10000L : 0x10000L;
}
matrix->xx = temp[0];
matrix->yx = temp[1];
matrix->xy = temp[2];
matrix->yy = temp[3];
/* note that the offsets must be expressed in integer font units */
offset->x = temp[4] >> 16;
offset->y = temp[5] >> 16;
}
|
CWE-20
| 178,514 | 454 |
46188906777019512287845078917191518470
| null | null | null |
savannah
|
8b281f83e8516535756f92dbf90940ac44bd45e1
| 1 |
t42_parse_font_matrix( T42_Face face,
T42_Loader loader )
{
T42_Parser parser = &loader->parser;
FT_Matrix* matrix = &face->type1.font_matrix;
FT_Vector* offset = &face->type1.font_offset;
FT_Face root = (FT_Face)&face->root;
FT_Fixed temp[6];
FT_Fixed temp_scale;
(void)T1_ToFixedArray( parser, 6, temp, 3 );
temp_scale = FT_ABS( temp[3] );
/* Set Units per EM based on FontMatrix values. We set the value to */
/* 1000 / temp_scale, because temp_scale was already multiplied by */
/* 1000 (in t1_tofixed, from psobjs.c). */
matrix->xx = temp[0];
matrix->yx = temp[1];
matrix->xy = temp[2];
matrix->yy = temp[3];
/* note that the offsets must be expressed in integer font units */
offset->x = temp[4] >> 16;
offset->y = temp[5] >> 16;
temp[2] = FT_DivFix( temp[2], temp_scale );
temp[4] = FT_DivFix( temp[4], temp_scale );
temp[5] = FT_DivFix( temp[5], temp_scale );
temp[3] = 0x10000L;
}
|
CWE-20
| 178,515 | 455 |
80677870207684603238504720847744994120
| null | null | null |
enlightment
|
c94d83ccab15d5ef02f88d42dce38ed3f0892882
| 1 |
__imlib_Ellipse_DrawToData(int xc, int yc, int a, int b, DATA32 color,
DATA32 * dst, int dstw, int clx, int cly, int clw,
int clh, ImlibOp op, char dst_alpha, char blend)
{
ImlibPointDrawFunction pfunc;
int xx, yy, x, y, prev_x, prev_y, ty, by, lx, rx;
DATA32 a2, b2, *tp, *bp;
DATA64 dx, dy;
if (A_VAL(&color) == 0xff)
blend = 0;
pfunc = __imlib_GetPointDrawFunction(op, dst_alpha, blend);
if (!pfunc)
return;
xc -= clx;
yc -= cly;
dst += (dstw * cly) + clx;
a2 = a * a;
b2 = b * b;
yy = b << 16;
prev_y = b;
dx = a2 * b;
dy = 0;
ty = yc - b - 1;
by = yc + b;
lx = xc - 1;
rx = xc;
tp = dst + (dstw * ty) + lx;
bp = dst + (dstw * by) + lx;
while (dy < dx)
{
int len;
y = yy >> 16;
y += ((yy - (y << 16)) >> 15);
if (prev_y != y)
{
prev_y = y;
dx -= a2;
ty++;
by--;
tp += dstw;
bp -= dstw;
}
len = rx - lx;
if (IN_RANGE(lx, ty, clw, clh))
pfunc(color, tp);
if (IN_RANGE(rx, ty, clw, clh))
pfunc(color, tp + len);
if (IN_RANGE(lx, by, clw, clh))
pfunc(color, bp);
if (IN_RANGE(rx, by, clw, clh))
pfunc(color, bp + len);
dy += b2;
yy -= ((dy << 16) / dx);
lx--;
if ((lx < 0) && (rx > clw))
return;
if ((ty > clh) || (by < 0))
return;
}
xx = yy;
prev_x = xx >> 16;
dx = dy;
ty++;
by--;
tp += dstw;
bp -= dstw;
while (ty < yc)
{
int len;
x = xx >> 16;
x += ((xx - (x << 16)) >> 15);
if (prev_x != x)
{
prev_x = x;
dy += b2;
lx--;
rx++;
tp--;
bp--;
}
len = rx - lx;
if (IN_RANGE(lx, ty, clw, clh))
pfunc(color, tp);
if (IN_RANGE(rx, ty, clw, clh))
pfunc(color, tp + len);
if (IN_RANGE(lx, by, clw, clh))
pfunc(color, bp);
if (IN_RANGE(rx, by, clw, clh))
pfunc(color, bp + len);
if (IN_RANGE(rx, by, clw, clh))
pfunc(color, bp + len);
dx -= a2;
xx += ((dx << 16) / dy);
ty++;
if ((ty > clh) || (by < 0))
return;
}
}
|
CWE-189
| 178,516 | 456 |
155577364624288382699010849819044776563
| null | null | null |
openssl
|
db82b8f9bd432a59aea8e1014694e15fc457c2bb
| 1 |
RSA *RSA_generate_key(int bits, unsigned long e_value,
void (*callback)(int,int,void *), void *cb_arg)
{
RSA *rsa=NULL;
BIGNUM *r0=NULL,*r1=NULL,*r2=NULL,*r3=NULL,*tmp;
int bitsp,bitsq,ok= -1,n=0,i;
BN_CTX *ctx=NULL,*ctx2=NULL;
ctx=BN_CTX_new();
if (ctx == NULL) goto err;
ctx2=BN_CTX_new();
if (ctx2 == NULL) goto err;
BN_CTX_start(ctx);
r0 = BN_CTX_get(ctx);
r1 = BN_CTX_get(ctx);
r2 = BN_CTX_get(ctx);
r3 = BN_CTX_get(ctx);
if (r3 == NULL) goto err;
bitsp=(bits+1)/2;
bitsq=bits-bitsp;
rsa=RSA_new();
if (rsa == NULL) goto err;
/* set e */
rsa->e=BN_new();
if (rsa->e == NULL) goto err;
#if 1
/* The problem is when building with 8, 16, or 32 BN_ULONG,
* unsigned long can be larger */
for (i=0; i<sizeof(unsigned long)*8; i++)
{
if (e_value & (1<<i))
BN_set_bit(rsa->e,i);
}
#else
if (!BN_set_word(rsa->e,e_value)) goto err;
#endif
/* generate p and q */
for (;;)
{
rsa->p=BN_generate_prime(NULL,bitsp,0,NULL,NULL,callback,cb_arg);
if (rsa->p == NULL) goto err;
if (!BN_sub(r2,rsa->p,BN_value_one())) goto err;
if (!BN_gcd(r1,r2,rsa->e,ctx)) goto err;
if (BN_is_one(r1)) break;
if (callback != NULL) callback(2,n++,cb_arg);
BN_free(rsa->p);
}
if (callback != NULL) callback(3,0,cb_arg);
for (;;)
{
rsa->q=BN_generate_prime(NULL,bitsq,0,NULL,NULL,callback,cb_arg);
if (rsa->q == NULL) goto err;
if (!BN_sub(r2,rsa->q,BN_value_one())) goto err;
if (!BN_gcd(r1,r2,rsa->e,ctx)) goto err;
if (BN_is_one(r1) && (BN_cmp(rsa->p,rsa->q) != 0))
break;
if (callback != NULL) callback(2,n++,cb_arg);
BN_free(rsa->q);
}
if (callback != NULL) callback(3,1,cb_arg);
if (BN_cmp(rsa->p,rsa->q) < 0)
{
tmp=rsa->p;
rsa->p=rsa->q;
rsa->q=tmp;
}
/* calculate n */
rsa->n=BN_new();
if (rsa->n == NULL) goto err;
if (!BN_mul(rsa->n,rsa->p,rsa->q,ctx)) goto err;
/* calculate d */
if (!BN_sub(r1,rsa->p,BN_value_one())) goto err; /* p-1 */
if (!BN_sub(r2,rsa->q,BN_value_one())) goto err; /* q-1 */
if (!BN_mul(r0,r1,r2,ctx)) goto err; /* (p-1)(q-1) */
/* should not be needed, since gcd(p-1,e) == 1 and gcd(q-1,e) == 1 */
/* for (;;)
{
if (!BN_gcd(r3,r0,rsa->e,ctx)) goto err;
if (BN_is_one(r3)) break;
if (1)
{
if (!BN_add_word(rsa->e,2L)) goto err;
continue;
}
RSAerr(RSA_F_RSA_GENERATE_KEY,RSA_R_BAD_E_VALUE);
goto err;
}
*/
rsa->d=BN_mod_inverse(NULL,rsa->e,r0,ctx2); /* d */
if (rsa->d == NULL) goto err;
/* calculate d mod (p-1) */
rsa->dmp1=BN_new();
if (rsa->dmp1 == NULL) goto err;
if (!BN_mod(rsa->dmp1,rsa->d,r1,ctx)) goto err;
/* calculate d mod (q-1) */
rsa->dmq1=BN_new();
if (rsa->dmq1 == NULL) goto err;
if (!BN_mod(rsa->dmq1,rsa->d,r2,ctx)) goto err;
/* calculate inverse of q mod p */
rsa->iqmp=BN_mod_inverse(NULL,rsa->q,rsa->p,ctx2);
if (rsa->iqmp == NULL) goto err;
ok=1;
err:
if (ok == -1)
{
RSAerr(RSA_F_RSA_GENERATE_KEY,ERR_LIB_BN);
ok=0;
}
BN_CTX_end(ctx);
BN_CTX_free(ctx);
BN_CTX_free(ctx2);
if (!ok)
{
if (rsa != NULL) RSA_free(rsa);
return(NULL);
}
else
return(rsa);
}
|
CWE-310
| 178,517 | 457 |
270513867548826429500076415731725088047
| null | null | null |
gnupg
|
9010d1576e278a4274ad3f4aa15776c28f6ba965
| 1 |
_gcry_ecc_ecdsa_sign (gcry_mpi_t input, ECC_secret_key *skey,
gcry_mpi_t r, gcry_mpi_t s,
int flags, int hashalgo)
{
gpg_err_code_t rc = 0;
int extraloops = 0;
gcry_mpi_t k, dr, sum, k_1, x;
mpi_point_struct I;
gcry_mpi_t hash;
const void *abuf;
unsigned int abits, qbits;
mpi_ec_t ctx;
if (DBG_CIPHER)
log_mpidump ("ecdsa sign hash ", input );
/* Convert the INPUT into an MPI if needed. */
rc = _gcry_dsa_normalize_hash (input, &hash, qbits);
if (rc)
return rc;
if (rc)
return rc;
k = NULL;
dr = mpi_alloc (0);
sum = mpi_alloc (0);
{
do
{
mpi_free (k);
k = NULL;
if ((flags & PUBKEY_FLAG_RFC6979) && hashalgo)
{
/* Use Pornin's method for deterministic DSA. If this
flag is set, it is expected that HASH is an opaque
MPI with the to be signed hash. That hash is also
used as h1 from 3.2.a. */
if (!mpi_is_opaque (input))
{
rc = GPG_ERR_CONFLICT;
goto leave;
}
abuf = mpi_get_opaque (input, &abits);
rc = _gcry_dsa_gen_rfc6979_k (&k, skey->E.n, skey->d,
abuf, (abits+7)/8,
hashalgo, extraloops);
if (rc)
goto leave;
extraloops++;
}
else
k = _gcry_dsa_gen_k (skey->E.n, GCRY_STRONG_RANDOM);
_gcry_mpi_ec_mul_point (&I, k, &skey->E.G, ctx);
if (_gcry_mpi_ec_get_affine (x, NULL, &I, ctx))
{
if (DBG_CIPHER)
log_debug ("ecc sign: Failed to get affine coordinates\n");
rc = GPG_ERR_BAD_SIGNATURE;
goto leave;
}
mpi_mod (r, x, skey->E.n); /* r = x mod n */
}
while (!mpi_cmp_ui (r, 0));
mpi_mulm (dr, skey->d, r, skey->E.n); /* dr = d*r mod n */
mpi_addm (sum, hash, dr, skey->E.n); /* sum = hash + (d*r) mod n */
mpi_invm (k_1, k, skey->E.n); /* k_1 = k^(-1) mod n */
mpi_mulm (s, k_1, sum, skey->E.n); /* s = k^(-1)*(hash+(d*r)) mod n */
}
while (!mpi_cmp_ui (s, 0));
if (DBG_CIPHER)
}
|
CWE-200
| 178,520 | 460 |
208238798261774624885998817593735102427
| null | null | null |
busybox
|
352f79acbd759c14399e39baef21fc4ffe180ac2
| 1 |
static NOINLINE char *xmalloc_optname_optval(uint8_t *option, const struct dhcp_optflag *optflag, const char *opt_name)
{
unsigned upper_length;
int len, type, optlen;
char *dest, *ret;
/* option points to OPT_DATA, need to go back to get OPT_LEN */
len = option[-OPT_DATA + OPT_LEN];
type = optflag->flags & OPTION_TYPE_MASK;
optlen = dhcp_option_lengths[type];
upper_length = len_of_option_as_string[type]
* ((unsigned)(len + optlen - 1) / (unsigned)optlen);
dest = ret = xmalloc(upper_length + strlen(opt_name) + 2);
dest += sprintf(ret, "%s=", opt_name);
while (len >= optlen) {
switch (type) {
case OPTION_IP:
case OPTION_IP_PAIR:
dest += sprint_nip(dest, "", option);
if (type == OPTION_IP)
break;
dest += sprint_nip(dest, "/", option + 4);
break;
case OPTION_U8:
dest += sprintf(dest, "%u", *option);
break;
case OPTION_U16: {
uint16_t val_u16;
move_from_unaligned16(val_u16, option);
dest += sprintf(dest, "%u", ntohs(val_u16));
break;
}
case OPTION_S32:
case OPTION_U32: {
uint32_t val_u32;
move_from_unaligned32(val_u32, option);
dest += sprintf(dest, type == OPTION_U32 ? "%lu" : "%ld", (unsigned long) ntohl(val_u32));
break;
}
/* Note: options which use 'return' instead of 'break'
* (for example, OPTION_STRING) skip the code which handles
* the case of list of options.
*/
case OPTION_STRING:
case OPTION_STRING_HOST:
memcpy(dest, option, len);
dest[len] = '\0';
if (type == OPTION_STRING_HOST && !good_hostname(dest))
safe_strncpy(dest, "bad", len);
return ret;
case OPTION_STATIC_ROUTES: {
/* Option binary format:
* mask [one byte, 0..32]
* ip [big endian, 0..4 bytes depending on mask]
* router [big endian, 4 bytes]
* may be repeated
*
* We convert it to a string "IP/MASK ROUTER IP2/MASK2 ROUTER2"
*/
const char *pfx = "";
while (len >= 1 + 4) { /* mask + 0-byte ip + router */
uint32_t nip;
uint8_t *p;
unsigned mask;
int bytes;
mask = *option++;
if (mask > 32)
break;
len--;
nip = 0;
p = (void*) &nip;
bytes = (mask + 7) / 8; /* 0 -> 0, 1..8 -> 1, 9..16 -> 2 etc */
while (--bytes >= 0) {
*p++ = *option++;
len--;
}
if (len < 4)
break;
/* print ip/mask */
dest += sprint_nip(dest, pfx, (void*) &nip);
pfx = " ";
dest += sprintf(dest, "/%u ", mask);
/* print router */
dest += sprint_nip(dest, "", option);
option += 4;
len -= 4;
}
return ret;
}
case OPTION_6RD:
/* Option binary format (see RFC 5969):
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | OPTION_6RD | option-length | IPv4MaskLen | 6rdPrefixLen |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | 6rdPrefix |
* ... (16 octets) ...
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* ... 6rdBRIPv4Address(es) ...
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* We convert it to a string
* "IPv4MaskLen 6rdPrefixLen 6rdPrefix 6rdBRIPv4Address..."
*
* Sanity check: ensure that our length is at least 22 bytes, that
* IPv4MaskLen <= 32,
* 6rdPrefixLen <= 128,
* 6rdPrefixLen + (32 - IPv4MaskLen) <= 128
* (2nd condition need no check - it follows from 1st and 3rd).
* Else, return envvar with empty value ("optname=")
*/
if (len >= (1 + 1 + 16 + 4)
&& option[0] <= 32
&& (option[1] + 32 - option[0]) <= 128
) {
/* IPv4MaskLen */
dest += sprintf(dest, "%u ", *option++);
/* 6rdPrefixLen */
dest += sprintf(dest, "%u ", *option++);
/* 6rdPrefix */
dest += sprint_nip6(dest, /* "", */ option);
option += 16;
len -= 1 + 1 + 16 + 4;
/* "+ 4" above corresponds to the length of IPv4 addr
* we consume in the loop below */
while (1) {
/* 6rdBRIPv4Address(es) */
dest += sprint_nip(dest, " ", option);
option += 4;
len -= 4; /* do we have yet another 4+ bytes? */
if (len < 0)
break; /* no */
}
}
return ret;
#if ENABLE_FEATURE_UDHCP_RFC3397
case OPTION_DNS_STRING:
/* unpack option into dest; use ret for prefix (i.e., "optname=") */
dest = dname_dec(option, len, ret);
if (dest) {
free(ret);
return dest;
}
/* error. return "optname=" string */
return ret;
case OPTION_SIP_SERVERS:
/* Option binary format:
* type: byte
* type=0: domain names, dns-compressed
* type=1: IP addrs
*/
option++;
len--;
if (option[-1] == 0) {
dest = dname_dec(option, len, ret);
if (dest) {
free(ret);
return dest;
}
} else
if (option[-1] == 1) {
const char *pfx = "";
while (1) {
len -= 4;
if (len < 0)
break;
dest += sprint_nip(dest, pfx, option);
pfx = " ";
option += 4;
}
}
return ret;
#endif
} /* switch */
/* If we are here, try to format any remaining data
* in the option as another, similarly-formatted option
*/
option += optlen;
len -= optlen;
if (len < optlen /* || !(optflag->flags & OPTION_LIST) */)
break;
*dest++ = ' ';
*dest = '\0';
} /* while */
return ret;
}
|
CWE-119
| 178,521 | 461 |
126744712125939498246335946299723440030
| null | null | null |
libbsd
|
c8f0723d2b4520bdd6b9eb7c3e7976de726d7ff7
| 1 |
fgetwln(FILE *stream, size_t *lenp)
{
struct filewbuf *fb;
wint_t wc;
size_t wused = 0;
/* Try to diminish the possibility of several fgetwln() calls being
* used on different streams, by using a pool of buffers per file. */
fb = &fb_pool[fb_pool_cur];
if (fb->fp != stream && fb->fp != NULL) {
fb_pool_cur++;
fb_pool_cur %= FILEWBUF_POOL_ITEMS;
fb = &fb_pool[fb_pool_cur];
}
fb->fp = stream;
while ((wc = fgetwc(stream)) != WEOF) {
if (!fb->len || wused > fb->len) {
wchar_t *wp;
if (fb->len)
fb->len *= 2;
else
fb->len = FILEWBUF_INIT_LEN;
wp = reallocarray(fb->wbuf, fb->len, sizeof(wchar_t));
if (wp == NULL) {
wused = 0;
break;
}
fb->wbuf = wp;
}
fb->wbuf[wused++] = wc;
if (wc == L'\n')
break;
}
*lenp = wused;
return wused ? fb->wbuf : NULL;
}
|
CWE-119
| 178,522 | 462 |
111742657801644456380340973720301242850
| null | null | null |
openssl
|
197e0ea817ad64820789d86711d55ff50d71f631
| 1 |
static void ssl3_take_mac(SSL *s)
{
const char *sender;
int slen;
if (s->state & SSL_ST_CONNECT)
{
sender=s->method->ssl3_enc->server_finished_label;
sender=s->method->ssl3_enc->client_finished_label;
slen=s->method->ssl3_enc->client_finished_label_len;
}
s->s3->tmp.peer_finish_md_len = s->method->ssl3_enc->final_finish_mac(s,
sender,slen,s->s3->tmp.peer_finish_md);
}
|
CWE-20
| 178,532 | 466 |
122161343720297889429758366270471715136
| null | null | null |
samba
|
b9b9f6738fba5c32e87cb9c36b358355b444fb9b
| 1 |
static int ctdb_tcp_listen_automatic(struct ctdb_context *ctdb)
{
struct ctdb_tcp *ctcp = talloc_get_type(ctdb->private_data,
struct ctdb_tcp);
ctdb_sock_addr sock;
int lock_fd, i;
const char *lock_path = "/tmp/.ctdb_socket_lock";
struct flock lock;
int one = 1;
int sock_size;
struct tevent_fd *fde;
/* If there are no nodes, then it won't be possible to find
* the first one. Log a failure and short circuit the whole
* process.
*/
if (ctdb->num_nodes == 0) {
DEBUG(DEBUG_CRIT,("No nodes available to attempt bind to - is the nodes file empty?\n"));
return -1;
}
/* in order to ensure that we don't get two nodes with the
same adddress, we must make the bind() and listen() calls
atomic. The SO_REUSEADDR setsockopt only prevents double
binds if the first socket is in LISTEN state */
lock_fd = open(lock_path, O_RDWR|O_CREAT, 0666);
if (lock_fd == -1) {
DEBUG(DEBUG_CRIT,("Unable to open %s\n", lock_path));
return -1;
}
lock.l_type = F_WRLCK;
lock.l_whence = SEEK_SET;
lock.l_start = 0;
lock.l_len = 1;
lock.l_pid = 0;
if (fcntl(lock_fd, F_SETLKW, &lock) != 0) {
DEBUG(DEBUG_CRIT,("Unable to lock %s\n", lock_path));
close(lock_fd);
return -1;
}
for (i=0; i < ctdb->num_nodes; i++) {
if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) {
continue;
}
ZERO_STRUCT(sock);
if (ctdb_tcp_get_address(ctdb,
ctdb->nodes[i]->address.address,
&sock) != 0) {
continue;
}
switch (sock.sa.sa_family) {
case AF_INET:
sock.ip.sin_port = htons(ctdb->nodes[i]->address.port);
sock_size = sizeof(sock.ip);
break;
case AF_INET6:
sock.ip6.sin6_port = htons(ctdb->nodes[i]->address.port);
sock_size = sizeof(sock.ip6);
break;
default:
DEBUG(DEBUG_ERR, (__location__ " unknown family %u\n",
sock.sa.sa_family));
continue;
}
#ifdef HAVE_SOCK_SIN_LEN
sock.ip.sin_len = sock_size;
#endif
ctcp->listen_fd = socket(sock.sa.sa_family, SOCK_STREAM, IPPROTO_TCP);
if (ctcp->listen_fd == -1) {
ctdb_set_error(ctdb, "socket failed\n");
continue;
}
set_close_on_exec(ctcp->listen_fd);
setsockopt(ctcp->listen_fd,SOL_SOCKET,SO_REUSEADDR,(char *)&one,sizeof(one));
if (bind(ctcp->listen_fd, (struct sockaddr * )&sock, sock_size) == 0) {
break;
}
if (errno == EADDRNOTAVAIL) {
DEBUG(DEBUG_DEBUG,(__location__ " Failed to bind() to socket. %s(%d)\n",
strerror(errno), errno));
} else {
DEBUG(DEBUG_ERR,(__location__ " Failed to bind() to socket. %s(%d)\n",
strerror(errno), errno));
}
}
if (i == ctdb->num_nodes) {
DEBUG(DEBUG_CRIT,("Unable to bind to any of the node addresses - giving up\n"));
goto failed;
}
ctdb->address.address = talloc_strdup(ctdb, ctdb->nodes[i]->address.address);
ctdb->address.port = ctdb->nodes[i]->address.port;
ctdb->name = talloc_asprintf(ctdb, "%s:%u",
ctdb->address.address,
ctdb->address.port);
ctdb->pnn = ctdb->nodes[i]->pnn;
DEBUG(DEBUG_INFO,("ctdb chose network address %s:%u pnn %u\n",
ctdb->address.address,
ctdb->address.port,
ctdb->pnn));
if (listen(ctcp->listen_fd, 10) == -1) {
goto failed;
}
fde = event_add_fd(ctdb->ev, ctcp, ctcp->listen_fd, EVENT_FD_READ,
ctdb_listen_event, ctdb);
tevent_fd_set_auto_close(fde);
close(lock_fd);
return 0;
failed:
close(lock_fd);
close(ctcp->listen_fd);
ctcp->listen_fd = -1;
return -1;
}
|
CWE-264
| 178,533 | 467 |
42617125390009847099030406555395060021
| null | null | null |
savannah
|
f6f9c48fb40b8a1e8218799724b0b61a7161eb1d
| 1 |
get_cdtext_generic (void *p_user_data)
{
generic_img_private_t *p_env = p_user_data;
uint8_t *p_cdtext_data = NULL;
size_t len;
if (!p_env) return NULL;
if (p_env->b_cdtext_error) return NULL;
if (NULL == p_env->cdtext) {
p_cdtext_data = read_cdtext_generic (p_env);
if (NULL != p_cdtext_data) {
len = CDIO_MMC_GET_LEN16(p_cdtext_data)-2;
p_env->cdtext = cdtext_init();
if(len <= 0 || 0 != cdtext_data_init (p_env->cdtext, &p_cdtext_data[4], len)) {
p_env->b_cdtext_error = true;
cdtext_destroy (p_env->cdtext);
free(p_env->cdtext);
p_env->cdtext = NULL;
}
}
free(p_cdtext_data);
}
}
|
CWE-415
| 178,542 | 468 |
291782179769331618127265948832596496179
| null | null | null |
ghostscript
|
2fc463d0efbd044a8232611f0898eeb12b72a970
| 1 |
file_continue(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
es_ptr pscratch = esp - 2;
file_enum *pfen = r_ptr(esp - 1, file_enum);
int devlen = esp[-3].value.intval;
gx_io_device *iodev = r_ptr(esp - 4, gx_io_device);
uint len = r_size(pscratch);
uint code;
if (len < devlen)
return_error(gs_error_rangecheck); /* not even room for device len */
do {
memcpy((char *)pscratch->value.bytes, iodev->dname, devlen);
code = iodev->procs.enumerate_next(pfen, (char *)pscratch->value.bytes + devlen,
len - devlen);
if (code == ~(uint) 0) { /* all done */
esp -= 5; /* pop proc, pfen, devlen, iodev , mark */
return o_pop_estack;
} else if (code > len) /* overran string */
return_error(gs_error_rangecheck);
else if (iodev != iodev_default(imemory)
|| (check_file_permissions_reduced(i_ctx_p, (char *)pscratch->value.bytes, code + devlen, iodev, "PermitFileReading")) == 0) {
push(1);
ref_assign(op, pscratch);
r_set_size(op, code + devlen);
push_op_estack(file_continue); /* come again */
*++esp = pscratch[2]; /* proc */
return o_push_estack;
}
} while(1);
}
|
CWE-200
| 178,561 | 484 |
235592933251983526176302387021450828775
| null | null | null |
busybox
|
4e314faa0aecb66717418e9a47a4451aec59262b
| 1 |
static void add_probe(const char *name)
{
struct module_entry *m;
m = get_or_add_modentry(name);
if (!(option_mask32 & (OPT_REMOVE | OPT_SHOW_DEPS))
&& (m->flags & MODULE_FLAG_LOADED)
&& strncmp(m->modname, "symbol:", 7) == 0
) {
G.need_symbols = 1;
}
}
|
CWE-20
| 178,570 | 493 |
226438009548517158634898284321538934651
| null | null | null |
libxfont
|
5bf703700ee4a5d6eae20da07cb7a29369667aef
| 1 |
CatalogueRescan (FontPathElementPtr fpe)
{
CataloguePtr cat = fpe->private;
char link[MAXFONTFILENAMELEN];
char dest[MAXFONTFILENAMELEN];
char *attrib;
FontPathElementPtr subfpe;
struct stat statbuf;
const char *path;
DIR *dir;
struct dirent *entry;
int len;
int pathlen;
path = fpe->name + strlen(CataloguePrefix);
if (stat(path, &statbuf) < 0 || !S_ISDIR(statbuf.st_mode))
return BadFontPath;
if (statbuf.st_mtime <= cat->mtime)
return Successful;
dir = opendir(path);
if (dir == NULL)
{
xfree(cat);
return BadFontPath;
}
CatalogueUnrefFPEs (fpe);
while (entry = readdir(dir), entry != NULL)
{
snprintf(link, sizeof link, "%s/%s", path, entry->d_name);
len = readlink(link, dest, sizeof dest);
if (len < 0)
continue;
dest[len] = '\0';
if (dest[0] != '/')
{
pathlen = strlen(path);
memmove(dest + pathlen + 1, dest, sizeof dest - pathlen - 1);
memcpy(dest, path, pathlen);
memcpy(dest + pathlen, "/", 1);
len += pathlen + 1;
}
attrib = strchr(link, ':');
if (attrib && len + strlen(attrib) < sizeof dest)
{
memcpy(dest + len, attrib, strlen(attrib));
len += strlen(attrib);
}
subfpe = xalloc(sizeof *subfpe);
if (subfpe == NULL)
continue;
/* The fonts returned by OpenFont will point back to the
* subfpe they come from. So set the type of the subfpe to
* what the catalogue fpe was assigned, so calls to CloseFont
* (which uses font->fpe->type) goes to CatalogueCloseFont. */
subfpe->type = fpe->type;
subfpe->name_length = len;
subfpe->name = xalloc (len + 1);
if (subfpe == NULL)
{
xfree(subfpe);
continue;
}
memcpy(subfpe->name, dest, len);
subfpe->name[len] = '\0';
/* The X server will manipulate the subfpe ref counts
* associated with the font in OpenFont and CloseFont, so we
* have to make sure it's valid. */
subfpe->refcount = 1;
if (FontFileInitFPE (subfpe) != Successful)
{
xfree(subfpe->name);
xfree(subfpe);
continue;
}
if (CatalogueAddFPE(cat, subfpe) != Successful)
{
FontFileFreeFPE (subfpe);
xfree(subfpe);
continue;
}
}
closedir(dir);
qsort(cat->fpeList,
cat->fpeCount, sizeof cat->fpeList[0], ComparePriority);
cat->mtime = statbuf.st_mtime;
return Successful;
}
|
CWE-119
| 178,597 | 511 |
320948107529517805290902775059513993785
| null | null | null |
savannah
|
57cbb8c148999ba8f14ed53435fc071ac9953afd
| 1 |
tt_cmap14_validate( FT_Byte* table,
FT_Validator valid )
{
FT_Byte* p;
FT_ULong length;
FT_ULong num_selectors;
if ( table + 2 + 4 + 4 > valid->limit )
FT_INVALID_TOO_SHORT;
p = table + 2;
length = TT_NEXT_ULONG( p );
num_selectors = TT_NEXT_ULONG( p );
if ( length > (FT_ULong)( valid->limit - table ) ||
/* length < 10 + 11 * num_selectors ? */
length < 10 ||
( length - 10 ) / 11 < num_selectors )
FT_INVALID_TOO_SHORT;
/* check selectors, they must be in increasing order */
{
/* we start lastVarSel at 1 because a variant selector value of 0
* isn't valid.
*/
FT_ULong n, lastVarSel = 1;
for ( n = 0; n < num_selectors; n++ )
{
FT_ULong varSel = TT_NEXT_UINT24( p );
FT_ULong defOff = TT_NEXT_ULONG( p );
FT_ULong nondefOff = TT_NEXT_ULONG( p );
if ( defOff >= length || nondefOff >= length )
FT_INVALID_TOO_SHORT;
if ( varSel < lastVarSel )
FT_INVALID_DATA;
lastVarSel = varSel + 1;
/* check the default table (these glyphs should be reached */
/* through the normal Unicode cmap, no GIDs, just check order) */
if ( defOff != 0 )
{
FT_Byte* defp = table + defOff;
FT_ULong numRanges = TT_NEXT_ULONG( defp );
FT_ULong i;
FT_ULong lastBase = 0;
/* defp + numRanges * 4 > valid->limit ? */
if ( numRanges > (FT_ULong)( valid->limit - defp ) / 4 )
FT_INVALID_TOO_SHORT;
if ( base + cnt >= 0x110000UL ) /* end of Unicode */
FT_INVALID_DATA;
if ( base < lastBase )
FT_INVALID_DATA;
lastBase = base + cnt + 1U;
}
}
/* and the non-default table (these glyphs are specified here) */
if ( nondefOff != 0 )
{
FT_Byte* ndp = table + nondefOff;
FT_ULong numMappings = TT_NEXT_ULONG( ndp );
/* and the non-default table (these glyphs are specified here) */
if ( nondefOff != 0 )
{
FT_Byte* ndp = table + nondefOff;
FT_ULong numMappings = TT_NEXT_ULONG( ndp );
FT_ULong i, lastUni = 0;
/* numMappings * 4 > (FT_ULong)( valid->limit - ndp ) ? */
if ( numMappings > ( (FT_ULong)( valid->limit - ndp ) ) / 4 )
FT_INVALID_TOO_SHORT;
for ( i = 0; i < numMappings; ++i )
lastUni = uni + 1U;
if ( valid->level >= FT_VALIDATE_TIGHT &&
gid >= TT_VALID_GLYPH_COUNT( valid ) )
FT_INVALID_GLYPH_ID;
}
}
}
}
|
CWE-125
| 178,598 | 512 |
150741055389882228657992840309186286521
| null | null | null |
savannah
|
db5a4a9ae7b0048f033361744421da8569642f73
| 1 |
ps_parser_skip_PS_token( PS_Parser parser )
{
/* Note: PostScript allows any non-delimiting, non-whitespace */
/* character in a name (PS Ref Manual, 3rd ed, p31). */
/* PostScript delimiters are (, ), <, >, [, ], {, }, /, and %. */
FT_Byte* cur = parser->cursor;
FT_Byte* limit = parser->limit;
FT_Error error = FT_Err_Ok;
skip_spaces( &cur, limit ); /* this also skips comments */
if ( cur >= limit )
goto Exit;
/* self-delimiting, single-character tokens */
if ( *cur == '[' || *cur == ']' )
{
cur++;
goto Exit;
}
/* skip balanced expressions (procedures and strings) */
if ( *cur == '{' ) /* {...} */
{
error = skip_procedure( &cur, limit );
goto Exit;
}
if ( *cur == '(' ) /* (...) */
{
error = skip_literal_string( &cur, limit );
goto Exit;
}
if ( *cur == '<' ) /* <...> */
{
if ( cur + 1 < limit && *(cur + 1) == '<' ) /* << */
{
cur++;
cur++;
}
else
error = skip_string( &cur, limit );
goto Exit;
}
if ( *cur == '>' )
{
cur++;
if ( cur >= limit || *cur != '>' ) /* >> */
{
FT_ERROR(( "ps_parser_skip_PS_token:"
" unexpected closing delimiter `>'\n" ));
error = FT_THROW( Invalid_File_Format );
goto Exit;
}
cur++;
goto Exit;
}
if ( *cur == '/' )
cur++;
/* anything else */
while ( cur < limit )
{
/* *cur might be invalid (e.g., ')' or '}'), but this */
/* is handled by the test `cur == parser->cursor' below */
if ( IS_PS_DELIM( *cur ) )
break;
cur++;
}
Exit:
if ( cur < limit && cur == parser->cursor )
{
FT_ERROR(( "ps_parser_skip_PS_token:"
" current token is `%c' which is self-delimiting\n"
" "
" but invalid at this point\n",
*cur ));
error = FT_THROW( Invalid_File_Format );
}
parser->error = error;
parser->cursor = cur;
}
|
CWE-125
| 178,599 | 513 |
33447871913940490078521635904057187670
| null | null | null |
savannah
|
e3058617f384cb6709f3878f753fa17aca9e3a30
| 1 |
T1_Get_Private_Dict( T1_Parser parser,
PSAux_Service psaux )
{
FT_Stream stream = parser->stream;
FT_Memory memory = parser->root.memory;
FT_Error error = FT_Err_Ok;
FT_ULong size;
if ( parser->in_pfb )
{
/* in the case of the PFB format, the private dictionary can be */
/* made of several segments. We thus first read the number of */
/* segments to compute the total size of the private dictionary */
/* then re-read them into memory. */
FT_ULong start_pos = FT_STREAM_POS();
FT_UShort tag;
parser->private_len = 0;
for (;;)
{
error = read_pfb_tag( stream, &tag, &size );
if ( error )
goto Fail;
if ( tag != 0x8002U )
break;
parser->private_len += size;
if ( FT_STREAM_SKIP( size ) )
goto Fail;
}
/* Check that we have a private dictionary there */
/* and allocate private dictionary buffer */
if ( parser->private_len == 0 )
{
FT_ERROR(( "T1_Get_Private_Dict:"
" invalid private dictionary section\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
if ( FT_STREAM_SEEK( start_pos ) ||
FT_ALLOC( parser->private_dict, parser->private_len ) )
goto Fail;
parser->private_len = 0;
for (;;)
{
error = read_pfb_tag( stream, &tag, &size );
if ( error || tag != 0x8002U )
{
error = FT_Err_Ok;
break;
}
if ( FT_STREAM_READ( parser->private_dict + parser->private_len,
size ) )
goto Fail;
parser->private_len += size;
}
}
else
{
/* We have already `loaded' the whole PFA font file into memory; */
/* if this is a memory resource, allocate a new block to hold */
/* the private dict. Otherwise, simply overwrite into the base */
/* dictionary block in the heap. */
/* first of all, look at the `eexec' keyword */
FT_Byte* cur = parser->base_dict;
FT_Byte* limit = cur + parser->base_len;
FT_Byte c;
FT_Pointer pos_lf;
FT_Bool test_cr;
Again:
for (;;)
{
c = cur[0];
if ( c == 'e' && cur + 9 < limit ) /* 9 = 5 letters for `eexec' + */
/* whitespace + 4 chars */
{
if ( cur[1] == 'e' &&
cur[2] == 'x' &&
cur[3] == 'e' &&
cur[4] == 'c' )
break;
}
cur++;
if ( cur >= limit )
{
FT_ERROR(( "T1_Get_Private_Dict:"
" could not find `eexec' keyword\n" ));
error = FT_THROW( Invalid_File_Format );
goto Exit;
}
}
/* check whether `eexec' was real -- it could be in a comment */
/* or string (as e.g. in u003043t.gsf from ghostscript) */
parser->root.cursor = parser->base_dict;
/* set limit to `eexec' + whitespace + 4 characters */
parser->root.limit = cur + 10;
cur = parser->root.cursor;
limit = parser->root.limit;
while ( cur < limit )
{
if ( *cur == 'e' && ft_strncmp( (char*)cur, "eexec", 5 ) == 0 )
goto Found;
T1_Skip_PS_Token( parser );
if ( parser->root.error )
break;
T1_Skip_Spaces ( parser );
cur = parser->root.cursor;
}
/* we haven't found the correct `eexec'; go back and continue */
/* searching */
cur = limit;
limit = parser->base_dict + parser->base_len;
goto Again;
/* now determine where to write the _encrypted_ binary private */
/* According to the Type 1 spec, the first cipher byte must not be */
/* an ASCII whitespace character code (blank, tab, carriage return */
/* or line feed). We have seen Type 1 fonts with two line feed */
/* characters... So skip now all whitespace character codes. */
/* */
/* On the other hand, Adobe's Type 1 parser handles fonts just */
/* fine that are violating this limitation, so we add a heuristic */
/* test to stop at \r only if it is not used for EOL. */
pos_lf = ft_memchr( cur, '\n', (size_t)( limit - cur ) );
test_cr = FT_BOOL( !pos_lf ||
pos_lf > ft_memchr( cur,
'\r',
(size_t)( limit - cur ) ) );
while ( cur < limit &&
( *cur == ' ' ||
*cur == '\t' ||
(test_cr && *cur == '\r' ) ||
*cur == '\n' ) )
++cur;
if ( cur >= limit )
{
FT_ERROR(( "T1_Get_Private_Dict:"
" `eexec' not properly terminated\n" ));
error = FT_THROW( Invalid_File_Format );
goto Exit;
}
size = parser->base_len - (FT_ULong)( cur - parser->base_dict );
if ( parser->in_memory )
{
/* note that we allocate one more byte to put a terminating `0' */
if ( FT_ALLOC( parser->private_dict, size + 1 ) )
goto Fail;
parser->private_len = size;
}
else
{
parser->single_block = 1;
parser->private_dict = parser->base_dict;
parser->private_len = size;
parser->base_dict = NULL;
parser->base_len = 0;
}
/* now determine whether the private dictionary is encoded in binary */
/* or hexadecimal ASCII format -- decode it accordingly */
/* we need to access the next 4 bytes (after the final whitespace */
/* following the `eexec' keyword); if they all are hexadecimal */
/* digits, then we have a case of ASCII storage */
if ( cur + 3 < limit &&
ft_isxdigit( cur[0] ) && ft_isxdigit( cur[1] ) &&
ft_isxdigit( cur[2] ) && ft_isxdigit( cur[3] ) )
{
/* ASCII hexadecimal encoding */
FT_ULong len;
parser->root.cursor = cur;
(void)psaux->ps_parser_funcs->to_bytes( &parser->root,
parser->private_dict,
parser->private_len,
&len,
0 );
parser->private_len = len;
/* put a safeguard */
parser->private_dict[len] = '\0';
}
else
/* binary encoding -- copy the private dict */
FT_MEM_MOVE( parser->private_dict, cur, size );
}
/* we now decrypt the encoded binary private dictionary */
psaux->t1_decrypt( parser->private_dict, parser->private_len, 55665U );
if ( parser->private_len < 4 )
{
FT_ERROR(( "T1_Get_Private_Dict:"
" invalid private dictionary section\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* replace the four random bytes at the beginning with whitespace */
parser->private_dict[0] = ' ';
parser->private_dict[1] = ' ';
parser->private_dict[2] = ' ';
parser->private_dict[3] = ' ';
parser->root.base = parser->private_dict;
parser->root.cursor = parser->private_dict;
parser->root.limit = parser->root.cursor + parser->private_len;
Fail:
Exit:
return error;
}
|
CWE-125
| 178,600 | 514 |
338869587383884165669569476106491353567
| null | null | null |
savannah
|
2549e421c14aff886629b8482c14af800f411070
| 1 |
eXosip_init (struct eXosip_t *excontext)
{
osip_t *osip;
int i;
memset (excontext, 0, sizeof (eXosip_t));
excontext->dscp = 0x1A;
snprintf (excontext->ipv4_for_gateway, 256, "%s", "217.12.3.11");
snprintf (excontext->ipv6_for_gateway, 256, "%s", "2001:638:500:101:2e0:81ff:fe24:37c6");
#ifdef WIN32
/* Initializing windows socket library */
{
WORD wVersionRequested;
WSADATA wsaData;
wVersionRequested = MAKEWORD (1, 1);
i = WSAStartup (wVersionRequested, &wsaData);
if (i != 0) {
OSIP_TRACE (osip_trace (__FILE__, __LINE__, OSIP_WARNING, NULL, "eXosip: Unable to initialize WINSOCK, reason: %d\n", i));
/* return -1; It might be already initilized?? */
}
}
#endif
excontext->user_agent = osip_strdup ("eXosip/" EXOSIP_VERSION);
if (excontext->user_agent == NULL)
return OSIP_NOMEM;
excontext->j_calls = NULL;
excontext->j_stop_ua = 0;
#ifndef OSIP_MONOTHREAD
excontext->j_thread = NULL;
#endif
i = osip_list_init (&excontext->j_transactions);
excontext->j_reg = NULL;
#ifndef OSIP_MONOTHREAD
#if !defined (_WIN32_WCE)
excontext->j_cond = (struct osip_cond *) osip_cond_init ();
if (excontext->j_cond == NULL) {
osip_free (excontext->user_agent);
excontext->user_agent = NULL;
return OSIP_NOMEM;
}
#endif
excontext->j_mutexlock = (struct osip_mutex *) osip_mutex_init ();
if (excontext->j_mutexlock == NULL) {
osip_free (excontext->user_agent);
excontext->user_agent = NULL;
#if !defined (_WIN32_WCE)
osip_cond_destroy ((struct osip_cond *) excontext->j_cond);
excontext->j_cond = NULL;
#endif
return OSIP_NOMEM;
}
#endif
i = osip_init (&osip);
if (i != 0) {
OSIP_TRACE (osip_trace (__FILE__, __LINE__, OSIP_ERROR, NULL, "eXosip: Cannot initialize osip!\n"));
return i;
}
osip_set_application_context (osip, &excontext);
_eXosip_set_callbacks (osip);
excontext->j_osip = osip;
#ifndef OSIP_MONOTHREAD
/* open a TCP socket to wake up the application when needed. */
excontext->j_socketctl = jpipe ();
if (excontext->j_socketctl == NULL)
return OSIP_UNDEFINED_ERROR;
excontext->j_socketctl_event = jpipe ();
if (excontext->j_socketctl_event == NULL)
return OSIP_UNDEFINED_ERROR;
#endif
/* To be changed in osip! */
excontext->j_events = (osip_fifo_t *) osip_malloc (sizeof (osip_fifo_t));
if (excontext->j_events == NULL)
return OSIP_NOMEM;
osip_fifo_init (excontext->j_events);
excontext->use_rport = 1;
excontext->dns_capabilities = 2;
excontext->enable_dns_cache = 1;
excontext->ka_interval = 17000;
snprintf(excontext->ka_crlf, sizeof(excontext->ka_crlf), "\r\n\r\n");
excontext->ka_options = 0;
excontext->autoanswer_bye = 1;
excontext->auto_masquerade_contact = 1;
excontext->masquerade_via=0;
return OSIP_SUCCESS;
}
|
CWE-189
| 178,601 | 515 |
238204468281732099704255338098998330461
| null | null | null |
xserver
|
d088e3c1286b548a58e62afdc70bb40981cdb9e8
| 1 |
SProcXIBarrierReleasePointer(ClientPtr client)
{
xXIBarrierReleasePointerInfo *info;
REQUEST(xXIBarrierReleasePointerReq);
int i;
swaps(&stuff->length);
REQUEST_AT_LEAST_SIZE(xXIBarrierReleasePointerReq);
swapl(&stuff->num_barriers);
REQUEST_FIXED_SIZE(xXIBarrierReleasePointerReq, stuff->num_barriers * sizeof(xXIBarrierReleasePointerInfo));
info = (xXIBarrierReleasePointerInfo*) &stuff[1];
swapl(&info->barrier);
swapl(&info->eventid);
}
|
CWE-190
| 178,617 | 528 |
175285775827241400816743220052795737738
| null | null | null |
xserver
|
859b08d523307eebde7724fd1a0789c44813e821
| 1 |
ProcXIChangeHierarchy(ClientPtr client)
{
xXIAnyHierarchyChangeInfo *any;
size_t len; /* length of data remaining in request */
int rc = Success;
int flags[MAXDEVICES] = { 0 };
REQUEST(xXIChangeHierarchyReq);
REQUEST_AT_LEAST_SIZE(xXIChangeHierarchyReq);
if (!stuff->num_changes)
return rc;
len = ((size_t)stuff->length << 2) - sizeof(xXIAnyHierarchyChangeInfo);
any = (xXIAnyHierarchyChangeInfo *) &stuff[1];
while (stuff->num_changes--) {
if (len < sizeof(xXIAnyHierarchyChangeInfo)) {
rc = BadLength;
goto unwind;
}
SWAPIF(swaps(&any->type));
SWAPIF(swaps(&any->length));
if (len < ((size_t)any->length << 2))
return BadLength;
#define CHANGE_SIZE_MATCH(type) \
do { \
if ((len < sizeof(type)) || (any->length != (sizeof(type) >> 2))) { \
rc = BadLength; \
goto unwind; \
} \
} while(0)
switch (any->type) {
case XIAddMaster:
{
xXIAddMasterInfo *c = (xXIAddMasterInfo *) any;
/* Variable length, due to appended name string */
if (len < sizeof(xXIAddMasterInfo)) {
rc = BadLength;
goto unwind;
}
SWAPIF(swaps(&c->name_len));
if (c->name_len > (len - sizeof(xXIAddMasterInfo))) {
rc = BadLength;
goto unwind;
}
rc = add_master(client, c, flags);
if (rc != Success)
goto unwind;
}
break;
case XIRemoveMaster:
{
xXIRemoveMasterInfo *r = (xXIRemoveMasterInfo *) any;
CHANGE_SIZE_MATCH(xXIRemoveMasterInfo);
rc = remove_master(client, r, flags);
if (rc != Success)
goto unwind;
}
break;
case XIDetachSlave:
{
xXIDetachSlaveInfo *c = (xXIDetachSlaveInfo *) any;
CHANGE_SIZE_MATCH(xXIDetachSlaveInfo);
rc = detach_slave(client, c, flags);
if (rc != Success)
goto unwind;
}
break;
case XIAttachSlave:
{
xXIAttachSlaveInfo *c = (xXIAttachSlaveInfo *) any;
CHANGE_SIZE_MATCH(xXIAttachSlaveInfo);
rc = attach_slave(client, c, flags);
if (rc != Success)
goto unwind;
}
break;
}
len -= any->length * 4;
any = (xXIAnyHierarchyChangeInfo *) ((char *) any + any->length * 4);
}
unwind:
XISendDeviceHierarchyEvent(flags);
return rc;
}
|
CWE-20
| 178,618 | 529 |
273466064398137743204838873494483760207
| null | null | null |
xserver
|
4ca68b878e851e2136c234f40a25008297d8d831
| 1 |
ProcDbeGetVisualInfo(ClientPtr client)
{
REQUEST(xDbeGetVisualInfoReq);
DbeScreenPrivPtr pDbeScreenPriv;
xDbeGetVisualInfoReply rep;
Drawable *drawables;
DrawablePtr *pDrawables = NULL;
register int i, j, rc;
register int count; /* number of visual infos in reply */
register int length; /* length of reply */
ScreenPtr pScreen;
XdbeScreenVisualInfo *pScrVisInfo;
REQUEST_AT_LEAST_SIZE(xDbeGetVisualInfoReq);
if (stuff->n > UINT32_MAX / sizeof(DrawablePtr))
return BadAlloc;
return BadAlloc;
}
|
CWE-190
| 178,619 | 530 |
253695885025975717381813837853768936500
| null | null | null |
xserver
|
b747da5e25be944337a9cd1415506fc06b70aa81
| 1 |
ProcEstablishConnection(ClientPtr client)
{
const char *reason;
char *auth_proto, *auth_string;
xConnClientPrefix *prefix;
REQUEST(xReq);
prefix = (xConnClientPrefix *) ((char *) stuff + sz_xReq);
auth_proto = (char *) prefix + sz_xConnClientPrefix;
auth_string = auth_proto + pad_to_int32(prefix->nbytesAuthProto);
if ((prefix->majorVersion != X_PROTOCOL) ||
(prefix->minorVersion != X_PROTOCOL_REVISION))
reason = "Protocol version mismatch";
else
return (SendConnSetup(client, reason));
}
|
CWE-20
| 178,620 | 531 |
25271822103545983315254060887820611240
| null | null | null |
gnupg
|
8725c99ffa41778f382ca97233183bcd687bb0ce
| 1 |
secret_core_crt (gcry_mpi_t M, gcry_mpi_t C,
gcry_mpi_t D, unsigned int Nlimbs,
gcry_mpi_t P, gcry_mpi_t Q, gcry_mpi_t U)
{
gcry_mpi_t m1 = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t m2 = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t h = mpi_alloc_secure ( Nlimbs + 1 );
/* m1 = c ^ (d mod (p-1)) mod p */
mpi_sub_ui ( h, P, 1 );
mpi_fdiv_r ( h, D, h );
mpi_powm ( m1, C, h, P );
/* m2 = c ^ (d mod (q-1)) mod q */
mpi_sub_ui ( h, Q, 1 );
mpi_fdiv_r ( h, D, h );
mpi_powm ( m2, C, h, Q );
/* h = u * ( m2 - m1 ) mod q */
mpi_sub ( h, m2, m1 );
/* Remove superfluous leading zeroes from INPUT. */
mpi_normalize (input);
if (!skey->p || !skey->q || !skey->u)
{
secret_core_std (output, input, skey->d, skey->n);
}
else
{
secret_core_crt (output, input, skey->d, mpi_get_nlimbs (skey->n),
skey->p, skey->q, skey->u);
}
}
|
CWE-310
| 178,628 | 532 |
284361140638249111546015871629160873818
| null | null | null |
libICE
|
ff5e59f32255913bb1cdf51441b98c9107ae165b
| 1 |
IceGenerateMagicCookie (
int len
)
{
char *auth;
#ifndef HAVE_ARC4RANDOM_BUF
long ldata[2];
int seed;
int value;
int i;
#endif
if ((auth = malloc (len + 1)) == NULL)
return (NULL);
#ifdef HAVE_ARC4RANDOM_BUF
arc4random_buf(auth, len);
#else
#ifdef ITIMER_REAL
{
struct timeval now;
int i;
ldata[0] = now.tv_sec;
ldata[1] = now.tv_usec;
}
#else
{
long time ();
ldata[0] = time ((long *) 0);
ldata[1] = getpid ();
}
#endif
seed = (ldata[0]) + (ldata[1] << 16);
srand (seed);
for (i = 0; i < len; i++)
ldata[1] = now.tv_usec;
value = rand ();
auth[i] = value & 0xff;
}
|
CWE-331
| 178,643 | 533 |
20612687529230169325192849544045591476
| null | null | null |
libXdmcp
|
0554324ec6bbc2071f5d1f8ad211a1643e29eb1f
| 1 |
XdmcpGenerateKey (XdmAuthKeyPtr key)
{
#ifndef HAVE_ARC4RANDOM_BUF
long lowbits, highbits;
srandom ((int)getpid() ^ time((Time_t *)0));
highbits = random ();
highbits = random ();
getbits (lowbits, key->data);
getbits (highbits, key->data + 4);
#else
arc4random_buf(key->data, 8);
#endif
}
|
CWE-320
| 178,644 | 534 |
156663800142374220804667295065486940538
| null | null | null |
savannah
|
a0d7fe4589651c64bd16ddaaa634030bb0455866
| 1 |
pch_write_line (lin line, FILE *file)
{
bool after_newline = p_line[line][p_len[line] - 1] == '\n';
if (! fwrite (p_line[line], sizeof (*p_line[line]), p_len[line], file))
write_fatal ();
return after_newline;
}
|
CWE-119
| 178,645 | 535 |
109748454555121695743182615951035156943
| null | null | null |
ghostscript
|
e698d5c11d27212aa1098bc5b1673a3378563092
| 1 |
jbig2_decode_gray_scale_image(Jbig2Ctx *ctx, Jbig2Segment *segment,
const byte *data, const size_t size,
bool GSMMR, uint32_t GSW, uint32_t GSH,
uint32_t GSBPP, bool GSUSESKIP, Jbig2Image *GSKIP, int GSTEMPLATE, Jbig2ArithCx *GB_stats)
{
uint8_t **GSVALS = NULL;
size_t consumed_bytes = 0;
int i, j, code, stride;
int x, y;
Jbig2Image **GSPLANES;
Jbig2GenericRegionParams rparams;
Jbig2WordStream *ws = NULL;
Jbig2ArithState *as = NULL;
/* allocate GSPLANES */
GSPLANES = jbig2_new(ctx, Jbig2Image *, GSBPP);
if (GSPLANES == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate %d bytes for GSPLANES", GSBPP);
return NULL;
}
for (i = 0; i < GSBPP; ++i) {
GSPLANES[i] = jbig2_image_new(ctx, GSW, GSH);
if (GSPLANES[i] == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate %dx%d image for GSPLANES", GSW, GSH);
/* free already allocated */
for (j = i - 1; j >= 0; --j) {
jbig2_image_release(ctx, GSPLANES[j]);
}
jbig2_free(ctx->allocator, GSPLANES);
return NULL;
}
}
}
|
CWE-119
| 178,659 | 540 |
304560407984194980999184469502825270703
| null | null | null |
openssl
|
8aed2a7548362e88e84a7feb795a3a97e8395008
| 1 |
void EC_GROUP_clear_free(EC_GROUP *group)
{
if (!group) return;
if (group->meth->group_clear_finish != 0)
group->meth->group_clear_finish(group);
else if (group->meth->group_finish != 0)
group->meth->group_finish(group);
EC_EX_DATA_clear_free_all_data(&group->extra_data);
if (group->generator != NULL)
EC_EX_DATA_clear_free_all_data(&group->extra_data);
if (group->generator != NULL)
EC_POINT_clear_free(group->generator);
BN_clear_free(&group->order);
OPENSSL_cleanse(group, sizeof *group);
OPENSSL_free(group);
}
|
CWE-320
| 178,678 | 558 |
83588821557112965724679101879504507869
| null | null | null |
busybox
|
1de25a6e87e0e627aa34298105a3d17c60a1f44e
| 1 |
static int huft_build(const unsigned *b, const unsigned n,
const unsigned s, const unsigned short *d,
const unsigned char *e, huft_t **t, unsigned *m)
{
unsigned a; /* counter for codes of length k */
unsigned c[BMAX + 1]; /* bit length count table */
unsigned eob_len; /* length of end-of-block code (value 256) */
unsigned f; /* i repeats in table every f entries */
int g; /* maximum code length */
int htl; /* table level */
unsigned i; /* counter, current code */
unsigned j; /* counter */
int k; /* number of bits in current code */
unsigned *p; /* pointer into c[], b[], or v[] */
huft_t *q; /* points to current table */
huft_t r; /* table entry for structure assignment */
huft_t *u[BMAX]; /* table stack */
unsigned v[N_MAX]; /* values in order of bit length */
int ws[BMAX + 1]; /* bits decoded stack */
int w; /* bits decoded */
unsigned x[BMAX + 1]; /* bit offsets, then code stack */
int y; /* number of dummy codes added */
unsigned z; /* number of entries in current table */
/* Length of EOB code, if any */
eob_len = n > 256 ? b[256] : BMAX;
*t = NULL;
/* Generate counts for each bit length */
memset(c, 0, sizeof(c));
p = (unsigned *) b; /* cast allows us to reuse p for pointing to b */
i = n;
do {
c[*p]++; /* assume all entries <= BMAX */
} while (--i);
if (c[0] == n) { /* null input - all zero length codes */
*m = 0;
return 2;
}
/* Find minimum and maximum length, bound *m by those */
for (j = 1; (j <= BMAX) && (c[j] == 0); j++)
continue;
k = j; /* minimum code length */
for (i = BMAX; (c[i] == 0) && i; i--)
continue;
g = i; /* maximum code length */
*m = (*m < j) ? j : ((*m > i) ? i : *m);
/* Adjust last length count to fill out codes, if needed */
for (y = 1 << j; j < i; j++, y <<= 1) {
y -= c[j];
if (y < 0)
return 2; /* bad input: more codes than bits */
}
y -= c[i];
if (y < 0)
return 2;
c[i] += y;
/* Generate starting offsets into the value table for each length */
x[1] = j = 0;
p = c + 1;
xp = x + 2;
while (--i) { /* note that i == g from above */
j += *p++;
*xp++ = j;
}
}
|
CWE-476
| 178,680 | 559 |
44886300504321350078297998527005640356
| null | null | null |
wpitchoune
|
8b10426dcc0246c1712a99460dd470dcb1cc4d9c
| 1 |
create_response(const char *nurl, const char *method, unsigned int *rp_code)
{
char *page, *fpath;
struct MHD_Response *resp = NULL;
if (!strncmp(nurl, URL_BASE_API_1_1, strlen(URL_BASE_API_1_1))) {
resp = create_response_api(nurl, method, rp_code);
} else {
fpath = get_path(nurl, server_data.www_dir);
resp = create_response_file(nurl, method, rp_code, fpath);
free(fpath);
}
}
|
CWE-22
| 178,681 | 560 |
1938873346208583269260163743899020800
| null | null | null |
enlightment
|
666df815cd86a50343859bce36c5cf968c5f38b0
| 1 |
main(int argc,
char **argv)
{
int i, gn;
int test = 0;
char *action = NULL, *cmd;
char *output = NULL;
#ifdef HAVE_EEZE_MOUNT
Eina_Bool mnt = EINA_FALSE;
const char *act;
#endif
gid_t gid, gl[65536], egid;
for (i = 1; i < argc; i++)
{
if ((!strcmp(argv[i], "-h")) ||
(!strcmp(argv[i], "-help")) ||
(!strcmp(argv[i], "--help")))
{
printf(
"This is an internal tool for Enlightenment.\n"
"do not use it.\n"
);
exit(0);
}
}
if (argc >= 3)
{
if ((argc == 3) && (!strcmp(argv[1], "-t")))
{
test = 1;
action = argv[2];
}
else if (!strcmp(argv[1], "l2ping"))
{
action = argv[1];
output = argv[2];
}
#ifdef HAVE_EEZE_MOUNT
else
{
const char *s;
s = strrchr(argv[1], '/');
if ((!s) || (!s[1])) exit(1); /* eeze always uses complete path */
s++;
if (strcmp(s, "mount") && strcmp(s, "umount") && strcmp(s, "eject")) exit(1);
mnt = EINA_TRUE;
act = s;
action = argv[1];
}
#endif
}
else if (argc == 2)
{
action = argv[1];
}
else
{
exit(1);
}
if (!action) exit(1);
fprintf(stderr, "action %s %i\n", action, argc);
uid = getuid();
gid = getgid();
egid = getegid();
gn = getgroups(65536, gl);
if (gn < 0)
{
printf("ERROR: MEMBER OF MORE THAN 65536 GROUPS\n");
exit(3);
}
if (setuid(0) != 0)
{
printf("ERROR: UNABLE TO ASSUME ROOT PRIVILEGES\n");
exit(5);
}
if (setgid(0) != 0)
{
printf("ERROR: UNABLE TO ASSUME ROOT GROUP PRIVILEGES\n");
exit(7);
}
eina_init();
if (!auth_action_ok(action, gid, gl, gn, egid))
{
printf("ERROR: ACTION NOT ALLOWED: %s\n", action);
exit(10);
}
/* we can add more levels of auth here */
/* when mounting, this will match the exact path to the exe,
* as required in sysactions.conf
* this is intentionally pedantic for security
*/
cmd = eina_hash_find(actions, action);
if (!cmd)
{
printf("ERROR: UNDEFINED ACTION: %s\n", action);
exit(20);
}
if (!test && !strcmp(action, "l2ping"))
{
char tmp[128];
double latency;
latency = e_sys_l2ping(output);
eina_convert_dtoa(latency, tmp);
fputs(tmp, stdout);
return (latency < 0) ? 1 : 0;
}
/* sanitize environment */
#ifdef HAVE_UNSETENV
# define NOENV(x) unsetenv(x)
#else
# define NOENV(x)
#endif
NOENV("IFS");
/* sanitize environment */
#ifdef HAVE_UNSETENV
# define NOENV(x) unsetenv(x)
#else
# define NOENV(x)
#endif
NOENV("IFS");
NOENV("LD_PRELOAD");
NOENV("PYTHONPATH");
NOENV("LD_LIBRARY_PATH");
#ifdef HAVE_CLEARENV
clearenv();
#endif
/* set path and ifs to minimal defaults */
putenv("PATH=/bin:/usr/bin");
putenv("IFS= \t\n");
const char *p;
char *end;
unsigned long muid;
Eina_Bool nosuid, nodev, noexec, nuid;
nosuid = nodev = noexec = nuid = EINA_FALSE;
/* these are the only possible options which can be present here; check them strictly */
if (eina_strlcpy(buf, opts, sizeof(buf)) >= sizeof(buf)) return EINA_FALSE;
for (p = buf; p && p[1]; p = strchr(p + 1, ','))
{
if (p[0] == ',') p++;
#define CMP(OPT) \
if (!strncmp(p, OPT, sizeof(OPT) - 1))
CMP("nosuid,")
{
nosuid = EINA_TRUE;
continue;
}
CMP("nodev,")
{
nodev = EINA_TRUE;
continue;
}
CMP("noexec,")
{
noexec = EINA_TRUE;
continue;
}
CMP("utf8,") continue;
CMP("utf8=0,") continue;
CMP("utf8=1,") continue;
CMP("iocharset=utf8,") continue;
CMP("uid=")
{
p += 4;
errno = 0;
muid = strtoul(p, &end, 10);
if (muid == ULONG_MAX) return EINA_FALSE;
if (errno) return EINA_FALSE;
if (end[0] != ',') return EINA_FALSE;
if (muid != uid) return EINA_FALSE;
nuid = EINA_TRUE;
continue;
}
return EINA_FALSE;
}
if ((!nosuid) || (!nodev) || (!noexec) || (!nuid)) return EINA_FALSE;
return EINA_TRUE;
}
|
CWE-264
| 178,685 | 563 |
300699053216122192196495428322166073677
| null | null | null |
pango
|
4de30e5500eaeb49f4bf0b7a07f718e149a2ed5e
| 1 |
pango_glyph_string_set_size (PangoGlyphString *string, gint new_len)
{
g_return_if_fail (new_len >= 0);
while (new_len > string->space)
{
if (string->space == 0)
string->space = 1;
else
string->space *= 2;
if (string->space < 0)
{
g_warning ("glyph string length overflows maximum integer size, truncated");
new_len = string->space = G_MAXINT - 8;
}
}
string->glyphs = g_realloc (string->glyphs, string->space * sizeof (PangoGlyphInfo));
string->log_clusters = g_realloc (string->log_clusters, string->space * sizeof (gint));
string->num_glyphs = new_len;
}
|
CWE-189
| 178,686 | 564 |
263605045719264644933052685862095185543
| null | null | null |
beanstalkd
|
2e8e8c6387ecdf5923dfc4d7718d18eba1b0873d
| 1 |
dispatch_cmd(conn c)
{
int r, i, timeout = -1;
size_t z;
unsigned int count;
job j;
unsigned char type;
char *size_buf, *delay_buf, *ttr_buf, *pri_buf, *end_buf, *name;
unsigned int pri, body_size;
usec delay, ttr;
uint64_t id;
tube t = NULL;
/* NUL-terminate this string so we can use strtol and friends */
c->cmd[c->cmd_len - 2] = '\0';
/* check for possible maliciousness */
if (strlen(c->cmd) != c->cmd_len - 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
type = which_cmd(c);
dprintf("got %s command: \"%s\"\n", op_names[(int) type], c->cmd);
switch (type) {
case OP_PUT:
r = read_pri(&pri, c->cmd + 4, &delay_buf);
if (r) return reply_msg(c, MSG_BAD_FORMAT);
r = read_delay(&delay, delay_buf, &ttr_buf);
if (r) return reply_msg(c, MSG_BAD_FORMAT);
r = read_ttr(&ttr, ttr_buf, &size_buf);
if (r) return reply_msg(c, MSG_BAD_FORMAT);
errno = 0;
body_size = strtoul(size_buf, &end_buf, 10);
if (errno) return reply_msg(c, MSG_BAD_FORMAT);
if (body_size > job_data_size_limit) {
return reply_msg(c, MSG_JOB_TOO_BIG);
}
/* don't allow trailing garbage */
if (end_buf[0] != '\0') return reply_msg(c, MSG_BAD_FORMAT);
conn_set_producer(c);
c->in_job = make_job(pri, delay, ttr ? : 1, body_size + 2, c->use);
/* OOM? */
if (!c->in_job) {
/* throw away the job body and respond with OUT_OF_MEMORY */
twarnx("server error: " MSG_OUT_OF_MEMORY);
return skip(c, body_size + 2, MSG_OUT_OF_MEMORY);
}
fill_extra_data(c);
/* it's possible we already have a complete job */
maybe_enqueue_incoming_job(c);
break;
case OP_PEEK_READY:
/* don't allow trailing garbage */
if (c->cmd_len != CMD_PEEK_READY_LEN + 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
op_ct[type]++;
j = job_copy(pq_peek(&c->use->ready));
if (!j) return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
reply_job(c, j, MSG_FOUND);
break;
case OP_PEEK_DELAYED:
/* don't allow trailing garbage */
if (c->cmd_len != CMD_PEEK_DELAYED_LEN + 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
op_ct[type]++;
j = job_copy(pq_peek(&c->use->delay));
if (!j) return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
reply_job(c, j, MSG_FOUND);
break;
case OP_PEEK_BURIED:
/* don't allow trailing garbage */
if (c->cmd_len != CMD_PEEK_BURIED_LEN + 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
op_ct[type]++;
j = job_copy(buried_job_p(c->use)? j = c->use->buried.next : NULL);
if (!j) return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
reply_job(c, j, MSG_FOUND);
break;
case OP_PEEKJOB:
errno = 0;
id = strtoull(c->cmd + CMD_PEEKJOB_LEN, &end_buf, 10);
if (errno) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
/* So, peek is annoying, because some other connection might free the
* job while we are still trying to write it out. So we copy it and
* then free the copy when it's done sending. */
j = job_copy(peek_job(id));
if (!j) return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
reply_job(c, j, MSG_FOUND);
break;
case OP_RESERVE_TIMEOUT:
errno = 0;
timeout = strtol(c->cmd + CMD_RESERVE_TIMEOUT_LEN, &end_buf, 10);
if (errno) return reply_msg(c, MSG_BAD_FORMAT);
case OP_RESERVE: /* FALLTHROUGH */
/* don't allow trailing garbage */
if (type == OP_RESERVE && c->cmd_len != CMD_RESERVE_LEN + 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
op_ct[type]++;
conn_set_worker(c);
if (conn_has_close_deadline(c) && !conn_ready(c)) {
return reply_msg(c, MSG_DEADLINE_SOON);
}
/* try to get a new job for this guy */
wait_for_job(c, timeout);
process_queue();
break;
case OP_DELETE:
errno = 0;
id = strtoull(c->cmd + CMD_DELETE_LEN, &end_buf, 10);
if (errno) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
j = job_find(id);
j = remove_reserved_job(c, j) ? :
remove_ready_job(j) ? :
remove_buried_job(j);
if (!j) return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
j->state = JOB_STATE_INVALID;
r = binlog_write_job(j);
job_free(j);
if (!r) return reply_serr(c, MSG_INTERNAL_ERROR);
reply(c, MSG_DELETED, MSG_DELETED_LEN, STATE_SENDWORD);
break;
case OP_RELEASE:
errno = 0;
id = strtoull(c->cmd + CMD_RELEASE_LEN, &pri_buf, 10);
if (errno) return reply_msg(c, MSG_BAD_FORMAT);
r = read_pri(&pri, pri_buf, &delay_buf);
if (r) return reply_msg(c, MSG_BAD_FORMAT);
r = read_delay(&delay, delay_buf, NULL);
if (r) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
j = remove_reserved_job(c, job_find(id));
if (!j) return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
/* We want to update the delay deadline on disk, so reserve space for
* that. */
if (delay) {
z = binlog_reserve_space_update(j);
if (!z) return reply_serr(c, MSG_OUT_OF_MEMORY);
j->reserved_binlog_space += z;
}
j->pri = pri;
j->delay = delay;
j->release_ct++;
r = enqueue_job(j, delay, !!delay);
if (r < 0) return reply_serr(c, MSG_INTERNAL_ERROR);
if (r == 1) {
return reply(c, MSG_RELEASED, MSG_RELEASED_LEN, STATE_SENDWORD);
}
/* out of memory trying to grow the queue, so it gets buried */
bury_job(j, 0);
reply(c, MSG_BURIED, MSG_BURIED_LEN, STATE_SENDWORD);
break;
case OP_BURY:
errno = 0;
id = strtoull(c->cmd + CMD_BURY_LEN, &pri_buf, 10);
if (errno) return reply_msg(c, MSG_BAD_FORMAT);
r = read_pri(&pri, pri_buf, NULL);
if (r) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
j = remove_reserved_job(c, job_find(id));
if (!j) return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
j->pri = pri;
r = bury_job(j, 1);
if (!r) return reply_serr(c, MSG_INTERNAL_ERROR);
reply(c, MSG_BURIED, MSG_BURIED_LEN, STATE_SENDWORD);
break;
case OP_KICK:
errno = 0;
count = strtoul(c->cmd + CMD_KICK_LEN, &end_buf, 10);
if (end_buf == c->cmd + CMD_KICK_LEN) {
return reply_msg(c, MSG_BAD_FORMAT);
}
if (errno) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
i = kick_jobs(c->use, count);
return reply_line(c, STATE_SENDWORD, "KICKED %u\r\n", i);
case OP_TOUCH:
errno = 0;
id = strtoull(c->cmd + CMD_TOUCH_LEN, &end_buf, 10);
if (errno) return twarn("strtoull"), reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
j = touch_job(c, job_find(id));
if (j) {
reply(c, MSG_TOUCHED, MSG_TOUCHED_LEN, STATE_SENDWORD);
} else {
return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
}
break;
case OP_STATS:
/* don't allow trailing garbage */
if (c->cmd_len != CMD_STATS_LEN + 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
op_ct[type]++;
do_stats(c, fmt_stats, NULL);
break;
case OP_JOBSTATS:
errno = 0;
id = strtoull(c->cmd + CMD_JOBSTATS_LEN, &end_buf, 10);
if (errno) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
j = peek_job(id);
if (!j) return reply(c, MSG_NOTFOUND, MSG_NOTFOUND_LEN, STATE_SENDWORD);
if (!j->tube) return reply_serr(c, MSG_INTERNAL_ERROR);
do_stats(c, (fmt_fn) fmt_job_stats, j);
break;
case OP_STATS_TUBE:
name = c->cmd + CMD_STATS_TUBE_LEN;
if (!name_is_ok(name, 200)) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
t = tube_find(name);
if (!t) return reply_msg(c, MSG_NOTFOUND);
do_stats(c, (fmt_fn) fmt_stats_tube, t);
t = NULL;
break;
case OP_LIST_TUBES:
/* don't allow trailing garbage */
if (c->cmd_len != CMD_LIST_TUBES_LEN + 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
op_ct[type]++;
do_list_tubes(c, &tubes);
break;
case OP_LIST_TUBE_USED:
/* don't allow trailing garbage */
if (c->cmd_len != CMD_LIST_TUBE_USED_LEN + 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
op_ct[type]++;
reply_line(c, STATE_SENDWORD, "USING %s\r\n", c->use->name);
break;
case OP_LIST_TUBES_WATCHED:
/* don't allow trailing garbage */
if (c->cmd_len != CMD_LIST_TUBES_WATCHED_LEN + 2) {
return reply_msg(c, MSG_BAD_FORMAT);
}
op_ct[type]++;
do_list_tubes(c, &c->watch);
break;
case OP_USE:
name = c->cmd + CMD_USE_LEN;
if (!name_is_ok(name, 200)) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
TUBE_ASSIGN(t, tube_find_or_make(name));
if (!t) return reply_serr(c, MSG_OUT_OF_MEMORY);
c->use->using_ct--;
TUBE_ASSIGN(c->use, t);
TUBE_ASSIGN(t, NULL);
c->use->using_ct++;
reply_line(c, STATE_SENDWORD, "USING %s\r\n", c->use->name);
break;
case OP_WATCH:
name = c->cmd + CMD_WATCH_LEN;
if (!name_is_ok(name, 200)) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
TUBE_ASSIGN(t, tube_find_or_make(name));
if (!t) return reply_serr(c, MSG_OUT_OF_MEMORY);
r = 1;
if (!ms_contains(&c->watch, t)) r = ms_append(&c->watch, t);
TUBE_ASSIGN(t, NULL);
if (!r) return reply_serr(c, MSG_OUT_OF_MEMORY);
reply_line(c, STATE_SENDWORD, "WATCHING %d\r\n", c->watch.used);
break;
case OP_IGNORE:
name = c->cmd + CMD_IGNORE_LEN;
if (!name_is_ok(name, 200)) return reply_msg(c, MSG_BAD_FORMAT);
op_ct[type]++;
t = NULL;
for (i = 0; i < c->watch.used; i++) {
t = c->watch.items[i];
if (strncmp(t->name, name, MAX_TUBE_NAME_LEN) == 0) break;
t = NULL;
}
if (t && c->watch.used < 2) return reply_msg(c, MSG_NOT_IGNORED);
if (t) ms_remove(&c->watch, t); /* may free t if refcount => 0 */
t = NULL;
reply_line(c, STATE_SENDWORD, "WATCHING %d\r\n", c->watch.used);
break;
case OP_QUIT:
conn_close(c);
break;
case OP_PAUSE_TUBE:
op_ct[type]++;
r = read_tube_name(&name, c->cmd + CMD_PAUSE_TUBE_LEN, &delay_buf);
if (r) return reply_msg(c, MSG_BAD_FORMAT);
r = read_delay(&delay, delay_buf, NULL);
if (r) return reply_msg(c, MSG_BAD_FORMAT);
*delay_buf = '\0';
t = tube_find(name);
if (!t) return reply_msg(c, MSG_NOTFOUND);
t->deadline_at = now_usec() + delay;
t->pause = delay;
t->stat.pause_ct++;
set_main_delay_timeout();
reply_line(c, STATE_SENDWORD, "PAUSED\r\n");
break;
default:
return reply_msg(c, MSG_UNKNOWN_COMMAND);
}
}
| 178,687 | 565 |
187391797579604796843033309226969343864
| null | null | null |
|
memcached
|
d9cd01ede97f4145af9781d448c62a3318952719
| 1 |
static int try_read_command(conn *c) {
assert(c != NULL);
assert(c->rcurr <= (c->rbuf + c->rsize));
assert(c->rbytes > 0);
if (c->protocol == negotiating_prot || c->transport == udp_transport) {
if ((unsigned char)c->rbuf[0] == (unsigned char)PROTOCOL_BINARY_REQ) {
c->protocol = binary_prot;
} else {
c->protocol = ascii_prot;
}
if (settings.verbose > 1) {
fprintf(stderr, "%d: Client using the %s protocol\n", c->sfd,
prot_text(c->protocol));
}
}
if (c->protocol == binary_prot) {
/* Do we have the complete packet header? */
if (c->rbytes < sizeof(c->binary_header)) {
/* need more data! */
return 0;
} else {
#ifdef NEED_ALIGN
if (((long)(c->rcurr)) % 8 != 0) {
/* must realign input buffer */
memmove(c->rbuf, c->rcurr, c->rbytes);
c->rcurr = c->rbuf;
if (settings.verbose > 1) {
fprintf(stderr, "%d: Realign input buffer\n", c->sfd);
}
}
#endif
protocol_binary_request_header* req;
req = (protocol_binary_request_header*)c->rcurr;
if (settings.verbose > 1) {
/* Dump the packet before we convert it to host order */
int ii;
fprintf(stderr, "<%d Read binary protocol data:", c->sfd);
for (ii = 0; ii < sizeof(req->bytes); ++ii) {
if (ii % 4 == 0) {
fprintf(stderr, "\n<%d ", c->sfd);
}
fprintf(stderr, " 0x%02x", req->bytes[ii]);
}
fprintf(stderr, "\n");
}
c->binary_header = *req;
c->binary_header.request.keylen = ntohs(req->request.keylen);
c->binary_header.request.bodylen = ntohl(req->request.bodylen);
c->binary_header.request.cas = ntohll(req->request.cas);
if (c->binary_header.request.magic != PROTOCOL_BINARY_REQ) {
if (settings.verbose) {
fprintf(stderr, "Invalid magic: %x\n",
c->binary_header.request.magic);
}
conn_set_state(c, conn_closing);
return -1;
}
c->msgcurr = 0;
c->msgused = 0;
c->iovused = 0;
if (add_msghdr(c) != 0) {
out_string(c, "SERVER_ERROR out of memory");
return 0;
}
c->cmd = c->binary_header.request.opcode;
c->keylen = c->binary_header.request.keylen;
c->opaque = c->binary_header.request.opaque;
/* clear the returned cas value */
c->cas = 0;
dispatch_bin_command(c);
c->rbytes -= sizeof(c->binary_header);
c->rcurr += sizeof(c->binary_header);
}
} else {
char *el, *cont;
if (c->rbytes == 0)
return 0;
el = memchr(c->rcurr, '\n', c->rbytes);
if (!el) {
if (c->rbytes > 1024) {
/*
* We didn't have a '\n' in the first k. This _has_ to be a
* large multiget, if not we should just nuke the connection.
*/
char *ptr = c->rcurr;
while (*ptr == ' ') { /* ignore leading whitespaces */
++ptr;
}
if (strcmp(ptr, "get ") && strcmp(ptr, "gets ")) {
conn_set_state(c, conn_closing);
return 1;
}
}
return 0;
}
cont = el + 1;
if ((el - c->rcurr) > 1 && *(el - 1) == '\r') {
el--;
}
*el = '\0';
assert(cont <= (c->rcurr + c->rbytes));
process_command(c, c->rcurr);
c->rbytes -= (cont - c->rcurr);
c->rcurr = cont;
assert(c->rcurr <= (c->rbuf + c->rsize));
}
return 1;
}
|
CWE-20
| 178,693 | 566 |
41269603813800370216521915768948343467
| null | null | null |
uzbl
|
1958b52d41cba96956dc1995660de49525ed1047
| 1 |
eval_js(WebKitWebView * web_view, gchar *script, GString *result) {
WebKitWebFrame *frame;
JSGlobalContextRef context;
JSObjectRef globalobject;
JSStringRef var_name;
JSStringRef js_script;
JSValueRef js_result;
JSStringRef js_result_string;
size_t js_result_size;
js_init();
frame = webkit_web_view_get_main_frame(WEBKIT_WEB_VIEW(web_view));
context = webkit_web_frame_get_global_context(frame);
globalobject = JSContextGetGlobalObject(context);
/* uzbl javascript namespace */
var_name = JSStringCreateWithUTF8CString("Uzbl");
JSObjectSetProperty(context, globalobject, var_name,
JSObjectMake(context, uzbl.js.classref, NULL),
kJSClassAttributeNone, NULL);
/* evaluate the script and get return value*/
js_script = JSStringCreateWithUTF8CString(script);
js_result = JSEvaluateScript(context, js_script, globalobject, NULL, 0, NULL);
if (js_result && !JSValueIsUndefined(context, js_result)) {
js_result_string = JSValueToStringCopy(context, js_result, NULL);
js_result_size = JSStringGetMaximumUTF8CStringSize(js_result_string);
if (js_result_size) {
char js_result_utf8[js_result_size];
JSStringGetUTF8CString(js_result_string, js_result_utf8, js_result_size);
g_string_assign(result, js_result_utf8);
}
JSStringRelease(js_result_string);
}
/* cleanup */
JSObjectDeleteProperty(context, globalobject, var_name, NULL);
JSStringRelease(var_name);
JSStringRelease(js_script);
}
|
CWE-264
| 178,695 | 568 |
251201670477899884555531350964553866649
| null | null | null |
FFmpeg
|
8312e3fc9041027a33c8bc667bb99740fdf41dd5
| 1 |
static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap)
{
AVIOContext *pb = s->pb;
APEContext *ape = s->priv_data;
AVStream *st;
uint32_t tag;
int i;
int total_blocks;
int64_t pts;
/* TODO: Skip any leading junk such as id3v2 tags */
ape->junklength = 0;
tag = avio_rl32(pb);
if (tag != MKTAG('M', 'A', 'C', ' '))
return -1;
ape->fileversion = avio_rl16(pb);
if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) {
av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10);
return -1;
}
if (ape->fileversion >= 3980) {
ape->padding1 = avio_rl16(pb);
ape->descriptorlength = avio_rl32(pb);
ape->headerlength = avio_rl32(pb);
ape->seektablelength = avio_rl32(pb);
ape->wavheaderlength = avio_rl32(pb);
ape->audiodatalength = avio_rl32(pb);
ape->audiodatalength_high = avio_rl32(pb);
ape->wavtaillength = avio_rl32(pb);
avio_read(pb, ape->md5, 16);
/* Skip any unknown bytes at the end of the descriptor.
This is for future compatibility */
if (ape->descriptorlength > 52)
avio_seek(pb, ape->descriptorlength - 52, SEEK_CUR);
/* Read header data */
ape->compressiontype = avio_rl16(pb);
ape->formatflags = avio_rl16(pb);
ape->blocksperframe = avio_rl32(pb);
ape->finalframeblocks = avio_rl32(pb);
ape->totalframes = avio_rl32(pb);
ape->bps = avio_rl16(pb);
ape->channels = avio_rl16(pb);
ape->samplerate = avio_rl32(pb);
} else {
ape->descriptorlength = 0;
ape->headerlength = 32;
ape->compressiontype = avio_rl16(pb);
ape->formatflags = avio_rl16(pb);
ape->channels = avio_rl16(pb);
ape->samplerate = avio_rl32(pb);
ape->wavheaderlength = avio_rl32(pb);
ape->wavtaillength = avio_rl32(pb);
ape->totalframes = avio_rl32(pb);
ape->finalframeblocks = avio_rl32(pb);
if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) {
avio_seek(pb, 4, SEEK_CUR); /* Skip the peak level */
ape->headerlength += 4;
}
if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) {
ape->seektablelength = avio_rl32(pb);
ape->headerlength += 4;
ape->seektablelength *= sizeof(int32_t);
} else
ape->seektablelength = ape->totalframes * sizeof(int32_t);
if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT)
ape->bps = 8;
else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT)
ape->bps = 24;
else
ape->bps = 16;
if (ape->fileversion >= 3950)
ape->blocksperframe = 73728 * 4;
else if (ape->fileversion >= 3900 || (ape->fileversion >= 3800 && ape->compressiontype >= 4000))
ape->blocksperframe = 73728;
else
ape->blocksperframe = 9216;
/* Skip any stored wav header */
if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER))
avio_seek(pb, ape->wavheaderlength, SEEK_CUR);
}
if(ape->totalframes > UINT_MAX / sizeof(APEFrame)){
av_log(s, AV_LOG_ERROR, "Too many frames: %d\n", ape->totalframes);
return -1;
}
ape->frames = av_malloc(ape->totalframes * sizeof(APEFrame));
if(!ape->frames)
return AVERROR(ENOMEM);
ape->firstframe = ape->junklength + ape->descriptorlength + ape->headerlength + ape->seektablelength + ape->wavheaderlength;
ape->currentframe = 0;
ape->totalsamples = ape->finalframeblocks;
if (ape->totalframes > 1)
ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1);
if (ape->seektablelength > 0) {
ape->seektable = av_malloc(ape->seektablelength);
for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++)
ape->seektable[i] = avio_rl32(pb);
}
ape->frames[0].pos = ape->firstframe;
ape->frames[0].nblocks = ape->blocksperframe;
ape->frames[0].skip = 0;
for (i = 1; i < ape->totalframes; i++) {
ape->frames[i].pos = ape->seektable[i]; //ape->frames[i-1].pos + ape->blocksperframe;
ape->frames[i].nblocks = ape->blocksperframe;
ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos;
ape->frames[i].skip = (ape->frames[i].pos - ape->frames[0].pos) & 3;
}
ape->frames[ape->totalframes - 1].size = ape->finalframeblocks * 4;
ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks;
for (i = 0; i < ape->totalframes; i++) {
if(ape->frames[i].skip){
ape->frames[i].pos -= ape->frames[i].skip;
ape->frames[i].size += ape->frames[i].skip;
}
ape->frames[i].size = (ape->frames[i].size + 3) & ~3;
}
ape_dumpinfo(s, ape);
/* try to read APE tags */
if (!url_is_streamed(pb)) {
ff_ape_parse_tag(s);
avio_seek(pb, 0, SEEK_SET);
}
av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10, ape->compressiontype);
/* now we are ready: build format streams */
st = av_new_stream(s, 0);
if (!st)
return -1;
total_blocks = (ape->totalframes == 0) ? 0 : ((ape->totalframes - 1) * ape->blocksperframe) + ape->finalframeblocks;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_APE;
st->codec->codec_tag = MKTAG('A', 'P', 'E', ' ');
st->codec->channels = ape->channels;
st->codec->sample_rate = ape->samplerate;
st->codec->bits_per_coded_sample = ape->bps;
st->codec->frame_size = MAC_SUBFRAME_SIZE;
st->nb_frames = ape->totalframes;
st->start_time = 0;
st->duration = total_blocks / MAC_SUBFRAME_SIZE;
av_set_pts_info(st, 64, MAC_SUBFRAME_SIZE, ape->samplerate);
st->codec->extradata = av_malloc(APE_EXTRADATA_SIZE);
st->codec->extradata_size = APE_EXTRADATA_SIZE;
AV_WL16(st->codec->extradata + 0, ape->fileversion);
AV_WL16(st->codec->extradata + 2, ape->compressiontype);
AV_WL16(st->codec->extradata + 4, ape->formatflags);
pts = 0;
for (i = 0; i < ape->totalframes; i++) {
ape->frames[i].pts = pts;
av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0, AVINDEX_KEYFRAME);
pts += ape->blocksperframe / MAC_SUBFRAME_SIZE;
}
return 0;
}
|
CWE-399
| 178,696 | 569 |
4670505959256340435798380662682646268
| null | null | null |
nbd
|
3ef52043861ab16352d49af89e048ba6339d6df8
| 1 |
int mainloop(CLIENT *client) {
struct nbd_request request;
struct nbd_reply reply;
gboolean go_on=TRUE;
#ifdef DODBG
int i = 0;
#endif
negotiate(client->net, client, NULL);
DEBUG("Entering request loop!\n");
reply.magic = htonl(NBD_REPLY_MAGIC);
reply.error = 0;
while (go_on) {
char buf[BUFSIZE];
size_t len;
#ifdef DODBG
i++;
printf("%d: ", i);
#endif
readit(client->net, &request, sizeof(request));
request.from = ntohll(request.from);
request.type = ntohl(request.type);
if (request.type==NBD_CMD_DISC) {
msg2(LOG_INFO, "Disconnect request received.");
if (client->server->flags & F_COPYONWRITE) {
if (client->difmap) g_free(client->difmap) ;
close(client->difffile);
unlink(client->difffilename);
free(client->difffilename);
}
go_on=FALSE;
continue;
}
len = ntohl(request.len);
if (request.magic != htonl(NBD_REQUEST_MAGIC))
err("Not enough magic.");
if (len > BUFSIZE + sizeof(struct nbd_reply))
err("Request too big!");
#ifdef DODBG
printf("%s from %llu (%llu) len %d, ", request.type ? "WRITE" :
"READ", (unsigned long long)request.from,
(unsigned long long)request.from / 512, len);
#endif
memcpy(reply.handle, request.handle, sizeof(reply.handle));
if ((request.from + len) > (OFFT_MAX)) {
DEBUG("[Number too large!]");
ERROR(client, reply, EINVAL);
continue;
}
if (((ssize_t)((off_t)request.from + len) > client->exportsize)) {
DEBUG("[RANGE!]");
ERROR(client, reply, EINVAL);
continue;
}
if (request.type==NBD_CMD_WRITE) {
DEBUG("wr: net->buf, ");
readit(client->net, buf, len);
DEBUG("buf->exp, ");
if ((client->server->flags & F_READONLY) ||
(client->server->flags & F_AUTOREADONLY)) {
DEBUG("[WRITE to READONLY!]");
ERROR(client, reply, EPERM);
continue;
}
if (expwrite(request.from, buf, len, client)) {
DEBUG("Write failed: %m" );
ERROR(client, reply, errno);
continue;
}
SEND(client->net, reply);
DEBUG("OK!\n");
continue;
}
/* READ */
DEBUG("exp->buf, ");
if (expread(request.from, buf + sizeof(struct nbd_reply), len, client)) {
DEBUG("Read failed: %m");
ERROR(client, reply, errno);
continue;
}
DEBUG("buf->net, ");
memcpy(buf, &reply, sizeof(struct nbd_reply));
writeit(client->net, buf, len + sizeof(struct nbd_reply));
DEBUG("OK!\n");
}
return 0;
}
|
CWE-119
| 178,699 | 570 |
21610058009013440610924702255406066217
| null | null | null |
linux
|
95a69adab9acfc3981c504737a2b6578e4d846ef
| 1 |
int main(void)
{
int fd, len, sock_opt;
int error;
struct cn_msg *message;
struct pollfd pfd;
struct nlmsghdr *incoming_msg;
struct cn_msg *incoming_cn_msg;
struct hv_kvp_msg *hv_msg;
char *p;
char *key_value;
char *key_name;
int op;
int pool;
char *if_name;
struct hv_kvp_ipaddr_value *kvp_ip_val;
daemon(1, 0);
openlog("KVP", 0, LOG_USER);
syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
/*
* Retrieve OS release information.
*/
kvp_get_os_info();
if (kvp_file_init()) {
syslog(LOG_ERR, "Failed to initialize the pools");
exit(EXIT_FAILURE);
}
fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
if (fd < 0) {
syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd);
exit(EXIT_FAILURE);
}
addr.nl_family = AF_NETLINK;
addr.nl_pad = 0;
addr.nl_pid = 0;
addr.nl_groups = CN_KVP_IDX;
error = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
if (error < 0) {
syslog(LOG_ERR, "bind failed; error:%d", error);
close(fd);
exit(EXIT_FAILURE);
}
sock_opt = addr.nl_groups;
setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt));
/*
* Register ourselves with the kernel.
*/
message = (struct cn_msg *)kvp_send_buffer;
message->id.idx = CN_KVP_IDX;
message->id.val = CN_KVP_VAL;
hv_msg = (struct hv_kvp_msg *)message->data;
hv_msg->kvp_hdr.operation = KVP_OP_REGISTER1;
message->ack = 0;
message->len = sizeof(struct hv_kvp_msg);
len = netlink_send(fd, message);
if (len < 0) {
syslog(LOG_ERR, "netlink_send failed; error:%d", len);
close(fd);
exit(EXIT_FAILURE);
}
pfd.fd = fd;
while (1) {
struct sockaddr *addr_p = (struct sockaddr *) &addr;
socklen_t addr_l = sizeof(addr);
pfd.events = POLLIN;
pfd.revents = 0;
poll(&pfd, 1, -1);
len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
addr_p, &addr_l);
if (len < 0 || addr.nl_pid) {
syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
addr.nl_pid, errno, strerror(errno));
close(fd);
return -1;
}
incoming_msg = (struct nlmsghdr *)kvp_recv_buffer;
incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
/*
* We will use the KVP header information to pass back
* the error from this daemon. So, first copy the state
* and set the error code to success.
*/
op = hv_msg->kvp_hdr.operation;
pool = hv_msg->kvp_hdr.pool;
hv_msg->error = HV_S_OK;
if ((in_hand_shake) && (op == KVP_OP_REGISTER1)) {
/*
* Driver is registering with us; stash away the version
* information.
*/
in_hand_shake = 0;
p = (char *)hv_msg->body.kvp_register.version;
lic_version = malloc(strlen(p) + 1);
if (lic_version) {
strcpy(lic_version, p);
syslog(LOG_INFO, "KVP LIC Version: %s",
lic_version);
} else {
syslog(LOG_ERR, "malloc failed");
}
continue;
}
switch (op) {
case KVP_OP_GET_IP_INFO:
kvp_ip_val = &hv_msg->body.kvp_ip_val;
if_name =
kvp_mac_to_if_name((char *)kvp_ip_val->adapter_id);
if (if_name == NULL) {
/*
* We could not map the mac address to an
* interface name; return error.
*/
hv_msg->error = HV_E_FAIL;
break;
}
error = kvp_get_ip_info(
0, if_name, KVP_OP_GET_IP_INFO,
kvp_ip_val,
(MAX_IP_ADDR_SIZE * 2));
if (error)
hv_msg->error = error;
free(if_name);
break;
case KVP_OP_SET_IP_INFO:
kvp_ip_val = &hv_msg->body.kvp_ip_val;
if_name = kvp_get_if_name(
(char *)kvp_ip_val->adapter_id);
if (if_name == NULL) {
/*
* We could not map the guid to an
* interface name; return error.
*/
hv_msg->error = HV_GUID_NOTFOUND;
break;
}
error = kvp_set_ip_info(if_name, kvp_ip_val);
if (error)
hv_msg->error = error;
free(if_name);
break;
case KVP_OP_SET:
if (kvp_key_add_or_modify(pool,
hv_msg->body.kvp_set.data.key,
hv_msg->body.kvp_set.data.key_size,
hv_msg->body.kvp_set.data.value,
hv_msg->body.kvp_set.data.value_size))
hv_msg->error = HV_S_CONT;
break;
case KVP_OP_GET:
if (kvp_get_value(pool,
hv_msg->body.kvp_set.data.key,
hv_msg->body.kvp_set.data.key_size,
hv_msg->body.kvp_set.data.value,
hv_msg->body.kvp_set.data.value_size))
hv_msg->error = HV_S_CONT;
break;
case KVP_OP_DELETE:
if (kvp_key_delete(pool,
hv_msg->body.kvp_delete.key,
hv_msg->body.kvp_delete.key_size))
hv_msg->error = HV_S_CONT;
break;
default:
break;
}
if (op != KVP_OP_ENUMERATE)
goto kvp_done;
/*
* If the pool is KVP_POOL_AUTO, dynamically generate
* both the key and the value; if not read from the
* appropriate pool.
*/
if (pool != KVP_POOL_AUTO) {
if (kvp_pool_enumerate(pool,
hv_msg->body.kvp_enum_data.index,
hv_msg->body.kvp_enum_data.data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE,
hv_msg->body.kvp_enum_data.data.value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE))
hv_msg->error = HV_S_CONT;
goto kvp_done;
}
hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
key_name = (char *)hv_msg->body.kvp_enum_data.data.key;
key_value = (char *)hv_msg->body.kvp_enum_data.data.value;
switch (hv_msg->body.kvp_enum_data.index) {
case FullyQualifiedDomainName:
kvp_get_domain_name(key_value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
strcpy(key_name, "FullyQualifiedDomainName");
break;
case IntegrationServicesVersion:
strcpy(key_name, "IntegrationServicesVersion");
strcpy(key_value, lic_version);
break;
case NetworkAddressIPv4:
kvp_get_ip_info(AF_INET, NULL, KVP_OP_ENUMERATE,
key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
strcpy(key_name, "NetworkAddressIPv4");
break;
case NetworkAddressIPv6:
kvp_get_ip_info(AF_INET6, NULL, KVP_OP_ENUMERATE,
key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
strcpy(key_name, "NetworkAddressIPv6");
break;
case OSBuildNumber:
strcpy(key_value, os_build);
strcpy(key_name, "OSBuildNumber");
break;
case OSName:
strcpy(key_value, os_name);
strcpy(key_name, "OSName");
break;
case OSMajorVersion:
strcpy(key_value, os_major);
strcpy(key_name, "OSMajorVersion");
break;
case OSMinorVersion:
strcpy(key_value, os_minor);
strcpy(key_name, "OSMinorVersion");
break;
case OSVersion:
strcpy(key_value, os_version);
strcpy(key_name, "OSVersion");
break;
case ProcessorArchitecture:
strcpy(key_value, processor_arch);
strcpy(key_name, "ProcessorArchitecture");
break;
default:
hv_msg->error = HV_S_CONT;
break;
}
/*
* Send the value back to the kernel. The response is
* already in the receive buffer. Update the cn_msg header to
* reflect the key value that has been added to the message
*/
kvp_done:
incoming_cn_msg->id.idx = CN_KVP_IDX;
incoming_cn_msg->id.val = CN_KVP_VAL;
incoming_cn_msg->ack = 0;
incoming_cn_msg->len = sizeof(struct hv_kvp_msg);
len = netlink_send(fd, incoming_cn_msg);
if (len < 0) {
syslog(LOG_ERR, "net_link send failed; error:%d", len);
exit(EXIT_FAILURE);
}
}
}
| 178,700 | 571 |
263544301252933566409345439453042251774
| null | null | null |
|
linux
|
08dff7b7d629807dbb1f398c68dd9cd58dd657a1
| 1 |
int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
{
unsigned long onlined_pages = 0;
struct zone *zone;
int need_zonelists_rebuild = 0;
int nid;
int ret;
struct memory_notify arg;
lock_memory_hotplug();
arg.start_pfn = pfn;
arg.nr_pages = nr_pages;
arg.status_change_nid = -1;
nid = page_to_nid(pfn_to_page(pfn));
if (node_present_pages(nid) == 0)
arg.status_change_nid = nid;
ret = memory_notify(MEM_GOING_ONLINE, &arg);
ret = notifier_to_errno(ret);
if (ret) {
memory_notify(MEM_CANCEL_ONLINE, &arg);
unlock_memory_hotplug();
return ret;
}
/*
* This doesn't need a lock to do pfn_to_page().
* The section can't be removed here because of the
* memory_block->state_mutex.
*/
zone = page_zone(pfn_to_page(pfn));
/*
* If this zone is not populated, then it is not in zonelist.
* This means the page allocator ignores this zone.
* So, zonelist must be updated after online.
*/
mutex_lock(&zonelists_mutex);
if (!populated_zone(zone))
need_zonelists_rebuild = 1;
ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
online_pages_range);
if (ret) {
mutex_unlock(&zonelists_mutex);
printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
(unsigned long long) pfn << PAGE_SHIFT,
(((unsigned long long) pfn + nr_pages)
<< PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
unlock_memory_hotplug();
return ret;
}
zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
if (need_zonelists_rebuild)
build_all_zonelists(NULL, zone);
else
zone_pcp_update(zone);
mutex_unlock(&zonelists_mutex);
init_per_zone_wmark_min();
if (onlined_pages) {
kswapd_run(zone_to_nid(zone));
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
}
vm_total_pages = nr_free_pagecache_pages();
writeback_set_ratelimit();
if (onlined_pages)
memory_notify(MEM_ONLINE, &arg);
unlock_memory_hotplug();
return 0;
}
| 178,701 | 572 |
330882086021001935334044472482899202693
| null | null | null |
|
linux
|
8f363b77ee4fbf7c3bbcf5ec2c5ca482d396d664
| 1 |
static void tcp_illinois_info(struct sock *sk, u32 ext,
struct sk_buff *skb)
{
const struct illinois *ca = inet_csk_ca(sk);
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
struct tcpvegas_info info = {
.tcpv_enabled = 1,
.tcpv_rttcnt = ca->cnt_rtt,
.tcpv_minrtt = ca->base_rtt,
};
u64 t = ca->sum_rtt;
do_div(t, ca->cnt_rtt);
info.tcpv_rtt = t;
nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
}
}
|
CWE-189
| 178,702 | 573 |
162920631819149101156572064040648031330
| null | null | null |
linux
|
ed6fe9d614fc1bca95eb8c0ccd0e92db00ef9d5d
| 1 |
static int do_siocgstamp(struct net *net, struct socket *sock,
unsigned int cmd, void __user *up)
{
mm_segment_t old_fs = get_fs();
struct timeval ktv;
int err;
set_fs(KERNEL_DS);
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
set_fs(old_fs);
if (!err)
err = compat_put_timeval(up, &ktv);
return err;
}
|
CWE-399
| 178,708 | 579 |
324785292477380531106035548806009996367
| null | null | null |
linux
|
ed6fe9d614fc1bca95eb8c0ccd0e92db00ef9d5d
| 1 |
static int do_siocgstampns(struct net *net, struct socket *sock,
unsigned int cmd, void __user *up)
{
mm_segment_t old_fs = get_fs();
struct timespec kts;
int err;
set_fs(KERNEL_DS);
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
set_fs(old_fs);
if (!err)
err = compat_put_timespec(up, &kts);
return err;
}
|
CWE-399
| 178,709 | 580 |
301299746212078990359883459971628199755
| null | null | null |
linux
|
70789d7052239992824628db8133de08dc78e593
| 1 |
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff)
{
struct sk_buff *prev, *next;
struct net_device *dev;
int offset, end;
struct net *net = dev_net(skb_dst(skb)->dev);
if (fq->q.last_in & INET_FRAG_COMPLETE)
goto err;
offset = ntohs(fhdr->frag_off) & ~0x7;
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((u8 *)&fhdr->frag_off -
skb_network_header(skb)));
return -1;
}
if (skb->ip_summed == CHECKSUM_COMPLETE) {
const unsigned char *nh = skb_network_header(skb);
skb->csum = csum_sub(skb->csum,
csum_partial(nh, (u8 *)(fhdr + 1) - nh,
0));
}
/* Is this the final fragment? */
if (!(fhdr->frag_off & htons(IP6_MF))) {
/* If we already have some bits beyond end
* or have different end, the segment is corrupted.
*/
if (end < fq->q.len ||
((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
goto err;
fq->q.last_in |= INET_FRAG_LAST_IN;
fq->q.len = end;
} else {
/* Check if the fragment is rounded to 8 bytes.
* Required by the RFC.
*/
if (end & 0x7) {
/* RFC2460 says always send parameter problem in
* this case. -DaveM
*/
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return -1;
}
if (end > fq->q.len) {
/* Some bits beyond end -> corruption. */
if (fq->q.last_in & INET_FRAG_LAST_IN)
goto err;
fq->q.len = end;
}
}
if (end == offset)
goto err;
/* Point into the IP datagram 'data' part. */
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
goto err;
if (pskb_trim_rcsum(skb, end - offset))
goto err;
/* Find out which fragments are in front and at the back of us
* in the chain of fragments so far. We must know where to put
* this fragment, right?
*/
prev = fq->q.fragments_tail;
if (!prev || FRAG6_CB(prev)->offset < offset) {
next = NULL;
goto found;
}
prev = NULL;
for(next = fq->q.fragments; next != NULL; next = next->next) {
if (FRAG6_CB(next)->offset >= offset)
break; /* bingo! */
prev = next;
}
found:
/* We found where to put this one. Check for overlap with
* preceding fragment, and, if needed, align things so that
* any overlaps are eliminated.
*/
if (prev) {
int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
if (i > 0) {
offset += i;
if (end <= offset)
goto err;
if (!pskb_pull(skb, i))
goto err;
if (skb->ip_summed != CHECKSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_NONE;
}
}
/* Look for overlap with succeeding segments.
* If we can merge fragments, do it.
*/
while (next && FRAG6_CB(next)->offset < end) {
int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
if (i < next->len) {
/* Eat head of the next overlapped fragment
* and leave the loop. The next ones cannot overlap.
*/
if (!pskb_pull(next, i))
goto err;
FRAG6_CB(next)->offset += i; /* next fragment */
fq->q.meat -= i;
if (next->ip_summed != CHECKSUM_UNNECESSARY)
next->ip_summed = CHECKSUM_NONE;
break;
} else {
struct sk_buff *free_it = next;
/* Old fragment is completely overridden with
* new one drop it.
*/
next = next->next;
if (prev)
prev->next = next;
else
fq->q.fragments = next;
fq->q.meat -= free_it->len;
frag_kfree_skb(fq->q.net, free_it);
}
}
FRAG6_CB(skb)->offset = offset;
/* Insert this fragment in the chain of fragments. */
skb->next = next;
if (!next)
fq->q.fragments_tail = skb;
if (prev)
prev->next = skb;
else
fq->q.fragments = skb;
dev = skb->dev;
if (dev) {
fq->iif = dev->ifindex;
skb->dev = NULL;
}
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
atomic_add(skb->truesize, &fq->q.net->mem);
/* The first fragment.
* nhoffset is obtained from the first fragment, of course.
*/
if (offset == 0) {
fq->nhoffset = nhoff;
fq->q.last_in |= INET_FRAG_FIRST_IN;
}
if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len)
return ip6_frag_reasm(fq, prev, dev);
write_lock(&ip6_frags.lock);
list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
write_unlock(&ip6_frags.lock);
return -1;
err:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
}
| 178,711 | 582 |
19687138240682457256413348681164791537
| null | null | null |
|
linux
|
f0ec1aaf54caddd21c259aea8b2ecfbde4ee4fb9
| 1 |
void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
{
/* convert pages-jiffies to Mbyte-usec */
stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB;
stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB;
if (p->mm) {
/* adjust to KB unit */
stats->hiwater_rss = p->mm->hiwater_rss * PAGE_SIZE / KB;
stats->hiwater_vm = p->mm->hiwater_vm * PAGE_SIZE / KB;
}
stats->read_char = p->rchar;
stats->write_char = p->wchar;
stats->read_syscalls = p->syscr;
stats->write_syscalls = p->syscw;
}
|
CWE-399
| 178,754 | 620 |
312814071675674284808977830825703661829
| null | null | null |
linux
|
06b6a1cf6e776426766298d055bb3991957d90a7
| 1 |
int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int msg_flags)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
long timeo;
int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
struct sockaddr_in *sin;
struct rds_incoming *inc = NULL;
/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
timeo = sock_rcvtimeo(sk, nonblock);
rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
if (msg_flags & MSG_OOB)
goto out;
while (1) {
/* If there are pending notifications, do those - and nothing else */
if (!list_empty(&rs->rs_notify_queue)) {
ret = rds_notify_queue_get(rs, msg);
break;
}
if (rs->rs_cong_notify) {
ret = rds_notify_cong(rs, msg);
break;
}
if (!rds_next_incoming(rs, &inc)) {
if (nonblock) {
ret = -EAGAIN;
break;
}
timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
(!list_empty(&rs->rs_notify_queue) ||
rs->rs_cong_notify ||
rds_next_incoming(rs, &inc)), timeo);
rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
timeo);
if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
continue;
ret = timeo;
if (ret == 0)
ret = -ETIMEDOUT;
break;
}
rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
&inc->i_conn->c_faddr,
ntohs(inc->i_hdr.h_sport));
ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
size);
if (ret < 0)
break;
/*
* if the message we just copied isn't at the head of the
* recv queue then someone else raced us to return it, try
* to get the next message.
*/
if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
rds_inc_put(inc);
inc = NULL;
rds_stats_inc(s_recv_deliver_raced);
continue;
}
if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
if (msg_flags & MSG_TRUNC)
ret = be32_to_cpu(inc->i_hdr.h_len);
msg->msg_flags |= MSG_TRUNC;
}
if (rds_cmsg_recv(inc, msg)) {
ret = -EFAULT;
goto out;
}
rds_stats_inc(s_recv_delivered);
sin = (struct sockaddr_in *)msg->msg_name;
if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = inc->i_hdr.h_sport;
sin->sin_addr.s_addr = inc->i_saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}
break;
}
if (inc)
rds_inc_put(inc);
out:
return ret;
}
|
CWE-200
| 178,755 | 621 |
145358621780894131503963249402616979504
| null | null | null |
linux
|
adee11b2085bee90bd8f4f52123ffb07882d6256
| 1 |
static int udf_load_logicalvol(struct super_block *sb, sector_t block,
struct kernel_lb_addr *fileset)
{
struct logicalVolDesc *lvd;
int i, j, offset;
uint8_t type;
struct udf_sb_info *sbi = UDF_SB(sb);
struct genericPartitionMap *gpm;
uint16_t ident;
struct buffer_head *bh;
int ret = 0;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
return 1;
BUG_ON(ident != TAG_IDENT_LVD);
lvd = (struct logicalVolDesc *)bh->b_data;
ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
if (ret)
goto out_bh;
for (i = 0, offset = 0;
i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
i++, offset += gpm->partitionMapLength) {
struct udf_part_map *map = &sbi->s_partmaps[i];
gpm = (struct genericPartitionMap *)
&(lvd->partitionMaps[offset]);
type = gpm->partitionMapType;
if (type == 1) {
struct genericPartitionMap1 *gpm1 =
(struct genericPartitionMap1 *)gpm;
map->s_partition_type = UDF_TYPE1_MAP15;
map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
map->s_partition_func = NULL;
} else if (type == 2) {
struct udfPartitionMap2 *upm2 =
(struct udfPartitionMap2 *)gpm;
if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
strlen(UDF_ID_VIRTUAL))) {
u16 suf =
le16_to_cpu(((__le16 *)upm2->partIdent.
identSuffix)[0]);
if (suf < 0x0200) {
map->s_partition_type =
UDF_VIRTUAL_MAP15;
map->s_partition_func =
udf_get_pblock_virt15;
} else {
map->s_partition_type =
UDF_VIRTUAL_MAP20;
map->s_partition_func =
udf_get_pblock_virt20;
}
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_SPARABLE,
strlen(UDF_ID_SPARABLE))) {
uint32_t loc;
struct sparingTable *st;
struct sparablePartitionMap *spm =
(struct sparablePartitionMap *)gpm;
map->s_partition_type = UDF_SPARABLE_MAP15;
map->s_type_specific.s_sparing.s_packet_len =
le16_to_cpu(spm->packetLength);
for (j = 0; j < spm->numSparingTables; j++) {
struct buffer_head *bh2;
loc = le32_to_cpu(
spm->locSparingTable[j]);
bh2 = udf_read_tagged(sb, loc, loc,
&ident);
map->s_type_specific.s_sparing.
s_spar_map[j] = bh2;
if (bh2 == NULL)
continue;
st = (struct sparingTable *)bh2->b_data;
if (ident != 0 || strncmp(
st->sparingIdent.ident,
UDF_ID_SPARING,
strlen(UDF_ID_SPARING))) {
brelse(bh2);
map->s_type_specific.s_sparing.
s_spar_map[j] = NULL;
}
}
map->s_partition_func = udf_get_pblock_spar15;
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_METADATA,
strlen(UDF_ID_METADATA))) {
struct udf_meta_data *mdata =
&map->s_type_specific.s_metadata;
struct metadataPartitionMap *mdm =
(struct metadataPartitionMap *)
&(lvd->partitionMaps[offset]);
udf_debug("Parsing Logical vol part %d type %d id=%s\n",
i, type, UDF_ID_METADATA);
map->s_partition_type = UDF_METADATA_MAP25;
map->s_partition_func = udf_get_pblock_meta25;
mdata->s_meta_file_loc =
le32_to_cpu(mdm->metadataFileLoc);
mdata->s_mirror_file_loc =
le32_to_cpu(mdm->metadataMirrorFileLoc);
mdata->s_bitmap_file_loc =
le32_to_cpu(mdm->metadataBitmapFileLoc);
mdata->s_alloc_unit_size =
le32_to_cpu(mdm->allocUnitSize);
mdata->s_align_unit_size =
le16_to_cpu(mdm->alignUnitSize);
if (mdm->flags & 0x01)
mdata->s_flags |= MF_DUPLICATE_MD;
udf_debug("Metadata Ident suffix=0x%x\n",
le16_to_cpu(*(__le16 *)
mdm->partIdent.identSuffix));
udf_debug("Metadata part num=%d\n",
le16_to_cpu(mdm->partitionNum));
udf_debug("Metadata part alloc unit size=%d\n",
le32_to_cpu(mdm->allocUnitSize));
udf_debug("Metadata file loc=%d\n",
le32_to_cpu(mdm->metadataFileLoc));
udf_debug("Mirror file loc=%d\n",
le32_to_cpu(mdm->metadataMirrorFileLoc));
udf_debug("Bitmap file loc=%d\n",
le32_to_cpu(mdm->metadataBitmapFileLoc));
udf_debug("Flags: %d %d\n",
mdata->s_flags, mdm->flags);
} else {
udf_debug("Unknown ident: %s\n",
upm2->partIdent.ident);
continue;
}
map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
map->s_partition_num = le16_to_cpu(upm2->partitionNum);
}
udf_debug("Partition (%d:%d) type %d on volume %d\n",
i, map->s_partition_num, type, map->s_volumeseqnum);
}
if (fileset) {
struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
*fileset = lelb_to_cpu(la->extLocation);
udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
fileset->logicalBlockNum,
fileset->partitionReferenceNum);
}
if (lvd->integritySeqExt.extLength)
udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
out_bh:
brelse(bh);
return ret;
}
|
CWE-119
| 178,759 | 625 |
69425838954019907958605312786539079080
| null | null | null |
linux
|
13d518074a952d33d47c428419693f63389547e9
| 1 |
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
int error;
int did_lock_epmutex = 0;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
struct epoll_event epds;
error = -EFAULT;
if (ep_op_has_event(op) &&
copy_from_user(&epds, event, sizeof(struct epoll_event)))
goto error_return;
/* Get the "struct file *" for the eventpoll file */
error = -EBADF;
file = fget(epfd);
if (!file)
goto error_return;
/* Get the "struct file *" for the target file */
tfile = fget(fd);
if (!tfile)
goto error_fput;
/* The target file descriptor must support poll */
error = -EPERM;
if (!tfile->f_op || !tfile->f_op->poll)
goto error_tgt_fput;
/*
* We have to check that the file structure underneath the file descriptor
* the user passed to us _is_ an eventpoll file. And also we do not permit
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
if (file == tfile || !is_file_epoll(file))
goto error_tgt_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
ep = file->private_data;
/*
* When we insert an epoll file descriptor, inside another epoll file
* descriptor, there is the change of creating closed loops, which are
* better be handled here, than in more critical paths. While we are
* checking for loops we also determine the list of files reachable
* and hang them on the tfile_check_list, so we can check that we
* haven't created too many possible wakeup paths.
*
* We need to hold the epmutex across both ep_insert and ep_remove
* b/c we want to make sure we are looking at a coherent view of
* epoll network.
*/
if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
mutex_lock(&epmutex);
did_lock_epmutex = 1;
}
if (op == EPOLL_CTL_ADD) {
if (is_file_epoll(tfile)) {
error = -ELOOP;
if (ep_loop_check(ep, tfile) != 0)
goto error_tgt_fput;
} else
list_add(&tfile->f_tfile_llink, &tfile_check_list);
}
mutex_lock_nested(&ep->mtx, 0);
/*
* Try to lookup the file inside our RB tree, Since we grabbed "mtx"
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
epi = ep_find(ep, tfile, fd);
error = -EINVAL;
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
epds.events |= POLLERR | POLLHUP;
error = ep_insert(ep, &epds, tfile, fd);
} else
error = -EEXIST;
clear_tfile_check_list();
break;
case EPOLL_CTL_DEL:
if (epi)
error = ep_remove(ep, epi);
else
error = -ENOENT;
break;
case EPOLL_CTL_MOD:
if (epi) {
epds.events |= POLLERR | POLLHUP;
error = ep_modify(ep, epi, &epds);
} else
error = -ENOENT;
break;
}
mutex_unlock(&ep->mtx);
error_tgt_fput:
if (did_lock_epmutex)
mutex_unlock(&epmutex);
fput(tfile);
error_fput:
fput(file);
error_return:
return error;
}
| 178,760 | 626 |
158960947259321225881221408709650413864
| null | null | null |
|
linux
|
79549c6dfda0603dba9a70a53467ce62d9335c33
| 1 |
int copy_creds(struct task_struct *p, unsigned long clone_flags)
{
#ifdef CONFIG_KEYS
struct thread_group_cred *tgcred;
#endif
struct cred *new;
int ret;
if (
#ifdef CONFIG_KEYS
!p->cred->thread_keyring &&
#endif
clone_flags & CLONE_THREAD
) {
p->real_cred = get_cred(p->cred);
get_cred(p->cred);
alter_cred_subscribers(p->cred, 2);
kdebug("share_creds(%p{%d,%d})",
p->cred, atomic_read(&p->cred->usage),
read_cred_subscribers(p->cred));
atomic_inc(&p->cred->user->processes);
return 0;
}
new = prepare_creds();
if (!new)
return -ENOMEM;
if (clone_flags & CLONE_NEWUSER) {
ret = create_user_ns(new);
if (ret < 0)
goto error_put;
}
/* cache user_ns in cred. Doesn't need a refcount because it will
* stay pinned by cred->user
*/
new->user_ns = new->user->user_ns;
#ifdef CONFIG_KEYS
/* new threads get their own thread keyrings if their parent already
* had one */
if (new->thread_keyring) {
key_put(new->thread_keyring);
new->thread_keyring = NULL;
if (clone_flags & CLONE_THREAD)
install_thread_keyring_to_cred(new);
}
/* we share the process and session keyrings between all the threads in
* a process - this is slightly icky as we violate COW credentials a
* bit */
if (!(clone_flags & CLONE_THREAD)) {
tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
if (!tgcred) {
ret = -ENOMEM;
goto error_put;
}
atomic_set(&tgcred->usage, 1);
spin_lock_init(&tgcred->lock);
tgcred->process_keyring = NULL;
tgcred->session_keyring = key_get(new->tgcred->session_keyring);
release_tgcred(new);
new->tgcred = tgcred;
}
#endif
atomic_inc(&new->user->processes);
p->cred = p->real_cred = get_cred(new);
alter_cred_subscribers(new, 2);
validate_creds(new);
return 0;
error_put:
put_cred(new);
return ret;
}
|
CWE-119
| 178,761 | 627 |
297010684195110082025299424642759875579
| null | null | null |
linux
|
bcc2c9c3fff859e0eb019fe6fec26f9b8eba795c
| 1 |
int main(void)
{
int fd, len, sock_opt;
int error;
struct cn_msg *message;
struct pollfd pfd;
struct nlmsghdr *incoming_msg;
struct cn_msg *incoming_cn_msg;
struct hv_kvp_msg *hv_msg;
char *p;
char *key_value;
char *key_name;
daemon(1, 0);
openlog("KVP", 0, LOG_USER);
syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
/*
* Retrieve OS release information.
*/
kvp_get_os_info();
if (kvp_file_init()) {
syslog(LOG_ERR, "Failed to initialize the pools");
exit(-1);
}
fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
if (fd < 0) {
syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd);
exit(-1);
}
addr.nl_family = AF_NETLINK;
addr.nl_pad = 0;
addr.nl_pid = 0;
addr.nl_groups = CN_KVP_IDX;
error = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
if (error < 0) {
syslog(LOG_ERR, "bind failed; error:%d", error);
close(fd);
exit(-1);
}
sock_opt = addr.nl_groups;
setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt));
/*
* Register ourselves with the kernel.
*/
message = (struct cn_msg *)kvp_send_buffer;
message->id.idx = CN_KVP_IDX;
message->id.val = CN_KVP_VAL;
hv_msg = (struct hv_kvp_msg *)message->data;
hv_msg->kvp_hdr.operation = KVP_OP_REGISTER;
message->ack = 0;
message->len = sizeof(struct hv_kvp_msg);
len = netlink_send(fd, message);
if (len < 0) {
syslog(LOG_ERR, "netlink_send failed; error:%d", len);
close(fd);
exit(-1);
}
pfd.fd = fd;
while (1) {
pfd.events = POLLIN;
pfd.revents = 0;
poll(&pfd, 1, -1);
len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
if (len < 0) {
syslog(LOG_ERR, "recv failed; error:%d", len);
close(fd);
return -1;
}
incoming_msg = (struct nlmsghdr *)kvp_recv_buffer;
incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
switch (hv_msg->kvp_hdr.operation) {
case KVP_OP_REGISTER:
/*
* Driver is registering with us; stash away the version
* information.
*/
p = (char *)hv_msg->body.kvp_register.version;
lic_version = malloc(strlen(p) + 1);
if (lic_version) {
strcpy(lic_version, p);
syslog(LOG_INFO, "KVP LIC Version: %s",
lic_version);
} else {
syslog(LOG_ERR, "malloc failed");
}
continue;
/*
* The current protocol with the kernel component uses a
* NULL key name to pass an error condition.
* For the SET, GET and DELETE operations,
* use the existing protocol to pass back error.
*/
case KVP_OP_SET:
if (kvp_key_add_or_modify(hv_msg->kvp_hdr.pool,
hv_msg->body.kvp_set.data.key,
hv_msg->body.kvp_set.data.key_size,
hv_msg->body.kvp_set.data.value,
hv_msg->body.kvp_set.data.value_size))
strcpy(hv_msg->body.kvp_set.data.key, "");
break;
case KVP_OP_GET:
if (kvp_get_value(hv_msg->kvp_hdr.pool,
hv_msg->body.kvp_set.data.key,
hv_msg->body.kvp_set.data.key_size,
hv_msg->body.kvp_set.data.value,
hv_msg->body.kvp_set.data.value_size))
strcpy(hv_msg->body.kvp_set.data.key, "");
break;
case KVP_OP_DELETE:
if (kvp_key_delete(hv_msg->kvp_hdr.pool,
hv_msg->body.kvp_delete.key,
hv_msg->body.kvp_delete.key_size))
strcpy(hv_msg->body.kvp_delete.key, "");
break;
default:
break;
}
if (hv_msg->kvp_hdr.operation != KVP_OP_ENUMERATE)
goto kvp_done;
/*
* If the pool is KVP_POOL_AUTO, dynamically generate
* both the key and the value; if not read from the
* appropriate pool.
*/
if (hv_msg->kvp_hdr.pool != KVP_POOL_AUTO) {
kvp_pool_enumerate(hv_msg->kvp_hdr.pool,
hv_msg->body.kvp_enum_data.index,
hv_msg->body.kvp_enum_data.data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE,
hv_msg->body.kvp_enum_data.data.value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
goto kvp_done;
}
hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
key_name = (char *)hv_msg->body.kvp_enum_data.data.key;
key_value = (char *)hv_msg->body.kvp_enum_data.data.value;
switch (hv_msg->body.kvp_enum_data.index) {
case FullyQualifiedDomainName:
kvp_get_domain_name(key_value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
strcpy(key_name, "FullyQualifiedDomainName");
break;
case IntegrationServicesVersion:
strcpy(key_name, "IntegrationServicesVersion");
strcpy(key_value, lic_version);
break;
case NetworkAddressIPv4:
kvp_get_ip_address(AF_INET, key_value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
strcpy(key_name, "NetworkAddressIPv4");
break;
case NetworkAddressIPv6:
kvp_get_ip_address(AF_INET6, key_value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
strcpy(key_name, "NetworkAddressIPv6");
break;
case OSBuildNumber:
strcpy(key_value, os_build);
strcpy(key_name, "OSBuildNumber");
break;
case OSName:
strcpy(key_value, os_name);
strcpy(key_name, "OSName");
break;
case OSMajorVersion:
strcpy(key_value, os_major);
strcpy(key_name, "OSMajorVersion");
break;
case OSMinorVersion:
strcpy(key_value, os_minor);
strcpy(key_name, "OSMinorVersion");
break;
case OSVersion:
strcpy(key_value, os_build);
strcpy(key_name, "OSVersion");
break;
case ProcessorArchitecture:
strcpy(key_value, processor_arch);
strcpy(key_name, "ProcessorArchitecture");
break;
default:
strcpy(key_value, "Unknown Key");
/*
* We use a null key name to terminate enumeration.
*/
strcpy(key_name, "");
break;
}
/*
* Send the value back to the kernel. The response is
* already in the receive buffer. Update the cn_msg header to
* reflect the key value that has been added to the message
*/
kvp_done:
incoming_cn_msg->id.idx = CN_KVP_IDX;
incoming_cn_msg->id.val = CN_KVP_VAL;
incoming_cn_msg->ack = 0;
incoming_cn_msg->len = sizeof(struct hv_kvp_msg);
len = netlink_send(fd, incoming_cn_msg);
if (len < 0) {
syslog(LOG_ERR, "net_link send failed; error:%d", len);
exit(-1);
}
}
}
|
CWE-20
| 178,765 | 629 |
258681515104986794544522758346436167071
| null | null | null |
linux
|
ed8cd3b2cd61004cab85380c52b1817aca1ca49b
| 1 |
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
}
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec2_list);
return ret;
}
|
CWE-189
| 178,769 | 632 |
227357713410382223578737737134983082503
| null | null | null |
linux
|
20e0fa98b751facf9a1101edaefbc19c82616a68
| 1 |
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
int ret = -ENOMEM, npages, i, acl_len = 0;
npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free;
}
if (npages > 1) {
/* for decoding across pages */
res.acl_scratch = alloc_page(GFP_KERNEL);
if (!res.acl_scratch)
goto out_free;
}
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
/* Let decode_getfacl know not to fail if the ACL data is larger than
* the page we send as a guess */
if (buf == NULL)
res.acl_flags |= NFS4_ACL_LEN_REQUEST;
resp_buf = page_address(pages[0]);
dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
&msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
acl_len = res.acl_len - res.acl_data_offset;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, acl_len);
else
nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
acl_len);
if (buf) {
ret = -ERANGE;
if (acl_len > buflen)
goto out_free;
_copy_from_pages(buf, pages, res.acl_data_offset,
res.acl_len);
}
ret = acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
if (res.acl_scratch)
__free_page(res.acl_scratch);
return ret;
}
|
CWE-189
| 178,770 | 633 |
331883198314548248557810532406148163299
| null | null | null |
linux
|
1bb57e940e1958e40d51f2078f50c3a96a9b2d75
| 1 |
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
int phy_addr;
struct netdev_private *np = netdev_priv(dev);
struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
struct netdev_desc *desc;
int i;
phy_addr = np->phy_addr;
switch (cmd) {
case SIOCDEVPRIVATE:
break;
case SIOCDEVPRIVATE + 1:
miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
break;
case SIOCDEVPRIVATE + 2:
mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
break;
case SIOCDEVPRIVATE + 3:
break;
case SIOCDEVPRIVATE + 4:
break;
case SIOCDEVPRIVATE + 5:
netif_stop_queue (dev);
break;
case SIOCDEVPRIVATE + 6:
netif_wake_queue (dev);
break;
case SIOCDEVPRIVATE + 7:
printk
("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
np->old_rx);
break;
case SIOCDEVPRIVATE + 8:
printk("TX ring:\n");
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
printk
("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
i,
(u32) (np->tx_ring_dma + i * sizeof (*desc)),
(u32)le64_to_cpu(desc->next_desc),
(u32)le64_to_cpu(desc->status),
(u32)(le64_to_cpu(desc->fraginfo) >> 32),
(u32)le64_to_cpu(desc->fraginfo));
printk ("\n");
}
printk ("\n");
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
|
CWE-264
| 178,773 | 636 |
221960374900390280617069609386932287052
| null | null | null |
linux
|
cc9b17ad29ecaa20bfe426a8d4dbfb94b13ff1cc
| 1 |
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
int *errcode)
{
struct sk_buff *skb;
gfp_t gfp_mask;
long timeo;
int err;
gfp_mask = sk->sk_allocation;
if (gfp_mask & __GFP_WAIT)
gfp_mask |= __GFP_REPEAT;
timeo = sock_sndtimeo(sk, noblock);
while (1) {
err = sock_error(sk);
if (err != 0)
goto failure;
err = -EPIPE;
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
skb = alloc_skb(header_len, gfp_mask);
if (skb) {
int npages;
int i;
/* No pages, we're done... */
if (!data_len)
break;
npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
skb->truesize += data_len;
skb_shinfo(skb)->nr_frags = npages;
for (i = 0; i < npages; i++) {
struct page *page;
page = alloc_pages(sk->sk_allocation, 0);
if (!page) {
err = -ENOBUFS;
skb_shinfo(skb)->nr_frags = i;
kfree_skb(skb);
goto failure;
}
__skb_fill_page_desc(skb, i,
page, 0,
(data_len >= PAGE_SIZE ?
PAGE_SIZE :
data_len));
data_len -= PAGE_SIZE;
}
/* Full success... */
break;
}
err = -ENOBUFS;
goto failure;
}
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
err = -EAGAIN;
if (!timeo)
goto failure;
if (signal_pending(current))
goto interrupted;
timeo = sock_wait_for_wmem(sk, timeo);
}
skb_set_owner_w(skb, sk);
return skb;
interrupted:
err = sock_intr_errno(timeo);
failure:
*errcode = err;
return NULL;
}
|
CWE-20
| 178,774 | 637 |
265176515415961300033144108136008262302
| null | null | null |
linux
|
d52fc5dde171f030170a6cb78034d166b13c9445
| 1 |
int cap_bprm_set_creds(struct linux_binprm *bprm)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
bool effective, has_cap = false;
int ret;
effective = false;
ret = get_file_caps(bprm, &effective, &has_cap);
if (ret < 0)
return ret;
if (!issecure(SECURE_NOROOT)) {
/*
* If the legacy file capability is set, then don't set privs
* for a setuid root binary run by a non-root user. Do set it
* for a root user just to cause least surprise to an admin.
*/
if (has_cap && new->uid != 0 && new->euid == 0) {
warn_setuid_and_fcaps_mixed(bprm->filename);
goto skip;
}
/*
* To support inheritance of root-permissions and suid-root
* executables under compatibility mode, we override the
* capability sets for the file.
*
* If only the real uid is 0, we do not set the effective bit.
*/
if (new->euid == 0 || new->uid == 0) {
/* pP' = (cap_bset & ~0) | (pI & ~0) */
new->cap_permitted = cap_combine(old->cap_bset,
old->cap_inheritable);
}
if (new->euid == 0)
effective = true;
}
skip:
/* Don't let someone trace a set[ug]id/setpcap binary with the revised
* credentials unless they have the appropriate permit
*/
if ((new->euid != old->uid ||
new->egid != old->gid ||
!cap_issubset(new->cap_permitted, old->cap_permitted)) &&
bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
/* downgrade; they get no more than they had, and maybe less */
if (!capable(CAP_SETUID)) {
new->euid = new->uid;
new->egid = new->gid;
}
new->cap_permitted = cap_intersect(new->cap_permitted,
old->cap_permitted);
}
new->suid = new->fsuid = new->euid;
new->sgid = new->fsgid = new->egid;
if (effective)
new->cap_effective = new->cap_permitted;
else
cap_clear(new->cap_effective);
bprm->cap_effective = effective;
/*
* Audit candidate if current->cap_effective is set
*
* We do not bother to audit if 3 things are true:
* 1) cap_effective has all caps
* 2) we are root
* 3) root is supposed to have all caps (SECURE_NOROOT)
* Since this is just a normal root execing a process.
*
* Number 1 above might fail if you don't have a full bset, but I think
* that is interesting information to audit.
*/
if (!cap_isclear(new->cap_effective)) {
if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
new->euid != 0 || new->uid != 0 ||
issecure(SECURE_NOROOT)) {
ret = audit_log_bprm_fcaps(bprm, new, old);
if (ret < 0)
return ret;
}
}
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
return 0;
}
|
CWE-264
| 178,788 | 649 |
248446296481147634994342901563339492126
| null | null | null |
linux
|
d50f2ab6f050311dbf7b8f5501b25f0bf64a439b
| 1 |
static int ext4_fill_flex_info(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_desc *gdp = NULL;
ext4_group_t flex_group_count;
ext4_group_t flex_group;
int groups_per_flex = 0;
size_t size;
int i;
sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
groups_per_flex = 1 << sbi->s_log_groups_per_flex;
if (groups_per_flex < 2) {
sbi->s_log_groups_per_flex = 0;
return 1;
}
/* We allocate both existing and potentially added groups */
flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
size = flex_group_count * sizeof(struct flex_groups);
sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
if (sbi->s_flex_groups == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
flex_group_count);
goto failed;
}
for (i = 0; i < sbi->s_groups_count; i++) {
gdp = ext4_get_group_desc(sb, i, NULL);
flex_group = ext4_flex_group(sbi, i);
atomic_add(ext4_free_inodes_count(sb, gdp),
&sbi->s_flex_groups[flex_group].free_inodes);
atomic_add(ext4_free_group_clusters(sb, gdp),
&sbi->s_flex_groups[flex_group].free_clusters);
atomic_add(ext4_used_dirs_count(sb, gdp),
&sbi->s_flex_groups[flex_group].used_dirs);
}
return 1;
failed:
return 0;
}
|
CWE-189
| 178,791 | 652 |
274161292744789021551891355559204521107
| null | null | null |
linux
|
d0772b70faaf8e9f2013b6c4273d94d5eac8047a
| 1 |
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
struct ipv6hdr *iph = ipv6_hdr(skb);
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
return xfrm6_rcv_spi(skb, spi);
}
|
CWE-399
| 178,794 | 655 |
339521390955967229083981310567143521368
| null | null | null |
linux
|
371528caec553785c37f73fa3926ea0de84f986f
| 1 |
static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
struct cftype *cft, struct eventfd_ctx *eventfd)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
int type = MEMFILE_TYPE(cft->private);
u64 usage;
int i, j, size;
mutex_lock(&memcg->thresholds_lock);
if (type == _MEM)
thresholds = &memcg->thresholds;
else if (type == _MEMSWAP)
thresholds = &memcg->memsw_thresholds;
else
BUG();
/*
* Something went wrong if we trying to unregister a threshold
* if we don't have thresholds
*/
BUG_ON(!thresholds);
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
__mem_cgroup_threshold(memcg, type == _MEMSWAP);
/* Calculate new number of threshold */
size = 0;
for (i = 0; i < thresholds->primary->size; i++) {
if (thresholds->primary->entries[i].eventfd != eventfd)
size++;
}
new = thresholds->spare;
/* Set thresholds array to NULL if we don't have thresholds */
if (!size) {
kfree(new);
new = NULL;
goto swap_buffers;
}
new->size = size;
/* Copy thresholds and find current threshold */
new->current_threshold = -1;
for (i = 0, j = 0; i < thresholds->primary->size; i++) {
if (thresholds->primary->entries[i].eventfd == eventfd)
continue;
new->entries[j] = thresholds->primary->entries[i];
if (new->entries[j].threshold < usage) {
/*
* new->current_threshold will not be used
* until rcu_assign_pointer(), so it's safe to increment
* it here.
*/
++new->current_threshold;
}
j++;
}
swap_buffers:
/* Swap primary and spare array */
thresholds->spare = thresholds->primary;
rcu_assign_pointer(thresholds->primary, new);
/* To be sure that nobody uses thresholds */
synchronize_rcu();
mutex_unlock(&memcg->thresholds_lock);
}
| 178,815 | 673 |
68337715219973267467150749987222879580
| null | null | null |
|
linux
|
c8e252586f8d5de906385d8cf6385fee289a825e
| 1 |
static int fill_thread_core_info(struct elf_thread_core_info *t,
const struct user_regset_view *view,
long signr, size_t *total)
{
unsigned int i;
/*
* NT_PRSTATUS is the one special case, because the regset data
* goes into the pr_reg field inside the note contents, rather
* than being the whole note contents. We fill the reset in here.
* We assume that regset 0 is NT_PRSTATUS.
*/
fill_prstatus(&t->prstatus, t->task, signr);
(void) view->regsets[0].get(t->task, &view->regsets[0],
0, sizeof(t->prstatus.pr_reg),
&t->prstatus.pr_reg, NULL);
fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
sizeof(t->prstatus), &t->prstatus);
*total += notesize(&t->notes[0]);
do_thread_regset_writeback(t->task, &view->regsets[0]);
/*
* Each other regset might generate a note too. For each regset
* that has no core_note_type or is inactive, we leave t->notes[i]
* all zero and we'll know to skip writing it later.
*/
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
if (regset->core_note_type &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size;
void *data = kmalloc(size, GFP_KERNEL);
if (unlikely(!data))
return 0;
ret = regset->get(t->task, regset,
0, size, data, NULL);
if (unlikely(ret))
kfree(data);
else {
if (regset->core_note_type != NT_PRFPREG)
fill_note(&t->notes[i], "LINUX",
regset->core_note_type,
size, data);
else {
t->prstatus.pr_fpvalid = 1;
fill_note(&t->notes[i], "CORE",
NT_PRFPREG, size, data);
}
*total += notesize(&t->notes[i]);
}
}
}
return 1;
}
| 178,816 | 674 |
133523848575813678645718165406809677011
| null | null | null |
|
linux
|
2702b1526c7278c4d65d78de209a465d4de2885e
| 1 |
static int override_release(char __user *release, int len)
{
int ret = 0;
char buf[65];
if (current->personality & UNAME26) {
char *rest = UTS_RELEASE;
int ndots = 0;
unsigned v;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
break;
if (!isdigit(*rest) && *rest != '.')
break;
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
snprintf(buf, len, "2.6.%u%s", v, rest);
ret = copy_to_user(release, buf, len);
}
return ret;
}
|
CWE-16
| 178,819 | 675 |
194389486511445546758455417032901658740
| null | null | null |
linux
|
61cc74fbb87af6aa551a06a370590c9bc07e29d9
| 1 |
void exit_io_context(void)
{
struct io_context *ioc;
task_lock(current);
ioc = current->io_context;
current->io_context = NULL;
task_unlock(current);
if (atomic_dec_and_test(&ioc->nr_tasks)) {
if (ioc->aic && ioc->aic->exit)
ioc->aic->exit(ioc->aic);
cfq_exit(ioc);
put_io_context(ioc);
}
}
|
CWE-20
| 178,820 | 676 |
254860370760445159991908460061484776520
| null | null | null |
suhosin
|
73b1968ee30f6d9d2dae497544b910e68e114bfa
| 1 |
char *suhosin_encrypt_single_cookie(char *name, int name_len, char *value, int value_len, char *key TSRMLS_DC)
{
char buffer[4096];
char buffer2[4096];
char *buf = buffer, *buf2 = buffer2, *d, *d_url;
int l;
if (name_len > sizeof(buffer)-2) {
buf = estrndup(name, name_len);
} else {
memcpy(buf, name, name_len);
buf[name_len] = 0;
}
name_len = php_url_decode(buf, name_len);
normalize_varname(buf);
name_len = strlen(buf);
if (SUHOSIN_G(cookie_plainlist)) {
if (zend_hash_exists(SUHOSIN_G(cookie_plainlist), buf, name_len+1)) {
encrypt_return_plain:
if (buf != buffer) {
efree(buf);
}
return estrndup(value, value_len);
}
} else if (SUHOSIN_G(cookie_cryptlist)) {
if (!zend_hash_exists(SUHOSIN_G(cookie_cryptlist), buf, name_len+1)) {
goto encrypt_return_plain;
}
}
if (strlen(value) <= sizeof(buffer2)-2) {
memcpy(buf2, value, value_len);
buf2[value_len] = 0;
} else {
buf2 = estrndup(value, value_len);
}
value_len = php_url_decode(buf2, value_len);
d = suhosin_encrypt_string(buf2, value_len, buf, name_len, key TSRMLS_CC);
d_url = php_url_encode(d, strlen(d), &l);
efree(d);
if (buf != buffer) {
efree(buf);
}
if (buf2 != buffer2) {
efree(buf2);
}
return d_url;
}
|
CWE-119
| 178,822 | 678 |
41838469559935618440812276411540013889
| null | null | null |
linux
|
a8c1f65c79cbbb2f7da782d4c9d15639a9b94b27
| 1 |
static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
int len)
{
struct igmphdr *ih = igmp_hdr(skb);
struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
struct ip_mc_list *im;
__be32 group = ih->group;
int max_delay;
int mark = 0;
if (len == 8) {
if (ih->code == 0) {
/* Alas, old v1 router presents here. */
max_delay = IGMP_Query_Response_Interval;
in_dev->mr_v1_seen = jiffies +
IGMP_V1_Router_Present_Timeout;
group = 0;
} else {
/* v2 router present */
max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
in_dev->mr_v2_seen = jiffies +
IGMP_V2_Router_Present_Timeout;
}
/* cancel the interface change timer */
in_dev->mr_ifc_count = 0;
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
/* clear deleted report items */
igmpv3_clear_delrec(in_dev);
} else if (len < 12) {
return; /* ignore bogus packet; freed by caller */
} else if (IGMP_V1_SEEN(in_dev)) {
/* This is a v3 query with v1 queriers present */
max_delay = IGMP_Query_Response_Interval;
group = 0;
} else if (IGMP_V2_SEEN(in_dev)) {
/* this is a v3 query with v2 queriers present;
* Interpretation of the max_delay code is problematic here.
* A real v2 host would use ih_code directly, while v3 has a
* different encoding. We use the v3 encoding as more likely
* to be intended in a v3 query.
*/
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
} else { /* v3 */
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
return;
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs) {
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
+ ntohs(ih3->nsrcs)*sizeof(__be32)))
return;
ih3 = igmpv3_query_hdr(skb);
}
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
if (!max_delay)
max_delay = 1; /* can't mod w/ 0 */
in_dev->mr_maxdelay = max_delay;
if (ih3->qrv)
in_dev->mr_qrv = ih3->qrv;
if (!group) { /* general query */
if (ih3->nsrcs)
return; /* no sources allowed */
igmp_gq_start_timer(in_dev);
return;
}
/* mark sources to include, if group & source-specific */
mark = ih3->nsrcs != 0;
}
/*
* - Start the timers in all of our membership records
* that the query applies to for the interface on
* which the query arrived excl. those that belong
* to a "local" group (224.0.0.X)
* - For timers already running check if they need to
* be reset.
* - Use the igmp->igmp_code field as the maximum
* delay possible
*/
rcu_read_lock();
for_each_pmc_rcu(in_dev, im) {
int changed;
if (group && group != im->multiaddr)
continue;
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
spin_lock_bh(&im->lock);
if (im->tm_running)
im->gsquery = im->gsquery && mark;
else
im->gsquery = mark;
changed = !im->gsquery ||
igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
spin_unlock_bh(&im->lock);
if (changed)
igmp_mod_timer(im, max_delay);
}
rcu_read_unlock();
}
|
CWE-399
| 178,823 | 679 |
178069013729538916469367764653907803605
| null | null | null |
linux
|
802f43594d6e4d2ac61086d239153c17873a0428
| 1 |
static void kiocb_batch_free(struct kiocb_batch *batch)
{
struct kiocb *req, *n;
list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
list_del(&req->ki_batch);
kmem_cache_free(kiocb_cachep, req);
}
}
|
CWE-399
| 178,825 | 681 |
31800320877295179157630219886955530312
| null | null | null |
linux
|
a5cd335165e31db9dbab636fd29895d41da55dd2
| 1 |
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_clip_rect __user *clips_ptr;
struct drm_clip_rect *clips = NULL;
struct drm_mode_fb_dirty_cmd *r = data;
struct drm_mode_object *obj;
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out_err1;
}
fb = obj_to_fb(obj);
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
goto out_err1;
}
flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
/* If userspace annotates copy, clips must come in pairs */
if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
ret = -EINVAL;
goto out_err1;
}
if (num_clips && clips_ptr) {
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
goto out_err1;
}
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
if (ret) {
ret = -EFAULT;
goto out_err2;
}
}
if (fb->funcs->dirty) {
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
} else {
ret = -ENOSYS;
goto out_err2;
}
out_err2:
kfree(clips);
out_err1:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
|
CWE-189
| 178,827 | 682 |
108013488907801722177515826606033443457
| null | null | null |
linux
|
be20250c13f88375345ad99950190685eda51eb8
| 1 |
static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char l, n = 0;
char callsign[11];
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
if (*p == FAC_CCITT_DEST_NSAP) {
memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->source_call, callsign);
}
if (*p == FAC_CCITT_SRC_NSAP) {
memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->dest_call, callsign);
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
|
CWE-20
| 178,843 | 693 |
116021283864279117765367862922593228970
| null | null | null |
linux
|
be20250c13f88375345ad99950190685eda51eb8
| 1 |
static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char *pt;
unsigned char l, lg, n = 0;
int fac_national_digis_received = 0;
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (*p == FAC_NATIONAL_RAND)
facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
if (*p == FAC_NATIONAL_DEST_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
facilities->source_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_SRC_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
facilities->dest_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_FAIL_CALL) {
memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_FAIL_ADD) {
memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_DIGIS) {
fac_national_digis_received = 1;
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
if (pt[6] & AX25_HBIT)
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
else
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
}
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
|
CWE-20
| 178,845 | 695 |
145952537291143530766670206813172327524
| null | null | null |
linux
|
0837e3242c73566fc1c0196b4ec61779c25ffc93
| 1 |
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
if ((int)val < 0) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
}
}
/*
* In case we didn't find and reset the event that caused
* the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
if (!found) {
for (i = 0; i < ppmu->n_counter; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
if ((int)val < 0)
write_pmc(i + 1, 0);
}
}
/*
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
if (nmi)
nmi_exit();
else
irq_exit();
}
|
CWE-189
| 178,851 | 700 |
175304852490672568941195560375503904990
| null | null | null |
linux
|
bc909d9ddbf7778371e36a651d6e4194b1cc7d4c
| 1 |
static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
struct msghdr *msg_sys, unsigned flags,
struct used_address *used_address)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
struct sockaddr_storage address;
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
unsigned char ctl[sizeof(struct cmsghdr) + 20]
__attribute__ ((aligned(sizeof(__kernel_size_t))));
/* 20 is size of ipv6_pktinfo */
unsigned char *ctl_buf = ctl;
int err, ctl_len, iov_size, total_len;
err = -EFAULT;
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(msg_sys, msg_compat))
return -EFAULT;
} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
return -EFAULT;
/* do not move before msg_sys is valid */
err = -EMSGSIZE;
if (msg_sys->msg_iovlen > UIO_MAXIOV)
goto out;
/* Check whether to allocate the iovec area */
err = -ENOMEM;
iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
if (!iov)
goto out;
}
/* This will also move the address data into kernel space */
if (MSG_CMSG_COMPAT & flags) {
err = verify_compat_iovec(msg_sys, iov,
(struct sockaddr *)&address,
VERIFY_READ);
} else
err = verify_iovec(msg_sys, iov,
(struct sockaddr *)&address,
VERIFY_READ);
if (err < 0)
goto out_freeiov;
total_len = err;
err = -ENOBUFS;
if (msg_sys->msg_controllen > INT_MAX)
goto out_freeiov;
ctl_len = msg_sys->msg_controllen;
if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
err =
cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
sizeof(ctl));
if (err)
goto out_freeiov;
ctl_buf = msg_sys->msg_control;
ctl_len = msg_sys->msg_controllen;
} else if (ctl_len) {
if (ctl_len > sizeof(ctl)) {
ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
if (ctl_buf == NULL)
goto out_freeiov;
}
err = -EFAULT;
/*
* Careful! Before this, msg_sys->msg_control contains a user pointer.
* Afterwards, it will be a kernel pointer. Thus the compiler-assisted
* checking falls down on this.
*/
if (copy_from_user(ctl_buf,
(void __user __force *)msg_sys->msg_control,
ctl_len))
goto out_freectl;
msg_sys->msg_control = ctl_buf;
}
msg_sys->msg_flags = flags;
if (sock->file->f_flags & O_NONBLOCK)
msg_sys->msg_flags |= MSG_DONTWAIT;
/*
* If this is sendmmsg() and current destination address is same as
* previously succeeded address, omit asking LSM's decision.
* used_address->name_len is initialized to UINT_MAX so that the first
* destination address never matches.
*/
if (used_address && used_address->name_len == msg_sys->msg_namelen &&
!memcmp(&used_address->name, msg->msg_name,
used_address->name_len)) {
err = sock_sendmsg_nosec(sock, msg_sys, total_len);
goto out_freectl;
}
err = sock_sendmsg(sock, msg_sys, total_len);
/*
* If this is sendmmsg() and sending to current destination address was
* successful, remember it.
*/
if (used_address && err >= 0) {
used_address->name_len = msg_sys->msg_namelen;
memcpy(&used_address->name, msg->msg_name,
used_address->name_len);
}
out_freectl:
if (ctl_buf != ctl)
sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
if (iov != iovstack)
sock_kfree_s(sock->sk, iov, iov_size);
out:
return err;
}
| 178,852 | 701 |
202239731063059493710429826741229649679
| null | null | null |
|
linux
|
a9cf73ea7ff78f52662c8658d93c226effbbedde
| 1 |
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
u8 *mac_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
!(type & (SKB_GSO_UDP))))
goto out;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
goto out;
}
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments.
*/
offset = skb->csum_start - skb_headroom(skb);
csum = skb_checksum(skb, offset, skb->len- offset, 0);
offset += skb->csum_offset;
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
if ((skb_headroom(skb) < frag_hdr_sz) &&
pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
goto out;
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
unfrag_ip6hlen;
mac_start = skb_mac_header(skb);
memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
ipv6_select_ident(fptr);
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
*/
segs = skb_segment(skb, features);
out:
return segs;
}
|
CWE-399
| 178,853 | 702 |
94739129464540861081039385118781610904
| null | null | null |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 1 |
static void encode_share_access(struct xdr_stream *xdr, int open_flags)
{
__be32 *p;
RESERVE_SPACE(8);
switch (open_flags & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
WRITE32(NFS4_SHARE_ACCESS_READ);
break;
case FMODE_WRITE:
WRITE32(NFS4_SHARE_ACCESS_WRITE);
break;
case FMODE_READ|FMODE_WRITE:
WRITE32(NFS4_SHARE_ACCESS_BOTH);
break;
default:
BUG();
}
WRITE32(0); /* for linux, share_deny = 0 always */
}
| 178,887 | 730 |
128669092881783839785461404425903912833
| null | null | null |
|
linux
|
56c6a8a4aadca809e04276eabe5552935c51387f
| 1 |
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
const nodemask_t *nodemask, unsigned long totalpages)
{
int points;
if (oom_unkillable_task(p, mem, nodemask))
return 0;
p = find_lock_task_mm(p);
if (!p)
return 0;
/*
* Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
* so the entire heuristic doesn't need to be executed for something
* that cannot be killed.
*/
if (atomic_read(&p->mm->oom_disable_count)) {
task_unlock(p);
return 0;
}
/*
* The memory controller may have a limit of 0 bytes, so avoid a divide
* by zero, if necessary.
*/
if (!totalpages)
totalpages = 1;
/*
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
points = get_mm_rss(p->mm) + p->mm->nr_ptes;
points += get_mm_counter(p->mm, MM_SWAPENTS);
points *= 1000;
points /= totalpages;
task_unlock(p);
/*
* Root processes get 3% bonus, just like the __vm_enough_memory()
* implementation used by LSMs.
*/
if (has_capability_noaudit(p, CAP_SYS_ADMIN))
points -= 30;
/*
* /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
* either completely disable oom killing or always prefer a certain
* task.
*/
points += p->signal->oom_score_adj;
/*
* Never return 0 for an eligible task that may be killed since it's
* possible that no single user task uses more than 0.1% of memory and
* no single admin tasks uses more than 3.0%.
*/
if (points <= 0)
return 1;
return (points < 1000) ? points : 1000;
}
|
CWE-189
| 178,912 | 753 |
321295227204559210817209603409376127665
| null | null | null |
linux
|
15291164b22a357cb211b618adfef4fa82fc0de3
| 1 |
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
transaction_t *transaction;
struct journal_head *jh;
int may_free = 1;
int ret;
BUFFER_TRACE(bh, "entry");
/*
* It is safe to proceed here without the j_list_lock because the
* buffers cannot be stolen by try_to_free_buffers as long as we are
* holding the page lock. --sct
*/
if (!buffer_jbd(bh))
goto zap_buffer_unlocked;
/* OK, we have data buffer in journaled mode */
write_lock(&journal->j_state_lock);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
jh = jbd2_journal_grab_journal_head(bh);
if (!jh)
goto zap_buffer_no_jh;
/*
* We cannot remove the buffer from checkpoint lists until the
* transaction adding inode to orphan list (let's call it T)
* is committed. Otherwise if the transaction changing the
* buffer would be cleaned from the journal before T is
* committed, a crash will cause that the correct contents of
* the buffer will be lost. On the other hand we have to
* clear the buffer dirty bit at latest at the moment when the
* transaction marking the buffer as freed in the filesystem
* structures is committed because from that moment on the
* buffer can be reallocated and used by a different page.
* Since the block hasn't been freed yet but the inode has
* already been added to orphan list, it is safe for us to add
* the buffer to BJ_Forget list of the newest transaction.
*/
transaction = jh->b_transaction;
if (transaction == NULL) {
/* First case: not on any transaction. If it
* has no checkpoint link, then we can zap it:
* it's a writeback-mode buffer so we don't care
* if it hits disk safely. */
if (!jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "not on any transaction: zap");
goto zap_buffer;
}
if (!buffer_dirty(bh)) {
/* bdflush has written it. We can drop it now */
goto zap_buffer;
}
/* OK, it must be in the journal but still not
* written fully to disk: it's metadata or
* journaled data... */
if (journal->j_running_transaction) {
/* ... and once the current transaction has
* committed, the buffer won't be needed any
* longer. */
JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
ret = __dispose_buffer(jh,
journal->j_running_transaction);
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
write_unlock(&journal->j_state_lock);
return ret;
} else {
/* There is no currently-running transaction. So the
* orphan record which we wrote for this file must have
* passed into commit. We must attach this buffer to
* the committing transaction, if it exists. */
if (journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "give to committing trans");
ret = __dispose_buffer(jh,
journal->j_committing_transaction);
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
write_unlock(&journal->j_state_lock);
return ret;
} else {
/* The orphan record's transaction has
* committed. We can cleanse this buffer */
clear_buffer_jbddirty(bh);
goto zap_buffer;
}
}
} else if (transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "on committing transaction");
/*
* The buffer is committing, we simply cannot touch
* it. So we just set j_next_transaction to the
* running transaction (if there is one) and mark
* buffer as freed so that commit code knows it should
* clear dirty bits when it is done with the buffer.
*/
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction;
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
write_unlock(&journal->j_state_lock);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
* We are writing our own transaction's data, not any
* previous one's, so it is safe to throw it away
* (remember that we expect the filesystem to have set
* i_size already for this truncate so recovery will not
* expose the disk blocks we are discarding here.) */
J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
JBUFFER_TRACE(jh, "on running transaction");
may_free = __dispose_buffer(jh, transaction);
}
zap_buffer:
jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
write_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
clear_buffer_dirty(bh);
J_ASSERT_BH(bh, !buffer_jbddirty(bh));
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
bh->b_bdev = NULL;
return may_free;
}
|
CWE-119
| 178,913 | 754 |
113708723622617744945038418263908097733
| null | null | null |
linux
|
7ed47b7d142ec99ad6880bbbec51e9f12b3af74c
| 1 |
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
}
| 178,914 | 755 |
152497505395738615244159472791616185936
| null | null | null |
|
linux
|
7ed47b7d142ec99ad6880bbbec51e9f12b3af74c
| 1 |
static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
dctx->bytes -= n;
srclen -= n;
while (n--)
*pos++ ^= *src++;
if (!dctx->bytes)
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
while (srclen >= GHASH_BLOCK_SIZE) {
crypto_xor(dst, src, GHASH_BLOCK_SIZE);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
src += GHASH_BLOCK_SIZE;
srclen -= GHASH_BLOCK_SIZE;
}
if (srclen) {
dctx->bytes = GHASH_BLOCK_SIZE - srclen;
while (srclen--)
*dst++ ^= *src++;
}
return 0;
}
| 178,915 | 756 |
33132435692963445490556889579559293749
| null | null | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.