| project (stringclasses, 633 values) | commit_id (stringlengths, 7–81) | target (int64, 0–1) | func (stringlengths, 5–484k) | cwe (stringclasses, 131 values) | big_vul_idx (float64, 0–189k, ⌀) | idx (int64, 0–522k) | hash (stringlengths, 34–39) | size (float64, 1–24k, ⌀) | message (stringlengths, 0–11.5k, ⌀) | dataset (stringclasses, 1 value) |
|---|---|---|---|---|---|---|---|---|---|---|
Android
|
85d253fab5e2c01bd90990667c6de25c282fc5cd
| 1 |
void BufferQueueConsumer::dump(String8& result, const char* prefix) const {
mCore->dump(result, prefix);
}
|
CWE-264
| 188,254 | 8,790 |
185822041652000101252356257682866951737
| null | null | null |
Android
|
d72ea85c78a1a68bf99fd5804ad9784b4102fe57
| 1 |
int equalizer_get_parameter(effect_context_t *context, effect_param_t *p,
uint32_t *size)
{
equalizer_context_t *eq_ctxt = (equalizer_context_t *)context;
int voffset = ((p->psize - 1) / sizeof(int32_t) + 1) * sizeof(int32_t);
int32_t *param_tmp = (int32_t *)p->data;
int32_t param = *param_tmp++;
int32_t param2;
char *name;
void *value = p->data + voffset;
int i;
ALOGV("%s", __func__);
p->status = 0;
switch (param) {
case EQ_PARAM_NUM_BANDS:
case EQ_PARAM_CUR_PRESET:
case EQ_PARAM_GET_NUM_OF_PRESETS:
case EQ_PARAM_BAND_LEVEL:
case EQ_PARAM_GET_BAND:
if (p->vsize < sizeof(int16_t))
p->status = -EINVAL;
p->vsize = sizeof(int16_t);
break;
case EQ_PARAM_LEVEL_RANGE:
if (p->vsize < 2 * sizeof(int16_t))
p->status = -EINVAL;
p->vsize = 2 * sizeof(int16_t);
break;
case EQ_PARAM_BAND_FREQ_RANGE:
if (p->vsize < 2 * sizeof(int32_t))
p->status = -EINVAL;
p->vsize = 2 * sizeof(int32_t);
break;
case EQ_PARAM_CENTER_FREQ:
if (p->vsize < sizeof(int32_t))
p->status = -EINVAL;
p->vsize = sizeof(int32_t);
break;
case EQ_PARAM_GET_PRESET_NAME:
break;
case EQ_PARAM_PROPERTIES:
if (p->vsize < (2 + NUM_EQ_BANDS) * sizeof(uint16_t))
p->status = -EINVAL;
p->vsize = (2 + NUM_EQ_BANDS) * sizeof(uint16_t);
break;
default:
p->status = -EINVAL;
}
*size = sizeof(effect_param_t) + voffset + p->vsize;
if (p->status != 0)
return 0;
switch (param) {
case EQ_PARAM_NUM_BANDS:
ALOGV("%s: EQ_PARAM_NUM_BANDS", __func__);
*(uint16_t *)value = (uint16_t)NUM_EQ_BANDS;
break;
case EQ_PARAM_LEVEL_RANGE:
ALOGV("%s: EQ_PARAM_LEVEL_RANGE", __func__);
*(int16_t *)value = -1500;
*((int16_t *)value + 1) = 1500;
break;
case EQ_PARAM_BAND_LEVEL:
ALOGV("%s: EQ_PARAM_BAND_LEVEL", __func__);
param2 = *param_tmp;
if (param2 >= NUM_EQ_BANDS) {
p->status = -EINVAL;
break;
}
*(int16_t *)value = (int16_t)equalizer_get_band_level(eq_ctxt, param2);
break;
case EQ_PARAM_CENTER_FREQ:
ALOGV("%s: EQ_PARAM_CENTER_FREQ", __func__);
param2 = *param_tmp;
if (param2 >= NUM_EQ_BANDS) {
p->status = -EINVAL;
break;
}
*(int32_t *)value = equalizer_get_center_frequency(eq_ctxt, param2);
break;
case EQ_PARAM_BAND_FREQ_RANGE:
ALOGV("%s: EQ_PARAM_BAND_FREQ_RANGE", __func__);
param2 = *param_tmp;
if (param2 >= NUM_EQ_BANDS) {
p->status = -EINVAL;
break;
}
equalizer_get_band_freq_range(eq_ctxt, param2, (uint32_t *)value,
((uint32_t *)value + 1));
break;
case EQ_PARAM_GET_BAND:
ALOGV("%s: EQ_PARAM_GET_BAND", __func__);
param2 = *param_tmp;
*(uint16_t *)value = (uint16_t)equalizer_get_band(eq_ctxt, param2);
break;
case EQ_PARAM_CUR_PRESET:
ALOGV("%s: EQ_PARAM_CUR_PRESET", __func__);
*(uint16_t *)value = (uint16_t)equalizer_get_preset(eq_ctxt);
break;
case EQ_PARAM_GET_NUM_OF_PRESETS:
ALOGV("%s: EQ_PARAM_GET_NUM_OF_PRESETS", __func__);
*(uint16_t *)value = (uint16_t)equalizer_get_num_presets(eq_ctxt);
break;
case EQ_PARAM_GET_PRESET_NAME:
ALOGV("%s: EQ_PARAM_GET_PRESET_NAME", __func__);
param2 = *param_tmp;
ALOGV("param2: %d", param2);
if (param2 >= equalizer_get_num_presets(eq_ctxt)) {
p->status = -EINVAL;
break;
}
name = (char *)value;
strlcpy(name, equalizer_get_preset_name(eq_ctxt, param2), p->vsize - 1);
name[p->vsize - 1] = 0;
p->vsize = strlen(name) + 1;
break;
case EQ_PARAM_PROPERTIES: {
ALOGV("%s: EQ_PARAM_PROPERTIES", __func__);
int16_t *prop = (int16_t *)value;
prop[0] = (int16_t)equalizer_get_preset(eq_ctxt);
prop[1] = (int16_t)NUM_EQ_BANDS;
for (i = 0; i < NUM_EQ_BANDS; i++) {
prop[2 + i] = (int16_t)equalizer_get_band_level(eq_ctxt, i);
}
} break;
default:
p->status = -EINVAL;
break;
}
return 0;
}
|
CWE-200
| 188,633 | 9,098 |
204868137892530330279811202144299626640
| null | null | null |
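Note on the record above: in the EQ_PARAM_GET_PRESET_NAME branch, `p->vsize` is consumed as `p->vsize - 1` without any lower-bound check before the `strlcpy`. A minimal, self-contained sketch (hypothetical names, not the AOSP code) of why an unsigned `vsize - 1` is dangerous when a caller supplies `vsize == 0`:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch: an effect_param_t-style header where the caller controls vsize. */
struct param_hdr {
    uint32_t vsize;   /* value size as claimed by the (untrusted) caller */
    char     value[8];
};

static void get_name_unchecked(struct param_hdr *p, const char *preset)
{
    /* If p->vsize == 0, the unsigned subtraction wraps, so a
     * strlcpy/strncpy-style bound no longer bounds anything and the copy
     * is limited only by the source string length. */
    size_t bound = (size_t)p->vsize - 1;
    printf("claimed bound: %zu\n", bound);  /* 18446744073709551615 on LP64 */
    (void)preset;
    /* strlcpy(p->value, preset, bound); -- would overrun p->value */
}

int main(void)
{
    struct param_hdr p = { 0, { 0 } };
    get_name_unchecked(&p, "Rock");
    return 0;
}
```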
Android
|
9fe27a9b445f7e911286ed31c1087ceac567736b
| 1 |
uint8_t rfc_parse_data(tRFC_MCB* p_mcb, MX_FRAME* p_frame, BT_HDR* p_buf) {
uint8_t ead, eal, fcs;
uint8_t* p_data = (uint8_t*)(p_buf + 1) + p_buf->offset;
uint8_t* p_start = p_data;
uint16_t len;
if (p_buf->len < RFCOMM_CTRL_FRAME_LEN) {
RFCOMM_TRACE_ERROR("Bad Length1: %d", p_buf->len);
return (RFC_EVENT_BAD_FRAME);
}
RFCOMM_PARSE_CTRL_FIELD(ead, p_frame->cr, p_frame->dlci, p_data);
if (!ead) {
RFCOMM_TRACE_ERROR("Bad Address(EA must be 1)");
return (RFC_EVENT_BAD_FRAME);
}
RFCOMM_PARSE_TYPE_FIELD(p_frame->type, p_frame->pf, p_data);
RFCOMM_PARSE_LEN_FIELD(eal, len, p_data);
p_buf->len -= (3 + !ead + !eal + 1); /* Additional 1 for FCS */
p_buf->offset += (3 + !ead + !eal);
/* handle credit if credit based flow control */
if ((p_mcb->flow == PORT_FC_CREDIT) && (p_frame->type == RFCOMM_UIH) &&
(p_frame->dlci != RFCOMM_MX_DLCI) && (p_frame->pf == 1)) {
p_frame->credit = *p_data++;
p_buf->len--;
p_buf->offset++;
} else
p_frame->credit = 0;
if (p_buf->len != len) {
RFCOMM_TRACE_ERROR("Bad Length2 %d %d", p_buf->len, len);
return (RFC_EVENT_BAD_FRAME);
}
fcs = *(p_data + len);
/* All control frames that we are sending are sent with P=1, expect */
/* reply with F=1 */
/* According to TS 07.10 spec ivalid frames are discarded without */
/* notification to the sender */
switch (p_frame->type) {
case RFCOMM_SABME:
if (RFCOMM_FRAME_IS_RSP(p_mcb->is_initiator, p_frame->cr) ||
!p_frame->pf || len || !RFCOMM_VALID_DLCI(p_frame->dlci) ||
!rfc_check_fcs(RFCOMM_CTRL_FRAME_LEN, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad SABME");
return (RFC_EVENT_BAD_FRAME);
} else
return (RFC_EVENT_SABME);
case RFCOMM_UA:
if (RFCOMM_FRAME_IS_CMD(p_mcb->is_initiator, p_frame->cr) ||
!p_frame->pf || len || !RFCOMM_VALID_DLCI(p_frame->dlci) ||
!rfc_check_fcs(RFCOMM_CTRL_FRAME_LEN, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad UA");
return (RFC_EVENT_BAD_FRAME);
} else
return (RFC_EVENT_UA);
case RFCOMM_DM:
if (RFCOMM_FRAME_IS_CMD(p_mcb->is_initiator, p_frame->cr) || len ||
!RFCOMM_VALID_DLCI(p_frame->dlci) ||
!rfc_check_fcs(RFCOMM_CTRL_FRAME_LEN, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad DM");
return (RFC_EVENT_BAD_FRAME);
} else
return (RFC_EVENT_DM);
case RFCOMM_DISC:
if (RFCOMM_FRAME_IS_RSP(p_mcb->is_initiator, p_frame->cr) ||
!p_frame->pf || len || !RFCOMM_VALID_DLCI(p_frame->dlci) ||
!rfc_check_fcs(RFCOMM_CTRL_FRAME_LEN, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad DISC");
return (RFC_EVENT_BAD_FRAME);
} else
return (RFC_EVENT_DISC);
case RFCOMM_UIH:
if (!RFCOMM_VALID_DLCI(p_frame->dlci)) {
RFCOMM_TRACE_ERROR("Bad UIH - invalid DLCI");
return (RFC_EVENT_BAD_FRAME);
} else if (!rfc_check_fcs(2, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad UIH - FCS");
return (RFC_EVENT_BAD_FRAME);
} else if (RFCOMM_FRAME_IS_RSP(p_mcb->is_initiator, p_frame->cr)) {
/* we assume that this is ok to allow bad implementations to work */
RFCOMM_TRACE_ERROR("Bad UIH - response");
return (RFC_EVENT_UIH);
} else
return (RFC_EVENT_UIH);
}
return (RFC_EVENT_BAD_FRAME);
}
|
CWE-125
| 188,634 | 9,099 |
151463744903347161976953210358853267842
| null | null | null |
Android
|
d4a34fefbf292d1e02336e4e272da3ef1e3eef85
| 1 |
uint8_t rfc_parse_data(tRFC_MCB* p_mcb, MX_FRAME* p_frame, BT_HDR* p_buf) {
uint8_t ead, eal, fcs;
uint8_t* p_data = (uint8_t*)(p_buf + 1) + p_buf->offset;
uint8_t* p_start = p_data;
uint16_t len;
if (p_buf->len < RFCOMM_CTRL_FRAME_LEN) {
RFCOMM_TRACE_ERROR("Bad Length1: %d", p_buf->len);
return (RFC_EVENT_BAD_FRAME);
}
RFCOMM_PARSE_CTRL_FIELD(ead, p_frame->cr, p_frame->dlci, p_data);
if (!ead) {
RFCOMM_TRACE_ERROR("Bad Address(EA must be 1)");
return (RFC_EVENT_BAD_FRAME);
}
RFCOMM_PARSE_TYPE_FIELD(p_frame->type, p_frame->pf, p_data);
eal = *(p_data)&RFCOMM_EA;
len = *(p_data)++ >> RFCOMM_SHIFT_LENGTH1;
  if (eal == 0 && p_buf->len > RFCOMM_CTRL_FRAME_LEN) {
len += (*(p_data)++ << RFCOMM_SHIFT_LENGTH2);
} else if (eal == 0) {
RFCOMM_TRACE_ERROR("Bad Length when EAL = 0: %d", p_buf->len);
android_errorWriteLog(0x534e4554, "78288018");
return RFC_EVENT_BAD_FRAME;
}
p_buf->len -= (3 + !ead + !eal + 1); /* Additional 1 for FCS */
p_buf->offset += (3 + !ead + !eal);
/* handle credit if credit based flow control */
if ((p_mcb->flow == PORT_FC_CREDIT) && (p_frame->type == RFCOMM_UIH) &&
(p_frame->dlci != RFCOMM_MX_DLCI) && (p_frame->pf == 1)) {
p_frame->credit = *p_data++;
p_buf->len--;
p_buf->offset++;
} else
p_frame->credit = 0;
if (p_buf->len != len) {
RFCOMM_TRACE_ERROR("Bad Length2 %d %d", p_buf->len, len);
return (RFC_EVENT_BAD_FRAME);
}
fcs = *(p_data + len);
/* All control frames that we are sending are sent with P=1, expect */
/* reply with F=1 */
/* According to TS 07.10 spec ivalid frames are discarded without */
/* notification to the sender */
switch (p_frame->type) {
case RFCOMM_SABME:
if (RFCOMM_FRAME_IS_RSP(p_mcb->is_initiator, p_frame->cr) ||
!p_frame->pf || len || !RFCOMM_VALID_DLCI(p_frame->dlci) ||
!rfc_check_fcs(RFCOMM_CTRL_FRAME_LEN, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad SABME");
return (RFC_EVENT_BAD_FRAME);
} else
return (RFC_EVENT_SABME);
case RFCOMM_UA:
if (RFCOMM_FRAME_IS_CMD(p_mcb->is_initiator, p_frame->cr) ||
!p_frame->pf || len || !RFCOMM_VALID_DLCI(p_frame->dlci) ||
!rfc_check_fcs(RFCOMM_CTRL_FRAME_LEN, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad UA");
return (RFC_EVENT_BAD_FRAME);
} else
return (RFC_EVENT_UA);
case RFCOMM_DM:
if (RFCOMM_FRAME_IS_CMD(p_mcb->is_initiator, p_frame->cr) || len ||
!RFCOMM_VALID_DLCI(p_frame->dlci) ||
!rfc_check_fcs(RFCOMM_CTRL_FRAME_LEN, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad DM");
return (RFC_EVENT_BAD_FRAME);
} else
return (RFC_EVENT_DM);
case RFCOMM_DISC:
if (RFCOMM_FRAME_IS_RSP(p_mcb->is_initiator, p_frame->cr) ||
!p_frame->pf || len || !RFCOMM_VALID_DLCI(p_frame->dlci) ||
!rfc_check_fcs(RFCOMM_CTRL_FRAME_LEN, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad DISC");
return (RFC_EVENT_BAD_FRAME);
} else
return (RFC_EVENT_DISC);
case RFCOMM_UIH:
if (!RFCOMM_VALID_DLCI(p_frame->dlci)) {
RFCOMM_TRACE_ERROR("Bad UIH - invalid DLCI");
return (RFC_EVENT_BAD_FRAME);
} else if (!rfc_check_fcs(2, p_start, fcs)) {
RFCOMM_TRACE_ERROR("Bad UIH - FCS");
return (RFC_EVENT_BAD_FRAME);
} else if (RFCOMM_FRAME_IS_RSP(p_mcb->is_initiator, p_frame->cr)) {
/* we assume that this is ok to allow bad implementations to work */
RFCOMM_TRACE_ERROR("Bad UIH - response");
return (RFC_EVENT_UIH);
} else
return (RFC_EVENT_UIH);
}
return (RFC_EVENT_BAD_FRAME);
}
|
CWE-125
| 188,635 | 9,100 |
152278252648224562922869007780507826369
| null | null | null |
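The two rfc_parse_data records above differ only in how the EA/length octets are consumed. A hedged sketch (simplified stand-in, not the Bluedroid macros) of the pattern at issue: when the EA bit of the first length octet is 0, a second octet must be read, so the parser has to confirm that octet actually exists before dereferencing it:

```c
#include <stdint.h>
#include <stddef.h>

/* Simplified TS 07.10-style length field: bit 0 is the EA flag; if EA == 0,
 * a second length octet follows. Returns octets consumed, or -1 if the
 * buffer is too short. */
static int parse_len(const uint8_t *p, size_t avail, uint16_t *len_out)
{
    if (avail < 1)
        return -1;
    uint8_t ea = p[0] & 0x01;
    uint16_t len = p[0] >> 1;
    if (!ea) {
        if (avail < 2)          /* the missing check: is a second octet present? */
            return -1;          /* without it, p[1] is an out-of-bounds read */
        len |= (uint16_t)p[1] << 7;
    }
    *len_out = len;
    return ea ? 1 : 2;
}
```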
poppler
|
c839b706092583f6b12ed3cc634bf5af34b7a2bb
| 1 |
create_surface_from_thumbnail_data (guchar *data,
gint width,
gint height,
gint rowstride)
{
guchar *cairo_pixels;
cairo_surface_t *surface;
static cairo_user_data_key_t key;
int j;
cairo_pixels = (guchar *)g_malloc (4 * width * height);
surface = cairo_image_surface_create_for_data ((unsigned char *)cairo_pixels,
CAIRO_FORMAT_RGB24,
width, height, 4 * width);
cairo_surface_set_user_data (surface, &key,
cairo_pixels, (cairo_destroy_func_t)g_free);
for (j = height; j; j--) {
guchar *p = data;
guchar *q = cairo_pixels;
guchar *end = p + 3 * width;
while (p < end) {
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
q[0] = p[2];
q[1] = p[1];
q[2] = p[0];
#else
q[1] = p[0];
q[2] = p[1];
q[3] = p[2];
#endif
p += 3;
q += 4;
}
data += rowstride;
cairo_pixels += 4 * width;
}
return surface;
}
|
CWE-189
| 177,773 | 9,103 |
200511016327768201854565868573404023031
| null | null | null |
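The CWE-189 label on the thumbnail record above points at the unchecked `4 * width * height` products feeding both `g_malloc` and the surface stride. A sketch of the usual guard, in plain C rather than the GLib helpers (names hypothetical):

```c
#include <stdint.h>
#include <stdlib.h>

/* Allocate a width*height 4-byte-per-pixel buffer, refusing dimensions
 * whose byte count would overflow size_t. */
static unsigned char *alloc_pixels(int width, int height)
{
    if (width <= 0 || height <= 0)
        return NULL;
    /* Check 4 * width * height without performing the overflowing multiply. */
    if ((size_t)width > SIZE_MAX / 4 / (size_t)height)
        return NULL;
    return malloc(4 * (size_t)width * (size_t)height);
}
```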
shibboleth
|
6182b0acf2df670e75423c2ed7afe6950ef11c9d
| 1 |
DynamicMetadataProvider::DynamicMetadataProvider(const DOMElement* e)
: AbstractMetadataProvider(e),
m_validate(XMLHelper::getAttrBool(e, false, validate)),
m_id(XMLHelper::getAttrString(e, "Dynamic", id)),
m_lock(RWLock::create()),
m_refreshDelayFactor(0.75),
m_minCacheDuration(XMLHelper::getAttrInt(e, 600, minCacheDuration)),
m_maxCacheDuration(XMLHelper::getAttrInt(e, 28800, maxCacheDuration)),
m_shutdown(false),
m_cleanupInterval(XMLHelper::getAttrInt(e, 1800, cleanupInterval)),
m_cleanupTimeout(XMLHelper::getAttrInt(e, 1800, cleanupTimeout)),
m_cleanup_wait(nullptr), m_cleanup_thread(nullptr)
{
if (m_minCacheDuration > m_maxCacheDuration) {
Category::getInstance(SAML_LOGCAT ".MetadataProvider.Dynamic").error(
"minCacheDuration setting exceeds maxCacheDuration setting, lowering to match it"
);
m_minCacheDuration = m_maxCacheDuration;
}
const XMLCh* delay = e ? e->getAttributeNS(nullptr, refreshDelayFactor) : nullptr;
if (delay && *delay) {
auto_ptr_char temp(delay);
m_refreshDelayFactor = atof(temp.get());
if (m_refreshDelayFactor <= 0.0 || m_refreshDelayFactor >= 1.0) {
Category::getInstance(SAML_LOGCAT ".MetadataProvider.Dynamic").error(
"invalid refreshDelayFactor setting, using default"
);
m_refreshDelayFactor = 0.75;
}
}
if (m_cleanupInterval > 0) {
if (m_cleanupTimeout < 0)
m_cleanupTimeout = 0;
m_cleanup_wait = CondWait::create();
m_cleanup_thread = Thread::create(&cleanup_fn, this);
}
}
|
CWE-347
| 177,794 | 9,106 |
60434405028896349893789802602177359540
| null | null | null |
samba
|
1e7a32924b22d1f786b6f490ce8590656f578f91
| 1 |
static int check_mtab(const char *progname, const char *devname,
const char *dir)
{
if (check_newline(progname, devname) == -1 ||
check_newline(progname, dir) == -1)
return EX_USAGE;
return 0;
}
|
CWE-20
| 177,834 | 9,111 |
313609897848347265266652576337528308196
| null | null | null |
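For the check_mtab record above, check_newline itself is not shown. A plausible sketch of what such a guard must do (hypothetical implementation, not the cifs-utils source): /etc/mtab is a newline-delimited text file, so a device or mount-point string containing '\n' would let a caller inject fake mtab entries.

```c
#include <stdio.h>
#include <string.h>

#define EX_USAGE 64

/* Reject strings that would break the one-entry-per-line mtab format.
 * Hypothetical sketch; the real helper may differ. */
static int check_newline(const char *progname, const char *name)
{
    if (name != NULL && strchr(name, '\n') != NULL) {
        fprintf(stderr, "%s: '%s' contains a newline, refusing\n",
                progname, name);
        return -1;
    }
    return 0;
}
```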
samba
|
0454b95657846fcecf0f51b6f1194faac02518bd
| 1 |
char *ldb_dn_escape_value(TALLOC_CTX *mem_ctx, struct ldb_val value)
{
char *dst;
if (!value.length)
return NULL;
/* allocate destination string, it will be at most 3 times the source */
dst = talloc_array(mem_ctx, char, value.length * 3 + 1);
if ( ! dst) {
talloc_free(dst);
return NULL;
}
ldb_dn_escape_internal(dst, (const char *)value.data, value.length);
dst = talloc_realloc(mem_ctx, dst, char, strlen(dst) + 1);
return dst;
}
|
CWE-200
| 177,846 | 9,112 |
51084379435452885525670290990800368822
| null | null | null |
samba
|
a819d2b440aafa3138d95ff6e8b824da885a70e9
| 1 |
uint8_t smb2cli_session_security_mode(struct smbXcli_session *session)
{
struct smbXcli_conn *conn = session->conn;
uint8_t security_mode = 0;
if (conn == NULL) {
return security_mode;
}
security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
if (conn->mandatory_signing) {
security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
}
return security_mode;
}
|
CWE-20
| 177,847 | 9,113 |
82438240769109888567609160265631652057
| null | null | null |
samba
|
1ba49b8f389eda3414b14410c7fbcb4041ca06b1
| 1 |
SMBC_server_internal(TALLOC_CTX *ctx,
SMBCCTX *context,
bool connect_if_not_found,
const char *server,
uint16_t port,
const char *share,
char **pp_workgroup,
char **pp_username,
char **pp_password,
bool *in_cache)
{
SMBCSRV *srv=NULL;
char *workgroup = NULL;
struct cli_state *c = NULL;
const char *server_n = server;
int is_ipc = (share != NULL && strcmp(share, "IPC$") == 0);
uint32_t fs_attrs = 0;
const char *username_used;
NTSTATUS status;
char *newserver, *newshare;
int flags = 0;
struct smbXcli_tcon *tcon = NULL;
ZERO_STRUCT(c);
*in_cache = false;
if (server[0] == 0) {
errno = EPERM;
return NULL;
}
/* Look for a cached connection */
srv = SMBC_find_server(ctx, context, server, share,
pp_workgroup, pp_username, pp_password);
/*
* If we found a connection and we're only allowed one share per
* server...
*/
if (srv &&
share != NULL && *share != '\0' &&
smbc_getOptionOneSharePerServer(context)) {
/*
* ... then if there's no current connection to the share,
* connect to it. SMBC_find_server(), or rather the function
* pointed to by context->get_cached_srv_fn which
* was called by SMBC_find_server(), will have issued a tree
* disconnect if the requested share is not the same as the
* one that was already connected.
*/
/*
* Use srv->cli->desthost and srv->cli->share instead of
* server and share below to connect to the actual share,
* i.e., a normal share or a referred share from
* 'msdfs proxy' share.
*/
if (!cli_state_has_tcon(srv->cli)) {
/* Ensure we have accurate auth info */
SMBC_call_auth_fn(ctx, context,
smbXcli_conn_remote_name(srv->cli->conn),
srv->cli->share,
pp_workgroup,
pp_username,
pp_password);
if (!*pp_workgroup || !*pp_username || !*pp_password) {
errno = ENOMEM;
cli_shutdown(srv->cli);
srv->cli = NULL;
smbc_getFunctionRemoveCachedServer(context)(context,
srv);
return NULL;
}
/*
* We don't need to renegotiate encryption
* here as the encryption context is not per
* tid.
*/
status = cli_tree_connect(srv->cli,
srv->cli->share,
"?????",
*pp_password,
strlen(*pp_password)+1);
if (!NT_STATUS_IS_OK(status)) {
errno = map_errno_from_nt_status(status);
cli_shutdown(srv->cli);
srv->cli = NULL;
smbc_getFunctionRemoveCachedServer(context)(context,
srv);
srv = NULL;
}
/* Determine if this share supports case sensitivity */
if (is_ipc) {
DEBUG(4,
("IPC$ so ignore case sensitivity\n"));
status = NT_STATUS_OK;
} else {
status = cli_get_fs_attr_info(c, &fs_attrs);
}
if (!NT_STATUS_IS_OK(status)) {
DEBUG(4, ("Could not retrieve "
"case sensitivity flag: %s.\n",
nt_errstr(status)));
/*
* We can't determine the case sensitivity of
* the share. We have no choice but to use the
* user-specified case sensitivity setting.
*/
if (smbc_getOptionCaseSensitive(context)) {
cli_set_case_sensitive(c, True);
} else {
cli_set_case_sensitive(c, False);
}
} else if (!is_ipc) {
DEBUG(4,
("Case sensitive: %s\n",
(fs_attrs & FILE_CASE_SENSITIVE_SEARCH
? "True"
: "False")));
cli_set_case_sensitive(
c,
(fs_attrs & FILE_CASE_SENSITIVE_SEARCH
? True
: False));
}
/*
* Regenerate the dev value since it's based on both
* server and share
*/
if (srv) {
const char *remote_name =
smbXcli_conn_remote_name(srv->cli->conn);
srv->dev = (dev_t)(str_checksum(remote_name) ^
str_checksum(srv->cli->share));
}
}
}
/* If we have a connection... */
if (srv) {
/* ... then we're done here. Give 'em what they came for. */
*in_cache = true;
goto done;
}
/* If we're not asked to connect when a connection doesn't exist... */
if (! connect_if_not_found) {
/* ... then we're done here. */
return NULL;
}
if (!*pp_workgroup || !*pp_username || !*pp_password) {
errno = ENOMEM;
return NULL;
}
DEBUG(4,("SMBC_server: server_n=[%s] server=[%s]\n", server_n, server));
DEBUG(4,(" -> server_n=[%s] server=[%s]\n", server_n, server));
status = NT_STATUS_UNSUCCESSFUL;
if (smbc_getOptionUseKerberos(context)) {
flags |= CLI_FULL_CONNECTION_USE_KERBEROS;
}
if (smbc_getOptionFallbackAfterKerberos(context)) {
flags |= CLI_FULL_CONNECTION_FALLBACK_AFTER_KERBEROS;
}
if (smbc_getOptionUseCCache(context)) {
flags |= CLI_FULL_CONNECTION_USE_CCACHE;
}
if (smbc_getOptionUseNTHash(context)) {
flags |= CLI_FULL_CONNECTION_USE_NT_HASH;
}
if (port == 0) {
if (share == NULL || *share == '\0' || is_ipc) {
/*
 * Try 139 first for IPC$
 */
status = cli_connect_nb(server_n, NULL, NBT_SMB_PORT, 0x20,
smbc_getNetbiosName(context),
SMB_SIGNING_DEFAULT, flags, &c);
}
}
|
CWE-20
| 177,849 | 9,114 |
326844384621281319651367695194404681534
| null | null | null |
savannah
|
3fcd042d26d70856e826a42b5f93dc4854d80bf0
| 1 |
do_ed_script (char const *inname, char const *outname,
bool *outname_needs_removal, FILE *ofp)
{
static char const editor_program[] = EDITOR_PROGRAM;
file_offset beginning_of_this_line;
size_t chars_read;
FILE *tmpfp = 0;
char const *tmpname;
int tmpfd;
pid_t pid;
if (! dry_run && ! skip_rest_of_patch)
{
/* Write ed script to a temporary file. This causes ed to abort on
invalid commands such as when line numbers or ranges exceed the
number of available lines. When ed reads from a pipe, it rejects
invalid commands and treats the next line as a new command, which
can lead to arbitrary command execution. */
tmpfd = make_tempfile (&tmpname, 'e', NULL, O_RDWR | O_BINARY, 0);
if (tmpfd == -1)
pfatal ("Can't create temporary file %s", quotearg (tmpname));
tmpfp = fdopen (tmpfd, "w+b");
if (! tmpfp)
pfatal ("Can't open stream for file %s", quotearg (tmpname));
}
for (;;) {
char ed_command_letter;
beginning_of_this_line = file_tell (pfp);
chars_read = get_line ();
if (! chars_read) {
next_intuit_at(beginning_of_this_line,p_input_line);
break;
}
ed_command_letter = get_ed_command_letter (buf);
if (ed_command_letter) {
if (tmpfp)
if (! fwrite (buf, sizeof *buf, chars_read, tmpfp))
write_fatal ();
if (ed_command_letter != 'd' && ed_command_letter != 's') {
p_pass_comments_through = true;
while ((chars_read = get_line ()) != 0) {
if (tmpfp)
if (! fwrite (buf, sizeof *buf, chars_read, tmpfp))
write_fatal ();
if (chars_read == 2 && strEQ (buf, ".\n"))
break;
}
p_pass_comments_through = false;
}
}
else {
next_intuit_at(beginning_of_this_line,p_input_line);
break;
}
}
if (!tmpfp)
return;
if (fwrite ("w\nq\n", sizeof (char), (size_t) 4, tmpfp) == 0
|| fflush (tmpfp) != 0)
write_fatal ();
if (lseek (tmpfd, 0, SEEK_SET) == -1)
pfatal ("Can't rewind to the beginning of file %s", quotearg (tmpname));
if (! dry_run && ! skip_rest_of_patch) {
int exclusive = *outname_needs_removal ? 0 : O_EXCL;
*outname_needs_removal = true;
if (inerrno != ENOENT)
{
*outname_needs_removal = true;
copy_file (inname, outname, 0, exclusive, instat.st_mode, true);
}
sprintf (buf, "%s %s%s", editor_program,
verbosity == VERBOSE ? "" : "- ",
outname);
fflush (stdout);
pid = fork();
      if (pid == -1)
        pfatal ("Can't fork");
else if (pid == 0)
{
dup2 (tmpfd, 0);
execl ("/bin/sh", "sh", "-c", buf, (char *) 0);
_exit (2);
}
      else
        {
          int wstatus;
          if (waitpid (pid, &wstatus, 0) == -1
              || ! WIFEXITED (wstatus)
              || WEXITSTATUS (wstatus) != 0)
            fatal ("%s FAILED", editor_program);
        }
    }
}
|
CWE-78
| 177,856 | 9,115 |
332019548078279612116698879022130688281
| null | null | null |
xserver
|
94f11ca5cf011ef123bd222cabeaef6f424d76ac
| 1 |
tbGetBuffer(unsigned size)
{
char *rtrn;
if (size >= BUFFER_SIZE)
return NULL;
if ((BUFFER_SIZE - tbNext) <= size)
tbNext = 0;
rtrn = &textBuffer[tbNext];
tbNext += size;
return rtrn;
}
|
CWE-119
| 177,864 | 9,116 |
27093761620872921794312634195475922111
| null | null | null |
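In the text-buffer record above, the `size >= BUFFER_SIZE` rejection is what keeps the ring-buffer arithmetic below it in range; without it, `tbNext += size` can run past `textBuffer`. A compact standalone demonstration of the invariant (tiny buffer for illustration):

```c
#include <stddef.h>
#include <stdio.h>

#define BUFFER_SIZE 16
static char textBuffer[BUFFER_SIZE];
static size_t tbNext;

static char *tb_get(size_t size)
{
    if (size >= BUFFER_SIZE)          /* without this, the wrap below is not enough */
        return NULL;
    if (BUFFER_SIZE - tbNext <= size) /* not enough room at the tail: wrap */
        tbNext = 0;
    char *r = &textBuffer[tbNext];
    tbNext += size;                   /* invariant: tbNext <= BUFFER_SIZE */
    return r;
}

int main(void)
{
    printf("%p\n", (void *)tb_get(8));
    printf("%p\n", (void *)tb_get(12));  /* wraps to the start */
    printf("%p\n", (void *)tb_get(32));  /* rejected: larger than the buffer */
    return 0;
}
```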
moodle
|
0c0b0859ae1aba64861599f0e7f74f143f305932
| 1 |
gs_heap_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
byte *ptr = 0;
#ifdef DEBUG
const char *msg;
static const char *const ok_msg = "OK";
# define set_msg(str) (msg = (str))
#else
# define set_msg(str) DO_NOTHING
#endif
/* Exclusive acces so our decisions and changes are 'atomic' */
if (mmem->monitor)
gx_monitor_enter(mmem->monitor);
if (size > mmem->limit - sizeof(gs_malloc_block_t)) {
/* Definitely too large to allocate; also avoids overflow. */
set_msg("exceeded limit");
} else {
uint added = size + sizeof(gs_malloc_block_t);
if (mmem->limit - added < mmem->used)
set_msg("exceeded limit");
else if ((ptr = (byte *) Memento_label(malloc(added), cname)) == 0)
set_msg("failed");
else {
gs_malloc_block_t *bp = (gs_malloc_block_t *) ptr;
/*
* We would like to check that malloc aligns blocks at least as
* strictly as the compiler (as defined by ARCH_ALIGN_MEMORY_MOD).
* However, Microsoft VC 6 does not satisfy this requirement.
* See gsmemory.h for more explanation.
*/
set_msg(ok_msg);
if (mmem->allocated)
mmem->allocated->prev = bp;
bp->next = mmem->allocated;
bp->prev = 0;
bp->size = size;
bp->type = &st_bytes;
bp->cname = cname;
mmem->allocated = bp;
ptr = (byte *) (bp + 1);
mmem->used += size + sizeof(gs_malloc_block_t);
if (mmem->used > mmem->max_used)
mmem->max_used = mmem->used;
}
}
if (mmem->monitor)
gx_monitor_leave(mmem->monitor); /* Done with exclusive access */
/* We don't want to 'fill' under mutex to keep the window smaller */
if (ptr)
gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#ifdef DEBUG
if (gs_debug_c('a') || msg != ok_msg)
dmlprintf6(mem, "[a+]gs_malloc(%s)(%u) = 0x%lx: %s, used=%ld, max=%ld\n",
client_name_string(cname), size, (ulong) ptr, msg, mmem->used, mmem->max_used);
#endif
return ptr;
#undef set_msg
}
|
CWE-189
| 177,887 | 9,118 |
271606655835909013601048901854823602410
| null | null | null |
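The allocator record above uses the subtract-rather-than-add idiom: `size > limit - sizeof(header)` cannot overflow, whereas `size + sizeof(header) > limit` can wrap for a huge `size`. A minimal before/after sketch of just that check:

```c
#include <stdbool.h>
#include <stddef.h>

#define HDR sizeof(size_t)   /* stand-in for the allocator's block header */

/* Wrong: size + HDR can wrap around to a small number. */
static bool fits_bad(size_t size, size_t limit)
{
    return size + HDR <= limit;   /* overflows when size > SIZE_MAX - HDR */
}

/* Right: rearranged so no addition can overflow. */
static bool fits_good(size_t size, size_t limit)
{
    return limit >= HDR && size <= limit - HDR;
}
```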
openssl
|
3c66a669dfc7b3792f7af0758ea26fe8502ce70c
| 1 |
int ssl3_get_client_key_exchange(SSL *s)
{
int i, al, ok;
long n;
unsigned long alg_k;
unsigned char *p;
#ifndef OPENSSL_NO_RSA
RSA *rsa = NULL;
EVP_PKEY *pkey = NULL;
#endif
#ifndef OPENSSL_NO_DH
BIGNUM *pub = NULL;
DH *dh_srvr, *dh_clnt = NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *srvr_ecdh = NULL;
EVP_PKEY *clnt_pub_pkey = NULL;
EC_POINT *clnt_ecpoint = NULL;
BN_CTX *bn_ctx = NULL;
#endif
n = s->method->ssl_get_message(s,
SSL3_ST_SR_KEY_EXCH_A,
SSL3_ST_SR_KEY_EXCH_B,
SSL3_MT_CLIENT_KEY_EXCHANGE, 2048, &ok);
if (!ok)
return ((int)n);
p = (unsigned char *)s->init_msg;
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
#ifndef OPENSSL_NO_RSA
if (alg_k & SSL_kRSA) {
unsigned char rand_premaster_secret[SSL_MAX_MASTER_KEY_LENGTH];
int decrypt_len;
unsigned char decrypt_good, version_good;
size_t j;
/* FIX THIS UP EAY EAY EAY EAY */
if (s->s3->tmp.use_rsa_tmp) {
if ((s->cert != NULL) && (s->cert->rsa_tmp != NULL))
rsa = s->cert->rsa_tmp;
/*
* Don't do a callback because rsa_tmp should be sent already
*/
if (rsa == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_RSA_PKEY);
goto f_err;
}
} else {
pkey = s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey;
if ((pkey == NULL) ||
(pkey->type != EVP_PKEY_RSA) || (pkey->pkey.rsa == NULL)) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
rsa = pkey->pkey.rsa;
}
/* TLS and [incidentally] DTLS{0xFEFF} */
if (s->version > SSL3_VERSION && s->version != DTLS1_BAD_VER) {
n2s(p, i);
if (n != i + 2) {
if (!(s->options & SSL_OP_TLS_D5_BUG)) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
} else
p -= 2;
} else
n = i;
}
/*
* Reject overly short RSA ciphertext because we want to be sure
* that the buffer size makes it safe to iterate over the entire
* size of a premaster secret (SSL_MAX_MASTER_KEY_LENGTH). The
* actual expected size is larger due to RSA padding, but the
* bound is sufficient to be safe.
*/
if (n < SSL_MAX_MASTER_KEY_LENGTH) {
al = SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
/*
* We must not leak whether a decryption failure occurs because of
* Bleichenbacher's attack on PKCS #1 v1.5 RSA padding (see RFC 2246,
* section 7.4.7.1). The code follows that advice of the TLS RFC and
* generates a random premaster secret for the case that the decrypt
* fails. See https://tools.ietf.org/html/rfc5246#section-7.4.7.1
*/
/*
* should be RAND_bytes, but we cannot work around a failure.
*/
if (RAND_pseudo_bytes(rand_premaster_secret,
sizeof(rand_premaster_secret)) <= 0)
goto err;
decrypt_len =
RSA_private_decrypt((int)n, p, p, rsa, RSA_PKCS1_PADDING);
ERR_clear_error();
/*
* decrypt_len should be SSL_MAX_MASTER_KEY_LENGTH. decrypt_good will
* be 0xff if so and zero otherwise.
*/
decrypt_good =
constant_time_eq_int_8(decrypt_len, SSL_MAX_MASTER_KEY_LENGTH);
/*
* If the version in the decrypted pre-master secret is correct then
* version_good will be 0xff, otherwise it'll be zero. The
* Klima-Pokorny-Rosa extension of Bleichenbacher's attack
* (http://eprint.iacr.org/2003/052/) exploits the version number
* check as a "bad version oracle". Thus version checks are done in
* constant time and are treated like any other decryption error.
*/
version_good =
constant_time_eq_8(p[0], (unsigned)(s->client_version >> 8));
version_good &=
constant_time_eq_8(p[1], (unsigned)(s->client_version & 0xff));
/*
* The premaster secret must contain the same version number as the
* ClientHello to detect version rollback attacks (strangely, the
* protocol does not offer such protection for DH ciphersuites).
* However, buggy clients exist that send the negotiated protocol
* version instead if the server does not support the requested
* protocol version. If SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such
* clients.
*/
if (s->options & SSL_OP_TLS_ROLLBACK_BUG) {
unsigned char workaround_good;
workaround_good =
constant_time_eq_8(p[0], (unsigned)(s->version >> 8));
workaround_good &=
constant_time_eq_8(p[1], (unsigned)(s->version & 0xff));
version_good |= workaround_good;
}
/*
* Both decryption and version must be good for decrypt_good to
* remain non-zero (0xff).
*/
decrypt_good &= version_good;
/*
* Now copy rand_premaster_secret over from p using
* decrypt_good_mask. If decryption failed, then p does not
* contain valid plaintext, however, a check above guarantees
* it is still sufficiently large to read from.
*/
for (j = 0; j < sizeof(rand_premaster_secret); j++) {
p[j] = constant_time_select_8(decrypt_good, p[j],
rand_premaster_secret[j]);
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p,
sizeof
(rand_premaster_secret));
OPENSSL_cleanse(p, sizeof(rand_premaster_secret));
} else
#endif
#ifndef OPENSSL_NO_DH
if (alg_k & (SSL_kEDH | SSL_kDHr | SSL_kDHd)) {
int idx = -1;
EVP_PKEY *skey = NULL;
if (n > 1) {
n2s(p, i);
} else {
if (alg_k & SSL_kDHE) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
i = 0;
}
if (n && n != i + 2) {
if (!(s->options & SSL_OP_SSLEAY_080_CLIENT_DH_BUG)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG);
goto err;
} else {
p -= 2;
i = (int)n;
}
}
if (alg_k & SSL_kDHr)
idx = SSL_PKEY_DH_RSA;
else if (alg_k & SSL_kDHd)
idx = SSL_PKEY_DH_DSA;
if (idx >= 0) {
skey = s->cert->pkeys[idx].privatekey;
if ((skey == NULL) ||
(skey->type != EVP_PKEY_DH) || (skey->pkey.dh == NULL)) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
dh_srvr = skey->pkey.dh;
} else if (s->s3->tmp.dh == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
} else
dh_srvr = s->s3->tmp.dh;
if (n == 0L) {
/* Get pubkey from cert */
EVP_PKEY *clkey = X509_get_pubkey(s->session->peer);
if (clkey) {
if (EVP_PKEY_cmp_parameters(clkey, skey) == 1)
dh_clnt = EVP_PKEY_get1_DH(clkey);
}
if (dh_clnt == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
EVP_PKEY_free(clkey);
pub = dh_clnt->pub_key;
} else
pub = BN_bin2bn(p, i, NULL);
if (pub == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_BN_LIB);
goto err;
}
i = DH_compute_key(p, pub, dh_srvr);
if (i <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB);
BN_clear_free(pub);
goto err;
}
DH_free(s->s3->tmp.dh);
s->s3->tmp.dh = NULL;
if (dh_clnt)
DH_free(dh_clnt);
else
BN_clear_free(pub);
pub = NULL;
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p, i);
OPENSSL_cleanse(p, i);
if (dh_clnt)
return 2;
} else
#endif
#ifndef OPENSSL_NO_KRB5
if (alg_k & SSL_kKRB5) {
krb5_error_code krb5rc;
krb5_data enc_ticket;
krb5_data authenticator;
krb5_data enc_pms;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char pms[SSL_MAX_MASTER_KEY_LENGTH + EVP_MAX_BLOCK_LENGTH];
int padl, outl;
krb5_timestamp authtime = 0;
krb5_ticket_times ttimes;
int kerr = 0;
EVP_CIPHER_CTX_init(&ciph_ctx);
if (!kssl_ctx)
kssl_ctx = kssl_ctx_new();
n2s(p, i);
enc_ticket.length = i;
if (n < (long)(enc_ticket.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
enc_ticket.data = (char *)p;
p += enc_ticket.length;
n2s(p, i);
authenticator.length = i;
if (n < (long)(enc_ticket.length + authenticator.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
authenticator.data = (char *)p;
p += authenticator.length;
n2s(p, i);
enc_pms.length = i;
enc_pms.data = (char *)p;
p += enc_pms.length;
/*
* Note that the length is checked again below, ** after decryption
*/
if (enc_pms.length > sizeof pms) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (n != (long)(enc_ticket.length + authenticator.length +
enc_pms.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if ((krb5rc = kssl_sget_tkt(kssl_ctx, &enc_ticket, &ttimes,
&kssl_err)) != 0) {
# ifdef KSSL_DEBUG
fprintf(stderr, "kssl_sget_tkt rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr, "kssl_err text= %s\n", kssl_err.text);
# endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, kssl_err.reason);
goto err;
}
/*
* Note: no authenticator is not considered an error, ** but will
* return authtime == 0.
*/
if ((krb5rc = kssl_check_authent(kssl_ctx, &authenticator,
&authtime, &kssl_err)) != 0) {
# ifdef KSSL_DEBUG
fprintf(stderr, "kssl_check_authent rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr, "kssl_err text= %s\n", kssl_err.text);
# endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, kssl_err.reason);
goto err;
}
if ((krb5rc = kssl_validate_times(authtime, &ttimes)) != 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, krb5rc);
goto err;
}
# ifdef KSSL_DEBUG
kssl_ctx_show(kssl_ctx);
# endif /* KSSL_DEBUG */
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
memset(iv, 0, sizeof iv); /* per RFC 1510 */
if (!EVP_DecryptInit_ex(&ciph_ctx, enc, NULL, kssl_ctx->key, iv)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
if (!EVP_DecryptUpdate(&ciph_ctx, pms, &outl,
(unsigned char *)enc_pms.data, enc_pms.length))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
kerr = 1;
goto kclean;
}
if (outl > SSL_MAX_MASTER_KEY_LENGTH) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
kerr = 1;
goto kclean;
}
if (!EVP_DecryptFinal_ex(&ciph_ctx, &(pms[outl]), &padl)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
kerr = 1;
goto kclean;
}
outl += padl;
if (outl > SSL_MAX_MASTER_KEY_LENGTH) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
kerr = 1;
goto kclean;
}
if (!((pms[0] == (s->client_version >> 8))
&& (pms[1] == (s->client_version & 0xff)))) {
/*
* The premaster secret must contain the same version number as
* the ClientHello to detect version rollback attacks (strangely,
* the protocol does not offer such protection for DH
* ciphersuites). However, buggy clients exist that send random
* bytes instead of the protocol version. If
* SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients.
* (Perhaps we should have a separate BUG value for the Kerberos
* cipher)
*/
if (!(s->options & SSL_OP_TLS_ROLLBACK_BUG)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_AD_DECODE_ERROR);
kerr = 1;
goto kclean;
}
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
pms, outl);
if (kssl_ctx->client_princ) {
size_t len = strlen(kssl_ctx->client_princ);
if (len < SSL_MAX_KRB5_PRINCIPAL_LENGTH) {
s->session->krb5_client_princ_len = len;
memcpy(s->session->krb5_client_princ, kssl_ctx->client_princ,
len);
}
}
/*- Was doing kssl_ctx_free() here,
* but it caused problems for apache.
* kssl_ctx = kssl_ctx_free(kssl_ctx);
* if (s->kssl_ctx) s->kssl_ctx = NULL;
*/
kclean:
OPENSSL_cleanse(pms, sizeof(pms));
if (kerr)
goto err;
} else
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
if (alg_k & (SSL_kEECDH | SSL_kECDHr | SSL_kECDHe)) {
int ret = 1;
int field_size = 0;
const EC_KEY *tkey;
const EC_GROUP *group;
const BIGNUM *priv_key;
/* initialize structures for server's ECDH key pair */
if ((srvr_ecdh = EC_KEY_new()) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
/* Let's get server private key and group information */
if (alg_k & (SSL_kECDHr | SSL_kECDHe)) {
/* use the certificate */
tkey = s->cert->pkeys[SSL_PKEY_ECC].privatekey->pkey.ec;
} else {
/*
* use the ephermeral values we saved when generating the
* ServerKeyExchange msg.
*/
tkey = s->s3->tmp.ecdh;
}
group = EC_KEY_get0_group(tkey);
priv_key = EC_KEY_get0_private_key(tkey);
if (!EC_KEY_set_group(srvr_ecdh, group) ||
!EC_KEY_set_private_key(srvr_ecdh, priv_key)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
/* Let's get client's public key */
if ((clnt_ecpoint = EC_POINT_new(group)) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
if (n == 0L) {
/* Client Publickey was in Client Certificate */
if (alg_k & SSL_kEECDH) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_ECDH_KEY);
goto f_err;
}
if (((clnt_pub_pkey = X509_get_pubkey(s->session->peer))
== NULL) || (clnt_pub_pkey->type != EVP_PKEY_EC)) {
/*
* XXX: For now, we do not support client authentication
* using ECDH certificates so this branch (n == 0L) of the
* code is never executed. When that support is added, we
* ought to ensure the key received in the certificate is
* authorized for key agreement. ECDH_compute_key implicitly
* checks that the two ECDH shares are for the same group.
*/
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_UNABLE_TO_DECODE_ECDH_CERTS);
goto f_err;
}
if (EC_POINT_copy(clnt_ecpoint,
EC_KEY_get0_public_key(clnt_pub_pkey->
pkey.ec)) == 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
ret = 2; /* Skip certificate verify processing */
} else {
/*
* Get client's public key from encoded point in the
* ClientKeyExchange message.
*/
if ((bn_ctx = BN_CTX_new()) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
/* Get encoded point length */
i = *p;
p += 1;
if (n != 1 + i) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
if (EC_POINT_oct2point(group, clnt_ecpoint, p, i, bn_ctx) == 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
/*
* p is pointing to somewhere in the buffer currently, so set it
* to the start
*/
p = (unsigned char *)s->init_buf->data;
}
/* Compute the shared pre-master secret */
field_size = EC_GROUP_get_degree(group);
if (field_size <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
i = ECDH_compute_key(p, (field_size + 7) / 8, clnt_ecpoint, srvr_ecdh,
NULL);
if (i <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
EC_KEY_free(s->s3->tmp.ecdh);
s->s3->tmp.ecdh = NULL;
/* Compute the master secret */
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p, i);
OPENSSL_cleanse(p, i);
return (ret);
} else
#endif
#ifndef OPENSSL_NO_PSK
if (alg_k & SSL_kPSK) {
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN * 2 + 4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
char tmp_id[PSK_MAX_IDENTITY_LEN + 1];
al = SSL_AD_HANDSHAKE_FAILURE;
n2s(p, i);
if (n != i + 2) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_LENGTH_MISMATCH);
goto psk_err;
}
if (i > PSK_MAX_IDENTITY_LEN) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto psk_err;
}
if (s->psk_server_callback == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_SERVER_CB);
goto psk_err;
}
/*
* Create guaranteed NULL-terminated identity string for the callback
*/
memcpy(tmp_id, p, i);
memset(tmp_id + i, 0, PSK_MAX_IDENTITY_LEN + 1 - i);
psk_len = s->psk_server_callback(s, tmp_id,
psk_or_pre_ms,
sizeof(psk_or_pre_ms));
OPENSSL_cleanse(tmp_id, PSK_MAX_IDENTITY_LEN + 1);
if (psk_len > PSK_MAX_PSK_LEN) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto psk_err;
} else if (psk_len == 0) {
/*
* PSK related to the given identity not found
*/
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
al = SSL_AD_UNKNOWN_PSK_IDENTITY;
goto psk_err;
}
/* create PSK pre_master_secret */
pre_ms_len = 2 + psk_len + 2 + psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms + psk_len + 4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t += psk_len;
s2n(psk_len, t);
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strdup((char *)p);
if (s->session->psk_identity == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL &&
s->session->psk_identity_hint == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
psk_or_pre_ms,
pre_ms_len);
psk_err = 0;
psk_err:
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0)
goto f_err;
} else
#endif
#ifndef OPENSSL_NO_SRP
if (alg_k & SSL_kSRP) {
int param_len;
n2s(p, i);
param_len = i + 2;
if (param_len > n) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_BAD_SRP_A_LENGTH);
goto f_err;
}
if (!(s->srp_ctx.A = BN_bin2bn(p, i, NULL))) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_BN_LIB);
goto err;
}
if (BN_ucmp(s->srp_ctx.A, s->srp_ctx.N) >= 0
|| BN_is_zero(s->srp_ctx.A)) {
al = SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_BAD_SRP_PARAMETERS);
goto f_err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length =
SRP_generate_server_master_secret(s,
s->session->master_key)) < 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
p += i;
} else
#endif /* OPENSSL_NO_SRP */
if (alg_k & SSL_kGOST) {
int ret = 0;
EVP_PKEY_CTX *pkey_ctx;
EVP_PKEY *client_pub_pkey = NULL, *pk = NULL;
unsigned char premaster_secret[32], *start;
size_t outlen = 32, inlen;
unsigned long alg_a;
int Ttag, Tclass;
long Tlen;
/* Get our certificate private key */
alg_a = s->s3->tmp.new_cipher->algorithm_auth;
if (alg_a & SSL_aGOST94)
pk = s->cert->pkeys[SSL_PKEY_GOST94].privatekey;
else if (alg_a & SSL_aGOST01)
pk = s->cert->pkeys[SSL_PKEY_GOST01].privatekey;
pkey_ctx = EVP_PKEY_CTX_new(pk, NULL);
EVP_PKEY_decrypt_init(pkey_ctx);
/*
* If client certificate is present and is of the same type, maybe
* use it for key exchange. Don't mind errors from
* EVP_PKEY_derive_set_peer, because it is completely valid to use a
* client certificate for authorization only.
*/
client_pub_pkey = X509_get_pubkey(s->session->peer);
if (client_pub_pkey) {
if (EVP_PKEY_derive_set_peer(pkey_ctx, client_pub_pkey) <= 0)
ERR_clear_error();
}
/* Decrypt session key */
if (ASN1_get_object
((const unsigned char **)&p, &Tlen, &Ttag, &Tclass,
n) != V_ASN1_CONSTRUCTED || Ttag != V_ASN1_SEQUENCE
|| Tclass != V_ASN1_UNIVERSAL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto gerr;
}
start = p;
inlen = Tlen;
if (EVP_PKEY_decrypt
(pkey_ctx, premaster_secret, &outlen, start, inlen) <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto gerr;
}
/* Generate master secret */
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
premaster_secret, 32);
OPENSSL_cleanse(premaster_secret, sizeof(premaster_secret));
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl
(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0)
ret = 2;
else
ret = 1;
gerr:
EVP_PKEY_free(client_pub_pkey);
EVP_PKEY_CTX_free(pkey_ctx);
if (ret)
return ret;
else
goto err;
} else {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_UNKNOWN_CIPHER_TYPE);
goto f_err;
}
return (1);
f_err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
#if !defined(OPENSSL_NO_DH) || !defined(OPENSSL_NO_RSA) || !defined(OPENSSL_NO_ECDH) || defined(OPENSSL_NO_SRP)
err:
#endif
#ifndef OPENSSL_NO_ECDH
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
if (srvr_ecdh != NULL)
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
#endif
s->state = SSL_ST_ERR;
return (-1);
}
|
CWE-362
| 177,889 | 9,119 |
25645315172611315748951183695743778967
| null | null | null |
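The RSA branch in the record above defeats Bleichenbacher-style oracles by folding the padding and version checks into byte masks and selecting between the decrypted bytes and random bytes without branching. A sketch of 8-bit constant-time primitives with the same semantics as the constant_time_eq_8/constant_time_select_8 calls it uses (written out directly, not the OpenSSL internals):

```c
#include <stdint.h>
#include <stddef.h>

/* 0xFF if a == b, 0x00 otherwise, with no data-dependent branch. */
static uint8_t ct_eq_8(uint32_t a, uint32_t b)
{
    uint32_t x = a ^ b;                /* zero iff equal */
    x = ~x & (x - 1);                  /* MSB set iff x was zero */
    return (uint8_t)(0 - (x >> 31));   /* broadcast that bit to a mask */
}

/* mask ? a : b, where mask is 0xFF or 0x00. */
static uint8_t ct_select_8(uint8_t mask, uint8_t a, uint8_t b)
{
    return (uint8_t)((mask & a) | (~mask & b));
}

/* Copy either `real` or `fake` depending on `good`, touching both inputs
 * identically so timing does not reveal which one was chosen. */
static void ct_copy(uint8_t *dst, const uint8_t *real, const uint8_t *fake,
                    size_t n, uint8_t good)
{
    for (size_t i = 0; i < n; i++)
        dst[i] = ct_select_8(good, real[i], fake[i]);
}
```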
poppler
|
a9b8ab4657dec65b8b86c225d12c533ad7e984e2
| 1 |
void Splash::arbitraryTransformMask(SplashImageMaskSource src, void *srcData,
int srcWidth, int srcHeight,
SplashCoord *mat, GBool glyphMode) {
SplashBitmap *scaledMask;
SplashClipResult clipRes, clipRes2;
SplashPipe pipe;
int scaledWidth, scaledHeight, t0, t1;
SplashCoord r00, r01, r10, r11, det, ir00, ir01, ir10, ir11;
SplashCoord vx[4], vy[4];
int xMin, yMin, xMax, yMax;
ImageSection section[3];
int nSections;
int y, xa, xb, x, i, xx, yy;
vx[0] = mat[4]; vy[0] = mat[5];
vx[1] = mat[2] + mat[4]; vy[1] = mat[3] + mat[5];
vx[2] = mat[0] + mat[2] + mat[4]; vy[2] = mat[1] + mat[3] + mat[5];
vx[3] = mat[0] + mat[4]; vy[3] = mat[1] + mat[5];
xMin = imgCoordMungeLowerC(vx[0], glyphMode);
xMax = imgCoordMungeUpperC(vx[0], glyphMode);
yMin = imgCoordMungeLowerC(vy[0], glyphMode);
yMax = imgCoordMungeUpperC(vy[0], glyphMode);
for (i = 1; i < 4; ++i) {
t0 = imgCoordMungeLowerC(vx[i], glyphMode);
if (t0 < xMin) {
xMin = t0;
}
t0 = imgCoordMungeUpperC(vx[i], glyphMode);
if (t0 > xMax) {
xMax = t0;
}
t1 = imgCoordMungeLowerC(vy[i], glyphMode);
if (t1 < yMin) {
yMin = t1;
}
t1 = imgCoordMungeUpperC(vy[i], glyphMode);
if (t1 > yMax) {
yMax = t1;
}
}
clipRes = state->clip->testRect(xMin, yMin, xMax - 1, yMax - 1);
opClipRes = clipRes;
if (clipRes == splashClipAllOutside) {
return;
}
if (mat[0] >= 0) {
t0 = imgCoordMungeUpperC(mat[0] + mat[4], glyphMode) -
imgCoordMungeLowerC(mat[4], glyphMode);
} else {
t0 = imgCoordMungeUpperC(mat[4], glyphMode) -
imgCoordMungeLowerC(mat[0] + mat[4], glyphMode);
}
if (mat[1] >= 0) {
t1 = imgCoordMungeUpperC(mat[1] + mat[5], glyphMode) -
imgCoordMungeLowerC(mat[5], glyphMode);
} else {
t1 = imgCoordMungeUpperC(mat[5], glyphMode) -
imgCoordMungeLowerC(mat[1] + mat[5], glyphMode);
}
scaledWidth = t0 > t1 ? t0 : t1;
if (mat[2] >= 0) {
t0 = imgCoordMungeUpperC(mat[2] + mat[4], glyphMode) -
imgCoordMungeLowerC(mat[4], glyphMode);
} else {
t0 = imgCoordMungeUpperC(mat[4], glyphMode) -
imgCoordMungeLowerC(mat[2] + mat[4], glyphMode);
}
if (mat[3] >= 0) {
t1 = imgCoordMungeUpperC(mat[3] + mat[5], glyphMode) -
imgCoordMungeLowerC(mat[5], glyphMode);
} else {
t1 = imgCoordMungeUpperC(mat[5], glyphMode) -
imgCoordMungeLowerC(mat[3] + mat[5], glyphMode);
}
scaledHeight = t0 > t1 ? t0 : t1;
if (scaledWidth == 0) {
scaledWidth = 1;
}
if (scaledHeight == 0) {
scaledHeight = 1;
}
r00 = mat[0] / scaledWidth;
r01 = mat[1] / scaledWidth;
r10 = mat[2] / scaledHeight;
r11 = mat[3] / scaledHeight;
det = r00 * r11 - r01 * r10;
if (splashAbs(det) < 1e-6) {
return;
}
ir00 = r11 / det;
ir01 = -r01 / det;
ir10 = -r10 / det;
ir11 = r00 / det;
scaledMask = scaleMask(src, srcData, srcWidth, srcHeight,
scaledWidth, scaledHeight);
if (scaledMask->data == NULL) {
error(errInternal, -1, "scaledMask->data is NULL in Splash::scaleMaskYuXu");
delete scaledMask;
return;
}
i = (vy[2] <= vy[3]) ? 2 : 3;
if (vy[1] <= vy[i]) {
i = 1;
}
if (vy[0] < vy[i] || (i != 3 && vy[0] == vy[i])) {
i = 0;
}
if (vy[i] == vy[(i+1) & 3]) {
section[0].y0 = imgCoordMungeLowerC(vy[i], glyphMode);
section[0].y1 = imgCoordMungeUpperC(vy[(i+2) & 3], glyphMode) - 1;
if (vx[i] < vx[(i+1) & 3]) {
section[0].ia0 = i;
section[0].ia1 = (i+3) & 3;
section[0].ib0 = (i+1) & 3;
section[0].ib1 = (i+2) & 3;
} else {
section[0].ia0 = (i+1) & 3;
section[0].ia1 = (i+2) & 3;
section[0].ib0 = i;
section[0].ib1 = (i+3) & 3;
}
nSections = 1;
} else {
section[0].y0 = imgCoordMungeLowerC(vy[i], glyphMode);
section[2].y1 = imgCoordMungeUpperC(vy[(i+2) & 3], glyphMode) - 1;
section[0].ia0 = section[0].ib0 = i;
section[2].ia1 = section[2].ib1 = (i+2) & 3;
if (vx[(i+1) & 3] < vx[(i+3) & 3]) {
section[0].ia1 = section[2].ia0 = (i+1) & 3;
section[0].ib1 = section[2].ib0 = (i+3) & 3;
} else {
section[0].ia1 = section[2].ia0 = (i+3) & 3;
section[0].ib1 = section[2].ib0 = (i+1) & 3;
}
if (vy[(i+1) & 3] < vy[(i+3) & 3]) {
section[1].y0 = imgCoordMungeLowerC(vy[(i+1) & 3], glyphMode);
section[2].y0 = imgCoordMungeUpperC(vy[(i+3) & 3], glyphMode);
if (vx[(i+1) & 3] < vx[(i+3) & 3]) {
section[1].ia0 = (i+1) & 3;
section[1].ia1 = (i+2) & 3;
section[1].ib0 = i;
section[1].ib1 = (i+3) & 3;
} else {
section[1].ia0 = i;
section[1].ia1 = (i+3) & 3;
section[1].ib0 = (i+1) & 3;
section[1].ib1 = (i+2) & 3;
}
} else {
section[1].y0 = imgCoordMungeLowerC(vy[(i+3) & 3], glyphMode);
section[2].y0 = imgCoordMungeUpperC(vy[(i+1) & 3], glyphMode);
if (vx[(i+1) & 3] < vx[(i+3) & 3]) {
section[1].ia0 = i;
section[1].ia1 = (i+1) & 3;
section[1].ib0 = (i+3) & 3;
section[1].ib1 = (i+2) & 3;
} else {
section[1].ia0 = (i+3) & 3;
section[1].ia1 = (i+2) & 3;
section[1].ib0 = i;
section[1].ib1 = (i+1) & 3;
}
}
section[0].y1 = section[1].y0 - 1;
section[1].y1 = section[2].y0 - 1;
nSections = 3;
}
for (i = 0; i < nSections; ++i) {
section[i].xa0 = vx[section[i].ia0];
section[i].ya0 = vy[section[i].ia0];
section[i].xa1 = vx[section[i].ia1];
section[i].ya1 = vy[section[i].ia1];
section[i].xb0 = vx[section[i].ib0];
section[i].yb0 = vy[section[i].ib0];
section[i].xb1 = vx[section[i].ib1];
section[i].yb1 = vy[section[i].ib1];
section[i].dxdya = (section[i].xa1 - section[i].xa0) /
(section[i].ya1 - section[i].ya0);
section[i].dxdyb = (section[i].xb1 - section[i].xb0) /
(section[i].yb1 - section[i].yb0);
}
pipeInit(&pipe, 0, 0, state->fillPattern, NULL,
(Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse);
if (vectorAntialias) {
drawAAPixelInit();
}
if (nSections == 1) {
if (section[0].y0 == section[0].y1) {
++section[0].y1;
clipRes = opClipRes = splashClipPartial;
}
} else {
if (section[0].y0 == section[2].y1) {
++section[1].y1;
clipRes = opClipRes = splashClipPartial;
}
}
for (i = 0; i < nSections; ++i) {
for (y = section[i].y0; y <= section[i].y1; ++y) {
xa = imgCoordMungeLowerC(section[i].xa0 +
((SplashCoord)y + 0.5 - section[i].ya0) *
section[i].dxdya,
glyphMode);
xb = imgCoordMungeUpperC(section[i].xb0 +
((SplashCoord)y + 0.5 - section[i].yb0) *
section[i].dxdyb,
glyphMode);
if (xa == xb) {
++xb;
}
if (clipRes != splashClipAllInside) {
clipRes2 = state->clip->testSpan(xa, xb - 1, y);
} else {
clipRes2 = clipRes;
}
for (x = xa; x < xb; ++x) {
xx = splashFloor(((SplashCoord)x + 0.5 - mat[4]) * ir00 +
((SplashCoord)y + 0.5 - mat[5]) * ir10);
yy = splashFloor(((SplashCoord)x + 0.5 - mat[4]) * ir01 +
((SplashCoord)y + 0.5 - mat[5]) * ir11);
if (xx < 0) {
xx = 0;
} else if (xx >= scaledWidth) {
xx = scaledWidth - 1;
}
if (yy < 0) {
yy = 0;
} else if (yy >= scaledHeight) {
yy = scaledHeight - 1;
}
pipe.shape = scaledMask->data[yy * scaledWidth + xx];
if (vectorAntialias && clipRes2 != splashClipAllInside) {
drawAAPixel(&pipe, x, y);
} else {
drawPixel(&pipe, x, y, clipRes2 == splashClipAllInside);
}
}
}
}
delete scaledMask;
}
| 177,903 | 9,120 |
335865570448652228832556287433161700430
| null | null | null |
|
xserver
|
05442de962d3dc624f79fc1a00eca3ffc5489ced
| 1 |
SProcXSendExtensionEvent(ClientPtr client)
{
CARD32 *p;
int i;
xEvent eventT;
xEvent *eventP;
EventSwapPtr proc;
REQUEST(xSendExtensionEventReq);
swaps(&stuff->length);
REQUEST_AT_LEAST_SIZE(xSendExtensionEventReq);
swapl(&stuff->destination);
swaps(&stuff->count);
if (stuff->length !=
bytes_to_int32(sizeof(xSendExtensionEventReq)) + stuff->count +
bytes_to_int32(stuff->num_events * sizeof(xEvent)))
return BadLength;
eventP = (xEvent *) &stuff[1];
for (i = 0; i < stuff->num_events; i++, eventP++) {
proc = EventSwapVector[eventP->u.u.type & 0177];
if (proc == NotImplemented) /* no swapping proc; invalid event type? */
return BadValue;
(*proc) (eventP, &eventT);
*eventP = eventT;
}
p = (CARD32 *) (((xEvent *) &stuff[1]) + stuff->num_events);
SwapLongs(p, stuff->count);
return (ProcXSendExtensionEvent(client));
}
|
CWE-665
| 177,935 | 9,121 |
87398706943777438419996444099871842349
| null | null | null |
savannah
|
83a95bd8c8561875b948cadd417c653dbe7ef2e2
| 1 |
bmexec_trans (kwset_t kwset, char const *text, size_t size)
{
unsigned char const *d1;
char const *ep, *sp, *tp;
int d;
int len = kwset->mind;
char const *trans = kwset->trans;
if (len == 0)
return 0;
if (len > size)
return -1;
if (len == 1)
{
tp = memchr_kwset (text, size, kwset);
return tp ? tp - text : -1;
}
d1 = kwset->delta;
sp = kwset->target + len;
tp = text + len;
char gc1 = kwset->gc1;
char gc2 = kwset->gc2;
/* Significance of 12: 1 (initial offset) + 10 (skip loop) + 1 (md2). */
if (size > 12 * len)
/* 11 is not a bug, the initial offset happens only once. */
for (ep = text + size - 11 * len; tp <= ep; )
{
char const *tp0 = tp;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
/* As a heuristic, prefer memchr to seeking by
delta1 when the latter doesn't advance much. */
int advance_heuristic = 16 * sizeof (long);
if (advance_heuristic <= tp - tp0)
goto big_advance;
tp--;
tp = memchr_kwset (tp, text + size - tp, kwset);
if (! tp)
return -1;
tp++;
}
}
}
big_advance:;
}
/* Now we have only a few characters left to search. We
carefully avoid ever producing an out-of-bounds pointer. */
ep = text + size;
d = d1[U(tp[-1])];
while (d <= ep - tp)
{
d = d1[U((tp += d)[-1])];
if (d != 0)
continue;
if (bm_delta2_search (&tp, ep, sp, len, trans, gc1, gc2, NULL, kwset))
return tp - text;
}
return -1;
}
|
CWE-119
| 177,943 | 9,122 |
263057791492357295900969045135541855081
| null | null | null |
harfbuzz
|
a6a79df5fe2ed2cd307e7a991346faee164e70d9
| 1 |
hb_buffer_ensure (hb_buffer_t *buffer, unsigned int size)
{
unsigned int new_allocated = buffer->allocated;
if (size > new_allocated)
{
while (size > new_allocated)
new_allocated += (new_allocated >> 1) + 8;
if (buffer->pos)
buffer->pos = (hb_internal_glyph_position_t *) realloc (buffer->pos, new_allocated * sizeof (buffer->pos[0]));
if (buffer->out_info != buffer->info)
{
buffer->info = (hb_internal_glyph_info_t *) realloc (buffer->info, new_allocated * sizeof (buffer->info[0]));
buffer->out_info = (hb_internal_glyph_info_t *) buffer->pos;
}
else
{
buffer->info = (hb_internal_glyph_info_t *) realloc (buffer->info, new_allocated * sizeof (buffer->info[0]));
buffer->out_info = buffer->info;
}
buffer->allocated = new_allocated;
}
}
| 177,946 | 9,124 |
15142711828806308641714507755924299311
| null | null | null |
|
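The hb_buffer_ensure record above grows `new_allocated` by roughly 1.5x-plus-8 per step and then multiplies by an element size; on 32-bit targets both the growth loop and the `* sizeof` can overflow, and a failed `realloc` is also left unchecked. A sketch of a checked variant of that growth pattern (generic helper, not the harfbuzz API):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Grow *cap to at least `need` elements of size `elt`, guarding both the
 * geometric growth and the byte-count multiplication against overflow. */
static bool grow_array(void **data, size_t *cap, size_t need, size_t elt)
{
    if (need <= *cap)
        return true;
    size_t new_cap = *cap;
    while (new_cap < need) {
        if (new_cap > (SIZE_MAX - 8) / 3 * 2)  /* next step would overflow */
            return false;
        new_cap += (new_cap >> 1) + 8;
    }
    if (elt != 0 && new_cap > SIZE_MAX / elt)  /* byte count would overflow */
        return false;
    void *p = realloc(*data, new_cap * elt);
    if (p == NULL)
        return false;
    *data = p;
    *cap = new_cap;
    return true;
}
```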
ghostscript
|
c501a58f8d5650c8ba21d447c0d6f07eafcb0f15
| 1 |
static void Ins_JMPR( INS_ARG )
{
CUR.IP += (Int)(args[0]);
CUR.step_ins = FALSE;
    if ( CUR.IP > CUR.codeSize ||
         ( CUR.code[CUR.IP] != 0x2D && CUR.code[CUR.IP - 1] == 0x2D ) )
      /* Some fonts end a function with a JMPR that lands one byte past
       * the final ENDF instruction, so
       * allow for simple cases here by just checking the preceding byte.
       * Fonts with this problem are not uncommon.
       */
      CUR.IP -= 1;
}
|
CWE-125
| 177,950 | 9,126 |
252024308972366795127261910281364074569
| null | null | null |
ghostscript
|
d2ab84732936b6e7e5a461dc94344902965e9a06
| 1 |
xps_load_sfnt_name(xps_font_t *font, char *namep)
{
byte *namedata;
int offset, length;
/*int format;*/
int count, stringoffset;
int found;
int i, k;
found = 0;
strcpy(namep, "Unknown");
offset = xps_find_sfnt_table(font, "name", &length);
if (offset < 0 || length < 6)
{
gs_warn("cannot find name table");
return;
}
namedata = font->data + offset;
/*format = u16(namedata + 0);*/
count = u16(namedata + 2);
stringoffset = u16(namedata + 4);
if (length < 6 + (count * 12))
{
gs_warn("name table too short");
        return;
    }
    for (i = 0; i < count; i++)
    {
        byte *record = namedata + 6 + i * 12;
        int pid = u16(record + 0);
        int eid = u16(record + 2);
        int langid = u16(record + 4);
        int nameid = u16(record + 6);
        int length = u16(record + 8);
        int offset = u16(record + 10);
        if (nameid == 6)
        {
if (pid == 1 && eid == 0 && langid == 0) /* mac roman, english */
{
if (found < 3)
{
memcpy(namep, namedata + stringoffset + offset, length);
namep[length] = 0;
found = 3;
}
}
if (pid == 3 && eid == 1 && langid == 0x409) /* windows unicode ucs-2, US */
{
if (found < 2)
{
unsigned char *s = namedata + stringoffset + offset;
int n = length / 2;
for (k = 0; k < n; k ++)
{
int c = u16(s + k * 2);
namep[k] = isprint(c) ? c : '?';
}
namep[k] = 0;
found = 2;
}
}
if (pid == 3 && eid == 10 && langid == 0x409) /* windows unicode ucs-4, US */
{
if (found < 1)
{
unsigned char *s = namedata + stringoffset + offset;
int n = length / 4;
for (k = 0; k < n; k ++)
{
int c = u32(s + k * 4);
namep[k] = isprint(c) ? c : '?';
}
namep[k] = 0;
found = 1;
}
}
}
}
}
|
CWE-125
| 177,959 | 9,128 |
121699156759284536867795595019449092410
| null | null | null |
openssl
|
76343947ada960b6269090638f5391068daee88d
| 1 |
int tls1_set_server_sigalgs(SSL *s)
{
int al;
size_t i;
/* Clear any shared signature algorithms */
if (s->cert->shared_sigalgs) {
OPENSSL_free(s->cert->shared_sigalgs);
s->cert->shared_sigalgs = NULL;
}
/* Clear certificate digests and validity flags */
for (i = 0; i < SSL_PKEY_NUM; i++) {
s->cert->pkeys[i].valid_flags = 0;
}
/* If sigalgs received process it. */
if (s->cert->peer_sigalgs) {
if (!tls1_process_sigalgs(s)) {
SSLerr(SSL_F_TLS1_SET_SERVER_SIGALGS, ERR_R_MALLOC_FAILURE);
al = SSL_AD_INTERNAL_ERROR;
goto err;
}
/* Fatal error if there are no shared signature algorithms */
if (!s->cert->shared_sigalgs) {
SSLerr(SSL_F_TLS1_SET_SERVER_SIGALGS,
SSL_R_NO_SHARED_SIGATURE_ALGORITHMS);
al = SSL_AD_ILLEGAL_PARAMETER;
goto err;
}
} else
ssl_cert_set_default_md(s->cert);
return 1;
err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
return 0;
}
| 177,976 | 9,129 |
207456127363619145861478531352618011551
| null | null | null |
|
savannah
|
b3500af717010137046ec4076d1e1c0641e33727
| 1 |
Load_SBit_Png( FT_GlyphSlot slot,
FT_Int x_offset,
FT_Int y_offset,
FT_Int pix_bits,
TT_SBit_Metrics metrics,
FT_Memory memory,
FT_Byte* data,
FT_UInt png_len,
FT_Bool populate_map_and_metrics )
{
FT_Bitmap *map = &slot->bitmap;
FT_Error error = FT_Err_Ok;
FT_StreamRec stream;
png_structp png;
png_infop info;
png_uint_32 imgWidth, imgHeight;
int bitdepth, color_type, interlace;
FT_Int i;
png_byte* *rows = NULL; /* pacify compiler */
if ( x_offset < 0 ||
y_offset < 0 )
{
error = FT_THROW( Invalid_Argument );
goto Exit;
}
if ( !populate_map_and_metrics &&
( x_offset + metrics->width > map->width ||
y_offset + metrics->height > map->rows ||
pix_bits != 32 ||
map->pixel_mode != FT_PIXEL_MODE_BGRA ) )
{
error = FT_THROW( Invalid_Argument );
goto Exit;
}
FT_Stream_OpenMemory( &stream, data, png_len );
png = png_create_read_struct( PNG_LIBPNG_VER_STRING,
&error,
error_callback,
warning_callback );
if ( !png )
{
error = FT_THROW( Out_Of_Memory );
goto Exit;
}
info = png_create_info_struct( png );
if ( !info )
{
error = FT_THROW( Out_Of_Memory );
png_destroy_read_struct( &png, NULL, NULL );
goto Exit;
}
if ( ft_setjmp( png_jmpbuf( png ) ) )
{
error = FT_THROW( Invalid_File_Format );
goto DestroyExit;
}
png_set_read_fn( png, &stream, read_data_from_FT_Stream );
png_read_info( png, info );
png_get_IHDR( png, info,
&imgWidth, &imgHeight,
&bitdepth, &color_type, &interlace,
NULL, NULL );
if ( error ||
( !populate_map_and_metrics &&
( (FT_Int)imgWidth != metrics->width ||
(FT_Int)imgHeight != metrics->height ) ) )
goto DestroyExit;
if ( populate_map_and_metrics )
{
FT_Long size;
metrics->width = (FT_Int)imgWidth;
metrics->height = (FT_Int)imgHeight;
map->width = metrics->width;
map->rows = metrics->height;
map->pixel_mode = FT_PIXEL_MODE_BGRA;
map->pitch = map->width * 4;
map->num_grays = 256;
/* reject too large bitmaps similarly to the rasterizer */
if ( map->rows > 0x7FFF || map->width > 0x7FFF )
{
error = FT_THROW( Array_Too_Large );
goto DestroyExit;
}
size = map->rows * map->pitch;
error = ft_glyphslot_alloc_bitmap( slot, size );
if ( error )
goto DestroyExit;
}
/* convert palette/gray image to rgb */
if ( color_type == PNG_COLOR_TYPE_PALETTE )
png_set_palette_to_rgb( png );
/* expand gray bit depth if needed */
if ( color_type == PNG_COLOR_TYPE_GRAY )
{
#if PNG_LIBPNG_VER >= 10209
png_set_expand_gray_1_2_4_to_8( png );
#else
png_set_gray_1_2_4_to_8( png );
#endif
}
/* transform transparency to alpha */
if ( png_get_valid(png, info, PNG_INFO_tRNS ) )
png_set_tRNS_to_alpha( png );
if ( bitdepth == 16 )
png_set_strip_16( png );
if ( bitdepth < 8 )
png_set_packing( png );
/* convert grayscale to RGB */
if ( color_type == PNG_COLOR_TYPE_GRAY ||
color_type == PNG_COLOR_TYPE_GRAY_ALPHA )
png_set_gray_to_rgb( png );
if ( interlace != PNG_INTERLACE_NONE )
png_set_interlace_handling( png );
png_set_filler( png, 0xFF, PNG_FILLER_AFTER );
/* recheck header after setting EXPAND options */
png_read_update_info(png, info );
png_get_IHDR( png, info,
&imgWidth, &imgHeight,
&bitdepth, &color_type, &interlace,
NULL, NULL );
if ( bitdepth != 8 ||
!( color_type == PNG_COLOR_TYPE_RGB ||
color_type == PNG_COLOR_TYPE_RGB_ALPHA ) )
{
error = FT_THROW( Invalid_File_Format );
goto DestroyExit;
}
switch ( color_type )
{
default:
/* Shouldn't happen, but fall through. */
case PNG_COLOR_TYPE_RGB_ALPHA:
png_set_read_user_transform_fn( png, premultiply_data );
break;
case PNG_COLOR_TYPE_RGB:
/* Humm, this smells. Carry on though. */
png_set_read_user_transform_fn( png, convert_bytes_to_data );
break;
}
if ( FT_NEW_ARRAY( rows, imgHeight ) )
{
error = FT_THROW( Out_Of_Memory );
goto DestroyExit;
}
for ( i = 0; i < (FT_Int)imgHeight; i++ )
rows[i] = map->buffer + ( y_offset + i ) * map->pitch + x_offset * 4;
png_read_image( png, rows );
FT_FREE( rows );
png_read_end( png, info );
DestroyExit:
png_destroy_read_struct( &png, &info, NULL );
FT_Stream_Close( &stream );
Exit:
return error;
}
|
CWE-119
| 178,026 | 9,135 |
110222045843909920788436328705710900608
| null | null | null |
savannah
|
9bd20b7304aae61de5d50ac359cf27132bafd4c1
| 1 |
tt_cmap4_validate( FT_Byte* table,
FT_Validator valid )
{
FT_Byte* p;
FT_UInt length;
FT_Byte *ends, *starts, *offsets, *deltas, *glyph_ids;
FT_UInt num_segs;
FT_Error error = FT_Err_Ok;
if ( table + 2 + 2 > valid->limit )
FT_INVALID_TOO_SHORT;
p = table + 2; /* skip format */
length = TT_NEXT_USHORT( p );
if ( length < 16 )
FT_INVALID_TOO_SHORT;
/* in certain fonts, the `length' field is invalid and goes */
/* out of bound. We try to correct this here... */
if ( table + length > valid->limit )
{
length = (FT_UInt)( valid->limit - table );
}
p = table + 6;
num_segs = TT_NEXT_USHORT( p ); /* read segCountX2 */
if ( valid->level >= FT_VALIDATE_PARANOID )
{
/* check that we have an even value here */
if ( num_segs & 1 )
FT_INVALID_DATA;
}
num_segs /= 2;
if ( length < 16 + num_segs * 2 * 4 )
FT_INVALID_TOO_SHORT;
/* check the search parameters - even though we never use them */
/* */
if ( valid->level >= FT_VALIDATE_PARANOID )
{
/* check the values of `searchRange', `entrySelector', `rangeShift' */
FT_UInt search_range = TT_NEXT_USHORT( p );
FT_UInt entry_selector = TT_NEXT_USHORT( p );
FT_UInt range_shift = TT_NEXT_USHORT( p );
if ( ( search_range | range_shift ) & 1 ) /* must be even values */
FT_INVALID_DATA;
search_range /= 2;
range_shift /= 2;
/* `search range' is the greatest power of 2 that is <= num_segs */
if ( search_range > num_segs ||
search_range * 2 < num_segs ||
search_range + range_shift != num_segs ||
search_range != ( 1U << entry_selector ) )
FT_INVALID_DATA;
}
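/* set up pointers to the parallel endCode/startCode/idDelta/idRangeOffset arrays */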
ends = table + 14;
starts = table + 16 + num_segs * 2;
deltas = starts + num_segs * 2;
offsets = deltas + num_segs * 2;
glyph_ids = offsets + num_segs * 2;
/* check last segment; its end count value must be 0xFFFF */
if ( valid->level >= FT_VALIDATE_PARANOID )
{
p = ends + ( num_segs - 1 ) * 2;
if ( TT_PEEK_USHORT( p ) != 0xFFFFU )
FT_INVALID_DATA;
}
{
FT_UInt start, end, offset, n;
FT_UInt last_start = 0, last_end = 0;
FT_Int delta;
FT_Byte* p_start = starts;
FT_Byte* p_end = ends;
FT_Byte* p_delta = deltas;
FT_Byte* p_offset = offsets;
for ( n = 0; n < num_segs; n++ )
{
p = p_offset;
start = TT_NEXT_USHORT( p_start );
end = TT_NEXT_USHORT( p_end );
delta = TT_NEXT_SHORT( p_delta );
offset = TT_NEXT_USHORT( p_offset );
if ( start > end )
FT_INVALID_DATA;
/* this test should be performed at default validation level; */
/* unfortunately, some popular Asian fonts have overlapping */
/* ranges in their charmaps */
/* */
if ( start <= last_end && n > 0 )
{
if ( valid->level >= FT_VALIDATE_TIGHT )
FT_INVALID_DATA;
else
{
/* allow overlapping segments, provided their start points */
/* and end points, respectively, are in ascending order */
/* */
if ( last_start > start || last_end > end )
error |= TT_CMAP_FLAG_UNSORTED;
else
error |= TT_CMAP_FLAG_OVERLAPPING;
}
}
if ( offset && offset != 0xFFFFU )
{
p += offset; /* start of glyph ID array */
/* check that we point within the glyph IDs table only */
if ( valid->level >= FT_VALIDATE_TIGHT )
{
if ( p < glyph_ids ||
p + ( end - start + 1 ) * 2 > table + length )
FT_INVALID_DATA;
}
/* Some fonts handle the last segment incorrectly. In */
/* theory, 0xFFFF might point to an ordinary glyph -- */
/* a cmap 4 is versatile and could be used for any */
/* encoding, not only Unicode. However, reality shows */
/* that far too many fonts are sloppy and incorrectly */
/* set all fields but `start' and `end' for the last */
/* segment if it contains only a single character. */
/* */
/* We thus omit the test here, delaying it to the */
/* routines which actually access the cmap. */
else if ( n != num_segs - 1 ||
!( start == 0xFFFFU && end == 0xFFFFU ) )
{
if ( p < glyph_ids ||
p + ( end - start + 1 ) * 2 > valid->limit )
FT_INVALID_DATA;
}
/* check glyph indices within the segment range */
if ( valid->level >= FT_VALIDATE_TIGHT )
{
FT_UInt i, idx;
for ( i = start; i < end; i++ )
{
idx = FT_NEXT_USHORT( p );
if ( idx != 0 )
{
idx = (FT_UInt)( idx + delta ) & 0xFFFFU;
if ( idx >= TT_VALID_GLYPH_COUNT( valid ) )
FT_INVALID_GLYPH_ID;
}
}
}
}
else if ( offset == 0xFFFFU )
{
/* some fonts (erroneously?) use a range offset of 0xFFFF */
/* to mean missing glyph in cmap table */
/* */
if ( valid->level >= FT_VALIDATE_PARANOID ||
n != num_segs - 1 ||
!( start == 0xFFFFU && end == 0xFFFFU ) )
FT_INVALID_DATA;
}
last_start = start;
last_end = end;
}
}
return error;
}
|
CWE-119
| 178,031 | 9,136 |
144007186553087559457236290260378717662
| null | null | null |
virglrenderer
|
737c3350850ca4dbc5633b3bdb4118176ce59920
| 1 |
int vrend_create_vertex_elements_state(struct vrend_context *ctx,
uint32_t handle,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
if (!v)
return ENOMEM;
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
desc = util_format_description(elements[i].src_format);
if (!desc) {
FREE(v);
return EINVAL;
}
type = GL_FALSE;
if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) {
if (desc->channel[0].size == 32)
type = GL_FLOAT;
else if (desc->channel[0].size == 64)
type = GL_DOUBLE;
else if (desc->channel[0].size == 16)
type = GL_HALF_FLOAT;
} else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 8)
type = GL_UNSIGNED_BYTE;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 8)
type = GL_BYTE;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 16)
type = GL_UNSIGNED_SHORT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 16)
type = GL_SHORT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 32)
type = GL_UNSIGNED_INT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 32)
type = GL_INT;
else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SSCALED ||
elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SNORM ||
elements[i].src_format == PIPE_FORMAT_B10G10R10A2_SNORM)
type = GL_INT_2_10_10_10_REV;
else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_USCALED ||
elements[i].src_format == PIPE_FORMAT_R10G10B10A2_UNORM ||
elements[i].src_format == PIPE_FORMAT_B10G10R10A2_UNORM)
type = GL_UNSIGNED_INT_2_10_10_10_REV;
else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
type = GL_UNSIGNED_INT_10F_11F_11F_REV;
if (type == GL_FALSE) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, elements[i].src_format);
FREE(v);
return EINVAL;
}
v->elements[i].type = type;
if (desc->channel[0].normalized)
v->elements[i].norm = GL_TRUE;
if (desc->nr_channels == 4 && desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z)
v->elements[i].nr_chan = GL_BGRA;
else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
v->elements[i].nr_chan = 3;
else
v->elements[i].nr_chan = desc->nr_channels;
}
if (vrend_state.have_vertex_attrib_binding) {
glGenVertexArrays(1, &v->id);
glBindVertexArray(v->id);
for (i = 0; i < num_elements; i++) {
struct vrend_vertex_element *ve = &v->elements[i];
if (util_format_is_pure_integer(ve->base.src_format))
glVertexAttribIFormat(i, ve->nr_chan, ve->type, ve->base.src_offset);
else
glVertexAttribFormat(i, ve->nr_chan, ve->type, ve->norm, ve->base.src_offset);
glVertexAttribBinding(i, ve->base.vertex_buffer_index);
glVertexBindingDivisor(i, ve->base.instance_divisor);
glEnableVertexAttribArray(i);
}
}
ret_handle = vrend_renderer_object_insert(ctx, v, sizeof(struct vrend_vertex_element), handle,
VIRGL_OBJECT_VERTEX_ELEMENTS);
if (!ret_handle) {
FREE(v);
return ENOMEM;
}
return 0;
}
|
CWE-772
| 178,116 | 9,138 |
61621239061415559298502141117626230635
| null | null | null |
openssl
|
55d83bf7c10c7b205fffa23fa7c3977491e56c07
| 1 |
int MDC2_Update(MDC2_CTX *c, const unsigned char *in, size_t len)
{
size_t i, j;
i = c->num;
if (i != 0) {
if (i + len < MDC2_BLOCK) {
/* partial block */
memcpy(&(c->data[i]), in, len);
c->num += (int)len;
return 1;
} else {
/* filled one */
j = MDC2_BLOCK - i;
memcpy(&(c->data[i]), in, j);
len -= j;
in += j;
c->num = 0;
mdc2_body(c, &(c->data[0]), MDC2_BLOCK);
}
}
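/* hash as many whole input blocks as possible directly, without buffering */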
i = len & ~((size_t)MDC2_BLOCK - 1);
if (i > 0)
mdc2_body(c, in, i);
j = len - i;
if (j > 0) {
memcpy(&(c->data[0]), &(in[i]), j);
c->num = (int)j;
}
return 1;
}
|
CWE-787
| 178,137 | 9,139 |
66671043126099491722722126650489150471
| null | null | null |
savannah
|
1fbee57ef3c72db2206dd87e4162108b2f425555
| 1 |
stringprep_utf8_nfkc_normalize (const char *str, ssize_t len)
{
return g_utf8_normalize (str, len, G_NORMALIZE_NFKC);
}
|
CWE-125
| 178,156 | 9,140 |
297537804287697693483081230557742789206
| null | null | null |
gnupg
|
243d12fdec66a4360fbb3e307a046b39b5b4ffc3
| 1 |
append_utf8_value (const unsigned char *value, size_t length,
struct stringbuf *sb)
{
unsigned char tmp[6];
const unsigned char *s;
size_t n;
int i, nmore;
if (length && (*value == ' ' || *value == '#'))
{
tmp[0] = '\\';
tmp[1] = *value;
put_stringbuf_mem (sb, tmp, 2);
value++;
length--;
}
if (length && value[length-1] == ' ')
{
tmp[0] = '\\';
tmp[1] = ' ';
put_stringbuf_mem (sb, tmp, 2);
length--;
}
/* FIXME: check that the invalid encoding handling is correct */
for (s=value, n=0;;)
{
for (value = s; n < length && !(*s & 0x80); n++, s++)
;
append_quoted (sb, value, s-value, 0);
if (n==length)
return; /* ready */
assert ((*s & 0x80));
if ( (*s & 0xe0) == 0xc0 ) /* 110x xxxx */
nmore = 1;
else if ( (*s & 0xf0) == 0xe0 ) /* 1110 xxxx */
nmore = 2;
else if ( (*s & 0xf8) == 0xf0 ) /* 1111 0xxx */
nmore = 3;
else if ( (*s & 0xfc) == 0xf8 ) /* 1111 10xx */
nmore = 4;
else if ( (*s & 0xfe) == 0xfc ) /* 1111 110x */
nmore = 5;
else /* invalid encoding */
nmore = 5; /* we will reduce the check length anyway */
if (n+nmore > length)
nmore = length - n; /* oops, encoding too short */
tmp[0] = *s++; n++;
for (i=1; i <= nmore; i++)
{
if ( (*s & 0xc0) != 0x80)
break; /* invalid encoding - stop */
tmp[i] = *s++;
n++;
}
put_stringbuf_mem (sb, tmp, i);
}
}
|
CWE-119
| 178,222 | 9,143 |
258091812889344872506242785611530851861
| null | null | null |
mindrot
|
2fecfd486bdba9f51b3a789277bb0733ca36e1c0
| 1 |
ssh_packet_read_poll2(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
{
struct session_state *state = ssh->state;
u_int padlen, need;
u_char *cp, macbuf[SSH_DIGEST_MAX_LENGTH];
u_int maclen, aadlen = 0, authlen = 0, block_size;
struct sshenc *enc = NULL;
struct sshmac *mac = NULL;
struct sshcomp *comp = NULL;
int r;
*typep = SSH_MSG_NONE;
if (state->packet_discard)
return 0;
if (state->newkeys[MODE_IN] != NULL) {
enc = &state->newkeys[MODE_IN]->enc;
mac = &state->newkeys[MODE_IN]->mac;
comp = &state->newkeys[MODE_IN]->comp;
/* disable mac for authenticated encryption */
if ((authlen = cipher_authlen(enc->cipher)) != 0)
mac = NULL;
}
maclen = mac && mac->enabled ? mac->mac_len : 0;
block_size = enc ? enc->block_size : 8;
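/* in ETM and AEAD modes the 4-byte length field is authenticated but not encrypted */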
aadlen = (mac && mac->enabled && mac->etm) || authlen ? 4 : 0;
if (aadlen && state->packlen == 0) {
if (cipher_get_length(&state->receive_context,
&state->packlen, state->p_read.seqnr,
sshbuf_ptr(state->input), sshbuf_len(state->input)) != 0)
return 0;
if (state->packlen < 1 + 4 ||
state->packlen > PACKET_MAX_SIZE) {
#ifdef PACKET_DEBUG
sshbuf_dump(state->input, stderr);
#endif
logit("Bad packet length %u.", state->packlen);
if ((r = sshpkt_disconnect(ssh, "Packet corrupt")) != 0)
return r;
}
sshbuf_reset(state->incoming_packet);
} else if (state->packlen == 0) {
/*
* check if input size is less than the cipher block size,
* decrypt first block and extract length of incoming packet
*/
if (sshbuf_len(state->input) < block_size)
return 0;
sshbuf_reset(state->incoming_packet);
if ((r = sshbuf_reserve(state->incoming_packet, block_size,
&cp)) != 0)
goto out;
if ((r = cipher_crypt(&state->receive_context,
state->p_send.seqnr, cp, sshbuf_ptr(state->input),
block_size, 0, 0)) != 0)
goto out;
state->packlen = PEEK_U32(sshbuf_ptr(state->incoming_packet));
if (state->packlen < 1 + 4 ||
state->packlen > PACKET_MAX_SIZE) {
#ifdef PACKET_DEBUG
fprintf(stderr, "input: \n");
sshbuf_dump(state->input, stderr);
fprintf(stderr, "incoming_packet: \n");
sshbuf_dump(state->incoming_packet, stderr);
#endif
logit("Bad packet length %u.", state->packlen);
return ssh_packet_start_discard(ssh, enc, mac,
state->packlen, PACKET_MAX_SIZE);
}
if ((r = sshbuf_consume(state->input, block_size)) != 0)
goto out;
}
DBG(debug("input: packet len %u", state->packlen+4));
if (aadlen) {
/* only the payload is encrypted */
need = state->packlen;
} else {
/*
* the payload size and the payload are encrypted, but we
* have a partial packet of block_size bytes
*/
need = 4 + state->packlen - block_size;
}
DBG(debug("partial packet: block %d, need %d, maclen %d, authlen %d,"
" aadlen %d", block_size, need, maclen, authlen, aadlen));
if (need % block_size != 0) {
logit("padding error: need %d block %d mod %d",
need, block_size, need % block_size);
return ssh_packet_start_discard(ssh, enc, mac,
state->packlen, PACKET_MAX_SIZE - block_size);
}
/*
* check if the entire packet has been received and
* decrypt into incoming_packet:
* 'aadlen' bytes are unencrypted, but authenticated.
* 'need' bytes are encrypted, followed by either
* 'authlen' bytes of authentication tag or
* 'maclen' bytes of message authentication code.
*/
if (sshbuf_len(state->input) < aadlen + need + authlen + maclen)
return 0;
#ifdef PACKET_DEBUG
fprintf(stderr, "read_poll enc/full: ");
sshbuf_dump(state->input, stderr);
#endif
/* EtM: compute mac over encrypted input */
if (mac && mac->enabled && mac->etm) {
if ((r = mac_compute(mac, state->p_read.seqnr,
sshbuf_ptr(state->input), aadlen + need,
macbuf, sizeof(macbuf))) != 0)
goto out;
}
if ((r = sshbuf_reserve(state->incoming_packet, aadlen + need,
&cp)) != 0)
goto out;
if ((r = cipher_crypt(&state->receive_context, state->p_read.seqnr, cp,
sshbuf_ptr(state->input), need, aadlen, authlen)) != 0)
goto out;
if ((r = sshbuf_consume(state->input, aadlen + need + authlen)) != 0)
goto out;
/*
* compute MAC over seqnr and packet,
* increment sequence number for incoming packet
*/
if (mac && mac->enabled) {
if (!mac->etm)
if ((r = mac_compute(mac, state->p_read.seqnr,
sshbuf_ptr(state->incoming_packet),
sshbuf_len(state->incoming_packet),
macbuf, sizeof(macbuf))) != 0)
goto out;
if (timingsafe_bcmp(macbuf, sshbuf_ptr(state->input),
mac->mac_len) != 0) {
logit("Corrupted MAC on input.");
if (need > PACKET_MAX_SIZE)
return SSH_ERR_INTERNAL_ERROR;
return ssh_packet_start_discard(ssh, enc, mac,
state->packlen, PACKET_MAX_SIZE - need);
}
DBG(debug("MAC #%d ok", state->p_read.seqnr));
if ((r = sshbuf_consume(state->input, mac->mac_len)) != 0)
goto out;
}
if (seqnr_p != NULL)
*seqnr_p = state->p_read.seqnr;
if (++state->p_read.seqnr == 0)
logit("incoming seqnr wraps around");
if (++state->p_read.packets == 0)
if (!(ssh->compat & SSH_BUG_NOREKEY))
return SSH_ERR_NEED_REKEY;
state->p_read.blocks += (state->packlen + 4) / block_size;
state->p_read.bytes += state->packlen + 4;
/* get padlen */
padlen = sshbuf_ptr(state->incoming_packet)[4];
DBG(debug("input: padlen %d", padlen));
if (padlen < 4) {
if ((r = sshpkt_disconnect(ssh,
"Corrupted padlen %d on input.", padlen)) != 0 ||
(r = ssh_packet_write_wait(ssh)) != 0)
return r;
return SSH_ERR_CONN_CORRUPT;
}
/* skip packet size + padlen, discard padding */
if ((r = sshbuf_consume(state->incoming_packet, 4 + 1)) != 0 ||
((r = sshbuf_consume_end(state->incoming_packet, padlen)) != 0))
goto out;
DBG(debug("input: len before de-compress %zd",
sshbuf_len(state->incoming_packet)));
if (comp && comp->enabled) {
sshbuf_reset(state->compression_buffer);
if ((r = uncompress_buffer(ssh, state->incoming_packet,
state->compression_buffer)) != 0)
goto out;
sshbuf_reset(state->incoming_packet);
if ((r = sshbuf_putb(state->incoming_packet,
state->compression_buffer)) != 0)
goto out;
DBG(debug("input: len after de-compress %zd",
sshbuf_len(state->incoming_packet)));
}
/*
* get packet type, implies consume.
* return length of payload (without type field)
*/
if ((r = sshbuf_get_u8(state->incoming_packet, typep)) != 0)
goto out;
if (*typep < SSH2_MSG_MIN || *typep >= SSH2_MSG_LOCAL_MIN) {
if ((r = sshpkt_disconnect(ssh,
"Invalid ssh2 packet type: %d", *typep)) != 0 ||
(r = ssh_packet_write_wait(ssh)) != 0)
return r;
return SSH_ERR_PROTOCOL_ERROR;
}
if (*typep == SSH2_MSG_NEWKEYS)
r = ssh_set_newkeys(ssh, MODE_IN);
else if (*typep == SSH2_MSG_USERAUTH_SUCCESS && !state->server_side)
r = ssh_packet_enable_delayed_compress(ssh);
else
r = 0;
#ifdef PACKET_DEBUG
fprintf(stderr, "read/plain[%d]:\r\n", *typep);
sshbuf_dump(state->incoming_packet, stderr);
#endif
/* reset for next packet */
state->packlen = 0;
out:
return r;
}
|
CWE-119
| 178,391 | 9,155 |
186648279900332201351185841299560074263
| null | null | null |
savannah
|
beecf80a6deecbaf5d264d4f864451bde4fe98b8
| 1 |
cff_parser_run( CFF_Parser parser,
FT_Byte* start,
FT_Byte* limit )
{
FT_Byte* p = start;
FT_Error error = FT_Err_Ok;
FT_Library library = parser->library;
FT_UNUSED( library );
parser->top = parser->stack;
parser->start = start;
parser->limit = limit;
parser->cursor = start;
while ( p < limit )
{
FT_UInt v = *p;
/* Opcode 31 is legacy MM T2 operator, not a number. */
/* Opcode 255 is reserved and should not appear in fonts; */
/* it is used internally for CFF2 blends. */
if ( v >= 27 && v != 31 && v != 255 )
{
/* it's a number; we will push its position on the stack */
if ( (FT_UInt)( parser->top - parser->stack ) >= parser->stackSize )
goto Stack_Overflow;
*parser->top++ = p;
/* now, skip it */
if ( v == 30 )
{
/* skip real number */
p++;
for (;;)
{
/* An unterminated floating point number at the */
/* end of a dictionary is invalid but harmless. */
if ( p >= limit )
goto Exit;
v = p[0] >> 4;
if ( v == 15 )
break;
v = p[0] & 0xF;
if ( v == 15 )
break;
p++;
}
}
else if ( v == 28 )
p += 2;
else if ( v == 29 )
p += 4;
else if ( v > 246 )
p += 1;
}
#ifdef CFF_CONFIG_OPTION_OLD_ENGINE
else if ( v == 31 )
{
/* a Type 2 charstring */
CFF_Decoder decoder;
CFF_FontRec cff_rec;
FT_Byte* charstring_base;
FT_ULong charstring_len;
FT_Fixed* stack;
FT_Byte* q;
charstring_base = ++p;
/* search `endchar' operator */
for (;;)
{
if ( p >= limit )
goto Exit;
if ( *p == 14 )
break;
p++;
}
charstring_len = (FT_ULong)( p - charstring_base ) + 1;
/* construct CFF_Decoder object */
FT_ZERO( &decoder );
FT_ZERO( &cff_rec );
cff_rec.top_font.font_dict.num_designs = parser->num_designs;
cff_rec.top_font.font_dict.num_axes = parser->num_axes;
decoder.cff = &cff_rec;
error = cff_decoder_parse_charstrings( &decoder,
charstring_base,
charstring_len,
1 );
/* Now copy the stack data in the temporary decoder object, */
/* converting it back to charstring number representations */
/* (this is ugly, I know). */
/* */
/* We overwrite the original top DICT charstring under the */
/* assumption that the charstring representation of the result */
/* of `cff_decoder_parse_charstrings' is shorter, which should */
/* be always true. */
q = charstring_base - 1;
stack = decoder.stack;
while ( stack < decoder.top )
{
FT_ULong num;
FT_Bool neg;
if ( (FT_UInt)( parser->top - parser->stack ) >= parser->stackSize )
goto Stack_Overflow;
*parser->top++ = q;
if ( *stack < 0 )
{
num = (FT_ULong)-*stack;
neg = 1;
}
else
{
num = (FT_ULong)*stack;
neg = 0;
}
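/* stack values are 16.16 fixed-point; a non-zero fraction forces the 5-byte (opcode 255) encoding */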
if ( num & 0xFFFFU )
{
if ( neg )
num = (FT_ULong)-num;
*q++ = 255;
*q++ = ( num & 0xFF000000U ) >> 24;
*q++ = ( num & 0x00FF0000U ) >> 16;
*q++ = ( num & 0x0000FF00U ) >> 8;
*q++ = num & 0x000000FFU;
}
else
{
num >>= 16;
if ( neg )
{
if ( num <= 107 )
*q++ = (FT_Byte)( 139 - num );
else if ( num <= 1131 )
{
*q++ = (FT_Byte)( ( ( num - 108 ) >> 8 ) + 251 );
*q++ = (FT_Byte)( ( num - 108 ) & 0xFF );
}
else
{
num = (FT_ULong)-num;
*q++ = 28;
*q++ = (FT_Byte)( num >> 8 );
*q++ = (FT_Byte)( num & 0xFF );
}
}
else
{
if ( num <= 107 )
*q++ = (FT_Byte)( num + 139 );
else if ( num <= 1131 )
{
*q++ = (FT_Byte)( ( ( num - 108 ) >> 8 ) + 247 );
*q++ = (FT_Byte)( ( num - 108 ) & 0xFF );
}
else
{
*q++ = 28;
*q++ = (FT_Byte)( num >> 8 );
*q++ = (FT_Byte)( num & 0xFF );
}
}
}
stack++;
}
}
#endif /* CFF_CONFIG_OPTION_OLD_ENGINE */
else
{
/* This is not a number, hence it's an operator. Compute its code */
/* and look for it in our current list. */
FT_UInt code;
FT_UInt num_args = (FT_UInt)
( parser->top - parser->stack );
const CFF_Field_Handler* field;
*parser->top = p;
code = v;
if ( v == 12 )
{
/* two byte operator */
p++;
if ( p >= limit )
  goto Syntax_Error;
code = 0x100 | p[0];
}
code = code | parser->object_code;
for ( field = CFF_FIELD_HANDLERS_GET; field->kind; field++ )
{
if ( field->code == (FT_Int)code )
{
/* we found our field's handler; read it */
FT_Long val;
FT_Byte* q = (FT_Byte*)parser->object + field->offset;
#ifdef FT_DEBUG_LEVEL_TRACE
FT_TRACE4(( " %s", field->id ));
#endif
/* check that we have enough arguments -- except for */
/* delta encoded arrays, which can be empty */
if ( field->kind != cff_kind_delta && num_args < 1 )
goto Stack_Underflow;
switch ( field->kind )
{
case cff_kind_bool:
case cff_kind_string:
case cff_kind_num:
val = cff_parse_num( parser, parser->stack );
goto Store_Number;
case cff_kind_fixed:
val = cff_parse_fixed( parser, parser->stack );
goto Store_Number;
case cff_kind_fixed_thousand:
val = cff_parse_fixed_scaled( parser, parser->stack, 3 );
Store_Number:
switch ( field->size )
{
case (8 / FT_CHAR_BIT):
*(FT_Byte*)q = (FT_Byte)val;
break;
case (16 / FT_CHAR_BIT):
*(FT_Short*)q = (FT_Short)val;
break;
case (32 / FT_CHAR_BIT):
*(FT_Int32*)q = (FT_Int)val;
break;
default: /* for 64-bit systems */
*(FT_Long*)q = val;
}
#ifdef FT_DEBUG_LEVEL_TRACE
switch ( field->kind )
{
case cff_kind_bool:
FT_TRACE4(( " %s\n", val ? "true" : "false" ));
break;
case cff_kind_string:
FT_TRACE4(( " %ld (SID)\n", val ));
break;
case cff_kind_num:
FT_TRACE4(( " %ld\n", val ));
break;
case cff_kind_fixed:
FT_TRACE4(( " %f\n", (double)val / 65536 ));
break;
case cff_kind_fixed_thousand:
FT_TRACE4(( " %f\n", (double)val / 65536 / 1000 ));
default:
; /* never reached */
}
#endif
break;
case cff_kind_delta:
{
FT_Byte* qcount = (FT_Byte*)parser->object +
field->count_offset;
FT_Byte** data = parser->stack;
if ( num_args > field->array_max )
num_args = field->array_max;
FT_TRACE4(( " [" ));
/* store count */
*qcount = (FT_Byte)num_args;
val = 0;
while ( num_args > 0 )
{
val += cff_parse_num( parser, data++ );
switch ( field->size )
{
case (8 / FT_CHAR_BIT):
*(FT_Byte*)q = (FT_Byte)val;
break;
case (16 / FT_CHAR_BIT):
*(FT_Short*)q = (FT_Short)val;
break;
case (32 / FT_CHAR_BIT):
*(FT_Int32*)q = (FT_Int)val;
break;
default: /* for 64-bit systems */
*(FT_Long*)q = val;
}
FT_TRACE4(( " %ld", val ));
q += field->size;
num_args--;
}
FT_TRACE4(( "]\n" ));
}
break;
default: /* callback or blend */
error = field->reader( parser );
if ( error )
goto Exit;
}
goto Found;
}
}
/* this is an unknown operator, or it is unsupported; */
/* we will ignore it for now. */
Found:
/* clear stack */
/* TODO: could clear blend stack here, */
/* but we don't have access to subFont */
if ( field->kind != cff_kind_blend )
parser->top = parser->stack;
}
p++;
}
Exit:
return error;
Stack_Overflow:
error = FT_THROW( Invalid_Argument );
goto Exit;
Stack_Underflow:
error = FT_THROW( Invalid_Argument );
goto Exit;
Syntax_Error:
error = FT_THROW( Invalid_Argument );
goto Exit;
}
|
CWE-787
| 178,406 | 9,157 |
204522571381877294602273773439469707243
| null | null | null |
ghostscript
|
77ab465f1c394bb77f00966cd950650f3f53cb24
| 1 |
static void jsR_calllwfunction(js_State *J, int n, js_Function *F, js_Environment *scope)
{
js_Value v;
int i;
jsR_savescope(J, scope);
if (n > F->numparams) {
js_pop(J, F->numparams - n);
n = F->numparams;
}
for (i = n; i < F->varlen; ++i)
js_pushundefined(J);
jsR_run(J, F);
v = *stackidx(J, -1);
TOP = --BOT; /* clear stack */
js_pushvalue(J, v);
jsR_restorescope(J);
}
|
CWE-119
| 178,412 | 9,158 |
298221118055225794854445878360019454714
| null | null | null |
libav
|
2d1c0dea5f6b91bec7f5fa53ec050913d851e366
| 1 |
static int dv_extract_audio(uint8_t* frame, uint8_t* ppcm[4],
const DVprofile *sys)
{
int size, chan, i, j, d, of, smpls, freq, quant, half_ch;
uint16_t lc, rc;
const uint8_t* as_pack;
uint8_t *pcm, ipcm;
as_pack = dv_extract_pack(frame, dv_audio_source);
if (!as_pack) /* No audio ? */
return 0;
smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */
freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */
quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
if (quant > 1)
return -1; /* unsupported quantization */
size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
half_ch = sys->difseg_size / 2;
/* We work with 720p frames split in half, thus even frames have
* channels 0,1 and odd 2,3. */
ipcm = (sys->height == 720 && !(frame[1] & 0x0C)) ? 2 : 0;
pcm = ppcm[ipcm++];
/* for each DIF channel */
for (chan = 0; chan < sys->n_difchan; chan++) {
/* for each DIF segment */
for (i = 0; i < sys->difseg_size; i++) {
frame += 6 * 80; /* skip DIF segment header */
if (quant == 1 && i == half_ch) {
    /* next stereo channel (12bit mode only) */
    pcm = ppcm[ipcm++];
    if (!pcm)
        break;
}
/* for each AV sequence */
for (j = 0; j < 9; j++) {
for (d = 8; d < 80; d += 2) {
if (quant == 0) { /* 16bit quantization */
of = sys->audio_shuffle[i][j] + (d - 8) / 2 * sys->audio_stride;
if (of*2 >= size)
continue;
pcm[of*2] = frame[d+1]; // FIXME: maybe we have to admit
pcm[of*2+1] = frame[d]; // that DV is a big-endian PCM
if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00)
pcm[of*2+1] = 0;
} else { /* 12bit quantization */
lc = ((uint16_t)frame[d] << 4) |
((uint16_t)frame[d+2] >> 4);
rc = ((uint16_t)frame[d+1] << 4) |
((uint16_t)frame[d+2] & 0x0f);
lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc));
rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc));
of = sys->audio_shuffle[i%half_ch][j] + (d - 8) / 3 * sys->audio_stride;
if (of*2 >= size)
continue;
pcm[of*2] = lc & 0xff; // FIXME: maybe we have to admit
pcm[of*2+1] = lc >> 8; // that DV is a big-endian PCM
of = sys->audio_shuffle[i%half_ch+half_ch][j] +
(d - 8) / 3 * sys->audio_stride;
pcm[of*2] = rc & 0xff; // FIXME: maybe we have to admit
pcm[of*2+1] = rc >> 8; // that DV is a big-endian PCM
++d;
}
}
frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
}
}
/* next stereo channel (50Mbps allows for 4 channels) */
pcm = ppcm[ipcm++];
if (!pcm)
    break;
}
return size;
}
|
CWE-20
| 178,414 | 9,159 |
218089898526816259208409888938234249356
| null | null | null |
xserver
|
6c69235a9dfc52e4b4e47630ff4bab1a820eb543
| 1 |
int __glXDispSwap_CreateContext(__GLXclientState *cl, GLbyte *pc)
{
xGLXCreateContextReq *req = (xGLXCreateContextReq *) pc;
__GLX_DECLARE_SWAP_VARIABLES;
__GLX_SWAP_SHORT(&req->length);
__GLX_SWAP_INT(&req->context);
__GLX_SWAP_INT(&req->visual);
return __glXDisp_CreateContext(cl, pc);
}
|
CWE-20
| 178,441 | 9,163 |
339483667857637655861912070254190836266
| null | null | null |
openssl
|
d3152655d5319ce883c8e3ac4b99f8de4c59d846
| 1 |
dtls1_get_message_fragment(SSL *s, int st1, int stn, long max, int *ok)
{
unsigned char wire[DTLS1_HM_HEADER_LENGTH];
unsigned long len, frag_off, frag_len;
int i,al;
struct hm_header_st msg_hdr;
/* see if we have the required fragment already */
if ((frag_len = dtls1_retrieve_buffered_fragment(s,max,ok)) || *ok)
{
return frag_len;
}
/* read handshake message header */
i=s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE,wire,
DTLS1_HM_HEADER_LENGTH, 0);
if (i <= 0) /* nbio, or an error */
{
s->rwstate=SSL_READING;
*ok = 0;
return i;
}
/* Handshake fails if message header is incomplete */
if (i != DTLS1_HM_HEADER_LENGTH)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
/* parse the message fragment header */
dtls1_get_message_header(wire, &msg_hdr);
/*
* if this is a future (or stale) message it gets buffered
* (or dropped)--no further processing at this time
* While listening, we accept seq 1 (ClientHello with cookie)
* although we're still expecting seq 0 (ClientHello)
*/
if (msg_hdr.seq != s->d1->handshake_read_seq && !(s->d1->listen && msg_hdr.seq == 1))
return dtls1_process_out_of_seq_message(s, &msg_hdr, ok);
len = msg_hdr.msg_len;
frag_off = msg_hdr.frag_off;
frag_len = msg_hdr.frag_len;
if (frag_len && frag_len < len)
return dtls1_reassemble_fragment(s, &msg_hdr, ok);
if (!s->server && s->d1->r_msg_hdr.frag_off == 0 &&
wire[0] == SSL3_MT_HELLO_REQUEST)
{
/* The server may always send 'Hello Request' messages --
* we are doing a handshake anyway now, so ignore them
* if their format is correct. Does not count for
* 'Finished' MAC. */
if (wire[1] == 0 && wire[2] == 0 && wire[3] == 0)
{
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE,
wire, DTLS1_HM_HEADER_LENGTH, s,
s->msg_callback_arg);
s->init_num = 0;
return dtls1_get_message_fragment(s, st1, stn,
max, ok);
}
else /* Incorrectly formated Hello request */
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
}
if ((al=dtls1_preprocess_fragment(s,&msg_hdr,max)))
goto f_err;
/* XDTLS: ressurect this when restart is in place */
s->state=stn;
if ( frag_len > 0)
{
unsigned char *p=(unsigned char *)s->init_buf->data+DTLS1_HM_HEADER_LENGTH;
i=s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE,
&p[frag_off],frag_len,0);
/* XDTLS: fix this--message fragments cannot span multiple packets */
if (i <= 0)
{
s->rwstate=SSL_READING;
*ok = 0;
return i;
}
}
else
i = 0;
/* XDTLS: an incorrectly formatted fragment should cause the
* handshake to fail */
if (i != (int)frag_len)
{
al=SSL3_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL3_AD_ILLEGAL_PARAMETER);
goto f_err;
}
*ok = 1;
/* Note that s->init_num is *not* used as current offset in
* s->init_buf->data, but as a counter summing up fragments'
* lengths: as soon as they sum up to handshake packet
* length, we assume we have got all the fragments. */
s->init_num = frag_len;
return frag_len;
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
s->init_num = 0;
*ok=0;
return(-1);
}
|
CWE-399
| 178,454 | 9,164 |
111272718706111077678654256802983649039
| null | null | null |
openssl
|
2ac4c6f7b2b2af20c0e2b0ba05367e454cd11b33
| 1 |
static int asn1_template_ex_d2i(ASN1_VALUE **pval,
const unsigned char **in, long len,
const ASN1_TEMPLATE *tt, char opt,
ASN1_TLC *ctx);
static int asn1_template_noexp_d2i(ASN1_VALUE **val,
const unsigned char **in, long len,
const ASN1_TEMPLATE *tt, char opt,
ASN1_TLC *ctx);
static int asn1_d2i_ex_primitive(ASN1_VALUE **pval,
const unsigned char **in, long len,
const ASN1_ITEM *it,
int tag, int aclass, char opt,
ASN1_TLC *ctx);
/* Table to convert tags to bit values, used for MSTRING type */
static const unsigned long tag2bit[32] = {
/* tags 0- 3 */
0, 0, 0, B_ASN1_BIT_STRING,
/* tags 4- 7 */
B_ASN1_OCTET_STRING, 0, 0, B_ASN1_UNKNOWN,
/* tags 8-11 */
B_ASN1_UNKNOWN, B_ASN1_UNKNOWN, B_ASN1_UNKNOWN, B_ASN1_UNKNOWN,
/* tags 12-15 */
B_ASN1_UTF8STRING, B_ASN1_UNKNOWN, B_ASN1_UNKNOWN, B_ASN1_UNKNOWN,
/* tags 16-19 */
B_ASN1_SEQUENCE, 0, B_ASN1_NUMERICSTRING, B_ASN1_PRINTABLESTRING,
/* tags 20-22 */
B_ASN1_T61STRING, B_ASN1_VIDEOTEXSTRING, B_ASN1_IA5STRING,
/* tags 23-24 */
B_ASN1_UTCTIME, B_ASN1_GENERALIZEDTIME,
/* tags 25-27 */
B_ASN1_GRAPHICSTRING, B_ASN1_ISO64STRING, B_ASN1_GENERALSTRING,
/* tags 28-31 */
B_ASN1_UNIVERSALSTRING, B_ASN1_UNKNOWN, B_ASN1_BMPSTRING, B_ASN1_UNKNOWN,
};
unsigned long ASN1_tag2bit(int tag)
{
if ((tag < 0) || (tag > 30))
return 0;
return tag2bit[tag];
}
|
CWE-400
| 178,498 | 9,165 |
235254389150224636486464924998474329028
| null | null | null |
linux
|
9ab4233dd08036fe34a89c7dc6f47a8bf2eb29eb
| 1 |
static long madvise_remove(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
loff_t offset;
int error;
*prev = NULL; /* tell sys_madvise we drop mmap_sem */
if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
return -EINVAL;
if (!vma->vm_file || !vma->vm_file->f_mapping
|| !vma->vm_file->f_mapping->host) {
return -EINVAL;
}
if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
return -EACCES;
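/* translate the VMA-relative range into a byte offset within the backing file */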
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
/* filesystem's fallocate may need to take i_mutex */
up_read(&current->mm->mmap_sem);
error = do_fallocate(vma->vm_file,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, end - start);
down_read(&current->mm->mmap_sem);
return error;
}
|
CWE-362
| 178,753 | 9,178 |
320481942482873424370261682146263307991
| null | null | null |
linux
|
44afb3a04391a74309d16180d1e4f8386fdfa745
| 1 |
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
return ret;
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4)
return -EINVAL;
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
}
if (copy_from_user(cliprects,
(struct drm_clip_rect __user *)(uintptr_t)
args->cliprects_ptr,
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
}
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto pre_mutex_err;
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
eb = eb_create(args->buffer_count);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto pre_mutex_err;
}
/* Look up object handles */
INIT_LIST_HEAD(&objects);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
goto err;
}
if (!list_empty(&obj->exec_list)) {
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
}
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
&objects, eb,
exec,
args->buffer_count);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
ret = i915_gpu_idle(dev, true);
if (ret)
goto err;
BUG_ON(ring->sync_seqno[i]);
}
}
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, mask << 16 | mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto err;
}
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
if (ret)
goto err;
}
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
kfree(cliprects);
return ret;
}
|
CWE-189
| 178,768 | 9,180 |
74444851096954785728179020873301313744
| null | null | null |
linux
|
88d7d4e4a439f32acc56a6d860e415ee71d3df08
| 1 |
cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
struct nameidata *nd)
{
int xid;
int rc = 0; /* to get around spurious gcc warning, set to zero here */
__u32 oplock = enable_oplocks ? REQ_OPLOCK : 0;
__u16 fileHandle = 0;
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
char *full_path = NULL;
struct file *filp;
xid = GetXid();
cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
parent_dir_inode, direntry->d_name.name, direntry);
/* check whether path exists */
cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
FreeXid(xid);
return (struct dentry *)tlink;
}
pTcon = tlink_tcon(tlink);
/*
* Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix.
*/
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
int i;
for (i = 0; i < direntry->d_name.len; i++)
if (direntry->d_name.name[i] == '\\') {
cFYI(1, "Invalid file name");
rc = -EINVAL;
goto lookup_out;
}
}
/*
* O_EXCL: optimize away the lookup, but don't hash the dentry. Let
* the VFS handle the create.
*/
if (nd && (nd->flags & LOOKUP_EXCL)) {
d_instantiate(direntry, NULL);
rc = 0;
goto lookup_out;
}
/* can not grab the rename sem here since it would
deadlock in the cases (beginning of sys_rename itself)
in which we already have the sb rename sem */
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
goto lookup_out;
}
if (direntry->d_inode != NULL) {
cFYI(1, "non-NULL inode in lookup");
} else {
cFYI(1, "NULL inode in lookup");
}
cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode);
/* Posix open is only called (at lookup time) for file create now.
* For opens (rather than creates), because we do not know if it
* is a file or directory yet, and current Samba no longer allows
* us to do posix open on dirs, we could end up wasting an open call
* on what turns out to be a dir. For file opens, we wait to call posix
* open till cifs_open. It could be added here (lookup) in the future
* but the performance tradeoff of the extra network request when EISDIR
* or EACCES is returned would have to be weighed against the 50%
* reduction in network traffic in the other paths.
*/
if (pTcon->unix_ext) {
if (nd && !(nd->flags & LOOKUP_DIRECTORY) &&
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.file->f_flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode,
parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.file->f_flags, &oplock,
&fileHandle, xid);
/*
* The check below works around a bug in POSIX
* open in samba versions 3.3.1 and earlier where
* open could incorrectly fail with invalid parameter.
* If either that or op not supported returned, follow
* the normal lookup.
*/
if ((rc == 0) || (rc == -ENOENT))
posix_open = true;
else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
pTcon->broken_posix_open = true;
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path,
parent_dir_inode->i_sb, xid);
} else
rc = cifs_get_inode_info(&newInode, full_path, NULL,
parent_dir_inode->i_sb, xid, NULL);
if ((rc == 0) && (newInode != NULL)) {
d_add(direntry, newInode);
if (posix_open) {
filp = lookup_instantiate_filp(nd, direntry,
generic_file_open);
if (IS_ERR(filp)) {
rc = PTR_ERR(filp);
CIFSSMBClose(xid, pTcon, fileHandle);
goto lookup_out;
}
cfile = cifs_new_fileinfo(fileHandle, filp, tlink,
oplock);
if (cfile == NULL) {
fput(filp);
CIFSSMBClose(xid, pTcon, fileHandle);
rc = -ENOMEM;
goto lookup_out;
}
}
/* since paths are not looked up by component - the parent
directories are presumed to be good here */
renew_parental_timestamps(direntry);
} else if (rc == -ENOENT) {
rc = 0;
direntry->d_time = jiffies;
d_add(direntry, NULL);
/* if it was once a directory (but how can we tell?) we could do
shrink_dcache_parent(direntry); */
} else if (rc != -EACCES) {
cERROR(1, "Unexpected lookup error %d", rc);
/* We special case check for Access Denied - since that
is a common return code */
}
lookup_out:
kfree(full_path);
cifs_put_tlink(tlink);
FreeXid(xid);
return ERR_PTR(rc);
}
|
CWE-264
| 178,817 | 9,186 |
78921931124181501030937814275274450414
| null | null | null |
krb5
|
c5be6209311d4a8f10fda37d0d3f876c1b33b77b
| 1 |
check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple, char **passptr)
{
int i;
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
if (!(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
/* The 1.6 dummy password was the octets 1..255. */
for (i = 0; (unsigned char) password[i] == i + 1; i++);
if (password[i] != '\0' || i != 255)
return;
/* This will make the caller use a random password instead. */
*passptr = NULL;
}
| 178,818 | 9,187 |
224544473814035149780336818294961145372
| null | null | null |
|
linux
|
c2226fc9e87ba3da060e47333657cd6616652b84
| 1 |
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
/* syscall is not available in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
setup_syscalls_segments(ctxt, &cs, &ss);
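/* SYSCALL takes CS from STAR[47:32]; SS is the following descriptor (CS + 8) */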
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
cs_sel = (u16)(msr_data & 0xfffc);
ss_sel = (u16)(msr_data + 8);
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
}
return X86EMUL_CONTINUE;
}
| 178,826 | 9,188 |
38887675212651309593140099460610554167
| null | null | null |
|
linux
|
fa8b18edd752a8b4e9d1ee2cd615b82c93cf8bba
| 1 |
xfs_acl_from_disk(struct xfs_acl *aclp)
{
struct posix_acl_entry *acl_e;
struct posix_acl *acl;
struct xfs_acl_entry *ace;
int count, i;
count = be32_to_cpu(aclp->acl_cnt);
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
acl_e = &acl->a_entries[i];
ace = &aclp->acl_entry[i];
/*
* The tag is 32 bits on disk and 16 bits in core.
*
* Because every access to it goes through the core
* format first this is not a problem.
*/
acl_e->e_tag = be32_to_cpu(ace->ae_tag);
acl_e->e_perm = be16_to_cpu(ace->ae_perm);
switch (acl_e->e_tag) {
case ACL_USER:
case ACL_GROUP:
acl_e->e_id = be32_to_cpu(ace->ae_id);
break;
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
acl_e->e_id = ACL_UNDEFINED_ID;
break;
default:
goto fail;
}
}
return acl;
fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
|
CWE-189
| 178,828 | 9,189 |
176969275131239170054727135174228140785
| null | null | null |
linux
|
76597cd31470fa130784c78fadb4dab2e624a723
| 1 |
static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
vma_stop(priv, vma);
if (priv->task)
put_task_struct(priv->task);
}
|
CWE-20
| 178,916 | 9,203 |
61888074922712538336413917366812196244
| null | null | null |
linux
|
9438fabb73eb48055b58b89fc51e0bc4db22fabd
| 1 |
int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
__u16 searchHandle, struct cifs_search_info *psrch_inf)
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
T2_FNEXT_RSP_PARMS *parms;
char *response_data;
int rc = 0;
int bytes_returned, name_len;
__u16 params, byte_count;
cFYI(1, "In FindNext");
if (psrch_inf->endOfSearch)
return -ENOENT;
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 14; /* includes 2 bytes of null string, converted to LE below*/
byte_count = 0;
pSMB->TotalDataCount = 0; /* no EAs */
pSMB->MaxParameterCount = cpu_to_le16(8);
pSMB->MaxDataCount =
cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) &
0xFFFFFF00);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(
offsetof(struct smb_com_transaction2_fnext_req,SearchHandle) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT);
pSMB->SearchHandle = searchHandle; /* always kept as le */
pSMB->SearchCount =
cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
pSMB->ResumeKey = psrch_inf->resume_key;
pSMB->SearchFlags =
cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
name_len = psrch_inf->resume_name_len;
params += name_len;
if (name_len < PATH_MAX) {
memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
byte_count += name_len;
/* 14 byte parm len above enough for 2 byte null terminator */
pSMB->ResumeFileName[name_len] = 0;
pSMB->ResumeFileName[name_len+1] = 0;
} else {
rc = -EINVAL;
goto FNext2_err_exit;
}
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_fnext);
if (rc) {
if (rc == -EBADF) {
psrch_inf->endOfSearch = true;
cifs_buf_release(pSMB);
rc = 0; /* search probably was closed at end of search*/
} else
cFYI(1, "FindNext returned = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc == 0) {
unsigned int lnoff;
/* BB fixme add lock for file (srch_info) struct here */
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
psrch_inf->unicode = true;
else
psrch_inf->unicode = false;
response_data = (char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.ParameterOffset);
parms = (T2_FNEXT_RSP_PARMS *)response_data;
response_data = (char *)&pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
if (psrch_inf->smallBuf)
cifs_small_buf_release(
psrch_inf->ntwrk_buf_start);
else
cifs_buf_release(psrch_inf->ntwrk_buf_start);
psrch_inf->srch_entries_start = response_data;
psrch_inf->ntwrk_buf_start = (char *)pSMB;
psrch_inf->smallBuf = 0;
if (parms->EndofSearch)
psrch_inf->endOfSearch = true;
else
psrch_inf->endOfSearch = false;
psrch_inf->entries_in_buffer =
le16_to_cpu(parms->SearchCount);
psrch_inf->index_of_last_entry +=
psrch_inf->entries_in_buffer;
lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
lnoff) {
cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL;
return rc;
} else
psrch_inf->last_entry =
psrch_inf->srch_entries_start + lnoff;
/* cFYI(1, "fnxt2 entries in buf %d index_of_last %d",
psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */
/* BB fixme add unlock here */
}
}
/* BB On error, should we leave previous search buf (and count and
last entry fields) intact or free the previous one? */
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
FNext2_err_exit:
if (rc != 0)
cifs_buf_release(pSMB);
return rc;
}
|
CWE-189
| 178,931 | 9,204 |
168747422728345508248514807716181598995
| null | null | null |
linux
|
b5b515445f4f5a905c5dd27e6e682868ccd6c09d
| 1 |
static long pmcraid_ioctl_passthrough(
struct pmcraid_instance *pinstance,
unsigned int ioctl_cmd,
unsigned int buflen,
unsigned long arg
)
{
struct pmcraid_passthrough_ioctl_buffer *buffer;
struct pmcraid_ioarcb *ioarcb;
struct pmcraid_cmd *cmd;
struct pmcraid_cmd *cancel_cmd;
unsigned long request_buffer;
unsigned long request_offset;
unsigned long lock_flags;
void *ioasa;
u32 ioasc;
int request_size;
int buffer_size;
u8 access, direction;
int rc = 0;
/* If IOA reset is in progress, wait 10 secs for reset to complete */
if (pinstance->ioa_reset_in_progress) {
rc = wait_event_interruptible_timeout(
pinstance->reset_wait_q,
!pinstance->ioa_reset_in_progress,
msecs_to_jiffies(10000));
if (!rc)
return -ETIMEDOUT;
else if (rc < 0)
return -ERESTARTSYS;
}
/* If adapter is not in operational state, return error */
if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
pmcraid_err("IOA is not operational\n");
return -ENOTTY;
}
buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
pmcraid_err("no memory for passthrough buffer\n");
return -ENOMEM;
}
request_offset =
offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
request_buffer = arg + request_offset;
rc = __copy_from_user(buffer,
(struct pmcraid_passthrough_ioctl_buffer *) arg,
sizeof(struct pmcraid_passthrough_ioctl_buffer));
ioasa =
(void *)(arg +
offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
if (rc) {
pmcraid_err("ioctl: can't copy passthrough buffer\n");
rc = -EFAULT;
goto out_free_buffer;
}
request_size = buffer->ioarcb.data_transfer_length;
if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
access = VERIFY_READ;
direction = DMA_TO_DEVICE;
} else {
access = VERIFY_WRITE;
direction = DMA_FROM_DEVICE;
}
if (request_size > 0) {
rc = access_ok(access, arg, request_offset + request_size);
if (!rc) {
rc = -EFAULT;
goto out_free_buffer;
}
} else if (request_size < 0) {
rc = -EINVAL;
goto out_free_buffer;
}
/* check if we have any additional command parameters */
if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
rc = -EINVAL;
goto out_free_buffer;
}
cmd = pmcraid_get_free_cmd(pinstance);
if (!cmd) {
pmcraid_err("free command block is not available\n");
rc = -ENOMEM;
goto out_free_buffer;
}
cmd->scsi_cmd = NULL;
ioarcb = &(cmd->ioa_cb->ioarcb);
/* Copy the user-provided IOARCB stuff field by field */
ioarcb->resource_handle = buffer->ioarcb.resource_handle;
ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
ioarcb->request_type = buffer->ioarcb.request_type;
ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
if (buffer->ioarcb.add_cmd_param_length) {
ioarcb->add_cmd_param_length =
buffer->ioarcb.add_cmd_param_length;
ioarcb->add_cmd_param_offset =
buffer->ioarcb.add_cmd_param_offset;
memcpy(ioarcb->add_data.u.add_cmd_params,
buffer->ioarcb.add_data.u.add_cmd_params,
buffer->ioarcb.add_cmd_param_length);
}
/* set hrrq number where the IOA should respond to. Note that all cmds
* generated internally use hrrq_id 0; the exception is the cmd
* block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
* hrrq_id assigned here in queuecommand
*/
ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
pinstance->num_hrrq;
if (request_size) {
rc = pmcraid_build_passthrough_ioadls(cmd,
request_size,
direction);
if (rc) {
pmcraid_err("couldn't build passthrough ioadls\n");
goto out_free_buffer;
}
}
/* If data is being written into the device, copy the data from user
* buffers
*/
if (direction == DMA_TO_DEVICE && request_size > 0) {
rc = pmcraid_copy_sglist(cmd->sglist,
request_buffer,
request_size,
direction);
if (rc) {
pmcraid_err("failed to copy user buffer\n");
goto out_free_sglist;
}
}
/* passthrough ioctl is a blocking command so, put the user to sleep
* until timeout. Note that a timeout value of 0 means wait indefinitely.
*/
cmd->cmd_done = pmcraid_internal_done;
init_completion(&cmd->wait_for_completion);
cmd->completion_req = 1;
pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
_pmcraid_fire_command(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
/* NOTE ! Remove the below line once abort_task is implemented
* in firmware. This line disables ioctl command timeout handling logic
* similar to IO command timeout handling, making ioctl commands to wait
* until the command completion regardless of timeout value specified in
* ioarcb
*/
buffer->ioarcb.cmd_timeout = 0;
/* If a command timeout is specified, put the caller to sleep until that
* time; otherwise this is a blocking wait. If the command times out, it
* will be aborted.
*/
if (buffer->ioarcb.cmd_timeout == 0) {
wait_for_completion(&cmd->wait_for_completion);
} else if (!wait_for_completion_timeout(
&cmd->wait_for_completion,
msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2),
cmd->ioa_cb->ioarcb.cdb[0]);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
cancel_cmd = pmcraid_abort_cmd(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
if (cancel_cmd) {
wait_for_completion(&cancel_cmd->wait_for_completion);
ioasc = cancel_cmd->ioa_cb->ioasa.ioasc;
pmcraid_return_cmd(cancel_cmd);
/* if abort task couldn't find the command i.e it got
* completed prior to aborting, return good completion.
* if command got aborted successfully or there was IOA
* reset due to the abort task itself getting timed out, then
* return -ETIMEDOUT
*/
if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
rc = -ETIMEDOUT;
goto out_handle_response;
}
}
/* no command block for abort task or abort task failed to abort
* the IOARCB, then wait for 150 more seconds and initiate reset
* sequence after timeout
*/
if (!wait_for_completion_timeout(
&cmd->wait_for_completion,
msecs_to_jiffies(150 * 1000))) {
pmcraid_reset_bringup(cmd->drv_inst);
rc = -ETIMEDOUT;
}
}
out_handle_response:
/* copy entire IOASA buffer and return IOCTL success.
* If copying IOASA to user-buffer fails, return
* EFAULT
*/
if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
sizeof(struct pmcraid_ioasa))) {
pmcraid_err("failed to copy ioasa buffer to user\n");
rc = -EFAULT;
}
/* If the data transfer was from device, copy the data onto user
* buffers
*/
else if (direction == DMA_FROM_DEVICE && request_size > 0) {
rc = pmcraid_copy_sglist(cmd->sglist,
request_buffer,
request_size,
direction);
if (rc) {
pmcraid_err("failed to copy user buffer\n");
rc = -EFAULT;
}
}
out_free_sglist:
pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
pmcraid_return_cmd(cmd);
out_free_buffer:
kfree(buffer);
return rc;
}
|
CWE-189
| 179,018 | 9,213 |
295586048330353887553455689305190810232
| null | null | null |
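The CWE-189 tag on the pmcraid_ioctl_passthrough record above points at request_size, which is copied from the user-controlled ioarcb.data_transfer_length and then used to size DMA buffers and copies. A minimal sketch of the kind of bound check the fix commit listed in the record likely adds (PMCRAID_IOCTL_MAX_XFER is a hypothetical limit, not a real driver constant):
	request_size = buffer->ioarcb.data_transfer_length;
	/* hypothetical hardening: bound the length before any allocation */
	if (request_size < 0 || request_size > PMCRAID_IOCTL_MAX_XFER) {
		rc = -EINVAL;
		goto out_free_buffer;
	}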
linux
|
fc66c5210ec2539e800e87d7b3a985323c7be96e
| 1 |
static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
}
|
CWE-189
| 179,027 | 9,215 |
54222039831800633742006737798139001622
| null | null | null |
linux
|
4e78c724d47e2342aa8fde61f6b8536f662f795f
| 1 |
static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
struct path *dir, char *type, unsigned long flags)
{
struct path path;
struct file_system_type *fstype = NULL;
const char *requested_type = NULL;
const char *requested_dir_name = NULL;
const char *requested_dev_name = NULL;
struct tomoyo_path_info rtype;
struct tomoyo_path_info rdev;
struct tomoyo_path_info rdir;
int need_dev = 0;
int error = -ENOMEM;
/* Get fstype. */
requested_type = tomoyo_encode(type);
if (!requested_type)
goto out;
rtype.name = requested_type;
tomoyo_fill_path_info(&rtype);
/* Get mount point. */
requested_dir_name = tomoyo_realpath_from_path(dir);
if (!requested_dir_name) {
error = -ENOMEM;
goto out;
}
rdir.name = requested_dir_name;
tomoyo_fill_path_info(&rdir);
/* Compare fs name. */
if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD)) {
/* dev_name is ignored. */
} else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD)) {
/* dev_name is ignored. */
} else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD)) {
need_dev = -1; /* dev_name is a directory */
} else {
fstype = get_fs_type(type);
if (!fstype) {
error = -ENODEV;
goto out;
}
if (fstype->fs_flags & FS_REQUIRES_DEV)
/* dev_name is a block device file. */
need_dev = 1;
}
if (need_dev) {
/* Get mount point or device file. */
if (kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
error = -ENOENT;
goto out;
}
requested_dev_name = tomoyo_realpath_from_path(&path);
path_put(&path);
if (!requested_dev_name) {
error = -ENOENT;
goto out;
}
} else {
/* Map dev_name to "<NULL>" if no dev_name given. */
if (!dev_name)
dev_name = "<NULL>";
requested_dev_name = tomoyo_encode(dev_name);
if (!requested_dev_name) {
error = -ENOMEM;
goto out;
}
}
rdev.name = requested_dev_name;
tomoyo_fill_path_info(&rdev);
r->param_type = TOMOYO_TYPE_MOUNT_ACL;
r->param.mount.need_dev = need_dev;
r->param.mount.dev = &rdev;
r->param.mount.dir = &rdir;
r->param.mount.type = &rtype;
r->param.mount.flags = flags;
do {
tomoyo_check_acl(r, tomoyo_check_mount_acl);
error = tomoyo_audit_mount_log(r);
} while (error == TOMOYO_RETRY_REQUEST);
out:
kfree(requested_dev_name);
kfree(requested_dir_name);
if (fstype)
put_filesystem(fstype);
kfree(requested_type);
return error;
}
|
CWE-20
| 179,028 | 9,216 |
163792367432020116491478625673365826919
| null | null | null |
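For the CWE-20 tomoyo_mount_acl record above, kern_path() can be reached with a NULL dev_name when the filesystem requires a device, which oopses the kernel. A sketch of the likely fix direction (hedged, not the verbatim upstream diff):
	if (need_dev) {
		/* sketch: reject a missing device name before kern_path() */
		if (!dev_name) {
			error = -ENOENT;
			goto out;
		}
	}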
linux
|
982134ba62618c2d69fbbbd166d0a11ee3b7e3d8
| 1 |
static struct vm_area_struct *vma_to_resize(unsigned long addr,
unsigned long old_len, unsigned long new_len, unsigned long *p)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = find_vma(mm, addr);
if (!vma || vma->vm_start > addr)
goto Efault;
if (is_vm_hugetlb_page(vma))
goto Einval;
/* We can't remap across vm area boundaries */
if (old_len > vma->vm_end - addr)
goto Efault;
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
if (new_len > old_len)
goto Efault;
}
if (vma->vm_flags & VM_LOCKED) {
unsigned long locked, lock_limit;
locked = mm->locked_vm << PAGE_SHIFT;
lock_limit = rlimit(RLIMIT_MEMLOCK);
locked += new_len - old_len;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
goto Eagain;
}
if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
goto Enomem;
if (vma->vm_flags & VM_ACCOUNT) {
unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
if (security_vm_enough_memory(charged))
goto Efault;
*p = charged;
}
return vma;
Efault: /* very odd choice for most of the cases, but... */
return ERR_PTR(-EFAULT);
Einval:
return ERR_PTR(-EINVAL);
Enomem:
return ERR_PTR(-ENOMEM);
Eagain:
return ERR_PTR(-EAGAIN);
}
|
CWE-189
| 179,031 | 9,218 |
292133340135753070222274986926337714178
| null | null | null |
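The CWE-189 tag on the vma_to_resize record above concerns integer wrap when a mapping grows: the vm_pgoff arithmetic for the enlarged length is never checked for overflow. A sketch of the wrap check the fix commit in the record likely introduces (an assumption about its shape):
	if (new_len > old_len) {
		unsigned long pgoff = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				      vma->vm_pgoff;
		/* reject a growth whose page-offset arithmetic wraps */
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}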
linux
|
1d1221f375c94ef961ba8574ac4f85c8870ddd51
| 1 |
static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
{
struct task_io_accounting acct = task->ioac;
unsigned long flags;
if (whole && lock_task_sighand(task, &flags)) {
struct task_struct *t = task;
task_io_accounting_add(&acct, &task->signal->ioac);
while_each_thread(task, t)
task_io_accounting_add(&acct, &t->ioac);
unlock_task_sighand(task, &flags);
}
return sprintf(buffer,
"rchar: %llu\n"
"wchar: %llu\n"
"syscr: %llu\n"
"syscw: %llu\n"
"read_bytes: %llu\n"
"write_bytes: %llu\n"
"cancelled_write_bytes: %llu\n",
(unsigned long long)acct.rchar,
(unsigned long long)acct.wchar,
(unsigned long long)acct.syscr,
(unsigned long long)acct.syscw,
(unsigned long long)acct.read_bytes,
(unsigned long long)acct.write_bytes,
(unsigned long long)acct.cancelled_write_bytes);
}
|
CWE-264
| 179,032 | 9,219 |
332054479918898635206192650620303320428
| null | null | null |
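The CWE-264 do_io_accounting record above reports per-task I/O counters to any reader of /proc/PID/io; the fix commit listed in the record gates the read behind a ptrace access check. A minimal sketch of that direction (placement and error handling simplified):
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		return -EACCES;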
linux
|
1309d7afbed112f0e8e90be9af975550caa0076b
| 1 |
int tpm_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct tpm_chip *chip = NULL, *pos;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (pos->vendor.miscdev.minor == minor) {
chip = pos;
get_device(chip->dev);
break;
}
}
rcu_read_unlock();
if (!chip)
return -ENODEV;
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->dev, "Another process owns this TPM\n");
put_device(chip->dev);
return -EBUSY;
}
chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return -ENOMEM;
}
atomic_set(&chip->data_pending, 0);
file->private_data = chip;
return 0;
}
|
CWE-200
| 179,067 | 9,222 |
322550929471969998124965057697402903756
| null | null | null |
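The CWE-200 tag on the tpm_open record above fits the kmalloc() of data_buffer, whose uninitialized heap contents can later be read back through the character device. A one-line sketch of the likely fix, zero-initializing the buffer:
	chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);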
linux
|
c4c896e1471aec3b004a693c689f60be3b17ac86
| 1 |
static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case SCO_OPTIONS:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
opts.mtu = sco_pi(sk)->conn->mtu;
BT_DBG("mtu %d", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
break;
case SCO_CONNINFO:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
|
CWE-200
| 179,070 | 9,223 |
239002668337010972487303411545552736079
| null | null | null |
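In the CWE-200 sco_sock_getsockopt_old record above, struct sco_conninfo lives on the stack and is only partially assigned, so copy_to_user() also exports uninitialized padding bytes. A sketch of the likely fix, clearing the struct before filling it:
		memset(&cinfo, 0, sizeof(cinfo));
		cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;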
linux
|
ae7b4e1f213aa659aedf9c6ecad0bf5f0476e1e2
| 1 |
int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
{
struct fib6_node *fn, *pn = NULL;
int err = -ENOMEM;
int allow_create = 1;
int replace_required = 0;
if (info->nlh) {
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
allow_create = 0;
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
replace_required = 1;
}
if (!allow_create && !replace_required)
pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
offsetof(struct rt6_info, rt6i_dst), allow_create,
replace_required);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
goto out;
}
pn = fn;
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen) {
struct fib6_node *sn;
if (!fn->subtree) {
struct fib6_node *sfn;
/*
* Create subtree.
*
* fn[main tree]
* |
* sfn[subtree root]
* \
* sn[new leaf node]
*/
/* Create subtree root node */
sfn = node_alloc();
if (!sfn)
goto st_failure;
sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
sfn->fn_flags = RTN_ROOT;
sfn->fn_sernum = fib6_new_sernum();
/* Now add the first leaf node to new subtree */
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
if (IS_ERR(sn)) {
/* If it failed, discard the just-allocated
   root, and then (in st_failure) the stale
   node in the main tree.
*/
node_free(sfn);
err = PTR_ERR(sn);
goto st_failure;
}
/* Now link new subtree to main tree */
sfn->parent = fn;
fn->subtree = sfn;
} else {
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
if (IS_ERR(sn)) {
err = PTR_ERR(sn);
goto st_failure;
}
}
if (!fn->leaf) {
fn->leaf = rt;
atomic_inc(&rt->rt6i_ref);
}
fn = sn;
}
#endif
err = fib6_add_rt2node(fn, rt, info);
if (!err) {
fib6_start_gc(info->nl_net, rt);
if (!(rt->rt6i_flags & RTF_CACHE))
fib6_prune_clones(info->nl_net, pn, rt);
}
out:
if (err) {
#ifdef CONFIG_IPV6_SUBTREES
/*
* If fib6_add_1 has cleared the old leaf pointer in the
* super-tree leaf node we have to find a new one for it.
*/
if (pn != fn && pn->leaf == rt) {
pn->leaf = NULL;
atomic_dec(&rt->rt6i_ref);
}
if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
pn->leaf = fib6_find_prefix(info->nl_net, pn);
#if RT6_DEBUG >= 2
if (!pn->leaf) {
WARN_ON(pn->leaf == NULL);
pn->leaf = info->nl_net->ipv6.ip6_null_entry;
}
#endif
atomic_inc(&pn->leaf->rt6i_ref);
}
#endif
dst_free(&rt->dst);
}
return err;
#ifdef CONFIG_IPV6_SUBTREES
/* Subtree creation failed, probably main tree node
is orphan. If it is, shoot it.
*/
st_failure:
if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
fib6_repair_tree(info->nl_net, fn);
dst_free(&rt->dst);
return err;
#endif
}
|
CWE-264
| 179,110 | 9,226 |
184447025253627893482253456982397708648
| null | null | null |
linux
|
12d6e7538e2d418c08f082b1b44ffa5fb7270ed8
| 1 |
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
r = check_memory_region_flags(mem);
if (r)
goto out;
r = -EINVAL;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
/* We can read the guest memory with __xxx_user() later on. */
if (user_alloc &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE,
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
if (mem->slot >= KVM_MEM_SLOTS_NUM)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
memslot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
new = old = *memslot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
/* Disallow changing a memory slot's size. */
r = -EINVAL;
if (npages && old.npages && npages != old.npages)
goto out_free;
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
if (!((base_gfn + npages <= s->base_gfn) ||
(base_gfn >= s->base_gfn + s->npages)))
goto out_free;
}
/* Free page dirty bitmap if unneeded */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
r = -ENOMEM;
/* Allocate if a slot is being created */
if (npages && !old.npages) {
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
if (kvm_arch_create_memslot(&new, npages))
goto out_free;
}
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
}
if (!npages) {
struct kvm_memory_slot *slot;
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
update_memslots(slots, NULL);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
/* From this point no new shadow pages pointing to a deleted
* memslot will be created.
*
* validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
kfree(old_memslots);
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;
/* map/unmap the pages in iommu page table */
if (npages) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_free;
} else
kvm_iommu_unmap_pages(kvm, &old);
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
update_memslots(slots, &new);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
/*
* If the new memory slot is created, we need to clear all
* mmio sptes.
*/
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
kvm_arch_flush_shadow_all(kvm);
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
return 0;
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
}
|
CWE-399
| 179,127 | 9,227 |
121585265049784867174796811695571929882
| null | null | null |
linux
|
04bcef2a83f40c6db24222b27a52892cba39dffb
| 1 |
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
return -EFAULT;
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = IP_VS_CONN_TAB_SIZE;
info.num_services = ip_vs_num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
union nf_inet_addr addr;
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark);
else
svc = __ip_vs_service_get(AF_INET, entry->protocol,
&addr, entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
ip_vs_service_put(svc);
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(&t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_DAEMON:
{
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
d[0].syncid = ip_vs_master_syncid;
}
if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
d[1].syncid = ip_vs_backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
|
CWE-119
| 179,129 | 9,228 |
171209038462118155141573118053075919369
| null | null | null |
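The CWE-119 tag on the do_ip_vs_get_ctl record above matches the unvalidated cmd used to index get_arglen[] via GET_CMDID() before any length check. A sketch of the range check the fix likely adds (constants taken from the IPVS uapi headers):
	if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
		return -EINVAL;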
linux
|
201f99f170df14ba52ea4c52847779042b7a623b
| 1 |
static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
int tmp;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
if ((*end != '\0') && !isspace(*end))
return -EINVAL;
uml_exitcode = tmp;
return count;
}
|
CWE-119
| 179,138 | 9,229 |
196296543578911643711491902380101464697
| null | null | null |
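The CWE-119 exitcode_proc_write record above copies count user bytes into a fixed stack buffer sized for "nnnnn\0". A sketch of the clamped copy the fix likely uses:
	char buf[sizeof("nnnnn\0")];
	size_t size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, buffer, size))
		return -EFAULT;
	buf[size] = '\0';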
linux
|
6160968cee8b90a5dd95318d716e31d7775c4ef3
| 1 |
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
struct cred *cred;
if (!(unshare_flags & CLONE_NEWUSER))
return 0;
cred = prepare_creds();
if (!cred)
return -ENOMEM;
*new_cred = cred;
return create_user_ns(cred);
}
|
CWE-399
| 179,185 | 9,233 |
11304749673525120782247143879561251219
| null | null | null |
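The CWE-399 tag on the unshare_userns record above fits the early *new_cred = cred assignment: if create_user_ns() fails, the caller is left holding a cred it may put again. A sketch of the likely fix, publishing the cred only on success:
	int err = create_user_ns(cred);
	if (err)
		put_cred(cred);
	else
		*new_cred = cred;
	return err;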
linux
|
c8c499175f7d295ef867335bceb9a76a2c3cdc38
| 1 |
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sco_pinfo *pi = sco_pi(sk);
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
hci_conn_accept(pi->conn->hcon, 0);
sk->sk_state = BT_CONFIG;
release_sock(sk);
return 0;
}
release_sock(sk);
return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
|
CWE-200
| 179,213 | 9,235 |
241725848163111905817351145335445375566
| null | null | null |
linux
|
72a763d805a48ac8c0bf48fdb510e84c12de51fe
| 1 |
static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
int err;
if (len > ds)
len = ds;
else if (len < ds)
msg->msg_flags |= MSG_TRUNC;
lock_sock(sk);
if (ctx->more) {
ctx->more = 0;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
}
err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
unlock:
release_sock(sk);
return err ?: len;
}
|
CWE-200
| 179,218 | 9,236 |
223283461660244758234438857135365750372
| null | null | null |
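In the CWE-200 hash_recvmsg record above, msg->msg_namelen is never set on the recvmsg path, so stale kernel stack bytes can reach userspace as a bogus source address length. The likely one-line fix direction (hedged):
	msg->msg_namelen = 0;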
linux
|
f1923820c447e986a9da0fc6bf60c1dccdf0408e
| 1 |
__init int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
union cpuid10_ebx ebx;
struct event_constraint *c;
unsigned int unused;
int version;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
case 0x6:
return p6_pmu_init();
case 0xb:
return knc_pmu_init();
case 0xf:
return p4_pmu_init();
}
return -ENODEV;
}
/*
* Check whether the Architectural PerfMon supports
* Branch Misses Retired hw_event or not.
*/
cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
return -ENODEV;
version = eax.split.version_id;
if (version < 2)
x86_pmu = core_pmu;
else
x86_pmu = intel_pmu;
x86_pmu.version = version;
x86_pmu.num_counters = eax.split.num_counters;
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
x86_pmu.events_maskl = ebx.full;
x86_pmu.events_mask_len = eax.split.mask_length;
x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
*/
if (version > 1)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
/*
* v2 and above have a perf capabilities MSR
*/
if (version > 1) {
u64 capabilities;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
x86_pmu.intel_cap.capabilities = capabilities;
}
intel_ds_init();
x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
/*
* Install the hw-cache-events table:
*/
switch (boot_cpu_data.x86_model) {
case 14: /* 65 nm core solo/duo, "Yonah" */
pr_cont("Core events, ");
break;
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
x86_add_quirk(intel_clovertown_quirk);
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 29: /* six-core 45 nm xeon "Dunnington" */
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
intel_pmu_lbr_init_core();
x86_pmu.event_constraints = intel_core2_event_constraints;
x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
pr_cont("Core2 events, ");
break;
case 26: /* 45 nm nehalem, "Bloomfield" */
case 30: /* 45 nm nehalem, "Lynnfield" */
case 46: /* 45 nm nehalem-ex, "Beckton" */
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_nehalem_event_constraints;
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
x86_add_quirk(intel_nehalem_quirk);
pr_cont("Nehalem events, ");
break;
case 28: /* Atom */
case 38: /* Lincroft */
case 39: /* Penwell */
case 53: /* Cloverview */
case 54: /* Cedarview */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
intel_pmu_lbr_init_atom();
x86_pmu.event_constraints = intel_gen_event_constraints;
x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
pr_cont("Atom events, ");
break;
case 37: /* 32 nm nehalem, "Clarkdale" */
case 44: /* 32 nm nehalem, "Gulftown" */
case 47: /* 32 nm Xeon E7 */
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_westmere_event_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
x86_pmu.extra_regs = intel_westmere_extra_regs;
x86_pmu.er_flags |= ERF_HAS_RSP_1;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
pr_cont("Westmere events, ");
break;
case 42: /* SandyBridge */
case 45: /* SandyBridge, "Romely-EP" */
x86_add_quirk(intel_sandybridge_quirk);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_snb();
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
pr_cont("SandyBridge events, ");
break;
case 58: /* IvyBridge */
case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_snb();
x86_pmu.event_constraints = intel_ivb_event_constraints;
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
pr_cont("IvyBridge events, ");
break;
default:
switch (x86_pmu.version) {
case 1:
x86_pmu.event_constraints = intel_v1_event_constraints;
pr_cont("generic architected perfmon v1, ");
break;
default:
/*
* default constraints for v2 and up
*/
x86_pmu.event_constraints = intel_gen_event_constraints;
pr_cont("generic architected perfmon, ");
break;
}
}
if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
}
x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
}
x86_pmu.intel_ctrl |=
((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
if (x86_pmu.event_constraints) {
/*
* event on fixed counter2 (REF_CYCLES) only works on this
* counter, so do not extend mask to generic counters
*/
for_each_event_constraint(c, x86_pmu.event_constraints) {
if (c->cmask != X86_RAW_EVENT_MASK
|| c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
continue;
}
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
c->weight += x86_pmu.num_counters;
}
}
return 0;
}
|
CWE-20
| 179,253 | 9,241 |
168687725266084829810589765130961493013
| null | null | null |
linux
|
8176cced706b5e5d15887584150764894e94e02f
| 1 |
static int perf_swevent_init(struct perf_event *event)
{
int event_id = event->attr.config;
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event_id) {
case PERF_COUNT_SW_CPU_CLOCK:
case PERF_COUNT_SW_TASK_CLOCK:
return -ENOENT;
default:
break;
}
if (event_id >= PERF_COUNT_SW_MAX)
return -ENOENT;
if (!event->parent) {
int err;
err = swevent_hlist_get(event);
if (err)
return err;
static_key_slow_inc(&perf_swevent_enabled[event_id]);
event->destroy = sw_perf_event_destroy;
}
return 0;
}
|
CWE-189
| 179,257 | 9,242 |
19870798287114216433645045562158663473
| null | null | null |
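The CWE-189 perf_swevent_init record above is the well-known case where attr.config (a u64) is truncated into a signed int, so a negative event_id slips past the PERF_COUNT_SW_MAX check and indexes perf_swevent_enabled[] out of bounds. A sketch of the fix direction, keeping the id unsigned 64-bit:
	u64 event_id = event->attr.config;
	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;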
linux
|
5f00110f7273f9ff04ac69a5f85bb535a4fd0987
| 1 |
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
struct shmem_sb_info config = *sbinfo;
unsigned long inodes;
int error = -EINVAL;
if (shmem_parse_options(data, &config, true))
return error;
spin_lock(&sbinfo->stat_lock);
inodes = sbinfo->max_inodes - sbinfo->free_inodes;
if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
goto out;
if (config.max_inodes < inodes)
goto out;
/*
* Those tests disallow limited->unlimited while any are in use;
* but we must separately disallow unlimited->limited, because
* in that case we have no record of how much is already in use.
*/
if (config.max_blocks && !sbinfo->max_blocks)
goto out;
if (config.max_inodes && !sbinfo->max_inodes)
goto out;
error = 0;
sbinfo->max_blocks = config.max_blocks;
sbinfo->max_inodes = config.max_inodes;
sbinfo->free_inodes = config.max_inodes - inodes;
mpol_put(sbinfo->mpol);
sbinfo->mpol = config.mpol; /* transfers initial ref */
out:
spin_unlock(&sbinfo->stat_lock);
return error;
}
|
CWE-399
| 179,299 | 9,246 |
69198739652828038349167530971157251076
| null | null | null |
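The CWE-399 tag on the shmem_remount_fs record above matches config.mpol: the struct copy inherits sbinfo->mpol, so a remount that names no mempolicy still "transfers" the old pointer after mpol_put(), setting up a use-after-free. A sketch of the likely fix:
	/* sketch: start with no inherited policy, before parsing options */
	config.mpol = NULL;
	/* and later, transfer only if the remount actually parsed one */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}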
linux
|
6e601a53566d84e1ffd25e7b6fe0b6894ffd79c0
| 1 |
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int err;
struct sock_diag_req *req = nlmsg_data(nlh);
const struct sock_diag_handler *hndl;
if (nlmsg_len(nlh) < sizeof(*req))
return -EINVAL;
hndl = sock_diag_lock_handler(req->sdiag_family);
if (hndl == NULL)
err = -ENOENT;
else
err = hndl->dump(skb, nlh);
sock_diag_unlock_handler(hndl);
return err;
}
|
CWE-20
| 179,300 | 9,247 |
81691601596043684378205537284430358856
| null | null | null |
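The CWE-20 __sock_diag_rcv_msg record above indexes the handler table with a family id taken straight from the netlink message. A sketch of the bounds check the fix likely adds:
	if (req->sdiag_family >= AF_MAX)
		return -EINVAL;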
linux
|
43da5f2e0d0c69ded3d51907d9552310a6b545e8
| 1 |
static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
{
struct compat_ifconf ifc32;
struct ifconf ifc;
struct ifconf __user *uifc;
struct compat_ifreq __user *ifr32;
struct ifreq __user *ifr;
unsigned int i, j;
int err;
if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
return -EFAULT;
if (ifc32.ifcbuf == 0) {
ifc32.ifc_len = 0;
ifc.ifc_len = 0;
ifc.ifc_req = NULL;
uifc = compat_alloc_user_space(sizeof(struct ifconf));
} else {
size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) *
sizeof(struct ifreq);
uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
ifc.ifc_len = len;
ifr = ifc.ifc_req = (void __user *)(uifc + 1);
ifr32 = compat_ptr(ifc32.ifcbuf);
for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) {
if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
return -EFAULT;
ifr++;
ifr32++;
}
}
if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
return -EFAULT;
err = dev_ioctl(net, SIOCGIFCONF, uifc);
if (err)
return err;
if (copy_from_user(&ifc, uifc, sizeof(struct ifconf)))
return -EFAULT;
ifr = ifc.ifc_req;
ifr32 = compat_ptr(ifc32.ifcbuf);
for (i = 0, j = 0;
i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) {
if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq)))
return -EFAULT;
ifr32++;
ifr++;
}
if (ifc32.ifcbuf == 0) {
/* Translate from 64-bit structure multiple to
* a 32-bit one.
*/
i = ifc.ifc_len;
i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq));
ifc32.ifc_len = i;
} else {
ifc32.ifc_len = i;
}
if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf)))
return -EFAULT;
return 0;
}
|
CWE-200
| 179,359 | 9,252 |
260748882966970711791833207647662319891
| null | null | null |
linux
|
4c87308bdea31a7b4828a51f6156e6f721a1fcc9
| 1 |
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
struct xfrm_algo *algo;
struct nlattr *nla;
nla = nla_reserve(skb, XFRMA_ALG_AUTH,
sizeof(*algo) + (auth->alg_key_len + 7) / 8);
if (!nla)
return -EMSGSIZE;
algo = nla_data(nla);
strcpy(algo->alg_name, auth->alg_name);
memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
algo->alg_key_len = auth->alg_key_len;
return 0;
}
|
CWE-200
| 179,360 | 9,253 |
16551793964917884659495147932563448751
| null | null | null |
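In the CWE-200 copy_to_user_auth record above, strcpy() fills only part of the fixed-size alg_name field inside the netlink attribute; the remaining bytes stay uninitialized and leak to userspace. A sketch of the likely fix, zero-padding the whole field:
	strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));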
krb5
|
d1f707024f1d0af6e54a18885322d70fa15ec4d3
| 1 |
krb5_ldap_get_password_policy_from_dn(krb5_context context, char *pol_name,
char *pol_dn, osa_policy_ent_t *policy)
{
krb5_error_code st=0, tempst=0;
LDAP *ld=NULL;
LDAPMessage *result=NULL,*ent=NULL;
kdb5_dal_handle *dal_handle=NULL;
krb5_ldap_context *ldap_context=NULL;
krb5_ldap_server_handle *ldap_server_handle=NULL;
/* Clear the global error string */
krb5_clear_error_message(context);
/* validate the input parameters */
if (pol_dn == NULL)
return EINVAL;
*policy = NULL;
SETUP_CONTEXT();
GET_HANDLE();
*(policy) = (osa_policy_ent_t) malloc(sizeof(osa_policy_ent_rec));
if (*policy == NULL) {
st = ENOMEM;
goto cleanup;
}
memset(*policy, 0, sizeof(osa_policy_ent_rec));
LDAP_SEARCH(pol_dn, LDAP_SCOPE_BASE, "(objectclass=krbPwdPolicy)", password_policy_attributes);
ent=ldap_first_entry(ld, result);
if (ent != NULL) {
if ((st = populate_policy(context, ld, ent, pol_name, *policy)) != 0)
goto cleanup;
}
cleanup:
ldap_msgfree(result);
if (st != 0) {
if (*policy != NULL) {
krb5_ldap_free_password_policy(context, *policy);
*policy = NULL;
}
}
krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle);
return st;
}
| 179,446 | 9,258 |
312745617712492783578690133075920437465
| null | null | null |
|
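In the krb5_ldap_get_password_policy_from_dn record above, a search that matches nothing returns success with a zero-filled policy whose name is NULL, which callers then dereference. A sketch of the likely fix direction (the error code is an assumption):
    ent = ldap_first_entry(ld, result);
    if (ent == NULL) {
        st = KRB5_KDB_NOENTRY;
        goto cleanup;
    }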
linux
|
07f4d9d74a04aa7c72c5dae0ef97565f28f17b92
| 1 |
static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
int op_flag,
unsigned int size,
unsigned int __user *tlv)
{
struct user_element *ue = kcontrol->private_data;
int change = 0;
void *new_data;
if (op_flag > 0) {
if (size > 1024 * 128) /* sane value */
return -EINVAL;
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
} else {
if (! ue->tlv_data_size || ! ue->tlv_data)
return -ENXIO;
if (size < ue->tlv_data_size)
return -ENOSPC;
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
return -EFAULT;
}
return change;
}
|
CWE-362
| 179,471 | 9,260 |
95140620080426878026149051356488819207
| null | null | null |
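The CWE-362 tag on the snd_ctl_elem_user_tlv record above fits the unlocked read-modify-write of ue->tlv_data: two concurrent TLV ioctls can free and reuse the buffer. A sketch of the fix direction, assuming a per-card mutex is available (the card pointer and lock name here are assumptions):
	mutex_lock(&ue->card->user_ctl_lock);
	kfree(ue->tlv_data);
	ue->tlv_data = new_data;
	ue->tlv_data_size = size;
	mutex_unlock(&ue->card->user_ctl_lock);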
krb5
|
dc7ed55c689d57de7f7408b34631bf06fec9dab1
| 1 |
krb5_encode_krbsecretkey(krb5_key_data *key_data_in, int n_key_data,
krb5_kvno mkvno) {
struct berval **ret = NULL;
int currkvno;
int num_versions = 1;
int i, j, last;
krb5_error_code err = 0;
krb5_key_data *key_data;
if (n_key_data <= 0)
return NULL;
/* Make a shallow copy of the key data so we can alter it. */
key_data = k5calloc(n_key_data, sizeof(*key_data), &err);
    if (key_data == NULL)
goto cleanup;
memcpy(key_data, key_data_in, n_key_data * sizeof(*key_data));
/* Unpatched krb5 1.11 and 1.12 cannot decode KrbKey sequences with no salt
* field. For compatibility, always encode a salt field. */
for (i = 0; i < n_key_data; i++) {
if (key_data[i].key_data_ver == 1) {
key_data[i].key_data_ver = 2;
key_data[i].key_data_type[1] = KRB5_KDB_SALTTYPE_NORMAL;
key_data[i].key_data_length[1] = 0;
key_data[i].key_data_contents[1] = NULL;
}
}
/* Find the number of key versions */
for (i = 0; i < n_key_data - 1; i++)
if (key_data[i].key_data_kvno != key_data[i + 1].key_data_kvno)
num_versions++;
ret = (struct berval **) calloc (num_versions + 1, sizeof (struct berval *));
if (ret == NULL) {
err = ENOMEM;
goto cleanup;
}
for (i = 0, last = 0, j = 0, currkvno = key_data[0].key_data_kvno; i < n_key_data; i++) {
krb5_data *code;
if (i == n_key_data - 1 || key_data[i + 1].key_data_kvno != currkvno) {
ret[j] = k5alloc(sizeof(struct berval), &err);
if (ret[j] == NULL)
goto cleanup;
err = asn1_encode_sequence_of_keys(key_data + last,
(krb5_int16)i - last + 1,
mkvno, &code);
if (err)
goto cleanup;
/*CHECK_NULL(ret[j]); */
ret[j]->bv_len = code->length;
ret[j]->bv_val = code->data;
free(code);
j++;
last = i + 1;
currkvno = key_data[i].key_data_kvno;
}
}
ret[num_versions] = NULL;
cleanup:
free(key_data);
if (err != 0) {
if (ret != NULL) {
for (i = 0; i <= num_versions; i++)
if (ret[i] != NULL)
free (ret[i]);
free (ret);
ret = NULL;
}
}
return ret;
}
|
CWE-189
| 179,481 | 9,262 |
106082474012218971477368852592152122324
| null | null | null |
linux
|
a642fc305053cc1c6e47e4f4df327895747ab485
| 1 |
static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
{
u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 exit_reason = vmx->exit_reason;
trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
vmcs_readl(EXIT_QUALIFICATION),
vmx->idt_vectoring_info,
intr_info,
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);
if (vmx->nested.nested_run_pending)
return 0;
if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR));
return 1;
}
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info))
return 0;
else if (is_page_fault(intr_info))
return enable_ept;
else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS))
return 0;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
return 0;
case EXIT_REASON_TRIPLE_FAULT:
return 1;
case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH:
return 1;
case EXIT_REASON_CPUID:
if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
return 0;
return 1;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD:
return 1;
case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
case EXIT_REASON_RDTSC:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
case EXIT_REASON_INVEPT:
/*
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
*/
return 1;
case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS:
return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
case EXIT_REASON_IO_INSTRUCTION:
return nested_vmx_exit_handled_io(vcpu, vmcs12);
case EXIT_REASON_MSR_READ:
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
return 1;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
case EXIT_REASON_PAUSE_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY:
return 0;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
case EXIT_REASON_APIC_ACCESS:
return nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
* used, and the nested mmu code discovers that the address is
* missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault()
*/
return 0;
case EXIT_REASON_EPT_MISCONFIG:
/*
* L2 never uses directly L1's EPT, but rather L0's own EPT
* table (shadow on EPT) or a merged EPT table that L0 built
* (EPT on EPT). So any problems with the structure of the
* table is L0's fault.
*/
return 0;
case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
return 1;
default:
return 1;
}
}
|
CWE-264
| 179,516 | 9,267 |
25917898868493212617815396114791261383
| null | null | null |
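For the CWE-264 nested_vmx_exit_handled record above, a VMX instruction missing from the case list (INVVPID was the reported one) falls into the default branch instead of being reflected to L1 alongside the other VMX instructions. A sketch of the fix direction, assuming the exit-reason constant is defined:
	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
		/* VMX instructions trap unconditionally; reflect to L1 */
		return 1;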
linux
|
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
| 1 |
static int wrmsr_interception(struct vcpu_svm *svm)
{
struct msr_data msr;
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
msr.data = data;
msr.index = ecx;
msr.host_initiated = false;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
if (svm_set_msr(&svm->vcpu, &msr)) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(&svm->vcpu, 0);
} else {
trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
|
CWE-264
| 179,520 | 9,268 |
142189173362958506501832317068945782185
| null | null | null |
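The CWE-264 tag on the wrmsr_interception record above is the guest-supplied MSR value reaching the host WRMSR unchecked; a non-canonical address in, e.g., MSR_IA32_SYSENTER_EIP faults in host context. A sketch of the fix direction, routing through the common setter that can validate the value:
	if (kvm_set_msr(&svm->vcpu, &msr)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(&svm->vcpu, 0);
	}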
linux
|
05ab8f2647e4221cbdb3856dd7d32bd5407316b3
| 1 |
static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
|
CWE-189
| 179,555 | 9,271 |
257641762135738592354784648632560996645
| null | null | null |
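The CWE-189 __skb_get_nlattr record above hinges on skb->len - sizeof(struct nlattr) underflowing when the packet is shorter than one attribute header, turning the bound into a huge unsigned value. A sketch of the fix:
	if (skb->len < sizeof(struct nlattr))
		return 0;
	if (A > skb->len - sizeof(struct nlattr))
		return 0;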
linux
|
2172fa709ab32ca60e86179dc67d0857be8e2c98
| 1 |
static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
u32 *sid, u32 def_sid, gfp_t gfp_flags,
int force)
{
char *scontext2, *str = NULL;
struct context context;
int rc = 0;
if (!ss_initialized) {
int i;
for (i = 1; i < SECINITSID_NUM; i++) {
if (!strcmp(initial_sid_to_string[i], scontext)) {
*sid = i;
return 0;
}
}
*sid = SECINITSID_KERNEL;
return 0;
}
*sid = SECSID_NULL;
/* Copy the string so that we can modify the copy as we parse it. */
scontext2 = kmalloc(scontext_len + 1, gfp_flags);
if (!scontext2)
return -ENOMEM;
memcpy(scontext2, scontext, scontext_len);
scontext2[scontext_len] = 0;
if (force) {
/* Save another copy for storing in uninterpreted form */
rc = -ENOMEM;
str = kstrdup(scontext2, gfp_flags);
if (!str)
goto out;
}
read_lock(&policy_rwlock);
rc = string_to_context_struct(&policydb, &sidtab, scontext2,
scontext_len, &context, def_sid);
if (rc == -EINVAL && force) {
context.str = str;
context.len = scontext_len;
str = NULL;
} else if (rc)
goto out_unlock;
rc = sidtab_context_to_sid(&sidtab, &context, sid);
context_destroy(&context);
out_unlock:
read_unlock(&policy_rwlock);
out:
kfree(scontext2);
kfree(str);
return rc;
}
|
CWE-20
| 179,604 | 9,275 |
25979071260218318113704712414040708949
| null | null | null |
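For the CWE-20 security_context_to_sid_core record above, a zero scontext_len reaches the allocation and string parsing and can trip a kernel BUG on an empty security context. A sketch of the early reject the fix likely adds:
	if (!scontext_len)
		return -EINVAL;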
linux
|
ef87dbe7614341c2e7bfe8d32fcb7028cc97442c
| 1 |
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
{
struct floppy_raw_cmd *ptr;
int ret;
int i;
*rcmd = NULL;
loop:
ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
if (!ptr)
return -ENOMEM;
*rcmd = ptr;
ret = copy_from_user(ptr, param, sizeof(*ptr));
if (ret)
return -EFAULT;
ptr->next = NULL;
ptr->buffer_length = 0;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > 33)
/* the command may now also take up the space
* initially intended for the reply & the
* reply count. Needed for long 82078 commands
* such as RESTORE, which takes ... 17 command
* bytes. Murphy's law #137: When you reserve
* 16 bytes for a structure, you'll one day
* discover that you really need 17...
*/
return -EINVAL;
for (i = 0; i < 16; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
ptr->kernel_data = NULL;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0)
return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
if (!ptr->kernel_data)
return -ENOMEM;
ptr->buffer_length = ptr->length;
}
if (ptr->flags & FD_RAW_WRITE) {
ret = fd_copyin(ptr->data, ptr->kernel_data, ptr->length);
if (ret)
return ret;
}
if (ptr->flags & FD_RAW_MORE) {
rcmd = &(ptr->next);
ptr->rate &= 0x43;
goto loop;
}
return 0;
}
|
CWE-264
| 179,607 | 9,276 |
116582411855757033491178694505916418899
| null | null | null |
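The CWE-264 tag on the raw_cmd_copyin record above fits the whole-struct copy_from_user(): on a partial fault the function returns with user-controlled kernel_data/next still in the command, and the caller's cleanup path then trusts them. A sketch of the fix direction, sanitizing kernel-only members before honoring the copy result:
	ret = copy_from_user(ptr, param, sizeof(*ptr));
	ptr->next = NULL;
	ptr->buffer_length = 0;
	ptr->kernel_data = NULL;
	if (ret)
		return -EFAULT;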
torque
|
3ed749263abe3d69fa3626d142a5789dcb5a5684
| 1 |
int disrsi_(
int stream,
int *negate,
unsigned *value,
unsigned count)
{
int c;
unsigned locval;
unsigned ndigs;
char *cp;
char scratch[DIS_BUFSIZ+1];
assert(negate != NULL);
assert(value != NULL);
assert(count);
assert(stream >= 0);
assert(dis_getc != NULL);
assert(dis_gets != NULL);
memset(scratch, 0, DIS_BUFSIZ+1);
if (dis_umaxd == 0)
disiui_();
switch (c = (*dis_getc)(stream))
{
case '-':
case '+':
*negate = c == '-';
if ((*dis_gets)(stream, scratch, count) != (int)count)
{
return(DIS_EOD);
}
if (count >= dis_umaxd)
{
if (count > dis_umaxd)
goto overflow;
if (memcmp(scratch, dis_umax, dis_umaxd) > 0)
goto overflow;
}
cp = scratch;
locval = 0;
do
{
if (((c = *cp++) < '0') || (c > '9'))
{
return(DIS_NONDIGIT);
}
locval = 10 * locval + c - '0';
}
while (--count);
*value = locval;
return (DIS_SUCCESS);
break;
case '0':
return (DIS_LEADZRO);
break;
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
ndigs = c - '0';
if (count > 1)
{
if ((*dis_gets)(stream, scratch + 1, count - 1) != (int)count - 1)
{
return(DIS_EOD);
}
cp = scratch;
if (count >= dis_umaxd)
{
if (count > dis_umaxd)
break;
*cp = c;
if (memcmp(scratch, dis_umax, dis_umaxd) > 0)
break;
}
while (--count)
{
if (((c = *++cp) < '0') || (c > '9'))
{
return(DIS_NONDIGIT);
}
ndigs = 10 * ndigs + c - '0';
}
} /* END if (count > 1) */
return(disrsi_(stream, negate, value, ndigs));
/*NOTREACHED*/
break;
case -1:
return(DIS_EOD);
/*NOTREACHED*/
break;
case -2:
return(DIS_EOF);
/*NOTREACHED*/
break;
default:
return(DIS_NONDIGIT);
/*NOTREACHED*/
break;
}
*negate = FALSE;
overflow:
*value = UINT_MAX;
return(DIS_OVERFLOW);
} /* END disrsi_() */
|
CWE-119
| 179,613 | 9,277 |
119468526822998809894381995546326164596
| null | null | null |
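In the CWE-119 disrsi_ record above, count arrives off the wire and is passed to (*dis_gets)() against the fixed scratch[DIS_BUFSIZ+1] buffer with no upper bound. A sketch of the check the fix likely adds, after the asserts and in the file's own brace style:
  if (count > DIS_BUFSIZ)
    {
    return(DIS_OVERFLOW);
    }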
linux
|
edfbbf388f293d70bf4b7c0bc38774d05e6f711a
| 1 |
static long aio_read_events_ring(struct kioctx *ctx,
struct io_event __user *event, long nr)
{
struct aio_ring *ring;
unsigned head, tail, pos;
long ret = 0;
int copy_ret;
mutex_lock(&ctx->ring_lock);
/* Access to ->ring_pages here is protected by ctx->ring_lock. */
ring = kmap_atomic(ctx->ring_pages[0]);
head = ring->head;
tail = ring->tail;
kunmap_atomic(ring);
pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
if (head == tail)
goto out;
while (ret < nr) {
long avail;
struct io_event *ev;
struct page *page;
avail = (head <= tail ? tail : ctx->nr_events) - head;
if (head == tail)
break;
avail = min(avail, nr - ret);
avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));
pos = head + AIO_EVENTS_OFFSET;
page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
pos %= AIO_EVENTS_PER_PAGE;
ev = kmap(page);
copy_ret = copy_to_user(event + ret, ev + pos,
sizeof(*ev) * avail);
kunmap(page);
if (unlikely(copy_ret)) {
ret = -EFAULT;
goto out;
}
ret += avail;
head += avail;
head %= ctx->nr_events;
}
ring = kmap_atomic(ctx->ring_pages[0]);
ring->head = head;
kunmap_atomic(ring);
flush_dcache_page(ctx->ring_pages[0]);
pr_debug("%li h%u t%u\n", ret, head, tail);
out:
mutex_unlock(&ctx->ring_lock);
return ret;
}
| 179,620 | 9,279 |
175931741530477547556665829727126584269
| null | null | null |
|
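In the aio_read_events_ring record above, head and tail are read from a ring page that userspace can map and scribble on; unclamped values index ctx->ring_pages[] out of range and disclose kernel memory. A sketch of the clamp the fix likely applies right after reading them:
	head %= ctx->nr_events;
	tail %= ctx->nr_events;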
linux
|
1fd819ecb90cc9b822cd84d3056ddba315d3340f
| 1 |
struct sk_buff *skb_segment(struct sk_buff *head_skb,
netdev_features_t features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
skb_frag_t *frag = skb_shinfo(head_skb)->frags;
unsigned int mss = skb_shinfo(head_skb)->gso_size;
unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
unsigned int offset = doffset;
unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
unsigned int headroom;
unsigned int len;
__be16 proto;
bool csum;
int sg = !!(features & NETIF_F_SG);
int nfrags = skb_shinfo(head_skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
int pos;
proto = skb_network_protocol(head_skb);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
csum = !!can_checksum_protocol(features, proto);
__skb_push(head_skb, doffset);
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
do {
struct sk_buff *nskb;
skb_frag_t *nskb_frag;
int hsize;
int size;
len = head_skb->len - offset;
if (len > mss)
len = mss;
hsize = skb_headlen(head_skb) - offset;
if (hsize < 0)
hsize = 0;
if (hsize > len || !sg)
hsize = len;
if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
(skb_headlen(list_skb) == len || sg)) {
BUG_ON(skb_headlen(list_skb) > len);
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
pos += skb_headlen(list_skb);
while (pos < offset + len) {
BUG_ON(i >= nfrags);
size = skb_frag_size(frag);
if (pos + size > offset + len)
break;
i++;
pos += size;
frag++;
}
nskb = skb_clone(list_skb, GFP_ATOMIC);
list_skb = list_skb->next;
if (unlikely(!nskb))
goto err;
if (unlikely(pskb_trim(nskb, len))) {
kfree_skb(nskb);
goto err;
}
hsize = skb_end_offset(nskb);
if (skb_cow_head(nskb, doffset + headroom)) {
kfree_skb(nskb);
goto err;
}
nskb->truesize += skb_end_offset(nskb) - hsize;
skb_release_head_state(nskb);
__skb_push(nskb, doffset);
} else {
nskb = __alloc_skb(hsize + doffset + headroom,
GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
NUMA_NO_NODE);
if (unlikely(!nskb))
goto err;
skb_reserve(nskb, headroom);
__skb_put(nskb, doffset);
}
if (segs)
tail->next = nskb;
else
segs = nskb;
tail = nskb;
__copy_skb_header(nskb, head_skb);
nskb->mac_len = head_skb->mac_len;
skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
nskb->data - tnl_hlen,
doffset + tnl_hlen);
if (nskb->len == len + doffset)
goto perform_csum_check;
if (!sg) {
nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb, len),
len, 0);
continue;
}
nskb_frag = skb_shinfo(nskb)->frags;
skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
SKBTX_SHARED_FRAG;
while (pos < offset + len) {
if (i >= nfrags) {
BUG_ON(skb_headlen(list_skb));
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
BUG_ON(!nfrags);
list_skb = list_skb->next;
}
if (unlikely(skb_shinfo(nskb)->nr_frags >=
MAX_SKB_FRAGS)) {
net_warn_ratelimited(
"skb_segment: too many frags: %u %u\n",
pos, mss);
goto err;
}
*nskb_frag = *frag;
__skb_frag_ref(nskb_frag);
size = skb_frag_size(nskb_frag);
if (pos < offset) {
nskb_frag->page_offset += offset - pos;
skb_frag_size_sub(nskb_frag, offset - pos);
}
skb_shinfo(nskb)->nr_frags++;
if (pos + size <= offset + len) {
i++;
frag++;
pos += size;
} else {
skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
goto skip_fraglist;
}
nskb_frag++;
}
skip_fraglist:
nskb->data_len = len - hsize;
nskb->len += nskb->data_len;
nskb->truesize += nskb->data_len;
perform_csum_check:
if (!csum) {
nskb->csum = skb_checksum(nskb, doffset,
nskb->len - doffset, 0);
nskb->ip_summed = CHECKSUM_NONE;
}
} while ((offset += len) < head_skb->len);
return segs;
err:
kfree_skb_list(segs);
return ERR_PTR(err);
}
|
CWE-416
| 179,630 | 9,281 |
183173230648926987878497484132451117155
| null | null | null |
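Note on the skb_segment record above (CWE-416): use-after-free names the class where memory is reached through a pointer that outlives its allocation, here via socket-buffer fragment state. A minimal defensive sketch in plain C, with all names hypothetical and nothing taken from kernel API, nulls the handle at the free site so a stale use fails a cheap check instead of touching freed memory:

    #include <stdlib.h>
    #include <string.h>

    struct frag { char data[64]; };

    static void release(struct frag **fp)
    {
        free(*fp);
        *fp = NULL;   /* poison the handle at the point of release */
    }

    int main(void)
    {
        struct frag *f = malloc(sizeof(*f));
        if (!f)
            return 1;
        release(&f);
        if (f)        /* stale handle is NULL now, so this path is dead */
            memset(f->data, 0, sizeof(f->data));
        return 0;
    }

In kernel code the same invariant is usually enforced with reference counts or RCU rather than a NULL check, but the rule is identical: no path may dereference a handle after its object is released.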
linux
|
ec0223ec48a90cb605244b45f7c62de856403729
| 1 |
sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_association *new_asoc;
sctp_init_chunk_t *peer_init;
struct sctp_chunk *repl;
struct sctp_ulpevent *ev, *ai_ev = NULL;
int error = 0;
struct sctp_chunk *err_chk_p;
struct sock *sk;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
}
/* Make sure that the COOKIE_ECHO chunk has a valid length.
* In this case, we check that we have enough for at least a
* chunk header. More detailed verification is done
* in sctp_unpack_cookie().
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* If the endpoint is not listening or if the number of associations
* on the TCP-style socket exceed the max backlog, respond with an
* ABORT.
*/
sk = ep->base.sk;
if (!sctp_sstate(sk, LISTENING) ||
(sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr =
(struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
/* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint
* "Z" will reply with a COOKIE ACK chunk after building a TCB
* and moving to the ESTABLISHED state.
*/
new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
&err_chk_p);
/* FIXME:
* If the re-build failed, what is the proper error path
* from here?
*
* [We should abort the association. --piggy]
*/
if (!new_asoc) {
/* FIXME: Several errors are possible. A bad cookie should
* be silently discarded, but think about logging it too.
*/
switch (error) {
case -SCTP_IERROR_NOMEM:
goto nomem;
case -SCTP_IERROR_STALE_COOKIE:
sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
err_chk_p);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case -SCTP_IERROR_BAD_SIG:
default:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
/* Delay state machine commands until later.
*
	 * Re-building the bind address for the association is already
	 * done in sctp_unpack_cookie().
*/
/* This is a brand-new association, so these are not yet side
* effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk,
&chunk->subh.cookie_hdr->c.peer_addr,
peer_init, GFP_ATOMIC))
goto nomem_init;
	/* SCTP-AUTH: Now that we've populated required fields in
	 * sctp_process_init, set up the association shared keys as
* necessary so that we can potentially authenticate the ACK
*/
error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
if (error)
goto nomem_init;
/* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
* is supposed to be authenticated and we have to do delayed
* authentication. We've just recreated the association using
* the information in the cookie and now it's much easier to
* do the authentication.
*/
if (chunk->auth_chunk) {
struct sctp_chunk auth;
sctp_ierror_t ret;
/* set-up our fake chunk so that we can process it */
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
auth.sctp_hdr = chunk->sctp_hdr;
auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
sizeof(sctp_chunkhdr_t));
skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
/* We can now safely free the auth_chunk clone */
kfree_skb(chunk->auth_chunk);
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem_init;
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose to
* send the Communication Up notification to the SCTP user
* upon reception of a valid COOKIE ECHO chunk.
*/
ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
new_asoc->c.sinit_num_ostreams,
new_asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem_ev;
/* Sockets API Draft Section 5.3.1.6
	 * When a peer sends an Adaptation Layer Indication parameter, SCTP
	 * delivers this notification to inform the application of the
	 * peer's requested adaptation layer.
*/
if (new_asoc->peer.adaptation_ind) {
ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc,
GFP_ATOMIC);
if (!ai_ev)
goto nomem_aiev;
}
/* Add all the state machine commands now since we've created
* everything. This way we don't introduce memory corruptions
	 * during side-effect processing and correctly count established
* associations.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* This will send the COOKIE ACK */
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/* Queue the ASSOC_CHANGE event */
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Send up the Adaptation Layer Indication event */
if (ai_ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ai_ev));
return SCTP_DISPOSITION_CONSUME;
nomem_aiev:
sctp_ulpevent_free(ev);
nomem_ev:
sctp_chunk_free(repl);
nomem_init:
sctp_association_free(new_asoc);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
|
CWE-20
| 179,631 | 9,282 |
212126296819801914612125910080964082732
| null | null | null |
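Note on the sctp_sf_do_5_1D_ce record above (CWE-20): the function processes a bundled AUTH chunk without first confirming that AUTH was actually negotiated for the association; the commit recorded above reportedly adds exactly that capability check before the fake-chunk authentication. A self-contained sketch of the validate-before-use pattern, with hypothetical types and names:

    #include <stdio.h>

    /* Hypothetical message: a flag claims an auth blob is attached. */
    struct msg {
        int has_auth;
        const char *auth_blob;   /* may be NULL even when has_auth is set */
    };

    static int process(const struct msg *m, int auth_negotiated)
    {
        if (m->has_auth) {
            /* validate capability and payload before any use */
            if (!auth_negotiated || m->auth_blob == NULL)
                return -1;       /* discard, don't dereference */
            printf("auth: %s\n", m->auth_blob);
        }
        return 0;
    }

    int main(void)
    {
        struct msg m = { 1, NULL };          /* claims auth, carries none */
        return process(&m, 0) == -1 ? 0 : 1;
    }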
linux
|
d8316f3991d207fe32881a9ac20241be8fa2bad0
| 1 |
static int get_rx_bufs(struct vhost_virtqueue *vq,
struct vring_used_elem *heads,
int datalen,
unsigned *iovcount,
struct vhost_log *log,
unsigned *log_num,
unsigned int quota)
{
unsigned int out, in;
int seg = 0;
int headcount = 0;
unsigned d;
int r, nlogs = 0;
while (datalen > 0 && headcount < quota) {
if (unlikely(seg >= UIO_MAXIOV)) {
r = -ENOBUFS;
goto err;
}
d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
if (d == vq->num) {
r = 0;
goto err;
}
if (unlikely(out || in <= 0)) {
vq_err(vq, "unexpected descriptor format for RX: "
"out %d, in %d\n", out, in);
r = -EINVAL;
goto err;
}
if (unlikely(log)) {
nlogs += *log_num;
log += *log_num;
}
heads[headcount].id = d;
heads[headcount].len = iov_length(vq->iov + seg, in);
datalen -= heads[headcount].len;
++headcount;
seg += in;
}
heads[headcount - 1].len += datalen;
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
return r;
}
|
CWE-20
| 179,632 | 9,283 |
91983981836047379239393341888620978239
| null | null | null |
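Note on the get_rx_bufs record above (CWE-20): one hazard visible in the code is the unconditional heads[headcount - 1].len += datalen after the loop; if the loop body never executes (for instance, a zero quota), the index underflows the array. A small sketch of the guarded pattern, with hypothetical names:

    #include <stdio.h>

    #define MAX 8

    static int fill(unsigned int lens[MAX], unsigned int quota, int leftover)
    {
        unsigned int count = 0;
        while (count < quota && count < MAX)
            lens[count++] = 16;
        if (count == 0)              /* guard: nothing was filled */
            return -1;
        lens[count - 1] += leftover; /* safe only because count >= 1 here */
        return (int)count;
    }

    int main(void)
    {
        unsigned int lens[MAX];
        printf("%d\n", fill(lens, 0, 4));  /* -1 instead of a lens[-1] write */
        return 0;
    }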
linux
|
0305cd5f7fca85dae392b9ba85b116896eb7c1c7
| 1 |
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode,
u64 new_size, u32 min_type)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
struct btrfs_key found_key;
u64 extent_start = 0;
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
u64 last_size = new_size;
u32 found_type = (u8)-1;
int found_extent;
int del_item;
int pending_del_nr = 0;
int pending_del_slot = 0;
int extent_type = -1;
int ret;
int err = 0;
u64 ino = btrfs_ino(inode);
u64 bytes_deleted = 0;
bool be_nice = 0;
bool should_throttle = 0;
bool should_end = 0;
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
/*
* for non-free space inodes and ref cows, we want to back off from
* time to time
*/
if (!btrfs_is_free_space_inode(inode) &&
test_bit(BTRFS_ROOT_REF_COWS, &root->state))
be_nice = 1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = -1;
/*
* We want to drop from the next block forward in case this new size is
* not block aligned since we will be keeping the last block of the
* extent just the way it is.
*/
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, ALIGN(new_size,
root->sectorsize), (u64)-1, 0);
/*
* This function is also used to drop the items in the log tree before
* we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the logged items. So we shouldn't kill the delayed
* items.
*/
if (min_type == 0 && root == BTRFS_I(inode)->root)
btrfs_kill_delayed_inode_items(inode);
key.objectid = ino;
key.offset = (u64)-1;
key.type = (u8)-1;
search_again:
/*
* with a 16K leaf size and 128MB extents, you can actually queue
* up a huge file in a single leaf. Most of the time that
* bytes_deleted is > 0, it will be huge by the time we get here
*/
if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
if (btrfs_should_end_transaction(trans, root)) {
err = -EAGAIN;
goto error;
}
}
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
goto out;
}
if (ret > 0) {
/* there are no items in the tree for us to truncate, we're
* done
*/
if (path->slots[0] == 0)
goto out;
path->slots[0]--;
}
while (1) {
fi = NULL;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
found_type = found_key.type;
if (found_key.objectid != ino)
break;
if (found_type < min_type)
break;
item_end = found_key.offset;
if (found_type == BTRFS_EXTENT_DATA_KEY) {
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
item_end +=
btrfs_file_extent_num_bytes(leaf, fi);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
item_end += btrfs_file_extent_inline_len(leaf,
path->slots[0], fi);
}
item_end--;
}
if (found_type > min_type) {
del_item = 1;
} else {
if (item_end < new_size)
break;
if (found_key.offset >= new_size)
del_item = 1;
else
del_item = 0;
}
found_extent = 0;
/* FIXME, shrink the extent if the ref count is only 1 */
if (found_type != BTRFS_EXTENT_DATA_KEY)
goto delete;
if (del_item)
last_size = found_key.offset;
else
last_size = new_size;
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
u64 num_dec;
extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
if (!del_item) {
u64 orig_num_bytes =
btrfs_file_extent_num_bytes(leaf, fi);
extent_num_bytes = ALIGN(new_size -
found_key.offset,
root->sectorsize);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_num_bytes);
num_dec = (orig_num_bytes -
extent_num_bytes);
if (test_bit(BTRFS_ROOT_REF_COWS,
&root->state) &&
extent_start != 0)
inode_sub_bytes(inode, num_dec);
btrfs_mark_buffer_dirty(leaf);
} else {
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf,
fi);
extent_offset = found_key.offset -
btrfs_file_extent_offset(leaf, fi);
/* FIXME blocksize != 4096 */
num_dec = btrfs_file_extent_num_bytes(leaf, fi);
if (extent_start != 0) {
found_extent = 1;
if (test_bit(BTRFS_ROOT_REF_COWS,
&root->state))
inode_sub_bytes(inode, num_dec);
}
}
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
/*
* we can't truncate inline items that have had
* special encodings
*/
if (!del_item &&
btrfs_file_extent_compression(leaf, fi) == 0 &&
btrfs_file_extent_encryption(leaf, fi) == 0 &&
btrfs_file_extent_other_encoding(leaf, fi) == 0) {
u32 size = new_size - found_key.offset;
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
inode_sub_bytes(inode, item_end + 1 -
new_size);
/*
* update the ram bytes to properly reflect
* the new size of our item
*/
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
size =
btrfs_file_extent_calc_inline_size(size);
btrfs_truncate_item(root, path, size, 1);
} else if (test_bit(BTRFS_ROOT_REF_COWS,
&root->state)) {
inode_sub_bytes(inode, item_end + 1 -
found_key.offset);
}
}
delete:
if (del_item) {
if (!pending_del_nr) {
/* no pending yet, add ourselves */
pending_del_slot = path->slots[0];
pending_del_nr = 1;
} else if (pending_del_nr &&
path->slots[0] + 1 == pending_del_slot) {
/* hop on the pending chunk */
pending_del_nr++;
pending_del_slot = path->slots[0];
} else {
BUG();
}
} else {
break;
}
should_throttle = 0;
if (found_extent &&
(test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == root->fs_info->tree_root)) {
btrfs_set_path_blocking(path);
bytes_deleted += extent_num_bytes;
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset, 0);
BUG_ON(ret);
if (btrfs_should_throttle_delayed_refs(trans, root))
btrfs_async_run_delayed_refs(root,
trans->delayed_ref_updates * 2, 0);
if (be_nice) {
if (truncate_space_check(trans, root,
extent_num_bytes)) {
should_end = 1;
}
if (btrfs_should_throttle_delayed_refs(trans,
root)) {
should_throttle = 1;
}
}
}
if (found_type == BTRFS_INODE_ITEM_KEY)
break;
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot ||
should_throttle || should_end) {
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
pending_del_nr);
if (ret) {
btrfs_abort_transaction(trans,
root, ret);
goto error;
}
pending_del_nr = 0;
}
btrfs_release_path(path);
if (should_throttle) {
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
ret = btrfs_run_delayed_refs(trans, root, updates * 2);
if (ret && !err)
err = ret;
}
}
/*
* if we failed to refill our space rsv, bail out
* and let the transaction restart
*/
if (should_end) {
err = -EAGAIN;
goto error;
}
goto search_again;
} else {
path->slots[0]--;
}
}
out:
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
if (ret)
btrfs_abort_transaction(trans, root, ret);
}
error:
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
btrfs_ordered_update_i_size(inode, last_size, NULL);
btrfs_free_path(path);
if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
ret = btrfs_run_delayed_refs(trans, root, updates * 2);
if (ret && !err)
err = ret;
}
}
return err;
}
|
CWE-200
| 179,739 | 9,288 |
202426911853200139148272740044181355255
| null | null | null |
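Note on the btrfs_truncate_inode_items record above (CWE-200, information exposure): the commit recorded above reportedly concerns truncation of inline and compressed extents, where shrinking an item without clearing the bytes past the new end can leave stale data readable later. A toy, self-contained sketch of the zero-the-tail pattern:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char item[16] = "SECRETSECRET";
        size_t new_size = 4;
        /* fix pattern: zero everything past the new logical end */
        memset(item + new_size, 0, sizeof(item) - new_size);
        printf("%s\n", item);   /* prints "SECR", no stale tail */
        return 0;
    }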
linux
|
451a2886b6bf90e2fb378f7c46c655450fb96e81
| 1 |
sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
unsigned char *long_cmdp = NULL;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
if (hp->cmd_len > BLK_MAX_CDB) {
long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
if (!long_cmdp)
return -ENOMEM;
}
/*
* NOTE
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
* preallocated requests are already in use, then using GFP_ATOMIC with
* blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
* will cause blk_get_request() to sleep until an active command
* completes, freeing up a request. Neither option is ideal, but
* GFP_KERNEL is the better choice to prevent userspace from getting an
* unexpected EWOULDBLOCK.
*
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
blk_rq_set_block_pc(rq);
if (hp->cmd_len > BLK_MAX_CDB)
rq->cmd = long_cmdp;
memcpy(rq->cmd, cmd, hp->cmd_len);
rq->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
rq->sense = srp->sense_b;
rq->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
!sfp->parentdp->device->host->unchecked_isa_dma &&
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
md = NULL;
else
md = &map_data;
if (md) {
if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
sg_link_reserve(sfp, srp, dxfer_len);
else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
if (res)
return res;
}
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
md->nr_entries = req_schp->k_use_sg;
md->offset = 0;
md->null_mapped = hp->dxferp ? 0 : 1;
if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
md->from_user = 1;
else
md->from_user = 0;
}
if (iov_count) {
int size = sizeof(struct iovec) * iov_count;
struct iovec *iov;
struct iov_iter i;
iov = memdup_user(hp->dxferp, size);
if (IS_ERR(iov))
return PTR_ERR(iov);
iov_iter_init(&i, rw, iov, iov_count,
min_t(size_t, hp->dxfer_len,
iov_length(iov, iov_count)));
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
hp->dxfer_len, GFP_ATOMIC);
if (!res) {
srp->bio = rq->bio;
if (!md) {
req_schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
}
}
return res;
}
|
CWE-189
| 179,765 | 9,292 |
199085659186409226955400327225790934156
| null | null | null |
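Note on the sg_start_req record above (CWE-189): int size = sizeof(struct iovec) * iov_count multiplies a user-supplied count with no upper bound, so the product can wrap; the fix commit's subject says it caps the element count. A hedged sketch of rejecting the count before multiplying, where MAXIOV is a hypothetical stand-in for UIO_MAXIOV:

    #include <stdlib.h>

    struct iovec_like { void *base; unsigned long len; };

    #define MAXIOV 1024              /* hypothetical cap, like UIO_MAXIOV */

    static void *alloc_iov(unsigned int iov_count)
    {
        if (iov_count == 0 || iov_count > MAXIOV)
            return NULL;             /* reject before any multiplication */
        return calloc(iov_count, sizeof(struct iovec_like));
    }

    int main(void)
    {
        void *p = alloc_iov(0xFFFFFFFFu);   /* huge count: rejected, no wrap */
        free(p);
        return p == NULL ? 0 : 1;
    }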
linux
|
59c816c1f24df0204e01851431d3bab3eb76719c
| 1 |
vhost_scsi_make_tpg(struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct vhost_scsi_tport *tport = container_of(wwn,
struct vhost_scsi_tport, tport_wwn);
struct vhost_scsi_tpg *tpg;
unsigned long tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
return ERR_PTR(-EINVAL);
tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct vhost_scsi_tpg");
return ERR_PTR(-ENOMEM);
}
mutex_init(&tpg->tv_tpg_mutex);
INIT_LIST_HEAD(&tpg->tv_tpg_list);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
}
mutex_lock(&vhost_scsi_mutex);
list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
mutex_unlock(&vhost_scsi_mutex);
return &tpg->se_tpg;
}
|
CWE-119
| 179,787 | 9,293 |
138793578128477909099133968364558272769
| null | null | null |
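Note on the vhost_scsi_make_tpg record above (CWE-119): the tag is parsed into an unsigned long and bounded with UINT_MAX, but the field it is stored in is narrower (a 16-bit value in the fixed version, as I recall), so a large tag truncates silently; the published fix reportedly tightens the bound to the stored type's range. A sketch of parsing bounded by the destination width, using a hypothetical helper:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int parse_tpgt(const char *s, unsigned short *out)
    {
        char *end;
        unsigned long v = strtoul(s, &end, 10);
        if (*s == '\0' || *end != '\0' || v > USHRT_MAX)
            return -EINVAL;          /* bound by the *stored* type's range */
        *out = (unsigned short)v;
        return 0;
    }

    int main(void)
    {
        unsigned short tpgt = 0;
        if (parse_tpgt("65536", &tpgt) != 0)
            printf("out-of-range tag rejected instead of truncated\n");
        return 0;
    }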
linux
|
ee73f656a604d5aa9df86a97102e4e462dd79924
| 1 |
static int pit_ioport_read(struct kvm_io_device *this,
gpa_t addr, int len, void *data)
{
struct kvm_pit *pit = dev_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
struct kvm *kvm = pit->kvm;
int ret, count;
struct kvm_kpit_channel_state *s;
if (!pit_in_range(addr))
return -EOPNOTSUPP;
addr &= KVM_PIT_CHANNEL_MASK;
s = &pit_state->channels[addr];
mutex_lock(&pit_state->lock);
if (s->status_latched) {
s->status_latched = 0;
ret = s->status;
} else if (s->count_latched) {
switch (s->count_latched) {
default:
case RW_STATE_LSB:
ret = s->latched_count & 0xff;
s->count_latched = 0;
break;
case RW_STATE_MSB:
ret = s->latched_count >> 8;
s->count_latched = 0;
break;
case RW_STATE_WORD0:
ret = s->latched_count & 0xff;
s->count_latched = RW_STATE_MSB;
break;
}
} else {
switch (s->read_state) {
default:
case RW_STATE_LSB:
count = pit_get_count(kvm, addr);
ret = count & 0xff;
break;
case RW_STATE_MSB:
count = pit_get_count(kvm, addr);
ret = (count >> 8) & 0xff;
break;
case RW_STATE_WORD0:
count = pit_get_count(kvm, addr);
ret = count & 0xff;
s->read_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
count = pit_get_count(kvm, addr);
ret = (count >> 8) & 0xff;
s->read_state = RW_STATE_WORD0;
break;
}
}
if (len > sizeof(ret))
len = sizeof(ret);
memcpy(data, (char *)&ret, len);
mutex_unlock(&pit_state->lock);
return 0;
}
|
CWE-119
| 179,799 | 9,294 |
56541777842879498149260732514920892693
| null | null | null |
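Note on the pit_ioport_read record above (CWE-119): addr &= KVM_PIT_CHANNEL_MASK yields values 0..3, while the PIT has three channel slots, so the value 3 indexes one element past the end; the fix reportedly adds an explicit bound for the control-word port. A toy version of mask-then-bound:

    #include <stdio.h>

    #define CHANNEL_MASK 0x3

    static int read_channel(const int channels[3], unsigned int addr)
    {
        addr &= CHANNEL_MASK;
        if (addr >= 3)               /* the mask alone does not bound this */
            return -1;
        return channels[addr];
    }

    int main(void)
    {
        int ch[3] = { 10, 20, 30 };
        printf("%d\n", read_channel(ch, 3));  /* -1, not an out-of-bounds read */
        return 0;
    }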
openssl
|
4924b37ee01f71ae19c94a8934b80eeb2f677932
| 1 |
int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
BIGNUM *b, *c = NULL, *u = NULL, *v = NULL, *tmp;
int ret = 0;
bn_check_top(a);
bn_check_top(p);
BN_CTX_start(ctx);
if ((b = BN_CTX_get(ctx)) == NULL)
goto err;
if ((c = BN_CTX_get(ctx)) == NULL)
goto err;
if ((u = BN_CTX_get(ctx)) == NULL)
goto err;
if ((v = BN_CTX_get(ctx)) == NULL)
goto err;
if (!BN_GF2m_mod(u, a, p))
goto err;
if (BN_is_zero(u))
goto err;
if (!BN_copy(v, p))
goto err;
# if 0
if (!BN_one(b))
goto err;
while (1) {
while (!BN_is_odd(u)) {
if (BN_is_zero(u))
goto err;
if (!BN_rshift1(u, u))
goto err;
if (BN_is_odd(b)) {
if (!BN_GF2m_add(b, b, p))
goto err;
}
if (!BN_rshift1(b, b))
goto err;
}
if (BN_abs_is_word(u, 1))
break;
if (BN_num_bits(u) < BN_num_bits(v)) {
tmp = u;
u = v;
v = tmp;
tmp = b;
b = c;
c = tmp;
}
if (!BN_GF2m_add(u, u, v))
goto err;
if (!BN_GF2m_add(b, b, c))
goto err;
}
# else
{
        int i;
        int ubits = BN_num_bits(u);
        int vbits = BN_num_bits(v);     /* v is copy of p */
        int top = p->top;
BN_ULONG *udp, *bdp, *vdp, *cdp;
bn_wexpand(u, top);
udp = u->d;
for (i = u->top; i < top; i++)
udp[i] = 0;
u->top = top;
bn_wexpand(b, top);
bdp = b->d;
bdp[0] = 1;
for (i = 1; i < top; i++)
bdp[i] = 0;
b->top = top;
bn_wexpand(c, top);
cdp = c->d;
for (i = 0; i < top; i++)
cdp[i] = 0;
c->top = top;
vdp = v->d; /* It pays off to "cache" *->d pointers,
* because it allows optimizer to be more
* aggressive. But we don't have to "cache"
* p->d, because *p is declared 'const'... */
while (1) {
while (ubits && !(udp[0] & 1)) {
BN_ULONG u0, u1, b0, b1, mask;
u0 = udp[0];
b0 = bdp[0];
mask = (BN_ULONG)0 - (b0 & 1);
b0 ^= p->d[0] & mask;
for (i = 0; i < top - 1; i++) {
u1 = udp[i + 1];
udp[i] = ((u0 >> 1) | (u1 << (BN_BITS2 - 1))) & BN_MASK2;
u0 = u1;
b1 = bdp[i + 1] ^ (p->d[i + 1] & mask);
bdp[i] = ((b0 >> 1) | (b1 << (BN_BITS2 - 1))) & BN_MASK2;
b0 = b1;
}
udp[i] = u0 >> 1;
bdp[i] = b0 >> 1;
ubits--;
}
if (ubits <= BN_BITS2 && udp[0] == 1)
break;
if (ubits < vbits) {
i = ubits;
ubits = vbits;
vbits = i;
tmp = u;
u = v;
v = tmp;
tmp = b;
b = c;
c = tmp;
udp = vdp;
vdp = v->d;
bdp = cdp;
cdp = c->d;
}
for (i = 0; i < top; i++) {
udp[i] ^= vdp[i];
bdp[i] ^= cdp[i];
}
if (ubits == vbits) {
BN_ULONG ul;
int utop = (ubits - 1) / BN_BITS2;
while ((ul = udp[utop]) == 0 && utop)
utop--;
ubits = utop * BN_BITS2 + BN_num_bits_word(ul);
}
}
bn_correct_top(b);
}
# endif
if (!BN_copy(r, b))
goto err;
bn_check_top(r);
ret = 1;
err:
# ifdef BN_DEBUG /* BN_CTX_end would complain about the
* expanded form */
bn_correct_top(c);
bn_correct_top(u);
bn_correct_top(v);
# endif
BN_CTX_end(ctx);
return ret;
}
|
CWE-399
| 179,866 | 9,302 |
302254574823175282283689932857725153262
| null | null | null |
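Note on the BN_GF2m_mod_inv record above (CWE-399, resource management): public reports around this fix describe crafted field parameters that keep the main while (1) loop from ever terminating, a denial of service. A self-contained toy of the bounded-loop pattern, a subtract-and-shift GCD with an explicit iteration budget, all names hypothetical:

    #include <stdio.h>

    static int gcd_odd(unsigned int u, unsigned int v, unsigned int *out)
    {
        int budget = 2 * 32;              /* provable bound for 32-bit inputs */
        if (u == 0 || v == 0)
            return -1;                    /* reject instead of hanging */
        while (u != v) {
            if (budget-- <= 0)
                return -1;                /* bail out, never spin forever */
            if (u < v) { unsigned int t = u; u = v; v = t; }
            u -= v;
            while (u && !(u & 1))
                u >>= 1;
        }
        *out = u;
        return 0;
    }

    int main(void)
    {
        unsigned int g = 0;
        printf("%d\n", gcd_odd(15, 35, &g));   /* 0, g == 5 */
        printf("%d\n", gcd_odd(0, 5, &g));     /* -1: rejected, no hang */
        return 0;
    }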
pigz
|
fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
| 1 |
local void process(char *path)
{
int method = -1; /* get_header() return value */
size_t len; /* length of base name (minus suffix) */
struct stat st; /* to get file type and mod time */
/* all compressed suffixes for decoding search, in length order */
static char *sufs[] = {".z", "-z", "_z", ".Z", ".gz", "-gz", ".zz", "-zz",
".zip", ".ZIP", ".tgz", NULL};
/* open input file with name in, descriptor ind -- set name and mtime */
if (path == NULL) {
strcpy(g.inf, "<stdin>");
g.ind = 0;
g.name = NULL;
g.mtime = g.headis & 2 ?
(fstat(g.ind, &st) ? time(NULL) : st.st_mtime) : 0;
len = 0;
}
else {
/* set input file name (already set if recursed here) */
if (path != g.inf) {
strncpy(g.inf, path, sizeof(g.inf));
if (g.inf[sizeof(g.inf) - 1])
bail("name too long: ", path);
}
len = strlen(g.inf);
/* try to stat input file -- if not there and decoding, look for that
name with compressed suffixes */
if (lstat(g.inf, &st)) {
if (errno == ENOENT && (g.list || g.decode)) {
char **try = sufs;
do {
if (*try == NULL || len + strlen(*try) >= sizeof(g.inf))
break;
strcpy(g.inf + len, *try++);
errno = 0;
} while (lstat(g.inf, &st) && errno == ENOENT);
}
#ifdef EOVERFLOW
if (errno == EOVERFLOW || errno == EFBIG)
bail(g.inf,
" too large -- not compiled with large file support");
#endif
if (errno) {
g.inf[len] = 0;
complain("%s does not exist -- skipping", g.inf);
return;
}
len = strlen(g.inf);
}
/* only process regular files, but allow symbolic links if -f,
recurse into directory if -r */
if ((st.st_mode & S_IFMT) != S_IFREG &&
(st.st_mode & S_IFMT) != S_IFLNK &&
(st.st_mode & S_IFMT) != S_IFDIR) {
complain("%s is a special file or device -- skipping", g.inf);
return;
}
if ((st.st_mode & S_IFMT) == S_IFLNK && !g.force && !g.pipeout) {
complain("%s is a symbolic link -- skipping", g.inf);
return;
}
if ((st.st_mode & S_IFMT) == S_IFDIR && !g.recurse) {
complain("%s is a directory -- skipping", g.inf);
return;
}
/* recurse into directory (assumes Unix) */
if ((st.st_mode & S_IFMT) == S_IFDIR) {
char *roll, *item, *cut, *base, *bigger;
size_t len, hold;
DIR *here;
struct dirent *next;
/* accumulate list of entries (need to do this, since readdir()
behavior not defined if directory modified between calls) */
here = opendir(g.inf);
if (here == NULL)
return;
hold = 512;
roll = MALLOC(hold);
if (roll == NULL)
bail("not enough memory", "");
*roll = 0;
item = roll;
while ((next = readdir(here)) != NULL) {
if (next->d_name[0] == 0 ||
(next->d_name[0] == '.' && (next->d_name[1] == 0 ||
(next->d_name[1] == '.' && next->d_name[2] == 0))))
continue;
len = strlen(next->d_name) + 1;
if (item + len + 1 > roll + hold) {
do { /* make roll bigger */
hold <<= 1;
} while (item + len + 1 > roll + hold);
bigger = REALLOC(roll, hold);
if (bigger == NULL) {
FREE(roll);
bail("not enough memory", "");
}
item = bigger + (item - roll);
roll = bigger;
}
strcpy(item, next->d_name);
item += len;
*item = 0;
}
closedir(here);
/* run process() for each entry in the directory */
cut = base = g.inf + strlen(g.inf);
if (base > g.inf && base[-1] != (unsigned char)'/') {
if ((size_t)(base - g.inf) >= sizeof(g.inf))
bail("path too long", g.inf);
*base++ = '/';
}
item = roll;
while (*item) {
strncpy(base, item, sizeof(g.inf) - (base - g.inf));
if (g.inf[sizeof(g.inf) - 1]) {
strcpy(g.inf + (sizeof(g.inf) - 4), "...");
bail("path too long: ", g.inf);
}
process(g.inf);
item += strlen(item) + 1;
}
*cut = 0;
/* release list of entries */
FREE(roll);
return;
}
/* don't compress .gz (or provided suffix) files, unless -f */
if (!(g.force || g.list || g.decode) && len >= strlen(g.sufx) &&
strcmp(g.inf + len - strlen(g.sufx), g.sufx) == 0) {
complain("%s ends with %s -- skipping", g.inf, g.sufx);
return;
}
/* create output file only if input file has compressed suffix */
if (g.decode == 1 && !g.pipeout && !g.list) {
int suf = compressed_suffix(g.inf);
if (suf == 0) {
complain("%s does not have compressed suffix -- skipping",
g.inf);
return;
}
len -= suf;
}
/* open input file */
g.ind = open(g.inf, O_RDONLY, 0);
if (g.ind < 0)
bail("read error on ", g.inf);
/* prepare gzip header information for compression */
g.name = g.headis & 1 ? justname(g.inf) : NULL;
g.mtime = g.headis & 2 ? st.st_mtime : 0;
}
SET_BINARY_MODE(g.ind);
/* if decoding or testing, try to read gzip header */
g.hname = NULL;
if (g.decode) {
in_init();
method = get_header(1);
if (method != 8 && method != 257 &&
/* gzip -cdf acts like cat on uncompressed input */
!(method == -2 && g.force && g.pipeout && g.decode != 2 &&
!g.list)) {
RELEASE(g.hname);
if (g.ind != 0)
close(g.ind);
if (method != -1)
complain(method < 0 ? "%s is not compressed -- skipping" :
"%s has unknown compression method -- skipping",
g.inf);
return;
}
/* if requested, test input file (possibly a special list) */
if (g.decode == 2) {
if (method == 8)
infchk();
else {
unlzw();
if (g.list) {
g.in_tot -= 3;
show_info(method, 0, g.out_tot, 0);
}
}
RELEASE(g.hname);
if (g.ind != 0)
close(g.ind);
return;
}
}
/* if requested, just list information about input file */
if (g.list) {
list_info();
RELEASE(g.hname);
if (g.ind != 0)
close(g.ind);
return;
}
/* create output file out, descriptor outd */
if (path == NULL || g.pipeout) {
/* write to stdout */
g.outf = MALLOC(strlen("<stdout>") + 1);
if (g.outf == NULL)
bail("not enough memory", "");
strcpy(g.outf, "<stdout>");
g.outd = 1;
if (!g.decode && !g.force && isatty(g.outd))
bail("trying to write compressed data to a terminal",
" (use -f to force)");
}
else {
char *to, *repl;
/* use header name for output when decompressing with -N */
to = g.inf;
if (g.decode && (g.headis & 1) != 0 && g.hname != NULL) {
to = g.hname;
len = strlen(g.hname);
}
/* replace .tgz with .tar when decoding */
repl = g.decode && strcmp(to + len, ".tgz") ? "" : ".tar";
/* create output file and open to write */
g.outf = MALLOC(len + (g.decode ? strlen(repl) : strlen(g.sufx)) + 1);
if (g.outf == NULL)
bail("not enough memory", "");
memcpy(g.outf, to, len);
strcpy(g.outf + len, g.decode ? repl : g.sufx);
g.outd = open(g.outf, O_CREAT | O_TRUNC | O_WRONLY |
(g.force ? 0 : O_EXCL), 0600);
/* if exists and not -f, give user a chance to overwrite */
if (g.outd < 0 && errno == EEXIST && isatty(0) && g.verbosity) {
int ch, reply;
fprintf(stderr, "%s exists -- overwrite (y/n)? ", g.outf);
fflush(stderr);
reply = -1;
do {
ch = getchar();
if (reply < 0 && ch != ' ' && ch != '\t')
reply = ch == 'y' || ch == 'Y' ? 1 : 0;
} while (ch != EOF && ch != '\n' && ch != '\r');
if (reply == 1)
g.outd = open(g.outf, O_CREAT | O_TRUNC | O_WRONLY,
0600);
}
/* if exists and no overwrite, report and go on to next */
if (g.outd < 0 && errno == EEXIST) {
complain("%s exists -- skipping", g.outf);
RELEASE(g.outf);
RELEASE(g.hname);
if (g.ind != 0)
close(g.ind);
return;
}
/* if some other error, give up */
if (g.outd < 0)
bail("write error on ", g.outf);
}
SET_BINARY_MODE(g.outd);
RELEASE(g.hname);
/* process ind to outd */
if (g.verbosity > 1)
fprintf(stderr, "%s to %s ", g.inf, g.outf);
if (g.decode) {
if (method == 8)
infchk();
else if (method == 257)
unlzw();
else
cat();
}
#ifndef NOTHREAD
else if (g.procs > 1)
parallel_compress();
#endif
else
single_compress(0);
if (g.verbosity > 1) {
putc('\n', stderr);
fflush(stderr);
}
/* finish up, copy attributes, set times, delete original */
if (g.ind != 0)
close(g.ind);
if (g.outd != 1) {
if (close(g.outd))
bail("write error on ", g.outf);
g.outd = -1; /* now prevent deletion on interrupt */
if (g.ind != 0) {
copymeta(g.inf, g.outf);
if (!g.keep)
unlink(g.inf);
}
if (g.decode && (g.headis & 2) != 0 && g.stamp)
touch(g.outf, g.stamp);
}
RELEASE(g.outf);
}
|
CWE-22
| 179,899 | 9,306 |
211054325335456133360070456910075205460
| null | null | null |
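Note on the pigz process() record above (CWE-22, path traversal): when decompressing with a stored name (to = g.hname), the header-supplied name is used as the output path, so a name like "../../x" escapes the working directory; the fix reportedly strips any path components, mirroring what process() already does on the compression side via justname(). A sketch of that pattern:

    #include <stdio.h>
    #include <string.h>

    /* keep only the final path component of an untrusted name */
    static const char *justname_sketch(const char *name)
    {
        const char *slash = strrchr(name, '/');
        return slash ? slash + 1 : name;
    }

    int main(void)
    {
        printf("%s\n", justname_sketch("../../etc/passwd"));  /* "passwd" */
        return 0;
    }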
linux
|
0f2af21aae11972fa924374ddcf52e88347cf5a8
| 1 |
static long ext4_zero_range(struct file *file, loff_t offset,
loff_t len, int mode)
{
struct inode *inode = file_inode(file);
handle_t *handle = NULL;
unsigned int max_blocks;
loff_t new_size = 0;
int ret = 0;
int flags;
int credits;
int partial_begin, partial_end;
loff_t start, end;
ext4_lblk_t lblk;
struct address_space *mapping = inode->i_mapping;
unsigned int blkbits = inode->i_blkbits;
trace_ext4_zero_range(inode, offset, len, mode);
if (!S_ISREG(inode->i_mode))
return -EINVAL;
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + len - 1);
if (ret)
return ret;
}
/*
	 * Round up offset. This is not fallocate, we need to zero out
* blocks, so convert interior block aligned part of the range to
* unwritten and possibly manually zero out unaligned parts of the
* range.
*/
start = round_up(offset, 1 << blkbits);
end = round_down((offset + len), 1 << blkbits);
if (start < offset || end > offset + len)
return -EINVAL;
partial_begin = offset & ((1 << blkbits) - 1);
partial_end = (offset + len) & ((1 << blkbits) - 1);
lblk = start >> blkbits;
max_blocks = (end >> blkbits);
if (max_blocks < lblk)
max_blocks = 0;
else
max_blocks -= lblk;
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
EXT4_EX_NOCACHE;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
mutex_lock(&inode->i_mutex);
/*
	 * Indirect files do not support unwritten extents
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out_mutex;
/*
* If we have a partial block after EOF we have to allocate
* the entire block.
*/
if (partial_end)
max_blocks += 1;
}
if (max_blocks > 0) {
/* Now release the pages and zero block aligned part of pages*/
truncate_pagecache_range(inode, start, end - 1);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
		/* Wait for all existing dio workers; newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
if (ret)
goto out_dio;
/*
* Remove entire range from the extent status tree.
*
* ext4_es_remove_extent(inode, lblk, max_blocks) is
* NOT sufficient. I'm not sure why this is the case,
* but let's be conservative and remove the extent
* status tree for the entire inode. There should be
* no outstanding delalloc extents thanks to the
* filemap_write_and_wait_range() call above.
*/
ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
if (ret)
goto out_dio;
}
if (!partial_begin && !partial_end)
goto out_dio;
/*
	 * In the worst case we have to write out two nonadjacent unwritten
	 * blocks and update the inode
*/
credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
if (ext4_should_journal_data(inode))
credits += 2;
handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(inode->i_sb, ret);
goto out_dio;
}
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
if (new_size) {
ext4_update_inode_size(inode, new_size);
} else {
/*
* Mark that we allocate beyond EOF so the subsequent truncate
* can proceed even if the new size is the same as i_size.
*/
if ((offset + len) > i_size_read(inode))
ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
|
CWE-17
| 179,901 | 9,307 |
140250258899090144555494658145477985759
| null | null | null |
libsndfile
|
725c7dbb95bfaf8b4bb7b04820e3a00cceea9ce6
| 1 |
psf_fwrite (const void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (psf->virtual_io)
return psf->vio.write (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the writes down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : items ;
count = write (psf->file.filedes, ((const char*) ptr) + total, count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fwrite */
|
CWE-189
| 179,926 | 9,310 |
168840506501797735728922334619912865145
| null | null | null |
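Note on the psf_fwrite record above (CWE-189): the function computes items *= bytes and only then tests items <= 0, so a multiplication that wraps around to a positive value slips through the check. A hedged sketch of checking before multiplying:

    #include <limits.h>
    #include <stdio.h>

    static long long checked_total(long long bytes, long long items)
    {
        if (bytes <= 0 || items <= 0)
            return 0;
        if (items > LLONG_MAX / bytes)
            return -1;               /* product would wrap: refuse the write */
        return bytes * items;
    }

    int main(void)
    {
        printf("%lld\n", checked_total(1LL << 40, 1LL << 40));  /* -1 */
        return 0;
    }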
linux
|
e159332b9af4b04d882dbcfe1bb0117f0a6d4b58
| 1 |
static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
struct kernel_lb_addr *iloc = &iinfo->i_location;
unsigned int link_count;
unsigned int indirections = 0;
int ret = -EIO;
reread:
if (iloc->logicalBlockNum >=
sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
udf_debug("block=%d, partition=%d out of range\n",
iloc->logicalBlockNum, iloc->partitionReferenceNum);
return -EIO;
}
/*
* Set defaults, but the inode is still incomplete!
* Note: get_new_inode() sets the following on a new inode:
* i_sb = sb
* i_no = ino
* i_flags = sb->s_flags
* i_state = 0
* clean_inode(): zero fills and sets
* i_count = 1
* i_nlink = 1
* i_op = NULL;
*/
bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
return -EIO;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
inode->i_ino, ident);
goto out;
}
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
if (ident == TAG_IDENT_IE && ibh) {
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength) {
brelse(ibh);
memcpy(&iinfo->i_location, &loc,
sizeof(struct kernel_lb_addr));
if (++indirections > UDF_MAX_ICB_NESTING) {
udf_err(inode->i_sb,
"too many ICBs in ICB hierarchy"
" (max %d supported)\n",
UDF_MAX_ICB_NESTING);
goto out;
}
brelse(bh);
goto reread;
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
goto out;
}
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
iinfo->i_strat4096 = 1;
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_next_alloc_block = 0;
iinfo->i_next_alloc_goal = 0;
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct fileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
return 0;
}
ret = -EIO;
read_lock(&sbi->s_cred_lock);
i_uid_write(inode, le32_to_cpu(fe->uid));
if (!uid_valid(inode->i_uid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
i_gid_write(inode, le32_to_cpu(fe->gid));
if (!gid_valid(inode->i_gid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_fmode;
else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_dmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_dmode;
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
if (!link_count) {
if (!hidden_inode) {
ret = -ESTALE;
goto out;
}
link_count = 1;
}
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenExtents = inode->i_size;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
fe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
efe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
iinfo->i_crtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}
inode->i_generation = iinfo->i_unique;
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
inode->i_mode |= S_IFDIR;
inc_nlink(inode);
break;
case ICBTAG_FILE_TYPE_REALTIME:
case ICBTAG_FILE_TYPE_REGULAR:
case ICBTAG_FILE_TYPE_UNDEF:
case ICBTAG_FILE_TYPE_VAT20:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
inode->i_mode |= S_IFREG;
break;
case ICBTAG_FILE_TYPE_BLOCK:
inode->i_mode |= S_IFBLK;
break;
case ICBTAG_FILE_TYPE_CHAR:
inode->i_mode |= S_IFCHR;
break;
case ICBTAG_FILE_TYPE_FIFO:
init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
break;
case ICBTAG_FILE_TYPE_SOCKET:
init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
break;
case ICBTAG_FILE_TYPE_SYMLINK:
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode->i_mode = S_IFLNK | S_IRWXUGO;
break;
case ICBTAG_FILE_TYPE_MAIN:
udf_debug("METADATA FILE-----\n");
break;
case ICBTAG_FILE_TYPE_MIRROR:
udf_debug("METADATA MIRROR FILE-----\n");
break;
case ICBTAG_FILE_TYPE_BITMAP:
udf_debug("METADATA BITMAP FILE-----\n");
break;
default:
udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
inode->i_ino, fe->icbTag.fileType);
goto out;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (dsea) {
init_special_inode(inode, inode->i_mode,
MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
goto out;
}
ret = 0;
out:
brelse(bh);
return ret;
}
| 179,934 | 9,312 |
164719948166104942504296077265701012218
| null | null | null |
|
linux
|
5b6698b0e4a37053de35cc24ee695b98a7eb712b
| 1 |
batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
{
struct batadv_frag_packet *packet;
struct batadv_frag_list_entry *entry;
struct sk_buff *skb_out = NULL;
int size, hdr_size = sizeof(struct batadv_frag_packet);
/* Make sure incoming skb has non-bogus data. */
packet = (struct batadv_frag_packet *)skb->data;
size = ntohs(packet->total_size);
if (size > batadv_frag_size_limit())
goto free;
/* Remove first entry, as this is the destination for the rest of the
* fragments.
*/
entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
hlist_del(&entry->list);
skb_out = entry->skb;
kfree(entry);
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
goto free;
}
/* Move the existing MAC header to just before the payload. (Override
* the fragment header.)
*/
skb_pull_rcsum(skb_out, hdr_size);
memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
skb_set_mac_header(skb_out, -ETH_HLEN);
skb_reset_network_header(skb_out);
skb_reset_transport_header(skb_out);
/* Copy the payload of the each fragment into the last skb */
hlist_for_each_entry(entry, chain, list) {
size = entry->skb->len - hdr_size;
memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
size);
}
free:
/* Locking is not needed, because 'chain' is not part of any orig. */
batadv_frag_clear_chain(chain);
return skb_out;
}
|
CWE-399
| 179,958 | 9,314 |
327120903356038727852614033936872101627
| null | null | null |
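Note on the batadv_frag_merge_packets record above (CWE-399): the merged buffer is sized from the packet's claimed total_size rather than from the fragments actually queued, and the fix commit's subject reportedly calculates the extra tail size from the queued fragments instead. A toy illustration of measuring rather than trusting a header field:

    #include <stdio.h>

    int main(void)
    {
        unsigned int frag_len[3] = { 100, 100, 100 };
        unsigned int claimed_total = 120;        /* attacker-controlled field */
        unsigned int needed = 0;
        unsigned int i;
        for (i = 0; i < 3; i++)
            needed += frag_len[i];               /* measure the real queue */
        printf("claimed=%u, buffer sized for measured=%u\n",
               claimed_total, needed);
        return 0;
    }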
krb5
|
5bb8a6b9c9eb8dd22bc9526751610aaa255ead9c
| 1 |
svcauth_gss_accept_sec_context(struct svc_req *rqst,
struct rpc_gss_init_res *gr)
{
struct svc_rpc_gss_data *gd;
struct rpc_gss_cred *gc;
gss_buffer_desc recv_tok, seqbuf;
gss_OID mech;
OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq;
log_debug("in svcauth_gss_accept_context()");
gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
memset(gr, 0, sizeof(*gr));
/* Deserialize arguments. */
memset(&recv_tok, 0, sizeof(recv_tok));
if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args,
(caddr_t)&recv_tok))
return (FALSE);
gr->gr_major = gss_accept_sec_context(&gr->gr_minor,
&gd->ctx,
svcauth_gss_creds,
&recv_tok,
GSS_C_NO_CHANNEL_BINDINGS,
&gd->client_name,
&mech,
&gr->gr_token,
&ret_flags,
NULL,
NULL);
svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok);
log_status("accept_sec_context", gr->gr_major, gr->gr_minor);
if (gr->gr_major != GSS_S_COMPLETE &&
gr->gr_major != GSS_S_CONTINUE_NEEDED) {
badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt);
gd->ctx = GSS_C_NO_CONTEXT;
goto errout;
}
/*
* ANDROS: krb5 mechglue returns ctx of size 8 - two pointers,
* one to the mechanism oid, one to the internal_ctx_id
*/
if ((gr->gr_ctx.value = mem_alloc(sizeof(gss_union_ctx_id_desc))) == NULL) {
fprintf(stderr, "svcauth_gss_accept_context: out of memory\n");
goto errout;
}
memcpy(gr->gr_ctx.value, gd->ctx, sizeof(gss_union_ctx_id_desc));
gr->gr_ctx.length = sizeof(gss_union_ctx_id_desc);
/* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version... */
gr->gr_win = sizeof(gd->seqmask) * 8;
/* Save client info. */
gd->sec.mech = mech;
gd->sec.qop = GSS_C_QOP_DEFAULT;
gd->sec.svc = gc->gc_svc;
gd->seq = gc->gc_seq;
gd->win = gr->gr_win;
if (gr->gr_major == GSS_S_COMPLETE) {
#ifdef SPKM
/* spkm3: no src_name (anonymous) */
if(!g_OID_equal(gss_mech_spkm3, mech)) {
#endif
maj_stat = gss_display_name(&min_stat, gd->client_name,
&gd->cname, &gd->sec.mech);
#ifdef SPKM
}
#endif
if (maj_stat != GSS_S_COMPLETE) {
log_status("display_name", maj_stat, min_stat);
goto errout;
}
#ifdef DEBUG
#ifdef HAVE_HEIMDAL
log_debug("accepted context for %.*s with "
"<mech {}, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
gd->sec.qop, gd->sec.svc);
#else
{
gss_buffer_desc mechname;
gss_oid_to_str(&min_stat, mech, &mechname);
log_debug("accepted context for %.*s with "
"<mech %.*s, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
mechname.length, (char *)mechname.value,
gd->sec.qop, gd->sec.svc);
gss_release_buffer(&min_stat, &mechname);
}
#endif
#endif /* DEBUG */
seq = htonl(gr->gr_win);
seqbuf.value = &seq;
seqbuf.length = sizeof(seq);
gss_release_buffer(&min_stat, &gd->checksum);
maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT,
&seqbuf, &gd->checksum);
if (maj_stat != GSS_S_COMPLETE) {
goto errout;
}
rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value;
rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length;
}
return (TRUE);
errout:
gss_release_buffer(&min_stat, &gr->gr_token);
return (FALSE);
}
|
CWE-200
| 179,960 | 9,315 |
160165331551633579061756493474183060797
| null | null | null |
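Note on the svcauth_gss_accept_sec_context record above (CWE-200): the reply handle is a raw memcpy of an in-memory gss_union_ctx_id_desc, which ships pointer values and structure padding to the peer. A sketch of the safer pattern, an opaque and fully initialized wire handle with a hypothetical layout:

    #include <stdio.h>
    #include <string.h>

    struct wire_handle { unsigned char bytes[8]; };

    static struct wire_handle make_handle(unsigned int id)
    {
        struct wire_handle h;
        memset(&h, 0, sizeof(h));          /* no padding left uninitialized */
        h.bytes[0] = (unsigned char)(id >> 24);
        h.bytes[1] = (unsigned char)(id >> 16);
        h.bytes[2] = (unsigned char)(id >> 8);
        h.bytes[3] = (unsigned char)id;
        return h;
    }

    int main(void)
    {
        struct wire_handle h = make_handle(42);
        printf("%u\n", h.bytes[3]);        /* deterministic, no leaked bytes */
        return 0;
    }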
krb5
|
6609658db0799053fbef0d7d0aa2f1fd68ef32d8
| 1 |
check_rpcsec_auth(struct svc_req *rqstp)
{
gss_ctx_id_t ctx;
krb5_context kctx;
OM_uint32 maj_stat, min_stat;
gss_name_t name;
krb5_principal princ;
int ret, success;
krb5_data *c1, *c2, *realm;
gss_buffer_desc gss_str;
kadm5_server_handle_t handle;
size_t slen;
char *sdots;
success = 0;
handle = (kadm5_server_handle_t)global_server_handle;
if (rqstp->rq_cred.oa_flavor != RPCSEC_GSS)
return 0;
ctx = rqstp->rq_svccred;
maj_stat = gss_inquire_context(&min_stat, ctx, NULL, &name,
NULL, NULL, NULL, NULL, NULL);
if (maj_stat != GSS_S_COMPLETE) {
krb5_klog_syslog(LOG_ERR, _("check_rpcsec_auth: failed "
"inquire_context, stat=%u"), maj_stat);
log_badauth(maj_stat, min_stat, rqstp->rq_xprt, NULL);
goto fail_name;
}
kctx = handle->context;
ret = gss_to_krb5_name_1(rqstp, kctx, name, &princ, &gss_str);
if (ret == 0)
goto fail_name;
slen = gss_str.length;
trunc_name(&slen, &sdots);
/*
* Since we accept with GSS_C_NO_NAME, the client can authenticate
* against the entire kdb. Therefore, ensure that the service
* name is something reasonable.
*/
if (krb5_princ_size(kctx, princ) != 2)
goto fail_princ;
c1 = krb5_princ_component(kctx, princ, 0);
c2 = krb5_princ_component(kctx, princ, 1);
realm = krb5_princ_realm(kctx, princ);
if (strncmp(handle->params.realm, realm->data, realm->length) == 0
&& strncmp("kadmin", c1->data, c1->length) == 0) {
if (strncmp("history", c2->data, c2->length) == 0)
goto fail_princ;
else
success = 1;
}
fail_princ:
if (!success) {
krb5_klog_syslog(LOG_ERR, _("bad service principal %.*s%s"),
(int) slen, (char *) gss_str.value, sdots);
}
gss_release_buffer(&min_stat, &gss_str);
krb5_free_principal(kctx, princ);
fail_name:
gss_release_name(&min_stat, &name);
return success;
}
|
CWE-284
| 179,961 | 9,316 |
94969864754992495289605771427087841140
| null | null | null |
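Note on the check_rpcsec_auth record above (CWE-284): comparing principal components with strncmp(lit, data, data->length) accepts any data that is a prefix of the literal, so a component "ka" of length 2 matches "kadmin". A sketch of an exact, length-aware comparison, modeled loosely on krb5's data_eq_string idiom:

    #include <stdio.h>
    #include <string.h>

    struct krb5_data_like { const char *data; unsigned int length; };

    static int data_eq_string(struct krb5_data_like d, const char *s)
    {
        return d.length == strlen(s) && memcmp(d.data, s, d.length) == 0;
    }

    int main(void)
    {
        struct krb5_data_like c1 = { "ka", 2 };
        printf("prefix trick accepted? %d\n", data_eq_string(c1, "kadmin"));
        return 0;
    }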
openssl
|
cb62ab4b17818fe66d2fed0a7fe71969131c811b
| 1 |
int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey)
{
EVP_MD_CTX ctx;
unsigned char *buf_in=NULL;
int ret= -1,inl;
int mdnid, pknid;
if (!pkey)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ERR_R_PASSED_NULL_PARAMETER);
return -1;
}
if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7)
{
ASN1err(ASN1_F_ASN1_VERIFY, ASN1_R_INVALID_BIT_STRING_BITS_LEFT);
return -1;
}
EVP_MD_CTX_init(&ctx);
/* Convert signature OID into digest and public key OIDs */
if (!OBJ_find_sigid_algs(OBJ_obj2nid(a->algorithm), &mdnid, &pknid))
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
goto err;
}
if (mdnid == NID_undef)
{
if (!pkey->ameth || !pkey->ameth->item_verify)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
goto err;
}
ret = pkey->ameth->item_verify(&ctx, it, asn, a,
signature, pkey);
/* Return value of 2 means carry on, anything else means we
* exit straight away: either a fatal error of the underlying
* verification routine handles all verification.
*/
if (ret != 2)
goto err;
ret = -1;
}
else
{
const EVP_MD *type;
type=EVP_get_digestbynid(mdnid);
if (type == NULL)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM);
goto err;
}
/* Check public key OID matches public key type */
if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE);
goto err;
}
if (!EVP_DigestVerifyInit(&ctx, NULL, type, NULL, pkey))
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
ret=0;
goto err;
}
}
inl = ASN1_item_i2d(asn, &buf_in, it);
if (buf_in == NULL)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE);
goto err;
}
ret = EVP_DigestVerifyUpdate(&ctx,buf_in,inl);
OPENSSL_cleanse(buf_in,(unsigned int)inl);
OPENSSL_free(buf_in);
if (!ret)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
goto err;
}
ret = -1;
if (EVP_DigestVerifyFinal(&ctx,signature->data,
(size_t)signature->length) <= 0)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
ret=0;
goto err;
}
/* we don't need to zero the 'ctx' because we just checked
* public information */
/* memset(&ctx,0,sizeof(ctx)); */
ret=1;
err:
EVP_MD_CTX_cleanup(&ctx);
return(ret);
}
|
CWE-310
| 179,965 | 9,317 |
82007499822031695969871000045432232205
| null | null | null |
krb5
|
102bb6ebf20f9174130c85c3b052ae104e5073ec
| 1 |
recvauth_common(krb5_context context,
krb5_auth_context * auth_context,
/* IN */
krb5_pointer fd,
char *appl_version,
krb5_principal server,
krb5_int32 flags,
krb5_keytab keytab,
/* OUT */
krb5_ticket ** ticket,
krb5_data *version)
{
krb5_auth_context new_auth_context;
krb5_flags ap_option = 0;
krb5_error_code retval, problem;
krb5_data inbuf;
krb5_data outbuf;
krb5_rcache rcache = 0;
krb5_octet response;
krb5_data null_server;
int need_error_free = 0;
int local_rcache = 0, local_authcon = 0;
/*
* Zero out problem variable. If problem is set at the end of
* the intial version negotiation section, it means that we
* need to send an error code back to the client application
* and exit.
*/
problem = 0;
response = 0;
if (!(flags & KRB5_RECVAUTH_SKIP_VERSION)) {
/*
* First read the sendauth version string and check it.
*/
if ((retval = krb5_read_message(context, fd, &inbuf)))
return(retval);
if (strcmp(inbuf.data, sendauth_version)) {
problem = KRB5_SENDAUTH_BADAUTHVERS;
response = 1;
}
free(inbuf.data);
}
if (flags & KRB5_RECVAUTH_BADAUTHVERS) {
problem = KRB5_SENDAUTH_BADAUTHVERS;
response = 1;
}
/*
* Do the same thing for the application version string.
*/
if ((retval = krb5_read_message(context, fd, &inbuf)))
return(retval);
if (appl_version && strcmp(inbuf.data, appl_version)) {
if (!problem) {
problem = KRB5_SENDAUTH_BADAPPLVERS;
response = 2;
}
}
if (version && !problem)
*version = inbuf;
else
free(inbuf.data);
/*
* Now we actually write the response. If the response is non-zero,
* exit with a return value of problem
*/
if ((krb5_net_write(context, *((int *)fd), (char *)&response, 1)) < 0) {
return(problem); /* We'll return the top-level problem */
}
if (problem)
return(problem);
/* We are clear of errors here */
/*
* Now, let's read the AP_REQ message and decode it
*/
if ((retval = krb5_read_message(context, fd, &inbuf)))
return retval;
if (*auth_context == NULL) {
problem = krb5_auth_con_init(context, &new_auth_context);
*auth_context = new_auth_context;
local_authcon = 1;
}
krb5_auth_con_getrcache(context, *auth_context, &rcache);
if ((!problem) && rcache == NULL) {
/*
* Setup the replay cache.
*/
if (server != NULL && server->length > 0) {
problem = krb5_get_server_rcache(context, &server->data[0],
&rcache);
} else {
null_server.length = 7;
null_server.data = "default";
problem = krb5_get_server_rcache(context, &null_server, &rcache);
}
if (!problem)
problem = krb5_auth_con_setrcache(context, *auth_context, rcache);
local_rcache = 1;
}
if (!problem) {
problem = krb5_rd_req(context, auth_context, &inbuf, server,
keytab, &ap_option, ticket);
free(inbuf.data);
}
/*
* If there was a problem, send back a krb5_error message,
     * preceded by the length of the krb5_error message. If
* everything's ok, send back 0 for the length.
*/
if (problem) {
krb5_error error;
const char *message;
memset(&error, 0, sizeof(error));
krb5_us_timeofday(context, &error.stime, &error.susec);
if(server)
error.server = server;
else {
            /* If this fails - i.e. ENOMEM - we are hosed;
               we cannot even send the error if we wanted to... */
(void) krb5_parse_name(context, "????", &error.server);
need_error_free = 1;
}
error.error = problem - ERROR_TABLE_BASE_krb5;
if (error.error > 127)
error.error = KRB_ERR_GENERIC;
message = error_message(problem);
error.text.length = strlen(message) + 1;
error.text.data = strdup(message);
if (!error.text.data) {
retval = ENOMEM;
goto cleanup;
}
if ((retval = krb5_mk_error(context, &error, &outbuf))) {
free(error.text.data);
goto cleanup;
}
free(error.text.data);
if(need_error_free)
krb5_free_principal(context, error.server);
} else {
outbuf.length = 0;
outbuf.data = 0;
}
retval = krb5_write_message(context, fd, &outbuf);
if (outbuf.data) {
free(outbuf.data);
/* We sent back an error; we need to clean up, then return */
retval = problem;
goto cleanup;
}
if (retval)
goto cleanup;
/* Here lies the mutual authentication stuff... */
if ((ap_option & AP_OPTS_MUTUAL_REQUIRED)) {
if ((retval = krb5_mk_rep(context, *auth_context, &outbuf))) {
return(retval);
}
retval = krb5_write_message(context, fd, &outbuf);
free(outbuf.data);
}
cleanup:;
if (retval) {
if (local_authcon) {
krb5_auth_con_free(context, *auth_context);
} else if (local_rcache && rcache != NULL) {
krb5_rc_close(context, rcache);
krb5_auth_con_setrcache(context, *auth_context, NULL);
}
}
return retval;
}
| 179,984 | 9,318 |
146562876660269345333958311093572073915
| null | null | null |
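The recvauth_common() entry above trusts that the network-supplied version strings are NUL-terminated before handing them to strcmp(), so a malicious peer can force a read past the received buffer. A minimal hedged sketch of a length-checked comparison follows; the upstream fix referenced by this row's commit may use a different helper:
/* Sketch: never strcmp() network data; require an exact length match
 * (including the terminating NUL the sender is supposed to include). */
if (inbuf.length != strlen(sendauth_version) + 1 ||
    memcmp(inbuf.data, sendauth_version, inbuf.length) != 0) {
    problem = KRB5_SENDAUTH_BADAUTHVERS;
    response = 1;
}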
|
krb5
|
82dc33da50338ac84c7b4102dc6513d897d0506a
| 1 |
krb5_gss_process_context_token(minor_status, context_handle,
token_buffer)
OM_uint32 *minor_status;
gss_ctx_id_t context_handle;
gss_buffer_t token_buffer;
{
krb5_gss_ctx_id_rec *ctx;
OM_uint32 majerr;
ctx = (krb5_gss_ctx_id_t) context_handle;
if (! ctx->established) {
*minor_status = KG_CTX_INCOMPLETE;
return(GSS_S_NO_CONTEXT);
}
/* "unseal" the token */
if (GSS_ERROR(majerr = kg_unseal(minor_status, context_handle,
token_buffer,
GSS_C_NO_BUFFER, NULL, NULL,
KG_TOK_DEL_CTX)))
return(majerr);
/* that's it. delete the context */
return(krb5_gss_delete_sec_context(minor_status, &context_handle,
GSS_C_NO_BUFFER));
}
| 179,995 | 9,322 |
158561950850829051905687736642604639992
| null | null | null |
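In the krb5_gss_process_context_token() entry above, accepting a deletion token destroys the context even though the caller still holds the handle, inviting use-after-free through later per-message calls. One plausible mitigation (hedged; the 'terminated' field name is an assumption) is to mark the context dead instead of deleting it:
/* Sketch: flag the context rather than freeing it; other entry points
 * would then reject any context with ctx->terminated set. */
ctx->terminated = 1;
*minor_status = 0;
return (GSS_S_COMPLETE);   /* the caller still owns the handle */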
|
linux
|
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
| 1 |
struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
if (!name)
return ERR_PTR(-ENOENT);
mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
type &= mask;
alg = crypto_alg_lookup(name, type, mask);
if (!alg) {
request_module("%s", name);
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
CRYPTO_ALG_NEED_FALLBACK))
request_module("%s-all", name);
alg = crypto_alg_lookup(name, type, mask);
}
if (alg)
return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
return crypto_larval_add(name, type, mask);
}
|
CWE-264
| 180,011 | 9,325 |
324790015319107212356521045039275593516
| null | null | null |
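The crypto_larval_lookup() entry above passes a caller-controlled algorithm name straight to request_module("%s", ...), which lets unprivileged users (e.g. via AF_ALG) trigger loading of arbitrary kernel modules. The commit referenced for this row prefixes the module alias, roughly:
/* Only modules that declare a MODULE_ALIAS_CRYPTO("<name>") alias
 * will match the "crypto-" prefixed request. */
request_module("crypto-%s", name);
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & CRYPTO_ALG_NEED_FALLBACK))
    request_module("crypto-%s-all", name);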
linux
|
bf911e985d6bbaa328c20c3e05f4eb03de11fdd6
| 1 |
sctp_disposition_t sctp_sf_ootb(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sk_buff *skb = chunk->skb;
sctp_chunkhdr_t *ch;
sctp_errhdr_t *err;
__u8 *ch_end;
int ootb_shut_ack = 0;
int ootb_cookie_ack = 0;
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
/* Report violation if the chunk is less than the minimal size */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
if (SCTP_CID_SHUTDOWN_ACK == ch->type)
ootb_shut_ack = 1;
/* RFC 2960, Section 3.3.7
* Moreover, under any circumstances, an endpoint that
* receives an ABORT MUST NOT respond to that ABORT by
* sending an ABORT of its own.
*/
if (SCTP_CID_ABORT == ch->type)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* RFC 2960, Section 8.4, 7) If the packet contains a "Stale cookie" ERROR
* or a COOKIE ACK the SCTP Packet should be silently
* discarded.
*/
if (SCTP_CID_COOKIE_ACK == ch->type)
ootb_cookie_ack = 1;
if (SCTP_CID_ERROR == ch->type) {
sctp_walk_errors(err, ch) {
if (SCTP_ERROR_STALE_COOKIE == err->cause) {
ootb_cookie_ack = 1;
break;
}
}
}
/* Report violation if chunk len overflows */
ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
if (ootb_shut_ack)
return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
else if (ootb_cookie_ack)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
else
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
}
|
CWE-125
| 180,033 | 9,326 |
324777000170628960274348698711000345943
| null | null | null |
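In the sctp_sf_ootb() entry above, sctp_walk_errors() runs before ch->length has been validated against the skb tail, so a crafted ERROR chunk can drive an out-of-bounds read (the CWE-125 tag). A sketch of the reordered bounds check, matching the intent of the referenced commit:
/* Validate the full chunk extent before any per-type inspection. */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
    return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands);
ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
    return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands);
/* ...only now look at ch->type and walk embedded error causes... */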
libtiff
|
83a4b92815ea04969d494416eaae3d4c6b338e4a#diff-c8b4b355f9b5c06d585b23138e1c185f
| 1 |
TIFFFlushData1(TIFF* tif)
{
if (tif->tif_rawcc > 0 && tif->tif_flags & TIFF_BUF4WRITE ) {
if (!isFillOrder(tif, tif->tif_dir.td_fillorder) &&
(tif->tif_flags & TIFF_NOBITREV) == 0)
TIFFReverseBits((uint8*)tif->tif_rawdata,
tif->tif_rawcc);
if (!TIFFAppendToStrip(tif,
isTiled(tif) ? tif->tif_curtile : tif->tif_curstrip,
tif->tif_rawdata, tif->tif_rawcc))
return (0);
tif->tif_rawcc = 0;
tif->tif_rawcp = tif->tif_rawdata;
}
return (1);
}
|
CWE-787
| 180,043 | 9,328 |
323177524682187789382266817986861189816
| null | null | null |
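In the TIFFFlushData1() entry above, a failing TIFFAppendToStrip() leaves tif_rawcc/tif_rawcp pointing at already-consumed data, which a later flush can replay into an out-of-bounds write (the CWE-787 tag). A hedged sketch of one hardening, resetting the raw-buffer state on the failure path as well:
if (!TIFFAppendToStrip(tif,
    isTiled(tif) ? tif->tif_curtile : tif->tif_curstrip,
    tif->tif_rawdata, tif->tif_rawcc)) {
        /* Assumed fix shape: discard buffered bytes even on failure so
         * they are not appended again by a subsequent flush. */
        tif->tif_rawcc = 0;
        tif->tif_rawcp = tif->tif_rawdata;
        return (0);
}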
linux
|
05692d7005a364add85c6e25a6c4447ce08f913a
| 1 |
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
int ret;
if (!is_irq_none(vdev))
return -EINVAL;
vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
/* return the number of supported vectors if we can't get all: */
ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
if (ret < nvec) {
if (ret > 0)
pci_free_irq_vectors(pdev);
kfree(vdev->ctx);
return ret;
}
vdev->num_ctx = nvec;
vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
VFIO_PCI_MSI_IRQ_INDEX;
if (!msix) {
/*
* Compute the virtual hardware field for max msi vectors -
* it is the log base 2 of the number of vectors.
*/
vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
}
return 0;
}
|
CWE-190
| 180,073 | 9,330 |
265085938067485270518338012291794975502
| null | null | null |
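In the vfio_msi_enable() entry above, nvec derives from user-controlled ioctl input, so nvec * sizeof(struct vfio_pci_irq_ctx) can wrap (the CWE-190 tag). One way to close the multiplication overflow, regardless of where the range validation ends up living:
/* kcalloc() fails cleanly (returns NULL) if nvec * size would overflow. */
vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
    return -ENOMEM;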
linux
|
f5527fffff3f002b0a6b376163613b82f69de073
| 1 |
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
{
mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
mpi_ptr_t xp_marker = NULL;
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
mpi_size_t esize, msize, bsize, rsize;
int esign, msign, bsign, rsign;
mpi_size_t size;
int mod_shift_cnt;
int negative_result;
int assign_rp = 0;
mpi_size_t tsize = 0; /* to avoid compiler warning */
/* fixme: we should check that the warning is void */
int rc = -ENOMEM;
esize = exp->nlimbs;
msize = mod->nlimbs;
size = 2 * msize;
esign = exp->sign;
msign = mod->sign;
rp = res->d;
ep = exp->d;
if (!msize)
return -EINVAL;
if (!esize) {
/* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
* depending on whether MOD equals 1. */
rp[0] = 1;
res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
res->sign = 0;
goto leave;
}
/* Normalize MOD (i.e. make its most significant bit set) as required by
* mpn_divrem. This will make the intermediate values in the calculation
* slightly larger, but the correct result is obtained after a final
* reduction using the original MOD value. */
mp = mp_marker = mpi_alloc_limb_space(msize);
if (!mp)
goto enomem;
mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]);
if (mod_shift_cnt)
mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt);
else
MPN_COPY(mp, mod->d, msize);
bsize = base->nlimbs;
bsign = base->sign;
if (bsize > msize) { /* The base is larger than the modulus. Reduce it. */
/* Allocate (BSIZE + 1) with space for remainder and quotient.
* (The quotient is (bsize - msize + 1) limbs.) */
bp = bp_marker = mpi_alloc_limb_space(bsize + 1);
if (!bp)
goto enomem;
MPN_COPY(bp, base->d, bsize);
/* We don't care about the quotient, store it above the remainder,
* at BP + MSIZE. */
mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize);
bsize = msize;
/* Canonicalize the base, since we are going to multiply with it
* quite a few times. */
MPN_NORMALIZE(bp, bsize);
} else
bp = base->d;
if (!bsize) {
res->nlimbs = 0;
res->sign = 0;
goto leave;
}
if (res->alloced < size) {
/* We have to allocate more space for RES. If any of the input
* parameters are identical to RES, defer deallocation of the old
* space. */
if (rp == ep || rp == mp || rp == bp) {
rp = mpi_alloc_limb_space(size);
if (!rp)
goto enomem;
assign_rp = 1;
} else {
if (mpi_resize(res, size) < 0)
goto enomem;
rp = res->d;
}
} else { /* Make BASE, EXP and MOD not overlap with RES. */
if (rp == bp) {
/* RES and BASE are identical. Allocate temp. space for BASE. */
BUG_ON(bp_marker);
bp = bp_marker = mpi_alloc_limb_space(bsize);
if (!bp)
goto enomem;
MPN_COPY(bp, rp, bsize);
}
if (rp == ep) {
/* RES and EXP are identical. Allocate temp. space for EXP. */
ep = ep_marker = mpi_alloc_limb_space(esize);
if (!ep)
goto enomem;
MPN_COPY(ep, rp, esize);
}
if (rp == mp) {
/* RES and MOD are identical. Allocate temporary space for MOD. */
BUG_ON(mp_marker);
mp = mp_marker = mpi_alloc_limb_space(msize);
if (!mp)
goto enomem;
MPN_COPY(mp, rp, msize);
}
}
MPN_COPY(rp, bp, bsize);
rsize = bsize;
rsign = bsign;
{
mpi_size_t i;
mpi_ptr_t xp;
int c;
mpi_limb_t e;
mpi_limb_t carry_limb;
struct karatsuba_ctx karactx;
xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
if (!xp)
goto enomem;
memset(&karactx, 0, sizeof karactx);
negative_result = (ep[0] & 1) && base->sign;
i = esize - 1;
e = ep[i];
c = count_leading_zeros(e);
e = (e << c) << 1; /* shift the exp bits to the left, lose msb */
c = BITS_PER_MPI_LIMB - 1 - c;
/* Main loop.
*
* Make the result be pointed to alternately by XP and RP. This
* helps us avoid block copying, which would otherwise be necessary
* with the overlap restrictions of mpihelp_divmod. With 50% probability
* the result after this loop will be in the area originally pointed
* to by RP (==RES->d), and with 50% probability in the area originally
* pointed to by XP.
*/
for (;;) {
while (c) {
mpi_ptr_t tp;
mpi_size_t xsize;
/*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */
if (rsize < KARATSUBA_THRESHOLD)
mpih_sqr_n_basecase(xp, rp, rsize);
else {
if (!tspace) {
tsize = 2 * rsize;
tspace =
mpi_alloc_limb_space(tsize);
if (!tspace)
goto enomem;
} else if (tsize < (2 * rsize)) {
mpi_free_limb_space(tspace);
tsize = 2 * rsize;
tspace =
mpi_alloc_limb_space(tsize);
if (!tspace)
goto enomem;
}
mpih_sqr_n(xp, rp, rsize, tspace);
}
xsize = 2 * rsize;
if (xsize > msize) {
mpihelp_divrem(xp + msize, 0, xp, xsize,
mp, msize);
xsize = msize;
}
tp = rp;
rp = xp;
xp = tp;
rsize = xsize;
if ((mpi_limb_signed_t) e < 0) {
/*mpihelp_mul( xp, rp, rsize, bp, bsize ); */
if (bsize < KARATSUBA_THRESHOLD) {
mpi_limb_t tmp;
if (mpihelp_mul
(xp, rp, rsize, bp, bsize,
&tmp) < 0)
goto enomem;
} else {
if (mpihelp_mul_karatsuba_case
(xp, rp, rsize, bp, bsize,
&karactx) < 0)
goto enomem;
}
xsize = rsize + bsize;
if (xsize > msize) {
mpihelp_divrem(xp + msize, 0,
xp, xsize, mp,
msize);
xsize = msize;
}
tp = rp;
rp = xp;
xp = tp;
rsize = xsize;
}
e <<= 1;
c--;
}
i--;
if (i < 0)
break;
e = ep[i];
c = BITS_PER_MPI_LIMB;
}
/* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT
* steps. Adjust the result by reducing it with the original MOD.
*
* Also make sure the result is put in RES->d (where it already
* might be, see above).
*/
if (mod_shift_cnt) {
carry_limb =
mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt);
rp = res->d;
if (carry_limb) {
rp[rsize] = carry_limb;
rsize++;
}
} else {
MPN_COPY(res->d, rp, rsize);
rp = res->d;
}
if (rsize >= msize) {
mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize);
rsize = msize;
}
/* Remove any leading zero words from the result. */
if (mod_shift_cnt)
mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
MPN_NORMALIZE(rp, rsize);
mpihelp_release_karatsuba_ctx(&karactx);
}
if (negative_result && rsize) {
if (mod_shift_cnt)
mpihelp_rshift(mp, mp, msize, mod_shift_cnt);
mpihelp_sub(rp, mp, msize, rp, rsize);
rsize = msize;
rsign = msign;
MPN_NORMALIZE(rp, rsize);
}
res->nlimbs = rsize;
res->sign = rsign;
leave:
rc = 0;
enomem:
if (assign_rp)
mpi_assign_limb_space(res, rp, size);
if (mp_marker)
mpi_free_limb_space(mp_marker);
if (bp_marker)
mpi_free_limb_space(bp_marker);
if (ep_marker)
mpi_free_limb_space(ep_marker);
if (xp_marker)
mpi_free_limb_space(xp_marker);
if (tspace)
mpi_free_limb_space(tspace);
return rc;
}
|
CWE-20
| 180,083 | 9,332 |
52745785378836685527175977948100057226
| null | null | null |
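In the mpi_powm() entry above, the esize == 0 path writes rp[0] = 1 without ensuring the result MPI has any limbs allocated; with a fresh zero-limb result, rp is NULL (the CVE-2016-8650 crash). A sketch of the guard, hedged as to the upstream fix's exact form:
if (!esize) {
    /* Make sure RES has room for one limb before writing to it. */
    res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
    if (res->nlimbs) {
        if (mpi_resize(res, 1) < 0)
            goto enomem;
        res->d[0] = 1;
    }
    res->sign = 0;
    goto leave;
}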
linux
|
8148a73c9901a8794a50f950083c00ccf97d43b3
| 1 |
static ssize_t environ_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
char *page;
unsigned long src = *ppos;
int ret = 0;
struct mm_struct *mm = file->private_data;
unsigned long env_start, env_end;
if (!mm)
return 0;
page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
return -ENOMEM;
ret = 0;
if (!atomic_inc_not_zero(&mm->mm_users))
goto free;
down_read(&mm->mmap_sem);
env_start = mm->env_start;
env_end = mm->env_end;
up_read(&mm->mmap_sem);
while (count > 0) {
size_t this_len, max_len;
int retval;
if (src >= (env_end - env_start))
break;
this_len = env_end - (env_start + src);
max_len = min_t(size_t, PAGE_SIZE, count);
this_len = min(max_len, this_len);
retval = access_remote_vm(mm, (env_start + src),
page, this_len, 0);
if (retval <= 0) {
ret = retval;
break;
}
if (copy_to_user(buf, page, retval)) {
ret = -EFAULT;
break;
}
ret += retval;
src += retval;
buf += retval;
count -= retval;
}
*ppos = src;
mmput(mm);
free:
free_page((unsigned long) page);
return ret;
}
|
CWE-362
| 180,092 | 9,333 |
154024528585163020704459360552752357525
| null | null | null |
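The environ_read() entry above races with execve(): the mm can be visible before env_start/env_end are populated (the CWE-362 tag). The referenced commit gates the read on setup completion, roughly:
/* env_end is set last during exec setup; zero means "not ready yet". */
if (!mm || !mm->env_end)
    return 0;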
linux
|
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
| 1 |
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
struct scsi_cmnd *cmd)
{
char *buffer;
unsigned short use_sg;
int retvalue = 0, transfer_len = 0;
unsigned long flags;
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
(uint32_t)cmd->cmnd[6] << 16 |
(uint32_t)cmd->cmnd[7] << 8 |
(uint32_t)cmd->cmnd[8];
struct scatterlist *sg;
use_sg = scsi_sg_count(cmd);
sg = scsi_sglist(cmd);
buffer = kmap_atomic(sg_page(sg)) + sg->offset;
if (use_sg > 1) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
transfer_len += sg->length;
if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
retvalue = ARCMSR_MESSAGE_FAIL;
pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
goto message_out;
}
pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
switch (controlcode) {
case ARCMSR_MESSAGE_READ_RQBUFFER: {
unsigned char *ver_addr;
uint8_t *ptmpQbuffer;
uint32_t allxfer_len = 0;
ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
pr_info("%s: not enough memory!\n", __func__);
goto message_out;
}
ptmpQbuffer = ver_addr;
spin_lock_irqsave(&acb->rqbuffer_lock, flags);
if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
unsigned int tail = acb->rqbuf_getIndex;
unsigned int head = acb->rqbuf_putIndex;
unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
allxfer_len = ARCMSR_API_DATA_BUFLEN;
if (allxfer_len <= cnt_to_end)
memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
else {
memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
}
acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
}
memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
allxfer_len);
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
struct QBUFFER __iomem *prbuffer;
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
}
spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
kfree(ver_addr);
pcmdmessagefld->cmdmessage.Length = allxfer_len;
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
break;
}
case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
unsigned char *ver_addr;
int32_t user_len, cnt2end;
uint8_t *pQbuffer, *ptmpuserbuffer;
ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
ptmpuserbuffer = ver_addr;
user_len = pcmdmessagefld->cmdmessage.Length;
memcpy(ptmpuserbuffer,
pcmdmessagefld->messagedatabuffer, user_len);
spin_lock_irqsave(&acb->wqbuffer_lock, flags);
if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
struct SENSE_DATA *sensebuffer =
(struct SENSE_DATA *)cmd->sense_buffer;
arcmsr_write_ioctldata2iop(acb);
/* has error: report sense data */
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->SenseKey = ILLEGAL_REQUEST;
sensebuffer->AdditionalSenseLength = 0x0A;
sensebuffer->AdditionalSenseCode = 0x20;
sensebuffer->Valid = 1;
retvalue = ARCMSR_MESSAGE_FAIL;
} else {
pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
if (user_len > cnt2end) {
memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
ptmpuserbuffer += cnt2end;
user_len -= cnt2end;
acb->wqbuf_putIndex = 0;
pQbuffer = acb->wqbuffer;
}
memcpy(pQbuffer, ptmpuserbuffer, user_len);
acb->wqbuf_putIndex += user_len;
acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
acb->acb_flags &=
~ACB_F_MESSAGE_WQBUFFER_CLEARED;
arcmsr_write_ioctldata2iop(acb);
}
}
spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
kfree(ver_addr);
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
break;
}
case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
uint8_t *pQbuffer = acb->rqbuffer;
arcmsr_clear_iop2drv_rqueue_buffer(acb);
spin_lock_irqsave(&acb->rqbuffer_lock, flags);
acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
acb->rqbuf_getIndex = 0;
acb->rqbuf_putIndex = 0;
memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
break;
}
case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
uint8_t *pQbuffer = acb->wqbuffer;
spin_lock_irqsave(&acb->wqbuffer_lock, flags);
acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
ACB_F_MESSAGE_WQBUFFER_READED);
acb->wqbuf_getIndex = 0;
acb->wqbuf_putIndex = 0;
memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
break;
}
case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
uint8_t *pQbuffer;
arcmsr_clear_iop2drv_rqueue_buffer(acb);
spin_lock_irqsave(&acb->rqbuffer_lock, flags);
acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
acb->rqbuf_getIndex = 0;
acb->rqbuf_putIndex = 0;
pQbuffer = acb->rqbuffer;
memset(pQbuffer, 0, sizeof(struct QBUFFER));
spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
spin_lock_irqsave(&acb->wqbuffer_lock, flags);
acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
ACB_F_MESSAGE_WQBUFFER_READED);
acb->wqbuf_getIndex = 0;
acb->wqbuf_putIndex = 0;
pQbuffer = acb->wqbuffer;
memset(pQbuffer, 0, sizeof(struct QBUFFER));
spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
break;
}
case ARCMSR_MESSAGE_RETURN_CODE_3F: {
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_3F;
break;
}
case ARCMSR_MESSAGE_SAY_HELLO: {
int8_t *hello_string = "Hello! I am ARCMSR";
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
memcpy(pcmdmessagefld->messagedatabuffer,
hello_string, (int16_t)strlen(hello_string));
break;
}
case ARCMSR_MESSAGE_SAY_GOODBYE: {
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
arcmsr_iop_parking(acb);
break;
}
case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
else
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
arcmsr_flush_adapter_cache(acb);
break;
}
default:
retvalue = ARCMSR_MESSAGE_FAIL;
pr_info("%s: unknown controlcode!\n", __func__);
}
message_out:
if (use_sg) {
struct scatterlist *sg = scsi_sglist(cmd);
kunmap_atomic(buffer - sg->offset);
}
return retvalue;
}
|
CWE-119
| 180,100 | 9,334 |
280628888206953272523103459430311361338
| null | null | null |
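In the arcmsr_iop_message_xfer() entry above, the WRITE_WQBUFFER branch copies user_len bytes taken verbatim from the message header into the fixed ARCMSR_API_DATA_BUFLEN scratch buffer (the CWE-119 tag). A sketch of the missing bound; the unsigned type and cleanup placement are assumptions:
uint32_t user_len = pcmdmessagefld->cmdmessage.Length;
if (user_len > ARCMSR_API_DATA_BUFLEN) {
    retvalue = ARCMSR_MESSAGE_FAIL;
    kfree(ver_addr);            /* don't leak the scratch buffer */
    goto message_out;
}
memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);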
php-src
|
b88393f08a558eec14964a55d3c680fe67407712?w=1
| 1 |
static int wddx_stack_destroy(wddx_stack *stack)
{
register int i;
if (stack->elements) {
for (i = 0; i < stack->top; i++) {
if (((st_entry *)stack->elements[i])->data) {
zval_ptr_dtor(&((st_entry *)stack->elements[i])->data);
}
if (((st_entry *)stack->elements[i])->varname) {
efree(((st_entry *)stack->elements[i])->varname);
}
efree(stack->elements[i]);
}
efree(stack->elements);
}
return SUCCESS;
}
|
CWE-416
| 180,108 | 9,336 |
167114955492293738789662564321919947450
| null | null | null |
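The wddx_stack_destroy() entry above is tagged CWE-416: some stack entries hold a data zval that is also owned elsewhere (plausibly recordset-field entries), so destroying it here frees memory that is still referenced. A heavily hedged sketch of one guard; the ST_FIELD condition is an assumption about the actual fix:
if (((st_entry *)stack->elements[i])->data
        && ((st_entry *)stack->elements[i])->type != ST_FIELD) {
    /* Skip entries whose data zval is owned by an enclosing recordset. */
    zval_ptr_dtor(&((st_entry *)stack->elements[i])->data);
}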
charybdis
|
818a3fda944b26d4814132cee14cfda4ea4aa824
| 1 |
m_authenticate(struct Client *client_p, struct Client *source_p,
int parc, const char *parv[])
{
struct Client *agent_p = NULL;
struct Client *saslserv_p = NULL;
/* They really should use CAP for their own sake. */
if(!IsCapable(source_p, CLICAP_SASL))
return 0;
if (strlen(client_p->id) == 3)
{
exit_client(client_p, client_p, client_p, "Mixing client and server protocol");
return 0;
}
saslserv_p = find_named_client(ConfigFileEntry.sasl_service);
if (saslserv_p == NULL || !IsService(saslserv_p))
{
sendto_one(source_p, form_str(ERR_SASLABORTED), me.name, EmptyString(source_p->name) ? "*" : source_p->name);
return 0;
}
if(source_p->localClient->sasl_complete)
{
*source_p->localClient->sasl_agent = '\0';
source_p->localClient->sasl_complete = 0;
}
if(strlen(parv[1]) > 400)
{
sendto_one(source_p, form_str(ERR_SASLTOOLONG), me.name, EmptyString(source_p->name) ? "*" : source_p->name);
return 0;
}
if(!*source_p->id)
{
/* Allocate a UID. */
strcpy(source_p->id, generate_uid());
add_to_id_hash(source_p->id, source_p);
}
if(*source_p->localClient->sasl_agent)
agent_p = find_id(source_p->localClient->sasl_agent);
if(agent_p == NULL)
{
sendto_one(saslserv_p, ":%s ENCAP %s SASL %s %s H %s %s",
me.id, saslserv_p->servptr->name, source_p->id, saslserv_p->id,
source_p->host, source_p->sockhost);
if (!strcmp(parv[1], "EXTERNAL") && source_p->certfp != NULL)
sendto_one(saslserv_p, ":%s ENCAP %s SASL %s %s S %s %s",
me.id, saslserv_p->servptr->name, source_p->id, saslserv_p->id,
parv[1], source_p->certfp);
else
sendto_one(saslserv_p, ":%s ENCAP %s SASL %s %s S %s",
me.id, saslserv_p->servptr->name, source_p->id, saslserv_p->id,
parv[1]);
rb_strlcpy(source_p->localClient->sasl_agent, saslserv_p->id, IDLEN);
}
else
sendto_one(agent_p, ":%s ENCAP %s SASL %s %s C %s",
me.id, agent_p->servptr->name, source_p->id, agent_p->id,
parv[1]);
source_p->localClient->sasl_out++;
return 0;
}
|
CWE-285
| 180,116 | 9,337 |
245147725102129380999399869984511938929
| null | null | null |
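In the m_authenticate() entry above (tagged CWE-285), parv[1] is client-supplied and may contain spaces when sent as an IRC trailing parameter; forwarded verbatim into the ENCAP "S %s" message, it can smuggle extra fields such as a forged certificate fingerprint. One plausible validation, hedged against the actual upstream change:
/* Reject payloads that would split into extra fields server-side. */
if (strchr(parv[1], ' ') != NULL)
{
    sendto_one(source_p, form_str(ERR_SASLABORTED), me.name,
               EmptyString(source_p->name) ? "*" : source_p->name);
    return 0;
}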
php-src
|
20ce2fe8e3c211a42fee05a461a5881be9a8790e?w=1
| 1 |
static int php_var_unserialize_internal(UNSERIALIZE_PARAMETER)
{
const unsigned char *cursor, *limit, *marker, *start;
zval *rval_ref;
limit = max;
cursor = *p;
if (YYCURSOR >= YYLIMIT) {
return 0;
}
if (var_hash && (*p)[0] != 'R') {
var_push(var_hash, rval);
}
start = cursor;
#line 554 "ext/standard/var_unserializer.c"
{
YYCTYPE yych;
static const unsigned char yybm[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7);
yych = *YYCURSOR;
switch (yych) {
case 'C':
case 'O': goto yy13;
case 'N': goto yy5;
case 'R': goto yy2;
case 'S': goto yy10;
case 'a': goto yy11;
case 'b': goto yy6;
case 'd': goto yy8;
case 'i': goto yy7;
case 'o': goto yy12;
case 'r': goto yy4;
case 's': goto yy9;
case '}': goto yy14;
default: goto yy16;
}
yy2:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy95;
yy3:
#line 884 "ext/standard/var_unserializer.re"
{ return 0; }
#line 580 "ext/standard/var_unserializer.c"
yy4:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy89;
goto yy3;
yy5:
yych = *++YYCURSOR;
if (yych == ';') goto yy87;
goto yy3;
yy6:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy83;
goto yy3;
yy7:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy77;
goto yy3;
yy8:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy53;
goto yy3;
yy9:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy46;
goto yy3;
yy10:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy39;
goto yy3;
yy11:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy32;
goto yy3;
yy12:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy25;
goto yy3;
yy13:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy17;
goto yy3;
yy14:
++YYCURSOR;
#line 878 "ext/standard/var_unserializer.re"
{
/* this is the case where we have less data than planned */
php_error_docref(NULL, E_NOTICE, "Unexpected end of serialized data");
return 0; /* not sure if it should be 0 or 1 here? */
}
#line 629 "ext/standard/var_unserializer.c"
yy16:
yych = *++YYCURSOR;
goto yy3;
yy17:
yych = *++YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
if (yych == '+') goto yy19;
yy18:
YYCURSOR = YYMARKER;
goto yy3;
yy19:
yych = *++YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
goto yy18;
yy20:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
if (yych != ':') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 733 "ext/standard/var_unserializer.re"
{
size_t len, len2, len3, maxlen;
zend_long elements;
char *str;
zend_string *class_name;
zend_class_entry *ce;
int incomplete_class = 0;
int custom_object = 0;
zval user_func;
zval retval;
zval args[1];
if (!var_hash) return 0;
if (*start == 'C') {
custom_object = 1;
}
len2 = len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len || len == 0) {
*p = start + 2;
return 0;
}
str = (char*)YYCURSOR;
YYCURSOR += len;
if (*(YYCURSOR) != '"') {
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR+1) != ':') {
*p = YYCURSOR+1;
return 0;
}
len3 = strspn(str, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\");
if (len3 != len)
{
*p = YYCURSOR + len3 - len;
return 0;
}
class_name = zend_string_init(str, len, 0);
do {
if(!unserialize_allowed_class(class_name, classes)) {
incomplete_class = 1;
ce = PHP_IC_ENTRY;
break;
}
/* Try to find class directly */
BG(serialize_lock)++;
ce = zend_lookup_class(class_name);
if (ce) {
BG(serialize_lock)--;
if (EG(exception)) {
zend_string_release(class_name);
return 0;
}
break;
}
BG(serialize_lock)--;
if (EG(exception)) {
zend_string_release(class_name);
return 0;
}
/* Check for unserialize callback */
if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) {
incomplete_class = 1;
ce = PHP_IC_ENTRY;
break;
}
/* Call unserialize callback */
ZVAL_STRING(&user_func, PG(unserialize_callback_func));
ZVAL_STR_COPY(&args[0], class_name);
BG(serialize_lock)++;
if (call_user_function_ex(CG(function_table), NULL, &user_func, &retval, 1, args, 0, NULL) != SUCCESS) {
BG(serialize_lock)--;
if (EG(exception)) {
zend_string_release(class_name);
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&args[0]);
return 0;
}
php_error_docref(NULL, E_WARNING, "defined (%s) but not found", Z_STRVAL(user_func));
incomplete_class = 1;
ce = PHP_IC_ENTRY;
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&args[0]);
break;
}
BG(serialize_lock)--;
zval_ptr_dtor(&retval);
if (EG(exception)) {
zend_string_release(class_name);
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&args[0]);
return 0;
}
/* The callback function may have defined the class */
if ((ce = zend_lookup_class(class_name)) == NULL) {
php_error_docref(NULL, E_WARNING, "Function %s() hasn't defined the class it was called for", Z_STRVAL(user_func));
incomplete_class = 1;
ce = PHP_IC_ENTRY;
}
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&args[0]);
break;
} while (1);
*p = YYCURSOR;
if (custom_object) {
int ret;
ret = object_custom(UNSERIALIZE_PASSTHRU, ce);
if (ret && incomplete_class) {
php_store_class_name(rval, ZSTR_VAL(class_name), len2);
}
zend_string_release(class_name);
return ret;
}
elements = object_common1(UNSERIALIZE_PASSTHRU, ce);
if (incomplete_class) {
php_store_class_name(rval, ZSTR_VAL(class_name), len2);
}
zend_string_release(class_name);
return object_common2(UNSERIALIZE_PASSTHRU, elements);
}
#line 804 "ext/standard/var_unserializer.c"
yy25:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy26;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy27;
goto yy18;
}
yy26:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy27:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy27;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 726 "ext/standard/var_unserializer.re"
{
if (!var_hash) return 0;
return object_common2(UNSERIALIZE_PASSTHRU,
object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR));
}
#line 836 "ext/standard/var_unserializer.c"
yy32:
yych = *++YYCURSOR;
if (yych == '+') goto yy33;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy34;
goto yy18;
yy33:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy34:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy34;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '{') goto yy18;
++YYCURSOR;
#line 702 "ext/standard/var_unserializer.re"
{
zend_long elements = parse_iv(start + 2);
/* use iv() not uiv() in order to check data range */
*p = YYCURSOR;
if (!var_hash) return 0;
if (elements < 0) {
return 0;
}
array_init_size(rval, elements);
if (elements) {
/* we can't convert from packed to hash during unserialization, because
references to some zvals might be kept in var_hash (to support references) */
zend_hash_real_init(Z_ARRVAL_P(rval), 0);
}
if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_P(rval), elements, 0)) {
return 0;
}
return finish_nested_data(UNSERIALIZE_PASSTHRU);
}
#line 881 "ext/standard/var_unserializer.c"
yy39:
yych = *++YYCURSOR;
if (yych == '+') goto yy40;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy41;
goto yy18;
yy40:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy41:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy41;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 668 "ext/standard/var_unserializer.re"
{
size_t len, maxlen;
zend_string *str;
len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len) {
*p = start + 2;
return 0;
}
if ((str = unserialize_str(&YYCURSOR, len, maxlen)) == NULL) {
return 0;
}
if (*(YYCURSOR) != '"') {
zend_string_free(str);
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR + 1) != ';') {
efree(str);
*p = YYCURSOR + 1;
return 0;
}
YYCURSOR += 2;
*p = YYCURSOR;
ZVAL_STR(rval, str);
return 1;
}
#line 936 "ext/standard/var_unserializer.c"
yy46:
yych = *++YYCURSOR;
if (yych == '+') goto yy47;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy48;
goto yy18;
yy47:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy48:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy48;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 636 "ext/standard/var_unserializer.re"
{
size_t len, maxlen;
char *str;
len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len) {
*p = start + 2;
return 0;
}
str = (char*)YYCURSOR;
YYCURSOR += len;
if (*(YYCURSOR) != '"') {
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR + 1) != ';') {
*p = YYCURSOR + 1;
return 0;
}
YYCURSOR += 2;
*p = YYCURSOR;
ZVAL_STRINGL(rval, str, len);
return 1;
}
#line 989 "ext/standard/var_unserializer.c"
yy53:
yych = *++YYCURSOR;
if (yych <= '/') {
if (yych <= ',') {
if (yych == '+') goto yy57;
goto yy18;
} else {
if (yych <= '-') goto yy55;
if (yych <= '.') goto yy60;
goto yy18;
}
} else {
if (yych <= 'I') {
if (yych <= '9') goto yy58;
if (yych <= 'H') goto yy18;
goto yy56;
} else {
if (yych != 'N') goto yy18;
}
}
yych = *++YYCURSOR;
if (yych == 'A') goto yy76;
goto yy18;
yy55:
yych = *++YYCURSOR;
if (yych <= '/') {
if (yych == '.') goto yy60;
goto yy18;
} else {
if (yych <= '9') goto yy58;
if (yych != 'I') goto yy18;
}
yy56:
yych = *++YYCURSOR;
if (yych == 'N') goto yy72;
goto yy18;
yy57:
yych = *++YYCURSOR;
if (yych == '.') goto yy60;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy58:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ':') {
if (yych <= '.') {
if (yych <= '-') goto yy18;
goto yy70;
} else {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy58;
goto yy18;
}
} else {
if (yych <= 'E') {
if (yych <= ';') goto yy63;
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy60:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy61:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ';') {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy61;
if (yych <= ':') goto yy18;
} else {
if (yych <= 'E') {
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy63:
++YYCURSOR;
#line 627 "ext/standard/var_unserializer.re"
{
#if SIZEOF_ZEND_LONG == 4
use_double:
#endif
*p = YYCURSOR;
ZVAL_DOUBLE(rval, zend_strtod((const char *)start + 2, NULL));
return 1;
}
#line 1086 "ext/standard/var_unserializer.c"
yy65:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy66;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
goto yy18;
}
yy66:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych == '+') goto yy69;
goto yy18;
} else {
if (yych <= '-') goto yy69;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
}
yy67:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
if (yych == ';') goto yy63;
goto yy18;
yy69:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
goto yy18;
yy70:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ';') {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy70;
if (yych <= ':') goto yy18;
goto yy63;
} else {
if (yych <= 'E') {
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy72:
yych = *++YYCURSOR;
if (yych != 'F') goto yy18;
yy73:
yych = *++YYCURSOR;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 611 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
if (!strncmp((char*)start + 2, "NAN", 3)) {
ZVAL_DOUBLE(rval, php_get_nan());
} else if (!strncmp((char*)start + 2, "INF", 3)) {
ZVAL_DOUBLE(rval, php_get_inf());
} else if (!strncmp((char*)start + 2, "-INF", 4)) {
ZVAL_DOUBLE(rval, -php_get_inf());
} else {
ZVAL_NULL(rval);
}
return 1;
}
#line 1161 "ext/standard/var_unserializer.c"
yy76:
yych = *++YYCURSOR;
if (yych == 'N') goto yy73;
goto yy18;
yy77:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy78;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy79;
goto yy18;
}
yy78:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy79:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy79;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 585 "ext/standard/var_unserializer.re"
{
#if SIZEOF_ZEND_LONG == 4
int digits = YYCURSOR - start - 3;
if (start[2] == '-' || start[2] == '+') {
digits--;
}
/* Use double for large zend_long values that were serialized on a 64-bit system */
if (digits >= MAX_LENGTH_OF_LONG - 1) {
if (digits == MAX_LENGTH_OF_LONG - 1) {
int cmp = strncmp((char*)YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1);
if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) {
goto use_double;
}
} else {
goto use_double;
}
}
#endif
*p = YYCURSOR;
ZVAL_LONG(rval, parse_iv(start + 2));
return 1;
}
#line 1214 "ext/standard/var_unserializer.c"
yy83:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= '2') goto yy18;
yych = *++YYCURSOR;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 579 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
ZVAL_BOOL(rval, parse_iv(start + 2));
return 1;
}
#line 1228 "ext/standard/var_unserializer.c"
yy87:
++YYCURSOR;
#line 573 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
ZVAL_NULL(rval);
return 1;
}
#line 1237 "ext/standard/var_unserializer.c"
yy89:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy90;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy91;
goto yy18;
}
yy90:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy91:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy91;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 548 "ext/standard/var_unserializer.re"
{
zend_long id;
*p = YYCURSOR;
if (!var_hash) return 0;
id = parse_iv(start + 2) - 1;
if (id == -1 || (rval_ref = var_access(var_hash, id)) == NULL) {
return 0;
}
if (rval_ref == rval) {
return 0;
}
if (Z_ISUNDEF_P(rval_ref) || (Z_ISREF_P(rval_ref) && Z_ISUNDEF_P(Z_REFVAL_P(rval_ref)))) {
ZVAL_UNDEF(rval);
return 1;
}
ZVAL_COPY(rval, rval_ref);
return 1;
}
#line 1285 "ext/standard/var_unserializer.c"
yy95:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy96;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy97;
goto yy18;
}
yy96:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy97:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy97;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 522 "ext/standard/var_unserializer.re"
{
zend_long id;
*p = YYCURSOR;
if (!var_hash) return 0;
id = parse_iv(start + 2) - 1;
if (id == -1 || (rval_ref = var_access(var_hash, id)) == NULL) {
return 0;
}
zval_ptr_dtor(rval);
if (Z_ISUNDEF_P(rval_ref) || (Z_ISREF_P(rval_ref) && Z_ISUNDEF_P(Z_REFVAL_P(rval_ref)))) {
ZVAL_UNDEF(rval);
return 1;
}
if (Z_ISREF_P(rval_ref)) {
ZVAL_COPY(rval, rval_ref);
} else {
ZVAL_NEW_REF(rval_ref, rval_ref);
ZVAL_COPY(rval, rval_ref);
}
return 1;
}
#line 1334 "ext/standard/var_unserializer.c"
}
#line 886 "ext/standard/var_unserializer.re"
return 0;
}
|
CWE-502
| 180,132 | 9,341 |
270468889219152011719609320601797866906
| null | null | null |
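In the php_var_unserialize_internal() entry above (tagged CWE-502), note the asymmetry between the 'r' and 'R' handlers: the 'R' branch calls zval_ptr_dtor(rval) and then copies from rval_ref without the self-reference guard the 'r' branch has, so a back-reference resolving to the slot being written frees memory it is about to read. A hedged sketch mirroring that guard; whether this matches the referenced commit is an assumption:
id = parse_iv(start + 2) - 1;
if (id == -1 || (rval_ref = var_access(var_hash, id)) == NULL) {
    return 0;
}
/* Assumed hardening: refuse a back-reference that resolves to the
 * zval currently being populated, as the 'r' handler already does. */
if (rval_ref == rval) {
    return 0;
}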