id (int32, 0-27.3k) | func (string, 26-142k chars) | target (bool, 2 classes) | project (string, 2 values) | commit_id (string, 40 chars) |
---|---|---|---|---|
8,474 | void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
const ARMCPRegInfo *ri = rip;
ri->writefn(env, ri, value);
}
| true | qemu | 8d04fb55dec381bc5105cb47f29d918e579e8cbd |
8,475 | static int libspeex_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LibSpeexContext *s = avctx->priv_data;
int16_t *output;
int ret, consumed = 0;
/* get output buffer */
s->frame.nb_samples = s->frame_size;
if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
output = (int16_t *)s->frame.data[0];
/* if there is not enough data left for the smallest possible frame or the
next 5 bits are a terminator code, reset the libspeex buffer using the
current packet, otherwise ignore the current packet and keep decoding
frames from the libspeex buffer. */
if (speex_bits_remaining(&s->bits) < 5 ||
speex_bits_peek_unsigned(&s->bits, 5) == 0x1F) {
/* check for flush packet */
if (!buf || !buf_size) {
*got_frame_ptr = 0;
return buf_size;
}
/* set new buffer */
speex_bits_read_from(&s->bits, buf, buf_size);
consumed = buf_size;
}
/* decode a single frame */
ret = speex_decode_int(s->dec_state, &s->bits, output);
if (ret <= -2) {
av_log(avctx, AV_LOG_ERROR, "Error decoding Speex frame.\n");
return AVERROR_INVALIDDATA;
}
if (avctx->channels == 2)
speex_decode_stereo_int(output, s->frame_size, &s->stereo);
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return consumed;
}
| false | FFmpeg | f3c9d66bafde9b8586bd63dd3307daa87352af75 |
8,476 | static void vmport_class_initfn(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = vmport_realizefn;
dc->no_user = 1;
}
| true | qemu | efec3dd631d94160288392721a5f9c39e50fb2bc |
8,477 | void qemu_file_skip(QEMUFile *f, int size)
{
if (f->buf_index + size <= f->buf_size) {
f->buf_index += size;
}
}
| true | qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 |
8,478 | static int doTest(uint8_t *ref[4], int refStride[4], int w, int h,
enum PixelFormat srcFormat, enum PixelFormat dstFormat,
int srcW, int srcH, int dstW, int dstH, int flags)
{
uint8_t *src[4] = {0};
uint8_t *dst[4] = {0};
uint8_t *out[4] = {0};
int srcStride[4], dstStride[4];
int i;
uint64_t ssdY, ssdU=0, ssdV=0, ssdA=0;
struct SwsContext *srcContext = NULL, *dstContext = NULL,
*outContext = NULL;
int res;
res = 0;
for (i=0; i<4; i++) {
// avoid stride % bpp != 0
if (srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24)
srcStride[i]= srcW*3;
else if (srcFormat==PIX_FMT_RGB48BE || srcFormat==PIX_FMT_RGB48LE)
srcStride[i]= srcW*6;
else
srcStride[i]= srcW*4;
if (dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24)
dstStride[i]= dstW*3;
else if (dstFormat==PIX_FMT_RGB48BE || dstFormat==PIX_FMT_RGB48LE)
dstStride[i]= dstW*6;
else
dstStride[i]= dstW*4;
/* Image buffers passed into libswscale can be allocated any way you
* prefer, as long as they're aligned enough for the architecture, and
* they're freed appropriately (such as using av_free for buffers
* allocated with av_malloc). */
/* An extra 16 bytes is being allocated because some scalers may write
* out of bounds. */
src[i]= av_mallocz(srcStride[i]*srcH+16);
dst[i]= av_mallocz(dstStride[i]*dstH+16);
out[i]= av_mallocz(refStride[i]*h);
if (!src[i] || !dst[i] || !out[i]) {
perror("Malloc");
res = -1;
goto end;
}
}
srcContext= sws_getContext(w, h, PIX_FMT_YUVA420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL);
if (!srcContext) {
fprintf(stderr, "Failed to get %s ---> %s\n",
av_pix_fmt_descriptors[PIX_FMT_YUVA420P].name,
av_pix_fmt_descriptors[srcFormat].name);
res = -1;
goto end;
}
dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL);
if (!dstContext) {
fprintf(stderr, "Failed to get %s ---> %s\n",
av_pix_fmt_descriptors[srcFormat].name,
av_pix_fmt_descriptors[dstFormat].name);
res = -1;
goto end;
}
outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUVA420P, flags, NULL, NULL, NULL);
if (!outContext) {
fprintf(stderr, "Failed to get %s ---> %s\n",
av_pix_fmt_descriptors[dstFormat].name,
av_pix_fmt_descriptors[PIX_FMT_YUVA420P].name);
res = -1;
goto end;
}
// printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
// (int)src[0], (int)src[1], (int)src[2]);
sws_scale(srcContext, ref, refStride, 0, h , src, srcStride);
sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
if (hasChroma(srcFormat) && hasChroma(dstFormat)) {
//FIXME check that output is really gray
ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
}
if (isALPHA(srcFormat) && isALPHA(dstFormat))
ssdA= getSSD(ref[3], out[3], refStride[3], refStride[3], w, h);
ssdY/= w*h;
ssdU/= w*h/4;
ssdV/= w*h/4;
ssdA/= w*h;
printf(" %s %dx%d -> %s %4dx%4d flags=%2d SSD=%5"PRId64",%5"PRId64",%5"PRId64",%5"PRId64"\n",
av_pix_fmt_descriptors[srcFormat].name, srcW, srcH,
av_pix_fmt_descriptors[dstFormat].name, dstW, dstH,
flags, ssdY, ssdU, ssdV, ssdA);
fflush(stdout);
end:
sws_freeContext(srcContext);
sws_freeContext(dstContext);
sws_freeContext(outContext);
for (i=0; i<4; i++) {
av_free(src[i]);
av_free(dst[i]);
av_free(out[i]);
}
return res;
}
| true | FFmpeg | b6f1e7ec44dfaa7c8a49c14221b977d4052adb4d |
8,479 | static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
{
mp_image_t *dmpi;
// hope we'll get DR buffer:
dmpi=ff_vf_get_image(vf->next, IMGFMT_YV12,
MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE |
((vf->priv->scaleh == 1) ? MP_IMGFLAG_READABLE : 0),
mpi->w * vf->priv->scalew,
mpi->h / vf->priv->scaleh - vf->priv->skipline);
toright(dmpi->planes, mpi->planes, dmpi->stride,
mpi->stride, mpi->w, mpi->h, vf->priv);
return ff_vf_next_put_image(vf,dmpi, pts);
}
| true | FFmpeg | 2f11aa141a01f97c5d2a015bd9dbdb27314b79c4 |
8,480 | static void runstate_init(void)
{
const RunStateTransition *p;
memset(&runstate_valid_transitions, 0, sizeof(runstate_valid_transitions));
for (p = &runstate_transitions_def[0]; p->from != RUN_STATE_MAX; p++) {
runstate_valid_transitions[p->from][p->to] = true;
}
}
| true | qemu | 74892d2468b9f0c56b915ce94848d6f7fac39740 |
8,481 | static void decode_vui(HEVCContext *s, HEVCSPS *sps)
{
VUI *vui = &sps->vui;
GetBitContext *gb = &s->HEVClc->gb;
GetBitContext backup;
int sar_present, alt = 0;
av_log(s->avctx, AV_LOG_DEBUG, "Decoding VUI\n");
sar_present = get_bits1(gb);
if (sar_present) {
uint8_t sar_idx = get_bits(gb, 8);
if (sar_idx < FF_ARRAY_ELEMS(vui_sar))
vui->sar = vui_sar[sar_idx];
else if (sar_idx == 255) {
vui->sar.num = get_bits(gb, 16);
vui->sar.den = get_bits(gb, 16);
} else
av_log(s->avctx, AV_LOG_WARNING,
"Unknown SAR index: %u.\n", sar_idx);
}
vui->overscan_info_present_flag = get_bits1(gb);
if (vui->overscan_info_present_flag)
vui->overscan_appropriate_flag = get_bits1(gb);
vui->video_signal_type_present_flag = get_bits1(gb);
if (vui->video_signal_type_present_flag) {
vui->video_format = get_bits(gb, 3);
vui->video_full_range_flag = get_bits1(gb);
vui->colour_description_present_flag = get_bits1(gb);
if (vui->video_full_range_flag && sps->pix_fmt == AV_PIX_FMT_YUV420P)
sps->pix_fmt = AV_PIX_FMT_YUVJ420P;
if (vui->colour_description_present_flag) {
vui->colour_primaries = get_bits(gb, 8);
vui->transfer_characteristic = get_bits(gb, 8);
vui->matrix_coeffs = get_bits(gb, 8);
// Set invalid values to "unspecified"
if (vui->colour_primaries >= AVCOL_PRI_NB)
vui->colour_primaries = AVCOL_PRI_UNSPECIFIED;
if (vui->transfer_characteristic >= AVCOL_TRC_NB)
vui->transfer_characteristic = AVCOL_TRC_UNSPECIFIED;
if (vui->matrix_coeffs >= AVCOL_SPC_NB)
vui->matrix_coeffs = AVCOL_SPC_UNSPECIFIED;
}
}
vui->chroma_loc_info_present_flag = get_bits1(gb);
if (vui->chroma_loc_info_present_flag) {
vui->chroma_sample_loc_type_top_field = get_ue_golomb_long(gb);
vui->chroma_sample_loc_type_bottom_field = get_ue_golomb_long(gb);
}
vui->neutra_chroma_indication_flag = get_bits1(gb);
vui->field_seq_flag = get_bits1(gb);
vui->frame_field_info_present_flag = get_bits1(gb);
vui->default_display_window_flag = get_bits1(gb);
// Backup context in case an alternate header is detected
if( get_bits_left(gb) >= 66)
memcpy(&backup, gb, sizeof(backup));
if (vui->default_display_window_flag) {
//TODO: * 2 is only valid for 420
vui->def_disp_win.left_offset = get_ue_golomb_long(gb) * 2;
vui->def_disp_win.right_offset = get_ue_golomb_long(gb) * 2;
vui->def_disp_win.top_offset = get_ue_golomb_long(gb) * 2;
vui->def_disp_win.bottom_offset = get_ue_golomb_long(gb) * 2;
if (s->apply_defdispwin &&
s->avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) {
av_log(s->avctx, AV_LOG_DEBUG,
"discarding vui default display window, "
"original values are l:%u r:%u t:%u b:%u\n",
vui->def_disp_win.left_offset,
vui->def_disp_win.right_offset,
vui->def_disp_win.top_offset,
vui->def_disp_win.bottom_offset);
vui->def_disp_win.left_offset =
vui->def_disp_win.right_offset =
vui->def_disp_win.top_offset =
vui->def_disp_win.bottom_offset = 0;
}
}
vui->vui_timing_info_present_flag = get_bits1(gb);
if (vui->vui_timing_info_present_flag) {
if( get_bits_left(gb) < 66) {
// The alternate syntax seem to have timing info located
// at where def_disp_win is normally located
av_log(s->avctx, AV_LOG_WARNING,
"Strange VUI timing information, retrying...\n");
vui->default_display_window_flag = 0;
memset(&vui->def_disp_win, 0, sizeof(vui->def_disp_win));
memcpy(gb, &backup, sizeof(backup));
alt = 1;
}
vui->vui_num_units_in_tick = get_bits_long(gb, 32);
vui->vui_time_scale = get_bits_long(gb, 32);
if (alt) {
av_log(s->avctx, AV_LOG_INFO, "Retry got %i/%i fps\n",
vui->vui_time_scale, vui->vui_num_units_in_tick);
}
vui->vui_poc_proportional_to_timing_flag = get_bits1(gb);
if (vui->vui_poc_proportional_to_timing_flag)
vui->vui_num_ticks_poc_diff_one_minus1 = get_ue_golomb_long(gb);
vui->vui_hrd_parameters_present_flag = get_bits1(gb);
if (vui->vui_hrd_parameters_present_flag)
decode_hrd(s, 1, sps->max_sub_layers);
}
vui->bitstream_restriction_flag = get_bits1(gb);
if (vui->bitstream_restriction_flag) {
vui->tiles_fixed_structure_flag = get_bits1(gb);
vui->motion_vectors_over_pic_boundaries_flag = get_bits1(gb);
vui->restricted_ref_pic_lists_flag = get_bits1(gb);
vui->min_spatial_segmentation_idc = get_ue_golomb_long(gb);
vui->max_bytes_per_pic_denom = get_ue_golomb_long(gb);
vui->max_bits_per_min_cu_denom = get_ue_golomb_long(gb);
vui->log2_max_mv_length_horizontal = get_ue_golomb_long(gb);
vui->log2_max_mv_length_vertical = get_ue_golomb_long(gb);
}
}
| false | FFmpeg | cbb277988afc7032e632393e2c96a70d4389ac4f |
8,483 | void ff_vc1_interp_mc(VC1Context *v)
{
MpegEncContext *s = &v->s;
H264ChromaContext *h264chroma = &v->h264chroma;
uint8_t *srcY, *srcU, *srcV;
int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
int off, off_uv;
int v_edge_pos = s->v_edge_pos >> v->field_mode;
int use_ic = v->next_use_ic;
if (!v->field_mode && !v->s.next_picture.f->data[0])
return;
mx = s->mv[1][0][0];
my = s->mv[1][0][1];
uvmx = (mx + ((mx & 3) == 3)) >> 1;
uvmy = (my + ((my & 3) == 3)) >> 1;
if (v->field_mode) {
if (v->cur_field_type != v->ref_field_type[1])
my = my - 2 + 4 * v->cur_field_type;
uvmy = uvmy - 2 + 4 * v->cur_field_type;
}
if (v->fastuvmc) {
uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
}
srcY = s->next_picture.f->data[0];
srcU = s->next_picture.f->data[1];
srcV = s->next_picture.f->data[2];
src_x = s->mb_x * 16 + (mx >> 2);
src_y = s->mb_y * 16 + (my >> 2);
uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
if (v->profile != PROFILE_ADVANCED) {
src_x = av_clip( src_x, -16, s->mb_width * 16);
src_y = av_clip( src_y, -16, s->mb_height * 16);
uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
} else {
src_x = av_clip( src_x, -17, s->avctx->coded_width);
src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
}
srcY += src_y * s->linesize + src_x;
srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
if (v->field_mode && v->ref_field_type[1]) {
srcY += s->current_picture_ptr->f->linesize[0];
srcU += s->current_picture_ptr->f->linesize[1];
srcV += s->current_picture_ptr->f->linesize[2];
}
/* for grayscale we should not try to read from unknown area */
if (s->flags & CODEC_FLAG_GRAY) {
srcU = s->edge_emu_buffer + 18 * s->linesize;
srcV = s->edge_emu_buffer + 18 * s->linesize;
}
if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
|| (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
|| (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
srcY -= s->mspel * (1 + s->linesize);
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
s->linesize, s->linesize,
17 + s->mspel * 2, 17 + s->mspel * 2,
src_x - s->mspel, src_y - s->mspel,
s->h_edge_pos, v_edge_pos);
srcY = s->edge_emu_buffer;
s->vdsp.emulated_edge_mc(uvbuf, srcU,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
srcU = uvbuf;
srcV = uvbuf + 16;
/* if we deal with range reduction we need to scale source blocks */
if (v->rangeredfrm) {
int i, j;
uint8_t *src, *src2;
src = srcY;
for (j = 0; j < 17 + s->mspel * 2; j++) {
for (i = 0; i < 17 + s->mspel * 2; i++)
src[i] = ((src[i] - 128) >> 1) + 128;
src += s->linesize;
}
src = srcU;
src2 = srcV;
for (j = 0; j < 9; j++) {
for (i = 0; i < 9; i++) {
src[i] = ((src[i] - 128) >> 1) + 128;
src2[i] = ((src2[i] - 128) >> 1) + 128;
}
src += s->uvlinesize;
src2 += s->uvlinesize;
}
}
if (use_ic) {
uint8_t (*luty )[256] = v->next_luty;
uint8_t (*lutuv)[256] = v->next_lutuv;
int i, j;
uint8_t *src, *src2;
src = srcY;
for (j = 0; j < 17 + s->mspel * 2; j++) {
int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
for (i = 0; i < 17 + s->mspel * 2; i++)
src[i] = luty[f][src[i]];
src += s->linesize;
}
src = srcU;
src2 = srcV;
for (j = 0; j < 9; j++) {
int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
for (i = 0; i < 9; i++) {
src[i] = lutuv[f][src[i]];
src2[i] = lutuv[f][src2[i]];
}
src += s->uvlinesize;
src2 += s->uvlinesize;
}
}
srcY += s->mspel * (1 + s->linesize);
}
off = 0;
off_uv = 0;
if (s->mspel) {
dxy = ((my & 3) << 2) | (mx & 3);
v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
srcY += s->linesize * 8;
v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
} else { // hpel mc
dxy = (my & 2) | ((mx & 2) >> 1);
if (!v->rnd)
s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
else
s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
}
if (s->flags & CODEC_FLAG_GRAY) return;
/* Chroma MC always uses qpel bilinear */
uvmx = (uvmx & 3) << 1;
uvmy = (uvmy & 3) << 1;
if (!v->rnd) {
h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
} else {
v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
}
}
| false | FFmpeg | 28d82b7675bea76a1349070a3cdd737d964d4775 |
8,484 | static av_cold int msrle_decode_init(AVCodecContext *avctx)
{
MsrleContext *s = avctx->priv_data;
s->avctx = avctx;
switch (avctx->bits_per_coded_sample) {
case 4:
case 8:
avctx->pix_fmt = AV_PIX_FMT_PAL8;
break;
case 24:
avctx->pix_fmt = AV_PIX_FMT_BGR24;
break;
default:
av_log(avctx, AV_LOG_ERROR, "unsupported bits per sample\n");
return AVERROR_INVALIDDATA;
}
s->frame.data[0] = NULL;
return 0;
}
| false | FFmpeg | 3b199d29cd597a3518136d78860e172060b9e83d |
8,485 | static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
{
int x = 0, y = 0;
while (bytestream2_get_bytes_left(&s->gb) > 0) {
int run_length, color;
if (y >= s->avctx->height)
return 0;
color = bytestream2_get_byte(&s->gb);
if (color & 0x80) {
run_length = 1;
}else{
run_length = (color & 0x7f) + 2;
color = bytestream2_get_byte(&s->gb);
}
if (half_horiz)
run_length *=2;
if (run_length > s->avctx->width - x)
return AVERROR_INVALIDDATA;
if (color) {
memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
if (half_vert)
memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
}
x+= run_length;
if (x >= s->avctx->width) {
x=0;
y += 1 + half_vert;
}
}
return 0;
}
| false | FFmpeg | 8b0e96e1f21b761ca15dbb470cd619a1ebf86c3e |
8,486 | static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFContext *mxf = arg;
MXFPartition *partition;
UID op;
uint64_t footer_partition;
uint32_t nb_essence_containers;
int err;
if (mxf->partitions_count+1 >= UINT_MAX / sizeof(*mxf->partitions))
return AVERROR(ENOMEM);
if ((err = av_reallocp_array(&mxf->partitions, mxf->partitions_count + 1,
sizeof(*mxf->partitions))) < 0) {
mxf->partitions_count = 0;
return err;
}
if (mxf->parsing_backward) {
/* insert the new partition pack in the middle
* this makes the entries in mxf->partitions sorted by offset */
memmove(&mxf->partitions[mxf->last_forward_partition+1],
&mxf->partitions[mxf->last_forward_partition],
(mxf->partitions_count - mxf->last_forward_partition)*sizeof(*mxf->partitions));
partition = mxf->current_partition = &mxf->partitions[mxf->last_forward_partition];
} else {
mxf->last_forward_partition++;
partition = mxf->current_partition = &mxf->partitions[mxf->partitions_count];
}
memset(partition, 0, sizeof(*partition));
mxf->partitions_count++;
partition->pack_length = avio_tell(pb) - klv_offset + size;
switch(uid[13]) {
case 2:
partition->type = Header;
break;
case 3:
partition->type = BodyPartition;
break;
case 4:
partition->type = Footer;
break;
default:
av_log(mxf->fc, AV_LOG_ERROR, "unknown partition type %i\n", uid[13]);
return AVERROR_INVALIDDATA;
}
/* consider both footers to be closed (there is only Footer and CompleteFooter) */
partition->closed = partition->type == Footer || !(uid[14] & 1);
partition->complete = uid[14] > 2;
avio_skip(pb, 4);
partition->kag_size = avio_rb32(pb);
partition->this_partition = avio_rb64(pb);
partition->previous_partition = avio_rb64(pb);
footer_partition = avio_rb64(pb);
partition->header_byte_count = avio_rb64(pb);
partition->index_byte_count = avio_rb64(pb);
partition->index_sid = avio_rb32(pb);
avio_skip(pb, 8);
partition->body_sid = avio_rb32(pb);
avio_read(pb, op, sizeof(UID));
nb_essence_containers = avio_rb32(pb);
/* some files don't have FooterPartition set in every partition */
if (footer_partition) {
if (mxf->footer_partition && mxf->footer_partition != footer_partition) {
av_log(mxf->fc, AV_LOG_ERROR,
"inconsistent FooterPartition value: %"PRIu64" != %"PRIu64"\n",
mxf->footer_partition, footer_partition);
} else {
mxf->footer_partition = footer_partition;
}
}
av_dlog(mxf->fc,
"PartitionPack: ThisPartition = 0x%"PRIX64
", PreviousPartition = 0x%"PRIX64", "
"FooterPartition = 0x%"PRIX64", IndexSID = %i, BodySID = %i\n",
partition->this_partition,
partition->previous_partition, footer_partition,
partition->index_sid, partition->body_sid);
/* sanity check PreviousPartition if set */
if (partition->previous_partition &&
mxf->run_in + partition->previous_partition >= klv_offset) {
av_log(mxf->fc, AV_LOG_ERROR,
"PreviousPartition points to this partition or forward\n");
return AVERROR_INVALIDDATA;
}
if (op[12] == 1 && op[13] == 1) mxf->op = OP1a;
else if (op[12] == 1 && op[13] == 2) mxf->op = OP1b;
else if (op[12] == 1 && op[13] == 3) mxf->op = OP1c;
else if (op[12] == 2 && op[13] == 1) mxf->op = OP2a;
else if (op[12] == 2 && op[13] == 2) mxf->op = OP2b;
else if (op[12] == 2 && op[13] == 3) mxf->op = OP2c;
else if (op[12] == 3 && op[13] == 1) mxf->op = OP3a;
else if (op[12] == 3 && op[13] == 2) mxf->op = OP3b;
else if (op[12] == 3 && op[13] == 3) mxf->op = OP3c;
else if (op[12] == 64&& op[13] == 1) mxf->op = OPSonyOpt;
else if (op[12] == 0x10) {
/* SMPTE 390m: "There shall be exactly one essence container"
* The following block deals with files that violate this, namely:
* 2011_DCPTEST_24FPS.V.mxf - two ECs, OP1a
* abcdefghiv016f56415e.mxf - zero ECs, OPAtom, output by Avid AirSpeed */
if (nb_essence_containers != 1) {
MXFOP op = nb_essence_containers ? OP1a : OPAtom;
/* only nag once */
if (!mxf->op)
av_log(mxf->fc, AV_LOG_WARNING,
"\"OPAtom\" with %u ECs - assuming %s\n",
nb_essence_containers,
op == OP1a ? "OP1a" : "OPAtom");
mxf->op = op;
} else
mxf->op = OPAtom;
} else {
av_log(mxf->fc, AV_LOG_ERROR, "unknown operational pattern: %02xh %02xh - guessing OP1a\n", op[12], op[13]);
mxf->op = OP1a;
}
if (partition->kag_size <= 0 || partition->kag_size > (1 << 20)) {
av_log(mxf->fc, AV_LOG_WARNING, "invalid KAGSize %i - guessing ", partition->kag_size);
if (mxf->op == OPSonyOpt)
partition->kag_size = 512;
else
partition->kag_size = 1;
av_log(mxf->fc, AV_LOG_WARNING, "%i\n", partition->kag_size);
}
return 0;
}
| false | FFmpeg | f5fbbbc022f723d3ccf99afd5d658a977b51c08a |
8,488 | char *av_base64_encode(uint8_t * src, int len)
{
static const char b64[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
char *ret, *dst;
unsigned i_bits = 0;
int i_shift = 0;
int bytes_remaining = len;
if (len < UINT_MAX / 4) {
ret = dst = av_malloc(len * 4 / 3 + 12);
} else
return NULL;
if (len) { // special edge case, what should we really do here?
while (bytes_remaining) {
i_bits = (i_bits << 8) + *src++;
bytes_remaining--;
i_shift += 8;
do {
*dst++ = b64[(i_bits << 6 >> i_shift) & 0x3f];
i_shift -= 6;
} while (i_shift > 6 || (bytes_remaining == 0 && i_shift > 0));
}
while ((dst - ret) & 3)
*dst++ = '=';
}
*dst = '\0';
return ret;
}
| false | FFmpeg | bd03c380ce67cffaaf3c456407cc98e02917ebf7 |
8,489 | av_cold void ff_blockdsp_init_x86(BlockDSPContext *c,
AVCodecContext *avctx)
#else
av_cold void ff_blockdsp_init_x86(BlockDSPContext *c)
#endif /* FF_API_XVMC */
{
#if HAVE_INLINE_ASM
int cpu_flags = av_get_cpu_flags();
if (INLINE_MMX(cpu_flags)) {
c->clear_block = clear_block_mmx;
c->clear_blocks = clear_blocks_mmx;
}
#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
/* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
if (CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)
return;
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */
if (INLINE_SSE(cpu_flags)) {
c->clear_block = clear_block_sse;
c->clear_blocks = clear_blocks_sse;
}
#endif /* HAVE_INLINE_ASM */
}
| false | FFmpeg | dcc39ee10e82833ce24aa57926c00ffeb1948198 |
8,490 | static int read_bfraction(VC1Context *v, GetBitContext* gb) {
v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
return 0;
}
| false | FFmpeg | dcf5bfbdb6137ffdca66e0b7c2929ced42732951 |
8,491 | AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
{
AVCodec *codec;
AVCodecContext *c;
AVStream *st;
uint8_t *picture_buf;
int size;
st = av_new_stream(oc, 0);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
/* find the mpeg1 video encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
c = &st->codec;
c->codec_type = CODEC_TYPE_VIDEO;
/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* frames per second */
c->frame_rate = 25;
c->frame_rate_base= 1;
c->gop_size = 12; /* emit one intra frame every twelve frames */
/* open it */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
/* alloc various buffers */
picture= avcodec_alloc_frame();
video_outbuf_size = 100000;
video_outbuf = malloc(video_outbuf_size);
size = c->width * c->height;
picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */
picture->data[0] = picture_buf;
picture->data[1] = picture->data[0] + size;
picture->data[2] = picture->data[1] + size / 4;
picture->linesize[0] = c->width;
picture->linesize[1] = c->width / 2;
picture->linesize[2] = c->width / 2;
return st;
}
| true | FFmpeg | e70fcf075b8f92c4e410b80c703fbdc1d531d42d |
8,492 | const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
{
if (left == top)
return vp8_submv_prob[4 - !!left];
if (!top)
return vp8_submv_prob[2];
return vp8_submv_prob[1 - !!left];
}
| true | FFmpeg | ac4b32df71bd932838043a4838b86d11e169707f |
8,493 | static inline void cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
int interrupt_request = cpu->interrupt_request;
if (unlikely(interrupt_request)) {
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG;
cpu_loop_exit(cpu);
}
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
replay_interrupt();
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
cpu_loop_exit(cpu);
}
#if defined(TARGET_I386)
else if (interrupt_request & CPU_INTERRUPT_INIT) {
X86CPU *x86_cpu = X86_CPU(cpu);
CPUArchState *env = &x86_cpu->env;
replay_interrupt();
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
cpu_loop_exit(cpu);
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
cpu_loop_exit(cpu);
}
#endif
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
replay_interrupt();
if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
*last_tb = NULL;
}
/* The target hook may have updated the 'cpu->interrupt_request';
* reload the 'interrupt_request' value */
interrupt_request = cpu->interrupt_request;
}
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
*last_tb = NULL;
}
}
if (unlikely(cpu->exit_request || replay_has_interrupt())) {
cpu->exit_request = 0;
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cpu);
}
}
| true | qemu | 027d9a7d2911e993cdcbd21c7c35d1dd058f05bb |
8,494 | int qemu_get_byte(QEMUFile *f)
{
int result;
result = qemu_peek_byte(f, 0);
qemu_file_skip(f, 1);
return result;
}
| true | qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 |
8,495 | bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
TranslationBlock *tb;
bool r = false;
tb_lock();
tb = tb_find_pc(retaddr);
if (tb) {
cpu_restore_state_from_tb(cpu, tb, retaddr);
if (tb->cflags & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
tb_phys_invalidate(tb, -1);
tb_free(tb);
r = true;
tb_unlock(); | true | qemu | d8b2239bcd8872a5c5f7534d1658fc2365caab2d |
8,496 | pflash_t *pflash_cfi01_register(target_phys_addr_t base,
DeviceState *qdev, const char *name,
target_phys_addr_t size,
BlockDriverState *bs, uint32_t sector_len,
int nb_blocs, int width,
uint16_t id0, uint16_t id1,
uint16_t id2, uint16_t id3, int be)
{
pflash_t *pfl;
target_phys_addr_t total_len;
int ret;
total_len = sector_len * nb_blocs;
/* XXX: to be fixed */
#if 0
if (total_len != (8 * 1024 * 1024) && total_len != (16 * 1024 * 1024) &&
total_len != (32 * 1024 * 1024) && total_len != (64 * 1024 * 1024))
return NULL;
#endif
pfl = g_malloc0(sizeof(pflash_t));
memory_region_init_rom_device(
&pfl->mem, be ? &pflash_cfi01_ops_be : &pflash_cfi01_ops_le, pfl,
name, size);
vmstate_register_ram(&pfl->mem, qdev);
pfl->storage = memory_region_get_ram_ptr(&pfl->mem);
memory_region_add_subregion(get_system_memory(), base, &pfl->mem);
pfl->bs = bs;
if (pfl->bs) {
/* read the initial flash content */
ret = bdrv_read(pfl->bs, 0, pfl->storage, total_len >> 9);
if (ret < 0) {
memory_region_del_subregion(get_system_memory(), &pfl->mem);
vmstate_unregister_ram(&pfl->mem, qdev);
memory_region_destroy(&pfl->mem);
g_free(pfl);
return NULL;
}
bdrv_attach_dev_nofail(pfl->bs, pfl);
}
if (pfl->bs) {
pfl->ro = bdrv_is_read_only(pfl->bs);
} else {
pfl->ro = 0;
}
pfl->timer = qemu_new_timer_ns(vm_clock, pflash_timer, pfl);
pfl->base = base;
pfl->sector_len = sector_len;
pfl->total_len = total_len;
pfl->width = width;
pfl->wcycle = 0;
pfl->cmd = 0;
pfl->status = 0;
pfl->ident[0] = id0;
pfl->ident[1] = id1;
pfl->ident[2] = id2;
pfl->ident[3] = id3;
/* Hardcoded CFI table */
pfl->cfi_len = 0x52;
/* Standard "QRY" string */
pfl->cfi_table[0x10] = 'Q';
pfl->cfi_table[0x11] = 'R';
pfl->cfi_table[0x12] = 'Y';
/* Command set (Intel) */
pfl->cfi_table[0x13] = 0x01;
pfl->cfi_table[0x14] = 0x00;
/* Primary extended table address (none) */
pfl->cfi_table[0x15] = 0x31;
pfl->cfi_table[0x16] = 0x00;
/* Alternate command set (none) */
pfl->cfi_table[0x17] = 0x00;
pfl->cfi_table[0x18] = 0x00;
/* Alternate extended table (none) */
pfl->cfi_table[0x19] = 0x00;
pfl->cfi_table[0x1A] = 0x00;
/* Vcc min */
pfl->cfi_table[0x1B] = 0x45;
/* Vcc max */
pfl->cfi_table[0x1C] = 0x55;
/* Vpp min (no Vpp pin) */
pfl->cfi_table[0x1D] = 0x00;
/* Vpp max (no Vpp pin) */
pfl->cfi_table[0x1E] = 0x00;
/* Reserved */
pfl->cfi_table[0x1F] = 0x07;
/* Timeout for min size buffer write */
pfl->cfi_table[0x20] = 0x07;
/* Typical timeout for block erase */
pfl->cfi_table[0x21] = 0x0a;
/* Typical timeout for full chip erase (4096 ms) */
pfl->cfi_table[0x22] = 0x00;
/* Reserved */
pfl->cfi_table[0x23] = 0x04;
/* Max timeout for buffer write */
pfl->cfi_table[0x24] = 0x04;
/* Max timeout for block erase */
pfl->cfi_table[0x25] = 0x04;
/* Max timeout for chip erase */
pfl->cfi_table[0x26] = 0x00;
/* Device size */
pfl->cfi_table[0x27] = ctz32(total_len); // + 1;
/* Flash device interface (8 & 16 bits) */
pfl->cfi_table[0x28] = 0x02;
pfl->cfi_table[0x29] = 0x00;
/* Max number of bytes in multi-bytes write */
if (width == 1) {
pfl->cfi_table[0x2A] = 0x08;
} else {
pfl->cfi_table[0x2A] = 0x0B;
}
pfl->writeblock_size = 1 << pfl->cfi_table[0x2A];
pfl->cfi_table[0x2B] = 0x00;
/* Number of erase block regions (uniform) */
pfl->cfi_table[0x2C] = 0x01;
/* Erase block region 1 */
pfl->cfi_table[0x2D] = nb_blocs - 1;
pfl->cfi_table[0x2E] = (nb_blocs - 1) >> 8;
pfl->cfi_table[0x2F] = sector_len >> 8;
pfl->cfi_table[0x30] = sector_len >> 16;
/* Extended */
pfl->cfi_table[0x31] = 'P';
pfl->cfi_table[0x32] = 'R';
pfl->cfi_table[0x33] = 'I';
pfl->cfi_table[0x34] = '1';
pfl->cfi_table[0x35] = '1';
pfl->cfi_table[0x36] = 0x00;
pfl->cfi_table[0x37] = 0x00;
pfl->cfi_table[0x38] = 0x00;
pfl->cfi_table[0x39] = 0x00;
pfl->cfi_table[0x3a] = 0x00;
pfl->cfi_table[0x3b] = 0x00;
pfl->cfi_table[0x3c] = 0x00;
return pfl;
}
| true | qemu | 262e1eaafabf32d33a9fa0b03b3c8ea426c5ae1b |
8,497 | static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr,
UHCI_TD *td, uint32_t td_addr, uint32_t *int_mask)
{
int len = 0, max_len;
bool spd;
bool queuing = (q != NULL);
uint8_t pid = td->token & 0xff;
UHCIAsync *async = uhci_async_find_td(s, td_addr);
if (async) {
if (uhci_queue_verify(async->queue, qh_addr, td, td_addr, queuing)) {
assert(q == NULL || q == async->queue);
q = async->queue;
} else {
uhci_queue_free(async->queue, "guest re-used pending td");
async = NULL;
if (q == NULL) {
q = uhci_queue_find(s, td);
if (q && !uhci_queue_verify(q, qh_addr, td, td_addr, queuing)) {
uhci_queue_free(q, "guest re-used qh");
q = NULL;
if (q) {
q->valid = 32;
/* Is active ? */
if (!(td->ctrl & TD_CTRL_ACTIVE)) {
if (async) {
/* Guest marked a pending td non-active, cancel the queue */
uhci_queue_free(async->queue, "pending td non-active");
/*
* uhci11d spec page 22: "Even if the Active bit in the TD is already
* cleared when the TD is fetched ... an IOC interrupt is generated"
*/
if (td->ctrl & TD_CTRL_IOC) {
*int_mask |= 0x01;
return TD_RESULT_NEXT_QH;
if (async) {
if (queuing) {
/* we are busy filling the queue, we are not prepared
to consume completed packages then, just leave them
in async state */
return TD_RESULT_ASYNC_CONT;
if (!async->done) {
UHCI_TD last_td;
UHCIAsync *last = QTAILQ_LAST(&async->queue->asyncs, asyncs_head);
/*
* While we are waiting for the current td to complete, the guest
* may have added more tds to the queue. Note we re-read the td
* rather then caching it, as we want to see guest made changes!
*/
uhci_read_td(s, &last_td, last->td_addr);
uhci_queue_fill(async->queue, &last_td);
return TD_RESULT_ASYNC_CONT;
uhci_async_unlink(async);
goto done;
/* Allocate new packet */
if (q == NULL) {
USBDevice *dev = uhci_find_device(s, (td->token >> 8) & 0x7f);
USBEndpoint *ep = usb_ep_get(dev, pid, (td->token >> 15) & 0xf);
q = uhci_queue_new(s, qh_addr, td, ep);
async = uhci_async_alloc(q, td_addr);
max_len = ((td->token >> 21) + 1) & 0x7ff;
spd = (pid == USB_TOKEN_IN && (td->ctrl & TD_CTRL_SPD) != 0);
usb_packet_setup(&async->packet, pid, q->ep, td_addr, spd,
(td->ctrl & TD_CTRL_IOC) != 0);
qemu_sglist_add(&async->sgl, td->buffer, max_len);
usb_packet_map(&async->packet, &async->sgl);
switch(pid) {
case USB_TOKEN_OUT:
case USB_TOKEN_SETUP:
len = usb_handle_packet(q->ep->dev, &async->packet);
if (len >= 0)
len = max_len;
break;
case USB_TOKEN_IN:
len = usb_handle_packet(q->ep->dev, &async->packet);
break;
default:
/* invalid pid : frame interrupted */
usb_packet_unmap(&async->packet, &async->sgl);
uhci_async_free(async);
s->status |= UHCI_STS_HCPERR;
uhci_update_irq(s);
return TD_RESULT_STOP_FRAME;
if (len == USB_RET_ASYNC) {
uhci_async_link(async);
if (!queuing) {
uhci_queue_fill(q, td);
return TD_RESULT_ASYNC_START;
async->packet.result = len;
done:
len = uhci_complete_td(s, td, async, int_mask);
usb_packet_unmap(&async->packet, &async->sgl);
uhci_async_free(async);
return len; | true | qemu | 7f102ebeb5bad7b723a25557234b0feb493f6134 |
8,498 | static int writev_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
bool Cflag = false, qflag = false;
int flags = 0;
int c, cnt;
char *buf;
int64_t offset;
/* Some compilers get confused and warn if this is not initialized. */
int total = 0;
int nr_iov;
int pattern = 0xcd;
QEMUIOVector qiov;
while ((c = getopt(argc, argv, "CqP:")) != -1) {
switch (c) {
case 'C':
Cflag = true;
break;
case 'f':
flags |= BDRV_REQ_FUA;
break;
case 'q':
qflag = true;
break;
case 'P':
pattern = parse_pattern(optarg);
if (pattern < 0) {
return 0;
}
break;
default:
return qemuio_command_usage(&writev_cmd);
}
}
if (optind > argc - 2) {
return qemuio_command_usage(&writev_cmd);
}
offset = cvtnum(argv[optind]);
if (offset < 0) {
print_cvtnum_err(offset, argv[optind]);
return 0;
}
optind++;
nr_iov = argc - optind;
buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, pattern);
if (buf == NULL) {
return 0;
}
gettimeofday(&t1, NULL);
cnt = do_aio_writev(blk, &qiov, offset, flags, &total);
gettimeofday(&t2, NULL);
if (cnt < 0) {
printf("writev failed: %s\n", strerror(-cnt));
goto out;
}
if (qflag) {
goto out;
}
/* Finally, report back -- -C gives a parsable format */
t2 = tsub(t2, t1);
print_report("wrote", &t2, offset, qiov.size, total, cnt, Cflag);
out:
qemu_iovec_destroy(&qiov);
qemu_io_free(buf);
return 0;
}
| true | qemu | 4ca1d3401b834662efddd12bd62ad80f5ef1ef05 |
8,499 | libAVMemInputPin_Receive(libAVMemInputPin *this, IMediaSample *sample)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
enum dshowDeviceType devtype = pin->filter->type;
void *priv_data;
uint8_t *buf;
int buf_size;
int index;
int64_t curtime;
dshowdebug("libAVMemInputPin_Receive(%p)\n", this);
if (!sample)
return E_POINTER;
if (devtype == VideoDevice) {
/* PTS from video devices is unreliable. */
IReferenceClock *clock = pin->filter->clock;
IReferenceClock_GetTime(clock, &curtime);
} else {
int64_t dummy;
IMediaSample_GetTime(sample, &curtime, &dummy);
curtime += pin->filter->start_time;
}
buf_size = IMediaSample_GetActualDataLength(sample);
IMediaSample_GetPointer(sample, &buf);
priv_data = pin->filter->priv_data;
index = pin->filter->stream_index;
pin->filter->callback(priv_data, index, buf, buf_size, curtime);
return S_OK;
}
| true | FFmpeg | 773eb74babe07bc5c97c32aa564efc40e2d4b00c |
8,500 | static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame, const uint8_t *data, int size)
{
VP9Context *s = avctx->priv_data;
int ret, tile_row, tile_col, i, ref = -1, row, col;
ptrdiff_t yoff = 0, uvoff = 0;
ret = decode_frame_header(avctx, data, size, &ref);
if (ret < 0) {
return ret;
} else if (!ret) {
if (!s->refs[ref]->buf[0]) {
av_log(avctx, AV_LOG_ERROR,
"Requested reference %d not available\n", ref);
return AVERROR_INVALIDDATA;
}
ret = av_frame_ref(frame, s->refs[ref]);
if (ret < 0)
return ret;
*got_frame = 1;
return 0;
}
data += ret;
size -= ret;
s->cur_frame = frame;
av_frame_unref(s->cur_frame);
if ((ret = ff_get_buffer(avctx, s->cur_frame,
s->refreshrefmask ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
return ret;
s->cur_frame->key_frame = s->keyframe;
s->cur_frame->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
: AV_PICTURE_TYPE_P;
// main tile decode loop
memset(s->above_partition_ctx, 0, s->cols);
memset(s->above_skip_ctx, 0, s->cols);
if (s->keyframe || s->intraonly)
memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
else
memset(s->above_mode_ctx, NEARESTMV, s->cols);
memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 8);
memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 8);
memset(s->above_segpred_ctx, 0, s->cols);
for (tile_row = 0; tile_row < s->tiling.tile_rows; tile_row++) {
set_tile_offset(&s->tiling.tile_row_start, &s->tiling.tile_row_end,
tile_row, s->tiling.log2_tile_rows, s->sb_rows);
for (tile_col = 0; tile_col < s->tiling.tile_cols; tile_col++) {
int64_t tile_size;
if (tile_col == s->tiling.tile_cols - 1 &&
tile_row == s->tiling.tile_rows - 1) {
tile_size = size;
} else {
tile_size = AV_RB32(data);
data += 4;
size -= 4;
}
if (tile_size > size)
return AVERROR_INVALIDDATA;
ff_vp56_init_range_decoder(&s->c_b[tile_col], data, tile_size);
if (vp56_rac_get_prob_branchy(&s->c_b[tile_col], 128)) // marker bit
return AVERROR_INVALIDDATA;
data += tile_size;
size -= tile_size;
}
for (row = s->tiling.tile_row_start;
row < s->tiling.tile_row_end;
row += 8, yoff += s->cur_frame->linesize[0] * 64,
uvoff += s->cur_frame->linesize[1] * 32) {
VP9Filter *lflvl = s->lflvl;
ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
for (tile_col = 0; tile_col < s->tiling.tile_cols; tile_col++) {
set_tile_offset(&s->tiling.tile_col_start,
&s->tiling.tile_col_end,
tile_col, s->tiling.log2_tile_cols, s->sb_cols);
memset(s->left_partition_ctx, 0, 8);
memset(s->left_skip_ctx, 0, 8);
if (s->keyframe || s->intraonly)
memset(s->left_mode_ctx, DC_PRED, 16);
else
memset(s->left_mode_ctx, NEARESTMV, 8);
memset(s->left_y_nnz_ctx, 0, 16);
memset(s->left_uv_nnz_ctx, 0, 16);
memset(s->left_segpred_ctx, 0, 8);
memcpy(&s->c, &s->c_b[tile_col], sizeof(s->c));
for (col = s->tiling.tile_col_start;
col < s->tiling.tile_col_end;
col += 8, yoff2 += 64, uvoff2 += 32, lflvl++) {
// FIXME integrate with lf code (i.e. zero after each
// use, similar to invtxfm coefficients, or similar)
memset(lflvl->mask, 0, sizeof(lflvl->mask));
if ((ret = decode_subblock(avctx, row, col, lflvl,
yoff2, uvoff2, BL_64X64)) < 0)
return ret;
}
memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c));
}
// backup pre-loopfilter reconstruction data for intra
// prediction of next row of sb64s
if (row + 8 < s->rows) {
memcpy(s->intra_pred_data[0],
s->cur_frame->data[0] + yoff +
63 * s->cur_frame->linesize[0],
8 * s->cols);
memcpy(s->intra_pred_data[1],
s->cur_frame->data[1] + uvoff +
31 * s->cur_frame->linesize[1],
4 * s->cols);
memcpy(s->intra_pred_data[2],
s->cur_frame->data[2] + uvoff +
31 * s->cur_frame->linesize[2],
4 * s->cols);
}
// loopfilter one row
if (s->filter.level) {
yoff2 = yoff;
uvoff2 = uvoff;
lflvl = s->lflvl;
for (col = 0; col < s->cols;
col += 8, yoff2 += 64, uvoff2 += 32, lflvl++)
loopfilter_subblock(avctx, lflvl, row, col, yoff2, uvoff2);
}
}
}
// bw adaptivity (or in case of parallel decoding mode, fw adaptivity
// probability maintenance between frames)
if (s->refreshctx) {
if (s->parallelmode) {
memcpy(s->prob_ctx[s->framectxid].coef, s->prob.coef,
sizeof(s->prob.coef));
s->prob_ctx[s->framectxid].p = s->prob.p;
} else {
ff_vp9_adapt_probs(s);
}
}
FFSWAP(VP9MVRefPair *, s->mv[0], s->mv[1]);
// ref frame setup
for (i = 0; i < 8; i++)
if (s->refreshrefmask & (1 << i)) {
av_frame_unref(s->refs[i]);
ret = av_frame_ref(s->refs[i], s->cur_frame);
if (ret < 0)
return ret;
}
if (s->invisible)
av_frame_unref(s->cur_frame);
else
*got_frame = 1;
return 0;
}
| true | FFmpeg | 50866c8d95bfd97c299199aec0d68291f38a72e1 |
8,501 | static int parallels_open(BlockDriverState *bs, int flags)
{
BDRVParallelsState *s = bs->opaque;
int i;
struct parallels_header ph;
bs->read_only = 1; // no write support yet
if (bdrv_pread(bs->file, 0, &ph, sizeof(ph)) != sizeof(ph))
goto fail;
if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
(le32_to_cpu(ph.version) != HEADER_VERSION)) {
goto fail;
}
bs->total_sectors = le32_to_cpu(ph.nb_sectors);
s->tracks = le32_to_cpu(ph.tracks);
s->catalog_size = le32_to_cpu(ph.catalog_entries);
s->catalog_bitmap = g_malloc(s->catalog_size * 4);
if (bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4) !=
s->catalog_size * 4)
goto fail;
for (i = 0; i < s->catalog_size; i++)
le32_to_cpus(&s->catalog_bitmap[i]);
qemu_co_mutex_init(&s->lock);
return 0;
fail:
if (s->catalog_bitmap)
g_free(s->catalog_bitmap);
return -1;
}
| true | qemu | 46536235d80a012cc4286b71426cafad0c7f41f0 |
8,502 | int inet_aton (const char * str, struct in_addr * add)
{
const char * pch = str;
unsigned int add1 = 0, add2 = 0, add3 = 0, add4 = 0;
add1 = atoi(pch);
pch = strpbrk(pch,".");
if (pch == 0 || ++pch == 0) goto done;
add2 = atoi(pch);
pch = strpbrk(pch,".");
if (pch == 0 || ++pch == 0) goto done;
add3 = atoi(pch);
pch = strpbrk(pch,".");
if (pch == 0 || ++pch == 0) goto done;
add4 = atoi(pch);
done:
add->s_addr=(add4<<24)+(add3<<16)+(add2<<8)+add1;
return 1;
}
| true | FFmpeg | 104d04182d85e8538e8934c072432a05ab7ed999 |
8,503 | static void pci_indirect(void)
{
QVirtioPCIDevice *dev;
QPCIBus *bus;
QVirtQueuePCI *vqpci;
QGuestAllocator *alloc;
QVirtioBlkReq req;
QVRingIndirectDesc *indirect;
void *addr;
uint64_t req_addr;
uint64_t capacity;
uint32_t features;
uint32_t free_head;
uint8_t status;
char *data;
bus = test_start();
dev = virtio_blk_init(bus);
/* MSI-X is not enabled */
addr = dev->addr + QVIRTIO_DEVICE_SPECIFIC_NO_MSIX;
capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr);
g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
features = qvirtio_get_features(&qvirtio_pci, &dev->vdev);
g_assert_cmphex(features & QVIRTIO_F_RING_INDIRECT_DESC, !=, 0);
features = features & ~(QVIRTIO_F_BAD_FEATURE | QVIRTIO_F_RING_EVENT_IDX |
QVIRTIO_BLK_F_SCSI);
qvirtio_set_features(&qvirtio_pci, &dev->vdev, features);
alloc = pc_alloc_init();
vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&qvirtio_pci, &dev->vdev,
alloc, 0);
qvirtio_set_driver_ok(&qvirtio_pci, &dev->vdev);
/* Write request */
req.type = QVIRTIO_BLK_T_OUT;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(alloc, &req, 512);
g_free(req.data);
indirect = qvring_indirect_desc_setup(&dev->vdev, alloc, 2);
qvring_indirect_desc_add(indirect, req_addr, 528, false);
qvring_indirect_desc_add(indirect, req_addr + 528, 1, true);
free_head = qvirtqueue_add_indirect(&vqpci->vq, indirect);
qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
QVIRTIO_BLK_TIMEOUT));
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
g_free(indirect);
guest_free(alloc, req_addr);
/* Read request */
req.type = QVIRTIO_BLK_T_IN;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(alloc, &req, 512);
g_free(req.data);
indirect = qvring_indirect_desc_setup(&dev->vdev, alloc, 2);
qvring_indirect_desc_add(indirect, req_addr, 16, false);
qvring_indirect_desc_add(indirect, req_addr + 16, 513, true);
free_head = qvirtqueue_add_indirect(&vqpci->vq, indirect);
qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
QVIRTIO_BLK_TIMEOUT));
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
data = g_malloc0(512);
memread(req_addr + 16, data, 512);
g_assert_cmpstr(data, ==, "TEST");
g_free(data);
g_free(indirect);
guest_free(alloc, req_addr);
/* End test */
guest_free(alloc, vqpci->vq.desc);
qvirtio_pci_device_disable(dev);
g_free(dev);
test_end();
}
| true | qemu | 70556264a89a268efba1d7e8e341adcdd7881eb4 |
8,504 | static void qvirtio_scsi_pci_free(QVirtIOSCSI *vs)
{
int i;
for (i = 0; i < vs->num_queues + 2; i++) {
qvirtqueue_cleanup(vs->dev->bus, vs->vq[i], vs->qs->alloc);
}
qvirtio_pci_device_disable(container_of(vs->dev, QVirtioPCIDevice, vdev));
g_free(vs->dev);
qvirtio_scsi_stop(vs->qs);
g_free(vs);
}
| true | qemu | 3caab54d081bb3ce1b237d9628dd2b8ee7680159 |
8,505 | static void *circular_buffer_task_tx( void *_URLContext)
{
URLContext *h = _URLContext;
UDPContext *s = h->priv_data;
int old_cancelstate;
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_cancelstate);
for(;;) {
int len;
uint8_t tmp[4];
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancelstate);
av_usleep(s->packet_gap);
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_cancelstate);
pthread_mutex_lock(&s->mutex);
len=av_fifo_size(s->fifo);
while (len<4) {
if (pthread_cond_wait(&s->cond, &s->mutex) < 0) {
goto end;
}
len=av_fifo_size(s->fifo);
}
av_fifo_generic_peek(s->fifo, tmp, 4, NULL);
len=AV_RL32(tmp);
if (len>0 && av_fifo_size(s->fifo)>=len+4) {
av_fifo_drain(s->fifo, 4); /* skip packet length */
av_fifo_generic_read(s->fifo, h, len, do_udp_write); /* use function for write from fifo buffer */
if (s->circular_buffer_error == len) {
/* all ok - reset error */
s->circular_buffer_error=0;
}
}
pthread_mutex_unlock(&s->mutex);
}
end:
pthread_mutex_unlock(&s->mutex);
return NULL;
}
| true | FFmpeg | 9b7a8bddac52bd05dddb28afd4dff92739946d3b |
8,506 | int ff_lpc_calc_coefs(LPCContext *s,
const int32_t *samples, int blocksize, int min_order,
int max_order, int precision,
int32_t coefs[][MAX_LPC_ORDER], int *shift,
enum FFLPCType lpc_type, int lpc_passes,
int omethod, int max_shift, int zero_shift)
{
double autoc[MAX_LPC_ORDER+1];
double ref[MAX_LPC_ORDER];
double lpc[MAX_LPC_ORDER][MAX_LPC_ORDER];
int i, j, pass = 0;
int opt_order;
av_assert2(max_order >= MIN_LPC_ORDER && max_order <= MAX_LPC_ORDER &&
lpc_type > FF_LPC_TYPE_FIXED);
/* reinit LPC context if parameters have changed */
if (blocksize != s->blocksize || max_order != s->max_order ||
lpc_type != s->lpc_type) {
ff_lpc_end(s);
ff_lpc_init(s, blocksize, max_order, lpc_type);
}
if(lpc_passes <= 0)
lpc_passes = 2;
if (lpc_type == FF_LPC_TYPE_LEVINSON || (lpc_type == FF_LPC_TYPE_CHOLESKY && lpc_passes > 1)) {
s->lpc_apply_welch_window(samples, blocksize, s->windowed_samples);
s->lpc_compute_autocorr(s->windowed_samples, blocksize, max_order, autoc);
compute_lpc_coefs(autoc, max_order, &lpc[0][0], MAX_LPC_ORDER, 0, 1);
for(i=0; i<max_order; i++)
ref[i] = fabs(lpc[i][i]);
pass++;
}
if (lpc_type == FF_LPC_TYPE_CHOLESKY) {
LLSModel m[2];
LOCAL_ALIGNED(32, double, var, [FFALIGN(MAX_LPC_ORDER+1,4)]);
double av_uninit(weight);
memset(var, 0, FFALIGN(MAX_LPC_ORDER+1,4)*sizeof(*var));
for(j=0; j<max_order; j++)
m[0].coeff[max_order-1][j] = -lpc[max_order-1][j];
for(; pass<lpc_passes; pass++){
avpriv_init_lls(&m[pass&1], max_order);
weight=0;
for(i=max_order; i<blocksize; i++){
for(j=0; j<=max_order; j++)
var[j]= samples[i-j];
if(pass){
double eval, inv, rinv;
eval= m[(pass-1)&1].evaluate_lls(&m[(pass-1)&1], var+1, max_order-1);
eval= (512>>pass) + fabs(eval - var[0]);
inv = 1/eval;
rinv = sqrt(inv);
for(j=0; j<=max_order; j++)
var[j] *= rinv;
weight += inv;
}else
weight++;
m[pass&1].update_lls(&m[pass&1], var);
}
avpriv_solve_lls(&m[pass&1], 0.001, 0);
}
for(i=0; i<max_order; i++){
for(j=0; j<max_order; j++)
lpc[i][j]=-m[(pass-1)&1].coeff[i][j];
ref[i]= sqrt(m[(pass-1)&1].variance[i] / weight) * (blocksize - max_order) / 4000;
}
for(i=max_order-1; i>0; i--)
ref[i] = ref[i-1] - ref[i];
}
opt_order = max_order;
if(omethod == ORDER_METHOD_EST) {
opt_order = estimate_best_order(ref, min_order, max_order);
i = opt_order-1;
quantize_lpc_coefs(lpc[i], i+1, precision, coefs[i], &shift[i], max_shift, zero_shift);
} else {
for(i=min_order-1; i<max_order; i++) {
quantize_lpc_coefs(lpc[i], i+1, precision, coefs[i], &shift[i], max_shift, zero_shift);
}
}
return opt_order;
}
| true | FFmpeg | e27be795f019a8ba41c33b0cf0e3a060069252ea |
8,507 | static void string_deserialize(void **native_out, void *datap,
VisitorFunc visit, Error **errp)
{
StringSerializeData *d = datap;
d->siv = string_input_visitor_new(string_output_get_string(d->sov));
visit(string_input_get_visitor(d->siv), native_out, errp);
}
| true | qemu | 2bd01ac1e238c76e201ba21f314cec46437d2c5a |
8,508 | static int rtp_read_header(AVFormatContext *s)
{
uint8_t recvbuf[RTP_MAX_PACKET_LENGTH];
char host[500], sdp[500];
int ret, port;
URLContext* in = NULL;
int payload_type;
AVCodecContext codec = { 0 };
struct sockaddr_storage addr;
AVIOContext pb;
socklen_t addrlen = sizeof(addr);
RTSPState *rt = s->priv_data;
if (!ff_network_init())
return AVERROR(EIO);
if (!rt->protocols) {
rt->protocols = ffurl_get_protocols(NULL, NULL);
if (!rt->protocols)
return AVERROR(ENOMEM);
}
ret = ffurl_open(&in, s->filename, AVIO_FLAG_READ,
&s->interrupt_callback, NULL, rt->protocols);
if (ret)
goto fail;
while (1) {
ret = ffurl_read(in, recvbuf, sizeof(recvbuf));
if (ret == AVERROR(EAGAIN))
continue;
if (ret < 0)
goto fail;
if (ret < 12) {
av_log(s, AV_LOG_WARNING, "Received too short packet\n");
continue;
}
if ((recvbuf[0] & 0xc0) != 0x80) {
av_log(s, AV_LOG_WARNING, "Unsupported RTP version packet "
"received\n");
continue;
}
if (RTP_PT_IS_RTCP(recvbuf[1]))
continue;
payload_type = recvbuf[1] & 0x7f;
break;
}
getsockname(ffurl_get_file_handle(in), (struct sockaddr*) &addr, &addrlen);
ffurl_close(in);
in = NULL;
if (ff_rtp_get_codec_info(&codec, payload_type)) {
av_log(s, AV_LOG_ERROR, "Unable to receive RTP payload type %d "
"without an SDP file describing it\n",
payload_type);
goto fail;
}
if (codec.codec_type != AVMEDIA_TYPE_DATA) {
av_log(s, AV_LOG_WARNING, "Guessing on RTP content - if not received "
"properly you need an SDP file "
"describing it\n");
}
av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
NULL, 0, s->filename);
snprintf(sdp, sizeof(sdp),
"v=0\r\nc=IN IP%d %s\r\nm=%s %d RTP/AVP %d\r\n",
addr.ss_family == AF_INET ? 4 : 6, host,
codec.codec_type == AVMEDIA_TYPE_DATA ? "application" :
codec.codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio",
port, payload_type);
av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sdp);
ffio_init_context(&pb, sdp, strlen(sdp), 0, NULL, NULL, NULL, NULL);
s->pb = &pb;
/* sdp_read_header initializes this again */
ff_network_close();
rt->media_type_mask = (1 << (AVMEDIA_TYPE_DATA+1)) - 1;
ret = sdp_read_header(s);
s->pb = NULL;
return ret;
fail:
if (in)
ffurl_close(in);
ff_network_close();
return ret;
}
| true | FFmpeg | ec4c48397641dbaf4ae8df36c32aaa5a311a11bf |
8,509 | static void qemu_aio_wait_all(void)
{
while (aio_poll(ctx, true)) {
/* Do nothing */
}
}
| true | qemu | 35ecde26018207fe723bec6efbd340db6e9c2d53 |
8,510 | static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
double seqnum;
char filename[64];
char command[64];
int stringlen;
char *pchar;
const uint8_t *p = pkt->data;
uint8_t *pp = NULL;
RTMPPacket spkt = { 0 };
GetByteContext gbc;
int ret;
bytestream2_init(&gbc, p, pkt->size);
if (ff_amf_read_string(&gbc, command, sizeof(command),
&stringlen)) {
av_log(s, AV_LOG_ERROR, "Error in PT_INVOKE\n");
return AVERROR_INVALIDDATA;
}
ret = ff_amf_read_number(&gbc, &seqnum);
if (ret)
return ret;
ret = ff_amf_read_null(&gbc);
if (ret)
return ret;
if (!strcmp(command, "FCPublish") ||
!strcmp(command, "publish")) {
ret = ff_amf_read_string(&gbc, filename,
sizeof(filename), &stringlen);
if (ret) {
if (ret == AVERROR(EINVAL))
av_log(s, AV_LOG_ERROR, "Unable to parse stream name - name too long?\n");
else
av_log(s, AV_LOG_ERROR, "Unable to parse stream name\n");
return ret;
}
// check with url
if (s->filename) {
pchar = strrchr(s->filename, '/');
if (!pchar) {
av_log(s, AV_LOG_WARNING,
"Unable to find / in url %s, bad format\n",
s->filename);
pchar = s->filename;
}
pchar++;
if (strcmp(pchar, filename))
av_log(s, AV_LOG_WARNING, "Unexpected stream %s, expecting"
" %s\n", filename, pchar);
}
rt->state = STATE_RECEIVING;
}
if (!strcmp(command, "FCPublish")) {
if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
RTMP_PT_INVOKE, 0,
RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
return ret;
}
pp = spkt.data;
ff_amf_write_string(&pp, "onFCPublish");
} else if (!strcmp(command, "publish")) {
ret = write_begin(s);
if (ret < 0)
return ret;
// Send onStatus(NetStream.Publish.Start)
return write_status(s, pkt, "NetStream.Publish.Start",
filename);
} else if (!strcmp(command, "play")) {
ret = write_begin(s);
if (ret < 0)
return ret;
rt->state = STATE_SENDING;
return write_status(s, pkt, "NetStream.Play.Start",
filename);
} else {
if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
RTMP_PT_INVOKE, 0,
RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
return ret;
}
pp = spkt.data;
ff_amf_write_string(&pp, "_result");
ff_amf_write_number(&pp, seqnum);
ff_amf_write_null(&pp);
if (!strcmp(command, "createStream")) {
rt->nb_streamid++;
if (rt->nb_streamid == 0 || rt->nb_streamid == 2)
rt->nb_streamid++; /* Values 0 and 2 are reserved */
ff_amf_write_number(&pp, rt->nb_streamid);
/* By now we don't control which streams are removed in
* deleteStream. There is no stream creation control
* if a client creates more than 2^32 - 2 streams. */
}
}
spkt.size = pp - spkt.data;
ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
&rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&spkt);
return ret;
}
| true | FFmpeg | d6ded94036e43a04889f4ff2813a7f7dd60b82fe |
8,511 | static void coroutine_fn v9fs_open(void *opaque)
{
int flags;
int32_t fid;
int32_t mode;
V9fsQID qid;
int iounit = 0;
ssize_t err = 0;
size_t offset = 7;
struct stat stbuf;
V9fsFidState *fidp;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
if (s->proto_version == V9FS_PROTO_2000L) {
err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode);
} else {
uint8_t modebyte;
err = pdu_unmarshal(pdu, offset, "db", &fid, &modebyte);
mode = modebyte;
}
if (err < 0) {
goto out_nofid;
}
trace_v9fs_open(pdu->tag, pdu->id, fid, mode);
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
BUG_ON(fidp->fid_type != P9_FID_NONE);
err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
if (err < 0) {
goto out;
}
stat_to_qid(&stbuf, &qid);
if (S_ISDIR(stbuf.st_mode)) {
err = v9fs_co_opendir(pdu, fidp);
if (err < 0) {
goto out;
}
fidp->fid_type = P9_FID_DIR;
err = pdu_marshal(pdu, offset, "Qd", &qid, 0);
if (err < 0) {
goto out;
}
err += offset;
} else {
if (s->proto_version == V9FS_PROTO_2000L) {
flags = get_dotl_openflags(s, mode);
} else {
flags = omode_to_uflags(mode);
}
if (is_ro_export(&s->ctx)) {
if (mode & O_WRONLY || mode & O_RDWR ||
mode & O_APPEND || mode & O_TRUNC) {
err = -EROFS;
goto out;
}
}
err = v9fs_co_open(pdu, fidp, flags);
if (err < 0) {
goto out;
}
fidp->fid_type = P9_FID_FILE;
fidp->open_flags = flags;
if (flags & O_EXCL) {
/*
* We let the host file system do O_EXCL check
* We should not reclaim such fd
*/
fidp->flags |= FID_NON_RECLAIMABLE;
}
iounit = get_iounit(pdu, &fidp->path);
err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
if (err < 0) {
goto out;
}
err += offset;
}
trace_v9fs_open_return(pdu->tag, pdu->id,
qid.type, qid.version, qid.path, iounit);
out:
put_fid(pdu, fidp);
out_nofid:
pdu_complete(pdu, err);
}
| true | qemu | 49dd946bb5419681c8668b09a6d10f42bc707b78 |
8,512 | static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
BlkMigDevState *bmds;
int64_t sectors;
if (!bdrv_is_read_only(bs)) {
sectors = bdrv_nb_sectors(bs);
if (sectors <= 0) {
return;
}
bmds = g_malloc0(sizeof(BlkMigDevState));
bmds->bs = bs;
bmds->bulk_completed = 0;
bmds->total_sectors = sectors;
bmds->completed_sectors = 0;
bmds->shared_base = block_mig_state.shared_base;
alloc_aio_bitmap(bmds);
error_setg(&bmds->blocker, "block device is in use by migration");
bdrv_op_block_all(bs, bmds->blocker);
bdrv_ref(bs);
block_mig_state.total_sector_sum += sectors;
if (bmds->shared_base) {
DPRINTF("Start migration for %s with shared base image\n",
bs->device_name);
} else {
DPRINTF("Start full migration for %s\n", bs->device_name);
}
QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
}
}
| true | qemu | 5839e53bbc0fec56021d758aab7610df421ed8c8 |
8,513 | static void megasas_command_complete(SCSIRequest *req, uint32_t status,
size_t resid)
{
MegasasCmd *cmd = req->hba_private;
uint8_t cmd_status = MFI_STAT_OK;
trace_megasas_command_complete(cmd->index, status, resid);
if (req->io_canceled) {
return;
}
if (cmd->req == NULL) {
/*
* Internal command complete
*/
cmd_status = megasas_finish_internal_dcmd(cmd, req, resid);
if (cmd_status == MFI_STAT_INVALID_STATUS) {
return;
}
} else {
req->status = status;
trace_megasas_scsi_complete(cmd->index, req->status,
cmd->iov_size, req->cmd.xfer);
if (req->status != GOOD) {
cmd_status = MFI_STAT_SCSI_DONE_WITH_ERROR;
}
if (req->status == CHECK_CONDITION) {
megasas_copy_sense(cmd);
}
cmd->frame->header.scsi_status = req->status;
}
cmd->frame->header.cmd_status = cmd_status;
megasas_complete_command(cmd);
}
| true | qemu | 87e459a810d7b1ec1638085b5a80ea3d9b43119a |
8,514 | static void GLZWDecodeInit(GifState * s, int csize)
{
/* read buffer */
s->eob_reached = 0;
s->pbuf = s->buf;
s->ebuf = s->buf;
s->bbuf = 0;
s->bbits = 0;
/* decoder */
s->codesize = csize;
s->cursize = s->codesize + 1;
s->curmask = mask[s->cursize];
s->top_slot = 1 << s->cursize;
s->clear_code = 1 << s->codesize;
s->end_code = s->clear_code + 1;
s->slot = s->newcodes = s->clear_code + 2;
s->oc = s->fc = 0;
s->sp = s->stack;
}
| true | FFmpeg | 0b54f3c0878a3acaa9142e4f24942e762d97e350 |
8,515 | SchroFrame *ff_create_schro_frame(AVCodecContext *avctx,
SchroFrameFormat schro_frame_fmt)
{
AVFrame *p_pic;
SchroFrame *p_frame;
int y_width, uv_width;
int y_height, uv_height;
int i;
y_width = avctx->width;
y_height = avctx->height;
uv_width = y_width >> (SCHRO_FRAME_FORMAT_H_SHIFT(schro_frame_fmt));
uv_height = y_height >> (SCHRO_FRAME_FORMAT_V_SHIFT(schro_frame_fmt));
p_pic = av_frame_alloc();
if (!p_pic)
return NULL;
if (ff_get_buffer(avctx, p_pic, AV_GET_BUFFER_FLAG_REF) < 0) {
av_frame_free(&p_pic);
return NULL;
}
p_frame = schro_frame_new();
p_frame->format = schro_frame_fmt;
p_frame->width = y_width;
p_frame->height = y_height;
schro_frame_set_free_callback(p_frame, free_schro_frame, p_pic);
for (i = 0; i < 3; ++i) {
p_frame->components[i].width = i ? uv_width : y_width;
p_frame->components[i].stride = p_pic->linesize[i];
p_frame->components[i].height = i ? uv_height : y_height;
p_frame->components[i].length =
p_frame->components[i].stride * p_frame->components[i].height;
p_frame->components[i].data = p_pic->data[i];
if (i) {
p_frame->components[i].v_shift =
SCHRO_FRAME_FORMAT_V_SHIFT(p_frame->format);
p_frame->components[i].h_shift =
SCHRO_FRAME_FORMAT_H_SHIFT(p_frame->format);
}
}
return p_frame;
}
| true | FFmpeg | 220b24c7c97dc033ceab1510549f66d0e7b52ef1 |
8,516 | static int applehttp_open(URLContext *h, const char *uri, int flags)
{
AppleHTTPContext *s;
int ret, i;
const char *nested_url;
if (flags & AVIO_FLAG_WRITE)
return AVERROR(ENOSYS);
s = av_mallocz(sizeof(AppleHTTPContext));
if (!s)
return AVERROR(ENOMEM);
h->priv_data = s;
h->is_streamed = 1;
if (av_strstart(uri, "applehttp+", &nested_url)) {
av_strlcpy(s->playlisturl, nested_url, sizeof(s->playlisturl));
} else if (av_strstart(uri, "applehttp://", &nested_url)) {
av_strlcpy(s->playlisturl, "http://", sizeof(s->playlisturl));
av_strlcat(s->playlisturl, nested_url, sizeof(s->playlisturl));
} else {
av_log(h, AV_LOG_ERROR, "Unsupported url %s\n", uri);
ret = AVERROR(EINVAL);
goto fail;
}
if ((ret = parse_playlist(h, s->playlisturl)) < 0)
goto fail;
if (s->n_segments == 0 && s->n_variants > 0) {
int max_bandwidth = 0, maxvar = -1;
for (i = 0; i < s->n_variants; i++) {
if (s->variants[i]->bandwidth > max_bandwidth || i == 0) {
max_bandwidth = s->variants[i]->bandwidth;
maxvar = i;
}
}
av_strlcpy(s->playlisturl, s->variants[maxvar]->url,
sizeof(s->playlisturl));
if ((ret = parse_playlist(h, s->playlisturl)) < 0)
goto fail;
}
if (s->n_segments == 0) {
av_log(h, AV_LOG_WARNING, "Empty playlist\n");
ret = AVERROR(EIO);
goto fail;
}
s->cur_seq_no = s->start_seq_no;
if (!s->finished && s->n_segments >= 3)
s->cur_seq_no = s->start_seq_no + s->n_segments - 3;
return 0;
fail:
av_free(s);
return ret;
}
| true | FFmpeg | 1ca87d600bc069fe4cf497c410b4f794e88a122d |
8,517 | static int mpeg_decode_slice(AVCodecContext *avctx,
AVPicture *pict,
int start_code,
UINT8 *buf, int buf_size)
{
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
int ret;
start_code = (start_code - 1) & 0xff;
if (start_code >= s->mb_height)
return -1;
s->last_dc[0] = 1 << (7 + s->intra_dc_precision);
s->last_dc[1] = s->last_dc[0];
s->last_dc[2] = s->last_dc[0];
memset(s->last_mv, 0, sizeof(s->last_mv));
s->mb_x = -1;
s->mb_y = start_code;
s->mb_incr = 0;
/* start frame decoding */
if (s->first_slice) {
s->first_slice = 0;
MPV_frame_start(s);
}
init_get_bits(&s->gb, buf, buf_size);
s->qscale = get_qscale(s);
/* extra slice info */
while (get_bits1(&s->gb) != 0) {
skip_bits(&s->gb, 8);
}
for(;;) {
clear_blocks(s->block[0]);
emms_c();
ret = mpeg_decode_mb(s, s->block);
dprintf("ret=%d\n", ret);
if (ret < 0)
return -1;
if (ret == 1)
break;
MPV_decode_mb(s, s->block);
}
emms_c();
/* end of slice reached */
if (s->mb_x == (s->mb_width - 1) &&
s->mb_y == (s->mb_height - 1)) {
/* end of image */
UINT8 **picture;
MPV_frame_end(s);
/* XXX: incorrect reported qscale for mpeg2 */
if (s->pict_type == B_TYPE) {
picture = s->current_picture;
avctx->quality = s->qscale;
} else {
/* latency of 1 frame for I and P frames */
/* XXX: use another variable than picture_number */
if (s->picture_number == 0) {
picture = NULL;
} else {
picture = s->last_picture;
avctx->quality = s->last_qscale;
}
s->last_qscale = s->qscale;
s->picture_number++;
}
if (picture) {
pict->data[0] = picture[0];
pict->data[1] = picture[1];
pict->data[2] = picture[2];
pict->linesize[0] = s->linesize;
pict->linesize[1] = s->linesize / 2;
pict->linesize[2] = s->linesize / 2;
return 1;
} else {
return 0;
}
} else {
return 0;
}
}
| true | FFmpeg | d7e9533aa06f4073a27812349b35ba5fede11ca1 |
8,519 | static int h264_handle_packet(AVFormatContext *ctx,
PayloadContext *data,
AVStream *st,
AVPacket * pkt,
uint32_t * timestamp,
const uint8_t * buf,
int len, int flags)
{
uint8_t nal = buf[0];
uint8_t type = (nal & 0x1f);
int result= 0;
uint8_t start_sequence[]= {0, 0, 1};
#ifdef DEBUG
assert(data);
assert(data->cookie == MAGIC_COOKIE);
#endif
assert(buf);
if (type >= 1 && type <= 23)
type = 1; // simplify the case. (these are all the nal types used internally by the h264 codec)
switch (type) {
case 0: // undefined;
result= -1;
break;
case 1:
av_new_packet(pkt, len+sizeof(start_sequence));
memcpy(pkt->data, start_sequence, sizeof(start_sequence));
memcpy(pkt->data+sizeof(start_sequence), buf, len);
#ifdef DEBUG
data->packet_types_received[nal & 0x1f]++;
#endif
break;
case 24: // STAP-A (one packet, multiple nals)
// consume the STAP-A NAL
buf++;
len--;
// first we are going to figure out the total size....
{
int pass= 0;
int total_length= 0;
uint8_t *dst= NULL;
for(pass= 0; pass<2; pass++) {
const uint8_t *src= buf;
int src_len= len;
do {
uint16_t nal_size = AV_RB16(src); // this going to be a problem if unaligned (can it be?)
// consume the length of the aggregate...
src += 2;
src_len -= 2;
if (nal_size <= src_len) {
if(pass==0) {
// counting...
total_length+= sizeof(start_sequence)+nal_size;
} else {
// copying
assert(dst);
memcpy(dst, start_sequence, sizeof(start_sequence));
dst+= sizeof(start_sequence);
memcpy(dst, src, nal_size);
#ifdef DEBUG
data->packet_types_received[*src & 0x1f]++;
#endif
dst+= nal_size;
}
} else {
av_log(ctx, AV_LOG_ERROR,
"nal size exceeds length: %d %d\n", nal_size, src_len);
}
// eat what we handled...
src += nal_size;
src_len -= nal_size;
if (src_len < 0)
av_log(ctx, AV_LOG_ERROR,
"Consumed more bytes than we got! (%d)\n", src_len);
} while (src_len > 2); // because there could be rtp padding..
if(pass==0) {
// now we know the total size of the packet (with the start sequences added)
av_new_packet(pkt, total_length);
dst= pkt->data;
} else {
assert(dst-pkt->data==total_length);
}
}
}
break;
case 25: // STAP-B
case 26: // MTAP-16
case 27: // MTAP-24
case 29: // FU-B
av_log(ctx, AV_LOG_ERROR,
"Unhandled type (%d) (See RFC for implementation details\n",
type);
result= -1;
break;
case 28: // FU-A (fragmented nal)
buf++;
len--; // skip the fu_indicator
{
// these are the same as above, we just redo them here for clarity...
uint8_t fu_indicator = nal;
uint8_t fu_header = *buf; // read the fu_header.
uint8_t start_bit = fu_header >> 7;
// uint8_t end_bit = (fu_header & 0x40) >> 6;
uint8_t nal_type = (fu_header & 0x1f);
uint8_t reconstructed_nal;
// reconstruct this packet's true nal; only the data follows..
reconstructed_nal = fu_indicator & (0xe0); // the original nal forbidden bit and NRI are stored in this packet's nal;
reconstructed_nal |= nal_type;
// skip the fu_header...
buf++;
len--;
#ifdef DEBUG
if (start_bit)
data->packet_types_received[nal_type]++;
#endif
if(start_bit) {
// copy in the start sequence, and the reconstructed nal....
av_new_packet(pkt, sizeof(start_sequence)+sizeof(nal)+len);
memcpy(pkt->data, start_sequence, sizeof(start_sequence));
pkt->data[sizeof(start_sequence)]= reconstructed_nal;
memcpy(pkt->data+sizeof(start_sequence)+sizeof(nal), buf, len);
} else {
av_new_packet(pkt, len);
memcpy(pkt->data, buf, len);
}
}
break;
case 30: // undefined
case 31: // undefined
default:
av_log(ctx, AV_LOG_ERROR, "Undefined type (%d)", type);
result= -1;
break;
}
return result;
}
| true | FFmpeg | eafb17d140f6772c9aac8fbf31641f24a371b2c0 |
8,520 | static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, int dstW, int chrDstW)
{
int i;
#if COMPILE_TEMPLATE_MMX
if(!(c->flags & SWS_BITEXACT)) {
long p= 4;
const uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
uint8_t *dst[4]= {aDest, dest, uDest, vDest};
x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
if (c->flags & SWS_ACCURATE_RND) {
while(p--) {
if (dst[p]) {
__asm__ volatile(
YSCALEYUV2YV121_ACCURATE
:: "r" (src[p]), "r" (dst[p] + counter[p]),
"g" (-counter[p])
: "%"REG_a
);
}
}
} else {
while(p--) {
if (dst[p]) {
__asm__ volatile(
YSCALEYUV2YV121
:: "r" (src[p]), "r" (dst[p] + counter[p]),
"g" (-counter[p])
: "%"REG_a
);
}
}
}
return;
}
#endif
for (i=0; i<dstW; i++) {
int val= (lumSrc[i]+64)>>7;
if (val&256) {
if (val<0) val=0;
else val=255;
}
dest[i]= val;
}
if (uDest)
for (i=0; i<chrDstW; i++) {
int u=(chrSrc[i ]+64)>>7;
int v=(chrSrc[i + VOFW]+64)>>7;
if ((u|v)&256) {
if (u<0) u=0;
else if (u>255) u=255;
if (v<0) v=0;
else if (v>255) v=255;
}
uDest[i]= u;
vDest[i]= v;
}
if (CONFIG_SWSCALE_ALPHA && aDest)
for (i=0; i<dstW; i++) {
int val= (alpSrc[i]+64)>>7;
aDest[i]= av_clip_uint8(val);
}
}
| true | FFmpeg | c3ab0004ae4dffc32494ae84dd15cfaa909a7884 |
8,521 | static int interp(RA144Context *ractx, int16_t *out, int a,
int copyold, int energy)
{
int work[10];
int b = NBLOCKS - a;
int i;
    // Interpolate block coefficients from this frame's fourth block and
    // the last frame's fourth block.
for (i=0; i<30; i++)
out[i] = (a * ractx->lpc_coef[0][i] + b * ractx->lpc_coef[1][i])>> 2;
if (eval_refl(work, out, ractx)) {
// The interpolated coefficients are unstable, copy either new or old
// coefficients.
int_to_int16(out, ractx->lpc_coef[copyold]);
return rescale_rms(ractx->lpc_refl_rms[copyold], energy);
} else {
return rescale_rms(rms(work), energy);
}
}
| true | FFmpeg | 643bae382c2610512652d3c5cfa7aabb450a706e |
8,523 | static int dv_extract_audio(uint8_t *frame, uint8_t *ppcm[4],
const DVprofile *sys)
{
int size, chan, i, j, d, of, smpls, freq, quant, half_ch;
uint16_t lc, rc;
const uint8_t *as_pack;
uint8_t *pcm, ipcm;
as_pack = dv_extract_pack(frame, dv_audio_source);
if (!as_pack) /* No audio ? */
return 0;
smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */
freq = as_pack[4] >> 3 & 0x07; /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */
quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
if (quant > 1)
return -1; /* unsupported quantization */
if (freq >= FF_ARRAY_ELEMS(dv_audio_frequency))
return AVERROR_INVALIDDATA;
size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
half_ch = sys->difseg_size / 2;
/* We work with 720p frames split in half, thus even frames have
* channels 0,1 and odd 2,3. */
ipcm = (sys->height == 720 && !(frame[1] & 0x0C)) ? 2 : 0;
/* for each DIF channel */
for (chan = 0; chan < sys->n_difchan; chan++) {
/* next stereo channel (50Mbps and 100Mbps only) */
pcm = ppcm[ipcm++];
if (!pcm)
break;
/* for each DIF segment */
for (i = 0; i < sys->difseg_size; i++) {
frame += 6 * 80; /* skip DIF segment header */
if (quant == 1 && i == half_ch) {
/* next stereo channel (12bit mode only) */
pcm = ppcm[ipcm++];
if (!pcm)
break;
}
/* for each AV sequence */
for (j = 0; j < 9; j++) {
for (d = 8; d < 80; d += 2) {
if (quant == 0) { /* 16bit quantization */
of = sys->audio_shuffle[i][j] +
(d - 8) / 2 * sys->audio_stride;
if (of * 2 >= size)
continue;
/* FIXME: maybe we have to admit that DV is a
* big-endian PCM */
pcm[of * 2] = frame[d + 1];
pcm[of * 2 + 1] = frame[d];
if (pcm[of * 2 + 1] == 0x80 && pcm[of * 2] == 0x00)
pcm[of * 2 + 1] = 0;
} else { /* 12bit quantization */
lc = ((uint16_t)frame[d] << 4) |
((uint16_t)frame[d + 2] >> 4);
rc = ((uint16_t)frame[d + 1] << 4) |
((uint16_t)frame[d + 2] & 0x0f);
lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc));
rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc));
of = sys->audio_shuffle[i % half_ch][j] +
(d - 8) / 3 * sys->audio_stride;
if (of * 2 >= size)
continue;
/* FIXME: maybe we have to admit that DV is a
* big-endian PCM */
pcm[of * 2] = lc & 0xff;
pcm[of * 2 + 1] = lc >> 8;
of = sys->audio_shuffle[i % half_ch + half_ch][j] +
(d - 8) / 3 * sys->audio_stride;
/* FIXME: maybe we have to admit that DV is a
* big-endian PCM */
pcm[of * 2] = rc & 0xff;
pcm[of * 2 + 1] = rc >> 8;
++d;
}
}
frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
}
}
}
return size;
}
| true | FFmpeg | 7ee191cab0dc44700f26c5784e2adeb6a779651b |
8,524 | void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
{
if (min_size < *size)
return ptr;
min_size = FFMAX(17 * min_size / 16 + 32, min_size);
ptr = av_realloc(ptr, min_size);
/* we could set this to the unmodified min_size but this is safer
* if the user lost the ptr and uses NULL now
*/
if (!ptr)
min_size = 0;
*size = min_size;
return ptr;
}
| true | FFmpeg | b3415e4c5f9205820fd6c9211ad50a4df2692a36 |
8,525 | static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
int index, const int type)
{
static const uint8_t *const scan_patterns[4] =
{ luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
int run, level, sign, vlc, limit;
const int intra = 3 * type >> 2;
const uint8_t *const scan = scan_patterns[type];
for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
if (vlc == INVALID_VLC)
return -1;
sign = (vlc & 0x1) - 1;
vlc = vlc + 1 >> 1;
if (type == 3) {
if (vlc < 3) {
run = 0;
level = vlc;
} else if (vlc < 4) {
run = 1;
level = 1;
} else {
run = vlc & 0x3;
level = (vlc + 9 >> 2) - run;
}
} else {
if (vlc < 16) {
run = svq3_dct_tables[intra][vlc].run;
level = svq3_dct_tables[intra][vlc].level;
} else if (intra) {
run = vlc & 0x7;
level = (vlc >> 3) +
((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
} else {
run = vlc & 0xF;
level = (vlc >> 4) +
((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
}
}
if ((index += run) >= limit)
return -1;
block[scan[index]] = (level ^ sign) - sign;
}
if (type != 2) {
break;
}
}
return 0;
}
| true | FFmpeg | 9a2e79116d6235c53d8e9663a8d30d1950d7431a |
8,528 | static int flac_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
{
FLACContext *s = avctx->priv_data;
int metadata_last, metadata_type, metadata_size;
int tmp = 0, i, j = 0, input_buf_size;
int16_t *samples = data, *left, *right;
*data_size = 0;
s->avctx = avctx;
if(s->max_framesize == 0){
s->max_framesize= 8192; // should hopefully be enough for the first header
s->bitstream= av_fast_realloc(s->bitstream, &s->allocated_bitstream_size, s->max_framesize);
}
if(1 && s->max_framesize){//FIXME truncated
buf_size= FFMIN(buf_size, s->max_framesize - s->bitstream_size);
input_buf_size= buf_size;
if(s->bitstream_index + s->bitstream_size + buf_size > s->allocated_bitstream_size){
// printf("memmove\n");
memmove(s->bitstream, &s->bitstream[s->bitstream_index], s->bitstream_size);
s->bitstream_index=0;
}
memcpy(&s->bitstream[s->bitstream_index + s->bitstream_size], buf, buf_size);
buf= &s->bitstream[s->bitstream_index];
buf_size += s->bitstream_size;
s->bitstream_size= buf_size;
if(buf_size < s->max_framesize){
// printf("wanna more data ...\n");
return input_buf_size;
}
}
init_get_bits(&s->gb, buf, buf_size*8);
/* fLaC signature (be) */
if (show_bits_long(&s->gb, 32) == bswap_32(ff_get_fourcc("fLaC")))
{
skip_bits(&s->gb, 32);
av_log(s->avctx, AV_LOG_DEBUG, "STREAM HEADER\n");
do {
metadata_last = get_bits(&s->gb, 1);
metadata_type = get_bits(&s->gb, 7);
metadata_size = get_bits_long(&s->gb, 24);
av_log(s->avctx, AV_LOG_DEBUG, " metadata block: flag = %d, type = %d, size = %d\n",
metadata_last, metadata_type,
metadata_size);
if(metadata_size){
switch(metadata_type)
{
case METADATA_TYPE_STREAMINFO:
if(metadata_size == 0)
av_log(s->avctx, AV_LOG_DEBUG, "size= 0 WTF!?\n");
metadata_streaminfo(s);
dump_headers(s);
break;
default:
for(i=0; i<metadata_size; i++)
skip_bits(&s->gb, 8);
}
}
} while(!metadata_last);
}
else
{
tmp = show_bits(&s->gb, 16);
if(tmp != 0xFFF8){
av_log(s->avctx, AV_LOG_ERROR, "FRAME HEADER not here\n");
while(get_bits_count(&s->gb)/8+2 < buf_size && show_bits(&s->gb, 16) != 0xFFF8)
skip_bits(&s->gb, 8);
goto end; // we may not have enough bits left to decode a frame, so try next time
}
skip_bits(&s->gb, 16);
if (decode_frame(s) < 0)
return -1;
}
#if 0
/* fix the channel order here */
if (s->order == MID_SIDE)
{
short *left = samples;
short *right = samples + s->blocksize;
for (i = 0; i < s->blocksize; i += 2)
{
uint32_t x = s->decoded[0][i];
uint32_t y = s->decoded[0][i+1];
right[i] = x - (y / 2);
left[i] = right[i] + y;
}
*data_size = 2 * s->blocksize;
}
else
{
for (i = 0; i < s->channels; i++)
{
switch(s->order)
{
case INDEPENDENT:
for (j = 0; j < s->blocksize; j++)
samples[(s->blocksize*i)+j] = s->decoded[i][j];
break;
case LEFT_SIDE:
case RIGHT_SIDE:
if (i == 0)
for (j = 0; j < s->blocksize; j++)
samples[(s->blocksize*i)+j] = s->decoded[0][j];
else
for (j = 0; j < s->blocksize; j++)
samples[(s->blocksize*i)+j] = s->decoded[0][j] - s->decoded[i][j];
break;
// case MID_SIDE:
// av_log(s->avctx, AV_LOG_DEBUG, "mid-side unsupported\n");
}
*data_size += s->blocksize;
}
}
#else
switch(s->order)
{
case INDEPENDENT:
for (j = 0; j < s->blocksize; j++)
{
for (i = 0; i < s->channels; i++)
*(samples++) = s->decoded[i][j];
}
break;
case LEFT_SIDE:
assert(s->channels == 2);
for (i = 0; i < s->blocksize; i++)
{
*(samples++) = s->decoded[0][i];
*(samples++) = s->decoded[0][i] - s->decoded[1][i];
}
break;
case RIGHT_SIDE:
assert(s->channels == 2);
for (i = 0; i < s->blocksize; i++)
{
*(samples++) = s->decoded[0][i] + s->decoded[1][i];
*(samples++) = s->decoded[1][i];
}
break;
case MID_SIDE:
assert(s->channels == 2);
for (i = 0; i < s->blocksize; i++)
{
int mid, side;
mid = s->decoded[0][i];
side = s->decoded[1][i];
mid <<= 1;
if (side & 1)
mid++;
*(samples++) = (mid + side) >> 1;
*(samples++) = (mid - side) >> 1;
}
break;
}
#endif
*data_size = (int8_t *)samples - (int8_t *)data;
av_log(s->avctx, AV_LOG_DEBUG, "data size: %d\n", *data_size);
// s->last_blocksize = s->blocksize;
end:
i= (get_bits_count(&s->gb)+7)/8;;
if(i > buf_size){
av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", i - buf_size);
return -1;
}
if(s->bitstream_size){
s->bitstream_index += i;
s->bitstream_size -= i;
return input_buf_size;
}else
return i;
}
| false | FFmpeg | 9d656110966fbdde0fd1d2e685f3ed3633ba3596 |
8,529 | int64_t avcodec_guess_channel_layout(int nb_channels, enum CodecID codec_id, const char *fmt_name)
{
switch(nb_channels) {
case 1: return AV_CH_LAYOUT_MONO;
case 2: return AV_CH_LAYOUT_STEREO;
case 3: return AV_CH_LAYOUT_SURROUND;
case 4: return AV_CH_LAYOUT_QUAD;
case 5: return AV_CH_LAYOUT_5POINT0;
case 6: return AV_CH_LAYOUT_5POINT1;
case 8: return AV_CH_LAYOUT_7POINT1;
default: return 0;
}
}
| false | FFmpeg | cc276c85d15272df6e44fb3252657a43cbd49555 |
8,530 | static inline void downmix_2f_1r_to_dolby(float *samples)
{
int i;
for (i = 0; i < 256; i++) {
samples[i] -= samples[i + 512];
samples[i + 256] += samples[i + 512];
samples[i + 512] = 0;
}
}
| false | FFmpeg | 0058584580b87feb47898e60e4b80c7f425882ad |
8,531 | static int vc1_init_common(VC1Context *v)
{
static int done = 0;
int i = 0;
static VLC_TYPE vlc_table[32372][2];
v->hrd_rate = v->hrd_buffer = NULL;
/* VLC tables */
if (!done) {
INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
ff_vc1_bfraction_bits, 1, 1,
ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
ff_vc1_norm2_bits, 1, 1,
ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
ff_vc1_norm6_bits, 1, 1,
ff_vc1_norm6_codes, 2, 2, 556);
INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
ff_vc1_imode_bits, 1, 1,
ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
for (i = 0; i < 3; i++) {
ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
ff_vc1_ttmb_bits[i], 1, 1,
ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
ff_vc1_ttblk_bits[i], 1, 1,
ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
ff_vc1_subblkpat_bits[i], 1, 1,
ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
}
for (i = 0; i < 4; i++) {
ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
ff_vc1_4mv_block_pattern_bits[i], 1, 1,
ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
ff_vc1_cbpcy_p_bits[i], 1, 1,
ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
ff_vc1_mv_diff_bits[i], 1, 1,
ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
}
for (i = 0; i < 8; i++) {
ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
&vc1_ac_tables[i][0][1], 8, 4,
&vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
/* initialize interlaced MVDATA tables (2-Ref) */
ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
ff_vc1_2ref_mvdata_bits[i], 1, 1,
ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
}
for (i = 0; i < 4; i++) {
/* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
init_vlc(&ff_vc1_intfr_4mv_mbmode_vlc[i], VC1_INTFR_4MV_MBMODE_VLC_BITS, 15,
ff_vc1_intfr_4mv_mbmode_bits[i], 1, 1,
ff_vc1_intfr_4mv_mbmode_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
/* initialize NON-4MV MBMODE VLC tables for the same */
ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
init_vlc(&ff_vc1_intfr_non4mv_mbmode_vlc[i], VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 9,
ff_vc1_intfr_non4mv_mbmode_bits[i], 1, 1,
ff_vc1_intfr_non4mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* initialize interlaced MVDATA tables (1-Ref) */
ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
ff_vc1_1ref_mvdata_bits[i], 1, 1,
ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
}
for (i = 0; i < 4; i++) {
/* Initialize 2MV Block pattern VLC tables */
ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
init_vlc(&ff_vc1_2mv_block_pattern_vlc[i], VC1_2MV_BLOCK_PATTERN_VLC_BITS, 4,
ff_vc1_2mv_block_pattern_bits[i], 1, 1,
ff_vc1_2mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
}
for (i = 0; i < 8; i++) {
/* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
init_vlc(&ff_vc1_icbpcy_vlc[i], VC1_ICBPCY_VLC_BITS, 63,
ff_vc1_icbpcy_p_bits[i], 1, 1,
ff_vc1_icbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
/* Initialize interlaced field picture MBMODE VLC tables */
ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
init_vlc(&ff_vc1_if_mmv_mbmode_vlc[i], VC1_IF_MMV_MBMODE_VLC_BITS, 8,
ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
ff_vc1_if_mmv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
init_vlc(&ff_vc1_if_1mv_mbmode_vlc[i], VC1_IF_1MV_MBMODE_VLC_BITS, 6,
ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
ff_vc1_if_1mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
}
done = 1;
}
/* Other defaults */
v->pq = -1;
v->mvrange = 0; /* 7.1.1.18, p80 */
return 0;
}
| false | FFmpeg | c742ab4e81bb9dcabfdab006d6b8b09a5808c4ce |
8,532 | static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
int *golden_frame)
{
VP56RangeCoder *c = &s->c;
int parse_filter_info = 0;
int coeff_offset = 0;
int vrt_shift = 0;
int sub_version;
int rows, cols;
int res = 1;
int separated_coeff = buf[0] & 1;
s->framep[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
ff_vp56_init_dequant(s, (buf[0] >> 1) & 0x3F);
if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
sub_version = buf[1] >> 3;
if (sub_version > 8)
return 0;
s->filter_header = buf[1] & 0x06;
if (buf[1] & 1) {
av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
return 0;
}
if (separated_coeff || !s->filter_header) {
coeff_offset = AV_RB16(buf+2) - 2;
buf += 2;
buf_size -= 2;
}
rows = buf[2]; /* number of stored macroblock rows */
cols = buf[3]; /* number of stored macroblock cols */
/* buf[4] is number of displayed macroblock rows */
/* buf[5] is number of displayed macroblock cols */
if (!s->macroblocks || /* first frame */
16*cols != s->avctx->coded_width ||
16*rows != s->avctx->coded_height) {
avcodec_set_dimensions(s->avctx, 16*cols, 16*rows);
if (s->avctx->extradata_size == 1) {
s->avctx->width -= s->avctx->extradata[0] >> 4;
s->avctx->height -= s->avctx->extradata[0] & 0x0F;
}
res = 2;
}
ff_vp56_init_range_decoder(c, buf+6, buf_size-6);
vp56_rac_gets(c, 2);
parse_filter_info = s->filter_header;
if (sub_version < 8)
vrt_shift = 5;
s->sub_version = sub_version;
} else {
if (!s->sub_version)
return 0;
if (separated_coeff || !s->filter_header) {
coeff_offset = AV_RB16(buf+1) - 2;
buf += 2;
buf_size -= 2;
}
ff_vp56_init_range_decoder(c, buf+1, buf_size-1);
*golden_frame = vp56_rac_get(c);
if (s->filter_header) {
s->deblock_filtering = vp56_rac_get(c);
if (s->deblock_filtering)
vp56_rac_get(c);
if (s->sub_version > 7)
parse_filter_info = vp56_rac_get(c);
}
}
if (parse_filter_info) {
if (vp56_rac_get(c)) {
s->filter_mode = 2;
s->sample_variance_threshold = vp56_rac_gets(c, 5) << vrt_shift;
s->max_vector_length = 2 << vp56_rac_gets(c, 3);
} else if (vp56_rac_get(c)) {
s->filter_mode = 1;
} else {
s->filter_mode = 0;
}
if (s->sub_version > 7)
s->filter_selection = vp56_rac_gets(c, 4);
else
s->filter_selection = 16;
}
s->use_huffman = vp56_rac_get(c);
s->parse_coeff = vp6_parse_coeff;
if (coeff_offset) {
buf += coeff_offset;
buf_size -= coeff_offset;
if (buf_size < 0)
return 0;
if (s->use_huffman) {
s->parse_coeff = vp6_parse_coeff_huffman;
init_get_bits(&s->gb, buf, buf_size<<3);
} else {
ff_vp56_init_range_decoder(&s->cc, buf, buf_size);
s->ccp = &s->cc;
}
} else {
s->ccp = &s->c;
}
return res;
}
| false | FFmpeg | 91f104496bb7632ed5ff03798e06dd8af014f0d9 |
8,533 | int ff_network_wait_fd_timeout(int fd, int write, int64_t timeout, AVIOInterruptCB *int_cb)
{
int ret;
int64_t wait_start = 0;
while (1) {
ret = ff_network_wait_fd(fd, write);
if (ret != AVERROR(EAGAIN))
return ret;
if (ff_check_interrupt(int_cb))
return AVERROR_EXIT;
if (timeout > 0) {
if (!wait_start)
wait_start = av_gettime();
else if (av_gettime() - wait_start > timeout)
return AVERROR(ETIMEDOUT);
}
}
}
| false | FFmpeg | 1e85b5e077e7e6fb9901bfd1a7a4f2594ba5a9a5 |
8,535 | GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp)
{
GuestNetworkInterfaceList *head = NULL, *cur_item = NULL;
struct ifaddrs *ifap, *ifa;
if (getifaddrs(&ifap) < 0) {
error_setg_errno(errp, errno, "getifaddrs failed");
goto error;
}
for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
GuestNetworkInterfaceList *info;
GuestIpAddressList **address_list = NULL, *address_item = NULL;
char addr4[INET_ADDRSTRLEN];
char addr6[INET6_ADDRSTRLEN];
int sock;
struct ifreq ifr;
unsigned char *mac_addr;
void *p;
g_debug("Processing %s interface", ifa->ifa_name);
info = guest_find_interface(head, ifa->ifa_name);
if (!info) {
info = g_malloc0(sizeof(*info));
info->value = g_malloc0(sizeof(*info->value));
info->value->name = g_strdup(ifa->ifa_name);
if (!cur_item) {
head = cur_item = info;
} else {
cur_item->next = info;
cur_item = info;
}
}
if (!info->value->has_hardware_address &&
ifa->ifa_flags & SIOCGIFHWADDR) {
/* we haven't obtained HW address yet */
sock = socket(PF_INET, SOCK_STREAM, 0);
if (sock == -1) {
error_setg_errno(errp, errno, "failed to create socket");
goto error;
}
memset(&ifr, 0, sizeof(ifr));
pstrcpy(ifr.ifr_name, IF_NAMESIZE, info->value->name);
if (ioctl(sock, SIOCGIFHWADDR, &ifr) == -1) {
error_setg_errno(errp, errno,
"failed to get MAC address of %s",
ifa->ifa_name);
goto error;
}
mac_addr = (unsigned char *) &ifr.ifr_hwaddr.sa_data;
info->value->hardware_address =
g_strdup_printf("%02x:%02x:%02x:%02x:%02x:%02x",
(int) mac_addr[0], (int) mac_addr[1],
(int) mac_addr[2], (int) mac_addr[3],
(int) mac_addr[4], (int) mac_addr[5]);
info->value->has_hardware_address = true;
close(sock);
}
if (ifa->ifa_addr &&
ifa->ifa_addr->sa_family == AF_INET) {
/* interface with IPv4 address */
address_item = g_malloc0(sizeof(*address_item));
address_item->value = g_malloc0(sizeof(*address_item->value));
p = &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr;
if (!inet_ntop(AF_INET, p, addr4, sizeof(addr4))) {
error_setg_errno(errp, errno, "inet_ntop failed");
goto error;
}
address_item->value->ip_address = g_strdup(addr4);
address_item->value->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV4;
if (ifa->ifa_netmask) {
/* Count the number of set bits in netmask.
* This is safe as '1' and '0' cannot be shuffled in netmask. */
p = &((struct sockaddr_in *)ifa->ifa_netmask)->sin_addr;
address_item->value->prefix = ctpop32(((uint32_t *) p)[0]);
}
} else if (ifa->ifa_addr &&
ifa->ifa_addr->sa_family == AF_INET6) {
/* interface with IPv6 address */
address_item = g_malloc0(sizeof(*address_item));
address_item->value = g_malloc0(sizeof(*address_item->value));
p = &((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr;
if (!inet_ntop(AF_INET6, p, addr6, sizeof(addr6))) {
error_setg_errno(errp, errno, "inet_ntop failed");
goto error;
}
address_item->value->ip_address = g_strdup(addr6);
address_item->value->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV6;
if (ifa->ifa_netmask) {
/* Count the number of set bits in netmask.
* This is safe as '1' and '0' cannot be shuffled in netmask. */
p = &((struct sockaddr_in6 *)ifa->ifa_netmask)->sin6_addr;
address_item->value->prefix =
ctpop32(((uint32_t *) p)[0]) +
ctpop32(((uint32_t *) p)[1]) +
ctpop32(((uint32_t *) p)[2]) +
ctpop32(((uint32_t *) p)[3]);
}
}
if (!address_item) {
continue;
}
address_list = &info->value->ip_addresses;
while (*address_list && (*address_list)->next) {
address_list = &(*address_list)->next;
}
if (!*address_list) {
*address_list = address_item;
} else {
(*address_list)->next = address_item;
}
info->value->has_ip_addresses = true;
}
freeifaddrs(ifap);
return head;
error:
freeifaddrs(ifap);
qapi_free_GuestNetworkInterfaceList(head);
return NULL;
}
| true | qemu | 10a2158f52796e5b2b7ce7991bde09a3c985a37b |
8,536 | PPC_OP(mulli)
{
T0 = (Ts0 * SPARAM(1));
RETURN();
}
| true | qemu | d9bce9d99f4656ae0b0127f7472db9067b8f84ab |
8,537 | static int print_uint64(DeviceState *dev, Property *prop, char *dest, size_t len)
{
uint64_t *ptr = qdev_get_prop_ptr(dev, prop);
return snprintf(dest, len, "%" PRIu64, *ptr);
}
| true | qemu | 5cb9b56acfc0b50acf7ccd2d044ab4991c47fdde |
8,539 | static av_always_inline void decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
int jobnr, int threadnr, int is_vp7)
{
VP8Context *s = avctx->priv_data;
VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
int mb_y = td->thread_mb_pos >> 16;
int mb_x, mb_xy = mb_y * s->mb_width;
int num_jobs = s->num_jobs;
VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
VP8Macroblock *mb;
uint8_t *dst[3] = {
curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
};
if (mb_y == 0)
prev_td = td;
else
prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
if (mb_y == s->mb_height - 1)
next_td = td;
else
next_td = &s->thread_data[(jobnr + 1) % num_jobs];
if (s->mb_layout == 1)
mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
else {
// Make sure the previous frame has read its segmentation map,
// if we re-use the same map.
if (prev_frame && s->segmentation.enabled &&
!s->segmentation.update_map)
ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
}
if (!is_vp7 || mb_y == 0)
memset(td->left_nnz, 0, sizeof(td->left_nnz));
s->mv_min.x = -MARGIN;
s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
// Wait for previous thread to read mb_x+2, and reach mb_y-1.
if (prev_td != td) {
if (threadnr != 0) {
check_thread_pos(td, prev_td,
mb_x + (is_vp7 ? 2 : 1),
mb_y - (is_vp7 ? 2 : 1));
} else {
check_thread_pos(td, prev_td,
mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
mb_y - (is_vp7 ? 2 : 1));
}
}
s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
s->linesize, 4);
s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
dst[2] - dst[1], 2);
if (!s->mb_layout)
decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
prev_frame && prev_frame->seg_map ?
prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
if (!mb->skip)
decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
if (mb->mode <= MODE_I4x4)
intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
else
inter_predict(s, td, dst, mb, mb_x, mb_y);
prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
if (!mb->skip) {
idct_mb(s, td, dst, mb);
} else {
AV_ZERO64(td->left_nnz);
AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
/* Reset DC block predictors if they would exist
* if the mb had coefficients */
if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
td->left_nnz[8] = 0;
s->top_nnz[mb_x][8] = 0;
}
}
if (s->deblock_filter)
filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
if (s->filter.simple)
backup_mb_border(s->top_border[mb_x + 1], dst[0],
NULL, NULL, s->linesize, 0, 1);
else
backup_mb_border(s->top_border[mb_x + 1], dst[0],
dst[1], dst[2], s->linesize, s->uvlinesize, 0);
}
prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
dst[0] += 16;
dst[1] += 8;
dst[2] += 8;
s->mv_min.x -= 64;
s->mv_max.x -= 64;
if (mb_x == s->mb_width + 1) {
update_pos(td, mb_y, s->mb_width + 3);
} else {
update_pos(td, mb_y, mb_x);
}
}
}
| true | FFmpeg | 7b5ff7d57355dc608f0fd86e3ab32a2fda65e752 |
8,540 | static void yop_next_macroblock(YopDecContext *s)
{
// If we are advancing to the next row of macroblocks
if (s->row_pos == s->frame.linesize[0] - 2) {
s->dstptr += s->frame.linesize[0];
s->row_pos = 0;
}else {
s->row_pos += 2;
}
s->dstptr += 2;
}
| true | FFmpeg | c6303f8d70c25dd6c6e6486c78bf99c9924e2b6b |
8,541 | static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist)
{
AHCICmdHdr *cmd = ad->cur_cmd;
uint32_t opts = le32_to_cpu(cmd->opts);
uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80;
int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN;
dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG));
dma_addr_t real_prdt_len = prdt_len;
uint8_t *prdt;
int i;
int r = 0;
if (!sglist_alloc_hint) {
DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts);
return -1;
}
/* map PRDT */
if (!(prdt = dma_memory_map(ad->hba->dma, prdt_addr, &prdt_len,
DMA_DIRECTION_TO_DEVICE))){
DPRINTF(ad->port_no, "map failed\n");
return -1;
}
if (prdt_len < real_prdt_len) {
DPRINTF(ad->port_no, "mapped less than expected\n");
r = -1;
goto out;
}
/* Get entries in the PRDT, init a qemu sglist accordingly */
if (sglist_alloc_hint > 0) {
AHCI_SG *tbl = (AHCI_SG *)prdt;
qemu_sglist_init(sglist, sglist_alloc_hint, ad->hba->dma);
for (i = 0; i < sglist_alloc_hint; i++) {
/* flags_size is zero-based */
qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
le32_to_cpu(tbl[i].flags_size) + 1);
}
}
out:
dma_memory_unmap(ad->hba->dma, prdt, prdt_len,
DMA_DIRECTION_TO_DEVICE, prdt_len);
return r;
}
| true | qemu | 61f52e06f0a21bab782f98ef3ea789aa6d0aa046 |
8,542 | static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
{
MpegEncContext *s = &ctx->m;
int width, height, vo_ver_id;
/* vol header */
skip_bits(gb, 1); /* random access */
s->vo_type = get_bits(gb, 8);
if (get_bits1(gb) != 0) { /* is_ol_id */
vo_ver_id = get_bits(gb, 4); /* vo_ver_id */
skip_bits(gb, 3); /* vo_priority */
} else {
vo_ver_id = 1;
    }
    s->aspect_ratio_info = get_bits(gb, 4);
    if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
        s->avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width
        s->avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height
    } else {
        s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
    }
    if ((s->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
        int chroma_format = get_bits(gb, 2);
        if (chroma_format != CHROMA_420)
            av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n");
        s->low_delay = get_bits1(gb);
        if (get_bits1(gb)) { /* vbv parameters */
            get_bits(gb, 15); /* first_half_bitrate */
            skip_bits1(gb); /* marker */
            get_bits(gb, 15); /* latter_half_bitrate */
            skip_bits1(gb); /* marker */
            get_bits(gb, 15); /* first_half_vbv_buffer_size */
            skip_bits1(gb); /* marker */
            get_bits(gb, 3); /* latter_half_vbv_buffer_size */
            get_bits(gb, 11); /* first_half_vbv_occupancy */
            skip_bits1(gb); /* marker */
            get_bits(gb, 15); /* latter_half_vbv_occupancy */
            skip_bits1(gb); /* marker */
        }
    } else {
        /* is setting low delay flag only once the smartest thing to do?
         * low delay detection won't be overriden. */
        if (s->picture_number == 0)
            s->low_delay = 0;
    }
    ctx->shape = get_bits(gb, 2); /* vol shape */
    if (ctx->shape != RECT_SHAPE)
        av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n");
    if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) {
        av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n");
        skip_bits(gb, 4); /* video_object_layer_shape_extension */
    }
    check_marker(gb, "before time_increment_resolution");
    s->avctx->time_base.den = get_bits(gb, 16);
    if (!s->avctx->time_base.den) {
        av_log(s->avctx, AV_LOG_ERROR, "time_base.den==0\n");
        s->avctx->time_base.num = 0;
        return -1;
    }
    ctx->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
    if (ctx->time_increment_bits < 1)
        ctx->time_increment_bits = 1;
    check_marker(gb, "before fixed_vop_rate");
    if (get_bits1(gb) != 0) /* fixed_vop_rate */
        s->avctx->time_base.num = get_bits(gb, ctx->time_increment_bits);
    else
        s->avctx->time_base.num = 1;
    ctx->t_frame = 0;
    if (ctx->shape != BIN_ONLY_SHAPE) {
        if (ctx->shape == RECT_SHAPE) {
            check_marker(gb, "before width");
            width = get_bits(gb, 13);
            check_marker(gb, "before height");
            height = get_bits(gb, 13);
            check_marker(gb, "after height");
            if (width && height && /* they should be non zero but who knows */
                !(s->width && s->codec_tag == AV_RL32("MP4S"))) {
                if (s->width && s->height &&
                    (s->width != width || s->height != height))
                    s->context_reinit = 1;
                s->width = width;
                s->height = height;
            }
        }
        s->progressive_sequence =
        s->progressive_frame = get_bits1(gb) ^ 1;
        s->interlaced_dct = 0;
        if (!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO))
            av_log(s->avctx, AV_LOG_INFO, /* OBMC Disable */
                   "MPEG4 OBMC not supported (very likely buggy encoder)\n");
        if (vo_ver_id == 1)
            ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */
        else
            ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */
        if (ctx->vol_sprite_usage == STATIC_SPRITE)
            av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n");
        if (ctx->vol_sprite_usage == STATIC_SPRITE ||
            ctx->vol_sprite_usage == GMC_SPRITE) {
            if (ctx->vol_sprite_usage == STATIC_SPRITE) {
                skip_bits(gb, 13); // sprite_width
                skip_bits1(gb); /* marker */
                skip_bits(gb, 13); // sprite_height
                skip_bits1(gb); /* marker */
                skip_bits(gb, 13); // sprite_left
                skip_bits1(gb); /* marker */
                skip_bits(gb, 13); // sprite_top
                skip_bits1(gb); /* marker */
            }
            ctx->num_sprite_warping_points = get_bits(gb, 6);
            if (ctx->num_sprite_warping_points > 3) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "%d sprite_warping_points\n",
                       ctx->num_sprite_warping_points);
                ctx->num_sprite_warping_points = 0;
                return -1;
            }
            s->sprite_warping_accuracy = get_bits(gb, 2);
            ctx->sprite_brightness_change = get_bits1(gb);
            if (ctx->vol_sprite_usage == STATIC_SPRITE)
                skip_bits1(gb); // low_latency_sprite
        }
        // FIXME sadct disable bit if verid!=1 && shape not rect
        if (get_bits1(gb) == 1) { /* not_8_bit */
            s->quant_precision = get_bits(gb, 4); /* quant_precision */
            if (get_bits(gb, 4) != 8) /* bits_per_pixel */
                av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n");
            if (s->quant_precision != 5)
                av_log(s->avctx, AV_LOG_ERROR,
                       "quant precision %d\n", s->quant_precision);
            if (s->quant_precision<3 || s->quant_precision>9) {
                s->quant_precision = 5;
            }
        } else {
            s->quant_precision = 5;
        }
        // FIXME a bunch of grayscale shape things
        if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */
            int i, v;
            /* load default matrixes */
            for (i = 0; i < 64; i++) {
                int j = s->dsp.idct_permutation[i];
                v = ff_mpeg4_default_intra_matrix[i];
                s->intra_matrix[j] = v;
                s->chroma_intra_matrix[j] = v;
                v = ff_mpeg4_default_non_intra_matrix[i];
                s->inter_matrix[j] = v;
                s->chroma_inter_matrix[j] = v;
            }
            /* load custom intra matrix */
            if (get_bits1(gb)) {
                int last = 0;
                for (i = 0; i < 64; i++) {
                    int j;
                    v = get_bits(gb, 8);
                    if (v == 0)
                        break;
                    last = v;
                    j = s->dsp.idct_permutation[ff_zigzag_direct[i]];
                    s->intra_matrix[j] = last;
                    s->chroma_intra_matrix[j] = last;
                }
                /* replicate last value */
                for (; i < 64; i++) {
                    int j = s->dsp.idct_permutation[ff_zigzag_direct[i]];
                    s->intra_matrix[j] = last;
                    s->chroma_intra_matrix[j] = last;
                }
            }
            /* load custom non intra matrix */
            if (get_bits1(gb)) {
                int last = 0;
                for (i = 0; i < 64; i++) {
                    int j;
                    v = get_bits(gb, 8);
                    if (v == 0)
                        break;
                    last = v;
                    j = s->dsp.idct_permutation[ff_zigzag_direct[i]];
                    s->inter_matrix[j] = v;
                    s->chroma_inter_matrix[j] = v;
                }
                /* replicate last value */
                for (; i < 64; i++) {
                    int j = s->dsp.idct_permutation[ff_zigzag_direct[i]];
                    s->inter_matrix[j] = last;
                    s->chroma_inter_matrix[j] = last;
                }
            }
            // FIXME a bunch of grayscale shape things
        }
        if (vo_ver_id != 1)
            s->quarter_sample = get_bits1(gb);
        else
            s->quarter_sample = 0;
        if (!get_bits1(gb)) {
            int pos = get_bits_count(gb);
            int estimation_method = get_bits(gb, 2);
            if (estimation_method < 2) {
                if (!get_bits1(gb)) {
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_cae */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upampling */
                }
                if (!get_bits1(gb)) {
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter4v_blocks */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */
                }
                if (!check_marker(gb, "in complexity estimation part 1")) {
                    skip_bits_long(gb, pos - get_bits_count(gb));
                    goto no_cplx_est;
                }
                if (!get_bits1(gb)) {
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */
                    ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */
                }
                if (!get_bits1(gb)) {
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */
                    ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */
                }
                if (!check_marker(gb, "in complexity estimation part 2")) {
                    skip_bits_long(gb, pos - get_bits_count(gb));
                    goto no_cplx_est;
                }
                if (estimation_method == 1) {
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */
                }
            } else
                av_log(s->avctx, AV_LOG_ERROR,
                       "Invalid Complexity estimation method %d\n",
                       estimation_method);
        } else {
no_cplx_est:
            ctx->cplx_estimation_trash_i =
            ctx->cplx_estimation_trash_p =
            ctx->cplx_estimation_trash_b = 0;
        }
        ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */
        s->data_partitioning = get_bits1(gb);
        if (s->data_partitioning)
            ctx->rvlc = get_bits1(gb);
        if (vo_ver_id != 1) {
            ctx->new_pred = get_bits1(gb);
            if (ctx->new_pred) {
                av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n");
                skip_bits(gb, 2); /* requested upstream message type */
                skip_bits1(gb); /* newpred segment type */
            }
            if (get_bits1(gb)) // reduced_res_vop
                av_log(s->avctx, AV_LOG_ERROR,
                       "reduced resolution VOP not supported\n");
        } else {
            ctx->new_pred = 0;
        }
        ctx->scalability = get_bits1(gb);
        if (ctx->scalability) {
            GetBitContext bak = *gb;
            int h_sampling_factor_n;
            int h_sampling_factor_m;
            int v_sampling_factor_n;
            int v_sampling_factor_m;
            skip_bits1(gb); // hierarchy_type
            skip_bits(gb, 4); /* ref_layer_id */
            skip_bits1(gb); /* ref_layer_sampling_dir */
            h_sampling_factor_n = get_bits(gb, 5);
            h_sampling_factor_m = get_bits(gb, 5);
            v_sampling_factor_n = get_bits(gb, 5);
            v_sampling_factor_m = get_bits(gb, 5);
            ctx->enhancement_type = get_bits1(gb);
            if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
                v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
                /* illegal scalability header (VERY broken encoder),
                 * trying to workaround */
                ctx->scalability = 0;
                *gb = bak;
            } else
                av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n");
            // bin shape stuff FIXME
        }
    }
    if (s->avctx->debug&FF_DEBUG_PICT_INFO) {
        av_log(s->avctx, AV_LOG_DEBUG, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, %s%s%s%s\n",
               s->avctx->time_base.num, s->avctx->time_base.den,
               ctx->time_increment_bits,
               s->quant_precision,
               s->progressive_sequence,
               ctx->scalability ? "scalability " :"" , s->quarter_sample ? "qpel " : "",
               s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : ""
        );
    }
    return 0;
}
| true | FFmpeg | 3edc3b159503d512c919b3d5902f7026e961823a |
8,544 | static int ac3_probe(AVProbeData *p)
{
int max_frames, first_frames, frames;
uint8_t *buf, *buf2, *end;
AC3HeaderInfo hdr;
if(p->buf_size < 7)
return 0;
max_frames = 0;
buf = p->buf;
end = buf + FFMIN(4096, p->buf_size - 7);
for(; buf < end; buf++) {
buf2 = buf;
for(frames = 0; buf2 < end; frames++) {
if(ff_ac3_parse_header(buf2, &hdr) < 0)
break;
buf2 += hdr.frame_size;
}
max_frames = FFMAX(max_frames, frames);
if(buf == p->buf)
first_frames = frames;
}
if (first_frames>=3) return AVPROBE_SCORE_MAX * 3 / 4;
else if(max_frames>=3) return AVPROBE_SCORE_MAX / 2;
else if(max_frames>=1) return 1;
else return 0;
}
| true | FFmpeg | 8c222bb405f7031b2326c601f5072ca2980b1079 |
8,545 | static void virtio_ccw_device_plugged(DeviceState *d)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
SubchDev *sch = dev->sch;
sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
d->hotplugged, 1);
}
| true | qemu | e83980455c8c7eb066405de512be7c4bace3ac4d |
8,546 | static void digic_uart_write(void *opaque, hwaddr addr, uint64_t value,
unsigned size)
{
DigicUartState *s = opaque;
unsigned char ch = value;
addr >>= 2;
switch (addr) {
case R_TX:
if (s->chr) {
qemu_chr_fe_write_all(s->chr, &ch, 1);
}
break;
case R_ST:
/*
* Ignore write to R_ST.
*
* The point is that this register is actively used
* during receiving and transmitting symbols,
* but we don't know the function of most of bits.
*
* Ignoring writes to R_ST is only a simplification
* of the model. It has no perceptible side effects
* for existing guests.
*/
break;
default:
qemu_log_mask(LOG_UNIMP,
"digic-uart: write access to unknown register 0x"
TARGET_FMT_plx, addr << 2);
}
}
| true | qemu | 6ab3fc32ea640026726bc5f9f4db622d0954fb8a |
8,547 | static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
vorbis_residue *vr,
uint_fast8_t ch,
uint_fast8_t *do_not_decode,
float *vec,
uint_fast16_t vlen,
int vr_type)
{
GetBitContext *gb = &vc->gb;
uint_fast8_t c_p_c = vc->codebooks[vr->classbook].dimensions;
uint_fast16_t ptns_to_read = vr->ptns_to_read;
uint_fast8_t *classifs = vr->classifs;
uint_fast8_t pass;
uint_fast8_t ch_used;
uint_fast8_t i,j,l;
uint_fast16_t k;
if (vr_type == 2) {
for (j = 1; j < ch; ++j)
do_not_decode[0] &= do_not_decode[j]; // FIXME - clobbering input
if (do_not_decode[0])
return 0;
ch_used = 1;
} else {
ch_used = ch;
}
AV_DEBUG(" residue type 0/1/2 decode begin, ch: %d cpc %d \n", ch, c_p_c);
for (pass = 0; pass <= vr->maxpass; ++pass) { // FIXME OPTIMIZE?
uint_fast16_t voffset;
uint_fast16_t partition_count;
uint_fast16_t j_times_ptns_to_read;
voffset = vr->begin;
for (partition_count = 0; partition_count < ptns_to_read;) { // SPEC error
if (!pass) {
uint_fast32_t inverse_class = ff_inverse[vr->classifications];
for (j_times_ptns_to_read = 0, j = 0; j < ch_used; ++j) {
if (!do_not_decode[j]) {
uint_fast32_t temp = get_vlc2(gb, vc->codebooks[vr->classbook].vlc.table,
vc->codebooks[vr->classbook].nb_bits, 3);
AV_DEBUG("Classword: %d \n", temp);
assert(vr->classifications > 1 && temp <= 65536); //needed for inverse[]
for (i = 0; i < c_p_c; ++i) {
uint_fast32_t temp2;
temp2 = (((uint_fast64_t)temp) * inverse_class) >> 32;
if (partition_count + c_p_c - 1 - i < ptns_to_read)
classifs[j_times_ptns_to_read + partition_count + c_p_c - 1 - i] = temp - temp2 * vr->classifications;
temp = temp2;
}
}
j_times_ptns_to_read += ptns_to_read;
}
}
for (i = 0; (i < c_p_c) && (partition_count < ptns_to_read); ++i) {
for (j_times_ptns_to_read = 0, j = 0; j < ch_used; ++j) {
uint_fast16_t voffs;
if (!do_not_decode[j]) {
uint_fast8_t vqclass = classifs[j_times_ptns_to_read+partition_count];
int_fast16_t vqbook = vr->books[vqclass][pass];
if (vqbook >= 0 && vc->codebooks[vqbook].codevectors) {
uint_fast16_t coffs;
unsigned dim = vc->codebooks[vqbook].dimensions; // not uint_fast8_t: 64bit is slower here on amd64
uint_fast16_t step = dim == 1 ? vr->partition_size
: FASTDIV(vr->partition_size, dim);
vorbis_codebook codebook = vc->codebooks[vqbook];
if (vr_type == 0) {
voffs = voffset+j*vlen;
for (k = 0; k < step; ++k) {
coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for (l = 0; l < dim; ++l)
vec[voffs + k + l * step] += codebook.codevectors[coffs + l]; // FPMATH
}
} else if (vr_type == 1) {
voffs = voffset + j * vlen;
for (k = 0; k < step; ++k) {
coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for (l = 0; l < dim; ++l, ++voffs) {
vec[voffs]+=codebook.codevectors[coffs+l]; // FPMATH
AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d \n", pass, voffs, vec[voffs], codebook.codevectors[coffs+l], coffs);
}
}
} else if (vr_type == 2 && ch == 2 && (voffset & 1) == 0 && (dim & 1) == 0) { // most frequent case optimized
voffs = voffset >> 1;
if (dim == 2) {
for (k = 0; k < step; ++k) {
coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * 2;
vec[voffs + k ] += codebook.codevectors[coffs ]; // FPMATH
vec[voffs + k + vlen] += codebook.codevectors[coffs + 1]; // FPMATH
}
} else if (dim == 4) {
for (k = 0; k < step; ++k, voffs += 2) {
coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * 4;
vec[voffs ] += codebook.codevectors[coffs ]; // FPMATH
vec[voffs + 1 ] += codebook.codevectors[coffs + 2]; // FPMATH
vec[voffs + vlen ] += codebook.codevectors[coffs + 1]; // FPMATH
vec[voffs + vlen + 1] += codebook.codevectors[coffs + 3]; // FPMATH
}
} else
for (k = 0; k < step; ++k) {
coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for (l = 0; l < dim; l += 2, voffs++) {
vec[voffs ] += codebook.codevectors[coffs + l ]; // FPMATH
vec[voffs + vlen] += codebook.codevectors[coffs + l + 1]; // FPMATH
AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset / ch + (voffs % ch) * vlen, vec[voffset / ch + (voffs % ch) * vlen], codebook.codevectors[coffs + l], coffs, l);
}
}
} else if (vr_type == 2) {
voffs = voffset;
for (k = 0; k < step; ++k) {
coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for (l = 0; l < dim; ++l, ++voffs) {
vec[voffs / ch + (voffs % ch) * vlen] += codebook.codevectors[coffs + l]; // FPMATH FIXME use if and counter instead of / and %
AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset / ch + (voffs % ch) * vlen, vec[voffset / ch + (voffs % ch) * vlen], codebook.codevectors[coffs + l], coffs, l);
}
}
}
}
}
j_times_ptns_to_read += ptns_to_read;
}
++partition_count;
voffset += vr->partition_size;
}
}
}
return 0;
}
| true | FFmpeg | 366d919016a679d3955f6fe5278fa7ce4f47b81e |
8,548 | static int local_name_to_path(FsContext *ctx, V9fsPath *dir_path,
const char *name, V9fsPath *target)
{
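/* Build the host path by joining the parent directory path (if any) with the entry name. */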
if (dir_path) {
v9fs_path_sprintf(target, "%s/%s", dir_path->data, name);
} else {
v9fs_path_sprintf(target, "%s", name);
}
return 0;
}
| true | qemu | 9c6b899f7a46893ab3b671e341a2234e9c0c060e |
8,549 | static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
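/* Each chroma sample covers two adjacent BGR24 pixels: sum the B/G/R pairs,
   then apply the RGB->U/V coefficients with rounding (the extra +1 in the
   shift compensates for the doubled sums). */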
int i;
for (i=0; i<width; i++) {
int b= src1[6*i + 0] + src1[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4];
int r= src1[6*i + 2] + src1[6*i + 5];
dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
}
assert(src1 == src2);
}
| false | FFmpeg | d1adad3cca407f493c3637e20ecd4f7124e69212 |
8,550 | static int ftp_parse_entry_mlsd(char *mlsd, AVIODirEntry *next)
{
char *fact, *value;
av_dlog(NULL, "%s\n", mlsd);
while(fact = av_strtok(mlsd, ";", &mlsd)) {
if (fact[0] == ' ') {
next->name = av_strdup(&fact[1]);
continue;
}
fact = av_strtok(fact, "=", &value);
if (!av_strcasecmp(fact, "type")) {
if (!av_strcasecmp(value, "cdir") || !av_strcasecmp(value, "pdir"))
return 1;
if (!av_strcasecmp(value, "dir"))
next->type = AVIO_ENTRY_DIRECTORY;
else if (!av_strcasecmp(value, "file"))
next->type = AVIO_ENTRY_FILE;
else if (!av_strcasecmp(value, "OS.unix=slink:"))
next->type = AVIO_ENTRY_SYMBOLIC_LINK;
} else if (!av_strcasecmp(fact, "modify")) {
next->modification_timestamp = ftp_parse_date(value);
} else if (!av_strcasecmp(fact, "UNIX.mode")) {
next->filemode = strtoumax(value, NULL, 8);
} else if (!av_strcasecmp(fact, "UNIX.uid") || !av_strcasecmp(fact, "UNIX.owner"))
next->user_id = strtoumax(value, NULL, 10);
else if (!av_strcasecmp(fact, "UNIX.gid") || !av_strcasecmp(fact, "UNIX.group"))
next->group_id = strtoumax(value, NULL, 10);
else if (!av_strcasecmp(fact, "size") || !av_strcasecmp(fact, "sizd"))
next->size = strtoll(value, NULL, 10);
}
return 0;
}
| false | FFmpeg | 229843aa359ae0c9519977d7fa952688db63f559 |
8,551 | static int get_scale_idx(GetBitContext *gb, int ref)
{
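/* Scale factor indices are delta-coded against ref; a decoded delta of 8
   is an escape meaning a raw 6-bit index follows. */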
int t = get_vlc2(gb, dscf_vlc.table, MPC7_DSCF_BITS, 1) - 7;
if (t == 8)
return get_bits(gb, 6);
return ref + t;
}
| false | FFmpeg | d7eabd50425a61b31e90c763a0c3e4316a725404 |
8,552 | int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
struct image_info * info)
{
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
struct exec interp_ex;
int interpreter_fd = -1; /* avoid warning */
abi_ulong load_addr, load_bias;
int load_addr_set = 0;
unsigned int interpreter_type = INTERPRETER_NONE;
unsigned char ibcs2_interpreter;
int i;
abi_ulong mapped_addr;
struct elf_phdr * elf_ppnt;
struct elf_phdr *elf_phdata;
abi_ulong k, elf_brk;
int retval;
char * elf_interpreter;
abi_ulong elf_entry, interp_load_addr = 0;
int status;
abi_ulong start_code, end_code, start_data, end_data;
abi_ulong reloc_func_desc = 0;
abi_ulong elf_stack;
char passed_fileno[6];
ibcs2_interpreter = 0;
status = 0;
load_addr = 0;
load_bias = 0;
elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
bswap_ehdr(&elf_ex);
/* First of all, some simple consistency checks */
if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
(! elf_check_arch(elf_ex.e_machine))) {
return -ENOEXEC;
}
bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
if (!bprm->p) {
retval = -E2BIG;
}
/* Now read in all of the header information */
elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
if (elf_phdata == NULL) {
return -ENOMEM;
}
i = elf_ex.e_phnum * sizeof(struct elf_phdr);
if (elf_ex.e_phoff + i <= BPRM_BUF_SIZE) {
memcpy(elf_phdata, bprm->buf + elf_ex.e_phoff, i);
} else {
retval = pread(bprm->fd, (char *) elf_phdata, i, elf_ex.e_phoff);
if (retval != i) {
perror("load_elf_binary");
exit(-1);
}
}
bswap_phdr(elf_phdata, elf_ex.e_phnum);
elf_brk = 0;
elf_stack = ~((abi_ulong)0UL);
elf_interpreter = NULL;
start_code = ~((abi_ulong)0UL);
end_code = 0;
start_data = 0;
end_data = 0;
interp_ex.a_info = 0;
elf_ppnt = elf_phdata;
for(i=0;i < elf_ex.e_phnum; i++) {
if (elf_ppnt->p_type == PT_INTERP) {
if ( elf_interpreter != NULL )
{
free (elf_phdata);
free(elf_interpreter);
close(bprm->fd);
return -EINVAL;
}
/* This is the program interpreter used for
* shared libraries - for now assume that this
* is an a.out format binary
*/
elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
if (elf_interpreter == NULL) {
free (elf_phdata);
close(bprm->fd);
return -ENOMEM;
}
if (elf_ppnt->p_offset + elf_ppnt->p_filesz <= BPRM_BUF_SIZE) {
memcpy(elf_interpreter, bprm->buf + elf_ppnt->p_offset,
elf_ppnt->p_filesz);
} else {
retval = pread(bprm->fd, elf_interpreter, elf_ppnt->p_filesz,
elf_ppnt->p_offset);
if (retval != elf_ppnt->p_filesz) {
perror("load_elf_binary2");
exit(-1);
}
}
/* If the program interpreter is one of these two,
then assume an iBCS2 image. Otherwise assume
a native linux image. */
/* JRP - Need to add X86 lib dir stuff here... */
if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
ibcs2_interpreter = 1;
}
retval = open(path(elf_interpreter), O_RDONLY);
if (retval < 0) {
perror(elf_interpreter);
exit(-1);
}
interpreter_fd = retval;
retval = read(interpreter_fd, bprm->buf, BPRM_BUF_SIZE);
if (retval < 0) {
perror("load_elf_binary3");
exit(-1);
}
if (retval < BPRM_BUF_SIZE) {
memset(bprm->buf, 0, BPRM_BUF_SIZE - retval);
}
interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
}
elf_ppnt++;
}
/* Some simple consistency checks for the interpreter */
if (elf_interpreter){
interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
/* Now figure out which format our binary is */
if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
(N_MAGIC(interp_ex) != QMAGIC)) {
interpreter_type = INTERPRETER_ELF;
}
if (interp_elf_ex.e_ident[0] != 0x7f ||
strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
interpreter_type &= ~INTERPRETER_ELF;
}
if (!interpreter_type) {
free(elf_interpreter);
free(elf_phdata);
close(bprm->fd);
return -ELIBBAD;
}
}
/* OK, we are done with that, now set up the arg stuff,
and then start this sucker up */
{
char * passed_p;
if (interpreter_type == INTERPRETER_AOUT) {
snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
passed_p = passed_fileno;
if (elf_interpreter) {
bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
bprm->argc++;
}
}
if (!bprm->p) {
if (elf_interpreter) {
free(elf_interpreter);
}
free (elf_phdata);
close(bprm->fd);
return -E2BIG;
}
}
/* OK, This is the point of no return */
info->end_data = 0;
info->end_code = 0;
info->start_mmap = (abi_ulong)ELF_START_MMAP;
info->mmap = 0;
elf_entry = (abi_ulong) elf_ex.e_entry;
#if defined(CONFIG_USE_GUEST_BASE)
/*
* In case where user has not explicitly set the guest_base, we
* probe here that should we set it automatically.
*/
if (!(have_guest_base || reserved_va)) {
/*
* Go through ELF program header table and find the address
* range used by loadable segments. Check that this is available on
* the host, and if not find a suitable value for guest_base. */
abi_ulong app_start = ~0;
abi_ulong app_end = 0;
abi_ulong addr;
unsigned long host_start;
unsigned long real_start;
unsigned long host_size;
for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
i++, elf_ppnt++) {
if (elf_ppnt->p_type != PT_LOAD)
continue;
addr = elf_ppnt->p_vaddr;
if (addr < app_start) {
app_start = addr;
}
addr += elf_ppnt->p_memsz;
if (addr > app_end) {
app_end = addr;
}
}
/* If we don't have any loadable segments then something
is very wrong. */
assert(app_start < app_end);
/* Round addresses to page boundaries. */
app_start = app_start & qemu_host_page_mask;
app_end = HOST_PAGE_ALIGN(app_end);
if (app_start < mmap_min_addr) {
host_start = HOST_PAGE_ALIGN(mmap_min_addr);
} else {
host_start = app_start;
if (host_start != app_start) {
fprintf(stderr, "qemu: Address overflow loading ELF binary\n");
abort();
}
}
host_size = app_end - app_start;
while (1) {
/* Do not use mmap_find_vma here because that is limited to the
guest address space. We are going to make the
guest address space fit whatever we're given. */
real_start = (unsigned long)mmap((void *)host_start, host_size,
PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
if (real_start == (unsigned long)-1) {
fprintf(stderr, "qemu: Virtual memory exausted\n");
abort();
}
if (real_start == host_start) {
break;
}
/* That address didn't work. Unmap and try a different one.
The address the host picked is typically right at the
top of the host address space and leaves the guest with no
usable address space. Resort to a linear search.
We already compensated for mmap_min_addr, so this should not
happen often. Probably means we got unlucky and host address
space randomization put a shared library somewhere
inconvenient. */
munmap((void *)real_start, host_size);
host_start += qemu_host_page_size;
if (host_start == app_start) {
/* Theoretically possible if host doesn't have any
suitably aligned areas. Normally the first mmap will
fail. */
fprintf(stderr, "qemu: Unable to find space for application\n");
abort();
}
}
qemu_log("Relocating guest address space from 0x" TARGET_ABI_FMT_lx
" to 0x%lx\n", app_start, real_start);
guest_base = real_start - app_start;
}
#endif /* CONFIG_USE_GUEST_BASE */
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
info->rss = 0;
bprm->p = setup_arg_pages(bprm->p, bprm, info);
info->start_stack = bprm->p;
/* Now we do a little grungy work by mmaping the ELF image into
* the correct location in memory. At this point, we assume that
* the image should be loaded at fixed address, not at a variable
* address.
*/
for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0;
int elf_flags = 0;
abi_ulong error;
if (elf_ppnt->p_type != PT_LOAD)
continue;
if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
if (elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (elf_ex.e_type == ET_DYN) {
/* Try and get dynamic programs out of the way of the default mmap
base, as well as whatever program they might try to exec. This
is because the brk will follow the loader, and is not movable. */
/* NOTE: for qemu, we do a big mmap to get enough space
without hardcoding any address */
error = target_mmap(0, ET_DYN_MAP_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANON,
-1, 0);
if (error == -1) {
perror("mmap");
exit(-1);
}
load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
}
error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
(elf_ppnt->p_filesz +
TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
elf_prot,
(MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
bprm->fd,
(elf_ppnt->p_offset -
TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
if (error == -1) {
perror("mmap");
exit(-1);
}
#ifdef LOW_ELF_STACK
if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif
if (!load_addr_set) {
load_addr_set = 1;
load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
if (elf_ex.e_type == ET_DYN) {
load_bias += error -
TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
load_addr += load_bias;
reloc_func_desc = load_bias;
}
}
k = elf_ppnt->p_vaddr;
if (k < start_code)
start_code = k;
if (start_data < k)
start_data = k;
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
if ((elf_ppnt->p_flags & PF_X) && end_code < k)
end_code = k;
if (end_data < k)
end_data = k;
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
if (k > elf_brk) {
elf_brk = TARGET_PAGE_ALIGN(k);
}
/* If the load segment requests extra zeros (e.g. bss), map it. */
if (elf_ppnt->p_filesz < elf_ppnt->p_memsz) {
abi_ulong base = load_bias + elf_ppnt->p_vaddr;
zero_bss(base + elf_ppnt->p_filesz,
base + elf_ppnt->p_memsz, elf_prot);
}
}
elf_entry += load_bias;
elf_brk += load_bias;
start_code += load_bias;
end_code += load_bias;
start_data += load_bias;
end_data += load_bias;
if (elf_interpreter) {
if (interpreter_type & 1) {
elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
} else if (interpreter_type & 2) {
elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
&interp_load_addr, bprm->buf);
}
reloc_func_desc = interp_load_addr;
close(interpreter_fd);
free(elf_interpreter);
if (elf_entry == ~((abi_ulong)0UL)) {
printf("Unable to load interpreter\n");
free(elf_phdata);
exit(-1);
return 0;
}
}
free(elf_phdata);
if (qemu_log_enabled()) {
load_symbols(&elf_ex, bprm->fd, load_bias);
}
if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
#ifdef LOW_ELF_STACK
info->start_stack = bprm->p = elf_stack - 4;
#endif
bprm->p = create_elf_tables(bprm->p,
bprm->argc,
bprm->envc,
&elf_ex,
load_addr, load_bias,
interp_load_addr,
(interpreter_type == INTERPRETER_AOUT ? 0 : 1),
info);
info->load_addr = reloc_func_desc;
info->start_brk = info->brk = elf_brk;
info->end_code = end_code;
info->start_code = start_code;
info->start_data = start_data;
info->end_data = end_data;
info->start_stack = bprm->p;
#if 0
printf("(start_brk) %x\n" , info->start_brk);
printf("(end_code) %x\n" , info->end_code);
printf("(start_code) %x\n" , info->start_code);
printf("(end_data) %x\n" , info->end_data);
printf("(start_stack) %x\n" , info->start_stack);
printf("(brk) %x\n" , info->brk);
#endif
if ( info->personality == PER_SVR4 )
{
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
emulate the SVr4 behavior. Sigh. */
mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, -1, 0);
}
info->entry = elf_entry;
#ifdef USE_ELF_CORE_DUMP
bprm->core_dump = &elf_core_dump;
#endif
return 0;
}
| false | qemu | 9058abdd180843473d440958c79a1a781be723c1 |
8,553 | static sd_rsp_type_t sd_app_command(SDState *sd,
SDRequest req)
{
DPRINTF("ACMD%d 0x%08x\n", req.cmd, req.arg);
sd->card_status |= APP_CMD;
switch (req.cmd) {
case 6: /* ACMD6: SET_BUS_WIDTH */
switch (sd->state) {
case sd_transfer_state:
sd->sd_status[0] &= 0x3f;
sd->sd_status[0] |= (req.arg & 0x03) << 6;
return sd_r1;
default:
break;
}
break;
case 13: /* ACMD13: SD_STATUS */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_sendingdata_state;
sd->data_start = 0;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */
switch (sd->state) {
case sd_transfer_state:
*(uint32_t *) sd->data = sd->blk_written;
sd->state = sd_sendingdata_state;
sd->data_start = 0;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
case 23: /* ACMD23: SET_WR_BLK_ERASE_COUNT */
switch (sd->state) {
case sd_transfer_state:
return sd_r1;
default:
break;
}
break;
case 41: /* ACMD41: SD_APP_OP_COND */
if (sd->spi) {
/* SEND_OP_CMD */
sd->state = sd_transfer_state;
return sd_r1;
}
switch (sd->state) {
case sd_idle_state:
/* We accept any voltage. 10000 V is nothing.
*
* We don't model init delay so just advance straight to ready state
* unless it's an enquiry ACMD41 (bits 23:0 == 0).
*/
if (req.arg & ACMD41_ENQUIRY_MASK) {
sd->state = sd_ready_state;
}
return sd_r3;
default:
break;
}
break;
case 42: /* ACMD42: SET_CLR_CARD_DETECT */
switch (sd->state) {
case sd_transfer_state:
/* Bringing in the 50KOhm pull-up resistor... Done. */
return sd_r1;
default:
break;
}
break;
case 51: /* ACMD51: SEND_SCR */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_sendingdata_state;
sd->data_start = 0;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
default:
/* Fall back to standard commands. */
return sd_normal_command(sd, req);
}
fprintf(stderr, "SD: ACMD%i in a wrong state\n", req.cmd);
return sd_illegal;
}
| false | qemu | dd26eb43337adf53d22b3fda3591e3837bc08b8c |
8,554 | static void bt_dummy_lmp_connection_complete(struct bt_link_s *link)
{
if (link->slave->reject_reason)
fprintf(stderr, "%s: stray LMP_not_accepted received, fixme\n",
__func__);
else
fprintf(stderr, "%s: stray LMP_accepted received, fixme\n",
__func__);
exit(-1);
}
| false | qemu | bf937a7965c1d1a6dce4f615d0ead2e2ab505004 |
8,555 | static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
uint32_t *ext_features,
uint32_t *ext2_features,
uint32_t *ext3_features)
{
int i;
int found = 0;
for ( i = 0 ; i < 32 ; i++ )
if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
*features |= 1 << i;
found = 1;
}
for ( i = 0 ; i < 32 ; i++ )
if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
*ext_features |= 1 << i;
found = 1;
}
for ( i = 0 ; i < 32 ; i++ )
if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
*ext2_features |= 1 << i;
found = 1;
}
for ( i = 0 ; i < 32 ; i++ )
if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
*ext3_features |= 1 << i;
found = 1;
}
if (!found) {
fprintf(stderr, "CPU feature %s not found\n", flagname);
}
}
| false | qemu | bb0300dc57c10b3721451b0ff566a03f9276cc77 |
8,556 | struct pxa2xx_i2c_s *pxa2xx_i2c_init(target_phys_addr_t base,
qemu_irq irq, int ioregister)
{
int iomemtype;
struct pxa2xx_i2c_s *s = (struct pxa2xx_i2c_s *)
i2c_slave_init(i2c_init_bus(), 0, sizeof(struct pxa2xx_i2c_s));
s->base = base;
s->irq = irq;
s->slave.event = pxa2xx_i2c_event;
s->slave.recv = pxa2xx_i2c_rx;
s->slave.send = pxa2xx_i2c_tx;
s->bus = i2c_init_bus();
if (ioregister) {
iomemtype = cpu_register_io_memory(0, pxa2xx_i2c_readfn,
pxa2xx_i2c_writefn, s);
cpu_register_physical_memory(s->base & 0xfffff000, 0xfff, iomemtype);
}
register_savevm("pxa2xx_i2c", base, 0,
pxa2xx_i2c_save, pxa2xx_i2c_load, s);
return s;
}
| false | qemu | 2a1639291bf9f3c88c62d10459fedaa677536ff5 |
8,557 | static int v9fs_synth_link(FsContext *fs_ctx, V9fsPath *oldpath,
V9fsPath *newpath, const char *buf)
{
errno = EPERM;
return -1;
}
| false | qemu | 364031f17932814484657e5551ba12957d993d7e |
8,558 | static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
ARMCPU *cpu = opaque;
return cpu->env.pmsav7.rnr < cpu->pmsav7_dregion;
}
| false | qemu | 1bc04a8880374407c4b12d82ceb8752e12ff5336 |
8,559 | int net_init_dump(const NetClientOptions *opts, const char *name,
NetClientState *peer, Error **errp)
{
int len, rc;
const char *file;
char def_file[128];
const NetdevDumpOptions *dump;
NetClientState *nc;
DumpNetClient *dnc;
assert(opts->type == NET_CLIENT_OPTIONS_KIND_DUMP);
dump = opts->u.dump;
assert(peer);
if (dump->has_file) {
file = dump->file;
} else {
int id;
int ret;
ret = net_hub_id_for_client(peer, &id);
assert(ret == 0); /* peer must be on a hub */
snprintf(def_file, sizeof(def_file), "qemu-vlan%d.pcap", id);
file = def_file;
}
if (dump->has_len) {
if (dump->len > INT_MAX) {
error_setg(errp, "invalid length: %"PRIu64, dump->len);
return -1;
}
len = dump->len;
} else {
len = 65536;
}
nc = qemu_new_net_client(&net_dump_info, peer, "dump", name);
snprintf(nc->info_str, sizeof(nc->info_str),
"dump to %s (len=%d)", file, len);
dnc = DO_UPCAST(DumpNetClient, nc, nc);
rc = net_dump_state_init(&dnc->ds, file, len, errp);
if (rc) {
qemu_del_net_client(nc);
}
return rc;
}
| false | qemu | 32bafa8fdd098d52fbf1102d5a5e48d29398c0aa |
8,560 | static int handle_secondary_tcp_pkt(NetFilterState *nf,
Connection *conn,
Packet *pkt)
{
struct tcphdr *tcp_pkt;
tcp_pkt = (struct tcphdr *)pkt->transport_header;
if (trace_event_get_state(TRACE_COLO_FILTER_REWRITER_DEBUG)) {
trace_colo_filter_rewriter_pkt_info(__func__,
inet_ntoa(pkt->ip->ip_src), inet_ntoa(pkt->ip->ip_dst),
ntohl(tcp_pkt->th_seq), ntohl(tcp_pkt->th_ack),
tcp_pkt->th_flags);
trace_colo_filter_rewriter_conn_offset(conn->offset);
}
if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == (TH_ACK | TH_SYN))) {
/*
* save offset = secondary_seq and then
* in handle_primary_tcp_pkt make offset
* = secondary_seq - primary_seq
*/
conn->offset = ntohl(tcp_pkt->th_seq);
}
if ((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_ACK) {
/* handle packets to the primary from the secondary*/
tcp_pkt->th_seq = htonl(ntohl(tcp_pkt->th_seq) - conn->offset);
net_checksum_calculate((uint8_t *)pkt->data, pkt->size);
}
return 0;
}
| false | qemu | db0a762e4be965b8976abe9df82c6d47c57336fc |
8,561 | static void nvdimm_dsm_reserved_root(AcpiNVDIMMState *state, NvdimmDsmIn *in,
hwaddr dsm_mem_addr)
{
switch (in->function) {
case 0x0:
nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
return;
case 0x1 /* Read FIT */:
nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
return;
}
nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}
| false | qemu | 5a33db78b0f3ee808a3d8dd5c427edfbe80bdc73 |
8,563 | static void omap_prcm_dpll_update(struct omap_prcm_s *s)
{
omap_clk dpll = omap_findclk(s->mpu, "dpll");
omap_clk dpll_x2 = omap_findclk(s->mpu, "dpll");
omap_clk core = omap_findclk(s->mpu, "core_clk");
int mode = (s->clken[9] >> 0) & 3;
int mult, div;
mult = (s->clksel[5] >> 12) & 0x3ff;
div = (s->clksel[5] >> 8) & 0xf;
if (mult == 0 || mult == 1)
mode = 1; /* Bypass */
s->dpll_lock = 0;
switch (mode) {
case 0:
fprintf(stderr, "%s: bad EN_DPLL\n", __FUNCTION__);
break;
case 1: /* Low-power bypass mode (Default) */
case 2: /* Fast-relock bypass mode */
omap_clk_setrate(dpll, 1, 1);
omap_clk_setrate(dpll_x2, 1, 1);
break;
case 3: /* Lock mode */
s->dpll_lock = 1; /* After 20 FINT cycles (ref_clk / (div + 1)). */
omap_clk_setrate(dpll, div + 1, mult);
omap_clk_setrate(dpll_x2, div + 1, mult * 2);
break;
}
switch ((s->clksel[6] >> 0) & 3) {
case 0:
omap_clk_reparent(core, omap_findclk(s->mpu, "clk32-kHz"));
break;
case 1:
omap_clk_reparent(core, dpll);
break;
case 2:
/* Default */
omap_clk_reparent(core, dpll_x2);
break;
case 3:
fprintf(stderr, "%s: bad CORE_CLK_SRC\n", __FUNCTION__);
break;
}
}
| false | qemu | a89f364ae8740dfc31b321eed9ee454e996dc3c1 |
8,564 | void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
/* Implement DC ZVA, which zeroes a fixed-length block of memory.
* Note that we do not implement the (architecturally mandated)
* alignment fault for attempts to use this on Device memory
* (which matches the usual QEMU behaviour of not implementing either
* alignment faults or any memory attribute handling).
*/
ARMCPU *cpu = arm_env_get_cpu(env);
uint64_t blocklen = 4 << cpu->dcz_blocksize;
uint64_t vaddr = vaddr_in & ~(blocklen - 1);
#ifndef CONFIG_USER_ONLY
{
/* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
* the block size so we might have to do more than one TLB lookup.
* We know that in fact for any v8 CPU the page size is at least 4K
* and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
* 1K as an artefact of legacy v5 subpage support being present in the
* same QEMU executable.
*/
int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
void *hostaddr[maxidx];
int try, i;
unsigned mmu_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
for (try = 0; try < 2; try++) {
for (i = 0; i < maxidx; i++) {
hostaddr[i] = tlb_vaddr_to_host(env,
vaddr + TARGET_PAGE_SIZE * i,
1, mmu_idx);
if (!hostaddr[i]) {
break;
}
}
if (i == maxidx) {
/* If it's all in the TLB it's fair game for just writing to;
* we know we don't need to update dirty status, etc.
*/
for (i = 0; i < maxidx - 1; i++) {
memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
}
memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
return;
}
/* OK, try a store and see if we can populate the tlb. This
* might cause an exception if the memory isn't writable,
* in which case we will longjmp out of here. We must for
* this purpose use the actual register value passed to us
* so that we get the fault address right.
*/
helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETRA());
/* Now we can populate the other TLB entries, if any */
for (i = 0; i < maxidx; i++) {
uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
if (va != (vaddr_in & TARGET_PAGE_MASK)) {
helper_ret_stb_mmu(env, va, 0, oi, GETRA());
}
}
}
/* Slow path (probably attempt to do this to an I/O device or
* similar, or clearing of a block of code we have translations
* cached for). Just do a series of byte writes as the architecture
* demands. It's not worth trying to use a cpu_physical_memory_map(),
* memset(), unmap() sequence here because:
* + we'd need to account for the blocksize being larger than a page
* + the direct-RAM access case is almost always going to be dealt
* with in the fastpath code above, so there's no speed benefit
* + we would have to deal with the map returning NULL because the
* bounce buffer was in use
*/
for (i = 0; i < blocklen; i++) {
helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETRA());
}
}
#else
memset(g2h(vaddr), 0, blocklen);
#endif
}
| false | qemu | 01ecaf438b1eb46abe23392c8ce5b7628b0c8cf5 |
8,567 | static void slirp_bootp_load(QEMUFile *f, Slirp *slirp)
{
int i;
for (i = 0; i < NB_BOOTP_CLIENTS; i++) {
slirp->bootp_clients[i].allocated = qemu_get_be16(f);
qemu_get_buffer(f, slirp->bootp_clients[i].macaddr, 6);
}
}
| false | qemu | eb5d4f5329df83ea15244b47f7fbca21adaae41b |
8,569 | static int scsi_handle_rw_error(SCSIDiskReq *r, int error)
{
bool is_read = (r->req.cmd.xfer == SCSI_XFER_FROM_DEV);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
BlockErrorAction action = bdrv_get_error_action(s->qdev.conf.bs, is_read, error);
if (action == BLOCK_ERROR_ACTION_REPORT) {
switch (error) {
case ENOMEDIUM:
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
break;
case ENOMEM:
scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
break;
case EINVAL:
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
break;
case ENOSPC:
scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
break;
default:
scsi_check_condition(r, SENSE_CODE(IO_ERROR));
break;
}
}
bdrv_error_action(s->qdev.conf.bs, action, is_read, error);
if (action == BLOCK_ERROR_ACTION_STOP) {
scsi_req_retry(&r->req);
}
return action != BLOCK_ERROR_ACTION_IGNORE;
}
| false | qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce |
8,570 | void migrate_fd_connect(MigrationState *s)
{
s->state = MIG_STATE_ACTIVE;
trace_migrate_set_state(MIG_STATE_ACTIVE);
s->bytes_xfer = 0;
/* This is a best 1st approximation. ns to ms */
s->expected_downtime = max_downtime/1000000;
s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
s->file = qemu_fopen_ops(s, &migration_file_ops);
qemu_file_set_rate_limit(s->file,
s->bandwidth_limit / XFER_LIMIT_RATIO);
qemu_thread_create(&s->thread, migration_thread, s,
QEMU_THREAD_JOINABLE);
notifier_list_notify(&migration_state_notifiers, s);
}
| false | qemu | 1964a397063967acc5ce71a2a24ed26e74824ee1 |
8,571 | void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
DMADirection dir)
{
int err;
target_phys_addr_t paddr, plen;
void *buf;
if (dma->map) {
return dma->map(dma, addr, len, dir);
}
plen = *len;
err = dma->translate(dma, addr, &paddr, &plen, dir);
if (err) {
return NULL;
}
/*
* If this is true, the virtual region is contiguous,
* but the translated physical region isn't. We just
* clamp *len, much like address_space_map() does.
*/
if (plen < *len) {
*len = plen;
}
buf = address_space_map(dma->as, paddr, &plen, dir == DMA_DIRECTION_FROM_DEVICE);
*len = plen;
return buf;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
8,572 | static void lx_init(const LxBoardDesc *board, MachineState *machine)
{
#ifdef TARGET_WORDS_BIGENDIAN
int be = 1;
#else
int be = 0;
#endif
MemoryRegion *system_memory = get_system_memory();
XtensaCPU *cpu = NULL;
CPUXtensaState *env = NULL;
MemoryRegion *ram, *rom, *system_io;
DriveInfo *dinfo;
pflash_t *flash = NULL;
QemuOpts *machine_opts = qemu_get_machine_opts();
const char *cpu_model = machine->cpu_model;
const char *kernel_filename = qemu_opt_get(machine_opts, "kernel");
const char *kernel_cmdline = qemu_opt_get(machine_opts, "append");
const char *dtb_filename = qemu_opt_get(machine_opts, "dtb");
const char *initrd_filename = qemu_opt_get(machine_opts, "initrd");
int n;
if (!cpu_model) {
cpu_model = XTENSA_DEFAULT_CPU_MODEL;
}
for (n = 0; n < smp_cpus; n++) {
cpu = cpu_xtensa_init(cpu_model);
if (cpu == NULL) {
error_report("unable to find CPU definition '%s'\n",
cpu_model);
exit(EXIT_FAILURE);
}
env = &cpu->env;
env->sregs[PRID] = n;
qemu_register_reset(lx60_reset, cpu);
/* Need MMU initialized prior to ELF loading,
* so that ELF gets loaded into virtual addresses
*/
cpu_reset(CPU(cpu));
}
ram = g_malloc(sizeof(*ram));
memory_region_init_ram(ram, NULL, "lx60.dram", machine->ram_size,
&error_abort);
vmstate_register_ram_global(ram);
memory_region_add_subregion(system_memory, 0, ram);
system_io = g_malloc(sizeof(*system_io));
memory_region_init(system_io, NULL, "lx60.io", 224 * 1024 * 1024);
memory_region_add_subregion(system_memory, 0xf0000000, system_io);
lx60_fpga_init(system_io, 0x0d020000);
if (nd_table[0].used) {
lx60_net_init(system_io, 0x0d030000, 0x0d030400, 0x0d800000,
xtensa_get_extint(env, 1), nd_table);
}
if (!serial_hds[0]) {
serial_hds[0] = qemu_chr_new("serial0", "null", NULL);
}
serial_mm_init(system_io, 0x0d050020, 2, xtensa_get_extint(env, 0),
115200, serial_hds[0], DEVICE_NATIVE_ENDIAN);
dinfo = drive_get(IF_PFLASH, 0, 0);
if (dinfo) {
flash = pflash_cfi01_register(board->flash_base,
NULL, "lx60.io.flash", board->flash_size,
blk_bs(blk_by_legacy_dinfo(dinfo)),
board->flash_sector_size,
board->flash_size / board->flash_sector_size,
4, 0x0000, 0x0000, 0x0000, 0x0000, be);
if (flash == NULL) {
error_report("unable to mount pflash\n");
exit(EXIT_FAILURE);
}
}
/* Use presence of kernel file name as 'boot from SRAM' switch. */
if (kernel_filename) {
uint32_t entry_point = env->pc;
size_t bp_size = 3 * get_tag_size(0); /* first/last and memory tags */
uint32_t tagptr = 0xfe000000 + board->sram_size;
uint32_t cur_tagptr;
BpMemInfo memory_location = {
.type = tswap32(MEMORY_TYPE_CONVENTIONAL),
.start = tswap32(0),
.end = tswap32(machine->ram_size),
};
uint32_t lowmem_end = machine->ram_size < 0x08000000 ?
machine->ram_size : 0x08000000;
uint32_t cur_lowmem = QEMU_ALIGN_UP(lowmem_end / 2, 4096);
rom = g_malloc(sizeof(*rom));
memory_region_init_ram(rom, NULL, "lx60.sram", board->sram_size,
&error_abort);
vmstate_register_ram_global(rom);
memory_region_add_subregion(system_memory, 0xfe000000, rom);
if (kernel_cmdline) {
bp_size += get_tag_size(strlen(kernel_cmdline) + 1);
}
if (dtb_filename) {
bp_size += get_tag_size(sizeof(uint32_t));
}
if (initrd_filename) {
bp_size += get_tag_size(sizeof(BpMemInfo));
}
/* Put kernel bootparameters to the end of that SRAM */
tagptr = (tagptr - bp_size) & ~0xff;
cur_tagptr = put_tag(tagptr, BP_TAG_FIRST, 0, NULL);
cur_tagptr = put_tag(cur_tagptr, BP_TAG_MEMORY,
sizeof(memory_location), &memory_location);
if (kernel_cmdline) {
cur_tagptr = put_tag(cur_tagptr, BP_TAG_COMMAND_LINE,
strlen(kernel_cmdline) + 1, kernel_cmdline);
}
if (dtb_filename) {
int fdt_size;
void *fdt = load_device_tree(dtb_filename, &fdt_size);
uint32_t dtb_addr = tswap32(cur_lowmem);
if (!fdt) {
error_report("could not load DTB '%s'\n", dtb_filename);
exit(EXIT_FAILURE);
}
cpu_physical_memory_write(cur_lowmem, fdt, fdt_size);
cur_tagptr = put_tag(cur_tagptr, BP_TAG_FDT,
sizeof(dtb_addr), &dtb_addr);
cur_lowmem = QEMU_ALIGN_UP(cur_lowmem + fdt_size, 4096);
}
if (initrd_filename) {
BpMemInfo initrd_location = { 0 };
int initrd_size = load_ramdisk(initrd_filename, cur_lowmem,
lowmem_end - cur_lowmem);
if (initrd_size < 0) {
initrd_size = load_image_targphys(initrd_filename,
cur_lowmem,
lowmem_end - cur_lowmem);
}
if (initrd_size < 0) {
error_report("could not load initrd '%s'\n", initrd_filename);
exit(EXIT_FAILURE);
}
initrd_location.start = tswap32(cur_lowmem);
initrd_location.end = tswap32(cur_lowmem + initrd_size);
cur_tagptr = put_tag(cur_tagptr, BP_TAG_INITRD,
sizeof(initrd_location), &initrd_location);
cur_lowmem = QEMU_ALIGN_UP(cur_lowmem + initrd_size, 4096);
}
cur_tagptr = put_tag(cur_tagptr, BP_TAG_LAST, 0, NULL);
env->regs[2] = tagptr;
uint64_t elf_entry;
uint64_t elf_lowaddr;
int success = load_elf(kernel_filename, translate_phys_addr, cpu,
&elf_entry, &elf_lowaddr, NULL, be, ELF_MACHINE, 0);
if (success > 0) {
entry_point = elf_entry;
} else {
hwaddr ep;
int is_linux;
success = load_uimage(kernel_filename, &ep, NULL, &is_linux);
if (success > 0 && is_linux) {
entry_point = ep;
} else {
error_report("could not load kernel '%s'\n",
kernel_filename);
exit(EXIT_FAILURE);
}
}
if (entry_point != env->pc) {
static const uint8_t jx_a0[] = {
#ifdef TARGET_WORDS_BIGENDIAN
0x0a, 0, 0,
#else
0xa0, 0, 0,
#endif
};
env->regs[0] = entry_point;
cpu_physical_memory_write(env->pc, jx_a0, sizeof(jx_a0));
}
} else {
if (flash) {
MemoryRegion *flash_mr = pflash_cfi01_get_memory(flash);
MemoryRegion *flash_io = g_malloc(sizeof(*flash_io));
memory_region_init_alias(flash_io, NULL, "lx60.flash",
flash_mr, board->flash_boot_base,
board->flash_size - board->flash_boot_base < 0x02000000 ?
board->flash_size - board->flash_boot_base : 0x02000000);
memory_region_add_subregion(system_memory, 0xfe000000,
flash_io);
}
}
}
| false | qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce |
8,575 | void kvm_arch_reset_vcpu(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
env->exception_injected = -1;
env->interrupt_injected = -1;
env->xcr0 = 1;
if (kvm_irqchip_in_kernel()) {
env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
KVM_MP_STATE_UNINITIALIZED;
} else {
env->mp_state = KVM_MP_STATE_RUNNABLE;
}
if (cpu->hyperv_synic) {
int i;
for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
}
}
}
| false | qemu | b7394c8394d38cb38b6db14eb431cac7a91e7140 |
8,576 | static int speex_header(AVFormatContext *s, int idx) {
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + idx;
struct speex_params *spxp = os->private;
AVStream *st = s->streams[idx];
uint8_t *p = os->buf + os->pstart;
if (!spxp) {
spxp = av_mallocz(sizeof(*spxp));
os->private = spxp;
}
if (spxp->seq > 1)
return 0;
if (spxp->seq == 0) {
int frames_per_packet;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = AV_CODEC_ID_SPEEX;
if (os->psize < 68) {
av_log(s, AV_LOG_ERROR, "speex packet too small\n");
return AVERROR_INVALIDDATA;
}
st->codec->sample_rate = AV_RL32(p + 36);
st->codec->channels = AV_RL32(p + 48);
if (st->codec->channels < 1 || st->codec->channels > 2) {
av_log(s, AV_LOG_ERROR, "invalid channel count. Speex must be mono or stereo.\n");
return AVERROR_INVALIDDATA;
}
st->codec->channel_layout = st->codec->channels == 1 ? AV_CH_LAYOUT_MONO :
AV_CH_LAYOUT_STEREO;
spxp->packet_size = AV_RL32(p + 56);
frames_per_packet = AV_RL32(p + 64);
if (frames_per_packet)
spxp->packet_size *= frames_per_packet;
ff_alloc_extradata(st->codec, os->psize);
memcpy(st->codec->extradata, p, st->codec->extradata_size);
avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
} else
ff_vorbis_comment(s, &st->metadata, p, os->psize);
spxp->seq++;
return 1;
}
| true | FFmpeg | eb5cc8febc6cd7938f8fdce95d78cacdbe1be30b |
8,578 | static ExitStatus trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = assemble_rt64(insn);
unsigned ra = extract32(insn, 21, 5);
return do_fop_wed(ctx, rt, ra, di->f_wed);
}
| true | qemu | eff235eb2bcd7092901f4698a7907e742f3b7f2f |
8,579 | void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f)
{
int interior_limit, filter_level;
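/* Per-macroblock loop filter strength: start from the segment or frame
   level, apply reference-frame and mode deltas, then derive the interior
   limit from the sharpness setting. */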
if (s->segmentation.enabled) {
filter_level = s->segmentation.filter_level[mb->segment];
if (!s->segmentation.absolute_vals)
filter_level += s->filter.level;
} else
filter_level = s->filter.level;
if (s->lf_delta.enabled) {
filter_level += s->lf_delta.ref[mb->ref_frame];
filter_level += s->lf_delta.mode[mb->mode];
}
filter_level = av_clip_uintp2(filter_level, 6);
interior_limit = filter_level;
if (s->filter.sharpness) {
interior_limit >>= (s->filter.sharpness + 3) >> 2;
interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
}
interior_limit = FFMAX(interior_limit, 1);
f->filter_level = filter_level;
f->inner_limit = interior_limit;
f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 ||
mb->mode == VP8_MVMODE_SPLIT;
}
| true | FFmpeg | ac4b32df71bd932838043a4838b86d11e169707f |
8,580 | void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b)
{
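/* Signed 64x64 -> 128 bit multiply: high half in *phigh, low half in *plow.
   Uses the x86-64 imul instruction when available, otherwise a portable
   32-bit decomposition. */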
#if defined(__x86_64__)
__asm__ ("imul %0\n\t"
: "=d" (*phigh), "=a" (*plow)
: "a" (a), "0" (b)
);
#else
int64_t ph;
uint64_t pm1, pm2, pl;
pl = (uint64_t)((uint32_t)a) * (uint64_t)((uint32_t)b);
pm1 = (a >> 32) * (uint32_t)b;
pm2 = (uint32_t)a * (b >> 32);
ph = (a >> 32) * (b >> 32);
ph += (int64_t)pm1 >> 32;
pm1 = (uint64_t)((uint32_t)pm1) + pm2 + (pl >> 32);
*phigh = ph + ((int64_t)pm1 >> 32);
*plow = (pm1 << 32) + (uint32_t)pl;
#endif
}
| true | qemu | 67fc07d3fba681f3362f7644a69b7a581a2670e8 |
8,582 | static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
uint64_t nb_sectors;
uint8_t *outbuf;
int buflen;
switch (req->cmd.buf[0]) {
case INQUIRY:
case MODE_SENSE:
case MODE_SENSE_10:
case RESERVE:
case RESERVE_10:
case RELEASE:
case RELEASE_10:
case START_STOP:
case ALLOW_MEDIUM_REMOVAL:
case GET_CONFIGURATION:
case GET_EVENT_STATUS_NOTIFICATION:
case MECHANISM_STATUS:
case REQUEST_SENSE:
break;
default:
if (s->tray_open || !bdrv_is_inserted(s->qdev.conf.bs)) {
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
return 0;
}
break;
}
/*
* FIXME: we shouldn't return anything bigger than 4k, but the code
* requires the buffer to be as big as req->cmd.xfer in several
* places. So, do not allow CDBs with a very large ALLOCATION
* LENGTH. The real fix would be to modify scsi_read_data and
* dma_buf_read, so that they return data beyond the buflen
* as all zeros.
*/
if (req->cmd.xfer > 65536) {
goto illegal_request;
}
r->buflen = MAX(4096, req->cmd.xfer);
if (!r->iov.iov_base) {
r->iov.iov_base = qemu_blockalign(s->qdev.conf.bs, r->buflen);
}
buflen = req->cmd.xfer;
outbuf = r->iov.iov_base;
memset(outbuf, 0, r->buflen);
switch (req->cmd.buf[0]) {
case TEST_UNIT_READY:
assert(!s->tray_open && bdrv_is_inserted(s->qdev.conf.bs));
break;
case INQUIRY:
buflen = scsi_disk_emulate_inquiry(req, outbuf);
if (buflen < 0) {
goto illegal_request;
}
break;
case MODE_SENSE:
case MODE_SENSE_10:
buflen = scsi_disk_emulate_mode_sense(r, outbuf);
if (buflen < 0) {
goto illegal_request;
}
break;
case READ_TOC:
buflen = scsi_disk_emulate_read_toc(req, outbuf);
if (buflen < 0) {
goto illegal_request;
}
break;
case RESERVE:
if (req->cmd.buf[1] & 1) {
goto illegal_request;
}
break;
case RESERVE_10:
if (req->cmd.buf[1] & 3) {
goto illegal_request;
}
break;
case RELEASE:
if (req->cmd.buf[1] & 1) {
goto illegal_request;
}
break;
case RELEASE_10:
if (req->cmd.buf[1] & 3) {
goto illegal_request;
}
break;
case START_STOP:
if (scsi_disk_emulate_start_stop(r) < 0) {
return 0;
}
break;
case ALLOW_MEDIUM_REMOVAL:
s->tray_locked = req->cmd.buf[4] & 1;
bdrv_lock_medium(s->qdev.conf.bs, req->cmd.buf[4] & 1);
break;
case READ_CAPACITY_10:
/* The normal LEN field for this command is zero. */
memset(outbuf, 0, 8);
bdrv_get_geometry(s->qdev.conf.bs, &nb_sectors);
if (!nb_sectors) {
scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
return 0;
}
if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
goto illegal_request;
}
nb_sectors /= s->qdev.blocksize / 512;
/* Returned value is the address of the last sector. */
nb_sectors--;
/* Remember the new size for read/write sanity checking. */
s->qdev.max_lba = nb_sectors;
/* Clip to 2TB, instead of returning capacity modulo 2TB. */
if (nb_sectors > UINT32_MAX) {
nb_sectors = UINT32_MAX;
}
outbuf[0] = (nb_sectors >> 24) & 0xff;
outbuf[1] = (nb_sectors >> 16) & 0xff;
outbuf[2] = (nb_sectors >> 8) & 0xff;
outbuf[3] = nb_sectors & 0xff;
outbuf[4] = 0;
outbuf[5] = 0;
outbuf[6] = s->qdev.blocksize >> 8;
outbuf[7] = 0;
break;
case REQUEST_SENSE:
/* Just return "NO SENSE". */
buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen,
(req->cmd.buf[1] & 1) == 0);
if (buflen < 0) {
goto illegal_request;
}
break;
case MECHANISM_STATUS:
buflen = scsi_emulate_mechanism_status(s, outbuf);
if (buflen < 0) {
goto illegal_request;
}
break;
case GET_CONFIGURATION:
buflen = scsi_get_configuration(s, outbuf);
if (buflen < 0) {
goto illegal_request;
}
break;
case GET_EVENT_STATUS_NOTIFICATION:
buflen = scsi_get_event_status_notification(s, r, outbuf);
if (buflen < 0) {
goto illegal_request;
}
break;
case READ_DISC_INFORMATION:
buflen = scsi_read_disc_information(s, r, outbuf);
if (buflen < 0) {
goto illegal_request;
}
break;
case READ_DVD_STRUCTURE:
buflen = scsi_read_dvd_structure(s, r, outbuf);
if (buflen < 0) {
goto illegal_request;
}
break;
case SERVICE_ACTION_IN_16:
/* Service Action In subcommands. */
if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
DPRINTF("SAI READ CAPACITY(16)\n");
memset(outbuf, 0, req->cmd.xfer);
bdrv_get_geometry(s->qdev.conf.bs, &nb_sectors);
if (!nb_sectors) {
scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
return 0;
}
if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
goto illegal_request;
}
nb_sectors /= s->qdev.blocksize / 512;
/* Returned value is the address of the last sector. */
nb_sectors--;
/* Remember the new size for read/write sanity checking. */
s->qdev.max_lba = nb_sectors;
outbuf[0] = (nb_sectors >> 56) & 0xff;
outbuf[1] = (nb_sectors >> 48) & 0xff;
outbuf[2] = (nb_sectors >> 40) & 0xff;
outbuf[3] = (nb_sectors >> 32) & 0xff;
outbuf[4] = (nb_sectors >> 24) & 0xff;
outbuf[5] = (nb_sectors >> 16) & 0xff;
outbuf[6] = (nb_sectors >> 8) & 0xff;
outbuf[7] = nb_sectors & 0xff;
outbuf[8] = 0;
outbuf[9] = 0;
outbuf[10] = s->qdev.blocksize >> 8;
outbuf[11] = 0;
outbuf[12] = 0;
outbuf[13] = get_physical_block_exp(&s->qdev.conf);
/* set TPE bit if the format supports discard */
if (s->qdev.conf.discard_granularity) {
outbuf[14] = 0x80;
}
/* Protection, exponent and lowest lba field left blank. */
break;
}
DPRINTF("Unsupported Service Action In\n");
goto illegal_request;
case SYNCHRONIZE_CACHE:
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BDRV_ACCT_FLUSH);
r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
return 0;
case SEEK_10:
DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
if (r->req.cmd.lba > s->qdev.max_lba) {
goto illegal_lba;
}
break;
case MODE_SELECT:
DPRINTF("Mode Select(6) (len %lu)\n", (long)r->req.cmd.xfer);
break;
case MODE_SELECT_10:
DPRINTF("Mode Select(10) (len %lu)\n", (long)r->req.cmd.xfer);
break;
case UNMAP:
DPRINTF("Unmap (len %lu)\n", (long)r->req.cmd.xfer);
break;
case WRITE_SAME_10:
case WRITE_SAME_16:
nb_sectors = scsi_data_cdb_length(r->req.cmd.buf);
if (bdrv_is_read_only(s->qdev.conf.bs)) {
scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
return 0;
}
if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
goto illegal_lba;
}
/*
* We only support WRITE SAME with the unmap bit set for now.
*/
if (!(req->cmd.buf[1] & 0x8)) {
goto illegal_request;
}
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
r->req.aiocb = bdrv_aio_discard(s->qdev.conf.bs,
r->req.cmd.lba * (s->qdev.blocksize / 512),
nb_sectors * (s->qdev.blocksize / 512),
scsi_aio_complete, r);
return 0;
default:
DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]);
scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
return 0;
}
assert(!r->req.aiocb);
r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
if (r->iov.iov_len == 0) {
scsi_req_complete(&r->req, GOOD);
}
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
assert(r->iov.iov_len == req->cmd.xfer);
return -r->iov.iov_len;
} else {
return r->iov.iov_len;
}
illegal_request:
if (r->req.status == -1) {
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
return 0;
illegal_lba:
scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
return 0;
}
| true | qemu | 823bd7391c96ba675f20fd6d952d1cb6e1ffb851 |
8,583 | static int decode_syncpoint(NUTContext *nut){
AVFormatContext *s= nut->avf;
ByteIOContext *bc = &s->pb;
int64_t end;
uint64_t tmp;
int i;
AVRational time_base;
nut->last_syncpoint_pos= url_ftell(bc)-8;
end= get_packetheader(nut, bc, 1);
end += url_ftell(bc) - 4;
tmp= get_v(bc);
get_v(bc); //back_ptr_div16
time_base= nut->time_base[tmp % nut->time_base_count];
for(i=0; i<s->nb_streams; i++){
nut->stream[i].last_pts= av_rescale_rnd(
tmp / nut->time_base_count,
time_base.num * (int64_t)nut->stream[i].time_base.den,
time_base.den * (int64_t)nut->stream[i].time_base.num,
AV_ROUND_DOWN);
//last_key_frame ?
}
//FIXME put this in a reset func maybe
if(skip_reserved(bc, end) || check_checksum(bc)){
av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n");
return -1;
}
return 0;
}
| true | FFmpeg | 5d97d9d53ea1cc2c28411ad734565372ddeccc32 |
8,584 | static void mips_jazz_init(MachineState *machine,
enum jazz_model_e jazz_model)
{
MemoryRegion *address_space = get_system_memory();
const char *cpu_model = machine->cpu_model;
char *filename;
int bios_size, n;
MIPSCPU *cpu;
CPUClass *cc;
CPUMIPSState *env;
qemu_irq *i8259;
rc4030_dma *dmas;
MemoryRegion *rc4030_dma_mr;
MemoryRegion *isa_mem = g_new(MemoryRegion, 1);
MemoryRegion *isa_io = g_new(MemoryRegion, 1);
MemoryRegion *rtc = g_new(MemoryRegion, 1);
MemoryRegion *i8042 = g_new(MemoryRegion, 1);
MemoryRegion *dma_dummy = g_new(MemoryRegion, 1);
NICInfo *nd;
DeviceState *dev, *rc4030;
SysBusDevice *sysbus;
ISABus *isa_bus;
ISADevice *pit;
DriveInfo *fds[MAX_FD];
qemu_irq esp_reset, dma_enable;
MemoryRegion *ram = g_new(MemoryRegion, 1);
MemoryRegion *bios = g_new(MemoryRegion, 1);
MemoryRegion *bios2 = g_new(MemoryRegion, 1);
/* init CPUs */
if (cpu_model == NULL) {
cpu_model = "R4000";
}
cpu = cpu_mips_init(cpu_model);
if (cpu == NULL) {
fprintf(stderr, "Unable to find CPU definition\n");
exit(1);
}
env = &cpu->env;
qemu_register_reset(main_cpu_reset, cpu);
/* Chipset returns 0 on invalid reads and does not raise data exceptions.
* However, we can't simply add a global memory region to catch
* everything, as memory core directly call unassigned_mem_read/write
* on some invalid accesses, which call do_unassigned_access on the
* CPU, which raise an exception.
* Handle that case by hijacking the do_unassigned_access method on
* the CPU, and do not raise exceptions for data access. */
cc = CPU_GET_CLASS(cpu);
real_do_unassigned_access = cc->do_unassigned_access;
cc->do_unassigned_access = mips_jazz_do_unassigned_access;
/* allocate RAM */
memory_region_allocate_system_memory(ram, NULL, "mips_jazz.ram",
machine->ram_size);
memory_region_add_subregion(address_space, 0, ram);
memory_region_init_ram(bios, NULL, "mips_jazz.bios", MAGNUM_BIOS_SIZE,
&error_abort);
vmstate_register_ram_global(bios);
memory_region_set_readonly(bios, true);
memory_region_init_alias(bios2, NULL, "mips_jazz.bios", bios,
0, MAGNUM_BIOS_SIZE);
memory_region_add_subregion(address_space, 0x1fc00000LL, bios);
memory_region_add_subregion(address_space, 0xfff00000LL, bios2);
/* load the BIOS image. */
if (bios_name == NULL)
bios_name = BIOS_FILENAME;
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (filename) {
bios_size = load_image_targphys(filename, 0xfff00000LL,
MAGNUM_BIOS_SIZE);
g_free(filename);
} else {
bios_size = -1;
}
if ((bios_size < 0 || bios_size > MAGNUM_BIOS_SIZE) && !qtest_enabled()) {
error_report("Could not load MIPS bios '%s'", bios_name);
exit(1);
}
/* Init CPU internal devices */
cpu_mips_irq_init_cpu(env);
cpu_mips_clock_init(env);
/* Chipset */
rc4030 = rc4030_init(&dmas, &rc4030_dma_mr);
sysbus = SYS_BUS_DEVICE(rc4030);
sysbus_connect_irq(sysbus, 0, env->irq[6]);
sysbus_connect_irq(sysbus, 1, env->irq[3]);
memory_region_add_subregion(address_space, 0x80000000,
sysbus_mmio_get_region(sysbus, 0));
memory_region_add_subregion(address_space, 0xf0000000,
sysbus_mmio_get_region(sysbus, 1));
memory_region_init_io(dma_dummy, NULL, &dma_dummy_ops, NULL, "dummy_dma", 0x1000);
memory_region_add_subregion(address_space, 0x8000d000, dma_dummy);
/* ISA bus: IO space at 0x90000000, mem space at 0x91000000 */
memory_region_init(isa_io, NULL, "isa-io", 0x00010000);
memory_region_init(isa_mem, NULL, "isa-mem", 0x01000000);
memory_region_add_subregion(address_space, 0x90000000, isa_io);
memory_region_add_subregion(address_space, 0x91000000, isa_mem);
isa_bus = isa_bus_new(NULL, isa_mem, isa_io);
/* ISA devices */
i8259 = i8259_init(isa_bus, env->irq[4]);
isa_bus_irqs(isa_bus, i8259);
DMA_init(0);
pit = pit_init(isa_bus, 0x40, 0, NULL);
pcspk_init(isa_bus, pit);
/* Video card */
switch (jazz_model) {
case JAZZ_MAGNUM:
dev = qdev_create(NULL, "sysbus-g364");
qdev_init_nofail(dev);
sysbus = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(sysbus, 0, 0x60080000);
sysbus_mmio_map(sysbus, 1, 0x40000000);
sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 3));
{
/* Simple ROM, so user doesn't have to provide one */
MemoryRegion *rom_mr = g_new(MemoryRegion, 1);
memory_region_init_ram(rom_mr, NULL, "g364fb.rom", 0x80000,
&error_abort);
vmstate_register_ram_global(rom_mr);
memory_region_set_readonly(rom_mr, true);
uint8_t *rom = memory_region_get_ram_ptr(rom_mr);
memory_region_add_subregion(address_space, 0x60000000, rom_mr);
rom[0] = 0x10; /* Mips G364 */
}
break;
case JAZZ_PICA61:
isa_vga_mm_init(0x40000000, 0x60000000, 0, get_system_memory());
break;
default:
break;
}
/* Network controller */
for (n = 0; n < nb_nics; n++) {
nd = &nd_table[n];
if (!nd->model)
nd->model = g_strdup("dp83932");
if (strcmp(nd->model, "dp83932") == 0) {
qemu_check_nic_model(nd, "dp83932");
dev = qdev_create(NULL, "dp8393x");
qdev_set_nic_properties(dev, nd);
qdev_prop_set_uint8(dev, "it_shift", 2);
qdev_prop_set_ptr(dev, "dma_mr", rc4030_dma_mr);
qdev_init_nofail(dev);
sysbus = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(sysbus, 0, 0x80001000);
sysbus_mmio_map(sysbus, 1, 0x8000b000);
sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 4));
break;
} else if (is_help_option(nd->model)) {
fprintf(stderr, "qemu: Supported NICs: dp83932\n");
exit(1);
} else {
fprintf(stderr, "qemu: Unsupported NIC: %s\n", nd->model);
exit(1);
}
}
/* SCSI adapter */
esp_init(0x80002000, 0,
rc4030_dma_read, rc4030_dma_write, dmas[0],
qdev_get_gpio_in(rc4030, 5), &esp_reset, &dma_enable);
/* Floppy */
if (drive_get_max_bus(IF_FLOPPY) >= MAX_FD) {
fprintf(stderr, "qemu: too many floppy drives\n");
exit(1);
}
for (n = 0; n < MAX_FD; n++) {
fds[n] = drive_get(IF_FLOPPY, 0, n);
}
fdctrl_init_sysbus(qdev_get_gpio_in(rc4030, 1), 0, 0x80003000, fds);
/* Real time clock */
rtc_init(isa_bus, 1980, NULL);
memory_region_init_io(rtc, NULL, &rtc_ops, NULL, "rtc", 0x1000);
memory_region_add_subregion(address_space, 0x80004000, rtc);
/* Keyboard (i8042) */
i8042_mm_init(qdev_get_gpio_in(rc4030, 6), qdev_get_gpio_in(rc4030, 7),
i8042, 0x1000, 0x1);
memory_region_add_subregion(address_space, 0x80005000, i8042);
/* Serial ports */
if (serial_hds[0]) {
serial_mm_init(address_space, 0x80006000, 0,
qdev_get_gpio_in(rc4030, 8), 8000000/16,
serial_hds[0], DEVICE_NATIVE_ENDIAN);
}
if (serial_hds[1]) {
serial_mm_init(address_space, 0x80007000, 0,
qdev_get_gpio_in(rc4030, 9), 8000000/16,
serial_hds[1], DEVICE_NATIVE_ENDIAN);
}
/* Parallel port */
if (parallel_hds[0])
parallel_mm_init(address_space, 0x80008000, 0,
qdev_get_gpio_in(rc4030, 0), parallel_hds[0]);
/* FIXME: missing Jazz sound at 0x8000c000, rc4030[2] */
/* NVRAM */
dev = qdev_create(NULL, "ds1225y");
qdev_init_nofail(dev);
sysbus = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(sysbus, 0, 0x80009000);
/* LED indicator */
sysbus_create_simple("jazz-led", 0x8000f000, NULL);
}
| true | qemu | f8ed85ac992c48814d916d5df4d44f9a971c5de4 |
8,585 | static void free_texture(void *opaque, uint8_t *data)
{
ID3D11Texture2D_Release((ID3D11Texture2D *)opaque);
}
| true | FFmpeg | 2c2f25eb8920129ef3cfe6da2e1cefdedc485965 |
8,586 | static int alac_decode_frame(AVCodecContext *avctx,
void *outbuffer, int *outputsize,
const uint8_t *inbuffer, int input_buffer_size)
{
ALACContext *alac = avctx->priv_data;
int channels;
unsigned int outputsamples;
int hassize;
int readsamplesize;
int wasted_bytes;
int isnotcompressed;
uint8_t interlacing_shift;
uint8_t interlacing_leftweight;
/* short-circuit null buffers */
if (!inbuffer || !input_buffer_size)
return input_buffer_size;
/* initialize from the extradata */
if (!alac->context_initialized) {
if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) {
av_log(avctx, AV_LOG_ERROR, "alac: expected %d extradata bytes\n",
ALAC_EXTRADATA_SIZE);
return input_buffer_size;
}
if (alac_set_info(alac)) {
av_log(avctx, AV_LOG_ERROR, "alac: set_info failed\n");
return input_buffer_size;
}
alac->context_initialized = 1;
}
init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8);
channels = get_bits(&alac->gb, 3) + 1;
if (channels > MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "channels > %d not supported\n",
MAX_CHANNELS);
return input_buffer_size;
}
/* 2^result = something to do with output waiting.
* perhaps matters if we read > 1 frame in a pass?
*/
skip_bits(&alac->gb, 4);
skip_bits(&alac->gb, 12); /* unknown, skip 12 bits */
/* the output sample size is stored soon */
hassize = get_bits1(&alac->gb);
wasted_bytes = get_bits(&alac->gb, 2); /* unknown ? */
/* whether the frame is compressed */
isnotcompressed = get_bits1(&alac->gb);
if (hassize) {
/* now read the number of samples as a 32bit integer */
outputsamples = get_bits(&alac->gb, 32);
if(outputsamples > alac->setinfo_max_samples_per_frame){
av_log(avctx, AV_LOG_ERROR, "outputsamples %d > %d\n", outputsamples, alac->setinfo_max_samples_per_frame);
} else
outputsamples = alac->setinfo_max_samples_per_frame;
*outputsize = outputsamples * alac->bytespersample;
readsamplesize = alac->setinfo_sample_size - (wasted_bytes * 8) + channels - 1;
if (!isnotcompressed) {
/* so it is compressed */
int16_t predictor_coef_table[channels][32];
int predictor_coef_num[channels];
int prediction_type[channels];
int prediction_quantitization[channels];
int ricemodifier[channels];
int i, chan;
interlacing_shift = get_bits(&alac->gb, 8);
interlacing_leftweight = get_bits(&alac->gb, 8);
for (chan = 0; chan < channels; chan++) {
prediction_type[chan] = get_bits(&alac->gb, 4);
prediction_quantitization[chan] = get_bits(&alac->gb, 4);
ricemodifier[chan] = get_bits(&alac->gb, 3);
predictor_coef_num[chan] = get_bits(&alac->gb, 5);
/* read the predictor table */
for (i = 0; i < predictor_coef_num[chan]; i++)
predictor_coef_table[chan][i] = (int16_t)get_bits(&alac->gb, 16);
}
if (wasted_bytes)
av_log(avctx, AV_LOG_ERROR, "FIXME: unimplemented, unhandling of wasted_bytes\n");
for (chan = 0; chan < channels; chan++) {
bastardized_rice_decompress(alac,
alac->predicterror_buffer[chan],
outputsamples,
readsamplesize,
alac->setinfo_rice_initialhistory,
alac->setinfo_rice_kmodifier,
ricemodifier[chan] * alac->setinfo_rice_historymult / 4,
(1 << alac->setinfo_rice_kmodifier) - 1);
if (prediction_type[chan] == 0) {
/* adaptive fir */
predictor_decompress_fir_adapt(alac->predicterror_buffer[chan],
alac->outputsamples_buffer[chan],
outputsamples,
readsamplesize,
predictor_coef_table[chan],
predictor_coef_num[chan],
prediction_quantitization[chan]);
} else {
av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type[chan]);
/* I think the only other prediction type (or perhaps this is
* just a boolean?) runs adaptive fir twice.. like:
* predictor_decompress_fir_adapt(predictor_error, tempout, ...)
* predictor_decompress_fir_adapt(predictor_error, outputsamples ...)
* little strange..
*/
}
}
} else {
/* not compressed, easy case */
if (alac->setinfo_sample_size <= 16) {
int i, chan;
for (chan = 0; chan < channels; chan++)
for (i = 0; i < outputsamples; i++) {
int32_t audiobits;
audiobits = get_bits(&alac->gb, alac->setinfo_sample_size);
audiobits = extend_sign32(audiobits, readsamplesize);
alac->outputsamples_buffer[chan][i] = audiobits;
}
} else {
int i, chan;
for (chan = 0; chan < channels; chan++)
for (i = 0; i < outputsamples; i++) {
int32_t audiobits;
audiobits = get_bits(&alac->gb, 16);
/* special case of sign extension..
* as we'll be ORing the low 16bits into this */
audiobits = audiobits << 16;
audiobits = audiobits >> (32 - alac->setinfo_sample_size);
audiobits |= get_bits(&alac->gb, alac->setinfo_sample_size - 16);
alac->outputsamples_buffer[chan][i] = audiobits;
}
}
/* wasted_bytes = 0; */
interlacing_shift = 0;
interlacing_leftweight = 0;
}
if (get_bits(&alac->gb, 3) != 7)
av_log(avctx, AV_LOG_ERROR, "Error : Wrong End Of Frame\n");
switch(alac->setinfo_sample_size) {
case 16:
if (channels == 2) {
reconstruct_stereo_16(alac->outputsamples_buffer,
(int16_t*)outbuffer,
alac->numchannels,
outputsamples,
interlacing_shift,
interlacing_leftweight);
} else {
int i;
for (i = 0; i < outputsamples; i++) {
int16_t sample = alac->outputsamples_buffer[0][i];
                ((int16_t*)outbuffer)[i * alac->numchannels] = sample;
            }
        }
break;
case 20:
case 24:
// It is not clear if there exist any encoder that creates 24 bit ALAC
// files. iTunes convert 24 bit raw files to 16 bit before encoding.
case 32:
av_log(avctx, AV_LOG_ERROR, "FIXME: unimplemented sample size %i\n", alac->setinfo_sample_size);
break;
default:
        break;
    }
if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8)
av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n", input_buffer_size * 8 - get_bits_count(&alac->gb));
    return input_buffer_size;
} | true | FFmpeg | f7739f3708f786a0b071d8d8b59331525b0ccfd8
8,587 | int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
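    /* Resize the image via the driver callback; on success refresh the
     * cached sector count, truncate dirty bitmaps and notify parents. */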
BlockDriver *drv = bs->drv;
int ret;
if (!drv)
return -ENOMEDIUM;
if (!drv->bdrv_truncate)
return -ENOTSUP;
if (bs->read_only)
return -EACCES;
ret = drv->bdrv_truncate(bs, offset);
if (ret == 0) {
ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
bdrv_dirty_bitmap_truncate(bs);
bdrv_parent_cb_resize(bs);
}
return ret;
} | true | qemu | 3ff2f67a7c24183fcbcfe1332e5223ac6f96438c |
8,588 | static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
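    /* Compute dest = srca - srcb and derive the SR carry (unsigned borrow)
     * and overflow (signed) flags from the operands and the result. */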
TCGv res = tcg_temp_new();
TCGv sr_cy = tcg_temp_new();
TCGv sr_ov = tcg_temp_new();
tcg_gen_sub_tl(res, srca, srcb);
tcg_gen_xor_tl(sr_cy, srca, srcb);
tcg_gen_xor_tl(sr_ov, res, srcb);
tcg_gen_and_tl(sr_ov, sr_ov, sr_cy);
tcg_gen_setcond_tl(TCG_COND_LTU, sr_cy, srca, srcb);
tcg_gen_mov_tl(dest, res);
tcg_temp_free(res);
tcg_gen_shri_tl(sr_ov, sr_ov, TARGET_LONG_BITS - 1);
tcg_gen_deposit_tl(cpu_sr, cpu_sr, sr_cy, ctz32(SR_CY), 1);
tcg_gen_deposit_tl(cpu_sr, cpu_sr, sr_ov, ctz32(SR_OV), 1);
gen_ove_cyov(dc, sr_ov, sr_cy);
tcg_temp_free(sr_ov);
tcg_temp_free(sr_cy);
}
| true | qemu | 9745807191a81c45970f780166f44a7f93b18653 |
8,589 | static void ecc_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
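    /* MMIO write handler: decode the register offset and update the
     * corresponding ECC controller register. */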
ECCState *s = opaque;
switch (addr & ECC_ADDR_MASK) {
case ECC_MER:
s->regs[0] = (s->regs[0] & (ECC_MER_VER | ECC_MER_IMPL)) |
(val & ~(ECC_MER_VER | ECC_MER_IMPL));
DPRINTF("Write memory enable %08x\n", val);
break;
case ECC_MDR:
s->regs[1] = val & ECC_MDR_MASK;
DPRINTF("Write memory delay %08x\n", val);
break;
case ECC_MFSR:
s->regs[2] = val;
DPRINTF("Write memory fault status %08x\n", val);
break;
case ECC_VCR:
s->regs[3] = val;
DPRINTF("Write slot configuration %08x\n", val);
break;
case ECC_DR:
s->regs[6] = val;
DPRINTF("Write diagnosiic %08x\n", val);
break;
case ECC_ECR0:
s->regs[7] = val;
DPRINTF("Write event count 1 %08x\n", val);
break;
case ECC_ECR1:
s->regs[7] = val;
DPRINTF("Write event count 2 %08x\n", val);
break;
}
}
| true | qemu | 8f2ad0a3fc5e3569183d44bf1c7fcb95294be4c0 |
8,590 | static void idct32(int *out, int *tab, int sblimit, int left_shift)
{
int i, j;
int *t, *t1, xr;
const int *xp = costab32;
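    /* partial sums: accumulate the input coefficients with strides 2, 4 and 8
     * before the butterfly stages */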
for(j=31;j>=3;j-=2) tab[j] += tab[j - 2];
t = tab + 30;
t1 = tab + 2;
do {
t[0] += t[-4];
t[1] += t[1 - 4];
t -= 4;
} while (t != t1);
t = tab + 28;
t1 = tab + 4;
do {
t[0] += t[-8];
t[1] += t[1-8];
t[2] += t[2-8];
t[3] += t[3-8];
t -= 8;
} while (t != t1);
t = tab;
t1 = tab + 32;
do {
t[ 3] = -t[ 3];
t[ 6] = -t[ 6];
t[11] = -t[11];
t[12] = -t[12];
t[13] = -t[13];
t[15] = -t[15];
t += 16;
} while (t != t1);
t = tab;
t1 = tab + 8;
do {
int x1, x2, x3, x4;
x3 = MUL(t[16], FIX(SQRT2*0.5));
x4 = t[0] - x3;
x3 = t[0] + x3;
x2 = MUL(-(t[24] + t[8]), FIX(SQRT2*0.5));
x1 = MUL((t[8] - x2), xp[0]);
x2 = MUL((t[8] + x2), xp[1]);
t[ 0] = x3 + x1;
t[ 8] = x4 - x2;
t[16] = x4 + x2;
t[24] = x3 - x1;
t++;
} while (t != t1);
xp += 2;
t = tab;
t1 = tab + 4;
do {
xr = MUL(t[28],xp[0]);
t[28] = (t[0] - xr);
t[0] = (t[0] + xr);
xr = MUL(t[4],xp[1]);
t[ 4] = (t[24] - xr);
t[24] = (t[24] + xr);
xr = MUL(t[20],xp[2]);
t[20] = (t[8] - xr);
t[ 8] = (t[8] + xr);
xr = MUL(t[12],xp[3]);
t[12] = (t[16] - xr);
t[16] = (t[16] + xr);
t++;
} while (t != t1);
xp += 4;
for (i = 0; i < 4; i++) {
xr = MUL(tab[30-i*4],xp[0]);
tab[30-i*4] = (tab[i*4] - xr);
tab[ i*4] = (tab[i*4] + xr);
xr = MUL(tab[ 2+i*4],xp[1]);
tab[ 2+i*4] = (tab[28-i*4] - xr);
tab[28-i*4] = (tab[28-i*4] + xr);
xr = MUL(tab[31-i*4],xp[0]);
tab[31-i*4] = (tab[1+i*4] - xr);
tab[ 1+i*4] = (tab[1+i*4] + xr);
xr = MUL(tab[ 3+i*4],xp[1]);
tab[ 3+i*4] = (tab[29-i*4] - xr);
tab[29-i*4] = (tab[29-i*4] + xr);
xp += 2;
}
t = tab + 30;
t1 = tab + 1;
do {
xr = MUL(t1[0], *xp);
t1[0] = (t[0] - xr);
t[0] = (t[0] + xr);
t -= 2;
t1 += 2;
xp++;
} while (t >= tab);
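    /* write the 32 output samples in the order given by the bitinv32 table,
     * applying the output left shift */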
for(i=0;i<32;i++) {
out[i] = tab[bitinv32[i]] << left_shift;
}
}
| true | FFmpeg | afa982fdae1b49a8aee00a27da876bba10ba1073 |