id (int32, 0-27.3k) | func (string, lengths 26-142k) | target (bool, 2 classes) | project (string, 2 values) | commit_id (string, length 40)
---|---|---|---|---|
8,701 | static int tap_alloc(char *dev, size_t dev_size)
{
int tap_fd, if_fd, ppa = -1;
static int ip_fd = 0;
char *ptr;
static int arp_fd = 0;
int ip_muxid, arp_muxid;
struct strioctl strioc_if, strioc_ppa;
int link_type = I_PLINK;
struct lifreq ifr;
char actual_name[32] = "";
memset(&ifr, 0x0, sizeof(ifr));
if( *dev ){
ptr = dev;
while( *ptr && !qemu_isdigit((int)*ptr) ) ptr++;
ppa = atoi(ptr);
}
/* Check if IP device was opened */
if( ip_fd )
close(ip_fd);
TFR(ip_fd = open("/dev/udp", O_RDWR, 0));
if (ip_fd < 0) {
syslog(LOG_ERR, "Can't open /dev/ip (actually /dev/udp)");
return -1;
}
TFR(tap_fd = open("/dev/tap", O_RDWR, 0));
if (tap_fd < 0) {
syslog(LOG_ERR, "Can't open /dev/tap");
return -1;
}
/* Assign a new PPA and get its unit number. */
strioc_ppa.ic_cmd = TUNNEWPPA;
strioc_ppa.ic_timout = 0;
strioc_ppa.ic_len = sizeof(ppa);
strioc_ppa.ic_dp = (char *)&ppa;
if ((ppa = ioctl (tap_fd, I_STR, &strioc_ppa)) < 0)
syslog (LOG_ERR, "Can't assign new interface");
TFR(if_fd = open("/dev/tap", O_RDWR, 0));
if (if_fd < 0) {
syslog(LOG_ERR, "Can't open /dev/tap (2)");
return -1;
}
if(ioctl(if_fd, I_PUSH, "ip") < 0){
syslog(LOG_ERR, "Can't push IP module");
return -1;
}
if (ioctl(if_fd, SIOCGLIFFLAGS, &ifr) < 0)
syslog(LOG_ERR, "Can't get flags\n");
snprintf (actual_name, 32, "tap%d", ppa);
pstrcpy(ifr.lifr_name, sizeof(ifr.lifr_name), actual_name);
ifr.lifr_ppa = ppa;
/* Assign ppa according to the unit number returned by tun device */
if (ioctl (if_fd, SIOCSLIFNAME, &ifr) < 0)
syslog (LOG_ERR, "Can't set PPA %d", ppa);
if (ioctl(if_fd, SIOCGLIFFLAGS, &ifr) <0)
syslog (LOG_ERR, "Can't get flags\n");
/* Push arp module to if_fd */
if (ioctl (if_fd, I_PUSH, "arp") < 0)
syslog (LOG_ERR, "Can't push ARP module (2)");
/* Push arp module to ip_fd */
if (ioctl (ip_fd, I_POP, NULL) < 0)
syslog (LOG_ERR, "I_POP failed\n");
if (ioctl (ip_fd, I_PUSH, "arp") < 0)
syslog (LOG_ERR, "Can't push ARP module (3)\n");
/* Open arp_fd */
TFR(arp_fd = open ("/dev/tap", O_RDWR, 0));
if (arp_fd < 0)
syslog (LOG_ERR, "Can't open %s\n", "/dev/tap");
/* Set ifname to arp */
strioc_if.ic_cmd = SIOCSLIFNAME;
strioc_if.ic_timout = 0;
strioc_if.ic_len = sizeof(ifr);
strioc_if.ic_dp = (char *)&ifr;
if (ioctl(arp_fd, I_STR, &strioc_if) < 0){
syslog (LOG_ERR, "Can't set ifname to arp\n");
}
if((ip_muxid = ioctl(ip_fd, I_LINK, if_fd)) < 0){
syslog(LOG_ERR, "Can't link TAP device to IP");
return -1;
}
if ((arp_muxid = ioctl (ip_fd, link_type, arp_fd)) < 0)
syslog (LOG_ERR, "Can't link TAP device to ARP");
close (if_fd);
memset(&ifr, 0x0, sizeof(ifr));
pstrcpy(ifr.lifr_name, sizeof(ifr.lifr_name), actual_name);
ifr.lifr_ip_muxid = ip_muxid;
ifr.lifr_arp_muxid = arp_muxid;
if (ioctl (ip_fd, SIOCSLIFMUXID, &ifr) < 0)
{
ioctl (ip_fd, I_PUNLINK , arp_muxid);
ioctl (ip_fd, I_PUNLINK, ip_muxid);
syslog (LOG_ERR, "Can't set multiplexor id");
}
snprintf(dev, dev_size, "tap%d", ppa);
return tap_fd;
}
| true | qemu | 576c6eb6700d241c9d4a6883d25720c7bbaaeccd |
8,703 | static int ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
IVITile *tile, int32_t mv_scale)
{
int x, y, need_mc, mbn, blk, num_blocks, mv_x, mv_y, mc_type;
int offs, mb_offset, row_offset;
IVIMbInfo *mb, *ref_mb;
const int16_t *src;
int16_t *dst;
void (*mc_no_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch,
int mc_type);
if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches "
"parameters %d in ivi_process_empty_tile()\n",
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
return AVERROR_INVALIDDATA;
}
offs = tile->ypos * band->pitch + tile->xpos;
mb = tile->mbs;
ref_mb = tile->ref_mbs;
row_offset = band->mb_size * band->pitch;
need_mc = 0; /* reset the mc tracking flag */
for (y = tile->ypos; y < (tile->ypos + tile->height); y += band->mb_size) {
mb_offset = offs;
for (x = tile->xpos; x < (tile->xpos + tile->width); x += band->mb_size) {
mb->xpos = x;
mb->ypos = y;
mb->buf_offs = mb_offset;
mb->type = 1; /* set the macroblocks type = INTER */
mb->cbp = 0; /* all blocks are empty */
if (!band->qdelta_present && !band->plane && !band->band_num) {
mb->q_delta = band->glob_quant;
mb->mv_x = 0;
mb->mv_y = 0;
}
if (band->inherit_qdelta && ref_mb)
mb->q_delta = ref_mb->q_delta;
if (band->inherit_mv) {
/* motion vector inheritance */
if (mv_scale) {
mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale);
mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale);
} else {
mb->mv_x = ref_mb->mv_x;
mb->mv_y = ref_mb->mv_y;
}
need_mc |= mb->mv_x || mb->mv_y; /* tracking non-zero motion vectors */
}
mb++;
if (ref_mb)
ref_mb++;
mb_offset += band->mb_size;
} // for x
offs += row_offset;
} // for y
if (band->inherit_mv && need_mc) { /* apply motion compensation if there is at least one non-zero motion vector */
num_blocks = (band->mb_size != band->blk_size) ? 4 : 1; /* number of blocks per mb */
mc_no_delta_func = (band->blk_size == 8) ? ff_ivi_mc_8x8_no_delta
: ff_ivi_mc_4x4_no_delta;
for (mbn = 0, mb = tile->mbs; mbn < tile->num_MBs; mb++, mbn++) {
mv_x = mb->mv_x;
mv_y = mb->mv_y;
if (!band->is_halfpel) {
mc_type = 0; /* we have only fullpel vectors */
} else {
mc_type = ((mv_y & 1) << 1) | (mv_x & 1);
mv_x >>= 1;
mv_y >>= 1; /* convert halfpel vectors into fullpel ones */
}
for (blk = 0; blk < num_blocks; blk++) {
/* adjust block position in the buffer according with its number */
offs = mb->buf_offs + band->blk_size * ((blk & 1) + !!(blk & 2) * band->pitch);
mc_no_delta_func(band->buf + offs,
band->ref_buf + offs + mv_y * band->pitch + mv_x,
band->pitch, mc_type);
}
}
} else {
/* copy data from the reference tile into the current one */
src = band->ref_buf + tile->ypos * band->pitch + tile->xpos;
dst = band->buf + tile->ypos * band->pitch + tile->xpos;
for (y = 0; y < tile->height; y++) {
memcpy(dst, src, tile->width*sizeof(band->buf[0]));
src += band->pitch;
dst += band->pitch;
}
}
return 0;
}
| false | FFmpeg | b36e1893ef3430f039c1eaddeedcbb378f9c4444 |
8,704 | static void set_palette(AVFrame * frame, const uint8_t * palette_buffer)
{
uint32_t * palette = (uint32_t *)frame->data[1];
int a;
for(a = 0; a < 256; a++){
palette[a] = AV_RB24(&palette_buffer[a * 3]) * 4;
}
frame->palette_has_changed = 1;
}
| false | FFmpeg | 3eedd29bd7df6f21a79e1a67a6d905049996d2ec |
8,705 | static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
const uint8_t *s = src;
const uint8_t *end;
#ifdef HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
#ifdef HAVE_MMX
mm_end = end - 15;
#if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which cpus this is faster, on Athlon its slightly faster)
asm volatile(
"movq %3, %%mm5 \n\t"
"movq %4, %%mm6 \n\t"
"movq %5, %%mm7 \n\t"
".balign 16 \n\t"
"1: \n\t"
PREFETCH" 32(%1) \n\t"
"movd (%1), %%mm0 \n\t"
"movd 4(%1), %%mm3 \n\t"
"punpckldq 8(%1), %%mm0 \n\t"
"punpckldq 12(%1), %%mm3 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm3, %%mm4 \n\t"
"pand %%mm6, %%mm0 \n\t"
"pand %%mm6, %%mm3 \n\t"
"pmaddwd %%mm7, %%mm0 \n\t"
"pmaddwd %%mm7, %%mm3 \n\t"
"pand %%mm5, %%mm1 \n\t"
"pand %%mm5, %%mm4 \n\t"
"por %%mm1, %%mm0 \n\t"
"por %%mm4, %%mm3 \n\t"
"psrld $6, %%mm0 \n\t"
"pslld $10, %%mm3 \n\t"
"por %%mm3, %%mm0 \n\t"
MOVNTQ" %%mm0, (%0) \n\t"
"addl $16, %1 \n\t"
"addl $8, %0 \n\t"
"cmpl %2, %1 \n\t"
" jb 1b \n\t"
: "+r" (d), "+r"(s)
: "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
);
#else
__asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm __volatile(
"movq %0, %%mm7\n\t"
"movq %1, %%mm6\n\t"
::"m"(red_15mask),"m"(green_15mask));
while(s < mm_end)
{
__asm __volatile(
PREFETCH" 32%1\n\t"
"movd %1, %%mm0\n\t"
"movd 4%1, %%mm3\n\t"
"punpckldq 8%1, %%mm0\n\t"
"punpckldq 12%1, %%mm3\n\t"
"movq %%mm0, %%mm1\n\t"
"movq %%mm0, %%mm2\n\t"
"movq %%mm3, %%mm4\n\t"
"movq %%mm3, %%mm5\n\t"
"psrlq $3, %%mm0\n\t"
"psrlq $3, %%mm3\n\t"
"pand %2, %%mm0\n\t"
"pand %2, %%mm3\n\t"
"psrlq $6, %%mm1\n\t"
"psrlq $6, %%mm4\n\t"
"pand %%mm6, %%mm1\n\t"
"pand %%mm6, %%mm4\n\t"
"psrlq $9, %%mm2\n\t"
"psrlq $9, %%mm5\n\t"
"pand %%mm7, %%mm2\n\t"
"pand %%mm7, %%mm5\n\t"
"por %%mm1, %%mm0\n\t"
"por %%mm4, %%mm3\n\t"
"por %%mm2, %%mm0\n\t"
"por %%mm5, %%mm3\n\t"
"psllq $16, %%mm3\n\t"
"por %%mm3, %%mm0\n\t"
MOVNTQ" %%mm0, %0\n\t"
:"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
d += 4;
s += 16;
}
#endif
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
#endif
while(s < end)
{
const int src= *((uint32_t*)s)++;
*d++ = ((src&0xFF)>>3) + ((src&0xF800)>>6) + ((src&0xF80000)>>9);
}
}
| false | FFmpeg | ae4cffd9fc5bc495692920d646d7d1462315cfa6 |
8,706 | void helper_do_semihosting(CPUMIPSState *env)
{
target_ulong *gpr = env->active_tc.gpr;
const UHIOp op = gpr[25];
char *p, *p2;
switch (op) {
case UHI_exit:
qemu_log("UHI(%d): exit(%d)\n", op, (int)gpr[4]);
exit(gpr[4]);
case UHI_open:
GET_TARGET_STRING(p, gpr[4]);
if (!strcmp("/dev/stdin", p)) {
gpr[2] = 0;
} else if (!strcmp("/dev/stdout", p)) {
gpr[2] = 1;
} else if (!strcmp("/dev/stderr", p)) {
gpr[2] = 2;
} else {
gpr[2] = open(p, get_open_flags(gpr[5]), gpr[6]);
gpr[3] = errno_mips(errno);
}
FREE_TARGET_STRING(p, gpr[4]);
break;
case UHI_close:
if (gpr[4] < 3) {
/* ignore closing stdin/stdout/stderr */
gpr[2] = 0;
goto uhi_done;
}
gpr[2] = close(gpr[4]);
gpr[3] = errno_mips(errno);
break;
case UHI_read:
gpr[2] = read_from_file(env, gpr[4], gpr[5], gpr[6], 0);
gpr[3] = errno_mips(errno);
break;
case UHI_write:
gpr[2] = write_to_file(env, gpr[4], gpr[5], gpr[6], 0);
gpr[3] = errno_mips(errno);
break;
case UHI_lseek:
gpr[2] = lseek(gpr[4], gpr[5], gpr[6]);
gpr[3] = errno_mips(errno);
break;
case UHI_unlink:
GET_TARGET_STRING(p, gpr[4]);
gpr[2] = remove(p);
gpr[3] = errno_mips(errno);
FREE_TARGET_STRING(p, gpr[4]);
break;
case UHI_fstat:
{
struct stat sbuf;
memset(&sbuf, 0, sizeof(sbuf));
gpr[2] = fstat(gpr[4], &sbuf);
gpr[3] = errno_mips(errno);
if (gpr[2]) {
goto uhi_done;
}
gpr[2] = copy_stat_to_target(env, &sbuf, gpr[5]);
gpr[3] = errno_mips(errno);
}
break;
case UHI_argc:
gpr[2] = semihosting_get_argc();
break;
case UHI_argnlen:
if (gpr[4] >= semihosting_get_argc()) {
gpr[2] = -1;
goto uhi_done;
}
gpr[2] = strlen(semihosting_get_arg(gpr[4]));
break;
case UHI_argn:
if (gpr[4] >= semihosting_get_argc()) {
gpr[2] = -1;
goto uhi_done;
}
gpr[2] = copy_argn_to_target(env, gpr[4], gpr[5]);
break;
case UHI_plog:
GET_TARGET_STRING(p, gpr[4]);
p2 = strstr(p, "%d");
if (p2) {
int char_num = p2 - p;
char *buf = g_malloc(char_num + 1);
strncpy(buf, p, char_num);
buf[char_num] = '\0';
gpr[2] = printf("%s%d%s", buf, (int)gpr[5], p2 + 2);
g_free(buf);
} else {
gpr[2] = printf("%s", p);
}
FREE_TARGET_STRING(p, gpr[4]);
break;
case UHI_assert:
GET_TARGET_STRING(p, gpr[4]);
GET_TARGET_STRING(p2, gpr[5]);
printf("assertion '");
printf("\"%s\"", p);
printf("': file \"%s\", line %d\n", p2, (int)gpr[6]);
FREE_TARGET_STRING(p2, gpr[5]);
FREE_TARGET_STRING(p, gpr[4]);
abort();
break;
case UHI_pread:
gpr[2] = read_from_file(env, gpr[4], gpr[5], gpr[6], gpr[7]);
gpr[3] = errno_mips(errno);
break;
case UHI_pwrite:
gpr[2] = write_to_file(env, gpr[4], gpr[5], gpr[6], gpr[7]);
gpr[3] = errno_mips(errno);
break;
#ifndef _WIN32
case UHI_link:
GET_TARGET_STRING(p, gpr[4]);
GET_TARGET_STRING(p2, gpr[5]);
gpr[2] = link(p, p2);
gpr[3] = errno_mips(errno);
FREE_TARGET_STRING(p2, gpr[5]);
FREE_TARGET_STRING(p, gpr[4]);
break;
#endif
default:
fprintf(stderr, "Unknown UHI operation %d\n", op);
abort();
}
uhi_done:
return;
}
| true | qemu | 26e7e982b267e71d40cd20e9e234fedef6770a90 |
8,708 | static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
IVITile *tile, AVCodecContext *avctx)
{
int x, y, mv_x, mv_y, mv_delta, offs, mb_offset,
mv_scale, blks_per_mb;
IVIMbInfo *mb, *ref_mb;
int row_offset = band->mb_size * band->pitch;
mb = tile->mbs;
ref_mb = tile->ref_mbs;
offs = tile->ypos * band->pitch + tile->xpos;
/* scale factor for motion vectors */
mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3);
mv_x = mv_y = 0;
for (y = tile->ypos; y < (tile->ypos + tile->height); y += band->mb_size) {
mb_offset = offs;
for (x = tile->xpos; x < (tile->xpos + tile->width); x += band->mb_size) {
mb->xpos = x;
mb->ypos = y;
mb->buf_offs = mb_offset;
if (get_bits1(&ctx->gb)) {
if (ctx->frame_type == FRAMETYPE_INTRA) {
av_log(avctx, AV_LOG_ERROR, "Empty macroblock in an INTRA picture!\n");
return -1;
}
mb->type = 1; /* empty macroblocks are always INTER */
mb->cbp = 0; /* all blocks are empty */
mb->q_delta = 0;
if (!band->plane && !band->band_num && (ctx->frame_flags & 8)) {
mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table,
IVI_VLC_BITS, 1);
mb->q_delta = IVI_TOSIGNED(mb->q_delta);
}
mb->mv_x = mb->mv_y = 0; /* no motion vector coded */
if (band->inherit_mv){
/* motion vector inheritance */
if (mv_scale) {
mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale);
mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale);
} else {
mb->mv_x = ref_mb->mv_x;
mb->mv_y = ref_mb->mv_y;
}
}
} else {
if (band->inherit_mv) {
mb->type = ref_mb->type; /* copy mb_type from corresponding reference mb */
} else if (ctx->frame_type == FRAMETYPE_INTRA) {
mb->type = 0; /* mb_type is always INTRA for intra-frames */
} else {
mb->type = get_bits1(&ctx->gb);
}
blks_per_mb = band->mb_size != band->blk_size ? 4 : 1;
mb->cbp = get_bits(&ctx->gb, blks_per_mb);
mb->q_delta = 0;
if (band->qdelta_present) {
if (band->inherit_qdelta) {
if (ref_mb) mb->q_delta = ref_mb->q_delta;
} else if (mb->cbp || (!band->plane && !band->band_num &&
(ctx->frame_flags & 8))) {
mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table,
IVI_VLC_BITS, 1);
mb->q_delta = IVI_TOSIGNED(mb->q_delta);
}
}
if (!mb->type) {
mb->mv_x = mb->mv_y = 0; /* there is no motion vector in intra-macroblocks */
} else {
if (band->inherit_mv){
/* motion vector inheritance */
if (mv_scale) {
mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale);
mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale);
} else {
mb->mv_x = ref_mb->mv_x;
mb->mv_y = ref_mb->mv_y;
}
} else {
/* decode motion vector deltas */
mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table,
IVI_VLC_BITS, 1);
mv_y += IVI_TOSIGNED(mv_delta);
mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table,
IVI_VLC_BITS, 1);
mv_x += IVI_TOSIGNED(mv_delta);
mb->mv_x = mv_x;
mb->mv_y = mv_y;
}
}
}
mb++;
if (ref_mb)
ref_mb++;
mb_offset += band->mb_size;
}
offs += row_offset;
}
align_get_bits(&ctx->gb);
return 0;
}
| true | FFmpeg | f41a6c8f3aeb51332bb359038cb504d3fb562a52 |
8,709 | static int init_input_stream(int ist_index, char *error, int error_len)
{
int i;
InputStream *ist = input_streams[ist_index];
if (ist->decoding_needed) {
AVCodec *codec = ist->dec;
if (!codec) {
snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
ist->st->codec->codec_id, ist->file_index, ist->st->index);
return AVERROR(EINVAL);
}
/* update requested sample format for the decoder based on the
corresponding encoder sample format */
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
if (ost->source_index == ist_index) {
update_sample_fmt(ist->st->codec, codec, ost->st->codec);
break;
}
}
if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
ist->st->codec->get_buffer = codec_get_buffer;
ist->st->codec->release_buffer = codec_release_buffer;
ist->st->codec->opaque = ist;
}
if (!av_dict_get(ist->opts, "threads", NULL, 0))
av_dict_set(&ist->opts, "threads", "auto", 0);
if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
ist->file_index, ist->st->index);
return AVERROR(EINVAL);
}
assert_codec_experimental(ist->st->codec, 0);
assert_avoptions(ist->opts);
if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
if (ost->source_index == ist_index) {
if (!ist->st->codec->channel_layout || !ost->st->codec->channel_layout)
get_default_channel_layouts(ost, ist);
break;
}
}
}
}
ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
ist->next_dts = AV_NOPTS_VALUE;
init_pts_correction(&ist->pts_ctx);
ist->is_start = 1;
return 0;
}
| true | FFmpeg | 369cb092ecbbaff20bb0a2a1d60536c3bc04a8f0 |
8,710 | static void init_proc_POWER9(CPUPPCState *env)
{
/* Common Registers */
init_proc_book3s_common(env);
gen_spr_book3s_207_dbg(env);
/* POWER8 Specific Registers */
gen_spr_book3s_ids(env);
gen_spr_amr(env);
gen_spr_iamr(env);
gen_spr_book3s_purr(env);
gen_spr_power5p_common(env);
gen_spr_power5p_lpar(env);
gen_spr_power5p_ear(env);
gen_spr_power6_common(env);
gen_spr_power6_dbg(env);
gen_spr_power8_tce_address_control(env);
gen_spr_power8_ids(env);
gen_spr_power8_ebb(env);
gen_spr_power8_fscr(env);
gen_spr_power8_pmu_sup(env);
gen_spr_power8_pmu_user(env);
gen_spr_power8_tm(env);
gen_spr_power8_pspb(env);
gen_spr_vtb(env);
gen_spr_power8_ic(env);
gen_spr_power8_book4(env);
gen_spr_power8_rpr(env);
/* POWER9 Specific registers */
spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL,
KVM_REG_PPC_TIDR, 0);
/* env variables */
#if !defined(CONFIG_USER_ONLY)
env->slb_nr = 32;
#endif
env->ci_large_pages = true;
env->dcache_line_size = 128;
env->icache_line_size = 128;
/* Allocate hardware IRQ controller */
init_excp_POWER8(env);
ppcPOWER7_irq_init(ppc_env_get_cpu(env));
}
| true | qemu | b8af5b2d5f67b0e1b274f8532f42a47bfe46ea3b
8,711 | static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
const uint8_t *buf, int buf_size)
{
H264Context *h = avctx->priv_data;
MpegEncContext *s = &h->s;
AVFrame *pict = data;
int buf_index;
s->flags= avctx->flags;
s->flags2= avctx->flags2;
/* end of stream, output what is still in the buffers */
if (buf_size == 0) {
Picture *out;
int i, out_idx;
//FIXME factorize this with the output code below
out = h->delayed_pic[0];
out_idx = 0;
for(i=1; h->delayed_pic[i] && h->delayed_pic[i]->poc; i++)
if(h->delayed_pic[i]->poc < out->poc){
out = h->delayed_pic[i];
out_idx = i;
}
for(i=out_idx; h->delayed_pic[i]; i++)
h->delayed_pic[i] = h->delayed_pic[i+1];
if(out){
*data_size = sizeof(AVFrame);
*pict= *(AVFrame*)out;
}
return 0;
}
if(h->is_avc && !h->got_avcC) {
int i, cnt, nalsize;
unsigned char *p = avctx->extradata;
if(avctx->extradata_size < 7) {
av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
return -1;
}
if(*p != 1) {
av_log(avctx, AV_LOG_ERROR, "Unknown avcC version %d\n", *p);
return -1;
}
/* sps and pps in the avcC always have length coded with 2 bytes,
so put a fake nal_length_size = 2 while parsing them */
h->nal_length_size = 2;
// Decode sps from avcC
cnt = *(p+5) & 0x1f; // Number of sps
p += 6;
for (i = 0; i < cnt; i++) {
nalsize = AV_RB16(p) + 2;
if(decode_nal_units(h, p, nalsize) < 0) {
av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i);
return -1;
}
p += nalsize;
}
// Decode pps from avcC
cnt = *(p++); // Number of pps
for (i = 0; i < cnt; i++) {
nalsize = AV_RB16(p) + 2;
if(decode_nal_units(h, p, nalsize) != nalsize) {
av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i);
return -1;
}
p += nalsize;
}
// Now store right nal length size, that will be use to parse all other nals
h->nal_length_size = ((*(((char*)(avctx->extradata))+4))&0x03)+1;
// Do not reparse avcC
h->got_avcC = 1;
}
if(avctx->frame_number==0 && !h->is_avc && s->avctx->extradata_size){
if(decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) < 0)
return -1;
}
buf_index=decode_nal_units(h, buf, buf_size);
if(buf_index < 0)
return -1;
if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){
if (avctx->skip_frame >= AVDISCARD_NONREF || s->hurry_up) return 0;
av_log(avctx, AV_LOG_ERROR, "no frame!\n");
return -1;
}
if(!(s->flags2 & CODEC_FLAG2_CHUNKS) || (s->mb_y >= s->mb_height && s->mb_height)){
Picture *out = s->current_picture_ptr;
Picture *cur = s->current_picture_ptr;
int i, pics, cross_idr, out_of_order, out_idx;
s->mb_y= 0;
s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
s->current_picture_ptr->pict_type= s->pict_type;
if(!s->dropable) {
execute_ref_pic_marking(h, h->mmco, h->mmco_index);
h->prev_poc_msb= h->poc_msb;
h->prev_poc_lsb= h->poc_lsb;
}
h->prev_frame_num_offset= h->frame_num_offset;
h->prev_frame_num= h->frame_num;
/*
* FIXME: Error handling code does not seem to support interlaced
* when slices span multiple rows
* The ff_er_add_slice calls don't work right for bottom
* fields; they cause massive erroneous error concealing
* Error marking covers both fields (top and bottom).
* This causes a mismatched s->error_count
* and a bad error table. Further, the error count goes to
* INT_MAX when called for bottom field, because mb_y is
* past end by one (callers fault) and resync_mb_y != 0
* causes problems for the first MB line, too.
*/
if (!FIELD_PICTURE)
ff_er_frame_end(s);
MPV_frame_end(s);
if (s->first_field) {
/* Wait for second field. */
*data_size = 0;
} else {
cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE;
/* Derive top_field_first from field pocs. */
cur->top_field_first = cur->field_poc[0] < cur->field_poc[1];
//FIXME do something with unavailable reference frames
/* Sort B-frames into display order */
if(h->sps.bitstream_restriction_flag
&& s->avctx->has_b_frames < h->sps.num_reorder_frames){
s->avctx->has_b_frames = h->sps.num_reorder_frames;
s->low_delay = 0;
}
if( s->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT
&& !h->sps.bitstream_restriction_flag){
s->avctx->has_b_frames= MAX_DELAYED_PIC_COUNT;
s->low_delay= 0;
}
pics = 0;
while(h->delayed_pic[pics]) pics++;
assert(pics <= MAX_DELAYED_PIC_COUNT);
h->delayed_pic[pics++] = cur;
if(cur->reference == 0)
cur->reference = DELAYED_PIC_REF;
out = h->delayed_pic[0];
out_idx = 0;
for(i=1; h->delayed_pic[i] && h->delayed_pic[i]->poc; i++)
if(h->delayed_pic[i]->poc < out->poc){
out = h->delayed_pic[i];
out_idx = i;
}
cross_idr = !h->delayed_pic[0]->poc || !!h->delayed_pic[i];
out_of_order = !cross_idr && out->poc < h->outputed_poc;
if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames >= h->sps.num_reorder_frames)
{ }
else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
|| (s->low_delay &&
((!cross_idr && out->poc > h->outputed_poc + 2)
|| cur->pict_type == FF_B_TYPE)))
{
s->low_delay = 0;
s->avctx->has_b_frames++;
}
if(out_of_order || pics > s->avctx->has_b_frames){
out->reference &= ~DELAYED_PIC_REF;
for(i=out_idx; h->delayed_pic[i]; i++)
h->delayed_pic[i] = h->delayed_pic[i+1];
}
if(!out_of_order && pics > s->avctx->has_b_frames){
*data_size = sizeof(AVFrame);
h->outputed_poc = out->poc;
*pict= *(AVFrame*)out;
}else{
av_log(avctx, AV_LOG_DEBUG, "no picture\n");
}
}
}
assert(pict->data[0] || !*data_size);
ff_print_debug_info(s, pict);
//printf("out %d\n", (int)pict->data[0]);
#if 0 //?
/* Return the Picture timestamp as the frame number */
/* we subtract 1 because it is added on utils.c */
avctx->frame_number = s->picture_number - 1;
#endif
return get_consumed_bytes(s, buf_index, buf_size);
}
| true | FFmpeg | 357282c6f3c990833d0508c234ac4522d536c4ac |
8,713 | target_ulong spapr_rtas_call(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t token, uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
if ((token >= TOKEN_BASE)
&& ((token - TOKEN_BASE) < TOKEN_MAX)) {
struct rtas_call *call = rtas_table + (token - TOKEN_BASE);
if (call->fn) {
call->fn(cpu, spapr, token, nargs, args, nret, rets);
return H_SUCCESS;
}
}
/* HACK: Some Linux early debug code uses RTAS display-character,
* but assumes the token value is 0xa (which it is on some real
* machines) without looking it up in the device tree. This
* special case makes this work */
if (token == 0xa) {
rtas_display_character(cpu, spapr, 0xa, nargs, args, nret, rets);
return H_SUCCESS;
}
hcall_dprintf("Unknown RTAS token 0x%x\n", token);
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return H_PARAMETER;
}
| true | qemu | 3a3b8502e6f0c8d30865c5f36d2c3ae4114000b5 |
8,714 | void OPPROTO op_fdivr_ST0_FT0(void)
{
ST0 = FT0 / ST0;
}
| true | qemu | 2ee73ac3a855fb0cfba3db91fdd1ecebdbc6f971 |
8,715 | static void spectral_to_sample(AACContext *ac, int samples)
{
int i, type;
void (*imdct_and_window)(AACContext *ac, SingleChannelElement *sce);
switch (ac->oc[1].m4ac.object_type) {
case AOT_ER_AAC_LD:
imdct_and_window = imdct_and_windowing_ld;
break;
case AOT_ER_AAC_ELD:
imdct_and_window = imdct_and_windowing_eld;
break;
default:
imdct_and_window = ac->imdct_and_windowing;
}
for (type = 3; type >= 0; type--) {
for (i = 0; i < MAX_ELEM_ID; i++) {
ChannelElement *che = ac->che[type][i];
if (che && che->present) {
if (type <= TYPE_CPE)
apply_channel_coupling(ac, che, type, i, BEFORE_TNS, AAC_RENAME(apply_dependent_coupling));
if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
if (che->ch[0].ics.predictor_present) {
if (che->ch[0].ics.ltp.present)
ac->apply_ltp(ac, &che->ch[0]);
if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
ac->apply_ltp(ac, &che->ch[1]);
}
}
if (che->ch[0].tns.present)
ac->apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
if (che->ch[1].tns.present)
ac->apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1);
if (type <= TYPE_CPE)
apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, AAC_RENAME(apply_dependent_coupling));
if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
imdct_and_window(ac, &che->ch[0]);
if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
ac->update_ltp(ac, &che->ch[0]);
if (type == TYPE_CPE) {
imdct_and_window(ac, &che->ch[1]);
if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
ac->update_ltp(ac, &che->ch[1]);
}
if (ac->oc[1].m4ac.sbr > 0) {
AAC_RENAME(ff_sbr_apply)(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
}
}
if (type <= TYPE_CCE)
apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, AAC_RENAME(apply_independent_coupling));
#if USE_FIXED
{
int j;
/* preparation for resampler */
for(j = 0; j<samples; j++){
che->ch[0].ret[j] = (int32_t)av_clip64((int64_t)che->ch[0].ret[j]<<7, INT32_MIN, INT32_MAX-0x8000)+0x8000;
if(type == TYPE_CPE)
che->ch[1].ret[j] = (int32_t)av_clip64((int64_t)che->ch[1].ret[j]<<7, INT32_MIN, INT32_MAX-0x8000)+0x8000;
}
}
#endif /* USE_FIXED */
che->present = 0;
} else if (che) {
av_log(ac->avctx, AV_LOG_VERBOSE, "ChannelElement %d.%d missing \n", type, i);
}
}
}
}
| true | FFmpeg | 2ccd2c9003c77aee8ffb5f4f43863e35bdf0e4b6 |
8,716 | static void serial_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
SerialState *s = opaque;
unsigned char ch;
addr &= 7;
#ifdef DEBUG_SERIAL
printf("serial: write addr=0x%02x val=0x%02x\n", addr, val);
#endif
switch(addr) {
default:
case 0:
if (s->lcr & UART_LCR_DLAB) {
s->divider = (s->divider & 0xff00) | val;
serial_update_parameters(s);
} else {
s->thr_ipending = 0;
s->lsr &= ~UART_LSR_THRE;
serial_update_irq(s);
ch = val;
if (!(s->mcr & UART_MCR_LOOP)) {
/* when not in loopback mode, send the char */
qemu_chr_write(s->chr, &ch, 1);
} else {
/* in loopback mode, say that we just received a char */
serial_receive_byte(s, ch);
}
if (s->tx_burst > 0) {
s->tx_burst--;
serial_tx_done(s);
} else if (s->tx_burst == 0) {
s->tx_burst--;
qemu_mod_timer(s->tx_timer, qemu_get_clock(vm_clock) +
ticks_per_sec * THROTTLE_TX_INTERVAL / 1000);
}
}
break;
case 1:
if (s->lcr & UART_LCR_DLAB) {
s->divider = (s->divider & 0x00ff) | (val << 8);
serial_update_parameters(s);
} else {
s->ier = val & 0x0f;
if (s->lsr & UART_LSR_THRE) {
s->thr_ipending = 1;
}
serial_update_irq(s);
}
break;
case 2:
break;
case 3:
{
int break_enable;
s->lcr = val;
serial_update_parameters(s);
break_enable = (val >> 6) & 1;
if (break_enable != s->last_break_enable) {
s->last_break_enable = break_enable;
qemu_chr_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_BREAK,
&break_enable);
}
}
break;
case 4:
s->mcr = val & 0x1f;
break;
case 5:
break;
case 6:
break;
case 7:
s->scr = val;
break;
}
}
| true | qemu | 81174dae3f9189519cd60c7b79e91c291b021bbe |
8,717 | int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
{
int ret;
if ((ret = graph_check_validity(graphctx, log_ctx)))
if ((ret = graph_insert_fifos(graphctx, log_ctx)) < 0)
if ((ret = graph_config_formats(graphctx, log_ctx)))
if ((ret = graph_config_links(graphctx, log_ctx)))
if ((ret = graph_config_pointers(graphctx, log_ctx)))
return 0;
}
| true | FFmpeg | 345e7072ab867ee1e56cbf857dbc93d37f168294
8,718 | static void process_param(float *bc, EqParameter *param, float fs)
{
int i;
for (i = 0; i <= NBANDS; i++) {
param[i].lower = i == 0 ? 0 : bands[i - 1];
param[i].upper = i == NBANDS - 1 ? fs : bands[i];
param[i].gain = bc[i];
}
}
| false | FFmpeg | 2820c9dfaa1f4093fea471665fdbef9ca7080bcd |
8,721 | static int apng_read_packet(AVFormatContext *s, AVPacket *pkt)
{
APNGDemuxContext *ctx = s->priv_data;
int64_t ret;
int64_t size;
AVIOContext *pb = s->pb;
uint32_t len, tag;
/*
* fcTL chunk length, in bytes:
* 4 (length)
* 4 (tag)
* 26 (actual chunk)
* 4 (crc) bytes
* and needed next:
* 4 (length)
* 4 (tag (must be fdAT or IDAT))
*/
/* if num_play is not 1, then the seekback is already guaranteed */
if (ctx->num_play == 1 && (ret = ffio_ensure_seekback(pb, 46)) < 0)
return ret;
len = avio_rb32(pb);
tag = avio_rl32(pb);
switch (tag) {
case MKTAG('f', 'c', 'T', 'L'):
if (len != 26)
return AVERROR_INVALIDDATA;
if ((ret = decode_fctl_chunk(s, ctx, pkt)) < 0)
return ret;
/* fcTL must precede fdAT or IDAT */
len = avio_rb32(pb);
tag = avio_rl32(pb);
if (len > 0x7fffffff ||
tag != MKTAG('f', 'd', 'A', 'T') &&
tag != MKTAG('I', 'D', 'A', 'T'))
return AVERROR_INVALIDDATA;
size = 38 /* fcTL */ + 8 /* len, tag */ + len + 4 /* crc */;
if (size > INT_MAX)
return AVERROR(EINVAL);
if ((ret = avio_seek(pb, -46, SEEK_CUR)) < 0 ||
(ret = av_append_packet(pb, pkt, size)) < 0)
return ret;
if (ctx->num_play == 1 && (ret = ffio_ensure_seekback(pb, 8)) < 0)
return ret;
len = avio_rb32(pb);
tag = avio_rl32(pb);
while (tag &&
tag != MKTAG('f', 'c', 'T', 'L') &&
tag != MKTAG('I', 'E', 'N', 'D')) {
if (len > 0x7fffffff)
return AVERROR_INVALIDDATA;
if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0 ||
(ret = av_append_packet(pb, pkt, len + 12)) < 0)
return ret;
if (ctx->num_play == 1 && (ret = ffio_ensure_seekback(pb, 8)) < 0)
return ret;
len = avio_rb32(pb);
tag = avio_rl32(pb);
}
if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0)
return ret;
if (ctx->is_key_frame)
pkt->flags |= AV_PKT_FLAG_KEY;
pkt->pts = ctx->pkt_pts;
pkt->duration = ctx->pkt_duration;
ctx->pkt_pts += ctx->pkt_duration;
return send_extradata(ctx, pkt);
case MKTAG('I', 'E', 'N', 'D'):
ctx->cur_loop++;
if (ctx->ignore_loop || ctx->num_play >= 1 && ctx->cur_loop == ctx->num_play) {
avio_seek(pb, -8, SEEK_CUR);
return AVERROR_EOF;
}
if ((ret = avio_seek(pb, ctx->extra_data_size + 8, SEEK_SET)) < 0)
return ret;
return send_extradata(ctx, pkt);
default:
{
char tag_buf[32];
av_get_codec_tag_string(tag_buf, sizeof(tag_buf), tag);
avpriv_request_sample(s, "In-stream tag=%s (0x%08X) len=%"PRIu32, tag_buf, tag, len);
avio_skip(pb, len + 4);
}
}
/* Handle the unsupported yet cases */
return AVERROR_PATCHWELCOME;
}
| true | FFmpeg | 16c429166ddf1736972b6ccce84bd3509ec16a34 |
8,722 | static int set_expr(AVExpr **pexpr, const char *expr, void *log_ctx)
{
int ret;
if (*pexpr)
av_expr_free(*pexpr);
*pexpr = NULL;
ret = av_expr_parse(pexpr, expr, var_names,
NULL, NULL, NULL, NULL, 0, log_ctx);
if (ret < 0)
av_log(log_ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s'\n", expr);
return ret;
}
| true | FFmpeg | aff6cebb41669a25008f76ce3c310001613e6263 |
8,723 | int MPV_encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
int i, dummy;
int chroma_h_shift, chroma_v_shift;
avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
s->bit_rate = avctx->bit_rate;
s->width = avctx->width;
s->height = avctx->height;
if(avctx->gop_size > 600){
av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
avctx->gop_size=600;
}
s->gop_size = avctx->gop_size;
s->avctx = avctx;
s->flags= avctx->flags;
s->flags2= avctx->flags2;
s->max_b_frames= avctx->max_b_frames;
s->codec_id= avctx->codec->id;
s->luma_elim_threshold = avctx->luma_elim_threshold;
s->chroma_elim_threshold= avctx->chroma_elim_threshold;
s->strict_std_compliance= avctx->strict_std_compliance;
s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
s->mpeg_quant= avctx->mpeg_quant;
s->rtp_mode= !!avctx->rtp_payload_size;
if (s->gop_size <= 1) {
s->intra_only = 1;
s->gop_size = 12;
} else {
s->intra_only = 0;
}
s->me_method = avctx->me_method;
/* Fixed QSCALE */
s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
s->adaptive_quant= ( s->avctx->lumi_masking
|| s->avctx->dark_masking
|| s->avctx->temporal_cplx_masking
|| s->avctx->spatial_cplx_masking
|| s->avctx->p_masking
|| (s->flags&CODEC_FLAG_QP_RD))
&& !s->fixed_qscale;
s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);
if(avctx->rc_max_rate && !avctx->rc_buffer_size){
av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
return -1;
}
if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
}
if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
&& s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
return -1;
}
if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decission\n");
return -1;
}
if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
return -1;
}
if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
return -1;
}
if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
return -1;
}
if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
return -1;
}
if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n");
return -1;
}
if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
return -1;
}
if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
return -1;
}
if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yet\n");
return -1;
}
i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base);
if(i > 1){
av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
avctx->frame_rate /= i;
avctx->frame_rate_base /= i;
// return -1;
}
if(s->codec_id==CODEC_ID_MJPEG){
s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
s->inter_quant_bias= 0;
}else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
s->inter_quant_bias= 0;
}else{
s->intra_quant_bias=0;
s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
}
if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
s->intra_quant_bias= avctx->intra_quant_bias;
if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
s->inter_quant_bias= avctx->inter_quant_bias;
avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
switch(avctx->codec->id) {
case CODEC_ID_MPEG1VIDEO:
s->out_format = FMT_MPEG1;
s->low_delay= 0; //s->max_b_frames ? 0 : 1;
avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
break;
case CODEC_ID_MPEG2VIDEO:
s->out_format = FMT_MPEG1;
s->low_delay= 0; //s->max_b_frames ? 0 : 1;
avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
s->rtp_mode= 1;
break;
case CODEC_ID_LJPEG:
case CODEC_ID_MJPEG:
s->out_format = FMT_MJPEG;
s->intra_only = 1; /* force intra only for jpeg */
s->mjpeg_write_tables = 1; /* write all tables */
s->mjpeg_data_only_frames = 0; /* write all the needed headers */
s->mjpeg_vsample[0] = 1<<chroma_v_shift;
s->mjpeg_vsample[1] = 1;
s->mjpeg_vsample[2] = 1;
s->mjpeg_hsample[0] = 1<<chroma_h_shift;
s->mjpeg_hsample[1] = 1;
s->mjpeg_hsample[2] = 1;
if (mjpeg_init(s) < 0)
return -1;
avctx->delay=0;
s->low_delay=1;
break;
#ifdef CONFIG_RISKY
case CODEC_ID_H263:
if (h263_get_picture_format(s->width, s->height) == 7) {
av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n");
return -1;
}
s->out_format = FMT_H263;
s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
avctx->delay=0;
s->low_delay=1;
break;
case CODEC_ID_H263P:
s->out_format = FMT_H263;
s->h263_plus = 1;
/* Fx */
s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
s->modified_quant= s->h263_aic;
s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;
/* /Fx */
/* These are just to be sure */
avctx->delay=0;
s->low_delay=1;
break;
case CODEC_ID_FLV1:
s->out_format = FMT_H263;
s->h263_flv = 2; /* format = 1; 11-bit codes */
s->unrestricted_mv = 1;
s->rtp_mode=0; /* don't allow GOB */
avctx->delay=0;
s->low_delay=1;
break;
case CODEC_ID_RV10:
s->out_format = FMT_H263;
avctx->delay=0;
s->low_delay=1;
break;
case CODEC_ID_MPEG4:
s->out_format = FMT_H263;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->low_delay= s->max_b_frames ? 0 : 1;
avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
break;
case CODEC_ID_MSMPEG4V1:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 1;
avctx->delay=0;
s->low_delay=1;
break;
case CODEC_ID_MSMPEG4V2:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 2;
avctx->delay=0;
s->low_delay=1;
break;
case CODEC_ID_MSMPEG4V3:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 3;
s->flipflop_rounding=1;
avctx->delay=0;
s->low_delay=1;
break;
case CODEC_ID_WMV1:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 4;
s->flipflop_rounding=1;
avctx->delay=0;
s->low_delay=1;
break;
case CODEC_ID_WMV2:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 5;
s->flipflop_rounding=1;
avctx->delay=0;
s->low_delay=1;
break;
#endif
default:
return -1;
}
{ /* set up some save defaults, some codecs might override them later */
static int done=0;
if(!done){
int i;
done=1;
default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
for(i=-16; i<16; i++){
default_fcode_tab[i + MAX_MV]= 1;
}
}
}
s->me.mv_penalty= default_mv_penalty;
s->fcode_tab= default_fcode_tab;
/* dont use mv_penalty table for crap MV as it would be confused */
//FIXME remove after fixing / removing old ME
if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
s->encoding = 1;
/* init */
if (MPV_common_init(s) < 0)
return -1;
if(s->modified_quant)
s->chroma_qscale_table= ff_h263_chroma_qscale_table;
s->progressive_frame=
s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME));
ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
ff_init_me(s);
#ifdef CONFIG_ENCODERS
#ifdef CONFIG_RISKY
if (s->out_format == FMT_H263)
h263_encode_init(s);
if(s->msmpeg4_version)
ff_msmpeg4_encode_init(s);
#endif
if (s->out_format == FMT_MPEG1)
ff_mpeg1_encode_init(s);
#endif
/* init default q matrix */
for(i=0;i<64;i++) {
int j= s->dsp.idct_permutation[i];
#ifdef CONFIG_RISKY
if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
}else if(s->out_format == FMT_H263){
s->intra_matrix[j] =
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}else
#endif
{ /* mpeg1/2 */
s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}
if(s->avctx->intra_matrix)
s->intra_matrix[j] = s->avctx->intra_matrix[i];
if(s->avctx->inter_matrix)
s->inter_matrix[j] = s->avctx->inter_matrix[i];
}
/* precompute matrix */
/* for mjpeg, we do include qscale in the matrix */
if (s->out_format != FMT_MJPEG) {
convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
s->intra_matrix, s->intra_quant_bias, 1, 31);
convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
s->inter_matrix, s->inter_quant_bias, 1, 31);
}
if(ff_rate_control_init(s) < 0)
return -1;
s->picture_number = 0;
s->input_picture_number = 0;
s->picture_in_gop_number = 0;
/* motion detector init */
s->f_code = 1;
s->b_code = 1;
return 0;
}
| true | FFmpeg | 747a0554ea8ad09404c1f5b80239ebd8d71b291e |
8,725 | static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int is_write)
{
AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
IDEState *s = &ad->port.ifs[0];
if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset) == -1) {
DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n");
return -1;
}
s->io_buffer_size = s->sg.size;
DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
return s->io_buffer_size;
}
| true | qemu | a718978ed58abc1ad92567a9c17525136be02a71 |
8,728 | static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
struct image_info * info)
{
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
struct exec interp_ex;
int interpreter_fd = -1; /* avoid warning */
unsigned long load_addr, load_bias;
int load_addr_set = 0;
unsigned int interpreter_type = INTERPRETER_NONE;
unsigned char ibcs2_interpreter;
int i;
unsigned long mapped_addr;
struct elf_phdr * elf_ppnt;
struct elf_phdr *elf_phdata;
unsigned long elf_bss, k, elf_brk;
int retval;
char * elf_interpreter;
unsigned long elf_entry, interp_load_addr = 0;
int status;
unsigned long start_code, end_code, end_data;
unsigned long elf_stack;
char passed_fileno[6];
ibcs2_interpreter = 0;
status = 0;
load_addr = 0;
load_bias = 0;
elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
#ifdef BSWAP_NEEDED
bswap_ehdr(&elf_ex);
#endif
if (elf_ex.e_ident[0] != 0x7f ||
strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) {
return -ENOEXEC;
}
/* First of all, some simple consistency checks */
if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
(! elf_check_arch(elf_ex.e_machine))) {
return -ENOEXEC;
}
/* Now read in all of the header information */
elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
if (elf_phdata == NULL) {
return -ENOMEM;
}
retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
if(retval > 0) {
retval = read(bprm->fd, (char *) elf_phdata,
elf_ex.e_phentsize * elf_ex.e_phnum);
}
if (retval < 0) {
perror("load_elf_binary");
exit(-1);
free (elf_phdata);
return -errno;
}
#ifdef BSWAP_NEEDED
elf_ppnt = elf_phdata;
for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
bswap_phdr(elf_ppnt);
}
#endif
elf_ppnt = elf_phdata;
elf_bss = 0;
elf_brk = 0;
elf_stack = ~0UL;
elf_interpreter = NULL;
start_code = ~0UL;
end_code = 0;
end_data = 0;
for(i=0;i < elf_ex.e_phnum; i++) {
if (elf_ppnt->p_type == PT_INTERP) {
if ( elf_interpreter != NULL )
{
free (elf_phdata);
free(elf_interpreter);
close(bprm->fd);
return -EINVAL;
}
/* This is the program interpreter used for
* shared libraries - for now assume that this
* is an a.out format binary
*/
elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
if (elf_interpreter == NULL) {
free (elf_phdata);
close(bprm->fd);
return -ENOMEM;
}
retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
if(retval >= 0) {
retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
}
if(retval < 0) {
perror("load_elf_binary2");
exit(-1);
}
/* If the program interpreter is one of these two,
then assume an iBCS2 image. Otherwise assume
a native linux image. */
/* JRP - Need to add X86 lib dir stuff here... */
if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
ibcs2_interpreter = 1;
}
#if 0
printf("Using ELF interpreter %s\n", elf_interpreter);
#endif
if (retval >= 0) {
retval = open(path(elf_interpreter), O_RDONLY);
if(retval >= 0) {
interpreter_fd = retval;
}
else {
perror(elf_interpreter);
exit(-1);
/* retval = -errno; */
}
}
if (retval >= 0) {
retval = lseek(interpreter_fd, 0, SEEK_SET);
if(retval >= 0) {
retval = read(interpreter_fd,bprm->buf,128);
}
}
if (retval >= 0) {
interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
}
if (retval < 0) {
perror("load_elf_binary3");
exit(-1);
free (elf_phdata);
free(elf_interpreter);
close(bprm->fd);
return retval;
}
}
elf_ppnt++;
}
/* Some simple consistency checks for the interpreter */
if (elf_interpreter){
interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
/* Now figure out which format our binary is */
if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
(N_MAGIC(interp_ex) != QMAGIC)) {
interpreter_type = INTERPRETER_ELF;
}
if (interp_elf_ex.e_ident[0] != 0x7f ||
strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
interpreter_type &= ~INTERPRETER_ELF;
}
if (!interpreter_type) {
free(elf_interpreter);
free(elf_phdata);
close(bprm->fd);
return -ELIBBAD;
}
}
/* OK, we are done with that, now set up the arg stuff,
and then start this sucker up */
if (!bprm->sh_bang) {
char * passed_p;
if (interpreter_type == INTERPRETER_AOUT) {
sprintf(passed_fileno, "%d", bprm->fd);
passed_p = passed_fileno;
if (elf_interpreter) {
bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p);
bprm->argc++;
}
}
if (!bprm->p) {
if (elf_interpreter) {
free(elf_interpreter);
}
free (elf_phdata);
close(bprm->fd);
return -E2BIG;
}
}
/* OK, This is the point of no return */
info->end_data = 0;
info->end_code = 0;
info->start_mmap = (unsigned long)ELF_START_MMAP;
info->mmap = 0;
elf_entry = (unsigned long) elf_ex.e_entry;
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
info->rss = 0;
bprm->p = setup_arg_pages(bprm->p, bprm, info);
info->start_stack = bprm->p;
/* Now we do a little grungy work by mmaping the ELF image into
* the correct location in memory. At this point, we assume that
* the image should be loaded at fixed address, not at a variable
* address.
*/
for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0;
int elf_flags = 0;
unsigned long error;
if (elf_ppnt->p_type != PT_LOAD)
continue;
if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
if (elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (elf_ex.e_type == ET_DYN) {
/* Try and get dynamic programs out of the way of the default mmap
base, as well as whatever program they might try to exec. This
is because the brk will follow the loader, and is not movable. */
/* NOTE: for qemu, we do a big mmap to get enough space
without harcoding any address */
error = target_mmap(0, ET_DYN_MAP_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANON,
-1, 0);
if (error == -1) {
perror("mmap");
exit(-1);
}
load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
}
error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
(elf_ppnt->p_filesz +
TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
elf_prot,
(MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
bprm->fd,
(elf_ppnt->p_offset -
TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
if (error == -1) {
perror("mmap");
exit(-1);
}
#ifdef LOW_ELF_STACK
if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif
if (!load_addr_set) {
load_addr_set = 1;
load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
if (elf_ex.e_type == ET_DYN) {
load_bias += error -
TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
load_addr += load_bias;
}
}
k = elf_ppnt->p_vaddr;
if (k < start_code)
start_code = k;
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
if (k > elf_bss)
elf_bss = k;
if ((elf_ppnt->p_flags & PF_X) && end_code < k)
end_code = k;
if (end_data < k)
end_data = k;
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
if (k > elf_brk) elf_brk = k;
}
elf_entry += load_bias;
elf_bss += load_bias;
elf_brk += load_bias;
start_code += load_bias;
end_code += load_bias;
// start_data += load_bias;
end_data += load_bias;
if (elf_interpreter) {
if (interpreter_type & 1) {
elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
}
else if (interpreter_type & 2) {
elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
&interp_load_addr);
}
close(interpreter_fd);
free(elf_interpreter);
if (elf_entry == ~0UL) {
printf("Unable to load interpreter\n");
free(elf_phdata);
exit(-1);
return 0;
}
}
free(elf_phdata);
if (loglevel)
load_symbols(&elf_ex, bprm->fd);
if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
#ifdef LOW_ELF_STACK
info->start_stack = bprm->p = elf_stack - 4;
#endif
bprm->p = (unsigned long)
create_elf_tables((char *)bprm->p,
bprm->argc,
bprm->envc,
&elf_ex,
load_addr, load_bias,
interp_load_addr,
(interpreter_type == INTERPRETER_AOUT ? 0 : 1),
info);
if (interpreter_type == INTERPRETER_AOUT)
info->arg_start += strlen(passed_fileno) + 1;
info->start_brk = info->brk = elf_brk;
info->end_code = end_code;
info->start_code = start_code;
info->end_data = end_data;
info->start_stack = bprm->p;
/* Calling set_brk effectively mmaps the pages that we need for the bss and break
sections */
set_brk(elf_bss, elf_brk);
padzero(elf_bss);
#if 0
printf("(start_brk) %x\n" , info->start_brk);
printf("(end_code) %x\n" , info->end_code);
printf("(start_code) %x\n" , info->start_code);
printf("(end_data) %x\n" , info->end_data);
printf("(start_stack) %x\n" , info->start_stack);
printf("(brk) %x\n" , info->brk);
#endif
if ( info->personality == PER_SVR4 )
{
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
emulate the SVr4 behavior. Sigh. */
mapped_addr = target_mmap(0, host_page_size, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, -1, 0);
}
#ifdef ELF_PLAT_INIT
/*
* The ABI may specify that certain registers be set up in special
* ways (on i386 %edx is the address of a DT_FINI function, for
* example. This macro performs whatever initialization to
* the regs structure is required.
*/
ELF_PLAT_INIT(regs);
#endif
info->entry = elf_entry;
return 0;
}
| true | qemu | eba2af633fb8fa3b20ad578184d79e1f0eabcefe |
8,729 | static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
register int t1,t2,t3,t4;
DCTELEM *src, *dst;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
src = block;
dst = block;
for(i = 0; i < 4; i++){
t1 = 17 * (src[0] + src[2]) + 4;
t2 = 17 * (src[0] - src[2]) + 4;
t3 = 22 * src[1] + 10 * src[3];
t4 = 22 * src[3] - 10 * src[1];
dst[0] = (t1 + t3) >> 3;
dst[1] = (t2 - t4) >> 3;
dst[2] = (t2 + t4) >> 3;
dst[3] = (t1 - t3) >> 3;
src += 8;
dst += 8;
}
src = block;
for(i = 0; i < 4; i++){
t1 = 17 * (src[ 0] + src[16]) + 64;
t2 = 17 * (src[ 0] - src[16]) + 64;
t3 = 22 * src[ 8] + 10 * src[24];
t4 = 22 * src[24] - 10 * src[ 8];
dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)];
dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)];
dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)];
dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)];
src ++;
dest++;
}
}
| false | FFmpeg | 32f0c658283e2451add02a6ee5c719efa877a34c |
8,730 | static void fill_buffer(AVIOContext *s)
{
int max_buffer_size = s->max_packet_size ?
s->max_packet_size : IO_BUFFER_SIZE;
uint8_t *dst = s->buf_end - s->buffer + max_buffer_size < s->buffer_size ?
s->buf_end : s->buffer;
int len = s->buffer_size - (dst - s->buffer);
/* can't fill the buffer without read_packet, just set EOF if appropriate */
if (!s->read_packet && s->buf_ptr >= s->buf_end)
s->eof_reached = 1;
/* no need to do anything if EOF already reached */
if (s->eof_reached)
return;
if (s->update_checksum && dst == s->buffer) {
if (s->buf_end > s->checksum_ptr)
s->checksum = s->update_checksum(s->checksum, s->checksum_ptr,
s->buf_end - s->checksum_ptr);
s->checksum_ptr = s->buffer;
}
/* make buffer smaller in case it ended up large after probing */
if (s->read_packet && s->orig_buffer_size && s->buffer_size > s->orig_buffer_size) {
if (dst == s->buffer && s->buf_ptr != dst) {
int ret = ffio_set_buf_size(s, s->orig_buffer_size);
if (ret < 0)
av_log(s, AV_LOG_WARNING, "Failed to decrease buffer size\n");
s->checksum_ptr = dst = s->buffer;
}
av_assert0(len >= s->orig_buffer_size);
len = s->orig_buffer_size;
}
if (s->read_packet)
len = s->read_packet(s->opaque, dst, len);
else
len = AVERROR_EOF;
if (len == AVERROR_EOF) {
/* do not modify buffer if EOF reached so that a seek back can
be done without rereading data */
s->eof_reached = 1;
} else if (len < 0) {
s->eof_reached = 1;
s->error= len;
} else {
s->pos += len;
s->buf_ptr = dst;
s->buf_end = dst + len;
s->bytes_read += len;
}
}
| false | FFmpeg | a606f27f4c610708fa96e35eed7b7537d3d8f712 |
8,731 | AVFilterFormats *avfilter_make_all_channel_layouts(void)
{
static int64_t chlayouts[] = {
AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_4POINT0,
AV_CH_LAYOUT_QUAD,
AV_CH_LAYOUT_5POINT0,
AV_CH_LAYOUT_5POINT0_BACK,
AV_CH_LAYOUT_5POINT1,
AV_CH_LAYOUT_5POINT1_BACK,
AV_CH_LAYOUT_5POINT1|AV_CH_LAYOUT_STEREO_DOWNMIX,
AV_CH_LAYOUT_7POINT1,
AV_CH_LAYOUT_7POINT1_WIDE,
AV_CH_LAYOUT_7POINT1|AV_CH_LAYOUT_STEREO_DOWNMIX,
-1,
};
return avfilter_make_format64_list(chlayouts);
}
| false | FFmpeg | ea8de109af46ae8e6751217977ae8f7becf94ba5 |
8,732 | static int matroska_read_packet(AVFormatContext *s, AVPacket *pkt)
{
MatroskaDemuxContext *matroska = s->priv_data;
int ret = 0;
while (!ret && matroska_deliver_packet(matroska, pkt)) {
if (matroska->done)
return AVERROR_EOF;
ret = matroska_parse_cluster(matroska);
}
return ret;
}
| true | FFmpeg | 8689d87ac61a412b88326c4d31a8f3375926f869 |
8,733 | int av_write_header(AVFormatContext *s)
{
int ret, i;
AVStream *st;
// some sanity checks
if (s->nb_streams == 0) {
av_log(s, AV_LOG_ERROR, "no streams\n");
return AVERROR(EINVAL);
}
for(i=0;i<s->nb_streams;i++) {
st = s->streams[i];
switch (st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
if(st->codec->sample_rate<=0){
av_log(s, AV_LOG_ERROR, "sample rate not set\n");
return AVERROR(EINVAL);
}
if(!st->codec->block_align)
st->codec->block_align = st->codec->channels *
av_get_bits_per_sample(st->codec->codec_id) >> 3;
break;
case AVMEDIA_TYPE_VIDEO:
if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
av_log(s, AV_LOG_ERROR, "time base not set\n");
return AVERROR(EINVAL);
}
if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
av_log(s, AV_LOG_ERROR, "dimensions not set\n");
return AVERROR(EINVAL);
}
if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
return AVERROR(EINVAL);
}
break;
}
if(s->oformat->codec_tag){
if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
//the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
st->codec->codec_tag= 0;
}
if(st->codec->codec_tag){
if (!validate_codec_tag(s, st)) {
char tagbuf[32];
av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
av_log(s, AV_LOG_ERROR,
"Tag %s/0x%08x incompatible with output codec '%s'\n",
tagbuf, st->codec->codec_tag, st->codec->codec->name);
return AVERROR_INVALIDDATA;
}
}else
st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
}
if(s->oformat->flags & AVFMT_GLOBALHEADER &&
!(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
}
if (!s->priv_data && s->oformat->priv_data_size > 0) {
s->priv_data = av_mallocz(s->oformat->priv_data_size);
if (!s->priv_data)
return AVERROR(ENOMEM);
}
#if LIBAVFORMAT_VERSION_MAJOR < 53
ff_metadata_mux_compat(s);
#endif
/* set muxer identification string */
if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
AVMetadata *m;
AVMetadataTag *t;
if (!(m = av_mallocz(sizeof(AVMetadata))))
return AVERROR(ENOMEM);
av_metadata_set2(&m, "encoder", LIBAVFORMAT_IDENT, 0);
metadata_conv(&m, s->oformat->metadata_conv, NULL);
if ((t = av_metadata_get(m, "", NULL, AV_METADATA_IGNORE_SUFFIX)))
av_metadata_set2(&s->metadata, t->key, t->value, 0);
av_metadata_free(&m);
}
if(s->oformat->write_header){
ret = s->oformat->write_header(s);
if (ret < 0)
return ret;
}
/* init PTS generation */
for(i=0;i<s->nb_streams;i++) {
int64_t den = AV_NOPTS_VALUE;
st = s->streams[i];
switch (st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
den = (int64_t)st->time_base.num * st->codec->sample_rate;
break;
case AVMEDIA_TYPE_VIDEO:
den = (int64_t)st->time_base.num * st->codec->time_base.den;
break;
default:
break;
}
if (den != AV_NOPTS_VALUE) {
if (den <= 0)
return AVERROR_INVALIDDATA;
av_frac_init(&st->pts, 0, 0, den);
}
}
return 0;
}
| true | FFmpeg | d2064fd42b2dd1cf1c44e5c4fc4b8aaba6698637 |
8,734 | static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
TCGReg addrlo, TCGReg addrhi,
int mem_index, bool is_read)
{
int cmp_off
= (is_read
? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
: offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
TCGReg base = TCG_AREG0;
/* Extract the page index, shifted into place for tlb index. */
if (TCG_TARGET_REG_BITS == 64) {
if (TARGET_LONG_BITS == 32) {
/* Zero-extend the address into a place helpful for further use. */
tcg_out_ext32u(s, TCG_REG_R4, addrlo);
addrlo = TCG_REG_R4;
} else {
tcg_out_rld(s, RLDICL, TCG_REG_R3, addrlo,
64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS);
}
}
/* Compensate for very large offsets. */
if (add_off >= 0x8000) {
/* Most target env are smaller than 32k; none are larger than 64k.
Simplify the logic here merely to offset by 0x7ff0, giving us a
range just shy of 64k. Check this assumption. */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
tlb_table[NB_MMU_MODES - 1][1])
> 0x7ff0 + 0x7fff);
tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, base, 0x7ff0));
base = TCG_REG_TMP1;
cmp_off -= 0x7ff0;
add_off -= 0x7ff0;
}
/* Extraction and shifting, part 2. */
if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
tcg_out_rlw(s, RLWINM, TCG_REG_R3, addrlo,
32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS),
31 - CPU_TLB_ENTRY_BITS);
} else {
tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS);
}
tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base));
/* Load the tlb comparator. */
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
} else {
tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
}
/* Load the TLB addend for use on the fast path. Do this asap
to minimize any load use delay. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off);
/* Clear the non-page, non-alignment bits from the address. */
if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
(32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
} else if (!s_bits) {
tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo,
0, 63 - TARGET_PAGE_BITS);
} else {
tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
}
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
0, 7, TCG_TYPE_I32);
tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
} else {
tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
0, 7, TCG_TYPE_TL);
}
return addrlo;
}
| true | qemu | 68d45bb61c5bbfb3999486f78cf026c1e79eb301 |
8,736 | void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
const char *t = qemu_opt_get(opts, "thread");
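    /* thread=multi enables MTTCG (subject to the checks below), thread=single disables it,
     * and the per-target default is used when the option is absent. */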
if (t) {
if (strcmp(t, "multi") == 0) {
if (TCG_OVERSIZED_GUEST) {
error_setg(errp, "No MTTCG when guest word size > hosts");
} else if (use_icount) {
error_setg(errp, "No MTTCG when icount is enabled");
} else {
if (!check_tcg_memory_orders_compatible()) {
error_report("Guest expects a stronger memory ordering "
"than the host provides");
error_printf("This may cause strange/hard to debug errors");
}
mttcg_enabled = true;
}
} else if (strcmp(t, "single") == 0) {
mttcg_enabled = false;
} else {
error_setg(errp, "Invalid 'thread' setting %s", t);
}
} else {
mttcg_enabled = default_mttcg_enabled();
}
} | true | qemu | c34c762015fec023c3ea5cf3629cbac462a80973 |
8,737 | static int replaygain_export(AVStream *st,
const uint8_t *track_gain, const uint8_t *track_peak,
const uint8_t *album_gain, const uint8_t *album_peak)
{
AVPacketSideData *sd, *tmp;
AVReplayGain *replaygain;
uint8_t *data;
int32_t tg, ag;
uint32_t tp, ap;
tg = parse_gain(track_gain);
ag = parse_gain(album_gain);
tp = parse_peak(track_peak);
ap = parse_peak(album_peak);
if (tg == INT32_MIN && ag == INT32_MIN)
return 0;
replaygain = av_mallocz(sizeof(*replaygain));
if (!replaygain)
return AVERROR(ENOMEM);
tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
if (!tmp) {
av_freep(&replaygain);
return AVERROR(ENOMEM);
}
st->side_data = tmp;
st->nb_side_data++;
sd = &st->side_data[st->nb_side_data - 1];
sd->type = AV_PKT_DATA_REPLAYGAIN;
sd->data = (uint8_t*)replaygain;
sd->size = sizeof(*replaygain);
replaygain->track_gain = tg;
replaygain->track_peak = tp;
replaygain->album_gain = ag;
replaygain->album_peak = ap;
return 0;
}
| true | FFmpeg | 8542f9c4f17125d483c40c0c5723842f1c982f81 |
8,738 | static int bytes_left(ByteIOContext *bc)
{
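    /* Number of buffered bytes between the current read position and the end of the buffer. */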
return bc->buf_end - bc->buf_ptr;
}
| false | FFmpeg | 465e1dadbef7596a3eb87089a66bb4ecdc26d3c4 |
8,739 | int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
int *got_sub_ptr,
AVPacket *avpkt)
{
int i, ret = 0;
if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
return AVERROR(EINVAL);
}
*got_sub_ptr = 0;
avcodec_get_subtitle_defaults(sub);
if (avpkt->size) {
AVPacket pkt_recoded;
AVPacket tmp = *avpkt;
int did_split = av_packet_split_side_data(&tmp);
//apply_param_change(avctx, &tmp);
pkt_recoded = tmp;
ret = recode_subtitle(avctx, &pkt_recoded, &tmp);
if (ret < 0) {
*got_sub_ptr = 0;
} else {
avctx->pkt = &pkt_recoded;
if (avctx->pkt_timebase.den && avpkt->pts != AV_NOPTS_VALUE)
sub->pts = av_rescale_q(avpkt->pts,
avctx->pkt_timebase, AV_TIME_BASE_Q);
ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
av_assert1((ret >= 0) >= !!*got_sub_ptr &&
!!*got_sub_ptr >= !!sub->num_rects);
if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
avctx->pkt_timebase.num) {
AVRational ms = { 1, 1000 };
sub->end_display_time = av_rescale_q(avpkt->duration,
avctx->pkt_timebase, ms);
}
for (i = 0; i < sub->num_rects; i++) {
if (sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
av_log(avctx, AV_LOG_ERROR,
"Invalid UTF-8 in decoded subtitles text; "
"maybe missing -sub_charenc option\n");
avsubtitle_free(sub);
return AVERROR_INVALIDDATA;
}
}
if (tmp.data != pkt_recoded.data) { // did we recode?
/* prevent from destroying side data from original packet */
pkt_recoded.side_data = NULL;
pkt_recoded.side_data_elems = 0;
av_free_packet(&pkt_recoded);
}
sub->format = !(avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB);
avctx->pkt = NULL;
}
if (did_split) {
ff_packet_free_side_data(&tmp);
if(ret == tmp.size)
ret = avpkt->size;
}
if (*got_sub_ptr)
avctx->frame_number++;
}
return ret;
}
| false | FFmpeg | 63c0113588d61d5ed045e1937c473770453c53be |
8,740 | static av_always_inline void thread_park_workers(ThreadContext *c, int thread_count)
{
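    /* Wait until the workers have finished the submitted batch of jobs and parked,
     * then release the job lock. */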
while (c->current_job != thread_count + c->job_count)
pthread_cond_wait(&c->last_job_cond, &c->current_job_lock);
pthread_mutex_unlock(&c->current_job_lock);
}
| false | FFmpeg | daa7a1d4431b6acf1f93c4a98b3de123abf4ca18 |
8,741 | int ff_h264_check_intra4x4_pred_mode(H264Context *h){
MpegEncContext * const s = &h->s;
static const int8_t top [12]= {-1, 0,LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
static const int8_t left[12]= { 0,-1, TOP_DC_PRED, 0,-1,-1,-1, 0,-1,DC_128_PRED};
int i;
if(!(h->top_samples_available&0x8000)){
for(i=0; i<4; i++){
int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ];
if(status<0){
av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
return -1;
} else if(status){
h->intra4x4_pred_mode_cache[scan8[0] + i]= status;
}
}
}
if((h->left_samples_available&0x8888)!=0x8888){
static const int mask[4]={0x8000,0x2000,0x80,0x20};
for(i=0; i<4; i++){
if(!(h->left_samples_available&mask[i])){
int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ];
if(status<0){
av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
return -1;
} else if(status){
h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status;
}
}
}
}
return 0;
} //FIXME cleanup like ff_h264_check_intra_pred_mode
| false | FFmpeg | a4fd95b5d511384ed3ce388d8d20a16b1c4c0530 |
8,742 | static void sdram_map_bcr (ppc4xx_sdram_t *sdram)
{
int i;
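    /* Program a bank configuration register for each populated RAM bank;
     * banks with zero size are written as disabled. */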
for (i = 0; i < sdram->nbanks; i++) {
if (sdram->ram_sizes[i] != 0) {
sdram_set_bcr(&sdram->bcr[i],
sdram_bcr(sdram->ram_bases[i], sdram->ram_sizes[i]),
1);
} else {
sdram_set_bcr(&sdram->bcr[i], 0x00000000, 0);
}
}
}
| false | qemu | b6dcbe086c77ec683f5ff0b693593cda1d61f3a1 |
8,743 | static void virtio_ccw_balloon_realize(VirtioCcwDevice *ccw_dev, Error **errp)
{
VirtIOBalloonCcw *dev = VIRTIO_BALLOON_CCW(ccw_dev);
DeviceState *vdev = DEVICE(&dev->vdev);
Error *err = NULL;
qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
object_property_set_bool(OBJECT(vdev), true, "realized", &err);
if (err) {
error_propagate(errp, err);
}
}
| false | qemu | 621ff94d5074d88253a5818c6b9c4db718fbfc65 |
8,744 | static inline void gen_scas(DisasContext *s, int ot)
{
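    /* SCAS: compare the accumulator with the memory operand at [E]DI, set the flags,
     * then step [E]DI according to the direction flag. */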
gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
gen_string_movl_A0_EDI(s);
gen_op_ld_T1_A0(ot + s->mem_index);
gen_op_cmpl_T0_T1_cc();
gen_op_movl_T0_Dshift[ot]();
#ifdef TARGET_X86_64
if (s->aflag == 2) {
gen_op_addq_EDI_T0();
} else
#endif
if (s->aflag) {
gen_op_addl_EDI_T0();
} else {
gen_op_addw_EDI_T0();
}
}
| false | qemu | 6e0d8677cb443e7408c0b7a25a93c6596d7fa380 |
8,745 | static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
int index;
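    /* Walk the virtqueues assigned to this vector and mask it on each,
     * stopping at the first unconfigured queue. */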
while (vq) {
index = virtio_get_queue_index(vq);
if (!virtio_queue_get_num(vdev, index)) {
break;
}
virtio_pci_vq_vector_mask(proxy, index, vector);
vq = virtio_vector_next_queue(vq);
}
}
| false | qemu | 6652d0811c9463fbfb2d2d1cb2ec03f388145c5f |
8,746 | static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
gen_callw_slot(dc, callinc, tmp, slot);
tcg_temp_free(tmp);
}
| false | qemu | 433d33c555deeed375996e338df1a9510df401c6 |
8,747 | static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
{
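    /* Signed saturating narrow; size selects the destination element width
     * (0: 8 bit, 1: 16 bit, 2: 32 bit). */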
switch (size) {
case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
default: abort();
}
}
| false | qemu | a7812ae412311d7d47f8aa85656faadac9d64b56 |
8,748 | RefPicList *ff_hevc_get_ref_list(HEVCContext *s, HEVCFrame *ref, int x0, int y0)
{
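    /* Negative coordinates mean "use the current frame's list"; otherwise return the
     * list stored for the CTB containing (x0, y0) in the reference frame. */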
if (x0 < 0 || y0 < 0) {
return s->ref->refPicList;
} else {
int x_cb = x0 >> s->sps->log2_ctb_size;
int y_cb = y0 >> s->sps->log2_ctb_size;
int pic_width_cb = (s->sps->width + (1 << s->sps->log2_ctb_size) - 1) >>
s->sps->log2_ctb_size;
int ctb_addr_ts = s->pps->ctb_addr_rs_to_ts[y_cb * pic_width_cb + x_cb];
return (RefPicList *)ref->rpl_tab[ctb_addr_ts];
}
}
| false | FFmpeg | 52a2c17ec006282f388071a831dfb21288611253 |
8,749 | int gtod_load(QEMUFile *f, void *opaque, int version_id)
{
uint64_t tod_low;
uint8_t tod_high;
int r;
if (qemu_get_byte(f) == S390_TOD_CLOCK_VALUE_MISSING) {
fprintf(stderr, "WARNING: Guest clock was not migrated. This could "
"cause the guest to hang.\n");
return 0;
}
tod_high = qemu_get_byte(f);
tod_low = qemu_get_be64(f);
r = s390_set_clock(&tod_high, &tod_low);
if (r) {
fprintf(stderr, "WARNING: Unable to set guest clock value. "
"s390_get_clock returned error %d. This could cause "
"the guest to hang.\n", r);
}
return 0;
}
| false | qemu | 8297be80f7cf71e09617669a8bd8b2836dcfd4c3 |
8,751 | void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
8,752 | static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
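    /* Return a fresh 32-bit temporary loaded with the single-precision (low 32 bits)
     * view of FP register 'reg'. */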
TCGv_i32 v = tcg_temp_new_i32();
tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(reg, MO_32));
return v;
}
| false | qemu | 90e496386fe7fd32c189561f846b7913f95b8cf4 |
8,753 | static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
target_phys_addr_t start_addr,
target_phys_addr_t end_addr)
{
return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}
| false | qemu | a426e122173f36f05ea2cb72dcff77b7408546ce |
8,754 | static void malta_fpga_write(void *opaque, target_phys_addr_t addr,
uint64_t val, unsigned size)
{
MaltaFPGAState *s = opaque;
uint32_t saddr;
saddr = (addr & 0xfffff);
switch (saddr) {
/* SWITCH Register */
case 0x00200:
break;
/* JMPRS Register */
case 0x00210:
break;
/* LEDBAR Register */
case 0x00408:
s->leds = val & 0xff;
malta_fpga_update_display(s);
break;
/* ASCIIWORD Register */
case 0x00410:
snprintf(s->display_text, 9, "%08X", (uint32_t)val);
malta_fpga_update_display(s);
break;
/* ASCIIPOS0 to ASCIIPOS7 Registers */
case 0x00418:
case 0x00420:
case 0x00428:
case 0x00430:
case 0x00438:
case 0x00440:
case 0x00448:
case 0x00450:
s->display_text[(saddr - 0x00418) >> 3] = (char) val;
malta_fpga_update_display(s);
break;
/* SOFTRES Register */
case 0x00500:
if (val == 0x42)
qemu_system_reset_request ();
break;
/* BRKRES Register */
case 0x00508:
s->brk = val & 0xff;
break;
/* UART Registers are handled directly by the serial device */
/* GPOUT Register */
case 0x00a00:
s->gpout = val & 0xff;
break;
/* I2COE Register */
case 0x00b08:
s->i2coe = val & 0x03;
break;
/* I2COUT Register */
case 0x00b10:
eeprom24c0x_write(val & 0x02, val & 0x01);
s->i2cout = val;
break;
/* I2CSEL Register */
case 0x00b18:
s->i2csel = val & 0x01;
break;
default:
#if 0
printf ("malta_fpga_write: Bad register offset 0x" TARGET_FMT_lx "\n",
addr);
#endif
break;
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
8,756 | static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
ptrdiff_t offset = target - code_ptr;
assert(offset == sextract64(offset, 0, 26));
/* read instruction, mask away previous PC_REL26 parameter contents,
set the proper offset, then write back the instruction. */
*code_ptr = deposit32(*code_ptr, 0, 26, offset);
}
| false | qemu | eabb7b91b36b202b4dac2df2d59d698e3aff197a |
8,757 | static inline unsigned long align_sigframe(unsigned long sp)
{
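    /* Round the signal frame stack pointer down to a 4-byte boundary. */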
unsigned long i;
i = sp & ~3UL;
return i;
}
| false | qemu | 9be385980d37e8f4fd33f605f5fb1c3d144170a8 |
8,759 | static av_cold void uninit(AVFilterContext *ctx)
{
EvalContext *eval = ctx->priv;
int i;
for (i = 0; i < 8; i++) {
av_expr_free(eval->expr[i]);
eval->expr[i] = NULL;
}
}
| false | FFmpeg | 937cfebd72d30e617591c666ea4854a3898a64b2 |
8,760 | static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
int ra, int rb, int rt, int Rc)
{
TCGv t0, t1;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
switch (opc3 & 0x0D) {
case 0x05:
/* macchw - macchw. - macchwo - macchwo. */
/* macchws - macchws. - macchwso - macchwso. */
/* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
/* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
/* mulchw - mulchw. */
tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
tcg_gen_ext16s_tl(t1, t1);
break;
case 0x04:
/* macchwu - macchwu. - macchwuo - macchwuo. */
/* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
/* mulchwu - mulchwu. */
tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
tcg_gen_ext16u_tl(t1, t1);
break;
case 0x01:
/* machhw - machhw. - machhwo - machhwo. */
/* machhws - machhws. - machhwso - machhwso. */
/* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
/* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
/* mulhhw - mulhhw. */
tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
tcg_gen_ext16s_tl(t0, t0);
tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
tcg_gen_ext16s_tl(t1, t1);
break;
case 0x00:
/* machhwu - machhwu. - machhwuo - machhwuo. */
/* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
/* mulhhwu - mulhhwu. */
tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
tcg_gen_ext16u_tl(t0, t0);
tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
tcg_gen_ext16u_tl(t1, t1);
break;
case 0x0D:
/* maclhw - maclhw. - maclhwo - maclhwo. */
/* maclhws - maclhws. - maclhwso - maclhwso. */
/* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
/* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
/* mullhw - mullhw. */
tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
break;
case 0x0C:
/* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
/* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
/* mullhwu - mullhwu. */
tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
break;
}
if (opc2 & 0x04) {
/* (n)multiply-and-accumulate (0x0C / 0x0E) */
tcg_gen_mul_tl(t1, t0, t1);
if (opc2 & 0x02) {
/* nmultiply-and-accumulate (0x0E) */
tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
} else {
/* multiply-and-accumulate (0x0C) */
tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
}
if (opc3 & 0x12) {
/* Check overflow and/or saturate */
int l1 = gen_new_label();
if (opc3 & 0x10) {
/* Start with XER OV disabled, the most likely case */
tcg_gen_movi_tl(cpu_ov, 0);
}
if (opc3 & 0x01) {
/* Signed */
tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
if (opc3 & 0x02) {
/* Saturate */
tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
tcg_gen_xori_tl(t0, t0, 0x7fffffff);
}
} else {
/* Unsigned */
tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
if (opc3 & 0x02) {
/* Saturate */
tcg_gen_movi_tl(t0, UINT32_MAX);
}
}
if (opc3 & 0x10) {
/* Check overflow */
tcg_gen_movi_tl(cpu_ov, 1);
tcg_gen_movi_tl(cpu_so, 1);
}
gen_set_label(l1);
tcg_gen_mov_tl(cpu_gpr[rt], t0);
}
} else {
tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
}
tcg_temp_free(t0);
tcg_temp_free(t1);
if (unlikely(Rc) != 0) {
/* Update Rc0 */
gen_set_Rc0(ctx, cpu_gpr[rt]);
}
}
| false | qemu | 42a268c241183877192c376d03bd9b6d527407c7 |
8,761 | void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1)
{
/* We do not provide address limit checking, so let's suppress it. */
if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
program_interrupt(&cpu->env, PGM_OPERAND, 2);
}
}
| false | qemu | 7e01376daea75e888c370aab521a7d4aeaf2ffd1 |
8,762 | static void migrate_fd_put_ready(void *opaque)
{
MigrationState *s = opaque;
int ret;
if (s->state != MIG_STATE_ACTIVE) {
DPRINTF("put_ready returning because of non-active state\n");
return;
}
DPRINTF("iterate\n");
ret = qemu_savevm_state_iterate(s->mon, s->file);
if (ret < 0) {
migrate_fd_error(s);
} else if (ret == 1) {
int old_vm_running = runstate_is_running();
DPRINTF("done iterating\n");
vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
if (qemu_savevm_state_complete(s->mon, s->file) < 0) {
migrate_fd_error(s);
} else {
migrate_fd_completed(s);
}
if (s->state != MIG_STATE_COMPLETED) {
if (old_vm_running) {
vm_start();
}
}
}
}
| false | qemu | 539de1246d355d3b8aa33fb7cde732352d8827c7 |
8,763 | void do_info_mice(Monitor *mon, QObject **ret_data)
{
QEMUPutMouseEntry *cursor;
QList *mice_list;
int current;
mice_list = qlist_new();
if (QTAILQ_EMPTY(&mouse_handlers)) {
goto out;
}
current = QTAILQ_FIRST(&mouse_handlers)->index;
QTAILQ_FOREACH(cursor, &mouse_handlers, node) {
QObject *obj;
obj = qobject_from_jsonf("{ 'name': %s, 'index': %d, 'current': %i }",
cursor->qemu_put_mouse_event_name,
cursor->index,
cursor->index == current);
qlist_append_obj(mice_list, obj);
}
out:
*ret_data = QOBJECT(mice_list);
}
| false | qemu | 1aaee43cf7a43ca8e7f12883ee7e3a35fe5eb84c |
8,764 | Visitor *validate_test_init(TestInputVisitorData *data,
const char *json_string, ...)
{
Visitor *v;
va_list ap;
va_start(ap, json_string);
v = validate_test_init_internal(data, json_string, &ap);
va_end(ap);
return v;
}
| false | qemu | b3db211f3c80bb996a704d665fe275619f728bd4 |
8,765 | static av_cold int vc1_decode_init(AVCodecContext *avctx)
{
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
GetBitContext gb;
/* save the container output size for WMImage */
v->output_width = avctx->width;
v->output_height = avctx->height;
if (!avctx->extradata_size || !avctx->extradata)
return -1;
if (!(avctx->flags & AV_CODEC_FLAG_GRAY))
avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
else
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
v->s.avctx = avctx;
if (ff_vc1_init_common(v) < 0)
return -1;
ff_blockdsp_init(&s->bdsp, avctx);
ff_h264chroma_init(&v->h264chroma, 8);
ff_qpeldsp_init(&s->qdsp);
if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
int count = 0;
// looks like WMV3 has a sequence header stored in the extradata
// advanced sequence header may be before the first frame
// the last byte of the extradata is a version number, 1 for the
// samples we can decode
init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
return -1;
count = avctx->extradata_size*8 - get_bits_count(&gb);
if (count > 0) {
av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
count, get_bits_long(&gb, FFMIN(count, 32)));
} else if (count < 0) {
av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
}
} else { // VC1/WVC1/WVP2
const uint8_t *start = avctx->extradata;
uint8_t *end = avctx->extradata + avctx->extradata_size;
const uint8_t *next;
int size, buf2_size;
uint8_t *buf2 = NULL;
int seq_initialized = 0, ep_initialized = 0;
if (avctx->extradata_size < 16) {
av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
return -1;
}
buf2 = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
next = start;
for (; next < end; start = next) {
next = find_next_marker(start + 4, end);
size = next - start - 4;
if (size <= 0)
continue;
buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
init_get_bits(&gb, buf2, buf2_size * 8);
switch (AV_RB32(start)) {
case VC1_CODE_SEQHDR:
if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
av_free(buf2);
return -1;
}
seq_initialized = 1;
break;
case VC1_CODE_ENTRYPOINT:
if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
av_free(buf2);
return -1;
}
ep_initialized = 1;
break;
}
}
av_free(buf2);
if (!seq_initialized || !ep_initialized) {
av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
return -1;
}
v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
}
v->sprite_output_frame = av_frame_alloc();
if (!v->sprite_output_frame)
return AVERROR(ENOMEM);
avctx->profile = v->profile;
if (v->profile == PROFILE_ADVANCED)
avctx->level = v->level;
avctx->has_b_frames = !!avctx->max_b_frames;
if (v->color_prim == 1 || v->color_prim == 5 || v->color_prim == 6)
avctx->color_primaries = v->color_prim;
if (v->transfer_char == 1 || v->transfer_char == 7)
avctx->color_trc = v->transfer_char;
if (v->matrix_coef == 1 || v->matrix_coef == 6 || v->matrix_coef == 7)
avctx->colorspace = v->matrix_coef;
s->mb_width = (avctx->coded_width + 15) >> 4;
s->mb_height = (avctx->coded_height + 15) >> 4;
if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
ff_vc1_init_transposed_scantables(v);
} else {
memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
v->left_blk_sh = 3;
v->top_blk_sh = 0;
}
if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
v->sprite_width = avctx->coded_width;
v->sprite_height = avctx->coded_height;
avctx->coded_width = avctx->width = v->output_width;
avctx->coded_height = avctx->height = v->output_height;
// prevent 16.16 overflows
if (v->sprite_width > 1 << 14 ||
v->sprite_height > 1 << 14 ||
v->output_width > 1 << 14 ||
v->output_height > 1 << 14) return -1;
}
return 0;
}
| false | FFmpeg | dcc39ee10e82833ce24aa57926c00ffeb1948198 |
8,766 | static void show_packets(AVFormatContext *fmt_ctx)
{
AVPacket pkt;
av_init_packet(&pkt);
while (!av_read_frame(fmt_ctx, &pkt))
show_packet(fmt_ctx, &pkt);
}
| false | FFmpeg | 3a8c95f730732b9f1ffacdbfbf79a01b202a67af |
8,767 | static int http_open_cnx_internal(URLContext *h, AVDictionary **options)
{
const char *path, *proxy_path, *lower_proto = "tcp", *local_path;
char hostname[1024], hoststr[1024], proto[10];
char auth[1024], proxyauth[1024] = "";
char path1[MAX_URL_SIZE];
char buf[1024], urlbuf[MAX_URL_SIZE];
int port, use_proxy, err, location_changed = 0;
HTTPContext *s = h->priv_data;
av_url_split(proto, sizeof(proto), auth, sizeof(auth),
hostname, sizeof(hostname), &port,
path1, sizeof(path1), s->location);
ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);
proxy_path = getenv("http_proxy");
use_proxy = !ff_http_match_no_proxy(getenv("no_proxy"), hostname) &&
proxy_path != NULL && av_strstart(proxy_path, "http://", NULL);
if (!strcmp(proto, "https")) {
lower_proto = "tls";
use_proxy = 0;
if (port < 0)
port = 443;
}
if (port < 0)
port = 80;
if (path1[0] == '\0')
path = "/";
else
path = path1;
local_path = path;
if (use_proxy) {
/* Reassemble the request URL without auth string - we don't
* want to leak the auth to the proxy. */
ff_url_join(urlbuf, sizeof(urlbuf), proto, NULL, hostname, port, "%s",
path1);
path = urlbuf;
av_url_split(NULL, 0, proxyauth, sizeof(proxyauth),
hostname, sizeof(hostname), &port, NULL, 0, proxy_path);
}
ff_url_join(buf, sizeof(buf), lower_proto, NULL, hostname, port, NULL);
if (!s->hd) {
err = ffurl_open(&s->hd, buf, AVIO_FLAG_READ_WRITE,
&h->interrupt_callback, options);
if (err < 0)
return err;
}
err = http_connect(h, path, local_path, hoststr,
auth, proxyauth, &location_changed);
if (err < 0)
return err;
return location_changed;
}
| false | FFmpeg | 4b1f5e5090abed6c618c8ba380cd7d28d140f867 |
8,768 | qio_channel_command_new_spawn(const char *const argv[],
int flags,
Error **errp)
{
pid_t pid = -1;
int stdinfd[2] = { -1, -1 };
int stdoutfd[2] = { -1, -1 };
int devnull = -1;
bool stdinnull = false, stdoutnull = false;
QIOChannelCommand *ioc;
flags = flags & O_ACCMODE;
if (flags == O_RDONLY) {
stdinnull = true;
}
if (flags == O_WRONLY) {
stdoutnull = true;
}
if (stdinnull || stdoutnull) {
devnull = open("/dev/null", O_RDWR);
if (!devnull) {
error_setg_errno(errp, errno,
"Unable to open /dev/null");
goto error;
}
}
if ((!stdinnull && pipe(stdinfd) < 0) ||
(!stdoutnull && pipe(stdoutfd) < 0)) {
error_setg_errno(errp, errno,
"Unable to open pipe");
goto error;
}
pid = qemu_fork(errp);
if (pid < 0) {
goto error;
}
if (pid == 0) { /* child */
dup2(stdinnull ? devnull : stdinfd[0], STDIN_FILENO);
dup2(stdoutnull ? devnull : stdoutfd[1], STDOUT_FILENO);
/* Leave stderr connected to qemu's stderr */
if (!stdinnull) {
close(stdinfd[0]);
close(stdinfd[1]);
}
if (!stdoutnull) {
close(stdoutfd[0]);
close(stdoutfd[1]);
}
execv(argv[0], (char * const *)argv);
_exit(1);
}
if (!stdinnull) {
close(stdinfd[0]);
}
if (!stdoutnull) {
close(stdoutfd[1]);
}
ioc = qio_channel_command_new_pid(stdinnull ? devnull : stdinfd[1],
stdoutnull ? devnull : stdoutfd[0],
pid);
trace_qio_channel_command_new_spawn(ioc, argv[0], flags);
return ioc;
error:
if (stdinfd[0] != -1) {
close(stdinfd[0]);
}
if (stdinfd[1] != -1) {
close(stdinfd[1]);
}
if (stdoutfd[0] != -1) {
close(stdoutfd[0]);
}
if (stdoutfd[1] != -1) {
close(stdoutfd[1]);
}
return NULL;
}
| true | qemu | e155494cf0b876c45c3c68a9ab6c641aac22dfdf |
8,769 | static int div_round (int dividend, int divisor)
{
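    /* Divide and round to the nearest integer, halfway cases away from zero
     * (divisor assumed positive). */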
if (dividend > 0)
return (dividend + (divisor>>1)) / divisor;
else
return -((-dividend + (divisor>>1)) / divisor);
}
| true | FFmpeg | 428098165de4c3edfe42c1b7f00627d287015863 |
8,770 | long do_sigreturn(CPUX86State *env)
{
struct sigframe *frame;
abi_ulong frame_addr = env->regs[R_ESP] - 8;
target_sigset_t target_set;
sigset_t set;
int eax, i;
#if defined(DEBUG_SIGNAL)
fprintf(stderr, "do_sigreturn\n");
#endif
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
goto badframe;
/* set blocked signals */
if (__get_user(target_set.sig[0], &frame->sc.oldmask))
goto badframe;
for(i = 1; i < TARGET_NSIG_WORDS; i++) {
if (__get_user(target_set.sig[i], &frame->extramask[i - 1]))
goto badframe;
}
target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL);
/* restore registers */
if (restore_sigcontext(env, &frame->sc, &eax))
goto badframe;
unlock_user_struct(frame, frame_addr, 0);
return eax;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
return 0;
}
| true | qemu | f5f601afcec6c1081128fe5a0f831788ca9f56ed |
8,771 | void cpu_dump_state(CPUXtensaState *env, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
int i, j;
cpu_fprintf(f, "PC=%08x\n\n", env->pc);
for (i = j = 0; i < 256; ++i) {
if (sregnames[i]) {
cpu_fprintf(f, "%s=%08x%c", sregnames[i], env->sregs[i],
(j++ % 4) == 3 ? '\n' : ' ');
}
}
cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
for (i = j = 0; i < 256; ++i) {
if (uregnames[i]) {
cpu_fprintf(f, "%s=%08x%c", uregnames[i], env->uregs[i],
(j++ % 4) == 3 ? '\n' : ' ');
}
}
cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
for (i = 0; i < 16; ++i) {
cpu_fprintf(f, "A%02d=%08x%c", i, env->regs[i],
(i % 4) == 3 ? '\n' : ' ');
}
cpu_fprintf(f, "\n");
for (i = 0; i < env->config->nareg; ++i) {
cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
(i % 4) == 3 ? '\n' : ' ');
}
if (xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
cpu_fprintf(f, "\n");
for (i = 0; i < 16; ++i) {
cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
float32_val(env->fregs[i]),
*(float *)&env->fregs[i], (i % 2) == 1 ? '\n' : ' ');
}
}
}
| true | qemu | fe0bd475aa31e60674f7f53b85dc293108026202 |
8,772 | int kvm_arch_init(KVMState *s)
{
uint64_t identity_base = 0xfffbc000;
int ret;
struct utsname utsname;
ret = kvm_get_supported_msrs(s);
if (ret < 0) {
return ret;
}
uname(&utsname);
lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
/*
* On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
* In order to use vm86 mode, an EPT identity map and a TSS are needed.
* Since these must be part of guest physical memory, we need to allocate
* them, both by setting their start addresses in the kernel and by
* creating a corresponding e820 entry. We need 4 pages before the BIOS.
*
* Older KVM versions may not support setting the identity map base. In
* that case we need to stick with the default, i.e. a 256K maximum BIOS
* size.
*/
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
/* Allows up to 16M BIOSes. */
identity_base = 0xfeffc000;
ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
if (ret < 0) {
return ret;
}
}
#endif
/* Set TSS base one page after EPT identity map. */
ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
if (ret < 0) {
return ret;
}
/* Tell fw_cfg to notify the BIOS to reserve the range. */
ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
if (ret < 0) {
fprintf(stderr, "e820_add_entry() table is full\n");
return ret;
}
return 0;
} | true | qemu | 3c85e74fbf9e5a39d8d13ef91a5f3dd91f0bc8a8 |
8,773 | static int matroska_parse_tracks(AVFormatContext *s)
{
MatroskaDemuxContext *matroska = s->priv_data;
MatroskaTrack *tracks = matroska->tracks.elem;
AVStream *st;
int i, j, ret;
int k;
for (i = 0; i < matroska->tracks.nb_elem; i++) {
MatroskaTrack *track = &tracks[i];
enum AVCodecID codec_id = AV_CODEC_ID_NONE;
EbmlList *encodings_list = &track->encodings;
MatroskaTrackEncoding *encodings = encodings_list->elem;
uint8_t *extradata = NULL;
int extradata_size = 0;
int extradata_offset = 0;
uint32_t fourcc = 0;
AVIOContext b;
char* key_id_base64 = NULL;
int bit_depth = -1;
/* Apply some sanity checks. */
if (track->type != MATROSKA_TRACK_TYPE_VIDEO &&
track->type != MATROSKA_TRACK_TYPE_AUDIO &&
track->type != MATROSKA_TRACK_TYPE_SUBTITLE &&
track->type != MATROSKA_TRACK_TYPE_METADATA) {
av_log(matroska->ctx, AV_LOG_INFO,
"Unknown or unsupported track type %"PRIu64"\n",
track->type);
continue;
}
if (!track->codec_id)
continue;
if (track->audio.samplerate < 0 || track->audio.samplerate > INT_MAX ||
isnan(track->audio.samplerate)) {
av_log(matroska->ctx, AV_LOG_WARNING,
"Invalid sample rate %f, defaulting to 8000 instead.\n",
track->audio.samplerate);
track->audio.samplerate = 8000;
}
if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
if (!track->default_duration && track->video.frame_rate > 0)
track->default_duration = 1000000000 / track->video.frame_rate;
if (track->video.display_width == -1)
track->video.display_width = track->video.pixel_width;
if (track->video.display_height == -1)
track->video.display_height = track->video.pixel_height;
if (track->video.color_space.size == 4)
fourcc = AV_RL32(track->video.color_space.data);
} else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
if (!track->audio.out_samplerate)
track->audio.out_samplerate = track->audio.samplerate;
}
if (encodings_list->nb_elem > 1) {
av_log(matroska->ctx, AV_LOG_ERROR,
"Multiple combined encodings not supported");
} else if (encodings_list->nb_elem == 1) {
if (encodings[0].type) {
if (encodings[0].encryption.key_id.size > 0) {
/* Save the encryption key id to be stored later as a
metadata tag. */
const int b64_size = AV_BASE64_SIZE(encodings[0].encryption.key_id.size);
key_id_base64 = av_malloc(b64_size);
if (key_id_base64 == NULL)
return AVERROR(ENOMEM);
av_base64_encode(key_id_base64, b64_size,
encodings[0].encryption.key_id.data,
encodings[0].encryption.key_id.size);
} else {
encodings[0].scope = 0;
av_log(matroska->ctx, AV_LOG_ERROR,
"Unsupported encoding type");
}
} else if (
#if CONFIG_ZLIB
encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_ZLIB &&
#endif
#if CONFIG_BZLIB
encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_BZLIB &&
#endif
#if CONFIG_LZO
encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_LZO &&
#endif
encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP) {
encodings[0].scope = 0;
av_log(matroska->ctx, AV_LOG_ERROR,
"Unsupported encoding type");
} else if (track->codec_priv.size && encodings[0].scope & 2) {
uint8_t *codec_priv = track->codec_priv.data;
int ret = matroska_decode_buffer(&track->codec_priv.data,
&track->codec_priv.size,
track);
if (ret < 0) {
track->codec_priv.data = NULL;
track->codec_priv.size = 0;
av_log(matroska->ctx, AV_LOG_ERROR,
"Failed to decode codec private data\n");
}
if (codec_priv != track->codec_priv.data)
av_free(codec_priv);
}
}
for (j = 0; ff_mkv_codec_tags[j].id != AV_CODEC_ID_NONE; j++) {
if (!strncmp(ff_mkv_codec_tags[j].str, track->codec_id,
strlen(ff_mkv_codec_tags[j].str))) {
codec_id = ff_mkv_codec_tags[j].id;
break;
}
}
st = track->stream = avformat_new_stream(s, NULL);
if (!st) {
av_free(key_id_base64);
return AVERROR(ENOMEM);
}
if (key_id_base64) {
/* export encryption key id as base64 metadata tag */
av_dict_set(&st->metadata, "enc_key_id", key_id_base64, 0);
av_freep(&key_id_base64);
}
if (!strcmp(track->codec_id, "V_MS/VFW/FOURCC") &&
track->codec_priv.size >= 40 &&
track->codec_priv.data) {
track->ms_compat = 1;
bit_depth = AV_RL16(track->codec_priv.data + 14);
fourcc = AV_RL32(track->codec_priv.data + 16);
codec_id = ff_codec_get_id(ff_codec_bmp_tags,
fourcc);
if (!codec_id)
codec_id = ff_codec_get_id(ff_codec_movvideo_tags,
fourcc);
extradata_offset = 40;
} else if (!strcmp(track->codec_id, "A_MS/ACM") &&
track->codec_priv.size >= 14 &&
track->codec_priv.data) {
int ret;
ffio_init_context(&b, track->codec_priv.data,
track->codec_priv.size,
0, NULL, NULL, NULL, NULL);
ret = ff_get_wav_header(s, &b, st->codecpar, track->codec_priv.size, 0);
if (ret < 0)
return ret;
codec_id = st->codecpar->codec_id;
fourcc = st->codecpar->codec_tag;
extradata_offset = FFMIN(track->codec_priv.size, 18);
} else if (!strcmp(track->codec_id, "A_QUICKTIME")
/* Normally 36, but allow noncompliant private data */
&& (track->codec_priv.size >= 32)
&& (track->codec_priv.data)) {
uint16_t sample_size;
int ret = get_qt_codec(track, &fourcc, &codec_id);
if (ret < 0)
return ret;
sample_size = AV_RB16(track->codec_priv.data + 26);
if (fourcc == 0) {
if (sample_size == 8) {
fourcc = MKTAG('r','a','w',' ');
codec_id = ff_codec_get_id(ff_codec_movaudio_tags, fourcc);
} else if (sample_size == 16) {
fourcc = MKTAG('t','w','o','s');
codec_id = ff_codec_get_id(ff_codec_movaudio_tags, fourcc);
}
}
if ((fourcc == MKTAG('t','w','o','s') ||
fourcc == MKTAG('s','o','w','t')) &&
sample_size == 8)
codec_id = AV_CODEC_ID_PCM_S8;
} else if (!strcmp(track->codec_id, "V_QUICKTIME") &&
(track->codec_priv.size >= 21) &&
(track->codec_priv.data)) {
int ret = get_qt_codec(track, &fourcc, &codec_id);
if (ret < 0)
return ret;
if (codec_id == AV_CODEC_ID_NONE && AV_RL32(track->codec_priv.data+4) == AV_RL32("SMI ")) {
fourcc = MKTAG('S','V','Q','3');
codec_id = ff_codec_get_id(ff_codec_movvideo_tags, fourcc);
}
if (codec_id == AV_CODEC_ID_NONE)
av_log(matroska->ctx, AV_LOG_ERROR,
"mov FourCC not found %s.\n", av_fourcc2str(fourcc));
if (track->codec_priv.size >= 86) {
bit_depth = AV_RB16(track->codec_priv.data + 82);
ffio_init_context(&b, track->codec_priv.data,
track->codec_priv.size,
0, NULL, NULL, NULL, NULL);
if (ff_get_qtpalette(codec_id, &b, track->palette)) {
bit_depth &= 0x1F;
track->has_palette = 1;
}
}
} else if (codec_id == AV_CODEC_ID_PCM_S16BE) {
switch (track->audio.bitdepth) {
case 8:
codec_id = AV_CODEC_ID_PCM_U8;
break;
case 24:
codec_id = AV_CODEC_ID_PCM_S24BE;
break;
case 32:
codec_id = AV_CODEC_ID_PCM_S32BE;
break;
}
} else if (codec_id == AV_CODEC_ID_PCM_S16LE) {
switch (track->audio.bitdepth) {
case 8:
codec_id = AV_CODEC_ID_PCM_U8;
break;
case 24:
codec_id = AV_CODEC_ID_PCM_S24LE;
break;
case 32:
codec_id = AV_CODEC_ID_PCM_S32LE;
break;
}
} else if (codec_id == AV_CODEC_ID_PCM_F32LE &&
track->audio.bitdepth == 64) {
codec_id = AV_CODEC_ID_PCM_F64LE;
} else if (codec_id == AV_CODEC_ID_AAC && !track->codec_priv.size) {
int profile = matroska_aac_profile(track->codec_id);
int sri = matroska_aac_sri(track->audio.samplerate);
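            /* No codec private data: build a minimal AudioSpecificConfig from the profile
             * and sample rate index, adding explicit SBR signalling when the codec id
             * requests it. */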
extradata = av_mallocz(5 + AV_INPUT_BUFFER_PADDING_SIZE);
if (!extradata)
return AVERROR(ENOMEM);
extradata[0] = (profile << 3) | ((sri & 0x0E) >> 1);
extradata[1] = ((sri & 0x01) << 7) | (track->audio.channels << 3);
if (strstr(track->codec_id, "SBR")) {
sri = matroska_aac_sri(track->audio.out_samplerate);
extradata[2] = 0x56;
extradata[3] = 0xE5;
extradata[4] = 0x80 | (sri << 3);
extradata_size = 5;
} else
extradata_size = 2;
} else if (codec_id == AV_CODEC_ID_ALAC && track->codec_priv.size && track->codec_priv.size < INT_MAX - 12 - AV_INPUT_BUFFER_PADDING_SIZE) {
/* Only ALAC's magic cookie is stored in Matroska's track headers.
* Create the "atom size", "tag", and "tag version" fields the
* decoder expects manually. */
extradata_size = 12 + track->codec_priv.size;
extradata = av_mallocz(extradata_size +
AV_INPUT_BUFFER_PADDING_SIZE);
if (!extradata)
return AVERROR(ENOMEM);
AV_WB32(extradata, extradata_size);
memcpy(&extradata[4], "alac", 4);
AV_WB32(&extradata[8], 0);
memcpy(&extradata[12], track->codec_priv.data,
track->codec_priv.size);
} else if (codec_id == AV_CODEC_ID_TTA) {
extradata_size = 30;
extradata = av_mallocz(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!extradata)
return AVERROR(ENOMEM);
ffio_init_context(&b, extradata, extradata_size, 1,
NULL, NULL, NULL, NULL);
avio_write(&b, "TTA1", 4);
avio_wl16(&b, 1);
if (track->audio.channels > UINT16_MAX ||
track->audio.bitdepth > UINT16_MAX) {
av_log(matroska->ctx, AV_LOG_WARNING,
"Too large audio channel number %"PRIu64
" or bitdepth %"PRIu64". Skipping track.\n",
track->audio.channels, track->audio.bitdepth);
av_freep(&extradata);
if (matroska->ctx->error_recognition & AV_EF_EXPLODE)
return AVERROR_INVALIDDATA;
else
continue;
}
avio_wl16(&b, track->audio.channels);
avio_wl16(&b, track->audio.bitdepth);
if (track->audio.out_samplerate < 0 || track->audio.out_samplerate > INT_MAX)
return AVERROR_INVALIDDATA;
avio_wl32(&b, track->audio.out_samplerate);
avio_wl32(&b, av_rescale((matroska->duration * matroska->time_scale),
track->audio.out_samplerate,
AV_TIME_BASE * 1000));
} else if (codec_id == AV_CODEC_ID_RV10 ||
codec_id == AV_CODEC_ID_RV20 ||
codec_id == AV_CODEC_ID_RV30 ||
codec_id == AV_CODEC_ID_RV40) {
extradata_offset = 26;
} else if (codec_id == AV_CODEC_ID_RA_144) {
track->audio.out_samplerate = 8000;
track->audio.channels = 1;
} else if ((codec_id == AV_CODEC_ID_RA_288 ||
codec_id == AV_CODEC_ID_COOK ||
codec_id == AV_CODEC_ID_ATRAC3 ||
codec_id == AV_CODEC_ID_SIPR)
&& track->codec_priv.data) {
int flavor;
ffio_init_context(&b, track->codec_priv.data,
track->codec_priv.size,
0, NULL, NULL, NULL, NULL);
avio_skip(&b, 22);
flavor = avio_rb16(&b);
track->audio.coded_framesize = avio_rb32(&b);
avio_skip(&b, 12);
track->audio.sub_packet_h = avio_rb16(&b);
track->audio.frame_size = avio_rb16(&b);
track->audio.sub_packet_size = avio_rb16(&b);
if (flavor < 0 ||
track->audio.coded_framesize <= 0 ||
track->audio.sub_packet_h <= 0 ||
track->audio.frame_size <= 0 ||
track->audio.sub_packet_size <= 0 && codec_id != AV_CODEC_ID_SIPR)
return AVERROR_INVALIDDATA;
track->audio.buf = av_malloc_array(track->audio.sub_packet_h,
track->audio.frame_size);
if (!track->audio.buf)
return AVERROR(ENOMEM);
if (codec_id == AV_CODEC_ID_RA_288) {
st->codecpar->block_align = track->audio.coded_framesize;
track->codec_priv.size = 0;
} else {
if (codec_id == AV_CODEC_ID_SIPR && flavor < 4) {
static const int sipr_bit_rate[4] = { 6504, 8496, 5000, 16000 };
track->audio.sub_packet_size = ff_sipr_subpk_size[flavor];
st->codecpar->bit_rate = sipr_bit_rate[flavor];
}
st->codecpar->block_align = track->audio.sub_packet_size;
extradata_offset = 78;
}
} else if (codec_id == AV_CODEC_ID_FLAC && track->codec_priv.size) {
ret = matroska_parse_flac(s, track, &extradata_offset);
if (ret < 0)
return ret;
} else if (codec_id == AV_CODEC_ID_PRORES && track->codec_priv.size == 4) {
fourcc = AV_RL32(track->codec_priv.data);
}
track->codec_priv.size -= extradata_offset;
if (codec_id == AV_CODEC_ID_NONE)
av_log(matroska->ctx, AV_LOG_INFO,
"Unknown/unsupported AVCodecID %s.\n", track->codec_id);
if (track->time_scale < 0.01)
track->time_scale = 1.0;
avpriv_set_pts_info(st, 64, matroska->time_scale * track->time_scale,
1000 * 1000 * 1000); /* 64 bit pts in ns */
/* convert the delay from ns to the track timebase */
track->codec_delay_in_track_tb = av_rescale_q(track->codec_delay,
(AVRational){ 1, 1000000000 },
st->time_base);
st->codecpar->codec_id = codec_id;
if (strcmp(track->language, "und"))
av_dict_set(&st->metadata, "language", track->language, 0);
av_dict_set(&st->metadata, "title", track->name, 0);
if (track->flag_default)
st->disposition |= AV_DISPOSITION_DEFAULT;
if (track->flag_forced)
st->disposition |= AV_DISPOSITION_FORCED;
if (!st->codecpar->extradata) {
if (extradata) {
st->codecpar->extradata = extradata;
st->codecpar->extradata_size = extradata_size;
} else if (track->codec_priv.data && track->codec_priv.size > 0) {
if (ff_alloc_extradata(st->codecpar, track->codec_priv.size))
return AVERROR(ENOMEM);
memcpy(st->codecpar->extradata,
track->codec_priv.data + extradata_offset,
track->codec_priv.size);
}
}
if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
MatroskaTrackPlane *planes = track->operation.combine_planes.elem;
int display_width_mul = 1;
int display_height_mul = 1;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_tag = fourcc;
if (bit_depth >= 0)
st->codecpar->bits_per_coded_sample = bit_depth;
st->codecpar->width = track->video.pixel_width;
st->codecpar->height = track->video.pixel_height;
if (track->video.interlaced == MATROSKA_VIDEO_INTERLACE_FLAG_INTERLACED)
st->codecpar->field_order = mkv_field_order(matroska, track->video.field_order);
else if (track->video.interlaced == MATROSKA_VIDEO_INTERLACE_FLAG_PROGRESSIVE)
st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
if (track->video.stereo_mode && track->video.stereo_mode < MATROSKA_VIDEO_STEREOMODE_TYPE_NB)
mkv_stereo_mode_display_mul(track->video.stereo_mode, &display_width_mul, &display_height_mul);
if (track->video.display_unit < MATROSKA_VIDEO_DISPLAYUNIT_UNKNOWN) {
av_reduce(&st->sample_aspect_ratio.num,
&st->sample_aspect_ratio.den,
st->codecpar->height * track->video.display_width * display_width_mul,
st->codecpar->width * track->video.display_height * display_height_mul,
255);
}
if (st->codecpar->codec_id != AV_CODEC_ID_HEVC)
st->need_parsing = AVSTREAM_PARSE_HEADERS;
if (track->default_duration) {
av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
1000000000, track->default_duration, 30000);
#if FF_API_R_FRAME_RATE
if ( st->avg_frame_rate.num < st->avg_frame_rate.den * 1000LL
&& st->avg_frame_rate.num > st->avg_frame_rate.den * 5LL)
st->r_frame_rate = st->avg_frame_rate;
#endif
}
/* export stereo mode flag as metadata tag */
if (track->video.stereo_mode && track->video.stereo_mode < MATROSKA_VIDEO_STEREOMODE_TYPE_NB)
av_dict_set(&st->metadata, "stereo_mode", ff_matroska_video_stereo_mode[track->video.stereo_mode], 0);
/* export alpha mode flag as metadata tag */
if (track->video.alpha_mode)
av_dict_set(&st->metadata, "alpha_mode", "1", 0);
/* if we have virtual track, mark the real tracks */
for (j=0; j < track->operation.combine_planes.nb_elem; j++) {
char buf[32];
if (planes[j].type >= MATROSKA_VIDEO_STEREO_PLANE_COUNT)
continue;
snprintf(buf, sizeof(buf), "%s_%d",
ff_matroska_video_stereo_plane[planes[j].type], i);
for (k=0; k < matroska->tracks.nb_elem; k++)
if (planes[j].uid == tracks[k].uid && tracks[k].stream) {
av_dict_set(&tracks[k].stream->metadata,
"stereo_mode", buf, 0);
break;
}
}
// add stream level stereo3d side data if it is a supported format
if (track->video.stereo_mode < MATROSKA_VIDEO_STEREOMODE_TYPE_NB &&
track->video.stereo_mode != 10 && track->video.stereo_mode != 12) {
int ret = ff_mkv_stereo3d_conv(st, track->video.stereo_mode);
if (ret < 0)
return ret;
}
ret = mkv_parse_video_color(st, track);
if (ret < 0)
return ret;
ret = mkv_parse_video_projection(st, track);
if (ret < 0)
return ret;
} else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_tag = fourcc;
st->codecpar->sample_rate = track->audio.out_samplerate;
st->codecpar->channels = track->audio.channels;
if (!st->codecpar->bits_per_coded_sample)
st->codecpar->bits_per_coded_sample = track->audio.bitdepth;
if (st->codecpar->codec_id == AV_CODEC_ID_MP3)
st->need_parsing = AVSTREAM_PARSE_FULL;
else if (st->codecpar->codec_id != AV_CODEC_ID_AAC)
st->need_parsing = AVSTREAM_PARSE_HEADERS;
if (track->codec_delay > 0) {
st->codecpar->initial_padding = av_rescale_q(track->codec_delay,
(AVRational){1, 1000000000},
(AVRational){1, st->codecpar->codec_id == AV_CODEC_ID_OPUS ?
48000 : st->codecpar->sample_rate});
}
if (track->seek_preroll > 0) {
st->codecpar->seek_preroll = av_rescale_q(track->seek_preroll,
(AVRational){1, 1000000000},
(AVRational){1, st->codecpar->sample_rate});
}
} else if (codec_id == AV_CODEC_ID_WEBVTT) {
st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
if (!strcmp(track->codec_id, "D_WEBVTT/CAPTIONS")) {
st->disposition |= AV_DISPOSITION_CAPTIONS;
} else if (!strcmp(track->codec_id, "D_WEBVTT/DESCRIPTIONS")) {
st->disposition |= AV_DISPOSITION_DESCRIPTIONS;
} else if (!strcmp(track->codec_id, "D_WEBVTT/METADATA")) {
st->disposition |= AV_DISPOSITION_METADATA;
}
} else if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE) {
st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
if (st->codecpar->codec_id == AV_CODEC_ID_ASS)
matroska->contains_ssa = 1;
}
}
return 0;
}
| true | FFmpeg | e07649e618caedc07eaf2f4d09253de7f77d14f0 |
8,774 | static void vnc_display_close(VncDisplay *vd)
{
size_t i;
if (!vd) {
return;
}
vd->is_unix = false;
for (i = 0; i < vd->nlsock; i++) {
if (vd->lsock_tag[i]) {
g_source_remove(vd->lsock_tag[i]);
}
object_unref(OBJECT(vd->lsock[i]));
}
g_free(vd->lsock);
g_free(vd->lsock_tag);
vd->lsock = NULL;
vd->lsock_tag = NULL;
vd->nlsock = 0;
for (i = 0; i < vd->nlwebsock; i++) {
if (vd->lwebsock_tag[i]) {
g_source_remove(vd->lwebsock_tag[i]);
}
object_unref(OBJECT(vd->lwebsock[i]));
}
g_free(vd->lwebsock);
g_free(vd->lwebsock_tag);
vd->lwebsock = NULL;
vd->lwebsock_tag = NULL;
vd->nlwebsock = 0;
vd->auth = VNC_AUTH_INVALID;
vd->subauth = VNC_AUTH_INVALID;
if (vd->tlscreds) {
object_unparent(OBJECT(vd->tlscreds));
vd->tlscreds = NULL;
}
g_free(vd->tlsaclname);
vd->tlsaclname = NULL;
if (vd->lock_key_sync) {
qemu_remove_led_event_handler(vd->led);
}
} | true | qemu | 2dc120beb89b825033972db441ab540bcc42a17e |
8,775 | static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf)
{
SCSIRequest *req = &r->req;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
uint64_t nb_sectors;
int buflen = 0;
int ret;
switch (req->cmd.buf[0]) {
case TEST_UNIT_READY:
if (!bdrv_is_inserted(s->bs))
goto not_ready;
break;
case REQUEST_SENSE:
if (req->cmd.xfer < 4)
goto illegal_request;
memset(outbuf, 0, 4);
buflen = 4;
if (s->sense.key == NOT_READY && req->cmd.xfer >= 18) {
memset(outbuf, 0, 18);
buflen = 18;
outbuf[7] = 10;
/* asc 0x3a, ascq 0: Medium not present */
outbuf[12] = 0x3a;
outbuf[13] = 0;
}
outbuf[0] = 0xf0;
outbuf[1] = 0;
outbuf[2] = s->sense.key;
scsi_disk_clear_sense(s);
break;
case INQUIRY:
buflen = scsi_disk_emulate_inquiry(req, outbuf);
if (buflen < 0)
goto illegal_request;
break;
case MODE_SENSE:
case MODE_SENSE_10:
buflen = scsi_disk_emulate_mode_sense(req, outbuf);
if (buflen < 0)
goto illegal_request;
break;
case READ_TOC:
buflen = scsi_disk_emulate_read_toc(req, outbuf);
if (buflen < 0)
goto illegal_request;
break;
case RESERVE:
if (req->cmd.buf[1] & 1)
goto illegal_request;
break;
case RESERVE_10:
if (req->cmd.buf[1] & 3)
goto illegal_request;
break;
case RELEASE:
if (req->cmd.buf[1] & 1)
goto illegal_request;
break;
case RELEASE_10:
if (req->cmd.buf[1] & 3)
goto illegal_request;
break;
case START_STOP:
if (s->drive_kind == SCSI_CD && (req->cmd.buf[4] & 2)) {
/* load/eject medium */
bdrv_eject(s->bs, !(req->cmd.buf[4] & 1));
}
break;
case ALLOW_MEDIUM_REMOVAL:
bdrv_set_locked(s->bs, req->cmd.buf[4] & 1);
break;
case READ_CAPACITY:
/* The normal LEN field for this command is zero. */
memset(outbuf, 0, 8);
bdrv_get_geometry(s->bs, &nb_sectors);
if (!nb_sectors)
goto not_ready;
nb_sectors /= s->cluster_size;
/* Returned value is the address of the last sector. */
nb_sectors--;
/* Remember the new size for read/write sanity checking. */
s->max_lba = nb_sectors;
/* Clip to 2TB, instead of returning capacity modulo 2TB. */
if (nb_sectors > UINT32_MAX)
nb_sectors = UINT32_MAX;
outbuf[0] = (nb_sectors >> 24) & 0xff;
outbuf[1] = (nb_sectors >> 16) & 0xff;
outbuf[2] = (nb_sectors >> 8) & 0xff;
outbuf[3] = nb_sectors & 0xff;
outbuf[4] = 0;
outbuf[5] = 0;
outbuf[6] = s->cluster_size * 2;
outbuf[7] = 0;
buflen = 8;
break;
case SYNCHRONIZE_CACHE:
ret = bdrv_flush(s->bs);
if (ret < 0) {
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_FLUSH)) {
return -1;
}
}
break;
case GET_CONFIGURATION:
memset(outbuf, 0, 8);
/* ??? This should probably return much more information. For now
just return the basic header indicating the CD-ROM profile. */
outbuf[7] = 8; // CD-ROM
buflen = 8;
break;
case SERVICE_ACTION_IN:
/* Service Action In subcommands. */
if ((req->cmd.buf[1] & 31) == 0x10) {
DPRINTF("SAI READ CAPACITY(16)\n");
memset(outbuf, 0, req->cmd.xfer);
bdrv_get_geometry(s->bs, &nb_sectors);
if (!nb_sectors)
goto not_ready;
nb_sectors /= s->cluster_size;
/* Returned value is the address of the last sector. */
nb_sectors--;
/* Remember the new size for read/write sanity checking. */
s->max_lba = nb_sectors;
outbuf[0] = (nb_sectors >> 56) & 0xff;
outbuf[1] = (nb_sectors >> 48) & 0xff;
outbuf[2] = (nb_sectors >> 40) & 0xff;
outbuf[3] = (nb_sectors >> 32) & 0xff;
outbuf[4] = (nb_sectors >> 24) & 0xff;
outbuf[5] = (nb_sectors >> 16) & 0xff;
outbuf[6] = (nb_sectors >> 8) & 0xff;
outbuf[7] = nb_sectors & 0xff;
outbuf[8] = 0;
outbuf[9] = 0;
outbuf[10] = s->cluster_size * 2;
outbuf[11] = 0;
outbuf[12] = 0;
outbuf[13] = get_physical_block_exp(&s->qdev.conf);
/* set TPE bit if the format supports discard */
if (s->qdev.conf.discard_granularity) {
outbuf[14] = 0x80;
}
/* Protection, exponent and lowest lba field left blank. */
buflen = req->cmd.xfer;
break;
}
DPRINTF("Unsupported Service Action In\n");
goto illegal_request;
case REPORT_LUNS:
if (req->cmd.xfer < 16)
goto illegal_request;
memset(outbuf, 0, 16);
outbuf[3] = 8;
buflen = 16;
break;
case VERIFY:
break;
case REZERO_UNIT:
DPRINTF("Rezero Unit\n");
if (!bdrv_is_inserted(s->bs)) {
goto not_ready;
}
break;
default:
goto illegal_request;
}
scsi_req_set_status(r, GOOD, NO_SENSE);
return buflen;
not_ready:
scsi_command_complete(r, CHECK_CONDITION, NOT_READY);
return -1;
illegal_request:
scsi_command_complete(r, CHECK_CONDITION, ILLEGAL_REQUEST);
return -1;
}
| true | qemu | a1f0cce2ac0243572ff72aa561da67fe3766a395 |
8,776 | static int xen_init(MachineState *ms)
{
xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }
    qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
    global_state_set_optional();
    savevm_skip_configuration();
    savevm_skip_section_footers();
    return 0;
}
| true | qemu | e0cb42ae4bc4438ba4ec0760df2d830b8759b255 |
8,777 | static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cpu = CPU(dev);
if (dev->hotplugged) {
cpu_synchronize_post_init(cpu);
cpu_resume(cpu);
}
} | true | qemu | 2bfe11c8fac96db4f94abbe818fbc964a6744130 |
8,778 | uint8_t sd_read_data(SDState *sd)
{
/* TODO: Append CRCs */
uint8_t ret;
int io_len;
if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable)
return 0x00;
if (sd->state != sd_sendingdata_state) {
qemu_log_mask(LOG_GUEST_ERROR,
"sd_read_data: not in Sending-Data state\n");
return 0x00;
}
if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION))
return 0x00;
io_len = (sd->ocr & (1 << 30)) ? 512 : sd->blk_len;
switch (sd->current_cmd) {
case 6: /* CMD6: SWITCH_FUNCTION */
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= 64)
sd->state = sd_transfer_state;
break;
case 9: /* CMD9: SEND_CSD */
case 10: /* CMD10: SEND_CID */
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= 16)
sd->state = sd_transfer_state;
break;
case 11: /* CMD11: READ_DAT_UNTIL_STOP */
if (sd->data_offset == 0)
BLK_READ_BLOCK(sd->data_start, io_len);
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= io_len) {
sd->data_start += io_len;
sd->data_offset = 0;
if (sd->data_start + io_len > sd->size) {
sd->card_status |= ADDRESS_ERROR;
break;
}
}
break;
case 13: /* ACMD13: SD_STATUS */
ret = sd->sd_status[sd->data_offset ++];
if (sd->data_offset >= sizeof(sd->sd_status))
sd->state = sd_transfer_state;
break;
case 17: /* CMD17: READ_SINGLE_BLOCK */
if (sd->data_offset == 0)
BLK_READ_BLOCK(sd->data_start, io_len);
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= io_len)
sd->state = sd_transfer_state;
break;
case 18: /* CMD18: READ_MULTIPLE_BLOCK */
if (sd->data_offset == 0)
BLK_READ_BLOCK(sd->data_start, io_len);
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= io_len) {
sd->data_start += io_len;
sd->data_offset = 0;
if (sd->multi_blk_cnt != 0) {
if (--sd->multi_blk_cnt == 0) {
/* Stop! */
sd->state = sd_transfer_state;
break;
}
}
if (sd->data_start + io_len > sd->size) {
sd->card_status |= ADDRESS_ERROR;
break;
}
}
break;
case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= 4)
sd->state = sd_transfer_state;
break;
case 30: /* CMD30: SEND_WRITE_PROT */
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= 4)
sd->state = sd_transfer_state;
break;
case 51: /* ACMD51: SEND_SCR */
ret = sd->scr[sd->data_offset ++];
if (sd->data_offset >= sizeof(sd->scr))
sd->state = sd_transfer_state;
break;
case 56: /* CMD56: GEN_CMD */
if (sd->data_offset == 0)
APP_READ_BLOCK(sd->data_start, sd->blk_len);
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= sd->blk_len)
sd->state = sd_transfer_state;
break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "sd_read_data: unknown command\n");
return 0x00;
}
return ret;
}
| true | qemu | 8573378e62d19e25a2434e23462ec99ef4d065ac |
8,779 | static av_always_inline int sbr_hf_apply_noise(int (*Y)[2],
const SoftFloat *s_m,
const SoftFloat *q_filt,
int noise,
int phi_sign0,
int phi_sign1,
int m_max)
{
int m;
for (m = 0; m < m_max; m++) {
int y0 = Y[m][0];
int y1 = Y[m][1];
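/* step through the 512-entry noise table, wrapping the index at 0x1ff */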
noise = (noise + 1) & 0x1ff;
if (s_m[m].mant) {
int shift, round;
shift = 22 - s_m[m].exp;
if (shift < 1) {
av_log(NULL, AV_LOG_ERROR, "Overflow in sbr_hf_apply_noise, shift=%d\n", shift);
return AVERROR(ERANGE);
} else if (shift < 30) {
round = 1 << (shift-1);
y0 += (s_m[m].mant * phi_sign0 + round) >> shift;
y1 += (s_m[m].mant * phi_sign1 + round) >> shift;
}
} else {
int shift, round, tmp;
int64_t accu;
shift = 22 - q_filt[m].exp;
if (shift < 1) {
av_log(NULL, AV_LOG_ERROR, "Overflow in sbr_hf_apply_noise, shift=%d\n", shift);
return AVERROR(ERANGE);
} else if (shift < 30) {
round = 1 << (shift-1);
accu = (int64_t)q_filt[m].mant * ff_sbr_noise_table_fixed[noise][0];
tmp = (int)((accu + 0x40000000) >> 31);
y0 += (tmp + round) >> shift;
accu = (int64_t)q_filt[m].mant * ff_sbr_noise_table_fixed[noise][1];
tmp = (int)((accu + 0x40000000) >> 31);
y1 += (tmp + round) >> shift;
}
}
Y[m][0] = y0;
Y[m][1] = y1;
phi_sign1 = -phi_sign1;
}
return 0;
}
| true | FFmpeg | 2061de8a3f73f14806e5f6ccaf9a635f740a54e6 |
8,780 | int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
const H2645NAL *nal)
{
int i, j, ret = 0;
int first_slice = sl == h->slice_ctx && !h->current_slice;
ret = h264_slice_header_parse(h, sl, nal);
if (ret < 0)
return ret;
if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
if (h->current_slice) {
if (h->setup_finished) {
av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
return AVERROR_INVALIDDATA;
}
if (h->max_contexts > 1) {
if (!h->single_decode_warning) {
av_log(h->avctx, AV_LOG_WARNING, "Cannot decode multiple access units as slice threads\n");
h->single_decode_warning = 1;
}
h->max_contexts = 1;
return SLICE_SINGLETHREAD;
}
if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
ret = ff_h264_field_end(h, h->slice_ctx, 1);
h->current_slice = 0;
if (ret < 0)
return ret;
} else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == NAL_IDR_SLICE) {
av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
ret = ff_h264_field_end(h, h->slice_ctx, 1);
h->current_slice = 0;
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
h->cur_pic_ptr = NULL;
if (ret < 0)
return ret;
} else
return AVERROR_INVALIDDATA;
}
if (!h->first_field) {
if (h->cur_pic_ptr && !h->droppable) {
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
h->cur_pic_ptr = NULL;
}
}
if (!h->current_slice)
av_assert0(sl == h->slice_ctx);
if (h->current_slice == 0 && !h->first_field) {
if (
(h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
(h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
(h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
(h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
h->avctx->skip_frame >= AVDISCARD_ALL) {
return SLICE_SKIPED;
}
}
if (!first_slice) {
const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
if (h->ps.pps->sps_id != pps->sps_id ||
h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
(h->setup_finished && h->ps.pps != pps)*/) {
av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
return AVERROR_INVALIDDATA;
}
if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
av_log(h->avctx, AV_LOG_ERROR,
"SPS changed in the middle of the frame\n");
return AVERROR_INVALIDDATA;
}
}
if (h->current_slice == 0) {
ret = h264_field_start(h, sl, nal, first_slice);
if (ret < 0)
return ret;
} else {
if (h->picture_structure != sl->picture_structure ||
h->droppable != (nal->ref_idc == 0)) {
av_log(h->avctx, AV_LOG_ERROR,
"Changing field mode (%d -> %d) between slices is not allowed\n",
h->picture_structure, sl->picture_structure);
return AVERROR_INVALIDDATA;
} else if (!h->cur_pic_ptr) {
av_log(h->avctx, AV_LOG_ERROR,
"unset cur_pic_ptr on slice %d\n",
h->current_slice + 1);
return AVERROR_INVALIDDATA;
}
}
av_assert1(h->mb_num == h->mb_width * h->mb_height);
if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
sl->first_mb_addr >= h->mb_num) {
av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
return AVERROR_INVALIDDATA;
}
sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
FIELD_OR_MBAFF_PICTURE(h);
if (h->picture_structure == PICT_BOTTOM_FIELD)
sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
av_assert1(sl->mb_y < h->mb_height);
if (!h->setup_finished) {
ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
h->nb_mmco = sl->nb_mmco;
h->explicit_ref_marking = sl->explicit_ref_marking;
}
ret = ff_h264_build_ref_list(h, sl);
if (ret < 0)
return ret;
if (h->ps.pps->weighted_bipred_idc == 2 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) {
implicit_weight_table(h, sl, -1);
if (FRAME_MBAFF(h)) {
implicit_weight_table(h, sl, 0);
implicit_weight_table(h, sl, 1);
}
}
if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred)
ff_h264_direct_dist_scale_factor(h, sl);
ff_h264_direct_ref_list_init(h, sl);
if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
(h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
h->nal_unit_type != NAL_IDR_SLICE) ||
(h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
(h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
(h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
nal->ref_idc == 0))
sl->deblocking_filter = 0;
if (sl->deblocking_filter == 1 && h->max_contexts > 1) {
if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
/* Cheat slightly for speed:
* Do not bother to deblock across slices. */
sl->deblocking_filter = 2;
} else {
h->postpone_filter = 1;
}
}
sl->qp_thresh = 15 -
FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
FFMAX3(0,
h->ps.pps->chroma_qp_index_offset[0],
h->ps.pps->chroma_qp_index_offset[1]) +
6 * (h->ps.sps->bit_depth_luma - 8);
sl->slice_num = ++h->current_slice;
if (sl->slice_num)
h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
&& h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
&& sl->slice_num >= MAX_SLICES) {
//in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
}
for (j = 0; j < 2; j++) {
int id_list[16];
int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
for (i = 0; i < 16; i++) {
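/* 60 is a default placeholder, overwritten below when the reference is matched */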
id_list[i] = 60;
if (j < sl->list_count && i < sl->ref_count[j] &&
sl->ref_list[j][i].parent->f->buf[0]) {
int k;
AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
for (k = 0; k < h->short_ref_count; k++)
if (h->short_ref[k]->f->buf[0]->buffer == buf) {
id_list[i] = k;
break;
}
for (k = 0; k < h->long_ref_count; k++)
if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
id_list[i] = h->short_ref_count + k;
break;
}
}
}
ref2frm[0] =
ref2frm[1] = -1;
for (i = 0; i < 16; i++)
ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
ref2frm[18 + 0] =
ref2frm[18 + 1] = -1;
for (i = 16; i < 48; i++)
ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
(sl->ref_list[j][i].reference & 3);
}
if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
av_log(h->avctx, AV_LOG_DEBUG,
"slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
sl->slice_num,
(h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
sl->mb_y * h->mb_width + sl->mb_x,
av_get_picture_type_char(sl->slice_type),
sl->slice_type_fixed ? " fix" : "",
nal->type == NAL_IDR_SLICE ? " IDR" : "",
h->poc.frame_num,
h->cur_pic_ptr->field_poc[0],
h->cur_pic_ptr->field_poc[1],
sl->ref_count[0], sl->ref_count[1],
sl->qscale,
sl->deblocking_filter,
sl->slice_alpha_c0_offset, sl->slice_beta_offset,
sl->pwt.use_weight,
sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
}
return 0;
}
| false | FFmpeg | 8385e1718ee1fed107ec911d0b17fed3f7d74c71 |
8,781 | static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
const DXVA_PicParams_H264 *pp, unsigned position, unsigned size)
{
const H264Context *h = avctx->priv_data;
H264SliceContext *sl = &h->slice_ctx[0];
AVDXVAContext *ctx = avctx->hwaccel_context;
unsigned list;
memset(slice, 0, sizeof(*slice));
slice->BSNALunitDataLocation = position;
slice->SliceBytesInBuffer = size;
slice->wBadSliceChopping = 0;
slice->first_mb_in_slice = (sl->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + sl->mb_x;
slice->NumMbsForSlice = 0; /* XXX it is set once we have all slices */
slice->BitOffsetToSliceData = get_bits_count(&sl->gb) - 8;
slice->slice_type = ff_h264_get_slice_type(sl);
if (sl->slice_type_fixed)
slice->slice_type += 5;
slice->luma_log2_weight_denom = sl->pwt.luma_log2_weight_denom;
slice->chroma_log2_weight_denom = sl->pwt.chroma_log2_weight_denom;
if (sl->list_count > 0)
slice->num_ref_idx_l0_active_minus1 = sl->ref_count[0] - 1;
if (sl->list_count > 1)
slice->num_ref_idx_l1_active_minus1 = sl->ref_count[1] - 1;
slice->slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2;
slice->slice_beta_offset_div2 = sl->slice_beta_offset / 2;
slice->Reserved8Bits = 0;
for (list = 0; list < 2; list++) {
unsigned i;
for (i = 0; i < FF_ARRAY_ELEMS(slice->RefPicList[list]); i++) {
if (list < sl->list_count && i < sl->ref_count[list]) {
const H264Picture *r = sl->ref_list[list][i].parent;
unsigned plane;
unsigned index;
if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO)
index = ff_dxva2_get_surface_index(avctx, ctx, r->f);
else
index = get_refpic_index(pp, ff_dxva2_get_surface_index(avctx, ctx, r->f));
fill_picture_entry(&slice->RefPicList[list][i], index,
sl->ref_list[list][i].reference == PICT_BOTTOM_FIELD);
for (plane = 0; plane < 3; plane++) {
int w, o;
if (plane == 0 && sl->pwt.luma_weight_flag[list]) {
w = sl->pwt.luma_weight[i][list][0];
o = sl->pwt.luma_weight[i][list][1];
} else if (plane >= 1 && sl->pwt.chroma_weight_flag[list]) {
w = sl->pwt.chroma_weight[i][list][plane-1][0];
o = sl->pwt.chroma_weight[i][list][plane-1][1];
} else {
w = 1 << (plane == 0 ? sl->pwt.luma_log2_weight_denom :
sl->pwt.chroma_log2_weight_denom);
o = 0;
}
slice->Weights[list][i][plane][0] = w;
slice->Weights[list][i][plane][1] = o;
}
} else {
unsigned plane;
slice->RefPicList[list][i].bPicEntry = 0xff;
for (plane = 0; plane < 3; plane++) {
slice->Weights[list][i][plane][0] = 0;
slice->Weights[list][i][plane][1] = 0;
}
}
}
}
slice->slice_qs_delta = 0; /* XXX not implemented by FFmpeg */
slice->slice_qp_delta = sl->qscale - h->ps.pps->init_qp;
slice->redundant_pic_cnt = sl->redundant_pic_count;
if (sl->slice_type == AV_PICTURE_TYPE_B)
slice->direct_spatial_mv_pred_flag = sl->direct_spatial_mv_pred;
slice->cabac_init_idc = h->ps.pps->cabac ? sl->cabac_init_idc : 0;
if (sl->deblocking_filter < 2)
slice->disable_deblocking_filter_idc = 1 - sl->deblocking_filter;
else
slice->disable_deblocking_filter_idc = sl->deblocking_filter;
slice->slice_id = h->current_slice - 1;
}
| false | FFmpeg | ab28108a361196134704071b7b34c42fc7d747c7 |
8,782 | static int dvdsub_parse(AVCodecParserContext *s,
AVCodecContext *avctx,
const uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
{
DVDSubParseContext *pc = s->priv_data;
if (pc->packet_index == 0) {
if (buf_size < 2)
return buf_size;
pc->packet_len = AV_RB16(buf);
if (pc->packet_len == 0) /* HD-DVD subpicture packet */
pc->packet_len = AV_RB32(buf+2);
av_freep(&pc->packet);
pc->packet = av_malloc(pc->packet_len);
}
if (pc->packet) {
if (pc->packet_index + buf_size <= pc->packet_len) {
memcpy(pc->packet + pc->packet_index, buf, buf_size);
pc->packet_index += buf_size;
if (pc->packet_index >= pc->packet_len) {
*poutbuf = pc->packet;
*poutbuf_size = pc->packet_len;
pc->packet_index = 0;
return buf_size;
}
} else {
/* erroneous size */
pc->packet_index = 0;
}
}
*poutbuf = NULL;
*poutbuf_size = 0;
return buf_size;
}
| false | FFmpeg | 81c1657a593b1c0f8e46fca00ead1d30ee1cd418 |
8,783 | static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
{
BitBuf bb;
int res, val;
int i, j;
int bx, by;
int l0x, l1x, l0y, l1y;
int mx, my;
kmvc_init_getbits(bb, &ctx->g);
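/* quadtree decoding: each 8x8 block is filled, copied from the previous frame, or split into 4x4 and then 2x2 sub-blocks */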
for (by = 0; by < h; by += 8)
for (bx = 0; bx < w; bx += 8) {
kmvc_getbit(bb, &ctx->g, res);
if (!res) {
kmvc_getbit(bb, &ctx->g, res);
if (!res) { // fill whole 8x8 block
if (!bytestream2_get_bytes_left(&ctx->g)) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = bytestream2_get_byte(&ctx->g);
for (i = 0; i < 64; i++)
BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val;
} else { // copy block from previous frame
for (i = 0; i < 64; i++)
BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) =
BLK(ctx->prev, bx + (i & 0x7), by + (i >> 3));
}
} else { // handle four 4x4 subblocks
if (!bytestream2_get_bytes_left(&ctx->g)) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
for (i = 0; i < 4; i++) {
l0x = bx + (i & 1) * 4;
l0y = by + (i & 2) * 2;
kmvc_getbit(bb, &ctx->g, res);
if (!res) {
kmvc_getbit(bb, &ctx->g, res);
if (!res) { // fill whole 4x4 block
val = bytestream2_get_byte(&ctx->g);
for (j = 0; j < 16; j++)
BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val;
} else { // copy block
val = bytestream2_get_byte(&ctx->g);
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 318*198) {
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
return AVERROR_INVALIDDATA;
}
for (j = 0; j < 16; j++)
BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) =
BLK(ctx->prev, l0x + (j & 3) + mx, l0y + (j >> 2) + my);
}
} else { // descend to 2x2 sub-sub-blocks
for (j = 0; j < 4; j++) {
l1x = l0x + (j & 1) * 2;
l1y = l0y + (j & 2);
kmvc_getbit(bb, &ctx->g, res);
if (!res) {
kmvc_getbit(bb, &ctx->g, res);
if (!res) { // fill whole 2x2 block
val = bytestream2_get_byte(&ctx->g);
BLK(ctx->cur, l1x, l1y) = val;
BLK(ctx->cur, l1x + 1, l1y) = val;
BLK(ctx->cur, l1x, l1y + 1) = val;
BLK(ctx->cur, l1x + 1, l1y + 1) = val;
} else { // copy block
val = bytestream2_get_byte(&ctx->g);
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 318*198) {
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
return AVERROR_INVALIDDATA;
}
BLK(ctx->cur, l1x, l1y) = BLK(ctx->prev, l1x + mx, l1y + my);
BLK(ctx->cur, l1x + 1, l1y) =
BLK(ctx->prev, l1x + 1 + mx, l1y + my);
BLK(ctx->cur, l1x, l1y + 1) =
BLK(ctx->prev, l1x + mx, l1y + 1 + my);
BLK(ctx->cur, l1x + 1, l1y + 1) =
BLK(ctx->prev, l1x + 1 + mx, l1y + 1 + my);
}
} else { // read values for block
BLK(ctx->cur, l1x, l1y) = bytestream2_get_byte(&ctx->g);
BLK(ctx->cur, l1x + 1, l1y) = bytestream2_get_byte(&ctx->g);
BLK(ctx->cur, l1x, l1y + 1) = bytestream2_get_byte(&ctx->g);
BLK(ctx->cur, l1x + 1, l1y + 1) = bytestream2_get_byte(&ctx->g);
}
}
}
}
}
}
return 0;
}
| false | FFmpeg | 3cd8aaa2b2e78faf039691e1c31ff4f8d94e3bc6 |
8,784 | static inline int16_t g726_iterate(G726Context* c, int16_t I)
{
int dq, re_signal, pk0, fa1, i, tr, ylint, ylfrac, thr2, al, dq0;
Float11 f;
dq = inverse_quant(c, I);
if (I >> (c->tbls->bits - 1)) /* get the sign */
dq = -dq;
re_signal = c->se + dq;
/* Transition detect */
ylint = (c->yl >> 15);
ylfrac = (c->yl >> 10) & 0x1f;
thr2 = (ylint > 9) ? 0x1f << 10 : (0x20 + ylfrac) << ylint;
if (c->td == 1 && abs(dq) > ((thr2+(thr2>>1))>>1))
tr = 1;
else
tr = 0;
/* Update second order predictor coefficient A2 and A1 */
pk0 = (c->sez + dq) ? sgn(c->sez + dq) : 0;
dq0 = dq ? sgn(dq) : 0;
if (tr) {
c->a[0] = 0;
c->a[1] = 0;
for (i=0; i<6; i++)
c->b[i] = 0;
} else {
/* This is a bit crazy, but it really is +255 not +256 */
fa1 = clamp((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
c->a[1] += 128*pk0*c->pk[1] + fa1 - (c->a[1]>>7);
c->a[1] = clamp(c->a[1], -12288, 12288);
c->a[0] += 64*3*pk0*c->pk[0] - (c->a[0] >> 8);
c->a[0] = clamp(c->a[0], -(15360 - c->a[1]), 15360 - c->a[1]);
for (i=0; i<6; i++)
c->b[i] += 128*dq0*sgn(-c->dq[i].sign) - (c->b[i]>>8);
}
/* Update Dq and Sr and Pk */
c->pk[1] = c->pk[0];
c->pk[0] = pk0 ? pk0 : 1;
c->sr[1] = c->sr[0];
i2f(re_signal, &c->sr[0]);
for (i=5; i>0; i--)
c->dq[i] = c->dq[i-1];
i2f(dq, &c->dq[0]);
c->dq[0].sign = I >> (c->tbls->bits - 1); /* Isn't it crazy ?!?! */
/* Update tone detect [I'm not sure 'tr == 0' is really needed] */
c->td = (tr == 0 && c->a[1] < -11776);
/* Update Ap */
c->dms += ((c->tbls->F[I]<<9) - c->dms) >> 5;
c->dml += ((c->tbls->F[I]<<11) - c->dml) >> 7;
if (tr)
c->ap = 256;
else if (c->y > 1535 && !c->td && (abs((c->dms << 2) - c->dml) < (c->dml >> 3)))
c->ap += (-c->ap) >> 4;
else
c->ap += (0x200 - c->ap) >> 4;
/* Update Yu and Yl */
c->yu = clamp(c->y + (((c->tbls->W[I] << 5) - c->y) >> 5), 544, 5120);
c->yl += c->yu + ((-c->yl)>>6);
/* Next iteration for Y */
al = (c->ap >= 256) ? 1<<6 : c->ap >> 2;
c->y = (c->yl + (c->yu - (c->yl>>6))*al) >> 6;
/* Next iteration for SE and SEZ */
c->se = 0;
for (i=0; i<6; i++)
c->se += mult(i2f(c->b[i] >> 2, &f), &c->dq[i]);
c->sez = c->se >> 1;
for (i=0; i<2; i++)
c->se += mult(i2f(c->a[i] >> 2, &f), &c->sr[i]);
c->se >>= 1;
return clamp(re_signal << 2, -0xffff, 0xffff);
}
| false | FFmpeg | eb5b0422b595d488f5c2f2a37a62cd46dfbb6aa7 |
8,785 | static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header)
{
OMAContext *oc = s->priv_data;
ID3v2ExtraMetaGEOB *geob = NULL;
uint8_t *gdata;
oc->encrypted = 1;
av_log(s, AV_LOG_INFO, "File is encrypted\n");
/* find GEOB metadata */
while (em) {
if (!strcmp(em->tag, "GEOB") &&
(geob = em->data) &&
(!strcmp(geob->description, "OMG_LSI") ||
!strcmp(geob->description, "OMG_BKLSI"))) {
break;
}
em = em->next;
}
if (!em) {
av_log(s, AV_LOG_ERROR, "No encryption header found\n");
return -1;
}
if (geob->datasize < 64) {
av_log(s, AV_LOG_ERROR, "Invalid GEOB data size: %u\n", geob->datasize);
return -1;
}
gdata = geob->data;
if (AV_RB16(gdata) != 1)
av_log(s, AV_LOG_WARNING, "Unknown version in encryption header\n");
oc->k_size = AV_RB16(&gdata[2]);
oc->e_size = AV_RB16(&gdata[4]);
oc->i_size = AV_RB16(&gdata[6]);
oc->s_size = AV_RB16(&gdata[8]);
if (memcmp(&gdata[OMA_ENC_HEADER_SIZE], "KEYRING ", 12)) {
av_log(s, AV_LOG_ERROR, "Invalid encryption header\n");
return -1;
}
if (oc->k_size + oc->e_size + oc->i_size > geob->datasize) {
av_log(s, AV_LOG_ERROR, "Too little GEOB data\n");
return AVERROR_INVALIDDATA;
}
oc->rid = AV_RB32(&gdata[OMA_ENC_HEADER_SIZE + 28]);
av_log(s, AV_LOG_DEBUG, "RID: %.8x\n", oc->rid);
memcpy(oc->iv, &header[0x58], 8);
hex_log(s, AV_LOG_DEBUG, "IV", oc->iv, 8);
hex_log(s, AV_LOG_DEBUG, "CBC-MAC", &gdata[OMA_ENC_HEADER_SIZE+oc->k_size+oc->e_size+oc->i_size], 8);
if (s->keylen > 0) {
kset(s, s->key, s->key, s->keylen);
}
if (!memcmp(oc->r_val, (const uint8_t[8]){0}, 8) ||
rprobe(s, gdata, oc->r_val) < 0 &&
nprobe(s, gdata, geob->datasize, oc->n_val) < 0) {
int i;
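/* no key was derived or it failed to verify; try the built-in leaf key table */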
for (i = 0; i < FF_ARRAY_ELEMS(leaf_table); i += 2) {
uint8_t buf[16];
AV_WL64(buf, leaf_table[i]);
AV_WL64(&buf[8], leaf_table[i+1]);
kset(s, buf, buf, 16);
if (!rprobe(s, gdata, oc->r_val) || !nprobe(s, gdata, geob->datasize, oc->n_val))
break;
}
if (i >= FF_ARRAY_ELEMS(leaf_table)) {
av_log(s, AV_LOG_ERROR, "Invalid key\n");
return -1;
}
}
/* e_val */
av_des_init(&oc->av_des, oc->m_val, 64, 0);
av_des_crypt(&oc->av_des, oc->e_val, &gdata[OMA_ENC_HEADER_SIZE + 40], 1, NULL, 0);
hex_log(s, AV_LOG_DEBUG, "EK", oc->e_val, 8);
/* init e_val */
av_des_init(&oc->av_des, oc->e_val, 64, 1);
return 0;
}
| false | FFmpeg | 91e72e35141f590c38985ad0ae3453a4e9e86b8a |
8,786 | uint32_t avpriv_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
{
int i;
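/* linear scan of the conversion table; AV_CODEC_ID_NONE and AV_PIX_FMT_NONE act as wildcards */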
for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if ((codec_id == AV_CODEC_ID_NONE ||
avpriv_fmt_conversion_table[i].codec_id == codec_id) &&
(pix_fmt == AV_PIX_FMT_NONE ||
avpriv_fmt_conversion_table[i].ff_fmt == pix_fmt)) {
return avpriv_fmt_conversion_table[i].v4l2_fmt;
}
}
return 0;
}
| false | FFmpeg | 931da6a5e9dd54563fe5d4d30b7bd4d0a0218c87 |
8,787 | int avfilter_default_config_output_link(AVFilterLink *link)
{
if (link->src->input_count && link->src->inputs[0]) {
if (link->type == AVMEDIA_TYPE_VIDEO) {
link->w = link->src->inputs[0]->w;
link->h = link->src->inputs[0]->h;
link->time_base = link->src->inputs[0]->time_base;
} else if (link->type == AVMEDIA_TYPE_AUDIO) {
link->channel_layout = link->src->inputs[0]->channel_layout;
link->sample_rate = link->src->inputs[0]->sample_rate;
}
} else {
/* XXX: any non-simple filter which would cause this branch to be taken
* really should implement its own config_props() for this link. */
return -1;
}
return 0;
}
| false | FFmpeg | 5f68a91be242c05bdd5aeffd74b36a1e6c782dd5 |
8,788 | static int mov_seek_stream(AVStream *st, int64_t timestamp, int flags)
{
MOVStreamContext *sc = st->priv_data;
int sample, time_sample;
int i;
sample = av_index_search_timestamp(st, timestamp, flags);
dprintf(st->codec, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
if (sample < 0) /* not sure what to do */
return -1;
sc->current_sample = sample;
dprintf(st->codec, "stream %d, found sample %d\n", st->index, sc->current_sample);
/* adjust ctts index */
if (sc->ctts_data) {
time_sample = 0;
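/* walk the composition-time (ctts) entries until they cover the target sample */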
for (i = 0; i < sc->ctts_count; i++) {
time_sample += sc->ctts_data[i].count;
if (time_sample >= sc->current_sample) {
sc->sample_to_ctime_index = i;
sc->sample_to_ctime_sample = time_sample - sc->current_sample;
break;
}
}
}
return sample;
}
| false | FFmpeg | 54a5c7193be19ad781a1df539bd120c3d8987251 |
8,789 | static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
MPADecodeContext *s = avctx->priv_data;
uint32_t header;
int ret;
int skipped = 0;
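/* skip any leading zero bytes before the frame header */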
while(buf_size && !*buf){
buf++;
buf_size--;
skipped++;
}
if (buf_size < HEADER_SIZE)
return AVERROR_INVALIDDATA;
header = AV_RB32(buf);
if (header>>8 == AV_RB32("TAG")>>8) {
av_log(avctx, AV_LOG_DEBUG, "discarding ID3 tag\n");
return buf_size;
}
ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Header missing\n");
return AVERROR_INVALIDDATA;
} else if (ret == 1) {
/* free format: prepare to compute frame size */
s->frame_size = -1;
return AVERROR_INVALIDDATA;
}
/* update codec info */
avctx->channels = s->nb_channels;
avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
if (!avctx->bit_rate)
avctx->bit_rate = s->bit_rate;
if (s->frame_size <= 0) {
av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
return AVERROR_INVALIDDATA;
} else if (s->frame_size < buf_size) {
av_log(avctx, AV_LOG_DEBUG, "incorrect frame size - multiple frames in buffer?\n");
buf_size= s->frame_size;
}
s->frame = data;
ret = mp_decode_frame(s, NULL, buf, buf_size);
if (ret >= 0) {
s->frame->nb_samples = avctx->frame_size;
*got_frame_ptr = 1;
avctx->sample_rate = s->sample_rate;
//FIXME maybe move the other codec info stuff from above here too
} else {
av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
/* Only return an error if the bad frame makes up the whole packet or
* the error is related to buffer management.
* If there is more data in the packet, just consume the bad frame
* instead of returning an error, which would discard the whole
* packet. */
*got_frame_ptr = 0;
if (buf_size == avpkt->size || ret != AVERROR_INVALIDDATA)
return ret;
}
s->frame_size = 0;
return buf_size + skipped;
}
| true | FFmpeg | 5d81616be332cca99304d0b747c2c8e2d719f349 |
8,790 | bool arp_table_search(Slirp *slirp, uint32_t ip_addr,
uint8_t out_ethaddr[ETH_ALEN])
{
const uint32_t broadcast_addr =
~slirp->vnetwork_mask.s_addr | slirp->vnetwork_addr.s_addr;
ArpTable *arptbl = &slirp->arp_table;
int i;
DEBUG_CALL("arp_table_search");
DEBUG_ARG("ip = 0x%x", ip_addr);
/* Check 0.0.0.0/8 invalid source-only addresses */
assert((ip_addr & htonl(~(0xf << 28))) != 0);
/* If broadcast address */
if (ip_addr == 0xffffffff || ip_addr == broadcast_addr) {
/* return Ethernet broadcast address */
memset(out_ethaddr, 0xff, ETH_ALEN);
return 1;
}
for (i = 0; i < ARP_TABLE_SIZE; i++) {
if (arptbl->table[i].ar_sip == ip_addr) {
memcpy(out_ethaddr, arptbl->table[i].ar_sha, ETH_ALEN);
DEBUG_ARGS((dfd, " found hw addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
out_ethaddr[0], out_ethaddr[1], out_ethaddr[2],
out_ethaddr[3], out_ethaddr[4], out_ethaddr[5]));
return 1;
}
}
return 0;
}
| true | qemu | ed6bc28e8a448b9005af50eed12893c5f7711c6e |
8,791 | void smc91c111_init(NICInfo *nd, uint32_t base, void *pic, int irq)
{
smc91c111_state *s;
int iomemtype;
s = (smc91c111_state *)qemu_mallocz(sizeof(smc91c111_state));
iomemtype = cpu_register_io_memory(0, smc91c111_readfn,
smc91c111_writefn, s);
cpu_register_physical_memory(base, 16, iomemtype);
s->base = base;
s->pic = pic;
s->irq = irq;
memcpy(s->macaddr, nd->macaddr, 6);
smc91c111_reset(s);
s->vc = qemu_new_vlan_client(nd->vlan, smc91c111_receive, s);
/* ??? Save/restore. */
}
| true | qemu | d861b05ea30e6ac177de9b679da96194ebe21afc |
8,792 | void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
int motion_x, int motion_y)
{
int cbpc, cbpy, pred_x, pred_y;
PutBitContext *const pb2 = s->data_partitioning ? &s->pb2 : &s->pb;
PutBitContext *const tex_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
PutBitContext *const dc_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
const int interleaved_stats = (s->flags & CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;
if (!s->mb_intra) {
int i, cbp;
if (s->pict_type == AV_PICTURE_TYPE_B) {
/* convert from mv_dir to type */
static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
int mb_type = mb_type_table[s->mv_dir];
if (s->mb_x == 0) {
for (i = 0; i < 2; i++)
s->last_mv[i][0][0] =
s->last_mv[i][0][1] =
s->last_mv[i][1][0] =
s->last_mv[i][1][1] = 0;
}
assert(s->dquant >= -2 && s->dquant <= 2);
assert((s->dquant & 1) == 0);
assert(mb_type >= 0);
/* nothing to do if this MB was skipped in the next P Frame */
if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { // FIXME avoid DCT & ...
s->skip_count++;
s->mv[0][0][0] =
s->mv[0][0][1] =
s->mv[1][0][0] =
s->mv[1][0][1] = 0;
s->mv_dir = MV_DIR_FORWARD; // doesn't matter
s->qscale -= s->dquant;
// s->mb_skipped = 1;
return;
}
cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);
if ((cbp | motion_x | motion_y | mb_type) == 0) {
/* direct MB with MV={0,0} */
assert(s->dquant == 0);
put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */
if (interleaved_stats) {
s->misc_bits++;
s->last_bits++;
}
s->skip_count++;
return;
}
put_bits(&s->pb, 1, 0); /* mb coded modb1=0 */
put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ // FIXME merge
put_bits(&s->pb, mb_type + 1, 1); // this table is so simple that we don't need it :)
if (cbp)
put_bits(&s->pb, 6, cbp);
if (cbp && mb_type) {
if (s->dquant)
put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
else
put_bits(&s->pb, 1, 0);
} else
s->qscale -= s->dquant;
if (!s->progressive_sequence) {
if (cbp)
put_bits(&s->pb, 1, s->interlaced_dct);
if (mb_type) // not direct mode
put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
}
if (interleaved_stats)
s->misc_bits += get_bits_diff(s);
if (!mb_type) {
assert(s->mv_dir & MV_DIRECT);
ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
s->b_count++;
s->f_count++;
} else {
assert(mb_type > 0 && mb_type < 4);
if (s->mv_type != MV_TYPE_FIELD) {
if (s->mv_dir & MV_DIR_FORWARD) {
ff_h263_encode_motion_vector(s,
s->mv[0][0][0] - s->last_mv[0][0][0],
s->mv[0][0][1] - s->last_mv[0][0][1],
s->f_code);
s->last_mv[0][0][0] =
s->last_mv[0][1][0] = s->mv[0][0][0];
s->last_mv[0][0][1] =
s->last_mv[0][1][1] = s->mv[0][0][1];
s->f_count++;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
ff_h263_encode_motion_vector(s,
s->mv[1][0][0] - s->last_mv[1][0][0],
s->mv[1][0][1] - s->last_mv[1][0][1],
s->b_code);
s->last_mv[1][0][0] =
s->last_mv[1][1][0] = s->mv[1][0][0];
s->last_mv[1][0][1] =
s->last_mv[1][1][1] = s->mv[1][0][1];
s->b_count++;
}
} else {
if (s->mv_dir & MV_DIR_FORWARD) {
put_bits(&s->pb, 1, s->field_select[0][0]);
put_bits(&s->pb, 1, s->field_select[0][1]);
}
if (s->mv_dir & MV_DIR_BACKWARD) {
put_bits(&s->pb, 1, s->field_select[1][0]);
put_bits(&s->pb, 1, s->field_select[1][1]);
}
if (s->mv_dir & MV_DIR_FORWARD) {
for (i = 0; i < 2; i++) {
ff_h263_encode_motion_vector(s,
s->mv[0][i][0] - s->last_mv[0][i][0],
s->mv[0][i][1] - s->last_mv[0][i][1] / 2,
s->f_code);
s->last_mv[0][i][0] = s->mv[0][i][0];
s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
}
s->f_count++;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
for (i = 0; i < 2; i++) {
ff_h263_encode_motion_vector(s,
s->mv[1][i][0] - s->last_mv[1][i][0],
s->mv[1][i][1] - s->last_mv[1][i][1] / 2,
s->b_code);
s->last_mv[1][i][0] = s->mv[1][i][0];
s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
}
s->b_count++;
}
}
}
if (interleaved_stats)
s->mv_bits += get_bits_diff(s);
mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);
if (interleaved_stats)
s->p_tex_bits += get_bits_diff(s);
} else { /* s->pict_type==AV_PICTURE_TYPE_B */
cbp = get_p_cbp(s, block, motion_x, motion_y);
if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
s->mv_type == MV_TYPE_16X16) {
/* check if the B frames can skip it too, as we must skip it
* if we skip here why didn't they just compress
* the skip-mb bits instead of reusing them ?! */
if (s->max_b_frames > 0) {
int i;
int x, y, offset;
uint8_t *p_pic;
x = s->mb_x * 16;
y = s->mb_y * 16;
if (x + 16 > s->width)
x = s->width - 16;
if (y + 16 > s->height)
y = s->height - 16;
offset = x + y * s->linesize;
p_pic = s->new_picture.f.data[0] + offset;
s->mb_skipped = 1;
for (i = 0; i < s->max_b_frames; i++) {
uint8_t *b_pic;
int diff;
Picture *pic = s->reordered_input_picture[i + 1];
if (!pic || pic->f.pict_type != AV_PICTURE_TYPE_B)
break;
b_pic = pic->f.data[0] + offset;
if (!pic->shared)
b_pic += INPLACE_OFFSET;
diff = s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
if (diff > s->qscale * 70) { // FIXME check that 70 is optimal
s->mb_skipped = 0;
break;
}
}
} else
s->mb_skipped = 1;
if (s->mb_skipped == 1) {
/* skip macroblock */
put_bits(&s->pb, 1, 1);
if (interleaved_stats) {
s->misc_bits++;
s->last_bits++;
}
s->skip_count++;
return;
}
}
put_bits(&s->pb, 1, 0); /* mb coded */
cbpc = cbp & 3;
cbpy = cbp >> 2;
cbpy ^= 0xf;
if (s->mv_type == MV_TYPE_16X16) {
if (s->dquant)
cbpc += 8;
put_bits(&s->pb,
ff_h263_inter_MCBPC_bits[cbpc],
ff_h263_inter_MCBPC_code[cbpc]);
put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
if (s->dquant)
put_bits(pb2, 2, dquant_code[s->dquant + 2]);
if (!s->progressive_sequence) {
if (cbp)
put_bits(pb2, 1, s->interlaced_dct);
put_bits(pb2, 1, 0);
}
if (interleaved_stats)
s->misc_bits += get_bits_diff(s);
/* motion vectors: 16x16 mode */
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s,
motion_x - pred_x,
motion_y - pred_y,
s->f_code);
} else if (s->mv_type == MV_TYPE_FIELD) {
if (s->dquant)
cbpc += 8;
put_bits(&s->pb,
ff_h263_inter_MCBPC_bits[cbpc],
ff_h263_inter_MCBPC_code[cbpc]);
put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
if (s->dquant)
put_bits(pb2, 2, dquant_code[s->dquant + 2]);
assert(!s->progressive_sequence);
if (cbp)
put_bits(pb2, 1, s->interlaced_dct);
put_bits(pb2, 1, 1);
if (interleaved_stats)
s->misc_bits += get_bits_diff(s);
/* motion vectors: 16x8 interlaced mode */
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
pred_y /= 2;
put_bits(&s->pb, 1, s->field_select[0][0]);
put_bits(&s->pb, 1, s->field_select[0][1]);
ff_h263_encode_motion_vector(s,
s->mv[0][0][0] - pred_x,
s->mv[0][0][1] - pred_y,
s->f_code);
ff_h263_encode_motion_vector(s,
s->mv[0][1][0] - pred_x,
s->mv[0][1][1] - pred_y,
s->f_code);
} else {
assert(s->mv_type == MV_TYPE_8X8);
put_bits(&s->pb,
ff_h263_inter_MCBPC_bits[cbpc + 16],
ff_h263_inter_MCBPC_code[cbpc + 16]);
put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
if (!s->progressive_sequence && cbp)
put_bits(pb2, 1, s->interlaced_dct);
if (interleaved_stats)
s->misc_bits += get_bits_diff(s);
for (i = 0; i < 4; i++) {
/* motion vectors: 8x8 mode*/
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s,
s->current_picture.motion_val[0][s->block_index[i]][0] - pred_x,
s->current_picture.motion_val[0][s->block_index[i]][1] - pred_y,
s->f_code);
}
}
if (interleaved_stats)
s->mv_bits += get_bits_diff(s);
mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);
if (interleaved_stats)
s->p_tex_bits += get_bits_diff(s);
s->f_count++;
}
} else {
int cbp;
int dc_diff[6]; // dc values with the dc prediction subtracted
int dir[6]; // prediction direction
int zigzag_last_index[6];
uint8_t *scan_table[6];
int i;
for (i = 0; i < 6; i++)
dc_diff[i] = ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);
if (s->flags & CODEC_FLAG_AC_PRED) {
s->ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
} else {
for (i = 0; i < 6; i++)
scan_table[i] = s->intra_scantable.permutated;
}
/* compute cbp */
cbp = 0;
for (i = 0; i < 6; i++)
if (s->block_last_index[i] >= 1)
cbp |= 1 << (5 - i);
cbpc = cbp & 3;
if (s->pict_type == AV_PICTURE_TYPE_I) {
if (s->dquant)
cbpc += 4;
put_bits(&s->pb,
ff_h263_intra_MCBPC_bits[cbpc],
ff_h263_intra_MCBPC_code[cbpc]);
} else {
if (s->dquant)
cbpc += 8;
put_bits(&s->pb, 1, 0); /* mb coded */
put_bits(&s->pb,
ff_h263_inter_MCBPC_bits[cbpc + 4],
ff_h263_inter_MCBPC_code[cbpc + 4]);
}
put_bits(pb2, 1, s->ac_pred);
cbpy = cbp >> 2;
put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
if (s->dquant)
put_bits(dc_pb, 2, dquant_code[s->dquant + 2]);
if (!s->progressive_sequence)
put_bits(dc_pb, 1, s->interlaced_dct);
if (interleaved_stats)
s->misc_bits += get_bits_diff(s);
mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb);
if (interleaved_stats)
s->i_tex_bits += get_bits_diff(s);
s->i_count++;
/* restore ac coeffs & last_index stuff
* if we messed them up with the prediction */
if (s->ac_pred)
restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
}
}
| true | FFmpeg | f6774f905fb3cfdc319523ac640be30b14c1bc55 |
8,795 | static void write_section_data(MpegTSContext *ts, MpegTSFilter *tss1,
const uint8_t *buf, int buf_size, int is_start)
{
MpegTSSectionFilter *tss = &tss1->u.section_filter;
int len;
if (is_start) {
memcpy(tss->section_buf, buf, buf_size);
tss->section_index = buf_size;
tss->section_h_size = -1;
tss->end_of_section_reached = 0;
} else {
if (tss->end_of_section_reached)
return;
len = 4096 - tss->section_index;
if (buf_size < len)
len = buf_size;
memcpy(tss->section_buf + tss->section_index, buf, len);
tss->section_index += len;
}
/* compute section length if possible */
if (tss->section_h_size == -1 && tss->section_index >= 3) {
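/* section_length is the low 12 bits of bytes 1-2, plus 3 header bytes */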
len = (AV_RB16(tss->section_buf + 1) & 0xfff) + 3;
if (len > 4096)
return;
tss->section_h_size = len;
}
if (tss->section_h_size != -1 &&
tss->section_index >= tss->section_h_size) {
int crc_valid = 1;
tss->end_of_section_reached = 1;
if (tss->check_crc) {
crc_valid = !av_crc(av_crc_get_table(AV_CRC_32_IEEE), -1, tss->section_buf, tss->section_h_size);
if (crc_valid) {
ts->crc_validity[ tss1->pid ] = 100;
}else if (ts->crc_validity[ tss1->pid ] > -10) {
ts->crc_validity[ tss1->pid ]--;
}else
crc_valid = 2;
}
if (crc_valid)
tss->section_cb(tss1, tss->section_buf, tss->section_h_size);
}
}
| true | FFmpeg | 4b6be54bed27eb7fc8f005505ff38e71b3c86cec |
8,796 | av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
int i, ret, format_supported;
mpv_encode_defaults(s);
switch (avctx->codec_id) {
case AV_CODEC_ID_MPEG2VIDEO:
if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
av_log(avctx, AV_LOG_ERROR,
"only YUV420 and YUV422 are supported\n");
return -1;
}
break;
case AV_CODEC_ID_MJPEG:
format_supported = 0;
/* JPEG color space */
if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
(avctx->color_range == AVCOL_RANGE_JPEG &&
(avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
avctx->pix_fmt == AV_PIX_FMT_YUV422P)))
format_supported = 1;
/* MPEG color space */
else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
(avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
avctx->pix_fmt == AV_PIX_FMT_YUV422P))
format_supported = 1;
if (!format_supported) {
av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
return -1;
}
break;
default:
if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
return -1;
}
}
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUVJ422P:
case AV_PIX_FMT_YUV422P:
s->chroma_format = CHROMA_422;
break;
case AV_PIX_FMT_YUVJ420P:
case AV_PIX_FMT_YUV420P:
default:
s->chroma_format = CHROMA_420;
break;
}
s->bit_rate = avctx->bit_rate;
s->width = avctx->width;
s->height = avctx->height;
if (avctx->gop_size > 600 &&
avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
av_log(avctx, AV_LOG_ERROR,
"Warning keyframe interval too large! reducing it ...\n");
avctx->gop_size = 600;
}
s->gop_size = avctx->gop_size;
s->avctx = avctx;
if (avctx->max_b_frames > MAX_B_FRAMES) {
av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
"is %d.\n", MAX_B_FRAMES);
}
s->max_b_frames = avctx->max_b_frames;
s->codec_id = avctx->codec->id;
s->strict_std_compliance = avctx->strict_std_compliance;
s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
s->mpeg_quant = avctx->mpeg_quant;
s->rtp_mode = !!avctx->rtp_payload_size;
s->intra_dc_precision = avctx->intra_dc_precision;
s->user_specified_pts = AV_NOPTS_VALUE;
if (s->gop_size <= 1) {
s->intra_only = 1;
s->gop_size = 12;
} else {
s->intra_only = 0;
}
#if FF_API_MOTION_EST
FF_DISABLE_DEPRECATION_WARNINGS
s->me_method = avctx->me_method;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
/* Fixed QSCALE */
s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
#if FF_API_MPV_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->border_masking != 0.0)
s->border_masking = avctx->border_masking;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
s->adaptive_quant = (s->avctx->lumi_masking ||
s->avctx->dark_masking ||
s->avctx->temporal_cplx_masking ||
s->avctx->spatial_cplx_masking ||
s->avctx->p_masking ||
s->border_masking ||
(s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
!s->fixed_qscale;
s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
av_log(avctx, AV_LOG_ERROR,
"a vbv buffer size is needed, "
"for encoding with a maximum bitrate\n");
return -1;
}
if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
av_log(avctx, AV_LOG_INFO,
"Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
}
if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
return -1;
}
if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
return -1;
}
if (avctx->rc_max_rate &&
avctx->rc_max_rate == avctx->bit_rate &&
avctx->rc_max_rate != avctx->rc_min_rate) {
av_log(avctx, AV_LOG_INFO,
"impossible bitrate constraints, this will fail\n");
}
if (avctx->rc_buffer_size &&
avctx->bit_rate * (int64_t)avctx->time_base.num >
avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
return -1;
}
if (!s->fixed_qscale &&
avctx->bit_rate * av_q2d(avctx->time_base) >
avctx->bit_rate_tolerance) {
av_log(avctx, AV_LOG_ERROR,
"bitrate tolerance too small for bitrate\n");
return -1;
}
if (s->avctx->rc_max_rate &&
s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
(s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
90000LL * (avctx->rc_buffer_size - 1) >
s->avctx->rc_max_rate * 0xFFFFLL) {
av_log(avctx, AV_LOG_INFO,
"Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
"specified vbv buffer is too large for the given bitrate!\n");
}
if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
s->codec_id != AV_CODEC_ID_FLV1) {
av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
return -1;
}
if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
av_log(avctx, AV_LOG_ERROR,
"OBMC is only supported with simple mb decision\n");
return -1;
}
if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
return -1;
}
if (s->max_b_frames &&
s->codec_id != AV_CODEC_ID_MPEG4 &&
s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
return -1;
}
if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
s->codec_id == AV_CODEC_ID_H263 ||
s->codec_id == AV_CODEC_ID_H263P) &&
(avctx->sample_aspect_ratio.num > 255 ||
avctx->sample_aspect_ratio.den > 255)) {
av_log(avctx, AV_LOG_ERROR,
"Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
return -1;
}
if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
return -1;
}
// FIXME mpeg2 uses that too
if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
av_log(avctx, AV_LOG_ERROR,
"mpeg2 style quantization not supported by codec\n");
return -1;
}
if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
return -1;
}
if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
s->avctx->mb_decision != FF_MB_DECISION_RD) {
av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
return -1;
}
if (s->avctx->scenechange_threshold < 1000000000 &&
(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
av_log(avctx, AV_LOG_ERROR,
"closed gop with scene change detection are not supported yet, "
"set threshold to 1000000000\n");
return -1;
}
if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR,
"low delay forcing is only available for mpeg2\n");
return -1;
}
if (s->max_b_frames != 0) {
av_log(avctx, AV_LOG_ERROR,
"b frames cannot be used with low delay\n");
return -1;
}
}
if (s->q_scale_type == 1) {
if (avctx->qmax > 12) {
av_log(avctx, AV_LOG_ERROR,
"non linear quant only supports qmax <= 12 currently\n");
return -1;
}
}
if (avctx->slices > 1 &&
(avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
return AVERROR(EINVAL);
}
if (s->avctx->thread_count > 1 &&
s->codec_id != AV_CODEC_ID_MPEG4 &&
s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
(s->codec_id != AV_CODEC_ID_H263P)) {
av_log(avctx, AV_LOG_ERROR,
"multi threaded encoding not supported by codec\n");
return -1;
}
if (s->avctx->thread_count < 1) {
av_log(avctx, AV_LOG_ERROR,
"automatic thread number detection not supported by codec,"
"patch welcome\n");
return -1;
}
if (s->avctx->thread_count > 1)
s->rtp_mode = 1;
if (!avctx->time_base.den || !avctx->time_base.num) {
av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
return -1;
}
if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
av_log(avctx, AV_LOG_INFO,
"notice: b_frame_strategy only affects the first pass\n");
avctx->b_frame_strategy = 0;
}
i = av_gcd(avctx->time_base.den, avctx->time_base.num);
if (i > 1) {
av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
avctx->time_base.den /= i;
avctx->time_base.num /= i;
//return -1;
}
if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
// (a + x * 3 / 8) / x
s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
s->inter_quant_bias = 0;
} else {
s->intra_quant_bias = 0;
// (a - x / 4) / x
s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
}
#if FF_API_QUANT_BIAS
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
s->intra_quant_bias = avctx->intra_quant_bias;
if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
s->inter_quant_bias = avctx->inter_quant_bias;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
s->avctx->time_base.den > (1 << 16) - 1) {
av_log(avctx, AV_LOG_ERROR,
"timebase %d/%d not supported by MPEG 4 standard, "
"the maximum admitted value for the timebase denominator "
"is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
(1 << 16) - 1);
return -1;
}
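/* smallest number of bits that can represent time_base.den - 1 */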
s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
switch (avctx->codec->id) {
case AV_CODEC_ID_MPEG1VIDEO:
s->out_format = FMT_MPEG1;
s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
break;
case AV_CODEC_ID_MPEG2VIDEO:
s->out_format = FMT_MPEG1;
s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
s->rtp_mode = 1;
break;
case AV_CODEC_ID_MJPEG:
s->out_format = FMT_MJPEG;
s->intra_only = 1; /* force intra only for jpeg */
if (!CONFIG_MJPEG_ENCODER ||
ff_mjpeg_encode_init(s) < 0)
return -1;
avctx->delay = 0;
s->low_delay = 1;
break;
case AV_CODEC_ID_H261:
if (!CONFIG_H261_ENCODER)
return -1;
if (ff_h261_get_picture_format(s->width, s->height) < 0) {
av_log(avctx, AV_LOG_ERROR,
"The specified picture size of %dx%d is not valid for the "
"H.261 codec.\nValid sizes are 176x144, 352x288\n",
s->width, s->height);
return -1;
}
s->out_format = FMT_H261;
avctx->delay = 0;
s->low_delay = 1;
s->rtp_mode = 0; /* Sliced encoding not supported */
break;
case AV_CODEC_ID_H263:
if (!CONFIG_H263_ENCODER)
return -1;
if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
s->width, s->height) == 8) {
av_log(avctx, AV_LOG_INFO,
"The specified picture size of %dx%d is not valid for "
"the H.263 codec.\nValid sizes are 128x96, 176x144, "
"352x288, 704x576, and 1408x1152."
"Try H.263+.\n", s->width, s->height);
return -1;
}
s->out_format = FMT_H263;
avctx->delay = 0;
s->low_delay = 1;
break;
case AV_CODEC_ID_H263P:
s->out_format = FMT_H263;
s->h263_plus = 1;
/* Fx */
s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
s->modified_quant = s->h263_aic;
s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
/* /Fx */
/* These are just to be sure */
avctx->delay = 0;
s->low_delay = 1;
break;
case AV_CODEC_ID_FLV1:
s->out_format = FMT_H263;
s->h263_flv = 2; /* format = 1; 11-bit codes */
s->unrestricted_mv = 1;
s->rtp_mode = 0; /* don't allow GOB */
avctx->delay = 0;
s->low_delay = 1;
break;
case AV_CODEC_ID_RV10:
s->out_format = FMT_H263;
avctx->delay = 0;
s->low_delay = 1;
break;
case AV_CODEC_ID_RV20:
s->out_format = FMT_H263;
avctx->delay = 0;
s->low_delay = 1;
s->modified_quant = 1;
s->h263_aic = 1;
s->h263_plus = 1;
s->loop_filter = 1;
s->unrestricted_mv = 0;
break;
case AV_CODEC_ID_MPEG4:
s->out_format = FMT_H263;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->low_delay = s->max_b_frames ? 0 : 1;
avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
break;
case AV_CODEC_ID_MSMPEG4V2:
s->out_format = FMT_H263;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version = 2;
avctx->delay = 0;
s->low_delay = 1;
break;
case AV_CODEC_ID_MSMPEG4V3:
s->out_format = FMT_H263;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version = 3;
s->flipflop_rounding = 1;
avctx->delay = 0;
s->low_delay = 1;
break;
case AV_CODEC_ID_WMV1:
s->out_format = FMT_H263;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version = 4;
s->flipflop_rounding = 1;
avctx->delay = 0;
s->low_delay = 1;
break;
case AV_CODEC_ID_WMV2:
s->out_format = FMT_H263;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version = 5;
s->flipflop_rounding = 1;
avctx->delay = 0;
s->low_delay = 1;
break;
default:
return -1;
}
avctx->has_b_frames = !s->low_delay;
s->encoding = 1;
s->progressive_frame =
s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
AV_CODEC_FLAG_INTERLACED_ME) ||
s->alternate_scan);
/* init */
ff_mpv_idct_init(s);
if (ff_mpv_common_init(s) < 0)
return -1;
if (ARCH_X86)
ff_mpv_encode_init_x86(s);
ff_fdctdsp_init(&s->fdsp, avctx);
ff_me_cmp_init(&s->mecc, avctx);
ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
ff_pixblockdsp_init(&s->pdsp, avctx);
ff_qpeldsp_init(&s->qdsp);
if (s->msmpeg4_version) {
FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
2 * 2 * (MAX_LEVEL + 1) *
(MAX_RUN + 1) * 2 * sizeof(int), fail);
}
FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
MAX_PICTURE_COUNT * sizeof(Picture *), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
MAX_PICTURE_COUNT * sizeof(Picture *), fail);
if (s->avctx->noise_reduction) {
FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
2 * 64 * sizeof(uint16_t), fail);
}
if (CONFIG_H263_ENCODER)
ff_h263dsp_init(&s->h263dsp);
if (!s->dct_quantize)
s->dct_quantize = ff_dct_quantize_c;
if (!s->denoise_dct)
s->denoise_dct = denoise_dct_c;
s->fast_dct_quantize = s->dct_quantize;
if (avctx->trellis)
s->dct_quantize = dct_quantize_trellis_c;
if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
s->chroma_qscale_table = ff_h263_chroma_qscale_table;
s->quant_precision = 5;
ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);
if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
ff_h261_encode_init(s);
if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
ff_h263_encode_init(s);
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
if ((ret = ff_msmpeg4_encode_init(s)) < 0)
return ret;
if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
&& s->out_format == FMT_MPEG1)
ff_mpeg1_encode_init(s);
/* init q matrix */
for (i = 0; i < 64; i++) {
int j = s->idsp.idct_permutation[i];
if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
s->mpeg_quant) {
s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
} else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
s->intra_matrix[j] =
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
} else {
/* mpeg1/2 */
s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}
if (s->avctx->intra_matrix)
s->intra_matrix[j] = s->avctx->intra_matrix[i];
if (s->avctx->inter_matrix)
s->inter_matrix[j] = s->avctx->inter_matrix[i];
}
/* precompute matrix */
/* for mjpeg, we do include qscale in the matrix */
if (s->out_format != FMT_MJPEG) {
ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
s->intra_matrix, s->intra_quant_bias, avctx->qmin,
31, 1);
ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
s->inter_matrix, s->inter_quant_bias, avctx->qmin,
31, 0);
}
if (ff_rate_control_init(s) < 0)
return -1;
#if FF_API_ERROR_RATE
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->error_rate)
s->error_rate = avctx->error_rate;
FF_ENABLE_DEPRECATION_WARNINGS;
#endif
#if FF_API_NORMALIZE_AQP
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
s->mpv_flags |= FF_MPV_FLAG_NAQ;
FF_ENABLE_DEPRECATION_WARNINGS;
#endif
#if FF_API_MV0
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->flags & CODEC_FLAG_MV0)
s->mpv_flags |= FF_MPV_FLAG_MV0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_MPV_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->rc_qsquish != 0.0)
s->rc_qsquish = avctx->rc_qsquish;
if (avctx->rc_qmod_amp != 0.0)
s->rc_qmod_amp = avctx->rc_qmod_amp;
if (avctx->rc_qmod_freq)
s->rc_qmod_freq = avctx->rc_qmod_freq;
if (avctx->rc_buffer_aggressivity != 1.0)
s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
if (avctx->rc_initial_cplx != 0.0)
s->rc_initial_cplx = avctx->rc_initial_cplx;
if (avctx->lmin)
s->lmin = avctx->lmin;
if (avctx->lmax)
s->lmax = avctx->lmax;
if (avctx->rc_eq) {
av_freep(&s->rc_eq);
s->rc_eq = av_strdup(avctx->rc_eq);
if (!s->rc_eq)
return AVERROR(ENOMEM);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (avctx->b_frame_strategy == 2) {
for (i = 0; i < s->max_b_frames + 2; i++) {
s->tmp_frames[i] = av_frame_alloc();
if (!s->tmp_frames[i])
return AVERROR(ENOMEM);
s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
ret = av_frame_get_buffer(s->tmp_frames[i], 32);
if (ret < 0)
return ret;
}
}
return 0;
fail:
ff_mpv_encode_end(avctx);
return AVERROR_UNKNOWN;
}
| true | FFmpeg | f6c94457b44f41d900cd0991857f54e1f0ccedd6 |
8,797 | static int sbr_hf_gen(AACContext *ac, SpectralBandReplication *sbr,
float X_high[64][40][2], const float X_low[32][40][2],
const float (*alpha0)[2], const float (*alpha1)[2],
const float bw_array[5], const uint8_t *t_env,
int bs_num_env)
{
int i, j, x;
int g = 0;
int k = sbr->kx[1];
for (j = 0; j < sbr->num_patches; j++) {
for (x = 0; x < sbr->patch_num_subbands[j]; x++, k++) {
float alpha[4];
const int p = sbr->patch_start_subband[j] + x;
while (g <= sbr->n_q && k >= sbr->f_tablenoise[g])
g++;
g--;
if (g < 0) {
av_log(ac->avctx, AV_LOG_ERROR,
"ERROR : no subband found for frequency %d\n", k);
return -1;
}
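/* scale the LPC prediction coefficients by the chirp factor of this noise band */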
alpha[0] = alpha1[p][0] * bw_array[g] * bw_array[g];
alpha[1] = alpha1[p][1] * bw_array[g] * bw_array[g];
alpha[2] = alpha0[p][0] * bw_array[g];
alpha[3] = alpha0[p][1] * bw_array[g];
for (i = 2 * t_env[0]; i < 2 * t_env[bs_num_env]; i++) {
const int idx = i + ENVELOPE_ADJUSTMENT_OFFSET;
X_high[k][idx][0] =
X_low[p][idx - 2][0] * alpha[0] -
X_low[p][idx - 2][1] * alpha[1] +
X_low[p][idx - 1][0] * alpha[2] -
X_low[p][idx - 1][1] * alpha[3] +
X_low[p][idx][0];
X_high[k][idx][1] =
X_low[p][idx - 2][1] * alpha[0] +
X_low[p][idx - 2][0] * alpha[1] +
X_low[p][idx - 1][1] * alpha[2] +
X_low[p][idx - 1][0] * alpha[3] +
X_low[p][idx][1];
}
}
}
if (k < sbr->m[1] + sbr->kx[1])
memset(X_high + k, 0, (sbr->m[1] + sbr->kx[1] - k) * sizeof(*X_high));
return 0;
}
| false | FFmpeg | aac46e088d67a390489af686b846dea4987d8ffb |
8,798 | static int find_image_range(int *pfirst_index, int *plast_index,
const char *path)
{
char buf[1024];
int range, last_index, range1, first_index;
/* find the first image */
for(first_index = 0; first_index < 5; first_index++) {
if (av_get_frame_filename(buf, sizeof(buf), path, first_index) < 0){
*pfirst_index =
*plast_index = 1;
if (avio_check(buf, AVIO_FLAG_READ) > 0)
return 0;
return -1;
}
if (avio_check(buf, AVIO_FLAG_READ) > 0)
break;
}
if (first_index == 5)
goto fail;
/* find the last image */
last_index = first_index;
for(;;) {
range = 0;
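/* double the probe distance until an image is missing */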
for(;;) {
if (!range)
range1 = 1;
else
range1 = 2 * range;
if (av_get_frame_filename(buf, sizeof(buf), path,
last_index + range1) < 0)
goto fail;
if (avio_check(buf, AVIO_FLAG_READ) <= 0)
break;
range = range1;
/* just in case... */
if (range >= (1 << 30))
goto fail;
}
/* we are sure than image last_index + range exists */
if (!range)
break;
last_index += range;
}
*pfirst_index = first_index;
*plast_index = last_index;
return 0;
fail:
return -1;
}
| false | FFmpeg | 98b3f17bd7182f4fbf318e09068bc19457a82d69 |
8,799 | av_cold void ff_MPV_common_init_x86(MpegEncContext *s)
{
#if HAVE_INLINE_ASM
int cpu_flags = av_get_cpu_flags();
if (cpu_flags & AV_CPU_FLAG_MMX) {
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx;
s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx;
s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx;
if(!(s->flags & CODEC_FLAG_BITEXACT))
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx;
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx;
if (cpu_flags & AV_CPU_FLAG_SSE2) {
s->denoise_dct= denoise_dct_sse2;
} else {
s->denoise_dct= denoise_dct_mmx;
}
}
#endif /* HAVE_INLINE_ASM */
}
| false | FFmpeg | 6369ba3c9cc74becfaad2a8882dff3dd3e7ae3c0 |
8,800 | static int mjpegb_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
{
MJpegDecodeContext *s = avctx->priv_data;
uint8_t *buf_end, *buf_ptr;
AVFrame *picture = data;
GetBitContext hgb; /* for the header */
uint32_t dqt_offs, dht_offs, sof_offs, sos_offs, second_field_offs;
uint32_t field_size, sod_offs;
/* no supplementary picture */
if (buf_size == 0)
return 0;
buf_ptr = buf;
buf_end = buf + buf_size;
read_header:
/* reset on every SOI */
s->restart_interval = 0;
s->mjpb_skiptosod = 0;
init_get_bits(&hgb, buf_ptr, /*buf_size*/(buf_end - buf_ptr)*8);
skip_bits(&hgb, 32); /* reserved zeros */
if (get_bits_long(&hgb, 32) != be2me_32(ff_get_fourcc("mjpg")))
{
dprintf("not mjpeg-b (bad fourcc)\n");
return 0;
}
field_size = get_bits_long(&hgb, 32); /* field size */
dprintf("field size: 0x%x\n", field_size);
skip_bits(&hgb, 32); /* padded field size */
second_field_offs = get_bits_long(&hgb, 32);
dprintf("second field offs: 0x%x\n", second_field_offs);
if (second_field_offs)
s->interlaced = 1;
dqt_offs = get_bits_long(&hgb, 32);
dprintf("dqt offs: 0x%x\n", dqt_offs);
if (dqt_offs)
{
init_get_bits(&s->gb, buf+dqt_offs, (buf_end - (buf+dqt_offs))*8);
s->start_code = DQT;
mjpeg_decode_dqt(s);
}
dht_offs = get_bits_long(&hgb, 32);
dprintf("dht offs: 0x%x\n", dht_offs);
if (dht_offs)
{
init_get_bits(&s->gb, buf+dht_offs, (buf_end - (buf+dht_offs))*8);
s->start_code = DHT;
mjpeg_decode_dht(s);
}
sof_offs = get_bits_long(&hgb, 32);
dprintf("sof offs: 0x%x\n", sof_offs);
if (sof_offs)
{
init_get_bits(&s->gb, buf+sof_offs, (buf_end - (buf+sof_offs))*8);
s->start_code = SOF0;
if (mjpeg_decode_sof(s) < 0)
return -1;
}
sos_offs = get_bits_long(&hgb, 32);
dprintf("sos offs: 0x%x\n", sos_offs);
sod_offs = get_bits_long(&hgb, 32);
dprintf("sod offs: 0x%x\n", sod_offs);
if (sos_offs)
{
// init_get_bits(&s->gb, buf+sos_offs, (buf_end - (buf+sos_offs))*8);
init_get_bits(&s->gb, buf+sos_offs, field_size*8);
s->mjpb_skiptosod = (sod_offs - sos_offs - show_bits(&s->gb, 16));
s->start_code = SOS;
mjpeg_decode_sos(s);
}
if (s->interlaced) {
s->bottom_field ^= 1;
/* if not bottom field, do not output image yet */
if (s->bottom_field && second_field_offs)
{
buf_ptr = buf + second_field_offs;
second_field_offs = 0;
goto read_header;
}
}
//XXX FIXME factorize, this looks very similar to the EOI code
*picture= s->picture;
*data_size = sizeof(AVFrame);
if(!s->lossless){
picture->quality= FFMAX(FFMAX(s->qscale[0], s->qscale[1]), s->qscale[2]);
picture->qstride= 0;
picture->qscale_table= s->qscale_table;
memset(picture->qscale_table, picture->quality, (s->width+15)/16);
if(avctx->debug & FF_DEBUG_QP)
av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality);
picture->quality*= FF_QP2LAMBDA;
}
return buf_ptr - buf;
}
| false | FFmpeg | 934982c4ace1a3d5d627b518782ed092a456c49e |
8,804 | AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
{
AVStream *st;
int i;
AVStream **streams;
if (s->nb_streams >= INT_MAX/sizeof(*streams))
return NULL;
streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
if (!streams)
return NULL;
s->streams = streams;
st = av_mallocz(sizeof(AVStream));
if (!st)
return NULL;
if (!(st->info = av_mallocz(sizeof(*st->info)))) {
av_free(st);
return NULL;
}
st->info->last_dts = AV_NOPTS_VALUE;
st->codec = avcodec_alloc_context3(c);
if (s->iformat) {
/* no default bitrate if decoding */
st->codec->bit_rate = 0;
/* default pts setting is MPEG-like */
avpriv_set_pts_info(st, 33, 1, 90000);
}
st->index = s->nb_streams;
st->start_time = AV_NOPTS_VALUE;
st->duration = AV_NOPTS_VALUE;
/* we set the current DTS to 0 so that formats without any timestamps
* but durations get some timestamps, formats with some unknown
* timestamps have their first few packets buffered and the
* timestamps corrected before they are returned to the user */
st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
st->first_dts = AV_NOPTS_VALUE;
st->probe_packets = MAX_PROBE_PACKETS;
st->pts_wrap_reference = AV_NOPTS_VALUE;
st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
st->last_IP_pts = AV_NOPTS_VALUE;
st->last_dts_for_order_check = AV_NOPTS_VALUE;
for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
st->pts_buffer[i] = AV_NOPTS_VALUE;
st->sample_aspect_ratio = (AVRational) { 0, 1 };
#if FF_API_R_FRAME_RATE
st->info->last_dts = AV_NOPTS_VALUE;
#endif
st->info->fps_first_dts = AV_NOPTS_VALUE;
st->info->fps_last_dts = AV_NOPTS_VALUE;
st->inject_global_side_data = s->internal->inject_global_side_data;
s->streams[s->nb_streams++] = st;
return st;
}
| true | FFmpeg | a66893ac949864352b36b39e48c4cd72bbd81e54 |
8,805 | static void monitor_parse(const char *optarg, const char *mode)
{
static int monitor_device_index = 0;
QemuOpts *opts;
const char *p;
char label[32];
int def = 0;
if (strstart(optarg, "chardev:", &p)) {
snprintf(label, sizeof(label), "%s", p);
} else {
snprintf(label, sizeof(label), "compat_monitor%d",
monitor_device_index);
if (monitor_device_index == 0) {
def = 1;
}
opts = qemu_chr_parse_compat(label, optarg);
if (!opts) {
fprintf(stderr, "parse error: %s\n", optarg);
exit(1);
}
}
opts = qemu_opts_create(qemu_find_opts("mon"), label, 1);
if (!opts) {
fprintf(stderr, "duplicate chardev: %s\n", label);
exit(1);
}
qemu_opt_set(opts, "mode", mode);
qemu_opt_set(opts, "chardev", label);
if (def)
qemu_opt_set(opts, "default", "on");
monitor_device_index++;
}
| true | qemu | 8be7e7e4c72c048b90e3482557954a24bba43ba7 |
8,806 | virtio_gpu_resource_attach_backing(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_resource_attach_backing ab;
int ret;
VIRTIO_GPU_FILL_CMD(ab);
trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);
res = virtio_gpu_find_resource(g, ab.resource_id);
if (!res) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
__func__, ab.resource_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
if (ret != 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
res->iov_cnt = ab.nr_entries;
} | true | qemu | 204f01b30975923c64006f8067f0937b91eea68b |
8,807 | static void qxl_create_guest_primary(PCIQXLDevice *qxl, int loadvm,
qxl_async_io async)
{
QXLDevSurfaceCreate surface;
QXLSurfaceCreate *sc = &qxl->guest_primary.surface;
int size;
int requested_height = le32_to_cpu(sc->height);
int requested_stride = le32_to_cpu(sc->stride);
size = abs(requested_stride) * requested_height;
if (size > qxl->vgamem_size) {
qxl_set_guest_bug(qxl, "%s: requested primary larger then framebuffer"
" size", __func__);
return;
}
if (qxl->mode == QXL_MODE_NATIVE) {
qxl_set_guest_bug(qxl, "%s: nop since already in QXL_MODE_NATIVE",
__func__);
}
qxl_exit_vga_mode(qxl);
surface.format = le32_to_cpu(sc->format);
surface.height = le32_to_cpu(sc->height);
surface.mem = le64_to_cpu(sc->mem);
surface.position = le32_to_cpu(sc->position);
surface.stride = le32_to_cpu(sc->stride);
surface.width = le32_to_cpu(sc->width);
surface.type = le32_to_cpu(sc->type);
surface.flags = le32_to_cpu(sc->flags);
trace_qxl_create_guest_primary(qxl->id, sc->width, sc->height, sc->mem,
sc->format, sc->position);
trace_qxl_create_guest_primary_rest(qxl->id, sc->stride, sc->type,
sc->flags);
surface.mouse_mode = true;
surface.group_id = MEMSLOT_GROUP_GUEST;
if (loadvm) {
surface.flags |= QXL_SURF_FLAG_KEEP_DATA;
}
qxl->mode = QXL_MODE_NATIVE;
qxl->cmdflags = 0;
qemu_spice_create_primary_surface(&qxl->ssd, 0, &surface, async);
if (async == QXL_SYNC) {
qxl_create_guest_primary_complete(qxl);
}
} | true | qemu | 48f4ba671bbb3dd212002d57b72a23375f51619b |
8,808 | pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality)
{
char temp[GET_MODE_BUFFER_SIZE];
char *p= temp;
static const char filterDelimiters[] = ",/";
static const char optionDelimiters[] = ":|";
struct PPMode *ppMode;
char *filterToken;
if (!name) {
av_log(NULL, AV_LOG_ERROR, "pp: Missing argument\n");
return NULL;
if (!strcmp(name, "help")) {
const char *p;
for (p = pp_help; strchr(p, '\n'); p = strchr(p, '\n') + 1) {
av_strlcpy(temp, p, FFMIN(sizeof(temp), strchr(p, '\n') - p + 2));
av_log(NULL, AV_LOG_INFO, "%s", temp);
return NULL;
ppMode= av_malloc(sizeof(PPMode));
ppMode->lumMode= 0;
ppMode->chromMode= 0;
ppMode->maxTmpNoise[0]= 700;
ppMode->maxTmpNoise[1]= 1500;
ppMode->maxTmpNoise[2]= 3000;
ppMode->maxAllowedY= 234;
ppMode->minAllowedY= 16;
ppMode->baseDcDiff= 256/8;
ppMode->flatnessThreshold= 56-16-1;
ppMode->maxClippedThreshold= 0.01;
ppMode->error=0;
memset(temp, 0, GET_MODE_BUFFER_SIZE);
av_strlcpy(temp, name, GET_MODE_BUFFER_SIZE - 1);
av_log(NULL, AV_LOG_DEBUG, "pp: %s\n", name);
for(;;){
char *filterName;
int q= 1000000; //PP_QUALITY_MAX;
int chrom=-1;
int luma=-1;
char *option;
char *options[OPTIONS_ARRAY_SIZE];
int i;
int filterNameOk=0;
int numOfUnknownOptions=0;
int enable=1; //does the user want us to enabled or disabled the filter
filterToken= strtok(p, filterDelimiters);
if(filterToken == NULL) break;
p+= strlen(filterToken) + 1; // p points to next filterToken
filterName= strtok(filterToken, optionDelimiters);
av_log(NULL, AV_LOG_DEBUG, "pp: %s::%s\n", filterToken, filterName);
if(*filterName == '-'){
enable=0;
filterName++;
for(;;){ //for all options
option= strtok(NULL, optionDelimiters);
if(option == NULL) break;
av_log(NULL, AV_LOG_DEBUG, "pp: option: %s\n", option);
if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
else if(!strcmp("noluma", option) || !strcmp("n", option)) luma=0;
else{
options[numOfUnknownOptions] = option;
numOfUnknownOptions++;
if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break;
options[numOfUnknownOptions] = NULL;
/* replace stuff from the replace Table */
for(i=0; replaceTable[2*i]!=NULL; i++){
if(!strcmp(replaceTable[2*i], filterName)){
int newlen= strlen(replaceTable[2*i + 1]);
int plen;
int spaceLeft;
p--, *p=',';
plen= strlen(p);
spaceLeft= p - temp + plen;
if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE - 1){
memmove(p + newlen, p, plen+1);
memcpy(p, replaceTable[2*i + 1], newlen);
filterNameOk=1;
for(i=0; filters[i].shortName!=NULL; i++){
if( !strcmp(filters[i].longName, filterName)
|| !strcmp(filters[i].shortName, filterName)){
ppMode->lumMode &= ~filters[i].mask;
ppMode->chromMode &= ~filters[i].mask;
filterNameOk=1;
if(!enable) break; // user wants to disable it
if(q >= filters[i].minLumQuality && luma)
ppMode->lumMode|= filters[i].mask;
if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
if(q >= filters[i].minChromQuality)
ppMode->chromMode|= filters[i].mask;
if(filters[i].mask == LEVEL_FIX){
int o;
ppMode->minAllowedY= 16;
ppMode->maxAllowedY= 234;
for(o=0; options[o]!=NULL; o++){
if( !strcmp(options[o],"fullyrange")
||!strcmp(options[o],"f")){
ppMode->minAllowedY= 0;
ppMode->maxAllowedY= 255;
numOfUnknownOptions--;
else if(filters[i].mask == TEMP_NOISE_FILTER)
{
int o;
int numOfNoises=0;
for(o=0; options[o]!=NULL; o++){
char *tail;
ppMode->maxTmpNoise[numOfNoises]=
strtol(options[o], &tail, 0);
if(tail!=options[o]){
numOfNoises++;
numOfUnknownOptions--;
if(numOfNoises >= 3) break;
else if(filters[i].mask == V_DEBLOCK || filters[i].mask == H_DEBLOCK
|| filters[i].mask == V_A_DEBLOCK || filters[i].mask == H_A_DEBLOCK){
int o;
for(o=0; options[o]!=NULL && o<2; o++){
char *tail;
int val= strtol(options[o], &tail, 0);
if(tail==options[o]) break;
numOfUnknownOptions--;
if(o==0) ppMode->baseDcDiff= val;
else ppMode->flatnessThreshold= val;
else if(filters[i].mask == FORCE_QUANT){
int o;
ppMode->forcedQuant= 15;
for(o=0; options[o]!=NULL && o<1; o++){
char *tail;
int val= strtol(options[o], &tail, 0);
if(tail==options[o]) break;
numOfUnknownOptions--;
ppMode->forcedQuant= val;
if(!filterNameOk) ppMode->error++;
ppMode->error += numOfUnknownOptions;
av_log(NULL, AV_LOG_DEBUG, "pp: lumMode=%X, chromMode=%X\n", ppMode->lumMode, ppMode->chromMode);
if(ppMode->error){
av_log(NULL, AV_LOG_ERROR, "%d errors in postprocess string \"%s\"\n", ppMode->error, name);
av_free(ppMode);
return NULL;
return ppMode;
| true | FFmpeg | f6173fed608b81e70f40664f356d127618851427 |
8,810 | static int mov_read_extradata(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
uint64_t size;
uint8_t *buf;
int err;
if (c->fc->nb_streams < 1) // will happen with jp2 files
return 0;
st= c->fc->streams[c->fc->nb_streams-1];
size= (uint64_t)st->codec->extradata_size + atom.size + 8 + FF_INPUT_BUFFER_PADDING_SIZE;
if (size > INT_MAX || (uint64_t)atom.size > INT_MAX)
return AVERROR_INVALIDDATA;
if ((err = av_reallocp(&st->codec->extradata, size)) < 0) {
st->codec->extradata_size = 0;
return err;
}
buf = st->codec->extradata + st->codec->extradata_size;
st->codec->extradata_size= size - FF_INPUT_BUFFER_PADDING_SIZE;
AV_WB32( buf , atom.size + 8);
AV_WL32( buf + 4, atom.type);
avio_read(pb, buf + 8, atom.size);
return 0;
}
| true | FFmpeg | 5c720657c23afd798ae0db7c7362eb859a89ab3d |
8,811 | static always_inline void gen_bcond (DisasContext *ctx, int type)
{
uint32_t bo = BO(ctx->opcode);
int l1 = gen_new_label();
TCGv target;
ctx->exception = POWERPC_EXCP_BRANCH;
if (type == BCOND_LR || type == BCOND_CTR) {
target = tcg_temp_local_new();
if (type == BCOND_CTR)
tcg_gen_mov_tl(target, cpu_ctr);
else
tcg_gen_mov_tl(target, cpu_lr);
}
if (LK(ctx->opcode))
gen_setlr(ctx, ctx->nip);
l1 = gen_new_label();
if ((bo & 0x4) == 0) {
/* Decrement and test CTR */
TCGv temp = tcg_temp_new();
if (unlikely(type == BCOND_CTR)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
#if defined(TARGET_PPC64)
if (!ctx->sf_mode)
tcg_gen_ext32u_tl(temp, cpu_ctr);
else
#endif
tcg_gen_mov_tl(temp, cpu_ctr);
if (bo & 0x2) {
tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
} else {
tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
}
tcg_temp_free(temp);
}
if ((bo & 0x10) == 0) {
/* Test CR */
uint32_t bi = BI(ctx->opcode);
uint32_t mask = 1 << (3 - (bi & 0x03));
TCGv_i32 temp = tcg_temp_new_i32();
if (bo & 0x8) {
tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
} else {
tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
}
tcg_temp_free_i32(temp);
}
if (type == BCOND_IM) {
target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
if (likely(AA(ctx->opcode) == 0)) {
gen_goto_tb(ctx, 0, ctx->nip + li - 4);
} else {
gen_goto_tb(ctx, 0, li);
}
gen_set_label(l1);
gen_goto_tb(ctx, 1, ctx->nip);
} else {
#if defined(TARGET_PPC64)
if (!(ctx->sf_mode))
tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
else
#endif
tcg_gen_andi_tl(cpu_nip, target, ~3);
tcg_gen_exit_tb(0);
gen_set_label(l1);
#if defined(TARGET_PPC64)
if (!(ctx->sf_mode))
tcg_gen_movi_tl(cpu_nip, (uint32_t)ctx->nip);
else
#endif
tcg_gen_movi_tl(cpu_nip, ctx->nip);
tcg_gen_exit_tb(0);
}
} | true | qemu | d2e9fd8f703203c2eeeed120b1ef6c3a6574e0ab |
8,812 | void slavio_serial_ms_kbd_init(target_phys_addr_t base, qemu_irq irq,
int disabled, int clock, int it_shift)
{
DeviceState *dev;
SysBusDevice *s;
dev = qdev_create(NULL, "escc");
qdev_prop_set_uint32(dev, "disabled", disabled);
qdev_prop_set_uint32(dev, "frequency", clock);
qdev_prop_set_uint32(dev, "it_shift", it_shift);
qdev_prop_set_chr(dev, "chrB", NULL);
qdev_prop_set_chr(dev, "chrA", NULL);
qdev_prop_set_uint32(dev, "chnBtype", mouse);
qdev_prop_set_uint32(dev, "chnAtype", kbd);
qdev_init(dev);
s = sysbus_from_qdev(dev);
sysbus_connect_irq(s, 0, irq);
sysbus_connect_irq(s, 1, irq);
sysbus_mmio_map(s, 0, base);
}
| true | qemu | e23a1b33b53d25510320b26d9f154e19c6c99725 |
8,813 | static int v9fs_receive_response(V9fsProxy *proxy, int type,
int *status, void *response)
{
int retval;
ProxyHeader header;
struct iovec *reply = &proxy->in_iovec;
*status = 0;
reply->iov_len = 0;
retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ);
if (retval < 0) {
return retval;
}
reply->iov_len = PROXY_HDR_SZ;
proxy_unmarshal(reply, 0, "dd", &header.type, &header.size);
/*
* if response size > PROXY_MAX_IO_SZ, read the response but ignore it and
* return -ENOBUFS
*/
if (header.size > PROXY_MAX_IO_SZ) {
int count;
while (header.size > 0) {
count = MIN(PROXY_MAX_IO_SZ, header.size);
count = socket_read(proxy->sockfd, reply->iov_base, count);
if (count < 0) {
return count;
}
header.size -= count;
}
*status = -ENOBUFS;
return 0;
}
retval = socket_read(proxy->sockfd,
reply->iov_base + PROXY_HDR_SZ, header.size);
if (retval < 0) {
return retval;
}
reply->iov_len += header.size;
/* there was an error during processing request */
if (header.type == T_ERROR) {
int ret;
ret = proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status);
if (ret < 0) {
*status = ret;
}
return 0;
}
switch (type) {
case T_LSTAT: {
ProxyStat prstat;
retval = proxy_unmarshal(reply, PROXY_HDR_SZ,
"qqqdddqqqqqqqqqq", &prstat.st_dev,
&prstat.st_ino, &prstat.st_nlink,
&prstat.st_mode, &prstat.st_uid,
&prstat.st_gid, &prstat.st_rdev,
&prstat.st_size, &prstat.st_blksize,
&prstat.st_blocks,
&prstat.st_atim_sec, &prstat.st_atim_nsec,
&prstat.st_mtim_sec, &prstat.st_mtim_nsec,
&prstat.st_ctim_sec, &prstat.st_ctim_nsec);
prstat_to_stat(response, &prstat);
break;
}
case T_STATFS: {
ProxyStatFS prstfs;
retval = proxy_unmarshal(reply, PROXY_HDR_SZ,
"qqqqqqqqqqq", &prstfs.f_type,
&prstfs.f_bsize, &prstfs.f_blocks,
&prstfs.f_bfree, &prstfs.f_bavail,
&prstfs.f_files, &prstfs.f_ffree,
&prstfs.f_fsid[0], &prstfs.f_fsid[1],
&prstfs.f_namelen, &prstfs.f_frsize);
prstatfs_to_statfs(response, &prstfs);
break;
}
case T_READLINK: {
V9fsString target;
v9fs_string_init(&target);
retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &target);
strcpy(response, target.data);
v9fs_string_free(&target);
break;
}
case T_LGETXATTR:
case T_LLISTXATTR: {
V9fsString xattr;
v9fs_string_init(&xattr);
retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &xattr);
memcpy(response, xattr.data, xattr.size);
v9fs_string_free(&xattr);
break;
}
case T_GETVERSION:
proxy_unmarshal(reply, PROXY_HDR_SZ, "q", response);
break;
default:
return -1;
}
if (retval < 0) {
*status = retval;
}
return 0;
}
| true | qemu | 262169abe74b4c2d8b299b7499904cfc3c1902ea |
8,814 | static int nbd_opt_go(QIOChannel *ioc, const char *wantname,
NBDExportInfo *info, Error **errp)
{
nbd_opt_reply reply;
uint32_t len = strlen(wantname);
uint16_t type;
int error;
char *buf;
/* The protocol requires that the server send NBD_INFO_EXPORT with
* a non-zero flags (at least NBD_FLAG_HAS_FLAGS must be set); so
* flags still 0 is a witness of a broken server. */
info->flags = 0;
trace_nbd_opt_go_start(wantname);
buf = g_malloc(4 + len + 2 + 2 * info->request_sizes + 1);
stl_be_p(buf, len);
memcpy(buf + 4, wantname, len);
/* At most one request, everything else up to server */
stw_be_p(buf + 4 + len, info->request_sizes);
if (info->request_sizes) {
stw_be_p(buf + 4 + len + 2, NBD_INFO_BLOCK_SIZE);
}
if (nbd_send_option_request(ioc, NBD_OPT_GO,
4 + len + 2 + 2 * info->request_sizes, buf,
errp) < 0) {
return -1;
}
while (1) {
if (nbd_receive_option_reply(ioc, NBD_OPT_GO, &reply, errp) < 0) {
return -1;
}
error = nbd_handle_reply_err(ioc, &reply, errp);
if (error <= 0) {
return error;
}
len = reply.length;
if (reply.type == NBD_REP_ACK) {
/* Server is done sending info and moved into transmission
phase, but make sure it sent flags */
if (len) {
error_setg(errp, "server sent invalid NBD_REP_ACK");
nbd_send_opt_abort(ioc);
return -1;
}
if (!info->flags) {
error_setg(errp, "broken server omitted NBD_INFO_EXPORT");
nbd_send_opt_abort(ioc);
return -1;
}
trace_nbd_opt_go_success();
return 1;
}
if (reply.type != NBD_REP_INFO) {
error_setg(errp, "unexpected reply type %" PRIx32
" (%s), expected %x",
reply.type, nbd_rep_lookup(reply.type), NBD_REP_INFO);
nbd_send_opt_abort(ioc);
return -1;
}
if (len < sizeof(type)) {
error_setg(errp, "NBD_REP_INFO length %" PRIu32 " is too short",
len);
nbd_send_opt_abort(ioc);
return -1;
}
if (nbd_read(ioc, &type, sizeof(type), errp) < 0) {
error_prepend(errp, "failed to read info type");
nbd_send_opt_abort(ioc);
return -1;
}
len -= sizeof(type);
be16_to_cpus(&type);
switch (type) {
case NBD_INFO_EXPORT:
if (len != sizeof(info->size) + sizeof(info->flags)) {
error_setg(errp, "remaining export info len %" PRIu32
" is unexpected size", len);
nbd_send_opt_abort(ioc);
return -1;
}
if (nbd_read(ioc, &info->size, sizeof(info->size), errp) < 0) {
error_prepend(errp, "failed to read info size");
nbd_send_opt_abort(ioc);
return -1;
}
be64_to_cpus(&info->size);
if (nbd_read(ioc, &info->flags, sizeof(info->flags), errp) < 0) {
error_prepend(errp, "failed to read info flags");
nbd_send_opt_abort(ioc);
return -1;
}
be16_to_cpus(&info->flags);
trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
break;
case NBD_INFO_BLOCK_SIZE:
if (len != sizeof(info->min_block) * 3) {
error_setg(errp, "remaining export info len %" PRIu32
" is unexpected size", len);
nbd_send_opt_abort(ioc);
return -1;
}
if (nbd_read(ioc, &info->min_block, sizeof(info->min_block),
errp) < 0) {
error_prepend(errp, "failed to read info minimum block size");
nbd_send_opt_abort(ioc);
return -1;
}
be32_to_cpus(&info->min_block);
if (!is_power_of_2(info->min_block)) {
error_setg(errp, "server minimum block size %" PRId32
"is not a power of two", info->min_block);
nbd_send_opt_abort(ioc);
return -1;
}
if (nbd_read(ioc, &info->opt_block, sizeof(info->opt_block),
errp) < 0) {
error_prepend(errp, "failed to read info preferred block size");
nbd_send_opt_abort(ioc);
return -1;
}
be32_to_cpus(&info->opt_block);
if (!is_power_of_2(info->opt_block) ||
info->opt_block < info->min_block) {
error_setg(errp, "server preferred block size %" PRId32
"is not valid", info->opt_block);
nbd_send_opt_abort(ioc);
return -1;
}
if (nbd_read(ioc, &info->max_block, sizeof(info->max_block),
errp) < 0) {
error_prepend(errp, "failed to read info maximum block size");
nbd_send_opt_abort(ioc);
return -1;
}
be32_to_cpus(&info->max_block);
trace_nbd_opt_go_info_block_size(info->min_block, info->opt_block,
info->max_block);
break;
default:
trace_nbd_opt_go_info_unknown(type, nbd_info_lookup(type));
if (nbd_drop(ioc, len, errp) < 0) {
error_prepend(errp, "Failed to read info payload");
nbd_send_opt_abort(ioc);
return -1;
}
break;
}
}
}
| true | qemu | 158b9aa568eabe614d37882dddc2eea534edc7ae |
8,816 | AVResampleContext *swr_resample_init(AVResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff){
double factor= FFMIN(out_rate * cutoff / in_rate, 1.0);
int phase_count= 1<<phase_shift;
if (!c || c->phase_shift != phase_shift || c->linear!=linear || c->factor != factor
|| c->filter_length != FFMAX((int)ceil(filter_size/factor), 1)) {
c = av_mallocz(sizeof(AVResampleContext));
if (!c)
return NULL;
c->phase_shift = phase_shift;
c->phase_mask = phase_count - 1;
c->linear = linear;
c->factor = factor;
c->filter_length = FFMAX((int)ceil(filter_size/factor), 1);
c->filter_bank = av_mallocz(c->filter_length*(phase_count+1)*sizeof(FELEM));
if (!c->filter_bank)
goto error;
if (build_filter(c->filter_bank, factor, c->filter_length, phase_count, 1<<FILTER_SHIFT, WINDOW_TYPE))
goto error;
memcpy(&c->filter_bank[c->filter_length*phase_count+1], c->filter_bank, (c->filter_length-1)*sizeof(FELEM));
c->filter_bank[c->filter_length*phase_count]= c->filter_bank[c->filter_length - 1];
}
c->compensation_distance= 0;
c->src_incr= out_rate;
c->ideal_dst_incr= c->dst_incr= in_rate * phase_count;
c->index= -phase_count*((c->filter_length-1)/2);
c->frac= 0;
return c;
error:
av_free(c->filter_bank);
av_free(c);
return NULL;
}
| true | FFmpeg | a67cb012e6947fb238193afc0f18114f6e20818c |
8,817 | static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
int i;
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
AVCodecContext *dec = st->codec;
InputStream *ist = av_mallocz(sizeof(*ist));
char *framerate = NULL, *hwaccel = NULL, *hwaccel_device = NULL;
if (!ist)
exit_program(1);
GROW_ARRAY(input_streams, nb_input_streams);
input_streams[nb_input_streams - 1] = ist;
ist->st = st;
ist->file_index = nb_input_files;
ist->discard = 1;
st->discard = AVDISCARD_ALL;
ist->ts_scale = 1.0;
MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
ist->dec = choose_decoder(o, ic, st);
ist->opts = filter_codec_opts(o->g->codec_opts, ist->st->codec->codec_id, ic, st, ist->dec);
switch (dec->codec_type) {
case AVMEDIA_TYPE_VIDEO:
ist->resample_height = dec->height;
ist->resample_width = dec->width;
ist->resample_pix_fmt = dec->pix_fmt;
MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st);
if (framerate && av_parse_video_rate(&ist->framerate,
framerate) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error parsing framerate %s.\n",
framerate);
exit_program(1);
}
MATCH_PER_STREAM_OPT(hwaccels, str, hwaccel, ic, st);
if (hwaccel) {
if (!strcmp(hwaccel, "none"))
ist->hwaccel_id = HWACCEL_NONE;
else if (!strcmp(hwaccel, "auto"))
ist->hwaccel_id = HWACCEL_AUTO;
else {
int i;
for (i = 0; hwaccels[i].name; i++) {
if (!strcmp(hwaccels[i].name, hwaccel)) {
ist->hwaccel_id = hwaccels[i].id;
break;
}
}
if (!ist->hwaccel_id) {
av_log(NULL, AV_LOG_FATAL, "Unrecognized hwaccel: %s.\n",
hwaccel);
av_log(NULL, AV_LOG_FATAL, "Supported hwaccels: ");
for (i = 0; hwaccels[i].name; i++)
av_log(NULL, AV_LOG_FATAL, "%s ", hwaccels[i].name);
av_log(NULL, AV_LOG_FATAL, "\n");
exit_program(1);
}
}
}
MATCH_PER_STREAM_OPT(hwaccel_devices, str, hwaccel_device, ic, st);
if (hwaccel_device) {
ist->hwaccel_device = av_strdup(hwaccel_device);
if (!ist->hwaccel_device)
exit_program(1);
}
break;
case AVMEDIA_TYPE_AUDIO:
guess_input_channel_layout(ist);
ist->resample_sample_fmt = dec->sample_fmt;
ist->resample_sample_rate = dec->sample_rate;
ist->resample_channels = dec->channels;
ist->resample_channel_layout = dec->channel_layout;
break;
case AVMEDIA_TYPE_DATA:
case AVMEDIA_TYPE_SUBTITLE:
case AVMEDIA_TYPE_ATTACHMENT:
case AVMEDIA_TYPE_UNKNOWN:
break;
default:
abort();
}
}
} | true | FFmpeg | c255f0b001da3afa4709f5378680880b8a2dbe61 |
8,818 | static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int discard) {
AviSynthContext *avs = s->priv_data;
AVS_VideoFrame *frame;
unsigned char *dst_p;
const unsigned char *src_p;
int n, i, plane, rowsize, planeheight, pitch, bits;
const char *error;
if (avs->curr_frame >= avs->vi->num_frames)
return AVERROR_EOF;
// This must happen even if the stream is discarded to prevent desync.
n = avs->curr_frame++;
if (discard)
return 0;
pkt->pts = n;
pkt->dts = n;
pkt->duration = 1;
// Define the bpp values for the new AviSynth 2.6 colorspaces
if (avs_is_yv24(avs->vi)) {
bits = 24;
} else if (avs_is_yv16(avs->vi)) {
bits = 16;
} else if (avs_is_yv411(avs->vi)) {
bits = 12;
} else if (avs_is_y8(avs->vi)) {
bits = 8;
} else {
bits = avs_bits_per_pixel(avs->vi);
}
// Without cast to int64_t, calculation overflows at about 9k x 9k resolution.
pkt->size = (((int64_t)avs->vi->width * (int64_t)avs->vi->height) * bits) / 8;
if (!pkt->size)
return AVERROR_UNKNOWN;
pkt->data = av_malloc(pkt->size);
if (!pkt->data)
return AVERROR_UNKNOWN;
frame = avs_library->avs_get_frame(avs->clip, n);
error = avs_library->avs_clip_get_error(avs->clip);
if (error) {
av_log(s, AV_LOG_ERROR, "%s\n", error);
return AVERROR_UNKNOWN;
}
dst_p = pkt->data;
for (i = 0; i < avs->n_planes; i++) {
plane = avs->planes[i];
src_p = avs_get_read_ptr_p(frame, plane);
rowsize = avs_get_row_size_p(frame, plane);
planeheight = avs_get_height_p(frame, plane);
pitch = avs_get_pitch_p(frame, plane);
// Flip RGB video.
if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
src_p = src_p + (planeheight - 1) * pitch;
pitch = -pitch;
}
avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
rowsize, planeheight);
dst_p += rowsize * planeheight;
}
avs_library->avs_release_video_frame(frame);
return 0;
}
| true | FFmpeg | 9db353bc4727d2a184778c110cf4ea0b9d1616cb |
8,819 | static inline void out_reg(IVState *s, enum Reg reg, unsigned v)
{
const char *name = reg2str(reg);
QTestState *qtest = global_qtest;
global_qtest = s->qtest;
g_test_message("%x -> *%s\n", v, name);
qpci_io_writel(s->dev, s->reg_base + reg, v);
global_qtest = qtest;
}
| true | qemu | b4ba67d9a702507793c2724e56f98e9b0f7be02b |