id (int32) | func (string) | target (bool) | project (string) | commit_id (string) |
---|---|---|---|---|
4,545 | static void isa_mmio_writeb (void *opaque, target_phys_addr_t addr,
uint32_t val)
{
cpu_outb(addr & IOPORTS_MASK, val);
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
4,546 | static void kill_channel(DBDMA_channel *ch)
{
DBDMA_DPRINTF("kill_channel\n");
ch->regs[DBDMA_STATUS] |= cpu_to_be32(DEAD);
ch->regs[DBDMA_STATUS] &= cpu_to_be32(~ACTIVE);
qemu_irq_raise(ch->irq);
}
| false | qemu | ad674e53b5cce265fadafbde2c6a4f190345cd00 |
4,547 | static void scsi_do_read(SCSIDiskReq *r, int ret)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
assert (r->req.aiocb == NULL);
if (r->req.io_canceled) {
scsi_req_cancel_complete(&r->req);
goto done;
}
if (ret < 0) {
if (scsi_handle_rw_error(r, -ret, false)) {
goto done;
}
}
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
if (r->req.sg) {
dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
r->req.resid -= r->req.sg->size;
r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
r->req.sg, r->sector << BDRV_SECTOR_BITS,
sdc->dma_readv, r, scsi_dma_complete, r,
DMA_DIRECTION_FROM_DEVICE);
} else {
scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
r->qiov.size, BLOCK_ACCT_READ);
r->req.aiocb = sdc->dma_readv(r->sector, &r->qiov,
scsi_read_complete, r, r);
}
done:
scsi_req_unref(&r->req);
}
| false | qemu | 5b956f415a356449a4171d5e0c7d9a25bbc84b5a |
4,548 | static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
uint32_t value, int is_excp)
{
ppc_tb_t *tb_env = cpu->env.tb_env;
if (tb_env->hdecr_timer != NULL) {
__cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
&cpu_ppc_hdecr_excp, hdecr, value, is_excp);
}
}
| false | qemu | e81a982aa5398269a2cc344091ffa4930bdd242f |
4,549 | static void readline_printf_func(void *opaque, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
va_end(ap);
}
| false | qemu | d5d1507b347b7cd6c3b82459b96f1889b29939ef |
4,550 | static ssize_t virtio_net_receive(VLANClientState *vc, const uint8_t *buf, size_t size)
{
VirtIONet *n = vc->opaque;
struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
size_t hdr_len, offset, i;
if (!do_virtio_net_can_receive(n, size))
return 0;
if (!receive_filter(n, buf, size))
return size;
/* hdr_len refers to the header we supply to the guest */
hdr_len = n->mergeable_rx_bufs ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
offset = i = 0;
while (offset < size) {
VirtQueueElement elem;
int len, total;
struct iovec sg[VIRTQUEUE_MAX_SIZE];
len = total = 0;
if ((i != 0 && !n->mergeable_rx_bufs) ||
virtqueue_pop(n->rx_vq, &elem) == 0) {
if (i == 0)
return -1;
fprintf(stderr, "virtio-net truncating packet\n");
exit(1);
}
if (elem.in_num < 1) {
fprintf(stderr, "virtio-net receive queue contains no in buffers\n");
exit(1);
}
if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
fprintf(stderr, "virtio-net header not in first element\n");
exit(1);
}
memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);
if (i == 0) {
if (n->mergeable_rx_bufs)
mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;
offset += receive_header(n, sg, elem.in_num,
buf + offset, size - offset, hdr_len);
total += hdr_len;
}
/* copy in packet. ugh */
len = iov_fill(sg, elem.in_num,
buf + offset, size - offset);
total += len;
/* signal other side */
virtqueue_fill(n->rx_vq, &elem, total, i++);
offset += len;
}
if (mhdr)
mhdr->num_buffers = i;
virtqueue_flush(n->rx_vq, i);
virtio_notify(&n->vdev, n->rx_vq);
return size;
}
| false | qemu | cdd5cc12ba8cf0c068da319370bdd3ba45eaf7ac |
4,552 | static CharDriverState *text_console_init(ChardevVC *vc, Error **errp)
{
CharDriverState *chr;
QemuConsole *s;
unsigned width = 0;
unsigned height = 0;
chr = qemu_chr_alloc();
if (vc->has_width) {
width = vc->width;
} else if (vc->has_cols) {
width = vc->cols * FONT_WIDTH;
}
if (vc->has_height) {
height = vc->height;
} else if (vc->has_rows) {
height = vc->rows * FONT_HEIGHT;
}
trace_console_txt_new(width, height);
if (width == 0 || height == 0) {
s = new_console(NULL, TEXT_CONSOLE, 0);
} else {
s = new_console(NULL, TEXT_CONSOLE_FIXED_SIZE, 0);
s->surface = qemu_create_displaysurface(width, height);
}
if (!s) {
g_free(chr);
error_setg(errp, "cannot create text console");
return NULL;
}
s->chr = chr;
chr->opaque = s;
chr->chr_set_echo = text_console_set_echo;
/* console/chardev init sometimes completes elsewhere in a 2nd
* stage, so defer OPENED events until they are fully initialized
*/
chr->explicit_be_open = true;
if (display_state) {
text_console_do_init(chr, display_state);
}
return chr;
}
| false | qemu | d0d7708ba29cbcc343364a46bff981e0ff88366f |
4,553 | static int vhdx_log_flush(BlockDriverState *bs, BDRVVHDXState *s,
VHDXLogSequence *logs)
{
int ret = 0;
int i;
uint32_t cnt, sectors_read;
uint64_t new_file_size;
void *data = NULL;
int64_t file_length;
VHDXLogDescEntries *desc_entries = NULL;
VHDXLogEntryHeader hdr_tmp = { 0 };
cnt = logs->count;
data = qemu_blockalign(bs, VHDX_LOG_SECTOR_SIZE);
ret = vhdx_user_visible_write(bs, s);
if (ret < 0) {
goto exit;
}
/* each iteration represents one log sequence, which may span multiple
* sectors */
while (cnt--) {
ret = vhdx_log_peek_hdr(bs, &logs->log, &hdr_tmp);
if (ret < 0) {
goto exit;
}
file_length = bdrv_getlength(bs->file->bs);
if (file_length < 0) {
ret = file_length;
goto exit;
}
/* if the log shows a FlushedFileOffset larger than our current file
* size, then that means the file has been truncated / corrupted, and
* we must refused to open it / use it */
if (hdr_tmp.flushed_file_offset > file_length) {
ret = -EINVAL;
goto exit;
}
ret = vhdx_log_read_desc(bs, s, &logs->log, &desc_entries, true);
if (ret < 0) {
goto exit;
}
for (i = 0; i < desc_entries->hdr.descriptor_count; i++) {
if (desc_entries->desc[i].signature == VHDX_LOG_DESC_SIGNATURE) {
/* data sector, so read a sector to flush */
ret = vhdx_log_read_sectors(bs, &logs->log, &sectors_read,
data, 1, false);
if (ret < 0) {
goto exit;
}
if (sectors_read != 1) {
ret = -EINVAL;
goto exit;
}
vhdx_log_data_le_import(data);
}
ret = vhdx_log_flush_desc(bs, &desc_entries->desc[i], data);
if (ret < 0) {
goto exit;
}
}
if (file_length < desc_entries->hdr.last_file_offset) {
new_file_size = desc_entries->hdr.last_file_offset;
if (new_file_size % (1024*1024)) {
/* round up to nearest 1MB boundary */
new_file_size = QEMU_ALIGN_UP(new_file_size, MiB);
if (new_file_size > INT64_MAX) {
ret = -EINVAL;
goto exit;
}
bdrv_truncate(bs->file, new_file_size, PREALLOC_MODE_OFF, NULL);
}
}
qemu_vfree(desc_entries);
desc_entries = NULL;
}
ret = bdrv_flush(bs);
if (ret < 0) {
goto exit;
}
/* once the log is fully flushed, indicate that we have an empty log
* now. This also sets the log guid to 0, to indicate an empty log */
vhdx_log_reset(bs, s);
exit:
qemu_vfree(data);
qemu_vfree(desc_entries);
return ret;
}
| false | qemu | 95d729835f3ceeed977eaf326a7ebb92788dee6d |
4,554 | int ff_intel_h263_decode_picture_header(MpegEncContext *s)
{
int format;
/* picture header */
if (get_bits_long(&s->gb, 22) != 0x20) {
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
return -1;
}
s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */
if (get_bits1(&s->gb) != 1) {
av_log(s->avctx, AV_LOG_ERROR, "Bad marker\n");
return -1; /* marker */
}
if (get_bits1(&s->gb) != 0) {
av_log(s->avctx, AV_LOG_ERROR, "Bad H263 id\n");
return -1; /* h263 id */
}
skip_bits1(&s->gb); /* split screen off */
skip_bits1(&s->gb); /* camera off */
skip_bits1(&s->gb); /* freeze picture release off */
format = get_bits(&s->gb, 3);
if (format == 0 || format == 6) {
av_log(s->avctx, AV_LOG_ERROR, "Intel H263 free format not supported\n");
return -1;
}
s->h263_plus = 0;
s->pict_type = AV_PICTURE_TYPE_I + get_bits1(&s->gb);
s->unrestricted_mv = get_bits1(&s->gb);
s->h263_long_vectors = s->unrestricted_mv;
if (get_bits1(&s->gb) != 0) {
av_log(s->avctx, AV_LOG_ERROR, "SAC not supported\n");
return -1; /* SAC: off */
}
s->obmc= get_bits1(&s->gb);
s->pb_frame = get_bits1(&s->gb);
if (format < 6) {
s->width = ff_h263_format[format][0];
s->height = ff_h263_format[format][1];
s->avctx->sample_aspect_ratio.num = 12;
s->avctx->sample_aspect_ratio.den = 11;
} else {
format = get_bits(&s->gb, 3);
if(format == 0 || format == 7){
av_log(s->avctx, AV_LOG_ERROR, "Wrong Intel H263 format\n");
return -1;
}
if(get_bits(&s->gb, 2))
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
s->loop_filter = get_bits1(&s->gb);
if(get_bits1(&s->gb))
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
if(get_bits1(&s->gb))
s->pb_frame = 2;
if(get_bits(&s->gb, 5))
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
if(get_bits(&s->gb, 5) != 1)
av_log(s->avctx, AV_LOG_ERROR, "Invalid marker\n");
}
if(format == 6){
int ar = get_bits(&s->gb, 4);
skip_bits(&s->gb, 9); // display width
skip_bits1(&s->gb);
skip_bits(&s->gb, 9); // display height
if(ar == 15){
s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 8); // aspect ratio - width
s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 8); // aspect ratio - height
} else {
s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[ar];
}
if (s->avctx->sample_aspect_ratio.num == 0)
av_log(s->avctx, AV_LOG_ERROR, "Invalid aspect ratio.\n");
}
s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
skip_bits1(&s->gb); /* Continuous Presence Multipoint mode: off */
if(s->pb_frame){
skip_bits(&s->gb, 3); //temporal reference for B-frame
skip_bits(&s->gb, 2); //dbquant
}
/* PEI */
while (get_bits1(&s->gb) != 0) {
skip_bits(&s->gb, 8);
}
s->f_code = 1;
s->y_dc_scale_table=
s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
ff_h263_show_pict_info(s);
return 0;
}
| false | FFmpeg | cc229d4e83889d1298f1a0863b55feec6c5c339a |
4,558 | static ssize_t nbd_co_send_reply(NBDRequest *req, struct nbd_reply *reply,
int len)
{
NBDClient *client = req->client;
int csock = client->sock;
ssize_t rc, ret;
qemu_co_mutex_lock(&client->send_lock);
qemu_set_fd_handler2(csock, nbd_can_read, nbd_read,
nbd_restart_write, client);
client->send_coroutine = qemu_coroutine_self();
if (!len) {
rc = nbd_send_reply(csock, reply);
} else {
socket_set_cork(csock, 1);
rc = nbd_send_reply(csock, reply);
if (rc >= 0) {
ret = qemu_co_send(csock, req->data, len);
if (ret != len) {
rc = -EIO;
}
}
socket_set_cork(csock, 0);
}
client->send_coroutine = NULL;
qemu_set_fd_handler2(csock, nbd_can_read, nbd_read, NULL, client);
qemu_co_mutex_unlock(&client->send_lock);
return rc;
}
| false | qemu | 958c717df97ea9ca47a2253b8371130fe5f22980 |
4,559 | int64_t qemu_file_set_rate_limit(QEMUFile *f, int64_t new_rate)
{
/* any failed or completed migration keeps its state to allow probing of
* migration data, but has no associated file anymore */
if (f && f->ops->set_rate_limit)
return f->ops->set_rate_limit(f->opaque, new_rate);
return 0;
}
| false | qemu | 1964a397063967acc5ce71a2a24ed26e74824ee1 |
4,560 | static uint64_t lance_mem_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
SysBusPCNetState *d = opaque;
uint32_t val;
val = pcnet_ioport_readw(&d->state, addr);
trace_lance_mem_readw(addr, val & 0xffff);
return val & 0xffff;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
4,561 | int tcp_socket_outgoing_opts(QemuOpts *opts)
{
Error *local_err = NULL;
int fd = inet_connect_opts(opts, &local_err, NULL, NULL);
if (local_err != NULL) {
qerror_report_err(local_err);
error_free(local_err);
}
return fd;
}
| false | qemu | 77e8b9ca64e85d3d309f322410964b7852ec091e |
4,562 | static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn,
void *src)
{
int result = 0;
if (secn > 0) {
const uint8_t *sp = (const uint8_t *)src;
uint8_t *dp = 0, *dpp = 0;
if (s->bdrv_cur) {
dp = g_malloc(512);
if (!dp || bdrv_read(s->bdrv_cur,
s->secs_cur + (sec >> 5),
dp, 1) < 0) {
result = 1;
} else {
dpp = dp + ((sec & 31) << 4);
}
} else {
if (sec + secn > s->secs_cur) {
result = 1;
} else {
dpp = s->current + (s->secs_cur << 9) + (sec << 4);
}
}
if (!result) {
uint32_t i;
for (i = 0; i < (secn << 4); i++) {
dpp[i] &= sp[i];
}
if (s->bdrv_cur) {
result = bdrv_write(s->bdrv_cur, s->secs_cur + (sec >> 5),
dp, 1) < 0;
}
}
g_free(dp);
}
return result;
}
| false | qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce |
4,564 | static void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
target_sigset_t *set, CPUM68KState *env)
{
struct target_rt_sigframe *frame;
abi_ulong frame_addr;
abi_ulong retcode_addr;
abi_ulong info_addr;
abi_ulong uc_addr;
int err = 0;
int i;
frame_addr = get_sigframe(ka, env, sizeof *frame);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
goto give_sigsegv;
__put_user(sig, &frame->sig);
info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
__put_user(info_addr, &frame->pinfo);
uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
__put_user(uc_addr, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
/* Create the ucontext */
__put_user(0, &frame->uc.tuc_flags);
__put_user(0, &frame->uc.tuc_link);
__put_user(target_sigaltstack_used.ss_sp,
&frame->uc.tuc_stack.ss_sp);
__put_user(sas_ss_flags(env->aregs[7]),
&frame->uc.tuc_stack.ss_flags);
__put_user(target_sigaltstack_used.ss_size,
&frame->uc.tuc_stack.ss_size);
err |= target_rt_setup_ucontext(&frame->uc, env);
if (err)
goto give_sigsegv;
for(i = 0; i < TARGET_NSIG_WORDS; i++) {
if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
goto give_sigsegv;
}
/* Set up to return from userspace. */
retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
__put_user(retcode_addr, &frame->pretcode);
/* moveq #,d0; notb d0; trap #0 */
__put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
(long *)(frame->retcode + 0));
__put_user(0x4e40, (short *)(frame->retcode + 4));
if (err)
goto give_sigsegv;
/* Set up to return from userspace */
env->aregs[7] = frame_addr;
env->pc = ka->_sa_handler;
unlock_user_struct(frame, frame_addr, 1);
return;
give_sigsegv:
unlock_user_struct(frame, frame_addr, 1);
force_sig(TARGET_SIGSEGV);
}
| false | qemu | b0fd8d18683f0d77a8e6b482771ebea82234d727 |
4,565 | static int mov_read_stsz(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
MOVStreamContext *sc;
unsigned int i, entries, sample_size, field_size, num_bytes;
GetBitContext gb;
unsigned char* buf;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
sc = st->priv_data;
avio_r8(pb); /* version */
avio_rb24(pb); /* flags */
if (atom.type == MKTAG('s','t','s','z')) {
sample_size = avio_rb32(pb);
if (!sc->sample_size) /* do not overwrite value computed in stsd */
sc->sample_size = sample_size;
field_size = 32;
} else {
sample_size = 0;
avio_rb24(pb); /* reserved */
field_size = avio_r8(pb);
}
entries = avio_rb32(pb);
av_dlog(c->fc, "sample_size = %d sample_count = %d\n", sc->sample_size, entries);
sc->sample_count = entries;
if (sample_size)
return 0;
if (field_size != 4 && field_size != 8 && field_size != 16 && field_size != 32) {
av_log(c->fc, AV_LOG_ERROR, "Invalid sample field size %d\n", field_size);
return AVERROR_INVALIDDATA;
}
if (!entries)
return 0;
if (entries >= UINT_MAX / sizeof(int) || entries >= (UINT_MAX - 4) / field_size)
return AVERROR_INVALIDDATA;
sc->sample_sizes = av_malloc(entries * sizeof(int));
if (!sc->sample_sizes)
return AVERROR(ENOMEM);
num_bytes = (entries*field_size+4)>>3;
buf = av_malloc(num_bytes+FF_INPUT_BUFFER_PADDING_SIZE);
if (!buf) {
av_freep(&sc->sample_sizes);
return AVERROR(ENOMEM);
}
if (avio_read(pb, buf, num_bytes) < num_bytes) {
av_freep(&sc->sample_sizes);
av_free(buf);
return AVERROR_INVALIDDATA;
}
init_get_bits(&gb, buf, 8*num_bytes);
for (i = 0; i < entries; i++) {
sc->sample_sizes[i] = get_bits_long(&gb, field_size);
sc->data_size += sc->sample_sizes[i];
}
av_free(buf);
return 0;
}
| false | FFmpeg | 9888ffb1ce5e0a17f711b01933d504c72ea29d3b |
4,566 | static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int64_t offset, int is_writable)
{
QEMUFile *f;
f = qemu_mallocz(sizeof(QEMUFile));
if (!f)
return NULL;
f->is_file = 0;
f->bs = bs;
f->is_writable = is_writable;
f->base_offset = offset;
return f;
}
| false | qemu | 5dafc53f1fb091d242f2179ffcb43bb28af36d1e |
4,567 | static void virtio_setup(void)
{
struct irb irb;
int i;
int r;
bool found = false;
blk_schid.one = 1;
for (i = 0; i < 0x10000; i++) {
blk_schid.sch_no = i;
r = tsch(blk_schid, &irb);
if (r != 3) {
if (virtio_is_blk(blk_schid)) {
found = true;
break;
}
}
}
if (!found) {
virtio_panic("No virtio-blk device found!\n");
}
virtio_setup_block(blk_schid);
}
| false | qemu | 22d67ab55aad82383a0e5628b902a1a0556b2fc9 |
4,568 | int if_encap(Slirp *slirp, struct mbuf *ifm)
{
uint8_t buf[1600];
struct ethhdr *eh = (struct ethhdr *)buf;
uint8_t ethaddr[ETH_ALEN];
const struct ip *iph = (const struct ip *)ifm->m_data;
int ret;
if (ifm->m_len + ETH_HLEN > sizeof(buf)) {
return 1;
}
switch (iph->ip_v) {
case IPVERSION:
ret = if_encap4(slirp, ifm, eh, ethaddr);
if (ret < 2) {
return ret;
}
break;
default:
/* Do not assert while we don't manage IP6VERSION */
/* assert(0); */
break;
}
memcpy(eh->h_dest, ethaddr, ETH_ALEN);
DEBUG_ARGS((dfd, " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
eh->h_source[0], eh->h_source[1], eh->h_source[2],
eh->h_source[3], eh->h_source[4], eh->h_source[5]));
DEBUG_ARGS((dfd, " dst = %02x:%02x:%02x:%02x:%02x:%02x\n",
eh->h_dest[0], eh->h_dest[1], eh->h_dest[2],
eh->h_dest[3], eh->h_dest[4], eh->h_dest[5]));
memcpy(buf + sizeof(struct ethhdr), ifm->m_data, ifm->m_len);
slirp_output(slirp->opaque, buf, ifm->m_len + ETH_HLEN);
return 1;
}
| false | qemu | 0d6ff71ae3c7ac3a446d295ef71884a05093b37c |
4,572 | static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
{
TaskState *ts = (TaskState *)env->opaque;
struct elf_thread_status *ets;
ets = qemu_mallocz(sizeof (*ets));
ets->num_notes = 1; /* only prstatus is dumped */
fill_prstatus(&ets->prstatus, ts, 0);
elf_core_copy_regs(&ets->prstatus.pr_reg, env);
fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
&ets->prstatus);
TAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
info->notes_size += note_size(&ets->notes[0]);
}
| false | qemu | 72cf2d4f0e181d0d3a3122e04129c58a95da713e |
4,573 | static void omap_screen_dump(void *opaque, const char *filename, bool cswitch,
Error **errp)
{
struct omap_lcd_panel_s *omap_lcd = opaque;
DisplaySurface *surface = qemu_console_surface(omap_lcd->con);
omap_update_display(opaque);
if (omap_lcd && surface_data(surface))
omap_ppm_save(filename, surface_data(surface),
omap_lcd->width, omap_lcd->height,
surface_stride(surface), errp);
}
| false | qemu | 2c62f08ddbf3fa80dc7202eb9a2ea60ae44e2cc5 |
4,574 | static void con_disconnect(struct XenDevice *xendev)
{
struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
if (!xendev->dev) {
return;
}
if (con->chr) {
qemu_chr_add_handlers(con->chr, NULL, NULL, NULL, NULL);
qemu_chr_fe_release(con->chr);
}
xen_be_unbind_evtchn(&con->xendev);
if (con->sring) {
if (!xendev->gnttabdev) {
munmap(con->sring, XC_PAGE_SIZE);
} else {
xc_gnttab_munmap(xendev->gnttabdev, con->sring, 1);
}
con->sring = NULL;
}
}
| false | qemu | 549e9bcabc2f5b37b0be8c24257e0b527bffb49a |
4,576 | static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
AVFilterInOut *in)
{
AVFilterContext *last_filter;
const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
InputStream *ist = ifilter->ist;
InputFile *f = input_files[ist->file_index];
AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
ist->st->time_base;
AVRational sar;
char args[255], name[255];
int ret, pad_idx = 0;
sar = ist->st->sample_aspect_ratio.num ?
ist->st->sample_aspect_ratio :
ist->st->codec->sample_aspect_ratio;
snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
ist->st->codec->height, ist->st->codec->pix_fmt,
tb.num, tb.den, sar.num, sar.den);
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
args, NULL, fg->graph)) < 0)
return ret;
last_filter = ifilter->filter;
if (ist->framerate.num) {
AVFilterContext *setpts;
snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&setpts,
avfilter_get_by_name("setpts"),
name, "N", NULL,
fg->graph)) < 0)
return ret;
if ((ret = avfilter_link(last_filter, 0, setpts, 0)) < 0)
return ret;
last_filter = setpts;
}
snprintf(name, sizeof(name), "trim for input stream %d:%d",
ist->file_index, ist->st->index);
ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
AV_NOPTS_VALUE : 0, INT64_MAX, &last_filter, &pad_idx, name);
if (ret < 0)
return ret;
if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
return ret;
return 0;
}
| false | FFmpeg | 488a0fa68973d48e264d54f1722f7afb18afbea7 |
4,577 | void ff_put_h264_qpel16_mc33_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_hv_qrt_16w_msa(src + stride - 2,
src - (stride * 2) +
sizeof(uint8_t), stride, dst, stride, 16);
}
| false | FFmpeg | 2aab7c2dfaca4386c38e5d565cd2bf73096bcc86 |
4,579 | int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
int64_t offset, int bytes)
{
uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE;
int n1;
if ((offset + bytes) <= bs_size) {
return bytes;
}
if (offset >= bs_size) {
n1 = 0;
} else {
n1 = bs_size - offset;
}
qemu_iovec_memset(qiov, n1, 0, bytes - n1);
return n1;
}
| true | qemu | 546a7dc40e8b8b6440a052e2b5cdfe9aadcaccf6 |
4,581 | static void property_get_bool(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
BoolProperty *prop = opaque;
bool value;
value = prop->get(obj, errp);
visit_type_bool(v, &value, name, errp);
}
| true | qemu | 4715d42efe8632b0f9d2594a80e917de45e4ef88 |
4,582 | static int read_header(ShortenContext *s)
{
int i, ret;
int maxnlpc = 0;
/* shorten signature */
if (get_bits_long(&s->gb, 32) != AV_RB32("ajkg")) {
av_log(s->avctx, AV_LOG_ERROR, "missing shorten magic 'ajkg'\n");
s->lpcqoffset = 0;
s->blocksize = DEFAULT_BLOCK_SIZE;
s->nmean = -1;
s->version = get_bits(&s->gb, 8);
s->internal_ftype = get_uint(s, TYPESIZE);
s->channels = get_uint(s, CHANSIZE);
if (!s->channels) {
av_log(s->avctx, AV_LOG_ERROR, "No channels reported\n");
if (s->channels > MAX_CHANNELS) {
av_log(s->avctx, AV_LOG_ERROR, "too many channels: %d\n", s->channels);
s->channels = 0;
s->avctx->channels = s->channels;
/* get blocksize if version > 0 */
if (s->version > 0) {
int skip_bytes;
unsigned blocksize;
blocksize = get_uint(s, av_log2(DEFAULT_BLOCK_SIZE));
if (!blocksize || blocksize > MAX_BLOCKSIZE) {
av_log(s->avctx, AV_LOG_ERROR,
"invalid or unsupported block size: %d\n",
blocksize);
return AVERROR(EINVAL);
s->blocksize = blocksize;
maxnlpc = get_uint(s, LPCQSIZE);
s->nmean = get_uint(s, 0);
skip_bytes = get_uint(s, NSKIPSIZE);
if ((unsigned)skip_bytes > get_bits_left(&s->gb)/8) {
av_log(s->avctx, AV_LOG_ERROR, "invalid skip_bytes: %d\n", skip_bytes);
for (i = 0; i < skip_bytes; i++)
skip_bits(&s->gb, 8);
s->nwrap = FFMAX(NWRAP, maxnlpc);
if ((ret = allocate_buffers(s)) < 0)
return ret;
if ((ret = init_offset(s)) < 0)
return ret;
if (s->version > 1)
s->lpcqoffset = V2LPCQOFFSET;
if (s->avctx->extradata_size > 0)
goto end;
if (get_ur_golomb_shorten(&s->gb, FNSIZE) != FN_VERBATIM) {
av_log(s->avctx, AV_LOG_ERROR,
"missing verbatim section at beginning of stream\n");
s->header_size = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE);
if (s->header_size >= OUT_BUFFER_SIZE ||
s->header_size < CANONICAL_HEADER_SIZE) {
av_log(s->avctx, AV_LOG_ERROR, "header is wrong size: %d\n",
s->header_size);
for (i = 0; i < s->header_size; i++)
s->header[i] = (char)get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE);
if (AV_RL32(s->header) == MKTAG('R','I','F','F')) {
if ((ret = decode_wave_header(s->avctx, s->header, s->header_size)) < 0)
return ret;
} else if (AV_RL32(s->header) == MKTAG('F','O','R','M')) {
if ((ret = decode_aiff_header(s->avctx, s->header, s->header_size)) < 0)
return ret;
} else {
avpriv_report_missing_feature(s->avctx, "unsupported bit packing %"
PRIX32, AV_RL32(s->header));
return AVERROR_PATCHWELCOME;
end:
s->cur_chan = 0;
s->bitshift = 0;
s->got_header = 1;
return 0;
| true | FFmpeg | e77ddd31a8e14bcf5eccd6008d866ae90b4b0d4c |
4,584 | int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_pix_fmt,
int has_alpha)
{
const PixFmtInfo *pf, *ps;
const AVPixFmtDescriptor *src_desc = &av_pix_fmt_descriptors[src_pix_fmt];
const AVPixFmtDescriptor *dst_desc = &av_pix_fmt_descriptors[dst_pix_fmt];
int loss;
ps = &pix_fmt_info[src_pix_fmt];
/* compute loss */
loss = 0;
pf = &pix_fmt_info[dst_pix_fmt];
if (pf->depth < ps->depth ||
((dst_pix_fmt == PIX_FMT_RGB555BE || dst_pix_fmt == PIX_FMT_RGB555LE ||
dst_pix_fmt == PIX_FMT_BGR555BE || dst_pix_fmt == PIX_FMT_BGR555LE) &&
(src_pix_fmt == PIX_FMT_RGB565BE || src_pix_fmt == PIX_FMT_RGB565LE ||
src_pix_fmt == PIX_FMT_BGR565BE || src_pix_fmt == PIX_FMT_BGR565LE)))
loss |= FF_LOSS_DEPTH;
if (dst_desc->log2_chroma_w > src_desc->log2_chroma_w ||
dst_desc->log2_chroma_h > src_desc->log2_chroma_h)
loss |= FF_LOSS_RESOLUTION;
switch(pf->color_type) {
case FF_COLOR_RGB:
if (ps->color_type != FF_COLOR_RGB &&
ps->color_type != FF_COLOR_GRAY)
loss |= FF_LOSS_COLORSPACE;
break;
case FF_COLOR_GRAY:
if (ps->color_type != FF_COLOR_GRAY)
loss |= FF_LOSS_COLORSPACE;
break;
case FF_COLOR_YUV:
if (ps->color_type != FF_COLOR_YUV)
loss |= FF_LOSS_COLORSPACE;
break;
case FF_COLOR_YUV_JPEG:
if (ps->color_type != FF_COLOR_YUV_JPEG &&
ps->color_type != FF_COLOR_YUV &&
ps->color_type != FF_COLOR_GRAY)
loss |= FF_LOSS_COLORSPACE;
break;
default:
/* fail safe test */
if (ps->color_type != pf->color_type)
loss |= FF_LOSS_COLORSPACE;
break;
}
if (pf->color_type == FF_COLOR_GRAY &&
ps->color_type != FF_COLOR_GRAY)
loss |= FF_LOSS_CHROMA;
if (!pf->is_alpha && (ps->is_alpha && has_alpha))
loss |= FF_LOSS_ALPHA;
if (pf->pixel_type == FF_PIXEL_PALETTE &&
(ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
loss |= FF_LOSS_COLORQUANT;
return loss;
}
| false | FFmpeg | d7e14c0d103a2c9cca6c50568e09b40d6f48ea19 |
4,585 | static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
{
uint8_t seed;
int i;
out[0] = get_bits(bitbuf, 8);
seed = string_table[out[0]];
for (i = 1; i <= out[0]; i++) {
out[i] = get_bits(bitbuf, 8) ^ seed;
seed = string_table[out[i] ^ seed];
}
}
| false | FFmpeg | e91ba2efa949470e9157b652535d207a101f91e0 |
4,586 | static int vda_h264_end_frame(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
VDAContext *vda = avctx->internal->hwaccel_priv_data;
AVVDAContext *vda_ctx = avctx->hwaccel_context;
AVFrame *frame = h->cur_pic_ptr->f;
uint32_t flush_flags = 1 << 0; ///< kVDADecoderFlush_emitFrames
CFDataRef coded_frame;
OSStatus status;
if (!vda->bitstream_size)
return AVERROR_INVALIDDATA;
coded_frame = CFDataCreate(kCFAllocatorDefault,
vda->bitstream,
vda->bitstream_size);
status = VDADecoderDecode(vda_ctx->decoder, 0, coded_frame, NULL);
if (status == kVDADecoderNoErr)
status = VDADecoderFlush(vda_ctx->decoder, flush_flags);
CFRelease(coded_frame);
if (!vda->frame)
return AVERROR_UNKNOWN;
if (status != kVDADecoderNoErr) {
av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
return AVERROR_UNKNOWN;
}
av_buffer_unref(&frame->buf[0]);
frame->buf[0] = av_buffer_create((uint8_t*)vda->frame,
sizeof(vda->frame),
release_buffer, NULL,
AV_BUFFER_FLAG_READONLY);
if (!frame->buf)
return AVERROR(ENOMEM);
frame->data[3] = (uint8_t*)vda->frame;
vda->frame = NULL;
return 0;
}
| false | FFmpeg | 80f955c90867561dcce769216bc497e13281eb38 |
4,587 | int img_pad(AVPicture *dst, const AVPicture *src, int height, int width,
int pix_fmt, int padtop, int padbottom, int padleft, int padright,
int *color)
{
uint8_t *optr, *iptr;
int y_shift;
int x_shift;
int yheight;
int i, y;
if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
!is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
for (i = 0; i < 3; i++) {
x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
if (padtop || padleft) {
memset(dst->data[i], color[i],
dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
}
if (padleft || padright || src) {
if (src) { /* first line */
iptr = src->data[i];
optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
(padleft >> x_shift);
memcpy(optr, iptr, src->linesize[i]);
iptr += src->linesize[i];
}
optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
(dst->linesize[i] - (padright >> x_shift));
yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
for (y = 0; y < yheight; y++) {
memset(optr, color[i], (padleft + padright) >> x_shift);
if (src) {
memcpy(optr + ((padleft + padright) >> x_shift), iptr,
src->linesize[i]);
iptr += src->linesize[i];
}
optr += dst->linesize[i];
}
}
if (padbottom || padright) {
optr = dst->data[i] + dst->linesize[i] *
((height - padbottom) >> y_shift) - (padright >> x_shift);
memset(optr, color[i],dst->linesize[i] *
(padbottom >> y_shift) + (padright >> x_shift));
}
}
return 0;
}
| false | FFmpeg | 79acfb0e133317c3a38c55b73c3b80f3212af2f9 |
4,588 | void avfilter_draw_slice(AVFilterLink *link, int y, int h)
{
uint8_t *src[4], *dst[4];
int i, j, hsub, vsub;
/* copy the slice if needed for permission reasons */
if(link->srcpic) {
avcodec_get_chroma_sub_sample(link->format, &hsub, &vsub);
src[0] = link->srcpic-> data[0] + y * link->srcpic-> linesize[0];
dst[0] = link->cur_pic->data[0] + y * link->cur_pic->linesize[0];
for(i = 1; i < 4; i ++) {
if(link->srcpic->data[i]) {
src[i] = link->srcpic-> data[i] + (y >> vsub) * link->srcpic-> linesize[i];
dst[i] = link->cur_pic->data[i] + (y >> vsub) * link->cur_pic->linesize[i];
} else
src[i] = dst[i] = NULL;
}
for(j = 0; j < h; j ++) {
memcpy(dst[0], src[0], link->cur_pic->linesize[0]);
src[0] += link->srcpic ->linesize[0];
dst[0] += link->cur_pic->linesize[0];
}
for(i = 1; i < 4; i ++) {
if(!src[i]) continue;
for(j = 0; j < h >> vsub; j ++) {
memcpy(dst[i], src[i], link->cur_pic->linesize[i]);
src[i] += link->srcpic ->linesize[i];
dst[i] += link->cur_pic->linesize[i];
}
}
}
if(!link_dpad(link).draw_slice)
return;
link_dpad(link).draw_slice(link, y, h);
}
| false | FFmpeg | 19dc71045d2833d2b3b77648608687eb687b4af1 |
4,589 | static int ff_sctp_send(int s, const void *msg, size_t len,
const struct sctp_sndrcvinfo *sinfo, int flags)
{
struct msghdr outmsg;
struct iovec iov;
outmsg.msg_name = NULL;
outmsg.msg_namelen = 0;
outmsg.msg_iov = &iov;
iov.iov_base = msg;
iov.iov_len = len;
outmsg.msg_iovlen = 1;
outmsg.msg_controllen = 0;
if (sinfo) {
char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
struct cmsghdr *cmsg;
outmsg.msg_control = outcmsg;
outmsg.msg_controllen = sizeof(outcmsg);
outmsg.msg_flags = 0;
cmsg = CMSG_FIRSTHDR(&outmsg);
cmsg->cmsg_level = IPPROTO_SCTP;
cmsg->cmsg_type = SCTP_SNDRCV;
cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
outmsg.msg_controllen = cmsg->cmsg_len;
memcpy(CMSG_DATA(cmsg), sinfo, sizeof(struct sctp_sndrcvinfo));
}
return sendmsg(s, &outmsg, flags | MSG_NOSIGNAL);
}
| false | FFmpeg | 8ef98855d25e457094468e2e1a79d9b10d6445b2 |
4,590 | static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size)
{
int h, w;
uint8_t *Y1, *Y2, *U, *V;
int ret;
if (src_size < avctx->width * avctx->height * 3LL / 2) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
Y1 = pic->data[0];
Y2 = pic->data[0] + pic->linesize[0];
U = pic->data[1];
V = pic->data[2];
for (h = 0; h < avctx->height; h += 2) {
for (w = 0; w < avctx->width; w += 2) {
AV_COPY16(Y1 + w, src);
AV_COPY16(Y2 + w, src + 2);
U[w >> 1] = src[4] + 0x80;
V[w >> 1] = src[5] + 0x80;
src += 6;
}
Y1 += pic->linesize[0] << 1;
Y2 += pic->linesize[0] << 1;
U += pic->linesize[1];
V += pic->linesize[2];
}
return 0;
}
| false | FFmpeg | 9caa9414ccf2dcf8aee2695377dee830a5024c82 |
4,591 | void ff_avg_h264_qpel16_mc13_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_hv_qrt_and_aver_dst_16x16_msa(src + stride - 2,
src - (stride * 2),
stride, dst, stride);
}
| false | FFmpeg | 1181d93231e9b807965724587d363c1cfd5a1d0d |
4,592 | void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0,
int log2_trafo_size, enum ScanType scan_idx,
int c_idx)
{
#define GET_COORD(offset, n) \
do { \
x_c = (x_cg << 2) + scan_x_off[n]; \
y_c = (y_cg << 2) + scan_y_off[n]; \
} while (0)
HEVCLocalContext *lc = s->HEVClc;
int transform_skip_flag = 0;
int last_significant_coeff_x, last_significant_coeff_y;
int last_scan_pos;
int n_end;
int num_coeff = 0;
int greater1_ctx = 1;
int num_last_subset;
int x_cg_last_sig, y_cg_last_sig;
const uint8_t *scan_x_cg, *scan_y_cg, *scan_x_off, *scan_y_off;
ptrdiff_t stride = s->frame->linesize[c_idx];
int hshift = s->sps->hshift[c_idx];
int vshift = s->sps->vshift[c_idx];
uint8_t *dst = &s->frame->data[c_idx][(y0 >> vshift) * stride +
((x0 >> hshift) << s->sps->pixel_shift)];
int16_t *coeffs = lc->tu.coeffs[c_idx > 0];
uint8_t significant_coeff_group_flag[8][8] = {{0}};
int explicit_rdpcm_flag = 0;
int explicit_rdpcm_dir_flag;
int trafo_size = 1 << log2_trafo_size;
int i;
int qp,shift,add,scale,scale_m;
const uint8_t level_scale[] = { 40, 45, 51, 57, 64, 72 };
const uint8_t *scale_matrix = NULL;
uint8_t dc_scale;
int pred_mode_intra = (c_idx == 0) ? lc->tu.intra_pred_mode :
lc->tu.intra_pred_mode_c;
memset(coeffs, 0, trafo_size * trafo_size * sizeof(int16_t));
// Derive QP for dequant
if (!lc->cu.cu_transquant_bypass_flag) {
static const int qp_c[] = { 29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37 };
static const uint8_t rem6[51 + 4 * 6 + 1] = {
0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
4, 5, 0, 1, 2, 3, 4, 5, 0, 1
};
static const uint8_t div6[51 + 4 * 6 + 1] = {
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
10, 10, 11, 11, 11, 11, 11, 11, 12, 12
};
int qp_y = lc->qp_y;
if (s->pps->transform_skip_enabled_flag &&
log2_trafo_size <= s->pps->log2_max_transform_skip_block_size) {
transform_skip_flag = ff_hevc_transform_skip_flag_decode(s, c_idx);
}
if (c_idx == 0) {
qp = qp_y + s->sps->qp_bd_offset;
} else {
int qp_i, offset;
if (c_idx == 1)
offset = s->pps->cb_qp_offset + s->sh.slice_cb_qp_offset +
lc->tu.cu_qp_offset_cb;
else
offset = s->pps->cr_qp_offset + s->sh.slice_cr_qp_offset +
lc->tu.cu_qp_offset_cr;
qp_i = av_clip(qp_y + offset, - s->sps->qp_bd_offset, 57);
if (s->sps->chroma_format_idc == 1) {
if (qp_i < 30)
qp = qp_i;
else if (qp_i > 43)
qp = qp_i - 6;
else
qp = qp_c[qp_i - 30];
} else {
if (qp_i > 51)
qp = 51;
else
qp = qp_i;
}
qp += s->sps->qp_bd_offset;
}
shift = s->sps->bit_depth + log2_trafo_size - 5;
add = 1 << (shift-1);
scale = level_scale[rem6[qp]] << (div6[qp]);
scale_m = 16; // default when no custom scaling lists.
dc_scale = 16;
if (s->sps->scaling_list_enable_flag && !(transform_skip_flag && log2_trafo_size > 2)) {
const ScalingList *sl = s->pps->scaling_list_data_present_flag ?
&s->pps->scaling_list : &s->sps->scaling_list;
int matrix_id = lc->cu.pred_mode != MODE_INTRA;
matrix_id = 3 * matrix_id + c_idx;
scale_matrix = sl->sl[log2_trafo_size - 2][matrix_id];
if (log2_trafo_size >= 4)
dc_scale = sl->sl_dc[log2_trafo_size - 4][matrix_id];
}
} else {
shift = 0;
add = 0;
scale = 0;
dc_scale = 0;
}
if (lc->cu.pred_mode == MODE_INTER && s->sps->explicit_rdpcm_enabled_flag &&
(transform_skip_flag || lc->cu.cu_transquant_bypass_flag)) {
explicit_rdpcm_flag = explicit_rdpcm_flag_decode(s, c_idx);
if (explicit_rdpcm_flag) {
explicit_rdpcm_dir_flag = explicit_rdpcm_dir_flag_decode(s, c_idx);
}
}
last_significant_coeff_xy_prefix_decode(s, c_idx, log2_trafo_size,
&last_significant_coeff_x, &last_significant_coeff_y);
if (last_significant_coeff_x > 3) {
int suffix = last_significant_coeff_suffix_decode(s, last_significant_coeff_x);
last_significant_coeff_x = (1 << ((last_significant_coeff_x >> 1) - 1)) *
(2 + (last_significant_coeff_x & 1)) +
suffix;
}
if (last_significant_coeff_y > 3) {
int suffix = last_significant_coeff_suffix_decode(s, last_significant_coeff_y);
last_significant_coeff_y = (1 << ((last_significant_coeff_y >> 1) - 1)) *
(2 + (last_significant_coeff_y & 1)) +
suffix;
}
if (scan_idx == SCAN_VERT)
FFSWAP(int, last_significant_coeff_x, last_significant_coeff_y);
x_cg_last_sig = last_significant_coeff_x >> 2;
y_cg_last_sig = last_significant_coeff_y >> 2;
switch (scan_idx) {
case SCAN_DIAG: {
int last_x_c = last_significant_coeff_x & 3;
int last_y_c = last_significant_coeff_y & 3;
scan_x_off = ff_hevc_diag_scan4x4_x;
scan_y_off = ff_hevc_diag_scan4x4_y;
num_coeff = diag_scan4x4_inv[last_y_c][last_x_c];
if (trafo_size == 4) {
scan_x_cg = scan_1x1;
scan_y_cg = scan_1x1;
} else if (trafo_size == 8) {
num_coeff += diag_scan2x2_inv[y_cg_last_sig][x_cg_last_sig] << 4;
scan_x_cg = diag_scan2x2_x;
scan_y_cg = diag_scan2x2_y;
} else if (trafo_size == 16) {
num_coeff += diag_scan4x4_inv[y_cg_last_sig][x_cg_last_sig] << 4;
scan_x_cg = ff_hevc_diag_scan4x4_x;
scan_y_cg = ff_hevc_diag_scan4x4_y;
} else { // trafo_size == 32
num_coeff += diag_scan8x8_inv[y_cg_last_sig][x_cg_last_sig] << 4;
scan_x_cg = ff_hevc_diag_scan8x8_x;
scan_y_cg = ff_hevc_diag_scan8x8_y;
}
break;
}
case SCAN_HORIZ:
scan_x_cg = horiz_scan2x2_x;
scan_y_cg = horiz_scan2x2_y;
scan_x_off = horiz_scan4x4_x;
scan_y_off = horiz_scan4x4_y;
num_coeff = horiz_scan8x8_inv[last_significant_coeff_y][last_significant_coeff_x];
break;
default: //SCAN_VERT
scan_x_cg = horiz_scan2x2_y;
scan_y_cg = horiz_scan2x2_x;
scan_x_off = horiz_scan4x4_y;
scan_y_off = horiz_scan4x4_x;
num_coeff = horiz_scan8x8_inv[last_significant_coeff_x][last_significant_coeff_y];
break;
}
num_coeff++;
num_last_subset = (num_coeff - 1) >> 4;
for (i = num_last_subset; i >= 0; i--) {
int n, m;
int x_cg, y_cg, x_c, y_c, pos;
int implicit_non_zero_coeff = 0;
int64_t trans_coeff_level;
int prev_sig = 0;
int offset = i << 4;
int rice_init = 0;
uint8_t significant_coeff_flag_idx[16];
uint8_t nb_significant_coeff_flag = 0;
x_cg = scan_x_cg[i];
y_cg = scan_y_cg[i];
if ((i < num_last_subset) && (i > 0)) {
int ctx_cg = 0;
if (x_cg < (1 << (log2_trafo_size - 2)) - 1)
ctx_cg += significant_coeff_group_flag[x_cg + 1][y_cg];
if (y_cg < (1 << (log2_trafo_size - 2)) - 1)
ctx_cg += significant_coeff_group_flag[x_cg][y_cg + 1];
significant_coeff_group_flag[x_cg][y_cg] =
significant_coeff_group_flag_decode(s, c_idx, ctx_cg);
implicit_non_zero_coeff = 1;
} else {
significant_coeff_group_flag[x_cg][y_cg] =
((x_cg == x_cg_last_sig && y_cg == y_cg_last_sig) ||
(x_cg == 0 && y_cg == 0));
}
last_scan_pos = num_coeff - offset - 1;
if (i == num_last_subset) {
n_end = last_scan_pos - 1;
significant_coeff_flag_idx[0] = last_scan_pos;
nb_significant_coeff_flag = 1;
} else {
n_end = 15;
}
if (x_cg < ((1 << log2_trafo_size) - 1) >> 2)
prev_sig = !!significant_coeff_group_flag[x_cg + 1][y_cg];
if (y_cg < ((1 << log2_trafo_size) - 1) >> 2)
prev_sig += (!!significant_coeff_group_flag[x_cg][y_cg + 1] << 1);
if (significant_coeff_group_flag[x_cg][y_cg] && n_end >= 0) {
static const uint8_t ctx_idx_map[] = {
0, 1, 4, 5, 2, 3, 4, 5, 6, 6, 8, 8, 7, 7, 8, 8, // log2_trafo_size == 2
1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, // prev_sig == 0
2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, // prev_sig == 1
2, 1, 0, 0, 2, 1, 0, 0, 2, 1, 0, 0, 2, 1, 0, 0, // prev_sig == 2
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 // default
};
const uint8_t *ctx_idx_map_p;
int scf_offset = 0;
if (s->sps->transform_skip_context_enabled_flag &&
(transform_skip_flag || lc->cu.cu_transquant_bypass_flag)) {
ctx_idx_map_p = (uint8_t*) &ctx_idx_map[4 * 16];
if (c_idx == 0) {
scf_offset = 40;
} else {
scf_offset = 14 + 27;
}
} else {
if (c_idx != 0)
scf_offset = 27;
if (log2_trafo_size == 2) {
ctx_idx_map_p = (uint8_t*) &ctx_idx_map[0];
} else {
ctx_idx_map_p = (uint8_t*) &ctx_idx_map[(prev_sig + 1) << 4];
if (c_idx == 0) {
if ((x_cg > 0 || y_cg > 0))
scf_offset += 3;
if (log2_trafo_size == 3) {
scf_offset += (scan_idx == SCAN_DIAG) ? 9 : 15;
} else {
scf_offset += 21;
}
} else {
if (log2_trafo_size == 3)
scf_offset += 9;
else
scf_offset += 12;
}
}
}
for (n = n_end; n > 0; n--) {
x_c = scan_x_off[n];
y_c = scan_y_off[n];
if (significant_coeff_flag_decode(s, x_c, y_c, scf_offset, ctx_idx_map_p)) {
significant_coeff_flag_idx[nb_significant_coeff_flag] = n;
nb_significant_coeff_flag++;
implicit_non_zero_coeff = 0;
}
}
if (implicit_non_zero_coeff == 0) {
if (s->sps->transform_skip_context_enabled_flag &&
(transform_skip_flag || lc->cu.cu_transquant_bypass_flag)) {
if (c_idx == 0) {
scf_offset = 42;
} else {
scf_offset = 16 + 27;
}
} else {
if (i == 0) {
if (c_idx == 0)
scf_offset = 0;
else
scf_offset = 27;
} else {
scf_offset = 2 + scf_offset;
}
}
if (significant_coeff_flag_decode_0(s, c_idx, scf_offset) == 1) {
significant_coeff_flag_idx[nb_significant_coeff_flag] = 0;
nb_significant_coeff_flag++;
}
} else {
significant_coeff_flag_idx[nb_significant_coeff_flag] = 0;
nb_significant_coeff_flag++;
}
}
n_end = nb_significant_coeff_flag;
if (n_end) {
int first_nz_pos_in_cg;
int last_nz_pos_in_cg;
int c_rice_param = 0;
int first_greater1_coeff_idx = -1;
uint8_t coeff_abs_level_greater1_flag[8];
uint16_t coeff_sign_flag;
int sum_abs = 0;
int sign_hidden;
int sb_type;
// initialize first elem of coeff_bas_level_greater1_flag
int ctx_set = (i > 0 && c_idx == 0) ? 2 : 0;
if (s->sps->persistent_rice_adaptation_enabled_flag) {
if (!transform_skip_flag && !lc->cu.cu_transquant_bypass_flag)
sb_type = 2 * (c_idx == 0 ? 1 : 0);
else
sb_type = 2 * (c_idx == 0 ? 1 : 0) + 1;
c_rice_param = lc->stat_coeff[sb_type] / 4;
}
if (!(i == num_last_subset) && greater1_ctx == 0)
ctx_set++;
greater1_ctx = 1;
last_nz_pos_in_cg = significant_coeff_flag_idx[0];
for (m = 0; m < (n_end > 8 ? 8 : n_end); m++) {
int inc = (ctx_set << 2) + greater1_ctx;
coeff_abs_level_greater1_flag[m] =
coeff_abs_level_greater1_flag_decode(s, c_idx, inc);
if (coeff_abs_level_greater1_flag[m]) {
greater1_ctx = 0;
if (first_greater1_coeff_idx == -1)
first_greater1_coeff_idx = m;
} else if (greater1_ctx > 0 && greater1_ctx < 3) {
greater1_ctx++;
}
}
first_nz_pos_in_cg = significant_coeff_flag_idx[n_end - 1];
if (lc->cu.cu_transquant_bypass_flag ||
(lc->cu.pred_mode == MODE_INTRA &&
s->sps->implicit_rdpcm_enabled_flag && transform_skip_flag &&
(pred_mode_intra == 10 || pred_mode_intra == 26 )) ||
explicit_rdpcm_flag)
sign_hidden = 0;
else
sign_hidden = (last_nz_pos_in_cg - first_nz_pos_in_cg >= 4);
if (first_greater1_coeff_idx != -1) {
coeff_abs_level_greater1_flag[first_greater1_coeff_idx] += coeff_abs_level_greater2_flag_decode(s, c_idx, ctx_set);
}
if (!s->pps->sign_data_hiding_flag || !sign_hidden ) {
coeff_sign_flag = coeff_sign_flag_decode(s, nb_significant_coeff_flag) << (16 - nb_significant_coeff_flag);
} else {
coeff_sign_flag = coeff_sign_flag_decode(s, nb_significant_coeff_flag - 1) << (16 - (nb_significant_coeff_flag - 1));
}
for (m = 0; m < n_end; m++) {
n = significant_coeff_flag_idx[m];
GET_COORD(offset, n);
if (m < 8) {
trans_coeff_level = 1 + coeff_abs_level_greater1_flag[m];
if (trans_coeff_level == ((m == first_greater1_coeff_idx) ? 3 : 2)) {
int last_coeff_abs_level_remaining = coeff_abs_level_remaining_decode(s, c_rice_param);
trans_coeff_level += last_coeff_abs_level_remaining;
if (trans_coeff_level > (3 << c_rice_param))
c_rice_param = s->sps->persistent_rice_adaptation_enabled_flag ? c_rice_param + 1 : FFMIN(c_rice_param + 1, 4);
if (s->sps->persistent_rice_adaptation_enabled_flag && !rice_init) {
int c_rice_p_init = lc->stat_coeff[sb_type] / 4;
if (last_coeff_abs_level_remaining >= (3 << c_rice_p_init))
lc->stat_coeff[sb_type]++;
else if (2 * last_coeff_abs_level_remaining < (1 << c_rice_p_init))
if (lc->stat_coeff[sb_type] > 0)
lc->stat_coeff[sb_type]--;
rice_init = 1;
}
}
} else {
int last_coeff_abs_level_remaining = coeff_abs_level_remaining_decode(s, c_rice_param);
trans_coeff_level = 1 + last_coeff_abs_level_remaining;
if (trans_coeff_level > (3 << c_rice_param))
c_rice_param = s->sps->persistent_rice_adaptation_enabled_flag ? c_rice_param + 1 : FFMIN(c_rice_param + 1, 4);
if (s->sps->persistent_rice_adaptation_enabled_flag && !rice_init) {
int c_rice_p_init = lc->stat_coeff[sb_type] / 4;
if (last_coeff_abs_level_remaining >= (3 << c_rice_p_init))
lc->stat_coeff[sb_type]++;
else if (2 * last_coeff_abs_level_remaining < (1 << c_rice_p_init))
if (lc->stat_coeff[sb_type] > 0)
lc->stat_coeff[sb_type]--;
rice_init = 1;
}
}
if (s->pps->sign_data_hiding_flag && sign_hidden) {
sum_abs += trans_coeff_level;
if (n == first_nz_pos_in_cg && (sum_abs&1))
trans_coeff_level = -trans_coeff_level;
}
if (coeff_sign_flag >> 15)
trans_coeff_level = -trans_coeff_level;
coeff_sign_flag <<= 1;
if(!lc->cu.cu_transquant_bypass_flag) {
if (s->sps->scaling_list_enable_flag && !(transform_skip_flag && log2_trafo_size > 2)) {
if(y_c || x_c || log2_trafo_size < 4) {
switch(log2_trafo_size) {
case 3: pos = (y_c << 3) + x_c; break;
case 4: pos = ((y_c >> 1) << 3) + (x_c >> 1); break;
case 5: pos = ((y_c >> 2) << 3) + (x_c >> 2); break;
default: pos = (y_c << 2) + x_c; break;
}
scale_m = scale_matrix[pos];
} else {
scale_m = dc_scale;
}
}
trans_coeff_level = (trans_coeff_level * (int64_t)scale * (int64_t)scale_m + add) >> shift;
if(trans_coeff_level < 0) {
if((~trans_coeff_level) & 0xFffffffffff8000)
trans_coeff_level = -32768;
} else {
if(trans_coeff_level & 0xffffffffffff8000)
trans_coeff_level = 32767;
}
}
coeffs[y_c * trafo_size + x_c] = trans_coeff_level;
}
}
}
if (lc->cu.cu_transquant_bypass_flag) {
if (explicit_rdpcm_flag || (s->sps->implicit_rdpcm_enabled_flag &&
(pred_mode_intra == 10 || pred_mode_intra == 26))) {
int mode = s->sps->implicit_rdpcm_enabled_flag ? (pred_mode_intra == 26) : explicit_rdpcm_dir_flag;
s->hevcdsp.transform_rdpcm(coeffs, log2_trafo_size, mode);
}
} else {
if (transform_skip_flag) {
int rot = s->sps->transform_skip_rotation_enabled_flag &&
log2_trafo_size == 2 &&
lc->cu.pred_mode == MODE_INTRA;
if (rot) {
for (i = 0; i < 8; i++)
FFSWAP(int16_t, coeffs[i], coeffs[16 - i - 1]);
}
s->hevcdsp.transform_skip(coeffs, log2_trafo_size);
if (explicit_rdpcm_flag || (s->sps->implicit_rdpcm_enabled_flag &&
lc->cu.pred_mode == MODE_INTRA &&
(pred_mode_intra == 10 || pred_mode_intra == 26))) {
int mode = explicit_rdpcm_flag ? explicit_rdpcm_dir_flag : (pred_mode_intra == 26);
s->hevcdsp.transform_rdpcm(coeffs, log2_trafo_size, mode);
}
} else if (lc->cu.pred_mode == MODE_INTRA && c_idx == 0 && log2_trafo_size == 2) {
s->hevcdsp.idct_4x4_luma(coeffs);
} else {
int max_xy = FFMAX(last_significant_coeff_x, last_significant_coeff_y);
if (max_xy == 0)
s->hevcdsp.idct_dc[log2_trafo_size-2](coeffs);
else {
int col_limit = last_significant_coeff_x + last_significant_coeff_y + 4;
if (max_xy < 4)
col_limit = FFMIN(4, col_limit);
else if (max_xy < 8)
col_limit = FFMIN(8, col_limit);
else if (max_xy < 12)
col_limit = FFMIN(24, col_limit);
s->hevcdsp.idct[log2_trafo_size-2](coeffs, col_limit);
}
}
}
if (lc->tu.cross_pf) {
int16_t *coeffs_y = lc->tu.coeffs[0];
for (i = 0; i < (trafo_size * trafo_size); i++) {
coeffs[i] = coeffs[i] + ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
}
}
s->hevcdsp.transform_add[log2_trafo_size-2](dst, coeffs, stride);
}
| false | FFmpeg | 9a3653c9ecc4bbbbb502513a70bccd4090ed12b0 |
4,593 | static int mov_write_sidx_tag(AVIOContext *pb,
MOVTrack *track, int ref_size, int total_sidx_size)
{
int64_t pos = avio_tell(pb), offset_pos, end_pos;
int64_t presentation_time, duration, offset;
int starts_with_SAP, i, entries;
if (track->entry) {
entries = 1;
presentation_time = track->start_dts + track->frag_start +
track->cluster[0].cts;
duration = track->end_pts -
(track->cluster[0].dts + track->cluster[0].cts);
starts_with_SAP = track->cluster[0].flags & MOV_SYNC_SAMPLE;
// pts<0 should be cut away using edts
if (presentation_time < 0) {
duration += presentation_time;
presentation_time = 0;
}
} else {
entries = track->nb_frag_info;
presentation_time = track->frag_info[0].time;
}
avio_wb32(pb, 0); /* size */
ffio_wfourcc(pb, "sidx");
avio_w8(pb, 1); /* version */
avio_wb24(pb, 0);
avio_wb32(pb, track->track_id); /* reference_ID */
avio_wb32(pb, track->timescale); /* timescale */
avio_wb64(pb, presentation_time); /* earliest_presentation_time */
offset_pos = avio_tell(pb);
avio_wb64(pb, 0); /* first_offset (offset to referenced moof) */
avio_wb16(pb, 0); /* reserved */
avio_wb16(pb, entries); /* reference_count */
for (i = 0; i < entries; i++) {
if (!track->entry) {
if (i > 1 && track->frag_info[i].offset != track->frag_info[i - 1].offset + track->frag_info[i - 1].size) {
av_log(NULL, AV_LOG_ERROR, "Non-consecutive fragments, writing incorrect sidx\n");
}
duration = track->frag_info[i].duration;
ref_size = track->frag_info[i].size;
starts_with_SAP = 1;
}
avio_wb32(pb, (0 << 31) | (ref_size & 0x7fffffff)); /* reference_type (0 = media) | referenced_size */
avio_wb32(pb, duration); /* subsegment_duration */
avio_wb32(pb, (starts_with_SAP << 31) | (0 << 28) | 0); /* starts_with_SAP | SAP_type | SAP_delta_time */
}
end_pos = avio_tell(pb);
offset = pos + total_sidx_size - end_pos;
avio_seek(pb, offset_pos, SEEK_SET);
avio_wb64(pb, offset);
avio_seek(pb, end_pos, SEEK_SET);
return update_size(pb, pos);
} | true | FFmpeg | 8e34089e265a6b01e1e3301e8864439d26793753 |
4,595 | static uint64_t vfio_rtl8168_window_quirk_read(void *opaque,
hwaddr addr, unsigned size)
{
VFIOQuirk *quirk = opaque;
VFIOPCIDevice *vdev = quirk->vdev;
switch (addr) {
case 4: /* address */
if (quirk->data.flags) {
trace_vfio_rtl8168_window_quirk_read_fake(
memory_region_name(&quirk->mem),
vdev->vbasedev.name);
return quirk->data.address_match ^ 0x80000000U;
}
break;
case 0: /* data */
if (quirk->data.flags) {
uint64_t val;
trace_vfio_rtl8168_window_quirk_read_table(
memory_region_name(&quirk->mem),
vdev->vbasedev.name);
if (!(vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
return 0;
}
memory_region_dispatch_read(&vdev->pdev.msix_table_mmio,
(hwaddr)(quirk->data.address_match
& 0xfff),
&val,
size,
MEMTXATTRS_UNSPECIFIED);
return val;
}
}
trace_vfio_rtl8168_window_quirk_read_direct(memory_region_name(&quirk->mem),
vdev->vbasedev.name);
return vfio_region_read(&vdev->bars[quirk->data.bar].region,
addr + 0x70, size);
}
| true | qemu | d451008e0fdf7fb817c791397e7999d5f3687e58 |
4,596 | static int teletext_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *pkt)
{
TeletextContext *ctx = avctx->priv_data;
AVSubtitle *sub = data;
const uint8_t *buf = pkt->data;
int left = pkt->size;
uint8_t pesheader[45] = {0x00, 0x00, 0x01, 0xbd, 0x00, 0x00, 0x85, 0x80, 0x24, 0x21, 0x00, 0x01, 0x00, 0x01};
int pesheader_size = sizeof(pesheader);
const uint8_t *pesheader_buf = pesheader;
int ret = 0;
if (!ctx->vbi) {
if (!(ctx->vbi = vbi_decoder_new()))
return AVERROR(ENOMEM);
if (!vbi_event_handler_add(ctx->vbi, VBI_EVENT_TTX_PAGE, handler, ctx)) {
vbi_decoder_delete(ctx->vbi);
ctx->vbi = NULL;
return AVERROR(ENOMEM);
}
}
if (!ctx->dx && (!(ctx->dx = vbi_dvb_pes_demux_new (/* callback */ NULL, NULL))))
return AVERROR(ENOMEM);
if (avctx->pkt_timebase.den && pkt->pts != AV_NOPTS_VALUE)
ctx->pts = av_rescale_q(pkt->pts, avctx->pkt_timebase, AV_TIME_BASE_Q);
if (left) {
// We allow unreasonably big packets, even if the standard only allows a max size of 1472
if ((pesheader_size + left) < 184 || (pesheader_size + left) > 65504 || (pesheader_size + left) % 184 != 0)
return AVERROR_INVALIDDATA;
memset(pesheader + 14, 0xff, pesheader_size - 14);
AV_WB16(pesheader + 4, left + pesheader_size - 6);
/* PTS is deliberately left as 0 in the PES header, otherwise libzvbi uses
* it to detect dropped frames. Unforunatey the guessed packet PTS values
* (see mpegts demuxer) are not accurate enough to pass that test. */
vbi_dvb_demux_cor(ctx->dx, ctx->sliced, 64, NULL, &pesheader_buf, &pesheader_size);
ctx->handler_ret = pkt->size;
while (left > 0) {
int64_t pts = 0;
unsigned int lines = vbi_dvb_demux_cor(ctx->dx, ctx->sliced, 64, &pts, &buf, &left);
av_dlog(avctx, "ctx=%p buf_size=%d left=%u lines=%u pts=%f pkt_pts=%f\n",
ctx, pkt->size, left, lines, (double)pts/90000.0, (double)pkt->pts/90000.0);
if (lines > 0) {
#ifdef DEBUGx
int i;
for(i=0; i<lines; ++i)
av_log(avctx, AV_LOG_DEBUG,
"lines=%d id=%x\n", i, ctx->sliced[i].id);
#endif
vbi_decode(ctx->vbi, ctx->sliced, lines, (double)pts/90000.0);
ctx->lines_processed += lines;
}
}
ctx->pts = AV_NOPTS_VALUE;
ret = ctx->handler_ret;
}
if (ret < 0)
return ret;
// is there a subtitle to pass?
if (ctx->nb_pages) {
int i;
sub->format = ctx->format_id;
sub->start_display_time = 0;
sub->end_display_time = ctx->sub_duration;
sub->num_rects = 0;
sub->pts = ctx->pages->pts;
if (ctx->pages->sub_rect->type != SUBTITLE_NONE) {
sub->rects = av_malloc(sizeof(*sub->rects) * 1);
if (sub->rects) {
sub->num_rects = 1;
sub->rects[0] = ctx->pages->sub_rect;
} else {
ret = AVERROR(ENOMEM);
}
} else {
av_log(avctx, AV_LOG_DEBUG, "sending empty sub\n");
sub->rects = NULL;
}
if (!sub->rects) // no rect was passed
subtitle_rect_free(&ctx->pages->sub_rect);
for (i = 0; i < ctx->nb_pages - 1; i++)
ctx->pages[i] = ctx->pages[i + 1];
ctx->nb_pages--;
if (ret >= 0)
*data_size = 1;
} else
*data_size = 0;
return ret;
}
| true | FFmpeg | 085ca7dcdbf9ab6c23e3a5397b1f6d4aa23f763d |
4,597 | static int enable_write_target(BlockDriverState *bs, Error **errp)
{
BDRVVVFATState *s = bs->opaque;
BlockDriver *bdrv_qcow = NULL;
BlockDriverState *backing;
QemuOpts *opts = NULL;
int ret;
int size = sector2cluster(s, s->sector_count);
QDict *options;
s->used_clusters = calloc(size, 1);
array_init(&(s->commits), sizeof(commit_t));
s->qcow_filename = g_malloc(PATH_MAX);
ret = get_tmp_filename(s->qcow_filename, PATH_MAX);
if (ret < 0) {
error_setg_errno(errp, -ret, "can't create temporary file");
goto err;
}
bdrv_qcow = bdrv_find_format("qcow");
if (!bdrv_qcow) {
error_setg(errp, "Failed to locate qcow driver");
ret = -ENOENT;
goto err;
}
opts = qemu_opts_create(bdrv_qcow->create_opts, NULL, 0, &error_abort);
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, s->sector_count * 512,
&error_abort);
qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, "fat:", &error_abort);
ret = bdrv_create(bdrv_qcow, s->qcow_filename, opts, errp);
qemu_opts_del(opts);
if (ret < 0) {
goto err;
}
options = qdict_new();
qdict_put(options, "write-target.driver", qstring_from_str("qcow"));
s->qcow = bdrv_open_child(s->qcow_filename, options, "write-target", bs,
&child_vvfat_qcow, false, errp);
QDECREF(options);
if (!s->qcow) {
ret = -EINVAL;
goto err;
}
#ifndef _WIN32
unlink(s->qcow_filename);
#endif
backing = bdrv_new();
bdrv_set_backing_hd(s->bs, backing);
bdrv_unref(backing);
s->bs->backing->bs->drv = &vvfat_write_target;
s->bs->backing->bs->opaque = g_new(void *, 1);
*(void**)s->bs->backing->bs->opaque = s;
return 0;
err:
g_free(s->qcow_filename);
s->qcow_filename = NULL;
return ret;
}
| true | qemu | a8a4d15c1c34d3cec704fb64eba4a3745a140a97 |
4,601 | static int apng_read_header(AVFormatContext *s)
{
APNGDemuxContext *ctx = s->priv_data;
AVIOContext *pb = s->pb;
uint32_t len, tag;
AVStream *st;
int acTL_found = 0;
int64_t ret = AVERROR_INVALIDDATA;
/* verify PNGSIG */
if (avio_rb64(pb) != PNGSIG)
return ret;
/* parse IHDR (must be first chunk) */
len = avio_rb32(pb);
tag = avio_rl32(pb);
if (len != 13 || tag != MKTAG('I', 'H', 'D', 'R'))
return ret;
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
/* set the timebase to something large enough (1/100,000 of second)
* to hopefully cope with all sane frame durations */
avpriv_set_pts_info(st, 64, 1, 100000);
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_APNG;
st->codecpar->width = avio_rb32(pb);
st->codecpar->height = avio_rb32(pb);
if ((ret = av_image_check_size(st->codecpar->width, st->codecpar->height, 0, s)) < 0)
return ret;
/* extradata will contain every chunk up to the first fcTL (excluded) */
ctx->extra_data = av_malloc(len + 12 + AV_INPUT_BUFFER_PADDING_SIZE);
if (!ctx->extra_data)
return AVERROR(ENOMEM);
ctx->extra_data_size = len + 12;
AV_WB32(ctx->extra_data, len);
AV_WL32(ctx->extra_data+4, tag);
AV_WB32(ctx->extra_data+8, st->codecpar->width);
AV_WB32(ctx->extra_data+12, st->codecpar->height);
if ((ret = avio_read(pb, ctx->extra_data+16, 9)) < 0)
goto fail;
while (!avio_feof(pb)) {
if (acTL_found && ctx->num_play != 1) {
int64_t size = avio_size(pb);
int64_t offset = avio_tell(pb);
if (size < 0) {
ret = size;
goto fail;
} else if (offset < 0) {
ret = offset;
goto fail;
} else if ((ret = ffio_ensure_seekback(pb, size - offset)) < 0) {
av_log(s, AV_LOG_WARNING, "Could not ensure seekback, will not loop\n");
ctx->num_play = 1;
}
}
if ((ctx->num_play == 1 || !acTL_found) &&
((ret = ffio_ensure_seekback(pb, 4 /* len */ + 4 /* tag */)) < 0))
goto fail;
len = avio_rb32(pb);
if (len > 0x7fffffff) {
ret = AVERROR_INVALIDDATA;
goto fail;
}
tag = avio_rl32(pb);
switch (tag) {
case MKTAG('a', 'c', 'T', 'L'):
if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0 ||
(ret = append_extradata(ctx, pb, len + 12)) < 0)
goto fail;
acTL_found = 1;
ctx->num_frames = AV_RB32(ctx->extra_data + ret + 8);
ctx->num_play = AV_RB32(ctx->extra_data + ret + 12);
av_log(s, AV_LOG_DEBUG, "num_frames: %"PRIu32", num_play: %"PRIu32"\n",
ctx->num_frames, ctx->num_play);
break;
case MKTAG('f', 'c', 'T', 'L'):
if (!acTL_found) {
ret = AVERROR_INVALIDDATA;
goto fail;
}
if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0)
goto fail;
return 0;
default:
if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0 ||
(ret = append_extradata(ctx, pb, len + 12)) < 0)
goto fail;
}
}
fail:
if (ctx->extra_data_size) {
av_freep(&ctx->extra_data);
ctx->extra_data_size = 0;
}
return ret;
}
| true | FFmpeg | 16c429166ddf1736972b6ccce84bd3509ec16a34 |
4,602 | static int local_statfs(FsContext *s, V9fsPath *fs_path, struct statfs *stbuf)
{
char *buffer;
int ret;
char *path = fs_path->data;
buffer = rpath(s, path);
ret = statfs(buffer, stbuf);
g_free(buffer);
return ret;
}
| true | qemu | 31e51d1c15b35dc98b88a301812914b70a2b55dc |
4,603 | int av_image_fill_black(uint8_t *dst_data[4], const ptrdiff_t dst_linesize[4],
enum AVPixelFormat pix_fmt, enum AVColorRange range,
int width, int height)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int nb_planes = av_pix_fmt_count_planes(pix_fmt);
// A pixel or a group of pixels on each plane, with a value that represents black.
// Consider e.g. AV_PIX_FMT_UYVY422 for non-trivial cases.
uint8_t clear_block[4][MAX_BLOCK_SIZE] = {0}; // clear padding with 0
int clear_block_size[4] = {0};
ptrdiff_t plane_line_bytes[4] = {0};
int rgb, limited;
int plane, c;
if (!desc || nb_planes < 1 || nb_planes > 4 || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
return AVERROR(EINVAL);
rgb = !!(desc->flags & AV_PIX_FMT_FLAG_RGB);
limited = !rgb && range != AVCOL_RANGE_JPEG;
if (desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) {
ptrdiff_t bytewidth = av_image_get_linesize(pix_fmt, width, 0);
uint8_t *data;
int mono = pix_fmt == AV_PIX_FMT_MONOWHITE || pix_fmt == AV_PIX_FMT_MONOBLACK;
int fill = pix_fmt == AV_PIX_FMT_MONOWHITE ? 0xFF : 0;
if (nb_planes != 1 || !(rgb || mono) || bytewidth < 1)
return AVERROR(EINVAL);
if (!dst_data)
return 0;
data = dst_data[0];
// (Bitstream + alpha will be handled incorrectly - it'll remain transparent.)
for (;height > 0; height--) {
memset(data, fill, bytewidth);
data += dst_linesize[0];
}
return 0;
}
for (c = 0; c < desc->nb_components; c++) {
const AVComponentDescriptor comp = desc->comp[c];
// We try to operate on entire non-subsampled pixel groups (for
// AV_PIX_FMT_UYVY422 this would mean two consecutive pixels).
clear_block_size[comp.plane] = FFMAX(clear_block_size[comp.plane], comp.step);
if (clear_block_size[comp.plane] > MAX_BLOCK_SIZE)
return AVERROR(EINVAL);
}
// Create a byte array for clearing 1 pixel (sometimes several pixels).
for (c = 0; c < desc->nb_components; c++) {
const AVComponentDescriptor comp = desc->comp[c];
// (Multiple pixels happen e.g. with AV_PIX_FMT_UYVY422.)
int w = clear_block_size[comp.plane] / comp.step;
uint8_t *c_data[4];
const int c_linesize[4] = {0};
uint16_t src_array[MAX_BLOCK_SIZE];
uint16_t src = 0;
int x;
if (comp.depth > 16)
return AVERROR(EINVAL);
if (!rgb && comp.depth < 8)
return AVERROR(EINVAL);
if (w < 1)
return AVERROR(EINVAL);
if (c == 0 && limited) {
src = 16 << (comp.depth - 8);
} else if ((c == 1 || c == 2) && !rgb) {
src = 128 << (comp.depth - 8);
} else if (c == 3) {
// (Assume even limited YUV uses full range alpha.)
src = (1 << comp.depth) - 1;
}
for (x = 0; x < w; x++)
src_array[x] = src;
for (x = 0; x < 4; x++)
c_data[x] = &clear_block[x][0];
av_write_image_line(src_array, c_data, c_linesize, desc, 0, 0, c, w);
}
for (plane = 0; plane < nb_planes; plane++) {
plane_line_bytes[plane] = av_image_get_linesize(pix_fmt, width, plane);
if (plane_line_bytes[plane] < 0)
return AVERROR(EINVAL);
}
if (!dst_data)
return 0;
for (plane = 0; plane < nb_planes; plane++) {
size_t bytewidth = plane_line_bytes[plane];
uint8_t *data = dst_data[plane];
int chroma_div = plane == 1 || plane == 2 ? desc->log2_chroma_h : 0;
int plane_h = ((height + ( 1 << chroma_div) - 1)) >> chroma_div;
for (; plane_h > 0; plane_h--) {
memset_bytes(data, bytewidth, &clear_block[plane][0], clear_block_size[plane]);
data += dst_linesize[plane];
}
}
return 0;
}
| true | FFmpeg | 0f5576a22b11ef726a01b14d1eaae2fa780c2f52 |
4,604 | bool cache_is_cached(const PageCache *cache, uint64_t addr)
{
size_t pos;
g_assert(cache);
g_assert(cache->page_cache);
pos = cache_get_cache_pos(cache, addr);
return (cache->page_cache[pos].it_addr == addr);
}
| true | qemu | 27af7d6ea5015e5ef1f7985eab94a8a218267a2b |
4,605 | static void superio_ioport_writeb(void *opaque, hwaddr addr, uint64_t data,
unsigned size)
{
int can_write;
SuperIOConfig *superio_conf = opaque;
DPRINTF("superio_ioport_writeb address 0x%x val 0x%x\n", addr, data);
if (addr == 0x3f0) {
superio_conf->index = data & 0xff;
} else {
/* 0x3f1 */
switch (superio_conf->index) {
case 0x00 ... 0xdf:
case 0xe4:
case 0xe5:
case 0xe9 ... 0xed:
case 0xf3:
case 0xf5:
case 0xf7:
case 0xf9 ... 0xfb:
case 0xfd ... 0xff:
can_write = 0;
break;
default:
can_write = 1;
if (can_write) {
switch (superio_conf->index) {
case 0xe7:
if ((data & 0xff) != 0xfe) {
DPRINTF("chage uart 1 base. unsupported yet\n");
}
break;
case 0xe8:
if ((data & 0xff) != 0xbe) {
DPRINTF("chage uart 2 base. unsupported yet\n");
}
break;
default:
superio_conf->config[superio_conf->index] = data & 0xff;
}
}
}
superio_conf->config[superio_conf->index] = data & 0xff;
}
}
| true | qemu | b196d969efa3987148994f0f8da79a10ebda7641 |
4,608 | static int default_fdset_get_fd(int64_t fdset_id, int flags)
{
return -1;
}
| false | qemu | 1f001dc7bc9e435bf231a5b0edcad1c7c2bd6214 |
4,613 | static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
req->resp.cmd->response = VIRTIO_SCSI_S_FAILURE;
virtio_scsi_complete_cmd_req(req);
}
| false | qemu | 3eff1f46f08a360a4ae9f834ce9fef4c45bf6f0f |
4,614 | static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
IscsiLun *iscsilun = bs->opaque;
struct iscsi_context *iscsi = NULL;
struct iscsi_url *iscsi_url = NULL;
struct scsi_task *task = NULL;
struct scsi_inquiry_standard *inq = NULL;
struct scsi_inquiry_supported_pages *inq_vpd;
char *initiator_name = NULL;
QemuOpts *opts;
Error *local_err = NULL;
const char *filename;
int i, ret = 0;
if ((BDRV_SECTOR_SIZE % 512) != 0) {
error_setg(errp, "iSCSI: Invalid BDRV_SECTOR_SIZE. "
"BDRV_SECTOR_SIZE(%lld) is not a multiple "
"of 512", BDRV_SECTOR_SIZE);
return -EINVAL;
}
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto out;
}
filename = qemu_opt_get(opts, "filename");
iscsi_url = iscsi_parse_full_url(iscsi, filename);
if (iscsi_url == NULL) {
error_setg(errp, "Failed to parse URL : %s", filename);
ret = -EINVAL;
goto out;
}
memset(iscsilun, 0, sizeof(IscsiLun));
initiator_name = parse_initiator_name(iscsi_url->target);
iscsi = iscsi_create_context(initiator_name);
if (iscsi == NULL) {
error_setg(errp, "iSCSI: Failed to create iSCSI context.");
ret = -ENOMEM;
goto out;
}
if (iscsi_set_targetname(iscsi, iscsi_url->target)) {
error_setg(errp, "iSCSI: Failed to set target name.");
ret = -EINVAL;
goto out;
}
if (iscsi_url->user != NULL) {
ret = iscsi_set_initiator_username_pwd(iscsi, iscsi_url->user,
iscsi_url->passwd);
if (ret != 0) {
error_setg(errp, "Failed to set initiator username and password");
ret = -EINVAL;
goto out;
}
}
/* check if we got CHAP username/password via the options */
parse_chap(iscsi, iscsi_url->target, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto out;
}
if (iscsi_set_session_type(iscsi, ISCSI_SESSION_NORMAL) != 0) {
error_setg(errp, "iSCSI: Failed to set session type to normal.");
ret = -EINVAL;
goto out;
}
iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C);
/* check if we got HEADER_DIGEST via the options */
parse_header_digest(iscsi, iscsi_url->target, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto out;
}
if (iscsi_full_connect_sync(iscsi, iscsi_url->portal, iscsi_url->lun) != 0) {
error_setg(errp, "iSCSI: Failed to connect to LUN : %s",
iscsi_get_error(iscsi));
ret = -EINVAL;
goto out;
}
iscsilun->iscsi = iscsi;
iscsilun->aio_context = bdrv_get_aio_context(bs);
iscsilun->lun = iscsi_url->lun;
iscsilun->has_write_same = true;
task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 0, 0,
(void **) &inq, errp);
if (task == NULL) {
ret = -EINVAL;
goto out;
}
iscsilun->type = inq->periperal_device_type;
scsi_free_scsi_task(task);
task = NULL;
/* Check the write protect flag of the LUN if we want to write */
if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
iscsi_is_write_protected(iscsilun)) {
error_setg(errp, "Cannot open a write protected LUN as read-write");
ret = -EACCES;
goto out;
}
iscsi_readcapacity_sync(iscsilun, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto out;
}
bs->total_sectors = sector_lun2qemu(iscsilun->num_blocks, iscsilun);
bs->request_alignment = iscsilun->block_size;
/* We don't have any emulation for devices other than disks and CD-ROMs, so
* this must be sg ioctl compatible. We force it to be sg, otherwise qemu
* will try to read from the device to guess the image format.
*/
if (iscsilun->type != TYPE_DISK && iscsilun->type != TYPE_ROM) {
bs->sg = 1;
}
task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
SCSI_INQUIRY_PAGECODE_SUPPORTED_VPD_PAGES,
(void **) &inq_vpd, errp);
if (task == NULL) {
ret = -EINVAL;
goto out;
}
for (i = 0; i < inq_vpd->num_pages; i++) {
struct scsi_task *inq_task;
struct scsi_inquiry_logical_block_provisioning *inq_lbp;
struct scsi_inquiry_block_limits *inq_bl;
switch (inq_vpd->pages[i]) {
case SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING:
inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING,
(void **) &inq_lbp, errp);
if (inq_task == NULL) {
ret = -EINVAL;
goto out;
}
memcpy(&iscsilun->lbp, inq_lbp,
sizeof(struct scsi_inquiry_logical_block_provisioning));
scsi_free_scsi_task(inq_task);
break;
case SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS:
inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS,
(void **) &inq_bl, errp);
if (inq_task == NULL) {
ret = -EINVAL;
goto out;
}
memcpy(&iscsilun->bl, inq_bl,
sizeof(struct scsi_inquiry_block_limits));
scsi_free_scsi_task(inq_task);
break;
default:
break;
}
}
scsi_free_scsi_task(task);
task = NULL;
iscsi_attach_aio_context(bs, iscsilun->aio_context);
/* Guess the internal cluster (page) size of the iscsi target by the means
* of opt_unmap_gran. Transfer the unmap granularity only if it has a
* reasonable size */
if (iscsilun->bl.opt_unmap_gran * iscsilun->block_size >= 4 * 1024 &&
iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) {
iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran *
iscsilun->block_size) >> BDRV_SECTOR_BITS;
if (iscsilun->lbprz && !(bs->open_flags & BDRV_O_NOCACHE)) {
iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
if (iscsilun->allocationmap == NULL) {
ret = -ENOMEM;
}
}
}
out:
qemu_opts_del(opts);
g_free(initiator_name);
if (iscsi_url != NULL) {
iscsi_destroy_url(iscsi_url);
}
if (task != NULL) {
scsi_free_scsi_task(task);
}
if (ret) {
if (iscsi != NULL) {
iscsi_destroy_context(iscsi);
}
memset(iscsilun, 0, sizeof(IscsiLun));
}
return ret;
}
| false | qemu | 43ae8fb10c5f6ca78f242624c1f446e0050a9d43 |
4,616 | static void string_output_append_range(StringOutputVisitor *sov,
int64_t s, int64_t e)
{
Range *r = g_malloc0(sizeof(*r));
r->begin = s;
r->end = e + 1;
sov->ranges = range_list_insert(sov->ranges, r);
}
| false | qemu | a0efbf16604770b9d805bcf210ec29942321134f |
4,617 | static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors,
QEMUIOVector *iov)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
uint64_t lba;
uint32_t num_sectors;
if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
}
if (bs->bl.max_transfer &&
nb_sectors << BDRV_SECTOR_BITS > bs->bl.max_transfer) {
error_report("iSCSI Error: Read of %d sectors exceeds max_xfer_len "
"of %" PRIu32 " bytes", nb_sectors, bs->bl.max_transfer);
return -EINVAL;
}
if (iscsilun->lbprz && nb_sectors >= ISCSI_CHECKALLOC_THRES &&
!iscsi_allocationmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
int64_t ret;
int pnum;
BlockDriverState *file;
ret = iscsi_co_get_block_status(bs, sector_num,
BDRV_REQUEST_MAX_SECTORS, &pnum, &file);
if (ret < 0) {
return ret;
}
if (ret & BDRV_BLOCK_ZERO && pnum >= nb_sectors) {
qemu_iovec_memset(iov, 0, 0x00, iov->size);
return 0;
}
}
lba = sector_qemu2lun(sector_num, iscsilun);
num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
if (iscsilun->use_16_for_rw) {
iTask.task = iscsi_read16_task(iscsilun->iscsi, iscsilun->lun, lba,
num_sectors * iscsilun->block_size,
iscsilun->block_size, 0, 0, 0, 0, 0,
iscsi_co_generic_cb, &iTask);
} else {
iTask.task = iscsi_read10_task(iscsilun->iscsi, iscsilun->lun, lba,
num_sectors * iscsilun->block_size,
iscsilun->block_size,
0, 0, 0, 0, 0,
iscsi_co_generic_cb, &iTask);
}
if (iTask.task == NULL) {
return -ENOMEM;
}
scsi_task_set_iov_in(iTask.task, (struct scsi_iovec *) iov->iov, iov->niov);
while (!iTask.complete) {
iscsi_set_events(iscsilun);
qemu_coroutine_yield();
}
if (iTask.task != NULL) {
scsi_free_scsi_task(iTask.task);
iTask.task = NULL;
}
if (iTask.do_retry) {
iTask.complete = 0;
goto retry;
}
if (iTask.status != SCSI_STATUS_GOOD) {
return iTask.err_code;
}
return 0;
}
| false | qemu | e1123a3b40a1a9a625a29c8ed4debb7e206ea690 |
4,618 | static DeviceClass *qdev_get_device_class(const char **driver, Error **errp)
{
ObjectClass *oc;
DeviceClass *dc;
oc = object_class_by_name(*driver);
if (!oc) {
const char *typename = find_typename_by_alias(*driver);
if (typename) {
*driver = typename;
oc = object_class_by_name(*driver);
}
}
if (!object_class_dynamic_cast(oc, TYPE_DEVICE)) {
error_setg(errp, "'%s' is not a valid device model name", *driver);
return NULL;
}
if (object_class_is_abstract(oc)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "driver",
"non-abstract device type");
return NULL;
}
dc = DEVICE_CLASS(oc);
if (dc->cannot_instantiate_with_device_add_yet ||
(qdev_hotplug && !dc->hotpluggable)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "driver",
"pluggable device type");
return NULL;
}
return dc;
}
| false | qemu | f6b5319d412cda360695e2005737f91ca8201af0 |
4,619 | static int usb_net_handle_control(USBDevice *dev, int request, int value,
int index, int length, uint8_t *data)
{
USBNetState *s = (USBNetState *) dev;
int ret = 0;
switch(request) {
case DeviceRequest | USB_REQ_GET_STATUS:
data[0] = (1 << USB_DEVICE_SELF_POWERED) |
(dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP);
data[1] = 0x00;
ret = 2;
break;
case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
if (value == USB_DEVICE_REMOTE_WAKEUP) {
dev->remote_wakeup = 0;
} else {
goto fail;
}
ret = 0;
break;
case DeviceOutRequest | USB_REQ_SET_FEATURE:
if (value == USB_DEVICE_REMOTE_WAKEUP) {
dev->remote_wakeup = 1;
} else {
goto fail;
}
ret = 0;
break;
case DeviceOutRequest | USB_REQ_SET_ADDRESS:
dev->addr = value;
ret = 0;
break;
case ClassInterfaceOutRequest | USB_CDC_SEND_ENCAPSULATED_COMMAND:
if (!s->rndis || value || index != 0)
goto fail;
#ifdef TRAFFIC_DEBUG
{
unsigned int i;
fprintf(stderr, "SEND_ENCAPSULATED_COMMAND:");
for (i = 0; i < length; i++) {
if (!(i & 15))
fprintf(stderr, "\n%04x:", i);
fprintf(stderr, " %02x", data[i]);
}
fprintf(stderr, "\n\n");
}
#endif
ret = rndis_parse(s, data, length);
break;
case ClassInterfaceRequest | USB_CDC_GET_ENCAPSULATED_RESPONSE:
if (!s->rndis || value || index != 0)
goto fail;
ret = rndis_get_response(s, data);
if (!ret) {
data[0] = 0;
ret = 1;
}
#ifdef TRAFFIC_DEBUG
{
unsigned int i;
fprintf(stderr, "GET_ENCAPSULATED_RESPONSE:");
for (i = 0; i < ret; i++) {
if (!(i & 15))
fprintf(stderr, "\n%04x:", i);
fprintf(stderr, " %02x", data[i]);
}
fprintf(stderr, "\n\n");
}
#endif
break;
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
switch(value >> 8) {
case USB_DT_DEVICE:
ret = sizeof(qemu_net_dev_descriptor);
memcpy(data, qemu_net_dev_descriptor, ret);
break;
case USB_DT_CONFIG:
switch (value & 0xff) {
case 0:
ret = sizeof(qemu_net_rndis_config_descriptor);
memcpy(data, qemu_net_rndis_config_descriptor, ret);
break;
case 1:
ret = sizeof(qemu_net_cdc_config_descriptor);
memcpy(data, qemu_net_cdc_config_descriptor, ret);
break;
default:
goto fail;
}
data[2] = ret & 0xff;
data[3] = ret >> 8;
break;
case USB_DT_STRING:
switch (value & 0xff) {
case 0:
/* language ids */
data[0] = 4;
data[1] = 3;
data[2] = 0x09;
data[3] = 0x04;
ret = 4;
break;
case STRING_ETHADDR:
ret = set_usb_string(data, s->usbstring_mac);
break;
default:
if (usb_net_stringtable[value & 0xff]) {
ret = set_usb_string(data,
usb_net_stringtable[value & 0xff]);
break;
}
goto fail;
}
break;
default:
goto fail;
}
break;
case DeviceRequest | USB_REQ_GET_CONFIGURATION:
data[0] = s->rndis ? DEV_RNDIS_CONFIG_VALUE : DEV_CONFIG_VALUE;
ret = 1;
break;
case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
switch (value & 0xff) {
case DEV_CONFIG_VALUE:
s->rndis = 0;
break;
case DEV_RNDIS_CONFIG_VALUE:
s->rndis = 1;
break;
default:
goto fail;
}
ret = 0;
break;
case DeviceRequest | USB_REQ_GET_INTERFACE:
case InterfaceRequest | USB_REQ_GET_INTERFACE:
data[0] = 0;
ret = 1;
break;
case DeviceOutRequest | USB_REQ_SET_INTERFACE:
case InterfaceOutRequest | USB_REQ_SET_INTERFACE:
ret = 0;
break;
default:
fail:
fprintf(stderr, "usbnet: failed control transaction: "
"request 0x%x value 0x%x index 0x%x length 0x%x\n",
request, value, index, length);
ret = USB_RET_STALL;
break;
}
return ret;
}
| false | qemu | d59f8ba938afd837182e666cce777dfb860559e4 |
4,620 | static void v9fs_synth_rewinddir(FsContext *ctx, V9fsFidOpenState *fs)
{
v9fs_synth_seekdir(ctx, fs, 0);
}
| false | qemu | 364031f17932814484657e5551ba12957d993d7e |
4,621 | void ff_vp3_h_loop_filter_c(uint8_t *first_pixel, int stride, int *bounding_values)
{
unsigned char *end;
int filter_value;
for (end= first_pixel + 8*stride; first_pixel != end; first_pixel += stride) {
filter_value =
(first_pixel[-2] - first_pixel[ 1])
+3*(first_pixel[ 0] - first_pixel[-1]);
filter_value = bounding_values[(filter_value + 4) >> 3];
first_pixel[-1] = av_clip_uint8(first_pixel[-1] + filter_value);
first_pixel[ 0] = av_clip_uint8(first_pixel[ 0] - filter_value);
}
}
| false | FFmpeg | 28f9ab7029bd1a02f659995919f899f84ee7361b |
4,622 | static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
int16_t *exponents, int end_pos2)
{
int s_index;
int i;
int last_pos, bits_left;
VLC *vlc;
int end_pos = FFMIN(end_pos2, s->gb.size_in_bits);
/* low frequencies (called big values) */
s_index = 0;
for (i = 0; i < 3; i++) {
int j, k, l, linbits;
j = g->region_size[i];
if (j == 0)
continue;
/* select vlc table */
k = g->table_select[i];
l = mpa_huff_data[k][0];
linbits = mpa_huff_data[k][1];
vlc = &huff_vlc[l];
if (!l) {
memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j);
s_index += 2 * j;
continue;
}
/* read huffcode and compute each couple */
for (; j > 0; j--) {
int exponent, x, y;
int v;
int pos = get_bits_count(&s->gb);
if (pos >= end_pos){
// av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
switch_buffer(s, &pos, &end_pos, &end_pos2);
// av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
if (pos >= end_pos)
break;
}
y = get_vlc2(&s->gb, vlc->table, 7, 3);
if (!y) {
g->sb_hybrid[s_index ] =
g->sb_hybrid[s_index+1] = 0;
s_index += 2;
continue;
}
exponent= exponents[s_index];
av_dlog(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
i, g->region_size[i] - j, x, y, exponent);
if (y & 16) {
x = y >> 5;
y = y & 0x0f;
if (x < 15) {
READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x)
} else {
x += get_bitsz(&s->gb, linbits);
v = l3_unscale(x, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index] = v;
}
if (y < 15) {
READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y)
} else {
y += get_bitsz(&s->gb, linbits);
v = l3_unscale(y, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index+1] = v;
}
} else {
x = y >> 5;
y = y & 0x0f;
x += y;
if (x < 15) {
READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x)
} else {
x += get_bitsz(&s->gb, linbits);
v = l3_unscale(x, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index+!!y] = v;
}
g->sb_hybrid[s_index + !y] = 0;
}
s_index += 2;
}
}
/* high frequencies */
vlc = &huff_quad_vlc[g->count1table_select];
last_pos = 0;
while (s_index <= 572) {
int pos, code;
pos = get_bits_count(&s->gb);
if (pos >= end_pos) {
if (pos > end_pos2 && last_pos) {
/* some encoders generate an incorrect size for this
part. We must go back into the data */
s_index -= 4;
skip_bits_long(&s->gb, last_pos - pos);
av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
if(s->err_recognition & AV_EF_BITSTREAM)
s_index=0;
break;
}
// av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
switch_buffer(s, &pos, &end_pos, &end_pos2);
// av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
if (pos >= end_pos)
break;
}
last_pos = pos;
code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
av_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
g->sb_hybrid[s_index+0] =
g->sb_hybrid[s_index+1] =
g->sb_hybrid[s_index+2] =
g->sb_hybrid[s_index+3] = 0;
while (code) {
static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
int v;
int pos = s_index + idxtab[code];
code ^= 8 >> idxtab[code];
READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos])
}
s_index += 4;
}
/* skip extension bits */
bits_left = end_pos2 - get_bits_count(&s->gb);
//av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer);
if (bits_left < 0 && (s->err_recognition & AV_EF_BITSTREAM)) {
av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
s_index=0;
} else if (bits_left > 0 && (s->err_recognition & AV_EF_BUFFER)) {
av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
s_index = 0;
}
memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index));
skip_bits_long(&s->gb, bits_left);
i = get_bits_count(&s->gb);
switch_buffer(s, &i, &end_pos, &end_pos2);
return 0;
}
| false | FFmpeg | d2a0041c2075a553bb8d4f94591f8556680190c8 |
4,623 | static void memory_region_write_thunk_n(void *_mr,
target_phys_addr_t addr,
unsigned size,
uint64_t data)
{
MemoryRegion *mr = _mr;
if (!memory_region_access_valid(mr, addr, size)) {
return; /* FIXME: better signalling */
}
if (!mr->ops->write) {
mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
return;
}
/* FIXME: support unaligned access */
access_with_adjusted_size(addr + mr->offset, &data, size,
mr->ops->impl.min_access_size,
mr->ops->impl.max_access_size,
memory_region_write_accessor, mr);
}
| false | qemu | 897fa7cff21a98b260a5b3e73eae39273fa60272 |
4,624 | static int zipl_load_segment(struct component_entry *entry)
{
const int max_entries = (SECTOR_SIZE / sizeof(struct scsi_blockptr));
struct scsi_blockptr *bprs = (void*)sec;
const int bprs_size = sizeof(sec);
uint64_t blockno;
long address;
int i;
blockno = entry->data.blockno;
address = entry->load_address;
debug_print_int("loading segment at block", blockno);
debug_print_int("addr", address);
do {
memset(bprs, FREE_SPACE_FILLER, bprs_size);
if (virtio_read(blockno, (uint8_t *)bprs)) {
debug_print_int("failed reading bprs at", blockno);
goto fail;
}
for (i = 0;; i++) {
u64 *cur_desc = (void*)&bprs[i];
blockno = bprs[i].blockno;
if (!blockno)
break;
/* we need the updated blockno for the next indirect entry in the
chain, but don't want to advance address */
if (i == (max_entries - 1))
break;
if (bprs[i].blockct == 0 && unused_space(&bprs[i + 1],
sizeof(struct scsi_blockptr))) {
/* This is a "continue" pointer.
* This ptr is the last one in the current script section.
* I.e. the next ptr must point to the unused memory area.
* The blockno is not zero, so the upper loop must continue
* reading next section of BPRS.
*/
break;
}
address = virtio_load_direct(cur_desc[0], cur_desc[1], 0,
(void*)address);
if (address == -1)
goto fail;
}
} while (blockno);
return 0;
fail:
sclp_print("failed loading segment\n");
return -1;
}
| false | qemu | abd696e4f74a9d30801c6ae2693efe4e5979c2f2 |
4,625 | int event_notifier_set(EventNotifier *e)
{
uint64_t value = 1;
int r = write(e->fd, &value, sizeof(value));
return r == sizeof(value);
}
| false | qemu | d0cc2fbfa607678866475383c508be84818ceb64 |
4,626 | long do_sigreturn(CPUSPARCState *env)
{
abi_ulong sf_addr;
struct target_signal_frame *sf;
uint32_t up_psr, pc, npc;
target_sigset_t set;
sigset_t host_set;
int err=0, i;
sf_addr = env->regwptr[UREG_FP];
trace_user_do_sigreturn(env, sf_addr);
if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
goto segv_and_exit;
}
/* 1. Make sure we are not getting garbage from the user */
if (sf_addr & 3)
goto segv_and_exit;
__get_user(pc, &sf->info.si_regs.pc);
__get_user(npc, &sf->info.si_regs.npc);
if ((pc | npc) & 3) {
goto segv_and_exit;
}
/* 2. Restore the state */
__get_user(up_psr, &sf->info.si_regs.psr);
/* User can only change condition codes and FPU enabling in %psr. */
env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
| (env->psr & ~(PSR_ICC /* | PSR_EF */));
env->pc = pc;
env->npc = npc;
__get_user(env->y, &sf->info.si_regs.y);
for (i=0; i < 8; i++) {
__get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
}
for (i=0; i < 8; i++) {
__get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
}
/* FIXME: implement FPU save/restore:
* __get_user(fpu_save, &sf->fpu_save);
* if (fpu_save)
* err |= restore_fpu_state(env, fpu_save);
*/
/* This is pretty much atomic, no amount locking would prevent
* the races which exist anyways.
*/
__get_user(set.sig[0], &sf->info.si_mask);
for(i = 1; i < TARGET_NSIG_WORDS; i++) {
__get_user(set.sig[i], &sf->extramask[i - 1]);
}
target_to_host_sigset_internal(&host_set, &set);
do_sigprocmask(SIG_SETMASK, &host_set, NULL);
if (err) {
goto segv_and_exit;
}
unlock_user_struct(sf, sf_addr, 0);
return env->regwptr[0];
segv_and_exit:
unlock_user_struct(sf, sf_addr, 0);
force_sig(TARGET_SIGSEGV);
}
| false | qemu | c0bea68f9ea48f0dea7a06a259a613bfd3a7e35e |
4,627 | void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
int nb_clusters, enum qcow2_discard_type type)
{
BDRVQcow2State *s = bs->opaque;
switch (qcow2_get_cluster_type(l2_entry)) {
case QCOW2_CLUSTER_COMPRESSED:
{
int nb_csectors;
nb_csectors = ((l2_entry >> s->csize_shift) &
s->csize_mask) + 1;
qcow2_free_clusters(bs,
(l2_entry & s->cluster_offset_mask) & ~511,
nb_csectors * 512, type);
}
break;
case QCOW2_CLUSTER_NORMAL:
case QCOW2_CLUSTER_ZERO:
if (l2_entry & L2E_OFFSET_MASK) {
if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
qcow2_signal_corruption(bs, false, -1, -1,
"Cannot free unaligned cluster %#llx",
l2_entry & L2E_OFFSET_MASK);
} else {
qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
nb_clusters << s->cluster_bits, type);
}
}
break;
case QCOW2_CLUSTER_UNALLOCATED:
break;
default:
abort();
}
}
| false | qemu | fdfab37dfeffefbd4533b4158055c9b82d7c3e69 |
4,628 | void qemu_put_byte(QEMUFile *f, int v)
{
if (!f->last_error && f->is_write == 0 && f->buf_index > 0) {
fprintf(stderr,
"Attempted to write to buffer while read buffer is not empty\n");
abort();
}
f->buf[f->buf_index++] = v;
f->is_write = 1;
if (f->buf_index >= IO_BUF_SIZE) {
int ret = qemu_fflush(f);
qemu_file_set_if_error(f, ret);
}
}
| false | qemu | c10682cb031525a8bdf3999ef6a033777929d304 |
4,629 | static void n8x0_i2c_setup(struct n800_s *s)
{
DeviceState *dev;
qemu_irq tmp_irq = qdev_get_gpio_in(s->cpu->gpio, N8X0_TMP105_GPIO);
/* Attach the CPU on one end of our I2C bus. */
s->i2c = omap_i2c_bus(s->cpu->i2c[0]);
/* Attach a menelaus PM chip */
dev = i2c_create_slave(s->i2c, "twl92230", N8X0_MENELAUS_ADDR);
qdev_connect_gpio_out(dev, 3, s->cpu->irq[0][OMAP_INT_24XX_SYS_NIRQ]);
/* Attach a TMP105 PM chip (A0 wired to ground) */
dev = i2c_create_slave(s->i2c, "tmp105", N8X0_TMP105_ADDR);
qdev_connect_gpio_out(dev, 0, tmp_irq);
}
| false | qemu | 0919ac787641db11024912651f3bc5764d4f1286 |
4,630 | void destroy_bdrvs(dev_match_fn *match_fn, void *arg)
{
DriveInfo *dinfo;
struct BlockDriverState *bs;
TAILQ_FOREACH(dinfo, &drives, next) {
bs = dinfo->bdrv;
if (bs) {
if (bs->private && match_fn(bs->private, arg)) {
drive_uninit(bs);
bdrv_delete(bs);
}
}
}
}
| false | qemu | 72cf2d4f0e181d0d3a3122e04129c58a95da713e |
4,631 | TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
int flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
tb_page_addr_t phys_pc, phys_page2;
target_ulong virt_page2;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
phys_pc = get_page_addr_code(env, pc);
if (use_icount) {
cflags |= CF_USE_ICOUNT;
}
tb = tb_alloc(pc);
if (unlikely(!tb)) {
buffer_overflow:
/* flush must be done */
tb_flush(cpu);
/* cannot fail at this point */
tb = tb_alloc(pc);
assert(tb != NULL);
/* Don't forget to invalidate previous TB info. */
tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
}
gen_code_buf = tcg_ctx.code_gen_ptr;
tb->tc_ptr = gen_code_buf;
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count1++; /* includes aborted translations because of
exceptions */
ti = profile_getclock();
#endif
tcg_func_start(&tcg_ctx);
gen_intermediate_code(env, tb);
trace_translate_block(tb, tb->pc, tb->tc_ptr);
/* generate machine code */
tb->tb_next_offset[0] = 0xffff;
tb->tb_next_offset[1] = 0xffff;
tcg_ctx.tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
tcg_ctx.tb_next = NULL;
#else
tcg_ctx.tb_jmp_offset = NULL;
tcg_ctx.tb_next = tb->tb_next;
#endif
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count++;
tcg_ctx.interm_time += profile_getclock() - ti;
tcg_ctx.code_time -= profile_getclock();
#endif
/* ??? Overflow could be handled better here. In particular, we
don't need to re-do gen_intermediate_code, nor should we re-do
the tcg optimization currently hidden inside tcg_gen_code. All
that should be required is to flush the TBs, allocate a new TB,
re-initialize it per above, and re-do the actual code generation. */
gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
if (unlikely(gen_code_size < 0)) {
goto buffer_overflow;
}
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
goto buffer_overflow;
}
#ifdef CONFIG_PROFILER
tcg_ctx.code_time += profile_getclock();
tcg_ctx.code_in_len += tb->size;
tcg_ctx.code_out_len += gen_code_size;
tcg_ctx.search_out_len += search_size;
#endif
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
qemu_log("OUT: [size=%d]\n", gen_code_size);
log_disas(tb->tc_ptr, gen_code_size);
qemu_log("\n");
qemu_log_flush();
}
#endif
tcg_ctx.code_gen_ptr = (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
CODE_GEN_ALIGN);
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_page_addr_code(env, virt_page2);
}
tb_link_page(tb, phys_pc, phys_page2);
return tb;
}
| false | qemu | 56c0269a9ec105d3848d9f900b5e38e6b35e2478 |
4,632 | static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
BlockRequest *blkreq;
uint64_t sector;
sector = virtio_ldq_p(VIRTIO_DEVICE(req->dev), &req->out.sector);
trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);
if (!virtio_blk_sect_range_ok(req->dev, sector, req->qiov.size)) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
virtio_blk_free_request(req);
return;
}
block_acct_start(bdrv_get_stats(req->dev->bs), &req->acct, req->qiov.size,
BLOCK_ACCT_WRITE);
if (mrb->num_writes == 32) {
virtio_submit_multiwrite(req->dev->bs, mrb);
}
blkreq = &mrb->blkreq[mrb->num_writes];
blkreq->sector = sector;
blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
blkreq->qiov = &req->qiov;
blkreq->cb = virtio_blk_rw_complete;
blkreq->opaque = req;
blkreq->error = 0;
mrb->num_writes++;
}
| false | qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce |
4,633 | static int vc1_parse(AVCodecParserContext *s,
AVCodecContext *avctx,
const uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
{
/* Here we do the searching for frame boundaries and headers at
* the same time. Only a minimal amount at the start of each
* header is unescaped. */
VC1ParseContext *vpc = s->priv_data;
int pic_found = vpc->pc.frame_start_found;
uint8_t *unesc_buffer = vpc->unesc_buffer;
size_t unesc_index = vpc->unesc_index;
VC1ParseSearchState search_state = vpc->search_state;
int start_code_found;
int next = END_NOT_FOUND;
int i = vpc->bytes_to_skip;
if (pic_found && buf_size == 0) {
/* EOF considered as end of frame */
memset(unesc_buffer + unesc_index, 0, UNESCAPED_THRESHOLD - unesc_index);
vc1_extract_header(s, avctx, unesc_buffer, unesc_index);
next = 0;
}
while (i < buf_size) {
uint8_t b;
start_code_found = 0;
while (i < buf_size && unesc_index < UNESCAPED_THRESHOLD) {
b = buf[i++];
unesc_buffer[unesc_index++] = b;
if (search_state <= ONE_ZERO)
search_state = b ? NO_MATCH : search_state + 1;
else if (search_state == TWO_ZEROS) {
if (b == 1)
search_state = ONE;
else if (b > 1) {
if (b == 3)
unesc_index--; // swallow emulation prevention byte
search_state = NO_MATCH;
}
}
else { // search_state == ONE
// Header unescaping terminates early due to detection of next start code
search_state = NO_MATCH;
start_code_found = 1;
break;
}
}
if ((s->flags & PARSER_FLAG_COMPLETE_FRAMES) &&
unesc_index >= UNESCAPED_THRESHOLD &&
vpc->prev_start_code == (VC1_CODE_FRAME & 0xFF))
{
// No need to keep scanning the rest of the buffer for
// start codes if we know it contains a complete frame and
// we've already unescaped all we need of the frame header
vc1_extract_header(s, avctx, unesc_buffer, unesc_index);
break;
}
if (unesc_index >= UNESCAPED_THRESHOLD && !start_code_found) {
while (i < buf_size) {
if (search_state == NO_MATCH) {
i += vpc->v.vc1dsp.startcode_find_candidate(buf + i, buf_size - i);
if (i < buf_size) {
search_state = ONE_ZERO;
}
i++;
} else {
b = buf[i++];
if (search_state == ONE_ZERO)
search_state = b ? NO_MATCH : TWO_ZEROS;
else if (search_state == TWO_ZEROS) {
if (b >= 1)
search_state = b == 1 ? ONE : NO_MATCH;
}
else { // search_state == ONE
search_state = NO_MATCH;
start_code_found = 1;
break;
}
}
}
}
if (start_code_found) {
vc1_extract_header(s, avctx, unesc_buffer, unesc_index);
vpc->prev_start_code = b;
unesc_index = 0;
if (!(s->flags & PARSER_FLAG_COMPLETE_FRAMES)) {
if (!pic_found && (b == (VC1_CODE_FRAME & 0xFF) || b == (VC1_CODE_FIELD & 0xFF))) {
pic_found = 1;
}
else if (pic_found && b != (VC1_CODE_FIELD & 0xFF) && b != (VC1_CODE_SLICE & 0xFF)) {
next = i - 4;
pic_found = b == (VC1_CODE_FRAME & 0xFF);
break;
}
}
}
}
vpc->pc.frame_start_found = pic_found;
vpc->unesc_index = unesc_index;
vpc->search_state = search_state;
if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
next = buf_size;
} else {
if (ff_combine_frame(&vpc->pc, next, &buf, &buf_size) < 0) {
vpc->bytes_to_skip = 0;
*poutbuf = NULL;
*poutbuf_size = 0;
return buf_size;
}
}
/* If we return with a valid pointer to a combined frame buffer
* then on the next call then we'll have been unhelpfully rewound
* by up to 4 bytes (depending upon whether the start code
* overlapped the input buffer, and if so by how much). We don't
* want this: it will either cause spurious second detections of
* the start code we've already seen, or cause extra bytes to be
* inserted at the start of the unescaped buffer. */
vpc->bytes_to_skip = 4;
if (next < 0 && start_code_found)
vpc->bytes_to_skip += next;
*poutbuf = buf;
*poutbuf_size = buf_size;
return next;
}
| false | FFmpeg | ff771f79b55a346b4163d814b58ee4c98114745e |
4,634 | static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
if (!acb->need_bh) {
acb->common.cb(acb->common.opaque, acb->req.error);
qemu_aio_unref(acb);
}
}
| false | qemu | 61007b316cd71ee7333ff7a0a749a8949527575f |
4,635 | QObject *qmp_output_get_qobject(QmpOutputVisitor *qov)
{
/* FIXME: we should require that a visit occurred, and that it is
* complete (no starts without a matching end) */
QObject *obj = qov->root;
if (obj) {
qobject_incref(obj);
} else {
obj = qnull();
}
return obj;
}
| false | qemu | 56a6f02b8ce1fe41a2a9077593e46eca7d98267d |
4,637 | eeprom_t *eeprom93xx_new(DeviceState *dev, uint16_t nwords)
{
/* Add a new EEPROM (with 16, 64 or 256 words). */
eeprom_t *eeprom;
uint8_t addrbits;
switch (nwords) {
case 16:
case 64:
addrbits = 6;
break;
case 128:
case 256:
addrbits = 8;
break;
default:
assert(!"Unsupported EEPROM size, fallback to 64 words!");
nwords = 64;
addrbits = 6;
}
eeprom = (eeprom_t *)g_malloc0(sizeof(*eeprom) + nwords * 2);
eeprom->size = nwords;
eeprom->addrbits = addrbits;
/* Output DO is tristate, read results in 1. */
eeprom->eedo = 1;
logout("eeprom = 0x%p, nwords = %u\n", eeprom, nwords);
vmstate_register(dev, 0, &vmstate_eeprom, eeprom);
return eeprom;
}
| false | qemu | 6fedcaa1c5419fa89c31fd34dabbd71861c615d2 |
4,638 | static void cdrom_change_cb(void *opaque)
{
IDEState *s = opaque;
uint64_t nb_sectors;
/* XXX: send interrupt too */
bdrv_get_geometry(s->bs, &nb_sectors);
s->nb_sectors = nb_sectors;
}
| false | qemu | 9118e7f08f39001c92d595090b41305ef45c200a |
4,641 | void *qpci_legacy_iomap(QPCIDevice *dev, uint16_t addr)
{
return (void *)(uintptr_t)addr;
}
| true | qemu | b4ba67d9a702507793c2724e56f98e9b0f7be02b |
4,642 | static void trace(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, uint64_t x5, uint64_t x6)
{
TraceRecord *rec = &trace_buf[trace_idx];
if (!trace_list[event].state) {
return;
}
rec->event = event;
rec->timestamp_ns = get_clock();
rec->x1 = x1;
rec->x2 = x2;
rec->x3 = x3;
rec->x4 = x4;
rec->x5 = x5;
rec->x6 = x6;
if (++trace_idx == TRACE_BUF_LEN) {
st_flush_trace_buffer();
}
}
| true | qemu | 0b5538c300a56c3cfb33022840fe0b4968147e7a |
4,643 | static void gen_intermediate_code_internal(CPULM32State *env,
TranslationBlock *tb, int search_pc)
{
struct DisasContext ctx, *dc = &ctx;
uint16_t *gen_opc_end;
uint32_t pc_start;
int j, lj;
uint32_t next_page_start;
int num_insns;
int max_insns;
qemu_log_try_set_file(stderr);
pc_start = tb->pc;
dc->env = env;
dc->tb = tb;
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = env->singlestep_enabled;
dc->nr_nops = 0;
if (pc_start & 3) {
cpu_abort(env, "LM32: unaligned PC=%x\n", pc_start);
}
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("-----------------------------------------\n");
log_cpu_state(env, 0);
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
gen_icount_start();
do {
check_breakpoint(env, dc);
if (search_pc) {
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
decode(dc, cpu_ldl_code(env, dc->pc));
dc->pc += 4;
num_insns++;
} while (!dc->is_jmp
&& tcg_ctx.gen_opc_ptr < gen_opc_end
&& !env->singlestep_enabled
&& !singlestep
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
if (tb->cflags & CF_LAST_IO) {
gen_io_end();
}
if (unlikely(env->singlestep_enabled)) {
if (dc->is_jmp == DISAS_NEXT) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
}
t_gen_raise_exception(dc, EXCP_DEBUG);
} else {
switch (dc->is_jmp) {
case DISAS_NEXT:
gen_goto_tb(dc, 1, dc->pc);
break;
default:
case DISAS_JUMP:
case DISAS_UPDATE:
/* indicate that the hash table must be used
to find the next TB */
tcg_gen_exit_tb(0);
break;
case DISAS_TB_JUMP:
/* nothing more to generate */
break;
}
}
gen_icount_end(tb, num_insns);
*tcg_ctx.gen_opc_ptr = INDEX_op_end;
if (search_pc) {
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("\n");
log_target_disas(env, pc_start, dc->pc - pc_start, 0);
qemu_log("\nisize=%d osize=%td\n",
dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
tcg_ctx.gen_opc_buf);
}
#endif
}
| true | qemu | 632314c49ce20ee9c974f07544d9125fbbbfbe1b |
4,644 | void acpi_gpe_init(ACPIREGS *ar, uint8_t len)
{
ar->gpe.len = len;
ar->gpe.sts = g_malloc0(len / 2);
ar->gpe.en = g_malloc0(len / 2);
}
| true | qemu | d9a3b33d2c9f996537b7f1d0246dee2d0120cefb |
4,646 | static void xtensa_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
XtensaCPU *cpu = XTENSA_CPU(obj);
XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj);
CPUXtensaState *env = &cpu->env;
static bool tcg_inited;
cs->env_ptr = env;
env->config = xcc->config;
cpu_exec_init(cs, &error_abort);
if (tcg_enabled() && !tcg_inited) {
tcg_inited = true;
xtensa_translate_init();
}
}
| true | qemu | ce5b1bbf624b977a55ff7f85bb3871682d03baff |
4,648 | static void dp8393x_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->realize = dp8393x_realize;
dc->reset = dp8393x_reset;
dc->vmsd = &vmstate_dp8393x;
dc->props = dp8393x_properties;
} | true | qemu | f6351288b65130deb8102b17143f5d84f817a02a |
4,649 | static int mjpeg_decode_sof0(MJpegDecodeContext *s,
UINT8 *buf, int buf_size)
{
int len, nb_components, i, width, height;
init_get_bits(&s->gb, buf, buf_size);
/* XXX: verify len field validity */
len = get_bits(&s->gb, 16);
/* only 8 bits/component accepted */
if (get_bits(&s->gb, 8) != 8)
return -1;
height = get_bits(&s->gb, 16);
width = get_bits(&s->gb, 16);
nb_components = get_bits(&s->gb, 8);
if (nb_components <= 0 ||
nb_components > MAX_COMPONENTS)
return -1;
s->nb_components = nb_components;
s->h_max = 1;
s->v_max = 1;
for(i=0;i<nb_components;i++) {
/* component id */
s->component_id[i] = get_bits(&s->gb, 8) - 1;
s->h_count[i] = get_bits(&s->gb, 4);
s->v_count[i] = get_bits(&s->gb, 4);
/* compute hmax and vmax (only used in interleaved case) */
if (s->h_count[i] > s->h_max)
s->h_max = s->h_count[i];
if (s->v_count[i] > s->v_max)
s->v_max = s->v_count[i];
s->quant_index[i] = get_bits(&s->gb, 8);
if (s->quant_index[i] >= 4)
return -1;
dprintf("component %d %d:%d\n", i, s->h_count[i], s->v_count[i]);
}
/* if different size, realloc/alloc picture */
/* XXX: also check h_count and v_count */
if (width != s->width || height != s->height) {
for(i=0;i<MAX_COMPONENTS;i++) {
free(s->current_picture[i]);
s->current_picture[i] = NULL;
}
s->width = width;
s->height = height;
/* test interlaced mode */
if (s->first_picture &&
s->org_height != 0 &&
s->height < ((s->org_height * 3) / 4)) {
s->interlaced = 1;
s->bottom_field = 0;
}
for(i=0;i<nb_components;i++) {
int w, h;
w = (s->width + 8 * s->h_max - 1) / (8 * s->h_max);
h = (s->height + 8 * s->v_max - 1) / (8 * s->v_max);
w = w * 8 * s->h_count[i];
h = h * 8 * s->v_count[i];
if (s->interlaced)
w *= 2;
s->linesize[i] = w;
/* memory test is done in mjpeg_decode_sos() */
s->current_picture[i] = av_mallocz(w * h);
}
s->first_picture = 0;
}
if (len != 8+(3*nb_components))
dprintf("decode_sof0: error, len(%d) mismatch\n", len);
return 0;
}
| true | FFmpeg | af289048d8f720743ed82a4e674cff01ab02a836 |
4,651 | int ff_avfilter_graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
int ret;
/* find supported formats from sub-filters, and merge along links */
if ((ret = query_formats(graph, log_ctx)) < 0)
return ret;
/* Once everything is merged, it's possible that we'll still have
* multiple valid media format choices. We pick the first one. */
pick_formats(graph);
return 0;
}
| false | FFmpeg | 63736fe48c30c5db313c3a25d1462ad31b2a1671 |
4,652 | static void avc_luma_midh_qrt_8w_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int32_t height, uint8_t horiz_offset)
{
uint32_t multiple8_cnt;
for (multiple8_cnt = 2; multiple8_cnt--;) {
avc_luma_midh_qrt_4w_msa(src, src_stride, dst, dst_stride, height,
horiz_offset);
src += 4;
dst += 4;
}
}
| false | FFmpeg | e549933a270dd2cfc36f2cf9bb6b29acf3dc6d08 |
4,653 | target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1,
target_ulong r2, target_ulong r3)
{
int64_t t1 = extract64(r1, 0, 32);
int64_t t2 = extract64(r2, 0, 32);
int64_t t3 = extract64(r3, 0, 32);
int64_t result;
result = t2 - (t1 * t3);
return suov32_neg(env, result);
}
| true | qemu | 3debbb5af5f63440b170b71bf3aecc0e778f5691 |
4,655 | static CharDriverState *qemu_chr_open_fd(int fd_in, int fd_out)
{
CharDriverState *chr;
FDCharDriver *s;
chr = qemu_chr_alloc();
s = g_malloc0(sizeof(FDCharDriver));
s->fd_in = io_channel_from_fd(fd_in);
s->fd_out = io_channel_from_fd(fd_out);
qemu_set_nonblock(fd_out);
s->chr = chr;
chr->opaque = s;
chr->chr_add_watch = fd_chr_add_watch;
chr->chr_write = fd_chr_write;
chr->chr_update_read_handler = fd_chr_update_read_handler;
chr->chr_close = fd_chr_close;
return chr;
}
| true | qemu | 2d528d45ecf5ee3c1a566a9f3d664464925ef830 |
4,656 | static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
int (*mv)[2][4][2], int mb_x, int mb_y,
int mb_intra, int mb_skipped)
{
MpegEncContext *s = opaque;
s->mv_dir = mv_dir;
s->mv_type = mv_type;
s->mb_intra = mb_intra;
s->mb_skipped = mb_skipped;
s->mb_x = mb_x;
s->mb_y = mb_y;
memcpy(s->mv, mv, sizeof(*mv));
ff_init_block_index(s);
ff_update_block_index(s);
s->bdsp.clear_blocks(s->block[0]);
s->dest[0] = s->current_picture.f->data[0] +
s->mb_y * 16 * s->linesize +
s->mb_x * 16;
s->dest[1] = s->current_picture.f->data[1] +
s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize +
s->mb_x * (16 >> s->chroma_x_shift);
s->dest[2] = s->current_picture.f->data[2] +
s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize +
s->mb_x * (16 >> s->chroma_x_shift);
if (ref)
av_log(s->avctx, AV_LOG_DEBUG,
"Interlaced error concealment is not fully implemented\n");
ff_mpv_reconstruct_mb(s, s->block);
} | true | FFmpeg | 127a362630e11fe724e2e63fc871791fdcbcfa64 |
4,657 | static int field_end(H264Context *h, int in_setup)
{
MpegEncContext *const s = &h->s;
AVCodecContext *const avctx = s->avctx;
int err = 0;
s->mb_y = 0;
if (!in_setup && !s->dropable)
ff_thread_report_progress(&s->current_picture_ptr->f,
(16 * s->mb_height >> FIELD_PICTURE) - 1,
s->picture_structure == PICT_BOTTOM_FIELD);
if (CONFIG_H264_VDPAU_DECODER &&
s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
ff_vdpau_h264_set_reference_frames(s);
if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
if (!s->dropable) {
err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
h->prev_poc_msb = h->poc_msb;
h->prev_poc_lsb = h->poc_lsb;
}
h->prev_frame_num_offset = h->frame_num_offset;
h->prev_frame_num = h->frame_num;
h->outputed_poc = h->next_outputed_poc;
}
if (avctx->hwaccel) {
if (avctx->hwaccel->end_frame(avctx) < 0)
av_log(avctx, AV_LOG_ERROR,
"hardware accelerator failed to decode picture\n");
}
if (CONFIG_H264_VDPAU_DECODER &&
s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
ff_vdpau_h264_picture_complete(s);
/*
* FIXME: Error handling code does not seem to support interlaced
* when slices span multiple rows
* The ff_er_add_slice calls don't work right for bottom
* fields; they cause massive erroneous error concealing
* Error marking covers both fields (top and bottom).
* This causes a mismatched s->error_count
* and a bad error table. Further, the error count goes to
* INT_MAX when called for bottom field, because mb_y is
* past end by one (callers fault) and resync_mb_y != 0
* causes problems for the first MB line, too.
*/
if (!FIELD_PICTURE)
ff_er_frame_end(s);
ff_MPV_frame_end(s);
h->current_slice = 0;
return err;
}
| true | FFmpeg | 1e26a48fa23ef8e1cbc424667d387184d8155f15 |
4,658 | static void coroutine_fn backup_run(void *opaque)
{
BackupBlockJob *job = opaque;
BackupCompleteData *data;
BlockDriverState *bs = job->common.bs;
BlockDriverState *target = job->target;
BlockdevOnError on_target_error = job->on_target_error;
NotifierWithReturn before_write = {
.notify = backup_before_write_notify,
};
int64_t start, end;
int ret = 0;
QLIST_INIT(&job->inflight_reqs);
qemu_co_rwlock_init(&job->flush_rwlock);
start = 0;
end = DIV_ROUND_UP(job->common.len / BDRV_SECTOR_SIZE,
BACKUP_SECTORS_PER_CLUSTER);
job->bitmap = hbitmap_alloc(end, 0);
bdrv_set_enable_write_cache(target, true);
bdrv_set_on_error(target, on_target_error, on_target_error);
bdrv_iostatus_enable(target);
bdrv_add_before_write_notifier(bs, &before_write);
if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
while (!block_job_is_cancelled(&job->common)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
job->common.busy = false;
qemu_coroutine_yield();
job->common.busy = true;
}
} else {
/* Both FULL and TOP SYNC_MODE's require copying.. */
for (; start < end; start++) {
bool error_is_read;
if (block_job_is_cancelled(&job->common)) {
break;
}
/* we need to yield so that qemu_aio_flush() returns.
* (without, VM does not reboot)
*/
if (job->common.speed) {
uint64_t delay_ns = ratelimit_calculate_delay(
&job->limit, job->sectors_read);
job->sectors_read = 0;
block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
} else {
block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
}
if (block_job_is_cancelled(&job->common)) {
break;
}
if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
int i, n;
int alloced = 0;
/* Check to see if these blocks are already in the
* backing file. */
for (i = 0; i < BACKUP_SECTORS_PER_CLUSTER;) {
/* bdrv_is_allocated() only returns true/false based
* on the first set of sectors it comes across that
                     * are all in the same state.
* For that reason we must verify each sector in the
* backup cluster length. We end up copying more than
* needed but at some point that is always the case. */
alloced =
bdrv_is_allocated(bs,
start * BACKUP_SECTORS_PER_CLUSTER + i,
BACKUP_SECTORS_PER_CLUSTER - i, &n);
i += n;
if (alloced == 1 || n == 0) {
break;
}
}
/* If the above loop never found any sectors that are in
* the topmost image, skip this backup. */
if (alloced == 0) {
continue;
}
}
/* FULL sync mode we copy the whole drive. */
ret = backup_do_cow(bs, start * BACKUP_SECTORS_PER_CLUSTER,
BACKUP_SECTORS_PER_CLUSTER, &error_is_read);
if (ret < 0) {
/* Depending on error action, fail now or retry cluster */
BlockErrorAction action =
backup_error_action(job, error_is_read, -ret);
if (action == BLOCK_ERROR_ACTION_REPORT) {
break;
} else {
start--;
continue;
}
}
}
}
notifier_with_return_remove(&before_write);
/* wait until pending backup_do_cow() calls have completed */
qemu_co_rwlock_wrlock(&job->flush_rwlock);
qemu_co_rwlock_unlock(&job->flush_rwlock);
hbitmap_free(job->bitmap);
bdrv_iostatus_disable(target);
data = g_malloc(sizeof(*data));
data->ret = ret;
block_job_defer_to_main_loop(&job->common, backup_complete, data);
} | true | qemu | c29c1dd312f39ec18a3c6177c6da09a75e095d70 |
4,659 | static void force_sigsegv(int oldsig)
{
CPUState *cpu = thread_cpu;
CPUArchState *env = cpu->env_ptr;
target_siginfo_t info;
if (oldsig == SIGSEGV) {
/* Make sure we don't try to deliver the signal again; this will
* end up with handle_pending_signal() calling force_sig().
*/
sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
}
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
info.si_code = TARGET_SI_KERNEL;
info._sifields._kill._pid = 0;
info._sifields._kill._uid = 0;
queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}
| true | qemu | c599d4d6d6e9bfdb64e54c33a22cb26e3496b96d |
4,660 | static void vnc_update_client(void *opaque)
{
VncState *vs = opaque;
if (vs->need_update && vs->csock != -1) {
int y;
uint8_t *row;
char *old_row;
uint32_t width_mask[VNC_DIRTY_WORDS];
int n_rectangles;
int saved_offset;
int has_dirty = 0;
vga_hw_update();
vnc_set_bits(width_mask, (ds_get_width(vs->ds) / 16), VNC_DIRTY_WORDS);
/* Walk through the dirty map and eliminate tiles that
really aren't dirty */
row = ds_get_data(vs->ds);
old_row = vs->old_data;
for (y = 0; y < ds_get_height(vs->ds); y++) {
if (vnc_and_bits(vs->dirty_row[y], width_mask, VNC_DIRTY_WORDS)) {
int x;
uint8_t *ptr;
char *old_ptr;
ptr = row;
old_ptr = (char*)old_row;
for (x = 0; x < ds_get_width(vs->ds); x += 16) {
if (memcmp(old_ptr, ptr, 16 * ds_get_bytes_per_pixel(vs->ds)) == 0) {
vnc_clear_bit(vs->dirty_row[y], (x / 16));
} else {
has_dirty = 1;
memcpy(old_ptr, ptr, 16 * ds_get_bytes_per_pixel(vs->ds));
}
ptr += 16 * ds_get_bytes_per_pixel(vs->ds);
old_ptr += 16 * ds_get_bytes_per_pixel(vs->ds);
}
}
row += ds_get_linesize(vs->ds);
old_row += ds_get_linesize(vs->ds);
}
if (!has_dirty && !vs->audio_cap) {
qemu_mod_timer(vs->timer, qemu_get_clock(rt_clock) + VNC_REFRESH_INTERVAL);
return;
}
/* Count rectangles */
n_rectangles = 0;
vnc_write_u8(vs, 0); /* msg id */
vnc_write_u8(vs, 0);
saved_offset = vs->output.offset;
vnc_write_u16(vs, 0);
for (y = 0; y < vs->serverds.height; y++) {
int x;
int last_x = -1;
for (x = 0; x < vs->serverds.width / 16; x++) {
if (vnc_get_bit(vs->dirty_row[y], x)) {
if (last_x == -1) {
last_x = x;
}
vnc_clear_bit(vs->dirty_row[y], x);
} else {
if (last_x != -1) {
int h = find_dirty_height(vs, y, last_x, x);
send_framebuffer_update(vs, last_x * 16, y, (x - last_x) * 16, h);
n_rectangles++;
}
last_x = -1;
}
}
if (last_x != -1) {
int h = find_dirty_height(vs, y, last_x, x);
send_framebuffer_update(vs, last_x * 16, y, (x - last_x) * 16, h);
n_rectangles++;
}
}
vs->output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
vs->output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
vnc_flush(vs);
}
if (vs->csock != -1) {
qemu_mod_timer(vs->timer, qemu_get_clock(rt_clock) + VNC_REFRESH_INTERVAL);
}
}
| true | qemu | 6baebed7698a37a0ac5168faf26023426b0ac940 |
4,661 | static int video_open(VideoState *is)
{
int w,h;
if (screen_width) {
w = screen_width;
h = screen_height;
} else {
w = default_width;
h = default_height;
}
if (!window) {
int flags = SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE;
if (!window_title)
window_title = input_filename;
if (is_full_screen)
flags |= SDL_WINDOW_FULLSCREEN_DESKTOP;
window = SDL_CreateWindow(window_title, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, flags);
SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
if (window) {
SDL_RendererInfo info;
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
if (!renderer) {
av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
renderer = SDL_CreateRenderer(window, -1, 0);
}
if (renderer) {
if (!SDL_GetRendererInfo(renderer, &info))
av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", info.name);
}
}
} else {
SDL_SetWindowSize(window, w, h);
}
if (!window || !renderer) {
av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
do_exit(is);
}
is->width = w;
is->height = h;
return 0;
} | true | FFmpeg | 15d7e31dcb68f30ebd725c495a191d5917a3b602 |
4,662 | static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
((pixel4*)(src+0*stride))[0]=
((pixel4*)(src+1*stride))[0]=
((pixel4*)(src+2*stride))[0]=
((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))+1);
}
| true | FFmpeg | 2caf19e90f270abe1e80a3e85acaf0eb5c9d0aac |
4,663 | static int ehci_reset_queue(EHCIQueue *q)
{
int packets;
trace_usb_ehci_queue_action(q, "reset");
packets = ehci_cancel_queue(q);
q->dev = NULL;
q->qtdaddr = 0;
return packets;
} | true | qemu | bbbc39ccacf66ef58261c155f9eed503947c3023 |
4,664 | static void fill_caches(H264Context *h, int mb_type, int for_deblock){
MpegEncContext * const s = &h->s;
const int mb_xy= h->mb_xy;
int topleft_xy, top_xy, topright_xy, left_xy[2];
int topleft_type, top_type, topright_type, left_type[2];
int * left_block;
int topleft_partition= -1;
int i;
top_xy = mb_xy - (s->mb_stride << FIELD_PICTURE);
//FIXME deblocking could skip the intra and nnz parts.
if(for_deblock && (h->slice_num == 1 || h->slice_table[mb_xy] == h->slice_table[top_xy]) && !FRAME_MBAFF)
return;
/* Wow, what a mess, why didn't they simplify the interlacing & intra
* stuff, I can't imagine that these complex rules are worth it. */
topleft_xy = top_xy - 1;
topright_xy= top_xy + 1;
left_xy[1] = left_xy[0] = mb_xy-1;
left_block = left_block_options[0];
if(FRAME_MBAFF){
const int pair_xy = s->mb_x + (s->mb_y & ~1)*s->mb_stride;
const int top_pair_xy = pair_xy - s->mb_stride;
const int topleft_pair_xy = top_pair_xy - 1;
const int topright_pair_xy = top_pair_xy + 1;
const int topleft_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[topleft_pair_xy]);
const int top_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[top_pair_xy]);
const int topright_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[topright_pair_xy]);
const int left_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]);
const int curr_mb_frame_flag = !IS_INTERLACED(mb_type);
const int bottom = (s->mb_y & 1);
tprintf(s->avctx, "fill_caches: curr_mb_frame_flag:%d, left_mb_frame_flag:%d, topleft_mb_frame_flag:%d, top_mb_frame_flag:%d, topright_mb_frame_flag:%d\n", curr_mb_frame_flag, left_mb_frame_flag, topleft_mb_frame_flag, top_mb_frame_flag, topright_mb_frame_flag);
if (bottom
? !curr_mb_frame_flag // bottom macroblock
: (!curr_mb_frame_flag && !top_mb_frame_flag) // top macroblock
) {
top_xy -= s->mb_stride;
}
if (bottom
? !curr_mb_frame_flag // bottom macroblock
: (!curr_mb_frame_flag && !topleft_mb_frame_flag) // top macroblock
) {
topleft_xy -= s->mb_stride;
} else if(bottom && curr_mb_frame_flag && !left_mb_frame_flag) {
topleft_xy += s->mb_stride;
// take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition
topleft_partition = 0;
}
if (bottom
? !curr_mb_frame_flag // bottom macroblock
: (!curr_mb_frame_flag && !topright_mb_frame_flag) // top macroblock
) {
topright_xy -= s->mb_stride;
}
if (left_mb_frame_flag != curr_mb_frame_flag) {
left_xy[1] = left_xy[0] = pair_xy - 1;
if (curr_mb_frame_flag) {
if (bottom) {
left_block = left_block_options[1];
} else {
left_block= left_block_options[2];
}
} else {
left_xy[1] += s->mb_stride;
left_block = left_block_options[3];
}
}
}
h->top_mb_xy = top_xy;
h->left_mb_xy[0] = left_xy[0];
h->left_mb_xy[1] = left_xy[1];
if(for_deblock){
topleft_type = 0;
topright_type = 0;
top_type = h->slice_table[top_xy ] < 255 ? s->current_picture.mb_type[top_xy] : 0;
left_type[0] = h->slice_table[left_xy[0] ] < 255 ? s->current_picture.mb_type[left_xy[0]] : 0;
left_type[1] = h->slice_table[left_xy[1] ] < 255 ? s->current_picture.mb_type[left_xy[1]] : 0;
if(FRAME_MBAFF && !IS_INTRA(mb_type)){
int list;
for(list=0; list<h->list_count; list++){
if(USES_LIST(mb_type,list)){
int8_t *ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
*(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
*(uint32_t*)&h->ref_cache[list][scan8[ 2]] = pack16to32(ref[0],ref[1])*0x0101;
ref += h->b8_stride;
*(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
*(uint32_t*)&h->ref_cache[list][scan8[10]] = pack16to32(ref[0],ref[1])*0x0101;
}else{
fill_rectangle(&h-> mv_cache[list][scan8[ 0]], 4, 4, 8, 0, 4);
fill_rectangle(&h->ref_cache[list][scan8[ 0]], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1);
}
}
}
}else{
topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
top_type = h->slice_table[top_xy ] == h->slice_num ? s->current_picture.mb_type[top_xy] : 0;
topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0;
left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;
}
if(IS_INTRA(mb_type)){
h->topleft_samples_available=
h->top_samples_available=
h->left_samples_available= 0xFFFF;
h->topright_samples_available= 0xEEEA;
if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){
h->topleft_samples_available= 0xB3FF;
h->top_samples_available= 0x33FF;
h->topright_samples_available= 0x26EA;
}
for(i=0; i<2; i++){
if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){
h->topleft_samples_available&= 0xDF5F;
h->left_samples_available&= 0x5F5F;
}
}
if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred))
h->topleft_samples_available&= 0x7FFF;
if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred))
h->topright_samples_available&= 0xFBFF;
if(IS_INTRA4x4(mb_type)){
if(IS_INTRA4x4(top_type)){
h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4];
h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5];
h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6];
h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3];
}else{
int pred;
if(!top_type || (IS_INTER(top_type) && h->pps.constrained_intra_pred))
pred= -1;
else{
pred= 2;
}
h->intra4x4_pred_mode_cache[4+8*0]=
h->intra4x4_pred_mode_cache[5+8*0]=
h->intra4x4_pred_mode_cache[6+8*0]=
h->intra4x4_pred_mode_cache[7+8*0]= pred;
}
for(i=0; i<2; i++){
if(IS_INTRA4x4(left_type[i])){
h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]];
h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]];
}else{
int pred;
if(!left_type[i] || (IS_INTER(left_type[i]) && h->pps.constrained_intra_pred))
pred= -1;
else{
pred= 2;
}
h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred;
}
}
}
}
/*
0 . T T. T T T T
1 L . .L . . . .
2 L . .L . . . .
3 . T TL . . . .
4 L . .L . . . .
5 L . .. . . . .
*/
//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
if(top_type){
h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][4];
h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][5];
h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][6];
h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][3];
h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][9];
h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][8];
h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][12];
h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][11];
}else{
h->non_zero_count_cache[4+8*0]=
h->non_zero_count_cache[5+8*0]=
h->non_zero_count_cache[6+8*0]=
h->non_zero_count_cache[7+8*0]=
h->non_zero_count_cache[1+8*0]=
h->non_zero_count_cache[2+8*0]=
h->non_zero_count_cache[1+8*3]=
h->non_zero_count_cache[2+8*3]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
}
for (i=0; i<2; i++) {
if(left_type[i]){
h->non_zero_count_cache[3+8*1 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[0+2*i]];
h->non_zero_count_cache[3+8*2 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[1+2*i]];
h->non_zero_count_cache[0+8*1 + 8*i]= h->non_zero_count[left_xy[i]][left_block[4+2*i]];
h->non_zero_count_cache[0+8*4 + 8*i]= h->non_zero_count[left_xy[i]][left_block[5+2*i]];
}else{
h->non_zero_count_cache[3+8*1 + 2*8*i]=
h->non_zero_count_cache[3+8*2 + 2*8*i]=
h->non_zero_count_cache[0+8*1 + 8*i]=
h->non_zero_count_cache[0+8*4 + 8*i]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
}
}
if( h->pps.cabac ) {
// top_cbp
if(top_type) {
h->top_cbp = h->cbp_table[top_xy];
} else if(IS_INTRA(mb_type)) {
h->top_cbp = 0x1C0;
} else {
h->top_cbp = 0;
}
// left_cbp
if (left_type[0]) {
h->left_cbp = h->cbp_table[left_xy[0]] & 0x1f0;
} else if(IS_INTRA(mb_type)) {
h->left_cbp = 0x1C0;
} else {
h->left_cbp = 0;
}
if (left_type[0]) {
h->left_cbp |= ((h->cbp_table[left_xy[0]]>>((left_block[0]&(~1))+1))&0x1) << 1;
}
if (left_type[1]) {
h->left_cbp |= ((h->cbp_table[left_xy[1]]>>((left_block[2]&(~1))+1))&0x1) << 3;
}
}
#if 1
if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){
int list;
for(list=0; list<h->list_count; list++){
if(!USES_LIST(mb_type, list) && !IS_DIRECT(mb_type) && !h->deblocking_filter){
/*if(!h->mv_cache_clean[list]){
memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
h->mv_cache_clean[list]= 1;
}*/
continue;
}
h->mv_cache_clean[list]= 0;
if(USES_LIST(top_type, list)){
const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
*(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0];
*(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1];
*(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2];
*(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3];
h->ref_cache[list][scan8[0] + 0 - 1*8]=
h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0];
h->ref_cache[list][scan8[0] + 2 - 1*8]=
h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
}else{
*(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]=
*(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]=
*(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]=
*(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0;
*(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
}
for(i=0; i<2; i++){
int cache_idx = scan8[0] - 1 + i*2*8;
if(USES_LIST(left_type[i], list)){
const int b_xy= h->mb2b_xy[left_xy[i]] + 3;
const int b8_xy= h->mb2b8_xy[left_xy[i]] + 1;
*(uint32_t*)h->mv_cache[list][cache_idx ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]];
*(uint32_t*)h->mv_cache[list][cache_idx+8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]];
h->ref_cache[list][cache_idx ]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0+i*2]>>1)];
h->ref_cache[list][cache_idx+8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1+i*2]>>1)];
}else{
*(uint32_t*)h->mv_cache [list][cache_idx ]=
*(uint32_t*)h->mv_cache [list][cache_idx+8]= 0;
h->ref_cache[list][cache_idx ]=
h->ref_cache[list][cache_idx+8]= left_type[i] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
}
if((for_deblock || (IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)) && !FRAME_MBAFF)
continue;
if(USES_LIST(topleft_type, list)){
const int b_xy = h->mb2b_xy[topleft_xy] + 3 + h->b_stride + (topleft_partition & 2*h->b_stride);
const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + (topleft_partition & h->b8_stride);
*(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
}else{
*(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
if(USES_LIST(topright_type, list)){
const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
*(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
}else{
*(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
if((IS_SKIP(mb_type) || IS_DIRECT(mb_type)) && !FRAME_MBAFF)
continue;
h->ref_cache[list][scan8[5 ]+1] =
h->ref_cache[list][scan8[7 ]+1] =
h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewhere else)
h->ref_cache[list][scan8[4 ]] =
h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
*(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
*(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
*(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
*(uint32_t*)h->mv_cache [list][scan8[4 ]]=
*(uint32_t*)h->mv_cache [list][scan8[12]]= 0;
if( h->pps.cabac ) {
/* XXX beurk, Load mvd */
if(USES_LIST(top_type, list)){
const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
*(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0];
*(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1];
*(uint32_t*)h->mvd_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 2];
*(uint32_t*)h->mvd_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 3];
}else{
*(uint32_t*)h->mvd_cache [list][scan8[0] + 0 - 1*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] + 1 - 1*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0;
}
if(USES_LIST(left_type[0], list)){
const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
}else{
*(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
}
if(USES_LIST(left_type[1], list)){
const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
}else{
*(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
}
*(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
*(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
*(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
*(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
*(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
if(h->slice_type_nos == FF_B_TYPE){
fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, 0, 1);
if(IS_DIRECT(top_type)){
*(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101;
}else if(IS_8X8(top_type)){
int b8_xy = h->mb2b8_xy[top_xy] + h->b8_stride;
h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy];
h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 1];
}else{
*(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0;
}
if(IS_DIRECT(left_type[0]))
h->direct_cache[scan8[0] - 1 + 0*8]= 1;
else if(IS_8X8(left_type[0]))
h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[h->mb2b8_xy[left_xy[0]] + 1 + h->b8_stride*(left_block[0]>>1)];
else
h->direct_cache[scan8[0] - 1 + 0*8]= 0;
if(IS_DIRECT(left_type[1]))
h->direct_cache[scan8[0] - 1 + 2*8]= 1;
else if(IS_8X8(left_type[1]))
h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[h->mb2b8_xy[left_xy[1]] + 1 + h->b8_stride*(left_block[2]>>1)];
else
h->direct_cache[scan8[0] - 1 + 2*8]= 0;
}
}
if(FRAME_MBAFF){
#define MAP_MVS\
MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
MAP_F2F(scan8[0] - 1 + 0*8, left_type[0])\
MAP_F2F(scan8[0] - 1 + 1*8, left_type[0])\
MAP_F2F(scan8[0] - 1 + 2*8, left_type[1])\
MAP_F2F(scan8[0] - 1 + 3*8, left_type[1])
if(MB_FIELD){
#define MAP_F2F(idx, mb_type)\
if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
h->ref_cache[list][idx] <<= 1;\
h->mv_cache[list][idx][1] /= 2;\
h->mvd_cache[list][idx][1] /= 2;\
}
MAP_MVS
#undef MAP_F2F
}else{
#define MAP_F2F(idx, mb_type)\
if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
h->ref_cache[list][idx] >>= 1;\
h->mv_cache[list][idx][1] <<= 1;\
h->mvd_cache[list][idx][1] <<= 1;\
}
MAP_MVS
#undef MAP_F2F
}
}
}
}
#endif
h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[0]);
}
| true | FFmpeg | 0281d32550639c806b7eccd2b17cc5d125e4253d |
4,666 | static void lsi_scsi_init(PCIDevice *dev)
{
LSIState *s = (LSIState *)dev;
uint8_t *pci_conf;
pci_conf = s->pci_dev.config;
/* PCI Vendor ID (word) */
pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_LSI_LOGIC);
/* PCI device ID (word) */
pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_LSI_53C895A);
/* PCI base class code */
pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_SCSI);
/* PCI subsystem ID */
pci_conf[0x2e] = 0x00;
pci_conf[0x2f] = 0x10;
/* PCI latency timer = 255 */
pci_conf[0x0d] = 0xff;
/* Interrupt pin 1 */
pci_conf[0x3d] = 0x01;
s->mmio_io_addr = cpu_register_io_memory(lsi_mmio_readfn,
lsi_mmio_writefn, s);
s->ram_io_addr = cpu_register_io_memory(lsi_ram_readfn,
lsi_ram_writefn, s);
pci_register_bar((struct PCIDevice *)s, 0, 256,
PCI_ADDRESS_SPACE_IO, lsi_io_mapfunc);
pci_register_bar((struct PCIDevice *)s, 1, 0x400,
PCI_ADDRESS_SPACE_MEM, lsi_mmio_mapfunc);
pci_register_bar((struct PCIDevice *)s, 2, 0x2000,
PCI_ADDRESS_SPACE_MEM, lsi_ram_mapfunc);
s->queue = qemu_malloc(sizeof(lsi_queue));
s->queue_len = 1;
s->active_commands = 0;
s->pci_dev.unregister = lsi_scsi_uninit;
lsi_soft_reset(s);
scsi_bus_new(&dev->qdev, lsi_scsi_attach);
} | true | qemu | 777aec7ac91b1306d77897aafc8097a87bf4a672 |
4,667 | int ff_j2k_init_component(J2kComponent *comp, J2kCodingStyle *codsty, J2kQuantStyle *qntsty, int cbps, int dx, int dy)
{
int reslevelno, bandno, gbandno = 0, ret, i, j, csize = 1;
if (ret=ff_j2k_dwt_init(&comp->dwt, comp->coord, codsty->nreslevels-1, codsty->transform))
return ret;
for (i = 0; i < 2; i++)
csize *= comp->coord[i][1] - comp->coord[i][0];
comp->data = av_malloc(csize * sizeof(int));
if (!comp->data)
return AVERROR(ENOMEM);
comp->reslevel = av_malloc(codsty->nreslevels * sizeof(J2kResLevel));
if (!comp->reslevel)
return AVERROR(ENOMEM);
for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
int declvl = codsty->nreslevels - reslevelno;
J2kResLevel *reslevel = comp->reslevel + reslevelno;
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
reslevel->coord[i][j] =
ff_j2k_ceildivpow2(comp->coord[i][j], declvl - 1);
if (reslevelno == 0)
reslevel->nbands = 1;
else
reslevel->nbands = 3;
if (reslevel->coord[0][1] == reslevel->coord[0][0])
reslevel->num_precincts_x = 0;
else
reslevel->num_precincts_x = ff_j2k_ceildivpow2(reslevel->coord[0][1], codsty->log2_prec_width)
- (reslevel->coord[0][0] >> codsty->log2_prec_width);
if (reslevel->coord[1][1] == reslevel->coord[1][0])
reslevel->num_precincts_y = 0;
else
reslevel->num_precincts_y = ff_j2k_ceildivpow2(reslevel->coord[1][1], codsty->log2_prec_height)
- (reslevel->coord[1][0] >> codsty->log2_prec_height);
reslevel->band = av_malloc(reslevel->nbands * sizeof(J2kBand));
if (!reslevel->band)
return AVERROR(ENOMEM);
for (bandno = 0; bandno < reslevel->nbands; bandno++, gbandno++){
J2kBand *band = reslevel->band + bandno;
int cblkno, precx, precy, precno;
int x0, y0, x1, y1;
int xi0, yi0, xi1, yi1;
int cblkperprecw, cblkperprech;
if (qntsty->quantsty != J2K_QSTY_NONE){
static const uint8_t lut_gain[2][4] = {{0, 0, 0, 0}, {0, 1, 1, 2}};
int numbps;
numbps = cbps + lut_gain[codsty->transform][bandno + reslevelno>0];
band->stepsize = SHL(2048 + qntsty->mant[gbandno], 2 + numbps - qntsty->expn[gbandno]);
} else
band->stepsize = 1 << 13;
if (reslevelno == 0){ // the same everywhere
band->codeblock_width = 1 << FFMIN(codsty->log2_cblk_width, codsty->log2_prec_width-1);
band->codeblock_height = 1 << FFMIN(codsty->log2_cblk_height, codsty->log2_prec_height-1);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
band->coord[i][j] = ff_j2k_ceildivpow2(comp->coord[i][j], declvl-1);
} else{
band->codeblock_width = 1 << FFMIN(codsty->log2_cblk_width, codsty->log2_prec_width);
band->codeblock_height = 1 << FFMIN(codsty->log2_cblk_height, codsty->log2_prec_height);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
band->coord[i][j] = ff_j2k_ceildivpow2(comp->coord[i][j] - (((bandno+1>>i)&1) << declvl-1), declvl);
}
band->cblknx = ff_j2k_ceildiv(band->coord[0][1], band->codeblock_width) - band->coord[0][0] / band->codeblock_width;
band->cblkny = ff_j2k_ceildiv(band->coord[1][1], band->codeblock_height) - band->coord[1][0] / band->codeblock_height;
for (j = 0; j < 2; j++)
band->coord[0][j] = ff_j2k_ceildiv(band->coord[0][j], dx);
for (j = 0; j < 2; j++)
band->coord[1][j] = ff_j2k_ceildiv(band->coord[1][j], dy);
band->cblknx = ff_j2k_ceildiv(band->cblknx, dx);
band->cblkny = ff_j2k_ceildiv(band->cblkny, dy);
band->cblk = av_malloc(band->cblknx * band->cblkny * sizeof(J2kCblk));
if (!band->cblk)
return AVERROR(ENOMEM);
band->prec = av_malloc(reslevel->num_precincts_x * reslevel->num_precincts_y * sizeof(J2kPrec));
if (!band->prec)
return AVERROR(ENOMEM);
for (cblkno = 0; cblkno < band->cblknx * band->cblkny; cblkno++){
J2kCblk *cblk = band->cblk + cblkno;
cblk->zero = 0;
cblk->lblock = 3;
cblk->length = 0;
cblk->lengthinc = 0;
cblk->npasses = 0;
}
y0 = band->coord[1][0];
y1 = ((band->coord[1][0] + (1<<codsty->log2_prec_height)) & ~((1<<codsty->log2_prec_height)-1)) - y0;
yi0 = 0;
yi1 = ff_j2k_ceildivpow2(y1 - y0, codsty->log2_cblk_height) << codsty->log2_cblk_height;
yi1 = FFMIN(yi1, band->cblkny);
cblkperprech = 1<<(codsty->log2_prec_height - codsty->log2_cblk_height);
for (precy = 0, precno = 0; precy < reslevel->num_precincts_y; precy++){
for (precx = 0; precx < reslevel->num_precincts_x; precx++, precno++){
band->prec[precno].yi0 = yi0;
band->prec[precno].yi1 = yi1;
}
yi1 += cblkperprech;
yi0 = yi1 - cblkperprech;
yi1 = FFMIN(yi1, band->cblkny);
}
x0 = band->coord[0][0];
x1 = ((band->coord[0][0] + (1<<codsty->log2_prec_width)) & ~((1<<codsty->log2_prec_width)-1)) - x0;
xi0 = 0;
xi1 = ff_j2k_ceildivpow2(x1 - x0, codsty->log2_cblk_width) << codsty->log2_cblk_width;
xi1 = FFMIN(xi1, band->cblknx);
cblkperprecw = 1<<(codsty->log2_prec_width - codsty->log2_cblk_width);
for (precx = 0, precno = 0; precx < reslevel->num_precincts_x; precx++){
for (precy = 0; precy < reslevel->num_precincts_y; precy++, precno = 0){
J2kPrec *prec = band->prec + precno;
prec->xi0 = xi0;
prec->xi1 = xi1;
prec->cblkincl = ff_j2k_tag_tree_init(prec->xi1 - prec->xi0,
prec->yi1 - prec->yi0);
prec->zerobits = ff_j2k_tag_tree_init(prec->xi1 - prec->xi0,
prec->yi1 - prec->yi0);
if (!prec->cblkincl || !prec->zerobits)
return AVERROR(ENOMEM);
}
xi1 += cblkperprecw;
xi0 = xi1 - cblkperprecw;
xi1 = FFMIN(xi1, band->cblknx);
}
}
}
return 0;
}
| true | FFmpeg | 45ae9a8fc903d5b5041a5d34015aa98ab2bc12be |
4,669 | void vncws_tls_handshake_io(void *opaque)
{
VncState *vs = (VncState *)opaque;
if (!vs->tls.session) {
VNC_DEBUG("TLS Websocket setup\n");
if (vnc_tls_client_setup(vs, vs->vd->tls.x509cert != NULL) < 0) {
return;
}
}
VNC_DEBUG("Handshake IO continue\n");
vncws_start_tls_handshake(vs);
}
| true | qemu | 3e305e4a4752f70c0b5c3cf5b43ec957881714f7 |
4,670 | int css_do_rsch(SubchDev *sch)
{
SCSW *s = &sch->curr_status.scsw;
PMCW *p = &sch->curr_status.pmcw;
int ret;
if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
ret = -EINPROGRESS;
goto out;
}
if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
(s->ctrl & SCSW_ACTL_RESUME_PEND) ||
(!(s->ctrl & SCSW_ACTL_SUSP))) {
ret = -EINVAL;
goto out;
}
/* If monitoring is active, update counter. */
if (channel_subsys.chnmon_active) {
css_update_chnmon(sch);
}
s->ctrl |= SCSW_ACTL_RESUME_PEND;
do_subchannel_work(sch);
ret = 0;
out:
return ret;
}
| true | qemu | 66dc50f7057b9a0191f54e55764412202306858d |
4,671 | ram_addr_t ppc4xx_sdram_adjust(ram_addr_t ram_size, int nr_banks,
MemoryRegion ram_memories[],
hwaddr ram_bases[],
hwaddr ram_sizes[],
const unsigned int sdram_bank_sizes[])
{
ram_addr_t size_left = ram_size;
ram_addr_t base = 0;
int i;
int j;
for (i = 0; i < nr_banks; i++) {
for (j = 0; sdram_bank_sizes[j] != 0; j++) {
unsigned int bank_size = sdram_bank_sizes[j];
if (bank_size <= size_left) {
char name[32];
snprintf(name, sizeof(name), "ppc4xx.sdram%d", i);
memory_region_allocate_system_memory(&ram_memories[i], NULL,
name, bank_size);
ram_bases[i] = base;
ram_sizes[i] = bank_size;
base += bank_size;
size_left -= bank_size;
break;
}
}
if (!size_left) {
/* No need to use the remaining banks. */
break;
}
}
ram_size -= size_left;
if (size_left)
printf("Truncating memory to %d MiB to fit SDRAM controller limits.\n",
(int)(ram_size >> 20));
return ram_size;
}
| true | qemu | e206ad48333c50373663945746828fc893b50700 |
4,672 | static int mxf_read_cryptographic_context(void *arg, AVIOContext *pb, int tag, int size, UID uid)
{
MXFCryptoContext *cryptocontext = arg;
if (size != 16)
return -1;
if (IS_KLV_KEY(uid, mxf_crypto_source_container_ul))
avio_read(pb, cryptocontext->source_container_ul, 16);
return 0;
}
| true | FFmpeg | fd34dbea58e097609ff09cf7dcc59f74930195d3 |
4,673 | void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
{
int bin, blk, gs;
int end_bap, gaq_mode;
GetBitContext *gbc = &s->gbc;
int gaq_gain[AC3_MAX_COEFS];
gaq_mode = get_bits(gbc, 2);
end_bap = (gaq_mode < 2) ? 12 : 17;
/* if GAQ gain is used, decode gain codes for bins with hebap between
8 and end_bap */
gs = 0;
if (gaq_mode == EAC3_GAQ_12 || gaq_mode == EAC3_GAQ_14) {
/* read 1-bit GAQ gain codes */
for (bin = s->start_freq[ch]; bin < s->end_freq[ch]; bin++) {
if (s->bap[ch][bin] > 7 && s->bap[ch][bin] < end_bap)
gaq_gain[gs++] = get_bits1(gbc) << (gaq_mode-1);
}
} else if (gaq_mode == EAC3_GAQ_124) {
/* read 1.67-bit GAQ gain codes (3 codes in 5 bits) */
int gc = 2;
for (bin = s->start_freq[ch]; bin < s->end_freq[ch]; bin++) {
if (s->bap[ch][bin] > 7 && s->bap[ch][bin] < 17) {
if (gc++ == 2) {
int group_code = get_bits(gbc, 5);
if (group_code > 26) {
av_log(s->avctx, AV_LOG_WARNING, "GAQ gain group code out-of-range\n");
group_code = 26;
}
gaq_gain[gs++] = ff_ac3_ungroup_3_in_5_bits_tab[group_code][0];
gaq_gain[gs++] = ff_ac3_ungroup_3_in_5_bits_tab[group_code][1];
gaq_gain[gs++] = ff_ac3_ungroup_3_in_5_bits_tab[group_code][2];
gc = 0;
}
}
}
}
gs=0;
for (bin = s->start_freq[ch]; bin < s->end_freq[ch]; bin++) {
int hebap = s->bap[ch][bin];
int bits = ff_eac3_bits_vs_hebap[hebap];
if (!hebap) {
/* zero-mantissa dithering */
for (blk = 0; blk < 6; blk++) {
s->pre_mantissa[ch][bin][blk] = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000;
}
} else if (hebap < 8) {
/* Vector Quantization */
int v = get_bits(gbc, bits);
for (blk = 0; blk < 6; blk++) {
s->pre_mantissa[ch][bin][blk] = ff_eac3_mantissa_vq[hebap][v][blk] << 8;
}
} else {
/* Gain Adaptive Quantization */
int gbits, log_gain;
if (gaq_mode != EAC3_GAQ_NO && hebap < end_bap) {
log_gain = gaq_gain[gs++];
} else {
log_gain = 0;
}
gbits = bits - log_gain;
for (blk = 0; blk < 6; blk++) {
int mant = get_sbits(gbc, gbits);
if (log_gain && mant == -(1 << (gbits-1))) {
/* large mantissa */
int b;
int mbits = bits - (2 - log_gain);
mant = get_sbits(gbc, mbits);
mant <<= (23 - (mbits - 1));
/* remap mantissa value to correct for asymmetric quantization */
if (mant >= 0)
b = 1 << (23 - log_gain);
else
b = ff_eac3_gaq_remap_2_4_b[hebap-8][log_gain-1] << 8;
mant += ((ff_eac3_gaq_remap_2_4_a[hebap-8][log_gain-1] * (int64_t)mant) >> 15) + b;
} else {
/* small mantissa, no GAQ, or Gk=1 */
mant <<= 24 - bits;
if (!log_gain) {
/* remap mantissa value for no GAQ or Gk=1 */
mant += (ff_eac3_gaq_remap_1[hebap-8] * (int64_t)mant) >> 15;
}
}
s->pre_mantissa[ch][bin][blk] = mant;
}
}
idct6(s->pre_mantissa[ch][bin]);
}
}
| true | FFmpeg | 7b05b5093ea67a3397b0c37cf398bab471e1ce2b |
4,675 | void unregister_displaychangelistener(DisplayChangeListener *dcl)
{
DisplayState *ds = dcl->ds;
trace_displaychangelistener_unregister(dcl, dcl->ops->dpy_name);
if (dcl->con) {
dcl->con->dcls--;
}
QLIST_REMOVE(dcl, next);
gui_setup_refresh(ds);
} | true | qemu | 777c5f1e436d334a57b650b6951c13d8d2799df0 |
4,677 | int avpicture_layout(const AVPicture* src, enum AVPixelFormat pix_fmt,
int width, int height,
unsigned char *dest, int dest_size)
{
int i, j, nb_planes = 0, linesizes[4];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int size = avpicture_get_size(pix_fmt, width, height);
if (size > dest_size || size < 0)
return AVERROR(EINVAL);
for (i = 0; i < desc->nb_components; i++)
nb_planes = FFMAX(desc->comp[i].plane, nb_planes);
nb_planes++;
av_image_fill_linesizes(linesizes, pix_fmt, width);
for (i = 0; i < nb_planes; i++) {
int h, shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
const unsigned char *s = src->data[i];
h = (height + (1 << shift) - 1) >> shift;
for (j = 0; j < h; j++) {
memcpy(dest, s, linesizes[i]);
dest += linesizes[i];
s += src->linesize[i];
}
}
if (desc->flags & AV_PIX_FMT_FLAG_PAL)
memcpy((unsigned char *)(((size_t)dest + 3) & ~3),
src->data[1], 256 * 4);
return size;
}
| false | FFmpeg | e2ad0b66fa273c5c823978e8f601f2c0d9ee42f8 |
4,678 | static int pipe_open(URLContext *h, const char *filename, int flags)
{
int fd;
if (flags & URL_WRONLY) {
fd = 1;
} else {
fd = 0;
}
#if defined(__MINGW32__) || defined(CONFIG_OS2) || defined(__CYGWIN__)
setmode(fd, O_BINARY);
#endif
h->priv_data = (void *)(size_t)fd;
h->is_streamed = 1;
return 0;
}
| false | FFmpeg | 05d00e953f4cc08273fbb5f795f4fdc307140108 |