Columns (one record per row):
id: int32 (values range from 0 to 27.3k)
func: string (length 26 to 142k)
target: bool (2 classes)
project: string (2 values)
commit_id: string (length 40)
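The columns above describe one record per function: the C source in func, a binary label in target, and the originating project and commit. As a minimal sketch of how such records could be inspected (assuming the rows are available locally as JSON Lines; the file name defect_data.jsonl and the use of pandas are illustrative assumptions, not part of this dump):

import pandas as pd

# Hypothetical file name; this dump does not state where the rows are stored.
df = pd.read_json("defect_data.jsonl", lines=True)

# Columns should match the schema listed above.
assert set(df.columns) >= {"id", "func", "target", "project", "commit_id"}

# Label balance per project (target is a boolean label, so the mean is the positive rate).
print(df.groupby("project")["target"].mean())

# Look at one record: its commit hash, label, and the start of the function text.
row = df.iloc[0]
print(row["commit_id"], row["target"])
print(row["func"][:200])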
7,589
static QDictEntry *qdict_find(const QDict *qdict, const char *key, unsigned int hash) { QDictEntry *entry; LIST_FOREACH(entry, &qdict->table[hash], next) if (!strcmp(entry->key, key)) return entry; return NULL; }
false
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
7,591
int css_do_tsch(SubchDev *sch, IRB *target_irb) { SCSW *s = &sch->curr_status.scsw; PMCW *p = &sch->curr_status.pmcw; uint16_t stctl; uint16_t fctl; uint16_t actl; IRB irb; int ret; if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { ret = 3; goto out; } stctl = s->ctrl & SCSW_CTRL_MASK_STCTL; fctl = s->ctrl & SCSW_CTRL_MASK_FCTL; actl = s->ctrl & SCSW_CTRL_MASK_ACTL; /* Prepare the irb for the guest. */ memset(&irb, 0, sizeof(IRB)); /* Copy scsw from current status. */ memcpy(&irb.scsw, s, sizeof(SCSW)); if (stctl & SCSW_STCTL_STATUS_PEND) { if (s->cstat & (SCSW_CSTAT_DATA_CHECK | SCSW_CSTAT_CHN_CTRL_CHK | SCSW_CSTAT_INTF_CTRL_CHK)) { irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF; irb.esw[0] = 0x04804000; } else { irb.esw[0] = 0x00800000; } /* If a unit check is pending, copy sense data. */ if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) && (p->chars & PMCW_CHARS_MASK_CSENSE)) { irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL; memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data)); irb.esw[1] = 0x02000000 | (sizeof(sch->sense_data) << 8); } } /* Store the irb to the guest. */ copy_irb_to_guest(target_irb, &irb); /* Clear conditions on subchannel, if applicable. */ if (stctl & SCSW_STCTL_STATUS_PEND) { s->ctrl &= ~SCSW_CTRL_MASK_STCTL; if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) || ((fctl & SCSW_FCTL_HALT_FUNC) && (actl & SCSW_ACTL_SUSP))) { s->ctrl &= ~SCSW_CTRL_MASK_FCTL; } if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) { s->flags &= ~SCSW_FLAGS_MASK_PNO; s->ctrl &= ~(SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_HALT_PEND | SCSW_ACTL_CLEAR_PEND | SCSW_ACTL_SUSP); } else { if ((actl & SCSW_ACTL_SUSP) && (fctl & SCSW_FCTL_START_FUNC)) { s->flags &= ~SCSW_FLAGS_MASK_PNO; if (fctl & SCSW_FCTL_HALT_FUNC) { s->ctrl &= ~(SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_HALT_PEND | SCSW_ACTL_CLEAR_PEND | SCSW_ACTL_SUSP); } else { s->ctrl &= ~SCSW_ACTL_RESUME_PEND; } } } /* Clear pending sense data. */ if (p->chars & PMCW_CHARS_MASK_CSENSE) { memset(sch->sense_data, 0 , sizeof(sch->sense_data)); } } ret = ((stctl & SCSW_STCTL_STATUS_PEND) == 0); out: return ret; }
true
qemu
8312976e73fce9689ab831c1da565ec413680cff
7,592
int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { Mpeg4DecContext *ctx = avctx->priv_data; MpegEncContext *s = &ctx->m; /* divx 5.01+ bitstream reorder stuff */ /* Since this clobbers the input buffer and hwaccel codecs still need the * data during hwaccel->end_frame we should not do this any earlier */ if (s->divx_packed) { int current_pos = s->gb.buffer == s->bitstream_buffer ? 0 : (get_bits_count(&s->gb) >> 3); int startcode_found = 0; if (buf_size - current_pos > 7) { int i; for (i = current_pos; i < buf_size - 4; i++) if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1 && buf[i + 3] == 0xB6) { startcode_found = !(buf[i + 4] & 0x40); break; } } if (startcode_found) { av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE); if (!s->bitstream_buffer) return AVERROR(ENOMEM); memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); s->bitstream_buffer_size = buf_size - current_pos; } } return 0; }
true
FFmpeg
21b25537fb8f77b098575e90d8b24556451badf3
7,593
static void pxb_host_class_init(ObjectClass *class, void *data) { DeviceClass *dc = DEVICE_CLASS(class); SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(class); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(class); dc->fw_name = "pci"; sbc->explicit_ofw_unit_address = pxb_host_ofw_unit_address; hc->root_bus_path = pxb_host_root_bus_path; }
true
qemu
bf8d492405feaee2c1685b3b9d5e03228ed3e47f
7,594
static int vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { if (arm_feature(env, ARM_FEATURE_LPAE)) { value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); } else { value &= 7; } /* Note that we always calculate c2_mask and c2_base_mask, but * they are only used for short-descriptor tables (ie if EAE is 0); * for long-descriptor tables the TTBCR fields are used differently * and the c2_mask and c2_base_mask values are meaningless. */ env->cp15.c2_control = value; env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> value); env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> value); return 0; }
true
qemu
2ebcebe262e88111ff583f97bc5fe0aae64b8940
7,595
av_cold int ff_dcaadpcm_init(DCAADPCMEncContext *s) { if (!s) return -1; s->private_data = av_malloc(sizeof(premultiplied_coeffs) * DCA_ADPCM_VQCODEBOOK_SZ); precalc(s->private_data); return 0; }
true
FFmpeg
34fb84a97d112d85091369e9ef9ce177a05644e9
7,597
static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { }
true
FFmpeg
06bf6d3bc04979bd39ecdc7311d0daf8aee7e10f
7,599
static int dirac_unpack_idwt_params(DiracContext *s) { GetBitContext *gb = &s->gb; int i, level; unsigned tmp; #define CHECKEDREAD(dst, cond, errmsg) \ tmp = svq3_get_ue_golomb(gb); \ if (cond) { \ av_log(s->avctx, AV_LOG_ERROR, errmsg); \ return -1; \ }\ dst = tmp; align_get_bits(gb); s->zero_res = s->num_refs ? get_bits1(gb) : 0; if (s->zero_res) return 0; /*[DIRAC_STD] 11.3.1 Transform parameters. transform_parameters() */ CHECKEDREAD(s->wavelet_idx, tmp > 6, "wavelet_idx is too big\n") CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, "invalid number of DWT decompositions\n") if (!s->low_delay) { /* Codeblock parameters (core syntax only) */ if (get_bits1(gb)) { for (i = 0; i <= s->wavelet_depth; i++) { CHECKEDREAD(s->codeblock[i].width , tmp < 1, "codeblock width invalid\n") CHECKEDREAD(s->codeblock[i].height, tmp < 1, "codeblock height invalid\n") } CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n") } else for (i = 0; i <= s->wavelet_depth; i++) s->codeblock[i].width = s->codeblock[i].height = 1; } else { /* Slice parameters + quantization matrix*/ /*[DIRAC_STD] 11.3.4 Slice coding Parameters (low delay syntax only). slice_parameters() */ s->lowdelay.num_x = svq3_get_ue_golomb(gb); s->lowdelay.num_y = svq3_get_ue_golomb(gb); s->lowdelay.bytes.num = svq3_get_ue_golomb(gb); s->lowdelay.bytes.den = svq3_get_ue_golomb(gb); if (s->lowdelay.bytes.den <= 0) { av_log(s->avctx,AV_LOG_ERROR,"Invalid lowdelay.bytes.den\n"); return AVERROR_INVALIDDATA; } /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */ if (get_bits1(gb)) { av_log(s->avctx,AV_LOG_DEBUG,"Low Delay: Has Custom Quantization Matrix!\n"); /* custom quantization matrix */ s->lowdelay.quant[0][0] = svq3_get_ue_golomb(gb); for (level = 0; level < s->wavelet_depth; level++) { s->lowdelay.quant[level][1] = svq3_get_ue_golomb(gb); s->lowdelay.quant[level][2] = svq3_get_ue_golomb(gb); s->lowdelay.quant[level][3] = svq3_get_ue_golomb(gb); } } else { if (s->wavelet_depth > 4) { av_log(s->avctx,AV_LOG_ERROR,"Mandatory custom low delay matrix missing for depth %d\n", s->wavelet_depth); return AVERROR_INVALIDDATA; } /* default quantization matrix */ for (level = 0; level < s->wavelet_depth; level++) for (i = 0; i < 4; i++) { s->lowdelay.quant[level][i] = default_qmat[s->wavelet_idx][level][i]; /* haar with no shift differs for different depths */ if (s->wavelet_idx == 3) s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level); } } } return 0; }
false
FFmpeg
5145d22b88b9835db81c4d286b931a78e08ab76a
7,600
static int yop_probe(AVProbeData *probe_packet) { if (AV_RB16(probe_packet->buf) == AV_RB16("YO") && probe_packet->buf[6] && probe_packet->buf[7] && !(probe_packet->buf[8] & 1) && !(probe_packet->buf[10] & 1)) return AVPROBE_SCORE_MAX * 3 / 4; return 0; }
false
FFmpeg
76170f537304cc845d6d334d36daa0a0f16efb32
7,601
av_cold int ff_vp8_decode_init(AVCodecContext *avctx) { VP8Context *s = avctx->priv_data; int ret; s->avctx = avctx; avctx->pix_fmt = AV_PIX_FMT_YUV420P; avctx->internal->allocate_progress = 1; ff_videodsp_init(&s->vdsp, 8); ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP8, 8, 1); ff_vp8dsp_init(&s->vp8dsp); if ((ret = vp8_init_frames(s)) < 0) { ff_vp8_decode_free(avctx); return ret; } return 0; }
false
FFmpeg
b8664c929437d6d079e16979c496a2db40cf2324
7,602
static int decode_unk6(uint8_t *frame, int width, int height, const uint8_t *src, const uint8_t *src_end) { return -1; }
true
FFmpeg
29b0d94b43ac960cb442049a5d737a3386ff0337
7,603
int ga_install_service(const char *path, const char *logfile, const char *state_dir) { int ret = EXIT_FAILURE; SC_HANDLE manager; SC_HANDLE service; TCHAR module_fname[MAX_PATH]; GString *cmdline; SERVICE_DESCRIPTION desc = { (char *)QGA_SERVICE_DESCRIPTION }; if (GetModuleFileName(NULL, module_fname, MAX_PATH) == 0) { printf_win_error("No full path to service's executable"); return EXIT_FAILURE; } cmdline = g_string_new(module_fname); g_string_append(cmdline, " -d"); if (path) { g_string_append_printf(cmdline, " -p %s", path); } if (logfile) { g_string_append_printf(cmdline, " -l %s -v", logfile); } if (state_dir) { g_string_append_printf(cmdline, " -t %s", state_dir); } g_debug("service's cmdline: %s", cmdline->str); manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); if (manager == NULL) { printf_win_error("No handle to service control manager"); goto out_strings; } service = CreateService(manager, QGA_SERVICE_NAME, QGA_SERVICE_DISPLAY_NAME, SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS, SERVICE_AUTO_START, SERVICE_ERROR_NORMAL, cmdline->str, NULL, NULL, NULL, NULL, NULL); if (service == NULL) { printf_win_error("Failed to install service"); goto out_manager; } ChangeServiceConfig2(service, SERVICE_CONFIG_DESCRIPTION, &desc); fprintf(stderr, "Service was installed successfully.\n"); ret = EXIT_SUCCESS; CloseServiceHandle(service); out_manager: CloseServiceHandle(manager); out_strings: g_string_free(cmdline, TRUE); return ret; }
true
qemu
340d51df5592c5c11fc3885f7bdedbe581b87366
7,604
static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque) { CURLState *s = ((CURLState*)opaque); size_t realsize = size * nmemb; int i; DPRINTF("CURL: Just reading %zd bytes\n", realsize); if (!s || !s->orig_buf) goto read_end; memcpy(s->orig_buf + s->buf_off, ptr, realsize); s->buf_off += realsize; for(i=0; i<CURL_NUM_ACB; i++) { CURLAIOCB *acb = s->acb[i]; if (!acb) continue; if ((s->buf_off >= acb->end)) { qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start, acb->end - acb->start); acb->common.cb(acb->common.opaque, 0); qemu_aio_release(acb); s->acb[i] = NULL; } } read_end: return realsize; }
true
qemu
6d4b9e55fc625514a38d27cff4b9933f617fa7dc
7,605
void qemu_sem_post(QemuSemaphore *sem) { int rc; #if defined(__APPLE__) || defined(__NetBSD__) pthread_mutex_lock(&sem->lock); if (sem->count == INT_MAX) { rc = EINVAL; } else if (sem->count++ < 0) { rc = pthread_cond_signal(&sem->cond); } else { rc = 0; } pthread_mutex_unlock(&sem->lock); if (rc != 0) { error_exit(rc, __func__); } #else rc = sem_post(&sem->sem); if (rc < 0) { error_exit(errno, __func__); } #endif }
true
qemu
79761c6681f0d1cc1c027116fcb4382d41ed3ece
7,606
int scsi_convert_sense(uint8_t *in_buf, int in_len, uint8_t *buf, int len, bool fixed) { SCSISense sense; bool fixed_in; fixed_in = (in_buf[0] & 2) == 0; if (in_len && fixed == fixed_in) { memcpy(buf, in_buf, MIN(len, in_len)); return MIN(len, in_len); } if (in_len == 0) { sense = SENSE_CODE(NO_SENSE); } else { sense = scsi_parse_sense_buf(in_buf, in_len); } return scsi_build_sense_buf(buf, len, sense, fixed); }
true
qemu
2770c90d432b571cab718e28f838097f0b2201ec
7,607
static int encode_codebook(CinepakEncContext *s, int *codebook, int size, int chunk_type_yuv, int chunk_type_gray, unsigned char *buf) { int x, y, ret, entry_size = s->pix_fmt == AV_PIX_FMT_YUV420P ? 6 : 4; ret = write_chunk_header(buf, s->pix_fmt == AV_PIX_FMT_YUV420P ? chunk_type_yuv : chunk_type_gray, entry_size * size); for(x = 0; x < size; x++) for(y = 0; y < entry_size; y++) buf[ret++] = codebook[y + x*entry_size] ^ (y >= 4 ? 0x80 : 0); return ret; }
true
FFmpeg
7da9f4523159670d577a2808d4481e64008a8894
7,608
static int ir2_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; Ir2Context * const s = avctx->priv_data; AVFrame *picture = data; AVFrame * const p= (AVFrame*)&s->picture; int start; if(p->data[0]) avctx->release_buffer(avctx, p); p->reference = 1; p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, p)) { av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } s->decode_delta = buf[18]; /* decide whether frame uses deltas or not */ #ifndef ALT_BITSTREAM_READER_LE for (i = 0; i < buf_size; i++) buf[i] = av_reverse[buf[i]]; #endif start = 48; /* hardcoded for now */ init_get_bits(&s->gb, buf + start, buf_size - start); if (s->decode_delta) { /* intraframe */ ir2_decode_plane(s, avctx->width, avctx->height, s->picture.data[0], s->picture.linesize[0], ir2_luma_table); /* swapped U and V */ ir2_decode_plane(s, avctx->width >> 2, avctx->height >> 2, s->picture.data[2], s->picture.linesize[2], ir2_luma_table); ir2_decode_plane(s, avctx->width >> 2, avctx->height >> 2, s->picture.data[1], s->picture.linesize[1], ir2_luma_table); } else { /* interframe */ ir2_decode_plane_inter(s, avctx->width, avctx->height, s->picture.data[0], s->picture.linesize[0], ir2_luma_table); /* swapped U and V */ ir2_decode_plane_inter(s, avctx->width >> 2, avctx->height >> 2, s->picture.data[2], s->picture.linesize[2], ir2_luma_table); ir2_decode_plane_inter(s, avctx->width >> 2, avctx->height >> 2, s->picture.data[1], s->picture.linesize[1], ir2_luma_table); } *picture= *(AVFrame*)&s->picture; *data_size = sizeof(AVPicture); return buf_size; }
false
FFmpeg
fd37eac4958a2544599d1b0fce1b153ebd7cd7da
7,611
void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, INT64 pts) { ContextInfo *ci = (ContextInfo *) ctx; AVPicture picture1; Imlib_Image image; DATA32 *data; image = get_cached_image(ci, width, height); if (!image) { image = imlib_create_image(width, height); put_cached_image(ci, image, width, height); } imlib_context_set_image(image); data = imlib_image_get_data(); if (pix_fmt != PIX_FMT_RGBA32) { avpicture_fill(&picture1, (UINT8 *) data, PIX_FMT_RGBA32, width, height); if (img_convert(&picture1, PIX_FMT_RGBA32, picture, pix_fmt, width, height) < 0) { goto done; } } else { av_abort(); } imlib_image_set_has_alpha(0); { int wid, hig, h_a, v_a; char buff[1000]; char tbuff[1000]; char *tbp = ci->text; time_t now = time(0); char *p, *q; int x, y; if (ci->file) { int fd = open(ci->file, O_RDONLY); if (fd < 0) { tbp = "[File not found]"; } else { int l = read(fd, tbuff, sizeof(tbuff) - 1); if (l >= 0) { tbuff[l] = 0; tbp = tbuff; } else { tbp = "[I/O Error]"; } close(fd); } } strftime(buff, sizeof(buff), tbp, localtime(&now)); x = ci->x; y = ci->y; for (p = buff; p; p = q) { q = strchr(p, '\n'); if (q) *q++ = 0; imlib_text_draw_with_return_metrics(x, y, p, &wid, &hig, &h_a, &v_a); y += v_a; } } if (pix_fmt != PIX_FMT_RGBA32) { if (img_convert(picture, pix_fmt, &picture1, PIX_FMT_RGBA32, width, height) < 0) { } } done: ; }
false
FFmpeg
4be3147d0d2858ae5a242af2f86de3a810a9db77
7,612
static void host_signal_handler(int host_signum, siginfo_t *info, void *puc) { CPUArchState *env = thread_cpu->env_ptr; int sig; target_siginfo_t tinfo; /* the CPU emulator uses some host signals to detect exceptions, we forward to it some signals */ if ((host_signum == SIGSEGV || host_signum == SIGBUS) && info->si_code > 0) { if (cpu_signal_handler(host_signum, info, puc)) return; } /* get target signal number */ sig = host_to_target_signal(host_signum); if (sig < 1 || sig > TARGET_NSIG) return; trace_user_host_signal(env, host_signum, sig); host_to_target_siginfo_noswap(&tinfo, info); if (queue_signal(env, sig, &tinfo) == 1) { /* interrupt the virtual CPU as soon as possible */ cpu_exit(thread_cpu); } }
true
qemu
4d330cee37a21aabfc619a1948953559e66951a4
7,613
void stellaris_gamepad_init(int n, qemu_irq *irq, const int *keycode) { gamepad_state *s; int i; s = (gamepad_state *)g_malloc0(sizeof (gamepad_state)); s->buttons = (gamepad_button *)g_malloc0(n * sizeof (gamepad_button)); for (i = 0; i < n; i++) { s->buttons[i].irq = irq[i]; s->buttons[i].keycode = keycode[i]; } s->num_buttons = n; qemu_add_kbd_event_handler(stellaris_gamepad_put_key, s); vmstate_register(NULL, -1, &vmstate_stellaris_gamepad, s); }
true
qemu
b45c03f585ea9bb1af76c73e82195418c294919d
7,614
static void arm_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); ARMCPU *cpu = ARM_CPU(obj); static bool inited; uint32_t Aff1, Aff0; cs->env_ptr = &cpu->env; cpu_exec_init(cs, &error_abort); cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free); /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it. * We don't support setting cluster ID ([16..23]) (known as Aff2 * in later ARM ARM versions), or any of the higher affinity level fields, * so these bits always RAZ. */ Aff1 = cs->cpu_index / ARM_CPUS_PER_CLUSTER; Aff0 = cs->cpu_index % ARM_CPUS_PER_CLUSTER; cpu->mp_affinity = (Aff1 << ARM_AFF1_SHIFT) | Aff0; #ifndef CONFIG_USER_ONLY /* Our inbound IRQ and FIQ lines */ if (kvm_enabled()) { /* VIRQ and VFIQ are unused with KVM but we add them to maintain * the same interface as non-KVM CPUs. */ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4); } else { qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4); } cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, arm_gt_ptimer_cb, cpu); cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, arm_gt_vtimer_cb, cpu); cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, arm_gt_htimer_cb, cpu); cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, arm_gt_stimer_cb, cpu); qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs, ARRAY_SIZE(cpu->gt_timer_outputs)); #endif /* DTB consumers generally don't in fact care what the 'compatible' * string is, so always provide some string and trust that a hypothetical * picky DTB consumer will also provide a helpful error message. */ cpu->dtb_compatible = "qemu,unknown"; cpu->psci_version = 1; /* By default assume PSCI v0.1 */ cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE; if (tcg_enabled()) { cpu->psci_version = 2; /* TCG implements PSCI 0.2 */ if (!inited) { inited = true; arm_translate_init(); } } }
true
qemu
ce5b1bbf624b977a55ff7f85bb3871682d03baff
7,615
static int vmdk_create(const char *filename, QEMUOptionParameter *options, Error **errp) { int idx = 0; BlockDriverState *new_bs = NULL; Error *local_err; char *desc = NULL; int64_t total_size = 0, filesize; const char *adapter_type = NULL; const char *backing_file = NULL; const char *fmt = NULL; int flags = 0; int ret = 0; bool flat, split, compress; GString *ext_desc_lines; char path[PATH_MAX], prefix[PATH_MAX], postfix[PATH_MAX]; const int64_t split_size = 0x80000000; /* VMDK has constant split size */ const char *desc_extent_line; char parent_desc_line[BUF_SIZE] = ""; uint32_t parent_cid = 0xffffffff; uint32_t number_heads = 16; bool zeroed_grain = false; uint32_t desc_offset = 0, desc_len; const char desc_template[] = "# Disk DescriptorFile\n" "version=1\n" "CID=%" PRIx32 "\n" "parentCID=%" PRIx32 "\n" "createType=\"%s\"\n" "%s" "\n" "# Extent description\n" "%s" "\n" "# The Disk Data Base\n" "#DDB\n" "\n" "ddb.virtualHWVersion = \"%d\"\n" "ddb.geometry.cylinders = \"%" PRId64 "\"\n" "ddb.geometry.heads = \"%" PRIu32 "\"\n" "ddb.geometry.sectors = \"63\"\n" "ddb.adapterType = \"%s\"\n"; ext_desc_lines = g_string_new(NULL); if (filename_decompose(filename, path, prefix, postfix, PATH_MAX, errp)) { ret = -EINVAL; goto exit; } /* Read out options */ while (options && options->name) { if (!strcmp(options->name, BLOCK_OPT_SIZE)) { total_size = options->value.n; } else if (!strcmp(options->name, BLOCK_OPT_ADAPTER_TYPE)) { adapter_type = options->value.s; } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) { backing_file = options->value.s; } else if (!strcmp(options->name, BLOCK_OPT_COMPAT6)) { flags |= options->value.n ? BLOCK_FLAG_COMPAT6 : 0; } else if (!strcmp(options->name, BLOCK_OPT_SUBFMT)) { fmt = options->value.s; } else if (!strcmp(options->name, BLOCK_OPT_ZEROED_GRAIN)) { zeroed_grain |= options->value.n; } options++; } if (!adapter_type) { adapter_type = "ide"; } else if (strcmp(adapter_type, "ide") && strcmp(adapter_type, "buslogic") && strcmp(adapter_type, "lsilogic") && strcmp(adapter_type, "legacyESX")) { error_setg(errp, "Unknown adapter type: '%s'", adapter_type); ret = -EINVAL; goto exit; } if (strcmp(adapter_type, "ide") != 0) { /* that's the number of heads with which vmware operates when creating, exporting, etc. 
vmdk files with a non-ide adapter type */ number_heads = 255; } if (!fmt) { /* Default format to monolithicSparse */ fmt = "monolithicSparse"; } else if (strcmp(fmt, "monolithicFlat") && strcmp(fmt, "monolithicSparse") && strcmp(fmt, "twoGbMaxExtentSparse") && strcmp(fmt, "twoGbMaxExtentFlat") && strcmp(fmt, "streamOptimized")) { error_setg(errp, "Unknown subformat: '%s'", fmt); ret = -EINVAL; goto exit; } split = !(strcmp(fmt, "twoGbMaxExtentFlat") && strcmp(fmt, "twoGbMaxExtentSparse")); flat = !(strcmp(fmt, "monolithicFlat") && strcmp(fmt, "twoGbMaxExtentFlat")); compress = !strcmp(fmt, "streamOptimized"); if (flat) { desc_extent_line = "RW %" PRId64 " FLAT \"%s\" 0\n"; } else { desc_extent_line = "RW %" PRId64 " SPARSE \"%s\"\n"; } if (flat && backing_file) { error_setg(errp, "Flat image can't have backing file"); ret = -ENOTSUP; goto exit; } if (flat && zeroed_grain) { error_setg(errp, "Flat image can't enable zeroed grain"); ret = -ENOTSUP; goto exit; } if (backing_file) { BlockDriverState *bs = NULL; ret = bdrv_open(&bs, backing_file, NULL, NULL, BDRV_O_NO_BACKING, NULL, errp); if (ret != 0) { goto exit; } if (strcmp(bs->drv->format_name, "vmdk")) { bdrv_unref(bs); ret = -EINVAL; goto exit; } parent_cid = vmdk_read_cid(bs, 0); bdrv_unref(bs); snprintf(parent_desc_line, sizeof(parent_desc_line), "parentFileNameHint=\"%s\"", backing_file); } /* Create extents */ filesize = total_size; while (filesize > 0) { char desc_line[BUF_SIZE]; char ext_filename[PATH_MAX]; char desc_filename[PATH_MAX]; int64_t size = filesize; if (split && size > split_size) { size = split_size; } if (split) { snprintf(desc_filename, sizeof(desc_filename), "%s-%c%03d%s", prefix, flat ? 'f' : 's', ++idx, postfix); } else if (flat) { snprintf(desc_filename, sizeof(desc_filename), "%s-flat%s", prefix, postfix); } else { snprintf(desc_filename, sizeof(desc_filename), "%s%s", prefix, postfix); } snprintf(ext_filename, sizeof(ext_filename), "%s%s", path, desc_filename); if (vmdk_create_extent(ext_filename, size, flat, compress, zeroed_grain, errp)) { ret = -EINVAL; goto exit; } filesize -= size; /* Format description line */ snprintf(desc_line, sizeof(desc_line), desc_extent_line, size / BDRV_SECTOR_SIZE, desc_filename); g_string_append(ext_desc_lines, desc_line); } /* generate descriptor file */ desc = g_strdup_printf(desc_template, (uint32_t)time(NULL), parent_cid, fmt, parent_desc_line, ext_desc_lines->str, (flags & BLOCK_FLAG_COMPAT6 ? 6 : 4), total_size / (int64_t)(63 * number_heads * BDRV_SECTOR_SIZE), number_heads, adapter_type); desc_len = strlen(desc); /* the descriptor offset = 0x200 */ if (!split && !flat) { desc_offset = 0x200; } else { ret = bdrv_create_file(filename, options, &local_err); if (ret < 0) { error_setg_errno(errp, -ret, "Could not create image file"); goto exit; } } assert(new_bs == NULL); ret = bdrv_open(&new_bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, NULL, &local_err); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write description"); goto exit; } ret = bdrv_pwrite(new_bs, desc_offset, desc, desc_len); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write description"); goto exit; } /* bdrv_pwrite write padding zeros to align to sector, we don't need that * for description file */ if (desc_offset == 0) { ret = bdrv_truncate(new_bs, desc_len); if (ret < 0) { error_setg_errno(errp, -ret, "Could not truncate file"); } } exit: if (new_bs) { bdrv_unref(new_bs); } g_free(desc); g_string_free(ext_desc_lines, true); return ret; }
true
qemu
c13959c745a7e4965c94d19e3153d2c44459906d
7,616
int vmstate_register_with_alias_id(DeviceState *dev, int instance_id, const VMStateDescription *vmsd, void *opaque, int alias_id, int required_for_version) { SaveStateEntry *se; /* If this triggers, alias support can be dropped for the vmsd. */ assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id); se = g_malloc0(sizeof(SaveStateEntry)); se->version_id = vmsd->version_id; se->section_id = savevm_state.global_section_id++; se->opaque = opaque; se->vmsd = vmsd; se->alias_id = alias_id; if (dev) { char *id = qdev_get_dev_path(dev); if (id) { pstrcpy(se->idstr, sizeof(se->idstr), id); pstrcat(se->idstr, sizeof(se->idstr), "/"); g_free(id); se->compat = g_malloc0(sizeof(CompatEntry)); pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name); se->compat->instance_id = instance_id == -1 ? calculate_compat_instance_id(vmsd->name) : instance_id; instance_id = -1; } } pstrcat(se->idstr, sizeof(se->idstr), vmsd->name); if (instance_id == -1) { se->instance_id = calculate_new_instance_id(se->idstr); } else { se->instance_id = instance_id; } assert(!se->compat || se->instance_id == 0); /* add at the end of list */ QTAILQ_INSERT_TAIL(&savevm_state.handlers, se, entry); return 0; }
true
qemu
97f3ad35517e0d02c0149637d1bb10713c52b057
7,617
void visit_start_alternate(Visitor *v, const char *name, GenericAlternate **obj, size_t size, bool promote_int, Error **errp) { Error *err = NULL; assert(obj && size >= sizeof(GenericAlternate)); assert(v->type != VISITOR_OUTPUT || *obj); if (v->start_alternate) { v->start_alternate(v, name, obj, size, promote_int, &err); } if (v->type == VISITOR_INPUT) { assert(v->start_alternate && !err != !*obj); } error_propagate(errp, err); }
true
qemu
a15fcc3cf69ee3d408f60d6cc316488d2b0249b4
7,618
void kvm_ioapic_dump_state(Monitor *mon, const QDict *qdict) { IOAPICCommonState s; kvm_ioapic_get(&s); ioapic_print_redtbl(mon, &s); }
true
qemu
c6fcb0e201ad296a9c0f587486830d9508094efb
7,619
milkymist_init(MachineState *machine) { const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; LM32CPU *cpu; CPULM32State *env; int kernel_size; DriveInfo *dinfo; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *phys_sdram = g_new(MemoryRegion, 1); qemu_irq irq[32]; int i; char *bios_filename; ResetInfo *reset_info; /* memory map */ hwaddr flash_base = 0x00000000; size_t flash_sector_size = 128 * 1024; size_t flash_size = 32 * 1024 * 1024; hwaddr sdram_base = 0x40000000; size_t sdram_size = 128 * 1024 * 1024; hwaddr initrd_base = sdram_base + 0x1002000; hwaddr cmdline_base = sdram_base + 0x1000000; size_t initrd_max = sdram_size - 0x1002000; reset_info = g_malloc0(sizeof(ResetInfo)); if (cpu_model == NULL) { cpu_model = "lm32-full"; } cpu = LM32_CPU(cpu_generic_init(TYPE_LM32_CPU, cpu_model)); if (cpu == NULL) { fprintf(stderr, "qemu: unable to find CPU '%s'\n", cpu_model); exit(1); } env = &cpu->env; reset_info->cpu = cpu; cpu_lm32_set_phys_msb_ignore(env, 1); memory_region_allocate_system_memory(phys_sdram, NULL, "milkymist.sdram", sdram_size); memory_region_add_subregion(address_space_mem, sdram_base, phys_sdram); dinfo = drive_get(IF_PFLASH, 0, 0); /* Numonyx JS28F256J3F105 */ pflash_cfi01_register(flash_base, NULL, "milkymist.flash", flash_size, dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, flash_sector_size, flash_size / flash_sector_size, 2, 0x00, 0x89, 0x00, 0x1d, 1); /* create irq lines */ env->pic_state = lm32_pic_init(qemu_allocate_irq(cpu_irq_handler, cpu, 0)); for (i = 0; i < 32; i++) { irq[i] = qdev_get_gpio_in(env->pic_state, i); } /* load bios rom */ if (bios_name == NULL) { bios_name = BIOS_FILENAME; } bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (bios_filename) { load_image_targphys(bios_filename, BIOS_OFFSET, BIOS_SIZE); } reset_info->bootstrap_pc = BIOS_OFFSET; /* if no kernel is given no valid bios rom is a fatal error */ if (!kernel_filename && !dinfo && !bios_filename && !qtest_enabled()) { fprintf(stderr, "qemu: could not load Milkymist One bios '%s'\n", bios_name); exit(1); } g_free(bios_filename); milkymist_uart_create(0x60000000, irq[0], serial_hds[0]); milkymist_sysctl_create(0x60001000, irq[1], irq[2], irq[3], 80000000, 0x10014d31, 0x0000041f, 0x00000001); milkymist_hpdmc_create(0x60002000); milkymist_vgafb_create(0x60003000, 0x40000000, 0x0fffffff); milkymist_memcard_create(0x60004000); milkymist_ac97_create(0x60005000, irq[4], irq[5], irq[6], irq[7]); milkymist_pfpu_create(0x60006000, irq[8]); if (machine->enable_graphics) { milkymist_tmu2_create(0x60007000, irq[9]); } milkymist_minimac2_create(0x60008000, 0x30000000, irq[10], irq[11]); milkymist_softusb_create(0x6000f000, irq[15], 0x20000000, 0x1000, 0x20020000, 0x2000); /* make sure juart isn't the first chardev */ env->juart_state = lm32_juart_init(serial_hds[1]); if (kernel_filename) { uint64_t entry; /* Boots a kernel elf binary. 
*/ kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL, 1, EM_LATTICEMICO32, 0, 0); reset_info->bootstrap_pc = entry; if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, sdram_base, sdram_size); reset_info->bootstrap_pc = sdram_base; } if (kernel_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } } if (kernel_cmdline && strlen(kernel_cmdline)) { pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline); reset_info->cmdline_base = (uint32_t)cmdline_base; } if (initrd_filename) { size_t initrd_size; initrd_size = load_image_targphys(initrd_filename, initrd_base, initrd_max); reset_info->initrd_base = (uint32_t)initrd_base; reset_info->initrd_size = (uint32_t)initrd_size; } qemu_register_reset(main_cpu_reset, reset_info); }
true
qemu
4482e05cbbb7e50e476f6a9500cf0b38913bd939
7,620
static int read_probe(AVProbeData *pd) { if (pd->buf[0] == 'J' && pd->buf[1] == 'V' && strlen(MAGIC) <= pd->buf_size - 4 && !memcmp(pd->buf + 4, MAGIC, strlen(MAGIC))) return AVPROBE_SCORE_MAX; return 0; }
true
FFmpeg
db374790c75fa4ef947abcb5019fcf21d0b2de85
7,621
static void ich9_lpc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->reset = ich9_lpc_reset; k->init = ich9_lpc_initfn; dc->vmsd = &vmstate_ich9_lpc; dc->no_user = 1; k->config_write = ich9_lpc_config_write; dc->desc = "ICH9 LPC bridge"; k->vendor_id = PCI_VENDOR_ID_INTEL; k->device_id = PCI_DEVICE_ID_INTEL_ICH9_8; k->revision = ICH9_A2_LPC_REVISION; k->class_id = PCI_CLASS_BRIDGE_ISA; }
true
qemu
efec3dd631d94160288392721a5f9c39e50fb2bc
7,623
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw) { hwaddr physical; int prot; int access_type; int ret = 0; /* data access */ access_type = ACCESS_INT; ret = get_physical_address(env, &physical, &prot, address, rw, access_type); if (ret != TLBRET_MATCH) { raise_mmu_exception(env, address, rw, ret); return -1LL; } else { return physical; } }
true
qemu
9fbf4a58c90183b30bb2c8ad971ccce7e6716a16
7,624
static int h263_decode_frame(AVCodecContext *avctx, void *data, int *data_size, UINT8 *buf, int buf_size) { MpegEncContext *s = avctx->priv_data; int ret; AVPicture *pict = data; #ifdef DEBUG printf("*****frame %d size=%d\n", avctx->frame_number, buf_size); printf("bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]); #endif /* no supplementary picture */ if (buf_size == 0) { *data_size = 0; return 0; } if(s->bitstream_buffer_size) //divx 5.01+ frame reorder init_get_bits(&s->gb, s->bitstream_buffer, s->bitstream_buffer_size); else init_get_bits(&s->gb, buf, buf_size); /* let's go :-) */ if (s->h263_msmpeg4) { ret = msmpeg4_decode_picture_header(s); } else if (s->h263_pred) { ret = mpeg4_decode_picture_header(s); s->has_b_frames= !s->low_delay; } else if (s->h263_intel) { ret = intel_h263_decode_picture_header(s); } else { ret = h263_decode_picture_header(s); } /* After H263 & mpeg4 header decode we have the height, width,*/ /* and other parameters. So then we could init the picture */ /* FIXME: By the way H263 decoder is evolving it should have */ /* an H263EncContext */ if (!s->context_initialized) { avctx->width = s->width; avctx->height = s->height; avctx->aspect_ratio_info= s->aspect_ratio_info; if (MPV_common_init(s) < 0) return -1; } else if (s->width != avctx->width || s->height != avctx->height) { /* H.263 could change picture size any time */ MPV_common_end(s); if (MPV_common_init(s) < 0) return -1; } if(ret==FRAME_SKIPED) return 0; if (ret < 0) return -1; /* skip b frames if we dont have reference frames */ if(s->num_available_buffers<2 && s->pict_type==B_TYPE) return 0; MPV_frame_start(s); #ifdef DEBUG printf("qscale=%d\n", s->qscale); #endif /* decode each macroblock */ s->block_wrap[0]= s->block_wrap[1]= s->block_wrap[2]= s->block_wrap[3]= s->mb_width*2 + 2; s->block_wrap[4]= s->block_wrap[5]= s->mb_width + 2; for(s->mb_y=0; s->mb_y < s->mb_height; s->mb_y++) { /* Check for GOB headers on H.263 */ /* FIXME: In the future H.263+ will have intra prediction */ /* and we are gonna need another way to detect MPEG4 */ if (s->mb_y && !s->h263_pred) { s->first_gob_line = h263_decode_gob_header(s); } s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1; s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1); s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1; s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2); s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2); s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2); for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) { s->block_index[0]+=2; s->block_index[1]+=2; s->block_index[2]+=2; s->block_index[3]+=2; s->block_index[4]++; s->block_index[5]++; #ifdef DEBUG printf("**mb x=%d y=%d\n", s->mb_x, s->mb_y); #endif //fprintf(stderr,"\nFrame: %d\tMB: %d",avctx->frame_number, (s->mb_y * s->mb_width) + s->mb_x); /* DCT & quantize */ if (s->h263_msmpeg4) { msmpeg4_dc_scale(s); } else if (s->h263_pred) { h263_dc_scale(s); } else { /* default quantization values */ s->y_dc_scale = 8; s->c_dc_scale = 8; } clear_blocks(s->block[0]); s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; if (s->h263_msmpeg4) { if (msmpeg4_decode_mb(s, s->block) < 0) { fprintf(stderr,"\nError at MB: %d\n", (s->mb_y * s->mb_width) + s->mb_x); return -1; } } else { if (h263_decode_mb(s, s->block) < 0) { fprintf(stderr,"\nError at MB: %d\n", (s->mb_y * s->mb_width) + s->mb_x); return -1; } } MPV_decode_mb(s, s->block); } if ( avctx->draw_horiz_band && 
(s->num_available_buffers>=1 || (!s->has_b_frames)) ) { UINT8 *src_ptr[3]; int y, h, offset; y = s->mb_y * 16; h = s->height - y; if (h > 16) h = 16; offset = y * s->linesize; if(s->pict_type==B_TYPE || (!s->has_b_frames)){ src_ptr[0] = s->current_picture[0] + offset; src_ptr[1] = s->current_picture[1] + (offset >> 2); src_ptr[2] = s->current_picture[2] + (offset >> 2); } else { src_ptr[0] = s->last_picture[0] + offset; src_ptr[1] = s->last_picture[1] + (offset >> 2); src_ptr[2] = s->last_picture[2] + (offset >> 2); } avctx->draw_horiz_band(avctx, src_ptr, s->linesize, y, s->width, h); } } if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==I_TYPE) if(msmpeg4_decode_ext_header(s, buf_size) < 0) return -1; /* divx 5.01+ bistream reorder stuff */ if(s->h263_pred && s->bitstream_buffer_size==0){ int current_pos= get_bits_count(&s->gb)/8; if( buf_size - current_pos > 5 && buf_size - current_pos < BITSTREAM_BUFFER_SIZE){ memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); s->bitstream_buffer_size= buf_size - current_pos; } }else s->bitstream_buffer_size=0; MPV_frame_end(s); if(s->pict_type==B_TYPE || (!s->has_b_frames)){ pict->data[0] = s->current_picture[0]; pict->data[1] = s->current_picture[1]; pict->data[2] = s->current_picture[2]; } else { pict->data[0] = s->last_picture[0]; pict->data[1] = s->last_picture[1]; pict->data[2] = s->last_picture[2]; } pict->linesize[0] = s->linesize; pict->linesize[1] = s->linesize / 2; pict->linesize[2] = s->linesize / 2; avctx->quality = s->qscale; /* Return the Picture timestamp as the frame number */ /* we substract 1 because it is added on utils.c */ avctx->frame_number = s->picture_number - 1; /* dont output the last pic after seeking note we allready added +1 for the current pix in MPV_frame_end(s) */ if(s->num_available_buffers>=2 || (!s->has_b_frames)) *data_size = sizeof(AVPicture); return buf_size; }
true
FFmpeg
d7e9533aa06f4073a27812349b35ba5fede11ca1
7,625
PPC_OP(setlr) { regs->lr = PARAM1; RETURN(); }
true
qemu
d9bce9d99f4656ae0b0127f7472db9067b8f84ab
7,626
static int mlib_YUV2ABGR420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ if(c->srcFormat == PIX_FMT_YUV422P){ srcStride[1] *= 2; srcStride[2] *= 2; } assert(srcStride[1] == srcStride[2]); mlib_VideoColorYUV2ABGR420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW, srcSliceH, dstStride[0], srcStride[0], srcStride[1]); return srcSliceH; }
true
FFmpeg
428098165de4c3edfe42c1b7f00627d287015863
7,627
BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *iov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors, cb, opaque, 0); }
true
qemu
71d0770c4cec9f1dc04f4dadcbf7fd6c335030a9
7,628
static int init(AVFilterContext *ctx, const char *args) { EvalContext *eval = ctx->priv; char *args1 = av_strdup(args); char *expr, *buf, *bufptr; int ret, i; eval->class = &aevalsrc_class; av_opt_set_defaults(eval); /* parse expressions */ buf = args1; i = 0; while (expr = av_strtok(buf, ":", &bufptr)) { ret = av_expr_parse(&eval->expr[i], expr, var_names, NULL, NULL, NULL, NULL, 0, ctx); if (ret < 0) goto end; i++; if (bufptr && *bufptr == ':') { /* found last expression */ bufptr++; break; } buf = NULL; } eval->nb_channels = i; if (bufptr && (ret = av_set_options_string(eval, bufptr, "=", ":")) < 0) goto end; if (eval->chlayout_str) { int n; ret = ff_parse_channel_layout(&eval->chlayout, eval->chlayout_str, ctx); if (ret < 0) goto end; n = av_get_channel_layout_nb_channels(eval->chlayout); if (n != eval->nb_channels) { av_log(ctx, AV_LOG_ERROR, "Mismatch between the specified number of channels '%d' " "and the number of channels '%d' in the specified channel layout '%s'\n", eval->nb_channels, n, eval->chlayout_str); ret = AVERROR(EINVAL); goto end; } } else { /* guess channel layout from nb expressions/channels */ eval->chlayout = av_get_default_channel_layout(eval->nb_channels); if (!eval->chlayout) { av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n", eval->nb_channels); ret = AVERROR(EINVAL); goto end; } } if ((ret = ff_parse_sample_rate(&eval->sample_rate, eval->sample_rate_str, ctx))) goto end; eval->duration = -1; if (eval->duration_str) { int64_t us = -1; if ((ret = av_parse_time(&us, eval->duration_str, 1)) < 0) { av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", eval->duration_str); goto end; } eval->duration = (double)us / 1000000; } eval->n = 0; end: av_free(args1); return ret; }
true
FFmpeg
989c91b5042c19c9914a3b205b1ca6e1598c66ba
7,629
BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { Coroutine *co; BlockAIOCBCoroutine *acb; trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); acb->need_bh = true; acb->req.error = -EINPROGRESS; acb->req.sector = sector_num; acb->req.nb_sectors = nb_sectors; co = qemu_coroutine_create(bdrv_aio_discard_co_entry); qemu_coroutine_enter(co, acb); bdrv_co_maybe_schedule_bh(acb); return &acb->common; }
true
qemu
0b8b8753e4d94901627b3e86431230f2319215c4
7,630
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { TiffContext *const s = avctx->priv_data; AVFrame *const p = data; ThreadFrame frame = { .f = data }; unsigned off; int le, ret, plane, planes; int i, j, entries, stride; unsigned soff, ssize; uint8_t *dst; GetByteContext stripsizes; GetByteContext stripdata; bytestream2_init(&s->gb, avpkt->data, avpkt->size); // parse image header if ((ret = ff_tdecode_header(&s->gb, &le, &off))) { av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n"); return ret; } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) { av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n"); return AVERROR_INVALIDDATA; } s->le = le; // TIFF_BPP is not a required tag and defaults to 1 s->bppcount = s->bpp = 1; s->photometric = TIFF_PHOTOMETRIC_NONE; s->compr = TIFF_RAW; s->fill_order = 0; free_geotags(s); // Reset these offsets so we can tell if they were set this frame s->stripsizesoff = s->strippos = 0; /* parse image file directory */ bytestream2_seek(&s->gb, off, SEEK_SET); entries = ff_tget_short(&s->gb, le); if (bytestream2_get_bytes_left(&s->gb) < entries * 12) return AVERROR_INVALIDDATA; for (i = 0; i < entries; i++) { if ((ret = tiff_decode_tag(s, p)) < 0) return ret; } for (i = 0; i<s->geotag_count; i++) { const char *keyname = get_geokey_name(s->geotags[i].key); if (!keyname) { av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key); continue; } if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) { av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key); continue; } ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0); if (ret<0) { av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname); return ret; } } if (!s->strippos && !s->stripoff) { av_log(avctx, AV_LOG_ERROR, "Image data is missing\n"); return AVERROR_INVALIDDATA; } /* now we have the data and may start decoding */ if ((ret = init_image(s, &frame)) < 0) return ret; if (s->strips == 1 && !s->stripsize) { av_log(avctx, AV_LOG_WARNING, "Image data size missing\n"); s->stripsize = avpkt->size - s->stripoff; } if (s->stripsizesoff) { if (s->stripsizesoff >= (unsigned)avpkt->size) return AVERROR_INVALIDDATA; bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff, avpkt->size - s->stripsizesoff); } if (s->strippos) { if (s->strippos >= (unsigned)avpkt->size) return AVERROR_INVALIDDATA; bytestream2_init(&stripdata, avpkt->data + s->strippos, avpkt->size - s->strippos); } if (s->rps <= 0 || s->rps % s->subsampling[1]) { av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps); return AVERROR_INVALIDDATA; } planes = s->planar ? 
s->bppcount : 1; for (plane = 0; plane < planes; plane++) { stride = p->linesize[plane]; dst = p->data[plane]; for (i = 0; i < s->height; i += s->rps) { if (s->stripsizesoff) ssize = ff_tget(&stripsizes, s->sstype, le); else ssize = s->stripsize; if (s->strippos) soff = ff_tget(&stripdata, s->sot, le); else soff = s->stripoff; if (soff > avpkt->size || ssize > avpkt->size - soff) { av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n"); return AVERROR_INVALIDDATA; } if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i, FFMIN(s->rps, s->height - i))) < 0) { if (avctx->err_recognition & AV_EF_EXPLODE) return ret; break; } dst += s->rps * stride; } if (s->predictor == 2) { if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) { av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported"); return AVERROR_PATCHWELCOME; } dst = p->data[plane]; soff = s->bpp >> 3; if (s->planar) soff = FFMAX(soff / s->bppcount, 1); ssize = s->width * soff; if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE || s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE || s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE || s->avctx->pix_fmt == AV_PIX_FMT_YA16LE || s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE || s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j += 2) AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff)); dst += stride; } } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE || s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE || s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE || s->avctx->pix_fmt == AV_PIX_FMT_YA16BE || s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE || s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j += 2) AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff)); dst += stride; } } else { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j++) dst[j] += dst[j - soff]; dst += stride; } } } if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) { int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255); dst = p->data[plane]; for (i = 0; i < s->height; i++) { for (j = 0; j < stride; j++) dst[j] = c - dst[j]; dst += stride; } } } if (s->planar && s->bppcount > 2) { FFSWAP(uint8_t*, p->data[0], p->data[2]); FFSWAP(int, p->linesize[0], p->linesize[2]); FFSWAP(uint8_t*, p->data[0], p->data[1]); FFSWAP(int, p->linesize[0], p->linesize[1]); } *got_frame = 1; return avpkt->size; }
true
FFmpeg
27f80ab0160d2e64007e1c9799ffd4504cc13eb5
7,631
static int process_video_header_vp6(AVFormatContext *s) { EaDemuxContext *ea = s->priv_data; AVIOContext *pb = s->pb; avio_skip(pb, 8); ea->nb_frames = avio_rl32(pb); avio_skip(pb, 4); ea->time_base.den = avio_rl32(pb); ea->time_base.num = avio_rl32(pb); ea->video_codec = AV_CODEC_ID_VP6; return 1; }
true
FFmpeg
1831274ff1ef69d4b730993e03283430775e2eca
7,632
void cpu_reset (CPUCRISState *env) { memset(env, 0, offsetof(CPUCRISState, breakpoints)); tlb_flush(env, 1); env->pregs[PR_VR] = 32; #if defined(CONFIG_USER_ONLY) /* start in user mode with interrupts enabled. */ env->pregs[PR_CCS] |= U_FLAG | I_FLAG; #else env->pregs[PR_CCS] = 0; #endif }
true
qemu
eca1bdf415c454093dfc7eb983cd49287c043967
7,633
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { BlockDriver *drv = bs->drv; if (!drv) { return -ENOMEDIUM; } if (bdrv_check_request(bs, sector_num, nb_sectors)) { return -EIO; } /* throttling disk read I/O */ if (bs->io_limits_enabled) { bdrv_io_limits_intercept(bs, false, nb_sectors); } return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); }
true
qemu
dbffbdcfff69431b622866ac5ea78df74fdc02d4
7,634
av_cold static int auto_matrix(SwrContext *s) { int i, j, out_i; double matrix[NUM_NAMED_CHANNELS][NUM_NAMED_CHANNELS]={{0}}; int64_t unaccounted, in_ch_layout, out_ch_layout; double maxcoef=0; char buf[128]; const int matrix_encoding = s->matrix_encoding; float maxval; in_ch_layout = clean_layout(s, s->in_ch_layout); out_ch_layout = clean_layout(s, s->out_ch_layout); if( out_ch_layout == AV_CH_LAYOUT_STEREO_DOWNMIX && (in_ch_layout & AV_CH_LAYOUT_STEREO_DOWNMIX) == 0 ) out_ch_layout = AV_CH_LAYOUT_STEREO; if( in_ch_layout == AV_CH_LAYOUT_STEREO_DOWNMIX && (out_ch_layout & AV_CH_LAYOUT_STEREO_DOWNMIX) == 0 ) in_ch_layout = AV_CH_LAYOUT_STEREO; if(!sane_layout(in_ch_layout)){ av_get_channel_layout_string(buf, sizeof(buf), -1, s->in_ch_layout); av_log(s, AV_LOG_ERROR, "Input channel layout '%s' is not supported\n", buf); return AVERROR(EINVAL); } if(!sane_layout(out_ch_layout)){ av_get_channel_layout_string(buf, sizeof(buf), -1, s->out_ch_layout); av_log(s, AV_LOG_ERROR, "Output channel layout '%s' is not supported\n", buf); return AVERROR(EINVAL); } memset(s->matrix, 0, sizeof(s->matrix)); for(i=0; i<FF_ARRAY_ELEMS(matrix); i++){ if(in_ch_layout & out_ch_layout & (1ULL<<i)) matrix[i][i]= 1.0; } unaccounted= in_ch_layout & ~out_ch_layout; //FIXME implement dolby surround //FIXME implement full ac3 if(unaccounted & AV_CH_FRONT_CENTER){ if((out_ch_layout & AV_CH_LAYOUT_STEREO) == AV_CH_LAYOUT_STEREO){ if(in_ch_layout & AV_CH_LAYOUT_STEREO) { matrix[ FRONT_LEFT][FRONT_CENTER]+= s->clev; matrix[FRONT_RIGHT][FRONT_CENTER]+= s->clev; } else { matrix[ FRONT_LEFT][FRONT_CENTER]+= M_SQRT1_2; matrix[FRONT_RIGHT][FRONT_CENTER]+= M_SQRT1_2; } }else av_assert0(0); } if(unaccounted & AV_CH_LAYOUT_STEREO){ if(out_ch_layout & AV_CH_FRONT_CENTER){ matrix[FRONT_CENTER][ FRONT_LEFT]+= M_SQRT1_2; matrix[FRONT_CENTER][FRONT_RIGHT]+= M_SQRT1_2; if(in_ch_layout & AV_CH_FRONT_CENTER) matrix[FRONT_CENTER][ FRONT_CENTER] = s->clev*sqrt(2); }else av_assert0(0); } if(unaccounted & AV_CH_BACK_CENTER){ if(out_ch_layout & AV_CH_BACK_LEFT){ matrix[ BACK_LEFT][BACK_CENTER]+= M_SQRT1_2; matrix[BACK_RIGHT][BACK_CENTER]+= M_SQRT1_2; }else if(out_ch_layout & AV_CH_SIDE_LEFT){ matrix[ SIDE_LEFT][BACK_CENTER]+= M_SQRT1_2; matrix[SIDE_RIGHT][BACK_CENTER]+= M_SQRT1_2; }else if(out_ch_layout & AV_CH_FRONT_LEFT){ if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY || matrix_encoding == AV_MATRIX_ENCODING_DPLII) { if (unaccounted & (AV_CH_BACK_LEFT | AV_CH_SIDE_LEFT)) { matrix[FRONT_LEFT ][BACK_CENTER] -= s->slev * M_SQRT1_2; matrix[FRONT_RIGHT][BACK_CENTER] += s->slev * M_SQRT1_2; } else { matrix[FRONT_LEFT ][BACK_CENTER] -= s->slev; matrix[FRONT_RIGHT][BACK_CENTER] += s->slev; } } else { matrix[ FRONT_LEFT][BACK_CENTER]+= s->slev*M_SQRT1_2; matrix[FRONT_RIGHT][BACK_CENTER]+= s->slev*M_SQRT1_2; } }else if(out_ch_layout & AV_CH_FRONT_CENTER){ matrix[ FRONT_CENTER][BACK_CENTER]+= s->slev*M_SQRT1_2; }else av_assert0(0); } if(unaccounted & AV_CH_BACK_LEFT){ if(out_ch_layout & AV_CH_BACK_CENTER){ matrix[BACK_CENTER][ BACK_LEFT]+= M_SQRT1_2; matrix[BACK_CENTER][BACK_RIGHT]+= M_SQRT1_2; }else if(out_ch_layout & AV_CH_SIDE_LEFT){ if(in_ch_layout & AV_CH_SIDE_LEFT){ matrix[ SIDE_LEFT][ BACK_LEFT]+= M_SQRT1_2; matrix[SIDE_RIGHT][BACK_RIGHT]+= M_SQRT1_2; }else{ matrix[ SIDE_LEFT][ BACK_LEFT]+= 1.0; matrix[SIDE_RIGHT][BACK_RIGHT]+= 1.0; } }else if(out_ch_layout & AV_CH_FRONT_LEFT){ if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY) { matrix[FRONT_LEFT ][BACK_LEFT ] -= s->slev * M_SQRT1_2; matrix[FRONT_LEFT ][BACK_RIGHT] -= s->slev * M_SQRT1_2; 
matrix[FRONT_RIGHT][BACK_LEFT ] += s->slev * M_SQRT1_2; matrix[FRONT_RIGHT][BACK_RIGHT] += s->slev * M_SQRT1_2; } else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) { matrix[FRONT_LEFT ][BACK_LEFT ] -= s->slev * SQRT3_2; matrix[FRONT_LEFT ][BACK_RIGHT] -= s->slev * M_SQRT1_2; matrix[FRONT_RIGHT][BACK_LEFT ] += s->slev * M_SQRT1_2; matrix[FRONT_RIGHT][BACK_RIGHT] += s->slev * SQRT3_2; } else { matrix[ FRONT_LEFT][ BACK_LEFT] += s->slev; matrix[FRONT_RIGHT][BACK_RIGHT] += s->slev; } }else if(out_ch_layout & AV_CH_FRONT_CENTER){ matrix[ FRONT_CENTER][BACK_LEFT ]+= s->slev*M_SQRT1_2; matrix[ FRONT_CENTER][BACK_RIGHT]+= s->slev*M_SQRT1_2; }else av_assert0(0); } if(unaccounted & AV_CH_SIDE_LEFT){ if(out_ch_layout & AV_CH_BACK_LEFT){ /* if back channels do not exist in the input, just copy side channels to back channels, otherwise mix side into back */ if (in_ch_layout & AV_CH_BACK_LEFT) { matrix[BACK_LEFT ][SIDE_LEFT ] += M_SQRT1_2; matrix[BACK_RIGHT][SIDE_RIGHT] += M_SQRT1_2; } else { matrix[BACK_LEFT ][SIDE_LEFT ] += 1.0; matrix[BACK_RIGHT][SIDE_RIGHT] += 1.0; } }else if(out_ch_layout & AV_CH_BACK_CENTER){ matrix[BACK_CENTER][ SIDE_LEFT]+= M_SQRT1_2; matrix[BACK_CENTER][SIDE_RIGHT]+= M_SQRT1_2; }else if(out_ch_layout & AV_CH_FRONT_LEFT){ if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY) { matrix[FRONT_LEFT ][SIDE_LEFT ] -= s->slev * M_SQRT1_2; matrix[FRONT_LEFT ][SIDE_RIGHT] -= s->slev * M_SQRT1_2; matrix[FRONT_RIGHT][SIDE_LEFT ] += s->slev * M_SQRT1_2; matrix[FRONT_RIGHT][SIDE_RIGHT] += s->slev * M_SQRT1_2; } else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) { matrix[FRONT_LEFT ][SIDE_LEFT ] -= s->slev * SQRT3_2; matrix[FRONT_LEFT ][SIDE_RIGHT] -= s->slev * M_SQRT1_2; matrix[FRONT_RIGHT][SIDE_LEFT ] += s->slev * M_SQRT1_2; matrix[FRONT_RIGHT][SIDE_RIGHT] += s->slev * SQRT3_2; } else { matrix[ FRONT_LEFT][ SIDE_LEFT] += s->slev; matrix[FRONT_RIGHT][SIDE_RIGHT] += s->slev; } }else if(out_ch_layout & AV_CH_FRONT_CENTER){ matrix[ FRONT_CENTER][SIDE_LEFT ]+= s->slev*M_SQRT1_2; matrix[ FRONT_CENTER][SIDE_RIGHT]+= s->slev*M_SQRT1_2; }else av_assert0(0); } if(unaccounted & AV_CH_FRONT_LEFT_OF_CENTER){ if(out_ch_layout & AV_CH_FRONT_LEFT){ matrix[ FRONT_LEFT][ FRONT_LEFT_OF_CENTER]+= 1.0; matrix[FRONT_RIGHT][FRONT_RIGHT_OF_CENTER]+= 1.0; }else if(out_ch_layout & AV_CH_FRONT_CENTER){ matrix[ FRONT_CENTER][ FRONT_LEFT_OF_CENTER]+= M_SQRT1_2; matrix[ FRONT_CENTER][FRONT_RIGHT_OF_CENTER]+= M_SQRT1_2; }else av_assert0(0); } /* mix LFE into front left/right or center */ if (unaccounted & AV_CH_LOW_FREQUENCY) { if (out_ch_layout & AV_CH_FRONT_CENTER) { matrix[FRONT_CENTER][LOW_FREQUENCY] += s->lfe_mix_level; } else if (out_ch_layout & AV_CH_FRONT_LEFT) { matrix[FRONT_LEFT ][LOW_FREQUENCY] += s->lfe_mix_level * M_SQRT1_2; matrix[FRONT_RIGHT][LOW_FREQUENCY] += s->lfe_mix_level * M_SQRT1_2; } else av_assert0(0); } for(out_i=i=0; i<64; i++){ double sum=0; int in_i=0; for(j=0; j<64; j++){ if (i < FF_ARRAY_ELEMS(matrix) && j < FF_ARRAY_ELEMS(matrix[0])) s->matrix[out_i][in_i]= matrix[i][j]; else s->matrix[out_i][in_i]= i == j && (in_ch_layout & out_ch_layout & (1ULL<<i)); sum += fabs(s->matrix[out_i][in_i]); if(in_ch_layout & (1ULL<<j)) in_i++; } maxcoef= FFMAX(maxcoef, sum); if(out_ch_layout & (1ULL<<i)) out_i++; } if(s->rematrix_volume < 0) maxcoef = -s->rematrix_volume; if (s->rematrix_maxval > 0) { maxval = s->rematrix_maxval; } else if ( av_get_packed_sample_fmt(s->out_sample_fmt) < AV_SAMPLE_FMT_FLT || av_get_packed_sample_fmt(s->int_sample_fmt) < AV_SAMPLE_FMT_FLT) { maxval = 1.0; } else maxval = 
INT_MAX; if(maxcoef > maxval || s->rematrix_volume < 0){ maxcoef /= maxval; for(i=0; i<SWR_CH_MAX; i++) for(j=0; j<SWR_CH_MAX; j++){ s->matrix[i][j] /= maxcoef; } } if(s->rematrix_volume > 0){ for(i=0; i<SWR_CH_MAX; i++) for(j=0; j<SWR_CH_MAX; j++){ s->matrix[i][j] *= s->rematrix_volume; } } for(i=0; i<av_get_channel_layout_nb_channels(out_ch_layout); i++){ for(j=0; j<av_get_channel_layout_nb_channels(in_ch_layout); j++){ av_log(NULL, AV_LOG_DEBUG, "%f ", s->matrix[i][j]); } av_log(NULL, AV_LOG_DEBUG, "\n"); } return 0; }
true
FFmpeg
2c5c37ade115b5efa3f77ce11bc2c4e46b384959
7,635
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err) { #if HAVE_THREADS pthread_mutex_lock(&mq->lock); mq->err_recv = err; pthread_cond_broadcast(&mq->cond); pthread_mutex_unlock(&mq->lock); #endif /* HAVE_THREADS */ }
true
FFmpeg
bd5c860fdbc33d19d2ff0f6d1f06de07c17560dd
7,636
static int local_renameat(FsContext *ctx, V9fsPath *olddir, const char *old_name, V9fsPath *newdir, const char *new_name) { int ret; int odirfd, ndirfd; odirfd = local_opendir_nofollow(ctx, olddir->data); if (odirfd == -1) { return -1; } ndirfd = local_opendir_nofollow(ctx, newdir->data); if (ndirfd == -1) { close_preserve_errno(odirfd); return -1; } ret = renameat(odirfd, old_name, ndirfd, new_name); if (ret < 0) { goto out; } if (ctx->export_flags & V9FS_SM_MAPPED_FILE) { int omap_dirfd, nmap_dirfd; ret = mkdirat(ndirfd, VIRTFS_META_DIR, 0700); if (ret < 0 && errno != EEXIST) { goto err_undo_rename; } omap_dirfd = openat(odirfd, VIRTFS_META_DIR, O_RDONLY | O_DIRECTORY | O_NOFOLLOW); if (omap_dirfd == -1) { goto err; } nmap_dirfd = openat(ndirfd, VIRTFS_META_DIR, O_RDONLY | O_DIRECTORY | O_NOFOLLOW); if (nmap_dirfd == -1) { close_preserve_errno(omap_dirfd); goto err; } /* rename the .virtfs_metadata files */ ret = renameat(omap_dirfd, old_name, nmap_dirfd, new_name); close_preserve_errno(nmap_dirfd); close_preserve_errno(omap_dirfd); if (ret < 0 && errno != ENOENT) { goto err_undo_rename; } ret = 0; } goto out; err: ret = -1; err_undo_rename: renameat_preserve_errno(ndirfd, new_name, odirfd, old_name); out: close_preserve_errno(ndirfd); close_preserve_errno(odirfd); return ret; }
true
qemu
6dd4b1f1d026e478d9177b28169b377e212400f3
7,637
static bool blit_is_unsafe(struct CirrusVGAState *s) { /* should be the case, see cirrus_bitblt_start */ assert(s->cirrus_blt_width > 0); assert(s->cirrus_blt_height > 0); if (blit_region_is_unsafe(s, s->cirrus_blt_dstpitch, s->cirrus_blt_dstaddr & s->cirrus_addr_mask)) { return true; } if (blit_region_is_unsafe(s, s->cirrus_blt_srcpitch, s->cirrus_blt_srcaddr & s->cirrus_addr_mask)) { return true; } return false; }
true
qemu
bf25983345ca44aec3dd92c57142be45452bd38a
7,638
static void update_error_limit(WavpackFrameContext *ctx) { int i, br[2], sl[2]; for (i = 0; i <= ctx->stereo_in; i++) { ctx->ch[i].bitrate_acc += ctx->ch[i].bitrate_delta; br[i] = ctx->ch[i].bitrate_acc >> 16; sl[i] = LEVEL_DECAY(ctx->ch[i].slow_level); } if (ctx->stereo_in && ctx->hybrid_bitrate) { int balance = (sl[1] - sl[0] + br[1] + 1) >> 1; if (balance > br[0]) { br[1] = br[0] << 1; br[0] = 0; } else if (-balance > br[0]) { br[0] <<= 1; br[1] = 0; } else { br[1] = br[0] + balance; br[0] = br[0] - balance; } } for (i = 0; i <= ctx->stereo_in; i++) { if (ctx->hybrid_bitrate) { if (sl[i] - br[i] > -0x100) ctx->ch[i].error_limit = wp_exp2(sl[i] - br[i] + 0x100); else ctx->ch[i].error_limit = 0; } else { ctx->ch[i].error_limit = wp_exp2(br[i]); } } }
true
FFmpeg
d03d38616278bf209e6c860d8f9f564cbc6c1780
7,639
static int do_token_setup(USBDevice *s, USBPacket *p) { int request, value, index; int ret = 0; if (p->len != 8) memcpy(s->setup_buf, p->data, 8); s->setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6]; s->setup_index = 0; request = (s->setup_buf[0] << 8) | s->setup_buf[1]; value = (s->setup_buf[3] << 8) | s->setup_buf[2]; index = (s->setup_buf[5] << 8) | s->setup_buf[4]; if (s->setup_buf[0] & USB_DIR_IN) { ret = s->info->handle_control(s, request, value, index, s->setup_len, s->data_buf); if (ret < 0) return ret; if (ret < s->setup_len) s->setup_len = ret; s->setup_state = SETUP_STATE_DATA; } else { if (s->setup_len == 0) s->setup_state = SETUP_STATE_ACK; else s->setup_state = SETUP_STATE_DATA; return ret;
true
qemu
19f3322379c25a235eb1ec6335676549109fa625
7,640
static int colo_packet_compare_common(Packet *ppkt, Packet *spkt, int offset) { if (trace_event_get_state(TRACE_COLO_COMPARE_MISCOMPARE)) { char pri_ip_src[20], pri_ip_dst[20], sec_ip_src[20], sec_ip_dst[20]; strcpy(pri_ip_src, inet_ntoa(ppkt->ip->ip_src)); strcpy(pri_ip_dst, inet_ntoa(ppkt->ip->ip_dst)); strcpy(sec_ip_src, inet_ntoa(spkt->ip->ip_src)); strcpy(sec_ip_dst, inet_ntoa(spkt->ip->ip_dst)); trace_colo_compare_ip_info(ppkt->size, pri_ip_src, pri_ip_dst, spkt->size, sec_ip_src, sec_ip_dst); } offset = ppkt->vnet_hdr_len + offset; if (ppkt->size == spkt->size) { return memcmp(ppkt->data + offset, spkt->data + offset, spkt->size - offset); } else { trace_colo_compare_main("Net packet size are not the same"); return -1; } }
true
qemu
d87aa138039a4be6d705793fd3e397c69c52405a
7,641
static void nbd_coroutine_end(NbdClientSession *s, struct nbd_request *request) { int i = HANDLE_TO_INDEX(s, request->handle); s->recv_coroutine[i] = NULL; if (s->in_flight-- == MAX_NBD_REQUESTS) { qemu_co_mutex_unlock(&s->free_sema); } }
true
qemu
9bc9732faeff09828fe38c0ebe2401ee131a6fca
7,642
static char *read_splashfile(char *filename, int *file_sizep, int *file_typep) { GError *err = NULL; gboolean res; gchar *content; int file_type = -1; unsigned int filehead = 0; int bmp_bpp; res = g_file_get_contents(filename, &content, (gsize *)file_sizep, &err); if (res == FALSE) { error_report("failed to read splash file '%s'", filename); g_error_free(err); return NULL; } /* check file size */ if (*file_sizep < 30) { goto error; } /* check magic ID */ filehead = ((content[0] & 0xff) + (content[1] << 8)) & 0xffff; if (filehead == 0xd8ff) { file_type = JPG_FILE; } else if (filehead == 0x4d42) { file_type = BMP_FILE; } else { goto error; } /* check BMP bpp */ if (file_type == BMP_FILE) { bmp_bpp = (content[28] + (content[29] << 8)) & 0xffff; if (bmp_bpp != 24) { goto error; } } /* return values */ *file_typep = file_type; return content; error: error_report("splash file '%s' format not recognized; must be JPEG " "or 24 bit BMP", filename); g_free(content); return NULL; }
true
qemu
d09acb9b5ef0bb4fa94d3d459919a6ebaf8804bc
7,643
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst) { AVBSFList *lst; char *bsf_str, *buf, *dup, *saveptr; int ret; if (!str) return av_bsf_get_null_filter(bsf_lst); lst = av_bsf_list_alloc(); if (!lst) return AVERROR(ENOMEM); if (!(dup = buf = av_strdup(str))) return AVERROR(ENOMEM); while (1) { bsf_str = av_strtok(buf, ",", &saveptr); if (!bsf_str) break; ret = bsf_parse_single(bsf_str, lst); if (ret < 0) goto end; buf = NULL; } ret = av_bsf_list_finalize(&lst, bsf_lst); end: if (ret < 0) av_bsf_list_free(&lst); av_free(dup); return ret; }
true
FFmpeg
d9c2cfd31675a6403ae4ac7c141a8185dadceb12
7,644
static int decode_spectrum_and_dequant(AACContext * ac, float coef[1024], GetBitContext * gb, float sf[120], int pulse_present, const Pulse * pulse, const IndividualChannelStream * ics, enum BandType band_type[120]) { int i, k, g, idx = 0; const int c = 1024/ics->num_windows; const uint16_t * offsets = ics->swb_offset; float *coef_base = coef; for (g = 0; g < ics->num_windows; g++) memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float)*(c - offsets[ics->max_sfb])); for (g = 0; g < ics->num_window_groups; g++) { for (i = 0; i < ics->max_sfb; i++, idx++) { const int cur_band_type = band_type[idx]; const int dim = cur_band_type >= FIRST_PAIR_BT ? 2 : 4; const int is_cb_unsigned = IS_CODEBOOK_UNSIGNED(cur_band_type); int group; if (cur_band_type == ZERO_BT) { for (group = 0; group < ics->group_len[g]; group++) { memset(coef + group * 128 + offsets[i], 0, (offsets[i+1] - offsets[i])*sizeof(float)); } }else if (cur_band_type == NOISE_BT) { const float scale = sf[idx] / ((offsets[i+1] - offsets[i]) * PNS_MEAN_ENERGY); for (group = 0; group < ics->group_len[g]; group++) { for (k = offsets[i]; k < offsets[i+1]; k++) { ac->random_state = lcg_random(ac->random_state); coef[group*128+k] = ac->random_state * scale; } } }else if (cur_band_type != INTENSITY_BT2 && cur_band_type != INTENSITY_BT) { for (group = 0; group < ics->group_len[g]; group++) { for (k = offsets[i]; k < offsets[i+1]; k += dim) { const int index = get_vlc2(gb, vlc_spectral[cur_band_type - 1].table, 6, 3); const int coef_tmp_idx = (group << 7) + k; const float *vq_ptr; int j; if(index >= ff_aac_spectral_sizes[cur_band_type - 1]) { av_log(ac->avccontext, AV_LOG_ERROR, "Read beyond end of ff_aac_codebook_vectors[%d][]. index %d >= %d\n", cur_band_type - 1, index, ff_aac_spectral_sizes[cur_band_type - 1]); return -1; } vq_ptr = &ff_aac_codebook_vectors[cur_band_type - 1][index * dim]; if (is_cb_unsigned) { for (j = 0; j < dim; j++) if (vq_ptr[j]) coef[coef_tmp_idx + j] = 1 - 2*(int)get_bits1(gb); }else { for (j = 0; j < dim; j++) coef[coef_tmp_idx + j] = 1.0f; } if (cur_band_type == ESC_BT) { for (j = 0; j < 2; j++) { if (vq_ptr[j] == 64.0f) { int n = 4; /* The total length of escape_sequence must be < 22 bits according to the specification (i.e. max is 11111111110xxxxxxxxxx). */ while (get_bits1(gb) && n < 15) n++; if(n == 15) { av_log(ac->avccontext, AV_LOG_ERROR, "error in spectral data, ESC overflow\n"); return -1; } n = (1<<n) + get_bits(gb, n); coef[coef_tmp_idx + j] *= cbrtf(fabsf(n)) * n; }else coef[coef_tmp_idx + j] *= vq_ptr[j]; } }else for (j = 0; j < dim; j++) coef[coef_tmp_idx + j] *= vq_ptr[j]; for (j = 0; j < dim; j++) coef[coef_tmp_idx + j] *= sf[idx]; } } } } coef += ics->group_len[g]<<7; } if (pulse_present) { for(i = 0; i < pulse->num_pulse; i++){ float co = coef_base[ pulse->pos[i] ]; float ico = co / sqrtf(sqrtf(fabsf(co))) + pulse->amp[i]; coef_base[ pulse->pos[i] ] = cbrtf(fabsf(ico)) * ico; } } return 0; }
true
FFmpeg
febcbd65fa3d00cfdfbfabd1c2a8fb8e659e1ca1
7,645
struct pxa2xx_lcdc_s *pxa2xx_lcdc_init(target_phys_addr_t base, qemu_irq irq, DisplayState *ds) { int iomemtype; struct pxa2xx_lcdc_s *s; s = (struct pxa2xx_lcdc_s *) qemu_mallocz(sizeof(struct pxa2xx_lcdc_s)); s->base = base; s->invalidated = 1; s->irq = irq; s->ds = ds; pxa2xx_lcdc_orientation(s, graphic_rotate); iomemtype = cpu_register_io_memory(0, pxa2xx_lcdc_readfn, pxa2xx_lcdc_writefn, s); cpu_register_physical_memory(base, 0x000fffff, iomemtype); graphic_console_init(ds, pxa2xx_update_display, pxa2xx_invalidate_display, pxa2xx_screen_dump, s); switch (s->ds->depth) { case 0: s->dest_width = 0; break; case 8: s->line_fn[0] = pxa2xx_draw_fn_8; s->line_fn[1] = pxa2xx_draw_fn_8t; s->dest_width = 1; break; case 15: s->line_fn[0] = pxa2xx_draw_fn_15; s->line_fn[1] = pxa2xx_draw_fn_15t; s->dest_width = 2; break; case 16: s->line_fn[0] = pxa2xx_draw_fn_16; s->line_fn[1] = pxa2xx_draw_fn_16t; s->dest_width = 2; break; case 24: s->line_fn[0] = pxa2xx_draw_fn_24; s->line_fn[1] = pxa2xx_draw_fn_24t; s->dest_width = 3; break; case 32: s->line_fn[0] = pxa2xx_draw_fn_32; s->line_fn[1] = pxa2xx_draw_fn_32t; s->dest_width = 4; break; default: fprintf(stderr, "%s: Bad color depth\n", __FUNCTION__); exit(1); } register_savevm("pxa2xx_lcdc", 0, 0, pxa2xx_lcdc_save, pxa2xx_lcdc_load, s); return s; }
true
qemu
187337f8b0ec0813dd3876d1efe37d415fb81c2e
7,646
static int stdio_get_fd(void *opaque) { QEMUFileStdio *s = opaque; return fileno(s->stdio_file); }
true
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
7,647
static inline void cris_fidx_d(unsigned int x) { register unsigned int v asm("$r10") = x; asm ("fidxd\t[%0]\n" : : "r" (v) ); }
true
qemu
21ce148c7ec71ee32834061355a5ecfd1a11f90f
7,648
void *av_malloc(size_t size) { void *ptr = NULL; #if CONFIG_MEMALIGN_HACK long diff; #endif /* let's disallow possible ambiguous cases */ if (size > (max_alloc_size - 32)) return NULL; #if CONFIG_MEMALIGN_HACK ptr = malloc(size + ALIGN); if (!ptr) return ptr; diff = ((~(long)ptr)&(ALIGN - 1)) + 1; ptr = (char *)ptr + diff; ((char *)ptr)[-1] = diff; #elif HAVE_POSIX_MEMALIGN if (size) //OS X on SDK 10.6 has a broken posix_memalign implementation if (posix_memalign(&ptr, ALIGN, size)) ptr = NULL; #elif HAVE_ALIGNED_MALLOC ptr = _aligned_malloc(size, ALIGN); #elif HAVE_MEMALIGN #ifndef __DJGPP__ ptr = memalign(ALIGN, size); #else ptr = memalign(size, ALIGN); #endif /* Why 64? * Indeed, we should align it: * on 4 for 386 * on 16 for 486 * on 32 for 586, PPro - K6-III * on 64 for K7 (maybe for P3 too). * Because L1 and L2 caches are aligned on those values. * But I don't want to code such logic here! */ /* Why 32? * For AVX ASM. SSE / NEON needs only 16. * Why not larger? Because I did not see a difference in benchmarks ... */ /* benchmarks with P3 * memalign(64) + 1 3071, 3051, 3032 * memalign(64) + 2 3051, 3032, 3041 * memalign(64) + 4 2911, 2896, 2915 * memalign(64) + 8 2545, 2554, 2550 * memalign(64) + 16 2543, 2572, 2563 * memalign(64) + 32 2546, 2545, 2571 * memalign(64) + 64 2570, 2533, 2558 * * BTW, malloc seems to do 8-byte alignment by default here. */ #else ptr = malloc(size); #endif if(!ptr && !size) { size = 1; ptr= av_malloc(1); } #if CONFIG_MEMORY_POISONING if (ptr) memset(ptr, 0x2a, size); #endif return ptr; }
false
FFmpeg
84be80698227366d970e045001e4b59e4f99f0a1
7,650
static int mov_write_audio_tag(AVIOContext *pb, MOVTrack *track) { int64_t pos = avio_tell(pb); int version = 0; uint32_t tag = track->tag; if (track->mode == MODE_MOV) { if (track->timescale > UINT16_MAX) { if (mov_get_lpcm_flags(track->enc->codec_id)) tag = AV_RL32("lpcm"); version = 2; } else if (track->audio_vbr || mov_pcm_le_gt16(track->enc->codec_id) || track->enc->codec_id == AV_CODEC_ID_ADPCM_MS || track->enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV || track->enc->codec_id == AV_CODEC_ID_QDM2) { version = 1; } } avio_wb32(pb, 0); /* size */ avio_wl32(pb, tag); // store it byteswapped avio_wb32(pb, 0); /* Reserved */ avio_wb16(pb, 0); /* Reserved */ avio_wb16(pb, 1); /* Data-reference index, XXX == 1 */ /* SoundDescription */ avio_wb16(pb, version); /* Version */ avio_wb16(pb, 0); /* Revision level */ avio_wb32(pb, 0); /* Reserved */ if (version == 2) { avio_wb16(pb, 3); avio_wb16(pb, 16); avio_wb16(pb, 0xfffe); avio_wb16(pb, 0); avio_wb32(pb, 0x00010000); avio_wb32(pb, 72); avio_wb64(pb, av_double2int(track->enc->sample_rate)); avio_wb32(pb, track->enc->channels); avio_wb32(pb, 0x7F000000); avio_wb32(pb, av_get_bits_per_sample(track->enc->codec_id)); avio_wb32(pb, mov_get_lpcm_flags(track->enc->codec_id)); avio_wb32(pb, track->sample_size); avio_wb32(pb, get_samples_per_packet(track)); } else { if (track->mode == MODE_MOV) { avio_wb16(pb, track->enc->channels); if (track->enc->codec_id == AV_CODEC_ID_PCM_U8 || track->enc->codec_id == AV_CODEC_ID_PCM_S8) avio_wb16(pb, 8); /* bits per sample */ else avio_wb16(pb, 16); avio_wb16(pb, track->audio_vbr ? -2 : 0); /* compression ID */ } else { /* reserved for mp4/3gp */ avio_wb16(pb, 2); avio_wb16(pb, 16); avio_wb16(pb, 0); } avio_wb16(pb, 0); /* packet size (= 0) */ avio_wb16(pb, track->enc->sample_rate <= UINT16_MAX ? track->enc->sample_rate : 0); avio_wb16(pb, 0); /* Reserved */ } if(version == 1) { /* SoundDescription V1 extended info */ avio_wb32(pb, track->enc->frame_size); /* Samples per packet */ avio_wb32(pb, track->sample_size / track->enc->channels); /* Bytes per packet */ avio_wb32(pb, track->sample_size); /* Bytes per frame */ avio_wb32(pb, 2); /* Bytes per sample */ } if(track->mode == MODE_MOV && (track->enc->codec_id == AV_CODEC_ID_AAC || track->enc->codec_id == AV_CODEC_ID_AC3 || track->enc->codec_id == AV_CODEC_ID_AMR_NB || track->enc->codec_id == AV_CODEC_ID_ALAC || track->enc->codec_id == AV_CODEC_ID_ADPCM_MS || track->enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV || track->enc->codec_id == AV_CODEC_ID_QDM2 || (mov_pcm_le_gt16(track->enc->codec_id) && version==1))) mov_write_wave_tag(pb, track); else if(track->tag == MKTAG('m','p','4','a')) mov_write_esds_tag(pb, track); else if(track->enc->codec_id == AV_CODEC_ID_AMR_NB) mov_write_amr_tag(pb, track); else if(track->enc->codec_id == AV_CODEC_ID_AC3) mov_write_ac3_tag(pb, track); else if(track->enc->codec_id == AV_CODEC_ID_ALAC) mov_write_extradata_tag(pb, track); else if (track->enc->codec_id == AV_CODEC_ID_WMAPRO) mov_write_wfex_tag(pb, track); else if (track->vos_len > 0) mov_write_glbl_tag(pb, track); if (track->mode == MODE_MOV && track->enc->codec_type == AVMEDIA_TYPE_AUDIO) mov_write_chan_tag(pb, track); return update_size(pb, pos); }
false
FFmpeg
60b433d905c582ed3656c120b3ffffd0119d5377
7,652
static int metadata_parse(FLACContext *s) { int i, metadata_last, metadata_type, metadata_size; int initial_pos= get_bits_count(&s->gb); if (show_bits_long(&s->gb, 32) == MKBETAG('f','L','a','C')) { skip_bits_long(&s->gb, 32); do { metadata_last = get_bits1(&s->gb); metadata_type = get_bits(&s->gb, 7); metadata_size = get_bits_long(&s->gb, 24); if (get_bits_count(&s->gb) + 8*metadata_size > s->gb.size_in_bits) { skip_bits_long(&s->gb, initial_pos - get_bits_count(&s->gb)); break; } if (metadata_size) { switch (metadata_type) { case FLAC_METADATA_TYPE_STREAMINFO: if (!s->got_streaminfo) { ff_flac_parse_streaminfo(s->avctx, (FLACStreaminfo *)s, s->gb.buffer+get_bits_count(&s->gb)/8); allocate_buffers(s); s->got_streaminfo = 1; } default: for (i = 0; i < metadata_size; i++) skip_bits(&s->gb, 8); } } } while (!metadata_last); return 1; } return 0; }
false
FFmpeg
55a727383bab266b757b642aabaa2b066c14e7c7
7,653
static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp, int32_t newdirfid, V9fsString *name) { char *end; int err = 0; V9fsPath new_path; V9fsFidState *tfidp; V9fsState *s = pdu->s; V9fsFidState *dirfidp = NULL; char *old_name, *new_name; v9fs_path_init(&new_path); if (newdirfid != -1) { dirfidp = get_fid(pdu, newdirfid); if (dirfidp == NULL) { err = -ENOENT; goto out_nofid; } BUG_ON(dirfidp->fid_type != P9_FID_NONE); v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path); } else { old_name = fidp->path.data; end = strrchr(old_name, '/'); if (end) { end++; } else { end = old_name; } new_name = g_malloc0(end - old_name + name->size + 1); strncat(new_name, old_name, end - old_name); strncat(new_name + (end - old_name), name->data, name->size); v9fs_co_name_to_path(pdu, NULL, new_name, &new_path); g_free(new_name); } err = v9fs_co_rename(pdu, &fidp->path, &new_path); if (err < 0) { goto out; } /* * Fixup fid's pointing to the old name to * start pointing to the new name */ for (tfidp = s->fid_list; tfidp; tfidp = tfidp->next) { if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) { /* replace the name */ v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data)); } } out: if (dirfidp) { put_fid(pdu, dirfidp); } v9fs_path_free(&new_path); out_nofid: return err; }
true
qemu
49dd946bb5419681c8668b09a6d10f42bc707b78
7,655
static av_cold int alac_decode_close(AVCodecContext *avctx) { ALACContext *alac = avctx->priv_data; int chan; for (chan = 0; chan < alac->numchannels; chan++) { av_freep(&alac->predicterror_buffer[chan]); av_freep(&alac->outputsamples_buffer[chan]); av_freep(&alac->wasted_bits_buffer[chan]); } return 0; }
true
FFmpeg
53df079a730043cd0aa330c9aba7950034b1424f
7,656
void OPPROTO op_fdiv_ST0_FT0(void) { ST0 /= FT0; }
true
qemu
2ee73ac3a855fb0cfba3db91fdd1ecebdbc6f971
7,657
static void ipmi_sim_handle_command(IPMIBmc *b, uint8_t *cmd, unsigned int cmd_len, unsigned int max_cmd_len, uint8_t msg_id) { IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b); IPMIInterface *s = ibs->parent.intf; IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); unsigned int netfn; uint8_t rsp[MAX_IPMI_MSG_SIZE]; unsigned int rsp_len_holder = 0; unsigned int *rsp_len = &rsp_len_holder; unsigned int max_rsp_len = sizeof(rsp); /* Set up the response, set the low bit of NETFN. */ /* Note that max_rsp_len must be at least 3 */ if (max_rsp_len < 3) { rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED; goto out; } IPMI_ADD_RSP_DATA(cmd[0] | 0x04); IPMI_ADD_RSP_DATA(cmd[1]); IPMI_ADD_RSP_DATA(0); /* Assume success */ /* If it's too short or it was truncated, return an error. */ if (cmd_len < 2) { rsp[2] = IPMI_CC_REQUEST_DATA_LENGTH_INVALID; goto out; } if (cmd_len > max_cmd_len) { rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED; goto out; } if ((cmd[0] & 0x03) != 0) { /* Only have stuff on LUN 0 */ rsp[2] = IPMI_CC_COMMAND_INVALID_FOR_LUN; goto out; } netfn = cmd[0] >> 2; /* Odd netfns are not valid, make sure the command is registered */ if ((netfn & 1) || !ibs->netfns[netfn / 2] || (cmd[1] >= ibs->netfns[netfn / 2]->cmd_nums) || (!ibs->netfns[netfn / 2]->cmd_handlers[cmd[1]])) { rsp[2] = IPMI_CC_INVALID_CMD; goto out; } ibs->netfns[netfn / 2]->cmd_handlers[cmd[1]](ibs, cmd, cmd_len, rsp, rsp_len, max_rsp_len); out: k->handle_rsp(s, msg_id, rsp, *rsp_len); next_timeout(ibs); }
true
qemu
4f298a4b2957b7833bc607c951ca27c458d98d88
7,658
void OPPROTO op_divw (void) { if (unlikely(((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0)) { T0 = (int32_t)((-1) * ((uint32_t)T0 >> 31)); } else { T0 = (int32_t)T0 / (int32_t)T1; } RETURN(); }
true
qemu
6f2d8978728c48ca46f5c01835438508aace5c64
7,660
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos, QEMUIOVector *qiov, BlockDriverCompletionFunc *cb, void *opaque) { uint64_t backing_length = 0; size_t size; /* If there is a backing file, get its length. Treat the absence of a * backing file like a zero length backing file. */ if (s->bs->backing_hd) { int64_t l = bdrv_getlength(s->bs->backing_hd); if (l < 0) { cb(opaque, l); return; } backing_length = l; } /* Zero all sectors if reading beyond the end of the backing file */ if (pos >= backing_length || pos + qiov->size > backing_length) { qemu_iovec_memset(qiov, 0, 0, qiov->size); } /* Complete now if there are no backing file sectors to read */ if (pos >= backing_length) { cb(opaque, 0); return; } /* If the read straddles the end of the backing file, shorten it */ size = MIN((uint64_t)backing_length - pos, qiov->size); BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO); bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE, qiov, size / BDRV_SECTOR_SIZE, cb, opaque); }
true
qemu
f06ee3d4aa547df8d7d2317b2b6db7a88c1f3744
7,661
int av_parse_cpu_caps(unsigned *flags, const char *s) { static const AVOption cpuflags_opts[] = { { "flags" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" }, #if ARCH_PPC { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC }, .unit = "flags" }, #elif ARCH_X86 { "mmx" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX }, .unit = "flags" }, { "mmx2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2 }, .unit = "flags" }, { "mmxext" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2 }, .unit = "flags" }, { "sse" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE }, .unit = "flags" }, { "sse2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2 }, .unit = "flags" }, { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2SLOW }, .unit = "flags" }, { "sse3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3 }, .unit = "flags" }, { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3SLOW }, .unit = "flags" }, { "ssse3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSSE3 }, .unit = "flags" }, { "atom" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM }, .unit = "flags" }, { "sse4.1" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE4 }, .unit = "flags" }, { "sse4.2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE42 }, .unit = "flags" }, { "avx" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX }, .unit = "flags" }, { "xop" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_XOP }, .unit = "flags" }, { "fma3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA3 }, .unit = "flags" }, { "fma4" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA4 }, .unit = "flags" }, { "avx2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX2 }, .unit = "flags" }, { "bmi1" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1 }, .unit = "flags" }, { "bmi2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI2 }, .unit = "flags" }, { "3dnow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOW }, .unit = "flags" }, { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOWEXT }, .unit = "flags" }, { "cmov", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV }, .unit = "flags" }, #define CPU_FLAG_P2 AV_CPU_FLAG_CMOV | AV_CPU_FLAG_MMX #define CPU_FLAG_P3 CPU_FLAG_P2 | AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE #define CPU_FLAG_P4 CPU_FLAG_P3| AV_CPU_FLAG_SSE2 { "pentium2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P2 }, .unit = "flags" }, { "pentium3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P3 }, .unit = "flags" }, { "pentium4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P4 }, .unit = "flags" }, #define CPU_FLAG_K62 AV_CPU_FLAG_MMX | AV_CPU_FLAG_3DNOW #define CPU_FLAG_ATHLON CPU_FLAG_K62 | AV_CPU_FLAG_CMOV | AV_CPU_FLAG_3DNOWEXT | AV_CPU_FLAG_MMX2 #define CPU_FLAG_ATHLONXP CPU_FLAG_ATHLON | AV_CPU_FLAG_SSE #define CPU_FLAG_K8 CPU_FLAG_ATHLONXP | AV_CPU_FLAG_SSE2 { "k6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX }, .unit = "flags" }, { "k62", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K62 }, .unit = "flags" }, { "athlon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLON }, .unit = "flags" }, { "athlonxp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLONXP }, .unit = "flags" }, { "k8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K8 }, .unit = "flags" }, #elif ARCH_ARM { "armv5te", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE }, .unit = "flags" }, { "armv6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6 }, .unit = "flags" }, { "armv6t2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2 }, .unit = "flags" }, { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" }, { "vfpv3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3 }, .unit = "flags" }, { "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" }, #elif ARCH_AARCH64 { "armv8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8 }, .unit = "flags" }, { "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" }, { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" }, #endif { NULL }, }; static const AVClass class = { .class_name = "cpuflags", .item_name = av_default_item_name, .option = cpuflags_opts, .version = LIBAVUTIL_VERSION_INT, }; const AVClass *pclass = &class; return av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, flags); }
true
FFmpeg
1e519b9d407fd35538b8d4dfdc723448355e9fe1
7,663
static int nbd_handle_reply_err(QIOChannel *ioc, nbd_opt_reply *reply, Error **errp) { char *msg = NULL; int result = -1; if (!(reply->type & (1 << 31))) { return 1; } if (reply->length) { if (reply->length > NBD_MAX_BUFFER_SIZE) { error_setg(errp, "server error 0x%" PRIx32 " (%s) message is too long", reply->type, nbd_rep_lookup(reply->type)); goto cleanup; } msg = g_malloc(reply->length + 1); if (nbd_read(ioc, msg, reply->length, errp) < 0) { error_prepend(errp, "failed to read option error 0x%" PRIx32 " (%s) message", reply->type, nbd_rep_lookup(reply->type)); goto cleanup; } msg[reply->length] = '\0'; } switch (reply->type) { case NBD_REP_ERR_UNSUP: trace_nbd_reply_err_unsup(reply->option, nbd_opt_lookup(reply->option)); result = 0; goto cleanup; case NBD_REP_ERR_POLICY: error_setg(errp, "Denied by server for option %" PRIx32 " (%s)", reply->option, nbd_opt_lookup(reply->option)); break; case NBD_REP_ERR_INVALID: error_setg(errp, "Invalid data length for option %" PRIx32 " (%s)", reply->option, nbd_opt_lookup(reply->option)); break; case NBD_REP_ERR_PLATFORM: error_setg(errp, "Server lacks support for option %" PRIx32 " (%s)", reply->option, nbd_opt_lookup(reply->option)); break; case NBD_REP_ERR_TLS_REQD: error_setg(errp, "TLS negotiation required before option %" PRIx32 " (%s)", reply->option, nbd_opt_lookup(reply->option)); break; case NBD_REP_ERR_UNKNOWN: error_setg(errp, "Requested export not available for option %" PRIx32 " (%s)", reply->option, nbd_opt_lookup(reply->option)); break; case NBD_REP_ERR_SHUTDOWN: error_setg(errp, "Server shutting down before option %" PRIx32 " (%s)", reply->option, nbd_opt_lookup(reply->option)); break; case NBD_REP_ERR_BLOCK_SIZE_REQD: error_setg(errp, "Server requires INFO_BLOCK_SIZE for option %" PRIx32 " (%s)", reply->option, nbd_opt_lookup(reply->option)); break; default: error_setg(errp, "Unknown error code when asking for option %" PRIx32 " (%s)", reply->option, nbd_opt_lookup(reply->option)); break; } if (msg) { error_append_hint(errp, "%s\n", msg); } cleanup: g_free(msg); if (result < 0) { nbd_send_opt_abort(ioc); } return result; }
true
qemu
9a76bd783d0421962e8c65bb853a57eef4897720
7,664
static int check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res, uint16_t *refcount_table, int refcount_table_size, int64_t l1_table_offset, int l1_size, int flags) { BDRVQcowState *s = bs->opaque; uint64_t *l1_table, l2_offset, l1_size2; int i, ret; l1_size2 = l1_size * sizeof(uint64_t); /* Mark L1 table as used */ inc_refcounts(bs, res, refcount_table, refcount_table_size, l1_table_offset, l1_size2); /* Read L1 table entries from disk */ if (l1_size2 == 0) { l1_table = NULL; } else { l1_table = g_malloc(l1_size2); if (bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2) != l1_size2) goto fail; for(i = 0;i < l1_size; i++) be64_to_cpus(&l1_table[i]); } /* Do the actual checks */ for(i = 0; i < l1_size; i++) { l2_offset = l1_table[i]; if (l2_offset) { /* Mark L2 table as used */ l2_offset &= L1E_OFFSET_MASK; inc_refcounts(bs, res, refcount_table, refcount_table_size, l2_offset, s->cluster_size); /* L2 tables are cluster aligned */ if (offset_into_cluster(s, l2_offset)) { fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not " "cluster aligned; L1 entry corrupted\n", l2_offset); res->corruptions++; } /* Process and check L2 entries */ ret = check_refcounts_l2(bs, res, refcount_table, refcount_table_size, l2_offset, flags); if (ret < 0) { goto fail; } } } g_free(l1_table); return 0; fail: fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n"); res->check_errors++; g_free(l1_table); return -EIO; }
true
qemu
de82815db1c89da058b7fb941dab137d6d9ab738
7,667
static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n) { int level, i, j, run; RLTable *rl = &ff_rl_mpeg1; uint8_t * const scantable = s->intra_scantable.permutated; const uint16_t *quant_matrix; const int qscale = s->qscale; int mismatch; mismatch = 1; { OPEN_READER(re, &s->gb); i = -1; if (n < 4) quant_matrix = s->inter_matrix; else quant_matrix = s->chroma_inter_matrix; // special case for first coefficient, no need to add second VLC table UPDATE_CACHE(re, &s->gb); if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { level= (3 * qscale * quant_matrix[0]) >> 5; if (GET_CACHE(re, &s->gb) & 0x40000000) level = -level; block[0] = level; mismatch ^= level; i++; SKIP_BITS(re, &s->gb, 2); if (((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) goto end; } /* now quantify & encode AC coefficients */ for (;;) { GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); if (level != 0) { i += run; j = scantable[i]; level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6) + 1; LAST_SKIP_BITS(re, &s->gb, 6); UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); i += run; j = scantable[i]; if (level < 0) { level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5; level = -level; } else { level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5; } } if (i > 63) { av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } mismatch ^= level; block[j] = level; if (((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) break; UPDATE_CACHE(re, &s->gb); } end: LAST_SKIP_BITS(re, &s->gb, 2); CLOSE_READER(re, &s->gb); } block[63] ^= (mismatch & 1); s->block_last_index[n] = i; return 0; }
true
FFmpeg
6d93307f8df81808f0dcdbc064b848054a6e83b3
7,668
static void channel_out_run(struct fs_dma_ctrl *ctrl, int c) { uint32_t len; uint32_t saved_data_buf; unsigned char buf[2 * 1024]; if (ctrl->channels[c].eol == 1) return; saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); D(fprintf(logfile, "ch=%d buf=%x after=%x saved_data_buf=%x\n", c, (uint32_t)ctrl->channels[c].current_d.buf, (uint32_t)ctrl->channels[c].current_d.after, saved_data_buf)); len = (uint32_t)(unsigned long) ctrl->channels[c].current_d.after; len -= saved_data_buf; if (len > sizeof buf) len = sizeof buf; cpu_physical_memory_read (saved_data_buf, buf, len); D(printf("channel %d pushes %x %u bytes\n", c, saved_data_buf, len)); if (ctrl->channels[c].client->client.push) ctrl->channels[c].client->client.push( ctrl->channels[c].client->client.opaque, buf, len); else printf("WARNING: DMA ch%d dataloss, no attached client.\n", c); saved_data_buf += len; if (saved_data_buf == (uint32_t)(unsigned long)ctrl->channels[c].current_d.after) { /* Done. Step to next. */ if (ctrl->channels[c].current_d.out_eop) { /* TODO: signal eop to the client. */ D(printf("signal eop\n")); } if (ctrl->channels[c].current_d.intr) { /* TODO: signal eop to the client. */ /* data intr. */ D(printf("signal intr\n")); ctrl->channels[c].regs[R_INTR] |= (1 << 2); channel_update_irq(ctrl, c); } if (ctrl->channels[c].current_d.eol) { D(printf("channel %d EOL\n", c)); ctrl->channels[c].eol = 1; /* Mark the context as disabled. */ ctrl->channels[c].current_c.dis = 1; channel_store_c(ctrl, c); channel_stop(ctrl, c); } else { ctrl->channels[c].regs[RW_SAVED_DATA] = (uint32_t)(unsigned long) ctrl->channels[c].current_d.next; /* Load new descriptor. */ channel_load_d(ctrl, c); saved_data_buf = (uint32_t)(unsigned long) ctrl->channels[c].current_d.buf; } channel_store_d(ctrl, c); ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; D(dump_d(c, &ctrl->channels[c].current_d)); } ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; }
true
qemu
4487fd349baa6d2ae34ab86dea843642d20a036a
7,669
static int dvbsub_parse_region_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { DVBSubContext *ctx = avctx->priv_data; const uint8_t *buf_end = buf + buf_size; int region_id, object_id; int av_unused version; DVBSubRegion *region; DVBSubObject *object; DVBSubObjectDisplay *display; int fill; int ret; if (buf_size < 10) return AVERROR_INVALIDDATA; region_id = *buf++; region = get_region(ctx, region_id); if (!region) { region = av_mallocz(sizeof(DVBSubRegion)); if (!region) return AVERROR(ENOMEM); region->id = region_id; region->version = -1; region->next = ctx->region_list; ctx->region_list = region; version = ((*buf)>>4) & 15; fill = ((*buf++) >> 3) & 1; region->width = AV_RB16(buf); buf += 2; region->height = AV_RB16(buf); buf += 2; if (region->width * region->height != region->buf_size) { av_free(region->pbuf); region->buf_size = region->width * region->height; region->pbuf = av_malloc(region->buf_size); if (!region->pbuf) { region->buf_size = region->width = region->height = 0; return AVERROR(ENOMEM); fill = 1; region->dirty = 0; region->depth = 1 << (((*buf++) >> 2) & 7); if(region->depth<2 || region->depth>8){ av_log(avctx, AV_LOG_ERROR, "region depth %d is invalid\n", region->depth); region->depth= 4; region->clut = *buf++; if (region->depth == 8) { region->bgcolor = *buf++; buf += 1; } else { buf += 1; if (region->depth == 4) region->bgcolor = (((*buf++) >> 4) & 15); else region->bgcolor = (((*buf++) >> 2) & 3); ff_dlog(avctx, "Region %d, (%dx%d)\n", region_id, region->width, region->height); if (fill) { memset(region->pbuf, region->bgcolor, region->buf_size); ff_dlog(avctx, "Fill region (%d)\n", region->bgcolor); delete_region_display_list(ctx, region); while (buf + 5 < buf_end) { object_id = AV_RB16(buf); buf += 2; object = get_object(ctx, object_id); if (!object) { object = av_mallocz(sizeof(DVBSubObject)); if (!object) return AVERROR(ENOMEM); object->id = object_id; object->next = ctx->object_list; ctx->object_list = object; object->type = (*buf) >> 6; display = av_mallocz(sizeof(DVBSubObjectDisplay)); if (!display) return AVERROR(ENOMEM); display->object_id = object_id; display->region_id = region_id; display->x_pos = AV_RB16(buf) & 0xfff; buf += 2; display->y_pos = AV_RB16(buf) & 0xfff; buf += 2; if ((object->type == 1 || object->type == 2) && buf+1 < buf_end) { display->fgcolor = *buf++; display->bgcolor = *buf++; display->region_list_next = region->display_list; region->display_list = display; display->object_list_next = object->display_list; object->display_list = display; return 0;
true
FFmpeg
0075d9eced22839fa4f7a6eaa02155803ccae3e6
7,671
static void x86_cpu_parse_featurestr(CPUState *cs, char *features, Error **errp) { X86CPU *cpu = X86_CPU(cs); char *featurestr; /* Single 'key=value" string being parsed */ /* Features to be added */ FeatureWordArray plus_features = { 0 }; /* Features to be removed */ FeatureWordArray minus_features = { 0 }; uint32_t numvalue; CPUX86State *env = &cpu->env; Error *local_err = NULL; featurestr = features ? strtok(features, ",") : NULL; while (featurestr) { char *val; if (featurestr[0] == '+') { add_flagname_to_bitmaps(featurestr + 1, plus_features); } else if (featurestr[0] == '-') { add_flagname_to_bitmaps(featurestr + 1, minus_features); } else if ((val = strchr(featurestr, '='))) { *val = 0; val++; feat2prop(featurestr); if (!strcmp(featurestr, "xlevel")) { char *err; char num[32]; numvalue = strtoul(val, &err, 0); if (!*val || *err) { error_setg(&local_err, "bad numerical value %s", val); goto out; } if (numvalue < 0x80000000) { error_report("xlevel value shall always be >= 0x80000000" ", fixup will be removed in future versions"); numvalue += 0x80000000; } snprintf(num, sizeof(num), "%" PRIu32, numvalue); object_property_parse(OBJECT(cpu), num, featurestr, &local_err); } else if (!strcmp(featurestr, "tsc-freq")) { int64_t tsc_freq; char *err; char num[32]; tsc_freq = strtosz_suffix_unit(val, &err, STRTOSZ_DEFSUFFIX_B, 1000); if (tsc_freq < 0 || *err) { error_setg(&local_err, "bad numerical value %s", val); goto out; } snprintf(num, sizeof(num), "%" PRId64, tsc_freq); object_property_parse(OBJECT(cpu), num, "tsc-frequency", &local_err); } else if (!strcmp(featurestr, "hv-spinlocks")) { char *err; const int min = 0xFFF; char num[32]; numvalue = strtoul(val, &err, 0); if (!*val || *err) { error_setg(&local_err, "bad numerical value %s", val); goto out; } if (numvalue < min) { error_report("hv-spinlocks value shall always be >= 0x%x" ", fixup will be removed in future versions", min); numvalue = min; } snprintf(num, sizeof(num), "%" PRId32, numvalue); object_property_parse(OBJECT(cpu), num, featurestr, &local_err); } else { object_property_parse(OBJECT(cpu), val, featurestr, &local_err); } } else { feat2prop(featurestr); object_property_parse(OBJECT(cpu), "on", featurestr, &local_err); } if (local_err) { error_propagate(errp, local_err); goto out; } featurestr = strtok(NULL, ","); } env->features[FEAT_1_EDX] |= plus_features[FEAT_1_EDX]; env->features[FEAT_1_ECX] |= plus_features[FEAT_1_ECX]; env->features[FEAT_8000_0001_EDX] |= plus_features[FEAT_8000_0001_EDX]; env->features[FEAT_8000_0001_ECX] |= plus_features[FEAT_8000_0001_ECX]; env->features[FEAT_C000_0001_EDX] |= plus_features[FEAT_C000_0001_EDX]; env->features[FEAT_KVM] |= plus_features[FEAT_KVM]; env->features[FEAT_SVM] |= plus_features[FEAT_SVM]; env->features[FEAT_7_0_EBX] |= plus_features[FEAT_7_0_EBX]; env->features[FEAT_1_EDX] &= ~minus_features[FEAT_1_EDX]; env->features[FEAT_1_ECX] &= ~minus_features[FEAT_1_ECX]; env->features[FEAT_8000_0001_EDX] &= ~minus_features[FEAT_8000_0001_EDX]; env->features[FEAT_8000_0001_ECX] &= ~minus_features[FEAT_8000_0001_ECX]; env->features[FEAT_C000_0001_EDX] &= ~minus_features[FEAT_C000_0001_EDX]; env->features[FEAT_KVM] &= ~minus_features[FEAT_KVM]; env->features[FEAT_SVM] &= ~minus_features[FEAT_SVM]; env->features[FEAT_7_0_EBX] &= ~minus_features[FEAT_7_0_EBX]; out: return; }
true
qemu
6b1dd54b6a4652a3a1e15a4beacd3be554a9ade1
7,672
static int flashsv_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { int buf_size = avpkt->size; FlashSVContext *s = avctx->priv_data; int h_blocks, v_blocks, h_part, v_part, i, j; GetBitContext gb; /* no supplementary picture */ if (buf_size == 0) return 0; if (buf_size < 4) return -1; init_get_bits(&gb, avpkt->data, buf_size * 8); /* start to parse the bitstream */ s->block_width = 16 * (get_bits(&gb, 4) + 1); s->image_width = get_bits(&gb, 12); s->block_height = 16 * (get_bits(&gb, 4) + 1); s->image_height = get_bits(&gb, 12); if (s->ver == 2) { skip_bits(&gb, 6); if (get_bits1(&gb)) { av_log_missing_feature(avctx, "iframe", 1); return AVERROR_PATCHWELCOME; if (get_bits1(&gb)) { av_log_missing_feature(avctx, "Custom palette", 1); return AVERROR_PATCHWELCOME; /* calculate number of blocks and size of border (partial) blocks */ h_blocks = s->image_width / s->block_width; h_part = s->image_width % s->block_width; v_blocks = s->image_height / s->block_height; v_part = s->image_height % s->block_height; /* the block size could change between frames, make sure the buffer * is large enough, if not, get a larger one */ if (s->block_size < s->block_width * s->block_height) { int tmpblock_size = 3 * s->block_width * s->block_height; s->tmpblock = av_realloc(s->tmpblock, tmpblock_size); if (!s->tmpblock) { av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n"); return AVERROR(ENOMEM); if (s->ver == 2) { s->deflate_block_size = calc_deflate_block_size(tmpblock_size); if (s->deflate_block_size <= 0) { av_log(avctx, AV_LOG_ERROR, "Can't determine deflate buffer size.\n"); return -1; s->deflate_block = av_realloc(s->deflate_block, s->deflate_block_size); if (!s->deflate_block) { av_log(avctx, AV_LOG_ERROR, "Can't allocate deflate buffer.\n"); return AVERROR(ENOMEM); s->block_size = s->block_width * s->block_height; /* initialize the image size once */ if (avctx->width == 0 && avctx->height == 0) { avctx->width = s->image_width; avctx->height = s->image_height; /* check for changes of image width and image height */ if (avctx->width != s->image_width || avctx->height != s->image_height) { "Frame width or height differs from first frame!\n"); av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n", avctx->height, avctx->width, s->image_height, s->image_width); /* we care for keyframes only in Screen Video v2 */ s->is_keyframe = (avpkt->flags & AV_PKT_FLAG_KEY) && (s->ver == 2); if (s->is_keyframe) { s->keyframedata = av_realloc(s->keyframedata, avpkt->size); memcpy(s->keyframedata, avpkt->data, avpkt->size); s->blocks = av_realloc(s->blocks, (v_blocks + !!v_part) * (h_blocks + !!h_part) * sizeof(s->blocks[0])); av_dlog(avctx, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n", s->image_width, s->image_height, s->block_width, s->block_height, h_blocks, v_blocks, h_part, v_part); s->frame.reference = 3; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; /* loop over all block columns */ for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) { int y_pos = j * s->block_height; // vertical position in frame int cur_blk_height = (j < v_blocks) ? s->block_height : v_part; /* loop over all block rows */ for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) { int x_pos = i * s->block_width; // horizontal position in frame int cur_blk_width = (i < h_blocks) ? s->block_width : h_part; int has_diff = 0; /* get the size of the compressed zlib chunk */ int size = get_bits(&gb, 16); s->color_depth = 0; s->zlibprime_curr = 0; s->zlibprime_prev = 0; s->diff_start = 0; s->diff_height = cur_blk_height; if (8 * size > get_bits_left(&gb)) { avctx->release_buffer(avctx, &s->frame); s->frame.data[0] = NULL; if (s->ver == 2 && size) { skip_bits(&gb, 3); s->color_depth = get_bits(&gb, 2); has_diff = get_bits1(&gb); s->zlibprime_curr = get_bits1(&gb); s->zlibprime_prev = get_bits1(&gb); if (s->color_depth != 0 && s->color_depth != 2) { "%dx%d invalid color depth %d\n", i, j, s->color_depth); if (has_diff) { s->diff_start = get_bits(&gb, 8); s->diff_height = get_bits(&gb, 8); av_log(avctx, AV_LOG_DEBUG, "%dx%d diff start %d height %d\n", i, j, s->diff_start, s->diff_height); size -= 2; if (s->zlibprime_prev) av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_prev\n", i, j); if (s->zlibprime_curr) { int col = get_bits(&gb, 8); int row = get_bits(&gb, 8); av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_curr %dx%d\n", i, j, col, row); size -= 2; av_log_missing_feature(avctx, "zlibprime_curr", 1); return AVERROR_PATCHWELCOME; size--; // account for flags byte if (has_diff) { int k; int off = (s->image_height - y_pos - 1) * s->frame.linesize[0]; for (k = 0; k < cur_blk_height; k++) memcpy(s->frame.data[0] + off - k*s->frame.linesize[0] + x_pos*3, s->keyframe + off - k*s->frame.linesize[0] + x_pos*3, cur_blk_width * 3); /* skip unchanged blocks, which have size 0 */ if (size) { if (flashsv_decode_block(avctx, avpkt, &gb, size, cur_blk_width, cur_blk_height, x_pos, y_pos, i + j * (h_blocks + !!h_part))) "error in decompression of block %dx%d\n", i, j); if (s->is_keyframe && s->ver == 2) { s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height); av_log(avctx, AV_LOG_ERROR, "Cannot allocate image data\n"); return AVERROR(ENOMEM); memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height); *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; if ((get_bits_count(&gb) / 8) != buf_size) av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n", buf_size, (get_bits_count(&gb) / 8)); /* report that the buffer was completely consumed */ return buf_size;
true
FFmpeg
5ae72f54532960cb9eae82a1c9e8d505106c022b
7,673
static void setup_window(AVFormatContext *s) { XCBGrabContext *c = s->priv_data; uint32_t mask = XCB_CW_OVERRIDE_REDIRECT | XCB_CW_EVENT_MASK; uint32_t values[] = { 1, XCB_EVENT_MASK_EXPOSURE | XCB_EVENT_MASK_STRUCTURE_NOTIFY }; xcb_rectangle_t rect = { 0, 0, c->width, c->height }; c->window = xcb_generate_id(c->conn); xcb_create_window(c->conn, XCB_COPY_FROM_PARENT, c->window, c->screen->root, c->x - c->region_border, c->y - c->region_border, c->width + c->region_border * 2, c->height + c->region_border * 2, 0, XCB_WINDOW_CLASS_INPUT_OUTPUT, XCB_COPY_FROM_PARENT, mask, values); #if CONFIG_LIBXCB_SHAPE xcb_shape_rectangles(c->conn, XCB_SHAPE_SO_SUBTRACT, XCB_SHAPE_SK_BOUNDING, XCB_CLIP_ORDERING_UNSORTED, c->window, c->region_border, c->region_border, 1, &rect); #endif xcb_map_window(c->conn, c->window); draw_rectangle(s); }
true
FFmpeg
6a817ac1e9a0d2b747f71abc5345a54434ceb4a2
7,674
static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, int src_size) { register const uint8_t* s=src; register uint8_t* d=dst; register const uint8_t *end; const uint8_t *mm_end; end = s + src_size; __asm__ volatile(PREFETCH" %0"::"m"(*s)); __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg)); __asm__ volatile("movq %0, %%mm6"::"m"(mask15b)); mm_end = end - 15; while (s<mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq 8%1, %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "psrlq $1, %%mm0 \n\t" "psrlq $1, %%mm2 \n\t" "pand %%mm7, %%mm0 \n\t" "pand %%mm7, %%mm2 \n\t" "pand %%mm6, %%mm1 \n\t" "pand %%mm6, %%mm3 \n\t" "por %%mm1, %%mm0 \n\t" "por %%mm3, %%mm2 \n\t" MOVNTQ" %%mm0, %0 \n\t" MOVNTQ" %%mm2, 8%0" :"=m"(*d) :"m"(*s) ); d+=16; s+=16; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); mm_end = end - 3; while (s < mm_end) { register uint32_t x= *((const uint32_t*)s); *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F); s+=4; d+=4; } if (s < end) { register uint16_t x= *((const uint16_t*)s); *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F); } }
true
FFmpeg
90540c2d5ace46a1e9789c75fde0b1f7dbb12a9b
7,675
void ff_ivi_recompose53(const IVIPlaneDesc *plane, uint8_t *dst, const int dst_pitch, const int num_bands) { int x, y, indx; int32_t p0, p1, p2, p3, tmp0, tmp1, tmp2; int32_t b0_1, b0_2, b1_1, b1_2, b1_3, b2_1, b2_2, b2_3, b2_4, b2_5, b2_6; int32_t b3_1, b3_2, b3_3, b3_4, b3_5, b3_6, b3_7, b3_8, b3_9; int32_t pitch, back_pitch; const IDWTELEM *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr; /* all bands should have the same pitch */ pitch = plane->bands[0].pitch; /* pixels at the position "y-1" will be set to pixels at the "y" for the 1st iteration */ back_pitch = 0; /* get pointers to the wavelet bands */ b0_ptr = plane->bands[0].buf; b1_ptr = plane->bands[1].buf; b2_ptr = plane->bands[2].buf; b3_ptr = plane->bands[3].buf; for (y = 0; y < plane->height; y += 2) { if (y+2 >= plane->height) pitch= 0; /* load storage variables with values */ if (num_bands > 0) { b0_1 = b0_ptr[0]; b0_2 = b0_ptr[pitch]; } if (num_bands > 1) { b1_1 = b1_ptr[back_pitch]; b1_2 = b1_ptr[0]; b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch]; } if (num_bands > 2) { b2_2 = b2_ptr[0]; // b2[x, y ] b2_3 = b2_2; // b2[x+1,y ] = b2[x,y] b2_5 = b2_ptr[pitch]; // b2[x ,y+1] b2_6 = b2_5; // b2[x+1,y+1] = b2[x,y+1] } if (num_bands > 3) { b3_2 = b3_ptr[back_pitch]; // b3[x ,y-1] b3_3 = b3_2; // b3[x+1,y-1] = b3[x ,y-1] b3_5 = b3_ptr[0]; // b3[x ,y ] b3_6 = b3_5; // b3[x+1,y ] = b3[x ,y ] b3_8 = b3_2 - b3_5*6 + b3_ptr[pitch]; b3_9 = b3_8; } for (x = 0, indx = 0; x < plane->width; x+=2, indx++) { /* some values calculated in the previous iterations can */ /* be reused in the next ones, so do appropriate copying */ b2_1 = b2_2; // b2[x-1,y ] = b2[x, y ] b2_2 = b2_3; // b2[x ,y ] = b2[x+1,y ] b2_4 = b2_5; // b2[x-1,y+1] = b2[x ,y+1] b2_5 = b2_6; // b2[x ,y+1] = b2[x+1,y+1] b3_1 = b3_2; // b3[x-1,y-1] = b3[x ,y-1] b3_2 = b3_3; // b3[x ,y-1] = b3[x+1,y-1] b3_4 = b3_5; // b3[x-1,y ] = b3[x ,y ] b3_5 = b3_6; // b3[x ,y ] = b3[x+1,y ] b3_7 = b3_8; // vert_HPF(x-1) b3_8 = b3_9; // vert_HPF(x ) p0 = p1 = p2 = p3 = 0; /* process the LL-band by applying LPF both vertically and horizontally */ if (num_bands > 0) { tmp0 = b0_1; tmp2 = b0_2; b0_1 = b0_ptr[indx+1]; b0_2 = b0_ptr[pitch+indx+1]; tmp1 = tmp0 + b0_1; p0 = tmp0 << 4; p1 = tmp1 << 3; p2 = (tmp0 + tmp2) << 3; p3 = (tmp1 + tmp2 + b0_2) << 2; } /* process the HL-band by applying HPF vertically and LPF horizontally */ if (num_bands > 1) { tmp0 = b1_2; tmp1 = b1_1; b1_2 = b1_ptr[indx+1]; b1_1 = b1_ptr[back_pitch+indx+1]; tmp2 = tmp1 - tmp0*6 + b1_3; b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch+indx+1]; p0 += (tmp0 + tmp1) << 3; p1 += (tmp0 + tmp1 + b1_1 + b1_2) << 2; p2 += tmp2 << 2; p3 += (tmp2 + b1_3) << 1; } /* process the LH-band by applying LPF vertically and HPF horizontally */ if (num_bands > 2) { b2_3 = b2_ptr[indx+1]; b2_6 = b2_ptr[pitch+indx+1]; tmp0 = b2_1 + b2_2; tmp1 = b2_1 - b2_2*6 + b2_3; p0 += tmp0 << 3; p1 += tmp1 << 2; p2 += (tmp0 + b2_4 + b2_5) << 2; p3 += (tmp1 + b2_4 - b2_5*6 + b2_6) << 1; } /* process the HH-band by applying HPF both vertically and horizontally */ if (num_bands > 3) { b3_6 = b3_ptr[indx+1]; // b3[x+1,y ] b3_3 = b3_ptr[back_pitch+indx+1]; // b3[x+1,y-1] tmp0 = b3_1 + b3_4; tmp1 = b3_2 + b3_5; tmp2 = b3_3 + b3_6; b3_9 = b3_3 - b3_6*6 + b3_ptr[pitch+indx+1]; p0 += (tmp0 + tmp1) << 2; p1 += (tmp0 - tmp1*6 + tmp2) << 1; p2 += (b3_7 + b3_8) << 1; p3 += b3_7 - b3_8*6 + b3_9; } /* output four pixels */ dst[x] = av_clip_uint8((p0 >> 6) + 128); dst[x+1] = av_clip_uint8((p1 >> 6) + 128); dst[dst_pitch+x] = av_clip_uint8((p2 >> 6) + 128); dst[dst_pitch+x+1] = av_clip_uint8((p3 >> 6) + 128); }// for x dst += dst_pitch << 1; back_pitch = -pitch; b0_ptr += pitch; b1_ptr += pitch; b2_ptr += pitch; b3_ptr += pitch; } }
true
FFmpeg
f9143d2407b38f33b85487fd597c9194f79adb20
7,676
void select_soundhw(const char *optarg) { struct soundhw *c; if (*optarg == '?') { show_valid_cards: printf("Valid sound card names (comma separated):\n"); for (c = soundhw; c->name; ++c) { printf ("%-11s %s\n", c->name, c->descr); } printf("\n-soundhw all will enable all of the above\n"); exit(*optarg != '?'); } else { size_t l; const char *p; char *e; int bad_card = 0; if (!strcmp(optarg, "all")) { for (c = soundhw; c->name; ++c) { c->enabled = 1; } return; } p = optarg; while (*p) { e = strchr(p, ','); l = !e ? strlen(p) : (size_t) (e - p); for (c = soundhw; c->name; ++c) { if (!strncmp(c->name, p, l) && !c->name[l]) { c->enabled = 1; break; } } if (!c->name) { if (l > 80) { fprintf(stderr, "Unknown sound card name (too big to show)\n"); } else { fprintf(stderr, "Unknown sound card name `%.*s'\n", (int) l, p); } bad_card = 1; } p += l + (e != NULL); } if (bad_card) { goto show_valid_cards; } } }
true
qemu
c8057f951d64de93bfd01569c0a725baa9f94372
7,677
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){ AVPacketList *pktl; int stream_count=0, noninterleaved_count=0; int64_t delta_dts_max = 0; int i; if(pkt){ ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts); } for(i=0; i < s->nb_streams; i++) { if (s->streams[i]->last_in_packet_buffer) { ++stream_count; } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { ++noninterleaved_count; } } if (s->nb_streams == stream_count) { flush = 1; } else if (!flush){ for(i=0; i < s->nb_streams; i++) { if (s->streams[i]->last_in_packet_buffer) { int64_t delta_dts = av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts, s->streams[i]->time_base, AV_TIME_BASE_Q) - av_rescale_q(s->packet_buffer->pkt.dts, s->streams[s->packet_buffer->pkt.stream_index]->time_base, AV_TIME_BASE_Q); delta_dts_max= FFMAX(delta_dts_max, delta_dts); } } if(s->nb_streams == stream_count+noninterleaved_count && delta_dts_max > 20*AV_TIME_BASE) { av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count); flush = 1; } } if(stream_count && flush){ pktl= s->packet_buffer; *out= pktl->pkt; s->packet_buffer= pktl->next; if(!s->packet_buffer) s->packet_buffer_end= NULL; if(s->streams[out->stream_index]->last_in_packet_buffer == pktl) s->streams[out->stream_index]->last_in_packet_buffer= NULL; av_freep(&pktl); return 1; }else{ av_init_packet(out); return 0; } }
true
FFmpeg
4d7c71c36467331f1e0c0f17af9f371d33308a9c
7,679
static int read_header(AVFormatContext *s) { WtvContext *wtv = s->priv_data; int root_sector, root_size; uint8_t root[WTV_SECTOR_SIZE]; AVIOContext *pb; int64_t timeline_pos; int64_t ret; wtv->epoch = wtv->pts = wtv->last_valid_pts = AV_NOPTS_VALUE; /* read root directory sector */ avio_skip(s->pb, 0x30); root_size = avio_rl32(s->pb); if (root_size > sizeof(root)) { av_log(s, AV_LOG_ERROR, "root directory size exceeds sector size\n"); return AVERROR_INVALIDDATA; } avio_skip(s->pb, 4); root_sector = avio_rl32(s->pb); ret = seek_by_sector(s->pb, root_sector, 0); if (ret < 0) return ret; root_size = avio_read(s->pb, root, root_size); if (root_size < 0) return AVERROR_INVALIDDATA; /* parse chunks up until first data chunk */ wtv->pb = wtvfile_open(s, root, root_size, ff_timeline_le16); if (!wtv->pb) { av_log(s, AV_LOG_ERROR, "timeline data missing\n"); return AVERROR_INVALIDDATA; } ret = parse_chunks(s, SEEK_TO_DATA, 0, 0); if (ret < 0) return ret; avio_seek(wtv->pb, -32, SEEK_CUR); timeline_pos = avio_tell(s->pb); // save before opening another file /* read metadata */ pb = wtvfile_open(s, root, root_size, ff_table_0_entries_legacy_attrib_le16); if (pb) { parse_legacy_attrib(s, pb); wtvfile_close(pb); } s->ctx_flags |= AVFMTCTX_NOHEADER; // Needed for noStreams.wtv /* read seek index */ if (s->nb_streams) { AVStream *st = s->streams[0]; pb = wtvfile_open(s, root, root_size, ff_table_0_entries_time_le16); if (pb) { while(1) { uint64_t timestamp = avio_rl64(pb); uint64_t frame_nb = avio_rl64(pb); if (avio_feof(pb)) break; ff_add_index_entry(&wtv->index_entries, &wtv->nb_index_entries, &wtv->index_entries_allocated_size, 0, timestamp, frame_nb, 0, AVINDEX_KEYFRAME); } wtvfile_close(pb); if (wtv->nb_index_entries) { pb = wtvfile_open(s, root, root_size, ff_timeline_table_0_entries_Events_le16); if (pb) { AVIndexEntry *e = wtv->index_entries; AVIndexEntry *e_end = wtv->index_entries + wtv->nb_index_entries - 1; uint64_t last_position = 0; while (1) { uint64_t frame_nb = avio_rl64(pb); uint64_t position = avio_rl64(pb); while (frame_nb > e->size && e <= e_end) { e->pos = last_position; e++; } if (avio_feof(pb)) break; last_position = position; } e_end->pos = last_position; wtvfile_close(pb); st->duration = e_end->timestamp; } } } } avio_seek(s->pb, timeline_pos, SEEK_SET); return 0; }
false
FFmpeg
cc5e5548df4af48674c7aef518e831b19e99f9fc
7,680
SwsVector *sws_getGaussianVec(double variance, double quality) { const int length = (int)(variance * quality + 0.5) | 1; int i; double middle = (length - 1) * 0.5; SwsVector *vec = sws_allocVec(length); if (!vec) return NULL; for (i = 0; i < length; i++) { double dist = i - middle; vec->coeff[i] = exp(-dist * dist / (2 * variance * variance)) / sqrt(2 * variance * M_PI); } sws_normalizeVec(vec, 1.0); return vec; }
false
FFmpeg
e823e7367754dd23de16a141c06471735a488f0d
7,681
static void h261_h_loop_filter_c(uint8_t *dest,uint8_t *src, int stride){ int i,j,xy,yz; int res; for(i=1; i<7; i++){ for(j=0; j<8; j++){ xy = j * stride + i; yz = j * 8 + i; res = (int)src[yz-1] + ((int)(src[yz]) *2) + (int)src[yz+1]; res+=2; res>>=2; dest[xy] = (uint8_t)res; } } }
false
FFmpeg
fdbbf2e0fc1bb91a5d735a49f39337eb172e68a7
7,682
PcGuestInfo *pc_guest_info_init(ram_addr_t below_4g_mem_size, ram_addr_t above_4g_mem_size) { PcGuestInfoState *guest_info_state = g_malloc0(sizeof *guest_info_state); PcGuestInfo *guest_info = &guest_info_state->info; guest_info->pci_info.w32.end = IO_APIC_DEFAULT_ADDRESS; if (sizeof(hwaddr) == 4) { guest_info->pci_info.w64.begin = 0; guest_info->pci_info.w64.end = 0; } else { /* * BIOS does not set MTRR entries for the 64 bit window, so no need to * align address to power of two. Align address at 1G, this makes sure * it can be exactly covered with a PAT entry even when using huge * pages. */ guest_info->pci_info.w64.begin = ROUND_UP((0x1ULL << 32) + above_4g_mem_size, 0x1ULL << 30); guest_info->pci_info.w64.end = guest_info->pci_info.w64.begin + (0x1ULL << 62); assert(guest_info->pci_info.w64.begin <= guest_info->pci_info.w64.end); } guest_info_state->machine_done.notify = pc_guest_info_machine_done; qemu_add_machine_init_done_notifier(&guest_info_state->machine_done); return guest_info; }
true
qemu
398489018183d613306ab022653552247d93919f
7,683
static void escaped_string(void) { int i; struct { const char *encoded; const char *decoded; int skip; } test_cases[] = { { "\"\\b\"", "\b" }, { "\"\\f\"", "\f" }, { "\"\\n\"", "\n" }, { "\"\\r\"", "\r" }, { "\"\\t\"", "\t" }, { "\"/\"", "/" }, { "\"\\/\"", "/", .skip = 1 }, { "\"\\\\\"", "\\" }, { "\"\\\"\"", "\"" }, { "\"hello world \\\"embedded string\\\"\"", "hello world \"embedded string\"" }, { "\"hello world\\nwith new line\"", "hello world\nwith new line" }, { "\"single byte utf-8 \\u0020\"", "single byte utf-8 ", .skip = 1 }, { "\"double byte utf-8 \\u00A2\"", "double byte utf-8 \xc2\xa2" }, { "\"triple byte utf-8 \\u20AC\"", "triple byte utf-8 \xe2\x82\xac" }, { "'\\b'", "\b", .skip = 1 }, { "'\\f'", "\f", .skip = 1 }, { "'\\n'", "\n", .skip = 1 }, { "'\\r'", "\r", .skip = 1 }, { "'\\t'", "\t", .skip = 1 }, { "'\\/'", "/", .skip = 1 }, { "'\\\\'", "\\", .skip = 1 }, {} }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded, NULL); str = qobject_to_qstring(obj); g_assert(str); g_assert_cmpstr(qstring_get_str(str), ==, test_cases[i].decoded); if (test_cases[i].skip == 0) { str = qobject_to_json(obj); g_assert_cmpstr(qstring_get_str(str), ==, test_cases[i].encoded); qobject_decref(obj); } QDECREF(str); } }
true
qemu
aec4b054ea36c53c8b887da99f20010133b84378
7,684
ogg_read_header (AVFormatContext * s, AVFormatParameters * ap) { struct ogg *ogg = s->priv_data; ogg->curidx = -1; //linear headers seek from start if (ogg_get_headers (s) < 0){ return -1; } //linear granulepos seek from end ogg_get_length (s); //fill the extradata in the per codec callbacks return 0; }
true
FFmpeg
c9da676de43d778d62efb1cfa75544d770736d67
7,685
static void print_net_client(Monitor *mon, VLANClientState *vc) { monitor_printf(mon, "%s: type=%s,%s\n", vc->name, net_client_types[vc->info->type].type, vc->info_str); }
true
qemu
6687b79d636cd60ed9adb1177d0d946b58fa7717
7,686
static int apng_read_close(AVFormatContext *s) { APNGDemuxContext *ctx = s->priv_data; av_freep(&ctx->extra_data); ctx->extra_data_size = 0; return 0; }
true
FFmpeg
16c429166ddf1736972b6ccce84bd3509ec16a34
7,687
static void br(DisasContext *dc, uint32_t code, uint32_t flags) { I_TYPE(instr, code); gen_goto_tb(dc, 0, dc->pc + 4 + (instr.imm16s & -4)); dc->is_jmp = DISAS_TB_JUMP; }
true
qemu
4ae4b609ee2d5bcc9df6c03c21dc1fed527aada1
7,688
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf) { DNXHDEncContext *ctx = avctx->priv_data; const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 }; memcpy(buf, header_prefix, 5); buf[5] = ctx->interlaced ? ctx->cur_field+2 : 0x01; buf[6] = 0x80; // crc flag off buf[7] = 0xa0; // reserved AV_WB16(buf + 0x18, avctx->height); // ALPF AV_WB16(buf + 0x1a, avctx->width); // SPL AV_WB16(buf + 0x1d, avctx->height); // NAL buf[0x21] = 0x38; // FIXME 8 bit per comp buf[0x22] = 0x88 + (ctx->frame.interlaced_frame<<2); AV_WB32(buf + 0x28, ctx->cid); // CID buf[0x2c] = ctx->interlaced ? 0 : 0x80; buf[0x5f] = 0x01; // UDL buf[0x167] = 0x02; // reserved AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4); // MSIPS buf[0x16d] = ctx->m.mb_height; // Ns buf[0x16f] = 0x10; // reserved ctx->msip = buf + 0x170; return 0; }
true
FFmpeg
301a24de52f5baa09beff0958327af2c2a7005dc
7,691
static int slirp_smb(SlirpState* s, const char *exported_dir, struct in_addr vserver_addr) { static int instance; char smb_conf[128]; char smb_cmdline[128]; FILE *f; snprintf(s->smb_dir, sizeof(s->smb_dir), "/tmp/qemu-smb.%ld-%d", (long)getpid(), instance++); if (mkdir(s->smb_dir, 0700) < 0) { error_report("could not create samba server dir '%s'", s->smb_dir); return -1; } snprintf(smb_conf, sizeof(smb_conf), "%s/%s", s->smb_dir, "smb.conf"); f = fopen(smb_conf, "w"); if (!f) { slirp_smb_cleanup(s); error_report("could not create samba server configuration file '%s'", smb_conf); return -1; } fprintf(f, "[global]\n" "private dir=%s\n" "socket address=127.0.0.1\n" "pid directory=%s\n" "lock directory=%s\n" "state directory=%s\n" "log file=%s/log.smbd\n" "smb passwd file=%s/smbpasswd\n" "security = share\n" "[qemu]\n" "path=%s\n" "read only=no\n" "guest ok=yes\n", s->smb_dir, s->smb_dir, s->smb_dir, s->smb_dir, s->smb_dir, s->smb_dir, exported_dir ); fclose(f); snprintf(smb_cmdline, sizeof(smb_cmdline), "%s -s %s", CONFIG_SMBD_COMMAND, smb_conf); if (slirp_add_exec(s->slirp, 0, smb_cmdline, &vserver_addr, 139) < 0) { slirp_smb_cleanup(s); error_report("conflicting/invalid smbserver address"); return -1; } return 0; }
true
qemu
276eda5735824dd6cf66e1f0951aa8af97354540
7,692
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start, int64_t size) { int64_t i; int ret; for (i = 0; i < size / TARGET_PAGE_SIZE; i++) { ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); if (ret < 0) { return ret; } } if ((size % TARGET_PAGE_SIZE) != 0) { ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE, size % TARGET_PAGE_SIZE); if (ret < 0) { return ret; } } return 0; }
true
qemu
56c4bfb3f07f3107894c00281276aea4f5e8834d
7,693
static int rtmp_handshake(URLContext *s, RTMPContext *rt) { AVLFG rnd; uint8_t tosend [RTMP_HANDSHAKE_PACKET_SIZE+1] = { 3, // unencrypted data 0, 0, 0, 0, // client uptime RTMP_CLIENT_VER1, RTMP_CLIENT_VER2, RTMP_CLIENT_VER3, RTMP_CLIENT_VER4, }; uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE]; uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1]; int i; int server_pos, client_pos; uint8_t digest[32], signature[32]; int ret, type = 0; av_log(s, AV_LOG_DEBUG, "Handshaking...\n"); av_lfg_init(&rnd, 0xDEADC0DE); // generate handshake packet - 1536 bytes of pseudorandom data for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++) tosend[i] = av_lfg_get(&rnd) >> 24; if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) { /* When the client wants to use RTMPE, we have to change the command * byte to 0x06 which means to use encrypted data and we have to set * the flash version to at least 9.0.115.0. */ tosend[0] = 6; tosend[5] = 128; tosend[6] = 0; tosend[7] = 3; tosend[8] = 2; /* Initialize the Diffie-Hellmann context and generate the public key * to send to the server. */ if ((ret = ff_rtmpe_gen_pub_key(rt->stream, tosend + 1)) < 0) client_pos = rtmp_handshake_imprint_with_digest(tosend + 1, rt->encrypted); if (client_pos < 0) return client_pos; if ((ret = ffurl_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) { av_log(s, AV_LOG_ERROR, "Cannot write RTMP handshake request\n"); if ((ret = ffurl_read_complete(rt->stream, serverdata, RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) { av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n"); if ((ret = ffurl_read_complete(rt->stream, clientdata, RTMP_HANDSHAKE_PACKET_SIZE)) < 0) { av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n"); av_log(s, AV_LOG_DEBUG, "Type answer %d\n", serverdata[0]); av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n", serverdata[5], serverdata[6], serverdata[7], serverdata[8]); if (rt->is_input && serverdata[5] >= 3) { server_pos = rtmp_validate_digest(serverdata + 1, 772); if (server_pos < 0) return server_pos; if (!server_pos) { type = 1; server_pos = rtmp_validate_digest(serverdata + 1, 8); if (server_pos < 0) return server_pos; if (!server_pos) { av_log(s, AV_LOG_ERROR, "Server response validating failed\n"); return AVERROR(EIO); ret = ff_rtmp_calc_digest(tosend + 1 + client_pos, 32, 0, rtmp_server_key, sizeof(rtmp_server_key), digest); if (ret < 0) ret = ff_rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0, digest, 32, signature); if (ret < 0) if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) { /* Compute the shared secret key sent by the server and initialize * the RC4 encryption. */ if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1, tosend + 1, type)) < 0) /* Encrypt the signature received by the server. */ ff_rtmpe_encrypt_sig(rt->stream, signature, digest, serverdata[0]); if (memcmp(signature, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) { av_log(s, AV_LOG_ERROR, "Signature mismatch\n"); return AVERROR(EIO); for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++) tosend[i] = av_lfg_get(&rnd) >> 24; ret = ff_rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0, rtmp_player_key, sizeof(rtmp_player_key), digest); if (ret < 0) ret = ff_rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0, digest, 32, tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32); if (ret < 0) if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) { /* Encrypt the signature to be send to the server. */ ff_rtmpe_encrypt_sig(rt->stream, tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32, digest, serverdata[0]); // write reply back to the server if ((ret = ffurl_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE)) < 0) if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) { /* Set RC4 keys for encryption and update the keystreams. */ if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0) } else { if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) { /* Compute the shared secret key sent by the server and initialize * the RC4 encryption. */ if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1, tosend + 1, 1)) < 0) if (serverdata[0] == 9) { /* Encrypt the signature received by the server. */ ff_rtmpe_encrypt_sig(rt->stream, signature, digest, serverdata[0]); if ((ret = ffurl_write(rt->stream, serverdata + 1, RTMP_HANDSHAKE_PACKET_SIZE)) < 0) if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) { /* Set RC4 keys for encryption and update the keystreams. */ if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0) return 0;
true
FFmpeg
635ac8e1be91e941908f85642e4bbb609e48193f
7,694
static int qemu_rdma_reg_control(RDMAContext *rdma, int idx) { rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd, rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE); if (rdma->wr_data[idx].control_mr) { rdma->total_registrations++; return 0; } fprintf(stderr, "qemu_rdma_reg_control failed!\n"); return -1; }
true
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
7,695
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, ppc_slb_t *slb, bool secondary, target_ulong ptem, ppc_hash_pte64_t *pte) { CPUPPCState *env = &cpu->env; int i; uint64_t token; target_ulong pte0, pte1; target_ulong pte_index; pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP; token = ppc_hash64_start_access(cpu, pte_index); if (!token) { return -1; } for (i = 0; i < HPTES_PER_GROUP; i++) { pte0 = ppc_hash64_load_hpte0(cpu, token, i); pte1 = ppc_hash64_load_hpte1(cpu, token, i); if ((pte0 & HPTE64_V_VALID) && (secondary == !!(pte0 & HPTE64_V_SECONDARY)) && HPTE64_V_COMPARE(pte0, ptem)) { unsigned pshift = hpte_page_shift(slb->sps, pte0, pte1); /* * If there is no match, ignore the PTE, it could simply * be for a different segment size encoding and the * architecture specifies we should not match. Linux will * potentially leave behind PTEs for the wrong base page * size when demoting segments. */ if (pshift == 0) { continue; } /* We don't do anything with pshift yet as qemu TLB only deals * with 4K pages anyway */ pte->pte0 = pte0; pte->pte1 = pte1; ppc_hash64_stop_access(cpu, token); return (pte_index + i) * HASH_PTE_SIZE_64; } } ppc_hash64_stop_access(cpu, token); /* * We didn't find a valid entry. */ return -1; }
true
qemu
073de86aa934d46d596a2367e7501da5500e5b86
7,697
static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, int64_t offset_in_cluster, const uint8_t *buf, int nb_sectors, int64_t sector_num) { int ret; VmdkGrainMarker *data = NULL; uLongf buf_len; const uint8_t *write_buf = buf; int write_len = nb_sectors * 512; if (extent->compressed) { if (!extent->has_marker) { ret = -EINVAL; goto out; } buf_len = (extent->cluster_sectors << 9) * 2; data = g_malloc(buf_len + sizeof(VmdkGrainMarker)); if (compress(data->data, &buf_len, buf, nb_sectors << 9) != Z_OK || buf_len == 0) { ret = -EINVAL; goto out; } data->lba = sector_num; data->size = buf_len; write_buf = (uint8_t *)data; write_len = buf_len + sizeof(VmdkGrainMarker); } ret = bdrv_pwrite(extent->file, cluster_offset + offset_in_cluster, write_buf, write_len); if (ret != write_len) { ret = ret < 0 ? ret : -EIO; goto out; } ret = 0; out: g_free(data); return ret; }
true
qemu
5e82a31eb967db135fc4e688b134fb0972d62de3
7,698
int ff_scale_eval_dimensions(void *log_ctx, const char *w_expr, const char *h_expr, AVFilterLink *inlink, AVFilterLink *outlink, int *ret_w, int *ret_h) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format); const char *expr; int w, h; int factor_w, factor_h; int eval_w, eval_h; int ret; const char scale2ref = outlink->src->nb_inputs == 2 && outlink->src->inputs[1] == inlink; double var_values[VARS_NB + VARS_S2R_NB], res; const AVPixFmtDescriptor *main_desc; const AVFilterLink *main_link; const char *const *names = scale2ref ? var_names_scale2ref : var_names; if (scale2ref) { main_link = outlink->src->inputs[0]; main_desc = av_pix_fmt_desc_get(main_link->format); } var_values[VAR_PI] = M_PI; var_values[VAR_PHI] = M_PHI; var_values[VAR_E] = M_E; var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w; var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h; var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN; var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN; var_values[VAR_A] = (double) inlink->w / inlink->h; var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR]; var_values[VAR_HSUB] = 1 << desc->log2_chroma_w; var_values[VAR_VSUB] = 1 << desc->log2_chroma_h; var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w; var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h; if (scale2ref) { var_values[VARS_NB + VAR_S2R_MAIN_W] = main_link->w; var_values[VARS_NB + VAR_S2R_MAIN_H] = main_link->h; var_values[VARS_NB + VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h; var_values[VARS_NB + VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ? (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1; var_values[VARS_NB + VAR_S2R_MAIN_DAR] = var_values[VARS_NB + VAR_S2R_MDAR] = var_values[VARS_NB + VAR_S2R_MAIN_A] * var_values[VARS_NB + VAR_S2R_MAIN_SAR]; var_values[VARS_NB + VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w; var_values[VARS_NB + VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h; } /* evaluate width and height */ av_expr_parse_and_eval(&res, (expr = w_expr), names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx); eval_w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res; if ((ret = av_expr_parse_and_eval(&res, (expr = h_expr), names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx)) < 0) goto fail; eval_h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res; /* evaluate again the width, as it may depend on the output height */ if ((ret = av_expr_parse_and_eval(&res, (expr = w_expr), names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx)) < 0) goto fail; eval_w = res; w = eval_w; h = eval_h; /* Check if it is requested that the result has to be divisible by a some * factor (w or h = -n with n being the factor). */ factor_w = 1; factor_h = 1; if (w < -1) { factor_w = -w; } if (h < -1) { factor_h = -h; } if (w < 0 && h < 0) eval_w = eval_h = 0; if (!(w = eval_w)) w = inlink->w; if (!(h = eval_h)) h = inlink->h; /* Make sure that the result is divisible by the factor we determined * earlier. If no factor was set, it is nothing will happen as the default * factor is 1 */ if (w < 0) w = av_rescale(h, inlink->w, inlink->h * factor_w) * factor_w; if (h < 0) h = av_rescale(w, inlink->h, inlink->w * factor_h) * factor_h; *ret_w = w; *ret_h = h; return 0; fail: av_log(log_ctx, AV_LOG_ERROR, "Error when evaluating the expression '%s'.\n" "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n", expr, w_expr, h_expr); return ret; }
false
FFmpeg
05feeeb813e9f71f69b6b3a7f33856c609237c06
7,699
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { ADPCMContext *c = avctx->priv_data; ADPCMChannelStatus *cs; int n, m, channel, i; int block_predictor[2]; short *samples; short *samples_end; const uint8_t *src; int st; /* stereo */ /* DK3 ADPCM accounting variables */ unsigned char last_byte = 0; unsigned char nibble; int decode_top_nibble_next = 0; int diff_channel; /* EA ADPCM state variables */ uint32_t samples_in_chunk; int32_t previous_left_sample, previous_right_sample; int32_t current_left_sample, current_right_sample; int32_t next_left_sample, next_right_sample; int32_t coeff1l, coeff2l, coeff1r, coeff2r; uint8_t shift_left, shift_right; int count1, count2; int coeff[2][2], shift[2];//used in EA MAXIS ADPCM if (!buf_size) return 0; //should protect all 4bit ADPCM variants //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels // if(*data_size/4 < buf_size + 8) return -1; samples = data; samples_end= samples + *data_size/2; *data_size= 0; src = buf; st = avctx->channels == 2 ? 1 : 0; switch(avctx->codec->id) { case CODEC_ID_ADPCM_IMA_QT: n = (buf_size - 2);/* >> 2*avctx->channels;*/ channel = c->channel; cs = &(c->status[channel]); /* (pppppp) (piiiiiii) */ /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */ cs->predictor = (*src++) << 8; cs->predictor |= (*src & 0x80); cs->predictor &= 0xFF80; /* sign extension */ if(cs->predictor & 0x8000) cs->predictor -= 0x10000; cs->predictor = av_clip_int16(cs->predictor); cs->step_index = (*src++) & 0x7F; if (cs->step_index > 88){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index); cs->step_index = 88; } cs->step = step_table[cs->step_index]; if (st && channel) samples++; for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chuncks of 34 bytes (=64 samples) */ *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3); samples += avctx->channels; *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4 , 3); samples += avctx->channels; src ++; } if(st) { /* handle stereo interlacing */ c->channel = (channel + 1) % 2; /* we get one packet for left, then one for right data */ if(!channel) { /* wait for the other packet before outputing anything */ return src - buf; } samples--; } break; case CODEC_ID_ADPCM_IMA_WAV: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; // samples_per_block= (block_align-4*chanels)*8 / (bits_per_sample * chanels) + 1; for(i=0; i<avctx->channels; i++){ cs = &(c->status[i]); cs->predictor = *samples++ = (int16_t)(src[0] + (src[1]<<8)); src+=2; cs->step_index = *src++; if (cs->step_index > 88){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index); cs->step_index = 88; } if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */ } while(src < buf + buf_size){ for(m=0; m<4; m++){ for(i=0; i<=st; i++) *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3); for(i=0; i<=st; i++) *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3); src++; } src += 4*st; } break; case CODEC_ID_ADPCM_4XM: cs = &(c->status[0]); c->status[0].predictor= (int16_t)(src[0] + (src[1]<<8)); src+=2; if(st){ c->status[1].predictor= (int16_t)(src[0] + (src[1]<<8)); src+=2; } c->status[0].step_index= (int16_t)(src[0] + (src[1]<<8)); src+=2; if(st){ c->status[1].step_index= (int16_t)(src[0] + (src[1]<<8)); src+=2; } if (cs->step_index < 0) cs->step_index = 0; if (cs->step_index > 88) 
cs->step_index = 88; m= (buf_size - (src - buf))>>st; for(i=0; i<m; i++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4); if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4); *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4); if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4); } src += m<<st; break; case CODEC_ID_ADPCM_MS: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; n = buf_size - 7 * avctx->channels; if (n < 0) return -1; block_predictor[0] = av_clip(*src++, 0, 7); block_predictor[1] = 0; if (st) block_predictor[1] = av_clip(*src++, 0, 7); c->status[0].idelta = (int16_t)((*src & 0xFF) | ((src[1] << 8) & 0xFF00)); src+=2; if (st){ c->status[1].idelta = (int16_t)((*src & 0xFF) | ((src[1] << 8) & 0xFF00)); src+=2; } c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]]; c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]]; c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]]; c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]]; c->status[0].sample1 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00)); src+=2; if (st) c->status[1].sample1 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00)); if (st) src+=2; c->status[0].sample2 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00)); src+=2; if (st) c->status[1].sample2 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00)); if (st) src+=2; *samples++ = c->status[0].sample1; if (st) *samples++ = c->status[1].sample1; *samples++ = c->status[0].sample2; if (st) *samples++ = c->status[1].sample2; for(;n>0;n--) { *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 ); *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F); src ++; } break; case CODEC_ID_ADPCM_IMA_DK4: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; c->status[0].predictor = (int16_t)(src[0] | (src[1] << 8)); c->status[0].step_index = src[2]; src += 4; *samples++ = c->status[0].predictor; if (st) { c->status[1].predictor = (int16_t)(src[0] | (src[1] << 8)); c->status[1].step_index = src[2]; src += 4; *samples++ = c->status[1].predictor; } while (src < buf + buf_size) { /* take care of the top nibble (always left or mono channel) */ *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 3); /* take care of the bottom nibble, which is right sample for * stereo, or another mono sample */ if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[0] & 0x0F, 3); else *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] & 0x0F, 3); src++; } break; case CODEC_ID_ADPCM_IMA_DK3: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; if(buf_size + 16 > (samples_end - samples)*3/8) return -1; c->status[0].predictor = (int16_t)(src[10] | (src[11] << 8)); c->status[1].predictor = (int16_t)(src[12] | (src[13] << 8)); c->status[0].step_index = src[14]; c->status[1].step_index = src[15]; /* sign extend the predictors */ src += 16; diff_channel = c->status[1].predictor; /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when * the buffer is consumed */ while (1) { /* for this algorithm, c->status[0] is the sum channel and * c->status[1] is the diff channel */ /* process the first predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the diff channel predictor */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[1], nibble, 3); /* process the first pair of stereo PCM samples */ 
diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; /* process the second predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the second pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; } break; case CODEC_ID_ADPCM_IMA_WS: /* no per-block initialization; just start decoding the data */ while (src < buf + buf_size) { if (st) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[0] & 0x0F, 3); } else { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] & 0x0F, 3); } src++; } break; case CODEC_ID_ADPCM_XA: while (buf_size >= 128) { xa_decode(samples, src, &c->status[0], &c->status[1], avctx->channels); src += 128; samples += 28 * 8; buf_size -= 128; } break; case CODEC_ID_ADPCM_IMA_EA_EACS: samples_in_chunk = bytestream_get_le32(&src) >> (1-st); if (samples_in_chunk > buf_size-4-(8<<st)) { src += buf_size - 4; break; } for (i=0; i<=st; i++) c->status[i].step_index = bytestream_get_le32(&src); for (i=0; i<=st; i++) c->status[i].predictor = bytestream_get_le32(&src); for (; samples_in_chunk; samples_in_chunk--, src++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3); } break; case CODEC_ID_ADPCM_IMA_EA_SEAD: for (; src < buf+buf_size; src++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6); *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6); } break; case CODEC_ID_ADPCM_EA: samples_in_chunk = AV_RL32(src); if (samples_in_chunk >= ((buf_size - 12) * 2)) { src += buf_size; break; } src += 4; current_left_sample = (int16_t)AV_RL16(src); src += 2; previous_left_sample = (int16_t)AV_RL16(src); src += 2; current_right_sample = (int16_t)AV_RL16(src); src += 2; previous_right_sample = (int16_t)AV_RL16(src); src += 2; for (count1 = 0; count1 < samples_in_chunk/28;count1++) { coeff1l = ea_adpcm_table[ *src >> 4 ]; coeff2l = ea_adpcm_table[(*src >> 4 ) + 4]; coeff1r = ea_adpcm_table[*src & 0x0F]; coeff2r = ea_adpcm_table[(*src & 0x0F) + 4]; src++; shift_left = (*src >> 4 ) + 8; shift_right = (*src & 0x0F) + 8; src++; for (count2 = 0; count2 < 28; count2++) { next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left; next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right; src++; next_left_sample = (next_left_sample + (current_left_sample * coeff1l) + (previous_left_sample * coeff2l) + 0x80) >> 8; next_right_sample = (next_right_sample + (current_right_sample * coeff1r) + (previous_right_sample * coeff2r) + 0x80) >> 8; previous_left_sample = current_left_sample; current_left_sample = av_clip_int16(next_left_sample); previous_right_sample = current_right_sample; current_right_sample = av_clip_int16(next_right_sample); *samples++ = (unsigned short)current_left_sample; *samples++ = (unsigned short)current_right_sample; } } break; case CODEC_ID_ADPCM_EA_MAXIS_XA: for(channel = 0; channel < avctx->channels; channel++) { for (i=0; i<2; i++) coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i]; shift[channel] = (*src & 0x0F) + 8; src++; } for (count1 = 0; count1 < 
(buf_size - avctx->channels) / avctx->channels; count1++) { for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */ for(channel = 0; channel < avctx->channels; channel++) { int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel]; sample = (sample + c->status[channel].sample1 * coeff[channel][0] + c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8; c->status[channel].sample2 = c->status[channel].sample1; c->status[channel].sample1 = av_clip_int16(sample); *samples++ = c->status[channel].sample1; } } src+=avctx->channels; } break; case CODEC_ID_ADPCM_EA_R1: case CODEC_ID_ADPCM_EA_R2: case CODEC_ID_ADPCM_EA_R3: { /* channel numbering 2chan: 0=fl, 1=fr 4chan: 0=fl, 1=rl, 2=fr, 3=rr 6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */ const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3; int32_t previous_sample, current_sample, next_sample; int32_t coeff1, coeff2; uint8_t shift; unsigned int channel; uint16_t *samplesC; const uint8_t *srcC; samples_in_chunk = (big_endian ? bytestream_get_be32(&src) : bytestream_get_le32(&src)) / 28; if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) || 28*samples_in_chunk*avctx->channels > samples_end-samples) { src += buf_size - 4; break; } for (channel=0; channel<avctx->channels; channel++) { srcC = src + (big_endian ? bytestream_get_be32(&src) : bytestream_get_le32(&src)) + (avctx->channels-channel-1) * 4; samplesC = samples + channel; if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) { current_sample = (int16_t)bytestream_get_le16(&srcC); previous_sample = (int16_t)bytestream_get_le16(&srcC); } else { current_sample = c->status[channel].predictor; previous_sample = c->status[channel].prev_sample; } for (count1=0; count1<samples_in_chunk; count1++) { if (*srcC == 0xEE) { /* only seen in R2 and R3 */ srcC++; current_sample = (int16_t)bytestream_get_be16(&srcC); previous_sample = (int16_t)bytestream_get_be16(&srcC); for (count2=0; count2<28; count2++) { *samplesC = (int16_t)bytestream_get_be16(&srcC); samplesC += avctx->channels; } } else { coeff1 = ea_adpcm_table[ *srcC>>4 ]; coeff2 = ea_adpcm_table[(*srcC>>4) + 4]; shift = (*srcC++ & 0x0F) + 8; for (count2=0; count2<28; count2++) { if (count2 & 1) next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift; else next_sample = (int32_t)((*srcC & 0xF0) << 24) >> shift; next_sample += (current_sample * coeff1) + (previous_sample * coeff2); next_sample = av_clip_int16(next_sample >> 8); previous_sample = current_sample; current_sample = next_sample; *samplesC = current_sample; samplesC += avctx->channels; } } } if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) { c->status[channel].predictor = current_sample; c->status[channel].prev_sample = previous_sample; } } src = src + buf_size - (4 + 4*avctx->channels); samples += 28 * samples_in_chunk * avctx->channels; break; } case CODEC_ID_ADPCM_EA_XAS: if (samples_end-samples < 32*4*avctx->channels || buf_size < (4+15)*4*avctx->channels) { src += buf_size; break; } for (channel=0; channel<avctx->channels; channel++) { int coeff[2][4], shift[4]; short *s2, *s = &samples[channel]; for (n=0; n<4; n++, s+=32*avctx->channels) { for (i=0; i<2; i++) coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i]; shift[n] = (src[2]&0x0F) + 8; for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels) s2[0] = (src[0]&0xF0) + (src[1]<<8); } for (m=2; m<32; m+=2) { s = &samples[m*avctx->channels + channel]; for (n=0; n<4; n++, src++, s+=32*avctx->channels) { for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) { int level = (int32_t)((*src & 
(0xF0>>i)) << (24+i)) >> shift[n]; int pred = s2[-1*avctx->channels] * coeff[0][n] + s2[-2*avctx->channels] * coeff[1][n]; s2[0] = av_clip_int16((level + pred + 0x80) >> 8); } } } } samples += 32*4*avctx->channels; break; case CODEC_ID_ADPCM_IMA_AMV: case CODEC_ID_ADPCM_IMA_SMJPEG: c->status[0].predictor = (int16_t)bytestream_get_le16(&src); c->status[0].step_index = bytestream_get_le16(&src); if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) src+=4; while (src < buf + buf_size) { char hi, lo; lo = *src & 0x0F; hi = *src >> 4; if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) FFSWAP(char, hi, lo); *samples++ = adpcm_ima_expand_nibble(&c->status[0], lo, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], hi, 3); src++; } break; case CODEC_ID_ADPCM_CT: while (src < buf + buf_size) { if (st) { *samples++ = adpcm_ct_expand_nibble(&c->status[0], src[0] >> 4); *samples++ = adpcm_ct_expand_nibble(&c->status[1], src[0] & 0x0F); } else { *samples++ = adpcm_ct_expand_nibble(&c->status[0], src[0] >> 4); *samples++ = adpcm_ct_expand_nibble(&c->status[0], src[0] & 0x0F); } src++; } break; case CODEC_ID_ADPCM_SBPRO_4: case CODEC_ID_ADPCM_SBPRO_3: case CODEC_ID_ADPCM_SBPRO_2: if (!c->status[0].step_index) { /* the first byte is a raw sample */ *samples++ = 128 * (*src++ - 0x80); if (st) *samples++ = 128 * (*src++ - 0x80); c->status[0].step_index = 1; } if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) { while (src < buf + buf_size) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 4, 4, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], src[0] & 0x0F, 4, 0); src++; } } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) { while (src < buf + buf_size && samples + 2 < samples_end) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 5 , 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (src[0] >> 2) & 0x07, 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] & 0x03, 2, 0); src++; } } else { while (src < buf + buf_size && samples + 3 < samples_end) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 6 , 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], (src[0] >> 4) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (src[0] >> 2) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], src[0] & 0x03, 2, 2); src++; } } break; case CODEC_ID_ADPCM_SWF: { GetBitContext gb; const int *table; int k0, signmask, nb_bits, count; int size = buf_size*8; init_get_bits(&gb, buf, size); //read bits & initial values nb_bits = get_bits(&gb, 2)+2; //av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits); table = swf_index_tables[nb_bits-2]; k0 = 1 << (nb_bits-2); signmask = 1 << (nb_bits-1); while (get_bits_count(&gb) <= size - 22*avctx->channels) { for (i = 0; i < avctx->channels; i++) { *samples++ = c->status[i].predictor = get_sbits(&gb, 16); c->status[i].step_index = get_bits(&gb, 6); } for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) { int i; for (i = 0; i < avctx->channels; i++) { // similar to IMA adpcm int delta = get_bits(&gb, nb_bits); int step = step_table[c->status[i].step_index]; long vpdiff = 0; // vpdiff = (delta+0.5)*step/4 int k = k0; do { if (delta & k) vpdiff += step; step >>= 1; k >>= 1; } while(k); vpdiff += step; if (delta & signmask) c->status[i].predictor -= vpdiff; else c->status[i].predictor += vpdiff; c->status[i].step_index += table[delta & (~signmask)]; c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88); 
c->status[i].predictor = av_clip_int16(c->status[i].predictor); *samples++ = c->status[i].predictor; if (samples >= samples_end) { av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n"); return -1; } } } } src += buf_size; break; } case CODEC_ID_ADPCM_YAMAHA: while (src < buf + buf_size) { if (st) { *samples++ = adpcm_yamaha_expand_nibble(&c->status[0], src[0] & 0x0F); *samples++ = adpcm_yamaha_expand_nibble(&c->status[1], src[0] >> 4 ); } else { *samples++ = adpcm_yamaha_expand_nibble(&c->status[0], src[0] & 0x0F); *samples++ = adpcm_yamaha_expand_nibble(&c->status[0], src[0] >> 4 ); } src++; } break; case CODEC_ID_ADPCM_THP: { int table[2][16]; unsigned int samplecnt; int prev[2][2]; int ch; if (buf_size < 80) { av_log(avctx, AV_LOG_ERROR, "frame too small\n"); return -1; } src+=4; samplecnt = bytestream_get_be32(&src); for (i = 0; i < 32; i++) table[0][i] = (int16_t)bytestream_get_be16(&src); /* Initialize the previous sample. */ for (i = 0; i < 4; i++) prev[0][i] = (int16_t)bytestream_get_be16(&src); if (samplecnt >= (samples_end - samples) / (st + 1)) { av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n"); return -1; } for (ch = 0; ch <= st; ch++) { samples = (unsigned short *) data + ch; /* Read in every sample for this channel. */ for (i = 0; i < samplecnt / 14; i++) { int index = (*src >> 4) & 7; unsigned int exp = 28 - (*src++ & 15); int factor1 = table[ch][index * 2]; int factor2 = table[ch][index * 2 + 1]; /* Decode 14 samples. */ for (n = 0; n < 14; n++) { int32_t sampledat; if(n&1) sampledat= *src++ <<28; else sampledat= (*src&0xF0)<<24; sampledat = ((prev[ch][0]*factor1 + prev[ch][1]*factor2) >> 11) + (sampledat>>exp); *samples = av_clip_int16(sampledat); prev[ch][1] = prev[ch][0]; prev[ch][0] = *samples++; /* In case of stereo, skip one sample, this sample is for the other channel. */ samples += st; } } } /* In the previous loop, in case stereo is used, samples is increased exactly one time too often. */ samples -= st; break; } default: return -1; } *data_size = (uint8_t *)samples - (uint8_t *)data; return src - buf; }
false
FFmpeg
9ff8976dad1b2768857a6129daf9dac069ac5bee
7,700
int tap_win32_init(VLANState *vlan, const char *model, const char *name, const char *ifname) { TAPState *s; s = qemu_mallocz(sizeof(TAPState)); if (!s) return -1; if (tap_win32_open(&s->handle, ifname) < 0) { printf("tap: Could not open '%s'\n", ifname); return -1; } s->vc = qemu_new_vlan_client(vlan, model, name, tap_receive, NULL, s); snprintf(s->vc->info_str, sizeof(s->vc->info_str), "tap: ifname=%s", ifname); qemu_add_wait_object(s->handle->tap_semaphore, tap_win32_send, s); return 0; }
true
qemu
b946a1533209f61a93e34898aebb5b43154b99c3
7,701
static int parse_netdev(DeviceState *dev, const char *str, void **ptr) { NICPeers *peers_ptr = (NICPeers *)ptr; NICConf *conf = container_of(peers_ptr, NICConf, peers); NetClientState **ncs = peers_ptr->ncs; NetClientState *peers[MAX_QUEUE_NUM]; int queues, i = 0; int ret; queues = qemu_find_net_clients_except(str, peers, NET_CLIENT_OPTIONS_KIND_NIC, MAX_QUEUE_NUM); if (queues == 0) { ret = -ENOENT; goto err; } if (queues > MAX_QUEUE_NUM) { ret = -E2BIG; goto err; } for (i = 0; i < queues; i++) { if (peers[i] == NULL) { ret = -ENOENT; goto err; } if (peers[i]->peer) { ret = -EEXIST; goto err; } ncs[i] = peers[i]; ncs[i]->queue_index = i; } conf->queues = queues; return 0; err: return ret; }
true
qemu
30c367ed446b6ea53245589a5cf373578ac075d7
7,702
static void property_get_str(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { StringProperty *prop = opaque; char *value; value = prop->get(obj, errp); if (value) { visit_type_str(v, &value, name, errp); g_free(value); } }
true
qemu
e1c8237df5395f6a453f18109bd9dd33fb2a397c
7,703
static void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUCRISState *env) { struct target_signal_frame *frame; abi_ulong frame_addr; int err = 0; int i; frame_addr = get_sigframe(env, sizeof *frame); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) goto badframe; /* * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't * use this trampoline anymore but it sets it up for GDB. * In QEMU, using the trampoline simplifies things a bit so we use it. * * This is movu.w __NR_sigreturn, r9; break 13; */ __put_user(0x9c5f, frame->retcode+0); __put_user(TARGET_NR_sigreturn, frame->retcode + 1); __put_user(0xe93d, frame->retcode + 2); /* Save the mask. */ __put_user(set->sig[0], &frame->sc.oldmask); if (err) goto badframe; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__put_user(set->sig[i], &frame->extramask[i - 1])) goto badframe; } setup_sigcontext(&frame->sc, env); /* Move the stack and setup the arguments for the handler. */ env->regs[R_SP] = frame_addr; env->regs[10] = sig; env->pc = (unsigned long) ka->_sa_handler; /* Link SRP so the guest returns through the trampoline. */ env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); unlock_user_struct(frame, frame_addr, 1); return; badframe: unlock_user_struct(frame, frame_addr, 1); force_sig(TARGET_SIGSEGV); }
true
qemu
0188fadb7fe460d8c4c743372b1f7b25773e183e
7,704
static int filter_frame(AVFilterLink *link, AVFrame *in) { AVFilterContext *ctx = link->dst; AVFilterLink *outlink = ctx->outputs[0]; ColorSpaceContext *s = ctx->priv; // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the // input one if it is writable *OR* the actual literal values of in_* // and out_* are identical (not just their respective properties) AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h); int res; ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32); unsigned rgb_sz = rgb_stride * in->height; struct ThreadData td; if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ? default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm; if (s->user_trc == AVCOL_TRC_UNSPECIFIED) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format); out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)]; if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12) out->color_trc = AVCOL_TRC_BT2020_12; } else { out->color_trc = s->user_trc; } out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ? default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp; out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ? in->color_range : s->user_rng; if (rgb_sz != s->rgb_sz) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format); int uvw = in->width >> desc->log2_chroma_w; av_freep(&s->rgb[0]); av_freep(&s->rgb[1]); av_freep(&s->rgb[2]); s->rgb_sz = 0; av_freep(&s->dither_scratch_base[0][0]); av_freep(&s->dither_scratch_base[0][1]); av_freep(&s->dither_scratch_base[1][0]); av_freep(&s->dither_scratch_base[1][1]); av_freep(&s->dither_scratch_base[2][0]); av_freep(&s->dither_scratch_base[2][1]); s->rgb[0] = av_malloc(rgb_sz); s->rgb[1] = av_malloc(rgb_sz); s->rgb[2] = av_malloc(rgb_sz); s->dither_scratch_base[0][0] = av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4)); s->dither_scratch_base[0][1] = av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4)); s->dither_scratch_base[1][0] = av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4)); s->dither_scratch_base[1][1] = av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4)); s->dither_scratch_base[2][0] = av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4)); s->dither_scratch_base[2][1] = av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4)); s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1]; s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1]; s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1]; s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1]; s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1]; s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1]; if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] || !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] || !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] || !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) { uninit(ctx); return AVERROR(ENOMEM); } s->rgb_sz = rgb_sz; } res = create_filtergraph(ctx, in, out); if (res < 0) return res; s->rgb_stride = rgb_stride / sizeof(int16_t); td.in = in; td.out = out; td.in_linesize[0] = in->linesize[0]; td.in_linesize[1] = in->linesize[1]; td.in_linesize[2] = in->linesize[2]; td.out_linesize[0] = out->linesize[0]; td.out_linesize[1] = out->linesize[1]; td.out_linesize[2] = out->linesize[2]; td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h; td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h; if (s->yuv2yuv_passthrough) { av_frame_copy(out, in); } else { ctx->internal->execute(ctx, convert, &td, NULL, FFMIN((in->height + 1) >> 1, ctx->graph->nb_threads)); } av_frame_free(&in); return ff_filter_frame(outlink, out); }
true
FFmpeg
531ff7161d9d6b0cf8f71125319c1f5df5041637
7,705
static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf, Error **errp) { struct glfs *glfs; int ret; int old_errno; GlusterServerList *server; glfs = glfs_new(gconf->volume); if (!glfs) { goto out; } for (server = gconf->server; server; server = server->next) { if (server->value->type == GLUSTER_TRANSPORT_UNIX) { ret = glfs_set_volfile_server(glfs, GlusterTransport_lookup[server->value->type], server->value->u.q_unix.path, 0); } else { ret = glfs_set_volfile_server(glfs, GlusterTransport_lookup[server->value->type], server->value->u.tcp.host, atoi(server->value->u.tcp.port)); } if (ret < 0) { goto out; } } ret = glfs_set_logging(glfs, "-", gconf->debug_level); if (ret < 0) { goto out; } ret = glfs_init(glfs); if (ret) { error_setg(errp, "Gluster connection for volume %s, path %s failed" " to connect", gconf->volume, gconf->path); for (server = gconf->server; server; server = server->next) { if (server->value->type == GLUSTER_TRANSPORT_UNIX) { error_append_hint(errp, "hint: failed on socket %s ", server->value->u.q_unix.path); } else { error_append_hint(errp, "hint: failed on host %s and port %s ", server->value->u.tcp.host, server->value->u.tcp.port); } } error_append_hint(errp, "Please refer to gluster logs for more info\n"); /* glfs_init sometimes doesn't set errno although docs suggest that */ if (errno == 0) { errno = EINVAL; } goto out; } return glfs; out: if (glfs) { old_errno = errno; glfs_fini(glfs); errno = old_errno; } return NULL; }
true
qemu
e9db8ff38e539260a2cb5a7918d1155b7d92a264
7,707
void virtio_queue_set_notification(VirtQueue *vq, int enable) { vq->notification = enable; if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) { vring_avail_event(vq, vring_avail_idx(vq)); } else if (enable) { vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); } else { vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); } }
true
qemu
92045d80badc43c9f95897aad675dc7ef17a3b3f