name
stringlengths 1
473k
| code
stringlengths 7
647k
| asm
stringlengths 4
3.39M
| file
stringlengths 8
196
|
---|---|---|---|
format_octal
|
/*
 * Render `value` as an unpadded octal number occupying exactly `width`
 * bytes of `field` (no NUL terminator, no leading spaces).
 *
 * Returns 0 on success.  Returns -1 when the value cannot be represented:
 * a negative value zero-fills the field, and a value too large for the
 * field saturates it with '7's (the maximum octal digit).
 */
static int
format_octal(int64_t value, char *field, int width)
{
	char *cursor;
	int i;

	/* Octal fields cannot hold negatives; zero-fill and fail. */
	if (value < 0) {
		for (i = 0; i < width; i++)
			field[i] = '0';
		return (-1);
	}

	/* Emit digits least-significant first, from the right edge. */
	cursor = field + width;
	for (i = 0; i < width; i++) {
		*--cursor = (char)('0' + (value & 7));
		value >>= 3;
	}
	if (value == 0)
		return (0);

	/* Leftover bits mean overflow: saturate the whole field. */
	for (i = 0; i < width; i++)
		field[i] = '7';
	return (-1);
}
|
pushq %rbx
movl %edx, %eax
testq %rdi, %rdi
js 0x555d59
testl %eax, %eax
jle 0x555d73
movl %eax, %edx
addq %rdx, %rsi
leal 0x1(%rax), %ecx
movq %rdi, %r8
movl %r8d, %r9d
andb $0x7, %r9b
orb $0x30, %r9b
movb %r9b, -0x1(%rsi)
decq %rsi
shrq $0x3, %rdi
decl %ecx
cmpl $0x1, %ecx
ja 0x555d10
testl %eax, %eax
setle %al
xorl %ebx, %ebx
cmpq $0x8, %r8
setb %cl
adcl $-0x1, %ebx
orb %al, %cl
jne 0x555d7a
movq %rsi, %rdi
movl $0x37, %esi
callq 0x3fa90
movl $0xffffffff, %ebx # imm = 0xFFFFFFFF
jmp 0x555d7a
movl $0xffffffff, %ebx # imm = 0xFFFFFFFF
testl %eax, %eax
jle 0x555d7a
movl %eax, %edx
movq %rsi, %rdi
movl $0x30, %esi
callq 0x3fa90
jmp 0x555d7a
xorl %ebx, %ebx
negq %rdi
sbbl %ebx, %ebx
movl %ebx, %eax
popq %rbx
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmlibarchive/libarchive/archive_write_set_format_ustar.c
|
archive_write_set_format_v7tar
|
/*
 * Register the V7 ("old-style", non-POSIX) tar writer on an archive handle.
 * Any previously-registered format is unregistered first, then the v7tar
 * callback set is installed.
 *
 * Returns ARCHIVE_OK on success, or ARCHIVE_FATAL when the internal
 * template-header sanity check fails or the per-format state cannot be
 * allocated.
 */
int
archive_write_set_format_v7tar(struct archive *_a)
{
	struct archive_write *aw = (struct archive_write *)_a;
	struct v7tar *state;

	archive_check_magic(_a, ARCHIVE_WRITE_MAGIC,
	    ARCHIVE_STATE_NEW, "archive_write_set_format_v7tar");

	/* If someone else was already registered, unregister them. */
	if (aw->format_free != NULL)
		(aw->format_free)(aw);

	/* Basic internal sanity test. */
	if (sizeof(template_header) != 512) {
		archive_set_error(&aw->archive, ARCHIVE_ERRNO_MISC,
		    "Internal: template_header wrong size: %zu should be 512",
		    sizeof(template_header));
		return (ARCHIVE_FATAL);
	}

	state = (struct v7tar *)calloc(1, sizeof(*state));
	if (state == NULL) {
		archive_set_error(&aw->archive, ENOMEM,
		    "Can't allocate v7tar data");
		return (ARCHIVE_FATAL);
	}

	/* Install the v7tar callback set. */
	aw->format_data = state;
	aw->format_name = "tar (non-POSIX)";
	aw->format_options = archive_write_v7tar_options;
	aw->format_write_header = archive_write_v7tar_header;
	aw->format_write_data = archive_write_v7tar_data;
	aw->format_close = archive_write_v7tar_close;
	aw->format_free = archive_write_v7tar_free;
	aw->format_finish_entry = archive_write_v7tar_finish_entry;
	aw->archive.archive_format = ARCHIVE_FORMAT_TAR;
	aw->archive.archive_format_name = "tar (non-POSIX)";
	return (ARCHIVE_OK);
}
|
pushq %rbp
pushq %rbx
pushq %rax
movq %rdi, %rbx
leaq 0x17298a(%rip), %rcx # 0x6c8717
movl $0xb0c5c0de, %esi # imm = 0xB0C5C0DE
movl $0x1, %edx
callq 0x55de18
movl $0xffffffe2, %ebp # imm = 0xFFFFFFE2
cmpl $-0x1e, %eax
je 0x555e5d
movq 0x138(%rbx), %rax
testq %rax, %rax
je 0x555dbb
movq %rbx, %rdi
callq *%rax
movl $0x1, %edi
movl $0x28, %esi
callq 0x41eb8
testq %rax, %rax
je 0x555e47
movq %rax, 0xf8(%rbx)
leaq 0x167fdb(%rip), %rax # 0x6bddb8
movq %rax, 0x100(%rbx)
leaq 0x7b(%rip), %rcx # 0x555e66
movq %rcx, 0x110(%rbx)
leaq 0xf6(%rip), %rcx # 0x555eef
movq %rcx, 0x120(%rbx)
leaq 0x80b(%rip), %rcx # 0x556612
movq %rcx, 0x128(%rbx)
leaq 0x82d(%rip), %rcx # 0x556642
movq %rcx, 0x130(%rbx)
leaq 0x829(%rip), %rcx # 0x55664c
movq %rcx, 0x138(%rbx)
leaq 0x83a(%rip), %rcx # 0x55666b
movq %rcx, 0x118(%rbx)
movl $0x30000, 0x10(%rbx) # imm = 0x30000
movq %rax, 0x18(%rbx)
xorl %ebp, %ebp
jmp 0x555e5d
leaq 0x1728e8(%rip), %rdx # 0x6c8736
movq %rbx, %rdi
movl $0xc, %esi
xorl %eax, %eax
callq 0x53e204
movl %ebp, %eax
addq $0x8, %rsp
popq %rbx
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibarchive/libarchive/archive_write_set_format_v7tar.c
|
warc_data
|
/*
 * Write payload bytes for the current WARC record.
 * Only regular files (AE_IFREG) carry a payload; for any other entry
 * type this is a no-op that still reports `len` bytes consumed.
 * Returns the byte count accounted for, or the (negative) status from
 * __archive_write_output() on failure.
 */
static ssize_t
_warc_data(struct archive_write *a, const void *buf, size_t len)
{
	struct warc_s *w = a->format_data;
	int rc;

	/* Non-regular entries have no body to emit. */
	if (w->typ != AE_IFREG)
		return len;

	/* Never write more bytes than announced in the record header. */
	if (len > w->populz)
		len = (size_t)w->populz;

	/* Push the (possibly clamped) chunk out to the client writer. */
	rc = __archive_write_output(a, buf, len);
	if (rc != ARCHIVE_OK)
		return rc;
	return len;
}
|
pushq %rbx
movq %rdx, %rbx
movq 0xf8(%rdi), %rax
cmpl $0x8000, 0x10(%rax) # imm = 0x8000
jne 0x556a4a
movq 0x18(%rax), %rax
cmpq %rbx, %rax
cmovbq %rax, %rbx
movq %rbx, %rdx
callq 0x53e917
testl %eax, %eax
cltq
cmovneq %rax, %rbx
movq %rbx, %rax
popq %rbx
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibarchive/libarchive/archive_write_set_format_warc.c
|
popul_ehdr
|
/*
 * Populate `tgt` with the WARC record header block for `hdr`.
 *
 * Emits, in order: the "WARC/1.0" version line, WARC-Type, an optional
 * WARC-Target-URI (prefixed with "file://" when the URI has no scheme),
 * WARC-Date (record time), Last-Modified (mtime), WARC-Record-ID
 * (generated from a fresh UUID when hdr.recid is NULL), an optional
 * Content-Type, the mandatory Content-Length, and the blank-line
 * terminator.  All lines are CRLF-terminated per the WARC format.
 *
 * Returns the header length, or -1 if hdr.type is invalid or the
 * rendered header does not fit within `tsz` bytes.
 */
static ssize_t
_popul_ehdr(struct archive_string *tgt, size_t tsz, warc_essential_hdr_t hdr)
{
	static const char _ver[] = "WARC/1.0\r\n";
	/* Record-type names indexed by warc type enum; gaps are NULL. */
	static const char * const _typ[LAST_WT] = {
		NULL, "warcinfo", "metadata", "resource", NULL
	};
	/* Scratch space for a generated record-id; hdr is passed by value,
	 * so pointing hdr.recid at this local is safe within this call. */
	char std_uuid[48U];

	if (hdr.type == WT_NONE || hdr.type > WT_RSRC) {
		/* brilliant, how exactly did we get here? */
		return -1;
	}

	archive_strcpy(tgt, _ver);

	archive_string_sprintf(tgt, "WARC-Type: %s\r\n", _typ[hdr.type]);

	if (hdr.tgturi != NULL) {
		/* check if there's a xyz:// */
		static const char _uri[] = "";
		static const char _fil[] = "file://";
		const char *u;
		char *chk = strchr(hdr.tgturi, ':');

		if (chk != NULL && chk[1U] == '/' && chk[2U] == '/') {
			/* yep, it's definitely a URI */
			u = _uri;
		} else {
			/* hm, best to prepend file:// then */
			u = _fil;
		}
		archive_string_sprintf(tgt,
			"WARC-Target-URI: %s%s\r\n", u, hdr.tgturi);
	}

	/* record time is usually when the http is sent off,
	 * just treat the archive writing as such for a moment */
	xstrftime(tgt, "WARC-Date: %Y-%m-%dT%H:%M:%SZ\r\n", hdr.rtime);

	/* while we're at it, record the mtime */
	xstrftime(tgt, "Last-Modified: %Y-%m-%dT%H:%M:%SZ\r\n", hdr.mtime);

	if (hdr.recid == NULL) {
		/* generate one, grrrr */
		warc_uuid_t u;
		_gen_uuid(&u);

		/* Unfortunately, archive_string_sprintf does not
		 * handle the minimum number following '%'.
		 * So we have to use snprintf function here instead
		 * of archive_string_snprintf function. */
#if defined(_WIN32) && !defined(__CYGWIN__) && !( defined(_MSC_VER) && _MSC_VER >= 1900)
#define snprintf _snprintf
#endif
		/* Format as an RFC 4122 urn:uuid URI; the 16 random bytes
		 * are split into the canonical 8-4-4-4-12 hex groups. */
		snprintf(
			std_uuid, sizeof(std_uuid),
			"<urn:uuid:%08x-%04x-%04x-%04x-%04x%08x>",
			u.u[0U],
			u.u[1U] >> 16U, u.u[1U] & 0xffffU,
			u.u[2U] >> 16U, u.u[2U] & 0xffffU,
			u.u[3U]);
		hdr.recid = std_uuid;
	}

	/* record-id is mandatory, fingers crossed we won't fail */
	archive_string_sprintf(tgt, "WARC-Record-ID: %s\r\n", hdr.recid);

	if (hdr.cnttyp != NULL) {
		archive_string_sprintf(tgt, "Content-Type: %s\r\n", hdr.cnttyp);
	}

	/* next one is mandatory */
	archive_string_sprintf(tgt, "Content-Length: %ju\r\n", (uintmax_t)hdr.cntlen);
	/* blank line terminates the header block */
	archive_strncat(tgt, "\r\n", 2);

	return (archive_strlen(tgt) >= tsz)? -1: (ssize_t)archive_strlen(tgt);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x48, %rsp
movl 0x70(%rsp), %r14d
leal -0x4(%r14), %ecx
movq $-0x1, %rax
cmpl $-0x3, %ecx
jb 0x556c73
movq %rdi, %rbx
leaq 0x70(%rsp), %r15
movq $0x0, 0x8(%rdi)
leaq 0x171ff4(%rip), %rsi # 0x6c8ad1
movl $0xa, %edx
callq 0x53a01f
leaq 0x3055b2(%rip), %rax # 0x85c0a0
movq (%rax,%r14,8), %rdx
leaq 0x171ebd(%rip), %rsi # 0x6c89b6
movq %rbx, %rdi
xorl %eax, %eax
callq 0x53dd88
movq 0x8(%r15), %r14
testq %r14, %r14
je 0x556b4e
movq %r14, %rdi
movl $0x3a, %esi
callq 0x40250
testq %rax, %rax
je 0x556b33
cmpb $0x2f, 0x1(%rax)
jne 0x556b33
cmpb $0x2f, 0x2(%rax)
jne 0x556b33
leaq 0x171fab(%rip), %rdx # 0x6c8adc
jmp 0x556b3a
leaq 0x171fa3(%rip), %rdx # 0x6c8add
leaq 0x171e85(%rip), %rsi # 0x6c89c6
movq %rbx, %rdi
movq %r14, %rcx
xorl %eax, %eax
callq 0x53dd88
movq 0x18(%r15), %rdx
leaq 0x171e85(%rip), %rsi # 0x6c89de
movq %rbx, %rdi
callq 0x556c7f
movq 0x20(%r15), %rdx
leaq 0x171e92(%rip), %rsi # 0x6c89fe
movq %rbx, %rdi
callq 0x556c7f
movq 0x10(%r15), %r14
testq %r14, %r14
jne 0x556c06
movq %rsp, %r14
movl $0x10, %esi
movq %r14, %rdi
callq 0x564154
movl (%r14), %ecx
movl 0x4(%r14), %r8d
movl %r8d, %r9d
andl $0xffff0fff, %r9d # imm = 0xFFFF0FFF
orl $0x4000, %r9d # imm = 0x4000
movl %r9d, 0x4(%r14)
movl 0x8(%r14), %r11d
movl %r11d, %r10d
andl $0x3fffffff, %r10d # imm = 0x3FFFFFFF
orl $0x80000000, %r10d # imm = 0x80000000
movl %r10d, 0x8(%r14)
shrl $0x10, %r8d
movl $0xffff, %eax # imm = 0xFFFF
andl %eax, %r9d
shrl $0x10, %r10d
andl %eax, %r11d
movl 0xc(%r14), %r12d
subq $0x8, %rsp
leaq 0x171e3a(%rip), %rdx # 0x6c8a22
leaq 0x18(%rsp), %r14
movl $0x30, %esi
movq %r14, %rdi
xorl %eax, %eax
pushq %r12
pushq %r11
pushq %r10
callq 0x40190
addq $0x20, %rsp
leaq 0x171e3d(%rip), %rsi # 0x6c8a4a
movq %rbx, %rdi
movq %r14, %rdx
xorl %eax, %eax
callq 0x53dd88
movq 0x28(%r15), %rdx
testq %rdx, %rdx
je 0x556c34
leaq 0x171e35(%rip), %rsi # 0x6c8a5f
movq %rbx, %rdi
xorl %eax, %eax
callq 0x53dd88
movq 0x30(%r15), %rdx
leaq 0x171e33(%rip), %rsi # 0x6c8a72
movq %rbx, %rdi
xorl %eax, %eax
callq 0x53dd88
leaq 0x13792e(%rip), %rsi # 0x68e57e
movl $0x2, %edx
movq %rbx, %rdi
callq 0x53a01f
movq 0x8(%rbx), %rcx
cmpq $0x200, %rcx # imm = 0x200
movq $-0x1, %rax
cmovbq %rcx, %rax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibarchive/libarchive/archive_write_set_format_warc.c
|
xstrftime
|
/*
 * Like strftime(3) but taking a time_t directly: format `t` (interpreted
 * as UTC) with `fmt` and append the result to `as`.  Silently appends
 * nothing if the time cannot be converted.
 *
 * Fix: in the HAVE__GMTIME64_S configuration the original set rt = NULL
 * on conversion failure but then fell through and passed that NULL tm
 * pointer to strftime(), which is undefined behavior.  Now all three
 * conversion paths return early on failure, matching the gmtime_r and
 * gmtime branches.
 */
static void
xstrftime(struct archive_string *as, const char *fmt, time_t t)
{
	struct tm *rt;
#if defined(HAVE_GMTIME_R) || defined(HAVE__GMTIME64_S)
	struct tm timeHere;
#endif
#if defined(HAVE__GMTIME64_S)
	errno_t terr;
	__time64_t tmptime;
#endif
	char strtime[100];
	size_t len;

#ifdef HAVE_GMTIME_R
	if ((rt = gmtime_r(&t, &timeHere)) == NULL)
		return;
#elif defined(HAVE__GMTIME64_S)
	tmptime = t;
	terr = _gmtime64_s(&timeHere, &tmptime);
	if (terr)
		return; /* was: rt = NULL, later dereferenced by strftime() */
	rt = &timeHere;
#else
	if ((rt = gmtime(&t)) == NULL)
		return;
#endif
	/* leave the hard yacker to our role model strftime() */
	len = strftime(strtime, sizeof(strtime)-1, fmt, rt);
	archive_strncat(as, strtime, len);
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0xb0, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq %rsp, %rdi
movq %rdx, (%rdi)
leaq 0x8(%rsp), %rsi
callq 0x40ca0
testq %rax, %rax
je 0x556ccc
leaq 0x40(%rsp), %r15
movl $0x63, %esi
movq %r15, %rdi
movq %r14, %rdx
movq %rax, %rcx
callq 0x410f0
movq %rbx, %rdi
movq %r15, %rsi
movq %rax, %rdx
callq 0x53a01f
addq $0xb0, %rsp
popq %rbx
popq %r14
popq %r15
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibarchive/libarchive/archive_write_set_format_warc.c
|
archive_write_zip_data
|
/*
 * Write `s` bytes of entry data to the ZIP archive, compressing and/or
 * encrypting as configured for the current entry.
 *
 * The write is clamped to the entry's remaining declared uncompressed
 * size.  Encryption contexts (traditional PKWARE or WinZip AES) are
 * initialized lazily on the first data write.  Returns the number of
 * bytes consumed, or a negative ARCHIVE_* status on failure.
 */
static ssize_t
archive_write_zip_data(struct archive_write *a, const void *buff, size_t s)
{
	int ret;
	struct zip *zip = a->format_data;

	/* Never accept more than the declared uncompressed size. */
	if ((int64_t)s > zip->entry_uncompressed_limit)
		s = (size_t)zip->entry_uncompressed_limit;
	zip->entry_uncompressed_written += s;

	if (s == 0) return 0;

	/* Lazily set up the encryption context on the first data write. */
	if (zip->entry_flags & ZIP_ENTRY_FLAG_ENCRYPTED) {
		switch (zip->entry_encryption) {
		case ENCRYPTION_TRADITIONAL:
			/* Initialize traditional PKWARE encryption context. */
			if (!zip->tctx_valid) {
				ret = init_traditional_pkware_encryption(a);
				if (ret != ARCHIVE_OK)
					return (ret);
				zip->tctx_valid = 1;
			}
			break;
		case ENCRYPTION_WINZIP_AES128:
		case ENCRYPTION_WINZIP_AES256:
			if (!zip->cctx_valid) {
				ret = init_winzip_aes_encryption(a);
				if (ret != ARCHIVE_OK)
					return (ret);
				zip->cctx_valid = zip->hctx_valid = 1;
			}
			break;
		case ENCRYPTION_NONE:
		default:
			break;
		}
	}

	switch (zip->entry_compression) {
	case COMPRESSION_STORE:
		if (zip->tctx_valid || zip->cctx_valid) {
			/* Stored + encrypted: push the input through
			 * zip->buf in chunks, encrypting each chunk. */
			const uint8_t *rb = (const uint8_t *)buff;
			const uint8_t * const re = rb + s;

			while (rb < re) {
				size_t l;

				if (zip->tctx_valid) {
					/* PKWARE cipher: returns the number
					 * of bytes produced into zip->buf. */
					l = trad_enc_encrypt_update(&zip->tctx,
					    rb, re - rb,
					    zip->buf, zip->len_buf);
				} else {
					/* WinZip AES-CTR; `l` is in/out:
					 * capacity in, bytes produced out. */
					l = zip->len_buf;
					ret = archive_encrypto_aes_ctr_update(
					    &zip->cctx,
					    rb, re - rb, zip->buf, &l);
					if (ret < 0) {
						archive_set_error(&a->archive,
						    ARCHIVE_ERRNO_MISC,
						    "Failed to encrypt file");
						return (ARCHIVE_FAILED);
					}
					/* Authenticate the ciphertext. */
					archive_hmac_sha1_update(&zip->hctx,
					    zip->buf, l);
				}
				ret = __archive_write_output(a, zip->buf, l);
				if (ret != ARCHIVE_OK)
					return (ret);
				zip->entry_compressed_written += l;
				zip->written_bytes += l;
				rb += l;
			}
		} else {
			/* Stored, unencrypted: pass the data straight through. */
			ret = __archive_write_output(a, buff, s);
			if (ret != ARCHIVE_OK)
				return (ret);
			zip->written_bytes += s;
			zip->entry_compressed_written += s;
		}
		break;
#if HAVE_ZLIB_H
	case COMPRESSION_DEFLATE:
		zip->stream.next_in = (unsigned char*)(uintptr_t)buff;
		zip->stream.avail_in = (uInt)s;
		do {
			ret = deflate(&zip->stream, Z_NO_FLUSH);
			if (ret == Z_STREAM_ERROR)
				return (ARCHIVE_FATAL);
			/* Flush zip->buf whenever deflate fills it,
			 * encrypting the buffer in place first if needed. */
			if (zip->stream.avail_out == 0) {
				if (zip->tctx_valid) {
					trad_enc_encrypt_update(&zip->tctx,
					    zip->buf, zip->len_buf,
					    zip->buf, zip->len_buf);
				} else if (zip->cctx_valid) {
					size_t outl = zip->len_buf;
					ret = archive_encrypto_aes_ctr_update(
					    &zip->cctx,
					    zip->buf, zip->len_buf,
					    zip->buf, &outl);
					if (ret < 0) {
						archive_set_error(&a->archive,
						    ARCHIVE_ERRNO_MISC,
						    "Failed to encrypt file");
						return (ARCHIVE_FAILED);
					}
					archive_hmac_sha1_update(&zip->hctx,
					    zip->buf, zip->len_buf);
				}
				ret = __archive_write_output(a, zip->buf,
					zip->len_buf);
				if (ret != ARCHIVE_OK)
					return (ret);
				zip->entry_compressed_written += zip->len_buf;
				zip->written_bytes += zip->len_buf;
				/* Reset the output window for deflate. */
				zip->stream.next_out = zip->buf;
				zip->stream.avail_out = (uInt)zip->len_buf;
			}
		} while (zip->stream.avail_in != 0);
		break;
#endif

	case COMPRESSION_UNSPECIFIED:
	default:
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Invalid ZIP compression type");
		return ARCHIVE_FATAL;
	}

	zip->entry_uncompressed_limit -= s;
	/* Track the CRC of the plaintext, except with WinZip AES vendor
	 * AE-2, where the code skips the CRC entirely. */
	if (!zip->cctx_valid || zip->aes_vendor != AES_VENDOR_AE_2)
		zip->entry_crc32 =
		    zip->crc32func(zip->entry_crc32, buff, (unsigned)s);
	return (s);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %rdx, %rbx
movq 0xf8(%rdi), %r13
movq 0x28(%r13), %rax
cmpq %rdx, %rax
cmovlq %rax, %rbx
addq %rbx, 0x20(%r13)
testq %rbx, %rbx
je 0x557f4e
movq %rsi, %rbp
movq %rdi, %r14
testb $0x1, 0x44(%r13)
je 0x557f65
movl 0x40(%r13), %eax
leal -0x2(%rax), %ecx
cmpl $0x2, %ecx
jae 0x557f55
cmpb $0x0, 0xc0(%r13)
jne 0x557f65
movq %rbp, 0x18(%rsp)
movq %r14, %rdi
callq 0x5591a2
testq %rax, %rax
je 0x558261
movq %rax, %rbp
movl 0x40(%r13), %r12d
xorl %r15d, %r15d
cmpl $0x2, %r12d
setne %r15b
leaq 0x8(,%r15,8), %rsi
leaq 0x30(%rsp), %rdi
movq %rsi, 0x10(%rsp)
callq 0x564154
testl %eax, %eax
jne 0x558241
movl %r12d, 0x28(%rsp)
shll $0x4, %r15d
addq $0x10, %r15
movq %rbp, %rdi
callq 0x3fd60
leal (%r15,%r15), %ecx
movq %rcx, 0x20(%rsp)
orq $0x2, %rcx
movq %rcx, (%rsp)
leaq 0x30(%rsp), %rdx
leaq 0x50(%rsp), %r12
movq %rbp, %rdi
movq %rax, %rsi
movq 0x10(%rsp), %rcx
movl $0x3e8, %r8d # imm = 0x3E8
movq %r12, %r9
leaq 0x3041d7(%rip), %rax # 0x85c0f8
callq *(%rax)
leaq 0x68(%r13), %rbp
movq %rbp, %rdi
movq %r12, %rsi
movq %r15, %rdx
leaq 0x3041c1(%rip), %rax # 0x85c0f8
callq *0x20(%rax)
testl %eax, %eax
je 0x558394
leaq 0x166dad(%rip), %rdx # 0x6becf6
jmp 0x558268
xorl %ebx, %ebx
jmp 0x558424
cmpl $0x1, %eax
jne 0x557f65
cmpb $0x0, 0x5c(%r13)
je 0x55821a
movl 0x3c(%r13), %eax
cmpl $0x8, %eax
je 0x558092
testl %eax, %eax
jne 0x5581f8
cmpb $0x0, 0x5c(%r13)
movq %rbp, 0x18(%rsp)
jne 0x557f94
cmpb $0x0, 0xc0(%r13)
je 0x558281
testq %rbx, %rbx
jle 0x5581c4
movq 0x18(%rsp), %rbp
leaq (%rbx,%rbp), %r12
leaq 0x50(%r13), %r15
leaq 0x304147(%rip), %rax # 0x85c0f8
movq 0x28(%rax), %rax
movq %rax, 0x10(%rsp)
leaq 0x68(%r13), %rax
movq %rax, 0x28(%rsp)
leaq 0x305316(%rip), %rax # 0x85d2e0
movq 0x8(%rax), %rax
movq %rax, 0x20(%rsp)
leaq 0xc8(%r13), %rax
movq %rax, 0x48(%rsp)
cmpb $0x0, 0x5c(%r13)
je 0x55800e
movq %r12, %rdx
subq %rbp, %rdx
movq 0x1b0(%r13), %r8
movq 0x1b8(%r13), %rcx
movq %r15, %rdi
movq %rbp, %rsi
callq 0x558e56
movl %eax, %edx
movq %rdx, 0x50(%rsp)
jmp 0x55805a
movq 0x1b0(%r13), %rax
movq %rax, 0x50(%rsp)
movq %r12, %rdx
subq %rbp, %rdx
movq 0x1b8(%r13), %rcx
movq 0x28(%rsp), %rdi
movq %rbp, %rsi
leaq 0x50(%rsp), %r8
callq *0x10(%rsp)
testl %eax, %eax
js 0x5582a7
movq 0x1b8(%r13), %rsi
movq 0x50(%rsp), %rdx
movq 0x48(%rsp), %rdi
callq *0x20(%rsp)
movq 0x50(%rsp), %rdx
movq 0x1b8(%r13), %rsi
movq %r14, %rdi
callq 0x53e917
testl %eax, %eax
jne 0x558421
movq 0x50(%rsp), %rax
addq %rax, 0x18(%r13)
addq %rax, 0x110(%r13)
addq %rax, %rbp
cmpq %r12, %rbp
jb 0x557fdf
jmp 0x5581c4
leaq 0x140(%r13), %r12
movq %rbp, 0x18(%rsp)
movq %rbp, 0x140(%r13)
movl %ebx, 0x148(%r13)
leaq 0x50(%r13), %rbp
leaq 0x304041(%rip), %rax # 0x85c0f8
movq 0x28(%rax), %rax
movq %rax, 0x10(%rsp)
leaq 0x68(%r13), %rax
movq %rax, 0x28(%rsp)
leaq 0x305210(%rip), %rax # 0x85d2e0
movq 0x8(%rax), %rax
movq %rax, 0x20(%rsp)
leaq 0xc8(%r13), %r15
movq %r12, %rdi
xorl %esi, %esi
callq 0x62fe7e
cmpl $-0x2, %eax
je 0x55820e
cmpl $0x0, 0x160(%r13)
jne 0x5581b6
cmpb $0x0, 0x5c(%r13)
je 0x558126
movq 0x1b0(%r13), %rdx
movq 0x1b8(%r13), %rcx
movq %rbp, %rdi
movq %rcx, %rsi
movq %rdx, %r8
callq 0x558e56
jmp 0x558171
cmpb $0x0, 0xc0(%r13)
je 0x558171
movq 0x1b0(%r13), %rdx
movq %rdx, 0x50(%rsp)
movq 0x1b8(%r13), %rcx
movq 0x28(%rsp), %rdi
movq %rcx, %rsi
leaq 0x50(%rsp), %r8
callq *0x10(%rsp)
testl %eax, %eax
js 0x5582a7
movq 0x1b0(%r13), %rdx
movq 0x1b8(%r13), %rsi
movq %r15, %rdi
callq *0x20(%rsp)
movq 0x1b0(%r13), %rdx
movq 0x1b8(%r13), %rsi
movq %r14, %rdi
callq 0x53e917
testl %eax, %eax
jne 0x558421
movq 0x1b0(%r13), %rax
movq 0x1b8(%r13), %rcx
addq %rax, 0x18(%r13)
addq %rax, 0x110(%r13)
movq %rcx, 0x158(%r13)
movl %eax, 0x160(%r13)
cmpl $0x0, 0x148(%r13)
jne 0x5580e0
subq %rbx, 0x28(%r13)
cmpb $0x0, 0xc0(%r13)
je 0x5581dd
cmpl $0x2, 0x60(%r13)
je 0x558424
movl 0x38(%r13), %edi
movl %ebx, %edx
movq 0x18(%rsp), %rsi
callq *0xe8(%r13)
movl %eax, 0x38(%r13)
jmp 0x558424
leaq 0x170b6a(%rip), %rdx # 0x6c8d69
movq %r14, %rdi
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
xorl %eax, %eax
callq 0x53e204
movq $-0x1e, %rbx
jmp 0x558424
movq %r14, %rdi
callq 0x5591a2
testq %rax, %rax
je 0x558261
movq %rax, %r12
leaq 0x50(%rsp), %rdi
movl $0xb, %esi
callq 0x564154
testl %eax, %eax
je 0x5582c9
leaq 0x170b5a(%rip), %rdx # 0x6c8da2
movq %r14, %rdi
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
xorl %eax, %eax
callq 0x53e204
movl $0xffffffe2, %eax # imm = 0xFFFFFFE2
jmp 0x558421
leaq 0x170b1e(%rip), %rdx # 0x6c8d86
movq %r14, %rdi
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
xorl %eax, %eax
callq 0x53e204
movl $0xffffffe7, %eax # imm = 0xFFFFFFE7
jmp 0x558421
movq %r14, %rdi
movq %rbp, %rsi
movq %rbx, %rdx
callq 0x53e917
testl %eax, %eax
jne 0x558421
addq %rbx, 0x110(%r13)
addq %rbx, 0x18(%r13)
jmp 0x5581c4
leaq 0x170aa4(%rip), %rdx # 0x6c8d52
movq %r14, %rdi
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
xorl %eax, %eax
callq 0x53e204
movq $-0x19, %rbx
jmp 0x558424
movq %r14, 0x10(%rsp)
movq %rbp, 0x18(%rsp)
leaq 0x50(%r13), %r14
movq %r12, %rdi
callq 0x3fd60
movabsq $0x2345678912345678, %rcx # imm = 0x2345678912345678
movq %rcx, 0x50(%r13)
movl $0x34567890, 0x58(%r13) # imm = 0x34567890
testq %rax, %rax
je 0x55831e
movq %rax, %rbp
movq %r12, %rcx
xorl %r12d, %r12d
movzbl (%rcx,%r12), %esi
movq %r14, %rdi
movq %rcx, %r15
callq 0x558ebc
movq %r15, %rcx
incq %r12
cmpq %r12, %rbp
jne 0x558303
movb 0x5d(%r13), %al
movb %al, 0x5b(%rsp)
xorl %r12d, %r12d
movq 0x18(%rsp), %rbp
movzbl 0x50(%rsp,%r12), %esi
movl 0x58(%r13), %eax
orl $0x2, %eax
movl %eax, %ecx
xorl $0x1, %ecx
imull %eax, %ecx
shrl $0x8, %ecx
xorl %esi, %ecx
movb %cl, 0x30(%rsp,%r12)
movq %r14, %rdi
callq 0x558ebc
incq %r12
cmpq $0xc, %r12
jne 0x55832e
leaq 0x30(%rsp), %rsi
movl $0xc, %edx
movq 0x10(%rsp), %r14
movq %r14, %rdi
callq 0x53e917
testl %eax, %eax
jne 0x558421
addq $0xc, 0x110(%r13)
addq $0xc, 0x18(%r13)
movb $0x1, 0x5c(%r13)
jmp 0x557f65
xorl %eax, %eax
movl 0x28(%rsp), %ecx
movl %ecx, %r12d
cmpl $0x2, %ecx
setne %al
leaq 0x304f36(%rip), %rcx # 0x85d2e0
leaq 0xc8(%r13), %rdi
shll $0x4, %eax
leaq 0x50(%rsp), %rdx
leaq (%rax,%rdx), %rsi
addq $0x10, %rsi
movq %r15, %rdx
callq *(%rcx)
testl %eax, %eax
je 0x5583e3
movq %rbp, %rdi
leaq 0x303d24(%rip), %rax # 0x85c0f8
callq *0x30(%rax)
leaq 0x16696f(%rip), %rdx # 0x6bed4d
jmp 0x558268
xorl %eax, %eax
cmpl $0x2, %r12d
setne %al
movq 0x20(%rsp), %r12
movb 0x50(%rsp,%r12), %cl
movb 0x51(%rsp,%r12), %dl
movb %cl, 0x38(%rsp,%rax,8)
movq 0x10(%rsp), %r15
movb %dl, 0x31(%rsp,%r15)
orq $0x2, %r15
leaq 0x30(%rsp), %rsi
movq %r14, %rdi
movq %r15, %rdx
callq 0x53e917
testl %eax, %eax
je 0x558439
movslq %eax, %rbx
movq %rbx, %rax
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
addq %r15, 0x110(%r13)
addq %r15, 0x18(%r13)
movb $0x1, %al
movb %al, 0xd0(%r13)
movb %al, 0xc0(%r13)
movq 0x18(%rsp), %rbp
jmp 0x557f65
|
/JKorbelRA[P]CMake/Utilities/cmlibarchive/libarchive/archive_write_set_format_zip.c
|
ZSTD_ldm_fillHashTable
|
/*
 * Seed the long-distance-matcher hash table from the range [ip, iend).
 * The rolling gear hash proposes split points; each split far enough
 * from the start to cover a full minimum-length match is hashed with
 * XXH64 and inserted into the LDM table.
 */
void ZSTD_ldm_fillHashTable(
            ldmState_t* ldmState, const BYTE* ip,
            const BYTE* iend, ldmParams_t const* params)
{
    U32 const minMatchLength = params->minMatchLength;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const istart = ip;
    ldmRollingHashState_t hashState;
    size_t* const splits = ldmState->splitIndices;
    unsigned numSplits;

    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");

    ZSTD_ldm_gear_init(&hashState, params);
    while (ip < iend) {
        size_t hashed;
        unsigned splitIdx;

        /* Advance the gear hash; split positions land in `splits`. */
        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);

        for (splitIdx = 0; splitIdx < numSplits; splitIdx++) {
            /* Too close to the start to back up a full match length. */
            if (ip + splits[splitIdx] < istart + minMatchLength)
                continue;
            {   BYTE const* const split = ip + splits[splitIdx] - minMatchLength;
                U64 const xxhash = XXH64(split, minMatchLength, 0);
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
                ldmEntry_t entry;

                entry.offset = (U32)(split - base);
                entry.checksum = (U32)(xxhash >> 32);
                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
            }
        }

        ip += hashed;
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movq %rsi, %r15
movl 0xc(%rcx), %r11d
movl 0x4(%rcx), %eax
movl 0x8(%rcx), %esi
movq %rdi, 0x10(%rsp)
movq 0x8(%rdi), %rdi
movq %rdi, 0x38(%rsp)
movq %rcx, 0x40(%rsp)
movl 0x10(%rcx), %ecx
cmpl $0x40, %r11d
movl $0x40, %edi
cmovbl %r11d, %edi
movq $-0x1, %r8
shlq %cl, %r8
leal -0x1(%rcx), %r9d
movl %edi, %r10d
subl %ecx, %r10d
xorl %ecx, %ecx
cmpl %edi, %r9d
notq %r8
cmovbl %r10d, %ecx
shlq %cl, %r8
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq %rcx, 0x48(%rsp)
movq %r8, 0x50(%rsp)
cmpq %rdx, %r15
jae 0x5e2913
movq 0x10(%rsp), %rcx
leaq 0x40(%rcx), %r13
subl %esi, %eax
movl %r11d, %ecx
leaq (%r15,%rcx), %rbx
movq %rcx, 0x30(%rsp)
negq %rcx
movq %rcx, 0x28(%rsp)
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
movl %eax, %ecx
shll %cl, %esi
notl %esi
movl %esi, 0xc(%rsp)
movq %rdx, 0x18(%rsp)
movl $0x0, 0x8(%rsp)
subq %r15, %rdx
leaq 0x48(%rsp), %rdi
movq %r15, %rsi
movq %r13, %rcx
leaq 0x8(%rsp), %r8
callq 0x5e2922
movq %rax, 0x20(%rsp)
movl 0x8(%rsp), %ebp
testq %rbp, %rbp
je 0x5e2900
xorl %r12d, %r12d
movq (%r13,%r12,8), %r14
addq %r15, %r14
cmpq %rbx, %r14
jb 0x5e28f8
addq 0x28(%rsp), %r14
movq %r14, %rdi
movq 0x30(%rsp), %rsi
xorl %edx, %edx
callq 0x60cfca
movl %eax, %edx
andl 0xc(%rsp), %edx
subl 0x38(%rsp), %r14d
movabsq $-0x100000000, %rcx # imm = 0xFFFFFFFF00000000
andq %rcx, %rax
orq %rax, %r14
movq 0x40(%rsp), %rax
movb 0x8(%rax), %cl
movq 0x10(%rsp), %r8
movq 0x38(%r8), %rax
movzbl (%rax,%rdx), %esi
movq %rdx, %rdi
shlq %cl, %rdi
shlq $0x3, %rdi
addq 0x28(%r8), %rdi
movq %r14, (%rdi,%rsi,8)
movl $0xffffffff, %edi # imm = 0xFFFFFFFF
shll %cl, %edi
incl %esi
notl %edi
andl %esi, %edi
movb %dil, (%rax,%rdx)
incq %r12
cmpq %r12, %rbp
jne 0x5e2884
addq 0x20(%rsp), %r15
movq 0x18(%rsp), %rdx
cmpq %rdx, %r15
jb 0x5e2853
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_ldm.c
|
ZSTD_ldm_skipSequences
|
/*
 * Advance the raw sequence store past `srcSize` bytes of input.
 * Sequences fully covered are consumed (pos advances); a sequence the
 * skip ends inside is trimmed in place.  If trimming leaves a match
 * shorter than `minMatch`, the remainder is folded into the next
 * sequence's literals and the sequence is dropped.
 */
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
    for (;;) {
        rawSeq* cur;

        if (srcSize == 0 || rawSeqStore->pos >= rawSeqStore->size)
            return;
        cur = rawSeqStore->seq + rawSeqStore->pos;

        /* Skip ends within this sequence's literals: trim and stop. */
        if (srcSize <= cur->litLength) {
            cur->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= cur->litLength;
        cur->litLength = 0;

        /* Skip ends within this sequence's match: trim and stop. */
        if (srcSize < cur->matchLength) {
            cur->matchLength -= (U32)srcSize;
            if (cur->matchLength < minMatch) {
                /* Leftover match too short to keep: merge into the
                 * following sequence's literals and discard it. */
                if (rawSeqStore->pos + 1 < rawSeqStore->size)
                    cur[1].litLength += cur[0].matchLength;
                rawSeqStore->pos++;
            }
            return;
        }

        /* Whole sequence consumed; move on to the next one. */
        srcSize -= cur->matchLength;
        cur->matchLength = 0;
        rawSeqStore->pos++;
    }
}
|
testq %rsi, %rsi
je 0x5e3695
movq 0x8(%rdi), %rax
movq 0x18(%rdi), %r8
leaq (%rax,%rax,2), %rcx
leaq 0x4(,%rcx,4), %rcx
cmpq %r8, %rax
jae 0x5e3695
movq (%rdi), %r9
movl (%r9,%rcx), %r11d
movq %rsi, %r10
subq %r11, %r10
jbe 0x5e3696
movl $0x0, (%r9,%rcx)
movl 0x4(%r9,%rcx), %esi
cmpq %rsi, %r10
jb 0x5e369e
incq %rax
movl $0x0, 0x4(%r9,%rcx)
addq $0xc, %rcx
movq %rax, 0x8(%rdi)
subq %rsi, %r10
movq %r10, %rsi
jne 0x5e3653
retq
subl %esi, %r11d
movl %r11d, (%r9,%rcx)
retq
subl %r10d, %esi
movl %esi, 0x4(%r9,%rcx)
cmpl %edx, %esi
jae 0x5e3695
incq %rax
cmpq %r8, %rax
jae 0x5e36b7
addl %esi, 0xc(%r9,%rcx)
movq %rax, 0x8(%rdi)
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_ldm.c
|
ZSTD_ldm_blockCompress
|
/*
 * Compress one block using the precomputed long-distance-match sequences
 * in `rawSeqStore`.  For each LDM sequence: compress the literal segment
 * with the regular block compressor, then record the long-distance match
 * itself via ZSTD_storeSeq.  With the optimal parser (>= ZSTD_btopt) the
 * LDM store is instead handed to the parser as match candidates.
 * Returns the size of the trailing literal segment, as reported by the
 * final blockCompressor call.
 */
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
    void const* src, size_t srcSize)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    unsigned const minMatch = cParams->minMatch;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
    if (cParams->strategy >= ZSTD_btopt) {
        size_t lastLLSize;
        ms->ldmSeqStore = rawSeqStore;
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
        /* Keep the LDM store in sync with the input we just consumed. */
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
        return lastLLSize;
    }

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);
    /* Loop through each sequence and apply the block compressor to the literals */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
            (U32)(iend - ip), minMatch);
        int i;
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, ip);
        /* Run the block compressor */
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
        {
            /* Compress only the literals preceding the LDM match; the
             * compressor reports how many literals it left unmatched. */
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
            ip += sequence.litLength;
            /* Update the repcodes */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                sequence.offset + ZSTD_REP_MOVE,
                sequence.matchLength - MINMATCH);
            ip += sequence.matchLength;
        }
    }
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, ip);
    /* Compress the last literals */
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %r9, %r13
movq %rcx, 0x20(%rsp)
movq %rdx, %rcx
movq %rsi, %r12
movq %rdi, %r14
movl 0x1c(%rsi), %eax
movl 0x108(%rsi), %edi
movl $0x1, %edx
cmpl 0x18(%rsi), %eax
jb 0x5e3760
movq 0xe8(%r12), %rax
testq %rax, %rax
je 0x5e375e
cmpl $0x1, 0x7c(%rax)
movl $0x3, %edx
sbbl $0x0, %edx
jmp 0x5e3760
xorl %edx, %edx
movq %rcx, 0x18(%rsp)
movq 0x80(%rsp), %rbx
movl 0x100(%r12), %eax
movl %eax, 0x2c(%rsp)
movl %r8d, %esi
callq 0x58e7c8
cmpl $0x6, 0x108(%r12)
jbe 0x5e37c9
movq %r14, 0x110(%r12)
movq %r12, %rdi
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq %r13, %rcx
movq %rbx, %r8
callq *%rax
movq %r14, %rdi
movq %rax, %r14
movq %rbx, %rsi
callq 0x5e36bc
movq %r14, %rax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, 0x38(%rsp)
leaq (%rbx,%r13), %r15
movq 0x8(%r14), %rax
cmpq 0x18(%r14), %rax
setae %cl
testq %rbx, %rbx
setle %dl
orb %cl, %dl
jne 0x5e3a80
leaq -0x20(%r15), %rcx
movq %rcx, 0x40(%rsp)
movq %r14, 0x8(%rsp)
movq %r15, 0x30(%rsp)
movq %r15, %rcx
subq %r13, %rcx
movq (%r14), %rdx
leaq (%rax,%rax,2), %rsi
movq (%rdx,%rsi,4), %r15
movq %r15, %rbx
shrq $0x20, %rbx
movl 0x8(%rdx,%rsi,4), %edi
leal (%rdi,%rbx), %edx
cmpl %ecx, %edx
jbe 0x5e3850
movl %ecx, %eax
subl %ebx, %eax
movl 0x2c(%rsp), %edx
cmpl %edx, %eax
movl $0x0, %esi
cmovbq %rsi, %r15
movl %ecx, %eax
subl %ebx, %eax
cmoval %eax, %edi
movq %rdi, 0x10(%rsp)
cmovbeq %rsi, %r15
movl %ecx, %esi
movq %r14, %rdi
callq 0x5e363a
jmp 0x5e385c
movq %rdi, 0x10(%rsp)
incq %rax
movq %rax, 0x8(%r14)
testl %r15d, %r15d
je 0x5e3a85
movl 0x8(%r12), %ecx
movl %r13d, %eax
subl %ecx, %eax
movl 0x2c(%r12), %ecx
leal 0x400(%rcx), %edx
cmpl %eax, %edx
jae 0x5e3899
movl %eax, %edx
subl %ecx, %edx
addl $0xfffffc00, %edx # imm = 0xFFFFFC00
movl $0x200, %ecx # imm = 0x200
cmpl %ecx, %edx
cmovael %ecx, %edx
subl %edx, %eax
movl %eax, 0x2c(%r12)
movq %r12, %rdi
movq %r13, %rsi
callq 0x5e3af6
movq %r12, %r14
movq %r12, %rdi
movq 0x18(%rsp), %rbp
movq %rbp, %rsi
movq 0x20(%rsp), %r12
movq %r12, %rdx
movq %r13, %rcx
movq %rbx, %r8
callq *0x38(%rsp)
movq %rbp, %r9
movq (%r12), %rcx
movq %rcx, 0x4(%r12)
leaq (%rbx,%r13), %rbp
movl %r15d, (%r12)
movq %rbp, %rdx
subq %rax, %rdx
movq 0x18(%r9), %rcx
movq 0x40(%rsp), %r10
cmpq %r10, %rbp
jbe 0x5e38f9
cmpq %r10, %rdx
jbe 0x5e3961
movq %r14, %r12
jmp 0x5e3980
movups (%rdx), %xmm0
movups %xmm0, (%rcx)
movq 0x18(%r9), %rcx
cmpq $0x10, %rax
jbe 0x5e3987
movups 0x10(%rdx), %xmm0
movups %xmm0, 0x10(%rcx)
cmpq $0x21, %rax
movq %r14, %r12
movq 0x8(%rsp), %r14
jl 0x5e39fb
leaq (%rcx,%rax), %rdx
addq $0x20, %rcx
subq %rax, %rbx
leaq (%rbx,%r13), %rsi
addq $0x30, %rsi
xorl %edi, %edi
movups -0x10(%rsi,%rdi), %xmm0
leaq (%rcx,%rdi), %r8
addq $0x20, %r8
movups %xmm0, -0x20(%r8)
movups (%rsi,%rdi), %xmm0
movups %xmm0, -0x10(%r8)
addq $0x20, %rdi
cmpq %rdx, %r8
jb 0x5e3938
jmp 0x5e39fb
movq %r10, %rdi
subq %rdx, %rdi
leaq (%rcx,%rdi), %rsi
movups (%rdx), %xmm0
movups %xmm0, (%rcx)
cmpq $0x11, %rdi
movq %r14, %r12
jge 0x5e399f
movq %r10, %rdx
movq %rsi, %rcx
movq 0x8(%rsp), %r14
jmp 0x5e39de
addq %rax, %rcx
movq %rcx, 0x18(%r9)
movq 0x8(%r9), %rcx
movq %r14, %r12
movq 0x8(%rsp), %r14
jmp 0x5e3a21
subq %rax, %rbx
leaq (%rbx,%r13), %rdx
addq $0x10, %rdx
movl $0x10, %edi
movq 0x8(%rsp), %r14
movups -0x10(%rdx,%rdi), %xmm0
leaq (%rcx,%rdi), %r8
addq $0x20, %r8
movups %xmm0, -0x20(%r8)
movups (%rdx,%rdi), %xmm0
movups %xmm0, -0x10(%r8)
addq $0x20, %rdi
cmpq %rsi, %r8
jb 0x5e39b4
movq %r10, %rdx
movq %rsi, %rcx
cmpq %rbp, %rdx
jae 0x5e39fb
movq %rbp, %rsi
subq %rdx, %rsi
xorl %edi, %edi
movb (%rdx,%rdi), %r8b
movb %r8b, (%rcx,%rdi)
incq %rdi
cmpq %rdi, %rsi
jne 0x5e39eb
addq %rax, 0x18(%r9)
movq 0x8(%r9), %rcx
cmpq $0x10000, %rax # imm = 0x10000
jb 0x5e3a21
movl $0x1, 0x48(%r9)
movq %rcx, %rdx
subq (%r9), %rdx
shrq $0x3, %rdx
movl %edx, 0x4c(%r9)
movq 0x10(%rsp), %rsi
leal -0x3(%rsi), %edx
movw %ax, 0x4(%rcx)
addl $0x3, %r15d
movl %r15d, (%rcx)
cmpl $0x10000, %edx # imm = 0x10000
jb 0x5e3a52
movl $0x2, 0x48(%r9)
movq %rcx, %rax
subq (%r9), %rax
shrq $0x3, %rax
movl %eax, 0x4c(%r9)
movw %dx, 0x6(%rcx)
addq $0x8, %rcx
movq %rcx, 0x8(%r9)
movl %esi, %eax
addq %rax, %rbp
movq 0x8(%r14), %rax
cmpq 0x18(%r14), %rax
movq 0x30(%rsp), %r15
jae 0x5e3a8d
movq %rbp, %r13
cmpq %r15, %rbp
jb 0x5e37fe
jmp 0x5e3a8d
movq %r13, %rbp
jmp 0x5e3a8d
movq %r13, %rbp
movq 0x30(%rsp), %r15
movl 0x8(%r12), %ecx
movl %ebp, %eax
subl %ecx, %eax
movl 0x2c(%r12), %ecx
leal 0x400(%rcx), %edx
cmpl %eax, %edx
jae 0x5e3ac0
movl %eax, %edx
subl %ecx, %edx
addl $0xfffffc00, %edx # imm = 0xFFFFFC00
movl $0x200, %ecx # imm = 0x200
cmpl %ecx, %edx
cmovael %ecx, %edx
subl %edx, %eax
movl %eax, 0x2c(%r12)
movq %r12, %rdi
movq %rbp, %rsi
callq 0x5e3af6
subq %rbp, %r15
movq %r12, %rdi
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq %rbp, %rcx
movq %r15, %r8
movq 0x38(%rsp), %rax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmpq *%rax
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_ldm.c
|
ZSTD_insertBt1
|
/* Insert position `ip` into the binary search tree anchored in its hash
 * bucket, rebalancing the smaller/larger chains while walking down.
 * `mls` selects the hash width; `extDict` enables the two-segment
 * (dictionary-window) comparison path. Returns the number of positions
 * that may safely be skipped forward: at least matchEndIdx - (curr + 8),
 * boosted (up to 192) when a very long best match (> 384 bytes) was seen. */
static U32 ZSTD_insertBt1(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                U32 const mls, const int extDict)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hashLog = cParams->hashLog;
    size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
    U32* const bt = ms->chainTable;
    U32 const btLog = cParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];          /* current tree root for this hash */
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    const U32 curr = (U32)(ip-base);
    /* btLow: oldest index still representable in the rolling bt buffer */
    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;
    U32 matchEndIdx = curr+8+1;
    size_t bestLength = 8;
    U32 nbCompares = 1U << cParams->searchLog;   /* bounds the descent depth */
#ifdef ZSTD_C_PREDICT
    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
    predictedSmall += (predictedSmall>0);
    predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */
    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = curr;   /* Update Hash Table */
    assert(windowLow > 0);
    /* Descend the tree: compare against each candidate, route it onto the
     * smaller or larger chain, and follow the opposite child. */
    while (nbCompares-- && (matchIndex >= windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < curr);
#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
        if (matchIndex == predictedSmall) {
            /* no need to check length, result known */
            *smallerPtr = matchIndex;
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
            continue;
        }
        if (matchIndex == predictedLarge) {
            *largerPtr = matchIndex;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
            continue;
        }
#endif
        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
            match = base + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            /* candidate starts in the external-dictionary segment */
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }
        if (matchLength > bestLength) {
            bestLength = matchLength;
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
        }
        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
        }
        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }
    /* terminate both chains */
    *smallerPtr = *largerPtr = 0;
    {   U32 positions = 0;
        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
        assert(matchEndIdx > curr + 8);
        return MAX(positions, matchEndIdx - (curr + 8));
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movl %r8d, 0x30(%rsp)
movl %ecx, %eax
movq 0x60(%rdi), %r13
movl 0xf8(%rdi), %ecx
addl $-0x5, %eax
cmpl $0x3, %eax
ja 0x607c13
leaq 0xcec13(%rip), %r8 # 0x6d6490
movslq (%r8,%rax,4), %rax
addq %r8, %rax
jmpq *%rax
movabsq $-0x30e44323485a9b9d, %rax # imm = 0xCF1BBCDCB7A56463
addq $0x35a9b9d, %rax # imm = 0x35A9B9D
jmp 0x6078c6
movabsq $-0x30e44323485a9b9d, %rax # imm = 0xCF1BBCDCB7A56463
addq $0x7fffe9d, %rax # imm = 0x7FFFE9D
jmp 0x6078c6
movabsq $-0x30e44323485a9b9d, %rax # imm = 0xCF1BBCDCB7A56463
jmp 0x6078c6
movabsq $-0x30e44323485a9b9d, %rax # imm = 0xCF1BBCDCB7A56463
addq $0x7f59b9d, %rax # imm = 0x7F59B9D
imulq (%rsi), %rax
negb %cl
shrq %cl, %rax
movq 0x70(%rdi), %r9
movb 0xf4(%rdi), %cl
decb %cl
movl $0xffffffff, %ebp # imm = 0xFFFFFFFF
movl $0xffffffff, %r12d # imm = 0xFFFFFFFF
shll %cl, %r12d
notl %r12d
movl (%r13,%rax,4), %ebx
movq 0x8(%rdi), %r14
movq 0x10(%rdi), %rcx
movq %rcx, 0x40(%rsp)
movl 0x18(%rdi), %r10d
movl %esi, %r15d
subl %r14d, %r15d
xorl %r11d, %r11d
movl %r15d, %ecx
subl %r12d, %ecx
cmovbl %r11d, %ecx
movl %ecx, 0x8(%rsp)
movl %r15d, %r8d
movl %r12d, 0x2c(%rsp)
andl %r12d, %r8d
addl %r8d, %r8d
movl 0x1c(%rdi), %r12d
movl 0xfc(%rdi), %ecx
movl %r15d, (%r13,%rax,4)
leaq (%r9,%r8,4), %rdi
leal 0x9(%r15), %eax
movl %eax, 0x4(%rsp)
movl %r12d, 0x28(%rsp)
cmpl %r12d, %ebx
jae 0x607958
movq $0x0, (%rdi)
jmp 0x607c65
movq %r15, 0x70(%rsp)
movq 0x40(%rsp), %rax
addq %r10, %rax
movq %rax, 0xa8(%rsp)
shll %cl, %ebp
leaq (%r14,%r10), %rax
movq %rax, 0xa0(%rsp)
leaq (%r9,%r8,4), %r8
addq $0x4, %r8
notl %ebp
leaq -0x7(%rdx), %r12
leaq -0x3(%rdx), %rax
movq %rax, 0x88(%rsp)
leaq -0x1(%rdx), %rax
movq %rax, 0x80(%rsp)
leaq 0x8(%r14), %rax
movq %rax, 0x38(%rsp)
leaq 0x8(%rsi), %rax
movq %rax, 0x78(%rsp)
movl $0x8, %r15d
movq $0x0, 0x68(%rsp)
movq %rdx, 0x60(%rsp)
movq %rsi, 0x58(%rsp)
movq %r9, 0x50(%rsp)
xorl %r9d, %r9d
movq %r14, 0x48(%rsp)
movq %r10, %r14
movq %r12, 0x98(%rsp)
movq %r10, 0x90(%rsp)
movq %r8, 0x18(%rsp)
movq %rdi, 0x20(%rsp)
movl %ebp, 0xc(%rsp)
movq 0x68(%rsp), %rbp
cmpq %rbp, %r9
cmovbq %r9, %rbp
movl %ebx, %eax
leaq (%rax,%rbp), %r11
leaq (%rsi,%rbp), %rdi
cmpl $0x0, 0x30(%rsp)
je 0x607a91
cmpq %r14, %r11
jae 0x607a91
movq %rax, %r13
movq 0x40(%rsp), %rax
movq %r15, 0x10(%rsp)
leaq (%rax,%r13), %r15
leaq (%r15,%rbp), %rsi
movq 0xa8(%rsp), %rcx
movq 0xa0(%rsp), %r8
movq %rbx, 0xb0(%rsp)
movq %r9, %r12
callq 0x607c88
movq %r12, %r9
movq 0x48(%rsp), %rdx
movq 0x90(%rsp), %r14
movq 0xb0(%rsp), %rbx
movq 0x98(%rsp), %r12
addq %rbp, %rax
leaq (%rax,%r13), %rcx
addq %rdx, %r13
cmpq %r14, %rcx
cmovbq %r15, %r13
movq 0x10(%rsp), %r15
movq 0x18(%rsp), %r11
jmp 0x607b64
addq 0x48(%rsp), %rax
movq %rax, %r13
leaq (%rax,%rbp), %rcx
cmpq %rdi, %r12
jbe 0x607af3
movq (%rcx), %rax
movq (%rdi), %rcx
cmpq %rcx, %rax
jne 0x607af8
movq %r9, 0x10(%rsp)
movq 0x38(%rsp), %rax
leaq (%rax,%r11), %rsi
movq 0x78(%rsp), %rax
leaq (%rax,%rbp), %r8
xorl %ecx, %ecx
leaq (%r8,%rcx), %rax
cmpq %r12, %rax
jae 0x607b09
movq (%rsi,%rcx), %r9
movq (%rax), %rax
addq $0x8, %rcx
cmpq %rax, %r9
je 0x607ac6
xorq %r9, %rax
bsfq %rax, %rax
shrl $0x3, %eax
addq %rcx, %rax
movq 0x10(%rsp), %r9
jmp 0x607b02
movq %rdi, %rax
jmp 0x607b16
xorq %rax, %rcx
bsfq %rcx, %rax
shrl $0x3, %eax
movq 0x18(%rsp), %r11
jmp 0x607b61
addq 0x38(%rsp), %rcx
addq %r11, %rcx
movq 0x10(%rsp), %r9
cmpq 0x88(%rsp), %rax
movq 0x18(%rsp), %r11
jae 0x607b33
movl (%rcx), %esi
cmpl (%rax), %esi
jne 0x607b33
addq $0x4, %rax
addq $0x4, %rcx
cmpq 0x80(%rsp), %rax
jae 0x607b4d
movzwl (%rcx), %esi
cmpw (%rax), %si
jne 0x607b4d
addq $0x2, %rax
addq $0x2, %rcx
cmpq %rdx, %rax
jae 0x607b5e
movb (%rcx), %cl
xorl %edx, %edx
cmpb (%rax), %cl
sete %dl
addq %rdx, %rax
subq %rdi, %rax
addq %rbp, %rax
cmpq %r15, %rax
jbe 0x607b92
movl 0x4(%rsp), %ecx
subl %ebx, %ecx
movq %rax, %r15
cmpq %rcx, %rax
movq 0x60(%rsp), %rdx
movq 0x58(%rsp), %rsi
movq 0x50(%rsp), %r8
jbe 0x607ba1
leal (%rbx,%rax), %ecx
movl %ecx, 0x4(%rsp)
movq %rax, %r15
jmp 0x607ba1
movq 0x60(%rsp), %rdx
movq 0x58(%rsp), %rsi
movq 0x50(%rsp), %r8
leaq (%rsi,%rax), %rdi
cmpq %rdx, %rdi
je 0x607c22
movl %ebx, %ecx
andl 0x2c(%rsp), %ecx
addl %ecx, %ecx
leaq (%r8,%rcx,4), %rcx
movb (%r13,%rax), %r8b
cmpb (%rdi), %r8b
jae 0x607be3
movq 0x20(%rsp), %rdi
movl %ebx, (%rdi)
cmpl 0x8(%rsp), %ebx
movl 0xc(%rsp), %ebp
jbe 0x607c33
movq %rcx, %rdi
addq $0x4, %rdi
movq %r11, %rcx
movq %rax, %r9
movq %rdi, %rax
jmp 0x607bfd
movl %ebx, (%r11)
movq %rax, 0x68(%rsp)
movq %rcx, %rax
cmpl 0x8(%rsp), %ebx
movl 0xc(%rsp), %ebp
movq 0x20(%rsp), %rdi
jbe 0x607c2c
addl $-0x1, %ebp
jae 0x607c3b
movl (%rax), %ebx
movq %rcx, %r8
cmpl 0x28(%rsp), %ebx
jae 0x6079ec
jmp 0x607c3b
imull $0x9e3779b1, (%rsi), %eax # imm = 0x9E3779B1
negb %cl
shrl %cl, %eax
jmp 0x6078cf
movq %r11, %rcx
movq 0x20(%rsp), %rdi
jmp 0x607c3b
leaq 0x34(%rsp), %rcx
jmp 0x607c3b
leaq 0x34(%rsp), %rdi
movq %r11, %rcx
xorl %eax, %eax
movl %eax, (%rcx)
leal -0x180(%r15), %ecx
movl $0xc0, %r11d
cmpl %r11d, %ecx
cmovbl %ecx, %r11d
movl %eax, (%rdi)
cmpq $0x181, %r15 # imm = 0x181
cmovbl %eax, %r11d
movq 0x70(%rsp), %r15
movl 0x4(%rsp), %eax
subl %r15d, %eax
addl $-0x8, %eax
cmpl %eax, %r11d
cmoval %r11d, %eax
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_opt.c
|
ZSTD_litLengthPrice
|
/* Estimated cost of encoding a literal-length of `litLength`, used by the
 * optimal parser. Prices are in fractional-bit units (scaled by
 * BITCOST_MULTIPLIER). With no collected statistics (zop_predef) a
 * predefined weight of the raw length is used; otherwise the price is the
 * LL code's extra-bit cost plus the base price, minus a weight derived
 * from the code's observed frequency. `optLevel` selects the weighting
 * accuracy inside WEIGHT(). */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
    /* dynamic statistics */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
             + optPtr->litLengthSumBasePrice
             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
    }
}
|
movl %edi, %eax
cmpl $0x1, 0x50(%rsi)
jne 0x60840c
incl %eax
bsrl %eax, %ecx
xorl $0x1f, %ecx
xorl $0x1f, %ecx
testl %edx, %edx
je 0x60845f
movl %ecx, %edx
shll $0x8, %edx
shll $0x8, %eax
shrl %cl, %eax
addl %edx, %eax
retq
cmpl $0x40, %eax
jb 0x60841c
bsrl %eax, %eax
xorl $-0x20, %eax
addl $0x33, %eax
jmp 0x608429
movl %eax, %eax
leaq 0xce10b(%rip), %rcx # 0x6d6530
movzbl (%rax,%rcx), %eax
movl %eax, %ecx
leaq 0xce06e(%rip), %rax # 0x6d64a0
movl (%rax,%rcx,4), %eax
shll $0x8, %eax
addl 0x44(%rsi), %eax
movq 0x8(%rsi), %rsi
movl (%rsi,%rcx,4), %esi
incl %esi
bsrl %esi, %ecx
xorl $0x1f, %ecx
testl %edx, %edx
je 0x608465
xorl $0x1f, %ecx
shll $0x8, %esi
shrl %cl, %esi
shll $0x8, %ecx
addl %ecx, %esi
negl %esi
jmp 0x608470
shll $0x8, %ecx
movl %ecx, %eax
retq
shll $0x8, %ecx
addl $0xffffe100, %ecx # imm = 0xFFFFE100
movl %ecx, %esi
addl %esi, %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_opt.c
|
ZSTD_initDCtx_internal
|
/* One-time initialization of a freshly created DCtx: clears dictionary
 * references and streaming-buffer state, probes the CPU once for BMI2
 * support, and resets the advanced decompression parameters to their
 * defaults via ZSTD_DCtx_resetParameters(). */
static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
{
    dctx->staticSize  = 0;
    dctx->ddict       = NULL;
    dctx->ddictLocal  = NULL;
    dctx->dictEnd     = NULL;
    dctx->ddictIsCold = 0;
    dctx->dictUses = ZSTD_dont_use;
    dctx->inBuff      = NULL;
    dctx->inBuffSize  = 0;
    dctx->outBuffSize = 0;
    dctx->streamStage = zdss_init;
    dctx->legacyContext = NULL;
    dctx->previousLegacyVersion = 0;
    dctx->noForwardProgress = 0;
    dctx->oversizedDuration = 0;
    /* cached so the hot decode loops can branch on it cheaply */
    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    dctx->ddictSet = NULL;
    ZSTD_DCtx_resetParameters(dctx);
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    dctx->dictContentEndForFuzzing = NULL;
#endif
}
|
pushq %rbx
xorl %esi, %esi
movq %rsi, 0x75d0(%rdi)
movq %rsi, 0x74d8(%rdi)
movq %rsi, 0x75f4(%rdi)
movq %rsi, 0x7638(%rdi)
movq %rsi, 0x7658(%rdi)
movl %esi, 0x7660(%rdi)
movl %esi, 0x766c(%rdi)
movq %rsi, 0x276c8(%rdi)
xorl %eax, %eax
cpuid
xorps %xmm0, %xmm0
movups %xmm0, 0x75e0(%rdi)
movups %xmm0, 0x760c(%rdi)
movl %esi, 0x761c(%rdi)
movl $0x0, %ebx
testl %eax, %eax
je 0x608746
cmpl $0x7, %eax
jb 0x608746
movl $0x7, %eax
xorl %ecx, %ecx
cpuid
shrl $0x8, %ebx
andl $0x1, %ebx
movl %ebx, 0x75d8(%rdi)
movq %rsi, 0x7600(%rdi)
movq %rsi, 0x7590(%rdi)
movq $0x8000001, 0x7628(%rdi) # imm = 0x8000001
movl %esi, 0x7670(%rdi)
movl %esi, 0x7608(%rdi)
popq %rbx
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_DCtx_trace_end
|
/* Emit a decompression-end trace event. Only active when compiled with
 * ZSTD_TRACE and both a trace context and the weak trace hook are present;
 * otherwise compiles to a no-op that just silences unused-parameter
 * warnings. The trace record carries the library version, streaming flag,
 * dictionary id/size (when a ddict is attached), and the frame's
 * uncompressed/compressed byte counts. */
static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
{
#if ZSTD_TRACE
    if (dctx->traceCtx && ZSTD_trace_decompress_end != NULL) {
        ZSTD_Trace trace;
        ZSTD_memset(&trace, 0, sizeof(trace));
        trace.version = ZSTD_VERSION_NUMBER;
        trace.streaming = streaming;
        if (dctx->ddict) {
            trace.dictionaryID = ZSTD_getDictID_fromDDict(dctx->ddict);
            trace.dictionarySize = ZSTD_DDict_dictSize(dctx->ddict);
            trace.dictionaryIsCold = dctx->ddictIsCold;
        }
        trace.uncompressedSize = (size_t)uncompressedSize;
        trace.compressedSize = (size_t)compressedSize;
        trace.dctx = dctx;
        ZSTD_trace_decompress_end(dctx->traceCtx, &trace);
    }
#else
    (void)dctx;
    (void)uncompressedSize;
    (void)compressedSize;
    (void)streaming;
#endif
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x40, %rsp
movq %rdx, %rbx
movq %rdi, %r14
movq 0x276d0(%rdi), %rdi
testq %rdi, %rdi
sete %al
cmpq $0x0, 0x2544a3(%rip) # 0x85df30
sete %dl
orb %al, %dl
jne 0x609b03
movq %rsi, %r15
xorps %xmm0, %xmm0
movups %xmm0, 0x28(%rsp)
movups %xmm0, 0x18(%rsp)
movups %xmm0, 0x8(%rsp)
movl $0x2904, (%rsp) # imm = 0x2904
movl %ecx, 0x4(%rsp)
movq 0x75e8(%r14), %rax
testq %rax, %rax
je 0x609aef
movq %rax, %rdi
callq 0x614f58
movl %eax, 0x8(%rsp)
movq 0x75e8(%r14), %rdi
callq 0x614b31
movq %rax, 0x10(%rsp)
movl 0x75f4(%r14), %eax
movl %eax, 0xc(%rsp)
movq 0x276d0(%r14), %rdi
movq %rsp, %rsi
movq %r15, 0x18(%rsi)
movq %rbx, 0x20(%rsi)
movq %r14, 0x38(%rsi)
callq 0x41ea8
addq $0x40, %rsp
popq %rbx
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_decompressBegin
|
/* Reset `dctx` to begin decompressing a new frame: re-arms the expected
 * input length from dctx->format, rewinds the stage machine to
 * ZSTDds_getFrameHeaderSize, clears frame progress counters and window
 * pointers, and restores the built-in entropy tables and initial repcodes.
 * Always returns 0. */
size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
{
    assert(dctx != NULL);
#if ZSTD_TRACE
    /* start a trace span if the weak trace hook is linked in */
    dctx->traceCtx = (ZSTD_trace_decompress_begin != NULL) ? ZSTD_trace_decompress_begin(dctx) : 0;
#endif
    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */
    dctx->stage = ZSTDds_getFrameHeaderSize;
    dctx->processedCSize = 0;
    dctx->decodedSize = 0;
    dctx->previousDstEnd = NULL;
    dctx->prefixStart = NULL;
    dctx->virtualStart = NULL;
    dctx->dictEnd = NULL;
    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
    dctx->litEntropy = dctx->fseEntropy = 0;
    dctx->dictID = 0;
    dctx->bType = bt_reserved;
    ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
    ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
    /* point the active table pointers at the context's own entropy tables */
    dctx->LLTptr = dctx->entropy.LLTable;
    dctx->MLTptr = dctx->entropy.MLTable;
    dctx->OFTptr = dctx->entropy.OFTable;
    dctx->HUFptr = dctx->entropy.hufTable;
    return 0;
}
|
pushq %rbx
movq %rdi, %rbx
cmpq $0x0, 0x2541c8(%rip) # 0x85df28
je 0x609d6c
movq %rbx, %rdi
callq 0x41ea0
jmp 0x609d6e
xorl %eax, %eax
movq %rax, 0x276d0(%rbx)
xorl %eax, %eax
cmpl $0x0, 0x7590(%rbx)
sete %al
leaq 0x1(,%rax,4), %rax
movq %rax, 0x74e0(%rbx)
leaq 0x20(%rbx), %rax
leaq 0x2838(%rbx), %rcx
xorps %xmm0, %xmm0
movups %xmm0, 0x74c0(%rbx)
movups %xmm0, 0x74d0(%rbx)
movups %xmm0, 0x7510(%rbx)
movl $0xc00000c, 0x2838(%rbx) # imm = 0xC00000C
movl $0x0, 0x75f0(%rbx)
movss 0xcb4a1(%rip), %xmm0 # 0x6d5270
movups %xmm0, 0x7520(%rbx)
movabsq $0x400000001, %rdx # imm = 0x400000001
movq %rdx, 0x683c(%rbx)
movl $0x8, 0x6844(%rbx)
movq %rax, (%rbx)
leaq 0x1830(%rbx), %rax
movq %rax, 0x8(%rbx)
leaq 0x1028(%rbx), %rax
movq %rax, 0x10(%rbx)
movq %rcx, 0x18(%rbx)
xorl %eax, %eax
popq %rbx
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_decompressBegin_usingDict
|
/* ZSTD_decompressBegin() followed by loading `dict` into the context.
 * A NULL dict or zero dictSize is accepted and simply skips the load.
 * Returns 0 on success; propagates errors from ZSTD_decompressBegin(),
 * and reports dictionary_corrupted when the dictionary fails to parse. */
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
    if (dict && dictSize)
        RETURN_ERROR_IF(
            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
            dictionary_corrupted, "");
    return 0;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
callq 0x609d54
testq %r14, %r14
sete %al
testq %rbx, %rbx
sete %cl
orb %al, %cl
jne 0x609ebc
movq %r14, %rax
cmpq $0x8, %rbx
jb 0x609e89
cmpl $0xec30a437, (%r14) # imm = 0xEC30A437
movq %r14, %rax
jne 0x609e89
movl 0x4(%r14), %eax
movl %eax, 0x75f0(%r15)
leaq 0x20(%r15), %rdi
movq %r14, %rsi
movq %rbx, %rdx
callq 0x609b0d
movq $-0x1e, %rcx
cmpq $-0x78, %rax
ja 0x609ebe
addq %r14, %rax
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x7528(%r15)
movq 0x74c0(%r15), %rcx
movq 0x74c8(%r15), %rdx
movq %rcx, 0x74d8(%r15)
subq %rcx, %rdx
addq %rax, %rdx
movq %rdx, 0x74d0(%r15)
movq %rax, 0x74c8(%r15)
addq %rbx, %r14
movq %r14, 0x74c0(%r15)
xorl %ecx, %ecx
movq %rcx, %rax
popq %rbx
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_initDStream
|
/* Convenience wrapper: (re)initialize the stream with no dictionary.
 * Delegates to ZSTD_initDStream_usingDDict(zds, NULL) and returns its
 * result unchanged. */
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
    DEBUGLOG(4, "ZSTD_initDStream");
    return ZSTD_initDStream_usingDDict(zds, NULL);
}
|
pushq %rbp
pushq %rbx
pushq %rax
movq %rdi, %rbx
xorl %ebp, %ebp
movl %ebp, 0x760c(%rdi)
movl %ebp, 0x766c(%rdi)
movq 0x75e0(%rdi), %rdi
callq 0x614dda
movl %ebp, 0x75f8(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x75e0(%rbx)
xorl %eax, %eax
cmpl %ebp, 0x7590(%rbx)
sete %al
leaq 0x1(,%rax,4), %rax
addq $0x8, %rsp
popq %rbx
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_DCtx_refDDict
|
/* Attach a pre-digested dictionary (`ddict`) to the context for all
 * subsequent frames. Must be called before streaming starts
 * (streamStage == zdss_init), otherwise fails with stage_wrong.
 * A NULL ddict just clears any existing dictionary. When multiple-ddict
 * referencing is enabled, the ddict is also recorded in the context's
 * hash set (lazily allocated here). Returns 0 or a zstd error code. */
size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
    ZSTD_clearDict(dctx);
    if (ddict) {
        dctx->ddict = ddict;
        dctx->dictUses = ZSTD_use_indefinitely;
        if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
            if (dctx->ddictSet == NULL) {
                /* lazily create the hash set on first multi-ddict attach */
                dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
                if (!dctx->ddictSet) {
                    RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
                }
            }
            assert(!dctx->staticSize);  /* Impossible: ddictSet cannot have been allocated if static dctx */
            FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
        }
    }
    return 0;
}
|
movq $-0x3c, %rax
cmpl $0x0, 0x760c(%rdi)
je 0x60a249
retq
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x60, %rsp
movq %rsi, %rbx
movq %rdi, %r15
movq 0x75e0(%rdi), %rdi
callq 0x614dda
movl $0x0, 0x75f8(%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0x75e0(%r15)
testq %rbx, %rbx
je 0x60a40e
movq %rbx, 0x75e8(%r15)
movl $0xffffffff, 0x75f8(%r15) # imm = 0xFFFFFFFF
cmpl $0x1, 0x7608(%r15)
jne 0x60a40e
movq 0x7600(%r15), %r14
testq %r14, %r14
jne 0x60a337
movq 0x75b8(%r15), %rax
movq %rax, 0x30(%rsp)
movups 0x75a8(%r15), %xmm0
movaps %xmm0, 0x20(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
movaps 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movl $0x18, %edi
callq 0x58c8ff
movq %rax, %r14
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
movaps 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movl $0x200, %edi # imm = 0x200
callq 0x58c91f
movq %rax, (%r14)
movq $0x40, 0x8(%r14)
movq $0x0, 0x10(%r14)
movq %r14, %rcx
testq %rax, %rax
cmoveq %rax, %rcx
movq %rcx, 0x7600(%r15)
je 0x60a41e
movq 0x75b8(%r15), %rax
movq %rax, 0x50(%rsp)
movups 0x75a8(%r15), %xmm0
movaps %xmm0, 0x40(%rsp)
movq 0x8(%r14), %r12
movq 0x10(%r14), %rax
shlq $0x2, %rax
cmpq %rax, %r12
ja 0x60a3fd
addq $0x75a8, %r15 # imm = 0x75A8
movq 0x10(%r15), %rax
movq %rax, 0x30(%rsp)
movups (%r15), %xmm0
movaps %xmm0, 0x20(%rsp)
movq %r12, %rdi
shlq $0x4, %rdi
movq 0x50(%rsp), %rax
movq %rax, 0x10(%rsp)
movaps 0x40(%rsp), %xmm0
movups %xmm0, (%rsp)
callq 0x58c91f
testq %rax, %rax
je 0x60a41e
movq (%r14), %r15
movq 0x8(%r14), %r13
addq %r12, %r12
movq %rax, (%r14)
movq %r12, 0x8(%r14)
movq $0x0, 0x10(%r14)
testq %r13, %r13
je 0x60a3e2
xorl %r12d, %r12d
movq (%r15,%r12,8), %rsi
testq %rsi, %rsi
je 0x60a3da
movq %r14, %rdi
callq 0x60b1b0
cmpq $-0x78, %rax
ja 0x60a427
incq %r12
cmpq %r12, %r13
jne 0x60a3c3
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
movaps 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r15, %rdi
callq 0x58c957
movq %r14, %rdi
movq %rbx, %rsi
callq 0x60b1b0
cmpq $-0x78, %rax
ja 0x60a410
xorl %eax, %eax
addq $0x60, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq $-0x40, %rax
jmp 0x60a410
movq $-0x1, %rax
jmp 0x60a410
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_dParam_getBounds
|
/* Report the valid [lowerBound, upperBound] range for a decompression
 * parameter. For a recognized `dParam` the returned bounds have error == 0
 * (from the zero-initializer); an unknown parameter yields
 * bounds.error = ERROR(parameter_unsupported) with zeroed bounds. */
ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
{
    ZSTD_bounds bounds = { 0, 0, 0 };
    switch(dParam) {
        case ZSTD_d_windowLogMax:
            bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
            bounds.upperBound = ZSTD_WINDOWLOG_MAX;
            return bounds;
        case ZSTD_d_format:
            bounds.lowerBound = (int)ZSTD_f_zstd1;
            bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
            ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
            return bounds;
        case ZSTD_d_stableOutBuffer:
            bounds.lowerBound = (int)ZSTD_bm_buffered;
            bounds.upperBound = (int)ZSTD_bm_stable;
            return bounds;
        case ZSTD_d_forceIgnoreChecksum:
            bounds.lowerBound = (int)ZSTD_d_validateChecksum;
            bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
            return bounds;
        case ZSTD_d_refMultipleDDicts:
            bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
            bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
            return bounds;
        default:;
    }
    bounds.error = ERROR(parameter_unsupported);
    return bounds;
}
|
leal -0x3e8(%rdi), %eax
cmpl $0x3, %eax
ja 0x60a4b2
leaq 0xcc274(%rip), %rcx # 0x6d6710
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movabsq $0x100000000, %rdx # imm = 0x100000000
xorl %eax, %eax
retq
cmpl $0x64, %edi
jne 0x60a4c3
movabsq $0x1f0000000a, %rdx # imm = 0x1F0000000A
jmp 0x60a4af
movq $-0x28, %rax
xorl %edx, %edx
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_DCtx_setFormat
|
/* Thin wrapper kept for API compatibility: set the frame-format parameter
 * by delegating to ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format)
 * and return its result. */
size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
{
    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
}
|
movq $-0x3c, %rax
cmpl $0x0, 0x760c(%rdi)
je 0x60a4de
retq
movq $-0x2a, %rax
cmpl $0x1, %esi
ja 0x60a4dd
movl %esi, 0x7590(%rdi)
xorl %eax, %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_decompressStream
|
size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
const char* const src = (const char*)input->src;
const char* const istart = input->pos != 0 ? src + input->pos : src;
const char* const iend = input->size != 0 ? src + input->size : src;
const char* ip = istart;
char* const dst = (char*)output->dst;
char* const ostart = output->pos != 0 ? dst + output->pos : dst;
char* const oend = output->size != 0 ? dst + output->size : dst;
char* op = ostart;
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
RETURN_ERROR_IF(
input->pos > input->size,
srcSize_wrong,
"forbidden. in: pos: %u vs size: %u",
(U32)input->pos, (U32)input->size);
RETURN_ERROR_IF(
output->pos > output->size,
dstSize_tooSmall,
"forbidden. out: pos: %u vs size: %u",
(U32)output->pos, (U32)output->size);
DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
while (someMoreWork) {
switch(zds->streamStage)
{
case zdss_init :
DEBUGLOG(5, "stage zdss_init => transparent reset ");
zds->streamStage = zdss_loadHeader;
zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
zds->legacyVersion = 0;
zds->hostageByte = 0;
zds->expectedOutBuffer = *output;
/* fall-through */
case zdss_loadHeader :
DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
if (zds->legacyVersion) {
RETURN_ERROR_IF(zds->staticSize, memory_allocation,
"legacy support is incompatible with static dctx");
{ size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
if (hint==0) zds->streamStage = zdss_init;
return hint;
} }
#endif
{ size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
if (zds->refMultipleDDicts && zds->ddictSet) {
ZSTD_DCtx_selectFrameDDict(zds);
}
DEBUGLOG(5, "header size : %u", (U32)hSize);
if (ZSTD_isError(hSize)) {
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
if (legacyVersion) {
ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);
const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;
size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;
DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
RETURN_ERROR_IF(zds->staticSize, memory_allocation,
"legacy support is incompatible with static dctx");
FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
zds->previousLegacyVersion, legacyVersion,
dict, dictSize), "");
zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
{ size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */
return hint;
} }
#endif
return hSize; /* error */
}
if (hSize != 0) { /* need more input */
size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
size_t const remainingInput = (size_t)(iend-ip);
assert(iend >= ip);
if (toLoad > remainingInput) { /* not enough input to load full header */
if (remainingInput > 0) {
ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
zds->lhSize += remainingInput;
}
input->pos = input->size;
return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
assert(ip != NULL);
ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
break;
} }
/* check for single-pass mode opportunity */
if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& zds->fParams.frameType != ZSTD_skippableFrame
&& (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
if (cSize <= (size_t)(iend-istart)) {
/* shortcut : using single-pass mode */
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
if (ZSTD_isError(decompressedSize)) return decompressedSize;
DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
ip = istart + cSize;
op += decompressedSize;
zds->expected = 0;
zds->streamStage = zdss_init;
someMoreWork = 0;
break;
} }
/* Check output buffer is large enough for ZSTD_odm_stable. */
if (zds->outBufferMode == ZSTD_bm_stable
&& zds->fParams.frameType != ZSTD_skippableFrame
&& zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small");
}
/* Consume header (see ZSTDds_decodeFrameHeader) */
DEBUGLOG(4, "Consume header");
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
zds->stage = ZSTDds_skipFrame;
} else {
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
zds->expected = ZSTD_blockHeaderSize;
zds->stage = ZSTDds_decodeBlockHeader;
}
/* control buffer memory usage */
DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
(U32)(zds->fParams.windowSize >>10),
(U32)(zds->maxWindowSize >> 10) );
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
frameParameter_windowTooLarge, "");
/* Adapt buffer sizes to frame header instructions */
{ size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
: 0;
ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
{ int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
if (tooSmall || tooLarge) {
size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
DEBUGLOG(4, "inBuff : from %u to %u",
(U32)zds->inBuffSize, (U32)neededInBuffSize);
DEBUGLOG(4, "outBuff : from %u to %u",
(U32)zds->outBuffSize, (U32)neededOutBuffSize);
if (zds->staticSize) { /* static DCtx */
DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
RETURN_ERROR_IF(
bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
memory_allocation, "");
} else {
ZSTD_customFree(zds->inBuff, zds->customMem);
zds->inBuffSize = 0;
zds->outBuffSize = 0;
zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
}
zds->inBuffSize = neededInBuffSize;
zds->outBuff = zds->inBuff + zds->inBuffSize;
zds->outBuffSize = neededOutBuffSize;
} } }
zds->streamStage = zdss_read;
/* fall-through */
case zdss_read:
DEBUGLOG(5, "stage zdss_read");
{ size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
if (neededInSize==0) { /* end of frame */
zds->streamStage = zdss_init;
someMoreWork = 0;
break;
}
if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
ip += neededInSize;
/* Function modifies the stage so we must break */
break;
} }
if (ip==iend) { someMoreWork = 0; break; } /* no more input */
zds->streamStage = zdss_load;
/* fall-through */
case zdss_load:
{ size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
size_t const toLoad = neededInSize - zds->inPos;
int const isSkipFrame = ZSTD_isSkipFrame(zds);
size_t loadedSize;
/* At this point we shouldn't be decompressing a block that we can stream. */
assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
corruption_detected,
"should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
}
ip += loadedSize;
zds->inPos += loadedSize;
if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
/* decode loaded input */
zds->inPos = 0; /* input is consumed */
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
/* Function modifies the stage so we must break */
break;
}
case zdss_flush:
{ size_t const toFlushSize = zds->outEnd - zds->outStart;
size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
op += flushedSize;
zds->outStart += flushedSize;
if (flushedSize == toFlushSize) { /* flush completed */
zds->streamStage = zdss_read;
if ( (zds->outBuffSize < zds->fParams.frameContentSize)
&& (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
(int)(zds->outBuffSize - zds->outStart),
(U32)zds->fParams.blockSizeMax);
zds->outStart = zds->outEnd = 0;
}
break;
} }
/* cannot complete flush */
someMoreWork = 0;
break;
default:
assert(0); /* impossible */
RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
} }
/* result */
input->pos = (size_t)(ip - (const char*)(input->src));
output->pos = (size_t)(op - (char*)(output->dst));
/* Update the expected output buffer for ZSTD_obm_stable. */
zds->expectedOutBuffer = *output;
if ((ip==istart) && (op==ostart)) { /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
assert(0);
}
} else {
zds->noForwardProgress = 0;
}
{ size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
if (!nextSrcSizeHint) { /* frame fully decoded */
if (zds->outEnd == zds->outStart) { /* output fully flushed */
if (zds->hostageByte) {
if (input->pos >= input->size) {
/* can't release hostage (not present) */
zds->streamStage = zdss_read;
return 1;
}
input->pos++; /* release hostage */
} /* zds->hostageByte */
return 0;
} /* zds->outEnd == zds->outStart */
if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
zds->hostageByte=1;
}
return 1;
} /* nextSrcSizeHint==0 */
nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
assert(zds->inPos <= nextSrcSizeHint);
nextSrcSizeHint -= zds->inPos; /* part already loaded*/
return nextSrcSizeHint;
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rdi, %rbx
movq 0x10(%rdx), %rax
movq (%rdx), %r10
movq 0x8(%rdx), %rcx
movq (%rsi), %r15
movq 0x8(%rsi), %r8
movq 0x10(%rsi), %rdi
leaq (%r15,%rdi), %r9
movq %r9, 0x28(%rsp)
movq $-0x48, %r14
movq %rcx, %r11
subq %rax, %r11
jb 0x60ae51
cmpq %r8, %rdi
jbe 0x60a731
movq $-0x46, %r14
jmp 0x60ae51
movq %r11, 0xa0(%rsp)
movq %r9, 0x60(%rsp)
movq %rsi, 0x48(%rsp)
movq %rdi, 0x80(%rsp)
movq %rax, 0x90(%rsp)
movq %rdx, 0x40(%rsp)
cmpl $0x1, 0x7670(%rbx)
jne 0x60a7a0
cmpl $0x0, 0x760c(%rbx)
je 0x60a7a0
movq $-0x68, %r14
cmpq %r15, 0x7678(%rbx)
jne 0x60ae51
movq 0x80(%rsp), %rax
cmpq %rax, 0x7688(%rbx)
jne 0x60ae51
cmpq %r8, 0x7680(%rbx)
jne 0x60ae51
movq 0x90(%rsp), %rax
addq %r10, %rax
addq %rcx, %r10
movq %r10, 0x30(%rsp)
addq %r8, %r15
leaq 0x7678(%rbx), %rdx
movq %rdx, 0x88(%rsp)
leaq 0x7640(%rbx), %rdx
movq %rdx, 0x78(%rsp)
leaq 0x74e8(%rbx), %rdx
movq %rdx, 0x68(%rsp)
leaq 0x276b0(%rbx), %rdx
movq %rdx, 0x70(%rsp)
leaq 0x75a8(%rbx), %rdx
movq %rdx, 0x98(%rsp)
movq %rax, 0x38(%rsp)
movq %rax, 0x20(%rsp)
movq %rcx, 0x58(%rsp)
movq %r8, 0x50(%rsp)
movq 0x28(%rsp), %rbp
movl 0x760c(%rbx), %eax
cmpl $0x4, %eax
jne 0x60a8b2
movq 0x7640(%rbx), %rsi
movq 0x7648(%rbx), %r14
subq %rsi, %r14
movq %r15, %r13
subq %rbp, %r13
cmpq %r14, %r13
movq %r14, %r12
cmovbq %r13, %r12
testq %r12, %r12
je 0x60a85f
addq 0x7630(%rbx), %rsi
movq %rbp, %rdi
movq %r12, %rdx
callq 0x3f250
movq 0x7640(%rbx), %rsi
addq %r12, %rbp
addq %r12, %rsi
movq 0x78(%rsp), %rax
movq %rsi, (%rax)
cmpq %r13, %r14
ja 0x60ad8b
movl $0x2, 0x760c(%rbx)
movq 0x7638(%rbx), %rax
cmpq 0x74e8(%rbx), %rax
jae 0x60a811
movl 0x74f8(%rbx), %ecx
addq %rcx, %rsi
cmpq %rax, %rsi
jbe 0x60a811
movq 0x78(%rsp), %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
jmp 0x60a811
jae 0x60ad7a
movl %eax, %eax
leaq 0xcbe7f(%rip), %rcx # 0x6d6740
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq %rbp, 0x28(%rsp)
movl $0x1, 0x760c(%rbx)
xorl %ecx, %ecx
movq %rcx, 0x7620(%rbx)
movq %rcx, 0x7664(%rbx)
movq 0x78(%rsp), %rax
movq %rcx, 0x10(%rax)
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movq 0x48(%rsp), %rcx
movq 0x10(%rcx), %rax
movq 0x88(%rsp), %rdx
movq %rax, 0x10(%rdx)
movups (%rcx), %xmm0
movups %xmm0, (%rdx)
xorl %edx, %edx
jmp 0x60a94e
movq %rbp, 0x28(%rsp)
movq 0x20(%rsp), %r13
jmp 0x60ac28
movq %rbp, 0x28(%rsp)
movq 0x74e0(%rbx), %rbp
movl 0x7524(%rbx), %eax
movq 0x20(%rsp), %r13
jmp 0x60ac7c
movq %rbp, 0x28(%rsp)
movq 0x7650(%rbx), %rdx
movl 0x7590(%rbx), %ecx
movq 0x68(%rsp), %rdi
movq 0x70(%rsp), %rsi
callq 0x6089b4
movq %rax, %r14
cmpl $0x0, 0x7608(%rbx)
je 0x60a981
cmpq $0x0, 0x7600(%rbx)
je 0x60a981
movq %rbx, %rdi
callq 0x60afef
cmpq $-0x78, %r14
ja 0x60ae51
testq %r14, %r14
je 0x60a9d7
movq 0x7650(%rbx), %rdi
movq %r14, %rbp
subq %rdi, %rbp
movq 0x30(%rsp), %r12
movq 0x20(%rsp), %rsi
subq %rsi, %r12
cmpq %r12, %rbp
ja 0x60aef4
addq 0x70(%rsp), %rdi
movq %rsi, %r12
movq %rbp, %rdx
callq 0x3f250
movq %r14, 0x7650(%rbx)
addq %rbp, %r12
movq %r12, 0x20(%rsp)
jmp 0x60a80c
movq 0x68(%rsp), %rax
movq (%rax), %rax
cmpq $-0x1, %rax
je 0x60aa1c
cmpl $0x1, 0x74fc(%rbx)
je 0x60aa1c
movq 0x28(%rsp), %r13
movq %r15, %rbp
subq %r13, %rbp
cmpq %rax, %rbp
jb 0x60aa1c
movq 0x38(%rsp), %rdi
movq 0xa0(%rsp), %r14
movq %r14, %rsi
callq 0x608d2f
cmpq %r14, %rax
jbe 0x60af8e
cmpl $0x1, 0x7670(%rbx)
jne 0x60aa4d
cmpl $0x1, 0x74fc(%rbx)
je 0x60aa4d
movq 0x68(%rsp), %rax
movq (%rax), %rax
cmpq $-0x1, %rax
je 0x60aa4d
movq %r15, %rcx
subq 0x28(%rsp), %rcx
cmpq %rax, %rcx
jb 0x60a725
movq %rbx, %rdi
callq 0x6093d3
movq %rbx, %rdi
movq %rax, %rsi
callq 0x609ec7
movl 0x276b0(%rbx), %eax
andl $-0x10, %eax
cmpl $0x184d2a50, %eax # imm = 0x184D2A50
jne 0x60aa7d
movl 0x276b4(%rbx), %eax
movl $0x7, %ecx
jmp 0x60aaa5
movq 0x7650(%rbx), %rdx
movq %rbx, %rdi
movq 0x70(%rsp), %rsi
callq 0x6099b0
cmpq $-0x78, %rax
ja 0x60af86
movl $0x2, %ecx
movl $0x3, %eax
movq %rax, 0x74e0(%rbx)
movl %ecx, 0x7524(%rbx)
movq 0x74f0(%rbx), %rax
cmpq $0x401, %rax # imm = 0x401
movl $0x400, %ecx # imm = 0x400
cmovbq %rcx, %rax
movq %rax, 0x74f0(%rbx)
cmpq 0x7628(%rbx), %rax
ja 0x60af6e
movl 0x74f8(%rbx), %r14d
cmpl $0x5, %r14d
movl $0x4, %ecx
cmovbl %ecx, %r14d
cmpl $0x0, 0x7670(%rbx)
je 0x60aafe
xorl %r12d, %r12d
jmp 0x60ab20
movq 0x68(%rsp), %rcx
movq (%rcx), %r12
movl $0x20000, %ecx # imm = 0x20000
cmpq %rcx, %rax
cmovbq %rax, %rcx
addq %rcx, %rax
addq $0x40, %rax
cmpq %r12, %rax
cmovbq %rax, %r12
movq 0x7618(%rbx), %rcx
movq 0x7638(%rbx), %rax
leaq (%rax,%rcx), %rdx
leaq (%r12,%r14), %rbp
leaq (,%rbp,2), %rsi
addq %rbp, %rsi
cmpq %rsi, %rdx
jae 0x60ab4a
xorl %edx, %edx
jmp 0x60ab54
movq 0x276c8(%rbx), %rdx
incq %rdx
movq 0x20(%rsp), %r13
movq %rdx, 0x276c8(%rbx)
cmpq %r14, %rcx
jb 0x60ab7d
cmpq %r12, %rax
setb %al
cmpq $0x7f, %rdx
seta %cl
orb %al, %cl
cmpb $0x1, %cl
jne 0x60ac1e
movq 0x75d0(%rbx), %rax
testq %rax, %rax
je 0x60aba1
addq $-0x276d8, %rax # imm = 0xFFFD8928
cmpq %rax, %rbp
ja 0x60af7a
movq 0x7610(%rbx), %rax
jmp 0x60ac06
movq 0x7610(%rbx), %rdi
movq 0x98(%rsp), %r13
movq 0x10(%r13), %rax
movq %rax, 0x10(%rsp)
movups (%r13), %xmm0
movups %xmm0, (%rsp)
callq 0x58c957
xorl %eax, %eax
movq %rax, 0x7618(%rbx)
movq %rax, 0x7638(%rbx)
movq 0x10(%r13), %rax
movq %rax, 0x10(%rsp)
movups (%r13), %xmm0
movups %xmm0, (%rsp)
movq %rbp, %rdi
callq 0x58c8ff
movq 0x20(%rsp), %r13
movq %rax, 0x7610(%rbx)
testq %rax, %rax
je 0x60af7a
movq %r14, 0x7618(%rbx)
addq %r14, %rax
movq %rax, 0x7630(%rbx)
movq %r12, 0x7638(%rbx)
movl $0x2, 0x760c(%rbx)
movq 0x30(%rsp), %rcx
subq %r13, %rcx
movl 0x7524(%rbx), %eax
leal -0x3(%rax), %edx
cmpl $0x2, %edx
jae 0x60ac4b
cmpl $0x0, 0x7520(%rbx)
je 0x60ad5c
movq 0x74e0(%rbx), %rbp
movq %rbp, %r12
testq %r12, %r12
je 0x60aec7
cmpq %r12, %rcx
jae 0x60ace8
cmpq 0x30(%rsp), %r13
je 0x60af5f
movl $0x3, 0x760c(%rbx)
movq 0x7620(%rbx), %rdi
movq %rbp, %r14
subq %rdi, %r14
cmpl $0x7, %eax
jne 0x60ac9f
movq 0x30(%rsp), %r12
subq %r13, %r12
cmpq %r12, %r14
cmovbq %r14, %r12
jmp 0x60ad0e
movq 0x7618(%rbx), %rax
subq %rdi, %rax
cmpq %rax, %r14
ja 0x60af53
movq 0x30(%rsp), %r12
subq %r13, %r12
cmpq %r12, %r14
cmovbq %r14, %r12
testq %r12, %r12
je 0x60ad0b
addq 0x7610(%rbx), %rdi
movq 0x20(%rsp), %rsi
movq %r12, %rdx
callq 0x3f250
movq 0x20(%rsp), %r13
movq 0x7620(%rbx), %rdi
jmp 0x60ad0e
movq %rbx, %rdi
leaq 0x28(%rsp), %rsi
movq %r15, %rdx
movq %r13, %rcx
movq %r12, %r8
callq 0x60b0a5
movq %rax, %r14
addq %r12, %r13
movq %r13, 0x20(%rsp)
jmp 0x60ad4d
xorl %r12d, %r12d
addq %r12, %r13
movq %r13, 0x20(%rsp)
addq %r12, %rdi
movq %rdi, 0x7620(%rbx)
cmpq %r14, %r12
jb 0x60ad90
movq $0x0, 0x7620(%rbx)
movq 0x7610(%rbx), %rcx
movq %rbx, %rdi
leaq 0x28(%rsp), %rsi
movq %r15, %rdx
movq %rbp, %r8
callq 0x60b0a5
movq %rax, %r14
cmpq $-0x77, %rax
jb 0x60a80c
jmp 0x60ae51
cmpq $0x1, %rcx
movq %rcx, %r12
adcq $0x0, %r12
movq 0x74e0(%rbx), %rbp
cmpq %rbp, %r12
cmovaeq %rbp, %r12
jmp 0x60ac55
movq %rbp, 0x28(%rsp)
movq $-0x1, %r14
jmp 0x60ae51
movq %rbp, 0x28(%rsp)
movq 0x40(%rsp), %rcx
movq 0x58(%rsp), %rdx
movq 0x50(%rsp), %rsi
movq 0x48(%rsp), %rdi
movq 0x60(%rsp), %r8
movq 0x38(%rsp), %r15
movq 0x20(%rsp), %r9
movq %r9, %rax
subq (%rcx), %rax
movq %rax, 0x10(%rcx)
movq 0x28(%rsp), %rax
movq (%rdi), %rcx
xorq %rax, %r8
subq %rcx, %rax
movq %rax, 0x10(%rdi)
movq 0x88(%rsp), %rcx
movq %rax, 0x10(%rcx)
movups (%rdi), %xmm0
movups %xmm0, (%rcx)
xorq %r9, %r15
orq %r15, %r8
jne 0x60ae21
movl 0x766c(%rbx), %eax
leal 0x1(%rax), %ecx
movl %ecx, 0x766c(%rbx)
cmpl $0xf, %eax
jl 0x60ae2b
movq $-0x46, %r14
cmpq %rsi, 0x80(%rsp)
je 0x60ae51
movq $-0x48, %r14
cmpq 0x90(%rsp), %rdx
jne 0x60ae2b
jmp 0x60ae51
movl $0x0, 0x766c(%rbx)
movq 0x74e0(%rbx), %r14
testq %r14, %r14
je 0x60ae66
xorl %eax, %eax
cmpl $0x3, 0x7524(%rbx)
sete %al
leaq (%rax,%rax,2), %rax
subq 0x7620(%rbx), %r14
addq %rax, %r14
movq %r14, %rax
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq 0x7648(%rbx), %rcx
movl 0x7668(%rbx), %eax
cmpq 0x7640(%rbx), %rcx
movq 0x40(%rsp), %rcx
jne 0x60ae9b
testl %eax, %eax
je 0x60ae96
movq 0x10(%rcx), %rax
cmpq 0x8(%rcx), %rax
jae 0x60aeb5
incq %rax
movq %rax, 0x10(%rcx)
xorl %r14d, %r14d
jmp 0x60ae51
movl $0x1, %r14d
testl %eax, %eax
jne 0x60ae51
decq 0x10(%rcx)
movl $0x1, 0x7668(%rbx)
jmp 0x60ae51
movl $0x2, 0x760c(%rbx)
movl $0x1, %r14d
jmp 0x60ae51
movq 0x38(%rsp), %r15
movl $0x0, 0x760c(%rbx)
movq 0x40(%rsp), %rcx
movq 0x58(%rsp), %rdx
movq 0x50(%rsp), %rsi
movq 0x48(%rsp), %rdi
movq 0x60(%rsp), %r8
jmp 0x60adae
cmpq %rsi, 0x30(%rsp)
movq 0x40(%rsp), %r15
je 0x60af24
movq 0x70(%rsp), %rax
addq %rdi, %rax
movq %rax, %rdi
movq %r12, %rdx
callq 0x3f250
addq 0x7650(%rbx), %r12
movq %r12, 0x7650(%rbx)
movq %r12, %rdi
movq 0x8(%r15), %rax
movq %rax, 0x10(%r15)
xorl %eax, %eax
cmpl $0x0, 0x7590(%rbx)
sete %al
leaq 0x2(,%rax,4), %rax
cmpq %r14, %rax
cmovaq %rax, %r14
subq %rdi, %r14
addq $0x3, %r14
jmp 0x60ae51
movq $-0x14, %r14
jmp 0x60ae51
movq 0x30(%rsp), %rax
movq %rax, 0x20(%rsp)
jmp 0x60ad90
movq $-0x10, %r14
jmp 0x60ae51
movq $-0x40, %r14
jmp 0x60ae51
movq %rax, %r14
jmp 0x60ae51
movq %rax, %r12
movq %rbx, %rdi
callq 0x6093d3
movq %rax, 0x8(%rsp)
movq $0x0, (%rsp)
movq %rbx, %rdi
movq %r13, %rsi
movq %rbp, %rdx
movq 0x38(%rsp), %r15
movq %r15, %rcx
movq %r12, %r8
xorl %r9d, %r9d
callq 0x608eda
movq %rax, %r14
cmpq $-0x78, %rax
ja 0x60ae51
addq %r15, %r12
addq %r14, %r13
movq %r13, 0x28(%rsp)
movq $0x0, 0x74e0(%rbx)
movq %r12, 0x20(%rsp)
jmp 0x60aecc
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_DDictHashSet_emplaceDDict
|
/* Inserts @ddict into the hash set @hashSet using open addressing with
 * linear probing on the dictID. If a ddict with the same dictID is already
 * present, it is replaced in place. Returns 0 on success, or an error code
 * if the table is already full. Ownership of @ddict is NOT transferred. */
static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
    const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
    /* A completely full table would make the probe loop below spin forever. */
    RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
    while (hashSet->ddictPtrTable[idx] != NULL) {
        /* Replace existing ddict if inserting ddict with same dictID */
        if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
            DEBUGLOG(4, "DictID already exists, replacing rather than adding");
            hashSet->ddictPtrTable[idx] = ddict;
            return 0;
        }
        /* Advance first, THEN wrap. The previous form (mask before
         * increment) let idx reach ddictPtrTableSize, so the loop
         * condition could read one slot past the end of the table. */
        idx = (idx + 1) & idxRangeMask;
    }
    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
    hashSet->ddictPtrTable[idx] = ddict;
    hashSet->ddictPtrCount++;
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
callq 0x614f58
movl %eax, %ebp
leaq 0xc(%rsp), %rdi
movl %eax, (%rdi)
movl $0x4, %esi
xorl %edx, %edx
callq 0x60cfca
movq %rax, %r15
movq 0x8(%rbx), %r12
movq 0x10(%rbx), %rcx
movq $-0x1, %rax
cmpq %r12, %rcx
je 0x60b23e
decq %r12
andq %r12, %r15
movq (%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x60b228
callq 0x614f58
cmpl %ebp, %eax
je 0x60b235
andq %r12, %r15
movq (%rbx), %rax
movq 0x8(%rax,%r15,8), %rdi
incq %r15
testq %rdi, %rdi
jne 0x60b208
movq 0x10(%rbx), %rcx
movq %r14, (%rax,%r15,8)
incq %rcx
movq %rcx, 0x10(%rbx)
jmp 0x60b23c
movq (%rbx), %rax
movq %r14, (%rax,%r15,8)
xorl %eax, %eax
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
HUF_readStats_wksp
|
/*! HUF_readStats_wksp() :
 *  Dispatcher for Huffman-weight parsing: routes to the BMI2-specialized
 *  body when dynamic BMI2 support is compiled in and requested at runtime,
 *  otherwise to the portable default body. Contract is identical in both
 *  cases; @bmi2 is only a code-path selector.
 */
size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize,
                     int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2 != 0) {
        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats,
                                       nbSymbolsPtr, tableLogPtr,
                                       src, srcSize, workSpace, wkspSize);
    }
#else
    (void)bmi2;  /* selector is meaningless without dynamic BMI2 support */
#endif
    return HUF_readStats_body_default(huffWeight, hwSize, rankStats,
                                      nbSymbolsPtr, tableLogPtr,
                                      src, srcSize, workSpace, wkspSize);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rdx, %r15
movq 0x30(%rsp), %rdx
movq $-0x48, %rax
testq %rdx, %rdx
je 0x60b710
movq %r8, %r13
movq %rcx, %rbx
movq %rdi, %r12
movzbl (%r9), %r14d
testb %r14b, %r14b
js 0x60b5cc
cmpq %rdx, %r14
jae 0x60b710
movq 0x38(%rsp), %rax
decq %rsi
incq %r9
movq %r12, %rdi
movq %r9, %rdx
movq %r14, %rcx
movl $0x6, %r8d
movq %rax, %r9
pushq $0x0
pushq 0x48(%rsp)
callq 0x60c44d
addq $0x10, %rsp
movq %rax, %rdx
cmpq $-0x78, %rax
jbe 0x60b625
jmp 0x60b710
leaq -0x7e(%r14), %rcx
shrq %rcx
cmpq %rdx, %rcx
jae 0x60b710
addq $-0x7f, %r14
movq $-0x14, %rax
cmpq %rsi, %r14
jae 0x60b710
testq %r14, %r14
je 0x60b6f7
incq %r9
xorl %eax, %eax
movb (%r9), %dl
shrb $0x4, %dl
movb %dl, (%r12,%rax)
movb (%r9), %dl
andb $0xf, %dl
movb %dl, 0x1(%r12,%rax)
addq $0x2, %rax
incq %r9
cmpq %r14, %rax
jb 0x60b5fe
movq %r14, %rdx
movq %rcx, %r14
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%r15)
movups %xmm0, 0x10(%r15)
movups %xmm0, (%r15)
movl $0x0, 0x30(%r15)
movq $-0x14, %rax
testq %rdx, %rdx
je 0x60b710
xorl %ecx, %ecx
movl $0x1, %edi
xorl %esi, %esi
movzbl (%r12,%rcx), %r8d
cmpq $0xb, %r8
ja 0x60b710
incl (%r15,%r8,4)
movb (%r12,%rcx), %cl
movl $0x1, %r8d
shll %cl, %r8d
sarl %r8d
addl %r8d, %esi
movl %edi, %ecx
incl %edi
cmpq %rcx, %rdx
ja 0x60b657
testl %esi, %esi
je 0x60b710
bsrl %esi, %ecx
cmpl $0xb, %ecx
ja 0x60b710
movl %ecx, %r8d
xorl $0x1f, %r8d
movl $0x20, %edi
movl $0x20, %r9d
subl %r8d, %r9d
movl %r9d, (%r13)
movl $0x2, %r8d
shll %cl, %r8d
subl %esi, %r8d
bsrl %r8d, %ecx
movl $0x1, %esi
shll %cl, %esi
cmpl %r8d, %esi
jne 0x60b710
xorl $0x1f, %ecx
subl %ecx, %edi
movb %dil, (%r12,%rdx)
incl (%r15,%rdi,4)
movl 0x4(%r15), %ecx
cmpl $0x2, %ecx
setb %sil
orb %sil, %cl
testb $0x1, %cl
jne 0x60b710
incl %edx
movl %edx, (%rbx)
incq %r14
movq %r14, %rax
jmp 0x60b710
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%r15)
movups %xmm0, 0x10(%r15)
movups %xmm0, (%r15)
movl $0x0, 0x30(%r15)
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/entropy_common.c
|
FSE_decompress_wksp_bmi2
|
/*! FSE_decompress_wksp_bmi2() :
 *  Thin dispatcher: chooses between the BMI2-specialized and the portable
 *  FSE decompression bodies. Both bodies implement the exact same contract;
 *  @bmi2 only selects the code path (and only when DYNAMIC_BMI2 is enabled).
 */
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2 != 0) {
        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize,
                                             maxLog, workSpace, wkspSize);
    }
#else
    (void)bmi2;  /* selector is meaningless without dynamic BMI2 support */
#endif
    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize,
                                            maxLog, workSpace, wkspSize);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x88, %rsp
movq 0xc0(%rsp), %r14
movl $0xff, 0x24(%rsp)
movq $-0x1, %rax
cmpq $0x204, %r14 # imm = 0x204
jb 0x60c5de
movq %r9, %r13
movl %r8d, %r15d
movq %rcx, %r8
movq %rdx, %r12
movq %rsi, %rbp
movq %rdi, 0x18(%rsp)
leaq 0x24(%rsp), %rsi
leaq 0x34(%rsp), %rdx
movq %r9, %rdi
movq %r12, %rcx
movq %r8, 0x8(%rsp)
xorl %r9d, %r9d
callq 0x60b28e
movq %rax, %rbx
cmpq $-0x78, %rax
ja 0x60c5db
movl 0x34(%rsp), %ecx
movq $-0x2c, %rax
cmpl %r15d, %ecx
ja 0x60c5de
movl $0x1, %edx
shll %cl, %edx
movslq %edx, %rsi
movl 0x24(%rsp), %edx
leal 0x1(%rdx), %edi
movl $0x1, %r8d
shlq %cl, %r8
leaq (%r8,%rsi,4), %r8
addq $0x4, %r8
leaq (%r8,%rdi,2), %rdi
addq $0xb, %rdi
andq $-0x4, %rdi
addq $0x204, %rdi # imm = 0x204
cmpq %r14, %rdi
ja 0x60c5de
leaq 0x4(,%rsi,4), %rax
incq %rsi
leaq 0x200(%r13), %r15
leaq 0x200(,%rsi,4), %r8
addq %r13, %r8
subq %rax, %r14
addq $-0x204, %r14 # imm = 0xFDFC
movq %r15, %rdi
movq %r13, %rsi
movq %r14, %r9
callq 0x60b88d
cmpq $-0x78, %rax
ja 0x60c5de
leaq (%r12,%rbx), %rsi
movq 0x8(%rsp), %r8
movq %r8, %rdx
subq %rbx, %rdx
movq 0x18(%rsp), %rcx
leaq (%rcx,%rbp), %rax
movq %rax, 0x10(%rsp)
cmpw $0x0, 0x202(%r13)
leaq -0x3(%rcx,%rbp), %r13
je 0x60c5f0
testq %rdx, %rdx
je 0x60c66d
movq %rsi, 0x50(%rsp)
leaq 0x8(%rsi), %rax
movq %rax, 0x58(%rsp)
cmpq $0x8, %rdx
jb 0x60c679
leaq (%r12,%r8), %rax
addq $-0x8, %rax
movq %rax, 0x48(%rsp)
movq (%rax), %rcx
movq %rcx, 0x38(%rsp)
shrq $0x38, %rcx
movq $-0x1, %rax
je 0x60c5de
bsrl %ecx, %eax
notl %eax
addl $0x9, %eax
movl %eax, 0x40(%rsp)
movq %rdx, %rax
cmpq $-0x78, %rdx
ja 0x60c5de
jmp 0x60c704
movq %rbx, %rax
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x38(%rsp), %rdi
callq 0x60ccf5
cmpq $-0x78, %rax
ja 0x60c5de
leaq 0x78(%rsp), %rbx
leaq 0x38(%rsp), %r14
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x60ce06
movq %r15, %rdx
leaq 0x68(%rsp), %r15
movq %r15, %rdi
movq %r14, %rsi
callq 0x60ce06
movl 0x8(%r14), %esi
movq (%r14), %r9
movq (%rbx), %r12
movq 0x8(%rbx), %rax
movq %rax, 0x8(%rsp)
movq (%r15), %rbx
movq 0x8(%r15), %rax
movq %rax, (%rsp)
movq 0x20(%r14), %r10
movq 0x18(%r14), %rax
movq 0x10(%r14), %r11
cmpl $0x40, %esi
movq %rax, 0x28(%rsp)
jbe 0x60c76f
movq 0x18(%rsp), %rdx
jmp 0x60c8a1
movq $-0x48, %rax
jmp 0x60c5de
movq %rsi, 0x48(%rsp)
movzbl (%rsi), %eax
movq %rax, 0x38(%rsp)
leaq -0x2(%rdx), %rcx
cmpq $0x5, %rcx
ja 0x60c6e4
leaq 0xca999(%rip), %rdi # 0x6d7030
movslq (%rdi,%rcx,4), %rcx
addq %rdi, %rcx
jmpq *%rcx
movzbl 0x6(%rsi), %ecx
shlq $0x30, %rcx
orq %rcx, %rax
movzbl 0x5(%rsi), %ecx
shlq $0x28, %rcx
addq %rcx, %rax
movzbl 0x4(%rsi), %ecx
shlq $0x20, %rcx
addq %rcx, %rax
movzbl 0x3(%rsi), %ecx
shll $0x18, %ecx
addq %rcx, %rax
movzbl 0x2(%rsi), %ecx
shll $0x10, %ecx
addq %rcx, %rax
movzbl 0x1(%rsi), %ecx
shll $0x8, %ecx
addq %rax, %rcx
movq %rcx, 0x38(%rsp)
movzbl -0x1(%r12,%r8), %eax
testl %eax, %eax
je 0x60cc4c
bsrl %eax, %eax
xorl $0x1f, %eax
shll $0x3, %edx
subl %edx, %eax
addl $0x29, %eax
movl %eax, 0x40(%rsp)
leaq 0x78(%rsp), %rbx
leaq 0x38(%rsp), %r14
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x60ce06
movq %r15, %rdx
leaq 0x68(%rsp), %r15
movq %r15, %rdi
movq %r14, %rsi
callq 0x60ce06
movl 0x8(%r14), %edx
movq (%r14), %r9
movq (%rbx), %r12
movq 0x8(%rbx), %rdi
movq (%r15), %rbx
movq 0x8(%r15), %r8
movq 0x20(%r14), %rax
movq %rax, 0x8(%rsp)
movq 0x18(%r14), %rax
movq %rax, (%rsp)
movq 0x10(%r14), %r11
cmpl $0x40, %edx
jbe 0x60c9f5
movq 0x18(%rsp), %r10
movq %r10, %rsi
jmp 0x60cb00
movq 0x18(%rsp), %rdx
movq %r10, 0x60(%rsp)
leaq 0xca8e0(%rip), %r8 # 0x6d7060
movq (%rsp), %rdi
cmpq %r10, %r11
jae 0x60c7bb
cmpq %rax, %r11
je 0x60c8a1
movl %esi, %r14d
shrl $0x3, %r14d
movq %r11, %rcx
subq %r14, %rcx
movl %r11d, %r9d
subl %eax, %r9d
cmpq %rax, %rcx
setae %cl
cmovael %r14d, %r9d
leal (,%r9,8), %ebp
subl %ebp, %esi
jmp 0x60c7c7
movl %esi, %r9d
shrl $0x3, %r9d
andl $0x7, %esi
movb $0x1, %cl
movl %r9d, %r9d
subq %r9, %r11
movq (%r11), %r9
cmpq %r13, %rdx
jae 0x60c8a1
testb %cl, %cl
je 0x60c8a1
movq 0x8(%rsp), %rax
movzwl (%rax,%r12,4), %r14d
movb 0x2(%rax,%r12,4), %bpl
movzbl 0x3(%rax,%r12,4), %r15d
addl %r15d, %esi
movl %esi, %ecx
negb %cl
movq %r9, %r12
shrq %cl, %r12
andl (%r8,%r15,4), %r12d
movb %bpl, (%rdx)
movq %r13, %r10
movzbl 0x3(%rdi,%rbx,4), %r13d
addl %r13d, %esi
movl %esi, %ecx
negb %cl
movq %r9, %rbp
shrq %cl, %rbp
movzwl (%rdi,%rbx,4), %r15d
andl (%r8,%r13,4), %ebp
movb 0x2(%rdi,%rbx,4), %cl
movb %cl, 0x1(%rdx)
leaq (%rax,%r12,4), %rbx
movzbl 0x3(%rbx,%r14,4), %r13d
addl %r13d, %esi
movl %esi, %ecx
negb %cl
movq %r9, %r12
shrq %cl, %r12
movb 0x2(%rbx,%r14,4), %cl
movzwl (%rbx,%r14,4), %ebx
andl (%r8,%r13,4), %r12d
addq %rbx, %r12
movb %cl, 0x2(%rdx)
leaq (%rdi,%rbp,4), %r14
movzbl 0x3(%r14,%r15,4), %r13d
addl %r13d, %esi
movl %esi, %ecx
negb %cl
movq %r9, %rbx
shrq %cl, %rbx
movzwl (%r14,%r15,4), %ecx
movb 0x2(%r14,%r15,4), %bpl
andl (%r8,%r13,4), %ebx
movq %r10, %r13
movq 0x60(%rsp), %r10
movq 0x28(%rsp), %rax
addq %rcx, %rbx
movb %bpl, 0x3(%rdx)
addq $0x4, %rdx
cmpl $0x40, %esi
jbe 0x60c784
movq 0x10(%rsp), %rcx
addq $-0x2, %rcx
movq $-0x46, %rax
movq %rcx, 0x10(%rsp)
cmpq %rcx, %rdx
ja 0x60c5de
movq %rdx, %r14
subq 0x18(%rsp), %r14
incq %rdx
movq 0x8(%rsp), %rcx
movzwl (%rcx,%r12,4), %r13d
movb 0x2(%rcx,%r12,4), %bpl
movzbl 0x3(%rcx,%r12,4), %r15d
addl %r15d, %esi
movl %esi, %ecx
negb %cl
movq %r9, %r12
shrq %cl, %r12
leaq 0xca76d(%rip), %rcx # 0x6d7060
andl (%rcx,%r15,4), %r12d
movb %bpl, -0x1(%rdx)
cmpl $0x40, %esi
ja 0x60cc58
cmpq %r10, %r11
jae 0x60c939
movq 0x28(%rsp), %rbp
cmpq %rbp, %r11
je 0x60c949
movl %esi, %r9d
shrl $0x3, %r9d
movq %r11, %r15
subq %r9, %r15
movl %r11d, %ecx
subl %ebp, %ecx
cmpq %rbp, %r15
cmovael %r9d, %ecx
leal (,%rcx,8), %r9d
subl %r9d, %esi
jmp 0x60c941
movl %esi, %ecx
shrl $0x3, %ecx
andl $0x7, %esi
movl %ecx, %ecx
subq %rcx, %r11
movq (%r11), %r9
cmpq 0x10(%rsp), %rdx
ja 0x60c5de
addq %r13, %r12
movq (%rsp), %rcx
movzwl (%rcx,%rbx,4), %r13d
movb 0x2(%rcx,%rbx,4), %bpl
movzbl 0x3(%rcx,%rbx,4), %r15d
addl %r15d, %esi
movl %esi, %ecx
negb %cl
movq %r9, %rbx
shrq %cl, %rbx
leaq 0xca6e1(%rip), %rcx # 0x6d7060
andl (%rcx,%r15,4), %ebx
movb %bpl, (%rdx)
cmpl $0x40, %esi
ja 0x60cc6b
cmpq %r10, %r11
jae 0x60c9c4
movq 0x28(%rsp), %rbp
cmpq %rbp, %r11
je 0x60c9d4
movl %esi, %r9d
shrl $0x3, %r9d
movq %r11, %r15
subq %r9, %r15
movl %r11d, %ecx
subl %ebp, %ecx
cmpq %rbp, %r15
cmovael %r9d, %ecx
leal (,%rcx,8), %r9d
subl %r9d, %esi
jmp 0x60c9cc
movl %esi, %ecx
shrl $0x3, %ecx
andl $0x7, %esi
movl %ecx, %ecx
subq %rcx, %r11
movq (%r11), %r9
addq %r13, %rbx
addq $0x2, %r14
leaq 0x2(%rdx), %rcx
incq %rdx
cmpq 0x10(%rsp), %rdx
movq %rcx, %rdx
jbe 0x60c8ca
jmp 0x60c5de
movq 0x18(%rsp), %r10
movq %r10, %rsi
cmpq 0x8(%rsp), %r11
jae 0x60ca3b
movq (%rsp), %r14
cmpq %r14, %r11
je 0x60cb00
movl %edx, %r9d
shrl $0x3, %r9d
movq %r11, %rax
subq %r9, %rax
movl %r11d, %ecx
subl %r14d, %ecx
cmpq %r14, %rax
setae %al
cmovael %r9d, %ecx
leal (,%rcx,8), %r9d
subl %r9d, %edx
jmp 0x60ca45
movl %edx, %ecx
shrl $0x3, %ecx
andl $0x7, %edx
movb $0x1, %al
movl %ecx, %ecx
subq %rcx, %r11
movq (%r11), %r9
cmpq %r13, %rsi
jae 0x60cb00
testb %al, %al
je 0x60cb00
movzwl (%rdi,%r12,4), %eax
movq %r9, %rbp
movl %edx, %ecx
shlq %cl, %rbp
movzbl 0x3(%rdi,%r12,4), %ecx
addl %ecx, %edx
negb %cl
shrq %cl, %rbp
movb 0x2(%rdi,%r12,4), %cl
movb %cl, (%rsi)
movzwl (%r8,%rbx,4), %r14d
movq %r9, %r15
movl %edx, %ecx
shlq %cl, %r15
movzbl 0x3(%r8,%rbx,4), %ecx
addl %ecx, %edx
negb %cl
shrq %cl, %r15
movb 0x2(%r8,%rbx,4), %cl
movb %cl, 0x1(%rsi)
leaq (%rdi,%rbp,4), %rbx
movq %r9, %r12
movl %edx, %ecx
shlq %cl, %r12
movzbl 0x3(%rbx,%rax,4), %ecx
addl %ecx, %edx
negb %cl
shrq %cl, %r12
movzwl (%rbx,%rax,4), %ecx
movb 0x2(%rbx,%rax,4), %al
addq %rcx, %r12
movb %al, 0x2(%rsi)
leaq (%r8,%r15,4), %rax
movzwl (%rax,%r14,4), %r15d
movb 0x2(%rax,%r14,4), %bpl
movq %r9, %rbx
movl %edx, %ecx
shlq %cl, %rbx
movzbl 0x3(%rax,%r14,4), %eax
addl %eax, %edx
movl %eax, %ecx
negb %cl
shrq %cl, %rbx
addq %r15, %rbx
movb %bpl, 0x3(%rsi)
addq $0x4, %rsi
cmpl $0x40, %edx
jbe 0x60c9fd
movq 0x10(%rsp), %rcx
addq $-0x2, %rcx
movq $-0x46, %rax
movq %rcx, 0x10(%rsp)
cmpq %rcx, %rsi
ja 0x60c5de
movq %rsi, %r14
subq %r10, %r14
incq %rsi
movq (%rsp), %r10
movzwl (%rdi,%r12,4), %r15d
movb 0x2(%rdi,%r12,4), %bpl
movzbl 0x3(%rdi,%r12,4), %r13d
movq %r9, %r12
movl %edx, %ecx
shlq %cl, %r12
movl %r13d, %ecx
negb %cl
shrq %cl, %r12
addl %r13d, %edx
movb %bpl, -0x1(%rsi)
cmpl $0x40, %edx
ja 0x60cc86
cmpq 0x8(%rsp), %r11
jae 0x60cb8e
cmpq %r10, %r11
je 0x60cb9e
movl %edx, %r9d
shrl $0x3, %r9d
movq %r11, %r13
subq %r9, %r13
movl %r11d, %ecx
subl %r10d, %ecx
cmpq %r10, %r13
cmovael %r9d, %ecx
leal (,%rcx,8), %r9d
subl %r9d, %edx
jmp 0x60cb96
movl %edx, %ecx
shrl $0x3, %ecx
andl $0x7, %edx
movl %ecx, %ecx
subq %rcx, %r11
movq (%r11), %r9
cmpq 0x10(%rsp), %rsi
ja 0x60c5de
movzwl (%r8,%rbx,4), %r13d
movb 0x2(%r8,%rbx,4), %bpl
movzbl 0x3(%r8,%rbx,4), %r10d
movq %r9, %rbx
movl %edx, %ecx
shlq %cl, %rbx
movl %r10d, %ecx
negb %cl
shrq %cl, %rbx
addl %r10d, %edx
addq %r15, %r12
movb %bpl, (%rsi)
cmpl $0x40, %edx
ja 0x60cc95
cmpq 0x8(%rsp), %r11
jae 0x60cc17
movq (%rsp), %r10
cmpq %r10, %r11
je 0x60cc2b
movl %edx, %r9d
shrl $0x3, %r9d
movq %r11, %r10
subq %r9, %r10
movl %r11d, %ecx
subl (%rsp), %ecx
cmpq (%rsp), %r10
movq (%rsp), %r10
cmovael %r9d, %ecx
leal (,%rcx,8), %r9d
subl %r9d, %edx
jmp 0x60cc23
movl %edx, %ecx
shrl $0x3, %ecx
andl $0x7, %edx
movq (%rsp), %r10
movl %ecx, %ecx
subq %rcx, %r11
movq (%r11), %r9
addq %r13, %rbx
addq $0x2, %r14
leaq 0x2(%rsi), %rcx
incq %rsi
cmpq 0x10(%rsp), %rsi
movq %rcx, %rsi
jbe 0x60cb2b
jmp 0x60c5de
movq $-0x14, %rax
jmp 0x60c5de
movq (%rsp), %rax
leaq (%rax,%rbx,4), %rcx
addq $0x2, %rcx
movl $0x2, %eax
jmp 0x60cc80
movq 0x8(%rsp), %rax
leaq (%rax,%r12,4), %rcx
addq $0x2, %rcx
incq %rdx
movl $0x3, %eax
movb (%rcx), %cl
movb %cl, (%rdx)
jmp 0x60cca9
leaq (%r8,%rbx,4), %rcx
addq $0x2, %rcx
movl $0x2, %eax
jmp 0x60cca5
leaq (%rdi,%r12,4), %rcx
addq $0x2, %rcx
incq %rsi
movl $0x3, %eax
movb (%rcx), %cl
movb %cl, (%rsi)
addq %r14, %rax
jmp 0x60c5de
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/fse_decompress.c
|
BIT_initDStream
|
/*! BIT_initDStream() :
 *  Initialize a bit-stream reader over [srcBuffer, srcBuffer+srcSize).
 *  Bits are consumed starting from the END of the buffer; the last non-zero
 *  byte must carry the end-mark bit that fixes the initial bit position.
 *  @return : srcSize on success, or an error code
 *            (srcSize_wrong if empty, GENERIC/corruption_detected if the
 *            end-mark byte is missing). */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

    bitD->start = (const char*)srcBuffer;
    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);

    if (srcSize >= sizeof(bitD->bitContainer)) {  /* normal case */
        /* Point at the last full container-sized word and load it whole. */
        bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          /* The highest set bit of the last byte is the end-mark; bits above
           * it are already "consumed". */
          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
    } else {
        /* Short input: assemble the container byte by byte. Each case falls
         * through to accumulate all remaining lower bytes. */
        bitD->ptr   = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
                /* fall-through */

        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
                /* fall-through */

        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
                /* fall-through */

        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
                /* fall-through */

        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
                /* fall-through */

        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
                /* fall-through */

        default: break;
        }
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
          if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
        }
        /* Account for the container bytes that never existed in the input. */
        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
    }

    return srcSize;
}
|
testq %rdx, %rdx
je 0x60cd3a
movq %rdx, %rax
movq %rsi, 0x18(%rdi)
leaq 0x8(%rsi), %rcx
movq %rcx, 0x20(%rdi)
cmpq $0x8, %rdx
jb 0x60cd54
leaq (%rsi,%rax), %rcx
addq $-0x8, %rcx
movq %rcx, 0x10(%rdi)
movq (%rcx), %rdx
movq %rdx, (%rdi)
movzbl 0x7(%rcx), %ecx
testl %ecx, %ecx
je 0x60cde8
bsrl %ecx, %ecx
notl %ecx
addl $0x9, %ecx
jmp 0x60cde4
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdi)
movups %xmm0, (%rdi)
movq $0x0, 0x20(%rdi)
movq $-0x48, %rax
retq
movq %rsi, 0x10(%rdi)
movzbl (%rsi), %ecx
movq %rcx, (%rdi)
leaq -0x2(%rax), %rdx
cmpq $0x5, %rdx
ja 0x60cdc9
leaq 0xca2d9(%rip), %r8 # 0x6d7048
movslq (%r8,%rdx,4), %rdx
addq %r8, %rdx
jmpq *%rdx
movzbl 0x6(%rsi), %edx
shlq $0x30, %rdx
orq %rdx, %rcx
movq %rcx, (%rdi)
movzbl 0x5(%rsi), %edx
shlq $0x28, %rdx
addq %rdx, %rcx
movq %rcx, (%rdi)
movzbl 0x4(%rsi), %edx
shlq $0x20, %rdx
addq %rdx, %rcx
movq %rcx, (%rdi)
movzbl 0x3(%rsi), %edx
shll $0x18, %edx
addq %rdx, %rcx
movq %rcx, (%rdi)
movzbl 0x2(%rsi), %edx
shll $0x10, %edx
addq %rdx, %rcx
movq %rcx, (%rdi)
movzbl 0x1(%rsi), %edx
shll $0x8, %edx
addq %rcx, %rdx
movq %rdx, (%rdi)
movzbl -0x1(%rsi,%rax), %ecx
testl %ecx, %ecx
je 0x60cdf7
bsrl %ecx, %ecx
xorl $0x1f, %ecx
leal (,%rax,8), %edx
subl %edx, %ecx
addl $0x29, %ecx
movl %ecx, 0x8(%rdi)
retq
movl $0x0, 0x8(%rdi)
movq $-0x1, %rax
retq
movl $0x0, 0x8(%rdi)
movq $-0x14, %rax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/bitstream.h
|
XXH32_reset
|
/* Resets @statePtr for a fresh XXH32 streaming hash seeded with @seed.
 * Always returns XXH_OK. */
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    ZSTD_memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    /* Copy everything except the trailing 4-byte reserved field: the local
     * copy of that field was never initialized (the memset above deliberately
     * stops 4 bytes short), so copying sizeof(state) would both read
     * indeterminate bytes and overwrite the caller's reserved field that the
     * comment above promises not to touch. */
    ZSTD_memcpy(statePtr, &state, sizeof(state)-4);
    return XXH_OK;
}
|
leal 0x24234428(%rsi), %eax
leal -0x7a143589(%rsi), %ecx
movq $0x0, (%rdi)
movl %eax, 0x8(%rdi)
movl %ecx, 0xc(%rdi)
movl %esi, 0x10(%rdi)
addl $0x61c8864f, %esi # imm = 0x61C8864F
movl %esi, 0x14(%rdi)
xorps %xmm0, %xmm0
movups %xmm0, 0x18(%rdi)
movl $0x0, 0x28(%rdi)
xorl %eax, %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/xxhash.c
|
XXH64_digest
|
XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_digest_endian(state_in, XXH_littleEndian);
else
return XXH64_digest_endian(state_in, XXH_bigEndian);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movabsq $-0x3d4d51c2d82b14b1, %rax # imm = 0xC2B2AE3D27D4EB4F
movabsq $-0x61c8864e7a143579, %rcx # imm = 0x9E3779B185EBCA87
movabsq $-0x7a1435883d4d519d, %rsi # imm = 0x85EBCA77C2B2AE63
movabsq $0x27d4eb2f165667c5, %rdx # imm = 0x27D4EB2F165667C5
movl 0x48(%rdi), %r10d
movq (%rdi), %r11
cmpq $0x20, %r11
jb 0x60d793
movq 0x8(%rdi), %r14
movq 0x10(%rdi), %rbx
movq 0x18(%rdi), %r9
movq 0x20(%rdi), %r8
movq %r14, %r15
rolq %r15
movq %rbx, %r12
rolq $0x7, %r12
addq %r15, %r12
movq %r9, %r15
rolq $0xc, %r15
movq %r8, %r13
rolq $0x12, %r13
addq %r15, %r13
addq %r12, %r13
imulq %rax, %r14
rolq $0x1f, %r14
imulq %rcx, %r14
xorq %r13, %r14
imulq %rcx, %r14
addq %rsi, %r14
imulq %rax, %rbx
rolq $0x1f, %rbx
imulq %rcx, %rbx
xorq %r14, %rbx
imulq %rcx, %rbx
imulq %rax, %r9
rolq $0x1f, %r9
addq %rsi, %rbx
imulq %rcx, %r9
xorq %rbx, %r9
imulq %rcx, %r9
addq %rsi, %r9
imulq %rax, %r8
rolq $0x1f, %r8
imulq %rcx, %r8
xorq %r9, %r8
imulq %rcx, %r8
addq %rsi, %r8
jmp 0x60d79a
movq 0x18(%rdi), %r8
addq %rdx, %r8
leaq (%rdi,%r10), %r9
addq $0x28, %r9
addq $0x28, %rdi
addq %r11, %r8
cmpl $0x8, %r10d
jae 0x60d7b7
movq %r8, %r10
movq %rdi, %r11
jmp 0x60d7e7
movq (%rdi), %r10
imulq %rax, %r10
rolq $0x1f, %r10
imulq %rcx, %r10
xorq %r8, %r10
rolq $0x1b, %r10
imulq %rcx, %r10
addq %rsi, %r10
leaq 0x8(%rdi), %r11
addq $0x10, %rdi
movq %r10, %r8
cmpq %r9, %rdi
movq %r11, %rdi
jbe 0x60d7b7
movabsq $0x165667b19e3779f9, %rsi # imm = 0x165667B19E3779F9
leaq 0x4(%r11), %rdi
cmpq %r9, %rdi
jbe 0x60d7ff
movq %r11, %rdi
jmp 0x60d817
movl (%r11), %r8d
imulq %rcx, %r8
xorq %r10, %r8
rolq $0x17, %r8
imulq %rax, %r8
addq %rsi, %r8
movq %r8, %r10
cmpq %r9, %rdi
jae 0x60d83c
movzbl (%rdi), %r8d
imulq %rdx, %r8
xorq %r10, %r8
rolq $0xb, %r8
imulq %rcx, %r8
incq %rdi
movq %r8, %r10
cmpq %r9, %rdi
jb 0x60d81c
jmp 0x60d83f
movq %r10, %r8
movq %r8, %rcx
shrq $0x21, %rcx
xorq %r8, %rcx
imulq %rax, %rcx
movq %rcx, %rdx
shrq $0x1d, %rdx
xorq %rcx, %rdx
imulq %rsi, %rdx
movq %rdx, %rax
shrq $0x20, %rax
xorq %rdx, %rax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/xxhash.c
|
FSE_buildCTable_wksp
|
/* FSE_buildCTable_wksp() :
 * Builds the FSE compression table `ct` from the normalized histogram
 * `normalizedCounter[0..maxSymbolValue]` : writes the 2-U16 header, the
 * tableSize next-state entries, and the per-symbol transformation table.
 * `workSpace` is scratch for the cumulative counts (maxSymbolValue+2 U32)
 * followed by the spread-symbol table (tableSize bytes); it must be 4-byte
 * aligned and hold at least FSE_BUILD_CTABLE_WORKSPACE_SIZE(...) bytes.
 * Returns 0 on success, ERROR(GENERIC) on misaligned workspace, or
 * ERROR(tableLog_tooLarge) when the workspace is too small. */
size_t FSE_buildCTable_wksp(FSE_CTable* ct,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize)
{
U32 const tableSize = 1 << tableLog;
U32 const tableMask = tableSize - 1;
/* ct layout : header (2 x U16), next-state table (tableSize x U16), then symbolTT */
void* const ptr = ct;
U16* const tableU16 = ( (U16*) ptr) + 2;
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
U32 const step = FSE_TABLESTEP(tableSize);
/* scratch layout : cumul[0..maxSymbolValue+1], then tableSymbol[tableSize] */
U32* cumul = (U32*)workSpace;
FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
U32 highThreshold = tableSize-1;
if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
/* CTable header */
tableU16[-2] = (U16) tableLog;
tableU16[-1] = (U16) maxSymbolValue;
assert(tableLog < 16); /* required for threshold strategy to work */
/* For explanations on how to distribute symbol values over the table :
* http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
#ifdef __clang_analyzer__
ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
#endif
/* symbol start positions */
{ U32 u;
cumul[0] = 0;
for (u=1; u <= maxSymbolValue+1; u++) {
if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
cumul[u] = cumul[u-1] + 1;
/* low-probability symbols are parked at the top of the table */
tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
} else {
cumul[u] = cumul[u-1] + normalizedCounter[u-1];
} }
cumul[maxSymbolValue+1] = tableSize+1;
}
/* Spread symbols */
{ U32 position = 0;
U32 symbol;
for (symbol=0; symbol<=maxSymbolValue; symbol++) {
int nbOccurrences;
int const freq = normalizedCounter[symbol];
for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
/* step is coprime with tableSize, so this visits every slot exactly once */
position = (position + step) & tableMask;
while (position > highThreshold)
position = (position + step) & tableMask; /* Low proba area */
} }
assert(position==0); /* Must have initialized all positions */
}
/* Build table */
{ U32 u; for (u=0; u<tableSize; u++) {
FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
} }
/* Build Symbol Transformation Table */
{ unsigned total = 0;
unsigned s;
for (s=0; s<=maxSymbolValue; s++) {
switch (normalizedCounter[s])
{
case 0:
/* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
break;
case -1:
case 1:
symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
symbolTT[s].deltaFindState = total - 1;
total ++;
break;
default :
{
U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
symbolTT[s].deltaFindState = total - normalizedCounter[s];
total += normalizedCounter[s];
} } } }
#if 0 /* debug : symbol costs */
DEBUGLOG(5, "\n --- table statistics : ");
{ U32 symbol;
for (symbol=0; symbol<=maxSymbolValue; symbol++) {
DEBUGLOG(5, "%3u: w=%3i, maxBits=%u, fracBits=%.2f",
symbol, normalizedCounter[symbol],
FSE_getMaxNbBits(symbolTT, symbol),
(double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
}
}
#endif
return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movl $0x1, %ebx
shll %cl, %ebx
movl $0x1, %r11d
movl %ebx, %r14d
shrl %r14d
testl %ecx, %ecx
cmovnel %r14d, %r11d
movq $-0x1, %rax
testb $0x3, %r8b
jne 0x60da50
movl %ecx, %r10d
leal 0x2(%rdx), %r13d
leal -0x2(%r10), %ecx
movl $0x1, %r15d
shlq %cl, %r15
addq %r13, %r15
shlq $0x2, %r15
movq $-0x2c, %rax
cmpq %r9, %r15
ja 0x60da50
leal -0x1(%rbx), %ecx
movl %ebx, %r15d
shrl $0x3, %r15d
leaq (%r8,%r13,4), %rax
movw %r10w, (%rdi)
movw %dx, 0x2(%rdi)
movl $0x0, (%r8)
movl %ecx, %r9d
incl %edx
je 0x60d954
cmpl $0x3, %r13d
movl $0x2, %r12d
cmovael %r13d, %r12d
decq %r12
xorl %r13d, %r13d
movl %ecx, %r9d
movswl (%rsi,%r13,2), %ebp
cmpl $-0x1, %ebp
je 0x60d937
addl (%r8,%r13,4), %ebp
movl %ebp, 0x4(%r8,%r13,4)
jmp 0x60d94c
movl (%r8,%r13,4), %ebp
incl %ebp
movl %ebp, 0x4(%r8,%r13,4)
movl %r9d, %ebp
decl %r9d
movb %r13b, (%rax,%rbp)
incq %r13
cmpq %r13, %r12
jne 0x60d922
movq %r11, -0x8(%rsp)
leal 0x1(%rbx), %ebp
movl %edx, %r12d
movl %ebp, (%r8,%r12,4)
leal (%r15,%r14), %ebp
addl $0x3, %ebp
cmpl $0x1, %edx
adcl $0x0, %edx
xorl %r14d, %r14d
xorl %r15d, %r15d
movswl (%rsi,%r14,2), %r12d
testl %r12d, %r12d
jle 0x60d99d
xorl %r13d, %r13d
movl %r15d, %r11d
movb %r14b, (%rax,%r11)
addl %ebp, %r15d
andl %ecx, %r15d
cmpl %r9d, %r15d
ja 0x60d98a
incl %r13d
cmpl %r12d, %r13d
jne 0x60d983
incq %r14
cmpq %rdx, %r14
jne 0x60d976
movl %ebx, %ecx
xorl %r9d, %r9d
movzbl (%rax,%r9), %r11d
leal (%rcx,%r9), %ebp
movl (%r8,%r11,4), %r14d
leal 0x1(%r14), %r15d
movl %r15d, (%r8,%r11,4)
movw %bp, 0x4(%rdi,%r14,2)
incq %r9
cmpq %r9, %rcx
jne 0x60d9aa
movl %r10d, %r8d
shll $0x10, %r8d
subl %ebx, %r8d
leal 0x10000(%r8), %r9d
movq -0x8(%rsp), %rax
leaq (%rdi,%rax,4), %rdi
addq $0x8, %rdi
xorl %eax, %eax
xorl %r11d, %r11d
xorl %ebx, %ebx
movzwl (%rsi,%r11,2), %ecx
cmpl $0xffff, %ecx # imm = 0xFFFF
je 0x60da0e
cmpl $0x1, %ecx
je 0x60da0e
testl %ecx, %ecx
jne 0x60da1e
movl %r9d, (%rdi,%r11,8)
jmp 0x60da48
movl %r8d, (%rdi,%r11,8)
leal -0x1(%rbx), %ecx
movl %ecx, -0x4(%rdi,%r11,8)
incl %ebx
jmp 0x60da48
movswl %cx, %r14d
leal -0x1(%r14), %ecx
bsrl %ecx, %ebp
movl %r10d, %ecx
subl %ebp, %ecx
movl %ebx, %ebp
subl %r14d, %ebp
addl %r14d, %ebx
shll %cl, %r14d
shll $0x10, %ecx
subl %r14d, %ecx
movl %ecx, (%rdi,%r11,8)
movl %ebp, -0x4(%rdi,%r11,8)
incq %r11
cmpq %r11, %rdx
jne 0x60d9f2
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/fse_compress.c
|
FSE_writeNCount_generic
|
/* FSE_writeNCount_generic() :
 * Serializes the normalized counters into the compact "NCount" header
 * bitstream : 4 bits for (tableLog - FSE_MIN_TABLELOG), then variable-size
 * fields per symbol, with run-length shortcuts for stretches of zeros.
 * Bits are accumulated in `bitStream` and flushed 2 bytes at a time.
 * When writeIsSafe==0, each flush is bounds-checked against `oend`;
 * returns ERROR(dstSize_tooSmall) on overflow, ERROR(GENERIC) on an
 * inconsistent distribution, otherwise the number of bytes written. */
static size_t
FSE_writeNCount_generic (void* header, size_t headerBufferSize,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
unsigned writeIsSafe)
{
BYTE* const ostart = (BYTE*) header;
BYTE* out = ostart;
BYTE* const oend = ostart + headerBufferSize;
int nbBits;
const int tableSize = 1 << tableLog;
int remaining;
int threshold;
U32 bitStream = 0;
int bitCount = 0;
unsigned symbol = 0;
unsigned const alphabetSize = maxSymbolValue + 1;
int previousIs0 = 0;
/* Table Size */
bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
bitCount += 4;
/* Init */
remaining = tableSize+1; /* +1 for extra accuracy */
threshold = tableSize;
nbBits = tableLog+1;
while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
if (previousIs0) {
/* after a zero count : encode the length of the zero run */
unsigned start = symbol;
while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
if (symbol == alphabetSize) break; /* incorrect distribution */
/* runs of 24+ zeros : emit a 16-bit all-ones repeat marker per 24 */
while (symbol >= start+24) {
start+=24;
bitStream += 0xFFFFU << bitCount;
if ((!writeIsSafe) && (out > oend-2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE) bitStream;
out[1] = (BYTE)(bitStream>>8);
out+=2;
bitStream>>=16;
}
/* remaining run encoded 2 bits at a time, value 3 meaning "3 more" */
while (symbol >= start+3) {
start+=3;
bitStream += 3 << bitCount;
bitCount += 2;
}
bitStream += (symbol-start) << bitCount;
bitCount += 2;
if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out += 2;
bitStream >>= 16;
bitCount -= 16;
} }
{ int count = normalizedCounter[symbol++];
int const max = (2*threshold-1) - remaining;
remaining -= count < 0 ? -count : count;
count++; /* +1 for extra accuracy */
if (count>=threshold)
count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
bitStream += count << bitCount;
bitCount += nbBits;
bitCount -= (count<max); /* small values need one bit fewer */
previousIs0 = (count==1); /* encoded 1 means original count 0 */
if (remaining<1) return ERROR(GENERIC);
/* shrink field width as the remaining probability mass drops */
while (remaining<threshold) { nbBits--; threshold>>=1; }
}
if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out += 2;
bitStream >>= 16;
bitCount -= 16;
} }
if (remaining != 1)
return ERROR(GENERIC); /* incorrect normalized distribution */
assert(symbol <= alphabetSize);
/* flush remaining bitStream */
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out+= (bitCount+7) /8;
return (out-ostart);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movl %ecx, %r15d
movq $-0x1, %rax
incl %r15d
je 0x60dd19
addq %rdi, %rsi
leal -0x5(%r8), %r11d
movl $0x1, %r14d
movl %r8d, %ecx
shll %cl, %r14d
leal 0x1(%r14), %ebp
incl %r8d
addq $-0x2, %rsi
movl %r15d, %eax
movq %rax, -0x8(%rsp)
movl $0x4, %ecx
xorl %r12d, %r12d
movq %rdi, -0x10(%rsp)
movq %rdi, %rbx
xorl %r13d, %r13d
movq %rsi, -0x18(%rsp)
movl %r9d, -0x1c(%rsp)
movl %r15d, -0x20(%rsp)
testl %r12d, %r12d
je 0x60db75
movl %r13d, %r10d
cmpl %r15d, %r13d
movq -0x8(%rsp), %rax
jae 0x60db5e
movl %r13d, %r10d
cmpw $0x0, (%rdx,%r10,2)
jne 0x60db5e
incq %r10
cmpq %r10, %rax
jne 0x60db49
jmp 0x60dcd6
cmpl %r15d, %r10d
je 0x60dcd6
leal 0x18(%r13), %eax
cmpl %eax, %r10d
jae 0x60db7d
movl %r13d, %edi
jmp 0x60dbb1
movl %r13d, %r10d
jmp 0x60dc09
movl $0xffff, %eax # imm = 0xFFFF
shll %cl, %eax
testl %r9d, %r9d
jne 0x60db92
cmpq %rsi, %rbx
ja 0x60dd12
addl %eax, %r11d
movw %r11w, (%rbx)
addq $0x2, %rbx
shrl $0x10, %r11d
leal 0x18(%r13), %edi
addl $0x30, %r13d
cmpl %r13d, %r10d
movl %edi, %r13d
jae 0x60db84
leal 0x3(%rdi), %eax
cmpl %eax, %r10d
jb 0x60dbd3
movl $0x3, %eax
shll %cl, %eax
addl %eax, %r11d
addl $0x2, %ecx
leal 0x3(%rdi), %eax
addl $0x6, %edi
cmpl %edi, %r10d
movl %eax, %edi
jae 0x60dbb9
movl %r11d, %eax
movl %r10d, %r11d
subl %edi, %r11d
shll %cl, %r11d
addl %eax, %r11d
cmpl $0xf, %ecx
jl 0x60dc06
testl %r9d, %r9d
jne 0x60dbf5
cmpq %rsi, %rbx
ja 0x60dd12
movw %r11w, (%rbx)
addq $0x2, %rbx
shrl $0x10, %r11d
addl $-0xe, %ecx
jmp 0x60dc09
addl $0x2, %ecx
movl %r10d, %eax
movswl (%rdx,%rax,2), %eax
movl %ebp, %esi
notl %esi
movl %eax, %edi
negl %edi
cmovsl %eax, %edi
leal (%rsi,%r14,2), %esi
leal 0x1(%rax), %r15d
cmpl %r14d, %r15d
movl %esi, %r15d
movl $0x0, %r9d
cmovll %r9d, %r15d
leal (%r15,%rax), %r13d
incl %r13d
movl %r13d, %r15d
shll %cl, %r15d
xorl %eax, %eax
cmpl %esi, %r13d
setl %sil
xorl %r12d, %r12d
cmpl $0x1, %r13d
sete %r13b
subl %edi, %ebp
jle 0x60dd24
movl %r8d, %edi
cmpl %r14d, %ebp
jge 0x60dc6b
decl %edi
sarl %r14d
jmp 0x60dc5f
addl %r15d, %r11d
addl %r8d, %ecx
movb %sil, %al
subl %eax, %ecx
cmpl $0x11, %ecx
jl 0x60dcad
movl -0x1c(%rsp), %r9d
testl %r9d, %r9d
sete %al
movq -0x18(%rsp), %rsi
cmpq %rsi, %rbx
seta %r8b
testb %r8b, %al
movl -0x20(%rsp), %r15d
jne 0x60dd12
movw %r11w, (%rbx)
addq $0x2, %rbx
shrl $0x10, %r11d
addl $-0x10, %ecx
jmp 0x60dcbc
movq -0x18(%rsp), %rsi
movl -0x1c(%rsp), %r9d
movl -0x20(%rsp), %r15d
incl %r10d
cmpl %r15d, %r10d
jae 0x60dcd6
movb %r13b, %r12b
movl %r10d, %r13d
movl %edi, %r8d
cmpl $0x1, %ebp
jne 0x60db34
leal 0x7(%rcx), %eax
addl $0xe, %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
cmpl $0x1, %ebp
movq $-0x1, %rax
jne 0x60dd19
testl %r9d, %r9d
jne 0x60dcfe
movq $-0x46, %rax
cmpq %rsi, %rbx
ja 0x60dd19
sarl $0x3, %ecx
movslq %ecx, %rax
movw %r11w, (%rbx)
addq %rbx, %rax
subq -0x10(%rsp), %rax
jmp 0x60dd19
movq $-0x46, %rax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq $-0x1, %rax
jmp 0x60dd19
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/fse_compress.c
|
FSE_optimalTableLog_internal
|
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
assert(srcSize > 1); /* Not supported, RLE should be used instead */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
return tableLog;
}
|
leal -0x1(%rsi), %eax
bsrl %eax, %eax
subl %ecx, %eax
bsrl %esi, %ecx
xorl $-0x20, %ecx
addl $0x21, %ecx
bsrl %edx, %edx
xorl $-0x20, %edx
addl $0x22, %edx
cmpl %edx, %ecx
cmovbl %ecx, %edx
testl %edi, %edi
movl $0xb, %ecx
cmovnel %edi, %ecx
cmpl %ecx, %eax
cmovbl %eax, %ecx
cmpl %ecx, %edx
cmoval %edx, %ecx
cmpl $0x6, %ecx
movl $0x5, %edx
cmovael %ecx, %edx
cmpl $0xc, %edx
movl $0xc, %eax
cmovbl %edx, %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/fse_compress.c
|
FSE_optimalTableLog
|
/* Picks the table log used for FSE compression : start from maxTableLog
 * (FSE_DEFAULT_TABLELOG when 0), cap by the source-size bound reduced by
 * `minus` bits, raise to FSE_minTableLog() so all symbols are representable,
 * then clamp into [FSE_MIN_TABLELOG, FSE_MAX_TABLELOG].
 * NOTE(review): this record is labeled FSE_optimalTableLog but shows the
 * _internal body — presumably the wrapper calls it with a fixed `minus`;
 * confirm against the caller. */
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
assert(srcSize > 1); /* Not supported, RLE should be used instead */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
return tableLog;
}
|
leal -0x1(%rsi), %eax
bsrl %eax, %eax
xorl $-0x20, %eax
addl $0x1e, %eax
bsrl %esi, %ecx
xorl $-0x20, %ecx
addl $0x21, %ecx
bsrl %edx, %edx
xorl $-0x20, %edx
addl $0x22, %edx
cmpl %edx, %ecx
cmovbl %ecx, %edx
testl %edi, %edi
movl $0xb, %ecx
cmovnel %edi, %ecx
cmpl %ecx, %eax
cmovbl %eax, %ecx
cmpl %ecx, %edx
cmoval %edx, %ecx
cmpl $0x6, %ecx
movl $0x5, %edx
cmovael %ecx, %edx
cmpl $0xc, %edx
movl $0xc, %eax
cmovbl %edx, %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/fse_compress.c
|
FSE_normalizeCount
|
/* FSE_normalizeCount() :
 * Rescales the raw histogram `count[0..maxSymbolValue]` (summing to `total`)
 * so the normalized counters sum to exactly 1<<tableLog. Uses one 64-bit
 * fixed-point division plus a rounding table (rtbTable); a correction pass
 * (FSE_normalizeM2) handles the corner case where the remainder would
 * distort the largest symbol too much. Symbols at or below the low
 * threshold get count -1 (or +1 when useLowProbCount==0).
 * Returns tableLog on success, 0 for the single-symbol (RLE) case, or an
 * error code for unsupported tableLog values. */
size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
const unsigned* count, size_t total,
unsigned maxSymbolValue, unsigned useLowProbCount)
{
/* Sanity checks */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
{ static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
short const lowProbCount = useLowProbCount ? -1 : 1;
U64 const scale = 62 - tableLog;
U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
U64 const vStep = 1ULL<<(scale-20);
int stillToDistribute = 1<<tableLog;
unsigned s;
unsigned largest=0;
short largestP=0;
U32 lowThreshold = (U32)(total >> tableLog);
for (s=0; s<=maxSymbolValue; s++) {
if (count[s] == total) return 0; /* rle special case */
if (count[s] == 0) { normalizedCounter[s]=0; continue; }
if (count[s] <= lowThreshold) {
normalizedCounter[s] = lowProbCount;
stillToDistribute--;
} else {
/* fixed-point multiply stands in for (count[s] << tableLog) / total */
short proba = (short)((count[s]*step) >> scale);
if (proba<8) {
/* small probas : round up when the remainder beats the table threshold */
U64 restToBeat = vStep * rtbTable[proba];
proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
}
if (proba > largestP) { largestP=proba; largest=s; }
normalizedCounter[s] = proba;
stillToDistribute -= proba;
} }
/* dump the rounding remainder on the most frequent symbol, unless it
* would eat more than half of it — then renormalize differently */
if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
/* corner case, need another normalization method */
size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
if (FSE_isError(errorCode)) return errorCode;
}
else normalizedCounter[largest] += (short)stillToDistribute;
}
#if 0
{ /* Print Table (debug) */
U32 s;
U32 nTotal = 0;
for (s=0; s<=maxSymbolValue; s++)
RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
for (s=0; s<=maxSymbolValue; s++)
nTotal += abs(normalizedCounter[s]);
if (nTotal != (1U<<tableLog))
RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
getchar();
}
#endif
return tableLog;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
testl %esi, %esi
movl $0xb, %ebx
cmovnel %esi, %ebx
movq $-0x1, %rax
cmpl $0x5, %ebx
jb 0x60e055
movq $-0x2c, %rax
cmpl $0xc, %ebx
ja 0x60e055
movl %r8d, %r12d
movq %rcx, %r11
bsrl %r11d, %eax
xorl $-0x20, %eax
addl $0x21, %eax
bsrl %r8d, %ecx
xorl $-0x20, %ecx
addl $0x22, %ecx
cmpl %ecx, %eax
cmovbl %eax, %ecx
movq $-0x1, %rax
cmpl %ecx, %ebx
jb 0x60e055
movq %rdx, %r14
negl %r9d
movl $0x0, %ebp
sbbl %ebp, %ebp
orl $0x1, %ebp
movl $0x3e, %esi
subl %ebx, %esi
movl %r11d, %ecx
movabsq $0x4000000000000000, %rax # imm = 0x4000000000000000
xorl %edx, %edx
divq %rcx
movl $0x1, %edx
movl %ebx, %ecx
shll %cl, %edx
movq %r11, %r13
shrq %cl, %r13
movq %rsi, -0x20(%rsp)
leaq -0x14(%rsi), %rcx
movq %rcx, -0x10(%rsp)
movl %ebx, %ecx
movq %rcx, -0x28(%rsp)
movl %edx, -0x18(%rsp)
movl %edx, %r15d
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %edx, %edx
movl %ebp, -0x14(%rsp)
movq %r12, -0x8(%rsp)
movl %edx, %edx
movl (%r14,%rdx,4), %r10d
cmpq %r11, %r10
je 0x60df71
testq %r10, %r10
je 0x60df3a
cmpl %r13d, %r10d
jbe 0x60df42
imulq %rax, %r10
movq %r10, %rsi
movq -0x20(%rsp), %rcx
shrq %cl, %rsi
testl $0xfff8, %esi # imm = 0xFFF8
jne 0x60df22
movzwl %si, %ebp
movl %ebp, %ecx
movq %rdi, %r12
movq %r14, %rdi
leaq 0xc921a(%rip), %r14 # 0x6d7110
movl (%r14,%rcx,4), %r14d
movq -0x10(%rsp), %rcx
shlq %cl, %r14
movq -0x20(%rsp), %rcx
shlq %cl, %rbp
subq %rbp, %r10
movl -0x14(%rsp), %ebp
cmpq %r10, %r14
movq %rdi, %r14
movq %r12, %rdi
movq -0x8(%rsp), %r12
adcl $0x0, %esi
movzwl %si, %ecx
cmpw %r8w, %cx
cmoval %edx, %r9d
cmoval %ecx, %r8d
movw %cx, (%rdi,%rdx,2)
subl %ecx, %r15d
jmp 0x60df49
movw $0x0, (%rdi,%rdx,2)
jmp 0x60df49
movw %bp, (%rdi,%rdx,2)
decl %r15d
incl %edx
cmpl %r12d, %edx
jbe 0x60deb4
movl %r15d, %edx
negl %edx
movl %r9d, %ecx
movswl (%rdi,%rcx,2), %eax
movl %eax, %esi
sarl %esi
cmpl %edx, %esi
jle 0x60df78
leaq (%rdi,%rcx,2), %rcx
jmp 0x60e04a
xorl %eax, %eax
jmp 0x60e055
leaq (%r11,%r11,2), %r9
incb %bl
movl %ebx, %ecx
shrq %cl, %r9
leal 0x1(%r12), %ecx
xorl %eax, %eax
xorl %ebx, %ebx
movl -0x18(%rsp), %r10d
movl (%r14,%rax,4), %edx
testq %rdx, %rdx
je 0x60dfac
cmpl %r13d, %edx
jbe 0x60dfb4
cmpl %r9d, %edx
jbe 0x60dfba
movw $0xfffe, (%rdi,%rax,2) # imm = 0xFFFE
jmp 0x60dfc5
movw $0x0, (%rdi,%rax,2)
jmp 0x60dfc5
movw %bp, (%rdi,%rax,2)
jmp 0x60dfc0
movw $0x1, (%rdi,%rax,2)
incl %ebx
subq %rdx, %r11
incq %rax
cmpq %rax, %rcx
jne 0x60df91
movl %r10d, %r15d
subl %ebx, %r15d
je 0x60e050
movl %r15d, %esi
movq %r11, %rax
xorl %edx, %edx
divq %rsi
movl %r9d, %edx
cmpq %rdx, %rax
jbe 0x60e021
leaq (%r11,%r11,2), %rax
addl %r15d, %r15d
xorl %r9d, %r9d
xorl %edx, %edx
divq %r15
cmpw $-0x2, (%rdi,%r9,2)
jne 0x60e013
movl (%r14,%r9,4), %edx
cmpl %eax, %edx
ja 0x60e013
movw $0x1, (%rdi,%r9,2)
incl %ebx
subq %rdx, %r11
incq %r9
cmpq %r9, %rcx
jne 0x60dff7
subl %ebx, %r10d
movl %r10d, %r15d
cmpl %ecx, %ebx
jne 0x60e060
xorl %eax, %eax
xorl %esi, %esi
xorl %edx, %edx
movl (%r14,%rax,4), %r9d
cmpl %esi, %r9d
cmoval %eax, %edx
cmoval %r9d, %esi
incq %rax
cmpq %rax, %rcx
jne 0x60e02b
movl %edx, %eax
leaq (%rdi,%rax,2), %rcx
movzwl (%rcx), %eax
addl %r15d, %eax
movw %ax, (%rcx)
movq -0x28(%rsp), %rax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
testq %r11, %r11
je 0x60e0ce
movq -0x20(%rsp), %rdx
leal -0x1(%rdx), %ecx
movq $-0x1, %r9
shlq %cl, %r9
movl %r15d, %eax
movl %edx, %ecx
shlq %cl, %rax
notq %r9
addq %r9, %rax
movl %r11d, %ecx
xorl %r11d, %r11d
xorl %edx, %edx
divq %rcx
movl %r11d, %r11d
cmpw $-0x2, (%rdi,%r11,2)
jne 0x60e0c4
movl (%r14,%r11,4), %edx
imulq %rax, %rdx
addq %r9, %rdx
movq -0x20(%rsp), %rcx
shrq %cl, %r9
movq %rdx, %rsi
shrq %cl, %rsi
cmpq %r9, %rsi
je 0x60e102
subl %r9d, %esi
movw %si, (%rdi,%r11,2)
movq %rdx, %r9
incl %r11d
cmpl %r12d, %r11d
jbe 0x60e090
jmp 0x60e050
testl %r15d, %r15d
je 0x60e050
xorl %eax, %eax
xorl %edx, %edx
movl %edx, %ecx
movzwl (%rdi,%rcx,2), %edx
testw %dx, %dx
jle 0x60e0ef
decl %r15d
incl %edx
movw %dx, (%rdi,%rcx,2)
leal 0x1(%rcx), %edx
cmpl %r12d, %ecx
cmovel %eax, %edx
testl %r15d, %r15d
jne 0x60e0db
jmp 0x60e050
movq $-0x1, %rax
jmp 0x60e055
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/fse_compress.c
|
FSE_buildCTable_raw
|
/* Builds a "raw" CTable where every nbBits-wide symbol value is emitted
 * verbatim : identity next-state table plus a uniform transformation table.
 * Returns 0 on success, ERROR(GENERIC) when nbBits < 1. */
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
{
    const unsigned tableSize = 1 << nbBits;
    const unsigned maxSymbolValue = tableSize - 1;
    U16* const stateTable = ((U16*)(void*)ct) + 2;
    FSE_symbolCompressionTransform* const symbolTT =
        (FSE_symbolCompressionTransform*)(((U32*)(void*)ct) + 1 /* header */ + (tableSize>>1)); /* assumption : tableLog >= 1 */

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC); /* min size */

    /* header */
    stateTable[-2] = (U16) nbBits;
    stateTable[-1] = (U16) maxSymbolValue;

    /* Build table : state for symbol s is simply tableSize + s */
    {   unsigned s;
        for (s = 0; s < tableSize; s++)
            stateTable[s] = (U16)(tableSize + s);
    }
    /* Build Symbol Transformation Table : identical cost for every symbol */
    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
        unsigned s;
        for (s = 0; s <= maxSymbolValue; s++) {
            symbolTT[s].deltaNbBits = deltaNbBits;
            symbolTT[s].deltaFindState = s - 1;
        }
    }
    return 0;
}
|
movl %esi, %ecx
movl $0x1, %eax
shll %cl, %eax
testl %esi, %esi
je 0x60e442
movl %eax, %esi
shrl %esi
movw %cx, (%rdi)
leal -0x1(%rax), %edx
movw %dx, 0x2(%rdi)
movl %eax, %edx
leaq 0x7(%rdx), %r8
andq $-0x8, %r8
leaq -0x1(%rdx), %r9
movq %r9, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
movdqa 0x88823(%rip), %xmm1 # 0x696970
movdqa 0x8882b(%rip), %xmm2 # 0x696980
movdqa 0x2d5b3(%rip), %xmm3 # 0x63b710
movdqa 0x2d5bb(%rip), %xmm4 # 0x63b720
xorl %r9d, %r9d
movdqa 0x2d5c0(%rip), %xmm5 # 0x63b730
pxor %xmm5, %xmm0
pcmpeqd %xmm6, %xmm6
movdqa 0xbb690(%rip), %xmm7 # 0x6c9810
movdqa %xmm4, %xmm8
pxor %xmm5, %xmm8
movdqa %xmm8, %xmm10
pcmpgtd %xmm0, %xmm10
pshufd $0xa0, %xmm10, %xmm9 # xmm9 = xmm10[0,0,2,2]
pshuflw $0xe8, %xmm9, %xmm11 # xmm11 = xmm9[0,2,2,3,4,5,6,7]
pcmpeqd %xmm0, %xmm8
pshufd $0xf5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,3,3]
pshuflw $0xe8, %xmm8, %xmm12 # xmm12 = xmm8[0,2,2,3,4,5,6,7]
pand %xmm11, %xmm12
pshufd $0xf5, %xmm10, %xmm10 # xmm10 = xmm10[1,1,3,3]
pshuflw $0xe8, %xmm10, %xmm11 # xmm11 = xmm10[0,2,2,3,4,5,6,7]
por %xmm12, %xmm11
pxor %xmm6, %xmm11
packssdw %xmm11, %xmm11
movd %xmm11, %r10d
testb $0x1, %r10b
je 0x60e1e6
leal (%rdx,%r9), %r10d
movw %r10w, 0x4(%rdi,%r9,2)
pand %xmm9, %xmm8
por %xmm10, %xmm8
packssdw %xmm8, %xmm8
pxor %xmm6, %xmm8
packssdw %xmm8, %xmm8
movd %xmm8, %r10d
shrl $0x10, %r10d
testb $0x1, %r10b
je 0x60e21b
leal (%rdx,%r9), %r10d
incl %r10d
movw %r10w, 0x6(%rdi,%r9,2)
movdqa %xmm3, %xmm9
pxor %xmm5, %xmm9
movdqa %xmm9, %xmm10
pcmpgtd %xmm0, %xmm10
pshufd $0xa0, %xmm10, %xmm8 # xmm8 = xmm10[0,0,2,2]
pcmpeqd %xmm0, %xmm9
pshufd $0xf5, %xmm9, %xmm9 # xmm9 = xmm9[1,1,3,3]
movdqa %xmm9, %xmm11
pand %xmm8, %xmm11
pshufd $0xf5, %xmm10, %xmm10 # xmm10 = xmm10[1,1,3,3]
por %xmm10, %xmm11
packssdw %xmm11, %xmm11
pxor %xmm6, %xmm11
packssdw %xmm11, %xmm11
pextrw $0x2, %xmm11, %r10d
testb $0x1, %r10b
je 0x60e27e
leal (%rdx,%r9), %r10d
addl $0x2, %r10d
movw %r10w, 0x8(%rdi,%r9,2)
pshufhw $0x84, %xmm8, %xmm8 # xmm8 = xmm8[0,1,2,3,4,5,4,6]
pshufhw $0x84, %xmm9, %xmm9 # xmm9 = xmm9[0,1,2,3,4,5,4,6]
pand %xmm8, %xmm9
pshufhw $0x84, %xmm10, %xmm8 # xmm8 = xmm10[0,1,2,3,4,5,4,6]
por %xmm9, %xmm8
pxor %xmm6, %xmm8
packssdw %xmm8, %xmm8
pextrw $0x3, %xmm8, %r10d
testb $0x1, %r10b
je 0x60e2be
leal (%rdx,%r9), %r10d
addl $0x3, %r10d
movw %r10w, 0xa(%rdi,%r9,2)
movdqa %xmm2, %xmm8
pxor %xmm5, %xmm8
movdqa %xmm8, %xmm10
pcmpgtd %xmm0, %xmm10
pshufd $0xa0, %xmm10, %xmm9 # xmm9 = xmm10[0,0,2,2]
pshuflw $0xe8, %xmm9, %xmm11 # xmm11 = xmm9[0,2,2,3,4,5,6,7]
pcmpeqd %xmm0, %xmm8
pshufd $0xf5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,3,3]
pshuflw $0xe8, %xmm8, %xmm12 # xmm12 = xmm8[0,2,2,3,4,5,6,7]
pand %xmm11, %xmm12
pshufd $0xf5, %xmm10, %xmm10 # xmm10 = xmm10[1,1,3,3]
pshuflw $0xe8, %xmm10, %xmm11 # xmm11 = xmm10[0,2,2,3,4,5,6,7]
por %xmm12, %xmm11
pxor %xmm6, %xmm11
packssdw %xmm11, %xmm11
pextrw $0x4, %xmm11, %r10d
testb $0x1, %r10b
je 0x60e329
leal (%rdx,%r9), %r10d
addl $0x4, %r10d
movw %r10w, 0xc(%rdi,%r9,2)
pand %xmm9, %xmm8
por %xmm10, %xmm8
packssdw %xmm8, %xmm8
pxor %xmm6, %xmm8
packssdw %xmm8, %xmm8
pextrw $0x5, %xmm8, %r10d
testb $0x1, %r10b
je 0x60e35c
leal (%rdx,%r9), %r10d
addl $0x5, %r10d
movw %r10w, 0xe(%rdi,%r9,2)
movdqa %xmm1, %xmm9
pxor %xmm5, %xmm9
movdqa %xmm9, %xmm10
pcmpgtd %xmm0, %xmm10
pshufd $0xa0, %xmm10, %xmm8 # xmm8 = xmm10[0,0,2,2]
pcmpeqd %xmm0, %xmm9
pshufd $0xf5, %xmm9, %xmm9 # xmm9 = xmm9[1,1,3,3]
movdqa %xmm9, %xmm11
pand %xmm8, %xmm11
pshufd $0xf5, %xmm10, %xmm10 # xmm10 = xmm10[1,1,3,3]
por %xmm10, %xmm11
packssdw %xmm11, %xmm11
pxor %xmm6, %xmm11
packssdw %xmm11, %xmm11
pextrw $0x6, %xmm11, %r10d
testb $0x1, %r10b
je 0x60e3bf
leal (%rdx,%r9), %r10d
addl $0x6, %r10d
movw %r10w, 0x10(%rdi,%r9,2)
pshufhw $0x84, %xmm8, %xmm8 # xmm8 = xmm8[0,1,2,3,4,5,4,6]
pshufhw $0x84, %xmm9, %xmm9 # xmm9 = xmm9[0,1,2,3,4,5,4,6]
pand %xmm8, %xmm9
pshufhw $0x84, %xmm10, %xmm8 # xmm8 = xmm10[0,1,2,3,4,5,4,6]
por %xmm9, %xmm8
pxor %xmm6, %xmm8
packssdw %xmm8, %xmm8
pextrw $0x7, %xmm8, %r10d
testb $0x1, %r10b
je 0x60e3ff
leal (%rdx,%r9), %r10d
addl $0x7, %r10d
movw %r10w, 0x12(%rdi,%r9,2)
addq $0x8, %r9
paddq %xmm7, %xmm4
paddq %xmm7, %xmm3
paddq %xmm7, %xmm2
paddq %xmm7, %xmm1
cmpq %r9, %r8
jne 0x60e180
shll $0x10, %ecx
subl %eax, %ecx
leaq (%rdi,%rsi,4), %rsi
addq $0x8, %rsi
xorl %eax, %eax
xorl %edi, %edi
movl %ecx, (%rsi,%rdi,8)
leal -0x1(%rdi), %r8d
movl %r8d, -0x4(%rsi,%rdi,8)
incq %rdi
cmpq %rdi, %rdx
jne 0x60e42d
retq
movq $-0x1, %rax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/fse_compress.c
|
HIST_count_simple
|
/* Byte-histogram, simple scalar variant.
 * Fills count[0..*maxSymbolValuePtr], updates *maxSymbolValuePtr to the
 * largest symbol actually present, and returns the largest single count
 * (0 for empty input). Caller guarantees no byte exceeds the initial
 * *maxSymbolValuePtr (asserted). */
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize)
{
    const BYTE* cursor = (const BYTE*)src;
    const BYTE* const limit = cursor + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;

    /* start from a clean histogram */
    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    /* one bucket increment per input byte */
    for (; cursor < limit; cursor++) {
        assert(*cursor <= maxSymbolValue);
        count[*cursor] += 1;
    }

    /* shrink maxSymbolValue down to the last non-empty bucket */
    while (count[maxSymbolValue] == 0) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    /* report the largest bucket */
    {   unsigned largestCount = 0;
        U32 s;
        for (s = 0; s <= maxSymbolValue; s++) {
            if (count[s] > largestCount) largestCount = count[s];
        }
        return largestCount;
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rcx, %r12
movq %rdx, %r15
movq %rdi, %rbx
leaq (%rdx,%rcx), %r14
movq %rsi, (%rsp)
movl (%rsi), %r13d
leal 0x1(%r13), %edx
shlq $0x2, %rdx
xorl %ebp, %ebp
xorl %esi, %esi
callq 0x3fa90
testq %r12, %r12
je 0x60ebca
movq (%rsp), %rcx
jle 0x60eb9c
movzbl (%r15), %eax
incq %r15
incl (%rbx,%rax,4)
cmpq %r14, %r15
jb 0x60eb8d
movl %r13d, %eax
decl %r13d
cmpl $0x0, (%rbx,%rax,4)
je 0x60eb9c
incl %r13d
movl %r13d, (%rcx)
xorl %ebp, %ebp
movq $-0x1, %rcx
movl 0x4(%rbx,%rcx,4), %edx
cmpl %ebp, %edx
cmoval %edx, %ebp
incq %rcx
cmpq %rax, %rcx
jb 0x60ebb7
jmp 0x60ebd4
movq (%rsp), %rax
movl $0x0, (%rax)
movl %ebp, %eax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/hist.c
|
HIST_count_parallel_wksp
|
/* HIST_count_parallel_wksp() :
 * Byte-histogram over 4 independent counting arrays (1024 U32 in
 * `workSpace`) to break the store-to-load dependency chain, reading the
 * input 16 bytes per iteration. Merges the four lanes, writes the result
 * into `count`, updates *maxSymbolValuePtr, and returns the largest single
 * count. When `check` is set, returns ERROR(maxSymbolValue_tooSmall) if a
 * symbol exceeds the caller-supplied bound. */
static size_t HIST_count_parallel_wksp(
unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize,
HIST_checkInput_e check,
U32* const workSpace)
{
const BYTE* ip = (const BYTE*)source;
const BYTE* const iend = ip+sourceSize;
size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
unsigned max=0;
U32* const Counting1 = workSpace;
U32* const Counting2 = Counting1 + 256;
U32* const Counting3 = Counting2 + 256;
U32* const Counting4 = Counting3 + 256;
/* safety checks */
assert(*maxSymbolValuePtr <= 255);
if (!sourceSize) {
ZSTD_memset(count, 0, countSize);
*maxSymbolValuePtr = 0;
return 0;
}
ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
/* by stripes of 16 bytes */
/* `cached` always holds the next 4 bytes, loaded one iteration ahead */
{ U32 cached = MEM_read32(ip); ip += 4;
while (ip < iend-15) {
U32 c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
}
ip-=4; /* the last prefetched word was never consumed; rewind */
}
/* finish last symbols */
while (ip<iend) Counting1[*ip++]++;
/* merge the 4 lanes into Counting1 and track the maximum on the fly */
{ U32 s;
for (s=0; s<256; s++) {
Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
if (Counting1[s] > max) max = Counting1[s];
} }
{ unsigned maxSymbolValue = 255;
while (!Counting1[maxSymbolValue]) maxSymbolValue--;
if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
*maxSymbolValuePtr = maxSymbolValue;
ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
}
return (size_t)max;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %rsi, %rbx
movl (%rsi), %r14d
incl %r14d
shlq $0x2, %r14
testq %rcx, %rcx
je 0x60ed95
movq %r9, %r15
movq %rcx, %rbp
movq %rdx, %r13
movl %r8d, 0xc(%rsp)
movq %rdi, 0x10(%rsp)
leaq (%rdx,%rcx), %r12
movl $0x1000, %edx # imm = 0x1000
movq %r9, %rdi
xorl %esi, %esi
callq 0x3fa90
cmpq $0x14, %rbp
jl 0x60edad
leaq -0xf(%r12), %rax
movl (%r13), %esi
movl $0xff, %ecx
movl $0x3fc, %edx # imm = 0x3FC
movl 0x4(%r13), %edi
movl %esi, %r8d
andl %ecx, %r8d
incl (%r15,%r8,4)
movl %esi, %r8d
shrl $0x8, %r8d
andl %ecx, %r8d
incl 0x400(%r15,%r8,4)
movl %esi, %r8d
shrl $0xe, %r8d
andl %edx, %r8d
incl 0x800(%r15,%r8)
shrl $0x18, %esi
incl 0xc00(%r15,%rsi,4)
movl 0x8(%r13), %esi
movl %edi, %r8d
andl %ecx, %r8d
incl (%r15,%r8,4)
movl %edi, %r8d
shrl $0x8, %r8d
andl %ecx, %r8d
incl 0x400(%r15,%r8,4)
movl %edi, %r8d
shrl $0xe, %r8d
andl %edx, %r8d
incl 0x800(%r15,%r8)
shrl $0x18, %edi
incl 0xc00(%r15,%rdi,4)
movl 0xc(%r13), %edi
movl %esi, %r8d
andl %ecx, %r8d
incl (%r15,%r8,4)
movl %esi, %r8d
shrl $0x8, %r8d
andl %ecx, %r8d
incl 0x400(%r15,%r8,4)
movl %esi, %r8d
shrl $0xe, %r8d
andl %edx, %r8d
incl 0x800(%r15,%r8)
shrl $0x18, %esi
incl 0xc00(%r15,%rsi,4)
movl 0x10(%r13), %esi
movl %edi, %r8d
andl %ecx, %r8d
incl (%r15,%r8,4)
movl %edi, %r8d
shrl $0x8, %r8d
andl %ecx, %r8d
incl 0x400(%r15,%r8,4)
movl %edi, %r8d
shrl $0xe, %r8d
andl %edx, %r8d
incl 0x800(%r15,%r8)
shrl $0x18, %edi
incl 0xc00(%r15,%rdi,4)
leaq 0x10(%r13), %rdi
addq $0x14, %r13
cmpq %rax, %r13
movq %rdi, %r13
jb 0x60ec8b
jmp 0x60edbc
xorl %r13d, %r13d
xorl %esi, %esi
movq %r14, %rdx
callq 0x3fa90
movl $0x0, (%rbx)
jmp 0x60eeb5
movq %r13, %rdi
jmp 0x60edbc
movzbl (%rdi), %eax
incq %rdi
incl (%r15,%rax,4)
cmpq %r12, %rdi
jb 0x60edb2
pxor %xmm1, %xmm1
xorl %eax, %eax
movdqa 0x2c961(%rip), %xmm0 # 0x63b730
movl 0xc(%rsp), %ecx
movdqu 0x400(%r15,%rax,4), %xmm2
movdqu 0x800(%r15,%rax,4), %xmm3
paddd %xmm2, %xmm3
movdqu 0xc00(%r15,%rax,4), %xmm2
movdqu (%r15,%rax,4), %xmm4
paddd %xmm2, %xmm4
paddd %xmm3, %xmm4
movdqu %xmm4, (%r15,%rax,4)
movdqa %xmm4, %xmm2
pxor %xmm0, %xmm2
movdqa %xmm1, %xmm3
pxor %xmm0, %xmm3
pcmpgtd %xmm3, %xmm2
pand %xmm2, %xmm4
pandn %xmm1, %xmm2
movdqa %xmm2, %xmm1
por %xmm4, %xmm1
addq $0x4, %rax
cmpq $0x100, %rax # imm = 0x100
jne 0x60edd3
pshufd $0xee, %xmm1, %xmm2 # xmm2 = xmm1[2,3,2,3]
movdqa %xmm1, %xmm3
pxor %xmm0, %xmm3
movdqa %xmm2, %xmm4
pxor %xmm0, %xmm4
pcmpgtd %xmm4, %xmm3
pand %xmm3, %xmm1
pandn %xmm2, %xmm3
por %xmm1, %xmm3
pshufd $0x55, %xmm3, %xmm1 # xmm1 = xmm3[1,1,1,1]
movdqa %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm0
pcmpgtd %xmm0, %xmm2
pand %xmm2, %xmm3
pandn %xmm1, %xmm2
por %xmm3, %xmm2
movd %xmm2, %ebp
movl $0x100, %eax # imm = 0x100
decl %eax
cmpl $0x0, (%r15,%rax,4)
je 0x60ee88
testl %ecx, %ecx
je 0x60eea0
movq $-0x30, %r13
cmpl (%rbx), %eax
ja 0x60eeb5
movl %eax, (%rbx)
movq 0x10(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
callq 0x3f470
movl %ebp, %r13d
movq %r13, %rax
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/hist.c
|
HIST_count_wksp
|
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize,
void* workSpace, size_t workSpaceSize)
{
if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
if (*maxSymbolValuePtr < 255)
return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
*maxSymbolValuePtr = 255;
return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}
|
movq %r8, %r10
movq $-0x1, %rax
testb $0x3, %r10b
jne 0x60eee7
movq $-0x42, %rax
cmpq $0x1000, %r9 # imm = 0x1000
jae 0x60eee8
retq
cmpl $0xfe, (%rsi)
ja 0x60eefe
movl $0x1, %r8d
movq %r10, %r9
jmp 0x60ec24
movl $0xff, (%rsi)
movq %r10, %r8
jmp 0x60ebe5
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/hist.c
|
HUF_writeCTable_wksp
|
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
void* workspace, size_t workspaceSize)
{
BYTE* op = (BYTE*)dst;
U32 n;
HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
/* check conditions */
if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
/* convert to weight */
wksp->bitsToWeight[0] = 0;
for (n=1; n<huffLog+1; n++)
wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
for (n=0; n<maxSymbolValue; n++)
wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
/* attempt weights compression by FSE */
{ CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
op[0] = (BYTE)hSize;
return hSize+1;
} }
/* write raw values as 4-bits (max : 15) */
if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
for (n=0; n<maxSymbolValue; n+=2)
op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
return ((maxSymbolValue+1)/2) + 1;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq $-0x1, %r14
cmpq $0x2c0, 0x70(%rsp) # imm = 0x2C0
jb 0x60f7a3
movl %ecx, %ebx
movq $-0x2e, %r14
cmpl $0xff, %ecx
ja 0x60f7a3
movq %r9, %r12
movb $0x0, 0x1b4(%r9)
leal 0x1(%r8), %eax
cmpl $0x2, %eax
jb 0x60f5ab
movl %eax, %ecx
leaq 0xe(%rcx), %rax
addq $-0x2, %rcx
movq %rcx, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
movl %r8d, %ecx
andq $-0x10, %rax
negq %rax
xorl %r10d, %r10d
movdqa 0x87984(%rip), %xmm3 # 0x696950
movdqa 0x8798c(%rip), %xmm4 # 0x696960
movdqa 0x87994(%rip), %xmm5 # 0x696970
movdqa 0x8799c(%rip), %xmm6 # 0x696980
movdqa 0x2c724(%rip), %xmm7 # 0x63b710
movdqa 0x2c72b(%rip), %xmm8 # 0x63b720
movdqa 0x2c732(%rip), %xmm9 # 0x63b730
pcmpeqd %xmm10, %xmm10
xorl %r9d, %r9d
movq %r9, %xmm11
pshufd $0x44, %xmm11, %xmm11 # xmm11 = xmm11[0,1,0,1]
movdqa %xmm11, %xmm13
por %xmm8, %xmm13
movdqa %xmm0, %xmm12
pxor %xmm9, %xmm12
pxor %xmm9, %xmm13
movdqa %xmm13, %xmm15
pcmpgtd %xmm12, %xmm15
pshufd $0xa0, %xmm15, %xmm14 # xmm14 = xmm15[0,0,2,2]
pshuflw $0xe8, %xmm14, %xmm1 # xmm1 = xmm14[0,2,2,3,4,5,6,7]
pcmpeqd %xmm12, %xmm13
pshufd $0xf5, %xmm13, %xmm13 # xmm13 = xmm13[1,1,3,3]
pshuflw $0xe8, %xmm13, %xmm2 # xmm2 = xmm13[0,2,2,3,4,5,6,7]
pand %xmm1, %xmm2
pshufd $0xf5, %xmm15, %xmm15 # xmm15 = xmm15[1,1,3,3]
pshuflw $0xe8, %xmm15, %xmm1 # xmm1 = xmm15[0,2,2,3,4,5,6,7]
por %xmm2, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
movd %xmm1, %r8d
testb $0x1, %r8b
je 0x60f085
leal (%rcx,%r10), %r8d
movb %r8b, 0x1b5(%r12,%r9)
pand %xmm14, %xmm13
por %xmm15, %xmm13
packssdw %xmm13, %xmm13
pxor %xmm10, %xmm13
packssdw %xmm13, %xmm13
packsswb %xmm13, %xmm13
movd %xmm13, %r8d
shrl $0x8, %r8d
testb $0x1, %r8b
je 0x60f0c1
leal (%rcx,%r10), %r8d
decb %r8b
movb %r8b, 0x1b6(%r12,%r9)
movdqa %xmm11, %xmm1
por %xmm7, %xmm1
pxor %xmm9, %xmm1
movdqa %xmm1, %xmm2
pcmpgtd %xmm12, %xmm2
pshufd $0xa0, %xmm2, %xmm13 # xmm13 = xmm2[0,0,2,2]
pcmpeqd %xmm12, %xmm1
pshufd $0xf5, %xmm1, %xmm14 # xmm14 = xmm1[1,1,3,3]
movdqa %xmm14, %xmm1
pand %xmm13, %xmm1
pshufd $0xf5, %xmm2, %xmm15 # xmm15 = xmm2[1,1,3,3]
por %xmm15, %xmm1
packssdw %xmm1, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
packsswb %xmm1, %xmm1
movd %xmm1, %r8d
shrl $0x10, %r8d
testb $0x1, %r8b
je 0x60f12e
leal (%rcx,%r10), %r8d
addb $-0x2, %r8b
movb %r8b, 0x1b7(%r12,%r9)
pshufhw $0x84, %xmm13, %xmm1 # xmm1 = xmm13[0,1,2,3,4,5,4,6]
pshufhw $0x84, %xmm14, %xmm2 # xmm2 = xmm14[0,1,2,3,4,5,4,6]
pand %xmm1, %xmm2
pshufhw $0x84, %xmm15, %xmm1 # xmm1 = xmm15[0,1,2,3,4,5,4,6]
por %xmm2, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
packsswb %xmm1, %xmm1
movd %xmm1, %r8d
shrl $0x18, %r8d
testb $0x1, %r8b
je 0x60f174
leal (%rcx,%r10), %r8d
addb $-0x3, %r8b
movb %r8b, 0x1b8(%r12,%r9)
movdqa %xmm11, %xmm1
por %xmm6, %xmm1
pxor %xmm9, %xmm1
movdqa %xmm1, %xmm2
pcmpgtd %xmm12, %xmm2
pshufd $0xa0, %xmm2, %xmm14 # xmm14 = xmm2[0,0,2,2]
pshuflw $0xe8, %xmm14, %xmm15 # xmm15 = xmm14[0,2,2,3,4,5,6,7]
pcmpeqd %xmm12, %xmm1
pshufd $0xf5, %xmm1, %xmm13 # xmm13 = xmm1[1,1,3,3]
pshuflw $0xe8, %xmm13, %xmm1 # xmm1 = xmm13[0,2,2,3,4,5,6,7]
pand %xmm15, %xmm1
pshufd $0xf5, %xmm2, %xmm15 # xmm15 = xmm2[1,1,3,3]
pshuflw $0xe8, %xmm15, %xmm2 # xmm2 = xmm15[0,2,2,3,4,5,6,7]
por %xmm1, %xmm2
pxor %xmm10, %xmm2
packssdw %xmm2, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x2, %xmm1, %r8d
testb $0x1, %r8b
je 0x60f1e6
leal (%rcx,%r10), %r8d
addb $-0x4, %r8b
movb %r8b, 0x1b9(%r12,%r9)
pand %xmm14, %xmm13
por %xmm15, %xmm13
packssdw %xmm13, %xmm13
pxor %xmm10, %xmm13
packssdw %xmm13, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x2, %xmm1, %r8d
shrl $0x8, %r8d
testb $0x1, %r8b
je 0x60f223
leal (%rcx,%r10), %r8d
addb $-0x5, %r8b
movb %r8b, 0x1ba(%r12,%r9)
movdqa %xmm11, %xmm1
por %xmm5, %xmm1
pxor %xmm9, %xmm1
movdqa %xmm1, %xmm2
pcmpgtd %xmm12, %xmm2
pshufd $0xa0, %xmm2, %xmm13 # xmm13 = xmm2[0,0,2,2]
pcmpeqd %xmm12, %xmm1
pshufd $0xf5, %xmm1, %xmm14 # xmm14 = xmm1[1,1,3,3]
movdqa %xmm14, %xmm1
pand %xmm13, %xmm1
pshufd $0xf5, %xmm2, %xmm15 # xmm15 = xmm2[1,1,3,3]
por %xmm15, %xmm1
packssdw %xmm1, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x3, %xmm1, %r8d
testb $0x1, %r8b
je 0x60f28d
leal (%rcx,%r10), %r8d
addb $-0x6, %r8b
movb %r8b, 0x1bb(%r12,%r9)
pshufhw $0x84, %xmm13, %xmm1 # xmm1 = xmm13[0,1,2,3,4,5,4,6]
pshufhw $0x84, %xmm14, %xmm2 # xmm2 = xmm14[0,1,2,3,4,5,4,6]
pand %xmm1, %xmm2
pshufhw $0x84, %xmm15, %xmm1 # xmm1 = xmm15[0,1,2,3,4,5,4,6]
por %xmm2, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x3, %xmm1, %r8d
shrl $0x8, %r8d
testb $0x1, %r8b
je 0x60f2d4
leal (%rcx,%r10), %r8d
addb $-0x7, %r8b
movb %r8b, 0x1bc(%r12,%r9)
movdqa %xmm11, %xmm1
por %xmm4, %xmm1
pxor %xmm9, %xmm1
movdqa %xmm1, %xmm2
pcmpgtd %xmm12, %xmm2
pshufd $0xa0, %xmm2, %xmm14 # xmm14 = xmm2[0,0,2,2]
pshuflw $0xe8, %xmm14, %xmm15 # xmm15 = xmm14[0,2,2,3,4,5,6,7]
pcmpeqd %xmm12, %xmm1
pshufd $0xf5, %xmm1, %xmm13 # xmm13 = xmm1[1,1,3,3]
pshuflw $0xe8, %xmm13, %xmm1 # xmm1 = xmm13[0,2,2,3,4,5,6,7]
pand %xmm15, %xmm1
pshufd $0xf5, %xmm2, %xmm15 # xmm15 = xmm2[1,1,3,3]
pshuflw $0xe8, %xmm15, %xmm2 # xmm2 = xmm15[0,2,2,3,4,5,6,7]
por %xmm1, %xmm2
pxor %xmm10, %xmm2
packssdw %xmm2, %xmm2
packsswb %xmm2, %xmm1
pextrw $0x4, %xmm1, %r8d
testb $0x1, %r8b
je 0x60f346
leal (%rcx,%r10), %r8d
addb $-0x8, %r8b
movb %r8b, 0x1bd(%r12,%r9)
pand %xmm14, %xmm13
por %xmm15, %xmm13
packssdw %xmm13, %xmm13
pxor %xmm10, %xmm13
packssdw %xmm13, %xmm13
packsswb %xmm13, %xmm1
pextrw $0x4, %xmm1, %r8d
shrl $0x8, %r8d
testb $0x1, %r8b
je 0x60f384
leal (%rcx,%r10), %r8d
addb $-0x9, %r8b
movb %r8b, 0x1be(%r12,%r9)
movdqa %xmm11, %xmm1
por %xmm3, %xmm1
pxor %xmm9, %xmm1
movdqa %xmm1, %xmm2
pcmpgtd %xmm12, %xmm2
pshufd $0xa0, %xmm2, %xmm13 # xmm13 = xmm2[0,0,2,2]
pcmpeqd %xmm12, %xmm1
pshufd $0xf5, %xmm1, %xmm14 # xmm14 = xmm1[1,1,3,3]
movdqa %xmm14, %xmm1
pand %xmm13, %xmm1
pshufd $0xf5, %xmm2, %xmm15 # xmm15 = xmm2[1,1,3,3]
por %xmm15, %xmm1
packssdw %xmm1, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x5, %xmm1, %r8d
testb $0x1, %r8b
je 0x60f3ee
leal (%rcx,%r10), %r8d
addb $-0xa, %r8b
movb %r8b, 0x1bf(%r12,%r9)
pshufhw $0x84, %xmm13, %xmm1 # xmm1 = xmm13[0,1,2,3,4,5,4,6]
pshufhw $0x84, %xmm14, %xmm2 # xmm2 = xmm14[0,1,2,3,4,5,4,6]
pand %xmm1, %xmm2
pshufhw $0x84, %xmm15, %xmm1 # xmm1 = xmm15[0,1,2,3,4,5,4,6]
por %xmm2, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x5, %xmm1, %r8d
shrl $0x8, %r8d
testb $0x1, %r8b
je 0x60f435
leal (%rcx,%r10), %r8d
addb $-0xb, %r8b
movb %r8b, 0x1c0(%r12,%r9)
movdqa %xmm11, %xmm1
por 0x874fe(%rip), %xmm1 # 0x696940
pxor %xmm9, %xmm1
movdqa %xmm1, %xmm2
pcmpgtd %xmm12, %xmm2
pshufd $0xa0, %xmm2, %xmm14 # xmm14 = xmm2[0,0,2,2]
pshuflw $0xe8, %xmm14, %xmm15 # xmm15 = xmm14[0,2,2,3,4,5,6,7]
pcmpeqd %xmm12, %xmm1
pshufd $0xf5, %xmm1, %xmm13 # xmm13 = xmm1[1,1,3,3]
pshuflw $0xe8, %xmm13, %xmm1 # xmm1 = xmm13[0,2,2,3,4,5,6,7]
pand %xmm15, %xmm1
pshufd $0xf5, %xmm2, %xmm15 # xmm15 = xmm2[1,1,3,3]
pshuflw $0xe8, %xmm15, %xmm2 # xmm2 = xmm15[0,2,2,3,4,5,6,7]
por %xmm1, %xmm2
pxor %xmm10, %xmm2
packssdw %xmm2, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x6, %xmm1, %r8d
testb $0x1, %r8b
je 0x60f4ab
leal (%rcx,%r10), %r8d
addb $-0xc, %r8b
movb %r8b, 0x1c1(%r12,%r9)
pand %xmm14, %xmm13
por %xmm15, %xmm13
packssdw %xmm13, %xmm13
pxor %xmm10, %xmm13
packssdw %xmm13, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x6, %xmm1, %r8d
shrl $0x8, %r8d
testb $0x1, %r8b
je 0x60f4e8
leal (%rcx,%r10), %r8d
addb $-0xd, %r8b
movb %r8b, 0x1c2(%r12,%r9)
por 0x8743f(%rip), %xmm11 # 0x696930
pxor %xmm9, %xmm11
movdqa %xmm11, %xmm1
pcmpgtd %xmm12, %xmm1
pshufd $0xa0, %xmm1, %xmm13 # xmm13 = xmm1[0,0,2,2]
pcmpeqd %xmm12, %xmm11
pshufd $0xf5, %xmm11, %xmm11 # xmm11 = xmm11[1,1,3,3]
movdqa %xmm11, %xmm2
pand %xmm13, %xmm2
pshufd $0xf5, %xmm1, %xmm12 # xmm12 = xmm1[1,1,3,3]
por %xmm12, %xmm2
packssdw %xmm2, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x7, %xmm1, %r8d
testb $0x1, %r8b
je 0x60f553
leal (%rcx,%r10), %r8d
addb $-0xe, %r8b
movb %r8b, 0x1c3(%r12,%r9)
pshufhw $0x84, %xmm13, %xmm1 # xmm1 = xmm13[0,1,2,3,4,5,4,6]
pshufhw $0x84, %xmm11, %xmm2 # xmm2 = xmm11[0,1,2,3,4,5,4,6]
pand %xmm1, %xmm2
pshufhw $0x84, %xmm12, %xmm1 # xmm1 = xmm12[0,1,2,3,4,5,4,6]
por %xmm2, %xmm1
pxor %xmm10, %xmm1
packssdw %xmm1, %xmm1
packsswb %xmm1, %xmm1
pextrw $0x7, %xmm1, %r8d
shrl $0x8, %r8d
testb $0x1, %r8b
je 0x60f59a
leal (%rcx,%r10), %r8d
addb $-0xf, %r8b
movb %r8b, 0x1c4(%r12,%r9)
addq $0x10, %r9
addq $-0x10, %r10
cmpq %r10, %rax
jne 0x60f006
testl %ebx, %ebx
je 0x60f62a
movl %ebx, %r13d
xorl %eax, %eax
movzbl 0x2(%rdx,%rax,4), %ecx
movb 0x1b4(%r12,%rcx), %cl
movb %cl, 0x1c1(%r12,%rax)
incq %rax
cmpq %rax, %r13
jne 0x60f5b4
leaq 0x1(%rdi), %rbp
leaq 0x1c1(%r12), %r15
movl $0xc, 0x4(%rsp)
cmpl $0x1, %ebx
je 0x60f63b
movq %rsi, 0x8(%rsp)
movq %rdi, 0x10(%rsp)
leaq 0x164(%r12), %r14
leaq 0x4(%rsp), %rsi
movq %r14, %rdi
movq %r15, %rdx
movq %r13, %rcx
callq 0x60eb52
cmpl %ebx, %eax
sete %cl
cmpl $0x1, %eax
sete %al
orb %cl, %al
je 0x60f699
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
jmp 0x60f63b
leaq 0x1(%rdi), %rbp
addq $0x1c1, %r12 # imm = 0x1C1
xorl %r13d, %r13d
movq %r12, %r15
movq $-0x1, %r14
cmpl $0x80, %ebx
ja 0x60f7a3
leal 0x1(%rbx), %eax
shrl %eax
incl %eax
movq $-0x46, %r14
cmpq %rsi, %rax
ja 0x60f7a3
leal 0x7f(%rbx), %ecx
movb %cl, (%rdi)
movb $0x0, (%r15,%r13)
testl %ebx, %ebx
je 0x60f7a0
xorl %ecx, %ecx
movb (%r15,%rcx), %dl
shlb $0x4, %dl
addb 0x1(%r15,%rcx), %dl
movb %dl, (%rbp)
addq $0x2, %rcx
incq %rbp
cmpq %r13, %rcx
jb 0x60f679
jmp 0x60f7a0
movl 0x4(%rsp), %edx
movl $0x6, %edi
movq %r13, %rsi
callq 0x60dd9f
leaq 0x198(%r12), %rdi
movl 0x4(%rsp), %r8d
movq %rdi, 0x20(%rsp)
movl %eax, 0x1c(%rsp)
movl %eax, %esi
movq %r14, %rdx
movq %r13, %rcx
xorl %r9d, %r9d
callq 0x60dded
cmpq $-0x78, %rax
ja 0x60f7a0
movq 0x8(%rsp), %rax
leaq -0x1(%rax), %rsi
movl 0x4(%rsp), %ecx
movq %rbp, %rdi
movq %rsi, 0x30(%rsp)
movq 0x20(%rsp), %rdx
movl 0x1c(%rsp), %r14d
movl %r14d, %r8d
callq 0x60da8f
cmpq $-0x78, %rax
ja 0x60f7a0
movq %rax, 0x28(%rsp)
movl 0x4(%rsp), %edx
leaq 0xec(%r12), %r8
movl $0x78, %r9d
movq %r12, %rdi
movq 0x20(%rsp), %rsi
movl %r14d, %ecx
callq 0x60d888
cmpq $-0x78, %rax
ja 0x60f7a0
movq 0x28(%rsp), %rax
leaq (%rax,%rbp), %r14
movq 0x30(%rsp), %rsi
subq %rax, %rsi
movq %r14, %rdi
movq %r15, %rdx
movq %r13, %rcx
movq %r12, %r8
callq 0x60e468
cmpq $-0x78, %rax
ja 0x60f7a0
testq %rax, %rax
je 0x60f61e
addq %rax, %r14
subq %rbp, %r14
cmpq $-0x78, %r14
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
ja 0x60f7a3
cmpq $0x2, %r14
setb %al
movl %ebx, %ecx
shrl %ecx
cmpq %rcx, %r14
setae %cl
orb %al, %cl
jne 0x60f63b
movb %r14b, (%rdi)
incq %r14
jmp 0x60f7a3
movq %rax, %r14
movq %r14, %rax
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/huf_compress.c
|
HUF_readCTable
|
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
U32 nbSymbols = 0;
/* get symbol weights */
CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
*hasZeroWeights = (rankVal[0] > 0);
/* check result */
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
/* Prepare base value per rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<=tableLog; n++) {
U32 curr = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = curr;
} }
/* fill nbBits */
{ U32 n; for (n=0; n<nbSymbols; n++) {
const U32 w = huffWeight[n];
CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
} }
/* fill val */
{ U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
{ U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
/* determine stating value per rank */
valPerRank[tableLog+1] = 0; /* for w==0 */
{ U16 min = 0;
U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
valPerRank[n] = min; /* get starting value within each rank */
min += nbPerRank[n];
min >>= 1;
} }
/* assign value within rank, symbol order */
{ U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
}
*maxSymbolValuePtr = nbSymbols - 1;
return readSize;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x190, %rsp # imm = 0x190
movq %r8, %r15
movq %rdx, %r9
movq %rsi, %rbx
movq %rdi, %r14
xorl %edx, %edx
leaq 0xc(%rsp), %r8
movl %edx, (%r8)
leaq 0x8(%rsp), %rax
movl %edx, (%rax)
movq %rcx, (%rsp)
leaq 0x90(%rsp), %rdi
leaq 0x50(%rsp), %rdx
movl $0x100, %esi # imm = 0x100
movq %rax, %rcx
callq 0x60b530
cmpq $-0x78, %rax
ja 0x60f953
xorl %ecx, %ecx
cmpl $0x0, 0x50(%rsp)
setne %cl
movl %ecx, (%r15)
movl 0xc(%rsp), %esi
movq $-0x2c, %rcx
cmpq $0xc, %rsi
ja 0x60f956
movl 0x8(%rsp), %edx
movl (%rbx), %edi
incl %edi
movq $-0x30, %rcx
cmpl %edi, %edx
ja 0x60f956
testl %esi, %esi
je 0x60f882
xorl %ecx, %ecx
xorl %edi, %edi
movl 0x54(%rsp,%rcx,4), %r8d
shll %cl, %r8d
addl %edi, %r8d
movl %edi, 0x54(%rsp,%rcx,4)
incq %rcx
movl %r8d, %edi
cmpq %rcx, %rsi
jne 0x60f868
testq %rdx, %rdx
je 0x60f8e8
leal 0x1(%rsi), %ecx
xorl %edi, %edi
xorl %r8d, %r8d
movb 0x90(%rsp,%r8), %r9b
movl %ecx, %r10d
subb %r9b, %r10b
testb %r9b, %r9b
movzbl %r10b, %r9d
cmovel %edi, %r9d
movb %r9b, 0x2(%r14,%r8,4)
incq %r8
cmpq %r8, %rdx
jne 0x60f88f
xorps %xmm0, %xmm0
movups %xmm0, 0x3c(%rsp)
movaps %xmm0, 0x30(%rsp)
movups %xmm0, 0x1c(%rsp)
movaps %xmm0, 0x10(%rsp)
testq %rdx, %rdx
je 0x60f8ff
xorl %ecx, %ecx
movzbl 0x2(%r14,%rcx,4), %edi
incw 0x30(%rsp,%rdi,2)
incq %rcx
cmpq %rcx, %rdx
jne 0x60f8d3
jmp 0x60f8ff
xorps %xmm0, %xmm0
movups %xmm0, 0x3c(%rsp)
movaps %xmm0, 0x30(%rsp)
movups %xmm0, 0x1c(%rsp)
movaps %xmm0, 0x10(%rsp)
movw $0x0, 0x12(%rsp,%rsi,2)
testl %esi, %esi
je 0x60f924
xorl %ecx, %ecx
movl %esi, %edi
movw %cx, 0x10(%rsp,%rsi,2)
addw 0x30(%rsp,%rsi,2), %cx
movzwl %cx, %ecx
shrl %ecx
decq %rsi
decl %edi
jne 0x60f90e
testq %rdx, %rdx
je 0x60f94f
xorl %ecx, %ecx
movzbl 0x2(%r14,%rcx,4), %esi
movzwl 0x10(%rsp,%rsi,2), %edi
movl %edi, %r8d
incl %r8d
movw %r8w, 0x10(%rsp,%rsi,2)
movw %di, (%r14,%rcx,4)
incq %rcx
cmpq %rcx, %rdx
jne 0x60f92b
decl %edx
movl %edx, (%rbx)
movq %rax, %rcx
movq %rcx, %rax
addq $0x190, %rsp # imm = 0x190
popq %rbx
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/huf_compress.c
|
HUF_compress1X_usingCTable_internal
|
FORCE_INLINE_TEMPLATE size_t
HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
{
const BYTE* ip = (const BYTE*) src;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
size_t n;
BIT_CStream_t bitC;
/* init */
if (dstSize < 8) return 0; /* not enough space to compress */
{ size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
if (HUF_isError(initErr)) return 0; }
n = srcSize & ~3; /* join to mod 4 */
switch (srcSize & 3)
{
case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
HUF_FLUSHBITS_2(&bitC);
/* fall-through */
case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
HUF_FLUSHBITS_1(&bitC);
/* fall-through */
case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
HUF_FLUSHBITS(&bitC);
/* fall-through */
case 0 : /* fall-through */
default: break;
}
for (; n>0; n-=4) { /* note : n&3==0 at this stage */
HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
HUF_FLUSHBITS_1(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
HUF_FLUSHBITS_2(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
HUF_FLUSHBITS_1(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
HUF_FLUSHBITS(&bitC);
}
return BIT_closeCStream(&bitC);
}
|
cmpq $0x8, %rsi
jae 0x610116
xorl %eax, %eax
retq
xorl %eax, %eax
cmpq $0x8, %rsi
je 0x6102a6
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq %rcx, %r9
leaq (%rdi,%rsi), %r10
addq $-0x8, %r10
movq %rcx, %r11
andq $-0x4, %r11
andl $0x3, %r9d
leaq 0xc707a(%rip), %rcx # 0x6d71c0
movslq (%rcx,%r9,4), %rsi
addq %rcx, %rsi
jmpq *%rsi
movq %rdi, %rbx
jmp 0x6101c4
xorl %esi, %esi
jmp 0x610169
movzbl 0x2(%rdx,%r11), %eax
movzwl (%r8,%rax,4), %esi
movzbl 0x2(%r8,%rax,4), %eax
movzbl 0x1(%rdx,%r11), %r9d
movzwl (%r8,%r9,4), %ebx
movl %eax, %ecx
shlq %cl, %rbx
movzbl 0x2(%r8,%r9,4), %ecx
orq %rsi, %rbx
addl %eax, %ecx
movl %ecx, %eax
jmp 0x61018a
xorl %ebx, %ebx
movzbl (%rdx,%r11), %ecx
movzwl (%r8,%rcx,4), %r9d
movzbl 0x2(%r8,%rcx,4), %esi
movl %eax, %ecx
shlq %cl, %r9
orq %rbx, %r9
addl %eax, %esi
movl %esi, %ebx
shrl $0x3, %ebx
addq %rdi, %rbx
cmpq %r10, %rbx
cmovaq %r10, %rbx
movq %r9, (%rdi)
movl %esi, %eax
andl $0x7, %eax
andb $-0x8, %sil
movl %esi, %ecx
shrq %cl, %r9
testq %r11, %r11
je 0x61026b
movzbl -0x1(%rdx,%r11), %ecx
movzwl (%r8,%rcx,4), %r14d
movzbl 0x2(%r8,%rcx,4), %esi
movl %eax, %ecx
shlq %cl, %r14
orq %r9, %r14
addl %eax, %esi
movzbl -0x2(%rdx,%r11), %eax
movzwl (%r8,%rax,4), %r9d
movl %esi, %ecx
shlq %cl, %r9
movzbl 0x2(%r8,%rax,4), %ecx
addl %esi, %ecx
movzbl -0x3(%rdx,%r11), %eax
movzwl (%r8,%rax,4), %r15d
shlq %cl, %r15
orq %r14, %r9
movzbl 0x2(%r8,%rax,4), %eax
orq %r9, %r15
addl %ecx, %eax
movzbl -0x4(%rdx,%r11), %ecx
movzwl (%r8,%rcx,4), %r12d
movzbl 0x2(%r8,%rcx,4), %esi
movl %eax, %ecx
shlq %cl, %r12
orq %r15, %r12
addl %eax, %esi
movl %esi, %r14d
shrl $0x3, %r14d
addq %rbx, %r14
cmpq %r10, %r14
cmovaq %r10, %r14
movl %esi, %eax
andb $-0x8, %sil
movq %r12, %r9
movl %esi, %ecx
shrq %cl, %r9
movq %r12, (%rbx)
andl $0x7, %eax
movq %r14, %rbx
addq $-0x4, %r11
jne 0x6101cd
jmp 0x61026e
movq %rbx, %r14
btsq %rax, %r9
incl %eax
movl %eax, %ecx
shrl $0x3, %ecx
movq %r9, (%r14)
addq %r14, %rcx
cmpq %r10, %rcx
cmovaq %r10, %rcx
cmpq %r10, %rcx
popq %rbx
popq %r12
popq %r14
popq %r15
jae 0x610113
andl $0x7, %eax
cmpl $0x1, %eax
adcq $-0x1, %rdi
subq %rdi, %rcx
movq %rcx, %rax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/huf_compress.c
|
HUF_compress4X_usingCTable_internal
|
static size_t
HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable, int bmi2)
{
size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
if (srcSize < 12) return 0; /* no saving possible : too small input */
op += 6; /* jumpTable */
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
MEM_writeLE16(ostart, (U16)cSize);
op += cSize;
}
ip += segmentSize;
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
MEM_writeLE16(ostart+2, (U16)cSize);
op += cSize;
}
ip += segmentSize;
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
MEM_writeLE16(ostart+4, (U16)cSize);
op += cSize;
}
ip += segmentSize;
assert(op <= oend);
assert(ip <= iend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
if (cSize==0) return 0;
op += cSize;
}
return (size_t)(op-ostart);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %rcx, %rbp
cmpq $0x11, %rsi
setb %al
cmpq $0xc, %rcx
setb %cl
orb %al, %cl
je 0x6102d3
xorl %eax, %eax
jmp 0x6103c9
movq %r8, %r15
movq %rdx, %r12
movq %rsi, %r13
leaq 0x3(%rbp), %rbx
shrq $0x2, %rbx
movq %rdi, 0x8(%rsp)
leaq 0x6(%rdi), %r14
addq $-0x6, %rsi
movq %r14, %rdi
movq %rbx, %rcx
callq 0x61010d
cmpq $-0x78, %rax
ja 0x6103c9
testq %rax, %rax
je 0x6102cc
movq 0x8(%rsp), %rcx
addq %rcx, %r13
movw %ax, (%rcx)
addq %rax, %r14
leaq (%r12,%rbx), %rdx
movq %r13, %rsi
subq %r14, %rsi
movq %r14, %rdi
movq %rdx, 0x10(%rsp)
movq %rbx, %rcx
movq %r15, %r8
callq 0x61010d
cmpq $-0x78, %rax
ja 0x6103c9
testq %rax, %rax
je 0x6102cc
movq 0x8(%rsp), %rcx
movw %ax, 0x2(%rcx)
addq %rax, %r14
movq 0x10(%rsp), %rdx
addq %rbx, %rdx
movq %r13, %rsi
subq %r14, %rsi
movq %r14, %rdi
movq %rdx, 0x10(%rsp)
movq %rbx, %rcx
movq %r15, %r8
callq 0x61010d
cmpq $-0x78, %rax
ja 0x6103c9
testq %rax, %rax
je 0x6102cc
addq %rbp, %r12
movq 0x8(%rsp), %rcx
movw %ax, 0x4(%rcx)
addq %rax, %r14
movq 0x10(%rsp), %rdx
addq %rbx, %rdx
subq %r14, %r13
subq %rdx, %r12
movq %r14, %rdi
movq %r13, %rsi
movq %r12, %rcx
movq %r15, %r8
callq 0x61010d
cmpq $-0x78, %rax
ja 0x6103c9
testq %rax, %rax
je 0x6102cc
addq %rax, %r14
subq 0x8(%rsp), %r14
movq %r14, %rax
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/huf_compress.c
|
HUF_compress_internal
|
static size_t
HUF_compress_internal (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
HUF_nbStreams_e nbStreams,
void* workSpace_align4, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
const int bmi2)
{
HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */
/* checks & inits */
if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
if (!srcSize) return 0; /* Uncompressed */
if (!dstSize) return 0; /* cannot fit anything within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
/* Heuristic : If old table is valid, use it for small inputs */
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
}
/* Scan input and build symbol stats */
{ CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
/* Check validity of previous table */
if ( repeat
&& *repeat == HUF_repeat_check
&& !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
}
/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
maxSymbolValue, huffLog,
&table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
CHECK_F(maxBits);
huffLog = (U32)maxBits;
/* Zero unused symbols in CTable, so we can check it for validity */
ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
}
/* Write table description header */
{ CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
&table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
/* Check if using previous huffman table is beneficial */
if (repeat && *repeat != HUF_repeat_none) {
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
} }
/* Use the new huffman table */
if (hSize + 12ul >= srcSize) { return 0; }
op += hSize;
if (repeat) { *repeat = HUF_repeat_none; }
if (oldHufTable)
ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
}
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, table->CTable, bmi2);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movl %r9d, %eax
movq 0x70(%rsp), %r9
movl %r8d, 0xc(%rsp)
movq $-0x42, %rbp
cmpq $0x1900, %r9 # imm = 0x1900
jb 0x61052c
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r12
testq %rcx, %rcx
sete %cl
testq %rsi, %rsi
sete %dl
orb %cl, %dl
je 0x610450
xorl %ebp, %ebp
jmp 0x61052c
movq $-0x48, %rbp
cmpq $0x20000, %rbx # imm = 0x20000
ja 0x61052c
movq $-0x2c, %rbp
cmpl $0xc, %eax
ja 0x61052c
movq $-0x2e, %rbp
cmpl $0xff, %r8d
ja 0x61052c
movq %rdi, %r15
movl 0x88(%rsp), %ecx
movq 0x80(%rsp), %rdi
testl %r8d, %r8d
jne 0x6104a7
movl $0xff, 0xc(%rsp)
movq 0x78(%rsp), %rsi
movl 0x60(%rsp), %r10d
leaq (%r15,%r12), %rdx
testl %eax, %eax
movl $0xb, %r8d
cmovnel %eax, %r8d
testl %ecx, %ecx
sete %al
testq %rdi, %rdi
sete %r13b
orb %al, %r13b
jne 0x6104f1
cmpl $0x2, (%rdi)
jne 0x6104f1
movq %rsi, (%rsp)
movq %r15, %rdi
movq %r15, %rsi
movq %r14, %rcx
movq %rbx, %r8
movl %r10d, %r9d
callq 0x6108d6
jmp 0x61052f
movl %r8d, 0x18(%rsp)
movq %rdx, 0x10(%rsp)
movq 0x68(%rsp), %rdi
leaq 0xc(%rsp), %rsi
movq %r14, %rdx
movq %rbx, %rcx
movq %rdi, %r8
callq 0x60eec7
movq %rax, %rbp
cmpq $-0x78, %rax
ja 0x61052c
cmpq %rbx, %rbp
jne 0x61053e
movb (%r14), %al
movb %al, (%r15)
movl $0x1, %ebp
movq %rbp, %rax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rbx, %rax
shrq $0x7, %rax
addq $0x4, %rax
cmpq %rax, %rbp
jbe 0x610449
movq 0x80(%rsp), %r10
testq %r10, %r10
movl 0x18(%rsp), %edi
je 0x6105c9
movl (%r10), %eax
cmpl $0x1, %eax
jne 0x6105bb
movl 0xc(%rsp), %ecx
movl $0x1, %eax
testl %ecx, %ecx
js 0x6105bb
incq %rcx
xorl %edx, %edx
xorl %esi, %esi
movq 0x78(%rsp), %r9
movq 0x68(%rsp), %r11
cmpl $0x0, (%r11,%rdx,4)
setne %bpl
cmpb $0x0, 0x2(%r9,%rdx,4)
sete %r8b
andb %bpl, %r8b
movzbl %r8b, %r8d
orl %r8d, %esi
incq %rdx
cmpq %rdx, %rcx
jne 0x610589
testl %esi, %esi
je 0x6105bb
movl $0x0, (%r10)
jmp 0x6105c9
testl %eax, %eax
sete %al
orb %al, %r13b
je 0x6106f1
movl 0xc(%rsp), %edx
movq %rbx, %rsi
movl $0x1, %ecx
callq 0x60dd55
movq 0x68(%rsp), %rsi
leaq 0x400(%rsi), %r13
movl 0xc(%rsp), %edx
leaq 0x800(%rsi), %r8
movl $0x1100, %r9d # imm = 0x1100
movq %r13, %rdi
movl %eax, %ecx
movq %r8, 0x18(%rsp)
callq 0x60f96e
movq %rax, %rbp
cmpq $-0x78, %rax
ja 0x61052c
movl 0xc(%rsp), %eax
movq %rax, 0x20(%rsp)
incl %eax
leaq (,%rax,4), %rdi
addq %r13, %rdi
shlq $0x2, %rax
movl $0x400, %edx # imm = 0x400
subq %rax, %rdx
xorl %esi, %esi
callq 0x3fa90
movq $0x2c0, (%rsp) # imm = 0x2C0
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq 0x20(%rsp), %rcx
movl %ebp, %r8d
movq 0x18(%rsp), %r9
callq 0x60ef52
movq %rax, %rbp
cmpq $-0x78, %rax
ja 0x61052c
movq 0x80(%rsp), %r9
testq %r9, %r9
je 0x610702
cmpl $0x0, (%r9)
movq 0x78(%rsp), %rdi
je 0x610716
movl 0xc(%rsp), %ecx
testl %ecx, %ecx
js 0x6106eb
incq %rcx
xorl %edx, %edx
xorl %eax, %eax
movq 0x68(%rsp), %r10
movzbl 0x2(%rdi,%rdx,4), %esi
imull (%r10,%rdx,4), %esi
addq %rsi, %rax
incq %rdx
cmpq %rdx, %rcx
jne 0x6106a0
xorl %esi, %esi
xorl %edx, %edx
movzbl 0x402(%r10,%rsi,4), %r8d
imull (%r10,%rsi,4), %r8d
addq %r8, %rdx
incq %rsi
cmpq %rsi, %rcx
jne 0x6106b9
leaq 0xc(%rbp), %rcx
cmpq %rbx, %rcx
jae 0x6106eb
shrq $0x3, %rax
shrq $0x3, %rdx
addq %rbp, %rdx
cmpq %rdx, %rax
ja 0x610723
movq %rdi, (%rsp)
jmp 0x6106fa
movq 0x78(%rsp), %rax
movq %rax, (%rsp)
movq %r15, %rdi
movq %r15, %rsi
jmp 0x610749
leaq 0xc(%rbp), %rax
cmpq %rbx, %rax
movq 0x78(%rsp), %rdi
jae 0x610449
jmp 0x61072a
leaq 0xc(%rbp), %rax
cmpq %rbx, %rax
jae 0x610449
movl $0x0, (%r9)
addq %r15, %rbp
testq %rdi, %rdi
je 0x61073f
movl $0x400, %edx # imm = 0x400
movq %r13, %rsi
callq 0x3f250
movq %r13, (%rsp)
movq %r15, %rdi
movq %rbp, %rsi
movq 0x10(%rsp), %rdx
movq %r14, %rcx
movq %rbx, %r8
movl 0x60(%rsp), %r9d
callq 0x6108d6
movq %rax, %rbp
jmp 0x61052c
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/huf_compress.c
|
ZSTD_noCompressLiterals
|
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
BYTE* const ostart = (BYTE*)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
switch(flSize)
{
case 1: /* 2 - 1 - 5 */
ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
break;
case 2: /* 2 - 2 - 12 */
MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
break;
case 3: /* 2 - 2 - 20 */
MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
break;
default: /* not necessary : flSize is {1,2,3} */
assert(0);
}
ZSTD_memcpy(ostart + flSize, src, srcSize);
DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
return srcSize + flSize;
}
|
pushq %rbx
xorl %r8d, %r8d
cmpq $0x20, %rcx
setae %r8b
cmpq $0x1000, %rcx # imm = 0x1000
sbbl $-0x1, %r8d
leaq (%r8,%rcx), %rbx
incq %rbx
movq $-0x46, %rax
cmpq %rsi, %rbx
ja 0x61099c
incq %r8
cmpl $0x3, %r8d
je 0x610976
cmpl $0x2, %r8d
jne 0x610982
movl %ecx, %eax
shll $0x4, %eax
orl $0x4, %eax
movw %ax, (%rdi)
jmp 0x61098b
movl %ecx, %eax
shll $0x4, %eax
orl $0xc, %eax
movl %eax, (%rdi)
jmp 0x61098b
leal (,%rcx,8), %eax
movb %al, (%rdi)
addq %r8, %rdi
movq %rdx, %rsi
movq %rcx, %rdx
callq 0x3f250
movq %rbx, %rax
popq %rbx
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress_literals.c
|
ZSTD_compressRleLiteralsBlock
|
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
BYTE* const ostart = (BYTE*)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
(void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
switch(flSize)
{
case 1: /* 2 - 1 - 5 */
ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
break;
case 2: /* 2 - 2 - 12 */
MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
break;
case 3: /* 2 - 2 - 20 */
MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
break;
default: /* not necessary : flSize is {1,2,3} */
assert(0);
}
ostart[flSize] = *(const BYTE*)src;
DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
return flSize+1;
}
|
xorl %eax, %eax
cmpq $0x20, %rcx
setae %al
cmpq $0x1000, %rcx # imm = 0x1000
sbbq $-0x1, %rax
incq %rax
cmpq $0x3, %rax
je 0x6109cb
cmpl $0x2, %eax
jne 0x6109d5
shll $0x4, %ecx
orl $0x5, %ecx
movw %cx, (%rdi)
jmp 0x6109dc
shll $0x4, %ecx
orl $0xd, %ecx
movl %ecx, (%rdi)
jmp 0x6109dc
shlb $0x3, %cl
incb %cl
movb %cl, (%rdi)
movb (%rdx), %cl
movb %cl, (%rdi,%rax)
incl %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress_literals.c
|
HUF_readDTableX1_wksp_bmi2
|
/* Builds a single-symbol ("X1") Huffman decoding table from the compressed
 * weight header at 'src'.
 *
 * DTable    : destination; cell 0 holds the DTableDesc header, decoding
 *             entries start at DTable+1.
 * workSpace : caller scratch, must be >= sizeof(HUF_ReadDTableX1_Workspace).
 * bmi2      : non-zero selects the BMI2-accelerated stats reader.
 *
 * Returns the number of header bytes consumed from 'src', or a zstd error
 * code (tableLog_tooLarge if the workspace is too small or the tree does
 * not fit the caller's table). */
size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
{
    U32 tableLog = 0;
    U32 nbSymbols = 0;
    size_t iSize;
    void* const dtPtr = DTable + 1;
    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
    HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;

    DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
    if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);

    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
    /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */

    /* Decode the per-symbol weights; also fills rankVal (count per weight),
     * nbSymbols and tableLog. */
    iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
    if (HUF_isError(iSize)) return iSize;

    /* Table header */
    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
        dtd.tableType = 0;
        dtd.tableLog = (BYTE)tableLog;
        ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
    }

    /* Compute symbols and rankStart given rankVal:
     *
     * rankVal already contains the number of values of each weight.
     *
     * symbols contains the symbols ordered by weight. First are the rankVal[0]
     * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
     * symbols[0] is filled (but unused) to avoid a branch.
     *
     * rankStart contains the offset where each rank belongs in the DTable.
     * rankStart[0] is not filled because there are no entries in the table for
     * weight 0.
     */
    {
        int n;
        int nextRankStart = 0;
        int const unroll = 4;
        int const nLimit = (int)nbSymbols - unroll + 1;
        /* prefix-sum rankVal into rankStart */
        for (n=0; n<(int)tableLog+1; n++) {
            U32 const curr = nextRankStart;
            nextRankStart += wksp->rankVal[n];
            wksp->rankStart[n] = curr;
        }
        /* counting sort of symbols by weight, 4-way unrolled main loop */
        for (n=0; n < nLimit; n += unroll) {
            int u;
            for (u=0; u < unroll; ++u) {
                size_t const w = wksp->huffWeight[n+u];
                wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
            }
        }
        /* tail: remaining symbols when nbSymbols is not a multiple of 4 */
        for (; n < (int)nbSymbols; ++n) {
            size_t const w = wksp->huffWeight[n];
            wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
        }
    }

    /* fill DTable
     * We fill all entries of each weight in order.
     * That way length is a constant for each iteration of the outter loop.
     * We can switch based on the length to a different inner loop which is
     * optimized for that particular case.
     */
    {
        U32 w;
        int symbol=wksp->rankVal[0];
        int rankStart=0;
        for (w=1; w<tableLog+1; ++w) {
            int const symbolCount = wksp->rankVal[w];
            int const length = (1 << w) >> 1;   /* each weight-w symbol spans this many table cells */
            int uStart = rankStart;
            BYTE const nbBits = (BYTE)(tableLog + 1 - w);
            int s;
            int u;
            switch (length) {
            case 1:
                for (s=0; s<symbolCount; ++s) {
                    HUF_DEltX1 D;
                    D.byte = wksp->symbols[symbol + s];
                    D.nbBits = nbBits;
                    dt[uStart] = D;
                    uStart += 1;
                }
                break;
            case 2:
                for (s=0; s<symbolCount; ++s) {
                    HUF_DEltX1 D;
                    D.byte = wksp->symbols[symbol + s];
                    D.nbBits = nbBits;
                    dt[uStart+0] = D;
                    dt[uStart+1] = D;
                    uStart += 2;
                }
                break;
            case 4:
                /* write 4 identical 2-byte entries as one 64-bit store */
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    MEM_write64(dt + uStart, D4);
                    uStart += 4;
                }
                break;
            case 8:
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    MEM_write64(dt + uStart, D4);
                    MEM_write64(dt + uStart + 4, D4);
                    uStart += 8;
                }
                break;
            default:
                /* length >= 16 here, so the inner stride-16 loop always covers it */
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    for (u=0; u < length; u += 16) {
                        MEM_write64(dt + uStart + u + 0, D4);
                        MEM_write64(dt + uStart + u + 4, D4);
                        MEM_write64(dt + uStart + u + 8, D4);
                        MEM_write64(dt + uStart + u + 12, D4);
                    }
                    assert(u == length);
                    uStart += length;
                }
                break;
            }
            symbol += symbolCount;
            rankStart += symbolCount * length;
        }
    }
    return iSize;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movq %rcx, %rbx
xorl %ecx, %ecx
movl %ecx, 0xc(%rsp)
movl %ecx, 0x8(%rsp)
movq $-0x2c, %r12
cmpq $0x5e8, %r8 # imm = 0x5E8
jb 0x611043
movl %r9d, %r10d
movq %rdx, %rax
movq %rsi, %r9
movq %rdi, %r14
leaq 0x4e8(%rbx), %r15
leaq 0x80(%rbx), %r11
leaq 0x8(%rsp), %rcx
leaq 0xc(%rsp), %r8
movl $0x100, %esi # imm = 0x100
movq %r15, %rdi
movq %rbx, %rdx
pushq %r10
pushq $0x368 # imm = 0x368
pushq %r11
pushq %rax
callq 0x60b556
addq $0x20, %rsp
cmpq $-0x78, %rax
ja 0x611040
movl (%r14), %edx
movl 0xc(%rsp), %ebp
movzbl %dl, %ecx
incl %ecx
cmpl %ecx, %ebp
ja 0x611043
movl %edx, %ecx
shrl $0x18, %ecx
movb %dl, (%r14)
movb $0x0, 0x1(%r14)
movb %bpl, 0x2(%r14)
movb %cl, 0x3(%r14)
movl 0x8(%rsp), %ecx
leal -0x3(%rcx), %r9d
leaq 0x1(%rbp), %rsi
xorl %edi, %edi
xorl %edx, %edx
movl (%rbx,%rdi,4), %r8d
addl %edx, %r8d
movl %edx, 0x40(%rbx,%rdi,4)
incq %rdi
movl %r8d, %edx
cmpq %rdi, %rsi
jne 0x610d88
xorl %esi, %esi
cmpl $0x4, %ecx
jl 0x610de4
movl %r9d, %edi
xorl %esi, %esi
movl %esi, %r11d
xorl %r8d, %r8d
movzbl (%r15,%r8), %edx
movl 0x40(%rbx,%rdx,4), %r9d
leal 0x1(%r9), %r10d
movl %r10d, 0x40(%rbx,%rdx,4)
movb %r11b, 0x3e8(%rbx,%r9)
incq %r8
incb %r11b
cmpq $0x4, %r8
jne 0x610db0
addq $0x4, %rsi
addq $0x4, %r15
cmpq %rdi, %rsi
jb 0x610daa
cmpl %ecx, %esi
jge 0x610e0f
movl %esi, %r8d
movzbl 0x4e8(%rbx,%r8), %edx
movl 0x40(%rbx,%rdx,4), %esi
leal 0x1(%rsi), %edi
movl %edi, 0x40(%rbx,%rdx,4)
movb %r8b, 0x3e8(%rbx,%rsi)
incq %r8
cmpq %r8, %rcx
jne 0x610deb
incl %ebp
cmpl $0x2, %ebp
jb 0x611040
movq %rax, 0x18(%rsp)
movabsq $0x1000100010001, %rsi # imm = 0x1000100010001
movl (%rbx), %edi
movl %ebp, %eax
movq %rax, 0x48(%rsp)
leaq 0xc(%r14), %rax
movq %rax, 0x38(%rsp)
leaq 0x3e8(%rbx), %rax
movq %rax, 0x10(%rsp)
leaq 0x4(%r14), %rax
movq %rax, 0x30(%rsp)
leaq 0x7(%r14), %rax
movq %rax, 0x28(%rsp)
leaq 0x5(%r14), %rax
movq %rax, 0x20(%rsp)
addq $0x1c, %r14
movq %r14, 0x40(%rsp)
xorl %r13d, %r13d
movl $0x1, %ecx
movq %rbp, 0x50(%rsp)
movl (%rbx,%rcx,4), %r9d
movl $0x1, %r11d
shll %cl, %r11d
sarl %r11d
movl %ebp, %r15d
subl %ecx, %r15d
leal -0x1(%r11), %eax
cmpl $0x7, %eax
ja 0x610fb5
leaq 0xc632e(%rip), %rdx # 0x6d71d0
movslq (%rdx,%rax,4), %rax
addq %rdx, %rax
jmpq *%rax
testl %r9d, %r9d
jle 0x61101c
movslq %edi, %rax
movslq %r13d, %rdx
movq 0x20(%rsp), %r8
leaq (%r8,%rdx,2), %r8
addq 0x10(%rsp), %rax
xorl %r10d, %r10d
movb (%rax,%r10), %dl
movb %dl, -0x1(%r8,%r10,2)
movb %r15b, (%r8,%r10,2)
incq %r10
cmpq %r10, %r9
jne 0x610ecb
jmp 0x61101c
testl %r9d, %r9d
jle 0x61101c
shll $0x8, %r15d
movzwl %r15w, %eax
movslq %edi, %r10
movslq %r13d, %rdx
movq 0x30(%rsp), %r8
leaq (%r8,%rdx,2), %r14
addq 0x10(%rsp), %r10
xorl %r8d, %r8d
movzbl (%r10,%r8), %edx
orl %eax, %edx
imulq %rsi, %rdx
movq %rdx, (%r14,%r8,8)
incq %r8
cmpq %r8, %r9
jne 0x610f0d
jmp 0x61101c
testl %r9d, %r9d
jle 0x61101c
movslq %edi, %rax
movslq %r13d, %rdx
movq 0x28(%rsp), %r8
leaq (%r8,%rdx,2), %r8
addq 0x10(%rsp), %rax
xorl %r10d, %r10d
movb (%rax,%r10), %dl
movb %dl, -0x3(%r8,%r10,4)
movb %r15b, -0x2(%r8,%r10,4)
movb %dl, -0x1(%r8,%r10,4)
movb %r15b, (%r8,%r10,4)
incq %r10
cmpq %r10, %r9
jne 0x610f49
jmp 0x61101c
testl %r9d, %r9d
jle 0x61101c
shll $0x8, %r15d
movzwl %r15w, %eax
movslq %edi, %r10
movslq %r13d, %rdx
movq 0x38(%rsp), %r8
leaq (%r8,%rdx,2), %r14
addq 0x10(%rsp), %r10
xorl %r15d, %r15d
movzbl (%r10,%r15), %edx
orl %eax, %edx
imulq %rsi, %rdx
movq %rdx, -0x8(%r14)
movq %rdx, (%r14)
incq %r15
addq $0x10, %r14
cmpq %r15, %r9
jne 0x610f95
jmp 0x61101c
testl %r9d, %r9d
jle 0x61101c
shll $0x8, %r15d
movzwl %r15w, %r15d
movslq %r11d, %r12
movslq %edi, %r10
movslq %r13d, %rax
movq 0x40(%rsp), %rdx
leaq (%rdx,%rax,2), %rax
leaq (%r12,%r12), %r14
xorl %ebp, %ebp
testl %r11d, %r11d
jle 0x611011
leaq (%r10,%rbp), %rdx
movzbl 0x3e8(%rbx,%rdx), %r8d
orl %r15d, %r8d
imulq %rsi, %r8
xorl %edx, %edx
movq %r8, -0x18(%rax,%rdx,2)
movq %r8, -0x10(%rax,%rdx,2)
movq %r8, -0x8(%rax,%rdx,2)
movq %r8, (%rax,%rdx,2)
addq $0x10, %rdx
cmpq %r12, %rdx
jl 0x610ff5
incq %rbp
addq %r14, %rax
cmpq %r9, %rbp
jne 0x610fda
addl %r9d, %edi
imull %r9d, %r11d
addl %r11d, %r13d
incq %rcx
cmpq 0x48(%rsp), %rcx
movq 0x50(%rsp), %rbp
jne 0x610e78
movq 0x18(%rsp), %r12
jmp 0x611043
movq %rax, %r12
movq %r12, %rax
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/huf_decompress.c
|
HUF_decompress1X1_DCtx_wksp
|
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                              const void* cSrc, size_t cSrcSize,
                              void* workSpace, size_t wkspSize)
{
    /* Build the single-symbol (X1) table from the stream header, then decode
     * the single-stream payload that follows it. */
    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);   /* header must leave room for a payload */

    {   const BYTE* const payload = (const BYTE*)cSrc + hSize;
        size_t const payloadSize = cSrcSize - hSize;
        return HUF_decompress1X1_usingDTable_internal(dst, dstSize, payload, payloadSize, DCtx, /* bmi2 */ 0);
    }
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %r8, %rbx
movq %rcx, %r14
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %r15
movq 0x30(%rsp), %r8
movq %rcx, %rsi
movq %rbx, %rdx
movq %r9, %rcx
xorl %r9d, %r9d
callq 0x610cd4
movq %rax, %rcx
cmpq $-0x78, %rax
ja 0x6112f6
movq $-0x48, %rcx
subq %rax, %rbx
jbe 0x6112f6
addq %rax, %r14
movq %r13, %rdi
movq %r12, %rsi
movq %r14, %rdx
movq %rbx, %rcx
movq %r15, %r8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
jmp 0x611064
movq %rcx, %rax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/huf_decompress.c
|
HUF_readDTableX2_wksp
|
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize)
{
U32 tableLog, maxW, sizeOfSort, nbSymbols;
DTableDesc dtd = HUF_getDTableDesc(DTable);
U32 const maxTableLog = dtd.maxTableLog;
size_t iSize;
void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
U32 *rankStart;
HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
rankStart = wksp->rankStart0 + 1;
ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
/* Get start index of each weight */
{ U32 w, nextRankStart = 0;
for (w=1; w<maxW+1; w++) {
U32 curr = nextRankStart;
nextRankStart += wksp->rankStats[w];
rankStart[w] = curr;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{ U32 s;
for (s=0; s<nbSymbols; s++) {
U32 const w = wksp->weightList[s];
U32 const r = rankStart[w]++;
wksp->sortedSymbol[r].symbol = (BYTE)s;
wksp->sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
/* Build rankVal */
{ U32* const rankVal0 = wksp->rankVal[0];
{ int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
U32 nextRankVal = 0;
U32 w;
for (w=1; w<maxW+1; w++) {
U32 curr = nextRankVal;
nextRankVal += wksp->rankStats[w] << (w+rescale);
rankVal0[w] = curr;
} }
{ U32 const minBits = tableLog+1 - maxW;
U32 consumed;
for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
U32* const rankValPtr = wksp->rankVal[consumed];
U32 w;
for (w = 1; w < maxW+1; w++) {
rankValPtr[w] = rankVal0[w] >> consumed;
} } } }
HUF_fillDTableX2(dt, maxTableLog,
wksp->sortedSymbol, sizeOfSort,
wksp->rankStart0, wksp->rankVal, maxW,
tableLog+1,
wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32));
dtd.tableLog = (BYTE)maxTableLog;
dtd.tableType = 1;
ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
return iSize;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x78, %rsp
movq $-0x1, %r12
cmpq $0x944, %r8 # imm = 0x944
jb 0x61292f
movq %rcx, %r14
movq %rdi, %rbx
movl (%rdi), %ebp
movzbl %bpl, %ecx
pxor %xmm0, %xmm0
movdqu %xmm0, 0x2cc(%r14)
movdqu %xmm0, 0x2c0(%r14)
movdqu %xmm0, 0x2b0(%r14)
movdqu %xmm0, 0x2a0(%r14)
movdqu %xmm0, 0x290(%r14)
movdqu %xmm0, 0x280(%r14)
movdqu %xmm0, 0x270(%r14)
movq $-0x2c, %r12
movl %ecx, 0x4(%rsp)
cmpl $0xc, %ecx
ja 0x61292f
movq %rdx, %rax
movq %rsi, %r9
leaq 0x270(%r14), %r15
leaq 0x4dc(%r14), %rdi
leaq 0x5dc(%r14), %r13
leaq 0x18(%rsp), %rcx
leaq 0x1c(%rsp), %r8
movl $0x100, %esi # imm = 0x100
movq %r15, %rdx
pushq $0x0
pushq $0x368 # imm = 0x368
pushq %r13
pushq %rax
callq 0x60b556
addq $0x20, %rsp
movq %rax, %rcx
cmpq $-0x78, %rax
ja 0x61292c
movl 0x1c(%rsp), %edx
movl 0x4(%rsp), %eax
movl %eax, %esi
subl %edx, %esi
jb 0x61292f
movq %rcx, 0x40(%rsp)
movl %ebp, %eax
shrl $0x18, %eax
movl %eax, 0xc(%rsp)
leaq 0x4(%rbx), %rax
movq %rax, 0x48(%rsp)
leal 0x1(%rdx), %eax
movl %eax, 0x8(%rsp)
xorl %r12d, %r12d
movl %edx, %eax
movl %eax, %ecx
decl %eax
incl %r12d
cmpl $0x0, (%r15,%rcx,4)
je 0x6123ec
subl %r12d, %edx
addl $0x2, %edx
xorl %r9d, %r9d
cmpl $-0x3, %eax
ja 0x612430
movl %edx, %ecx
decq %rcx
xorl %edi, %edi
xorl %r8d, %r8d
movl 0x274(%r14,%rdi,4), %r9d
addl %r8d, %r9d
movl %r8d, 0x2ac(%r14,%rdi,4)
incq %rdi
movl %r9d, %r8d
cmpq %rdi, %rcx
jne 0x612412
movl 0x4(%rsp), %edi
subl %r12d, %edi
movl %r9d, (%rsp)
movl %r9d, 0x2a8(%r14)
movl 0x18(%rsp), %ecx
testq %rcx, %rcx
je 0x612483
xorl %r8d, %r8d
movzbl 0x4dc(%r14,%r8), %r9d
movl 0x2a8(%r14,%r9,4), %r10d
leal 0x1(%r10), %r11d
movl %r11d, 0x2a8(%r14,%r9,4)
movb %r8b, 0x2dc(%r14,%r10,2)
movb %r9b, 0x2dd(%r14,%r10,2)
incq %r8
cmpq %r8, %rcx
jne 0x61244e
incl %edi
movl $0x0, 0x2a8(%r14)
cmpl $-0x3, %eax
ja 0x6124c3
movl %edx, %r8d
decq %r8
xorl %r11d, %r11d
xorl %r10d, %r10d
movl 0x274(%r14,%r11,4), %r9d
leal (%rsi,%r11), %ecx
shll %cl, %r9d
addl %r10d, %r9d
movl %r10d, 0x4(%r14,%r11,4)
incq %r11
movl %r9d, %r10d
cmpq %r11, %r8
jne 0x6124a1
cmpl %edi, %r12d
jae 0x612501
movl %r12d, %ecx
leaq 0x4(%r14), %rsi
imulq $0x34, %rcx, %r11
movl %edx, %edx
decq %rdx
cmpl $-0x3, %eax
ja 0x6124f6
movq %rdx, %r8
movq %rsi, %r10
movl (%r10), %r9d
shrl %cl, %r9d
movl %r9d, (%r10,%r11)
addq $0x4, %r10
decq %r8
jne 0x6124e3
incq %rcx
addq $0x34, %r11
cmpl %ecx, %edi
jne 0x6124d8
movl %r12d, 0x14(%rsp)
movl %ebp, 0x10(%rsp)
movq %rbx, 0x20(%rsp)
movl 0x30(%r14), %eax
movl %eax, 0x30(%r13)
movdqu (%r14), %xmm0
movdqu 0x10(%r14), %xmm1
movq %r14, 0x30(%rsp)
movdqu 0x20(%r14), %xmm2
movdqu %xmm2, 0x20(%r13)
movdqu %xmm1, 0x10(%r13)
movdqu %xmm0, (%r13)
cmpl $0x0, (%rsp)
je 0x61290c
movq 0x30(%rsp), %rax
leaq 0x2dc(%rax), %rcx
movq %rcx, 0x38(%rsp)
movl 0x8(%rsp), %ecx
subl 0x4(%rsp), %ecx
movq %rcx, 0x50(%rsp)
leaq 0x610(%rax), %r12
movl (%rsp), %eax
movq %rax, 0x58(%rsp)
movq 0x20(%rsp), %rax
addq $0x10, %rax
movq %rax, 0x28(%rsp)
xorl %edi, %edi
movdqa 0x29182(%rip), %xmm0 # 0x63b710
movdqa 0x2918a(%rip), %xmm1 # 0x63b720
movdqa 0x29192(%rip), %xmm2 # 0x63b730
pcmpeqd %xmm3, %xmm3
movdqa 0x291a6(%rip), %xmm4 # 0x63b750
movq %r13, 0x60(%rsp)
movq 0x38(%rsp), %rax
movzbl (%rax,%rdi,2), %ebx
movzbl 0x1(%rax,%rdi,2), %ecx
movl 0x8(%rsp), %eax
movl %eax, %esi
subl %ecx, %esi
movq %rcx, 0x70(%rsp)
movl (%r13,%rcx,4), %r13d
movl 0x4(%rsp), %edx
subl %esi, %edx
movl $0x1, %eax
movl %edx, %ecx
shll %cl, %eax
cmpl 0x14(%rsp), %edx
movq %rax, 0x68(%rsp)
jae 0x6126ef
addl %r13d, %eax
cmpl %eax, %r13d
jae 0x6128ea
movzbl %sil, %ecx
shll $0x10, %ecx
addl %ebx, %ecx
addl $0x1000000, %ecx # imm = 0x1000000
movl %eax, %eax
subq %r13, %rax
leaq 0x3(%rax), %rdx
andq $-0x4, %rdx
decq %rax
movq %rax, %xmm5
pshufd $0x44, %xmm5, %xmm5 # xmm5 = xmm5[0,1,0,1]
movq 0x28(%rsp), %rax
leaq (%rax,%r13,4), %rax
pxor %xmm2, %xmm5
xorl %esi, %esi
movq %rsi, %xmm6
pshufd $0x44, %xmm6, %xmm6 # xmm6 = xmm6[0,1,0,1]
movdqa %xmm6, %xmm7
por %xmm1, %xmm7
pxor %xmm2, %xmm7
movdqa %xmm7, %xmm8
pcmpgtd %xmm5, %xmm8
pcmpeqd %xmm5, %xmm7
pshufd $0xf5, %xmm7, %xmm9 # xmm9 = xmm7[1,1,3,3]
pand %xmm8, %xmm9
pshufd $0xf5, %xmm8, %xmm7 # xmm7 = xmm8[1,1,3,3]
por %xmm9, %xmm7
movd %xmm7, %r8d
notl %r8d
testb $0x1, %r8b
je 0x61267d
movl %ecx, -0xc(%rax,%rsi,4)
pxor %xmm3, %xmm7
pextrw $0x4, %xmm7, %r8d
testb $0x1, %r8b
je 0x612691
movl %ecx, -0x8(%rax,%rsi,4)
por %xmm0, %xmm6
pxor %xmm2, %xmm6
movdqa %xmm6, %xmm7
pcmpgtd %xmm5, %xmm7
pcmpeqd %xmm5, %xmm6
pshufd $0xf5, %xmm6, %xmm8 # xmm8 = xmm6[1,1,3,3]
pand %xmm7, %xmm8
pshufd $0xf5, %xmm7, %xmm6 # xmm6 = xmm7[1,1,3,3]
por %xmm8, %xmm6
pxor %xmm3, %xmm6
pextrw $0x0, %xmm6, %r8d
testb $0x1, %r8b
je 0x6126ce
movl %ecx, -0x4(%rax,%rsi,4)
pextrw $0x4, %xmm6, %r8d
testb $0x1, %r8b
je 0x6126dd
movl %ecx, (%rax,%rsi,4)
addq $0x4, %rsi
cmpq %rsi, %rdx
jne 0x612631
jmp 0x6128ea
movq 0x50(%rsp), %rax
leal (%rsi,%rax), %ecx
cmpl $0x2, %ecx
movl $0x1, %eax
cmovgel %ecx, %eax
movq 0x30(%rsp), %r10
movl 0x2a4(%r10,%rax,4), %r14d
movl %esi, %r8d
imulq $0x34, %r8, %r8
movl 0x30(%r10,%r8), %r9d
movl %r9d, 0x30(%r12)
movdqu (%r10,%r8), %xmm5
movdqu 0x10(%r10,%r8), %xmm6
movdqu 0x20(%r10,%r8), %xmm7
movdqu %xmm7, 0x20(%r12)
movdqu %xmm6, 0x10(%r12)
movdqu %xmm5, (%r12)
cmpl $0x2, %ecx
jl 0x612867
movl (%r12,%rax,4), %eax
testq %rax, %rax
je 0x612867
movzbl %sil, %ecx
shll $0x10, %ecx
leal (%rcx,%rbx), %ebp
addl $0x1000000, %ebp # imm = 0x1000000
leaq -0x1(%rax), %rcx
movq %rcx, %xmm5
pshufd $0x44, %xmm5, %xmm5 # xmm5 = xmm5[0,1,0,1]
movq 0x28(%rsp), %rcx
leaq (%rcx,%r13,4), %r11
leaq 0xc(,%rax,4), %rcx
andq $-0x10, %rcx
xorl %eax, %eax
movdqa %xmm1, %xmm6
movdqa %xmm0, %xmm7
movdqa %xmm5, %xmm8
pxor %xmm2, %xmm8
movdqa %xmm6, %xmm9
pxor %xmm2, %xmm9
movdqa %xmm9, %xmm10
pcmpgtd %xmm8, %xmm10
pcmpeqd %xmm8, %xmm9
pshufd $0xf5, %xmm9, %xmm11 # xmm11 = xmm9[1,1,3,3]
pand %xmm10, %xmm11
pshufd $0xf5, %xmm10, %xmm9 # xmm9 = xmm10[1,1,3,3]
por %xmm11, %xmm9
movd %xmm9, %r8d
notl %r8d
testb $0x1, %r8b
je 0x6127e7
movl %ebp, -0xc(%r11,%rax)
pxor %xmm3, %xmm9
pextrw $0x4, %xmm9, %r8d
testb $0x1, %r8b
je 0x6127fd
movl %ebp, -0x8(%r11,%rax)
movdqa %xmm7, %xmm9
pxor %xmm2, %xmm9
movdqa %xmm9, %xmm10
pcmpgtd %xmm8, %xmm10
pcmpeqd %xmm8, %xmm9
pshufd $0xf5, %xmm9, %xmm9 # xmm9 = xmm9[1,1,3,3]
pand %xmm10, %xmm9
pshufd $0xf5, %xmm10, %xmm8 # xmm8 = xmm10[1,1,3,3]
por %xmm9, %xmm8
pxor %xmm3, %xmm8
pextrw $0x0, %xmm8, %r8d
testb $0x1, %r8b
je 0x612842
movl %ebp, -0x4(%r11,%rax)
pextrw $0x4, %xmm8, %r8d
testb $0x1, %r8b
je 0x612852
movl %ebp, (%r11,%rax)
paddq %xmm4, %xmm6
paddq %xmm4, %xmm7
addq $0x10, %rax
cmpq %rax, %rcx
jne 0x61279b
cmpl %r14d, (%rsp)
je 0x6128ea
movl (%rsp), %ecx
subl %r14d, %ecx
movq 0x48(%rsp), %rax
leaq (%rax,%r13,4), %rax
movq 0x38(%rsp), %r8
leaq (%r8,%r14,2), %r11
movl %ecx, %r13d
xorl %ebp, %ebp
movzbl (%r11,%rbp,2), %r9d
movzbl 0x1(%r11,%rbp,2), %r15d
movl 0x8(%rsp), %r14d
subl %r15d, %r14d
movl %edx, %ecx
subl %r14d, %ecx
movl $0x1, %r8d
shll %cl, %r8d
movl (%r12,%r15,4), %ecx
leal (%r8,%rcx), %r10d
shll $0x8, %r9d
addl %esi, %r14d
movzbl %r14b, %r14d
shll $0x10, %r14d
orl %r9d, %r14d
leal (%rbx,%r14), %r9d
addl $0x2000000, %r9d # imm = 0x2000000
movl %ecx, %r14d
incl %ecx
movl %r9d, (%rax,%r14,4)
cmpl %r10d, %ecx
jb 0x6128d0
addl %r8d, (%r12,%r15,4)
incq %rbp
cmpq %r13, %rbp
jne 0x61288a
movq 0x60(%rsp), %r13
movq 0x70(%rsp), %rax
movq 0x68(%rsp), %rcx
addl %ecx, (%r13,%rax,4)
incq %rdi
cmpq 0x58(%rsp), %rdi
jne 0x6125af
movq 0x20(%rsp), %rax
movl 0x10(%rsp), %ecx
movb %cl, (%rax)
movb $0x1, 0x1(%rax)
movb %cl, 0x2(%rax)
movl 0xc(%rsp), %ecx
movb %cl, 0x3(%rax)
movq 0x40(%rsp), %r12
jmp 0x61292f
movq %rcx, %r12
movq %r12, %rax
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/huf_decompress.c
|
HUF_decompress4X2_DCtx_wksp_bmi2
|
static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
                   const void* cSrc, size_t cSrcSize,
                   void* workSpace, size_t wkspSize, int bmi2)
{
    /* Build the double-symbol (X2) table from the stream header, then decode
     * the four interleaved streams that follow it. */
    size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
                                               workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);   /* header must leave room for a payload */

    {   const BYTE* const payload = (const BYTE*)cSrc + hSize;
        size_t const payloadSize = cSrcSize - hSize;
        return HUF_decompress4X2_usingDTable_internal(dst, dstSize, payload, payloadSize, dctx, bmi2);
    }
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %r8, %rbx
movq %rcx, %r14
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %r15
movq 0x30(%rsp), %r8
movq %rcx, %rsi
movq %rbx, %rdx
movq %r9, %rcx
callq 0x6122e5
movq %rax, %rcx
cmpq $-0x78, %rax
ja 0x6142f2
movq $-0x48, %rcx
subq %rax, %rbx
jbe 0x6142f2
addq %rax, %r14
movq %r13, %rdi
movq %r12, %rsi
movq %r14, %rdx
movq %rbx, %rcx
movq %r15, %r8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
jmp 0x612d31
movq %rcx, %rax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/huf_decompress.c
|
HUF_decompress4X_hufOnly_wksp
|
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
                                     size_t dstSize, const void* cSrc,
                                     size_t cSrcSize, void* workSpace,
                                     size_t wkspSize)
{
    /* Decode a Huffman-only (table header + 4-stream payload) block,
     * picking the X1 or X2 decoder by the size heuristic. */

    /* reject degenerate inputs up front */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize == 0) return ERROR(corruption_detected);

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
        (void)algoNb;
        assert(algoNb == 0);
        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
        (void)algoNb;
        assert(algoNb == 1);
        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#else
        if (algoNb)
            return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                               cSrcSize, workSpace, wkspSize);
        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                           cSrcSize, workSpace, wkspSize);
#endif
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
testq %rdx, %rdx
je 0x6143cc
movq %r8, %r14
testq %r8, %r8
je 0x6143d5
movq %r9, %rbx
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %rbp
movq 0x40(%rsp), %rax
movq %rax, (%rsp)
movq %rdx, %rdi
movq %r14, %rsi
callq 0x61431f
movq %rbp, %rdi
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movq %r14, %r8
movq %rbx, %r9
testl %eax, %eax
je 0x6143eb
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x614292
movq $-0x46, %rax
jmp 0x6143dc
movq $-0x14, %rax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
pushq $0x0
pushq 0x8(%rsp)
callq 0x612270
addq $0x18, %rsp
jmp 0x6143e0
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/huf_decompress.c
|
HUF_decompress1X_usingDTable_bmi2
|
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
{
    /* Single-stream decode using a prebuilt table; dispatch on the table's
     * recorded type (0 = single-symbol X1, 1 = double-symbol X2). */
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
    (void)dtd;
    assert(dtd.tableType == 0);
    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
    (void)dtd;
    assert(dtd.tableType == 1);
    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#else
    if (dtd.tableType)
        return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#endif
}
|
cmpb $0x0, 0x1(%r8)
je 0x611064
jmp 0x612958
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/huf_decompress.c
|
HUF_decompress
|
size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
    static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
#endif

    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */

    /* stored (uncompressed) block : plain copy */
    if (cSrcSize == dstSize) {
        ZSTD_memcpy(dst, cSrc, dstSize);
        return dstSize;
    }

    /* RLE block : repeat the single source byte */
    if (cSrcSize == 1) {
        ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize);
        return dstSize;
    }

    /* Huffman-compressed : pick X1/X2 decoder by size heuristic */
    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
        (void)algoNb;
        assert(algoNb == 0);
        return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
        (void)algoNb;
        assert(algoNb == 1);
        return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
#else
        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
#endif
    }
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
testq %rsi, %rsi
je 0x6148ad
movq %rcx, %r15
movq %rsi, %rbx
movq $-0x14, %rax
cmpq %rsi, %rcx
ja 0x6148ce
movq %rdx, %r14
movq %rdi, %r12
jne 0x6148b6
movq %r12, %rdi
movq %r14, %rsi
movq %rbx, %rdx
callq 0x3f250
jmp 0x6148cb
movq $-0x46, %rax
jmp 0x6148ce
cmpq $0x1, %r15
jne 0x6148da
movzbl (%r14), %esi
movq %r12, %rdi
movq %rbx, %rdx
callq 0x3fa90
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
movq %rbx, %rdi
movq %r15, %rsi
callq 0x61431f
movl %eax, %eax
leaq 0x2491e2(%rip), %r8 # 0x85dad0
movq %r12, %rdi
movq %rbx, %rsi
movq %r14, %rdx
movq %r15, %rcx
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
jmpq *(%r8,%rax,8)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/huf_decompress.c
|
HUF_decompress4X_DCtx
|
size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */

    /* stored (uncompressed) block : plain copy */
    if (cSrcSize == dstSize) {
        ZSTD_memcpy(dst, cSrc, dstSize);
        return dstSize;
    }

    /* RLE block : repeat the single source byte */
    if (cSrcSize == 1) {
        ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize);
        return dstSize;
    }

    /* Huffman-compressed : pick X1/X2 decoder by size heuristic */
    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
        (void)algoNb;
        assert(algoNb == 0);
        return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
        (void)algoNb;
        assert(algoNb == 1);
        return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
#else
        if (algoNb)
            return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
        return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
#endif
    }
}
|
testq %rdx, %rdx
je 0x61494c
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa00, %rsp # imm = 0xA00
movq %r8, %r15
movq %rdx, %rbx
movq $-0x14, %rax
cmpq %rdx, %r8
ja 0x6149c6
movq %rcx, %r14
movq %rsi, %r12
jne 0x614954
movq %r12, %rdi
movq %r14, %rsi
movq %rbx, %rdx
callq 0x3f250
jmp 0x614969
movq $-0x46, %rax
retq
cmpq $0x1, %r15
jne 0x61496e
movzbl (%r14), %esi
movq %r12, %rdi
movq %rbx, %rdx
callq 0x3fa90
movq %rbx, %rax
jmp 0x6149c6
movq %rdi, %r13
movq %rbx, %rdi
movq %r15, %rsi
callq 0x61431f
testl %eax, %eax
je 0x6149a4
subq $0x8, %rsp
leaq 0x8(%rsp), %r9
movq %r13, %rdi
movq %r12, %rsi
movq %rbx, %rdx
movq %r14, %rcx
movq %r15, %r8
pushq $0xa00 # imm = 0xA00
callq 0x614292
jmp 0x6149c2
movq %rsp, %r9
movq %r13, %rdi
movq %r12, %rsi
movq %rbx, %rdx
movq %r14, %rcx
movq %r15, %r8
pushq $0x0
pushq $0xa00 # imm = 0xA00
callq 0x612270
addq $0x10, %rsp
addq $0xa00, %rsp # imm = 0xA00
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/huf_decompress.c
|
ZSTD_copyDDictParameters
|
/* Copies the decoding parameters of 'ddict' into 'dctx': dictionary window
 * pointers, dictID, and (when present) the pre-built entropy tables.
 * Both pointers must be non-NULL (asserted). */
void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
    DEBUGLOG(4, "ZSTD_copyDDictParameters");
    assert(dctx != NULL);
    assert(ddict != NULL);
    dctx->dictID = ddict->dictID;
    /* dictionary content becomes the decoding window's prefix */
    dctx->prefixStart = ddict->dictContent;
    dctx->virtualStart = ddict->dictContent;
    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
    dctx->previousDstEnd = dctx->dictEnd;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
#endif
    if (ddict->entropyPresent) {
        /* reuse the dictionary's entropy tables instead of rebuilding them */
        dctx->litEntropy = 1;
        dctx->fseEntropy = 1;
        dctx->LLTptr = ddict->entropy.LLTable;
        dctx->MLTptr = ddict->entropy.MLTable;
        dctx->OFTptr = ddict->entropy.OFTable;
        dctx->HUFptr = ddict->entropy.hufTable;
        /* repcodes carried over from the dictionary */
        dctx->entropy.rep[0] = ddict->entropy.rep[0];
        dctx->entropy.rep[1] = ddict->entropy.rep[1];
        dctx->entropy.rep[2] = ddict->entropy.rep[2];
    } else {
        dctx->litEntropy = 0;
        dctx->fseEntropy = 0;
    }
}
|
movl 0x6ab4(%rsi), %eax
movl %eax, 0x75f0(%rdi)
movq 0x8(%rsi), %rax
movq %rax, 0x74c8(%rdi)
movq %rax, 0x74d0(%rdi)
addq 0x10(%rsi), %rax
movq %rax, 0x74d8(%rdi)
movq %rax, 0x74c0(%rdi)
cmpl $0x0, 0x6ab8(%rsi)
je 0x614bcd
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0x7528(%rdi)
leaq 0x18(%rsi), %rax
movq %rax, (%rdi)
leaq 0x1828(%rsi), %rax
movq %rax, 0x8(%rdi)
leaq 0x1020(%rsi), %rax
movq %rax, 0x10(%rdi)
leaq 0x2830(%rsi), %rax
movq %rax, 0x18(%rdi)
movl 0x6834(%rsi), %eax
movl %eax, 0x683c(%rdi)
movl 0x6838(%rsi), %eax
movl %eax, 0x6840(%rdi)
movl 0x683c(%rsi), %eax
movl %eax, 0x6844(%rdi)
retq
movq $0x0, 0x7528(%rdi)
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_ddict.c
|
ZSTD_initStaticDDict
|
const ZSTD_DDict* ZSTD_initStaticDDict(
                                void* sBuffer, size_t sBufferSize,
                                const void* dict, size_t dictSize,
                                ZSTD_dictLoadMethod_e dictLoadMethod,
                                ZSTD_dictContentType_e dictContentType)
{
    /* Initializes a DDict inside a caller-provided buffer (no allocation).
     * Returns NULL when the buffer is misaligned, too small, or when the
     * dictionary content fails to load. */
    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
    size_t neededSpace = sizeof(ZSTD_DDict);
    if (dictLoadMethod != ZSTD_dlm_byRef) neededSpace += dictSize;   /* local copy lives after the struct */

    assert(sBuffer != NULL);
    assert(dict != NULL);
    if ((size_t)sBuffer & 7) return NULL;        /* 8-aligned */
    if (sBufferSize < neededSpace) return NULL;

    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        ZSTD_memcpy(ddict+1, dict, dictSize);    /* local copy */
        dict = ddict+1;
    }

    {   size_t const initResult = ZSTD_initDDict_internal(ddict,
                                                          dict, dictSize,
                                                          ZSTD_dlm_byRef, dictContentType);
        if (ZSTD_isError(initResult)) return NULL;
    }
    return ddict;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
testb $0x7, %bl
jne 0x614ed7
movq %rcx, %r14
movl $0x6ad8, %eax # imm = 0x6AD8
addq %rax, %rcx
cmpl $0x1, %r8d
cmoveq %rax, %rcx
cmpq %rsi, %rcx
jbe 0x614ee4
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
movl %r9d, %ebp
testl %r8d, %r8d
jne 0x614f04
leaq 0x6ad8(%rbx), %r15
movq %r15, %rdi
movq %rdx, %rsi
movq %r14, %rdx
callq 0x3f250
movq %r15, %rdx
movq %rbx, %rdi
movq %rdx, %rsi
movq %r14, %rdx
movl $0x1, %ecx
movl %ebp, %r8d
callq 0x614ccb
movq %rax, %rcx
xorl %eax, %eax
cmpq $-0x77, %rcx
cmovbq %rbx, %rax
jmp 0x614ed9
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_ddict.c
|
ZSTD_decodeLiteralsBlock
|
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{ const BYTE* const istart = (const BYTE*) src;
symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
switch(litEncType)
{
case set_repeat:
DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
/* fall-through */
case set_compressed:
RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
{ size_t lhSize, litSize, litCSize;
U32 singleStream=0;
U32 const lhlCode = (istart[0] >> 2) & 3;
U32 const lhc = MEM_readLE32(istart);
size_t hufSuccess;
switch(lhlCode)
{
case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */
/* 2 - 2 - 10 - 10 */
singleStream = !lhlCode;
lhSize = 3;
litSize = (lhc >> 4) & 0x3FF;
litCSize = (lhc >> 14) & 0x3FF;
break;
case 2:
/* 2 - 2 - 14 - 14 */
lhSize = 4;
litSize = (lhc >> 4) & 0x3FFF;
litCSize = lhc >> 18;
break;
case 3:
/* 2 - 2 - 18 - 18 */
lhSize = 5;
litSize = (lhc >> 4) & 0x3FFFF;
litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
break;
}
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
/* prefetch huffman table if cold */
if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
}
if (litEncType==set_repeat) {
if (singleStream) {
hufSuccess = HUF_decompress1X_usingDTable_bmi2(
dctx->litBuffer, litSize, istart+lhSize, litCSize,
dctx->HUFptr, dctx->bmi2);
} else {
hufSuccess = HUF_decompress4X_usingDTable_bmi2(
dctx->litBuffer, litSize, istart+lhSize, litCSize,
dctx->HUFptr, dctx->bmi2);
}
} else {
if (singleStream) {
#if defined(HUF_FORCE_DECOMPRESS_X2)
hufSuccess = HUF_decompress1X_DCtx_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
sizeof(dctx->workspace));
#else
hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
sizeof(dctx->workspace), dctx->bmi2);
#endif
} else {
hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
sizeof(dctx->workspace), dctx->bmi2);
}
}
RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
dctx->litEntropy = 1;
if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
case set_basic:
{ size_t litSize, lhSize;
U32 const lhlCode = ((istart[0]) >> 2) & 3;
switch(lhlCode)
{
case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
lhSize = 1;
litSize = istart[0] >> 3;
break;
case 1:
lhSize = 2;
litSize = MEM_readLE16(istart) >> 4;
break;
case 3:
lhSize = 3;
litSize = MEM_readLE24(istart) >> 4;
break;
}
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return lhSize+litSize;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+lhSize;
dctx->litSize = litSize;
return lhSize+litSize;
}
case set_rle:
{ U32 const lhlCode = ((istart[0]) >> 2) & 3;
size_t litSize, lhSize;
switch(lhlCode)
{
case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
lhSize = 1;
litSize = istart[0] >> 3;
break;
case 1:
lhSize = 2;
litSize = MEM_readLE16(istart) >> 4;
break;
case 3:
lhSize = 3;
litSize = MEM_readLE24(istart) >> 4;
RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
break;
}
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
}
default:
RETURN_ERROR(corruption_detected, "impossible");
}
}
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq $-0x14, %r14
cmpq $0x3, %rdx
jae 0x614fe6
movq %r14, %rax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rsi, %rcx
movq %rdi, %rbx
movzbl (%rsi), %r15d
movl %r15d, %r12d
andl $0x3, %r12d
leaq 0xc2452(%rip), %rax # 0x6d7450
movslq (%rax,%r12,4), %rsi
addq %rax, %rsi
jmpq *%rsi
movl %r15d, %eax
shrl $0x2, %eax
andl $0x3, %eax
cmpl $0x1, %eax
je 0x6150f8
cmpl $0x3, %eax
jne 0x61511a
movzwl (%rcx), %eax
movzbl 0x2(%rcx), %r15d
shll $0x10, %r15d
orl %eax, %r15d
shrl $0x4, %r15d
movl $0x3, %eax
jmp 0x615123
cmpl $0x0, 0x7528(%rbx)
je 0x6150ec
cmpq $0x5, %rdx
jb 0x614fd9
shrb $0x2, %r15b
andb $0x3, %r15b
movl (%rcx), %esi
cmpb $0x2, %r15b
je 0x6151cb
movzbl %r15b, %eax
cmpl $0x3, %eax
jne 0x6151e3
movl %esi, %eax
shrl $0x4, %eax
andl $0x3ffff, %eax # imm = 0x3FFFF
cmpl $0x20000, %eax # imm = 0x20000
ja 0x614fd9
movzbl 0x4(%rcx), %r10d
shldl $0xa, %esi, %r10d
movb $0x1, %r11b
movl $0x5, %r8d
jmp 0x615204
movl %r15d, %eax
shrl $0x2, %eax
andl $0x3, %eax
cmpl $0x1, %eax
je 0x615107
cmpl $0x3, %eax
jne 0x615193
cmpq $0x3, %rdx
je 0x614fd9
movzwl (%rcx), %eax
movzbl 0x2(%rcx), %r15d
shll $0x10, %r15d
orq %rax, %r15
cmpl $0x20000f, %r15d # imm = 0x20000F
ja 0x614fd9
shrl $0x4, %r15d
movl $0x3, %r14d
jmp 0x61519d
movq $-0x1e, %r14
jmp 0x614fd9
movzwl (%rcx), %r15d
shrl $0x4, %r15d
movl $0x2, %eax
jmp 0x615123
movzwl (%rcx), %r15d
shrl $0x4, %r15d
movl $0x2, %r14d
jmp 0x61519d
shrl $0x3, %r15d
movl $0x1, %eax
leaq (%rax,%r15), %r12
leaq (%rax,%r15), %rsi
addq $0x20, %rsi
cmpq %rdx, %rsi
jbe 0x61517a
cmpq %rdx, %r12
ja 0x614fd9
leaq 0x7690(%rbx), %r14
addq %rax, %rcx
movq %r14, %rdi
movq %rcx, %rsi
movq %r15, %rdx
callq 0x3f250
movq %r14, 0x75a0(%rbx)
movq %r15, 0x75c0(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x7690(%rbx,%r15)
movups %xmm0, 0x76a0(%rbx,%r15)
jmp 0x61518b
addq %rax, %rcx
movq %rcx, 0x75a0(%rbx)
movq %r15, 0x75c0(%rbx)
movq %r12, %r14
jmp 0x614fd9
shrl $0x3, %r15d
movl $0x1, %r14d
leaq 0x7690(%rbx), %r12
leaq 0x20(%r15), %rdx
movzbl (%rcx,%r14), %esi
movq %r12, %rdi
callq 0x3fa90
movq %r12, 0x75a0(%rbx)
movq %r15, 0x75c0(%rbx)
incq %r14
jmp 0x614fd9
movl %esi, %eax
shrl $0x4, %eax
andl $0x3fff, %eax # imm = 0x3FFF
shrl $0x12, %esi
movl $0x4, %r8d
movb $0x1, %r11b
jmp 0x615201
testb %r15b, %r15b
setne %r11b
movl %esi, %eax
shrl $0x4, %eax
movl $0x3ff, %edi # imm = 0x3FF
andl %edi, %eax
shrl $0xe, %esi
andl %edi, %esi
movl $0x3, %r8d
movl %esi, %r10d
leaq (%r10,%r8), %r13
cmpq %rdx, %r13
ja 0x614fd9
movl %eax, %r15d
cmpl $0x301, %r15d # imm = 0x301
jb 0x615243
cmpl $0x0, 0x75f4(%rbx)
je 0x615243
movq 0x18(%rbx), %rax
movq $-0x40, %rdx
prefetcht1 0x40(%rax,%rdx)
addq $0x40, %rdx
cmpq $0x3fc4, %rdx # imm = 0x3FC4
jb 0x615231
movl 0x75d8(%rbx), %eax
cmpl $0x3, %r12d
jne 0x615275
leaq 0x7690(%rbx), %rdi
addq %r8, %rcx
movq 0x18(%rbx), %r8
movq %r15, %rsi
movq %rcx, %rdx
movq %r10, %rcx
movl %eax, %r9d
testb %r11b, %r11b
je 0x6152a5
callq 0x614554
jmp 0x6152bb
leaq 0x2838(%rbx), %rdi
leaq 0x7690(%rbx), %rsi
addq %r8, %rcx
leaq 0x6abc(%rbx), %r9
movq %r15, %rdx
movq %r10, %r8
testb %r11b, %r11b
je 0x6152ac
pushq %rax
pushq $0xa00 # imm = 0xA00
callq 0x614564
jmp 0x6152b7
callq 0x6144cf
jmp 0x6152bb
pushq %rax
pushq $0xa00 # imm = 0xA00
callq 0x6144df
addq $0x10, %rsp
cmpq $-0x78, %rax
ja 0x614fd9
leaq 0x7690(%rbx), %rax
movq %rax, 0x75a0(%rbx)
movq %r15, 0x75c0(%rbx)
movl $0x1, 0x7528(%rbx)
cmpl $0x2, %r12d
jne 0x6152f5
leaq 0x2838(%rbx), %rcx
movq %rcx, 0x18(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rax,%r15)
movups %xmm0, (%rax,%r15)
movq %r13, %r14
jmp 0x614fd9
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
|
ZSTD_decodeSeqHeaders
|
/* ZSTD_decodeSeqHeaders() :
 * Decode the header of the Sequences section of a compressed block:
 * the sequence count, then one descriptor byte giving the symbol encoding
 * type of each of the three streams, then the FSE table descriptions for
 * literal lengths (LL), offsets (OF) and match lengths (ML), which are
 * built/loaded into dctx via ZSTD_buildSeqTable.
 * @param dctx      decompression context; receives LLTptr/OFTptr/MLTptr
 * @param nbSeqPtr  out: number of sequences encoded in this block
 * @param src       start of the Sequences section
 * @param srcSize   bytes available at src
 * @return : number of header bytes consumed, or a zstd error code. */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                             const void* src, size_t srcSize)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* const iend = istart + srcSize;
    const BYTE* ip = istart;
    int nbSeq;
    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");

    /* check */
    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");

    /* SeqHead */
    nbSeq = *ip++;
    if (!nbSeq) {
        /* zero sequences : the section must consist of exactly this one byte */
        *nbSeqPtr=0;
        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
        return 1;
    }
    if (nbSeq > 0x7F) {
        if (nbSeq == 0xFF) {
            /* long form : 0xFF then a 16-bit LE count biased by LONGNBSEQ */
            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
            nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
            ip+=2;
        } else {
            /* two-byte form : high bits in the first byte, low byte follows */
            RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
        }
    }
    *nbSeqPtr = nbSeq;

    /* FSE table descriptors */
    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
    { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
      symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
      symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
      ip++;

      /* Build DTables */
      /* Literal-length table */
      { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
                                                  LLtype, MaxLL, LLFSELog,
                                                  ip, iend-ip,
                                                  LL_base, LL_bits,
                                                  LL_defaultDTable, dctx->fseEntropy,
                                                  dctx->ddictIsCold, nbSeq,
                                                  dctx->workspace, sizeof(dctx->workspace),
                                                  dctx->bmi2);
        RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
        ip += llhSize;
      }

      /* Offset table */
      { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
                                                  OFtype, MaxOff, OffFSELog,
                                                  ip, iend-ip,
                                                  OF_base, OF_bits,
                                                  OF_defaultDTable, dctx->fseEntropy,
                                                  dctx->ddictIsCold, nbSeq,
                                                  dctx->workspace, sizeof(dctx->workspace),
                                                  dctx->bmi2);
        RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
        ip += ofhSize;
      }

      /* Match-length table */
      { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
                                                  MLtype, MaxML, MLFSELog,
                                                  ip, iend-ip,
                                                  ML_base, ML_bits,
                                                  ML_defaultDTable, dctx->fseEntropy,
                                                  dctx->ddictIsCold, nbSeq,
                                                  dctx->workspace, sizeof(dctx->workspace),
                                                  dctx->bmi2);
        RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
        ip += mlhSize;
      }
    }

    return ip-istart;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq $-0x48, %r15
testq %rcx, %rcx
je 0x6157b7
movq %rcx, %r14
movq %rdx, %rbx
movzbl (%rdx), %ebp
testl %ebp, %ebp
je 0x6157a9
movq %rdi, %r12
testb %bpl, %bpl
js 0x6157c9
leaq 0x1(%rbx), %rax
addq %rbx, %r14
movl %ebp, (%rsi)
leaq 0x1(%rax), %r13
cmpq %r14, %r13
ja 0x6157b7
movzbl (%rax), %edx
movl %edx, 0x4(%rsp)
shrl $0x6, %edx
leaq 0x20(%r12), %rdi
movq %r14, %rax
subq %r13, %rax
movl 0x75f4(%r12), %r10d
leaq 0x6abc(%r12), %r11
movl 0x752c(%r12), %ecx
movq %rcx, 0x8(%rsp)
movl 0x75d8(%r12), %r15d
subq $0x8, %rsp
movq %r12, %rsi
movl $0x23, %ecx
movl $0x9, %r8d
movq %r13, %r9
pushq %r15
movq %r11, 0x20(%rsp)
pushq %r11
pushq %rbp
pushq %r10
pushq 0x30(%rsp)
leaq 0xc1f57(%rip), %r10 # 0x6d75c0
pushq %r10
leaq 0xc1ebe(%rip), %r10 # 0x6d7530
pushq %r10
leaq 0xc1e25(%rip), %r10 # 0x6d74a0
pushq %r10
pushq %rax
callq 0x615808
addq $0x50, %rsp
movq $-0x14, %r15
cmpq $-0x78, %rax
ja 0x6157b7
addq %rax, %r13
movl 0x4(%rsp), %edx
shrl $0x4, %edx
andl $0x3, %edx
leaq 0x1028(%r12), %rdi
leaq 0x10(%r12), %rsi
movq %r14, %rax
subq %r13, %rax
movl 0x75f4(%r12), %r10d
movl 0x752c(%r12), %ecx
movq %rcx, 0x8(%rsp)
movl 0x75d8(%r12), %r11d
subq $0x8, %rsp
movl $0x1f, %ecx
movl $0x8, %r8d
movq %r13, %r9
pushq %r11
pushq 0x20(%rsp)
pushq %rbp
pushq %r10
pushq 0x30(%rsp)
leaq 0xc21d5(%rip), %r10 # 0x6d78d0
pushq %r10
leaq 0xc214c(%rip), %r10 # 0x6d7850
pushq %r10
leaq 0xc20c3(%rip), %r10 # 0x6d77d0
pushq %r10
pushq %rax
callq 0x615808
addq $0x50, %rsp
cmpq $-0x78, %rax
ja 0x6157b7
movl 0x4(%rsp), %edx
shrl $0x2, %edx
andl $0x3, %edx
addq %rax, %r13
leaq 0x1830(%r12), %rdi
leaq 0x8(%r12), %rsi
subq %r13, %r14
movl 0x75f4(%r12), %eax
movl 0x752c(%r12), %r10d
movl 0x75d8(%r12), %r11d
subq $0x8, %rsp
leaq 0xc243d(%rip), %r12 # 0x6d7ba0
movl $0x34, %ecx
movl $0x9, %r8d
movq %r13, %r9
pushq %r11
pushq 0x20(%rsp)
pushq %rbp
pushq %rax
pushq %r10
pushq %r12
leaq 0xc233c(%rip), %rax # 0x6d7ac0
pushq %rax
leaq 0xc2254(%rip), %rax # 0x6d79e0
pushq %rax
pushq %r14
callq 0x615808
addq $0x50, %rsp
cmpq $-0x78, %rax
ja 0x6157b7
addq %rax, %r13
subq %rbx, %r13
movq %r13, %r15
jmp 0x6157b7
movl $0x0, (%rsi)
cmpq $0x1, %r14
cmoveq %r14, %r15
movq %r15, %rax
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
cmpl $0xff, %ebp
je 0x6157ef
cmpq $0x2, %r14
jl 0x6157b7
shll $0x8, %ebp
leaq 0x2(%rbx), %rax
movzbl 0x1(%rbx), %ecx
addl %ecx, %ebp
addl $0xffff8000, %ebp # imm = 0xFFFF8000
jmp 0x6155f1
cmpq $0x3, %r14
jl 0x6157b7
leaq 0x3(%rbx), %rax
movzwl 0x1(%rbx), %ebp
addl $0x7f00, %ebp # imm = 0x7F00
jmp 0x6155f1
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
|
ZSTD_decompressBlock
|
/* Public single-block decompression entry point.
 * Checks output-buffer continuity state first (dctx tracks the previous
 * destination end), then decodes one block in non-frame mode and records
 * where this block's output finished for the next call.
 * @return : number of bytes written to dst, or a zstd error code. */
size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
                            void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize)
{
    size_t decodedSize;
    ZSTD_checkContinuity(dctx, dst, dstCapacity);
    decodedSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
    /* remember the end of this block's output for continuity tracking */
    dctx->previousDstEnd = (char*)dst + decodedSize;
    return decodedSize;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %rbx
movq %rdi, %r14
movq 0x74c0(%rdi), %rax
cmpq %rsi, %rax
sete %sil
testq %rdx, %rdx
sete %dil
orb %sil, %dil
jne 0x617354
movq %rax, 0x74d8(%r14)
movq 0x74c8(%r14), %rsi
subq %rax, %rsi
addq %rbx, %rsi
movq %rsi, 0x74d0(%r14)
movq %rbx, 0x74c8(%r14)
movq %rbx, 0x74c0(%r14)
movq %r14, %rdi
movq %rbx, %rsi
xorl %r9d, %r9d
callq 0x615980
addq %rax, %rbx
movq %rbx, 0x74c0(%r14)
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
|
ZSTD_initFseState
|
/* Prime one FSE decoder state from the bitstream.
 * The first cell of `dt` holds a ZSTD_seqSymbol_header describing the table
 * (notably tableLog); the actual decoding cells start at dt+1. Reads
 * tableLog bits to select the initial state, then refills the bitstream. */
static void
ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
{
    const ZSTD_seqSymbol_header* const header = (const ZSTD_seqSymbol_header*)(const void*)dt;
    DStatePtr->state = BIT_readBits(bitD, header->tableLog);
    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
                (U32)DStatePtr->state, header->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;   /* skip the header cell */
}
|
movl 0x4(%rdx), %r8d
movq (%rsi), %r9
movl 0x8(%rsi), %eax
addl %r8d, %eax
movl %eax, %ecx
negb %cl
shrq %cl, %r9
leaq 0xc0a21(%rip), %rcx # 0x6d7db0
andl (%rcx,%r8,4), %r9d
movl %eax, 0x8(%rsi)
movq %r9, (%rdi)
cmpl $0x40, %eax
ja 0x6173f5
movq 0x10(%rsi), %rcx
cmpq 0x20(%rsi), %rcx
jae 0x6173db
movq 0x18(%rsi), %r8
cmpq %r8, %rcx
je 0x6173f5
movl %eax, %r9d
shrl $0x3, %r9d
movq %rcx, %r10
subq %r9, %r10
movl %ecx, %r11d
subl %r8d, %r11d
cmpq %r8, %r10
cmovael %r9d, %r11d
subq %r11, %rcx
movq %rcx, 0x10(%rsi)
shll $0x3, %r11d
subl %r11d, %eax
jmp 0x6173ec
movl %eax, %r8d
shrl $0x3, %r8d
subq %r8, %rcx
movq %rcx, 0x10(%rsi)
andl $0x7, %eax
movl %eax, 0x8(%rsi)
movq (%rcx), %rax
movq %rax, (%rsi)
addq $0x8, %rdx
movq %rdx, 0x8(%rdi)
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
|
BIT_reloadDStream
|
/* Refill the bit container from the input buffer, rewinding the read
 * pointer by as many whole consumed bytes as the buffer start allows.
 * @return : BIT_DStream_unfinished when fully refilled,
 *           BIT_DStream_endOfBuffer when the buffer start limited the refill,
 *           BIT_DStream_completed when everything has been consumed,
 *           BIT_DStream_overflow when more bits were read than exist. */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
    /* consumed more bits than the container holds : overflow / end of stream */
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))
        return BIT_DStream_overflow;

    /* far enough from the buffer start : unconditional fast reload */
    if (bitD->ptr >= bitD->limitPtr)
        return BIT_reloadDStreamFast(bitD);

    if (bitD->ptr == bitD->start) {
        /* nothing left to rewind into */
        return (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8)
             ? BIT_DStream_endOfBuffer
             : BIT_DStream_completed;
    }

    /* start < ptr < limitPtr : rewind by whole bytes, clamped at start */
    {   U32 rewind = bitD->bitsConsumed >> 3;
        BIT_DStream_status status = BIT_DStream_unfinished;
        if (bitD->ptr - rewind < bitD->start) {
            rewind = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
            status = BIT_DStream_endOfBuffer;
        }
        bitD->ptr -= rewind;
        bitD->bitsConsumed -= rewind*8;
        bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
        return status;
    }
}
|
movl 0x8(%rdi), %eax
cmpq $0x40, %rax
ja 0x61745a
movq 0x10(%rdi), %rcx
cmpq 0x20(%rdi), %rcx
jae 0x617442
movq 0x18(%rdi), %rdx
cmpq %rdx, %rcx
je 0x61745a
movl %eax, %esi
shrl $0x3, %esi
movq %rcx, %r8
subq %rsi, %r8
movl %ecx, %r9d
subl %edx, %r9d
cmpq %rdx, %r8
cmovael %esi, %r9d
subq %r9, %rcx
movq %rcx, 0x10(%rdi)
shll $0x3, %r9d
subl %r9d, %eax
jmp 0x617451
movl %eax, %edx
shrl $0x3, %edx
subq %rdx, %rcx
movq %rcx, 0x10(%rdi)
andl $0x7, %eax
movl %eax, 0x8(%rdi)
movq (%rcx), %rax
movq %rax, (%rdi)
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/../common/bitstream.h
|
ZSTD_execSequenceEnd
|
/* ZSTD_execSequenceEnd() :
 * Slow path for executing one sequence when it lands too close to the end of
 * the output buffer (or of the literal buffer) for the wildcopy fast path.
 * Copies sequence.litLength literal bytes from *litPtr, then the match of
 * sequence.matchLength bytes located sequence.offset bytes back — possibly
 * spanning the dictionary (extDict) and the current prefix segment.
 * Kept out of line (FORCE_NOINLINE) so the hot path stays compact.
 * @return : full sequence length (litLength + matchLength),
 *           or an error code when a bound check fails. */
FORCE_NOINLINE
size_t ZSTD_execSequenceEnd(BYTE* op,
    BYTE* const oend, seq_t sequence,
    const BYTE** litPtr, const BYTE* const litLimit,
    const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
    BYTE* const oLitEnd = op + sequence.litLength;
    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
    const BYTE* match = oLitEnd - sequence.offset;
    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;   /* last dst position where wildcopy is still safe */

    /* bounds checks : careful of address space overflow in 32-bit mode */
    RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
    RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
    assert(op < op + sequenceLength);
    assert(oLitEnd < op + sequenceLength);

    /* copy literals */
    ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
    op = oLitEnd;
    *litPtr = iLitEnd;

    /* copy Match */
    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
        /* offset beyond prefix : match starts in the dictionary (extDict) */
        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
        match = dictEnd - (prefixStart-match);
        if (match + sequence.matchLength <= dictEnd) {
            /* match lies entirely within the dictionary */
            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
            return sequenceLength;
        }
        /* span extDict & currentPrefixSegment */
        { size_t const length1 = dictEnd - match;
          ZSTD_memmove(oLitEnd, match, length1);
          op = oLitEnd + length1;
          sequence.matchLength -= length1;
          match = prefixStart;   /* remainder continues from the prefix start */
    }   }
    ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
    return sequenceLength;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rcx, %rax
movq %rdx, %r12
movq %rsi, %r15
movq 0x60(%rsp), %rcx
movq 0x68(%rsp), %rbx
leaq (%rbx,%rcx), %rsi
movq %r15, %rdx
subq %rdi, %rdx
movq $-0x46, %rbp
cmpq %rdx, %rsi
ja 0x617561
movq (%r12), %rdx
subq %rdx, %rax
movq $-0x14, %rbp
cmpq %rax, %rcx
ja 0x617561
movq %r9, 0x10(%rsp)
movq %rsi, (%rsp)
leaq (%rdi,%rcx), %r14
movq %r14, 0x18(%rsp)
leaq (%rdx,%rcx), %rsi
movq %rsi, 0x20(%rsp)
movq 0x70(%rsp), %r13
subq %r13, %r14
addq $-0x20, %r15
movq %r15, %rsi
movq %r8, 0x8(%rsp)
xorl %r8d, %r8d
callq 0x617580
movq 0x8(%rsp), %rcx
movq 0x20(%rsp), %rax
movq %rax, (%r12)
movq 0x18(%rsp), %r12
movq %r12, %rax
subq %rcx, %rax
cmpq %rax, %r13
jbe 0x617543
movq %r12, %rax
subq 0x10(%rsp), %rax
cmpq %rax, %r13
movq (%rsp), %r13
ja 0x617561
movq 0x78(%rsp), %rax
subq %rcx, %r14
leaq (%rax,%r14), %rsi
leaq (%rsi,%rbx), %rcx
cmpq %rax, %rcx
jbe 0x617573
movq %r14, %rdx
negq %rdx
movq %r12, %rdi
callq 0x3f470
movq 0x8(%rsp), %rdx
subq %r14, %r12
addq %r14, %rbx
jmp 0x61754a
movq %r14, %rdx
movq (%rsp), %r13
movq %r12, %rdi
movq %r15, %rsi
movq %rbx, %rcx
movl $0x1, %r8d
callq 0x617580
movq %r13, %rbp
movq %rbp, %rax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r12, %rdi
movq %rbx, %rdx
callq 0x3f470
jmp 0x61755e
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
|
ZSTD_safecopy
|
/* ZSTD_safecopy() :
 * Copy `length` bytes from ip to op near the end of the output buffer.
 * Over-copying wildcopy is used only while the destination stays at or below
 * oend_w, so any over-write lands in the slack the caller reserves past
 * oend_w (oend_w = real end - WILDCOPY_OVERLENGTH at the call sites); the
 * remaining tail up to op+length is finished one byte at a time.
 * ovtype == ZSTD_overlap_src_before_dst permits op and ip to overlap with
 * op > ip (offset < 8), handled up front by ZSTD_overlapCopy8. */
static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
    ptrdiff_t const diff = op - ip;
    BYTE* const oend = op + length;

    /* precondition : no-overlap copies are either >=8 apart or past oend_w;
     * overlapping copies always have src before dst */
    assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
           (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));

    if (length < 8) {
        /* Handle short lengths. */
        while (op < oend) *op++ = *ip++;
        return;
    }
    if (ovtype == ZSTD_overlap_src_before_dst) {
        /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
        assert(length >= 8);
        ZSTD_overlapCopy8(&op, &ip, diff);
        assert(op - ip >= 8);
        assert(op <= oend);
    }

    if (oend <= oend_w) {
        /* No risk of overwrite. */
        ZSTD_wildcopy(op, ip, length, ovtype);
        return;
    }
    if (op <= oend_w) {
        /* Wildcopy until we get close to the end. */
        assert(oend > oend_w);
        ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
        ip += oend_w - op;
        op = oend_w;
    }
    /* Handle the leftovers. */
    while (op < oend) *op++ = *ip++;
}
|
leaq (%rdi,%rcx), %rax
cmpq $0x7, %rcx
jg 0x6175a7
testq %rcx, %rcx
jle 0x61770b
movb (%rdx), %cl
incq %rdx
movb %cl, (%rdi)
incq %rdi
cmpq %rax, %rdi
jb 0x617593
jmp 0x61770b
cmpl $0x1, %r8d
jne 0x61760b
movq %rdi, %r9
subq %rdx, %r9
cmpq $0x7, %r9
ja 0x6175fd
leaq 0xc0890(%rip), %r10 # 0x6d7e50
movslq (%r10,%r9,4), %r10
movb (%rdx), %r11b
movb %r11b, (%rdi)
movb 0x1(%rdx), %r11b
movb %r11b, 0x1(%rdi)
movb 0x2(%rdx), %r11b
movb %r11b, 0x2(%rdi)
movb 0x3(%rdx), %r11b
movb %r11b, 0x3(%rdi)
leaq 0xc0847(%rip), %r11 # 0x6d7e30
movl (%r11,%r9,4), %r9d
movl (%rdx,%r9), %r11d
addq %r9, %rdx
subq %r10, %rdx
movl %r11d, 0x4(%rdi)
jmp 0x617603
movq (%rdx), %r9
movq %r9, (%rdi)
addq $0x8, %rdx
addq $0x8, %rdi
cmpq %rsi, %rax
jbe 0x61761d
cmpq %rsi, %rdi
jbe 0x61768b
movq %rdi, %rsi
jmp 0x617706
leaq (%rdi,%rcx), %rax
cmpl $0x1, %r8d
jne 0x61764b
movq %rdi, %rsi
subq %rdx, %rsi
cmpq $0xf, %rsi
jg 0x61764b
movq (%rdx), %rcx
movq %rcx, (%rdi)
addq $0x8, %rdi
addq $0x8, %rdx
cmpq %rax, %rdi
jb 0x617633
jmp 0x61770b
movups (%rdx), %xmm0
movups %xmm0, (%rdi)
cmpq $0x11, %rcx
jb 0x61770b
addq $0x10, %rdx
movl $0x10, %ecx
movups -0x10(%rdx,%rcx), %xmm0
leaq (%rdi,%rcx), %rsi
addq $0x20, %rsi
movups %xmm0, -0x20(%rsi)
movups (%rdx,%rcx), %xmm0
movups %xmm0, -0x10(%rsi)
addq $0x20, %rcx
cmpq %rax, %rsi
jb 0x617664
jmp 0x61770b
movq %rsi, %rcx
subq %rdi, %rcx
cmpl $0x1, %r8d
jne 0x6176bb
movq %rdi, %r8
subq %rdx, %r8
cmpq $0xf, %r8
jg 0x6176bb
movq %rdx, %r8
movq (%r8), %r9
movq %r9, (%rdi)
addq $0x8, %rdi
addq $0x8, %r8
cmpq %rsi, %rdi
jb 0x6176a6
jmp 0x6176f7
movups (%rdx), %xmm0
movups %xmm0, (%rdi)
cmpq $0x11, %rcx
jl 0x6176f7
leaq 0x10(%rdx), %r8
movl $0x10, %r9d
movups -0x10(%r8,%r9), %xmm0
leaq (%rdi,%r9), %r10
addq $0x20, %r10
movups %xmm0, -0x20(%r10)
movups (%r8,%r9), %xmm0
movups %xmm0, -0x10(%r10)
addq $0x20, %r9
cmpq %rsi, %r10
jb 0x6176d1
addq %rcx, %rdx
jmp 0x617706
movb (%rdx), %cl
incq %rdx
movb %cl, (%rsi)
incq %rsi
cmpq %rax, %rsi
jb 0x6176fc
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
|
rhash_file
|
/* Compute message digest(s) of an entire file.
 * @param hash_id  bitmask selecting the hash function(s); masked against
 *                 RHASH_ALL_HASHES, an empty selection sets errno=EINVAL
 * @param filepath path of the file to hash (opened in binary mode)
 * @param result   buffer receiving the binary digest via rhash_final
 * @return 0 on success, -1 on error (errno set by the failing call). */
RHASH_API int rhash_file(unsigned hash_id, const char* filepath, unsigned char* result)
{
	FILE* stream;
	rhash ctx;
	int res;

	/* keep only supported hash bits; an empty selection is invalid */
	hash_id &= RHASH_ALL_HASHES;
	if (!hash_id) {
		errno = EINVAL;
		return -1;
	}

	stream = fopen(filepath, "rb"); /* binary mode: digests are byte-exact */
	if (stream == NULL)
		return -1;

	ctx = rhash_init(hash_id);
	if (ctx == NULL) {
		fclose(stream);
		return -1;
	}

	res = rhash_file_update(ctx, stream); /* hash the file */
	fclose(stream);

	rhash_final(ctx, result);
	rhash_free(ctx);
	return res;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movl %edi, %ebp
andl $0x3ff, %ebp # imm = 0x3FF
je 0x617ded
movq %rdx, %rbx
leaq 0x2bec1(%rip), %rax # 0x643c60
movq %rsi, %rdi
movq %rax, %rsi
callq 0x417b0
testq %rax, %rax
je 0x617e02
movq %rax, %r14
movl %ebp, %edi
callq 0x617720
testq %rax, %rax
je 0x617dfa
movq %rax, %r15
movq %rax, %rdi
movq %r14, %rsi
callq 0x617cc1
movl %eax, %ebp
movq %r14, %rdi
callq 0x3fe60
movq %r15, %rdi
movq %rbx, %rsi
callq 0x617b78
movq %r15, %rdi
callq 0x617990
movl %ebp, %eax
jmp 0x617e07
callq 0x415e0
movl $0x16, (%rax)
jmp 0x617e02
movq %r14, %rdi
callq 0x3fe60
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/rhash.c
|
rhash_init_algorithms
|
/* One-time initialization of the algorithm table.
 * The mask parameter is kept for API compatibility and currently ignored. */
void rhash_init_algorithms(unsigned mask)
{
	(void)(mask); /* unused now */

	/* verify that RHASH_HASH_COUNT is the index of the major bit of RHASH_ALL_HASHES */
	assert(1 == (RHASH_ALL_HASHES >> (RHASH_HASH_COUNT - 1)));
#ifdef GENERATE_GOST94_LOOKUP_TABLE
	rhash_gost94_init_table();
#endif
	/* mark the algorithm table as fully initialized */
	rhash_uninitialized_algorithms = 0;
}
|
movl $0x0, 0x253a2e(%rip) # 0x86b8e0
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/algorithms.c
|
rhash_md5_init
|
/* Prepare an MD5 context for a new message: reset the processed-byte
 * counter and load the four standard MD5 chaining values (RFC 1321). */
void rhash_md5_init(md5_ctx* ctx)
{
	static const unsigned md5_iv[4] = {
		0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476
	};
	unsigned i;

	ctx->length = 0; /* no bytes hashed yet */

	/* initialize state */
	for (i = 0; i < 4; i++)
		ctx->hash[i] = md5_iv[i];
}
|
movq $0x0, 0x40(%rdi)
movaps 0x92e75(%rip), %xmm0 # 0x6aad60
movups %xmm0, 0x48(%rdi)
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/md5.c
|
rhash_md5_update
|
/* Feed a chunk of the message into the MD5 context.
 * Input is buffered in ctx->message until a full 64-byte block is available.
 * Complete blocks are processed directly from the caller's buffer when it is
 * 32-bit aligned on a little-endian CPU (no copy); otherwise they are staged
 * through ctx->message via le32_copy. Trailing bytes are saved for the next
 * call or for rhash_md5_final. */
void rhash_md5_update(md5_ctx* ctx, const unsigned char* msg, size_t size)
{
	unsigned index = (unsigned)ctx->length & 63; /* bytes already buffered */
	ctx->length += size;

	/* fill partial block */
	if (index) {
		unsigned left = md5_block_size - index;
		le32_copy((char*)ctx->message, index, msg, (size < left ? size : left));
		if (size < left) return; /* block still incomplete */

		/* process partial block */
		rhash_md5_process_block(ctx->hash, ctx->message);
		msg += left;
		size -= left;
	}
	while (size >= md5_block_size) {
		unsigned* aligned_message_block;
		if (IS_LITTLE_ENDIAN && IS_ALIGNED_32(msg)) {
			/* the most common case is processing a 32-bit aligned message
			on a little-endian CPU without copying it */
			aligned_message_block = (unsigned*)msg;
		} else {
			le32_copy(ctx->message, 0, msg, md5_block_size);
			aligned_message_block = ctx->message;
		}

		rhash_md5_process_block(ctx->hash, aligned_message_block);
		msg += md5_block_size;
		size -= md5_block_size;
	}
	if (size) {
		/* save leftovers */
		le32_copy(ctx->message, 0, msg, size);
	}
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r14
movq 0x40(%rdi), %rdi
leaq (%rdi,%rdx), %rcx
movl %edi, %eax
movq %rcx, 0x40(%r14)
andl $0x3f, %eax
je 0x617f4e
movl $0x40, %r12d
subl %eax, %r12d
andl $0x3f, %edi
addq %r14, %rdi
cmpq %rbx, %r12
movq %rbx, %rdx
cmovbq %r12, %rdx
movq %r15, %rsi
callq 0x3f250
subq %r12, %rbx
jb 0x617fc8
leaq 0x48(%r14), %rdi
movq %r14, %rsi
callq 0x617fd2
addq %r12, %r15
cmpq $0x40, %rbx
jb 0x617fa9
leaq 0x48(%r14), %r13
movq %r15, %r12
movq %r12, %rsi
testb $0x3, %r15b
je 0x617f91
movups (%r12), %xmm0
movups 0x10(%r12), %xmm1
movups 0x20(%r12), %xmm2
movups 0x30(%r12), %xmm3
movups %xmm3, 0x30(%r14)
movups %xmm2, 0x20(%r14)
movups %xmm1, 0x10(%r14)
movups %xmm0, (%r14)
movq %r14, %rsi
movq %r13, %rdi
callq 0x617fd2
addq $0x40, %r12
addq $-0x40, %rbx
cmpq $0x3f, %rbx
ja 0x617f5b
jmp 0x617fac
movq %r15, %r12
testq %rbx, %rbx
je 0x617fc8
movq %r14, %rdi
movq %r12, %rsi
movq %rbx, %rdx
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
jmp 0x3f250
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/md5.c
|
rhash_md5_process_block
|
/**
 * The core MD5 compression function (RFC 1321): mix one 64-byte block
 * into the 128-bit state.
 *
 * @param state four 32-bit chaining words, updated in place
 * @param x     sixteen 32-bit message words in host (little-endian) order
 *
 * Written as a single 64-step loop instead of four unrolled macro rounds;
 * the round functions, constants and shift amounts are identical.
 */
static void rhash_md5_process_block(unsigned state[4], const unsigned* x)
{
	/* per-step left-rotation amounts, sixteen per round group */
	static const unsigned char md5_shift[64] = {
		7, 12, 17, 22,  7, 12, 17, 22,  7, 12, 17, 22,  7, 12, 17, 22,
		5,  9, 14, 20,  5,  9, 14, 20,  5,  9, 14, 20,  5,  9, 14, 20,
		4, 11, 16, 23,  4, 11, 16, 23,  4, 11, 16, 23,  4, 11, 16, 23,
		6, 10, 15, 21,  6, 10, 15, 21,  6, 10, 15, 21,  6, 10, 15, 21
	};
	/* additive constants T[i] = floor(2^32 * |sin(i + 1)|) from RFC 1321 */
	static const unsigned md5_sine[64] = {
		0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
		0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
		0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
		0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
		0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
		0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
		0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
		0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
		0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
		0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
		0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
		0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
		0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
		0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
		0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
		0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391
	};
	unsigned a = state[0], b = state[1], c = state[2], d = state[3];
	int i;

	for (i = 0; i < 64; i++) {
		unsigned f, g, t, s;
		if (i < 16) {
			f = (b & c) | (~b & d);           /* F */
			g = (unsigned)i;
		} else if (i < 32) {
			f = (d & b) | (~d & c);           /* G */
			g = (5u * (unsigned)i + 1u) & 15u;
		} else if (i < 48) {
			f = b ^ c ^ d;                    /* H */
			g = (3u * (unsigned)i + 5u) & 15u;
		} else {
			f = c ^ (b | ~d);                 /* I */
			g = (7u * (unsigned)i) & 15u;
		}
		t = a + f + md5_sine[i] + x[g];
		s = md5_shift[i];
		/* rotate the working variables: (a,b,c,d) <- (d, b+ROTL(t,s), b, c) */
		a = d;
		d = c;
		c = b;
		b += (t << s) | (t >> (32u - s));
	}

	state[0] += a;
	state[1] += b;
	state[2] += c;
	state[3] += d;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rsi, %r9
movq %rdi, -0x20(%rsp)
movl (%rdi), %r8d
movq %r8, -0x8(%rsp)
movl 0x4(%rdi), %r10d
movl 0x8(%rdi), %edx
movl 0xc(%rdi), %ecx
movl %ecx, %eax
xorl %edx, %eax
andl %r10d, %eax
xorl %ecx, %eax
movq %rcx, %rsi
movq %rcx, -0x10(%rsp)
movl (%r9), %ebx
leal (%r8,%rbx), %ecx
movq %rbx, -0x40(%rsp)
addl %ecx, %eax
addl $0xd76aa478, %eax # imm = 0xD76AA478
roll $0x7, %eax
movl 0x4(%r9), %r12d
addl %r10d, %eax
movl %edx, %ecx
xorl %r10d, %ecx
andl %eax, %ecx
xorl %edx, %ecx
movq %rdx, %rdi
movq %rdx, -0x18(%rsp)
leal (%rsi,%r12), %edx
addl %edx, %ecx
addl $0xe8c7b756, %ecx # imm = 0xE8C7B756
roll $0xc, %ecx
addl %eax, %ecx
movl %eax, %edx
xorl %r10d, %edx
andl %ecx, %edx
xorl %r10d, %edx
movq %r10, -0x28(%rsp)
movl 0x8(%r9), %esi
movq %rsi, -0x30(%rsp)
addl %edi, %esi
addl %esi, %edx
addl $0x242070db, %edx # imm = 0x242070DB
roll $0x11, %edx
addl %ecx, %edx
movl %ecx, %esi
xorl %eax, %esi
andl %edx, %esi
xorl %eax, %esi
movl 0xc(%r9), %edi
movq %rdi, -0x38(%rsp)
leal (%r10,%rdi), %r8d
addl %esi, %r8d
addl $0xc1bdceee, %r8d # imm = 0xC1BDCEEE
roll $0x16, %r8d
addl %edx, %r8d
movl %edx, %esi
xorl %ecx, %esi
andl %r8d, %esi
xorl %ecx, %esi
movl 0x10(%r9), %edi
movl %edi, -0x6c(%rsp)
addl %edi, %eax
addl %esi, %eax
addl $0xf57c0faf, %eax # imm = 0xF57C0FAF
roll $0x7, %eax
addl %r8d, %eax
movl %r8d, %esi
xorl %edx, %esi
andl %eax, %esi
xorl %edx, %esi
movl 0x14(%r9), %edi
movl %edi, -0x78(%rsp)
addl %edi, %ecx
addl %esi, %ecx
addl $0x4787c62a, %ecx # imm = 0x4787C62A
roll $0xc, %ecx
addl %eax, %ecx
movl %eax, %esi
xorl %r8d, %esi
andl %ecx, %esi
xorl %r8d, %esi
movl 0x18(%r9), %r11d
addl %r11d, %edx
movl %r11d, -0x4c(%rsp)
addl %esi, %edx
addl $0xa8304613, %edx # imm = 0xA8304613
roll $0x11, %edx
addl %ecx, %edx
movl %ecx, %esi
xorl %eax, %esi
andl %edx, %esi
xorl %eax, %esi
movl 0x1c(%r9), %edi
movl %edi, -0x50(%rsp)
addl %edi, %r8d
addl %esi, %r8d
addl $0xfd469501, %r8d # imm = 0xFD469501
roll $0x16, %r8d
addl %edx, %r8d
movl %edx, %esi
xorl %ecx, %esi
andl %r8d, %esi
xorl %ecx, %esi
movl 0x20(%r9), %edi
movl %edi, -0x70(%rsp)
addl %edi, %eax
addl %esi, %eax
addl $0x698098d8, %eax # imm = 0x698098D8
roll $0x7, %eax
addl %r8d, %eax
movl %r8d, %esi
xorl %edx, %esi
andl %eax, %esi
xorl %edx, %esi
movl 0x24(%r9), %r14d
addl %r14d, %ecx
movl %r14d, -0x5c(%rsp)
addl %esi, %ecx
addl $0x8b44f7af, %ecx # imm = 0x8B44F7AF
roll $0xc, %ecx
addl %eax, %ecx
movl %eax, %esi
xorl %r8d, %esi
andl %ecx, %esi
xorl %r8d, %esi
movl 0x28(%r9), %edi
addl %edi, %edx
movl %edi, %r13d
movl %edi, -0x60(%rsp)
addl %esi, %edx
addl $0xffff5bb1, %edx # imm = 0xFFFF5BB1
roll $0x11, %edx
addl %ecx, %edx
movl %ecx, %esi
xorl %eax, %esi
andl %edx, %esi
xorl %eax, %esi
movl 0x2c(%r9), %r10d
addl %r10d, %r8d
movl %r10d, -0x68(%rsp)
addl %r8d, %esi
addl $0x895cd7be, %esi # imm = 0x895CD7BE
roll $0x16, %esi
addl %edx, %esi
movl %edx, %r8d
xorl %ecx, %r8d
andl %esi, %r8d
xorl %ecx, %r8d
movl 0x30(%r9), %edi
movl %edi, -0x58(%rsp)
addl %edi, %eax
addl %r8d, %eax
addl $0x6b901122, %eax # imm = 0x6B901122
roll $0x7, %eax
addl %esi, %eax
movl %esi, %r8d
xorl %edx, %r8d
andl %eax, %r8d
xorl %edx, %r8d
movl 0x34(%r9), %edi
movl %edi, -0x74(%rsp)
addl %edi, %ecx
leal (%r8,%rcx), %edi
addl $0xfd987193, %edi # imm = 0xFD987193
roll $0xc, %edi
addl %eax, %edi
movl %eax, %ebp
xorl %esi, %ebp
andl %edi, %ebp
xorl %esi, %ebp
movl 0x38(%r9), %r15d
addl %r15d, %edx
addl %ebp, %edx
addl $0xa679438e, %edx # imm = 0xA679438E
roll $0x11, %edx
addl %edi, %edx
movl %edi, %ecx
xorl %eax, %ecx
andl %edx, %ecx
xorl %eax, %ecx
movl 0x3c(%r9), %r8d
addl %r8d, %esi
movl %r8d, -0x54(%rsp)
addl %ecx, %esi
addl $0x49b40821, %esi # imm = 0x49B40821
roll $0x16, %esi
addl %edx, %esi
movl %esi, %ecx
xorl %edx, %ecx
andl %edi, %ecx
xorl %edx, %ecx
movq %r12, -0x48(%rsp)
addl %r12d, %eax
addl %ecx, %eax
addl $0xf61e2562, %eax # imm = 0xF61E2562
roll $0x5, %eax
addl %esi, %eax
movl %eax, %ecx
xorl %esi, %ecx
andl %edx, %ecx
xorl %esi, %ecx
addl %r11d, %edi
addl %edi, %ecx
addl $0xc040b340, %ecx # imm = 0xC040B340
roll $0x9, %ecx
addl %eax, %ecx
movl %ecx, %edi
xorl %eax, %edi
andl %esi, %edi
xorl %eax, %edi
addl %r10d, %edx
addl %edi, %edx
addl $0x265e5a51, %edx # imm = 0x265E5A51
roll $0xe, %edx
addl %ecx, %edx
movl %edx, %edi
xorl %ecx, %edi
andl %eax, %edi
xorl %ecx, %edi
addl %ebx, %esi
addl %edi, %esi
addl $0xe9b6c7aa, %esi # imm = 0xE9B6C7AA
roll $0x14, %esi
addl %edx, %esi
movl %esi, %edi
xorl %edx, %edi
andl %ecx, %edi
xorl %edx, %edi
movl -0x78(%rsp), %ebx
addl %ebx, %eax
addl %edi, %eax
addl $0xd62f105d, %eax # imm = 0xD62F105D
roll $0x5, %eax
addl %esi, %eax
movl %eax, %edi
xorl %esi, %edi
andl %edx, %edi
xorl %esi, %edi
addl %r13d, %ecx
addl %edi, %ecx
addl $0x2441453, %ecx # imm = 0x2441453
roll $0x9, %ecx
addl %eax, %ecx
movl %ecx, %edi
xorl %eax, %edi
andl %esi, %edi
xorl %eax, %edi
addl %r8d, %edx
addl %edi, %edx
addl $0xd8a1e681, %edx # imm = 0xD8A1E681
roll $0xe, %edx
addl %ecx, %edx
movl %edx, %edi
xorl %ecx, %edi
andl %eax, %edi
xorl %ecx, %edi
movl -0x6c(%rsp), %r10d
addl %r10d, %esi
addl %edi, %esi
addl $0xe7d3fbc8, %esi # imm = 0xE7D3FBC8
roll $0x14, %esi
addl %edx, %esi
movl %esi, %edi
xorl %edx, %edi
andl %ecx, %edi
xorl %edx, %edi
addl %r14d, %eax
addl %edi, %eax
addl $0x21e1cde6, %eax # imm = 0x21E1CDE6
roll $0x5, %eax
addl %esi, %eax
movl %eax, %edi
xorl %esi, %edi
andl %edx, %edi
xorl %esi, %edi
movl %r15d, -0x64(%rsp)
addl %r15d, %ecx
addl %edi, %ecx
addl $0xc33707d6, %ecx # imm = 0xC33707D6
roll $0x9, %ecx
addl %eax, %ecx
movl %ecx, %edi
xorl %eax, %edi
andl %esi, %edi
xorl %eax, %edi
movq -0x38(%rsp), %r14
addl %r14d, %edx
addl %edi, %edx
addl $0xf4d50d87, %edx # imm = 0xF4D50D87
roll $0xe, %edx
addl %ecx, %edx
movl %edx, %edi
xorl %ecx, %edi
andl %eax, %edi
xorl %ecx, %edi
movl -0x70(%rsp), %r11d
addl %r11d, %esi
addl %edi, %esi
addl $0x455a14ed, %esi # imm = 0x455A14ED
roll $0x14, %esi
addl %edx, %esi
movl %esi, %edi
xorl %edx, %edi
andl %ecx, %edi
xorl %edx, %edi
movl -0x74(%rsp), %ebp
addl %ebp, %eax
addl %edi, %eax
addl $0xa9e3e905, %eax # imm = 0xA9E3E905
roll $0x5, %eax
addl %esi, %eax
movl %eax, %edi
xorl %esi, %edi
andl %edx, %edi
xorl %esi, %edi
movq -0x30(%rsp), %r12
addl %r12d, %ecx
addl %edi, %ecx
addl $0xfcefa3f8, %ecx # imm = 0xFCEFA3F8
roll $0x9, %ecx
addl %eax, %ecx
movl %ecx, %edi
xorl %eax, %edi
andl %esi, %edi
xorl %eax, %edi
movl -0x50(%rsp), %r8d
addl %r8d, %edx
addl %edi, %edx
addl $0x676f02d9, %edx # imm = 0x676F02D9
roll $0xe, %edx
addl %ecx, %edx
movl %edx, %edi
xorl %ecx, %edi
andl %eax, %edi
xorl %ecx, %edi
movl -0x58(%rsp), %r13d
addl %r13d, %esi
addl %edi, %esi
addl $0x8d2a4c8a, %esi # imm = 0x8D2A4C8A
roll $0x14, %esi
addl %edx, %esi
movl %esi, %edi
xorl %edx, %edi
movl %edi, %r9d
xorl %ecx, %r9d
addl %ebx, %eax
addl %eax, %r9d
addl $0xfffa3942, %r9d # imm = 0xFFFA3942
roll $0x4, %r9d
addl %esi, %r9d
xorl %r9d, %edi
addl %r11d, %ecx
leal (%rdi,%rcx), %eax
addl $0x8771f681, %eax # imm = 0x8771F681
roll $0xb, %eax
addl %r9d, %eax
movl %r9d, %ecx
xorl %esi, %ecx
xorl %eax, %ecx
addl -0x68(%rsp), %edx
addl %edx, %ecx
addl $0x6d9d6122, %ecx # imm = 0x6D9D6122
roll $0x10, %ecx
addl %eax, %ecx
movl %eax, %edx
xorl %r9d, %edx
xorl %ecx, %edx
addl %r15d, %esi
addl %esi, %edx
addl $0xfde5380c, %edx # imm = 0xFDE5380C
roll $0x17, %edx
addl %ecx, %edx
movl %ecx, %esi
xorl %eax, %esi
xorl %edx, %esi
addl -0x48(%rsp), %r9d
addl %r9d, %esi
addl $0xa4beea44, %esi # imm = 0xA4BEEA44
roll $0x4, %esi
addl %edx, %esi
movl %edx, %edi
xorl %ecx, %edi
xorl %esi, %edi
addl %r10d, %eax
addl %edi, %eax
addl $0x4bdecfa9, %eax # imm = 0x4BDECFA9
roll $0xb, %eax
addl %esi, %eax
movl %esi, %edi
xorl %edx, %edi
xorl %eax, %edi
addl %r8d, %ecx
movl %r8d, %r9d
addl %edi, %ecx
addl $0xf6bb4b60, %ecx # imm = 0xF6BB4B60
roll $0x10, %ecx
addl %eax, %ecx
movl %eax, %edi
xorl %esi, %edi
xorl %ecx, %edi
movl -0x60(%rsp), %r11d
addl %r11d, %edx
addl %edi, %edx
addl $0xbebfbc70, %edx # imm = 0xBEBFBC70
roll $0x17, %edx
addl %ecx, %edx
movl %ecx, %edi
xorl %eax, %edi
xorl %edx, %edi
addl %ebp, %esi
addl %edi, %esi
addl $0x289b7ec6, %esi # imm = 0x289B7EC6
roll $0x4, %esi
addl %edx, %esi
movl %edx, %edi
xorl %ecx, %edi
xorl %esi, %edi
movq -0x40(%rsp), %r8
addl %r8d, %eax
addl %edi, %eax
addl $0xeaa127fa, %eax # imm = 0xEAA127FA
roll $0xb, %eax
addl %esi, %eax
movl %esi, %edi
xorl %edx, %edi
xorl %eax, %edi
addl %r14d, %ecx
addl %edi, %ecx
addl $0xd4ef3085, %ecx # imm = 0xD4EF3085
roll $0x10, %ecx
addl %eax, %ecx
movl %eax, %edi
xorl %esi, %edi
xorl %ecx, %edi
movl -0x4c(%rsp), %r15d
addl %r15d, %edx
addl %edi, %edx
addl $0x4881d05, %edx # imm = 0x4881D05
roll $0x17, %edx
addl %ecx, %edx
movl %ecx, %edi
xorl %eax, %edi
xorl %edx, %edi
movl -0x5c(%rsp), %ebp
addl %ebp, %esi
addl %edi, %esi
addl $0xd9d4d039, %esi # imm = 0xD9D4D039
roll $0x4, %esi
addl %edx, %esi
movl %edx, %edi
xorl %ecx, %edi
xorl %esi, %edi
addl %r13d, %eax
addl %edi, %eax
addl $0xe6db99e5, %eax # imm = 0xE6DB99E5
roll $0xb, %eax
addl %esi, %eax
movl %esi, %edi
xorl %edx, %edi
xorl %eax, %edi
movl -0x54(%rsp), %r10d
addl %r10d, %ecx
addl %edi, %ecx
addl $0x1fa27cf8, %ecx # imm = 0x1FA27CF8
roll $0x10, %ecx
addl %eax, %ecx
movl %eax, %edi
xorl %esi, %edi
xorl %ecx, %edi
addl %r12d, %edx
addl %edi, %edx
addl $0xc4ac5665, %edx # imm = 0xC4AC5665
addl %r8d, %esi
roll $0x17, %edx
addl %ecx, %edx
movl %eax, %edi
notl %edi
orl %edx, %edi
xorl %ecx, %edi
leal (%rdi,%rsi), %ebx
addl $0xf4292244, %ebx # imm = 0xF4292244
addl %r9d, %eax
roll $0x6, %ebx
addl %edx, %ebx
movl %ecx, %esi
notl %esi
orl %ebx, %esi
xorl %edx, %esi
leal (%rsi,%rax), %r9d
addl $0x432aff97, %r9d # imm = 0x432AFF97
addl -0x64(%rsp), %ecx
roll $0xa, %r9d
addl %ebx, %r9d
movl %edx, %eax
notl %eax
orl %r9d, %eax
xorl %ebx, %eax
addl %eax, %ecx
addl $0xab9423a7, %ecx # imm = 0xAB9423A7
roll $0xf, %ecx
addl -0x78(%rsp), %edx
addl %r9d, %ecx
movl %ebx, %eax
notl %eax
orl %ecx, %eax
xorl %r9d, %eax
addl %edx, %eax
addl $0xfc93a039, %eax # imm = 0xFC93A039
addl %r13d, %ebx
roll $0x15, %eax
addl %ecx, %eax
movl %r9d, %edx
notl %edx
orl %eax, %edx
xorl %ecx, %edx
leal (%rdx,%rbx), %r8d
addl $0x655b59c3, %r8d # imm = 0x655B59C3
addl %r14d, %r9d
roll $0x6, %r8d
addl %eax, %r8d
movl %ecx, %edx
notl %edx
orl %r8d, %edx
xorl %eax, %edx
addl %edx, %r9d
addl $0x8f0ccc92, %r9d # imm = 0x8F0CCC92
addl %r11d, %ecx
roll $0xa, %r9d
addl %r8d, %r9d
movl %eax, %edx
notl %edx
orl %r9d, %edx
xorl %r8d, %edx
addl %edx, %ecx
addl $0xffeff47d, %ecx # imm = 0xFFEFF47D
roll $0xf, %ecx
addl -0x48(%rsp), %eax
addl %r9d, %ecx
movl %r8d, %edx
notl %edx
orl %ecx, %edx
xorl %r9d, %edx
addl %edx, %eax
addl $0x85845dd1, %eax # imm = 0x85845DD1
addl -0x70(%rsp), %r8d
roll $0x15, %eax
addl %ecx, %eax
movl %r9d, %edx
notl %edx
orl %eax, %edx
xorl %ecx, %edx
addl %edx, %r8d
addl $0x6fa87e4f, %r8d # imm = 0x6FA87E4F
addl %r10d, %r9d
roll $0x6, %r8d
addl %eax, %r8d
movl %ecx, %edx
notl %edx
orl %r8d, %edx
xorl %eax, %edx
addl %r9d, %edx
addl $0xfe2ce6e0, %edx # imm = 0xFE2CE6E0
roll $0xa, %edx
addl %r8d, %edx
addl %r15d, %ecx
movl %eax, %esi
notl %esi
orl %edx, %esi
xorl %r8d, %esi
addl %esi, %ecx
addl $0xa3014314, %ecx # imm = 0xA3014314
roll $0xf, %ecx
addl %edx, %ecx
addl -0x74(%rsp), %eax
movl %r8d, %esi
notl %esi
orl %ecx, %esi
xorl %edx, %esi
addl %esi, %eax
addl $0x4e0811a1, %eax # imm = 0x4E0811A1
roll $0x15, %eax
addl %ecx, %eax
addl -0x6c(%rsp), %r8d
movl %edx, %esi
notl %esi
orl %eax, %esi
xorl %ecx, %esi
addl %r8d, %esi
addl $0xf7537e82, %esi # imm = 0xF7537E82
roll $0x6, %esi
addl %eax, %esi
addl -0x68(%rsp), %edx
movl %ecx, %edi
notl %edi
orl %esi, %edi
xorl %eax, %edi
addl %edi, %edx
addl $0xbd3af235, %edx # imm = 0xBD3AF235
roll $0xa, %edx
addl %esi, %edx
addl %r12d, %ecx
movl %eax, %edi
notl %edi
orl %edx, %edi
xorl %esi, %edi
addl %edi, %ecx
addl $0x2ad7d2bb, %ecx # imm = 0x2AD7D2BB
roll $0xf, %ecx
addl %edx, %ecx
addl %ebp, %eax
movl %esi, %edi
notl %edi
orl %ecx, %edi
xorl %edx, %edi
addl %edi, %eax
addl $0xeb86d391, %eax # imm = 0xEB86D391
addl -0x8(%rsp), %esi
roll $0x15, %eax
movq -0x20(%rsp), %rdi
movl %esi, (%rdi)
movq -0x28(%rsp), %rsi
addl %ecx, %esi
addl %eax, %esi
movl %esi, 0x4(%rdi)
addl -0x18(%rsp), %ecx
movl %ecx, 0x8(%rdi)
addl -0x10(%rsp), %edx
movl %edx, 0xc(%rdi)
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/md5.c
|
rhash_md5_final
|
/**
 * Finish an MD5 computation: pad the buffered tail, append the message
 * length in bits, run the last compression, and optionally export the digest.
 *
 * @param ctx    hash context; ctx->hash holds the final state afterwards
 * @param result 16-byte output buffer for the digest, or NULL to skip export
 */
void rhash_md5_final(md5_ctx* ctx, unsigned char* result)
{
	/* position of the first free byte: 32-bit word index + bit shift inside it */
	unsigned i = ((unsigned)ctx->length & 63) >> 2;
	unsigned shift = ((unsigned)ctx->length & 3) * 8;

	/* append the mandatory 0x80 pad byte, clearing the bytes above it
	   in the same word */
	ctx->message[i] &= ~(0xFFFFFFFFu << shift);
	ctx->message[i] ^= 0x80u << shift;
	i++;

	/* no room left for the 64-bit bit length: zero-fill and flush this block */
	if (i > 14) {
		for (; i < 16; i++)
			ctx->message[i] = 0;
		rhash_md5_process_block(ctx->hash, ctx->message);
		i = 0;
	}

	/* zero up to the length field, then store the length in bits
	   as two little-endian 32-bit halves */
	for (; i < 14; i++)
		ctx->message[i] = 0;
	ctx->message[14] = (unsigned)(ctx->length << 3);
	ctx->message[15] = (unsigned)(ctx->length >> 29);
	rhash_md5_process_block(ctx->hash, ctx->message);

	if (result)
		le32_copy(result, 0, &ctx->hash, 16);
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
movl 0x40(%rdi), %ecx
movl %ecx, %eax
shll $0x3, %ecx
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
shll %cl, %edx
movl $0x80, %edi
shll %cl, %edi
shrl $0x2, %eax
andl $0xf, %eax
notl %edx
andl (%rbx,%rax,4), %edx
movq %rsi, %r14
xorl %edx, %edi
movl %edi, (%rbx,%rax,4)
cmpl $0xe, %eax
jb 0x618776
jne 0x618766
movl $0x0, 0x3c(%rbx)
leaq 0x48(%rbx), %rdi
movq %rbx, %rsi
callq 0x617fd2
xorl %eax, %eax
jmp 0x61877d
cmpl $0xd, %eax
je 0x618796
incl %eax
shll $0x2, %eax
leaq (%rbx,%rax), %rdi
movl $0x34, %edx
subl %eax, %edx
addq $0x4, %rdx
xorl %esi, %esi
callq 0x3fa90
movq 0x40(%rbx), %rax
leal (,%rax,8), %ecx
movl %ecx, 0x38(%rbx)
shrq $0x1d, %rax
movl %eax, 0x3c(%rbx)
leaq 0x48(%rbx), %r15
movq %r15, %rdi
movq %rbx, %rsi
callq 0x617fd2
testq %r14, %r14
je 0x6187c7
movups (%r15), %xmm0
movups %xmm0, (%r14)
popq %rbx
popq %r14
popq %r15
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/md5.c
|
rhash_sha1_update
|
/**
 * Feed a chunk of message data into an ongoing SHA-1 computation.
 *
 * @param ctx  hash context holding the partial block and chaining state
 * @param msg  message bytes to hash
 * @param size number of bytes in msg
 */
void rhash_sha1_update(sha1_ctx* ctx, const unsigned char* msg, size_t size)
{
	unsigned used = (unsigned)ctx->length & 63;
	ctx->length += size;

	/* top up a partially filled block first */
	if (used != 0) {
		unsigned room = sha1_block_size - used;
		if (size < room) {
			memcpy(ctx->message + used, msg, size);
			return;
		}
		memcpy(ctx->message + used, msg, room);
		rhash_sha1_process_block(ctx->hash, (unsigned*)ctx->message);
		msg += room;
		size -= room;
	}

	/* consume whole blocks straight from the input */
	for (; size >= sha1_block_size; msg += sha1_block_size, size -= sha1_block_size) {
		unsigned* words;
		if (IS_ALIGNED_32(msg)) {
			/* an aligned message can be hashed in place without copying */
			words = (unsigned*)msg;
		} else {
			memcpy(ctx->message, msg, sha1_block_size);
			words = (unsigned*)ctx->message;
		}
		rhash_sha1_process_block(ctx->hash, words);
	}

	/* stash any trailing partial block for the next call */
	if (size != 0)
		memcpy(ctx->message, msg, size);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r14
movq 0x40(%rdi), %rdi
leaq (%rdi,%rdx), %rcx
movl %edi, %eax
movq %rcx, 0x40(%r14)
andl $0x3f, %eax
je 0x618849
movl $0x40, %r12d
subl %eax, %r12d
andl $0x3f, %edi
addq %r14, %rdi
cmpq %rbx, %r12
movq %rbx, %rdx
cmovbq %r12, %rdx
movq %r15, %rsi
callq 0x3f250
subq %r12, %rbx
jb 0x6188c3
leaq 0x48(%r14), %rdi
movq %r14, %rsi
callq 0x6188cd
addq %r12, %r15
cmpq $0x40, %rbx
jb 0x6188a4
leaq 0x48(%r14), %r13
movq %r15, %r12
movq %r12, %rsi
testb $0x3, %r15b
je 0x61888c
movups (%r12), %xmm0
movups 0x10(%r12), %xmm1
movups 0x20(%r12), %xmm2
movups 0x30(%r12), %xmm3
movups %xmm3, 0x30(%r14)
movups %xmm2, 0x20(%r14)
movups %xmm1, 0x10(%r14)
movups %xmm0, (%r14)
movq %r14, %rsi
movq %r13, %rdi
callq 0x6188cd
addq $0x40, %r12
addq $-0x40, %rbx
cmpq $0x3f, %rbx
ja 0x618856
jmp 0x6188a7
movq %r15, %r12
testq %rbx, %rbx
je 0x6188c3
movq %r14, %rdi
movq %r12, %rsi
movq %rbx, %rdx
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
jmp 0x3f250
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/sha1.c
|
rhash_sha1_process_block
|
/**
 * The SHA-1 compression function (FIPS 180-4): mix one 64-byte block
 * into the 160-bit state.
 *
 * @param hash  five 32-bit chaining words, updated in place
 * @param block sixteen 32-bit words holding the message bytes big-endian
 *
 * Written as one 80-step loop; the byte-oriented load below is the
 * endian-neutral equivalent of the original's be2me_32 conversion.
 */
static void rhash_sha1_process_block(unsigned* hash, const unsigned* block)
{
	uint32_t W[80];          /* expanded message schedule */
	uint32_t a, b, c, d, e;  /* working variables */
	int t;

	/* assemble the 16 message words from big-endian bytes */
	const unsigned char* p = (const unsigned char*)block;
	for (t = 0; t < 16; t++, p += 4) {
		W[t] = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
		     | ((uint32_t)p[2] << 8) | (uint32_t)p[3];
	}
	/* expand: W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) */
	for (t = 16; t < 80; t++) {
		uint32_t w = W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16];
		W[t] = (w << 1) | (w >> 31);
	}

	a = hash[0];
	b = hash[1];
	c = hash[2];
	d = hash[3];
	e = hash[4];

	for (t = 0; t < 80; t++) {
		uint32_t f, k, tmp;
		if (t < 20) {
			f = ((c ^ d) & b) ^ d;           /* Ch, branch-free form */
			k = 0x5A827999;
		} else if (t < 40) {
			f = b ^ c ^ d;                    /* Parity */
			k = 0x6ED9EBA1;
		} else if (t < 60) {
			f = (b & c) | (b & d) | (c & d);  /* Maj */
			k = 0x8F1BBCDC;
		} else {
			f = b ^ c ^ d;                    /* Parity */
			k = 0xCA62C1D6;
		}
		tmp = ((a << 5) | (a >> 27)) + f + e + W[t] + k;
		e = d;
		d = c;
		c = (b << 30) | (b >> 2);
		b = a;
		a = tmp;
	}

	hash[0] += a;
	hash[1] += b;
	hash[2] += c;
	hash[3] += d;
	hash[4] += e;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xc8, %rsp
xorl %eax, %eax
pxor %xmm0, %xmm0
movdqu (%rsi,%rax,4), %xmm1
movdqa %xmm1, %xmm2
punpckhbw %xmm0, %xmm2 # xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
pshuflw $0x1b, %xmm2, %xmm2 # xmm2 = xmm2[3,2,1,0,4,5,6,7]
pshufhw $0x1b, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2,3,7,6,5,4]
punpcklbw %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
pshuflw $0x1b, %xmm1, %xmm1 # xmm1 = xmm1[3,2,1,0,4,5,6,7]
pshufhw $0x1b, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2,3,7,6,5,4]
packuswb %xmm2, %xmm1
movdqa %xmm1, -0x80(%rsp,%rax,4)
addq $0x4, %rax
cmpq $0x10, %rax
jne 0x6188e4
movl $0x10, %eax
movl -0xa0(%rsp,%rax,4), %ecx
xorl -0x8c(%rsp,%rax,4), %ecx
xorl -0xb8(%rsp,%rax,4), %ecx
xorl -0xc0(%rsp,%rax,4), %ecx
roll %ecx
movl %ecx, -0x80(%rsp,%rax,4)
incq %rax
cmpq $0x50, %rax
jne 0x618922
movl (%rdi), %r8d
movl 0x4(%rdi), %esi
movl 0x8(%rdi), %edx
movl 0xc(%rdi), %ecx
movl 0x10(%rdi), %eax
xorl %r11d, %r11d
movl %eax, %r12d
movl %ecx, %r13d
movl %edx, %r9d
movl %esi, %r15d
movl %r8d, %r10d
movl %r10d, %ebx
movl %r9d, %r14d
movl %r13d, %ebp
movl %r10d, %r9d
roll $0x5, %r9d
addl %r12d, %r9d
movl %r13d, %r10d
xorl %r14d, %r10d
andl %r15d, %r10d
xorl %r13d, %r10d
addl %r9d, %r10d
movl -0x80(%rsp,%r11,4), %r9d
addl %r9d, %r10d
addl $0x5a827999, %r10d # imm = 0x5A827999
movl %r15d, %r9d
roll $0x1e, %r9d
incq %r11
movl %r13d, %r12d
movl %r14d, %r13d
movl %ebx, %r15d
cmpq $0x14, %r11
jne 0x61896e
movl $0x14, %r13d
movl %r10d, %r11d
movl %r9d, %r12d
movl %r10d, %r9d
roll $0x5, %r9d
movl %r14d, %r15d
movl %r12d, %r10d
xorl %ebx, %r10d
xorl %r14d, %r10d
addl %r9d, %r10d
addl %ebp, %r10d
movl -0x80(%rsp,%r13,4), %r9d
addl %r9d, %r10d
addl $0x6ed9eba1, %r10d # imm = 0x6ED9EBA1
movl %ebx, %r9d
roll $0x1e, %r9d
incq %r13
movl %r14d, %ebp
movl %r12d, %r14d
movl %r11d, %ebx
cmpq $0x28, %r13
jne 0x6189be
movl $0x28, %r13d
movl %r10d, %ebx
movl %r9d, %r14d
movl %r12d, %ebp
movl %r10d, %r9d
roll $0x5, %r9d
addl %r15d, %r9d
movl %r12d, %r10d
orl %r14d, %r10d
andl %r11d, %r10d
movl %r12d, %r15d
andl %r14d, %r15d
orl %r10d, %r15d
addl %r9d, %r15d
movl -0x80(%rsp,%r13,4), %r9d
leal (%r9,%r15), %r10d
addl $0x8f1bbcdc, %r10d # imm = 0x8F1BBCDC
movl %r11d, %r9d
roll $0x1e, %r9d
incq %r13
movl %r12d, %r15d
movl %r14d, %r12d
movl %ebx, %r11d
cmpq $0x3c, %r13
jne 0x618a0b
movl $0x3c, %r15d
movl %r10d, %r13d
movl %r9d, %r11d
movl %r10d, %r9d
roll $0x5, %r9d
movl %r14d, %r12d
movl %r11d, %r10d
xorl %ebx, %r10d
xorl %r14d, %r10d
addl %r9d, %r10d
addl %ebp, %r10d
movl -0x80(%rsp,%r15,4), %r9d
addl %r9d, %r10d
addl $0xca62c1d6, %r10d # imm = 0xCA62C1D6
movl %ebx, %r9d
roll $0x1e, %r9d
incq %r15
movl %r14d, %ebp
movl %r11d, %r14d
movl %r13d, %ebx
cmpq $0x50, %r15
jne 0x618a62
addl %r8d, %r10d
movl %r10d, (%rdi)
addl %esi, %r13d
movl %r13d, 0x4(%rdi)
addl %edx, %r9d
movl %r9d, 0x8(%rdi)
addl %ecx, %r11d
movl %r11d, 0xc(%rdi)
addl %eax, %r12d
movl %r12d, 0x10(%rdi)
addq $0xc8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/sha1.c
|
rhash_sha256_init
|
void rhash_sha256_init(sha256_ctx* ctx)
{
/* Initial values. These words were obtained by taking the first 32
* bits of the fractional parts of the square roots of the first
* eight prime numbers. */
static const unsigned SHA256_H0[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
ctx->length = 0;
ctx->digest_length = sha256_hash_size;
/* initialize algorithm state */
memcpy(ctx->hash, SHA256_H0, sizeof(ctx->hash));
}
|
movq $0x0, 0x40(%rdi)
movl $0x20, 0x68(%rdi)
movaps 0xb02d6(%rip), %xmm0 # 0x6c8ed0
movups %xmm0, 0x48(%rdi)
movaps 0xb02db(%rip), %xmm0 # 0x6c8ee0
movups %xmm0, 0x58(%rdi)
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/sha256.c
|
rhash_sha256_process_block
|
/* Process one 64-byte block with the SHA-256 compression function
 * (FIPS 180-3, section 6.1.3 "alternate method").
 * hash:  eight 32-bit chaining words, updated in place.
 * block: sixteen 32-bit message words.
 * NOTE(review): behavior depends on the ROUND_1_16/ROUND_17_64 macros and
 * the rhash_k256 round-constant table defined elsewhere in this file;
 * ROUND_17_64 presumably extends the message schedule inside the rolling
 * 16-word window W using the constant slice pointer k -- confirm against
 * their definitions. */
static void rhash_sha256_process_block(unsigned hash[8], unsigned block[16])
{
	unsigned A, B, C, D, E, F, G, H;
	unsigned W[16];      /* rolling 16-word message schedule */
	const unsigned* k;   /* current 16-entry slice of the round constants */
	int i;
	A = hash[0], B = hash[1], C = hash[2], D = hash[3];
	E = hash[4], F = hash[5], G = hash[6], H = hash[7];

	/* Compute SHA using alternate Method: FIPS 180-3 6.1.3 */
	/* The eight working variables are rotated by permuting the macro
	 * arguments from round to round rather than by shuffling the values. */
	ROUND_1_16(A, B, C, D, E, F, G, H, 0);
	ROUND_1_16(H, A, B, C, D, E, F, G, 1);
	ROUND_1_16(G, H, A, B, C, D, E, F, 2);
	ROUND_1_16(F, G, H, A, B, C, D, E, 3);
	ROUND_1_16(E, F, G, H, A, B, C, D, 4);
	ROUND_1_16(D, E, F, G, H, A, B, C, 5);
	ROUND_1_16(C, D, E, F, G, H, A, B, 6);
	ROUND_1_16(B, C, D, E, F, G, H, A, 7);
	ROUND_1_16(A, B, C, D, E, F, G, H, 8);
	ROUND_1_16(H, A, B, C, D, E, F, G, 9);
	ROUND_1_16(G, H, A, B, C, D, E, F, 10);
	ROUND_1_16(F, G, H, A, B, C, D, E, 11);
	ROUND_1_16(E, F, G, H, A, B, C, D, 12);
	ROUND_1_16(D, E, F, G, H, A, B, C, 13);
	ROUND_1_16(C, D, E, F, G, H, A, B, 14);
	ROUND_1_16(B, C, D, E, F, G, H, A, 15);
	/* rounds 17..64, sixteen at a time, advancing the constant pointer k */
	for (i = 16, k = &rhash_k256[16]; i < 64; i += 16, k += 16) {
		ROUND_17_64(A, B, C, D, E, F, G, H, 0);
		ROUND_17_64(H, A, B, C, D, E, F, G, 1);
		ROUND_17_64(G, H, A, B, C, D, E, F, 2);
		ROUND_17_64(F, G, H, A, B, C, D, E, 3);
		ROUND_17_64(E, F, G, H, A, B, C, D, 4);
		ROUND_17_64(D, E, F, G, H, A, B, C, 5);
		ROUND_17_64(C, D, E, F, G, H, A, B, 6);
		ROUND_17_64(B, C, D, E, F, G, H, A, 7);
		ROUND_17_64(A, B, C, D, E, F, G, H, 8);
		ROUND_17_64(H, A, B, C, D, E, F, G, 9);
		ROUND_17_64(G, H, A, B, C, D, E, F, 10);
		ROUND_17_64(F, G, H, A, B, C, D, E, 11);
		ROUND_17_64(E, F, G, H, A, B, C, D, 12);
		ROUND_17_64(D, E, F, G, H, A, B, C, 13);
		ROUND_17_64(C, D, E, F, G, H, A, B, 14);
		ROUND_17_64(B, C, D, E, F, G, H, A, 15);
	}

	/* fold the working variables back into the chaining state */
	hash[0] += A, hash[1] += B, hash[2] += C, hash[3] += D;
	hash[4] += E, hash[5] += F, hash[6] += G, hash[7] += H;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x60, %rsp
movl (%rdi), %r12d
movl 0x4(%rdi), %r15d
movl 0x8(%rdi), %ebx
movl 0xc(%rdi), %r8d
movl 0x10(%rdi), %r14d
movl 0x14(%rdi), %r11d
movl 0x18(%rdi), %r9d
movq %rdi, 0x58(%rsp)
movl 0x1c(%rdi), %edx
movl %r14d, %eax
roll $0x1a, %eax
movl %r14d, %ecx
roll $0x15, %ecx
xorl %eax, %ecx
movl %r14d, %eax
roll $0x7, %eax
xorl %ecx, %eax
movl %r9d, %ecx
xorl %r11d, %ecx
andl %r14d, %ecx
xorl %r9d, %ecx
movl (%rsi), %r10d
bswapl %r10d
movl %edx, 0x18(%rsp)
addl %edx, %ecx
addl %eax, %ecx
movl %r12d, %eax
roll $0x1e, %eax
movl %r12d, %edx
roll $0x13, %edx
xorl %eax, %edx
movl %r12d, %edi
roll $0xa, %edi
xorl %edx, %edi
movl %r15d, %edx
andl %r12d, %edx
movl %r15d, %eax
xorl %r12d, %eax
andl %ebx, %eax
xorl %edx, %eax
movl %ecx, %edx
addl %r10d, %edx
movq %r8, 0x38(%rsp)
addl %edx, %r8d
addl $0x428a2f98, %r8d # imm = 0x428A2F98
addl %edi, %eax
movl %r8d, %edx
roll $0x1a, %edx
movq %r10, 0x8(%rsp)
addl %r10d, %ecx
addl $0x428a2f98, %ecx # imm = 0x428A2F98
movl %r8d, %edi
roll $0x15, %edi
addl %ecx, %eax
movl %r8d, %ecx
roll $0x7, %ecx
xorl %edx, %edi
xorl %edi, %ecx
movl %r11d, %edx
xorl %r14d, %edx
andl %r8d, %edx
xorl %r11d, %edx
movl 0x4(%rsi), %edi
bswapl %edi
movq %r9, 0x50(%rsp)
movq %rdi, 0x10(%rsp)
addl %r9d, %edi
addl %edi, %edx
leal (%rdx,%rcx), %edi
addl %ecx, %edx
addl $0x71374491, %edx # imm = 0x71374491
movl %eax, %ecx
roll $0x1e, %ecx
movl %eax, %r9d
roll $0x13, %r9d
xorl %ecx, %r9d
movl %eax, %r10d
roll $0xa, %r10d
xorl %r9d, %r10d
movl %eax, %r9d
andl %r12d, %r9d
movl %eax, %ecx
xorl %r12d, %ecx
andl %r15d, %ecx
xorl %r9d, %ecx
addl %r10d, %ecx
addl %edx, %ecx
movq %rbx, 0x30(%rsp)
leal (%rbx,%rdi), %r10d
addl $0x71374491, %r10d # imm = 0x71374491
movl %r10d, %edx
roll $0x1a, %edx
movl %r10d, %edi
roll $0x15, %edi
xorl %edx, %edi
movl %r10d, %edx
roll $0x7, %edx
xorl %edi, %edx
movl %r8d, %edi
xorl %r14d, %edi
andl %r10d, %edi
xorl %r14d, %edi
movl 0x8(%rsi), %r9d
bswapl %r9d
movq %r11, 0x48(%rsp)
movq %r9, (%rsp)
addl %r11d, %r9d
addl %r9d, %edi
leal (%rdi,%rdx), %r9d
addl %edx, %edi
addl $0xb5c0fbcf, %edi # imm = 0xB5C0FBCF
movl %ecx, %edx
roll $0x1e, %edx
movl %ecx, %r11d
roll $0x13, %r11d
xorl %edx, %r11d
movl %ecx, %ebx
roll $0xa, %ebx
xorl %r11d, %ebx
movl %ecx, %r11d
andl %eax, %r11d
movl %ecx, %edx
xorl %eax, %edx
andl %r12d, %edx
xorl %r11d, %edx
addl %ebx, %edx
addl %edi, %edx
movq %r15, 0x28(%rsp)
leal (%r15,%r9), %r11d
addl $0xb5c0fbcf, %r11d # imm = 0xB5C0FBCF
movl %r11d, %edi
roll $0x1a, %edi
movl %r11d, %r9d
roll $0x15, %r9d
xorl %edi, %r9d
movl %r11d, %edi
roll $0x7, %edi
xorl %r9d, %edi
movl %r10d, %r9d
xorl %r8d, %r9d
andl %r11d, %r9d
xorl %r8d, %r9d
movl 0xc(%rsi), %ebx
bswapl %ebx
movq %r14, 0x40(%rsp)
movq %rbx, -0x8(%rsp)
addl %r14d, %ebx
addl %ebx, %r9d
movl %edx, %ebx
roll $0x1e, %ebx
leal (%r9,%rdi), %r14d
addl %edi, %r9d
addl $0xe9b5dba5, %r9d # imm = 0xE9B5DBA5
movl %edx, %edi
roll $0x13, %edi
xorl %ebx, %edi
movl %edx, %ebx
roll $0xa, %ebx
xorl %edi, %ebx
movl %edx, %ebp
andl %ecx, %ebp
movl %edx, %edi
xorl %ecx, %edi
andl %eax, %edi
xorl %ebp, %edi
addl %ebx, %edi
movq %r12, 0x20(%rsp)
leal (%r12,%r14), %ebx
addl $0xe9b5dba5, %ebx # imm = 0xE9B5DBA5
movl %ebx, %ebp
roll $0x1a, %ebp
addl %r9d, %edi
movl %ebx, %r9d
roll $0x15, %r9d
xorl %ebp, %r9d
movl %ebx, %r14d
roll $0x7, %r14d
xorl %r9d, %r14d
movl %r11d, %r9d
xorl %r10d, %r9d
andl %ebx, %r9d
xorl %r10d, %r9d
movl 0x10(%rsi), %ebp
bswapl %ebp
movl %ebp, -0x30(%rsp)
addl %ebp, %r8d
addl %r9d, %r8d
movl %edi, %r9d
roll $0x1e, %r9d
addl %r14d, %r8d
addl $0x3956c25b, %r8d # imm = 0x3956C25B
movl %edi, %ebp
roll $0x13, %ebp
xorl %r9d, %ebp
movl %edi, %r14d
roll $0xa, %r14d
xorl %ebp, %r14d
movl %edi, %ebp
andl %edx, %ebp
movl %edi, %r9d
xorl %edx, %r9d
andl %ecx, %r9d
xorl %ebp, %r9d
addl %r14d, %r9d
addl %r8d, %eax
movl %eax, %ebp
roll $0x1a, %ebp
addl %r8d, %r9d
movl %eax, %r8d
roll $0x15, %r8d
xorl %ebp, %r8d
movl %eax, %r14d
roll $0x7, %r14d
xorl %r8d, %r14d
movl %ebx, %r8d
xorl %r11d, %r8d
andl %eax, %r8d
xorl %r11d, %r8d
movl 0x14(%rsi), %ebp
movq %rsi, %r12
bswapl %ebp
movl %ebp, -0x3c(%rsp)
addl %ebp, %r10d
addl %r8d, %r10d
movl %r9d, %r8d
roll $0x1e, %r8d
leal (%r14,%r10), %ebp
addl $0x59f111f1, %ebp # imm = 0x59F111F1
movl %r9d, %r10d
roll $0x13, %r10d
xorl %r8d, %r10d
movl %r9d, %r8d
roll $0xa, %r8d
xorl %r10d, %r8d
movl %r9d, %r14d
andl %edi, %r14d
movl %r9d, %r10d
xorl %edi, %r10d
andl %edx, %r10d
xorl %r14d, %r10d
addl %r8d, %r10d
addl %ebp, %ecx
movl %ecx, %r8d
roll $0x1a, %r8d
addl %ebp, %r10d
movl %ecx, %ebp
roll $0x15, %ebp
xorl %r8d, %ebp
movl %ecx, %r8d
roll $0x7, %r8d
xorl %ebp, %r8d
movl %eax, %ebp
xorl %ebx, %ebp
andl %ecx, %ebp
xorl %ebx, %ebp
movl 0x18(%rsi), %r14d
bswapl %r14d
movl %r14d, -0x4c(%rsp)
addl %r14d, %r11d
addl %ebp, %r11d
movl %r10d, %ebp
roll $0x1e, %ebp
addl %r11d, %r8d
addl $0x923f82a4, %r8d # imm = 0x923F82A4
movl %r10d, %r11d
roll $0x13, %r11d
xorl %ebp, %r11d
movl %r10d, %ebp
roll $0xa, %ebp
xorl %r11d, %ebp
movl %r10d, %r14d
andl %r9d, %r14d
movl %r10d, %esi
xorl %r9d, %esi
andl %edi, %esi
xorl %r14d, %esi
addl %ebp, %esi
addl %r8d, %edx
movl %edx, %ebp
roll $0x1a, %ebp
addl %r8d, %esi
movl %edx, %r8d
roll $0x15, %r8d
xorl %ebp, %r8d
movl %edx, %r14d
roll $0x7, %r14d
xorl %r8d, %r14d
movl %ecx, %r8d
xorl %eax, %r8d
andl %edx, %r8d
xorl %eax, %r8d
movl 0x1c(%r12), %r11d
bswapl %r11d
movl %r11d, -0x44(%rsp)
addl %r11d, %ebx
addl %r8d, %ebx
movl %esi, %r8d
roll $0x1e, %r8d
leal (%r14,%rbx), %ebp
addl $0xab1c5ed5, %ebp # imm = 0xAB1C5ED5
movl %esi, %ebx
roll $0x13, %ebx
xorl %r8d, %ebx
movl %esi, %r8d
roll $0xa, %r8d
xorl %ebx, %r8d
movl %esi, %r14d
andl %r10d, %r14d
movl %esi, %ebx
xorl %r10d, %ebx
andl %r9d, %ebx
xorl %r14d, %ebx
addl %r8d, %ebx
addl %ebp, %edi
movl %edi, %r8d
roll $0x1a, %r8d
addl %ebp, %ebx
movl %edi, %ebp
roll $0x15, %ebp
xorl %r8d, %ebp
movl %edi, %r8d
roll $0x7, %r8d
xorl %ebp, %r8d
movl %edx, %ebp
xorl %ecx, %ebp
andl %edi, %ebp
xorl %ecx, %ebp
movl 0x20(%r12), %r14d
bswapl %r14d
movl %r14d, -0x40(%rsp)
addl %r14d, %eax
addl %ebp, %eax
movl %ebx, %ebp
roll $0x1e, %ebp
addl %r8d, %eax
addl $0xd807aa98, %eax # imm = 0xD807AA98
movl %ebx, %r8d
roll $0x13, %r8d
xorl %ebp, %r8d
movl %ebx, %ebp
roll $0xa, %ebp
xorl %r8d, %ebp
movl %ebx, %r14d
andl %esi, %r14d
movl %ebx, %r8d
xorl %esi, %r8d
andl %r10d, %r8d
xorl %r14d, %r8d
addl %ebp, %r8d
addl %eax, %r9d
movl %r9d, %ebp
roll $0x1a, %ebp
addl %eax, %r8d
movl %r9d, %eax
roll $0x15, %eax
xorl %ebp, %eax
movl %r9d, %r14d
roll $0x7, %r14d
xorl %eax, %r14d
movl %edi, %eax
xorl %edx, %eax
andl %r9d, %eax
xorl %edx, %eax
movl 0x24(%r12), %r11d
bswapl %r11d
movl %r11d, -0x7c(%rsp)
addl %r11d, %ecx
addl %eax, %ecx
movl %r8d, %eax
roll $0x1e, %eax
addl %r14d, %ecx
addl $0x12835b01, %ecx # imm = 0x12835B01
movl %r8d, %ebp
roll $0x13, %ebp
xorl %eax, %ebp
movl %r8d, %eax
roll $0xa, %eax
xorl %ebp, %eax
movl %r8d, %ebp
andl %ebx, %ebp
movl %r8d, %r15d
xorl %ebx, %r15d
andl %esi, %r15d
xorl %ebp, %r15d
addl %eax, %r15d
addl %ecx, %r10d
movl %r10d, %eax
roll $0x1a, %eax
addl %ecx, %r15d
movl %r10d, %ecx
roll $0x15, %ecx
xorl %eax, %ecx
movl %r10d, %eax
roll $0x7, %eax
xorl %ecx, %eax
movl %r9d, %ecx
xorl %edi, %ecx
andl %r10d, %ecx
xorl %edi, %ecx
movl 0x28(%r12), %ebp
bswapl %ebp
movl %ebp, -0x50(%rsp)
addl %ebp, %edx
addl %ecx, %edx
movl %r15d, %ecx
roll $0x1e, %ecx
addl %edx, %eax
addl $0x243185be, %eax # imm = 0x243185BE
movl %r15d, %edx
roll $0x13, %edx
xorl %ecx, %edx
movl %r15d, %ecx
roll $0xa, %ecx
xorl %edx, %ecx
movl %r15d, %edx
andl %r8d, %edx
movl %r15d, %ebp
xorl %r8d, %ebp
andl %ebx, %ebp
xorl %edx, %ebp
addl %ecx, %ebp
addl %eax, %esi
movl %esi, %ecx
roll $0x1a, %ecx
addl %eax, %ebp
movl %esi, %eax
roll $0x15, %eax
xorl %ecx, %eax
movl %esi, %ecx
roll $0x7, %ecx
xorl %eax, %ecx
movl %r10d, %eax
xorl %r9d, %eax
andl %esi, %eax
xorl %r9d, %eax
movl 0x2c(%r12), %edx
movq %r12, %r11
bswapl %edx
movl %edx, -0x38(%rsp)
addl %edx, %edi
addl %eax, %edi
movl %ebp, %eax
roll $0x1e, %eax
addl %edi, %ecx
addl $0x550c7dc3, %ecx # imm = 0x550C7DC3
movl %ebp, %edx
roll $0x13, %edx
xorl %eax, %edx
movl %ebp, %eax
roll $0xa, %eax
xorl %edx, %eax
movl %ebp, %edx
andl %r15d, %edx
movl %ebp, %r13d
xorl %r15d, %r13d
andl %r8d, %r13d
xorl %edx, %r13d
addl %eax, %r13d
addl %ecx, %ebx
movl %ebx, %eax
roll $0x1a, %eax
addl %ecx, %r13d
movl %ebx, %ecx
roll $0x15, %ecx
xorl %eax, %ecx
movl %ebx, %eax
roll $0x7, %eax
xorl %ecx, %eax
movl %esi, %ecx
xorl %r10d, %ecx
andl %ebx, %ecx
xorl %r10d, %ecx
movl 0x30(%r12), %edx
bswapl %edx
movl %edx, -0x74(%rsp)
addl %edx, %r9d
addl %ecx, %r9d
movl %r13d, %ecx
roll $0x1e, %ecx
addl %r9d, %eax
addl $0x72be5d74, %eax # imm = 0x72BE5D74
movl %r13d, %edx
roll $0x13, %edx
xorl %ecx, %edx
movl %r13d, %ecx
roll $0xa, %ecx
xorl %edx, %ecx
movl %r13d, %edx
andl %ebp, %edx
movl %r13d, %r12d
xorl %ebp, %r12d
andl %r15d, %r12d
xorl %edx, %r12d
addl %ecx, %r12d
addl %eax, %r8d
movl %r8d, %ecx
roll $0x1a, %ecx
addl %eax, %r12d
movl %r8d, %eax
roll $0x15, %eax
xorl %ecx, %eax
movl %r8d, %ecx
roll $0x7, %ecx
xorl %eax, %ecx
movl %ebx, %eax
xorl %esi, %eax
andl %r8d, %eax
xorl %esi, %eax
movl 0x34(%r11), %edx
bswapl %edx
movl %edx, -0x78(%rsp)
addl %edx, %r10d
addl %eax, %r10d
movl %r12d, %eax
roll $0x1e, %eax
addl %r10d, %ecx
addl $0x80deb1fe, %ecx # imm = 0x80DEB1FE
movl %r12d, %edx
roll $0x13, %edx
xorl %eax, %edx
movl %r12d, %eax
roll $0xa, %eax
xorl %edx, %eax
movl %r12d, %edx
andl %r13d, %edx
movl %r12d, %r10d
xorl %r13d, %r10d
andl %ebp, %r10d
xorl %edx, %r10d
addl %eax, %r10d
addl %ecx, %r15d
movl %r15d, %eax
roll $0x1a, %eax
addl %ecx, %r10d
movl %r15d, %ecx
roll $0x15, %ecx
xorl %eax, %ecx
movl %r15d, %eax
roll $0x7, %eax
xorl %ecx, %eax
movl %r8d, %ecx
xorl %ebx, %ecx
andl %r15d, %ecx
xorl %ebx, %ecx
movl 0x38(%r11), %edx
bswapl %edx
addl %edx, %esi
addl %ecx, %esi
movl %r10d, %ecx
roll $0x1e, %ecx
addl %esi, %eax
addl $0x9bdc06a7, %eax # imm = 0x9BDC06A7
movl %edx, %esi
movl %r10d, %edx
roll $0x13, %edx
xorl %ecx, %edx
movl %r10d, %ecx
roll $0xa, %ecx
xorl %edx, %ecx
movl %r10d, %edx
andl %r12d, %edx
movl %r10d, %r14d
xorl %r12d, %r14d
andl %r13d, %r14d
xorl %edx, %r14d
addl %ecx, %r14d
addl %eax, %ebp
movl %ebp, %ecx
roll $0x1a, %ecx
addl %eax, %r14d
movl %ebp, %eax
roll $0x15, %eax
xorl %ecx, %eax
movl %ebp, %ecx
roll $0x7, %ecx
xorl %eax, %ecx
movl %r15d, %eax
xorl %r8d, %eax
andl %ebp, %eax
xorl %r8d, %eax
movl 0x3c(%r11), %edx
bswapl %edx
movl %edx, -0x6c(%rsp)
addl %edx, %ebx
addl %eax, %ebx
movl %r14d, %eax
roll $0x1e, %eax
addl %ebx, %ecx
addl $0xc19bf174, %ecx # imm = 0xC19BF174
movl -0x7c(%rsp), %r11d
movl %r14d, %edx
roll $0x13, %edx
xorl %eax, %edx
movl %r14d, %eax
roll $0xa, %eax
xorl %edx, %eax
movl %r14d, %edx
andl %r10d, %edx
movl %r14d, %edi
xorl %r10d, %edi
andl %r12d, %edi
xorl %edx, %edi
addl %eax, %edi
addl %ecx, %r13d
addl %ecx, %edi
xorl %eax, %eax
movq %rax, -0x68(%rsp)
movl %edi, -0x48(%rsp)
movl %r13d, %eax
roll $0x1a, %eax
movl %r13d, %ecx
roll $0x15, %ecx
xorl %eax, %ecx
movl %r13d, %edx
roll $0x7, %edx
xorl %ecx, %edx
movl %ebp, %eax
xorl %r15d, %eax
andl %r13d, %eax
xorl %r15d, %eax
addl %r8d, %eax
movq 0x10(%rsp), %r8
movl %r8d, %ecx
roll $0x19, %ecx
addl %edx, %eax
movl %r8d, %edx
roll $0xe, %edx
xorl %ecx, %edx
movl %esi, %ecx
roll $0xf, %ecx
movl %esi, %r9d
movl %esi, -0x5c(%rsp)
roll $0xd, %esi
xorl %ecx, %esi
movl %r8d, %ecx
shrl $0x3, %ecx
xorl %edx, %ecx
movl %r14d, %ebx
movq 0x8(%rsp), %r14
movl %r11d, -0x7c(%rsp)
addl %r11d, %r14d
addl %ecx, %r14d
movl %r9d, %ecx
shrl $0xa, %ecx
xorl %esi, %ecx
movl %edi, %edx
roll $0x1e, %edx
movl %edi, %esi
roll $0x13, %esi
xorl %edx, %esi
movl %edi, %edx
roll $0xa, %edx
xorl %esi, %edx
addl %ecx, %r14d
movl %edi, %ecx
andl %ebx, %ecx
movl %edi, %r9d
xorl %ebx, %r9d
movl %ebx, -0x70(%rsp)
andl %r10d, %r9d
xorl %ecx, %r9d
addl %edx, %r9d
addl %r14d, %eax
leaq 0xbec5d(%rip), %rcx # 0x6d8140
movq -0x68(%rsp), %rdx
addl 0x40(%rcx,%rdx,4), %eax
addl %eax, %r12d
addl %eax, %r9d
movl -0x6c(%rsp), %ecx
movl %ecx, %eax
roll $0xf, %eax
movl %ecx, %edx
roll $0xd, %edx
xorl %eax, %edx
shrl $0xa, %ecx
movq (%rsp), %r11
movl %r11d, %eax
roll $0x19, %eax
xorl %edx, %ecx
movl %r11d, %edx
roll $0xe, %edx
xorl %eax, %edx
movl %r12d, %eax
roll $0x1a, %eax
movl %r12d, %esi
roll $0x15, %esi
xorl %eax, %esi
movl %r12d, %eax
roll $0x7, %eax
xorl %esi, %eax
movl %r11d, %esi
shrl $0x3, %esi
xorl %edx, %esi
addl -0x50(%rsp), %r8d
addl %esi, %r8d
movl %r9d, -0x2c(%rsp)
movl %r9d, %edx
roll $0x1e, %edx
addl %ecx, %r8d
movl %r9d, %ecx
roll $0x13, %ecx
xorl %edx, %ecx
movl %r9d, %edx
roll $0xa, %edx
xorl %ecx, %edx
movl %r9d, %ecx
andl %edi, %ecx
xorl %edi, %r9d
andl %ebx, %r9d
xorl %ecx, %r9d
addl %edx, %r9d
movl %r13d, %ecx
xorl %ebp, %ecx
andl %r12d, %ecx
xorl %ebp, %ecx
addl %r8d, %r15d
movq -0x68(%rsp), %rdi
leaq 0xbebb9(%rip), %rdx # 0x6d8140
addl 0x44(%rdx,%rdi,4), %r15d
addl %ecx, %r15d
movl %r14d, %ecx
roll $0xf, %ecx
movl %r14d, %edx
movq %r14, 0x8(%rsp)
roll $0xd, %edx
xorl %ecx, %edx
addl %eax, %r15d
movq -0x8(%rsp), %rbx
movl %ebx, %eax
roll $0x19, %eax
movl %ebx, %ecx
roll $0xe, %ecx
movl %r14d, %esi
shrl $0xa, %esi
xorl %edx, %esi
xorl %eax, %ecx
addl %r15d, %r10d
addl %r15d, %r9d
movl %ebx, %eax
shrl $0x3, %eax
xorl %ecx, %eax
addl -0x38(%rsp), %r11d
addl %eax, %r11d
movl %r10d, %ecx
roll $0x1a, %ecx
movl %r10d, %eax
roll $0x15, %eax
xorl %ecx, %eax
addl %esi, %r11d
movq %r11, (%rsp)
movl %r12d, %ecx
xorl %r13d, %ecx
andl %r10d, %ecx
addl %r11d, %ebp
leaq 0xbeb45(%rip), %rdx # 0x6d8140
addl 0x48(%rdx,%rdi,4), %ebp
movq %rdi, %r15
xorl %r13d, %ecx
addl %ecx, %ebp
movl %r9d, %edx
roll $0x1e, %edx
movl %r9d, %ecx
movl %r9d, %r11d
roll $0x13, %ecx
xorl %edx, %ecx
movq %r8, 0x10(%rsp)
movl %r8d, %edx
roll $0xf, %edx
movl %r8d, %esi
roll $0xd, %esi
xorl %edx, %esi
movl %r8d, %edx
shrl $0xa, %edx
xorl %esi, %edx
movl %r9d, %esi
movl -0x2c(%rsp), %edi
andl %edi, %esi
movl %r9d, %r8d
xorl %edi, %r8d
movl %edi, %r9d
andl -0x48(%rsp), %r8d
xorl %esi, %r8d
movl %r8d, %edi
movl %r10d, %esi
roll $0x7, %esi
xorl %eax, %esi
movl %r11d, %eax
roll $0xa, %eax
xorl %ecx, %eax
movl -0x30(%rsp), %r14d
movl %r14d, %ecx
roll $0x19, %ecx
movl %r14d, %r8d
roll $0xe, %r8d
xorl %ecx, %r8d
addl %esi, %ebp
movl %r14d, %ecx
shrl $0x3, %ecx
xorl %r8d, %ecx
addl %eax, %edi
movl -0x74(%rsp), %eax
movq %rbx, %rsi
addl %eax, %esi
addl %ecx, %esi
movl -0x70(%rsp), %ebx
addl %ebp, %ebx
addl %ebp, %edi
addl %edx, %esi
movq %rsi, -0x8(%rsp)
movl %r10d, %eax
xorl %r12d, %eax
andl %ebx, %eax
addl %esi, %r13d
movq %r15, %rbp
leaq 0xbea8f(%rip), %rcx # 0x6d8140
addl 0x4c(%rcx,%r15,4), %r13d
xorl %r12d, %eax
addl %eax, %r13d
movl %ebx, %edx
movl %ebx, -0x70(%rsp)
movl %ebx, %eax
roll $0x1a, %eax
movl %ebx, %ecx
roll $0x15, %ecx
xorl %eax, %ecx
roll $0x7, %edx
xorl %ecx, %edx
movl %edi, %eax
roll $0x1e, %eax
movl %edi, %ecx
roll $0x13, %ecx
xorl %eax, %ecx
movl %edi, %eax
roll $0xa, %eax
xorl %ecx, %eax
addl %edx, %r13d
movl %edi, %ecx
movl %r11d, %edx
movl %r11d, -0x10(%rsp)
andl %r11d, %ecx
movl %edi, %r8d
movl %edi, %r11d
xorl %edx, %r8d
movq (%rsp), %r15
movl %r15d, %edx
roll $0xf, %edx
movl %r9d, %ebx
andl %r9d, %r8d
xorl %ecx, %r8d
movl %r15d, %ecx
roll $0xd, %ecx
addl %eax, %r8d
xorl %edx, %ecx
movl -0x48(%rsp), %r9d
addl %r13d, %r9d
addl %r13d, %r8d
movl -0x3c(%rsp), %edi
movl %edi, %eax
roll $0x19, %eax
movl %edi, %edx
roll $0xe, %edx
movl %r15d, %esi
shrl $0xa, %esi
xorl %ecx, %esi
xorl %eax, %edx
movl %edi, %eax
movl %edi, %r15d
shrl $0x3, %eax
xorl %edx, %eax
movl %r9d, %ecx
roll $0x1a, %ecx
movl %r9d, %edx
roll $0x15, %edx
xorl %ecx, %edx
movl %r9d, %ecx
movl %r9d, -0x48(%rsp)
roll $0x7, %ecx
xorl %edx, %ecx
addl -0x78(%rsp), %r14d
addl %eax, %r14d
addl %esi, %r14d
movl -0x70(%rsp), %eax
xorl %r10d, %eax
andl %r9d, %eax
addl %r14d, %r12d
xorl %r10d, %eax
leaq 0xbe9b9(%rip), %r13 # 0x6d8140
addl 0x50(%r13,%rbp,4), %r12d
addl %eax, %r12d
addl %ecx, %r12d
movl %r8d, -0x18(%rsp)
movl %r8d, %eax
roll $0x1e, %eax
movl %r8d, %ecx
roll $0x13, %ecx
xorl %eax, %ecx
movl %r8d, %eax
roll $0xa, %eax
xorl %ecx, %eax
movl %r8d, %ecx
movl %r11d, -0xc(%rsp)
andl %r11d, %ecx
xorl %r11d, %r8d
andl -0x10(%rsp), %r8d
xorl %ecx, %r8d
addl %eax, %r8d
addl %r12d, %ebx
addl %r12d, %r8d
movl %r8d, -0x28(%rsp)
movq -0x8(%rsp), %r9
movl %r9d, %eax
roll $0xf, %eax
movl %r9d, %ecx
roll $0xd, %ecx
xorl %eax, %ecx
movl -0x4c(%rsp), %esi
movl %esi, %edx
roll $0x19, %edx
movl %r9d, %eax
shrl $0xa, %eax
xorl %ecx, %eax
movl %esi, %ecx
movl %esi, -0x4c(%rsp)
roll $0xe, %ecx
xorl %edx, %ecx
movl %ebx, %edx
roll $0x1a, %edx
movl %ebx, %r8d
roll $0x15, %r8d
xorl %edx, %r8d
movl %r8d, -0x54(%rsp)
movl %esi, %edx
shrl $0x3, %edx
xorl %ecx, %edx
movl -0x5c(%rsp), %r8d
addl %r8d, %r15d
addl %edx, %r15d
movl -0x44(%rsp), %edx
movl %edx, %ecx
roll $0xe, %ecx
movl %ecx, -0x14(%rsp)
addl %eax, %r15d
movl %r15d, -0x3c(%rsp)
movl -0x40(%rsp), %ecx
roll $0xe, %ecx
movl %ecx, -0x1c(%rsp)
movl -0x7c(%rsp), %ecx
roll $0xe, %ecx
movl %ecx, -0x20(%rsp)
movl -0x50(%rsp), %edi
movl %edi, %ecx
roll $0xe, %ecx
movl %ecx, -0x24(%rsp)
movl 0x54(%r13,%rbp,4), %ecx
addl %r15d, %ecx
addl %r10d, %ecx
movl %ecx, %ebp
movl %r14d, %esi
roll $0xf, %r14d
movl -0x38(%rsp), %r13d
movl %r13d, %r15d
roll $0xe, %r15d
movl -0x74(%rsp), %eax
roll $0xe, %eax
movl %eax, -0x34(%rsp)
movl -0x48(%rsp), %eax
movl -0x70(%rsp), %ecx
xorl %ecx, %eax
movl -0x78(%rsp), %r12d
movl %r12d, %r9d
roll $0xe, %r9d
movl %r9d, 0x1c(%rsp)
movl %ebx, -0x2c(%rsp)
andl %ebx, %eax
xorl %ecx, %eax
movl -0x28(%rsp), %r11d
movl %r11d, %ecx
roll $0x1e, %ecx
addl %eax, %ebp
movl %ebp, -0x58(%rsp)
movl %edx, %eax
movl %edx, -0x44(%rsp)
roll $0x13, %r11d
xorl %ecx, %r11d
movl %esi, %r10d
movl %esi, %edx
movl %esi, -0x30(%rsp)
roll $0xd, %r10d
xorl %r14d, %r10d
movl %r8d, %r14d
movl %r8d, %ecx
roll $0x19, %ecx
roll $0xe, %r14d
movl -0x6c(%rsp), %ebp
movl %ebp, %r8d
roll $0x19, %r8d
shrl $0xa, %edx
xorl %r10d, %edx
roll $0x19, %eax
xorl %eax, -0x14(%rsp)
movl -0x40(%rsp), %eax
roll $0x19, %eax
xorl %eax, -0x1c(%rsp)
movl -0x7c(%rsp), %eax
roll $0x19, %eax
xorl %eax, -0x20(%rsp)
movl %edi, %eax
movl %edi, -0x50(%rsp)
roll $0x19, %eax
xorl %eax, -0x24(%rsp)
movl %r13d, %eax
roll $0x19, %eax
xorl %eax, %r15d
movl -0x74(%rsp), %r13d
movl %r13d, %eax
roll $0x19, %eax
movl -0x34(%rsp), %r9d
xorl %eax, %r9d
roll $0x19, %r12d
movl 0x1c(%rsp), %edi
xorl %r12d, %edi
movl %ebp, %eax
roll $0xe, %eax
xorl %ecx, %r14d
xorl %r8d, %eax
movl -0x28(%rsp), %esi
movl %esi, %ecx
movl -0x18(%rsp), %r8d
andl %r8d, %ecx
movl %esi, %r10d
xorl %r8d, %r10d
andl -0xc(%rsp), %r10d
xorl %ecx, %r10d
movl %ebx, %r8d
roll $0x7, %r8d
xorl -0x54(%rsp), %r8d
movl %esi, %ecx
roll $0xa, %ecx
xorl %r11d, %ecx
movq 0x8(%rsp), %r12
movl %r12d, %r11d
roll $0x19, %r11d
movl %r12d, %ebx
roll $0xe, %ebx
xorl %r11d, %ebx
movl -0x58(%rsp), %esi
addl %r8d, %esi
movl %r12d, %r11d
shrl $0x3, %r11d
xorl %ebx, %r11d
addl %ecx, %r10d
movl -0x4c(%rsp), %r8d
addl %ebp, %r8d
addl %ebp, %r11d
movl %r11d, -0x34(%rsp)
shrl $0x3, %ebp
xorl %eax, %ebp
movl -0x5c(%rsp), %eax
addl %eax, %ebp
movl %ebp, -0x54(%rsp)
movl %eax, %ecx
shrl $0x3, %ecx
xorl %r14d, %ecx
movl -0x78(%rsp), %eax
addl %eax, %ecx
movl %ecx, -0x5c(%rsp)
movl %eax, %ecx
shrl $0x3, %ecx
xorl %edi, %ecx
addl %r13d, %ecx
movl %ecx, -0x78(%rsp)
shrl $0x3, %r13d
xorl %r9d, %r13d
movl -0x38(%rsp), %eax
addl %eax, %r13d
movl %r13d, -0x74(%rsp)
shrl $0x3, %eax
xorl %r15d, %eax
movl -0x50(%rsp), %ebx
addl %ebx, %eax
movl %eax, -0x58(%rsp)
shrl $0x3, %ebx
xorl -0x24(%rsp), %ebx
movl -0x7c(%rsp), %eax
addl %eax, %ebx
movl %ebx, -0x6c(%rsp)
shrl $0x3, %eax
xorl -0x20(%rsp), %eax
movl -0x40(%rsp), %r12d
addl %r12d, %eax
movl %eax, -0x7c(%rsp)
shrl $0x3, %r12d
xorl -0x1c(%rsp), %r12d
movl -0x44(%rsp), %eax
addl %eax, %r12d
shrl $0x3, %eax
xorl -0x14(%rsp), %eax
movl %r8d, %r13d
addl %eax, %r13d
addl %edx, %r13d
movq -0x68(%rsp), %rax
leaq 0xbe6e1(%rip), %rcx # 0x6d8140
movl 0x58(%rcx,%rax,4), %edx
addl %r13d, %edx
addl -0x70(%rsp), %edx
movl -0x10(%rsp), %ebx
addl %esi, %ebx
addl %esi, %r10d
movl -0x2c(%rsp), %r9d
movl %r9d, %eax
movl -0x48(%rsp), %edi
xorl %edi, %eax
andl %ebx, %eax
xorl %edi, %eax
addl %eax, %edx
movl %ebx, %eax
roll $0x1a, %eax
movl %ebx, %r8d
roll $0x15, %r8d
xorl %eax, %r8d
movl %ebx, %eax
roll $0x7, %eax
xorl %r8d, %eax
movl %r10d, %r8d
roll $0x1e, %r8d
movl %r10d, %ebp
roll $0x13, %ebp
xorl %r8d, %ebp
movl %r10d, %r8d
roll $0xa, %r8d
xorl %ebp, %r8d
addl %eax, %edx
movl %r10d, %eax
movl -0x28(%rsp), %esi
andl %esi, %eax
movl %r10d, %r14d
xorl %esi, %r14d
movl -0x3c(%rsp), %r15d
movl %r15d, %ebp
roll $0xf, %ebp
movl -0x18(%rsp), %r11d
andl %r11d, %r14d
xorl %eax, %r14d
movl %r15d, %eax
roll $0xd, %eax
addl %r8d, %r14d
xorl %ebp, %eax
movl %r15d, %r8d
shrl $0xa, %r8d
xorl %eax, %r8d
addl 0x8(%rsp), %r12d
addl %r8d, %r12d
movl %r12d, -0x70(%rsp)
movq -0x68(%rsp), %rax
movl 0x5c(%rcx,%rax,4), %r8d
addl %r12d, %r8d
addl %edi, %r8d
movl -0xc(%rsp), %ecx
addl %edx, %ecx
addl %edx, %r14d
movl %ebx, %eax
xorl %r9d, %eax
andl %ecx, %eax
xorl %r9d, %eax
addl %eax, %r8d
movl %ecx, %eax
roll $0x1a, %eax
movl %ecx, %edx
roll $0x15, %edx
xorl %eax, %edx
movl %ecx, %eax
roll $0x7, %eax
xorl %edx, %eax
movl %r14d, %edx
roll $0x1e, %edx
movl %r14d, %edi
roll $0x13, %edi
xorl %edx, %edi
movl %r14d, %edx
roll $0xa, %edx
xorl %edi, %edx
addl %eax, %r8d
movl %r14d, %eax
andl %r10d, %eax
movl %r14d, %edi
xorl %r10d, %edi
movl %r13d, -0x4c(%rsp)
movl %r13d, %ebp
roll $0xf, %ebp
andl %esi, %edi
movl %esi, %r12d
xorl %eax, %edi
movl %r13d, %eax
roll $0xd, %eax
addl %edx, %edi
xorl %ebp, %eax
movl %r13d, %edx
shrl $0xa, %edx
xorl %eax, %edx
movl -0x7c(%rsp), %esi
addl 0x10(%rsp), %esi
addl %edx, %esi
movl %esi, -0x7c(%rsp)
movq -0x68(%rsp), %rax
leaq 0xbe5a3(%rip), %r13 # 0x6d8140
movl 0x60(%r13,%rax,4), %edx
addl %esi, %edx
addl %r9d, %edx
addl %r8d, %r11d
addl %r8d, %edi
movl %ecx, %eax
xorl %ebx, %eax
andl %r11d, %eax
xorl %ebx, %eax
addl %eax, %edx
movl %r11d, %eax
roll $0x1a, %eax
movl %r11d, %r8d
roll $0x15, %r8d
xorl %eax, %r8d
movl %r11d, %eax
movl %r11d, %r9d
roll $0x7, %eax
xorl %r8d, %eax
movl %edi, %r8d
roll $0x1e, %r8d
movl %edi, %ebp
roll $0x13, %ebp
xorl %r8d, %ebp
movl %edi, %r15d
roll $0xa, %r15d
xorl %ebp, %r15d
addl %eax, %edx
movl %edi, %eax
andl %r14d, %eax
movl %edi, %r8d
xorl %r14d, %r8d
movl -0x70(%rsp), %esi
movl %esi, %ebp
roll $0xf, %ebp
andl %r10d, %r8d
xorl %eax, %r8d
movl %esi, %eax
roll $0xd, %eax
addl %r15d, %r8d
xorl %ebp, %eax
movl %esi, %ebp
shrl $0xa, %ebp
xorl %eax, %ebp
movl -0x6c(%rsp), %esi
addl (%rsp), %esi
addl %ebp, %esi
movl %esi, -0x6c(%rsp)
movq -0x68(%rsp), %rax
movl 0x64(%r13,%rax,4), %ebp
addl %esi, %ebp
addl %ebx, %ebp
addl %edx, %r12d
addl %edx, %r8d
movl %r11d, %eax
xorl %ecx, %eax
andl %r12d, %eax
xorl %ecx, %eax
addl %eax, %ebp
movl %r12d, %eax
roll $0x1a, %eax
movl %r12d, %edx
roll $0x15, %edx
xorl %eax, %edx
movl %r12d, %eax
movl %r12d, %esi
roll $0x7, %eax
xorl %edx, %eax
movl %r8d, %edx
roll $0x1e, %edx
movl %r8d, %r15d
roll $0x13, %r15d
xorl %edx, %r15d
movl %r8d, %edx
roll $0xa, %edx
xorl %r15d, %edx
addl %eax, %ebp
movl %r8d, %eax
andl %edi, %eax
movl %r8d, %r15d
xorl %edi, %r15d
movl -0x7c(%rsp), %r11d
movl %r11d, %r12d
roll $0xf, %r12d
andl %r14d, %r15d
xorl %eax, %r15d
movl %r11d, %eax
roll $0xd, %eax
addl %edx, %r15d
xorl %r12d, %eax
movl %r11d, %edx
shrl $0xa, %edx
xorl %eax, %edx
movl -0x58(%rsp), %r11d
addl -0x8(%rsp), %r11d
addl %edx, %r11d
movq -0x68(%rsp), %rax
movl 0x68(%r13,%rax,4), %edx
movq %r13, %rbx
addl %r11d, %edx
addl %ecx, %edx
addl %ebp, %r10d
addl %ebp, %r15d
movl %esi, %eax
xorl %r9d, %eax
andl %r10d, %eax
xorl %r9d, %eax
addl %eax, %edx
movl %r10d, %eax
roll $0x1a, %eax
movl %r10d, %ebp
roll $0x15, %ebp
xorl %eax, %ebp
movl %r10d, %eax
roll $0x7, %eax
xorl %ebp, %eax
movl %r15d, %ebp
roll $0x1e, %ebp
movl %r15d, %r12d
roll $0x13, %r12d
xorl %ebp, %r12d
movl %r15d, %r13d
roll $0xa, %r13d
xorl %r12d, %r13d
addl %eax, %edx
movl %r15d, %eax
andl %r8d, %eax
movl %r15d, %ebp
xorl %r8d, %ebp
movl -0x6c(%rsp), %ecx
movl %ecx, %r12d
roll $0xf, %r12d
andl %edi, %ebp
xorl %eax, %ebp
movl %ecx, %eax
roll $0xd, %eax
addl %r13d, %ebp
xorl %r12d, %eax
movl %ecx, %r12d
shrl $0xa, %r12d
xorl %eax, %r12d
movl -0x74(%rsp), %ecx
addl -0x30(%rsp), %ecx
addl %r12d, %ecx
movl %ecx, -0x74(%rsp)
movq -0x68(%rsp), %rax
movl 0x6c(%rbx,%rax,4), %r12d
addl %ecx, %r12d
addl %r9d, %r12d
addl %edx, %r14d
addl %edx, %ebp
movl %r10d, %eax
xorl %esi, %eax
andl %r14d, %eax
xorl %esi, %eax
addl %eax, %r12d
movl %r14d, %eax
roll $0x1a, %eax
movl %r14d, %edx
roll $0x15, %edx
xorl %eax, %edx
movl %r14d, %eax
roll $0x7, %eax
xorl %edx, %eax
movl %ebp, %edx
roll $0x1e, %edx
movl %ebp, %r13d
roll $0x13, %r13d
xorl %edx, %r13d
movl %ebp, %edx
roll $0xa, %edx
xorl %r13d, %edx
addl %eax, %r12d
movl %ebp, %eax
andl %r15d, %eax
movl %ebp, %r13d
xorl %r15d, %r13d
movl %r11d, %ecx
movl %r11d, -0x58(%rsp)
roll $0xf, %r11d
andl %r8d, %r13d
xorl %eax, %r13d
movl %ecx, %eax
roll $0xd, %eax
addl %edx, %r13d
xorl %r11d, %eax
movl %ecx, %edx
shrl $0xa, %edx
xorl %eax, %edx
movl -0x78(%rsp), %ecx
addl -0x3c(%rsp), %ecx
addl %edx, %ecx
movl %ecx, -0x78(%rsp)
movq -0x68(%rsp), %rax
movl 0x70(%rbx,%rax,4), %edx
addl %ecx, %edx
addl %esi, %edx
addl %r12d, %edi
addl %r12d, %r13d
movl %r14d, %eax
xorl %r10d, %eax
andl %edi, %eax
xorl %r10d, %eax
addl %eax, %edx
movl %edi, %eax
roll $0x1a, %eax
movl %edi, %r11d
roll $0x15, %r11d
xorl %eax, %r11d
movl %edi, %eax
roll $0x7, %eax
xorl %r11d, %eax
movl %r13d, %r11d
roll $0x1e, %r11d
movl %r13d, %r12d
roll $0x13, %r12d
xorl %r11d, %r12d
movl %r13d, %r11d
roll $0xa, %r11d
xorl %r12d, %r11d
addl %eax, %edx
movl %r13d, %eax
andl %ebp, %eax
movl %r13d, %r12d
xorl %ebp, %r12d
movl -0x74(%rsp), %esi
movl %esi, %ecx
roll $0xf, %ecx
andl %r15d, %r12d
xorl %eax, %r12d
movl %esi, %eax
roll $0xd, %eax
addl %r11d, %r12d
xorl %ecx, %eax
movl %esi, %ecx
shrl $0xa, %ecx
xorl %eax, %ecx
movl -0x5c(%rsp), %esi
addl -0x4c(%rsp), %esi
addl %ecx, %esi
movq -0x68(%rsp), %rax
movl 0x74(%rbx,%rax,4), %eax
addl %esi, %eax
addl %r10d, %eax
addl %edx, %r8d
addl %edx, %r12d
movl %edi, %ecx
xorl %r14d, %ecx
andl %r8d, %ecx
xorl %r14d, %ecx
addl %ecx, %eax
movl %r8d, %ecx
roll $0x1a, %ecx
movl %r8d, %edx
roll $0x15, %edx
xorl %ecx, %edx
movl %r8d, %ecx
roll $0x7, %ecx
xorl %edx, %ecx
movl %r12d, %edx
roll $0x1e, %edx
movl %r12d, %r10d
roll $0x13, %r10d
xorl %edx, %r10d
movl %r12d, %edx
roll $0xa, %edx
xorl %r10d, %edx
addl %ecx, %eax
movl %r12d, %ecx
andl %r13d, %ecx
movl %r12d, %r10d
xorl %r13d, %r10d
movl -0x78(%rsp), %r9d
movl %r9d, %r11d
roll $0xf, %r11d
andl %ebp, %r10d
xorl %ecx, %r10d
movl %r9d, %ecx
roll $0xd, %ecx
addl %edx, %r10d
xorl %r11d, %ecx
movl %r9d, %edx
shrl $0xa, %edx
xorl %ecx, %edx
movl -0x54(%rsp), %r9d
addl -0x70(%rsp), %r9d
addl %edx, %r9d
movl %r9d, -0x54(%rsp)
movq -0x68(%rsp), %rcx
movl 0x78(%rbx,%rcx,4), %edx
addl %r9d, %edx
addl %r14d, %edx
addl %eax, %r15d
addl %eax, %r10d
movl %r8d, %eax
xorl %edi, %eax
andl %r15d, %eax
xorl %edi, %eax
addl %eax, %edx
movl %r15d, %eax
roll $0x1a, %eax
movl %r15d, %ecx
roll $0x15, %ecx
xorl %eax, %ecx
movl %r15d, %eax
roll $0x7, %eax
xorl %ecx, %eax
movl %r10d, %ecx
roll $0x1e, %ecx
movl %r10d, %r11d
roll $0x13, %r11d
xorl %ecx, %r11d
movl %r10d, %ecx
roll $0xa, %ecx
xorl %r11d, %ecx
addl %eax, %edx
movl %r10d, %eax
andl %r12d, %eax
movl %r10d, %r14d
xorl %r12d, %r14d
movl %esi, -0x5c(%rsp)
movl %esi, %r11d
roll $0xf, %r11d
andl %r13d, %r14d
xorl %eax, %r14d
movl %esi, %eax
roll $0xd, %eax
addl %ecx, %r14d
xorl %r11d, %eax
movl %esi, %ecx
shrl $0xa, %ecx
xorl %eax, %ecx
movl -0x34(%rsp), %esi
movl -0x7c(%rsp), %r9d
addl %r9d, %esi
addl %ecx, %esi
movq -0x68(%rsp), %rax
movl 0x7c(%rbx,%rax,4), %eax
addl %esi, %eax
addl %edi, %eax
addl %edx, %ebp
addl %edx, %r14d
movl %r15d, %ecx
xorl %r8d, %ecx
andl %ebp, %ecx
xorl %r8d, %ecx
addl %ecx, %eax
movl %ebp, %ecx
roll $0x1a, %ecx
movl %ebp, %edx
roll $0x15, %edx
xorl %ecx, %edx
movl %ebp, %ecx
roll $0x7, %ecx
xorl %edx, %ecx
movl %r14d, %edx
roll $0x1e, %edx
movl %r14d, %edi
roll $0x13, %edi
xorl %edx, %edi
movl %r14d, %edx
roll $0xa, %edx
xorl %edi, %edx
addl %ecx, %eax
movl %r14d, %ecx
andl %r10d, %ecx
movl %r14d, %edi
xorl %r10d, %edi
andl %r12d, %edi
xorl %ecx, %edi
addl %edx, %edi
addl %eax, %r13d
addl %eax, %edi
movl -0x74(%rsp), %eax
movl %eax, -0x38(%rsp)
movl -0x58(%rsp), %eax
movl %eax, -0x50(%rsp)
movl -0x78(%rsp), %eax
movl %eax, -0x74(%rsp)
movl -0x6c(%rsp), %r11d
movl %r9d, -0x40(%rsp)
movq -0x68(%rsp), %rcx
movl -0x70(%rsp), %eax
movl %eax, -0x44(%rsp)
movl -0x5c(%rsp), %eax
movl %eax, -0x78(%rsp)
addq $0x10, %rcx
movl %esi, -0x6c(%rsp)
movl -0x54(%rsp), %esi
movq %rcx, %rax
cmpl $0x30, %ecx
jb 0x619436
addl 0x20(%rsp), %edi
movq 0x58(%rsp), %rax
movl %edi, (%rax)
addl 0x28(%rsp), %r14d
movl %r14d, 0x4(%rax)
addl 0x30(%rsp), %r10d
movl %r10d, 0x8(%rax)
addl 0x38(%rsp), %r12d
movl %r12d, 0xc(%rax)
addl 0x40(%rsp), %r13d
movl %r13d, 0x10(%rax)
addl 0x48(%rsp), %ebp
movl %ebp, 0x14(%rax)
addl 0x50(%rsp), %r15d
movl %r15d, 0x18(%rax)
addl 0x18(%rsp), %r8d
movl %r8d, 0x1c(%rax)
addq $0x60, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/sha256.c
|
rhash_sha256_final
|
/**
 * Finish hashing: pad the buffered tail of the message, append the 64-bit
 * bit-length, process the final block(s) and store the digest.
 *
 * @param ctx    the context holding the buffered message tail and hash state
 * @param result buffer receiving the big-endian digest; may be NULL to only
 *               finalize the internal state
 */
void rhash_sha256_final(sha256_ctx* ctx, unsigned char* result)
{
	unsigned used = (unsigned)ctx->length & 63;     /* bytes buffered in the last block */
	size_t word = used >> 2;                        /* 32-bit word holding the next free byte */
	unsigned bits = (used & 3) * 8;                 /* bit offset of that byte inside the word */

	/* append the mandatory 0x80 padding byte, clearing any stale bytes
	 * above it within the same 32-bit word */
	ctx->message[word] &= le2me_32(~(0xFFFFFFFFu << bits));
	ctx->message[word] ^= le2me_32(0x80u << bits);
	word++;

	/* not enough room left for the 64-bit length? zero-fill and flush first */
	if (word > 14) {
		for (; word < 16; word++) {
			ctx->message[word] = 0;
		}
		rhash_sha256_process_block(ctx->hash, ctx->message);
		word = 0;
	}
	/* zero-fill up to the length field */
	for (; word < 14; word++) {
		ctx->message[word] = 0;
	}
	/* store message length in bits as two big-endian 32-bit words */
	ctx->message[14] = be2me_32( (unsigned)(ctx->length >> 29) );
	ctx->message[15] = be2me_32( (unsigned)(ctx->length << 3) );
	rhash_sha256_process_block(ctx->hash, ctx->message);
	if (result) be32_copy(result, 0, ctx->hash, ctx->digest_length);
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
movl 0x40(%rdi), %ecx
movl %ecx, %eax
shll $0x3, %ecx
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
shll %cl, %edx
movl $0x80, %edi
shll %cl, %edi
shrl $0x2, %eax
andl $0xf, %eax
notl %edx
andl (%rbx,%rax,4), %edx
movq %rsi, %r14
xorl %edx, %edi
movl %edi, (%rbx,%rax,4)
cmpl $0xe, %eax
jb 0x61a0e6
cmpl $0xe, %eax
jne 0x61a0d6
movl $0x0, 0x3c(%rbx)
leaq 0x48(%rbx), %rdi
movq %rbx, %rsi
callq 0x618d0e
xorl %eax, %eax
jmp 0x61a0ef
cmpq $0xd, %rax
je 0x61a105
incq %rax
shll $0x2, %eax
leaq (%rbx,%rax), %rdi
movl $0x38, %edx
subq %rax, %rdx
xorl %esi, %esi
callq 0x3fa90
movq 0x40(%rbx), %rax
movq %rax, %rcx
shrq $0x1d, %rcx
bswapl %ecx
movl %ecx, 0x38(%rbx)
shll $0x3, %eax
bswapl %eax
movl %eax, 0x3c(%rbx)
leaq 0x48(%rbx), %r15
movq %r15, %rdi
movq %rbx, %rsi
callq 0x618d0e
testq %r14, %r14
je 0x61a146
movl 0x68(%rbx), %ecx
movq %r14, %rdi
xorl %esi, %esi
movq %r15, %rdx
popq %rbx
popq %r14
popq %r15
jmp 0x61c358
popq %rbx
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/sha256.c
|
rhash_sha3_process_block
|
/**
 * Absorb one message block into the Keccak state and permute it.
 *
 * The number of 64-bit words XOR-ed into the state depends on the rate
 * (block size) of the SHA-3 variant in use.
 *
 * @param hash       25-word Keccak state, updated in place
 * @param block      the message block to absorb
 * @param block_size rate in bytes (72/104/136/144, larger with full family support)
 */
static void rhash_sha3_process_block(uint64_t hash[25], const uint64_t* block, size_t block_size)
{
	size_t words; /* 64-bit words absorbed for this rate */
	size_t i;

	if (block_size <= 72) {
		words = 9;           /* sha3-512 */
	} else if (block_size <= 104) {
		words = 13;          /* sha3-384 */
	} else if (block_size <= 136) {
		words = 17;          /* sha3-256 */
#ifdef FULL_SHA3_FAMILY_SUPPORT
	} else if (block_size <= 144) {
		words = 18;          /* sha3-224 */
	} else {
		words = 25;          /* wider rates of the full family */
#else
	} else {
		words = 18;          /* sha3-224 */
#endif
	}

	for (i = 0; i < words; i++) {
		hash[i] ^= le2me_64(block[i]);
	}

	/* make a permutation of the hash */
	rhash_sha3_permutation(hash);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq (%rdi), %r11
xorq (%rsi), %r11
movq 0x8(%rdi), %rax
movq %r11, (%rdi)
xorq 0x8(%rsi), %rax
movq %rax, 0x8(%rdi)
movq 0x10(%rdi), %r9
xorq 0x10(%rsi), %r9
movq %r9, 0x10(%rdi)
movq 0x18(%rdi), %rbp
xorq 0x18(%rsi), %rbp
movq %rbp, 0x18(%rdi)
movq 0x20(%rdi), %r8
xorq 0x20(%rsi), %r8
movq %r8, 0x20(%rdi)
movq 0x28(%rdi), %rcx
xorq 0x28(%rsi), %rcx
movq %rcx, -0x50(%rsp)
movq %rcx, 0x28(%rdi)
movq 0x30(%rdi), %r10
xorq 0x30(%rsi), %r10
movq %r10, 0x30(%rdi)
movq 0x38(%rdi), %rcx
xorq 0x38(%rsi), %rcx
movq %rcx, 0x38(%rdi)
movq 0x40(%rdi), %r12
xorq 0x40(%rsi), %r12
movq %r12, 0x40(%rdi)
movq 0x48(%rdi), %rbx
cmpq $0x48, %rdx
movq %r8, -0x8(%rsp)
movq %r9, -0x10(%rsp)
movq %rax, -0x18(%rsp)
movq %r11, -0x60(%rsp)
movq %r12, -0x28(%rsp)
movq %rbp, -0x70(%rsp)
jbe 0x61a3cf
xorq 0x48(%rsi), %rbx
movq %rbx, 0x48(%rdi)
movq 0x50(%rdi), %rax
movq 0x58(%rdi), %r15
xorq 0x50(%rsi), %rax
movq %rax, 0x50(%rdi)
xorq 0x58(%rsi), %r15
movq %r15, 0x58(%rdi)
movq 0x60(%rdi), %r14
xorq 0x60(%rsi), %r14
movq %r14, 0x60(%rdi)
cmpq $0x69, %rdx
movq %r15, -0x78(%rsp)
jb 0x61a3e0
movq %rax, %r11
movq 0x68(%rsi), %rax
xorq %rax, 0x68(%rdi)
movq 0x70(%rsi), %rax
xorq %rax, 0x70(%rdi)
movq 0x78(%rsi), %rax
xorq %rax, 0x78(%rdi)
movq 0x80(%rsi), %rax
xorq %rax, 0x80(%rdi)
cmpq $0x89, %rdx
jb 0x61a3ca
movq 0x88(%rsi), %rax
xorq %rax, 0x88(%rdi)
movq %r11, %rax
jmp 0x61a3e0
movq 0x50(%rdi), %rax
movq 0x58(%rdi), %rdx
movq %rdx, -0x78(%rsp)
movq 0x60(%rdi), %r14
movq %rbx, -0x20(%rsp)
movq %rcx, -0x38(%rsp)
movq 0x80(%rdi), %rbp
movq 0xa8(%rdi), %rcx
movq %rcx, -0x48(%rsp)
movq 0x68(%rdi), %r12
movq 0x70(%rdi), %r15
movq 0x98(%rdi), %rsi
movq %rsi, -0x30(%rsp)
movq 0xc0(%rdi), %rsi
movq %rsi, -0x80(%rsp)
movq 0x88(%rdi), %rsi
movq %rsi, -0x68(%rsp)
movq 0xb0(%rdi), %rcx
movq %rcx, -0x40(%rsp)
movq 0x78(%rdi), %r9
movq 0xa0(%rdi), %rcx
movq %rcx, (%rsp)
movq 0x90(%rdi), %r8
movq %rdi, 0x48(%rsp)
movq 0xb8(%rdi), %rcx
movq %rcx, 0x8(%rsp)
xorl %esi, %esi
movq -0x78(%rsp), %rdx
movq -0x70(%rsp), %rdi
movq %r12, 0x28(%rsp)
movq %rbp, 0x18(%rsp)
movq %r9, 0x30(%rsp)
movq %rsi, 0x60(%rsp)
movq %r10, %r11
xorq -0x18(%rsp), %r11
movq %rdx, -0x78(%rsp)
movq %rdx, %rcx
xorq %rbp, %rcx
xorq %r11, %rcx
xorq -0x48(%rsp), %rcx
rolq %rcx
movq -0x8(%rsp), %rbx
xorq -0x20(%rsp), %rbx
movq %r15, -0x58(%rsp)
movq %r15, %rsi
movq -0x30(%rsp), %rdx
xorq %rdx, %rsi
xorq %rbx, %rsi
xorq %r15, %rbx
xorq %rdx, %rbx
xorq -0x80(%rsp), %rbx
movq -0x38(%rsp), %rbp
xorq -0x10(%rsp), %rbp
movq %r14, %r13
xorq -0x68(%rsp), %r13
xorq %rbp, %r13
xorq -0x40(%rsp), %r13
rolq %r13
xorq %rcx, %rbx
movq -0x60(%rsp), %rcx
xorq -0x50(%rsp), %rcx
movq %rax, %rdx
movq %rax, 0x40(%rsp)
xorq %r9, %rax
xorq %rcx, %rax
xorq %rdx, %rcx
xorq %r9, %rcx
movq (%rsp), %r9
xorq %r9, %rcx
xorq %r13, %rcx
movq %r8, %rdx
movq %r8, -0x70(%rsp)
movq -0x28(%rsp), %r8
xorq %rdi, %r8
movq %r12, %r13
xorq %rdx, %r13
xorq %r8, %r13
movq 0x8(%rsp), %r15
xorq %r15, %r13
rolq %r13
movq -0x78(%rsp), %r12
xorq %r12, %r11
movq 0x18(%rsp), %rdx
xorq %rdx, %r11
xorq -0x48(%rsp), %r11
xorq %r13, %r11
xorq -0x80(%rsp), %rsi
rolq %rsi
xorq %r14, %rbp
movq -0x68(%rsp), %r13
xorq %r13, %rbp
xorq -0x40(%rsp), %rbp
xorq %r9, %rax
rolq %rax
xorq %rsi, %rbp
movq 0x28(%rsp), %rsi
xorq %rsi, %r8
xorq -0x70(%rsp), %r8
xorq %r15, %r8
xorq %rax, %r8
movq -0x60(%rsp), %rax
xorq %rbx, %rax
movq %rax, 0x38(%rsp)
xorq %rbx, -0x50(%rsp)
xorq %rbx, 0x40(%rsp)
xorq %rbx, 0x30(%rsp)
xorq %r9, %rbx
movq %rbx, 0x58(%rsp)
movq -0x18(%rsp), %rax
xorq %rcx, %rax
xorq %rcx, %r10
xorq %rcx, %r12
movq %r12, -0x78(%rsp)
xorq %rcx, %rdx
movq %rdx, 0x18(%rsp)
xorq -0x48(%rsp), %rcx
movq %rcx, 0x10(%rsp)
movq -0x10(%rsp), %rbx
xorq %r11, %rbx
xorq %r11, -0x38(%rsp)
xorq %r11, %r14
xorq %r11, %r13
movq %r13, -0x68(%rsp)
xorq -0x40(%rsp), %r11
movq %r11, %r12
xorq %rbp, %rdi
movq -0x28(%rsp), %r11
xorq %rbp, %r11
xorq %rbp, %rsi
movq %rsi, 0x28(%rsp)
movq -0x70(%rsp), %rsi
xorq %rbp, %rsi
xorq %r15, %rbp
movq %rbp, 0x20(%rsp)
movq -0x8(%rsp), %rdx
xorq %r8, %rdx
movq -0x20(%rsp), %rcx
xorq %r8, %rcx
movq -0x58(%rsp), %rbp
xorq %r8, %rbp
xorq %r8, -0x30(%rsp)
xorq -0x80(%rsp), %r8
movq %rax, 0x50(%rsp)
rolq $0x3e, %rbx
movq %rdi, %rax
rolq $0x1c, %rax
rolq $0x2c, %r10
rolq $0x2b, %r14
rolq $0x15, %rsi
movq %rsi, -0x70(%rsp)
movq %rdx, -0x58(%rsp)
rolq $0xe, %r8
movq %r10, %rdx
notq %rdx
andq %r14, %rdx
movq %rdx, -0x60(%rsp)
movq %rsi, %rdx
notq %rdx
andq %r8, %rdx
xorq %r14, %rdx
movq %rdx, -0x10(%rsp)
notq %r14
andq %rsi, %r14
xorq %r10, %r14
movq %r14, -0x18(%rsp)
movq 0x38(%rsp), %r13
movq %r13, %rdx
notq %rdx
andq %r10, %rdx
rolq $0x37, %r11
movq %rbp, %rsi
rolq $0x27, %rsi
movq 0x30(%rsp), %r9
rolq $0x29, %r9
movq 0x10(%rsp), %r10
rolq $0x2, %r10
movq %r11, %rdi
notq %rdi
andq %rsi, %rdi
xorq %rbx, %rdi
movq %rdi, (%rsp)
movq %r9, %rdi
notq %rdi
movq %r10, %r14
movq %r10, 0x10(%rsp)
andq %r10, %rdi
xorq %rsi, %rdi
movq %rdi, -0x40(%rsp)
notq %rsi
andq %r9, %rsi
xorq %r11, %rsi
movq %rsi, -0x48(%rsp)
notq %r14
andq %rbx, %r14
notq %rbx
andq %r11, %rbx
movq %rbx, -0x80(%rsp)
movq -0x50(%rsp), %rbx
movq %rcx, %rdi
rolq $0x14, %rdi
movq 0x40(%rsp), %rcx
rolq $0x3, %rcx
movq 0x18(%rsp), %rbp
rolq $0x2d, %rbp
rolq $0x3d, %r12
movq %rdi, %rsi
notq %rsi
andq %rcx, %rsi
xorq %rax, %rsi
movq %rsi, -0x50(%rsp)
movq %rcx, %r10
movq %rcx, %rsi
notq %r10
andq %rbp, %r10
xorq %rdi, %r10
movq %r12, %r15
notq %r15
andq %rax, %r15
notq %rax
andq %rax, %rdi
movq %rdi, %r11
movq %r8, %rdi
notq %rdi
andq %r13, %rdi
xorq -0x70(%rsp), %rdi
xorq %r8, %rdx
movq %rdx, -0x8(%rsp)
movq -0x38(%rsp), %rcx
movq %rbp, %rax
notq %rax
andq %r12, %rax
xorq %rsi, %rax
movq %rax, -0x38(%rsp)
xorq %rbp, %r15
movq %r15, -0x28(%rsp)
xorq %r12, %r11
movq %r11, -0x20(%rsp)
xorq %r9, %r14
movq %r14, 0x8(%rsp)
movq -0x58(%rsp), %rsi
rolq $0x1b, %rsi
movq %rsi, -0x58(%rsp)
rolq $0x24, %rbx
movq -0x78(%rsp), %rax
rolq $0xa, %rax
movq -0x68(%rsp), %r14
rolq $0xf, %r14
movq 0x20(%rsp), %rdx
rolq $0x38, %rdx
movq %rdx, 0x20(%rsp)
movq %rbx, %r9
notq %r9
andq %rax, %r9
movq %rax, %rbp
notq %rbp
andq %r14, %rbp
movq %rdx, %r8
notq %r8
andq %rsi, %r8
xorq %r14, %r8
notq %r14
andq %rdx, %r14
xorq %rax, %r14
movq %r14, -0x68(%rsp)
movq 0x50(%rsp), %r15
rolq %r15
rolq $0x6, %rcx
movq 0x28(%rsp), %rdx
rolq $0x19, %rdx
movq -0x30(%rsp), %r13
rolq $0x8, %r13
movq 0x58(%rsp), %rsi
rolq $0x12, %rsi
movq %rcx, %rax
notq %rax
andq %rdx, %rax
movq %r13, %r14
notq %r14
andq %rsi, %r14
xorq %rdx, %r14
notq %rdx
andq %r13, %rdx
movq %rsi, %r12
notq %r12
andq %r15, %r12
xorq %r13, %r12
xorq %r15, %rax
xorq %rcx, %rdx
notq %r15
andq %rcx, %r15
xorq %rsi, %r15
movq -0x58(%rsp), %rcx
xorq %rcx, %r9
xorq %rbx, %rbp
notq %rcx
andq %rbx, %rcx
movq 0x60(%rsp), %rsi
xorq 0x20(%rsp), %rcx
movq %rcx, -0x30(%rsp)
leaq 0xbda94(%rip), %rcx # 0x6d8300
movq -0x60(%rsp), %rbx
xorq (%rsi,%rcx), %rbx
movq -0x80(%rsp), %rcx
xorq 0x10(%rsp), %rcx
movq %rcx, -0x80(%rsp)
xorq 0x38(%rsp), %rbx
movq %rbx, -0x60(%rsp)
addq $0x8, %rsi
cmpq $0xc0, %rsi
jne 0x61a468
movq %rax, %rcx
movq 0x48(%rsp), %rax
movq -0x18(%rsp), %rsi
movq %rsi, 0x8(%rax)
movq %r10, 0x30(%rax)
movq %rdx, 0x58(%rax)
movq %rbp, 0x80(%rax)
movq -0x48(%rsp), %rdx
movq %rdx, 0xa8(%rax)
movq -0x8(%rsp), %rdx
movq %rdx, 0x20(%rax)
movq -0x20(%rsp), %rsi
movq %rsi, 0x48(%rax)
movq %r15, 0x70(%rax)
movq -0x30(%rsp), %rsi
movq %rsi, 0x98(%rax)
movq -0x80(%rsp), %rsi
movq %rsi, 0xc0(%rax)
movq -0x10(%rsp), %rdx
movq %rdx, 0x10(%rax)
movq -0x38(%rsp), %rsi
movq %rsi, 0x38(%rax)
movq %r14, 0x60(%rax)
movq -0x68(%rsp), %rsi
movq %rsi, 0x88(%rax)
movq -0x40(%rsp), %rdx
movq %rdx, 0xb0(%rax)
movq -0x60(%rsp), %rdx
movq %rdx, (%rax)
movq -0x50(%rsp), %rdx
movq %rdx, 0x28(%rax)
movq %rcx, 0x50(%rax)
movq %r9, 0x78(%rax)
movq (%rsp), %rcx
movq %rcx, 0xa0(%rax)
movq %rdi, 0x18(%rax)
movq -0x28(%rsp), %rcx
movq %rcx, 0x40(%rax)
movq %r12, 0x68(%rax)
movq %r8, 0x90(%rax)
movq 0x8(%rsp), %rcx
movq %rcx, 0xb8(%rax)
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/sha3.c
|
rhash_sha3_final
|
/**
 * Finish hashing and store the message digest.
 *
 * On the first call the remaining queued bytes are padded per the SHA-3
 * scheme (0x06 domain-separation suffix, 0x80 in the last byte of the
 * block), the final block is absorbed, and the context is marked
 * finalized so repeated calls do not re-process data.
 *
 * @param ctx    the algorithm context containing current hashing state
 * @param result buffer receiving the digest, or NULL to finalize only
 */
void rhash_sha3_final(sha3_ctx* ctx, unsigned char* result)
{
	const size_t block_size = ctx->block_size;
	size_t digest_length = 100 - ctx->block_size / 2;
	if (!(ctx->rest & SHA3_FINALIZED))
	{
		char* msg = (char*)ctx->message;
		/* zero-fill the unused tail of the data queue, then pad */
		memset(msg + ctx->rest, 0, block_size - ctx->rest);
		msg[ctx->rest] |= 0x06;
		msg[block_size - 1] |= 0x80;
		/* absorb the final padded block */
		rhash_sha3_process_block(ctx->hash, ctx->message, block_size);
		ctx->rest = SHA3_FINALIZED; /* mark context as finalized */
	}
	assert(block_size > digest_length);
	if (result) me64_to_le_str(result, ctx->hash, digest_length);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
movl 0x18c(%rdi), %r12d
movl %r12d, %eax
shrl %eax
movl $0x64, %r15d
subl %eax, %r15d
movslq 0x188(%rdi), %rax
testq %rax, %rax
js 0x61a9fc
leaq 0xc8(%rbx), %r13
leaq (%rbx,%rax), %rdi
addq $0xc8, %rdi
movq %r12, %rdx
subq %rax, %rdx
xorl %esi, %esi
callq 0x3fa90
movl 0x188(%rbx), %eax
orb $0x6, 0xc8(%rbx,%rax)
orb $-0x80, 0xc7(%rbx,%r12)
movq %rbx, %rdi
movq %r13, %rsi
movq %r12, %rdx
callq 0x61a2af
movl $0x80000000, 0x188(%rbx) # imm = 0x80000000
cmpl %r15d, %r12d
jbe 0x61aa27
testq %r14, %r14
je 0x61aa1d
movq %r14, %rdi
movq %rbx, %rsi
movq %r15, %rdx
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
jmp 0x3f250
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
leaq 0xbd812(%rip), %rdi # 0x6d8240
leaq 0xbd826(%rip), %rsi # 0x6d825b
leaq 0xbd88d(%rip), %rcx # 0x6d82c9
movl $0x14b, %edx # imm = 0x14B
callq 0x3f4b0
nop
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/sha3.c
|
rhash_sha512_process_block
|
/*
 * SHA-512 compression function: absorb one 128-byte message block
 * (16 big-endian 64-bit words) into the 8-word hash state, using the
 * alternate computation method of FIPS 180-3 section 6.1.3 with a
 * rolling 16-entry message-schedule window W[] instead of a full
 * 80-entry array.
 */
static void rhash_sha512_process_block(uint64_t hash[8], uint64_t block[16])
{
	uint64_t A, B, C, D, E, F, G, H; /* the eight working variables a..h */
	uint64_t W[16];                  /* circular message-schedule buffer */
	const uint64_t* k;               /* cursor into the round-constant table */
	int i;
	A = hash[0], B = hash[1], C = hash[2], D = hash[3];
	E = hash[4], F = hash[5], G = hash[6], H = hash[7];
	/* Compute SHA using alternate Method: FIPS 180-3 6.1.3 */
	/* Rounds 0..15: W[t] is the byte-swapped message word; the working
	 * variables are rotated by permuting the macro arguments rather than
	 * by copying values, so each macro line is one round. */
	ROUND_1_16(A, B, C, D, E, F, G, H, 0);
	ROUND_1_16(H, A, B, C, D, E, F, G, 1);
	ROUND_1_16(G, H, A, B, C, D, E, F, 2);
	ROUND_1_16(F, G, H, A, B, C, D, E, 3);
	ROUND_1_16(E, F, G, H, A, B, C, D, 4);
	ROUND_1_16(D, E, F, G, H, A, B, C, 5);
	ROUND_1_16(C, D, E, F, G, H, A, B, 6);
	ROUND_1_16(B, C, D, E, F, G, H, A, 7);
	ROUND_1_16(A, B, C, D, E, F, G, H, 8);
	ROUND_1_16(H, A, B, C, D, E, F, G, 9);
	ROUND_1_16(G, H, A, B, C, D, E, F, 10);
	ROUND_1_16(F, G, H, A, B, C, D, E, 11);
	ROUND_1_16(E, F, G, H, A, B, C, D, 12);
	ROUND_1_16(D, E, F, G, H, A, B, C, 13);
	ROUND_1_16(C, D, E, F, G, H, A, B, 14);
	ROUND_1_16(B, C, D, E, F, G, H, A, 15);
	/* Rounds 16..79, 16 at a time: W[] entries are recomputed in place
	 * from the schedule recurrence, k advances through rhash_k512[]. */
	for (i = 16, k = &rhash_k512[16]; i < 80; i += 16, k += 16) {
		ROUND_17_80(A, B, C, D, E, F, G, H, 0);
		ROUND_17_80(H, A, B, C, D, E, F, G, 1);
		ROUND_17_80(G, H, A, B, C, D, E, F, 2);
		ROUND_17_80(F, G, H, A, B, C, D, E, 3);
		ROUND_17_80(E, F, G, H, A, B, C, D, 4);
		ROUND_17_80(D, E, F, G, H, A, B, C, 5);
		ROUND_17_80(C, D, E, F, G, H, A, B, 6);
		ROUND_17_80(B, C, D, E, F, G, H, A, 7);
		ROUND_17_80(A, B, C, D, E, F, G, H, 8);
		ROUND_17_80(H, A, B, C, D, E, F, G, 9);
		ROUND_17_80(G, H, A, B, C, D, E, F, 10);
		ROUND_17_80(F, G, H, A, B, C, D, E, 11);
		ROUND_17_80(E, F, G, H, A, B, C, D, 12);
		ROUND_17_80(D, E, F, G, H, A, B, C, 13);
		ROUND_17_80(C, D, E, F, G, H, A, B, 14);
		ROUND_17_80(B, C, D, E, F, G, H, A, 15);
	}
	/* Add the compressed block back into the chaining state. */
	hash[0] += A, hash[1] += B, hash[2] += C, hash[3] += D;
	hash[4] += E, hash[5] += F, hash[6] += G, hash[7] += H;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xc8, %rsp
movq (%rdi), %r12
movq 0x8(%rdi), %r15
movq 0x10(%rdi), %rbx
movq 0x18(%rdi), %r11
movq 0x20(%rdi), %r14
movq 0x28(%rdi), %r10
movq 0x30(%rdi), %r9
movq %r14, %rax
rolq $0x32, %rax
movq %rdi, 0xb0(%rsp)
movq 0x38(%rdi), %rdx
movq %r14, %rcx
rolq $0x2e, %rcx
xorq %rax, %rcx
movq %r14, %rax
rolq $0x17, %rax
xorq %rcx, %rax
movq %r9, %rcx
xorq %r10, %rcx
andq %r14, %rcx
xorq %r9, %rcx
movq (%rsi), %rdi
bswapq %rdi
movq %rdx, 0xa0(%rsp)
addq %rdx, %rcx
addq %rax, %rcx
movabsq $0x428a2f98d728ae22, %rax # imm = 0x428A2F98D728AE22
movq %rdi, 0x20(%rsp)
addq %rdi, %rax
addq %rcx, %rax
movq %r12, %rcx
rolq $0x24, %rcx
movq %r12, %rdx
rolq $0x1e, %rdx
xorq %rcx, %rdx
movq %r12, %rcx
rolq $0x19, %rcx
xorq %rdx, %rcx
movq %r15, %rdx
andq %r12, %rdx
movq %r15, %r8
xorq %r12, %r8
andq %rbx, %r8
xorq %rdx, %r8
addq %rcx, %r8
addq %rax, %r8
movq %r11, 0x88(%rsp)
addq %r11, %rax
movq %rax, %rcx
rolq $0x32, %rcx
movq %rax, %rdx
rolq $0x2e, %rdx
xorq %rcx, %rdx
movq %rax, %rcx
rolq $0x17, %rcx
xorq %rdx, %rcx
movq %r10, %rdi
xorq %r14, %rdi
andq %rax, %rdi
xorq %r10, %rdi
movq 0x8(%rsi), %rdx
bswapq %rdx
movq %r9, 0xa8(%rsp)
movq %rdx, 0x18(%rsp)
addq %r9, %rdx
addq %rdx, %rdi
movabsq $0x7137449123ef65cd, %rdx # imm = 0x7137449123EF65CD
addq %rdi, %rdx
addq %rcx, %rdx
movq %r8, %rcx
rolq $0x24, %rcx
movq %r8, %rdi
rolq $0x1e, %rdi
xorq %rcx, %rdi
movq %r8, %rcx
rolq $0x19, %rcx
xorq %rdi, %rcx
movq %r8, %rdi
andq %r12, %rdi
movq %r8, %r11
xorq %r12, %r11
andq %r15, %r11
xorq %rdi, %r11
addq %rcx, %r11
addq %rdx, %r11
movq %rbx, 0x80(%rsp)
addq %rbx, %rdx
movq %rdx, %rcx
rolq $0x32, %rcx
movq %rdx, %rdi
rolq $0x2e, %rdi
xorq %rcx, %rdi
movq %rdx, %r9
rolq $0x17, %r9
xorq %rdi, %r9
movq %rax, %rdi
xorq %r14, %rdi
andq %rdx, %rdi
xorq %r14, %rdi
movq 0x10(%rsi), %rcx
bswapq %rcx
movq %r10, 0x98(%rsp)
movq %rcx, 0x10(%rsp)
addq %r10, %rcx
addq %rcx, %rdi
movabsq $-0x4a3f043013b2c4d1, %rcx # imm = 0xB5C0FBCFEC4D3B2F
addq %rdi, %rcx
addq %r9, %rcx
movq %r11, %rdi
rolq $0x24, %rdi
movq %r11, %r9
rolq $0x1e, %r9
xorq %rdi, %r9
movq %r11, %rdi
rolq $0x19, %rdi
xorq %r9, %rdi
movq %r11, %r9
andq %r8, %r9
movq %r11, %r10
xorq %r8, %r10
andq %r12, %r10
xorq %r9, %r10
addq %rdi, %r10
addq %rcx, %r10
movq %r15, 0x78(%rsp)
addq %r15, %rcx
movq %rcx, %rdi
rolq $0x32, %rdi
movq %rcx, %r9
rolq $0x2e, %r9
xorq %rdi, %r9
movq %rcx, %rdi
rolq $0x17, %rdi
xorq %r9, %rdi
movq %rdx, %rbx
xorq %rax, %rbx
andq %rcx, %rbx
xorq %rax, %rbx
movq 0x18(%rsi), %r9
bswapq %r9
movq %r14, 0x90(%rsp)
movq %r9, 0x8(%rsp)
addq %r14, %r9
addq %r9, %rbx
movabsq $-0x164a245a7e762444, %r9 # imm = 0xE9B5DBA58189DBBC
addq %rbx, %r9
addq %rdi, %r9
movq %r10, %rdi
rolq $0x24, %rdi
movq %r10, %rbx
rolq $0x1e, %rbx
xorq %rdi, %rbx
movq %r10, %r14
rolq $0x19, %r14
xorq %rbx, %r14
movq %r10, %rbx
andq %r11, %rbx
movq %r10, %rdi
xorq %r11, %rdi
andq %r8, %rdi
xorq %rbx, %rdi
addq %r14, %rdi
addq %r9, %rdi
movq %r12, 0x70(%rsp)
addq %r12, %r9
movq %r9, %rbx
rolq $0x32, %rbx
movq %r9, %r14
rolq $0x2e, %r14
xorq %rbx, %r14
movq %r9, %rbx
rolq $0x17, %rbx
xorq %r14, %rbx
movq %rcx, %r14
xorq %rdx, %r14
andq %r9, %r14
xorq %rdx, %r14
movq 0x20(%rsi), %r15
bswapq %r15
movq %r15, (%rsp)
addq %r15, %rax
addq %r14, %rax
movabsq $0x3956c25bf348b538, %r14 # imm = 0x3956C25BF348B538
addq %rax, %r14
addq %rbx, %r14
movq %rdi, %rax
rolq $0x24, %rax
movq %rdi, %rbx
rolq $0x1e, %rbx
xorq %rax, %rbx
movq %rdi, %r15
rolq $0x19, %r15
xorq %rbx, %r15
movq %rdi, %rbx
andq %r10, %rbx
movq %rdi, %rax
xorq %r10, %rax
andq %r11, %rax
xorq %rbx, %rax
addq %r15, %rax
addq %r14, %r8
addq %r14, %rax
movq %r8, %rbx
rolq $0x32, %rbx
movq %r8, %r14
rolq $0x2e, %r14
xorq %rbx, %r14
movq %r8, %rbx
rolq $0x17, %rbx
xorq %r14, %rbx
movq %r9, %r14
xorq %rcx, %r14
andq %r8, %r14
xorq %rcx, %r14
movq 0x28(%rsi), %r15
bswapq %r15
movq %r15, -0x8(%rsp)
addq %r15, %rdx
addq %r14, %rdx
movabsq $0x59f111f1b605d019, %r14 # imm = 0x59F111F1B605D019
addq %rdx, %r14
addq %rbx, %r14
movq %rax, %rdx
rolq $0x24, %rdx
movq %rax, %rbx
rolq $0x1e, %rbx
xorq %rdx, %rbx
movq %rax, %r15
rolq $0x19, %r15
xorq %rbx, %r15
movq %rax, %rbx
andq %rdi, %rbx
movq %rax, %rdx
xorq %rdi, %rdx
andq %r10, %rdx
xorq %rbx, %rdx
addq %r15, %rdx
addq %r14, %r11
addq %r14, %rdx
movq %r11, %rbx
rolq $0x32, %rbx
movq %r11, %r14
rolq $0x2e, %r14
xorq %rbx, %r14
movq %r11, %rbx
rolq $0x17, %rbx
xorq %r14, %rbx
movq %r8, %r14
xorq %r9, %r14
andq %r11, %r14
xorq %r9, %r14
movq 0x30(%rsi), %r15
bswapq %r15
movq %r15, -0x20(%rsp)
addq %r15, %rcx
addq %r14, %rcx
movq %rdx, %r14
rolq $0x24, %r14
movabsq $-0x6dc07d5b50e6b065, %r15 # imm = 0x923F82A4AF194F9B
addq %rcx, %r15
movq %rdx, %rcx
rolq $0x1e, %rcx
addq %rbx, %r15
movq %rdx, %rbx
rolq $0x19, %rbx
xorq %r14, %rcx
xorq %rcx, %rbx
movq %rdx, %r14
andq %rax, %r14
movq %rdx, %rcx
xorq %rax, %rcx
andq %rdi, %rcx
xorq %r14, %rcx
addq %r15, %r10
movq %r10, %r14
rolq $0x32, %r14
addq %rbx, %rcx
movq %r10, %rbx
rolq $0x2e, %rbx
addq %r15, %rcx
movq %r10, %r15
rolq $0x17, %r15
xorq %r14, %rbx
xorq %rbx, %r15
movq %r11, %rbx
xorq %r8, %rbx
andq %r10, %rbx
movq 0x38(%rsi), %r14
bswapq %r14
movq %r14, -0x18(%rsp)
xorq %r8, %rbx
addq %r14, %r9
addq %rbx, %r9
movabsq $-0x54e3a12a25927ee8, %rbx # imm = 0xAB1C5ED5DA6D8118
addq %r9, %rbx
movq %rcx, %r9
rolq $0x24, %r9
addq %r15, %rbx
movq %rcx, %r14
rolq $0x1e, %r14
xorq %r9, %r14
movq %rcx, %r15
rolq $0x19, %r15
xorq %r14, %r15
movq %rcx, %r14
andq %rdx, %r14
movq %rcx, %r9
xorq %rdx, %r9
andq %rax, %r9
xorq %r14, %r9
addq %r15, %r9
addq %rbx, %rdi
movq %rdi, %r14
rolq $0x32, %r14
addq %rbx, %r9
movq %rdi, %rbx
rolq $0x2e, %rbx
xorq %r14, %rbx
movq %rdi, %r14
rolq $0x17, %r14
xorq %rbx, %r14
movq %r10, %rbx
xorq %r11, %rbx
andq %rdi, %rbx
xorq %r11, %rbx
movq 0x40(%rsi), %r15
bswapq %r15
movq %r15, -0x10(%rsp)
addq %r15, %r8
addq %rbx, %r8
movabsq $-0x27f855675cfcfdbe, %rbx # imm = 0xD807AA98A3030242
addq %r8, %rbx
addq %r14, %rbx
movq %r9, %r8
rolq $0x24, %r8
movq %r9, %r14
rolq $0x1e, %r14
xorq %r8, %r14
movq %r9, %r15
rolq $0x19, %r15
xorq %r14, %r15
movq %r9, %r14
andq %rcx, %r14
movq %r9, %r8
xorq %rcx, %r8
andq %rdx, %r8
xorq %r14, %r8
addq %r15, %r8
addq %rbx, %rax
addq %rbx, %r8
movq %rax, %rbx
rolq $0x32, %rbx
movq %rax, %r14
rolq $0x2e, %r14
xorq %rbx, %r14
movq %rax, %rbx
rolq $0x17, %rbx
xorq %r14, %rbx
movq %rdi, %r14
xorq %r10, %r14
andq %rax, %r14
xorq %r10, %r14
movq 0x48(%rsi), %r15
bswapq %r15
movq %r15, -0x28(%rsp)
addq %r15, %r11
addq %r14, %r11
movabsq $0x12835b0145706fbe, %r14 # imm = 0x12835B0145706FBE
addq %r11, %r14
addq %rbx, %r14
movq %r8, %r11
rolq $0x24, %r11
movq %r8, %rbx
rolq $0x1e, %rbx
xorq %r11, %rbx
movq %r8, %r11
rolq $0x19, %r11
xorq %rbx, %r11
movq %r8, %rbx
andq %r9, %rbx
movq %r8, %r15
xorq %r9, %r15
andq %rcx, %r15
xorq %rbx, %r15
addq %r11, %r15
addq %r14, %rdx
addq %r14, %r15
movq %rdx, %r11
rolq $0x32, %r11
movq %rdx, %rbx
rolq $0x2e, %rbx
xorq %r11, %rbx
movq %rdx, %r11
rolq $0x17, %r11
xorq %rbx, %r11
movq %rax, %rbx
xorq %rdi, %rbx
andq %rdx, %rbx
xorq %rdi, %rbx
movq 0x50(%rsi), %r14
bswapq %r14
movq %r14, -0x30(%rsp)
addq %r14, %r10
addq %rbx, %r10
movq %r15, %rbx
rolq $0x24, %rbx
movabsq $0x243185be4ee4b28c, %r14 # imm = 0x243185BE4EE4B28C
addq %r10, %r14
movq %r15, %r10
rolq $0x1e, %r10
addq %r11, %r14
movq %r15, %r11
rolq $0x19, %r11
xorq %rbx, %r10
xorq %r10, %r11
movq %r15, %r10
andq %r8, %r10
movq %r15, %rbp
xorq %r8, %rbp
andq %r9, %rbp
xorq %r10, %rbp
addq %r14, %rcx
movq %rcx, %r10
rolq $0x32, %r10
addq %r11, %rbp
movq %rcx, %r11
rolq $0x2e, %r11
addq %r14, %rbp
movq %rcx, %rbx
rolq $0x17, %rbx
xorq %r10, %r11
xorq %r11, %rbx
movq %rdx, %r10
xorq %rax, %r10
andq %rcx, %r10
movq 0x58(%rsi), %r11
bswapq %r11
xorq %rax, %r10
movq %r11, -0x38(%rsp)
addq %r11, %rdi
addq %r10, %rdi
movabsq $0x550c7dc3d5ffb4e2, %r10 # imm = 0x550C7DC3D5FFB4E2
addq %rdi, %r10
movq %rbp, %rdi
rolq $0x24, %rdi
addq %rbx, %r10
movq %rbp, %r11
rolq $0x1e, %r11
xorq %rdi, %r11
movq %rbp, %rdi
rolq $0x19, %rdi
xorq %r11, %rdi
movq %rbp, %r11
andq %r15, %r11
movq %rbp, %r13
xorq %r15, %r13
andq %r8, %r13
xorq %r11, %r13
addq %rdi, %r13
addq %r10, %r9
movq %r9, %rdi
rolq $0x32, %rdi
addq %r10, %r13
movq %r9, %r10
rolq $0x2e, %r10
xorq %rdi, %r10
movq %r9, %rdi
rolq $0x17, %rdi
xorq %r10, %rdi
movq %rcx, %r10
xorq %rdx, %r10
andq %r9, %r10
movq %r9, %r11
xorq %rdx, %r10
movq 0x60(%rsi), %rbx
bswapq %rbx
movq %rbx, -0x70(%rsp)
addq %rbx, %rax
addq %r10, %rax
movabsq $0x72be5d74f27b896f, %r10 # imm = 0x72BE5D74F27B896F
addq %rax, %r10
addq %rdi, %r10
movq %r13, %rax
rolq $0x24, %rax
movq %r13, %rdi
rolq $0x1e, %rdi
xorq %rax, %rdi
movq %r13, %rax
rolq $0x19, %rax
xorq %rdi, %rax
movq %r13, %rdi
andq %rbp, %rdi
movq %r13, %r12
xorq %rbp, %r12
andq %r15, %r12
xorq %rdi, %r12
addq %rax, %r12
addq %r10, %r8
addq %r10, %r12
movq %r8, %rax
rolq $0x32, %rax
movq %r8, %rdi
rolq $0x2e, %rdi
xorq %rax, %rdi
movq %r8, %rax
rolq $0x17, %rax
xorq %rdi, %rax
movq %r9, %rdi
xorq %rcx, %rdi
andq %r8, %rdi
xorq %rcx, %rdi
movq 0x68(%rsi), %r10
bswapq %r10
movq %r10, -0x68(%rsp)
addq %r10, %rdx
addq %rdi, %rdx
movabsq $-0x7f214e01c4e9694f, %rdi # imm = 0x80DEB1FE3B1696B1
addq %rdx, %rdi
addq %rax, %rdi
movq %r12, %rax
rolq $0x24, %rax
movq %r12, %rdx
rolq $0x1e, %rdx
xorq %rax, %rdx
movq %r12, %rax
rolq $0x19, %rax
xorq %rdx, %rax
movq %r12, %rdx
andq %r13, %rdx
movq %r12, %rbx
xorq %r13, %rbx
andq %rbp, %rbx
xorq %rdx, %rbx
addq %rax, %rbx
addq %rdi, %r15
addq %rdi, %rbx
movq %r15, %rax
rolq $0x32, %rax
movq %r15, %rdx
rolq $0x2e, %rdx
xorq %rax, %rdx
movq %r15, %rax
rolq $0x17, %rax
xorq %rdx, %rax
movq %r8, %rdx
xorq %r9, %rdx
andq %r15, %rdx
xorq %r9, %rdx
movq 0x70(%rsi), %r10
bswapq %r10
addq %r10, %rcx
addq %rdx, %rcx
movq %rbx, %rdx
rolq $0x24, %rdx
movabsq $-0x6423f958da38edcb, %rdi # imm = 0x9BDC06A725C71235
addq %rcx, %rdi
movq %rbx, %rcx
rolq $0x1e, %rcx
addq %rax, %rdi
movq %rbx, %rax
rolq $0x19, %rax
xorq %rdx, %rcx
xorq %rcx, %rax
movq %rbx, %rcx
andq %r12, %rcx
movq %rbx, %r14
xorq %r12, %r14
andq %r13, %r14
xorq %rcx, %r14
addq %rdi, %rbp
movq %rbp, %rcx
rolq $0x32, %rcx
addq %rax, %r14
movq %rbp, %rax
rolq $0x2e, %rax
addq %rdi, %r14
movq %rbp, %rdx
rolq $0x17, %rdx
xorq %rcx, %rax
xorq %rax, %rdx
movq %r15, %rax
xorq %r8, %rax
andq %rbp, %rax
movq 0x78(%rsi), %rcx
movq %r10, %rsi
bswapq %rcx
xorq %r8, %rax
movq %rcx, -0x78(%rsp)
addq %rcx, %r11
addq %rax, %r11
movabsq $-0x3e640e8b3096d96c, %rax # imm = 0xC19BF174CF692694
addq %r11, %rax
movq %r14, %rcx
rolq $0x24, %rcx
addq %rdx, %rax
movq %r14, %rdx
rolq $0x1e, %rdx
xorq %rcx, %rdx
movq %r14, %rcx
rolq $0x19, %rcx
xorq %rdx, %rcx
movq %r14, %rdx
andq %rbx, %rdx
movq %r14, %rdi
xorq %rbx, %rdi
andq %r12, %rdi
xorq %rdx, %rdi
addq %rcx, %rdi
addq %rax, %r13
addq %rax, %rdi
xorl %r9d, %r9d
movq %r14, -0x50(%rsp)
movq %r9, -0x80(%rsp)
movq %r13, %rax
rolq $0x32, %rax
movq %r13, %rcx
rolq $0x2e, %rcx
xorq %rax, %rcx
movq %r13, %rax
rolq $0x17, %rax
xorq %rcx, %rax
movq %rbp, %rcx
xorq %r15, %rcx
andq %r13, %rcx
xorq %r15, %rcx
addq %r8, %rcx
movq 0x18(%rsp), %r11
movq %r11, %rdx
rorq %rdx
addq %rax, %rcx
movq %r11, %rax
rolq $0x38, %rax
xorq %rdx, %rax
movq %rsi, %rdx
rolq $0x2d, %rdx
movq %rsi, %r8
movq %rsi, -0x60(%rsp)
rolq $0x3, %rsi
xorq %rdx, %rsi
movq %r11, %rdx
shrq $0x7, %rdx
xorq %rax, %rdx
movq 0x20(%rsp), %r10
addq -0x28(%rsp), %r10
addq %rdx, %r10
movq %r8, %rax
shrq $0x6, %rax
xorq %rsi, %rax
movq %rdi, %rdx
rolq $0x24, %rdx
movq %rdi, %rsi
rolq $0x1e, %rsi
xorq %rdx, %rsi
movq %rdi, %rdx
rolq $0x19, %rdx
xorq %rsi, %rdx
addq %rax, %r10
movq %rdi, %rax
andq %r14, %rax
movq %rdi, %r8
xorq %r14, %r8
andq %rbx, %r8
xorq %rax, %r8
addq %rdx, %r8
addq %r10, %rcx
leaq 0xbcf69(%rip), %rax # 0x6d8440
addq 0x80(%rax,%r9,8), %rcx
addq %rcx, %r12
addq %rcx, %r8
movq -0x78(%rsp), %r9
movq %r9, %rax
rolq $0x2d, %rax
movq %r9, %rcx
rolq $0x3, %rcx
xorq %rax, %rcx
movq %r9, %rax
shrq $0x6, %rax
movq 0x10(%rsp), %r9
movq %r9, %rdx
rorq %rdx
xorq %rcx, %rax
movq %r9, %rsi
rolq $0x38, %rsi
xorq %rdx, %rsi
movq %r12, %rcx
rolq $0x32, %rcx
movq %r12, %rdx
rolq $0x2e, %rdx
xorq %rcx, %rdx
movq %r12, %rcx
rolq $0x17, %rcx
xorq %rdx, %rcx
movq %r9, %rdx
shrq $0x7, %rdx
xorq %rsi, %rdx
addq -0x30(%rsp), %r11
addq %rdx, %r11
movq %r11, %rsi
movq %r8, %r11
movq %r8, %rdx
rolq $0x24, %rdx
addq %rax, %rsi
movq %rsi, %r8
movq %rsi, 0x18(%rsp)
movq %r11, %rax
rolq $0x1e, %rax
xorq %rdx, %rax
movq %r11, %rdx
rolq $0x19, %rdx
xorq %rax, %rdx
movq %r11, %rax
andq %rdi, %rax
movq %r11, %rsi
xorq %rdi, %rsi
andq %r14, %rsi
xorq %rax, %rsi
addq %rdx, %rsi
movq %rsi, %r14
movq %r13, %rax
xorq %rbp, %rax
andq %r12, %rax
xorq %rbp, %rax
addq %r8, %r15
movq -0x80(%rsp), %r8
leaq 0xbce9a(%rip), %rdx # 0x6d8440
addq 0x88(%rdx,%r8,8), %r15
addq %rax, %r15
movq %r10, %rsi
movq %r10, %rax
rolq $0x2d, %rax
movq %r10, %rdx
movq %rdi, 0x28(%rsp)
movq %r10, 0x20(%rsp)
rolq $0x3, %rdx
xorq %rax, %rdx
addq %rcx, %r15
movq 0x8(%rsp), %r10
movq %r10, %rax
rorq %rax
movq %r10, %rcx
rolq $0x38, %rcx
shrq $0x6, %rsi
xorq %rdx, %rsi
xorq %rax, %rcx
addq %r15, %rbx
addq %r15, %r14
movq %r10, %rax
shrq $0x7, %rax
xorq %rcx, %rax
addq -0x38(%rsp), %r9
addq %rax, %r9
movq %rbx, %rax
rolq $0x32, %rax
movq %rbx, %rcx
rolq $0x2e, %rcx
xorq %rax, %rcx
addq %rsi, %r9
movq %r9, 0x10(%rsp)
movq %r12, %rax
xorq %r13, %rax
andq %rbx, %rax
addq %r9, %rbp
leaq 0xbce0e(%rip), %rdx # 0x6d8440
addq 0x90(%rdx,%r8,8), %rbp
movq %r8, %r9
xorq %r13, %rax
addq %rax, %rbp
movq %r14, %rax
rolq $0x24, %rax
movq %r14, %rdx
rolq $0x1e, %rdx
xorq %rax, %rdx
movq 0x18(%rsp), %r8
movq %r8, %rax
rolq $0x2d, %rax
movq %r8, %rsi
rolq $0x3, %rsi
xorq %rax, %rsi
movq %r8, %rax
shrq $0x6, %rax
xorq %rsi, %rax
movq %r14, %rsi
andq %r11, %rsi
movq %r14, %r8
xorq %r11, %r8
andq %rdi, %r8
xorq %rsi, %r8
movq %r8, %rdi
movq %rbx, %rsi
rolq $0x17, %rsi
xorq %rcx, %rsi
movq %r14, %rcx
rolq $0x19, %rcx
xorq %rdx, %rcx
movq (%rsp), %r15
movq %r15, %rdx
rorq %rdx
movq %r15, %r8
rolq $0x38, %r8
xorq %rdx, %r8
addq %rsi, %rbp
movq %r15, %rdx
shrq $0x7, %rdx
xorq %r8, %rdx
addq %rcx, %rdi
addq -0x70(%rsp), %r10
addq %rdx, %r10
movq -0x50(%rsp), %rsi
addq %rbp, %rsi
addq %rbp, %rdi
addq %rax, %r10
movq %r10, 0x8(%rsp)
movq %rbx, %rax
xorq %r12, %rax
andq %rsi, %rax
addq %r10, %r13
movq %r9, %rbp
leaq 0xbcd4e(%rip), %rcx # 0x6d8440
addq 0x98(%rcx,%r9,8), %r13
xorq %r12, %rax
addq %rax, %r13
movq %rsi, %rax
rolq $0x32, %rax
movq %rsi, %rcx
rolq $0x2e, %rcx
xorq %rax, %rcx
movq %rsi, %rdx
movq %rsi, %r9
movq %rsi, -0x50(%rsp)
rolq $0x17, %rdx
xorq %rcx, %rdx
movq %rdi, %r10
movq %rdi, %rax
rolq $0x24, %rax
movq %rdi, %rcx
rolq $0x1e, %rcx
xorq %rax, %rcx
movq %rdi, %rax
rolq $0x19, %rax
xorq %rcx, %rax
addq %rdx, %r13
movq %rdi, %rcx
andq %r14, %rcx
movq %rdi, %r8
xorq %r14, %r8
movq %r14, 0xc0(%rsp)
movq 0x10(%rsp), %rsi
movq %rsi, %rdx
rolq $0x2d, %rdx
andq %r11, %r8
xorq %rcx, %r8
movq %rsi, %rcx
rolq $0x3, %rcx
addq %rax, %r8
xorq %rdx, %rcx
movq 0x28(%rsp), %rdi
addq %r13, %rdi
addq %r13, %r8
movq -0x8(%rsp), %r13
movq %r13, %rax
rorq %rax
movq %r13, %rdx
rolq $0x38, %rdx
shrq $0x6, %rsi
xorq %rcx, %rsi
xorq %rax, %rdx
movq %r13, %rax
shrq $0x7, %rax
xorq %rdx, %rax
movq %rdi, %rcx
rolq $0x32, %rcx
movq %rdi, %rdx
rolq $0x2e, %rdx
xorq %rcx, %rdx
movq %rdi, %rcx
movq %rdi, 0x28(%rsp)
rolq $0x17, %rcx
xorq %rdx, %rcx
addq -0x68(%rsp), %r15
addq %rax, %r15
addq %rsi, %r15
movq %r15, (%rsp)
xorq %rbx, %r9
andq %rdi, %r9
addq %r15, %r12
xorq %rbx, %r9
leaq 0xbcc56(%rip), %rdx # 0x6d8440
addq 0xa0(%rdx,%rbp,8), %r12
movq %rbp, %rdi
addq %r9, %r12
addq %rcx, %r12
movq %r8, 0x58(%rsp)
movq %r8, %rax
rolq $0x24, %rax
movq %r8, %rcx
rolq $0x1e, %rcx
xorq %rax, %rcx
movq %r8, %rax
rolq $0x19, %rax
xorq %rcx, %rax
movq %r8, %rcx
movq %r10, 0x68(%rsp)
andq %r10, %rcx
xorq %r10, %r8
andq %r14, %r8
xorq %rcx, %r8
addq %rax, %r8
addq %r12, %r11
addq %r12, %r8
movq %r8, 0x38(%rsp)
movq 0x8(%rsp), %r10
movq %r10, %rax
rolq $0x2d, %rax
movq %r10, %rdx
movq %r10, %rcx
rolq $0x3, %rdx
xorq %rax, %rdx
movq -0x20(%rsp), %rsi
movq %rsi, %rax
rorq %rax
shrq $0x6, %rcx
xorq %rdx, %rcx
movq %rsi, %rdx
movq %rsi, -0x20(%rsp)
rolq $0x38, %rdx
xorq %rax, %rdx
movq %r11, %rax
rolq $0x32, %rax
movq %r11, %r8
movq %r11, 0x40(%rsp)
rolq $0x2e, %r8
xorq %rax, %r8
movq %r8, -0x40(%rsp)
movq %rsi, %rax
shrq $0x7, %rax
xorq %rdx, %rax
movq -0x60(%rsp), %r15
addq %r15, %r13
addq %rax, %r13
movq -0x18(%rsp), %r10
movq %r10, %rax
rolq $0x38, %rax
movq %rax, 0x60(%rsp)
addq %rcx, %r13
movq %r13, -0x8(%rsp)
movq -0x10(%rsp), %r8
movq %r8, %rax
rolq $0x38, %rax
movq %rax, 0x50(%rsp)
movq -0x28(%rsp), %rax
rolq $0x38, %rax
movq %rax, -0x58(%rsp)
movq -0x30(%rsp), %rbp
movq %rbp, %rax
rolq $0x38, %rax
movq %rax, -0x48(%rsp)
leaq 0xbcb49(%rip), %rax # 0x6d8440
movq 0xa8(%rax,%rdi,8), %r14
addq %r13, %r14
addq %rbx, %r14
movq (%rsp), %r9
movq %r9, %rsi
rolq $0x2d, %rsi
movq -0x38(%rsp), %rcx
rolq $0x38, %rcx
movq %rcx, 0x48(%rsp)
movq -0x70(%rsp), %rbx
movq %rbx, %rax
rolq $0x38, %rax
movq %rax, 0x30(%rsp)
movq 0x28(%rsp), %rax
movq -0x50(%rsp), %rcx
xorq %rcx, %rax
movq %r10, -0x18(%rsp)
movq -0x68(%rsp), %rdi
movq %rdi, %rdx
rolq $0x38, %rdx
movq %rdx, 0xb8(%rsp)
andq %r11, %rax
xorq %rcx, %rax
movq 0x38(%rsp), %r11
movq %r11, %rcx
rolq $0x24, %rcx
addq %rax, %r14
movq %r8, %r12
movq %r8, -0x10(%rsp)
rolq $0x1e, %r11
xorq %rcx, %r11
movq %r9, %rax
rolq $0x3, %rax
xorq %rsi, %rax
movq %r15, %rdx
rorq %rdx
rolq $0x38, %r15
movq -0x78(%rsp), %rcx
movq %rcx, %r8
rorq %r8
movq %r9, %rsi
shrq $0x6, %rsi
xorq %rax, %rsi
rorq %r10
xorq %r10, 0x60(%rsp)
movq %r12, %rax
rorq %rax
xorq %rax, 0x50(%rsp)
movq -0x28(%rsp), %rax
rorq %rax
xorq %rax, -0x58(%rsp)
movq %rbp, -0x30(%rsp)
rorq %rbp
movq -0x48(%rsp), %r12
xorq %rbp, %r12
movq -0x38(%rsp), %rax
rorq %rax
xorq %rax, 0x48(%rsp)
rorq %rbx
movq 0x30(%rsp), %r10
xorq %rbx, %r10
movq %rdi, %rbp
movq %rdi, %rax
rorq %rax
movq 0xb8(%rsp), %r9
xorq %rax, %r9
rolq $0x38, %rcx
xorq %rdx, %r15
xorq %r8, %rcx
movq 0x38(%rsp), %rdi
movq %rdi, %rax
movq 0x58(%rsp), %rdx
andq %rdx, %rax
movq %rdi, %rbx
xorq %rdx, %rbx
andq 0x68(%rsp), %rbx
xorq %rax, %rbx
movq 0x40(%rsp), %r8
rolq $0x17, %r8
xorq -0x40(%rsp), %r8
movq %rdi, %rdx
rolq $0x19, %rdx
xorq %r11, %rdx
movq 0x20(%rsp), %rax
movq %rax, %r13
rorq %r13
movq %rax, %r11
rolq $0x38, %r11
xorq %r13, %r11
addq %r8, %r14
movq %rax, %rdi
shrq $0x7, %rdi
xorq %r11, %rdi
addq %rdx, %rbx
movq -0x78(%rsp), %rax
movq -0x20(%rsp), %rdx
addq %rax, %rdx
addq %rax, %rdi
movq %rdi, 0x30(%rsp)
shrq $0x7, %rax
xorq %rcx, %rax
movq -0x60(%rsp), %rcx
addq %rcx, %rax
movq %rax, -0x40(%rsp)
shrq $0x7, %rcx
xorq %r15, %rcx
addq %rbp, %rcx
movq %rcx, -0x78(%rsp)
shrq $0x7, %rbp
xorq %r9, %rbp
movq -0x70(%rsp), %rax
addq %rax, %rbp
movq %rbp, -0x48(%rsp)
movq %rax, %rcx
shrq $0x7, %rcx
xorq %r10, %rcx
movq -0x38(%rsp), %rax
addq %rax, %rcx
movq %rcx, -0x70(%rsp)
shrq $0x7, %rax
xorq 0x48(%rsp), %rax
movq -0x30(%rsp), %r11
addq %r11, %rax
movq %rax, -0x60(%rsp)
shrq $0x7, %r11
xorq %r12, %r11
movq -0x28(%rsp), %r9
addq %r9, %r11
movq %r11, -0x68(%rsp)
shrq $0x7, %r9
xorq -0x58(%rsp), %r9
movq -0x10(%rsp), %r11
addq %r11, %r9
movq %r9, -0x58(%rsp)
shrq $0x7, %r11
xorq 0x50(%rsp), %r11
movq -0x18(%rsp), %rax
addq %rax, %r11
movq %r11, %rbp
shrq $0x7, %rax
xorq 0x60(%rsp), %rax
movq %rdx, %r9
addq %rax, %r9
addq %rsi, %r9
movq -0x80(%rsp), %rax
leaq 0xbc908(%rip), %rcx # 0x6d8440
movq 0xb0(%rcx,%rax,8), %rsi
addq %r9, %rsi
addq -0x50(%rsp), %rsi
movq 0xc0(%rsp), %r11
addq %r14, %r11
addq %r14, %rbx
movq 0x40(%rsp), %rdi
movq %rdi, %rax
movq 0x28(%rsp), %rdx
xorq %rdx, %rax
andq %r11, %rax
xorq %rdx, %rax
addq %rax, %rsi
movq %r11, %rax
rolq $0x32, %rax
movq %r11, %r8
rolq $0x2e, %r8
xorq %rax, %r8
movq %r11, %rax
rolq $0x17, %rax
xorq %r8, %rax
movq %rbx, %r8
rolq $0x24, %r8
movq %rbx, %r14
rolq $0x1e, %r14
xorq %r8, %r14
movq %rbx, %r8
rolq $0x19, %r8
xorq %r14, %r8
addq %rax, %rsi
movq %rbx, %rax
movq 0x38(%rsp), %r13
andq %r13, %rax
movq %rbx, %r14
xorq %r13, %r14
movq -0x8(%rsp), %r12
movq %r12, %r15
rolq $0x2d, %r15
movq 0x58(%rsp), %r10
andq %r10, %r14
xorq %rax, %r14
movq %r12, %rax
rolq $0x3, %rax
addq %r8, %r14
xorq %r15, %rax
movq %r12, %r8
shrq $0x6, %r8
xorq %rax, %r8
addq 0x20(%rsp), %rbp
addq %r8, %rbp
movq -0x80(%rsp), %rax
movq 0xb8(%rcx,%rax,8), %r8
movq %rcx, %r12
addq %rbp, %r8
addq %rdx, %r8
movq 0x68(%rsp), %rcx
addq %rsi, %rcx
addq %rsi, %r14
movq %r11, %rax
movq %rdi, %rdx
xorq %rdi, %rax
andq %rcx, %rax
xorq %rdi, %rax
addq %rax, %r8
movq %rcx, %rax
rolq $0x32, %rax
movq %rcx, %rsi
rolq $0x2e, %rsi
xorq %rax, %rsi
movq %rcx, %rax
rolq $0x17, %rax
xorq %rsi, %rax
movq %r14, %rsi
rolq $0x24, %rsi
movq %r14, %rdi
rolq $0x1e, %rdi
xorq %rsi, %rdi
movq %r14, %rsi
rolq $0x19, %rsi
xorq %rdi, %rsi
addq %rax, %r8
movq %r14, %rax
andq %rbx, %rax
movq %r14, %rdi
xorq %rbx, %rdi
movq %r9, -0x20(%rsp)
movq %r9, %r15
rolq $0x2d, %r15
andq %r13, %rdi
xorq %rax, %rdi
movq %r9, %rax
rolq $0x3, %rax
addq %rsi, %rdi
xorq %r15, %rax
movq %r9, %rsi
shrq $0x6, %rsi
xorq %rax, %rsi
movq -0x58(%rsp), %r9
addq 0x18(%rsp), %r9
addq %rsi, %r9
movq %r9, -0x58(%rsp)
movq -0x80(%rsp), %rax
movq 0xc0(%r12,%rax,8), %rsi
addq %r9, %rsi
addq %rdx, %rsi
addq %r8, %r10
addq %r8, %rdi
movq %rcx, %rax
xorq %r11, %rax
andq %r10, %rax
xorq %r11, %rax
addq %rax, %rsi
movq %r10, %rax
rolq $0x32, %rax
movq %r10, %r8
rolq $0x2e, %r8
xorq %rax, %r8
movq %r10, %rax
movq %r10, %r9
rolq $0x17, %rax
xorq %r8, %rax
movq %rdi, %r8
rolq $0x24, %r8
movq %rdi, %r15
rolq $0x1e, %r15
xorq %r8, %r15
movq %rdi, %r12
rolq $0x19, %r12
xorq %r15, %r12
addq %rax, %rsi
movq %rdi, %rax
andq %r14, %rax
movq %rdi, %r8
xorq %r14, %r8
movq %rbp, -0x50(%rsp)
movq %rbp, %r15
rolq $0x2d, %r15
andq %rbx, %r8
xorq %rax, %r8
movq %rbp, %rax
rolq $0x3, %rax
addq %r12, %r8
xorq %r15, %rax
movq %rbp, %r15
shrq $0x6, %r15
xorq %rax, %r15
movq -0x68(%rsp), %r10
addq 0x10(%rsp), %r10
addq %r15, %r10
movq %r10, -0x68(%rsp)
movq -0x80(%rsp), %rax
leaq 0xbc6e7(%rip), %rdx # 0x6d8440
movq 0xc8(%rdx,%rax,8), %r12
addq %r10, %r12
addq %r11, %r12
addq %rsi, %r13
addq %rsi, %r8
movq %r9, %rax
xorq %rcx, %rax
andq %r13, %rax
xorq %rcx, %rax
addq %rax, %r12
movq %r13, %rax
rolq $0x32, %rax
movq %r13, %rsi
rolq $0x2e, %rsi
xorq %rax, %rsi
movq %r13, %rax
movq %r13, %rdx
rolq $0x17, %rax
xorq %rsi, %rax
movq %r8, %rsi
rolq $0x24, %rsi
movq %r8, %r15
rolq $0x1e, %r15
xorq %rsi, %r15
movq %r8, %rsi
rolq $0x19, %rsi
xorq %r15, %rsi
addq %rax, %r12
movq %r8, %rax
andq %rdi, %rax
movq %r8, %r15
xorq %rdi, %r15
movq -0x58(%rsp), %r10
movq %r10, %r13
rolq $0x2d, %r13
andq %r14, %r15
xorq %rax, %r15
movq %r10, %rax
rolq $0x3, %rax
addq %rsi, %r15
xorq %r13, %rax
movq %r10, %rsi
shrq $0x6, %rsi
xorq %rax, %rsi
movq -0x60(%rsp), %r11
addq 0x8(%rsp), %r11
addq %rsi, %r11
movq %r11, -0x60(%rsp)
movq -0x80(%rsp), %rax
leaq 0xbc635(%rip), %r10 # 0x6d8440
movq 0xd0(%r10,%rax,8), %rsi
addq %r11, %rsi
addq %rcx, %rsi
addq %r12, %rbx
addq %r12, %r15
movq %rdx, %rax
xorq %r9, %rax
andq %rbx, %rax
xorq %r9, %rax
addq %rax, %rsi
movq %rbx, %rax
rolq $0x32, %rax
movq %rbx, %r12
rolq $0x2e, %r12
xorq %rax, %r12
movq %rbx, %rax
rolq $0x17, %rax
xorq %r12, %rax
movq %r15, %r12
rolq $0x24, %r12
movq %r15, %r13
rolq $0x1e, %r13
xorq %r12, %r13
movq %r15, %r12
rolq $0x19, %r12
xorq %r13, %r12
addq %rax, %rsi
movq %r15, %rax
andq %r8, %rax
movq %r15, %rbp
xorq %r8, %rbp
movq -0x68(%rsp), %rcx
movq %rcx, %r13
rolq $0x2d, %r13
andq %rdi, %rbp
xorq %rax, %rbp
movq %rcx, %rax
rolq $0x3, %rax
addq %r12, %rbp
xorq %r13, %rax
movq %rcx, %r12
shrq $0x6, %r12
xorq %rax, %r12
movq -0x70(%rsp), %rcx
addq (%rsp), %rcx
addq %r12, %rcx
movq %rcx, -0x70(%rsp)
movq -0x80(%rsp), %rax
movq 0xd8(%r10,%rax,8), %r12
movq %r10, %r11
addq %rcx, %r12
addq %r9, %r12
addq %rsi, %r14
addq %rsi, %rbp
movq %rbx, %rax
xorq %rdx, %rax
andq %r14, %rax
xorq %rdx, %rax
addq %rax, %r12
movq %r14, %rax
rolq $0x32, %rax
movq %r14, %rsi
rolq $0x2e, %rsi
xorq %rax, %rsi
movq %r14, %rax
rolq $0x17, %rax
xorq %rsi, %rax
movq %rbp, %rsi
rolq $0x24, %rsi
movq %rbp, %r13
rolq $0x1e, %r13
xorq %rsi, %r13
movq %rbp, %rsi
rolq $0x19, %rsi
xorq %r13, %rsi
addq %rax, %r12
movq %rbp, %rax
andq %r15, %rax
movq %rbp, %r13
xorq %r15, %r13
movq -0x60(%rsp), %rcx
movq %rcx, %r10
rolq $0x2d, %r10
andq %r8, %r13
xorq %rax, %r13
movq %rcx, %rax
rolq $0x3, %rax
addq %rsi, %r13
xorq %r10, %rax
movq %rcx, %rsi
shrq $0x6, %rsi
xorq %rax, %rsi
movq -0x48(%rsp), %rcx
addq -0x8(%rsp), %rcx
addq %rsi, %rcx
movq -0x80(%rsp), %rax
movq 0xe0(%r11,%rax,8), %rsi
addq %rcx, %rsi
movq %rcx, %r9
addq %rdx, %rsi
addq %r12, %rdi
addq %r12, %r13
movq %r14, %rax
xorq %rbx, %rax
andq %rdi, %rax
xorq %rbx, %rax
addq %rax, %rsi
movq %rdi, %rax
rolq $0x32, %rax
movq %rdi, %r10
rolq $0x2e, %r10
xorq %rax, %r10
movq %rdi, %rax
rolq $0x17, %rax
xorq %r10, %rax
movq %r13, %r10
rolq $0x24, %r10
movq %r13, %r12
rolq $0x1e, %r12
xorq %r10, %r12
movq %r13, %r10
rolq $0x19, %r10
xorq %r12, %r10
addq %rax, %rsi
movq %r13, %rax
andq %rbp, %rax
movq %r13, %r12
xorq %rbp, %r12
movq -0x70(%rsp), %rdx
movq %rdx, %rcx
rolq $0x2d, %rcx
andq %r15, %r12
xorq %rax, %r12
movq %rdx, %rax
rolq $0x3, %rax
addq %r10, %r12
xorq %rcx, %rax
movq %rdx, %rcx
shrq $0x6, %rcx
xorq %rax, %rcx
movq -0x78(%rsp), %rdx
addq -0x20(%rsp), %rdx
addq %rcx, %rdx
movq %rdx, -0x78(%rsp)
movq -0x80(%rsp), %rax
movq 0xe8(%r11,%rax,8), %rax
addq %rdx, %rax
addq %rbx, %rax
addq %rsi, %r8
addq %rsi, %r12
movq %rdi, %rcx
xorq %r14, %rcx
andq %r8, %rcx
xorq %r14, %rcx
addq %rcx, %rax
movq %r8, %rcx
rolq $0x32, %rcx
movq %r8, %rsi
rolq $0x2e, %rsi
xorq %rcx, %rsi
movq %r8, %rcx
rolq $0x17, %rcx
xorq %rsi, %rcx
movq %r12, %rsi
rolq $0x24, %rsi
movq %r12, %r10
rolq $0x1e, %r10
xorq %rsi, %r10
movq %r12, %rsi
rolq $0x19, %rsi
xorq %r10, %rsi
addq %rcx, %rax
movq %r12, %rcx
andq %r13, %rcx
movq %r12, %rbx
xorq %r13, %rbx
movq %r9, -0x48(%rsp)
movq %r9, %r10
rolq $0x2d, %r10
andq %rbp, %rbx
xorq %rcx, %rbx
movq %r9, %rcx
rolq $0x3, %rcx
addq %rsi, %rbx
xorq %r10, %rcx
movq %r9, %rsi
shrq $0x6, %rsi
xorq %rcx, %rsi
movq -0x40(%rsp), %rdx
addq -0x50(%rsp), %rdx
addq %rsi, %rdx
movq %rdx, -0x40(%rsp)
movq -0x80(%rsp), %rcx
movq 0xf0(%r11,%rcx,8), %rsi
addq %rdx, %rsi
addq %r14, %rsi
addq %rax, %r15
addq %rax, %rbx
movq %r8, %rax
xorq %rdi, %rax
andq %r15, %rax
xorq %rdi, %rax
addq %rax, %rsi
movq %r15, %rax
rolq $0x32, %rax
movq %r15, %rcx
rolq $0x2e, %rcx
xorq %rax, %rcx
movq %r15, %rax
rolq $0x17, %rax
xorq %rcx, %rax
movq %rbx, %rcx
rolq $0x24, %rcx
movq %rbx, %r10
rolq $0x1e, %r10
xorq %rcx, %r10
movq %rbx, %rcx
rolq $0x19, %rcx
xorq %r10, %rcx
addq %rax, %rsi
movq %rbx, %rax
andq %r12, %rax
movq %rbx, %r14
xorq %r12, %r14
movq -0x78(%rsp), %rdx
movq %rdx, %r10
rolq $0x2d, %r10
andq %r13, %r14
xorq %rax, %r14
movq %rdx, %rax
rolq $0x3, %rax
addq %rcx, %r14
xorq %r10, %rax
movq -0x80(%rsp), %r10
movq %rdx, %rcx
shrq $0x6, %rcx
xorq %rax, %rcx
movq 0x30(%rsp), %r9
movq -0x58(%rsp), %rdx
addq %rdx, %r9
addq %rcx, %r9
movq 0xf8(%r11,%r10,8), %rax
addq %r9, %rax
addq %rdi, %rax
addq %rsi, %rbp
addq %rsi, %r14
movq %r15, %rcx
xorq %r8, %rcx
andq %rbp, %rcx
xorq %r8, %rcx
addq %rcx, %rax
movq %rbp, %rcx
rolq $0x32, %rcx
movq %rbp, %rsi
rolq $0x2e, %rsi
xorq %rcx, %rsi
movq %rbp, %rcx
rolq $0x17, %rcx
xorq %rsi, %rcx
movq %r14, %rsi
rolq $0x24, %rsi
movq %r14, %rdi
rolq $0x1e, %rdi
xorq %rsi, %rdi
movq %r14, %rsi
rolq $0x19, %rsi
xorq %rdi, %rsi
addq %rcx, %rax
movq %r14, %rcx
andq %rbx, %rcx
movq %r14, %rdi
xorq %rbx, %rdi
andq %r12, %rdi
xorq %rcx, %rdi
addq %rsi, %rdi
addq %rax, %r13
addq %rax, %rdi
movq -0x70(%rsp), %rax
movq %rax, -0x38(%rsp)
movq -0x60(%rsp), %rax
movq %rax, -0x30(%rsp)
movq -0x48(%rsp), %rax
movq %rax, -0x70(%rsp)
movq -0x68(%rsp), %rax
movq %rax, -0x28(%rsp)
movq %rdx, -0x10(%rsp)
movq -0x50(%rsp), %rax
movq %rax, -0x18(%rsp)
movq -0x78(%rsp), %rax
movq %rax, -0x68(%rsp)
addq $0x10, %r10
movq %r9, -0x78(%rsp)
movq -0x40(%rsp), %rsi
movq %r10, %r9
cmpl $0x40, %r10d
jb 0x61b417
addq 0x70(%rsp), %rdi
movq 0xb0(%rsp), %rax
movq %rdi, (%rax)
addq 0x78(%rsp), %r14
movq %r14, 0x8(%rax)
addq 0x80(%rsp), %rbx
movq %rbx, 0x10(%rax)
addq 0x88(%rsp), %r12
movq %r12, 0x18(%rax)
addq 0x90(%rsp), %r13
movq %r13, 0x20(%rax)
addq 0x98(%rsp), %rbp
movq %rbp, 0x28(%rax)
addq 0xa8(%rsp), %r15
movq %r15, 0x30(%rax)
addq 0xa0(%rsp), %r8
movq %r8, 0x38(%rax)
addq $0xc8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/sha512.c
|
rhash_swap_copy_u64_to_str
|
/**
 * Copy a memory block, byte-swapping every 64-bit word.
 *
 * When destination, source, and length are all 8-byte aligned the copy
 * proceeds word-by-word through bswap_64; otherwise it falls back to a
 * byte loop that reads each source byte at index ^ 7 (equivalent
 * per-word byte reversal without aligned loads).
 *
 * Fix: the alignment test previously used `(char*)to - (char*)0`, which
 * is undefined pointer arithmetic on a null pointer, and narrowed the
 * result through an `(int)` cast. Casting the pointers to size_t
 * checks the same low three bits without UB.
 *
 * @param to     destination buffer
 * @param from   source buffer
 * @param length number of bytes to copy (assumed a multiple of 8
 *               on the aligned path — TODO confirm callers guarantee this)
 */
void rhash_swap_copy_u64_to_str(void* to, const void* from, size_t length)
{
	/* if all pointers and length are 64-bits aligned */
	if ( 0 == (( (size_t)to | (size_t)from | length ) & 7) ) {
		/* copy aligned memory block as 64-bit integers */
		const uint64_t* src = (const uint64_t*)from;
		const uint64_t* end = (const uint64_t*)((const char*)src + length);
		uint64_t* dst = (uint64_t*)to;
		while (src < end) *(dst++) = bswap_64( *(src++) );
	} else {
		size_t index;
		char* dst = (char*)to;
		for (index = 0; index < length; index++) *(dst++) = ((char*)from)[index ^ 7];
	}
}
|
movl %esi, %eax
orl %edi, %eax
movl %edx, %ecx
orl %eax, %ecx
testb $0x7, %cl
je 0x61c466
testq %rdx, %rdx
je 0x61c484
xorl %eax, %eax
movq %rax, %rcx
xorq $0x7, %rcx
movb (%rsi,%rcx), %cl
movb %cl, (%rdi,%rax)
incq %rax
cmpq %rax, %rdx
jne 0x61c44f
jmp 0x61c484
testq %rdx, %rdx
jle 0x61c484
addq %rsi, %rdx
movq (%rsi), %rax
bswapq %rax
addq $0x8, %rsi
movq %rax, (%rdi)
addq $0x8, %rdi
cmpq %rdx, %rsi
jb 0x61c46e
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibrhash/librhash/byte_order.c
|
uv_timer_stop
|
/* Deactivate a timer handle.  Calling this on an inactive handle is a
 * no-op; otherwise the handle's node is removed from the loop's timer
 * min-heap and the handle is marked stopped.  Always returns 0. */
int uv_timer_stop(uv_timer_t* handle) {
  if (!uv__is_active(handle))
    return 0;
  /* Detach this timer from the loop-wide heap of pending timers. */
  heap_remove(timer_heap(handle->loop),
              (struct heap_node*) &handle->heap_node,
              timer_less_than);
  uv__handle_stop(handle);
  return 0;
}
|
testb $0x4, 0x58(%rdi)
je 0x61c7cb
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
movq 0x8(%rdi), %rcx
movl 0x210(%rcx), %edx
testl %edx, %edx
je 0x61c7ac
leaq 0x208(%rcx), %r14
leaq 0x68(%rbx), %rax
movq %r14, %r8
cmpl $0x1, %edx
je 0x61c6aa
xorl %esi, %esi
movl %edx, %r9d
xorl %edi, %edi
movl %edx, %r8d
movl %r9d, %r10d
andl $0x1, %r10d
leal (%r10,%rdi,2), %edi
shrl %r8d
decl %esi
cmpl $0x3, %r9d
movl %r8d, %r9d
ja 0x61c676
movq %r14, %r9
leal (,%rdi,8), %r8d
andl $0x8, %r8d
addq (%r9), %r8
shrl %edi
movq %r8, %r9
incl %esi
jne 0x61c692
decl %edx
movl %edx, 0x210(%rcx)
movq (%r8), %r15
movq $0x0, (%r8)
cmpq %rax, %r15
je 0x61c7a0
movq 0x68(%rbx), %rdx
movq %rdx, (%r15)
movq 0x70(%rbx), %rcx
movq %rcx, 0x8(%r15)
movq 0x78(%rbx), %rsi
movq %rsi, 0x10(%r15)
testq %rdx, %rdx
je 0x61c6e5
movq %r15, 0x10(%rdx)
testq %rcx, %rcx
je 0x61c6ee
movq %r15, 0x10(%rcx)
movq 0x78(%rbx), %rcx
movq %r14, %rdx
testq %rcx, %rcx
je 0x61c706
xorl %edx, %edx
cmpq %rax, (%rcx)
setne %dl
leaq (%rcx,%rdx,8), %rdx
movq %r15, (%rdx)
movq (%r15), %rax
movq %r15, %rdx
testq %rax, %rax
je 0x61c738
movq 0x18(%rax), %rcx
movq 0x18(%r15), %rdx
cmpq %rdx, %rcx
jae 0x61c726
movq %rax, %rdx
jmp 0x61c738
movq %r15, %rdx
ja 0x61c738
movq 0x28(%rax), %rcx
movq %r15, %rdx
cmpq 0x28(%r15), %rcx
jb 0x61c721
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x61c75f
movq 0x18(%rax), %rcx
movq 0x18(%rdx), %rsi
cmpq %rsi, %rcx
jae 0x61c753
movq %rax, %rdx
jmp 0x61c75f
ja 0x61c75f
movq 0x28(%rax), %rcx
cmpq 0x28(%rdx), %rcx
jb 0x61c74e
cmpq %r15, %rdx
je 0x61c795
movq %r14, %rdi
movq %r15, %rsi
callq 0x61c8c1
jmp 0x61c709
movq 0x18(%r15), %rax
movq 0x18(%rsi), %rcx
cmpq %rcx, %rax
jb 0x61c78a
ja 0x61c7ac
movq 0x28(%r15), %rax
cmpq 0x28(%rsi), %rax
jae 0x61c7ac
movq %r14, %rdi
movq %r15, %rdx
callq 0x61c8c1
movq 0x10(%r15), %rsi
testq %rsi, %rsi
jne 0x61c771
jmp 0x61c7ac
cmpq %rax, (%r14)
jne 0x61c7ac
movq $0x0, (%r14)
movl 0x58(%rbx), %eax
testb $0x4, %al
je 0x61c7c6
movl %eax, %ecx
andl $-0x5, %ecx
movl %ecx, 0x58(%rbx)
testb $0x8, %al
je 0x61c7c6
movq 0x8(%rbx), %rax
decl 0x8(%rax)
popq %rbx
popq %r14
popq %r15
xorl %eax, %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/timer.c
|
uv__next_timeout
|
/* Compute the poll timeout, in milliseconds, for the next loop iteration:
 * -1 when no timers are pending (block indefinitely), 0 when the earliest
 * timer is already due, otherwise the clamped time until it fires. */
int uv__next_timeout(const uv_loop_t* loop) {
  const struct heap_node* heap_node;
  const uv_timer_t* handle;
  uint64_t diff;
  /* The heap root is the timer with the smallest timeout value. */
  heap_node = heap_min(timer_heap(loop));
  if (heap_node == NULL)
    return -1; /* block indefinitely */
  handle = container_of(heap_node, uv_timer_t, heap_node);
  if (handle->timeout <= loop->time)
    return 0; /* already expired; the poll call must not block */
  diff = handle->timeout - loop->time;
  /* Clamp so the delta fits the int return type expected by the pollers. */
  if (diff > INT_MAX)
    diff = INT_MAX;
  return (int) diff;
}
|
movq 0x208(%rdi), %rax
testq %rax, %rax
je 0x61c868
movq 0x18(%rax), %rcx
xorl %eax, %eax
subq 0x220(%rdi), %rcx
jbe 0x61c867
movl $0x7fffffff, %eax # imm = 0x7FFFFFFF
cmpq %rax, %rcx
cmovbq %rcx, %rax
retq
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/timer.c
|
uv__run_timers
|
/* Fire every timer whose deadline is at or before the loop's cached time.
 * The heap root is always the earliest timer, so popping stops as soon as
 * the root lies in the future (or the heap is empty). */
void uv__run_timers(uv_loop_t* loop) {
  struct heap_node* heap_node;
  uv_timer_t* handle;
  for (;;) {
    heap_node = heap_min(timer_heap(loop));
    if (heap_node == NULL)
      break;
    handle = container_of(heap_node, uv_timer_t, heap_node);
    if (handle->timeout > loop->time)
      break; /* earliest timer not due yet; nothing later can be due */
    /* Stop first, then uv_timer_again() (which re-arms repeating timers),
     * and only then invoke the user callback, so the callback observes a
     * consistent handle state and may itself restart the timer. */
    uv_timer_stop(handle);
    uv_timer_again(handle);
    handle->timer_cb(handle);
  }
}
|
pushq %r15
pushq %r14
pushq %rbx
movq 0x208(%rdi), %r15
testq %r15, %r15
je 0x61c8b6
movq %rdi, %rbx
movq 0x18(%r15), %rax
cmpq 0x220(%rbx), %rax
ja 0x61c8b6
leaq -0x68(%r15), %r14
movq %r14, %rdi
callq 0x61c635
movq %r14, %rdi
callq 0x61c7ce
movq %r14, %rdi
callq *-0x8(%r15)
movq 0x208(%rbx), %r15
testq %r15, %r15
jne 0x61c882
popq %rbx
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/timer.c
|
uv__reallocf
|
/* realloc() variant that frees the original block when resizing fails,
 * so the caller can unconditionally overwrite its pointer without
 * leaking.  A NULL return with size == 0 is not a failure and must not
 * free ptr (uv__realloc already handled it). */
void* uv__reallocf(void* ptr, size_t size) {
  void* result = uv__realloc(ptr, size);
  if (result == NULL && size > 0)
    uv__free(ptr); /* resize failed: release the old block */
  return result;
}
|
pushq %rbp
pushq %r14
pushq %rbx
movq %rdi, %rbx
testq %rsi, %rsi
je 0x61ca59
movq %rbx, %rdi
callq *0x244d34(%rip) # 0x861788
testq %rax, %rax
jne 0x61ca71
callq 0x415e0
movq %rax, %r14
movl (%rax), %ebp
movq %rbx, %rdi
callq *0x244d2c(%rip) # 0x861798
movl %ebp, (%r14)
xorl %eax, %eax
popq %rbx
popq %r14
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_replace_allocator
|
/* Install a caller-supplied allocator for all of libuv's internal heap
 * use.  All four hooks must be provided; partial replacement is rejected
 * with UV_EINVAL and leaves the current allocator untouched. */
int uv_replace_allocator(uv_malloc_func malloc_func,
                         uv_realloc_func realloc_func,
                         uv_calloc_func calloc_func,
                         uv_free_func free_func) {
  if (malloc_func == NULL || free_func == NULL)
    return UV_EINVAL;
  if (realloc_func == NULL || calloc_func == NULL)
    return UV_EINVAL;

  uv__allocator.local_malloc = malloc_func;
  uv__allocator.local_realloc = realloc_func;
  uv__allocator.local_calloc = calloc_func;
  uv__allocator.local_free = free_func;
  return 0;
}
|
movq %rsi, %xmm0
movq %rdi, %xmm1
punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
movq %rcx, %xmm0
movq %rdx, %xmm2
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
pxor %xmm0, %xmm0
pcmpeqd %xmm0, %xmm2
pcmpeqd %xmm0, %xmm1
movdqa %xmm1, %xmm0
shufps $0xdd, %xmm2, %xmm0 # xmm0 = xmm0[1,3],xmm2[1,3]
shufps $0x88, %xmm2, %xmm1 # xmm1 = xmm1[0,2],xmm2[0,2]
andps %xmm0, %xmm1
movmskps %xmm1, %r8d
movl $0xffffffea, %eax # imm = 0xFFFFFFEA
testl %r8d, %r8d
je 0x61cabc
retq
movq %rdi, 0x244cbd(%rip) # 0x861780
movq %rsi, 0x244cbe(%rip) # 0x861788
movq %rdx, 0x244cbf(%rip) # 0x861790
movq %rcx, 0x244cc0(%rip) # 0x861798
xorl %eax, %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_strerror
|
/* Map a libuv error code (UV_*) to a static, human-readable string.
 * UV_ERRNO_MAP(UV_STRERROR_GEN) presumably expands to one
 * "case UV_X: return "...";" per known error -- consistent with the
 * generated jump table.  Unknown codes fall through to
 * uv__unknown_err_code(). */
const char* uv_strerror(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN)
  }
  return uv__unknown_err_code(err);
}
|
cmpl $-0x7e, %edi
jle 0x61d959
leal 0x7d(%rdi), %eax
cmpl $0x7c, %eax
ja 0x61d9d4
leaq 0xbb444(%rip), %rcx # 0x6d8d8c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
leaq 0xbbc3b(%rip), %rax # 0x6d9593
retq
cmpl $0xfffff043, %edi # imm = 0xFFFFF043
jle 0x61d984
leal 0xbc6(%rdi), %eax
cmpl $0xe, %eax
ja 0x61d9c4
leaq 0xbb3dd(%rip), %rcx # 0x6d8d50
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
leaq 0xbbb68(%rip), %rax # 0x6d94eb
retq
cmpl $0xfffff001, %edi # imm = 0xFFFFF001
je 0x61d9bc
cmpl $0xfffff002, %edi # imm = 0xFFFFF002
je 0x61d9b4
cmpl $0xfffff010, %edi # imm = 0xFFFFF010
jne 0x61d9d4
leaq 0xbbc03(%rip), %rax # 0x6d95a6
retq
leaq 0xbbb83(%rip), %rax # 0x6d952e
retq
leaq 0xbba46(%rip), %rax # 0x6d93f9
retq
leaq 0x463d7(%rip), %rax # 0x663d92
retq
leaq 0xc0f43(%rip), %rax # 0x6de906
retq
cmpl $0xfffff044, %edi # imm = 0xFFFFF044
jne 0x61d9d4
leaq 0xbc055(%rip), %rax # 0x6d9a28
retq
jmp 0x61d254
leaq 0xbbd14(%rip), %rax # 0x6d96f4
retq
leaq 0xbbfda(%rip), %rax # 0x6d99c2
retq
leaq 0xbbda7(%rip), %rax # 0x6d9797
retq
leaq 0xbbf98(%rip), %rax # 0x6d9990
retq
leaq 0x86f58(%rip), %rax # 0x6a4958
retq
leaq 0xbbfd4(%rip), %rax # 0x6d99dc
retq
leaq 0xbbef9(%rip), %rax # 0x6d9909
retq
leaq 0xbbc71(%rip), %rax # 0x6d9689
retq
leaq 0xbc02a(%rip), %rax # 0x6d9a4a
retq
leaq 0xbbd8d(%rip), %rax # 0x6d97b5
retq
leaq 0xbbbfa(%rip), %rax # 0x6d962a
retq
leaq 0xac5eb(%rip), %rax # 0x6ca023
retq
leaq 0xbbdd5(%rip), %rax # 0x6d9815
retq
leaq 0xbbc4b(%rip), %rax # 0x6d9693
retq
leaq 0xbbeca(%rip), %rax # 0x6d991a
retq
leaq 0xbbdcd(%rip), %rax # 0x6d9825
retq
leaq 0xbbcc9(%rip), %rax # 0x6d9729
retq
leaq 0xbbe5c(%rip), %rax # 0x6d98c4
retq
leaq 0xbbeee(%rip), %rax # 0x6d995e
retq
leaq 0xbbe40(%rip), %rax # 0x6d98b8
retq
leaq 0xbbac8(%rip), %rax # 0x6d9548
retq
leaq 0xbbce6(%rip), %rax # 0x6d976e
retq
leaq 0xbbbbe(%rip), %rax # 0x6d964e
retq
leaq 0xbbf60(%rip), %rax # 0x6d99f8
retq
leaq 0xbbe90(%rip), %rax # 0x6d9930
retq
leaq 0xbbd55(%rip), %rax # 0x6d97fd
retq
leaq 0xbbb31(%rip), %rax # 0x6d95e1
retq
leaq 0xbbc61(%rip), %rax # 0x6d9719
retq
leaq 0xbbcbd(%rip), %rax # 0x6d977d
retq
leaq 0xbbe22(%rip), %rax # 0x6d98ea
retq
leaq 0xbbb8d(%rip), %rax # 0x6d965d
retq
leaq 0xbb90b(%rip), %rax # 0x6d93e3
retq
leaq 0xbbae0(%rip), %rax # 0x6d95c0
retq
leaq 0xbbc58(%rip), %rax # 0x6d9740
retq
leaq 0xbbefb(%rip), %rax # 0x6d99eb
retq
leaq 0xbbe83(%rip), %rax # 0x6d997b
retq
leaq 0xbbc54(%rip), %rax # 0x6d9754
retq
leaq 0xbbba7(%rip), %rax # 0x6d96af
retq
leaq 0xbb8bc(%rip), %rax # 0x6d93cc
retq
leaq 0x9796e(%rip), %rax # 0x6b5486
retq
leaq 0xbbd19(%rip), %rax # 0x6d9839
retq
leaq 0xbbacc(%rip), %rax # 0x6d95f4
retq
leaq 0xbbb41(%rip), %rax # 0x6d9671
retq
leaq 0xbbb98(%rip), %rax # 0x6d96d0
retq
leaq 0xbbca4(%rip), %rax # 0x6d97e4
retq
leaq 0xbbd8b(%rip), %rax # 0x6d98d3
retq
leaq 0xbba2b(%rip), %rax # 0x6d957b
retq
leaq 0xbbbb0(%rip), %rax # 0x6d9708
retq
leaq 0xbb843(%rip), %rax # 0x6d93a3
retq
leaq 0xbbcf0(%rip), %rax # 0x6d9858
retq
leaq 0xbb8a6(%rip), %rax # 0x6d9416
retq
leaq 0xbba95(%rip), %rax # 0x6d960d
retq
leaq 0xbbcfa(%rip), %rax # 0x6d987a
retq
leaq 0xbbc44(%rip), %rax # 0x6d97cc
retq
leaq 0xbbe79(%rip), %rax # 0x6d9a09
retq
leaq 0xbbd08(%rip), %rax # 0x6d98a0
retq
leaq 0xbb81a(%rip), %rax # 0x6d93ba
retq
leaq 0xbb9bf(%rip), %rax # 0x6d9567
retq
leaq 0xbbdf2(%rip), %rax # 0x6d99a2
retq
leaq 0xb922a(%rip), %rax # 0x6d6de2
retq
leaq 0xbbdab(%rip), %rax # 0x6d996b
retq
leaq 0xbb8ac(%rip), %rax # 0x6d9474
retq
leaq 0xbb8df(%rip), %rax # 0x6d94af
retq
leaq 0xbb871(%rip), %rax # 0x6d9449
retq
leaq 0x9c8dc(%rip), %rax # 0x6ba4bc
retq
leaq 0xbb8d2(%rip), %rax # 0x6d94ba
retq
leaq 0xbb8a7(%rip), %rax # 0x6d9497
retq
leaq 0xbb88d(%rip), %rax # 0x6d9485
retq
leaq 0xbb837(%rip), %rax # 0x6d9437
retq
leaq 0xbb900(%rip), %rax # 0x6d9508
retq
leaq 0xbb84c(%rip), %rax # 0x6d945c
retq
leaq 0xbb8ba(%rip), %rax # 0x6d94d2
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_tcp_bind
|
/* Validate a TCP bind request and forward it to the platform layer.
 * Returns UV_EINVAL for a non-TCP handle, a handle that is closing, or
 * an address family other than IPv4/IPv6. */
int uv_tcp_bind(uv_tcp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;
  if (handle->type != UV_TCP)
    return UV_EINVAL;
  if (uv__is_closing(handle)) {
    return UV_EINVAL;
  }
  /* Derive the sockaddr length from the address family. */
  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL; /* unsupported address family */
  return uv__tcp_bind(handle, addr, addrlen, flags);
}
|
cmpl $0xc, 0x10(%rdi)
jne 0x61dd50
testb $0x3, 0x58(%rdi)
je 0x61dd56
movl $0xffffffea, %eax # imm = 0xFFFFFFEA
retq
movl %edx, %ecx
movzwl (%rsi), %eax
cmpl $0x2, %eax
je 0x61dd6f
cmpl $0xa, %eax
jne 0x61dd50
movl $0x1c, %edx
jmp 0x62850f
movl $0x10, %edx
jmp 0x62850f
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_udp_connect
|
/* Associate (or, with addr == NULL, dissociate) a UDP handle with a peer
 * address.  Errors: UV_EINVAL for wrong handle type or address family,
 * UV_ENOTCONN when disconnecting an unconnected handle, UV_EISCONN when
 * connecting an already-connected one. */
int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;
  if (handle->type != UV_UDP)
    return UV_EINVAL;
  /* Disconnect the handle */
  if (addr == NULL) {
    if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
      return UV_ENOTCONN;
    return uv__udp_disconnect(handle);
  }
  /* Derive the sockaddr length from the address family. */
  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;
  if (handle->flags & UV_HANDLE_UDP_CONNECTED)
    return UV_EISCONN;
  return uv__udp_connect(handle, addr, addrlen);
}
|
movl $0xffffffea, %eax # imm = 0xFFFFFFEA
cmpl $0xf, 0x10(%rdi)
jne 0x61de85
testq %rsi, %rsi
je 0x61de61
movzwl (%rsi), %ecx
cmpl $0x2, %ecx
je 0x61de71
cmpl $0xa, %ecx
jne 0x61de85
movl $0x1c, %edx
jmp 0x61de76
movl $0xffffff95, %eax # imm = 0xFFFFFF95
testb $0x2, 0x5b(%rdi)
je 0x61de85
jmp 0x629ca5
movl $0x10, %edx
movl $0xffffff96, %eax # imm = 0xFFFFFF96
testb $0x2, 0x5b(%rdi)
je 0x629bbd
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_walk
|
/* Invoke walk_cb(handle, arg) for every user-visible handle on the loop.
 * Handles flagged UV_HANDLE_INTERNAL (libuv bookkeeping) are skipped. */
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
  QUEUE queue;
  QUEUE* q;
  uv_handle_t* h;
  /* Move the loop's handle list onto a local queue so a handle that is
   * re-inserted below is not visited twice during this walk. */
  QUEUE_MOVE(&loop->handle_queue, &queue);
  while (!QUEUE_EMPTY(&queue)) {
    q = QUEUE_HEAD(&queue);
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);
    /* Re-attach the handle to the loop's list before calling back, so
     * the loop stays consistent even if the callback closes handles. */
    QUEUE_REMOVE(q);
    QUEUE_INSERT_TAIL(&loop->handle_queue, q);
    if (h->flags & UV_HANDLE_INTERNAL) continue;
    walk_cb(h, arg);
  }
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
leaq 0x10(%rdi), %r12
movq 0x10(%rdi), %rax
cmpq %rax, %r12
je 0x61e068
movq %rdi, %r15
movq 0x18(%rdi), %rcx
movq %rsp, %r13
movq %rcx, 0x8(%r13)
movq %r13, (%rcx)
movq %rax, (%r13)
movq 0x8(%rax), %rcx
movq %rcx, 0x18(%rdi)
movq %r12, (%rcx)
movq %r13, 0x8(%rax)
movq (%r13), %rdi
cmpq %rdi, %r13
je 0x61e068
movq %rdx, %rbx
movq %rsi, %r14
movq (%rdi), %rax
movq 0x8(%rdi), %rcx
movq %rax, (%rcx)
movq 0x8(%rdi), %rcx
movq %rcx, 0x8(%rax)
movq %r12, (%rdi)
movq 0x18(%r15), %rax
movq %rax, 0x8(%rdi)
movq %rdi, (%rax)
movq %rdi, 0x18(%r15)
testb $0x10, 0x38(%rdi)
jne 0x61e05f
addq $-0x20, %rdi
movq %rbx, %rsi
callq *%r14
movq (%rsp), %rdi
cmpq %rdi, %r13
jne 0x61e02b
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv__fs_scandir_cleanup
|
/* Release all dirents still owned by a scandir request, then the dirent
 * array itself.  *nbufs tracks how many entries the user has consumed
 * via uv_fs_scandir_next(); entries from *nbufs up to req->result are
 * still unfreed.  NOTE(review): the initial (*nbufs)-- appears to step
 * back over the most recently returned entry, which scandir_next has
 * not yet freed -- confirm against uv_fs_scandir_next(). */
void uv__fs_scandir_cleanup(uv_fs_t* req) {
  uv__dirent_t** dents;
  unsigned int* nbufs = uv__get_nbufs(req);
  dents = req->ptr;
  if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
    (*nbufs)--;
  for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
    uv__fs_scandir_free(dents[*nbufs]);
  uv__fs_scandir_free(req->ptr);
  req->ptr = NULL;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
movl 0x124(%rdi), %eax
movl 0x58(%rdi), %ecx
testl %eax, %eax
je 0x61e2a1
cmpl %ecx, %eax
je 0x61e2a3
decl %eax
movl %eax, 0x124(%rbx)
jmp 0x61e2a3
xorl %eax, %eax
movq 0x60(%rbx), %r14
cmpl %ecx, %eax
jae 0x61e2cd
movl %eax, %eax
movq (%r14,%rax,8), %rdi
callq 0x41e70
movl 0x124(%rbx), %eax
incl %eax
movl %eax, 0x124(%rbx)
cmpl 0x58(%rbx), %eax
jb 0x61e2ab
movq 0x60(%rbx), %r14
movq %r14, %rdi
callq 0x41e70
movq $0x0, 0x60(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_fs_scandir_next
|
/* Pull the next directory entry out of a completed uv_fs_scandir()
 * request into *ent.  Returns 0 on success, the original (negative)
 * error if the request failed, or UV_EOF when the listing is exhausted
 * or the request was cancelled.  The returned ent->name points into
 * request-owned memory, valid until the next call or cleanup. */
int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
  uv__dirent_t** dents;
  uv__dirent_t* dent;
  unsigned int* nbufs;
  /* Check to see if req passed */
  if (req->result < 0)
    return req->result;
  /* Ptr will be null if req was canceled or no files found */
  if (!req->ptr)
    return UV_EOF;
  nbufs = uv__get_nbufs(req);
  assert(nbufs);
  dents = req->ptr;
  /* Free previous entity */
  if (*nbufs > 0)
    uv__fs_scandir_free(dents[*nbufs - 1]);
  /* End was already reached */
  if (*nbufs == (unsigned int) req->result) {
    uv__fs_scandir_free(dents);
    req->ptr = NULL;
    return UV_EOF;
  }
  dent = dents[(*nbufs)++];
  ent->name = dent->d_name;
  ent->type = uv__fs_get_dirent_type(dent);
  return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq 0x58(%rdi), %rax
testq %rax, %rax
js 0x61e32c
movq %rdi, %r14
movq 0x60(%rdi), %r15
movl $0xfffff001, %ebp # imm = 0xFFFFF001
testq %r15, %r15
je 0x61e369
movq %rsi, %rbx
movl 0x124(%r14), %ecx
testl %ecx, %ecx
je 0x61e330
decl %ecx
movq (%r15,%rcx,8), %rdi
callq 0x41e70
movl 0x124(%r14), %ecx
movq 0x58(%r14), %rax
jmp 0x61e332
movl %eax, %ebp
jmp 0x61e369
xorl %ecx, %ecx
cmpl %eax, %ecx
jne 0x61e348
movq %r15, %rdi
callq 0x41e70
movq $0x0, 0x60(%r14)
jmp 0x61e369
leal 0x1(%rcx), %eax
movl %eax, 0x124(%r14)
movl %ecx, %eax
movq (%r15,%rax,8), %rdi
leaq 0x13(%rdi), %rax
movq %rax, (%rbx)
callq 0x61e376
movl %eax, 0x8(%rbx)
xorl %ebp, %ebp
movl %ebp, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv__fs_get_dirent_type
|
/* Translate the platform dirent d_type value (UV__DT_*) into libuv's
 * portable uv_dirent_type_t.  Platforms without dirent type support
 * always report UV_DIRENT_UNKNOWN. */
uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
  uv_dirent_type_t type;
#ifdef HAVE_DIRENT_TYPES
  switch (dent->d_type) {
    case UV__DT_DIR:
      type = UV_DIRENT_DIR;
      break;
    case UV__DT_FILE:
      type = UV_DIRENT_FILE;
      break;
    case UV__DT_LINK:
      type = UV_DIRENT_LINK;
      break;
    case UV__DT_FIFO:
      type = UV_DIRENT_FIFO;
      break;
    case UV__DT_SOCKET:
      type = UV_DIRENT_SOCKET;
      break;
    case UV__DT_CHAR:
      type = UV_DIRENT_CHAR;
      break;
    case UV__DT_BLOCK:
      type = UV_DIRENT_BLOCK;
      break;
    default:
      /* DT_UNKNOWN or an unrecognized value. */
      type = UV_DIRENT_UNKNOWN;
  }
#else
  type = UV_DIRENT_UNKNOWN;
#endif
  return type;
}
|
movb 0x12(%rdi), %cl
decb %cl
xorl %eax, %eax
cmpb $0xb, %cl
ja 0x61e38f
movzbl %cl, %eax
leaq 0xbad04(%rip), %rcx # 0x6d9090
movl (%rcx,%rax,4), %eax
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv__fs_readdir_cleanup
|
/* Free the entry names produced by a uv_fs_readdir() request.  The
 * uv_dir_t itself and its dirents array remain owned by the caller
 * (released via uv_fs_closedir); only the per-entry name strings,
 * req->result of them, are freed here. */
void uv__fs_readdir_cleanup(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirents;
  int i;
  if (req->ptr == NULL)
    return;
  dir = req->ptr;
  dirents = dir->dirents;
  /* Detach first so a second cleanup is a harmless no-op. */
  req->ptr = NULL;
  if (dirents == NULL)
    return;
  for (i = 0; i < req->result; ++i) {
    uv__free((char*) dirents[i].name);
    dirents[i].name = NULL;
  }
}
|
movq 0x60(%rdi), %rax
testq %rax, %rax
je 0x61e3f0
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq %rdi, %rbx
movq (%rax), %r15
movq $0x0, 0x60(%rdi)
testq %r15, %r15
je 0x61e3e8
cmpq $0x0, 0x58(%rbx)
jle 0x61e3e8
callq 0x415e0
movq %rax, %r14
movl (%rax), %ebp
xorl %r12d, %r12d
movq (%r15), %rdi
callq *0x2433c7(%rip) # 0x861798
movl %ebp, (%r14)
movq $0x0, (%r15)
incq %r12
addq $0x10, %r15
cmpq %r12, 0x58(%rbx)
jg 0x61e3c8
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_loop_configure
|
/* Public variadic entry point for loop options: packages the trailing
 * arguments into a va_list and defers all interpretation to
 * uv__loop_configure().  Returns that function's error code. */
int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
  va_list ap;
  int err;
  va_start(ap, option);
  /* Any platform-agnostic options should be handled here. */
  err = uv__loop_configure(loop, option, ap);
  va_end(ap);
  return err;
}
|
subq $0xd8, %rsp
leaq 0x20(%rsp), %r10
movq %rdx, 0x10(%r10)
movq %rcx, 0x18(%r10)
movq %r8, 0x20(%r10)
movq %r9, 0x28(%r10)
testb %al, %al
je 0x61e448
movaps %xmm0, 0x50(%rsp)
movaps %xmm1, 0x60(%rsp)
movaps %xmm2, 0x70(%rsp)
movaps %xmm3, 0x80(%rsp)
movaps %xmm4, 0x90(%rsp)
movaps %xmm5, 0xa0(%rsp)
movaps %xmm6, 0xb0(%rsp)
movaps %xmm7, 0xc0(%rsp)
movq %rsp, %rdx
movq %r10, 0x10(%rdx)
leaq 0xe0(%rsp), %rax
movq %rax, 0x8(%rdx)
movabsq $0x3000000010, %rax # imm = 0x3000000010
movq %rax, (%rdx)
callq 0x62440c
addq $0xd8, %rsp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_default_loop
|
/* Return the lazily-initialized, process-wide default loop, or NULL if
 * its one-time initialization fails.  Subsequent calls return the same
 * pointer without re-initializing. */
uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr == NULL) {
    if (uv_loop_init(&default_loop_struct))
      return NULL; /* init failed; leave the pointer unset for a retry */
    default_loop_ptr = &default_loop_struct;
  }
  return default_loop_ptr;
}
|
pushq %rbx
movq 0x24d46b(%rip), %rbx # 0x86b8e8
testq %rbx, %rbx
jne 0x61e4a0
leaq 0x24d467(%rip), %rbx # 0x86b8f0
movq %rbx, %rdi
callq 0x623ff0
testl %eax, %eax
je 0x61e499
xorl %ebx, %ebx
jmp 0x61e4a0
movq %rbx, 0x24d448(%rip) # 0x86b8e8
movq %rbx, %rax
popq %rbx
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_loop_new
|
/* Heap-allocate and initialize a fresh event loop.  Returns NULL when
 * either the allocation or uv_loop_init() fails; in the latter case the
 * partially constructed loop is freed before returning. */
uv_loop_t* uv_loop_new(void) {
  uv_loop_t* loop = uv__malloc(sizeof(*loop));
  if (loop != NULL && uv_loop_init(loop) != 0) {
    uv__free(loop); /* don't leak the shell on init failure */
    loop = NULL;
  }
  return loop;
}
|
pushq %rbp
pushq %r14
pushq %rbx
movl $0x350, %edi # imm = 0x350
callq *0x2432cc(%rip) # 0x861780
testq %rax, %rax
je 0x61e4de
movq %rax, %rbx
movq %rax, %rdi
callq 0x623ff0
testl %eax, %eax
je 0x61e4e0
callq 0x415e0
movq %rax, %r14
movl (%rax), %ebp
movq %rbx, %rdi
callq *0x2432bd(%rip) # 0x861798
movl %ebp, (%r14)
xorl %ebx, %ebx
movq %rbx, %rax
popq %rbx
popq %r14
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_free_cpu_info
|
/* Release an array returned by uv_cpu_info(): first each entry's
 * heap-allocated model string, then the array itself. */
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
  int idx = 0;
  while (idx < count) {
    uv__free(cpu_infos[idx].model);
    idx++;
  }
  uv__free(cpu_infos);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl %esi, %ebp
movq %rdi, %rbx
callq 0x415e0
movq %rax, %r14
testl %ebp, %ebp
jle 0x61e676
movl %ebp, %eax
movl (%r14), %ebp
imulq $0x38, %rax, %r15
xorl %r12d, %r12d
movq (%rbx,%r12), %rdi
callq *0x24312e(%rip) # 0x861798
movl %ebp, (%r14)
addq $0x38, %r12
cmpq %r12, %r15
jne 0x61e660
movl (%r14), %ebp
movq %rbx, %rdi
callq *0x243116(%rip) # 0x861798
movl %ebp, (%r14)
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_library_shutdown
|
__attribute__((destructor))
#endif
/* One-shot global teardown: releases the process title buffer, signal
 * machinery, and the thread pool.  Guarded by a relaxed-atomic flag so
 * repeated calls (or the destructor firing after an explicit call) are
 * no-ops.  NOTE(review): relaxed ordering assumes no concurrent callers
 * race the cleanup itself -- confirm intended usage. */
void uv_library_shutdown(void) {
  static int was_shutdown;
  if (uv__load_relaxed(&was_shutdown))
    return;
  uv__process_title_cleanup();
  uv__signal_cleanup();
#ifdef __MVS__
  /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
  uv__os390_cleanup();
#else
  uv__threadpool_cleanup();
#endif
  uv__store_relaxed(&was_shutdown, 1);
}
|
cmpb $0x0, 0x24d5ab(%rip) # 0x86bc40
jne 0x61e6b2
pushq %rax
callq 0x62d7de
callq 0x625940
callq 0x62e510
movb $0x1, 0x24d592(%rip) # 0x86bc40
addq $0x8, %rsp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/uv-common.c
|
uv_async_init
|
/* Initialize an async (cross-thread wakeup) handle.  Ensures the loop's
 * shared async I/O watcher is running, then links the handle into the
 * loop's async list and marks it active.  Returns uv__async_start()'s
 * error on failure, otherwise 0. */
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
  int err;
  /* Lazily set up the loop-wide wakeup fd/watcher shared by all asyncs. */
  err = uv__async_start(loop);
  if (err)
    return err;
  uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
  handle->async_cb = async_cb;
  handle->pending = 0; /* no send outstanding yet */
  QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
  uv__handle_start(handle);
  return 0;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdx, %r15
movq %rsi, %rbx
movq %rdi, %r14
callq 0x61e804
testl %eax, %eax
jne 0x61e7fe
movq %r14, 0x8(%rbx)
movl $0x1, 0x10(%rbx)
leaq 0x10(%r14), %rcx
leaq 0x20(%rbx), %rdx
movq %rcx, 0x20(%rbx)
movq 0x18(%r14), %rcx
movq %rcx, 0x28(%rbx)
movq %rdx, (%rcx)
movq %rdx, 0x18(%r14)
movq $0x0, 0x50(%rbx)
movq %r15, 0x60(%rbx)
movl $0x0, 0x78(%rbx)
leaq 0x1b0(%r14), %rcx
leaq 0x68(%rbx), %rdx
movq %rcx, 0x68(%rbx)
movq 0x1b8(%r14), %rcx
movq %rcx, 0x70(%rbx)
movq %rdx, (%rcx)
movq %rdx, 0x1b8(%r14)
movl $0xc, 0x58(%rbx)
movq 0x8(%rbx), %rcx
incl 0x8(%rcx)
popq %rbx
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/async.c
|
uv__async_start
|
/* Create the loop's wakeup channel (eventfd on Linux, a pipe elsewhere)
 * and start polling its read end.  Idempotent: returns 0 immediately if
 * the watcher fd is already set.  On Linux the single eventfd serves
 * both ends, so async_wfd is left as -1. */
static int uv__async_start(uv_loop_t* loop) {
  int pipefd[2];
  int err;
  if (loop->async_io_watcher.fd != -1)
    return 0;
#ifdef __linux__
  err = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (err < 0)
    return UV__ERR(errno);
  pipefd[0] = err;
  pipefd[1] = -1;
#else
  err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
  if (err < 0)
    return err;
#endif
  uv__io_init(&loop->async_io_watcher, uv__async_io, pipefd[0]);
  uv__io_start(loop, &loop->async_io_watcher, POLLIN);
  loop->async_wfd = pipefd[1];
  return 0;
}
|
pushq %r15
pushq %r14
pushq %rbx
xorl %ebx, %ebx
cmpl $-0x1, 0x1f8(%rdi)
jne 0x61e867
movq %rdi, %r14
xorl %ebx, %ebx
xorl %edi, %edi
movl $0x80800, %esi # imm = 0x80800
callq 0x41d30
testl %eax, %eax
js 0x61e85e
leaq 0x1c8(%r14), %r15
leaq 0x1ca(%rip), %rsi # 0x61ea01
movq %r15, %rdi
movl %eax, %edx
callq 0x61f578
movq %r14, %rdi
movq %r15, %rsi
movl $0x1, %edx
callq 0x61f5e9
movl $0xffffffff, 0x200(%r14) # imm = 0xFFFFFFFF
jmp 0x61e867
callq 0x415e0
xorl %ebx, %ebx
subl (%rax), %ebx
movl %ebx, %eax
popq %rbx
popq %r14
popq %r15
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/async.c
|
uv_async_send
|
/* Wake the loop that owns this async handle; callable from any thread.
 * handle->pending acts as a tiny state machine (0 -> 1 -> 2, per the
 * cmpxchg transitions below) so concurrent senders collapse into one
 * wakeup and only one thread writes to the wakeup fd. */
int uv_async_send(uv_async_t* handle) {
  /* Do a cheap read first. */
  if (ACCESS_ONCE(int, handle->pending) != 0)
    return 0;
  /* Tell the other thread we're busy with the handle. */
  if (cmpxchgi(&handle->pending, 0, 1) != 0)
    return 0; /* another sender won the race; its wakeup suffices */
  /* Wake up the other thread's event loop. */
  uv__async_send(handle->loop);
  /* Tell the other thread we're done. */
  if (cmpxchgi(&handle->pending, 1, 2) != 1)
    abort(); /* state machine violated: memory corruption or misuse */
  return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
cmpl $0x0, 0x78(%rdi)
jne 0x61e907
movq %rdi, %rbx
movl $0x1, %ecx
xorl %eax, %eax
lock
cmpxchgl %ecx, 0x78(%rdi)
testl %eax, %eax
jne 0x61e907
movq 0x8(%rbx), %rax
movl 0x200(%rax), %ebp
cmpl $-0x1, %ebp
je 0x61e8b1
leaq 0xa07a0(%rip), %r15 # 0x6bf049
movl $0x1, %r14d
jmp 0x61e8c4
movl 0x1f8(%rax), %ebp
leaq 0xbb35a(%rip), %r15 # 0x6d9c18
movl $0x8, %r14d
movl %ebp, %edi
movq %r15, %rsi
movq %r14, %rdx
callq 0x41690
cmpl $-0x1, %eax
jne 0x61e8ec
callq 0x415e0
movl (%rax), %eax
cmpl $0x4, %eax
je 0x61e8c4
cmpl $0xb, %eax
je 0x61e8f3
callq 0x40a90
cltq
cmpq %r14, %rax
jne 0x61e8e7
movl $0x2, %ecx
movl $0x1, %eax
lock
cmpxchgl %ecx, 0x78(%rbx)
cmpl $0x1, %eax
jne 0x61e8e7
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/async.c
|
uv__async_stop
|
/* Tear down the loop's wakeup channel created by uv__async_start().
 * No-op if it was never started.  async_wfd equals the watcher fd on
 * platforms using a single eventfd, in which case it must not be closed
 * twice -- hence the inequality check. */
void uv__async_stop(uv_loop_t* loop) {
  if (loop->async_io_watcher.fd == -1)
    return;
  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }
  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;
}
|
movl 0x1f8(%rdi), %eax
cmpl $-0x1, %eax
je 0x61ea00
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
leaq 0x1c8(%rdi), %r14
movl 0x200(%rdi), %edi
cmpl $-0x1, %edi
je 0x61e9d4
cmpl %eax, %edi
je 0x61e9ca
callq 0x61f25b
movl $0xffffffff, 0x200(%rbx) # imm = 0xFFFFFFFF
movq %rbx, %rdi
movq %r14, %rsi
movl $0x1, %edx
callq 0x61f7a7
movl 0x1f8(%rbx), %edi
callq 0x61f25b
movl $0xffffffff, 0x1f8(%rbx) # imm = 0xFFFFFFFF
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/async.c
|
uv__nonblock_ioctl
|
/* Set or clear non-blocking mode on fd via the FIONBIO ioctl, retrying
 * when interrupted by a signal.  Returns 0 on success or the negated
 * errno on failure. */
int uv__nonblock_ioctl(int fd, int set) {
  int rc;
  for (;;) {
    rc = ioctl(fd, FIONBIO, &set);
    if (rc != -1 || errno != EINTR)
      break; /* success, or a real (non-EINTR) failure */
  }
  return rc ? UV__ERR(errno) : 0;
}
|
pushq %r14
pushq %rbx
pushq %rax
movl %edi, %ebx
leaq 0x4(%rsp), %r14
movl %esi, (%r14)
movl $0x5421, %esi # imm = 0x5421
movl %ebx, %edi
movq %r14, %rdx
xorl %eax, %eax
callq 0x3f3d0
cmpl $-0x1, %eax
jne 0x61f1fe
callq 0x415e0
cmpl $0x4, (%rax)
je 0x61f1dc
jmp 0x61f207
testl %eax, %eax
je 0x61f20e
callq 0x415e0
movq %rax, %rcx
xorl %eax, %eax
subl (%rcx), %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/core.c
|
uv__read_start
|
/* Begin delivering data from a TCP/pipe/TTY stream: records the user's
 * alloc/read callbacks, raises UV_HANDLE_READING, and starts polling
 * the stream fd for POLLIN.  Always returns 0; invalid handle types or
 * a missing alloc_cb trip asserts instead. */
int uv__read_start(uv_stream_t* stream,
                   uv_alloc_cb alloc_cb,
                   uv_read_cb read_cb) {
  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
      stream->type == UV_TTY);

  /* The UV_HANDLE_READING flag is irrelevant of the state of the stream - it
   * just expresses the desired state of the user. */
  stream->flags |= UV_HANDLE_READING;
  /* Clear any previous end-of-stream indication so reads resume. */
  stream->flags &= ~UV_HANDLE_READ_EOF;

  /* TODO: try to do the read inline? */
  assert(uv__stream_fd(stream) >= 0);
  assert(alloc_cb);

  stream->read_cb = read_cb;
  stream->alloc_cb = alloc_cb;

  uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
  uv__handle_start(stream);
  uv__stream_osx_interrupt_select(stream);

  return 0;
}
|
pushq %rbx
movl 0x10(%rdi), %eax
cmpl $0xe, %eax
ja 0x628078
movl $0x5080, %ecx # imm = 0x5080
btl %eax, %ecx
jae 0x628078
movq %rdi, %rbx
movl $0xffffe7ff, %eax # imm = 0xFFFFE7FF
andl 0x58(%rdi), %eax
orl $0x1000, %eax # imm = 0x1000
movl %eax, 0x58(%rdi)
cmpl $0x0, 0xb8(%rdi)
js 0x62803a
testq %rsi, %rsi
je 0x628059
leaq 0x88(%rbx), %rax
movq %rdx, 0x70(%rbx)
movq %rsi, 0x68(%rbx)
movq 0x8(%rbx), %rdi
movq %rax, %rsi
movl $0x1, %edx
callq 0x61f5e9
movl 0x58(%rbx), %eax
testb $0x4, %al
jne 0x628036
movl %eax, %ecx
orl $0x4, %ecx
movl %ecx, 0x58(%rbx)
testb $0x8, %al
je 0x628036
movq 0x8(%rbx), %rax
incl 0x8(%rax)
xorl %eax, %eax
popq %rbx
retq
leaq 0xb306e(%rip), %rdi # 0x6db0af
leaq 0xb2d94(%rip), %rsi # 0x6daddc
leaq 0xb3161(%rip), %rcx # 0x6db1b0
movl $0x5e9, %edx # imm = 0x5E9
callq 0x3f4b0
leaq 0xb318b(%rip), %rdi # 0x6db1eb
leaq 0xb2d75(%rip), %rsi # 0x6daddc
leaq 0xb3142(%rip), %rcx # 0x6db1b0
movl $0x5ea, %edx # imm = 0x5EA
callq 0x3f4b0
leaq 0xb30df(%rip), %rdi # 0x6db15e
leaq 0xb2d56(%rip), %rsi # 0x6daddc
leaq 0xb3123(%rip), %rcx # 0x6db1b0
movl $0x5e1, %edx # imm = 0x5E1
callq 0x3f4b0
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/stream.c
|
uv__stream_close
|
/* Final teardown of a stream handle: stops I/O and reading, clears the
 * readable/writable flags, and closes every fd the stream owns -- its
 * own fd (except stdio), any accepted-but-unclaimed fd, and all fds
 * queued for in-band passing.  On macOS the select helper thread is
 * shut down first. */
void uv__stream_close(uv_stream_t* handle) {
  unsigned int i;
  uv__stream_queued_fds_t* queued_fds;

#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
  /* Terminate select loop first */
  if (handle->select != NULL) {
    uv__stream_select_t* s;

    s = handle->select;

    uv_sem_post(&s->close_sem);
    uv_sem_post(&s->async_sem);
    uv__stream_osx_interrupt_select(handle);
    uv_thread_join(&s->thread);
    uv_sem_destroy(&s->close_sem);
    uv_sem_destroy(&s->async_sem);
    uv__close(s->fake_fd);
    uv__close(s->int_fd);
    uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);

    handle->select = NULL;
  }
#endif /* defined(__APPLE__) */

  uv__io_close(handle->loop, &handle->io_watcher);
  uv_read_stop(handle);
  uv__handle_stop(handle);
  handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);

  if (handle->io_watcher.fd != -1) {
    /* Don't close stdio file descriptors.  Nothing good comes from it. */
    if (handle->io_watcher.fd > STDERR_FILENO)
      uv__close(handle->io_watcher.fd);
    handle->io_watcher.fd = -1;
  }

  if (handle->accepted_fd != -1) {
    uv__close(handle->accepted_fd);
    handle->accepted_fd = -1;
  }

  /* Close all queued fds */
  if (handle->queued_fds != NULL) {
    queued_fds = handle->queued_fds;
    for (i = 0; i < queued_fds->offset; i++)
      uv__close(queued_fds->fds[i]);
    uv__free(handle->queued_fds);
    handle->queued_fds = NULL;
  }

  /* The watcher must be fully deactivated by now. */
  assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdi, %r14
movq 0x8(%rdi), %rdi
leaq 0x88(%r14), %rbx
movq %rbx, %rsi
callq 0x61f8bd
movq %r14, %rdi
callq 0x628097
movl 0x58(%r14), %ecx
testb $0x4, %cl
jne 0x62812d
movl %ecx, %eax
jmp 0x62813e
movl %ecx, %eax
andl $-0x5, %eax
testb $0x8, %cl
je 0x62813e
movq 0x8(%r14), %rcx
decl 0x8(%rcx)
andl $0xffff3fff, %eax # imm = 0xFFFF3FFF
movl %eax, 0x58(%r14)
movl 0xb8(%r14), %edi
cmpl $-0x1, %edi
je 0x628168
cmpl $0x3, %edi
jl 0x62815d
callq 0x61f25b
movl $0xffffffff, 0xb8(%r14) # imm = 0xFFFFFFFF
movl 0xec(%r14), %edi
cmpl $-0x1, %edi
je 0x628184
callq 0x61f25b
movl $0xffffffff, 0xec(%r14) # imm = 0xFFFFFFFF
movq 0xf0(%r14), %r15
testq %r15, %r15
je 0x6281ca
cmpl $0x0, 0x4(%r15)
je 0x6281b7
xorl %r12d, %r12d
movl 0x8(%r15,%r12,4), %edi
callq 0x61f25b
incq %r12
movl 0x4(%r15), %eax
cmpq %rax, %r12
jb 0x62819a
movq 0xf0(%r14), %r15
movq %r15, %rdi
callq 0x61c9db
movq $0x0, 0xf0(%r14)
movq %rbx, %rdi
movl $0x5, %esi
callq 0x61f927
testl %eax, %eax
jne 0x6281e7
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
leaq 0xb3006(%rip), %rdi # 0x6db1f4
leaq 0xb2be7(%rip), %rsi # 0x6daddc
leaq 0xb302e(%rip), %rcx # 0x6db22a
movl $0x654, %edx # imm = 0x654
callq 0x3f4b0
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/stream.c
|
uv__write_req_finish
|
/* Move a completed write request from the stream's write_queue to its
 * write_completed_queue and schedule the stream's watcher so the user
 * callback runs on the next loop tick. */
static void uv__write_req_finish(uv_write_t* req) {
  uv_stream_t* stream = req->handle;

  /* Pop the req off tcp->write_queue. */
  QUEUE_REMOVE(&req->queue);

  /* Only free when there was no error. On error, we touch up write_queue_size
   * right before making the callback. The reason we don't do that right away
   * is that a write_queue_size > 0 is our only way to signal to the user that
   * they should stop writing - which they should if we got an error. Something
   * to revisit in future revisions of the libuv API.
   */
  if (req->error == 0) {
    /* Free only heap-allocated buffer arrays, not the inline bufsml. */
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);
    req->bufs = NULL;
  }

  /* Add it to the write_completed_queue where it will have its
   * callback called in the near future.
   */
  QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
  uv__io_feed(stream->loop, &stream->io_watcher);
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %r14
movq 0x50(%rdi), %rbx
movq 0x58(%rdi), %rax
leaq 0x58(%rdi), %r15
movq 0x60(%rdi), %rcx
movq %rax, (%rcx)
movq 0x60(%rdi), %rcx
movq %rcx, 0x8(%rax)
cmpl $0x0, 0x7c(%rdi)
jne 0x6282c5
movq 0x70(%r14), %rdi
leaq 0x80(%r14), %rax
cmpq %rax, %rdi
je 0x6282bd
callq 0x61c9db
movq $0x0, 0x70(%r14)
leaq 0xd0(%rbx), %rax
movq %rax, 0x58(%r14)
movq 0xd8(%rbx), %rax
movq %rax, 0x60(%r14)
movq %r15, (%rax)
movq %r15, 0xd8(%rbx)
movq 0x8(%rbx), %rdi
addq $0x88, %rbx
movq %rbx, %rsi
popq %rbx
popq %r14
popq %r15
jmp 0x61f904
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/stream.c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.