Dataset schema (column / dtype / value range):

    repo              stringlengths   1 - 152
    file              stringlengths   14 - 221
    code              stringlengths   501 - 25k
    file_length       int64           501 - 25k
    avg_line_length   float64         20 - 99.5
    max_line_length   int64           21 - 134
    extension_type    stringclasses   2 values

repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/badblocks_ndctl.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * badblocks_ndctl.c -- implementation of DIMMs API based on the ndctl library */ #define _GNU_SOURCE #include <sys/types.h> #include <libgen.h> #include <limits.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <sys/sysmacros.h> #include <fcntl.h> #include <ndctl/libndctl.h> #include <ndctl/libdaxctl.h> #include "libpmem2.h" #include "pmem2_utils.h" #include "source.h" #include "region_namespace_ndctl.h" #include "file.h" #include "out.h" #include "badblocks.h" #include "set_badblocks.h" #include "extent.h" typedef int pmem2_badblock_next_type( struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb); typedef void *pmem2_badblock_get_next_type( struct pmem2_badblock_context *bbctx); struct pmem2_badblock_context { /* file descriptor */ int fd; /* pmem2 file type */ enum pmem2_file_type file_type; /* ndctl context */ struct ndctl_ctx *ctx; /* * Function pointer to: * - pmem2_badblock_next_namespace() or * - pmem2_badblock_next_region() */ pmem2_badblock_next_type *pmem2_badblock_next_func; /* * Function pointer to: * - pmem2_namespace_get_first_badblock() or * - pmem2_namespace_get_next_badblock() or * - pmem2_region_get_first_badblock() or * - pmem2_region_get_next_badblock() */ pmem2_badblock_get_next_type *pmem2_badblock_get_next_func; /* needed only by the ndctl namespace badblock iterator */ struct ndctl_namespace *ndns; /* needed only by the ndctl region badblock iterator */ struct { struct ndctl_bus *bus; struct ndctl_region *region; unsigned long long ns_res; /* address of the namespace */ unsigned long long ns_beg; /* the begining of the namespace */ unsigned long long ns_end; /* the end of the namespace */ } rgn; /* file's extents */ struct extents *exts; unsigned first_extent; struct pmem2_badblock last_bb; }; /* forward declarations */ static int pmem2_badblock_next_namespace( struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb); static int pmem2_badblock_next_region( struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb); static void *pmem2_namespace_get_first_badblock( struct pmem2_badblock_context *bbctx); static void *pmem2_region_get_first_badblock( struct pmem2_badblock_context *bbctx); /* * badblocks_get_namespace_bounds -- (internal) returns the bounds * (offset and size) of the given namespace * relative to the beginning of its region */ static int badblocks_get_namespace_bounds(struct ndctl_region *region, struct ndctl_namespace *ndns, unsigned long long *ns_offset, unsigned long long *ns_size) { LOG(3, "region %p namespace %p ns_offset %p ns_size %p", region, ndns, ns_offset, ns_size); struct ndctl_pfn *pfn = ndctl_namespace_get_pfn(ndns); struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns); ASSERTne(ns_offset, NULL); ASSERTne(ns_size, NULL); if (pfn) { *ns_offset = ndctl_pfn_get_resource(pfn); if (*ns_offset == ULLONG_MAX) { ERR("(pfn) cannot read offset of the namespace"); return PMEM2_E_CANNOT_READ_BOUNDS; } *ns_size = ndctl_pfn_get_size(pfn); if (*ns_size == ULLONG_MAX) { ERR("(pfn) cannot read size of the namespace"); return PMEM2_E_CANNOT_READ_BOUNDS; } LOG(10, "(pfn) ns_offset 0x%llx ns_size %llu", *ns_offset, *ns_size); } else if (dax) { *ns_offset = ndctl_dax_get_resource(dax); if (*ns_offset == ULLONG_MAX) { ERR("(dax) cannot read offset of the namespace"); return PMEM2_E_CANNOT_READ_BOUNDS; } *ns_size = ndctl_dax_get_size(dax); if (*ns_size == ULLONG_MAX) { ERR("(dax) cannot read size of the namespace"); return 
PMEM2_E_CANNOT_READ_BOUNDS; } LOG(10, "(dax) ns_offset 0x%llx ns_size %llu", *ns_offset, *ns_size); } else { /* raw or btt */ *ns_offset = ndctl_namespace_get_resource(ndns); if (*ns_offset == ULLONG_MAX) { ERR("(raw/btt) cannot read offset of the namespace"); return PMEM2_E_CANNOT_READ_BOUNDS; } *ns_size = ndctl_namespace_get_size(ndns); if (*ns_size == ULLONG_MAX) { ERR("(raw/btt) cannot read size of the namespace"); return PMEM2_E_CANNOT_READ_BOUNDS; } LOG(10, "(raw/btt) ns_offset 0x%llx ns_size %llu", *ns_offset, *ns_size); } unsigned long long region_offset = ndctl_region_get_resource(region); if (region_offset == ULLONG_MAX) { ERR("!cannot read offset of the region"); return PMEM2_E_ERRNO; } LOG(10, "region_offset 0x%llx", region_offset); *ns_offset -= region_offset; return 0; } /* * badblocks_devdax_clear_one_badblock -- (internal) clear one bad block * in the dax device */ static int badblocks_devdax_clear_one_badblock(struct ndctl_bus *bus, unsigned long long address, unsigned long long length) { LOG(3, "bus %p address 0x%llx length %llu (bytes)", bus, address, length); int ret; struct ndctl_cmd *cmd_ars_cap = ndctl_bus_cmd_new_ars_cap(bus, address, length); if (cmd_ars_cap == NULL) { ERR("ndctl_bus_cmd_new_ars_cap() failed (bus '%s')", ndctl_bus_get_provider(bus)); return PMEM2_E_ERRNO; } ret = ndctl_cmd_submit(cmd_ars_cap); if (ret) { ERR("ndctl_cmd_submit() failed (bus '%s')", ndctl_bus_get_provider(bus)); /* ndctl_cmd_submit() returns -errno */ goto out_ars_cap; } struct ndctl_range range; ret = ndctl_cmd_ars_cap_get_range(cmd_ars_cap, &range); if (ret) { ERR("ndctl_cmd_ars_cap_get_range() failed"); /* ndctl_cmd_ars_cap_get_range() returns -errno */ goto out_ars_cap; } struct ndctl_cmd *cmd_clear_error = ndctl_bus_cmd_new_clear_error( range.address, range.length, cmd_ars_cap); ret = ndctl_cmd_submit(cmd_clear_error); if (ret) { ERR("ndctl_cmd_submit() failed (bus '%s')", ndctl_bus_get_provider(bus)); /* ndctl_cmd_submit() returns -errno */ goto out_clear_error; } size_t cleared = ndctl_cmd_clear_error_get_cleared(cmd_clear_error); LOG(4, "cleared %zu out of %llu bad blocks", cleared, length); ASSERT(cleared <= length); if (cleared < length) { ERR("failed to clear %llu out of %llu bad blocks", length - cleared, length); errno = ENXIO; /* ndctl handles such error in this way */ ret = PMEM2_E_ERRNO; } else { ret = 0; } out_clear_error: ndctl_cmd_unref(cmd_clear_error); out_ars_cap: ndctl_cmd_unref(cmd_ars_cap); return ret; } /* * pmem2_badblock_context_new -- allocate and create a new bad block context */ int pmem2_badblock_context_new(const struct pmem2_source *src, struct pmem2_badblock_context **bbctx) { LOG(3, "src %p bbctx %p", src, bbctx); ASSERTne(bbctx, NULL); if (src->type == PMEM2_SOURCE_ANON) { ERR("Anonymous source does not support bad blocks"); return PMEM2_E_NOSUPP; } ASSERTeq(src->type, PMEM2_SOURCE_FD); struct ndctl_ctx *ctx; struct ndctl_region *region; struct ndctl_namespace *ndns; struct pmem2_badblock_context *tbbctx = NULL; enum pmem2_file_type pmem2_type; int ret = PMEM2_E_UNKNOWN; *bbctx = NULL; errno = ndctl_new(&ctx) * (-1); if (errno) { ERR("!ndctl_new"); return PMEM2_E_ERRNO; } pmem2_type = src->value.ftype; ret = pmem2_region_namespace(ctx, src, &region, &ndns); if (ret) { LOG(1, "getting region and namespace failed"); goto exit_ndctl_unref; } tbbctx = pmem2_zalloc(sizeof(struct pmem2_badblock_context), &ret); if (ret) goto exit_ndctl_unref; tbbctx->fd = src->value.fd; tbbctx->file_type = pmem2_type; tbbctx->ctx = ctx; if (region == NULL || ndns == 
NULL) { /* did not found any matching device */ *bbctx = tbbctx; return 0; } if (ndctl_namespace_get_mode(ndns) == NDCTL_NS_MODE_FSDAX) { tbbctx->ndns = ndns; tbbctx->pmem2_badblock_next_func = pmem2_badblock_next_namespace; tbbctx->pmem2_badblock_get_next_func = pmem2_namespace_get_first_badblock; } else { unsigned long long ns_beg, ns_size, ns_end; ret = badblocks_get_namespace_bounds( region, ndns, &ns_beg, &ns_size); if (ret) { LOG(1, "cannot read namespace's bounds"); goto error_free_all; } ns_end = ns_beg + ns_size - 1; LOG(10, "namespace: begin %llu, end %llu size %llu (in 512B sectors)", B2SEC(ns_beg), B2SEC(ns_end + 1) - 1, B2SEC(ns_size)); tbbctx->rgn.bus = ndctl_region_get_bus(region); tbbctx->rgn.region = region; tbbctx->rgn.ns_beg = ns_beg; tbbctx->rgn.ns_end = ns_end; tbbctx->rgn.ns_res = ns_beg + ndctl_region_get_resource(region); tbbctx->pmem2_badblock_next_func = pmem2_badblock_next_region; tbbctx->pmem2_badblock_get_next_func = pmem2_region_get_first_badblock; } if (pmem2_type == PMEM2_FTYPE_REG) { /* only regular files have extents */ ret = pmem2_extents_create_get(src->value.fd, &tbbctx->exts); if (ret) { LOG(1, "getting extents of fd %i failed", src->value.fd); goto error_free_all; } } /* set the context */ *bbctx = tbbctx; return 0; error_free_all: pmem2_extents_destroy(&tbbctx->exts); Free(tbbctx); exit_ndctl_unref: ndctl_unref(ctx); return ret; } /* * pmem2_badblock_context_delete -- delete and free the bad block context */ void pmem2_badblock_context_delete(struct pmem2_badblock_context **bbctx) { LOG(3, "bbctx %p", bbctx); ASSERTne(bbctx, NULL); if (*bbctx == NULL) return; struct pmem2_badblock_context *tbbctx = *bbctx; pmem2_extents_destroy(&tbbctx->exts); ndctl_unref(tbbctx->ctx); Free(tbbctx); *bbctx = NULL; } /* * pmem2_namespace_get_next_badblock -- (internal) wrapper for * ndctl_namespace_get_next_badblock */ static void * pmem2_namespace_get_next_badblock(struct pmem2_badblock_context *bbctx) { LOG(3, "bbctx %p", bbctx); return ndctl_namespace_get_next_badblock(bbctx->ndns); } /* * pmem2_namespace_get_first_badblock -- (internal) wrapper for * ndctl_namespace_get_first_badblock */ static void * pmem2_namespace_get_first_badblock(struct pmem2_badblock_context *bbctx) { LOG(3, "bbctx %p", bbctx); bbctx->pmem2_badblock_get_next_func = pmem2_namespace_get_next_badblock; return ndctl_namespace_get_first_badblock(bbctx->ndns); } /* * pmem2_region_get_next_badblock -- (internal) wrapper for * ndctl_region_get_next_badblock */ static void * pmem2_region_get_next_badblock(struct pmem2_badblock_context *bbctx) { LOG(3, "bbctx %p", bbctx); return ndctl_region_get_next_badblock(bbctx->rgn.region); } /* * pmem2_region_get_first_badblock -- (internal) wrapper for * ndctl_region_get_first_badblock */ static void * pmem2_region_get_first_badblock(struct pmem2_badblock_context *bbctx) { LOG(3, "bbctx %p", bbctx); bbctx->pmem2_badblock_get_next_func = pmem2_region_get_next_badblock; return ndctl_region_get_first_badblock(bbctx->rgn.region); } /* * pmem2_badblock_next_namespace -- (internal) version of pmem2_badblock_next() * called for ndctl with namespace badblock * iterator * * This function works only for fsdax, but does not require any special * permissions. 
*/ static int pmem2_badblock_next_namespace(struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb) { LOG(3, "bbctx %p bb %p", bbctx, bb); ASSERTne(bbctx, NULL); ASSERTne(bb, NULL); struct badblock *bbn; bbn = bbctx->pmem2_badblock_get_next_func(bbctx); if (bbn == NULL) return PMEM2_E_NO_BAD_BLOCK_FOUND; /* * libndctl returns offset and length of a bad block * both expressed in 512B sectors. Offset is relative * to the beginning of the namespace. */ bb->offset = SEC2B(bbn->offset); bb->length = SEC2B(bbn->len); return 0; } /* * pmem2_badblock_next_region -- (internal) version of pmem2_badblock_next() * called for ndctl with region badblock iterator * * This function works for all types of namespaces, but requires read access to * privileged device information. */ static int pmem2_badblock_next_region(struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb) { LOG(3, "bbctx %p bb %p", bbctx, bb); ASSERTne(bbctx, NULL); ASSERTne(bb, NULL); unsigned long long bb_beg, bb_end; unsigned long long beg, end; struct badblock *bbn; unsigned long long ns_beg = bbctx->rgn.ns_beg; unsigned long long ns_end = bbctx->rgn.ns_end; do { bbn = bbctx->pmem2_badblock_get_next_func(bbctx); if (bbn == NULL) return PMEM2_E_NO_BAD_BLOCK_FOUND; LOG(10, "region bad block: begin %llu end %llu length %u (in 512B sectors)", bbn->offset, bbn->offset + bbn->len - 1, bbn->len); /* * libndctl returns offset and length of a bad block * both expressed in 512B sectors. Offset is relative * to the beginning of the region. */ bb_beg = SEC2B(bbn->offset); bb_end = bb_beg + SEC2B(bbn->len) - 1; } while (bb_beg > ns_end || ns_beg > bb_end); beg = (bb_beg > ns_beg) ? bb_beg : ns_beg; end = (bb_end < ns_end) ? bb_end : ns_end; /* * Form a new bad block structure with offset and length * expressed in bytes and offset relative to the beginning * of the namespace. */ bb->offset = beg - ns_beg; bb->length = end - beg + 1; LOG(4, "namespace bad block: begin %llu end %llu length %llu (in 512B sectors)", B2SEC(beg - ns_beg), B2SEC(end - ns_beg), B2SEC(end - beg) + 1); return 0; } /* * pmem2_badblock_next -- get the next bad block */ int pmem2_badblock_next(struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb) { LOG(3, "bbctx %p bb %p", bbctx, bb); ASSERTne(bbctx, NULL); ASSERTne(bb, NULL); struct pmem2_badblock bbn; unsigned long long bb_beg; unsigned long long bb_end; unsigned long long bb_len; unsigned long long bb_off; unsigned long long ext_beg; unsigned long long ext_end; unsigned e; int ret; if (bbctx->rgn.region == NULL && bbctx->ndns == NULL) { /* did not found any matching device */ return PMEM2_E_NO_BAD_BLOCK_FOUND; } struct extents *exts = bbctx->exts; /* DAX devices have no extents */ if (!exts) { ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn); *bb = bbn; return ret; } /* * There is at least one extent. * Loop until: * 1) a bad block overlaps with an extent or * 2) there are no more bad blocks. */ int bb_overlaps_with_extent = 0; do { if (bbctx->last_bb.length) { /* * We have saved the last bad block to check it * with the next extent saved * in bbctx->first_extent. 
*/ ASSERTne(bbctx->first_extent, 0); bbn = bbctx->last_bb; bbctx->last_bb.offset = 0; bbctx->last_bb.length = 0; } else { ASSERTeq(bbctx->first_extent, 0); /* look for the next bad block */ ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn); if (ret) return ret; } bb_beg = bbn.offset; bb_end = bb_beg + bbn.length - 1; for (e = bbctx->first_extent; e < exts->extents_count; e++) { ext_beg = exts->extents[e].offset_physical; ext_end = ext_beg + exts->extents[e].length - 1; /* check if the bad block overlaps with the extent */ if (bb_beg <= ext_end && ext_beg <= bb_end) { /* bad block overlaps with the extent */ bb_overlaps_with_extent = 1; if (bb_end > ext_end && e + 1 < exts->extents_count) { /* * The bad block is longer than * the extent and there are * more extents. * Save the current bad block * to check it with the next extent. */ bbctx->first_extent = e + 1; bbctx->last_bb = bbn; } else { /* * All extents were checked * with the current bad block. */ bbctx->first_extent = 0; bbctx->last_bb.length = 0; bbctx->last_bb.offset = 0; } break; } } /* check all extents with the next bad block */ if (bb_overlaps_with_extent == 0) { bbctx->first_extent = 0; bbctx->last_bb.length = 0; bbctx->last_bb.offset = 0; } } while (bb_overlaps_with_extent == 0); /* bad block overlaps with an extent */ bb_beg = (bb_beg > ext_beg) ? bb_beg : ext_beg; bb_end = (bb_end < ext_end) ? bb_end : ext_end; bb_len = bb_end - bb_beg + 1; bb_off = bb_beg + exts->extents[e].offset_logical - exts->extents[e].offset_physical; LOG(10, "bad block found: physical offset: %llu, length: %llu", bb_beg, bb_len); /* make sure the offset is block-aligned */ unsigned long long not_block_aligned = bb_off & (exts->blksize - 1); if (not_block_aligned) { bb_off -= not_block_aligned; bb_len += not_block_aligned; } /* make sure the length is block-aligned */ bb_len = ALIGN_UP(bb_len, exts->blksize); LOG(4, "bad block found: logical offset: %llu, length: %llu", bb_off, bb_len); /* * Return the bad block with offset and length * expressed in bytes and offset relative * to the beginning of the file. 
*/ bb->offset = bb_off; bb->length = bb_len; return 0; } /* * pmem2_badblock_clear_fsdax -- (internal) clear one bad block * in a FSDAX device */ static int pmem2_badblock_clear_fsdax(int fd, const struct pmem2_badblock *bb) { LOG(3, "fd %i badblock %p", fd, bb); ASSERTne(bb, NULL); LOG(10, "clearing a bad block: fd %i logical offset %zu length %zu (in 512B sectors)", fd, B2SEC(bb->offset), B2SEC(bb->length)); /* fallocate() takes offset as the off_t type */ if (bb->offset > (size_t)INT64_MAX) { ERR("bad block's offset is greater than INT64_MAX"); return PMEM2_E_OFFSET_OUT_OF_RANGE; } /* fallocate() takes length as the off_t type */ if (bb->length > (size_t)INT64_MAX) { ERR("bad block's length is greater than INT64_MAX"); return PMEM2_E_LENGTH_OUT_OF_RANGE; } off_t offset = (off_t)bb->offset; off_t length = (off_t)bb->length; /* deallocate bad blocks */ if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, length)) { ERR("!fallocate"); return PMEM2_E_ERRNO; } /* allocate new blocks */ if (fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, length)) { ERR("!fallocate"); return PMEM2_E_ERRNO; } return 0; } /* * pmem2_badblock_clear_devdax -- (internal) clear one bad block * in a DAX device */ static int pmem2_badblock_clear_devdax(const struct pmem2_badblock_context *bbctx, const struct pmem2_badblock *bb) { LOG(3, "bbctx %p bb %p", bbctx, bb); ASSERTne(bb, NULL); ASSERTne(bbctx, NULL); ASSERTne(bbctx->rgn.bus, NULL); ASSERTne(bbctx->rgn.ns_res, 0); LOG(4, "clearing a bad block: offset %zu length %zu (in 512B sectors)", B2SEC(bb->offset), B2SEC(bb->length)); int ret = badblocks_devdax_clear_one_badblock(bbctx->rgn.bus, bb->offset + bbctx->rgn.ns_res, bb->length); if (ret) { LOG(1, "failed to clear a bad block: offset %zu length %zu (in 512B sectors)", B2SEC(bb->offset), B2SEC(bb->length)); return ret; } return 0; } /* * pmem2_badblock_clear -- clear one bad block */ int pmem2_badblock_clear(struct pmem2_badblock_context *bbctx, const struct pmem2_badblock *bb) { LOG(3, "bbctx %p badblock %p", bbctx, bb); ASSERTne(bbctx, NULL); ASSERTne(bb, NULL); if (bbctx->file_type == PMEM2_FTYPE_DEVDAX) return pmem2_badblock_clear_devdax(bbctx, bb); ASSERTeq(bbctx->file_type, PMEM2_FTYPE_REG); return pmem2_badblock_clear_fsdax(bbctx->fd, bb); }
file_length: 19,316 | avg_line_length: 24.218016 | max_line_length: 80 | extension_type: c
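A minimal usage sketch for the bad-block API implemented in the file above, assuming the public libpmem2.h header from this tree and a Linux build with ndctl support; pmem2_source_from_fd()/pmem2_source_delete() come from the library's source API, argument order follows the file above, and error handling is abbreviated. This is an illustration, not part of the dataset record.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <libpmem2.h>

/* iterate over the bad blocks of a pmem-backed file and try to clear them */
static int
clear_badblocks(const char *path)
{
	int fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	struct pmem2_source *src;
	if (pmem2_source_from_fd(&src, fd))
		goto err_close;

	struct pmem2_badblock_context *bbctx;
	if (pmem2_badblock_context_new(src, &bbctx))
		goto err_src;

	struct pmem2_badblock bb;
	/* pmem2_badblock_next() stops with PMEM2_E_NO_BAD_BLOCK_FOUND */
	while (pmem2_badblock_next(bbctx, &bb) == 0) {
		printf("bad block: offset %zu length %zu\n",
				bb.offset, bb.length);
		if (pmem2_badblock_clear(bbctx, &bb))
			fprintf(stderr, "clearing failed\n");
	}

	pmem2_badblock_context_delete(&bbctx);
	pmem2_source_delete(&src);
	close(fd);
	return 0;

err_src:
	pmem2_source_delete(&src);
err_close:
	close(fd);
	return -1;
}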
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/region_namespace_ndctl.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * region_namespace_ndctl.h -- internal definitions for libpmem2 * common ndctl functions */ #ifndef PMDK_REGION_NAMESPACE_NDCTL_H #define PMDK_REGION_NAMESPACE_NDCTL_H 1 #include "os.h" #ifdef __cplusplus extern "C" { #endif #define FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) \ ndctl_bus_foreach(ctx, bus) \ ndctl_region_foreach(bus, region) \ ndctl_namespace_foreach(region, ndns) int pmem2_region_namespace(struct ndctl_ctx *ctx, const struct pmem2_source *src, struct ndctl_region **pregion, struct ndctl_namespace **pndns); #ifdef __cplusplus } #endif #endif /* PMDK_REGION_NAMESPACE_NDCTL_H */
file_length: 754 | avg_line_length: 21.878788 | max_line_length: 64 | extension_type: h
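A short sketch of how the FOREACH_BUS_REGION_NAMESPACE() helper above is typically used to walk the ndctl topology; the devname/provider accessors are standard libndctl calls, and the example only compiles inside the libpmem2 tree where this header lives.

#include <stdio.h>
#include <ndctl/libndctl.h>
#include "region_namespace_ndctl.h"

static void
list_namespaces(void)
{
	struct ndctl_ctx *ctx;
	struct ndctl_bus *bus;
	struct ndctl_region *region;
	struct ndctl_namespace *ndns;

	if (ndctl_new(&ctx) != 0)
		return;

	/* expands into three nested foreach loops: bus -> region -> namespace */
	FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns)
		printf("%s / %s / %s\n",
				ndctl_bus_get_provider(bus),
				ndctl_region_get_devname(region),
				ndctl_namespace_get_devname(ndns));

	ndctl_unref(ctx);
}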
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/vm_reservation.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * vm_reservation.c -- implementation of virtual memory allocation API */ #include "libpmem2.h" /* * pmem2_vm_reservation_new -- creates new virtual memory reservation */ int pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv, size_t size, void *address) { return PMEM2_E_NOSUPP; } /* * pmem2_vm_reservation_delete -- deletes reservation bound to * structure pmem2_vm_reservation */ int pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv) { return PMEM2_E_NOSUPP; }
file_length: 614 | avg_line_length: 20.206897 | max_line_length: 70 | extension_type: c
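Since this build only stubs the reservation API, callers have to be prepared for PMEM2_E_NOSUPP. A minimal, hypothetical caller (argument order follows the declaration above):

#include <stddef.h>
#include <stdio.h>
#include <libpmem2.h>

static void
try_reserve(size_t size)
{
	struct pmem2_vm_reservation *rsv;

	/* on this platform the call always reports lack of support */
	int ret = pmem2_vm_reservation_new(&rsv, size, NULL);
	if (ret == PMEM2_E_NOSUPP) {
		fprintf(stderr, "vm reservations not supported here\n");
		return;
	}

	/* ... map into the reservation ... */
	pmem2_vm_reservation_delete(&rsv);
}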
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/usc_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * usc_windows.c -- pmem2 usc function for windows */ #include "alloc.h" #include "source.h" #include "out.h" #include "libpmem2.h" #include "pmem2_utils.h" #define GUID_SIZE sizeof("XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX") #define VOLUME_PATH_SIZE sizeof("\\\\?\\Volume{}") + (GUID_SIZE - 2 /* \0 */) /* * get_volume_handle -- returns volume handle */ static int get_volume_handle(HANDLE handle, HANDLE *volume_handle) { wchar_t *volume; wchar_t tmp[10]; DWORD len = GetFinalPathNameByHandleW(handle, tmp, 10, VOLUME_NAME_GUID); if (len == 0) { ERR("!!GetFinalPathNameByHandleW"); return pmem2_lasterror_to_err(); } len *= sizeof(wchar_t); int err; volume = pmem2_malloc(len, &err); if (volume == NULL) return err; if (!GetFinalPathNameByHandleW(handle, volume, len, VOLUME_NAME_GUID)) { Free(volume); ERR("!!GetFinalPathNameByHandleW"); return pmem2_lasterror_to_err(); } ASSERTeq(volume[VOLUME_PATH_SIZE], '\\'); volume[VOLUME_PATH_SIZE] = '\0'; *volume_handle = CreateFileW(volume, /* path to the file */ /* request access to send ioctl to the file */ FILE_READ_ATTRIBUTES, /* do not block access to the file */ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL, /* security attributes */ OPEN_EXISTING, /* open only if it exists */ FILE_ATTRIBUTE_NORMAL, /* no attributes */ NULL); /* used only for new files */ Free(volume); if (*volume_handle == INVALID_HANDLE_VALUE) { ERR("!!CreateFileW"); return pmem2_lasterror_to_err(); } return 0; } static int get_device_guid(HANDLE handle, GUID *guid) { HANDLE vHandle; int ret = get_volume_handle(handle, &vHandle); if (vHandle == INVALID_HANDLE_VALUE) return ret; STORAGE_DEVICE_NUMBER_EX sdn; sdn.DeviceNumber = -1; DWORD dwBytesReturned = 0; if (!DeviceIoControl(vHandle, IOCTL_STORAGE_GET_DEVICE_NUMBER_EX, NULL, 0, &sdn, sizeof(sdn), &dwBytesReturned, NULL)) { /* * IOCTL_STORAGE_GET_DEVICE_NUMBER_EX is not supported * on this server */ ERR( "Getting device id (IOCTL_STORAGE_GET_DEVICE_NUMBER_EX) is not supported on this system"); CloseHandle(vHandle); return PMEM2_E_NOSUPP; } *guid = sdn.DeviceGuid; CloseHandle(vHandle); return 0; } int pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id, size_t *len) { if (src->type == PMEM2_SOURCE_ANON) { ERR("Anonymous source does not have device id"); return PMEM2_E_NOSUPP; } ASSERTeq(src->type, PMEM2_SOURCE_HANDLE); if (id == NULL) { *len = GUID_SIZE * sizeof(*id); return 0; } if (*len < GUID_SIZE * sizeof(*id)) { ERR("id buffer is to small"); return PMEM2_E_BUFFER_TOO_SMALL; } GUID guid; int ret = get_device_guid(src->value.handle, &guid); if (ret) return ret; _snwprintf(id, GUID_SIZE, L"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX", guid.Data1, guid.Data2, guid.Data3, guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]); return 0; } int pmem2_source_device_idU(const struct pmem2_source *src, char *id, size_t *len) { if (src->type == PMEM2_SOURCE_ANON) { ERR("Anonymous source does not have device id"); return PMEM2_E_NOSUPP; } ASSERTeq(src->type, PMEM2_SOURCE_HANDLE); if (id == NULL) { *len = GUID_SIZE * sizeof(*id); return 0; } if (*len < GUID_SIZE * sizeof(*id)) { ERR("id buffer is to small"); return PMEM2_E_BUFFER_TOO_SMALL; } GUID guid; int ret = get_device_guid(src->value.handle, &guid); if (ret) return ret; if (util_snprintf(id, GUID_SIZE, "%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX", guid.Data1, 
guid.Data2, guid.Data3, guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]) < 0) { ERR("!snprintf"); return PMEM2_E_ERRNO; } return 0; } int pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc) { LOG(3, "cfg %p, usc %p", src, usc); if (src->type == PMEM2_SOURCE_ANON) { ERR("Anonymous source does not support unsafe shutdown count"); return PMEM2_E_NOSUPP; } ASSERTeq(src->type, PMEM2_SOURCE_HANDLE); *usc = 0; HANDLE vHandle; int err = get_volume_handle(src->value.handle, &vHandle); if (vHandle == INVALID_HANDLE_VALUE) return err; STORAGE_PROPERTY_QUERY prop; DWORD dwSize; prop.PropertyId = StorageDeviceUnsafeShutdownCount; prop.QueryType = PropertyExistsQuery; prop.AdditionalParameters[0] = 0; STORAGE_DEVICE_UNSAFE_SHUTDOWN_COUNT ret; BOOL bResult = DeviceIoControl(vHandle, IOCTL_STORAGE_QUERY_PROPERTY, &prop, sizeof(prop), &ret, sizeof(ret), (LPDWORD)&dwSize, (LPOVERLAPPED)NULL); if (!bResult) { ERR( "Getting unsafe shutdown count is not supported on this system"); CloseHandle(vHandle); return PMEM2_E_NOSUPP; } prop.QueryType = PropertyStandardQuery; bResult = DeviceIoControl(vHandle, IOCTL_STORAGE_QUERY_PROPERTY, &prop, sizeof(prop), &ret, sizeof(ret), (LPDWORD)&dwSize, (LPOVERLAPPED)NULL); CloseHandle(vHandle); if (!bResult) { ERR("!!DeviceIoControl"); return pmem2_lasterror_to_err(); } *usc = ret.UnsafeShutdownCount; return 0; }
file_length: 5,261 | avg_line_length: 22.283186 | max_line_length: 93 | extension_type: c
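A sketch of querying the unsafe shutdown count through the source API above (Windows); pmem2_source_from_handle() is assumed from libpmem2's Windows build, and error paths are abbreviated:

#include <windows.h>
#include <stdint.h>
#include <stdio.h>
#include <libpmem2.h>

static void
print_usc(const wchar_t *path)
{
	HANDLE h = CreateFileW(path, GENERIC_READ,
			FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
			OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
	if (h == INVALID_HANDLE_VALUE)
		return;

	struct pmem2_source *src;
	if (pmem2_source_from_handle(&src, h) == 0) {
		uint64_t usc;
		if (pmem2_source_device_usc(src, &usc) == 0)
			printf("unsafe shutdown count: %llu\n",
					(unsigned long long)usc);
		pmem2_source_delete(&src);
	}

	CloseHandle(h);
}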
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/ravl_interval.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ravl_interval.c -- ravl_interval implementation */ #include "alloc.h" #include "map.h" #include "ravl_interval.h" #include "pmem2_utils.h" #include "sys_util.h" #include "os_thread.h" #include "ravl.h" /* * ravl_interval - structure representing two points * on the number line */ struct ravl_interval { struct ravl *tree; ravl_interval_min *get_min; ravl_interval_max *get_max; }; /* * ravl_interval_node - structure holding min, max functions and address */ struct ravl_interval_node { void *addr; ravl_interval_min *get_min; ravl_interval_max *get_max; }; /* * ravl_interval_compare -- compare intervals by its boundaries, * no overlapping allowed */ static int ravl_interval_compare(const void *lhs, const void *rhs) { const struct ravl_interval_node *left = lhs; const struct ravl_interval_node *right = rhs; if (left->get_min(left->addr) < right->get_min(right->addr) && left->get_max(left->addr) <= right->get_min(right->addr)) return -1; if (left->get_min(left->addr) > right->get_min(right->addr) && left->get_max(left->addr) >= right->get_min(right->addr)) return 1; return 0; } /* * ravl_interval_delete - finalize the ravl interval module */ void ravl_interval_delete(struct ravl_interval *ri) { ravl_delete(ri->tree); ri->tree = NULL; Free(ri); } /* * ravl_interval_new -- initialize the ravl interval module */ struct ravl_interval * ravl_interval_new(ravl_interval_min *get_min, ravl_interval_max *get_max) { int ret; struct ravl_interval *interval = pmem2_malloc(sizeof(*interval), &ret); if (ret) goto ret_null; interval->tree = ravl_new_sized(ravl_interval_compare, sizeof(struct ravl_interval_node)); if (!(interval->tree)) goto free_alloc; interval->get_min = get_min; interval->get_max = get_max; return interval; free_alloc: Free(interval); ret_null: return NULL; } /* * ravl_interval_insert -- insert interval entry into the tree */ int ravl_interval_insert(struct ravl_interval *ri, void *addr) { struct ravl_interval_node rin; rin.addr = addr; rin.get_min = ri->get_min; rin.get_max = ri->get_max; if (ravl_emplace_copy(ri->tree, &rin)) return PMEM2_E_ERRNO; return 0; } /* * ravl_interval_remove -- remove interval entry from the tree */ int ravl_interval_remove(struct ravl_interval *ri, struct ravl_interval_node *rin) { struct ravl_node *node = ravl_find(ri->tree, rin, RAVL_PREDICATE_EQUAL); if (!node) return PMEM2_E_MAPPING_NOT_FOUND; ravl_remove(ri->tree, node); return 0; } /* * ravl_interval_find_prior_or_eq -- find overlapping interval starting prior to * the current one or at the same place */ static struct ravl_interval_node * ravl_interval_find_prior_or_eq(struct ravl *tree, struct ravl_interval_node *rin) { struct ravl_node *node; struct ravl_interval_node *cur; node = ravl_find(tree, rin, RAVL_PREDICATE_LESS_EQUAL); if (!node) return NULL; cur = ravl_data(node); /* * If the end of the found interval is below the searched boundary, then * this is not our interval. 
*/ if (cur->get_max(cur->addr) <= rin->get_min(rin->addr)) return NULL; return cur; } /* * ravl_interval_find_later -- find overlapping interval starting later than * the current one */ static struct ravl_interval_node * ravl_interval_find_later(struct ravl *tree, struct ravl_interval_node *rin) { struct ravl_node *node; struct ravl_interval_node *cur; node = ravl_find(tree, rin, RAVL_PREDICATE_GREATER); if (!node) return NULL; cur = ravl_data(node); /* * If the beginning of the found interval is above the end of * the searched range, then this is not our interval. */ if (cur->get_min(cur->addr) >= rin->get_max(rin->addr)) return NULL; return cur; } /* * ravl_interval_find_equal -- find the interval with exact (min, max) range */ struct ravl_interval_node * ravl_interval_find_equal(struct ravl_interval *ri, void *addr) { struct ravl_interval_node range; range.addr = addr; range.get_min = ri->get_min; range.get_max = ri->get_max; struct ravl_node *node; node = ravl_find(ri->tree, &range, RAVL_PREDICATE_EQUAL); if (!node) return NULL; return ravl_data(node); } /* * ravl_interval_find -- find the earliest interval within (min, max) range */ struct ravl_interval_node * ravl_interval_find(struct ravl_interval *ri, void *addr) { struct ravl_interval_node range; range.addr = addr; range.get_min = ri->get_min; range.get_max = ri->get_max; struct ravl_interval_node *cur; cur = ravl_interval_find_prior_or_eq(ri->tree, &range); if (!cur) cur = ravl_interval_find_later(ri->tree, &range); return cur; } /* * ravl_interval_data -- returns the data contained within interval node */ void * ravl_interval_data(struct ravl_interval_node *rin) { return (void *)rin->addr; }
file_length: 4,963 | avg_line_length: 21.26009 | max_line_length: 80 | extension_type: c
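A sketch of driving the interval tree above; the struct range type and its callbacks are hypothetical, and it assumes the ravl_interval_min/ravl_interval_max typedefs in ravl_interval.h take the stored address and return the bound as an integer (size_t here):

#include <stddef.h>
#include "ravl_interval.h"

/* hypothetical object kept in the tree: the range [addr, addr + size) */
struct range {
	size_t addr;
	size_t size;
};

static size_t
range_min(void *obj)
{
	return ((struct range *)obj)->addr;
}

static size_t
range_max(void *obj)
{
	return ((struct range *)obj)->addr + ((struct range *)obj)->size;
}

static void
example(struct range *stored, struct range *probe)
{
	struct ravl_interval *ri = ravl_interval_new(range_min, range_max);
	if (ri == NULL)
		return;

	if (ravl_interval_insert(ri, stored) == 0) {
		/* find any stored interval overlapping the probe range */
		struct ravl_interval_node *n = ravl_interval_find(ri, probe);
		if (n != NULL)
			(void) ravl_interval_data(n);
	}

	ravl_interval_delete(ri);
}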
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/map_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * map_windows.c -- pmem2_map (Windows) */ #include <stdbool.h> #include "libpmem2.h" #include "alloc.h" #include "auto_flush.h" #include "config.h" #include "map.h" #include "out.h" #include "persist.h" #include "pmem2_utils.h" #include "source.h" #include "util.h" #define HIDWORD(x) ((DWORD)((x) >> 32)) #define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF)) /* requested CACHE_LINE, available PAGE */ #define REQ_CL_AVAIL_PG \ "requested granularity not available because specified volume is not a direct access (DAX) volume" /* requested BYTE, available PAGE */ #define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG /* requested BYTE, available CACHE_LINE */ #define REQ_BY_AVAIL_CL \ "requested granularity not available because the platform doesn't support eADR" /* indicates the cases in which the error cannot occur */ #define GRAN_IMPOSSIBLE "impossible" static const char *granularity_err_msg[3][3] = { /* requested granularity / available granularity */ /* -------------------------------------------------------------------- */ /* BYTE CACHE_LINE PAGE */ /* -------------------------------------------------------------------- */ /* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG}, /* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG}, /* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}}; /* * create_mapping -- creates file mapping object for a file */ static HANDLE create_mapping(HANDLE hfile, size_t offset, size_t length, DWORD protect, unsigned long *err) { size_t max_size = length + offset; SetLastError(0); HANDLE mh = CreateFileMapping(hfile, NULL, /* security attributes */ protect, HIDWORD(max_size), LODWORD(max_size), NULL); *err = GetLastError(); if (!mh) { ERR("!!CreateFileMapping"); return NULL; } if (*err == ERROR_ALREADY_EXISTS) { ERR("!!CreateFileMapping"); CloseHandle(mh); return NULL; } /* if the handle is valid the last error is undefined */ *err = 0; return mh; } /* * is_direct_access -- check if the specified volume is a * direct access (DAX) volume */ static int is_direct_access(HANDLE fh) { DWORD filesystemFlags; if (!GetVolumeInformationByHandleW(fh, NULL, 0, NULL, NULL, &filesystemFlags, NULL, 0)) { ERR("!!GetVolumeInformationByHandleW"); /* always return a negative value */ return pmem2_lasterror_to_err(); } if (filesystemFlags & FILE_DAX_VOLUME) return 1; return 0; } /* * pmem2_map -- map memory according to provided config */ int pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src, struct pmem2_map **map_ptr) { LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr); int ret = 0; unsigned long err = 0; size_t file_size; *map_ptr = NULL; if ((int)cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) { ERR( "please define the max granularity requested for the mapping"); return PMEM2_E_GRANULARITY_NOT_SET; } ret = pmem2_source_size(src, &file_size); if (ret) return ret; size_t src_alignment; ret = pmem2_source_alignment(src, &src_alignment); if (ret) return ret; size_t length; ret = pmem2_config_validate_length(cfg, file_size, src_alignment); if (ret) return ret; size_t effective_offset; ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment); if (ret) return ret; if (src->type == PMEM2_SOURCE_ANON) effective_offset = 0; /* without user-provided length, map to the end of the file */ if (cfg->length) length = cfg->length; else length = file_size - effective_offset; HANDLE map_handle = INVALID_HANDLE_VALUE; if (src->type == PMEM2_SOURCE_HANDLE) 
{ map_handle = src->value.handle; } else if (src->type == PMEM2_SOURCE_ANON) { /* no extra settings */ } else { ASSERT(0); } DWORD proto = PAGE_READWRITE; DWORD access = FILE_MAP_ALL_ACCESS; /* Unsupported flag combinations */ if ((cfg->protection_flag == PMEM2_PROT_NONE) || (cfg->protection_flag == PMEM2_PROT_WRITE) || (cfg->protection_flag == PMEM2_PROT_EXEC) || (cfg->protection_flag == (PMEM2_PROT_WRITE | PMEM2_PROT_EXEC))) { ERR("Windows does not support " "this protection flag combination."); return PMEM2_E_NOSUPP; } /* Translate protection flags into Windows flags */ if (cfg->protection_flag & PMEM2_PROT_WRITE) { if (cfg->protection_flag & PMEM2_PROT_EXEC) { proto = PAGE_EXECUTE_READWRITE; access = FILE_MAP_READ | FILE_MAP_WRITE | FILE_MAP_EXECUTE; } else { /* * Due to the already done exclusion * of incorrect combinations, PROT_WRITE * implies PROT_READ */ proto = PAGE_READWRITE; access = FILE_MAP_READ | FILE_MAP_WRITE; } } else if (cfg->protection_flag & PMEM2_PROT_READ) { if (cfg->protection_flag & PMEM2_PROT_EXEC) { proto = PAGE_EXECUTE_READ; access = FILE_MAP_READ | FILE_MAP_EXECUTE; } else { proto = PAGE_READONLY; access = FILE_MAP_READ; } } if (cfg->sharing == PMEM2_PRIVATE) { if (cfg->protection_flag & PMEM2_PROT_EXEC) { proto = PAGE_EXECUTE_WRITECOPY; access = FILE_MAP_EXECUTE | FILE_MAP_COPY; } else { /* * If FILE_MAP_COPY is set, * protection is changed to read/write */ proto = PAGE_READONLY; access = FILE_MAP_COPY; } } /* create a file mapping handle */ HANDLE mh = create_mapping(map_handle, effective_offset, length, proto, &err); if (!mh) { if (err == ERROR_ALREADY_EXISTS) { ERR("mapping already exists"); return PMEM2_E_MAPPING_EXISTS; } else if (err == ERROR_ACCESS_DENIED) { return PMEM2_E_NO_ACCESS; } return pmem2_lasterror_to_err(); } ret = pmem2_config_validate_addr_alignment(cfg, src); if (ret) return ret; /* let's get addr from cfg struct */ LPVOID addr_hint = cfg->addr; /* obtain a pointer to the mapping view */ void *base = MapViewOfFileEx(mh, access, HIDWORD(effective_offset), LODWORD(effective_offset), length, addr_hint); /* hint address */ if (base == NULL) { ERR("!!MapViewOfFileEx"); if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) { DWORD ret_windows = GetLastError(); if (ret_windows == ERROR_INVALID_ADDRESS) ret = PMEM2_E_MAPPING_EXISTS; else ret = pmem2_lasterror_to_err(); } else ret = pmem2_lasterror_to_err(); goto err_close_mapping_handle; } if (!CloseHandle(mh)) { ERR("!!CloseHandle"); ret = pmem2_lasterror_to_err(); goto err_unmap_base; } enum pmem2_granularity available_min_granularity = PMEM2_GRANULARITY_PAGE; if (src->type == PMEM2_SOURCE_HANDLE) { int direct_access = is_direct_access(src->value.handle); if (direct_access < 0) { ret = direct_access; goto err_unmap_base; } bool eADR = (pmem2_auto_flush() == 1); available_min_granularity = get_min_granularity(eADR, direct_access, cfg->sharing); } else if (src->type == PMEM2_SOURCE_ANON) { available_min_granularity = PMEM2_GRANULARITY_BYTE; } else { ASSERT(0); } if (available_min_granularity > cfg->requested_max_granularity) { const char *err = granularity_err_msg [cfg->requested_max_granularity] [available_min_granularity]; if (strcmp(err, GRAN_IMPOSSIBLE) == 0) FATAL( "unhandled granularity error: available_min_granularity: %d" \ "requested_max_granularity: %d", available_min_granularity, cfg->requested_max_granularity); ERR("%s", err); ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED; goto err_unmap_base; } /* prepare pmem2_map structure */ struct pmem2_map *map; map = (struct pmem2_map 
*)pmem2_malloc(sizeof(*map), &ret); if (!map) goto err_unmap_base; map->addr = base; /* * XXX probably in some cases the reserved length > the content length. * Maybe it is worth to do the research. */ map->reserved_length = length; map->content_length = length; map->effective_granularity = available_min_granularity; map->source = *src; pmem2_set_flush_fns(map); pmem2_set_mem_fns(map); ret = pmem2_register_mapping(map); if (ret) goto err_register; /* return a pointer to the pmem2_map structure */ *map_ptr = map; return ret; err_register: free(map); err_unmap_base: UnmapViewOfFile(base); return ret; err_close_mapping_handle: CloseHandle(mh); return ret; } /* * pmem2_unmap -- unmap the specified region */ int pmem2_unmap(struct pmem2_map **map_ptr) { LOG(3, "mapp %p", map_ptr); struct pmem2_map *map = *map_ptr; int ret = pmem2_unregister_mapping(map); if (ret) return ret; if (!UnmapViewOfFile(map->addr)) { ERR("!!UnmapViewOfFile"); return pmem2_lasterror_to_err(); } Free(map); *map_ptr = NULL; return 0; }
file_length: 8,611 | avg_line_length: 23.123249 | max_line_length: 99 | extension_type: c
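A caller-side sketch of the mapping flow implemented above (Windows), assuming the config/source helpers from the public libpmem2.h in this tree and the pmem2_map() signature shown in the file:

#include <windows.h>
#include <libpmem2.h>

static int
map_file(HANDLE h, struct pmem2_map **map)
{
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	int ret;

	if ((ret = pmem2_config_new(&cfg)) != 0)
		return ret;
	if ((ret = pmem2_source_from_handle(&src, h)) != 0)
		goto err_cfg;

	/* pmem2_map() fails unless a max granularity was requested */
	pmem2_config_set_required_store_granularity(cfg,
			PMEM2_GRANULARITY_PAGE);

	ret = pmem2_map(cfg, src, map);

	pmem2_source_delete(&src);
err_cfg:
	pmem2_config_delete(&cfg);
	return ret;
}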
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/extent_linux.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * extent_linux.c - implementation of the linux fs extent query API */ #include <string.h> #include <fcntl.h> #include <sys/ioctl.h> #include <linux/fs.h> #include <linux/fiemap.h> #include "libpmem2.h" #include "pmem2_utils.h" #include "file.h" #include "out.h" #include "extent.h" #include "alloc.h" /* * pmem2_extents_create_get -- allocate extents structure and get extents * of the given file */ int pmem2_extents_create_get(int fd, struct extents **exts) { LOG(3, "fd %i extents %p", fd, exts); ASSERT(fd > 2); ASSERTne(exts, NULL); enum pmem2_file_type pmem2_type; struct extents *pexts = NULL; struct fiemap *fmap = NULL; os_stat_t st; if (os_fstat(fd, &st) < 0) { ERR("!fstat %d", fd); return PMEM2_E_ERRNO; } int ret = pmem2_get_type_from_stat(&st, &pmem2_type); if (ret) return ret; /* directories do not have any extents */ if (pmem2_type == PMEM2_FTYPE_DIR) { ERR( "checking extents does not make sense in case of directories"); return PMEM2_E_INVALID_FILE_TYPE; } /* allocate extents structure */ pexts = pmem2_zalloc(sizeof(struct extents), &ret); if (ret) return ret; /* save block size */ LOG(10, "fd %i: block size: %li", fd, (long int)st.st_blksize); pexts->blksize = (uint64_t)st.st_blksize; /* DAX device does not have any extents */ if (pmem2_type == PMEM2_FTYPE_DEVDAX) { *exts = pexts; return 0; } ASSERTeq(pmem2_type, PMEM2_FTYPE_REG); fmap = pmem2_zalloc(sizeof(struct fiemap), &ret); if (ret) goto error_free; fmap->fm_start = 0; fmap->fm_length = (size_t)st.st_size; fmap->fm_flags = 0; fmap->fm_extent_count = 0; fmap->fm_mapped_extents = 0; if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) { ERR("!fiemap ioctl() for fd=%d failed", fd); ret = PMEM2_E_ERRNO; goto error_free; } size_t newsize = sizeof(struct fiemap) + fmap->fm_mapped_extents * sizeof(struct fiemap_extent); struct fiemap *newfmap = pmem2_realloc(fmap, newsize, &ret); if (ret) goto error_free; fmap = newfmap; memset(fmap->fm_extents, 0, fmap->fm_mapped_extents * sizeof(struct fiemap_extent)); fmap->fm_extent_count = fmap->fm_mapped_extents; fmap->fm_mapped_extents = 0; if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) { ERR("!fiemap ioctl() for fd=%d failed", fd); ret = PMEM2_E_ERRNO; goto error_free; } LOG(4, "file with fd=%i has %u extents:", fd, fmap->fm_mapped_extents); /* save number of extents */ pexts->extents_count = fmap->fm_mapped_extents; pexts->extents = pmem2_malloc( pexts->extents_count * sizeof(struct extent), &ret); if (ret) goto error_free; /* save extents */ unsigned e; for (e = 0; e < fmap->fm_mapped_extents; e++) { pexts->extents[e].offset_physical = fmap->fm_extents[e].fe_physical; pexts->extents[e].offset_logical = fmap->fm_extents[e].fe_logical; pexts->extents[e].length = fmap->fm_extents[e].fe_length; LOG(10, " #%u: off_phy: %lu off_log: %lu len: %lu", e, pexts->extents[e].offset_physical, pexts->extents[e].offset_logical, pexts->extents[e].length); } *exts = pexts; Free(fmap); return 0; error_free: Free(pexts->extents); Free(pexts); Free(fmap); return ret; } /* * pmem2_extents_destroy -- free extents structure */ void pmem2_extents_destroy(struct extents **exts) { LOG(3, "extents %p", exts); ASSERTne(exts, NULL); if (*exts) { Free((*exts)->extents); Free(*exts); *exts = NULL; } }
file_length: 3,519 | avg_line_length: 20.333333 | max_line_length: 73 | extension_type: c
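A sketch of consuming the extents structure built above; this is an internal API, so the example assumes it is compiled inside the libpmem2 tree where extent.h is available:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "extent.h"

static void
dump_extents(const char *path)
{
	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return;

	struct extents *exts;
	if (pmem2_extents_create_get(fd, &exts) == 0) {
		for (unsigned e = 0; e < exts->extents_count; e++)
			printf("#%u phy %lu log %lu len %lu\n", e,
				(unsigned long)exts->extents[e].offset_physical,
				(unsigned long)exts->extents[e].offset_logical,
				(unsigned long)exts->extents[e].length);
		pmem2_extents_destroy(&exts);
	}

	close(fd);
}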
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/flush.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ #ifndef X86_64_FLUSH_H #define X86_64_FLUSH_H #include <emmintrin.h> #include <stddef.h> #include <stdint.h> #include "util.h" #include "valgrind_internal.h" #define FLUSH_ALIGN ((uintptr_t)64) static force_inline void pmem_clflush(const void *addr) { _mm_clflush(addr); } #ifdef _MSC_VER static force_inline void pmem_clflushopt(const void *addr) { _mm_clflushopt(addr); } static force_inline void pmem_clwb(const void *addr) { _mm_clwb(addr); } #else /* * The x86 memory instructions are new enough that the compiler * intrinsic functions are not always available. The intrinsic * functions are defined here in terms of asm statements for now. */ static force_inline void pmem_clflushopt(const void *addr) { asm volatile(".byte 0x66; clflush %0" : "+m" \ (*(volatile char *)(addr))); } static force_inline void pmem_clwb(const void *addr) { asm volatile(".byte 0x66; xsaveopt %0" : "+m" \ (*(volatile char *)(addr))); } #endif /* _MSC_VER */ typedef void flush_fn(const void *, size_t); /* * flush_clflush_nolog -- flush the CPU cache, using clflush */ static force_inline void flush_clflush_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) _mm_clflush((char *)uptr); } /* * flush_clflushopt_nolog -- flush the CPU cache, using clflushopt */ static force_inline void flush_clflushopt_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { pmem_clflushopt((char *)uptr); } } /* * flush_clwb_nolog -- flush the CPU cache, using clwb */ static force_inline void flush_clwb_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { pmem_clwb((char *)uptr); } } /* * flush64b_empty -- (internal) do not flush the CPU cache */ static force_inline void flush64b_empty(const void *addr) { /* NOP, but tell pmemcheck about it */ VALGRIND_DO_FLUSH(addr, 64); } #endif
file_length: 2,521 | avg_line_length: 20.193277 | max_line_length: 66 | extension_type: h
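The helpers above all follow the same pattern: flush every 64-byte cache line covering a range, then fence. A standalone sketch of that pattern using the clwb intrinsic directly (assumes a compiler/CPU with clwb support, e.g. building with -mclwb; without it, _mm_clflush can be substituted, as flush_clflush_nolog() does):

#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
#include <immintrin.h>

#define FLUSH_ALIGN ((uintptr_t)64)

/* persist a freshly written range: clwb every covering cache line, then fence */
static void
persist_range(const void *addr, size_t len)
{
	for (uintptr_t uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
			uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN)
		_mm_clwb((void *)uptr);

	_mm_sfence();	/* order the flushes before later stores */
}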
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/init.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ #include <string.h> #include <xmmintrin.h> #include "auto_flush.h" #include "cpu.h" #include "flush.h" #include "memcpy_memset.h" #include "os.h" #include "out.h" #include "pmem2_arch.h" #include "valgrind_internal.h" #define MOVNT_THRESHOLD 256 size_t Movnt_threshold = MOVNT_THRESHOLD; /* * memory_barrier -- (internal) issue the fence instruction */ static void memory_barrier(void) { LOG(15, NULL); _mm_sfence(); /* ensure CLWB or CLFLUSHOPT completes */ } /* * flush_clflush -- (internal) flush the CPU cache, using clflush */ static void flush_clflush(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clflush_nolog(addr, len); } /* * flush_clflushopt -- (internal) flush the CPU cache, using clflushopt */ static void flush_clflushopt(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clflushopt_nolog(addr, len); } /* * flush_clwb -- (internal) flush the CPU cache, using clwb */ static void flush_clwb(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clwb_nolog(addr, len); } #if SSE2_AVAILABLE || AVX_AVAILABLE || AVX512F_AVAILABLE #define PMEM2_F_MEM_MOVNT (PMEM2_F_MEM_WC | PMEM2_F_MEM_NONTEMPORAL) #define PMEM2_F_MEM_MOV (PMEM2_F_MEM_WB | PMEM2_F_MEM_TEMPORAL) #define MEMCPY_TEMPLATE(isa, flush, perfbarrier) \ static void *\ memmove_nodrain_##isa##_##flush##perfbarrier(void *dest, const void *src, \ size_t len, unsigned flags, flush_func flushf)\ {\ if (len == 0 || src == dest)\ return dest;\ \ if (flags & PMEM2_F_MEM_NOFLUSH) \ memmove_mov_##isa##_noflush(dest, src, len); \ else if (flags & PMEM2_F_MEM_MOVNT)\ memmove_movnt_##isa ##_##flush##perfbarrier(dest, src, len);\ else if (flags & PMEM2_F_MEM_MOV)\ memmove_mov_##isa##_##flush(dest, src, len);\ else if (len < Movnt_threshold)\ memmove_mov_##isa##_##flush(dest, src, len);\ else\ memmove_movnt_##isa##_##flush##perfbarrier(dest, src, len);\ \ return dest;\ } #define MEMCPY_TEMPLATE_EADR(isa, perfbarrier) \ static void *\ memmove_nodrain_##isa##_eadr##perfbarrier(void *dest, const void *src, \ size_t len, unsigned flags, flush_func flushf)\ {\ if (len == 0 || src == dest)\ return dest;\ \ if (flags & PMEM2_F_MEM_NOFLUSH)\ memmove_mov_##isa##_noflush(dest, src, len);\ else if (flags & PMEM2_F_MEM_NONTEMPORAL)\ memmove_movnt_##isa##_empty##perfbarrier(dest, src, len);\ else\ memmove_mov_##isa##_empty(dest, src, len);\ \ return dest;\ } #define MEMSET_TEMPLATE(isa, flush, perfbarrier)\ static void *\ memset_nodrain_##isa##_##flush##perfbarrier(void *dest, int c, size_t len, \ unsigned flags, flush_func flushf)\ {\ if (len == 0)\ return dest;\ \ if (flags & PMEM2_F_MEM_NOFLUSH) \ memset_mov_##isa##_noflush(dest, c, len); \ else if (flags & PMEM2_F_MEM_MOVNT)\ memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\ else if (flags & PMEM2_F_MEM_MOV)\ memset_mov_##isa##_##flush(dest, c, len);\ else if (len < Movnt_threshold)\ memset_mov_##isa##_##flush(dest, c, len);\ else\ memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\ \ return dest;\ } #define MEMSET_TEMPLATE_EADR(isa, perfbarrier) \ static void *\ memset_nodrain_##isa##_eadr##perfbarrier(void *dest, int c, size_t len, \ unsigned flags, flush_func flushf)\ {\ if (len == 0)\ return dest;\ \ if (flags & PMEM2_F_MEM_NOFLUSH)\ memset_mov_##isa##_noflush(dest, c, len);\ else if (flags & PMEM2_F_MEM_NONTEMPORAL)\ memset_movnt_##isa##_empty##perfbarrier(dest, c, len);\ else\ memset_mov_##isa##_empty(dest, c, len);\ \ return 
dest;\ } #endif #if SSE2_AVAILABLE MEMCPY_TEMPLATE(sse2, clflush, _nobarrier) MEMCPY_TEMPLATE(sse2, clflushopt, _nobarrier) MEMCPY_TEMPLATE(sse2, clwb, _nobarrier) MEMCPY_TEMPLATE_EADR(sse2, _nobarrier) MEMSET_TEMPLATE(sse2, clflush, _nobarrier) MEMSET_TEMPLATE(sse2, clflushopt, _nobarrier) MEMSET_TEMPLATE(sse2, clwb, _nobarrier) MEMSET_TEMPLATE_EADR(sse2, _nobarrier) MEMCPY_TEMPLATE(sse2, clflush, _wcbarrier) MEMCPY_TEMPLATE(sse2, clflushopt, _wcbarrier) MEMCPY_TEMPLATE(sse2, clwb, _wcbarrier) MEMCPY_TEMPLATE_EADR(sse2, _wcbarrier) MEMSET_TEMPLATE(sse2, clflush, _wcbarrier) MEMSET_TEMPLATE(sse2, clflushopt, _wcbarrier) MEMSET_TEMPLATE(sse2, clwb, _wcbarrier) MEMSET_TEMPLATE_EADR(sse2, _wcbarrier) #endif #if AVX_AVAILABLE MEMCPY_TEMPLATE(avx, clflush, _nobarrier) MEMCPY_TEMPLATE(avx, clflushopt, _nobarrier) MEMCPY_TEMPLATE(avx, clwb, _nobarrier) MEMCPY_TEMPLATE_EADR(avx, _nobarrier) MEMSET_TEMPLATE(avx, clflush, _nobarrier) MEMSET_TEMPLATE(avx, clflushopt, _nobarrier) MEMSET_TEMPLATE(avx, clwb, _nobarrier) MEMSET_TEMPLATE_EADR(avx, _nobarrier) MEMCPY_TEMPLATE(avx, clflush, _wcbarrier) MEMCPY_TEMPLATE(avx, clflushopt, _wcbarrier) MEMCPY_TEMPLATE(avx, clwb, _wcbarrier) MEMCPY_TEMPLATE_EADR(avx, _wcbarrier) MEMSET_TEMPLATE(avx, clflush, _wcbarrier) MEMSET_TEMPLATE(avx, clflushopt, _wcbarrier) MEMSET_TEMPLATE(avx, clwb, _wcbarrier) MEMSET_TEMPLATE_EADR(avx, _wcbarrier) #endif #if AVX512F_AVAILABLE MEMCPY_TEMPLATE(avx512f, clflush, /* cstyle wa */) MEMCPY_TEMPLATE(avx512f, clflushopt, /* */) MEMCPY_TEMPLATE(avx512f, clwb, /* */) MEMCPY_TEMPLATE_EADR(avx512f, /* */) MEMSET_TEMPLATE(avx512f, clflush, /* */) MEMSET_TEMPLATE(avx512f, clflushopt, /* */) MEMSET_TEMPLATE(avx512f, clwb, /* */) MEMSET_TEMPLATE_EADR(avx512f, /* */) #endif enum memcpy_impl { MEMCPY_INVALID, MEMCPY_SSE2, MEMCPY_AVX, MEMCPY_AVX512F }; /* * use_sse2_memcpy_memset -- (internal) SSE2 detected, use it if possible */ static void use_sse2_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl, int wc_workaround) { #if SSE2_AVAILABLE *impl = MEMCPY_SSE2; if (wc_workaround) { info->memmove_nodrain_eadr = memmove_nodrain_sse2_eadr_wcbarrier; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_sse2_clflush_wcbarrier; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_sse2_clflushopt_wcbarrier; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_sse2_clwb_wcbarrier; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_sse2_eadr_wcbarrier; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_sse2_clflush_wcbarrier; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_sse2_clflushopt_wcbarrier; else if (info->flush == flush_clwb) info->memset_nodrain = memset_nodrain_sse2_clwb_wcbarrier; else ASSERT(0); } else { info->memmove_nodrain_eadr = memmove_nodrain_sse2_eadr_nobarrier; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_sse2_clflush_nobarrier; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_sse2_clflushopt_nobarrier; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_sse2_clwb_nobarrier; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_sse2_eadr_nobarrier; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_sse2_clflush_nobarrier; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_sse2_clflushopt_nobarrier; else if (info->flush == flush_clwb) 
info->memset_nodrain = memset_nodrain_sse2_clwb_nobarrier; else ASSERT(0); } #else LOG(3, "sse2 disabled at build time"); #endif } /* * use_avx_memcpy_memset -- (internal) AVX detected, use it if possible */ static void use_avx_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl, int wc_workaround) { #if AVX_AVAILABLE LOG(3, "avx supported"); char *e = os_getenv("PMEM_AVX"); if (e != NULL && strcmp(e, "0") == 0) { LOG(3, "PMEM_AVX set to 0"); return; } LOG(3, "PMEM_AVX enabled"); *impl = MEMCPY_AVX; if (wc_workaround) { info->memmove_nodrain_eadr = memmove_nodrain_avx_eadr_wcbarrier; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_avx_clflush_wcbarrier; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_avx_clflushopt_wcbarrier; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_avx_clwb_wcbarrier; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_avx_eadr_wcbarrier; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_avx_clflush_wcbarrier; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_avx_clflushopt_wcbarrier; else if (info->flush == flush_clwb) info->memset_nodrain = memset_nodrain_avx_clwb_wcbarrier; else ASSERT(0); } else { info->memmove_nodrain_eadr = memmove_nodrain_avx_eadr_nobarrier; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_avx_clflush_nobarrier; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_avx_clflushopt_nobarrier; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_avx_clwb_nobarrier; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_avx_eadr_nobarrier; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_avx_clflush_nobarrier; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_avx_clflushopt_nobarrier; else if (info->flush == flush_clwb) info->memset_nodrain = memset_nodrain_avx_clwb_nobarrier; else ASSERT(0); } #else LOG(3, "avx supported, but disabled at build time"); #endif } /* * use_avx512f_memcpy_memset -- (internal) AVX512F detected, use it if possible */ static void use_avx512f_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl) { #if AVX512F_AVAILABLE LOG(3, "avx512f supported"); char *e = os_getenv("PMEM_AVX512F"); if (e != NULL && strcmp(e, "0") == 0) { LOG(3, "PMEM_AVX512F set to 0"); return; } LOG(3, "PMEM_AVX512F enabled"); *impl = MEMCPY_AVX512F; info->memmove_nodrain_eadr = memmove_nodrain_avx512f_eadr; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_avx512f_clflush; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_avx512f_clflushopt; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_avx512f_clwb; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_avx512f_eadr; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_avx512f_clflush; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_avx512f_clflushopt; else if (info->flush == flush_clwb) info->memset_nodrain = memset_nodrain_avx512f_clwb; else ASSERT(0); #else LOG(3, "avx512f supported, but disabled at build time"); #endif } /* * pmem_get_cpuinfo -- configure libpmem based on CPUID */ static void pmem_cpuinfo_to_funcs(struct pmem2_arch_info *info, enum memcpy_impl *impl) { LOG(3, NULL); if (is_cpu_clflush_present()) { LOG(3, "clflush supported"); 
info->flush = flush_clflush; info->flush_has_builtin_fence = 1; info->fence = memory_barrier; } if (is_cpu_clflushopt_present()) { LOG(3, "clflushopt supported"); char *e = os_getenv("PMEM_NO_CLFLUSHOPT"); if (e && strcmp(e, "1") == 0) { LOG(3, "PMEM_NO_CLFLUSHOPT forced no clflushopt"); } else { info->flush = flush_clflushopt; info->flush_has_builtin_fence = 0; info->fence = memory_barrier; } } if (is_cpu_clwb_present()) { LOG(3, "clwb supported"); char *e = os_getenv("PMEM_NO_CLWB"); if (e && strcmp(e, "1") == 0) { LOG(3, "PMEM_NO_CLWB forced no clwb"); } else { info->flush = flush_clwb; info->flush_has_builtin_fence = 0; info->fence = memory_barrier; } } /* * XXX Disable this work around for Intel CPUs with optimized * WC eviction. */ int wc_workaround = is_cpu_genuine_intel(); char *ptr = os_getenv("PMEM_WC_WORKAROUND"); if (ptr) { if (strcmp(ptr, "1") == 0) { LOG(3, "WC workaround forced to 1"); wc_workaround = 1; } else if (strcmp(ptr, "0") == 0) { LOG(3, "WC workaround forced to 0"); wc_workaround = 0; } else { LOG(3, "incorrect value of PMEM_WC_WORKAROUND (%s)", ptr); } } LOG(3, "WC workaround = %d", wc_workaround); ptr = os_getenv("PMEM_NO_MOVNT"); if (ptr && strcmp(ptr, "1") == 0) { LOG(3, "PMEM_NO_MOVNT forced no movnt"); } else { use_sse2_memcpy_memset(info, impl, wc_workaround); if (is_cpu_avx_present()) use_avx_memcpy_memset(info, impl, wc_workaround); if (is_cpu_avx512f_present()) use_avx512f_memcpy_memset(info, impl); } } /* * pmem2_arch_init -- initialize architecture-specific list of pmem operations */ void pmem2_arch_init(struct pmem2_arch_info *info) { LOG(3, NULL); enum memcpy_impl impl = MEMCPY_INVALID; pmem_cpuinfo_to_funcs(info, &impl); /* * For testing, allow overriding the default threshold * for using non-temporal stores in pmem_memcpy_*(), pmem_memmove_*() * and pmem_memset_*(). * It has no effect if movnt is not supported or disabled. */ const char *ptr = os_getenv("PMEM_MOVNT_THRESHOLD"); if (ptr) { long long val = atoll(ptr); if (val < 0) { LOG(3, "Invalid PMEM_MOVNT_THRESHOLD"); } else { LOG(3, "PMEM_MOVNT_THRESHOLD set to %zu", (size_t)val); Movnt_threshold = (size_t)val; } } if (info->flush == flush_clwb) LOG(3, "using clwb"); else if (info->flush == flush_clflushopt) LOG(3, "using clflushopt"); else if (info->flush == flush_clflush) LOG(3, "using clflush"); else FATAL("invalid deep flush function address"); if (impl == MEMCPY_AVX512F) LOG(3, "using movnt AVX512F"); else if (impl == MEMCPY_AVX) LOG(3, "using movnt AVX"); else if (impl == MEMCPY_SSE2) LOG(3, "using movnt SSE2"); }
13,899
25.275992
79
c
null
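The init code above picks memmove/memset implementations once at startup, gating each SIMD path on CPUID plus an environment override (PMEM_AVX, PMEM_AVX512F, PMEM_NO_MOVNT, PMEM_MOVNT_THRESHOLD). The following is a minimal sketch of that dispatch pattern only; the names (FANCY_MEMSET, pick_memset) are hypothetical and not part of libpmem2.

/* Hypothetical sketch of env-var-gated dispatch; not the library's real API. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void *(*memset_fn)(void *dest, int c, size_t len);

static void *memset_plain(void *dest, int c, size_t len) { return memset(dest, c, len); }
/* Stand-in for an optimized (e.g. SIMD / non-temporal) path. */
static void *memset_fancy(void *dest, int c, size_t len) { return memset(dest, c, len); }

static memset_fn
pick_memset(int cpu_has_fancy)
{
	/* Setting the variable to "0" disables the optimized path, like PMEM_AVX above. */
	const char *e = getenv("FANCY_MEMSET");
	if (!cpu_has_fancy || (e != NULL && strcmp(e, "0") == 0))
		return memset_plain;
	return memset_fancy;
}

int
main(void)
{
	char buf[64];
	memset_fn f = pick_memset(1);
	f(buf, 0xab, sizeof(buf));
	printf("dispatched to %s\n", f == memset_fancy ? "optimized" : "plain");
	return 0;
}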
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/avx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ #ifndef PMEM_AVX_H #define PMEM_AVX_H #include <immintrin.h> #include "util.h" /* * avx_zeroupper -- _mm256_zeroupper wrapper * * _mm256_zeroupper clears upper parts of avx registers. * * It's needed for 2 reasons: * - it improves performance of non-avx code after avx * - it works around problem discovered by Valgrind * * In optimized builds gcc inserts VZEROUPPER automatically before * calling non-avx code (or at the end of the function). But in release * builds it doesn't, so if we don't do this by ourselves, then when * someone memcpy'ies uninitialized data, Valgrind complains whenever * someone reads those registers. * * One notable example is loader, which tries to detect whether it * needs to save whole ymm registers by looking at their current * (possibly uninitialized) value. * * Valgrind complains like that: * Conditional jump or move depends on uninitialised value(s) * at 0x4015CC9: _dl_runtime_resolve_avx_slow * (in /lib/x86_64-linux-gnu/ld-2.24.so) * by 0x10B531: test_realloc_api (obj_basic_integration.c:185) * by 0x10F1EE: main (obj_basic_integration.c:594) * * Note: We have to be careful to not read AVX registers after this * intrinsic, because of this stupid gcc bug: * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735 */ static force_inline void avx_zeroupper(void) { _mm256_zeroupper(); } static force_inline __m128i m256_get16b(__m256i ymm) { return _mm256_extractf128_si256(ymm, 0); } #ifdef _MSC_VER static force_inline uint64_t m256_get8b(__m256i ymm) { return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0); } static force_inline uint32_t m256_get4b(__m256i ymm) { return (uint32_t)m256_get8b(ymm); } static force_inline uint16_t m256_get2b(__m256i ymm) { return (uint16_t)m256_get8b(ymm); } #else static force_inline uint64_t m256_get8b(__m256i ymm) { return (uint64_t)_mm256_extract_epi64(ymm, 0); } static force_inline uint32_t m256_get4b(__m256i ymm) { return (uint32_t)_mm256_extract_epi32(ymm, 0); } static force_inline uint16_t m256_get2b(__m256i ymm) { return (uint16_t)_mm256_extract_epi16(ymm, 0); } #endif #endif
2,238
24.735632
72
h
null
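avx.h above wraps _mm256_zeroupper so that AVX code paths clear the upper ymm halves before returning to non-AVX code, avoiding SSE/AVX transition penalties and the Valgrind false positives described in its comment. A hedged sketch of the calling idiom, assuming a build compiled with -mavx:

/* Sketch only: 32-byte AVX copy that clears upper ymm state before returning (compile with -mavx). */
#include <immintrin.h>
#include <string.h>

static void
copy32_avx(char *dest, const char *src)
{
	__m256i y = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dest, y);
	_mm256_zeroupper();	/* same effect as the avx_zeroupper() wrapper above */
}

int
main(void)
{
	char src[32], dst[32];
	memset(src, 0x7e, sizeof(src));
	copy32_avx(dst, src);
	return memcmp(dst, src, sizeof(dst)) != 0;
}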
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy_memset.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ #ifndef MEMCPY_MEMSET_H #define MEMCPY_MEMSET_H #include <stddef.h> #include <xmmintrin.h> #include "pmem2_arch.h" typedef void barrier_fn(void); typedef void flush64b_fn(const void *); static inline void barrier_after_ntstores(void) { /* * In this configuration pmem_drain does not contain sfence, so we have * to serialize non-temporal store instructions. */ _mm_sfence(); } static inline void no_barrier_after_ntstores(void) { /* * In this configuration pmem_drain contains sfence, so we don't have * to serialize non-temporal store instructions */ } static inline void noflush(const void *addr, size_t len) { /* NOP, not even pmemcheck annotation */ } static inline void noflush64b(const void *addr) { /* NOP, not even pmemcheck annotation */ } typedef void perf_barrier_fn(void); static force_inline void wc_barrier(void) { /* * Currently, for SSE2 and AVX code paths, use of non-temporal stores * on all generations of CPUs must be limited to the number of * write-combining buffers (12) because otherwise, suboptimal eviction * policy might impact performance when writing more data than WC * buffers can simultaneously hold. * * The AVX512 code path is not affected, probably because we are * overwriting whole cache lines. */ _mm_sfence(); } static force_inline void no_barrier(void) { } #ifndef AVX512F_AVAILABLE /* * XXX not supported in MSVC version we currently use. * Enable Windows tests pmem2_mem_ext when MSVC we * use will support AVX512F. */ #ifdef _MSC_VER #define AVX512F_AVAILABLE 0 #else #define AVX512F_AVAILABLE 1 #endif #endif #ifndef AVX_AVAILABLE #define AVX_AVAILABLE 1 #endif #ifndef SSE2_AVAILABLE #define SSE2_AVAILABLE 1 #endif #if SSE2_AVAILABLE void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len); void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len); void memmove_mov_sse2_empty(char *dest, const char *src, size_t len); void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src, size_t len); void memset_mov_sse2_clflush(char *dest, int c, size_t len); void memset_mov_sse2_clflushopt(char *dest, int c, size_t len); void memset_mov_sse2_clwb(char *dest, int c, size_t len); void memset_mov_sse2_empty(char *dest, int c, size_t len); void memset_mov_sse2_noflush(char *dest, int c, size_t len); void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len); 
void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len); #endif #if AVX_AVAILABLE void memmove_mov_avx_clflush(char *dest, const char *src, size_t len); void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_avx_clwb(char *dest, const char *src, size_t len); void memmove_mov_avx_empty(char *dest, const char *src, size_t len); void memmove_mov_avx_noflush(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src, size_t len); void memset_mov_avx_clflush(char *dest, int c, size_t len); void memset_mov_avx_clflushopt(char *dest, int c, size_t len); void memset_mov_avx_clwb(char *dest, int c, size_t len); void memset_mov_avx_empty(char *dest, int c, size_t len); void memset_mov_avx_noflush(char *dest, int c, size_t len); void memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len); #endif #if AVX512F_AVAILABLE void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len); void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len); void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len); void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len); void memset_mov_avx512f_clflush(char *dest, int c, size_t len); void memset_mov_avx512f_clflushopt(char *dest, int 
c, size_t len); void memset_mov_avx512f_clwb(char *dest, int c, size_t len); void memset_mov_avx512f_empty(char *dest, int c, size_t len); void memset_mov_avx512f_noflush(char *dest, int c, size_t len); void memset_movnt_avx512f_clflush(char *dest, int c, size_t len); void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len); void memset_movnt_avx512f_clwb(char *dest, int c, size_t len); void memset_movnt_avx512f_empty(char *dest, int c, size_t len); void memset_movnt_avx512f_noflush(char *dest, int c, size_t len); #endif extern size_t Movnt_threshold; /* * SSE2/AVX1 only: * * How much data WC buffers can hold at the same time, after which sfence * is needed to flush them. * * For some reason sfence affects performance of reading from DRAM, so we have * to prefetch the source data earlier. */ #define PERF_BARRIER_SIZE (12 * CACHELINE_SIZE /* 768 */) /* * How much to prefetch initially. * Cannot be bigger than the size of L1 (32kB) - PERF_BARRIER_SIZE. */ #define INI_PREFETCH_SIZE (64 * CACHELINE_SIZE /* 4096 */) static force_inline void prefetch(const char *addr) { _mm_prefetch(addr, _MM_HINT_T0); } static force_inline void prefetch_ini_fw(const char *src, size_t len) { size_t pref = MIN(len, INI_PREFETCH_SIZE); for (size_t i = 0; i < pref; i += CACHELINE_SIZE) prefetch(src + i); } static force_inline void prefetch_ini_bw(const char *src, size_t len) { size_t pref = MIN(len, INI_PREFETCH_SIZE); for (size_t i = 0; i < pref; i += CACHELINE_SIZE) prefetch(src - i); } static force_inline void prefetch_next_fw(const char *src, const char *srcend) { const char *begin = src + INI_PREFETCH_SIZE; const char *end = begin + PERF_BARRIER_SIZE; if (end > srcend) end = srcend; for (const char *addr = begin; addr < end; addr += CACHELINE_SIZE) prefetch(addr); } static force_inline void prefetch_next_bw(const char *src, const char *srcbegin) { const char *begin = src - INI_PREFETCH_SIZE; const char *end = begin - PERF_BARRIER_SIZE; if (end < srcbegin) end = srcbegin; for (const char *addr = begin; addr >= end; addr -= CACHELINE_SIZE) prefetch(addr); } #endif
9,351
33.131387
79
h
null
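memcpy_memset.h above builds every public variant by combining one force_inline template body with small callbacks (barrier_fn, flush64b_fn, perf_barrier_fn, plus the flush_fn used by the .c files). The sketch below mimics that composition with made-up names; it illustrates the pattern only and is not the real code.

/* Sketch of the "one template body + tiny callbacks" composition (hypothetical names). */
#include <stddef.h>
#include <stdio.h>

typedef void flush_cb(const void *addr, size_t len);
typedef void barrier_cb(void);

static void flush_noop(const void *addr, size_t len) { (void)addr; (void)len; }
static void flush_log(const void *addr, size_t len) { printf("flush %zu bytes at %p\n", len, (void *)addr); }
static void barrier_noop(void) {}
static void barrier_log(void) { printf("fence\n"); }

/* One body; the real code marks this force_inline so the callbacks fold away at compile time. */
static inline void
fill_template(char *dest, int c, size_t len, flush_cb *flush, barrier_cb *barrier)
{
	for (size_t i = 0; i < len; i++)
		dest[i] = (char)c;
	flush(dest, len);
	barrier();
}

/* Public variants, analogous to the *_noflush / *_clwb entry points declared above. */
void fill_noflush(char *dest, int c, size_t len) { fill_template(dest, c, len, flush_noop, barrier_noop); }
void fill_flushing(char *dest, int c, size_t len) { fill_template(dest, c, len, flush_log, barrier_log); }

int
main(void)
{
	char buf[8];
	fill_flushing(buf, 0, sizeof(buf));
	return 0;
}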
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memset/memset_nt_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_sse2.h" #include "out.h" #include "valgrind_internal.h" static force_inline void mm_stream_si128(char *dest, unsigned idx, __m128i src) { _mm_stream_si128((__m128i *)dest + idx, src); barrier(); } static force_inline void memset_movnt4x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); mm_stream_si128(dest, 4, xmm); mm_stream_si128(dest, 5, xmm); mm_stream_si128(dest, 6, xmm); mm_stream_si128(dest, 7, xmm); mm_stream_si128(dest, 8, xmm); mm_stream_si128(dest, 9, xmm); mm_stream_si128(dest, 10, xmm); mm_stream_si128(dest, 11, xmm); mm_stream_si128(dest, 12, xmm); mm_stream_si128(dest, 13, xmm); mm_stream_si128(dest, 14, xmm); mm_stream_si128(dest, 15, xmm); } static force_inline void memset_movnt2x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); mm_stream_si128(dest, 4, xmm); mm_stream_si128(dest, 5, xmm); mm_stream_si128(dest, 6, xmm); mm_stream_si128(dest, 7, xmm); } static force_inline void memset_movnt1x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); } static force_inline void memset_movnt1x32b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); } static force_inline void memset_movnt1x16b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest, xmm); } static force_inline void memset_movnt1x8b(char *dest, __m128i xmm) { uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char *dest, __m128i xmm) { uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_sse2(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { char *orig_dest = dest; size_t orig_len = len; __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt, flush); dest += cnt; len -= cnt; } while (len >= PERF_BARRIER_SIZE) { memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, xmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, xmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, xmm); else if (len == 16) memset_movnt1x16b(dest, xmm); else if (len == 8) memset_movnt1x8b(dest, xmm); else if (len == 4) memset_movnt1x4b(dest, xmm); else goto nonnt; goto end; } nonnt: memset_small_sse2(dest, xmm, len, flush); end: barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } /* variants without perf_barrier */ void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
5,912
20.580292
71
c
null
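memset_movnt_sse2 above streams cache-bypassing stores and, in the *_wcbarrier variants, issues an sfence after every PERF_BARRIER_SIZE (12 cache lines) so the write-combining buffers drain predictably. A simplified sketch of that loop shape, assuming a 64-byte-aligned buffer whose length is a multiple of 64:

/* Simplified non-temporal fill: 12-cache-line chunks separated by sfence (SSE2). */
#include <emmintrin.h>
#include <stdlib.h>

#define CACHELINE 64
#define CHUNK_LINES 12	/* number of WC buffers assumed by the code above */

static void
nt_fill_chunked(char *dest, int c, size_t len)	/* dest 64-byte aligned, len % 64 == 0 */
{
	__m128i xmm = _mm_set1_epi8((char)c);
	while (len >= CHUNK_LINES * CACHELINE) {
		for (size_t off = 0; off < CHUNK_LINES * CACHELINE; off += 16)
			_mm_stream_si128((__m128i *)(dest + off), xmm);
		_mm_sfence();	/* drain WC buffers before streaming the next chunk */
		dest += CHUNK_LINES * CACHELINE;
		len -= CHUNK_LINES * CACHELINE;
	}
	for (size_t off = 0; off < len; off += 16)
		_mm_stream_si128((__m128i *)(dest + off), xmm);
	_mm_sfence();	/* final ordering barrier, as barrier_after_ntstores() provides */
}

int
main(void)
{
	char *buf = aligned_alloc(64, 4096);
	if (buf == NULL)
		return 1;
	nt_fill_chunked(buf, 0x5a, 4096);
	free(buf);
	return 0;
}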
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memset/memset_nt_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx.h" #include "out.h" #include "valgrind_internal.h" static force_inline void mm256_stream_si256(char *dest, unsigned idx, __m256i src) { _mm256_stream_si256((__m256i *)dest + idx, src); barrier(); } static force_inline void memset_movnt8x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); mm256_stream_si256(dest, 4, ymm); mm256_stream_si256(dest, 5, ymm); mm256_stream_si256(dest, 6, ymm); mm256_stream_si256(dest, 7, ymm); mm256_stream_si256(dest, 8, ymm); mm256_stream_si256(dest, 9, ymm); mm256_stream_si256(dest, 10, ymm); mm256_stream_si256(dest, 11, ymm); mm256_stream_si256(dest, 12, ymm); mm256_stream_si256(dest, 13, ymm); mm256_stream_si256(dest, 14, ymm); mm256_stream_si256(dest, 15, ymm); } static force_inline void memset_movnt4x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); mm256_stream_si256(dest, 4, ymm); mm256_stream_si256(dest, 5, ymm); mm256_stream_si256(dest, 6, ymm); mm256_stream_si256(dest, 7, ymm); } static force_inline void memset_movnt2x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); } static force_inline void memset_movnt1x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm0 = m256_get16b(ymm); _mm_stream_si128((__m128i *)dest, xmm0); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char *dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_avx(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { char *orig_dest = dest; size_t orig_len = len; __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= PERF_BARRIER_SIZE) { memset_movnt8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; memset_movnt4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { memset_movnt8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, ymm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, ymm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx(dest, ymm, len, flush); end: avx_zeroupper(); barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } /* variants without perf_barrier */ void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
6,151
20.43554
71
c
null
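Each bulk routine above first writes just enough bytes to reach a 64-byte boundary (cnt = 64 - ((uint64_t)dest & 63)) before switching to full-cache-line stores. The sketch below shows only that prologue arithmetic; plain memset stands in for memset_small_avx and the cache-line store loops.

/* Prologue-alignment arithmetic as used by the memset/memmove bodies above (sketch). */
#include <stdint.h>
#include <string.h>

static void
fill_head_then_bulk(char *dest, int c, size_t len)
{
	size_t cnt = (uintptr_t)dest & 63;	/* bytes past the previous 64-byte boundary */
	if (cnt > 0) {
		cnt = 64 - cnt;			/* bytes needed to reach the next boundary */
		if (cnt > len)
			cnt = len;
		memset(dest, c, cnt);		/* stands in for memset_small_avx(..., flush) */
		dest += cnt;
		len -= cnt;
	}
	/* From here on, dest is 64-byte aligned (or len == 0). */
	memset(dest, c, len);			/* stands in for the cache-line-sized store loops */
}

int
main(void)
{
	char buf[256];
	fill_head_then_bulk(buf + 3, 0x11, 200);	/* deliberately misaligned start */
	return buf[3] != 0x11;
}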
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memset/memset_t_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx512f.h" static force_inline void mm512_store_si512(char *dest, unsigned idx, __m512i src) { _mm512_store_si512((__m512i *)dest + idx, src); } static force_inline void memset_mov32x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); mm512_store_si512(dest, 8, zmm); mm512_store_si512(dest, 9, zmm); mm512_store_si512(dest, 10, zmm); mm512_store_si512(dest, 11, zmm); mm512_store_si512(dest, 12, zmm); mm512_store_si512(dest, 13, zmm); mm512_store_si512(dest, 14, zmm); mm512_store_si512(dest, 15, zmm); mm512_store_si512(dest, 16, zmm); mm512_store_si512(dest, 17, zmm); mm512_store_si512(dest, 18, zmm); mm512_store_si512(dest, 19, zmm); mm512_store_si512(dest, 20, zmm); mm512_store_si512(dest, 21, zmm); mm512_store_si512(dest, 22, zmm); mm512_store_si512(dest, 23, zmm); mm512_store_si512(dest, 24, zmm); mm512_store_si512(dest, 25, zmm); mm512_store_si512(dest, 26, zmm); mm512_store_si512(dest, 27, zmm); mm512_store_si512(dest, 28, zmm); mm512_store_si512(dest, 29, zmm); mm512_store_si512(dest, 30, zmm); mm512_store_si512(dest, 31, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memset_mov16x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); mm512_store_si512(dest, 8, zmm); mm512_store_si512(dest, 9, zmm); mm512_store_si512(dest, 10, zmm); mm512_store_si512(dest, 11, zmm); mm512_store_si512(dest, 12, zmm); mm512_store_si512(dest, 13, zmm); mm512_store_si512(dest, 14, zmm); mm512_store_si512(dest, 15, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memset_mov8x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); 
mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_avx512f(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m512i zmm = _mm512_set1_epi8((char)c); /* See comment in memset_movnt_avx512f */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_mov32x64b(dest, zmm, flush64b); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_mov16x64b(dest, zmm, flush64b); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_mov8x64b(dest, zmm, flush64b); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, zmm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, zmm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, zmm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_avx512f(dest, ymm, len, flush); avx_zeroupper(); } void memset_mov_avx512f_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, noflush, noflush64b); } void memset_mov_avx512f_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_avx512f_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_avx512f_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clwb_nolog, pmem_clwb); }
6,851
22.958042
69
c
null
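The memset_mov_* family above uses ordinary (temporal) stores and then flushes each 64-byte line explicitly through the flush64b callback (pmem_clflush, pmem_clflushopt, or pmem_clwb). A hedged sketch of that store-then-flush pattern, using clflush because it needs no extra compiler flags (clwb/clflushopt would be drop-in replacements where supported):

/* Temporal stores followed by an explicit per-cache-line flush (clflush as a portable stand-in). */
#include <emmintrin.h>
#include <stdlib.h>
#include <string.h>

#define CACHELINE 64

static void
fill_and_flush(char *dest, int c, size_t len)	/* dest 64-byte aligned, len % 64 == 0 */
{
	memset(dest, c, len);			/* regular stores; data may still sit in the cache */
	for (size_t off = 0; off < len; off += CACHELINE)
		_mm_clflush(dest + off);	/* write back each line, like flush64b() above */
	_mm_sfence();				/* order the flushes, as the drain step would */
}

int
main(void)
{
	char *buf = aligned_alloc(64, 1024);
	if (buf == NULL)
		return 1;
	fill_and_flush(buf, 0x33, 1024);
	free(buf);
	return 0;
}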
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memset/memset_nt_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx512f.h" #include "out.h" #include "util.h" #include "valgrind_internal.h" static force_inline void mm512_stream_si512(char *dest, unsigned idx, __m512i src) { _mm512_stream_si512((__m512i *)dest + idx, src); barrier(); } static force_inline void memset_movnt32x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, zmm); mm512_stream_si512(dest, 8, zmm); mm512_stream_si512(dest, 9, zmm); mm512_stream_si512(dest, 10, zmm); mm512_stream_si512(dest, 11, zmm); mm512_stream_si512(dest, 12, zmm); mm512_stream_si512(dest, 13, zmm); mm512_stream_si512(dest, 14, zmm); mm512_stream_si512(dest, 15, zmm); mm512_stream_si512(dest, 16, zmm); mm512_stream_si512(dest, 17, zmm); mm512_stream_si512(dest, 18, zmm); mm512_stream_si512(dest, 19, zmm); mm512_stream_si512(dest, 20, zmm); mm512_stream_si512(dest, 21, zmm); mm512_stream_si512(dest, 22, zmm); mm512_stream_si512(dest, 23, zmm); mm512_stream_si512(dest, 24, zmm); mm512_stream_si512(dest, 25, zmm); mm512_stream_si512(dest, 26, zmm); mm512_stream_si512(dest, 27, zmm); mm512_stream_si512(dest, 28, zmm); mm512_stream_si512(dest, 29, zmm); mm512_stream_si512(dest, 30, zmm); mm512_stream_si512(dest, 31, zmm); } static force_inline void memset_movnt16x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, zmm); mm512_stream_si512(dest, 8, zmm); mm512_stream_si512(dest, 9, zmm); mm512_stream_si512(dest, 10, zmm); mm512_stream_si512(dest, 11, zmm); mm512_stream_si512(dest, 12, zmm); mm512_stream_si512(dest, 13, zmm); mm512_stream_si512(dest, 14, zmm); mm512_stream_si512(dest, 15, zmm); } static force_inline void memset_movnt8x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, zmm); } static force_inline void memset_movnt4x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); } static force_inline void memset_movnt2x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); } static force_inline void memset_movnt1x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest, ymm); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm = _mm256_extracti128_si256(ymm, 0); _mm_stream_si128((__m128i *)dest, xmm); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char 
*dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_avx512f(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier) { char *orig_dest = dest; size_t orig_len = len; __m512i zmm = _mm512_set1_epi8((char)c); /* * Can't use _mm512_extracti64x4_epi64, because some versions of gcc * crash. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82887 */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_movnt32x64b(dest, zmm); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_movnt16x64b(dest, zmm); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_movnt8x64b(dest, zmm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, zmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, zmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, zmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx512f(dest, ymm, len, flush); end: avx_zeroupper(); barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } void memset_movnt_avx512f_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, noflush, barrier_after_ntstores); } void memset_movnt_avx512f_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_empty_nolog, barrier_after_ntstores); } void memset_movnt_avx512f_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clflush_nolog, barrier_after_ntstores); } void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores); } void memset_movnt_avx512f_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores); }
6,397
21.607774
71
c
null
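The non-temporal bodies above finish a power-of-two tail with the widest streaming store that still fits (32, 16, 8 or 4 bytes) and fall back to cached stores otherwise. Below is a small sketch of that tail dispatch, restricted to SSE2 and x86-64 for simplicity (the 32-byte case above needs AVX); it assumes dest is aligned to len.

/* Tail dispatch to the widest fitting non-temporal store (SSE2 subset of the logic above). */
#include <emmintrin.h>
#include <stdint.h>
#include <string.h>

static int
is_pow2(size_t x)
{
	return x != 0 && (x & (x - 1)) == 0;	/* same test util_is_pow2() performs */
}

static void
nt_store_tail(char *dest, __m128i xmm, size_t len)
{
	if (is_pow2(len) && len >= 4 && len <= 16) {
		if (len == 16)
			_mm_stream_si128((__m128i *)dest, xmm);
		else if (len == 8)
			_mm_stream_si64((long long *)dest, (long long)(uint64_t)_mm_cvtsi128_si64(xmm));
		else	/* len == 4 */
			_mm_stream_si32((int *)dest, _mm_cvtsi128_si32(xmm));
		_mm_sfence();
		return;
	}
	/* Odd sizes: plain cached stores, like the memset_small_* fallback above. */
	memset(dest, (char)_mm_cvtsi128_si32(xmm), len);
}

int
main(void)
{
	_Alignas(16) char buf[16];
	nt_store_tail(buf, _mm_set1_epi8(0x44), sizeof(buf));
	return buf[0] != 0x44;
}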
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memset/memset_t_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_sse2.h" static force_inline void mm_store_si128(char *dest, unsigned idx, __m128i src) { _mm_store_si128((__m128i *)dest + idx, src); } static force_inline void memset_mov4x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); mm_store_si128(dest, 4, xmm); mm_store_si128(dest, 5, xmm); mm_store_si128(dest, 6, xmm); mm_store_si128(dest, 7, xmm); mm_store_si128(dest, 8, xmm); mm_store_si128(dest, 9, xmm); mm_store_si128(dest, 10, xmm); mm_store_si128(dest, 11, xmm); mm_store_si128(dest, 12, xmm); mm_store_si128(dest, 13, xmm); mm_store_si128(dest, 14, xmm); mm_store_si128(dest, 15, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); mm_store_si128(dest, 4, xmm); mm_store_si128(dest, 5, xmm); mm_store_si128(dest, 6, xmm); mm_store_si128(dest, 7, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_sse2(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 4 * 64) { memset_mov4x64b(dest, xmm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, xmm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, xmm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_sse2(dest, xmm, len, flush); } void memset_mov_sse2_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, noflush, noflush64b); } void memset_mov_sse2_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_sse2_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_sse2_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_sse2_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clwb_nolog, pmem_clwb); }
3,304
20.461039
66
c
null
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memset/memset_sse2.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #ifndef PMEM2_MEMSET_SSE2_H #define PMEM2_MEMSET_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include <string.h> #include "out.h" static force_inline void memset_small_sse2_noflush(char *dest, __m128i xmm, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + 32), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 33..48 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; le32: if (len > 16) { /* 17..32 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 9..16 */ uint64_t d8 = (uint64_t)_mm_cvtsi128_si64(xmm); *(ua_uint64_t *)dest = d8; *(ua_uint64_t *)(dest + len - 8) = d8; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d4 = (uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint32_t *)dest = d4; *(ua_uint32_t *)(dest + len - 4) = d4; return; } /* 3..4 */ uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint16_t *)dest = d2; *(ua_uint16_t *)(dest + len - 2) = d2; return; le2: if (len == 2) { uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint16_t *)dest = d2; return; } *(uint8_t *)dest = (uint8_t)_mm_cvtsi128_si32(xmm); } static force_inline void memset_small_sse2(char *dest, __m128i xmm, size_t len, flush_fn flush) { /* * pmemcheck complains about "overwritten stores before they were made * persistent" for overlapping stores (last instruction in each code * path) in the optimized version. * libc's memset also does that, so we can't use it here. */ if (On_pmemcheck) { memset_nodrain_generic(dest, (uint8_t)_mm_cvtsi128_si32(xmm), len, PMEM2_F_MEM_NOFLUSH, NULL); } else { memset_small_sse2_noflush(dest, xmm, len); } flush(dest, len); } #endif
2,213
20.085714
71
h
null
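memset_small_sse2_noflush above covers every length from 1 to 64 with a handful of possibly overlapping stores, e.g. lengths 9..16 are written as two 8-byte stores, one anchored at the start and one at the end. A sketch of just that overlapping-store trick for the 9..16 byte case, using memcpy as the portable stand-in for the unaligned (ua_uint64_t) stores:

/* Overlapping-store trick for 9..16 byte fills (sketch; memcpy models the unaligned stores). */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void
set_9_to_16(char *dest, uint8_t c, size_t len)
{
	assert(len >= 9 && len <= 16);
	uint64_t pattern;
	memset(&pattern, c, sizeof(pattern));		/* 8 copies of the byte */
	memcpy(dest, &pattern, 8);			/* first 8 bytes */
	memcpy(dest + len - 8, &pattern, 8);		/* last 8 bytes; overlaps the first store */
}

int
main(void)
{
	char buf[16];
	set_9_to_16(buf, 0xcc, 13);
	for (int i = 0; i < 13; i++)
		if ((uint8_t)buf[i] != 0xcc)
			return 1;
	return 0;
}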
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memset/memset_t_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx.h" static force_inline void mm256_store_si256(char *dest, unsigned idx, __m256i src) { _mm256_store_si256((__m256i *)dest + idx, src); } static force_inline void memset_mov8x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); mm256_store_si256(dest, 4, ymm); mm256_store_si256(dest, 5, ymm); mm256_store_si256(dest, 6, ymm); mm256_store_si256(dest, 7, ymm); mm256_store_si256(dest, 8, ymm); mm256_store_si256(dest, 9, ymm); mm256_store_si256(dest, 10, ymm); mm256_store_si256(dest, 11, ymm); mm256_store_si256(dest, 12, ymm); mm256_store_si256(dest, 13, ymm); mm256_store_si256(dest, 14, ymm); mm256_store_si256(dest, 15, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); mm256_store_si256(dest, 4, ymm); mm256_store_si256(dest, 5, ymm); mm256_store_si256(dest, 6, ymm); mm256_store_si256(dest, 7, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_avx(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 8 * 64) { memset_mov8x64b(dest, ymm, flush64b); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, ymm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, ymm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, ymm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_avx(dest, ymm, len, flush); avx_zeroupper(); } void memset_mov_avx_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, noflush, noflush64b); } void memset_mov_avx_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_avx_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_avx_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, 
flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_avx_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_clwb_nolog, pmem_clwb); }
3,890
20.73743
65
c
null
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy/memcpy_t_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" #include "out.h" static force_inline __m128i mm_loadu_si128(const char *src, unsigned idx) { return _mm_loadu_si128((const __m128i *)src + idx); } static force_inline void mm_store_si128(char *dest, unsigned idx, __m128i src) { _mm_store_si128((__m128i *)dest + idx, src); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); __m128i xmm8 = mm_loadu_si128(src, 8); __m128i xmm9 = mm_loadu_si128(src, 9); __m128i xmm10 = mm_loadu_si128(src, 10); __m128i xmm11 = mm_loadu_si128(src, 11); __m128i xmm12 = mm_loadu_si128(src, 12); __m128i xmm13 = mm_loadu_si128(src, 13); __m128i xmm14 = mm_loadu_si128(src, 14); __m128i xmm15 = mm_loadu_si128(src, 15); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); mm_store_si128(dest, 4, xmm4); mm_store_si128(dest, 5, xmm5); mm_store_si128(dest, 6, xmm6); mm_store_si128(dest, 7, xmm7); mm_store_si128(dest, 8, xmm8); mm_store_si128(dest, 9, xmm9); mm_store_si128(dest, 10, xmm10); mm_store_si128(dest, 11, xmm11); mm_store_si128(dest, 12, xmm12); mm_store_si128(dest, 13, xmm13); mm_store_si128(dest, 14, xmm14); mm_store_si128(dest, 15, xmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); mm_store_si128(dest, 4, xmm4); mm_store_si128(dest, 5, xmm5); mm_store_si128(dest, 6, xmm6); mm_store_si128(dest, 7, xmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_sse_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 
1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_mov_sse_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt, flush); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_sse2(dest - len, src - len, len, flush); } static force_inline void memmove_mov_sse2(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_sse_fw(dest, src, len, flush, flush64b); else memmove_mov_sse_bw(dest, src, len, flush, flush64b); } void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, noflush, noflush64b); } void memmove_mov_sse2_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clwb_nolog, pmem_clwb); }
5,820
22.566802
69
c
null
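memmove_mov_sse2 above chooses the copy direction with a single unsigned comparison: (uintptr_t)dest - (uintptr_t)src >= len holds exactly when a forward copy cannot clobber unread source bytes (unsigned wrap-around also covers dest < src). A worked sketch of that decision with byte-at-a-time loops standing in for the SIMD bodies:

/* Direction selection for an overlap-safe copy, mirroring the comparison above. */
#include <stdint.h>
#include <stdio.h>

static void
copy_overlap_safe(char *dest, const char *src, size_t len)
{
	if ((uintptr_t)dest - (uintptr_t)src >= len) {
		/* dest is not inside [src, src + len): copy forward */
		for (size_t i = 0; i < len; i++)
			dest[i] = src[i];
	} else {
		/* overlapping with dest ahead of src: copy backward */
		for (size_t i = len; i > 0; i--)
			dest[i - 1] = src[i - 1];
	}
}

int
main(void)
{
	char buf[16] = "abcdefghij";
	copy_overlap_safe(buf + 2, buf, 10);	/* overlapping, dest > src: backward path */
	printf("%.12s\n", buf);			/* prints "ababcdefghij" */
	return 0;
}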
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy/memcpy_avx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #ifndef PMEM2_MEMCPY_AVX_H #define PMEM2_MEMCPY_AVX_H #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "out.h" static force_inline void memmove_small_avx_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; /* 33..64 */ __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); __m256i ymm1 = _mm256_loadu_si256((__m256i *)(src + len - 32)); _mm256_storeu_si256((__m256i *)dest, ymm0); _mm256_storeu_si256((__m256i *)(dest + len - 32), ymm1); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ ua_uint64_t d80 = *(ua_uint64_t *)src; ua_uint64_t d81 = *(ua_uint64_t *)(src + len - 8); *(ua_uint64_t *)dest = d80; *(ua_uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ ua_uint32_t d40 = *(ua_uint32_t *)src; ua_uint32_t d41 = *(ua_uint32_t *)(src + len - 4); *(ua_uint32_t *)dest = d40; *(ua_uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ ua_uint16_t d20 = *(ua_uint16_t *)src; ua_uint16_t d21 = *(ua_uint16_t *)(src + len - 2); *(ua_uint16_t *)dest = d20; *(ua_uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { *(ua_uint16_t *)dest = *(ua_uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline void memmove_small_avx(char *dest, const char *src, size_t len, flush_fn flush) { /* * pmemcheck complains about "overwritten stores before they were made * persistent" for overlapping stores (last instruction in each code * path) in the optimized version. * libc's memcpy also does that, so we can't use it here. */ if (On_pmemcheck) { memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH, NULL); } else { memmove_small_avx_noflush(dest, src, len); } flush(dest, len); } #endif
2,173
20.524752
74
h
null
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy/memcpy_t_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx.h" static force_inline __m256i mm256_loadu_si256(const char *src, unsigned idx) { return _mm256_loadu_si256((const __m256i *)src + idx); } static force_inline void mm256_store_si256(char *dest, unsigned idx, __m256i src) { _mm256_store_si256((__m256i *)dest + idx, src); } static force_inline void memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); __m256i ymm8 = mm256_loadu_si256(src, 8); __m256i ymm9 = mm256_loadu_si256(src, 9); __m256i ymm10 = mm256_loadu_si256(src, 10); __m256i ymm11 = mm256_loadu_si256(src, 11); __m256i ymm12 = mm256_loadu_si256(src, 12); __m256i ymm13 = mm256_loadu_si256(src, 13); __m256i ymm14 = mm256_loadu_si256(src, 14); __m256i ymm15 = mm256_loadu_si256(src, 15); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); mm256_store_si256(dest, 4, ymm4); mm256_store_si256(dest, 5, ymm5); mm256_store_si256(dest, 6, ymm6); mm256_store_si256(dest, 7, ymm7); mm256_store_si256(dest, 8, ymm8); mm256_store_si256(dest, 9, ymm9); mm256_store_si256(dest, 10, ymm10); mm256_store_si256(dest, 11, ymm11); mm256_store_si256(dest, 12, ymm12); mm256_store_si256(dest, 13, ymm13); mm256_store_si256(dest, 14, ymm14); mm256_store_si256(dest, 15, ymm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); mm256_store_si256(dest, 4, ymm4); mm256_store_si256(dest, 5, ymm5); mm256_store_si256(dest, 6, ymm6); mm256_store_si256(dest, 7, ymm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); mm256_store_si256(dest, 0, ymm0); 
mm256_store_si256(dest, 1, ymm1); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 8 * 64) { memmove_mov8x64b(dest, src, flush64b); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx(dest, src, len, flush); } static force_inline void memmove_mov_avx_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt, flush); } while (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src, flush64b); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_avx(dest - len, src - len, len, flush); } static force_inline void memmove_mov_avx(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx_fw(dest, src, len, flush, flush64b); else memmove_mov_avx_bw(dest, src, len, flush, flush64b); avx_zeroupper(); } void memmove_mov_avx_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, noflush, noflush64b); } void memmove_mov_avx_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_avx_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_avx_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_clwb_nolog, pmem_clwb); }
6,705
22.780142
68
c
null
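The record above implements temporal (cached) AVX copies: data is stored through the cache and each 64-byte line is then flushed explicitly. As a minimal, hedged illustration of that store-then-flush pattern — not the library's actual dispatch path — the sketch below copies one cache line with two 32-byte AVX stores and flushes it with the baseline CLFLUSH instruction; the 64-byte-aligned destination and the use of _mm_clflush/_mm_sfence are simplifying assumptions (the real code selects clflush/clflushopt/clwb at runtime).

/* hypothetical build: gcc -mavx -O2 avx_line_copy.c */
#include <immintrin.h>
#include <stdalign.h>
#include <stdio.h>
#include <string.h>

/* Copy one 64-byte cache line with AVX stores, then flush it. */
static void
copy_line_avx_clflush(char *dst64, const char *src)
{
	__m256i lo = _mm256_loadu_si256((const __m256i *)src);
	__m256i hi = _mm256_loadu_si256((const __m256i *)(src + 32));

	/* aligned stores: dst64 is assumed to be 64-byte aligned */
	_mm256_store_si256((__m256i *)dst64, lo);
	_mm256_store_si256((__m256i *)(dst64 + 32), hi);

	_mm_clflush(dst64);	/* write the line back and invalidate it */
	_mm_sfence();		/* order the flush before later stores */
	_mm256_zeroupper();	/* avoid AVX/SSE transition penalties */
}

int
main(void)
{
	alignas(64) char dst[64];
	char src[64];

	memset(src, 0xab, sizeof(src));
	copy_line_avx_clflush(dst, src);
	printf("copied: %d\n", memcmp(dst, src, 64) == 0);
	return 0;
}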
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy/memcpy_t_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx512f.h" static force_inline __m512i mm512_loadu_si512(const char *src, unsigned idx) { return _mm512_loadu_si512((const __m512i *)src + idx); } static force_inline void mm512_store_si512(char *dest, unsigned idx, __m512i src) { _mm512_store_si512((__m512i *)dest + idx, src); } static force_inline void memmove_mov32x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); __m512i zmm16 = mm512_loadu_si512(src, 16); __m512i zmm17 = mm512_loadu_si512(src, 17); __m512i zmm18 = mm512_loadu_si512(src, 18); __m512i zmm19 = mm512_loadu_si512(src, 19); __m512i zmm20 = mm512_loadu_si512(src, 20); __m512i zmm21 = mm512_loadu_si512(src, 21); __m512i zmm22 = mm512_loadu_si512(src, 22); __m512i zmm23 = mm512_loadu_si512(src, 23); __m512i zmm24 = mm512_loadu_si512(src, 24); __m512i zmm25 = mm512_loadu_si512(src, 25); __m512i zmm26 = mm512_loadu_si512(src, 26); __m512i zmm27 = mm512_loadu_si512(src, 27); __m512i zmm28 = mm512_loadu_si512(src, 28); __m512i zmm29 = mm512_loadu_si512(src, 29); __m512i zmm30 = mm512_loadu_si512(src, 30); __m512i zmm31 = mm512_loadu_si512(src, 31); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); mm512_store_si512(dest, 8, zmm8); mm512_store_si512(dest, 9, zmm9); mm512_store_si512(dest, 10, zmm10); mm512_store_si512(dest, 11, zmm11); mm512_store_si512(dest, 12, zmm12); mm512_store_si512(dest, 13, zmm13); mm512_store_si512(dest, 14, zmm14); mm512_store_si512(dest, 15, zmm15); mm512_store_si512(dest, 16, zmm16); mm512_store_si512(dest, 17, zmm17); mm512_store_si512(dest, 18, zmm18); mm512_store_si512(dest, 19, zmm19); mm512_store_si512(dest, 20, zmm20); mm512_store_si512(dest, 21, zmm21); mm512_store_si512(dest, 22, zmm22); mm512_store_si512(dest, 23, zmm23); mm512_store_si512(dest, 24, zmm24); mm512_store_si512(dest, 25, zmm25); mm512_store_si512(dest, 26, zmm26); mm512_store_si512(dest, 27, zmm27); mm512_store_si512(dest, 28, zmm28); mm512_store_si512(dest, 29, zmm29); mm512_store_si512(dest, 30, zmm30); mm512_store_si512(dest, 31, zmm31); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 
64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memmove_mov16x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); mm512_store_si512(dest, 8, zmm8); mm512_store_si512(dest, 9, zmm9); mm512_store_si512(dest, 10, zmm10); mm512_store_si512(dest, 11, zmm11); mm512_store_si512(dest, 12, zmm12); mm512_store_si512(dest, 13, zmm13); mm512_store_si512(dest, 14, zmm14); mm512_store_si512(dest, 15, zmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); 
flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); mm512_store_si512(dest, 0, zmm0); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx512f_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_mov32x64b(dest, src, flush64b); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_mov16x64b(dest, src, flush64b); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memmove_mov8x64b(dest, src, flush64b); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx512f(dest, src, len, flush); } static force_inline void memmove_mov_avx512f_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt, flush); } while (len >= 32 * 64) { dest -= 32 * 64; src -= 32 * 64; len -= 32 * 64; memmove_mov32x64b(dest, src, flush64b); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_mov16x64b(dest, src, flush64b); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src, flush64b); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_avx512f(dest - len, src - len, len, flush); } static force_inline void memmove_mov_avx512f(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx512f_fw(dest, src, len, flush, flush64b); else memmove_mov_avx512f_bw(dest, src, len, flush, flush64b); avx_zeroupper(); } void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, noflush, noflush64b); } void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_avx512f_clflushopt(char *dest, const char *src, 
size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clwb_nolog, pmem_clwb); }
11,422
25.020501
72
c
null
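The AVX-512 variant above consumes the length in a cascade of progressively smaller cache-line-multiple blocks (32x64B, 16x64B, ..., 1x64B) after aligning the destination to 64 bytes. The standalone sketch below models just that length-splitting control flow in plain C; the block sizes mirror the record, but the printout is purely illustrative.

#include <stdio.h>
#include <stddef.h>

/* Model of the forward-copy block cascade used above (sizes in bytes). */
static void
show_block_cascade(size_t len)
{
	static const size_t blocks[] = {
		32 * 64, 16 * 64, 8 * 64, 4 * 64, 2 * 64, 1 * 64
	};

	/* the largest block runs in a loop, the smaller ones at most once */
	while (len >= blocks[0]) {
		printf("copy %zu bytes\n", blocks[0]);
		len -= blocks[0];
	}
	for (size_t i = 1; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		if (len >= blocks[i]) {
			printf("copy %zu bytes\n", blocks[i]);
			len -= blocks[i];
		}
	}
	if (len)
		printf("tail of %zu bytes goes to the small-copy path\n", len);
}

int
main(void)
{
	show_block_cascade(5000);	/* arbitrary example length */
	return 0;
}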
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy/memcpy_sse2.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */

#ifndef PMEM2_MEMCPY_SSE2_H
#define PMEM2_MEMCPY_SSE2_H

#include <xmmintrin.h>
#include <stddef.h>
#include <stdint.h>

#include "out.h"

static force_inline void
memmove_small_sse2_noflush(char *dest, const char *src, size_t len)
{
	ASSERT(len <= 64);

	if (len <= 8)
		goto le8;
	if (len <= 32)
		goto le32;

	if (len > 48) {
		/* 49..64 */
		__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
		__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16));
		__m128i xmm2 = _mm_loadu_si128((__m128i *)(src + 32));
		__m128i xmm3 = _mm_loadu_si128((__m128i *)(src + len - 16));

		_mm_storeu_si128((__m128i *)dest, xmm0);
		_mm_storeu_si128((__m128i *)(dest + 16), xmm1);
		_mm_storeu_si128((__m128i *)(dest + 32), xmm2);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm3);
		return;
	}

	/* 33..48 */
	__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
	__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16));
	__m128i xmm2 = _mm_loadu_si128((__m128i *)(src + len - 16));

	_mm_storeu_si128((__m128i *)dest, xmm0);
	_mm_storeu_si128((__m128i *)(dest + 16), xmm1);
	_mm_storeu_si128((__m128i *)(dest + len - 16), xmm2);
	return;

le32:
	if (len > 16) {
		/* 17..32 */
		__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
		__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16));

		_mm_storeu_si128((__m128i *)dest, xmm0);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm1);
		return;
	}

	/* 9..16 */
	uint64_t d80 = *(ua_uint64_t *)src;
	uint64_t d81 = *(ua_uint64_t *)(src + len - 8);

	*(ua_uint64_t *)dest = d80;
	*(ua_uint64_t *)(dest + len - 8) = d81;
	return;

le8:
	if (len <= 2)
		goto le2;

	if (len > 4) {
		/* 5..8 */
		uint32_t d40 = *(ua_uint32_t *)src;
		uint32_t d41 = *(ua_uint32_t *)(src + len - 4);

		*(ua_uint32_t *)dest = d40;
		*(ua_uint32_t *)(dest + len - 4) = d41;
		return;
	}

	/* 3..4 */
	uint16_t d20 = *(ua_uint16_t *)src;
	uint16_t d21 = *(ua_uint16_t *)(src + len - 2);

	*(ua_uint16_t *)dest = d20;
	*(ua_uint16_t *)(dest + len - 2) = d21;
	return;

le2:
	if (len == 2) {
		*(ua_uint16_t *)dest = *(ua_uint16_t *)src;
		return;
	}

	*(uint8_t *)dest = *(uint8_t *)src;
}

static force_inline void
memmove_small_sse2(char *dest, const char *src, size_t len, flush_fn flush)
{
	/*
	 * pmemcheck complains about "overwritten stores before they were made
	 * persistent" for overlapping stores (last instruction in each code
	 * path) in the optimized version.
	 * libc's memcpy also does that, so we can't use it here.
	 */
	if (On_pmemcheck) {
		memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH,
				NULL);
	} else {
		memmove_small_sse2_noflush(dest, src, len);
	}

	flush(dest, len);
}

#endif
2,726
22.307692
75
h
null
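The header above handles copies of at most 64 bytes by pairing a load from the start of the range with a load from src + len - N, so two (possibly overlapping) fixed-size stores cover any length in the bucket without a per-byte loop. A self-contained sketch of that trick for the 17..32-byte bucket, under the same assumption the record makes — that overlapping unaligned SSE2 loads and stores are acceptable:

#include <emmintrin.h>
#include <stdio.h>
#include <string.h>

/* Copy 17..32 bytes with two 16-byte accesses that may overlap. */
static void
copy_17_to_32(char *dest, const char *src, size_t len)
{
	__m128i head = _mm_loadu_si128((const __m128i *)src);
	__m128i tail = _mm_loadu_si128((const __m128i *)(src + len - 16));

	_mm_storeu_si128((__m128i *)dest, head);
	_mm_storeu_si128((__m128i *)(dest + len - 16), tail);
}

int
main(void)
{
	const char src[] = "abcdefghijklmnopqrstuvwxy";	/* 25 chars + NUL */
	char dest[32] = { 0 };

	copy_17_to_32(dest, src, 26);	/* copies the terminator too */
	printf("%s\n", dest);
	return 0;
}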
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy/memcpy_nt_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx.h" #include "valgrind_internal.h" static force_inline __m256i mm256_loadu_si256(const char *src, unsigned idx) { return _mm256_loadu_si256((const __m256i *)src + idx); } static force_inline void mm256_stream_si256(char *dest, unsigned idx, __m256i src) { _mm256_stream_si256((__m256i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); __m256i ymm8 = mm256_loadu_si256(src, 8); __m256i ymm9 = mm256_loadu_si256(src, 9); __m256i ymm10 = mm256_loadu_si256(src, 10); __m256i ymm11 = mm256_loadu_si256(src, 11); __m256i ymm12 = mm256_loadu_si256(src, 12); __m256i ymm13 = mm256_loadu_si256(src, 13); __m256i ymm14 = mm256_loadu_si256(src, 14); __m256i ymm15 = mm256_loadu_si256(src, 15); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); mm256_stream_si256(dest, 4, ymm4); mm256_stream_si256(dest, 5, ymm5); mm256_stream_si256(dest, 6, ymm6); mm256_stream_si256(dest, 7, ymm7); mm256_stream_si256(dest, 8, ymm8); mm256_stream_si256(dest, 9, ymm9); mm256_stream_si256(dest, 10, ymm10); mm256_stream_si256(dest, 11, ymm11); mm256_stream_si256(dest, 12, ymm12); mm256_stream_si256(dest, 13, ymm13); mm256_stream_si256(dest, 14, ymm14); mm256_stream_si256(dest, 15, ymm15); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); mm256_stream_si256(dest, 4, ymm4); mm256_stream_si256(dest, 5, ymm5); mm256_stream_si256(dest, 6, ymm6); mm256_stream_si256(dest, 7, ymm7); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); mm256_stream_si256(dest, 0, ymm0); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, xmm0); } static 
force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_avx_fw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } const char *srcend = src + len; prefetch_ini_fw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_fw(src, srcend); memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx_bw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt, flush); } const char *srcbegin = src - len; prefetch_ini_bw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_bw(src, srcbegin); dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx_fw(dest, src, len, flush, perf_barrier); else memmove_movnt_avx_bw(dest, src, len, flush, perf_barrier); barrier(); VALGRIND_DO_FLUSH(dest, len); } /* variants without perf_barrier */ void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
10,092
21.731982
79
c
null
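The record above uses non-temporal (streaming) AVX stores so large copies bypass the cache, and finishes with a barrier so the write-combining buffers are drained before the copy is considered done. A minimal hedged sketch of that idea for one cache line; the 32-byte alignment required by _mm256_stream_si256 is provided here with alignas and is an assumption, as is the plain _mm_sfence() standing in for the record's barrier/flush callbacks.

/* hypothetical build: gcc -mavx -O2 nt_line_copy.c */
#include <immintrin.h>
#include <stdalign.h>
#include <stdio.h>
#include <string.h>

/* Copy one 64-byte line with streaming stores, then drain WC buffers. */
static void
copy_line_nt_avx(char *dst64, const char *src)
{
	__m256i lo = _mm256_loadu_si256((const __m256i *)src);
	__m256i hi = _mm256_loadu_si256((const __m256i *)(src + 32));

	_mm256_stream_si256((__m256i *)dst64, lo);	/* bypasses the cache */
	_mm256_stream_si256((__m256i *)(dst64 + 32), hi);

	_mm_sfence();	/* make the streaming stores globally visible */
	_mm256_zeroupper();
}

int
main(void)
{
	alignas(64) char dst[64];
	char src[64];

	memset(src, 0x5a, sizeof(src));
	copy_line_nt_avx(dst, src);
	printf("copied: %d\n", memcmp(dst, src, 64) == 0);
	return 0;
}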
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy/memcpy_nt_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" #include "valgrind_internal.h" static force_inline __m128i mm_loadu_si128(const char *src, unsigned idx) { return _mm_loadu_si128((const __m128i *)src + idx); } static force_inline void mm_stream_si128(char *dest, unsigned idx, __m128i src) { _mm_stream_si128((__m128i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); __m128i xmm8 = mm_loadu_si128(src, 8); __m128i xmm9 = mm_loadu_si128(src, 9); __m128i xmm10 = mm_loadu_si128(src, 10); __m128i xmm11 = mm_loadu_si128(src, 11); __m128i xmm12 = mm_loadu_si128(src, 12); __m128i xmm13 = mm_loadu_si128(src, 13); __m128i xmm14 = mm_loadu_si128(src, 14); __m128i xmm15 = mm_loadu_si128(src, 15); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); mm_stream_si128(dest, 4, xmm4); mm_stream_si128(dest, 5, xmm5); mm_stream_si128(dest, 6, xmm6); mm_stream_si128(dest, 7, xmm7); mm_stream_si128(dest, 8, xmm8); mm_stream_si128(dest, 9, xmm9); mm_stream_si128(dest, 10, xmm10); mm_stream_si128(dest, 11, xmm11); mm_stream_si128(dest, 12, xmm12); mm_stream_si128(dest, 13, xmm13); mm_stream_si128(dest, 14, xmm14); mm_stream_si128(dest, 15, xmm15); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); mm_stream_si128(dest, 4, xmm4); mm_stream_si128(dest, 5, xmm5); mm_stream_si128(dest, 6, xmm6); mm_stream_si128(dest, 7, xmm7); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); mm_stream_si128(dest, 0, xmm0); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_sse_fw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { size_t cnt = 
(uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } const char *srcend = src + len; prefetch_ini_fw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_fw(src, srcend); memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; return; } nonnt: memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_movnt_sse_bw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt, flush); } const char *srcbegin = src - len; prefetch_ini_bw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_bw(src, srcbegin); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } return; } nonnt: dest -= len; src -= len; memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_movnt_sse2(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_sse_fw(dest, src, len, flush, perf_barrier); else memmove_movnt_sse_bw(dest, src, len, flush, perf_barrier); barrier(); VALGRIND_DO_FLUSH(dest, len); } /* variants without perf_barrier */ void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
9,636
21.463869
80
c
null
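After its bulk loops, the SSE2 non-temporal variant above finishes power-of-two tails of 4..32 bytes with correspondingly sized streaming stores, since a partial cache line gains nothing from more than one such store. The sketch below mirrors that tail dispatch in isolation; it assumes the destination is aligned to the tail size (which the record guarantees by aligning to 64 bytes first) and falls back to a plain memcpy for other lengths.

#include <emmintrin.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Finish a short, suitably aligned tail with one streaming store. */
static void
nt_store_tail(char *dest, const char *src, size_t len)
{
	int pow2 = len != 0 && (len & (len - 1)) == 0;

	if (pow2 && len == 16) {
		__m128i v = _mm_loadu_si128((const __m128i *)src);
		_mm_stream_si128((__m128i *)dest, v);	/* needs 16B alignment */
	} else if (pow2 && len == 8) {
		_mm_stream_si64((long long *)dest, *(const long long *)src);
	} else if (pow2 && len == 4) {
		_mm_stream_si32((int *)dest, *(const int *)src);
	} else {
		memcpy(dest, src, len);		/* non-power-of-two tail */
	}
	_mm_sfence();
}

int
main(void)
{
	alignas(16) char dst[16];
	const char src[16] = "0123456789abcde";

	nt_store_tail(dst, src, 16);
	printf("tail copied: %d\n", memcmp(dst, src, 16) == 0);
	return 0;
}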
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/x86_64/memcpy/memcpy_nt_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx512f.h" #include "valgrind_internal.h" static force_inline __m512i mm512_loadu_si512(const char *src, unsigned idx) { return _mm512_loadu_si512((const __m512i *)src + idx); } static force_inline void mm512_stream_si512(char *dest, unsigned idx, __m512i src) { _mm512_stream_si512((__m512i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt32x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); __m512i zmm16 = mm512_loadu_si512(src, 16); __m512i zmm17 = mm512_loadu_si512(src, 17); __m512i zmm18 = mm512_loadu_si512(src, 18); __m512i zmm19 = mm512_loadu_si512(src, 19); __m512i zmm20 = mm512_loadu_si512(src, 20); __m512i zmm21 = mm512_loadu_si512(src, 21); __m512i zmm22 = mm512_loadu_si512(src, 22); __m512i zmm23 = mm512_loadu_si512(src, 23); __m512i zmm24 = mm512_loadu_si512(src, 24); __m512i zmm25 = mm512_loadu_si512(src, 25); __m512i zmm26 = mm512_loadu_si512(src, 26); __m512i zmm27 = mm512_loadu_si512(src, 27); __m512i zmm28 = mm512_loadu_si512(src, 28); __m512i zmm29 = mm512_loadu_si512(src, 29); __m512i zmm30 = mm512_loadu_si512(src, 30); __m512i zmm31 = mm512_loadu_si512(src, 31); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); mm512_stream_si512(dest, 8, zmm8); mm512_stream_si512(dest, 9, zmm9); mm512_stream_si512(dest, 10, zmm10); mm512_stream_si512(dest, 11, zmm11); mm512_stream_si512(dest, 12, zmm12); mm512_stream_si512(dest, 13, zmm13); mm512_stream_si512(dest, 14, zmm14); mm512_stream_si512(dest, 15, zmm15); mm512_stream_si512(dest, 16, zmm16); mm512_stream_si512(dest, 17, zmm17); mm512_stream_si512(dest, 18, zmm18); mm512_stream_si512(dest, 19, zmm19); mm512_stream_si512(dest, 20, zmm20); mm512_stream_si512(dest, 21, zmm21); mm512_stream_si512(dest, 22, zmm22); mm512_stream_si512(dest, 23, zmm23); mm512_stream_si512(dest, 24, zmm24); mm512_stream_si512(dest, 25, zmm25); mm512_stream_si512(dest, 26, zmm26); mm512_stream_si512(dest, 27, zmm27); mm512_stream_si512(dest, 28, zmm28); mm512_stream_si512(dest, 29, zmm29); mm512_stream_si512(dest, 30, zmm30); mm512_stream_si512(dest, 31, zmm31); } static force_inline void memmove_movnt16x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = 
mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); mm512_stream_si512(dest, 8, zmm8); mm512_stream_si512(dest, 9, zmm9); mm512_stream_si512(dest, 10, zmm10); mm512_stream_si512(dest, 11, zmm11); mm512_stream_si512(dest, 12, zmm12); mm512_stream_si512(dest, 13, zmm13); mm512_stream_si512(dest, 14, zmm14); mm512_stream_si512(dest, 15, zmm15); } static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); mm512_stream_si512(dest, 0, zmm0); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i zmm0 = _mm256_loadu_si256((__m256i *)src); _mm256_stream_si256((__m256i *)dest, zmm0); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i ymm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, ymm0); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len, flush_fn flush) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_movnt32x64b(dest, src); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_movnt16x64b(dest, src); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 
64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx512f(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len, flush_fn flush) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt, flush); } while (len >= 32 * 64) { dest -= 32 * 64; src -= 32 * 64; len -= 32 * 64; memmove_movnt32x64b(dest, src); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_movnt16x64b(dest, src); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx512f(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx512f(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx512f_fw(dest, src, len, flush); else memmove_movnt_avx512f_bw(dest, src, len, flush); barrier(); VALGRIND_DO_FLUSH(dest, len); } void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, noflush, barrier_after_ntstores); } void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_empty_nolog, barrier_after_ntstores); } void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clflush_nolog, barrier_after_ntstores); } void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores); } void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores); }
11,246
23.45
78
c
null
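All of the memmove variants in these records pick the copy direction with the unsigned test (uintptr_t)dest - (uintptr_t)src >= len: if the destination starts at least len bytes above the source, or anywhere below it (the subtraction wraps to a huge unsigned value), a forward copy cannot clobber unread source bytes; otherwise the backward path is taken. A tiny standalone check of that predicate, using an assumed local buffer purely for illustration:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* 1 = forward copy is safe, 0 = overlapping dest above src, copy backward */
static int
forward_safe(const void *dest, const void *src, size_t len)
{
	return (uintptr_t)dest - (uintptr_t)src >= len;
}

int
main(void)
{
	char buf[128];

	/* dest below src: subtraction wraps, always safe to go forward */
	printf("%d\n", forward_safe(buf, buf + 16, 64));	/* 1 */
	/* dest 16 bytes above src, 64-byte copy: overlap -> backward */
	printf("%d\n", forward_safe(buf + 16, buf, 64));	/* 0 */
	/* dest 64 bytes above src: no unread byte can be clobbered */
	printf("%d\n", forward_safe(buf + 64, buf, 64));	/* 1 */
	return 0;
}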
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/aarch64/arm_cacheops.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */

/*
 * ARM inline assembly to flush and invalidate caches
 * clwb => dc cvac
 * clflushopt => dc civac
 * fence => dmb ish
 * sfence => dmb ishst
 */

/*
 * Cache instructions on ARM:
 * ARMv8.0-a	DC CVAC  - cache clean to Point of Coherency
 *			   Meant for thread synchronization, usually implies
 *			   real memory flush but may mean less.
 * ARMv8.2-a	DC CVAP  - cache clean to Point of Persistency
 *			   Meant exactly for our use.
 * ARMv8.5-a	DC CVADP - cache clean to Point of Deep Persistency
 *			   As of mid-2019 not on any commercially available CPU.
 * Any of the above may be disabled for EL0, but it's probably safe to consider
 * that a system configuration error.
 * Other flags include I (like "DC CIVAC") that invalidates the cache line, but
 * we don't want that.
 *
 * Memory fences:
 * * DMB [ISH]		MFENCE
 * * DMB [ISH]ST	SFENCE
 * * DMB [ISH]LD	LFENCE
 *
 * Memory domains (cache coherency):
 * * non-shareable - local to a single core
 * * inner shareable (ISH) - a group of CPU clusters/sockets/other hardware
 *   Linux requires that anything within one operating system/hypervisor
 *   is within the same Inner Shareable domain.
 * * outer shareable (OSH) - one or more separate ISH domains
 * * full system (SY) - anything that can possibly access memory
 * Docs: ARM DDI 0487E.a page B2-144.
 *
 * Exception (privilege) levels:
 * * EL0 - userspace (ring 3)
 * * EL1 - kernel (ring 0)
 * * EL2 - hypervisor (ring -1)
 * * EL3 - "secure world" (ring -3)
 */

#ifndef AARCH64_CACHEOPS_H
#define AARCH64_CACHEOPS_H

#include <stdlib.h>

static inline void
arm_clean_va_to_poc(void const *p __attribute__((unused)))
{
	asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}

static inline void
arm_store_memory_barrier(void)
{
	asm volatile("dmb ishst" : : : "memory");
}
#endif
1,988
30.571429
80
h
null
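On AArch64 the record above maps a persistence flush onto dc cvac (clean to Point of Coherency) plus dmb ishst for store ordering. Below is a hedged sketch of how those two primitives could be combined into a range flush; it only compiles on AArch64, and the hard-coded 64-byte cache-line size is an assumption (production code would derive it from CTR_EL0).

/* AArch64-only sketch; assumes a 64-byte cache line. */
#include <stdint.h>
#include <stddef.h>

#define CACHE_LINE 64u

static inline void
clean_line(const void *p)
{
	asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}

static inline void
store_barrier(void)
{
	asm volatile("dmb ishst" : : : "memory");
}

/* Clean every cache line covering [addr, addr + len), then order stores. */
static void
flush_range(const void *addr, size_t len)
{
	uintptr_t p = (uintptr_t)addr & ~(uintptr_t)(CACHE_LINE - 1);
	uintptr_t end = (uintptr_t)addr + len;

	for (; p < end; p += CACHE_LINE)
		clean_line((const void *)p);

	store_barrier();
}

int
main(void)
{
	static char buf[256];

	buf[0] = 1;
	flush_range(buf, sizeof(buf));
	return 0;
}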
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/libpmem2/ppc64/init.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, IBM Corporation */
/* Copyright 2019-2020, Intel Corporation */

#include <errno.h>
#include <sys/mman.h>

#include "out.h"
#include "pmem2_arch.h"
#include "util.h"

/*
 * Older assemblers versions do not support the latest versions of L, e.g.
 * Binutils 2.34.
 * Workaround this by using longs.
 */
#define __SYNC(l)		".long (0x7c0004AC | ((" #l ") << 21))"
#define __DCBF(ra, rb, l)	".long (0x7c0000AC | ((" #l ") << 21)" \
				" | ((" #ra ") << 16) | ((" #rb ") << 11))"

static void
ppc_fence(void)
{
	LOG(15, NULL);

	/*
	 * Force a memory barrier to flush out all cache lines.
	 * Uses a heavyweight sync in order to guarantee the memory ordering
	 * even with a data cache flush.
	 * According to the POWER ISA 3.1, phwsync (aka. sync (L=4)) is treated
	 * as a hwsync by processors compatible with previous versions of the
	 * POWER ISA.
	 */
	asm volatile(__SYNC(4) : : : "memory");
}

static void
ppc_flush(const void *addr, size_t size)
{
	LOG(15, "addr %p size %zu", addr, size);

	uintptr_t uptr = (uintptr_t)addr;
	uintptr_t end = uptr + size;

	/* round down the address */
	uptr &= ~(CACHELINE_SIZE - 1);
	while (uptr < end) {
		/*
		 * Flush the data cache block.
		 * According to the POWER ISA 3.1, dcbstps (aka. dcbf (L=6))
		 * behaves as dcbf (L=0) on previous processors.
		 */
		asm volatile(__DCBF(0, %0, 6) : :"r"(uptr) : "memory");

		uptr += CACHELINE_SIZE;
	}
}

void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	LOG(3, "libpmem*: PPC64 support");

	info->fence = ppc_fence;
	info->flush = ppc_flush;
}
1,594
22.80597
74
c
null
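Because older binutils reject the L operand on sync/dcbf, the record above emits the raw instruction words with .long, OR-ing the L field into bits 21+ of the base opcodes. The portable sketch below only reproduces that opcode arithmetic and prints the results, so the macro encoding can be sanity-checked without a POWER toolchain; the printed values correspond to what __SYNC(4) (phwsync) and __DCBF(0, rb, 6) expand to for an example register number.

#include <stdio.h>
#include <stdint.h>

/* Mirror of the opcode arithmetic used by the __SYNC/__DCBF macros above. */
static uint32_t
enc_sync(uint32_t l)
{
	return 0x7c0004ACu | (l << 21);
}

static uint32_t
enc_dcbf(uint32_t ra, uint32_t rb, uint32_t l)
{
	return 0x7c0000ACu | (l << 21) | (ra << 16) | (rb << 11);
}

int
main(void)
{
	/* phwsync: sync with L=4 */
	printf("sync(L=4)        = 0x%08x\n", enc_sync(4));
	/* dcbstps: dcbf with L=6, RA=0, RB holding the address (say r9) */
	printf("dcbf(0, r9, L=6) = 0x%08x\n", enc_dcbf(0, 9, 6));
	return 0;
}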
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/windows/getopt/getopt.c
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "getopt.h" #include <stddef.h> #include <string.h> #include <stdio.h> char* optarg; int optopt; /* The variable optind [...] shall be initialized to 1 by the system. */ int optind = 1; int opterr; static char* optcursor = NULL; static char *first = NULL; /* rotates argv array */ static void rotate(char **argv, int argc) { if (argc <= 1) return; char *tmp = argv[0]; memmove(argv, argv + 1, (argc - 1) * sizeof(char *)); argv[argc - 1] = tmp; } /* Implemented based on [1] and [2] for optional arguments. optopt is handled FreeBSD-style, per [3]. Other GNU and FreeBSD extensions are purely accidental. [1] https://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html [2] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html [3] https://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE */ int getopt(int argc, char* const argv[], const char* optstring) { int optchar = -1; const char* optdecl = NULL; optarg = NULL; opterr = 0; optopt = 0; /* Unspecified, but we need it to avoid overrunning the argv bounds. */ if (optind >= argc) goto no_more_optchars; /* If, when getopt() is called argv[optind] is a null pointer, getopt() shall return -1 without changing optind. */ if (argv[optind] == NULL) goto no_more_optchars; /* If, when getopt() is called *argv[optind] is not the character '-', permute argv to move non options to the end */ if (*argv[optind] != '-') { if (argc - optind <= 1) goto no_more_optchars; if (!first) first = argv[optind]; do { rotate((char **)(argv + optind), argc - optind); } while (*argv[optind] != '-' && argv[optind] != first); if (argv[optind] == first) goto no_more_optchars; } /* If, when getopt() is called argv[optind] points to the string "-", getopt() shall return -1 without changing optind. */ if (strcmp(argv[optind], "-") == 0) goto no_more_optchars; /* If, when getopt() is called argv[optind] points to the string "--", getopt() shall return -1 after incrementing optind. 
*/ if (strcmp(argv[optind], "--") == 0) { ++optind; if (first) { do { rotate((char **)(argv + optind), argc - optind); } while (argv[optind] != first); } goto no_more_optchars; } if (optcursor == NULL || *optcursor == '\0') optcursor = argv[optind] + 1; optchar = *optcursor; /* FreeBSD: The variable optopt saves the last known option character returned by getopt(). */ optopt = optchar; /* The getopt() function shall return the next option character (if one is found) from argv that matches a character in optstring, if there is one that matches. */ optdecl = strchr(optstring, optchar); if (optdecl) { /* [I]f a character is followed by a colon, the option takes an argument. */ if (optdecl[1] == ':') { optarg = ++optcursor; if (*optarg == '\0') { /* GNU extension: Two colons mean an option takes an optional arg; if there is text in the current argv-element (i.e., in the same word as the option name itself, for example, "-oarg"), then it is returned in optarg, otherwise optarg is set to zero. */ if (optdecl[2] != ':') { /* If the option was the last character in the string pointed to by an element of argv, then optarg shall contain the next element of argv, and optind shall be incremented by 2. If the resulting value of optind is greater than argc, this indicates a missing option-argument, and getopt() shall return an error indication. Otherwise, optarg shall point to the string following the option character in that element of argv, and optind shall be incremented by 1. */ if (++optind < argc) { optarg = argv[optind]; } else { /* If it detects a missing option-argument, it shall return the colon character ( ':' ) if the first character of optstring was a colon, or a question-mark character ( '?' ) otherwise. */ optarg = NULL; fprintf(stderr, "%s: option requires an argument -- '%c'\n", argv[0], optchar); optchar = (optstring[0] == ':') ? ':' : '?'; } } else { optarg = NULL; } } optcursor = NULL; } } else { fprintf(stderr,"%s: invalid option -- '%c'\n", argv[0], optchar); /* If getopt() encounters an option character that is not contained in optstring, it shall return the question-mark ( '?' ) character. */ optchar = '?'; } if (optcursor == NULL || *++optcursor == '\0') ++optind; return optchar; no_more_optchars: optcursor = NULL; first = NULL; return -1; } /* Implementation based on [1]. [1] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html */ int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex) { const struct option* o = longopts; const struct option* match = NULL; int num_matches = 0; size_t argument_name_length = 0; const char* current_argument = NULL; int retval = -1; optarg = NULL; optopt = 0; if (optind >= argc) return -1; /* If, when getopt() is called argv[optind] is a null pointer, getopt_long() shall return -1 without changing optind. */ if (argv[optind] == NULL) goto no_more_optchars; /* If, when getopt_long() is called *argv[optind] is not the character '-', permute argv to move non options to the end */ if (*argv[optind] != '-') { if (argc - optind <= 1) goto no_more_optchars; if (!first) first = argv[optind]; do { rotate((char **)(argv + optind), argc - optind); } while (*argv[optind] != '-' && argv[optind] != first); if (argv[optind] == first) goto no_more_optchars; } if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0) return getopt(argc, argv, optstring); /* It's an option; starts with -- and is longer than two chars. 
*/ current_argument = argv[optind] + 2; argument_name_length = strcspn(current_argument, "="); for (; o->name; ++o) { if (strncmp(o->name, current_argument, argument_name_length) == 0) { match = o; ++num_matches; if (strlen(o->name) == argument_name_length) { /* found match is exactly the one which we are looking for */ num_matches = 1; break; } } } if (num_matches == 1) { /* If longindex is not NULL, it points to a variable which is set to the index of the long option relative to longopts. */ if (longindex) *longindex = (int)(match - longopts); /* If flag is NULL, then getopt_long() shall return val. Otherwise, getopt_long() returns 0, and flag shall point to a variable which shall be set to val if the option is found, but left unchanged if the option is not found. */ if (match->flag) *(match->flag) = match->val; retval = match->flag ? 0 : match->val; if (match->has_arg != no_argument) { optarg = strchr(argv[optind], '='); if (optarg != NULL) ++optarg; if (match->has_arg == required_argument) { /* Only scan the next argv for required arguments. Behavior is not specified, but has been observed with Ubuntu and Mac OSX. */ if (optarg == NULL && ++optind < argc) { optarg = argv[optind]; } if (optarg == NULL) retval = ':'; } } else if (strchr(argv[optind], '=')) { /* An argument was provided to a non-argument option. I haven't seen this specified explicitly, but both GNU and BSD-based implementations show this behavior. */ retval = '?'; } } else { /* Unknown option or ambiguous match. */ retval = '?'; if (num_matches == 0) { fprintf(stderr, "%s: unrecognized option -- '%s'\n", argv[0], argv[optind]); } else { fprintf(stderr, "%s: option '%s' is ambiguous\n", argv[0], argv[optind]); } } ++optind; return retval; no_more_optchars: first = NULL; return -1; }
9,866
32.561224
91
c
null
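A minimal driver (not part of the sources above; the option names and the program itself are hypothetical) showing how this getopt()/getopt_long() port is typically used. After parsing, the rotate() behaviour leaves non-option operands at argv[optind] and above.

#include <stdio.h>
#include "getopt.h"

int main(int argc, char *argv[])
{
	static const struct option long_opts[] = {
		{ "verbose", no_argument,       NULL, 'v' },
		{ "output",  required_argument, NULL, 'o' },
		{ NULL, 0, NULL, 0 }
	};
	int opt;

	while ((opt = getopt_long(argc, argv, "vo:", long_opts, NULL)) != -1) {
		switch (opt) {
		case 'v':
			printf("verbose on\n");
			break;
		case 'o':
			printf("output file: %s\n", optarg);
			break;
		default: /* '?' (unknown/ambiguous) or ':' (missing argument) */
			fprintf(stderr, "usage: %s [-v] [-o file] [operand...]\n",
					argv[0]);
			return 1;
		}
	}

	/* operands were rotated past the options; they start at optind */
	for (; optind < argc; ++optind)
		printf("operand: %s\n", argv[optind]);

	return 0;
}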
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/windows/getopt/getopt.h
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef INCLUDED_GETOPT_PORT_H #define INCLUDED_GETOPT_PORT_H #if defined(__cplusplus) extern "C" { #endif #define no_argument 0 #define required_argument 1 #define optional_argument 2 extern char* optarg; extern int optind, opterr, optopt; struct option { const char* name; int has_arg; int* flag; int val; }; int getopt(int argc, char* const argv[], const char* optstring); int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex); #if defined(__cplusplus) } #endif #endif // INCLUDED_GETOPT_PORT_H
2,137
35.237288
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/windows/include/win_mmap.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * win_mmap.h -- (internal) tracks the regions mapped by mmap */ #ifndef WIN_MMAP_H #define WIN_MMAP_H 1 #include "queue.h" #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define rounddown(x, y) (((x) / (y)) * (y)) void win_mmap_init(void); void win_mmap_fini(void); /* allocation/mmap granularity */ extern unsigned long long Mmap_align; typedef enum FILE_MAPPING_TRACKER_FLAGS { FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001, /* * This should hold the value of all flags ORed for debug purpose. */ FILE_MAPPING_TRACKER_FLAGS_MASK = FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED } FILE_MAPPING_TRACKER_FLAGS; /* * this structure tracks the file mappings outstanding per file handle */ typedef struct FILE_MAPPING_TRACKER { PMDK_SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry; HANDLE FileHandle; HANDLE FileMappingHandle; void *BaseAddress; void *EndAddress; DWORD Access; os_off_t Offset; size_t FileLen; FILE_MAPPING_TRACKER_FLAGS Flags; } FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER; extern SRWLOCK FileMappingQLock; extern PMDK_SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead; #endif /* WIN_MMAP_H */
2,871
34.02439
74
h
null
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/windows/include/platform.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * platform.h -- dirty hacks to compile Linux code on Windows using VC++ * * This is included to each source file using "/FI" (forced include) option. * * XXX - it is a subject for refactoring */ #ifndef PLATFORM_H #define PLATFORM_H 1 #pragma warning(disable : 4996) #pragma warning(disable : 4200) /* allow flexible array member */ #pragma warning(disable : 4819) /* non unicode characters */ #ifdef __cplusplus extern "C" { #endif /* Prevent PMDK compilation for 32-bit platforms */ #if defined(_WIN32) && !defined(_WIN64) #error "32-bit builds of PMDK are not supported!" #endif #define _CRT_RAND_S /* rand_s() */ #include <windows.h> #include <stdint.h> #include <time.h> #include <io.h> #include <process.h> #include <fcntl.h> #include <sys/types.h> #include <malloc.h> #include <signal.h> #include <intrin.h> #include <direct.h> /* use uuid_t definition from util.h */ #ifdef uuid_t #undef uuid_t #endif /* a few trivial substitutions */ #define PATH_MAX MAX_PATH #define __thread __declspec(thread) #define __func__ __FUNCTION__ #ifdef _DEBUG #define DEBUG #endif /* * The inline keyword is available only in VC++. 
* https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx */ #ifndef __cplusplus #define inline __inline #endif /* XXX - no equivalents in VC++ */ #define __attribute__(a) #define __builtin_constant_p(cnd) 0 /* * missing definitions */ /* errno.h */ #define ELIBACC 79 /* cannot access a needed shared library */ /* sys/stat.h */ #define S_IRUSR S_IREAD #define S_IWUSR S_IWRITE #define S_IRGRP S_IRUSR #define S_IWGRP S_IWUSR #define O_SYNC 0 typedef int mode_t; #define fchmod(fd, mode) 0 /* XXX - dummy */ #define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ); /* unistd.h */ typedef long long os_off_t; typedef long long ssize_t; int setenv(const char *name, const char *value, int overwrite); int unsetenv(const char *name); /* fcntl.h */ int posix_fallocate(int fd, os_off_t offset, os_off_t len); /* string.h */ #define strtok_r strtok_s /* time.h */ #define CLOCK_MONOTONIC 1 #define CLOCK_REALTIME 2 int clock_gettime(int id, struct timespec *ts); /* signal.h */ typedef unsigned long long sigset_t; /* one bit for each signal */ C_ASSERT(NSIG <= sizeof(sigset_t) * 8); struct sigaction { void (*sa_handler) (int signum); /* void (*sa_sigaction)(int, siginfo_t *, void *); */ sigset_t sa_mask; int sa_flags; void (*sa_restorer) (void); }; __inline int sigemptyset(sigset_t *set) { *set = 0; return 0; } __inline int sigfillset(sigset_t *set) { *set = ~0; return 0; } __inline int sigaddset(sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } *set |= (1ULL << (signum - 1)); return 0; } __inline int sigdelset(sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } *set &= ~(1ULL << (signum - 1)); return 0; } __inline int sigismember(const sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } return ((*set & (1ULL << (signum - 1))) ? 1 : 0); } /* sched.h */ /* * sched_yield -- yield the processor */ __inline int sched_yield(void) { SwitchToThread(); return 0; /* always succeeds */ } /* * helper macros for library ctor/dtor function declarations */ #define MSVC_CONSTR(func) \ void func(void); \ __pragma(comment(linker, "/include:_" #func)) \ __pragma(section(".CRT$XCU", read)) \ __declspec(allocate(".CRT$XCU")) \ const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func; #define MSVC_DESTR(func) \ void func(void); \ static void _##func##_reg(void) { atexit(func); }; \ MSVC_CONSTR(_##func##_reg) #ifdef __cplusplus } #endif #endif /* PLATFORM_H */
5,431
22.929515
76
h
null
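A hedged, Windows-only sketch exercising the sigset_t emulation defined in platform.h above. It assumes it is compiled inside this tree, where platform.h is force-included, so sigemptyset() and friends resolve to the inline bit operations shown.

#include <signal.h>
#include <assert.h>

static void sigset_demo(void)
{
	sigset_t set;

	sigemptyset(&set);              /* set == 0 */
	sigaddset(&set, SIGINT);        /* bit (SIGINT - 1) goes high */
	assert(sigismember(&set, SIGINT) == 1);

	sigdelset(&set, SIGINT);        /* clear the same bit again */
	assert(sigismember(&set, SIGINT) == 0);
}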
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/windows/include/endian.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * endian.h -- convert values between host and big-/little-endian byte order */ #ifndef ENDIAN_H #define ENDIAN_H 1 /* * XXX: On Windows we can assume little-endian architecture */ #include <intrin.h> #define htole16(a) (a) #define htole32(a) (a) #define htole64(a) (a) #define le16toh(a) (a) #define le32toh(a) (a) #define le64toh(a) (a) #define htobe16(x) _byteswap_ushort(x) #define htobe32(x) _byteswap_ulong(x) #define htobe64(x) _byteswap_uint64(x) #define be16toh(x) _byteswap_ushort(x) #define be32toh(x) _byteswap_ulong(x) #define be64toh(x) _byteswap_uint64(x) #endif /* ENDIAN_H */
696
20.121212
76
h
null
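A small sketch (not from the sources) of how the byte-order macros above are used when a 64-bit value has to be stored in big-endian on-media form; on Windows the conversions map to the _byteswap_* intrinsics, and the include path is assumed to be this tree's src/windows/include.

#include <stdint.h>
#include "endian.h"

/* host order -> big-endian on-media representation */
static uint64_t pack_be64(uint64_t host_value)
{
	return htobe64(host_value);
}

/* big-endian on-media representation -> host order */
static uint64_t unpack_be64(uint64_t media_value)
{
	return be64toh(media_value);
}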
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/windows/include/sys/file.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * sys/file.h -- file locking */
1,750
45.078947
74
h
null
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/windows/include/sys/param.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * sys/param.h -- a few useful macros */ #ifndef SYS_PARAM_H #define SYS_PARAM_H 1 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define howmany(x, y) (((x) + ((y) - 1)) / (y)) #define BPB 8 /* bits per byte */ #define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB)) #define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB))) #define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0) #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif /* SYS_PARAM_H */
612
24.541667
64
h
null
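A minimal sketch (hypothetical, assuming the macros from this sys/param.h shim are in scope) illustrating the rounding, bitmap, and min/max helpers above.

#include <assert.h>
#include <sys/param.h>

static void param_demo(void)
{
	assert(roundup(4097, 4096) == 8192);   /* round up to a page multiple */
	assert(howmany(4097, 4096) == 2);      /* number of pages needed */

	unsigned char bitmap[2] = { 0, 0 };
	setbit(bitmap, 9);                     /* second byte, bit 1 */
	assert(isset(bitmap, 9));
	assert(isclr(bitmap, 8));

	assert(MIN(3, 5) == 3 && MAX(3, 5) == 5);
}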
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemblk.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemblk.h -- definitions of libpmemblk entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemblk provides support for arrays of atomically-writable blocks. * * See libpmemblk(7) for details. */ #ifndef LIBPMEMBLK_H #define LIBPMEMBLK_H 1 #include <sys/types.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemblk_open pmemblk_openW #define pmemblk_create pmemblk_createW #define pmemblk_check pmemblk_checkW #define pmemblk_check_version pmemblk_check_versionW #define pmemblk_errormsg pmemblk_errormsgW #define pmemblk_ctl_get pmemblk_ctl_getW #define pmemblk_ctl_set pmemblk_ctl_setW #define pmemblk_ctl_exec pmemblk_ctl_execW #else #define pmemblk_open pmemblk_openU #define pmemblk_create pmemblk_createU #define pmemblk_check pmemblk_checkU #define pmemblk_check_version pmemblk_check_versionU #define pmemblk_errormsg pmemblk_errormsgU #define pmemblk_ctl_get pmemblk_ctl_getU #define pmemblk_ctl_set pmemblk_ctl_setU #define pmemblk_ctl_exec pmemblk_ctl_execU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type, internal to libpmemblk */ typedef struct pmemblk PMEMblkpool; /* * PMEMBLK_MAJOR_VERSION and PMEMBLK_MINOR_VERSION provide the current version * of the libpmemblk API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemblk_check_version(). */ #define PMEMBLK_MAJOR_VERSION 1 #define PMEMBLK_MINOR_VERSION 1 #ifndef _WIN32 const char *pmemblk_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemblk_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemblk_check_versionW(unsigned major_required, unsigned minor_required); #endif /* XXX - unify minimum pool size for both OS-es */ #ifndef _WIN32 #if defined(__x86_64__) || defined(__M_X64__) || defined(__aarch64__) /* minimum pool size: 16MiB + 4KiB (minimum BTT size + mmap alignment) */ #define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 8)) #elif defined(__PPC64__) /* minimum pool size: 16MiB + 128KiB (minimum BTT size + mmap alignment) */ #define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 128)) #else #error unable to recognize ISA at compile time #endif #else /* minimum pool size: 16MiB + 64KiB (minimum BTT size + mmap alignment) */ #define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 64)) #endif /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. 
*/ #define PMEMBLK_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #define PMEMBLK_MIN_BLK ((size_t)512) #ifndef _WIN32 PMEMblkpool *pmemblk_open(const char *path, size_t bsize); #else PMEMblkpool *pmemblk_openU(const char *path, size_t bsize); PMEMblkpool *pmemblk_openW(const wchar_t *path, size_t bsize); #endif #ifndef _WIN32 PMEMblkpool *pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode); #else PMEMblkpool *pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode); PMEMblkpool *pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemblk_check(const char *path, size_t bsize); #else int pmemblk_checkU(const char *path, size_t bsize); int pmemblk_checkW(const wchar_t *path, size_t bsize); #endif void pmemblk_close(PMEMblkpool *pbp); size_t pmemblk_bsize(PMEMblkpool *pbp); size_t pmemblk_nblock(PMEMblkpool *pbp); int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno); int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno); int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno); int pmemblk_set_error(PMEMblkpool *pbp, long long blockno); /* * Passing NULL to pmemblk_set_funcs() tells libpmemblk to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemblk. */ void pmemblk_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); #ifndef _WIN32 const char *pmemblk_errormsg(void); #else const char *pmemblk_errormsgU(void); const wchar_t *pmemblk_errormsgW(void); #endif #ifndef _WIN32 /* EXPERIMENTAL */ int pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg); #else int pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg); int pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg); int pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg); #endif #ifdef __cplusplus } #endif #endif /* libpmemblk.h */
5,183
30.418182
79
h
null
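A minimal sketch of the libpmemblk API declared above: create a pool of 1 KiB atomically-writable blocks, write block 0, and read it back. The pool path is hypothetical and the buffer size must match the block size.

#include <stdio.h>
#include <string.h>
#include <libpmemblk.h>

int blk_demo(void)
{
	const size_t bsize = 1024;
	PMEMblkpool *pbp = pmemblk_create("/pmem/blkpool", bsize,
			PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL) {
		perror("pmemblk_create");
		return 1;
	}

	char buf[1024];
	memset(buf, 0xab, sizeof(buf));

	if (pmemblk_write(pbp, buf, 0) < 0)     /* block number 0 */
		perror("pmemblk_write");
	if (pmemblk_read(pbp, buf, 0) < 0)
		perror("pmemblk_read");

	printf("pool holds %zu blocks of %zu bytes\n",
			pmemblk_nblock(pbp), pmemblk_bsize(pbp));

	pmemblk_close(pbp);
	return 0;
}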
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmempool.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * libpmempool.h -- definitions of libpmempool entry points * * See libpmempool(7) for details. */ #ifndef LIBPMEMPOOL_H #define LIBPMEMPOOL_H 1 #include <stdint.h> #include <stddef.h> #include <limits.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmempool_check_status pmempool_check_statusW #define pmempool_check_args pmempool_check_argsW #define pmempool_check_init pmempool_check_initW #define pmempool_check pmempool_checkW #define pmempool_sync pmempool_syncW #define pmempool_transform pmempool_transformW #define pmempool_rm pmempool_rmW #define pmempool_check_version pmempool_check_versionW #define pmempool_errormsg pmempool_errormsgW #define pmempool_feature_enable pmempool_feature_enableW #define pmempool_feature_disable pmempool_feature_disableW #define pmempool_feature_query pmempool_feature_queryW #else #define pmempool_check_status pmempool_check_statusU #define pmempool_check_args pmempool_check_argsU #define pmempool_check_init pmempool_check_initU #define pmempool_check pmempool_checkU #define pmempool_sync pmempool_syncU #define pmempool_transform pmempool_transformU #define pmempool_rm pmempool_rmU #define pmempool_check_version pmempool_check_versionU #define pmempool_errormsg pmempool_errormsgU #define pmempool_feature_enable pmempool_feature_enableU #define pmempool_feature_disable pmempool_feature_disableU #define pmempool_feature_query pmempool_feature_queryU #endif #endif #ifdef __cplusplus extern "C" { #endif /* PMEMPOOL CHECK */ /* * pool types */ enum pmempool_pool_type { PMEMPOOL_POOL_TYPE_DETECT, PMEMPOOL_POOL_TYPE_LOG, PMEMPOOL_POOL_TYPE_BLK, PMEMPOOL_POOL_TYPE_OBJ, PMEMPOOL_POOL_TYPE_BTT, PMEMPOOL_POOL_TYPE_RESERVED1, /* used to be cto */ }; /* * perform repairs */ #define PMEMPOOL_CHECK_REPAIR (1U << 0) /* * emulate repairs */ #define PMEMPOOL_CHECK_DRY_RUN (1U << 1) /* * perform hazardous repairs */ #define PMEMPOOL_CHECK_ADVANCED (1U << 2) /* * do not ask before repairs */ #define PMEMPOOL_CHECK_ALWAYS_YES (1U << 3) /* * generate info statuses */ #define PMEMPOOL_CHECK_VERBOSE (1U << 4) /* * generate string format statuses */ #define PMEMPOOL_CHECK_FORMAT_STR (1U << 5) /* * types of check statuses */ enum pmempool_check_msg_type { PMEMPOOL_CHECK_MSG_TYPE_INFO, PMEMPOOL_CHECK_MSG_TYPE_ERROR, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, }; /* * check result types */ enum pmempool_check_result { PMEMPOOL_CHECK_RESULT_CONSISTENT, PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT, PMEMPOOL_CHECK_RESULT_REPAIRED, PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR, PMEMPOOL_CHECK_RESULT_ERROR, PMEMPOOL_CHECK_RESULT_SYNC_REQ, }; /* * check context */ typedef struct pmempool_check_ctx PMEMpoolcheck; /* * finalize the check and get the result */ enum pmempool_check_result pmempool_check_end(PMEMpoolcheck *ppc); /* PMEMPOOL RM */ #define PMEMPOOL_RM_FORCE (1U << 0) /* ignore any errors */ #define PMEMPOOL_RM_POOLSET_LOCAL (1U << 1) /* remove local poolsets */ #define PMEMPOOL_RM_POOLSET_REMOTE (1U << 2) /* remove remote poolsets */ /* * LIBPMEMPOOL SYNC */ /* * fix bad blocks - it requires creating or reading special recovery files */ #define PMEMPOOL_SYNC_FIX_BAD_BLOCKS (1U << 0) /* * do not apply changes, only check if operation is viable */ #define PMEMPOOL_SYNC_DRY_RUN (1U << 1) /* * LIBPMEMPOOL TRANSFORM */ /* * do not apply changes, only check if operation is viable */ #define PMEMPOOL_TRANSFORM_DRY_RUN (1U << 1) /* * PMEMPOOL_MAJOR_VERSION and PMEMPOOL_MINOR_VERSION provide the current 
version * of the libpmempool API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmempool_check_version(). */ #define PMEMPOOL_MAJOR_VERSION 1 #define PMEMPOOL_MINOR_VERSION 3 /* * check status */ struct pmempool_check_statusU { enum pmempool_check_msg_type type; struct { const char *msg; const char *answer; } str; }; #ifndef _WIN32 #define pmempool_check_status pmempool_check_statusU #else struct pmempool_check_statusW { enum pmempool_check_msg_type type; struct { const wchar_t *msg; const wchar_t *answer; } str; }; #endif /* * check context arguments */ struct pmempool_check_argsU { const char *path; const char *backup_path; enum pmempool_pool_type pool_type; unsigned flags; }; #ifndef _WIN32 #define pmempool_check_args pmempool_check_argsU #else struct pmempool_check_argsW { const wchar_t *path; const wchar_t *backup_path; enum pmempool_pool_type pool_type; unsigned flags; }; #endif /* * initialize a check context */ #ifndef _WIN32 PMEMpoolcheck * pmempool_check_init(struct pmempool_check_args *args, size_t args_size); #else PMEMpoolcheck * pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size); PMEMpoolcheck * pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size); #endif /* * start / resume the check */ #ifndef _WIN32 struct pmempool_check_status *pmempool_check(PMEMpoolcheck *ppc); #else struct pmempool_check_statusU *pmempool_checkU(PMEMpoolcheck *ppc); struct pmempool_check_statusW *pmempool_checkW(PMEMpoolcheck *ppc); #endif /* * LIBPMEMPOOL SYNC & TRANSFORM */ /* * Synchronize data between replicas within a poolset. * * EXPERIMENTAL */ #ifndef _WIN32 int pmempool_sync(const char *poolset_file, unsigned flags); #else int pmempool_syncU(const char *poolset_file, unsigned flags); int pmempool_syncW(const wchar_t *poolset_file, unsigned flags); #endif /* * Modify internal structure of a poolset. 
* * EXPERIMENTAL */ #ifndef _WIN32 int pmempool_transform(const char *poolset_file_src, const char *poolset_file_dst, unsigned flags); #else int pmempool_transformU(const char *poolset_file_src, const char *poolset_file_dst, unsigned flags); int pmempool_transformW(const wchar_t *poolset_file_src, const wchar_t *poolset_file_dst, unsigned flags); #endif /* PMEMPOOL feature enable, disable, query */ /* * feature types */ enum pmempool_feature { PMEMPOOL_FEAT_SINGLEHDR, PMEMPOOL_FEAT_CKSUM_2K, PMEMPOOL_FEAT_SHUTDOWN_STATE, PMEMPOOL_FEAT_CHECK_BAD_BLOCKS, }; /* PMEMPOOL FEATURE ENABLE */ #ifndef _WIN32 int pmempool_feature_enable(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_enableU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_enableW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL FEATURE DISABLE */ #ifndef _WIN32 int pmempool_feature_disable(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_disableU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_disableW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL FEATURE QUERY */ #ifndef _WIN32 int pmempool_feature_query(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_queryU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_queryW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL RM */ #ifndef _WIN32 int pmempool_rm(const char *path, unsigned flags); #else int pmempool_rmU(const char *path, unsigned flags); int pmempool_rmW(const wchar_t *path, unsigned flags); #endif #ifndef _WIN32 const char *pmempool_check_version(unsigned major_required, unsigned minor_required); #else const char *pmempool_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmempool_check_versionW(unsigned major_required, unsigned minor_required); #endif #ifndef _WIN32 const char *pmempool_errormsg(void); #else const char *pmempool_errormsgU(void); const wchar_t *pmempool_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmempool.h */
8,009
22.910448
80
h
null
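A hedged sketch of the check flow declared above: initialize a check context for an auto-detected pool type, let PMEMPOOL_CHECK_ALWAYS_YES answer every repair question, and read the final result. The pool path is hypothetical.

#include <stdio.h>
#include <libpmempool.h>

int check_demo(void)
{
	struct pmempool_check_args args = {
		.path = "/pmem/obj.pool",
		.backup_path = NULL,
		.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
		.flags = PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_ALWAYS_YES |
			PMEMPOOL_CHECK_FORMAT_STR,
	};

	PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args));
	if (ppc == NULL) {
		perror("pmempool_check_init");
		return 1;
	}

	struct pmempool_check_status *status;
	while ((status = pmempool_check(ppc)) != NULL) {
		if (status->type == PMEMPOOL_CHECK_MSG_TYPE_ERROR)
			fprintf(stderr, "%s\n", status->str.msg);
	}

	enum pmempool_check_result result = pmempool_check_end(ppc);
	return (result == PMEMPOOL_CHECK_RESULT_CONSISTENT ||
		result == PMEMPOOL_CHECK_RESULT_REPAIRED) ? 0 : 1;
}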
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/librpmem.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * librpmem.h -- definitions of librpmem entry points (EXPERIMENTAL) * * This library provides low-level support for remote access to persistent * memory utilizing RDMA-capable RNICs. * * See librpmem(7) for details. */ #ifndef LIBRPMEM_H #define LIBRPMEM_H 1 #include <sys/types.h> #include <stdint.h> #ifdef __cplusplus extern "C" { #endif typedef struct rpmem_pool RPMEMpool; #define RPMEM_POOL_HDR_SIG_LEN 8 #define RPMEM_POOL_HDR_UUID_LEN 16 /* uuid byte length */ #define RPMEM_POOL_USER_FLAGS_LEN 16 struct rpmem_pool_attr { char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */ uint32_t major; /* format major version number */ uint32_t compat_features; /* mask: compatible "may" features */ uint32_t incompat_features; /* mask: "must support" features */ uint32_t ro_compat_features; /* mask: force RO if unsupported */ unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */ unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */ unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */ unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */ unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */ }; RPMEMpool *rpmem_create(const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned *nlanes, const struct rpmem_pool_attr *create_attr); RPMEMpool *rpmem_open(const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned *nlanes, struct rpmem_pool_attr *open_attr); int rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr); int rpmem_close(RPMEMpool *rpp); #define RPMEM_PERSIST_RELAXED (1U << 0) #define RPMEM_FLUSH_RELAXED (1U << 0) int rpmem_flush(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane, unsigned flags); int rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags); int rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane, unsigned flags); int rpmem_read(RPMEMpool *rpp, void *buff, size_t offset, size_t length, unsigned lane); int rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane); #define RPMEM_REMOVE_FORCE 0x1 #define RPMEM_REMOVE_POOL_SET 0x2 int rpmem_remove(const char *target, const char *pool_set, int flags); /* * RPMEM_MAJOR_VERSION and RPMEM_MINOR_VERSION provide the current version of * the librpmem API as provided by this header file. Applications can verify * that the version available at run-time is compatible with the version used * at compile-time by passing these defines to rpmem_check_version(). */ #define RPMEM_MAJOR_VERSION 1 #define RPMEM_MINOR_VERSION 3 const char *rpmem_check_version(unsigned major_required, unsigned minor_required); const char *rpmem_errormsg(void); /* minimum size of a pool */ #define RPMEM_MIN_POOL ((size_t)(1024 * 8)) /* 8 KB */ /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define RPMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #ifdef __cplusplus } #endif #endif /* librpmem.h */
3,197
31.30303
77
h
null
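A hedged sketch of the librpmem flow declared above (the library is EXPERIMENTAL). The target name, poolset name, pool region, lane number and offsets are all illustrative; a real caller maps the local replica first and keeps persist offsets clear of the pool header.

#include <stdio.h>
#include <librpmem.h>

int rpmem_demo(void *pool_addr, size_t pool_size)
{
	unsigned nlanes = 4;
	struct rpmem_pool_attr attr = { .signature = "DEMO" };

	RPMEMpool *rpp = rpmem_create("user@target-host", "remote.poolset",
			pool_addr, pool_size, &nlanes, &attr);
	if (rpp == NULL) {
		fprintf(stderr, "rpmem_create: %s\n", rpmem_errormsg());
		return 1;
	}

	/* make bytes [4096, 8192) of the local pool durable on the target */
	if (rpmem_persist(rpp, 4096, 4096, 0 /* lane */, 0 /* flags */))
		fprintf(stderr, "rpmem_persist: %s\n", rpmem_errormsg());

	return rpmem_close(rpp);
}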
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj.h -- definitions of libpmemobj entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemobj provides a pmem-resident transactional object store. * * See libpmemobj(7) for details. */ #ifndef LIBPMEMOBJ_H #define LIBPMEMOBJ_H 1 #include <libpmemobj/action.h> #include <libpmemobj/atomic.h> #include <libpmemobj/ctl.h> #include <libpmemobj/iterator.h> #include <libpmemobj/lists_atomic.h> #include <libpmemobj/pool.h> #include <libpmemobj/thread.h> #include <libpmemobj/tx.h> #endif /* libpmemobj.h */
662
23.555556
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemlog.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemlog.h -- definitions of libpmemlog entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemlog provides support for pmem-resident log files. * * See libpmemlog(7) for details. */ #ifndef LIBPMEMLOG_H #define LIBPMEMLOG_H 1 #include <sys/types.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemlog_open pmemlog_openW #define pmemlog_create pmemlog_createW #define pmemlog_check pmemlog_checkW #define pmemlog_check_version pmemlog_check_versionW #define pmemlog_errormsg pmemlog_errormsgW #define pmemlog_ctl_get pmemlog_ctl_getW #define pmemlog_ctl_set pmemlog_ctl_setW #define pmemlog_ctl_exec pmemlog_ctl_execW #else #define pmemlog_open pmemlog_openU #define pmemlog_create pmemlog_createU #define pmemlog_check pmemlog_checkU #define pmemlog_check_version pmemlog_check_versionU #define pmemlog_errormsg pmemlog_errormsgU #define pmemlog_ctl_get pmemlog_ctl_getU #define pmemlog_ctl_set pmemlog_ctl_setU #define pmemlog_ctl_exec pmemlog_ctl_execU #endif #else #include <sys/uio.h> #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type, internal to libpmemlog */ typedef struct pmemlog PMEMlogpool; /* * PMEMLOG_MAJOR_VERSION and PMEMLOG_MINOR_VERSION provide the current * version of the libpmemlog API as provided by this header file. * Applications can verify that the version available at run-time * is compatible with the version used at compile-time by passing * these defines to pmemlog_check_version(). */ #define PMEMLOG_MAJOR_VERSION 1 #define PMEMLOG_MINOR_VERSION 1 #ifndef _WIN32 const char *pmemlog_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemlog_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemlog_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * support for PMEM-resident log files... */ #define PMEMLOG_MIN_POOL ((size_t)(1024 * 1024 * 2)) /* min pool size: 2MiB */ /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define PMEMLOG_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #ifndef _WIN32 PMEMlogpool *pmemlog_open(const char *path); #else PMEMlogpool *pmemlog_openU(const char *path); PMEMlogpool *pmemlog_openW(const wchar_t *path); #endif #ifndef _WIN32 PMEMlogpool *pmemlog_create(const char *path, size_t poolsize, mode_t mode); #else PMEMlogpool *pmemlog_createU(const char *path, size_t poolsize, mode_t mode); PMEMlogpool *pmemlog_createW(const wchar_t *path, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemlog_check(const char *path); #else int pmemlog_checkU(const char *path); int pmemlog_checkW(const wchar_t *path); #endif void pmemlog_close(PMEMlogpool *plp); size_t pmemlog_nbyte(PMEMlogpool *plp); int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count); int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt); long long pmemlog_tell(PMEMlogpool *plp); void pmemlog_rewind(PMEMlogpool *plp); void pmemlog_walk(PMEMlogpool *plp, size_t chunksize, int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg); /* * Passing NULL to pmemlog_set_funcs() tells libpmemlog to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemlog. 
*/ void pmemlog_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); #ifndef _WIN32 const char *pmemlog_errormsg(void); #else const char *pmemlog_errormsgU(void); const wchar_t *pmemlog_errormsgW(void); #endif #ifndef _WIN32 /* EXPERIMENTAL */ int pmemlog_ctl_get(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_set(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_exec(PMEMlogpool *plp, const char *name, void *arg); #else int pmemlog_ctl_getU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_getW(PMEMlogpool *plp, const wchar_t *name, void *arg); int pmemlog_ctl_setU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_setW(PMEMlogpool *plp, const wchar_t *name, void *arg); int pmemlog_ctl_execU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_execW(PMEMlogpool *plp, const wchar_t *name, void *arg); #endif #ifdef __cplusplus } #endif #endif /* libpmemlog.h */
4,540
28.679739
80
h
null
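A minimal sketch (hypothetical pool path) of the libpmemlog API above: create a log pool, append one record, report usage, and walk the log back through a callback.

#include <stdio.h>
#include <string.h>
#include <libpmemlog.h>

static int print_chunk(const void *buf, size_t len, void *arg)
{
	(void) arg;
	printf("%.*s", (int)len, (const char *)buf);
	return 1;   /* non-zero keeps the walk going; 0 would stop it */
}

int log_demo(void)
{
	PMEMlogpool *plp = pmemlog_create("/pmem/logpool",
			PMEMLOG_MIN_POOL, 0666);
	if (plp == NULL) {
		perror("pmemlog_create");
		return 1;
	}

	const char line[] = "hello, persistent log\n";
	if (pmemlog_append(plp, line, strlen(line)) < 0)
		perror("pmemlog_append");

	printf("used %lld of %zu bytes\n",
			pmemlog_tell(plp), pmemlog_nbyte(plp));

	pmemlog_walk(plp, 0 /* whole log in one callback */,
			print_chunk, NULL);

	pmemlog_close(plp);
	return 0;
}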
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmem.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmem.h -- definitions of libpmem entry points * * This library provides support for programming with persistent memory (pmem). * * libpmem provides support for using raw pmem directly. * * See libpmem(7) for details. */ #ifndef LIBPMEM_H #define LIBPMEM_H 1 #include <sys/types.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmem_map_file pmem_map_fileW #define pmem_check_version pmem_check_versionW #define pmem_errormsg pmem_errormsgW #else #define pmem_map_file pmem_map_fileU #define pmem_check_version pmem_check_versionU #define pmem_errormsg pmem_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define PMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ /* * flags supported by pmem_map_file() */ #define PMEM_FILE_CREATE (1 << 0) #define PMEM_FILE_EXCL (1 << 1) #define PMEM_FILE_SPARSE (1 << 2) #define PMEM_FILE_TMPFILE (1 << 3) #ifndef _WIN32 void *pmem_map_file(const char *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); #else void *pmem_map_fileU(const char *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); void *pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); #endif int pmem_unmap(void *addr, size_t len); int pmem_is_pmem(const void *addr, size_t len); void pmem_persist(const void *addr, size_t len); int pmem_msync(const void *addr, size_t len); int pmem_has_auto_flush(void); void pmem_flush(const void *addr, size_t len); void pmem_deep_flush(const void *addr, size_t len); int pmem_deep_drain(const void *addr, size_t len); int pmem_deep_persist(const void *addr, size_t len); void pmem_drain(void); int pmem_has_hw_drain(void); void *pmem_memmove_persist(void *pmemdest, const void *src, size_t len); void *pmem_memcpy_persist(void *pmemdest, const void *src, size_t len); void *pmem_memset_persist(void *pmemdest, int c, size_t len); void *pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len); void *pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len); void *pmem_memset_nodrain(void *pmemdest, int c, size_t len); #define PMEM_F_MEM_NODRAIN (1U << 0) #define PMEM_F_MEM_NONTEMPORAL (1U << 1) #define PMEM_F_MEM_TEMPORAL (1U << 2) #define PMEM_F_MEM_WC (1U << 3) #define PMEM_F_MEM_WB (1U << 4) #define PMEM_F_MEM_NOFLUSH (1U << 5) #define PMEM_F_MEM_VALID_FLAGS (PMEM_F_MEM_NODRAIN | \ PMEM_F_MEM_NONTEMPORAL | \ PMEM_F_MEM_TEMPORAL | \ PMEM_F_MEM_WC | \ PMEM_F_MEM_WB | \ PMEM_F_MEM_NOFLUSH) void *pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags); void *pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags); void *pmem_memset(void *pmemdest, int c, size_t len, unsigned flags); /* * PMEM_MAJOR_VERSION and PMEM_MINOR_VERSION provide the current version of the * libpmem API as provided by this header file. Applications can verify that * the version available at run-time is compatible with the version used at * compile-time by passing these defines to pmem_check_version(). 
*/ #define PMEM_MAJOR_VERSION 1 #define PMEM_MINOR_VERSION 1 #ifndef _WIN32 const char *pmem_check_version(unsigned major_required, unsigned minor_required); #else const char *pmem_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmem_check_versionW(unsigned major_required, unsigned minor_required); #endif #ifndef _WIN32 const char *pmem_errormsg(void); #else const char *pmem_errormsgU(void); const wchar_t *pmem_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmem.h */
3,829
28.015152
80
h
null
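A minimal sketch (hypothetical file path) of the raw libpmem flow declared above: map a file, store a string, then persist it with pmem_persist() when the mapping is real pmem or fall back to pmem_msync() otherwise.

#include <stdio.h>
#include <string.h>
#include <libpmem.h>

int pmem_demo(void)
{
	size_t mapped_len;
	int is_pmem;
	char *addr = pmem_map_file("/pmem/file", 4096,
			PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem);
	if (addr == NULL) {
		perror("pmem_map_file");
		return 1;
	}

	strcpy(addr, "hello, persistent memory");

	if (is_pmem)
		pmem_persist(addr, mapped_len);   /* CPU cache flush + drain */
	else
		pmem_msync(addr, mapped_len);     /* msync(2) fallback */

	pmem_unmap(addr, mapped_len);
	return 0;
}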
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmem2.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * libpmem2.h -- definitions of libpmem2 entry points (EXPERIMENTAL) * * This library provides support for programming with persistent memory (pmem). * * libpmem2 provides support for using raw pmem directly. * * See libpmem2(7) for details. */ #ifndef LIBPMEM2_H #define LIBPMEM2_H 1 #include <stddef.h> #include <stdint.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmem2_source_device_id pmem2_source_device_idW #define pmem2_errormsg pmem2_errormsgW #define pmem2_perror pmem2_perrorW #else #define pmem2_source_device_id pmem2_source_device_idU #define pmem2_errormsg pmem2_errormsgU #define pmem2_perror pmem2_perrorU #endif #endif #ifdef __cplusplus extern "C" { #endif #define PMEM2_E_UNKNOWN (-100000) #define PMEM2_E_NOSUPP (-100001) #define PMEM2_E_FILE_HANDLE_NOT_SET (-100003) #define PMEM2_E_INVALID_FILE_HANDLE (-100004) #define PMEM2_E_INVALID_FILE_TYPE (-100005) #define PMEM2_E_MAP_RANGE (-100006) #define PMEM2_E_MAPPING_EXISTS (-100007) #define PMEM2_E_GRANULARITY_NOT_SET (-100008) #define PMEM2_E_GRANULARITY_NOT_SUPPORTED (-100009) #define PMEM2_E_OFFSET_OUT_OF_RANGE (-100010) #define PMEM2_E_OFFSET_UNALIGNED (-100011) #define PMEM2_E_INVALID_ALIGNMENT_FORMAT (-100012) #define PMEM2_E_INVALID_ALIGNMENT_VALUE (-100013) #define PMEM2_E_INVALID_SIZE_FORMAT (-100014) #define PMEM2_E_LENGTH_UNALIGNED (-100015) #define PMEM2_E_MAPPING_NOT_FOUND (-100016) #define PMEM2_E_BUFFER_TOO_SMALL (-100017) #define PMEM2_E_SOURCE_EMPTY (-100018) #define PMEM2_E_INVALID_SHARING_VALUE (-100019) #define PMEM2_E_SRC_DEVDAX_PRIVATE (-100020) #define PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE (-100021) #define PMEM2_E_ADDRESS_UNALIGNED (-100022) #define PMEM2_E_ADDRESS_NULL (-100023) #define PMEM2_E_DEEP_FLUSH_RANGE (-100024) #define PMEM2_E_INVALID_REGION_FORMAT (-100025) #define PMEM2_E_DAX_REGION_NOT_FOUND (-100026) #define PMEM2_E_INVALID_DEV_FORMAT (-100027) #define PMEM2_E_CANNOT_READ_BOUNDS (-100028) #define PMEM2_E_NO_BAD_BLOCK_FOUND (-100029) #define PMEM2_E_LENGTH_OUT_OF_RANGE (-100030) #define PMEM2_E_INVALID_PROT_FLAG (-100031) #define PMEM2_E_NO_ACCESS (-100032) /* source setup */ struct pmem2_source; int pmem2_source_from_fd(struct pmem2_source **src, int fd); int pmem2_source_from_anon(struct pmem2_source **src, size_t size); #ifdef _WIN32 int pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle); #endif int pmem2_source_size(const struct pmem2_source *src, size_t *size); int pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment); int pmem2_source_delete(struct pmem2_source **src); /* vm reservation setup */ struct pmem2_vm_reservation; int pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv, size_t size, void *address); int pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv); /* config setup */ struct pmem2_config; int pmem2_config_new(struct pmem2_config **cfg); int pmem2_config_delete(struct pmem2_config **cfg); enum pmem2_granularity { PMEM2_GRANULARITY_BYTE, PMEM2_GRANULARITY_CACHE_LINE, PMEM2_GRANULARITY_PAGE, }; int pmem2_config_set_required_store_granularity(struct pmem2_config *cfg, enum pmem2_granularity g); int pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset); int pmem2_config_set_length(struct pmem2_config *cfg, size_t length); enum pmem2_sharing_type { PMEM2_SHARED, PMEM2_PRIVATE, }; int pmem2_config_set_sharing(struct pmem2_config *cfg, enum pmem2_sharing_type type); #define PMEM2_PROT_EXEC (1U << 29) 
#define PMEM2_PROT_READ (1U << 30) #define PMEM2_PROT_WRITE (1U << 31) #define PMEM2_PROT_NONE 0 int pmem2_config_set_protection(struct pmem2_config *cfg, unsigned prot); enum pmem2_address_request_type { PMEM2_ADDRESS_FIXED_REPLACE = 1, PMEM2_ADDRESS_FIXED_NOREPLACE = 2, }; int pmem2_config_set_address(struct pmem2_config *cfg, void *addr, enum pmem2_address_request_type request_type); int pmem2_config_set_vm_reservation(struct pmem2_config *cfg, struct pmem2_vm_reservation *rsv, size_t offset); void pmem2_config_clear_address(struct pmem2_config *cfg); /* mapping */ struct pmem2_map; int pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src, struct pmem2_map **map_ptr); int pmem2_unmap(struct pmem2_map **map_ptr); void *pmem2_map_get_address(struct pmem2_map *map); size_t pmem2_map_get_size(struct pmem2_map *map); enum pmem2_granularity pmem2_map_get_store_granularity(struct pmem2_map *map); /* flushing */ typedef void (*pmem2_persist_fn)(const void *ptr, size_t size); typedef void (*pmem2_flush_fn)(const void *ptr, size_t size); typedef void (*pmem2_drain_fn)(void); pmem2_persist_fn pmem2_get_persist_fn(struct pmem2_map *map); pmem2_flush_fn pmem2_get_flush_fn(struct pmem2_map *map); pmem2_drain_fn pmem2_get_drain_fn(struct pmem2_map *map); #define PMEM2_F_MEM_NODRAIN (1U << 0) #define PMEM2_F_MEM_NONTEMPORAL (1U << 1) #define PMEM2_F_MEM_TEMPORAL (1U << 2) #define PMEM2_F_MEM_WC (1U << 3) #define PMEM2_F_MEM_WB (1U << 4) #define PMEM2_F_MEM_NOFLUSH (1U << 5) #define PMEM2_F_MEM_VALID_FLAGS (PMEM2_F_MEM_NODRAIN | \ PMEM2_F_MEM_NONTEMPORAL | \ PMEM2_F_MEM_TEMPORAL | \ PMEM2_F_MEM_WC | \ PMEM2_F_MEM_WB | \ PMEM2_F_MEM_NOFLUSH) typedef void *(*pmem2_memmove_fn)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*pmem2_memcpy_fn)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*pmem2_memset_fn)(void *pmemdest, int c, size_t len, unsigned flags); pmem2_memmove_fn pmem2_get_memmove_fn(struct pmem2_map *map); pmem2_memcpy_fn pmem2_get_memcpy_fn(struct pmem2_map *map); pmem2_memset_fn pmem2_get_memset_fn(struct pmem2_map *map); /* RAS */ int pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size); #ifndef _WIN32 int pmem2_source_device_id(const struct pmem2_source *src, char *id, size_t *len); #else int pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id, size_t *len); int pmem2_source_device_idU(const struct pmem2_source *src, char *id, size_t *len); #endif int pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc); struct pmem2_badblock_context; struct pmem2_badblock { size_t offset; size_t length; }; int pmem2_badblock_context_new(const struct pmem2_source *src, struct pmem2_badblock_context **bbctx); int pmem2_badblock_next(struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb); void pmem2_badblock_context_delete( struct pmem2_badblock_context **bbctx); int pmem2_badblock_clear(struct pmem2_badblock_context *bbctx, const struct pmem2_badblock *bb); /* error handling */ #ifndef _WIN32 const char *pmem2_errormsg(void); #else const char *pmem2_errormsgU(void); const wchar_t *pmem2_errormsgW(void); #endif int pmem2_err_to_errno(int); #ifndef _WIN32 void pmem2_perror(const char *format, ...) __attribute__((__format__(__printf__, 1, 2))); #else void pmem2_perrorU(const char *format, ...); void pmem2_perrorW(const wchar_t *format, ...); #endif #ifdef __cplusplus } #endif #endif /* libpmem2.h */
7,202
25.677778
79
h
null
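A hedged sketch of the libpmem2 sequence declared above: build a source from a file descriptor, set the required store granularity on a config, map, and persist a store through the map's persist function. The file descriptor is supplied by the (hypothetical) caller.

#include <string.h>
#include <libpmem2.h>

int pmem2_demo(int fd)
{
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;

	if (pmem2_config_new(&cfg))
		goto err;
	if (pmem2_source_from_fd(&src, fd))
		goto err_cfg;
	if (pmem2_config_set_required_store_granularity(cfg,
			PMEM2_GRANULARITY_PAGE))
		goto err_src;
	if (pmem2_map(cfg, src, &map))
		goto err_src;

	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);

	strcpy(addr, "hello, libpmem2");
	persist(addr, strlen(addr) + 1);

	pmem2_unmap(&map);
	pmem2_source_delete(&src);
	pmem2_config_delete(&cfg);
	return 0;

err_src:
	pmem2_source_delete(&src);
err_cfg:
	pmem2_config_delete(&cfg);
err:
	pmem2_perror("libpmem2");
	return 1;
}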
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/ctl.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * libpmemobj/ctl.h -- definitions of pmemobj_ctl related entry points */ #ifndef LIBPMEMOBJ_CTL_H #define LIBPMEMOBJ_CTL_H 1 #include <stddef.h> #include <sys/types.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Allocation class interface * * When requesting an object from the allocator, the first step is to determine * which allocation class best approximates the size of the object. * Once found, the appropriate free list, called bucket, for that * class is selected in a fashion that minimizes contention between threads. * Depending on the requested size and the allocation class, it might happen * that the object size (including required metadata) would be bigger than the * allocation class size - called unit size. In those situations, the object is * constructed from two or more units (up to 64). * * If the requested number of units cannot be retrieved from the selected * bucket, the thread reaches out to the global, shared, heap which manages * memory in 256 kilobyte chunks and gives it out in a best-fit fashion. This * operation must be performed under an exclusive lock. * Once the thread is in the possession of a chunk, the lock is dropped, and the * memory is split into units that repopulate the bucket. * * These are the CTL entry points that control allocation classes: * - heap.alloc_class.[class_id].desc * Creates/retrieves allocation class information * * It's VERY important to remember that the allocation classes are a RUNTIME * property of the allocator - they are NOT stored persistently in the pool. * It's recommended to always create custom allocation classes immediately after * creating or opening the pool, before any use. * If there are existing objects created using a class that is no longer stored * in the runtime state of the allocator, they can be normally freed, but * allocating equivalent objects will be done using the allocation class that * is currently defined for that size. * * Please see the libpmemobj man page for more information about entry points. */ /* * Persistent allocation header */ enum pobj_header_type { /* * 64-byte header used up until the version 1.3 of the library, * functionally equivalent to the compact header. * It's not recommended to create any new classes with this header. */ POBJ_HEADER_LEGACY, /* * 16-byte header used by the default allocation classes. All library * metadata is by default allocated using this header. * Supports type numbers and variably sized allocations. */ POBJ_HEADER_COMPACT, /* * 0-byte header with metadata stored exclusively in a bitmap. This * ensures that objects are allocated in memory contiguously and * without attached headers. * This can be used to create very small allocation classes, but it * does not support type numbers. * Additionally, allocations with this header can only span a single * unit. * Objects allocated with this header do show up when iterating through * the heap using pmemobj_first/pmemobj_next functions, but have a * type_num equal 0. */ POBJ_HEADER_NONE, MAX_POBJ_HEADER_TYPES }; /* * Description of allocation classes */ struct pobj_alloc_class_desc { /* * The number of bytes in a single unit of allocation. A single * allocation can span up to 64 units (or 1 in the case of no header). * If one creates an allocation class with a certain unit size and * forces it to handle bigger sizes, more than one unit * will be used. 
* For example, an allocation class with a compact header and 128 bytes * unit size, for a request of 200 bytes will create a memory block * containing 256 bytes that spans two units. The usable size of that * allocation will be 240 bytes: 2 * 128 - 16 (header). */ size_t unit_size; /* * Desired alignment of objects from the allocation class. * If non zero, must be a power of two and an even divisor of unit size. * * All allocation classes have default alignment * of 64. User data alignment is affected by the size of a header. For * compact one this means that the alignment is 48 bytes. * */ size_t alignment; /* * The minimum number of units that must be present in a * single, contiguous, memory block. * Those blocks (internally called runs), are fetched on demand from the * heap. Accessing that global state is a serialization point for the * allocator and thus it is imperative for performance and scalability * that a reasonable amount of memory is fetched in a single call. * Threads generally do not share memory blocks from which they * allocate, but blocks do go back to the global heap if they are no * longer actively used for allocation. */ unsigned units_per_block; /* * The header of allocations that originate from this allocation class. */ enum pobj_header_type header_type; /* * The identifier of this allocation class. */ unsigned class_id; }; enum pobj_stats_enabled { POBJ_STATS_ENABLED_TRANSIENT, POBJ_STATS_ENABLED_BOTH, POBJ_STATS_ENABLED_PERSISTENT, POBJ_STATS_DISABLED, }; #ifndef _WIN32 /* EXPERIMENTAL */ int pmemobj_ctl_get(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_set(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_exec(PMEMobjpool *pop, const char *name, void *arg); #else int pmemobj_ctl_getU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_getW(PMEMobjpool *pop, const wchar_t *name, void *arg); int pmemobj_ctl_setU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_setW(PMEMobjpool *pop, const wchar_t *name, void *arg); int pmemobj_ctl_execU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_execW(PMEMobjpool *pop, const wchar_t *name, void *arg); #ifndef PMDK_UTF8_API #define pmemobj_ctl_get pmemobj_ctl_getW #define pmemobj_ctl_set pmemobj_ctl_setW #define pmemobj_ctl_exec pmemobj_ctl_execW #else #define pmemobj_ctl_get pmemobj_ctl_getU #define pmemobj_ctl_set pmemobj_ctl_setU #define pmemobj_ctl_exec pmemobj_ctl_execU #endif #endif #ifdef __cplusplus } #endif #endif /* libpmemobj/ctl.h */
6,198
34.221591
80
h
null
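A hedged sketch of registering a custom allocation class through the CTL interface above. The "heap.alloc_class.new.desc" entry point name follows the pmemobj_ctl man page (the comment above only shows the [class_id] form), and the class parameters are illustrative only.

#include <libpmemobj.h>

int alloc_class_demo(PMEMobjpool *pop)
{
	struct pobj_alloc_class_desc desc = {
		.unit_size = 128,
		.alignment = 0,                 /* keep the default 64-byte alignment */
		.units_per_block = 1000,
		.header_type = POBJ_HEADER_COMPACT,
		.class_id = 0,                  /* "new" fills this in on success */
	};

	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc))
		return -1;

	return (int)desc.class_id;      /* use this id in POBJ_CLASS_ID() flags */
}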
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/lists_atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/lists_atomic.h -- definitions of libpmemobj atomic lists macros */ #ifndef LIBPMEMOBJ_LISTS_ATOMIC_H #define LIBPMEMOBJ_LISTS_ATOMIC_H 1 #include <libpmemobj/lists_atomic_base.h> #include <libpmemobj/thread.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional persistent atomic circular doubly-linked list */ #define POBJ_LIST_ENTRY(type)\ struct {\ TOID(type) pe_next;\ TOID(type) pe_prev;\ } #define POBJ_LIST_HEAD(name, type)\ struct name {\ TOID(type) pe_first;\ PMEMmutex lock;\ } #define POBJ_LIST_FIRST(head) ((head)->pe_first) #define POBJ_LIST_LAST(head, field) (\ TOID_IS_NULL((head)->pe_first) ?\ (head)->pe_first :\ D_RO((head)->pe_first)->field.pe_prev) #define POBJ_LIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first)) #define POBJ_LIST_NEXT(elm, field) (D_RO(elm)->field.pe_next) #define POBJ_LIST_PREV(elm, field) (D_RO(elm)->field.pe_prev) #define POBJ_LIST_DEST_HEAD 1 #define POBJ_LIST_DEST_TAIL 0 #define POBJ_LIST_DEST_BEFORE 1 #define POBJ_LIST_DEST_AFTER 0 #define POBJ_LIST_FOREACH(var, head, field)\ for (_pobj_debug_notice("POBJ_LIST_FOREACH", __FILE__, __LINE__),\ (var) = POBJ_LIST_FIRST((head));\ TOID_IS_NULL((var)) == 0;\ TOID_EQUALS(POBJ_LIST_NEXT((var), field),\ POBJ_LIST_FIRST((head))) ?\ TOID_ASSIGN((var), OID_NULL) :\ ((var) = POBJ_LIST_NEXT((var), field))) #define POBJ_LIST_FOREACH_REVERSE(var, head, field)\ for (_pobj_debug_notice("POBJ_LIST_FOREACH_REVERSE", __FILE__, __LINE__),\ (var) = POBJ_LIST_LAST((head), field);\ TOID_IS_NULL((var)) == 0;\ TOID_EQUALS(POBJ_LIST_PREV((var), field),\ POBJ_LIST_LAST((head), field)) ?\ TOID_ASSIGN((var), OID_NULL) :\ ((var) = POBJ_LIST_PREV((var), field))) #define POBJ_LIST_INSERT_HEAD(pop, head, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), OID_NULL,\ POBJ_LIST_DEST_HEAD, (elm).oid) #define POBJ_LIST_INSERT_TAIL(pop, head, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), OID_NULL,\ POBJ_LIST_DEST_TAIL, (elm).oid) #define POBJ_LIST_INSERT_AFTER(pop, head, listelm, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid,\ 0 /* after */, (elm).oid) #define POBJ_LIST_INSERT_BEFORE(pop, head, listelm, elm, field)\ pmemobj_list_insert((pop), \ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid,\ 1 /* before */, (elm).oid) #define POBJ_LIST_INSERT_NEW_HEAD(pop, head, field, size, constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), OID_NULL, POBJ_LIST_DEST_HEAD, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_TAIL(pop, head, field, size, constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), OID_NULL, POBJ_LIST_DEST_TAIL, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_AFTER(pop, head, listelm, field, size,\ constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), (listelm).oid, 0 /* after */, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_BEFORE(pop, head, listelm, field, size,\ constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid, 1 /* before */, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define 
POBJ_LIST_REMOVE(pop, head, elm, field)\ pmemobj_list_remove((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (elm).oid, 0 /* no free */) #define POBJ_LIST_REMOVE_FREE(pop, head, elm, field)\ pmemobj_list_remove((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (elm).oid, 1 /* free */) #define POBJ_LIST_MOVE_ELEMENT_HEAD(pop, head, head_new, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new), OID_NULL, POBJ_LIST_DEST_HEAD, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_TAIL(pop, head, head_new, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new), OID_NULL, POBJ_LIST_DEST_TAIL, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_AFTER(pop,\ head, head_new, listelm, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new),\ (listelm).oid,\ 0 /* after */, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,\ head, head_new, listelm, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new),\ (listelm).oid,\ 1 /* before */, (elm).oid) #ifdef __cplusplus } #endif #endif /* libpmemobj/lists_atomic.h */
5,121
30.042424
80
h
null
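A minimal usage sketch for the atomic-list macros in the record above, assuming a pool handle pop that is already open and a list head embedded in persistent memory (for example inside the root object); the names struct entry, entry_head, links and list_demo are illustrative, not part of the library.

#include <libpmemobj.h>

TOID_DECLARE(struct entry, 1);           /* illustrative type number */

struct entry {
    uint64_t value;
    POBJ_LIST_ENTRY(struct entry) links; /* provides pe_next / pe_prev */
};

POBJ_LIST_HEAD(entry_head, struct entry);

static void
list_demo(PMEMobjpool *pop, struct entry_head *head)
{
    TOID(struct entry) e;

    /* failure-atomic, non-transactional insert of a freshly allocated element */
    POBJ_LIST_INSERT_NEW_TAIL(pop, head, links, sizeof(struct entry),
        NULL, NULL);

    POBJ_LIST_FOREACH(e, head, links)
        (void)D_RO(e)->value;            /* read-only visit of each element */

    if (!POBJ_LIST_EMPTY(head))
        POBJ_LIST_REMOVE_FREE(pop, head, POBJ_LIST_FIRST(head), links);
}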
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/iterator.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/iterator.h -- definitions of libpmemobj iterator macros */ #ifndef LIBPMEMOBJ_ITERATOR_H #define LIBPMEMOBJ_ITERATOR_H 1 #include <libpmemobj/iterator_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif static inline PMEMoid POBJ_FIRST_TYPE_NUM(PMEMobjpool *pop, uint64_t type_num) { PMEMoid _pobj_ret = pmemobj_first(pop); while (!OID_IS_NULL(_pobj_ret) && pmemobj_type_num(_pobj_ret) != type_num) { _pobj_ret = pmemobj_next(_pobj_ret); } return _pobj_ret; } static inline PMEMoid POBJ_NEXT_TYPE_NUM(PMEMoid o) { PMEMoid _pobj_ret = o; do { _pobj_ret = pmemobj_next(_pobj_ret);\ } while (!OID_IS_NULL(_pobj_ret) && pmemobj_type_num(_pobj_ret) != pmemobj_type_num(o)); return _pobj_ret; } #define POBJ_FIRST(pop, t) ((TOID(t))POBJ_FIRST_TYPE_NUM(pop, TOID_TYPE_NUM(t))) #define POBJ_NEXT(o) ((__typeof__(o))POBJ_NEXT_TYPE_NUM((o).oid)) /* * Iterates through every existing allocated object. */ #define POBJ_FOREACH(pop, varoid)\ for (_pobj_debug_notice("POBJ_FOREACH", __FILE__, __LINE__),\ varoid = pmemobj_first(pop);\ (varoid).off != 0; varoid = pmemobj_next(varoid)) /* * Safe variant of POBJ_FOREACH in which pmemobj_free on varoid is allowed */ #define POBJ_FOREACH_SAFE(pop, varoid, nvaroid)\ for (_pobj_debug_notice("POBJ_FOREACH_SAFE", __FILE__, __LINE__),\ varoid = pmemobj_first(pop);\ (varoid).off != 0 && (nvaroid = pmemobj_next(varoid), 1);\ varoid = nvaroid) /* * Iterates through every object of the specified type. */ #define POBJ_FOREACH_TYPE(pop, var)\ POBJ_FOREACH(pop, (var).oid)\ if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var)) /* * Safe variant of POBJ_FOREACH_TYPE in which pmemobj_free on var * is allowed. */ #define POBJ_FOREACH_SAFE_TYPE(pop, var, nvar)\ POBJ_FOREACH_SAFE(pop, (var).oid, (nvar).oid)\ if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var)) #ifdef __cplusplus } #endif #endif /* libpmemobj/iterator.h */
2,041
23.60241
80
h
null
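A short sketch of the typed iteration macros from the record above; struct item, its type number and count_items are invented for the example, and pop is assumed to be an open pool.

#include <libpmemobj.h>

TOID_DECLARE(struct item, 2);

struct item {
    int payload;
};

static int
count_items(PMEMobjpool *pop)
{
    TOID(struct item) it;
    int n = 0;

    /* visits only objects whose type number matches struct item */
    POBJ_FOREACH_TYPE(pop, it)
        n++;

    return n;
}

POBJ_FOREACH_SAFE_TYPE would be used instead if the loop body freed the visited objects.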
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/lists_atomic_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/lists_atomic_base.h -- definitions of libpmemobj atomic lists */ #ifndef LIBPMEMOBJ_LISTS_ATOMIC_BASE_H #define LIBPMEMOBJ_LISTS_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional persistent atomic circular doubly-linked list */ int pmemobj_list_insert(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid dest, int before, PMEMoid oid); PMEMoid pmemobj_list_insert_new(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid dest, int before, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); int pmemobj_list_remove(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid oid, int free); int pmemobj_list_move(PMEMobjpool *pop, size_t pe_old_offset, void *head_old, size_t pe_new_offset, void *head_new, PMEMoid dest, int before, PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/lists_atomic_base.h */
1,022
24.575
79
h
null
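A sketch of calling the base list function directly instead of going through the POBJ_LIST_* macros; it assumes the entry fields at pe_offset are laid out as a pe_next/pe_prev pair of PMEMoids and the head as a PMEMoid followed by a PMEMmutex, which is what the macros in lists_atomic.h expand to. The names struct node, node_head and insert_raw are illustrative.

#include <stddef.h>
#include <libpmemobj.h>

struct node {
    uint64_t data;
    struct {
        PMEMoid pe_next;
        PMEMoid pe_prev;
    } links;
};

struct node_head {
    PMEMoid pe_first;
    PMEMmutex lock;
};

static int
insert_raw(PMEMobjpool *pop, struct node_head *head, PMEMoid elm)
{
    /* pe_offset tells the library where the link fields live inside the object;
     * dest == OID_NULL with before == 1 means "insert at the head of the list" */
    return pmemobj_list_insert(pop, offsetof(struct node, links),
        head, OID_NULL, 1, elm);
}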
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/tx_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * libpmemobj/tx_base.h -- definitions of libpmemobj transactional entry points */ #ifndef LIBPMEMOBJ_TX_BASE_H #define LIBPMEMOBJ_TX_BASE_H 1 #include <setjmp.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Transactions * * Stages are changed only by the pmemobj_tx_* functions, each transition * to the TX_STAGE_ONABORT is followed by a longjmp to the jmp_buf provided in * the pmemobj_tx_begin function. */ enum pobj_tx_stage { TX_STAGE_NONE, /* no transaction in this thread */ TX_STAGE_WORK, /* transaction in progress */ TX_STAGE_ONCOMMIT, /* successfully committed */ TX_STAGE_ONABORT, /* tx_begin failed or transaction aborted */ TX_STAGE_FINALLY, /* always called */ MAX_TX_STAGE }; /* * Always returns the current transaction stage for a thread. */ enum pobj_tx_stage pmemobj_tx_stage(void); enum pobj_tx_param { TX_PARAM_NONE, TX_PARAM_MUTEX, /* PMEMmutex */ TX_PARAM_RWLOCK, /* PMEMrwlock */ TX_PARAM_CB, /* pmemobj_tx_callback cb, void *arg */ }; enum pobj_log_type { TX_LOG_TYPE_SNAPSHOT, TX_LOG_TYPE_INTENT, }; enum pobj_tx_failure_behavior { POBJ_TX_FAILURE_ABORT, POBJ_TX_FAILURE_RETURN, }; #if !defined(pmdk_use_attr_deprec_with_msg) && defined(__COVERITY__) #define pmdk_use_attr_deprec_with_msg 0 #endif #if !defined(pmdk_use_attr_deprec_with_msg) && defined(__clang__) #if __has_extension(attribute_deprecated_with_message) #define pmdk_use_attr_deprec_with_msg 1 #else #define pmdk_use_attr_deprec_with_msg 0 #endif #endif #if !defined(pmdk_use_attr_deprec_with_msg) && \ defined(__GNUC__) && !defined(__INTEL_COMPILER) #if __GNUC__ * 100 + __GNUC_MINOR__ >= 601 /* 6.1 */ #define pmdk_use_attr_deprec_with_msg 1 #else #define pmdk_use_attr_deprec_with_msg 0 #endif #endif #if !defined(pmdk_use_attr_deprec_with_msg) #define pmdk_use_attr_deprec_with_msg 0 #endif #if pmdk_use_attr_deprec_with_msg #define tx_lock_deprecated __attribute__((deprecated(\ "enum pobj_tx_lock is deprecated, use enum pobj_tx_param"))) #else #define tx_lock_deprecated #endif /* deprecated, do not use */ enum tx_lock_deprecated pobj_tx_lock { TX_LOCK_NONE tx_lock_deprecated = TX_PARAM_NONE, TX_LOCK_MUTEX tx_lock_deprecated = TX_PARAM_MUTEX, TX_LOCK_RWLOCK tx_lock_deprecated = TX_PARAM_RWLOCK, }; typedef void (*pmemobj_tx_callback)(PMEMobjpool *pop, enum pobj_tx_stage stage, void *); #define POBJ_TX_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\ POBJ_XALLOC_NO_FLUSH |\ POBJ_XALLOC_ARENA_MASK |\ POBJ_XALLOC_CLASS_MASK |\ POBJ_XALLOC_NO_ABORT) #define POBJ_XADD_NO_FLUSH POBJ_FLAG_NO_FLUSH #define POBJ_XADD_NO_SNAPSHOT POBJ_FLAG_NO_SNAPSHOT #define POBJ_XADD_ASSUME_INITIALIZED POBJ_FLAG_ASSUME_INITIALIZED #define POBJ_XADD_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XADD_VALID_FLAGS (POBJ_XADD_NO_FLUSH |\ POBJ_XADD_NO_SNAPSHOT |\ POBJ_XADD_ASSUME_INITIALIZED |\ POBJ_XADD_NO_ABORT) #define POBJ_XLOCK_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XLOCK_VALID_FLAGS (POBJ_XLOCK_NO_ABORT) #define POBJ_XFREE_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XFREE_VALID_FLAGS (POBJ_XFREE_NO_ABORT) #define POBJ_XPUBLISH_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XPUBLISH_VALID_FLAGS (POBJ_XPUBLISH_NO_ABORT) #define POBJ_XLOG_APPEND_BUFFER_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XLOG_APPEND_BUFFER_VALID_FLAGS (POBJ_XLOG_APPEND_BUFFER_NO_ABORT) /* * Starts a new transaction in the current thread. * If called within an open transaction, starts a nested transaction. 
* * If successful, transaction stage changes to TX_STAGE_WORK and function * returns zero. Otherwise, stage changes to TX_STAGE_ONABORT and an error * number is returned. */ int pmemobj_tx_begin(PMEMobjpool *pop, jmp_buf env, ...); /* * Adds lock of given type to current transaction. * 'Flags' is a bitmask of the following values: * - POBJ_XLOCK_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. */ int pmemobj_tx_xlock(enum pobj_tx_param type, void *lockp, uint64_t flags); /* * Adds lock of given type to current transaction. */ int pmemobj_tx_lock(enum pobj_tx_param type, void *lockp); /* * Aborts current transaction * * Causes transition to TX_STAGE_ONABORT. * * This function must be called during TX_STAGE_WORK. */ void pmemobj_tx_abort(int errnum); /* * Commits current transaction * * This function must be called during TX_STAGE_WORK. */ void pmemobj_tx_commit(void); /* * Cleanups current transaction. Must always be called after pmemobj_tx_begin, * even if starting the transaction failed. * * If called during TX_STAGE_NONE, has no effect. * * Always causes transition to TX_STAGE_NONE. * * If transaction was successful, returns 0. Otherwise returns error code set * by pmemobj_tx_abort. * * This function must *not* be called during TX_STAGE_WORK. */ int pmemobj_tx_end(void); /* * Performs the actions associated with current stage of the transaction, * and makes the transition to the next stage. Current stage must always * be obtained by calling pmemobj_tx_stage. * * This function must be called in transaction. */ void pmemobj_tx_process(void); /* * Returns last transaction error code. */ int pmemobj_tx_errno(void); /* * Takes a "snapshot" of the memory block of given size and located at given * offset 'off' in the object 'oid' and saves it in the undo log. * The application is then free to directly modify the object in that memory * range. In case of failure or abort, all the changes within this range will * be rolled-back automatically. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_add_range(PMEMoid oid, uint64_t off, size_t size); /* * Takes a "snapshot" of the given memory region and saves it in the undo log. * The application is then free to directly modify the object in that memory * range. In case of failure or abort, all the changes within this range will * be rolled-back automatically. The supplied block of memory has to be within * the given pool. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_add_range_direct(const void *ptr, size_t size); /* * Behaves exactly the same as pmemobj_tx_add_range when 'flags' equals 0. * 'Flags' is a bitmask of the following values: * - POBJ_XADD_NO_FLUSH - skips flush on commit * - POBJ_XADD_NO_SNAPSHOT - added range will not be snapshotted * - POBJ_XADD_ASSUME_INITIALIZED - added range is assumed to be initialized * - POBJ_XADD_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. */ int pmemobj_tx_xadd_range(PMEMoid oid, uint64_t off, size_t size, uint64_t flags); /* * Behaves exactly the same as pmemobj_tx_add_range_direct when 'flags' equals * 0. 
'Flags' is a bitmask of the following values: * - POBJ_XADD_NO_FLUSH - skips flush on commit * - POBJ_XADD_NO_SNAPSHOT - added range will not be snapshotted * - POBJ_XADD_ASSUME_INITIALIZED - added range is assumed to be initialized * - POBJ_XADD_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. */ int pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags); /* * Transactionally allocates a new object. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_alloc(size_t size, uint64_t type_num); /* * Transactionally allocates a new object. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XALLOC_ZERO - zero the allocated object * - POBJ_XALLOC_NO_FLUSH - skip flush on commit * - POBJ_XALLOC_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_xalloc(size_t size, uint64_t type_num, uint64_t flags); /* * Transactionally allocates new zeroed object. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_zalloc(size_t size, uint64_t type_num); /* * Transactionally resizes an existing object. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_realloc(PMEMoid oid, size_t size, uint64_t type_num); /* * Transactionally resizes an existing object, if extended new space is zeroed. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_zrealloc(PMEMoid oid, size_t size, uint64_t type_num); /* * Transactionally allocates a new object with duplicate of the string s. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_strdup(const char *s, uint64_t type_num); /* * Transactionally allocates a new object with duplicate of the string s. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XALLOC_ZERO - zero the allocated object * - POBJ_XALLOC_NO_FLUSH - skip flush on commit * - POBJ_XALLOC_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_xstrdup(const char *s, uint64_t type_num, uint64_t flags); /* * Transactionally allocates a new object with duplicate of the wide character * string s. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_wcsdup(const wchar_t *s, uint64_t type_num); /* * Transactionally allocates a new object with duplicate of the wide character * string s. * * If successful, returns PMEMoid. 
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XALLOC_ZERO - zero the allocated object * - POBJ_XALLOC_NO_FLUSH - skip flush on commit * - POBJ_XALLOC_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_xwcsdup(const wchar_t *s, uint64_t type_num, uint64_t flags); /* * Transactionally frees an existing object. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_free(PMEMoid oid); /* * Transactionally frees an existing object. * * If successful, returns zero. * Otherwise, the stage changes to TX_STAGE_ONABORT and the error number is * returned. * 'Flags' is a bitmask of the following values: * - POBJ_XFREE_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_xfree(PMEMoid oid, uint64_t flags); /* * Append user allocated buffer to the ulog. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_log_append_buffer(enum pobj_log_type type, void *addr, size_t size); /* * Append user allocated buffer to the ulog. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XLOG_APPEND_BUFFER_NO_ABORT - if the function does not end * successfully, do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_xlog_append_buffer(enum pobj_log_type type, void *addr, size_t size, uint64_t flags); /* * Enables or disables automatic ulog allocations. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_log_auto_alloc(enum pobj_log_type type, int on_off); /* * Calculates and returns size for user buffers for snapshots. */ size_t pmemobj_tx_log_snapshots_max_size(size_t *sizes, size_t nsizes); /* * Calculates and returns size for user buffers for intents. */ size_t pmemobj_tx_log_intents_max_size(size_t nintents); /* * Sets volatile pointer to the user data for the current transaction. */ void pmemobj_tx_set_user_data(void *data); /* * Gets volatile pointer to the user data associated with the current * transaction. */ void *pmemobj_tx_get_user_data(void); /* * Sets the failure behavior of transactional functions. * * This function must be called during TX_STAGE_WORK. */ void pmemobj_tx_set_failure_behavior(enum pobj_tx_failure_behavior behavior); /* * Returns failure behavior for the current transaction. * * This function must be called during TX_STAGE_WORK. */ enum pobj_tx_failure_behavior pmemobj_tx_get_failure_behavior(void); #ifdef __cplusplus } #endif #endif /* libpmemobj/tx_base.h */
14,087
30.237251
80
h
null
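A sketch of driving the transaction stages by hand with the entry points from the record above, mirroring what the TX_BEGIN/TX_END macros in tx.h generate; counter_add_one, obj and off are illustrative, and pop is assumed to be an open pool containing the object.

#include <errno.h>
#include <setjmp.h>
#include <libpmemobj.h>

static int
counter_add_one(PMEMobjpool *pop, PMEMoid obj, uint64_t off)
{
    jmp_buf env;
    int ret;

    if (setjmp(env)) {
        /* re-entered here after an abort; stage is TX_STAGE_ONABORT */
        errno = pmemobj_tx_errno();
    } else if ((ret = pmemobj_tx_begin(pop, env, TX_PARAM_NONE)) != 0) {
        errno = ret;                     /* begin failed, stage is ONABORT */
    } else {
        /* TX_STAGE_WORK: snapshot the range, then modify it freely */
        uint64_t *p = (uint64_t *)((char *)pmemobj_direct(obj) + off);
        pmemobj_tx_add_range(obj, off, sizeof(*p));
        *p += 1;
        pmemobj_tx_commit();
    }

    /* walk ONCOMMIT/ONABORT/FINALLY to completion, then clean up */
    while (pmemobj_tx_stage() != TX_STAGE_NONE)
        pmemobj_tx_process();

    return pmemobj_tx_end();             /* 0 on success, abort errno otherwise */
}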
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/pool_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * libpmemobj/pool_base.h -- definitions of libpmemobj pool entry points */ #ifndef LIBPMEMOBJ_POOL_BASE_H #define LIBPMEMOBJ_POOL_BASE_H 1 #include <stddef.h> #include <sys/types.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif //NEW //#define _GNU_SOURCE //#include <sys/types.h> //#include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> //int __real_open(const char *__path, int __oflag); //int __wrap_open(const char *__path, int __oflag); void* open_device(const char* pathname); //END NEW #define PMEMOBJ_MIN_POOL ((size_t)(1024 * 1024 * 256)) /* 8 MiB */ /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define PMEMOBJ_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ /* * Pool management. */ #ifdef _WIN32 #ifndef PMDK_UTF8_API #define pmemobj_open pmemobj_openW #define pmemobj_create pmemobj_createW #define pmemobj_check pmemobj_checkW #else #define pmemobj_open pmemobj_openU #define pmemobj_create pmemobj_createU #define pmemobj_check pmemobj_checkU #endif #endif #ifndef _WIN32 PMEMobjpool *pmemobj_open(const char *path, const char *layout); #else PMEMobjpool *pmemobj_openU(const char *path, const char *layout); PMEMobjpool *pmemobj_openW(const wchar_t *path, const wchar_t *layout); #endif #ifndef _WIN32 PMEMobjpool *pmemobj_create(const char *path, const char *layout, size_t poolsize, mode_t mode); #else PMEMobjpool *pmemobj_createU(const char *path, const char *layout, size_t poolsize, mode_t mode); PMEMobjpool *pmemobj_createW(const wchar_t *path, const wchar_t *layout, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemobj_check(const char *path, const char *layout); #else int pmemobj_checkU(const char *path, const char *layout); int pmemobj_checkW(const wchar_t *path, const wchar_t *layout); #endif void pmemobj_close(PMEMobjpool *pop); /* * If called for the first time on a newly created pool, the root object * of given size is allocated. Otherwise, it returns the existing root object. * In such case, the size must be not less than the actual root object size * stored in the pool. If it's larger, the root object is automatically * resized. * * This function is thread-safe. */ PMEMoid pmemobj_root(PMEMobjpool *pop, size_t size); /* * Same as above, but calls the constructor function when the object is first * created and on all subsequent reallocations. */ PMEMoid pmemobj_root_construct(PMEMobjpool *pop, size_t size, pmemobj_constr constructor, void *arg); /* * Returns the size in bytes of the root object. Always equal to the requested * size. */ size_t pmemobj_root_size(PMEMobjpool *pop); /* * Sets volatile pointer to the user data for specified pool. */ void pmemobj_set_user_data(PMEMobjpool *pop, void *data); /* * Gets volatile pointer to the user data associated with the specified pool. */ void *pmemobj_get_user_data(PMEMobjpool *pop); #ifdef __cplusplus } #endif #endif /* libpmemobj/pool_base.h */
3,095
24.377049
79
h
null
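A small sketch of the pool lifecycle declared in the record above: create the pool on first use, fall back to opening an existing one, and fetch the root object. The layout string, struct my_root and open_or_create are placeholders chosen for the example.

#include <libpmemobj.h>

struct my_root {
    uint64_t counter;
};

static PMEMobjpool *
open_or_create(const char *path)
{
    PMEMobjpool *pop = pmemobj_create(path, "demo_layout",
        PMEMOBJ_MIN_POOL, 0666);
    if (pop == NULL)
        pop = pmemobj_open(path, "demo_layout");   /* pool already exists */
    if (pop == NULL)
        return NULL;

    /* allocated (zeroed) on the first call, the same object afterwards */
    PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
    if (OID_IS_NULL(root)) {
        pmemobj_close(pop);
        return NULL;
    }
    return pop;
}

The caller is expected to hand the pool back with pmemobj_close() once it is done with it.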
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/action_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * libpmemobj/action_base.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_BASE_H #define LIBPMEMOBJ_ACTION_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif enum pobj_action_type { /* a heap action (e.g., alloc) */ POBJ_ACTION_TYPE_HEAP, /* a single memory operation (e.g., value set) */ POBJ_ACTION_TYPE_MEM, POBJ_MAX_ACTION_TYPE }; struct pobj_action_heap { /* offset to the element being freed/allocated */ uint64_t offset; /* usable size of the element being allocated */ uint64_t usable_size; }; struct pobj_action { /* * These fields are internal for the implementation and are not * guaranteed to be stable across different versions of the API. * Use with caution. * * This structure should NEVER be stored on persistent memory! */ enum pobj_action_type type; uint32_t data[3]; union { struct pobj_action_heap heap; uint64_t data2[14]; }; }; #define POBJ_ACTION_XRESERVE_VALID_FLAGS\ (POBJ_XALLOC_CLASS_MASK |\ POBJ_XALLOC_ARENA_MASK |\ POBJ_XALLOC_ZERO) PMEMoid pmemobj_reserve(PMEMobjpool *pop, struct pobj_action *act, size_t size, uint64_t type_num); PMEMoid pmemobj_xreserve(PMEMobjpool *pop, struct pobj_action *act, size_t size, uint64_t type_num, uint64_t flags); void pmemobj_set_value(PMEMobjpool *pop, struct pobj_action *act, uint64_t *ptr, uint64_t value); void pmemobj_defer_free(PMEMobjpool *pop, PMEMoid oid, struct pobj_action *act); int pmemobj_publish(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt); int pmemobj_tx_publish(struct pobj_action *actv, size_t actvcnt); int pmemobj_tx_xpublish(struct pobj_action *actv, size_t actvcnt, uint64_t flags); void pmemobj_cancel(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt); #ifdef __cplusplus } #endif #endif /* libpmemobj/action_base.h */
1,935
24.813333
80
h
null
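A sketch of the reserve/publish flow from the action interface above: nothing becomes visible in the pool until pmemobj_publish, and pmemobj_cancel throws the staged work away. The size, type number, the persistent_flag pointer (assumed to point into the pool) and the keep flag are illustrative.

#include <libpmemobj.h>

static PMEMoid
reserve_then_decide(PMEMobjpool *pop, uint64_t *persistent_flag, int keep)
{
    struct pobj_action acts[2];

    PMEMoid oid = pmemobj_reserve(pop, &acts[0], 64, 0);
    if (OID_IS_NULL(oid))
        return OID_NULL;

    /* queue a value update to be made durable together with the allocation */
    pmemobj_set_value(pop, &acts[1], persistent_flag, 1);

    if (keep) {
        pmemobj_publish(pop, acts, 2);   /* both actions take effect at once */
        return oid;
    }

    pmemobj_cancel(pop, acts, 2);        /* the pool is left untouched */
    return OID_NULL;
}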
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/types.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * libpmemobj/types.h -- definitions of libpmemobj type-safe macros */ #ifndef LIBPMEMOBJ_TYPES_H #define LIBPMEMOBJ_TYPES_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif #define TOID_NULL(t) ((TOID(t))OID_NULL) #define PMEMOBJ_MAX_LAYOUT ((size_t)1024) /* * Type safety macros */ #if !(defined _MSC_VER || defined __clang__) #define TOID_ASSIGN(o, value)(\ {\ (o).oid = value;\ (o); /* to avoid "error: statement with no effect" */\ }) #else /* _MSC_VER or __clang__ */ #define TOID_ASSIGN(o, value) ((o).oid = value, (o)) #endif #if (defined _MSC_VER && _MSC_VER < 1912) /* * XXX - workaround for offsetof issue in VS 15.3, * it has been fixed since Visual Studio 2017 Version 15.5 * (_MSC_VER == 1912) */ #ifdef PMEMOBJ_OFFSETOF_WA #ifdef _CRT_USE_BUILTIN_OFFSETOF #undef offsetof #define offsetof(s, m) ((size_t)&reinterpret_cast < char const volatile& > \ ((((s *)0)->m))) #endif #else #ifdef _CRT_USE_BUILTIN_OFFSETOF #error "Invalid definition of offsetof() macro - see: \ https://developercommunity.visualstudio.com/content/problem/96174/\ offsetof-macro-is-broken-for-nested-objects.html \ Please upgrade your VS, fix offsetof as described under the link or define \ PMEMOBJ_OFFSETOF_WA to enable workaround in libpmemobj.h" #endif #endif #endif /* _MSC_VER */ #define TOID_EQUALS(lhs, rhs)\ ((lhs).oid.off == (rhs).oid.off &&\ (lhs).oid.pool_uuid_lo == (rhs).oid.pool_uuid_lo) /* type number of root object */ #define POBJ_ROOT_TYPE_NUM 0 #define _toid_struct #define _toid_union #define _toid_enum #define _POBJ_LAYOUT_REF(name) (sizeof(_pobj_layout_##name##_ref)) /* * Typed OID */ #define TOID(t)\ union _toid_##t##_toid #ifdef __cplusplus #define _TOID_CONSTR(t)\ _toid_##t##_toid()\ { }\ _toid_##t##_toid(PMEMoid _oid) : oid(_oid)\ { } #else #define _TOID_CONSTR(t) #endif /* * Declaration of typed OID */ #define _TOID_DECLARE(t, i)\ typedef uint8_t _toid_##t##_toid_type_num[(i) + 1];\ TOID(t)\ {\ _TOID_CONSTR(t)\ PMEMoid oid;\ t *_type;\ _toid_##t##_toid_type_num *_type_num;\ } /* * Declaration of typed OID of an object */ #define TOID_DECLARE(t, i) _TOID_DECLARE(t, i) /* * Declaration of typed OID of a root object */ #define TOID_DECLARE_ROOT(t) _TOID_DECLARE(t, POBJ_ROOT_TYPE_NUM) /* * Type number of specified type */ #define TOID_TYPE_NUM(t) (sizeof(_toid_##t##_toid_type_num) - 1) /* * Type number of object read from typed OID */ #define TOID_TYPE_NUM_OF(o) (sizeof(*(o)._type_num) - 1) /* * NULL check */ #define TOID_IS_NULL(o) ((o).oid.off == 0) /* * Validates whether type number stored in typed OID is the same * as type number stored in object's metadata */ #define TOID_VALID(o) (TOID_TYPE_NUM_OF(o) == pmemobj_type_num((o).oid)) /* * Checks whether the object is of a given type */ #define OID_INSTANCEOF(o, t) (TOID_TYPE_NUM(t) == pmemobj_type_num(o)) /* * Begin of layout declaration */ #define POBJ_LAYOUT_BEGIN(name)\ typedef uint8_t _pobj_layout_##name##_ref[__COUNTER__ + 1] /* * End of layout declaration */ #define POBJ_LAYOUT_END(name)\ typedef char _pobj_layout_##name##_cnt[__COUNTER__ + 1 -\ _POBJ_LAYOUT_REF(name)]; /* * Number of types declared inside layout without the root object */ #define POBJ_LAYOUT_TYPES_NUM(name) (sizeof(_pobj_layout_##name##_cnt) - 1) /* * Declaration of typed OID inside layout declaration */ #define POBJ_LAYOUT_TOID(name, t)\ TOID_DECLARE(t, (__COUNTER__ + 1 - _POBJ_LAYOUT_REF(name))); /* * Declaration of typed OID of root inside layout declaration */ 
#define POBJ_LAYOUT_ROOT(name, t)\ TOID_DECLARE_ROOT(t); /* * Name of declared layout */ #define POBJ_LAYOUT_NAME(name) #name #define TOID_TYPEOF(o) __typeof__(*(o)._type) #define TOID_OFFSETOF(o, field) offsetof(TOID_TYPEOF(o), field) /* * XXX - DIRECT_RW and DIRECT_RO are not available when compiled using VC++ * as C code (/TC). Use /TP option. */ #ifndef _MSC_VER #define DIRECT_RW(o) (\ {__typeof__(o) _o; _o._type = NULL; (void)_o;\ (__typeof__(*(o)._type) *)pmemobj_direct((o).oid); }) #define DIRECT_RO(o) ((const __typeof__(*(o)._type) *)pmemobj_direct((o).oid)) #elif defined(__cplusplus) /* * XXX - On Windows, these macros do not behave exactly the same as on Linux. */ #define DIRECT_RW(o) \ (reinterpret_cast < __typeof__((o)._type) > (pmemobj_direct((o).oid))) #define DIRECT_RO(o) \ (reinterpret_cast < const __typeof__((o)._type) > \ (pmemobj_direct((o).oid))) #endif /* (defined(_MSC_VER) || defined(__cplusplus)) */ #define D_RW DIRECT_RW #define D_RO DIRECT_RO #ifdef __cplusplus } #endif #endif /* libpmemobj/types.h */
4,701
21.825243
78
h
null
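A sketch of declaring a layout with the type-safety macros above and reading a field through a typed OID; the layout name demo, struct record, struct demo_root and read_key are invented for the example.

#include <libpmemobj.h>

POBJ_LAYOUT_BEGIN(demo);
POBJ_LAYOUT_ROOT(demo, struct demo_root);
POBJ_LAYOUT_TOID(demo, struct record);
POBJ_LAYOUT_END(demo);

struct record {
    uint64_t key;
};

struct demo_root {
    TOID(struct record) first;
};

static uint64_t
read_key(TOID(struct record) rec)
{
    if (TOID_IS_NULL(rec) || !TOID_VALID(rec))
        return 0;
    return D_RO(rec)->key;               /* typed, read-only direct pointer */
}

POBJ_LAYOUT_NAME(demo) yields the string to pass as the layout argument of pmemobj_create/pmemobj_open.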
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/base.h -- definitions of base libpmemobj entry points */ #ifndef LIBPMEMOBJ_BASE_H #define LIBPMEMOBJ_BASE_H 1 #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <stddef.h> #include <stdint.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemobj_check_version pmemobj_check_versionW #define pmemobj_errormsg pmemobj_errormsgW #else #define pmemobj_check_version pmemobj_check_versionU #define pmemobj_errormsg pmemobj_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type internal to libpmemobj */ typedef struct pmemobjpool PMEMobjpool; #define PMEMOBJ_MAX_ALLOC_SIZE ((size_t)0x3FFDFFFC0) /* * allocation functions flags */ #define POBJ_FLAG_ZERO (((uint64_t)1) << 0) #define POBJ_FLAG_NO_FLUSH (((uint64_t)1) << 1) #define POBJ_FLAG_NO_SNAPSHOT (((uint64_t)1) << 2) #define POBJ_FLAG_ASSUME_INITIALIZED (((uint64_t)1) << 3) #define POBJ_FLAG_TX_NO_ABORT (((uint64_t)1) << 4) #define POBJ_CLASS_ID(id) (((uint64_t)(id)) << 48) #define POBJ_ARENA_ID(id) (((uint64_t)(id)) << 32) #define POBJ_XALLOC_CLASS_MASK ((((uint64_t)1 << 16) - 1) << 48) #define POBJ_XALLOC_ARENA_MASK ((((uint64_t)1 << 16) - 1) << 32) #define POBJ_XALLOC_ZERO POBJ_FLAG_ZERO #define POBJ_XALLOC_NO_FLUSH POBJ_FLAG_NO_FLUSH #define POBJ_XALLOC_NO_ABORT POBJ_FLAG_TX_NO_ABORT /* * pmemobj_mem* flags */ #define PMEMOBJ_F_MEM_NODRAIN (1U << 0) #define PMEMOBJ_F_MEM_NONTEMPORAL (1U << 1) #define PMEMOBJ_F_MEM_TEMPORAL (1U << 2) #define PMEMOBJ_F_MEM_WC (1U << 3) #define PMEMOBJ_F_MEM_WB (1U << 4) #define PMEMOBJ_F_MEM_NOFLUSH (1U << 5) /* * pmemobj_mem*, pmemobj_xflush & pmemobj_xpersist flags */ #define PMEMOBJ_F_RELAXED (1U << 31) /* * Persistent memory object */ /* * Object handle */ typedef struct pmemoid { uint64_t pool_uuid_lo; uint64_t off; } PMEMoid; static const PMEMoid OID_NULL = { 0, 0 }; #define OID_IS_NULL(o) ((o).off == 0) #define OID_EQUALS(lhs, rhs)\ ((lhs).off == (rhs).off &&\ (lhs).pool_uuid_lo == (rhs).pool_uuid_lo) PMEMobjpool *pmemobj_pool_by_ptr(const void *addr); PMEMobjpool *pmemobj_pool_by_oid(PMEMoid oid); #ifndef _WIN32 extern int _pobj_cache_invalidate; extern __thread struct _pobj_pcache { PMEMobjpool *pop; uint64_t uuid_lo; int invalidate; } _pobj_cached_pool; /* * Returns the direct pointer of an object. */ static inline void * pmemobj_direct_inline(PMEMoid oid) { if (oid.off == 0 || oid.pool_uuid_lo == 0) return NULL; struct _pobj_pcache *cache = &_pobj_cached_pool; if (_pobj_cache_invalidate != cache->invalidate || cache->uuid_lo != oid.pool_uuid_lo) { cache->invalidate = _pobj_cache_invalidate; if (!(cache->pop = pmemobj_pool_by_oid(oid))) { cache->uuid_lo = 0; return NULL; } cache->uuid_lo = oid.pool_uuid_lo; } return (void *)((uintptr_t)cache->pop + oid.off); } #endif /* _WIN32 */ /* * Returns the direct pointer of an object. */ #if defined(_WIN32) || defined(_PMEMOBJ_INTRNL) ||\ defined(PMEMOBJ_DIRECT_NON_INLINE) void *pmemobj_direct(PMEMoid oid); #else #define pmemobj_direct pmemobj_direct_inline #endif struct pmemvlt { uint64_t runid; }; #define PMEMvlt(T)\ struct {\ struct pmemvlt vlt;\ T value;\ } /* * Returns lazily initialized volatile variable. (EXPERIMENTAL) */ void *pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt, void *ptr, size_t size, int (*constr)(void *ptr, void *arg), void *arg); /* * Returns the OID of the object pointed to by addr. 
*/ PMEMoid pmemobj_oid(const void *addr); /* * Returns the number of usable bytes in the object. May be greater than * the requested size of the object because of internal alignment. * * Can be used with objects allocated by any of the available methods. */ size_t pmemobj_alloc_usable_size(PMEMoid oid); /* * Returns the type number of the object. */ uint64_t pmemobj_type_num(PMEMoid oid); /* * Pmemobj specific low-level memory manipulation functions. * * These functions are meant to be used with pmemobj pools, because they provide * additional functionality specific to this type of pool. These may include * for example replication support. They also take advantage of the knowledge * of the type of memory in the pool (pmem/non-pmem) to assure persistence. */ /* * Pmemobj version of memcpy. Data copied is made persistent. */ void *pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest, const void *src, size_t len); /* * Pmemobj version of memset. Data range set is made persistent. */ void *pmemobj_memset_persist(PMEMobjpool *pop, void *dest, int c, size_t len); /* * Pmemobj version of memcpy. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memcpy(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memmove. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memmove(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memset. Data range set is made persistent (unless * opted-out using flags). */ void *pmemobj_memset(PMEMobjpool *pop, void *dest, int c, size_t len, unsigned flags); /* * Pmemobj version of pmem_persist. */ void pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_persist with additional flags argument. */ int pmemobj_xpersist(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_flush. */ void pmemobj_flush(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_flush with additional flags argument. */ int pmemobj_xflush(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_drain. */ void pmemobj_drain(PMEMobjpool *pop); /* * Version checking. */ /* * PMEMOBJ_MAJOR_VERSION and PMEMOBJ_MINOR_VERSION provide the current version * of the libpmemobj API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemobj_check_version(). */ #define PMEMOBJ_MAJOR_VERSION 2 #define PMEMOBJ_MINOR_VERSION 4 #ifndef _WIN32 const char *pmemobj_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemobj_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemobj_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * Passing NULL to pmemobj_set_funcs() tells libpmemobj to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemobj. 
*/ void pmemobj_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); typedef int (*pmemobj_constr)(PMEMobjpool *pop, void *ptr, void *arg); /* * (debug helper function) logs notice message if used inside a transaction */ void _pobj_debug_notice(const char *func_name, const char *file, int line); #ifndef _WIN32 const char *pmemobj_errormsg(void); #else const char *pmemobj_errormsgU(void); const wchar_t *pmemobj_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmemobj/base.h */
7,415
23.72
80
h
null
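A sketch of the low-level memory entry points from the record above, used outside transactions; obj is assumed to reference an allocation large enough for len + 1 bytes, and store_name is illustrative.

#include <libpmemobj.h>

static void
store_name(PMEMobjpool *pop, PMEMoid obj, const char *name, size_t len)
{
    char *dst = (char *)pmemobj_direct(obj);

    /* copy and make durable in one call */
    pmemobj_memcpy_persist(pop, dst, name, len);

    /* or: write through the direct pointer, then persist the touched range */
    dst[len] = '\0';
    pmemobj_persist(pop, dst + len, 1);
}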
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/tx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/tx.h -- definitions of libpmemobj transactional macros */ #ifndef LIBPMEMOBJ_TX_H #define LIBPMEMOBJ_TX_H 1 #include <errno.h> #include <string.h> #include <libpmemobj/tx_base.h> #include <libpmemobj/types.h> extern uint64_t waitCycles; extern uint64_t resetCycles; //extern int current_tx1 = 1 ; #ifdef __cplusplus extern "C" { #endif #ifdef POBJ_TX_CRASH_ON_NO_ONABORT #define TX_ONABORT_CHECK do {\ if (_stage == TX_STAGE_ONABORT)\ abort();\ } while (0) #else #define TX_ONABORT_CHECK do {} while (0) #endif #define _POBJ_TX_BEGIN(pop, ...)\ {\ jmp_buf _tx_env;\ enum pobj_tx_stage _stage;\ int _pobj_errno;\ if (setjmp(_tx_env)) {\ errno = pmemobj_tx_errno();\ } else {\ _pobj_errno = pmemobj_tx_begin(pop, _tx_env, __VA_ARGS__,\ TX_PARAM_NONE);\ if (_pobj_errno)\ errno = _pobj_errno;\ }\ while ((_stage = pmemobj_tx_stage()) != TX_STAGE_NONE) {\ switch (_stage) {\ case TX_STAGE_WORK: #define TX_BEGIN_PARAM(pop, ...)\ _POBJ_TX_BEGIN(pop, ##__VA_ARGS__) #define TX_BEGIN_LOCK TX_BEGIN_PARAM /* Just to let compiler warn when incompatible function pointer is used */ static inline pmemobj_tx_callback _pobj_validate_cb_sig(pmemobj_tx_callback cb) { return cb; } #define TX_BEGIN_CB(pop, cb, arg, ...) _POBJ_TX_BEGIN(pop, TX_PARAM_CB,\ _pobj_validate_cb_sig(cb), arg, ##__VA_ARGS__) #define TX_BEGIN(pop) _POBJ_TX_BEGIN(pop, TX_PARAM_NONE) #define TX_ONABORT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONABORT: #define TX_ONCOMMIT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONCOMMIT: #define TX_FINALLY\ pmemobj_tx_process();\ break;\ case TX_STAGE_FINALLY: #define TX_END\ pmemobj_tx_process();\ break;\ default:\ TX_ONABORT_CHECK;\ pmemobj_tx_process();\ break;\ }\ }\ _pobj_errno = pmemobj_tx_end();\ if (_pobj_errno)\ errno = _pobj_errno;\ } #define TX_ADD(o)\ pmemobj_tx_add_range((o).oid, 0, sizeof(*(o)._type)) #define TX_ADD_FIELD(o, field)\ TX_ADD_DIRECT(&(D_RO(o)->field)) #define TX_ADD_DIRECT(p)\ pmemobj_tx_add_range_direct(p, sizeof(*(p))) #define TX_ADD_FIELD_DIRECT(p, field)\ pmemobj_tx_add_range_direct(&(p)->field, sizeof((p)->field)) #define TX_XADD(o, flags)\ pmemobj_tx_xadd_range((o).oid, 0, sizeof(*(o)._type), flags) #define TX_XADD_FIELD(o, field, flags)\ TX_XADD_DIRECT(&(D_RO(o)->field), flags) #define TX_XADD_DIRECT(p, flags)\ pmemobj_tx_xadd_range_direct(p, sizeof(*(p)), flags) #define TX_XADD_FIELD_DIRECT(p, field, flags)\ pmemobj_tx_xadd_range_direct(&(p)->field, sizeof((p)->field), flags) #define TX_NEW(t)\ ((TOID(t))pmemobj_tx_alloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ALLOC(t, size)\ ((TOID(t))pmemobj_tx_alloc(size, TOID_TYPE_NUM(t))) #define TX_ZNEW(t)\ ((TOID(t))pmemobj_tx_zalloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ZALLOC(t, size)\ ((TOID(t))pmemobj_tx_zalloc(size, TOID_TYPE_NUM(t))) #define TX_XALLOC(t, size, flags)\ ((TOID(t))pmemobj_tx_xalloc(size, TOID_TYPE_NUM(t), flags)) /* XXX - not available when compiled with VC++ as C code (/TC) */ #if !defined(_MSC_VER) || defined(__cplusplus) #define TX_REALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_realloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #define TX_ZREALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_zrealloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #endif /* !defined(_MSC_VER) || defined(__cplusplus) */ #define TX_STRDUP(s, type_num)\ pmemobj_tx_strdup(s, type_num) #define TX_XSTRDUP(s, type_num, flags)\ pmemobj_tx_xstrdup(s, type_num, flags) #define TX_WCSDUP(s, type_num)\ pmemobj_tx_wcsdup(s, type_num) #define TX_XWCSDUP(s, 
type_num, flags)\ pmemobj_tx_xwcsdup(s, type_num, flags) #define TX_FREE(o)\ pmemobj_tx_free((o).oid) #define TX_XFREE(o, flags)\ pmemobj_tx_xfree((o).oid, flags) #define TX_SET(o, field, value) (\ TX_ADD_FIELD(o, field),\ D_RW(o)->field = (value)) #define TX_SET_DIRECT(p, field, value) (\ TX_ADD_FIELD_DIRECT(p, field),\ (p)->field = (value)) static inline void * TX_MEMCPY(void *dest, const void *src, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memcpy(dest, src, num); } static inline void * TX_MEMSET(void *dest, int c, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memset(dest, c, num); } #ifdef __cplusplus } #endif #endif /* libpmemobj/tx.h */
4,386
21.848958
74
h
null
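A short sketch of the macro-level transaction from the record above; struct account, its type number and transfer are illustrative, and both TOIDs are assumed to reference live objects in pop.

#include <libpmemobj.h>

TOID_DECLARE(struct account, 3);

struct account {
    uint64_t balance;
};

static void
transfer(PMEMobjpool *pop, TOID(struct account) from, TOID(struct account) to,
    uint64_t amount)
{
    /* either both balances are updated durably, or neither is */
    TX_BEGIN(pop) {
        TX_ADD_FIELD(from, balance);
        TX_ADD_FIELD(to, balance);
        D_RW(from)->balance -= amount;
        D_RW(to)->balance += amount;
    } TX_ONABORT {
        /* abort path: errno already holds the transaction error */
    } TX_END
}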
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/atomic_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/atomic_base.h -- definitions of libpmemobj atomic entry points */ #ifndef LIBPMEMOBJ_ATOMIC_BASE_H #define LIBPMEMOBJ_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional atomic allocations * * Those functions can be used outside transactions. The allocations are always * aligned to the cache-line boundary. */ #define POBJ_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\ POBJ_XALLOC_CLASS_MASK) /* * Allocates a new object from the pool and calls a constructor function before * returning. It is guaranteed that allocated object is either properly * initialized, or if it's interrupted before the constructor completes, the * memory reserved for the object is automatically reclaimed. */ int pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); /* * Allocates with flags a new object from the pool. */ int pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, uint64_t flags, pmemobj_constr constructor, void *arg); /* * Allocates a new zeroed object from the pool. */ int pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object. */ int pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object, if extended new space is zeroed. */ int pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Allocates a new object with duplicate of the string s. */ int pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s, uint64_t type_num); /* * Allocates a new object with duplicate of the wide character string s. */ int pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s, uint64_t type_num); /* * Frees an existing object. */ void pmemobj_free(PMEMoid *oidp); struct pobj_defrag_result { size_t total; /* number of processed objects */ size_t relocated; /* number of relocated objects */ }; /* * Performs defragmentation on the provided array of objects. */ int pmemobj_defrag(PMEMobjpool *pop, PMEMoid **oidv, size_t oidcnt, struct pobj_defrag_result *result); #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic_base.h */
2,386
24.393617
79
h
null
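A sketch of a non-transactional allocation with a constructor, based on the declarations above; the buffer size, the type number and the assumption that *slot lives in the pool are illustrative choices.

#include <libpmemobj.h>

static int
buf_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
    (void)arg;
    pmemobj_memset_persist(pop, ptr, 0, 1024); /* runs before the object is visible */
    return 0;                                  /* non-zero would cancel the allocation */
}

static void
alloc_and_free(PMEMobjpool *pop, PMEMoid *slot)
{
    if (pmemobj_alloc(pop, slot, 1024, 1, buf_construct, NULL) != 0)
        return;                                /* allocation failed */

    /* ... use the object ... */

    pmemobj_free(slot);                        /* frees and sets *slot to OID_NULL */
}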
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/thread.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/thread.h -- definitions of libpmemobj thread/locking entry points */ #ifndef LIBPMEMOBJ_THREAD_H #define LIBPMEMOBJ_THREAD_H 1 #include <time.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Locking. */ #define _POBJ_CL_SIZE 64 /* cache line size */ typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMmutex; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMrwlock; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMcond; void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp); void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp, PMEMmutex *__restrict mutexp); #ifdef __cplusplus } #endif #endif /* libpmemobj/thread.h */
2,150
28.875
79
h
null
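A sketch of the pmem-resident locking declared in the record above; struct shared and bump are illustrative, and s is assumed to point into the pool. The pool handle is passed so the library can reset locks left over from a previous run after the pool is reopened.

#include <libpmemobj.h>

struct shared {
    PMEMmutex lock;
    PMEMcond ready;
    uint64_t value;
};

static void
bump(PMEMobjpool *pop, struct shared *s)
{
    pmemobj_mutex_lock(pop, &s->lock);
    s->value++;
    pmemobj_persist(pop, &s->value, sizeof(s->value));
    pmemobj_cond_broadcast(pop, &s->ready);
    pmemobj_mutex_unlock(pop, &s->lock);
}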
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/action.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * libpmemobj/action.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_H #define LIBPMEMOBJ_ACTION_H 1 #include <libpmemobj/action_base.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_RESERVE_NEW(pop, t, act)\ ((TOID(t))pmemobj_reserve(pop, act, sizeof(t), TOID_TYPE_NUM(t))) #define POBJ_RESERVE_ALLOC(pop, t, size, act)\ ((TOID(t))pmemobj_reserve(pop, act, size, TOID_TYPE_NUM(t))) #define POBJ_XRESERVE_NEW(pop, t, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, sizeof(t), TOID_TYPE_NUM(t), flags)) #define POBJ_XRESERVE_ALLOC(pop, t, size, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, size, TOID_TYPE_NUM(t), flags)) #ifdef __cplusplus } #endif #endif /* libpmemobj/action_base.h */
829
23.411765
73
h
null
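A sketch of the typed reservation macros above; struct msg, its type number and prepare_msg are invented, and the returned action is expected to be published (pmemobj_publish or pmemobj_tx_publish) or cancelled by the caller.

#include <libpmemobj.h>

TOID_DECLARE(struct msg, 4);

struct msg {
    uint64_t seq;
};

static TOID(struct msg)
prepare_msg(PMEMobjpool *pop, struct pobj_action *act)
{
    TOID(struct msg) m = POBJ_RESERVE_NEW(pop, struct msg, act);

    if (!TOID_IS_NULL(m)) {
        /* the reservation is not visible yet, so it can be initialized freely */
        D_RW(m)->seq = 0;
        pmemobj_persist(pop, D_RW(m), sizeof(struct msg));
    }
    return m;
}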
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/atomic.h -- definitions of libpmemobj atomic macros */ #ifndef LIBPMEMOBJ_ATOMIC_H #define LIBPMEMOBJ_ATOMIC_H 1 #include <libpmemobj/atomic_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_NEW(pop, o, t, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ALLOC(pop, o, t, size, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ZNEW(pop, o, t)\ pmemobj_zalloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t)) #define POBJ_ZALLOC(pop, o, t, size)\ pmemobj_zalloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_REALLOC(pop, o, t, size)\ pmemobj_realloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_ZREALLOC(pop, o, t, size)\ pmemobj_zrealloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_FREE(o)\ pmemobj_free((PMEMoid *)(o)) #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic.h */
1,115
23.26087
66
h
null
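A sketch of the typed wrappers above around the atomic allocation functions; struct blob, its type number and blob_lifecycle are illustrative, and slot is assumed to point at a typed OID stored in the pool.

#include <libpmemobj.h>

TOID_DECLARE(struct blob, 5);

struct blob {
    uint64_t len;
    char data[64];
};

static void
blob_lifecycle(PMEMobjpool *pop, TOID(struct blob) *slot)
{
    if (POBJ_ZNEW(pop, slot, struct blob) != 0)
        return;                          /* allocation failed */

    /* resize the allocation; any added space comes back zeroed */
    POBJ_ZREALLOC(pop, slot, struct blob, sizeof(struct blob) + 64);

    POBJ_FREE(slot);                     /* frees the object and clears *slot */
}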
NearPMSW-main/nearpm/checkpointing/pmdkArrSwap-checkpoint/src/include/libpmemobj/iterator_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/iterator_base.h -- definitions of libpmemobj iterator entry points */ #ifndef LIBPMEMOBJ_ITERATOR_BASE_H #define LIBPMEMOBJ_ITERATOR_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * The following functions allow access to the entire collection of objects. * * Use with conjunction with non-transactional allocations. Pmemobj pool acts * as a generic container (list) of objects that are not assigned to any * user-defined data structures. */ /* * Returns the first object of the specified type number. */ PMEMoid pmemobj_first(PMEMobjpool *pop); /* * Returns the next object of the same type. */ PMEMoid pmemobj_next(PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/iterator_base.h */
855
20.4
80
h
null
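A sketch of walking the whole object collection with the base entry points above (the type filtering shown in iterator.h is layered on top of this); count_all_objects is an invented name and pop is assumed to be an open pool.

#include <libpmemobj.h>

static size_t
count_all_objects(PMEMobjpool *pop)
{
    size_t n = 0;
    PMEMoid oid;

    for (oid = pmemobj_first(pop); !OID_IS_NULL(oid); oid = pmemobj_next(oid))
        n++;

    return n;
}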
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/anet.c
/* anet.c -- Basic TCP socket stuff made a bit less boring * * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include <sys/types.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/un.h> #include <sys/time.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <arpa/inet.h> #include <unistd.h> #include <fcntl.h> #include <string.h> #include <netdb.h> #include <errno.h> #include <stdarg.h> #include <stdio.h> #include "anet.h" static void anetSetError(char *err, const char *fmt, ...) { va_list ap; if (!err) return; va_start(ap, fmt); vsnprintf(err, ANET_ERR_LEN, fmt, ap); va_end(ap); } int anetSetBlock(char *err, int fd, int non_block) { int flags; /* Set the socket blocking (if non_block is zero) or non-blocking. * Note that fcntl(2) for F_GETFL and F_SETFL can't be * interrupted by a signal. */ if ((flags = fcntl(fd, F_GETFL)) == -1) { anetSetError(err, "fcntl(F_GETFL): %s", strerror(errno)); return ANET_ERR; } if (non_block) flags |= O_NONBLOCK; else flags &= ~O_NONBLOCK; if (fcntl(fd, F_SETFL, flags) == -1) { anetSetError(err, "fcntl(F_SETFL,O_NONBLOCK): %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } int anetNonBlock(char *err, int fd) { return anetSetBlock(err,fd,1); } int anetBlock(char *err, int fd) { return anetSetBlock(err,fd,0); } /* Set TCP keep alive option to detect dead peers. The interval option * is only used for Linux as we are using Linux-specific APIs to set * the probe send time, interval, and count. */ int anetKeepAlive(char *err, int fd, int interval) { int val = 1; if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) == -1) { anetSetError(err, "setsockopt SO_KEEPALIVE: %s", strerror(errno)); return ANET_ERR; } #ifdef __linux__ /* Default settings are more or less garbage, with the keepalive time * set to 7200 by default on Linux. Modify settings to make the feature * actually useful. */ /* Send first probe after interval. 
*/ val = interval; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0) { anetSetError(err, "setsockopt TCP_KEEPIDLE: %s\n", strerror(errno)); return ANET_ERR; } /* Send next probes after the specified interval. Note that we set the * delay as interval / 3, as we send three probes before detecting * an error (see the next setsockopt call). */ val = interval/3; if (val == 0) val = 1; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &val, sizeof(val)) < 0) { anetSetError(err, "setsockopt TCP_KEEPINTVL: %s\n", strerror(errno)); return ANET_ERR; } /* Consider the socket in error state after three we send three ACK * probes without getting a reply. */ val = 3; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &val, sizeof(val)) < 0) { anetSetError(err, "setsockopt TCP_KEEPCNT: %s\n", strerror(errno)); return ANET_ERR; } #else ((void) interval); /* Avoid unused var warning for non Linux systems. */ #endif return ANET_OK; } static int anetSetTcpNoDelay(char *err, int fd, int val) { if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) == -1) { anetSetError(err, "setsockopt TCP_NODELAY: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } int anetEnableTcpNoDelay(char *err, int fd) { return anetSetTcpNoDelay(err, fd, 1); } int anetDisableTcpNoDelay(char *err, int fd) { return anetSetTcpNoDelay(err, fd, 0); } int anetSetSendBuffer(char *err, int fd, int buffsize) { if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffsize, sizeof(buffsize)) == -1) { anetSetError(err, "setsockopt SO_SNDBUF: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } int anetTcpKeepAlive(char *err, int fd) { int yes = 1; if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes)) == -1) { anetSetError(err, "setsockopt SO_KEEPALIVE: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } /* Set the socket send timeout (SO_SNDTIMEO socket option) to the specified * number of milliseconds, or disable it if the 'ms' argument is zero. */ int anetSendTimeout(char *err, int fd, long long ms) { struct timeval tv; tv.tv_sec = ms/1000; tv.tv_usec = (ms%1000)*1000; if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) == -1) { anetSetError(err, "setsockopt SO_SNDTIMEO: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } /* anetGenericResolve() is called by anetResolve() and anetResolveIP() to * do the actual work. It resolves the hostname "host" and set the string * representation of the IP address into the buffer pointed by "ipbuf". * * If flags is set to ANET_IP_ONLY the function only resolves hostnames * that are actually already IPv4 or IPv6 addresses. This turns the function * into a validating / normalizing function. 
*/ int anetGenericResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len, int flags) { struct addrinfo hints, *info; int rv; memset(&hints,0,sizeof(hints)); if (flags & ANET_IP_ONLY) hints.ai_flags = AI_NUMERICHOST; hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; /* specify socktype to avoid dups */ if ((rv = getaddrinfo(host, NULL, &hints, &info)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); return ANET_ERR; } if (info->ai_family == AF_INET) { struct sockaddr_in *sa = (struct sockaddr_in *)info->ai_addr; inet_ntop(AF_INET, &(sa->sin_addr), ipbuf, ipbuf_len); } else { struct sockaddr_in6 *sa = (struct sockaddr_in6 *)info->ai_addr; inet_ntop(AF_INET6, &(sa->sin6_addr), ipbuf, ipbuf_len); } freeaddrinfo(info); return ANET_OK; } int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len) { return anetGenericResolve(err,host,ipbuf,ipbuf_len,ANET_NONE); } int anetResolveIP(char *err, char *host, char *ipbuf, size_t ipbuf_len) { return anetGenericResolve(err,host,ipbuf,ipbuf_len,ANET_IP_ONLY); } static int anetSetReuseAddr(char *err, int fd) { int yes = 1; /* Make sure connection-intensive things like the redis benckmark * will be able to close/open sockets a zillion of times */ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) == -1) { anetSetError(err, "setsockopt SO_REUSEADDR: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } static int anetCreateSocket(char *err, int domain) { int s; if ((s = socket(domain, SOCK_STREAM, 0)) == -1) { anetSetError(err, "creating socket: %s", strerror(errno)); return ANET_ERR; } /* Make sure connection-intensive things like the redis benchmark * will be able to close/open sockets a zillion of times */ if (anetSetReuseAddr(err,s) == ANET_ERR) { close(s); return ANET_ERR; } return s; } #define ANET_CONNECT_NONE 0 #define ANET_CONNECT_NONBLOCK 1 #define ANET_CONNECT_BE_BINDING 2 /* Best effort binding. */ static int anetTcpGenericConnect(char *err, char *addr, int port, char *source_addr, int flags) { int s = ANET_ERR, rv; char portstr[6]; /* strlen("65535") + 1; */ struct addrinfo hints, *servinfo, *bservinfo, *p, *b; snprintf(portstr,sizeof(portstr),"%d",port); memset(&hints,0,sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; if ((rv = getaddrinfo(addr,portstr,&hints,&servinfo)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); return ANET_ERR; } for (p = servinfo; p != NULL; p = p->ai_next) { /* Try to create the socket and to connect it. * If we fail in the socket() call, or on connect(), we retry with * the next entry in servinfo. */ if ((s = socket(p->ai_family,p->ai_socktype,p->ai_protocol)) == -1) continue; if (anetSetReuseAddr(err,s) == ANET_ERR) goto error; if (flags & ANET_CONNECT_NONBLOCK && anetNonBlock(err,s) != ANET_OK) goto error; if (source_addr) { int bound = 0; /* Using getaddrinfo saves us from self-determining IPv4 vs IPv6 */ if ((rv = getaddrinfo(source_addr, NULL, &hints, &bservinfo)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); goto error; } for (b = bservinfo; b != NULL; b = b->ai_next) { if (bind(s,b->ai_addr,b->ai_addrlen) != -1) { bound = 1; break; } } freeaddrinfo(bservinfo); if (!bound) { anetSetError(err, "bind: %s", strerror(errno)); goto error; } } if (connect(s,p->ai_addr,p->ai_addrlen) == -1) { /* If the socket is non-blocking, it is ok for connect() to * return an EINPROGRESS error here. 
*/ if (errno == EINPROGRESS && flags & ANET_CONNECT_NONBLOCK) goto end; close(s); s = ANET_ERR; continue; } /* If we ended an iteration of the for loop without errors, we * have a connected socket. Let's return to the caller. */ goto end; } if (p == NULL) anetSetError(err, "creating socket: %s", strerror(errno)); error: if (s != ANET_ERR) { close(s); s = ANET_ERR; } end: freeaddrinfo(servinfo); /* Handle best effort binding: if a binding address was used, but it is * not possible to create a socket, try again without a binding address. */ if (s == ANET_ERR && source_addr && (flags & ANET_CONNECT_BE_BINDING)) { return anetTcpGenericConnect(err,addr,port,NULL,flags); } else { return s; } } int anetTcpConnect(char *err, char *addr, int port) { return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONE); } int anetTcpNonBlockConnect(char *err, char *addr, int port) { return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONBLOCK); } int anetTcpNonBlockBindConnect(char *err, char *addr, int port, char *source_addr) { return anetTcpGenericConnect(err,addr,port,source_addr, ANET_CONNECT_NONBLOCK); } int anetTcpNonBlockBestEffortBindConnect(char *err, char *addr, int port, char *source_addr) { return anetTcpGenericConnect(err,addr,port,source_addr, ANET_CONNECT_NONBLOCK|ANET_CONNECT_BE_BINDING); } int anetUnixGenericConnect(char *err, char *path, int flags) { int s; struct sockaddr_un sa; if ((s = anetCreateSocket(err,AF_LOCAL)) == ANET_ERR) return ANET_ERR; sa.sun_family = AF_LOCAL; strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1); if (flags & ANET_CONNECT_NONBLOCK) { if (anetNonBlock(err,s) != ANET_OK) return ANET_ERR; } if (connect(s,(struct sockaddr*)&sa,sizeof(sa)) == -1) { if (errno == EINPROGRESS && flags & ANET_CONNECT_NONBLOCK) return s; anetSetError(err, "connect: %s", strerror(errno)); close(s); return ANET_ERR; } return s; } int anetUnixConnect(char *err, char *path) { return anetUnixGenericConnect(err,path,ANET_CONNECT_NONE); } int anetUnixNonBlockConnect(char *err, char *path) { return anetUnixGenericConnect(err,path,ANET_CONNECT_NONBLOCK); } /* Like read(2) but make sure 'count' is read before to return * (unless error or EOF condition is encountered) */ int anetRead(int fd, char *buf, int count) { ssize_t nread, totlen = 0; while(totlen != count) { nread = read(fd,buf,count-totlen); if (nread == 0) return totlen; if (nread == -1) return -1; totlen += nread; buf += nread; } return totlen; } /* Like write(2) but make sure 'count' is written before to return * (unless error is encountered) */ int anetWrite(int fd, char *buf, int count) { ssize_t nwritten, totlen = 0; while(totlen != count) { nwritten = write(fd,buf,count-totlen); if (nwritten == 0) return totlen; if (nwritten == -1) return -1; totlen += nwritten; buf += nwritten; } return totlen; } static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog) { if (bind(s,sa,len) == -1) { anetSetError(err, "bind: %s", strerror(errno)); close(s); return ANET_ERR; } if (listen(s, backlog) == -1) { anetSetError(err, "listen: %s", strerror(errno)); close(s); return ANET_ERR; } return ANET_OK; } static int anetV6Only(char *err, int s) { int yes = 1; if (setsockopt(s,IPPROTO_IPV6,IPV6_V6ONLY,&yes,sizeof(yes)) == -1) { anetSetError(err, "setsockopt: %s", strerror(errno)); close(s); return ANET_ERR; } return ANET_OK; } static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backlog) { int s, rv; char _port[6]; /* strlen("65535") */ struct addrinfo hints, *servinfo, *p; 
snprintf(_port,6,"%d",port); memset(&hints,0,sizeof(hints)); hints.ai_family = af; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_PASSIVE; /* No effect if bindaddr != NULL */ if ((rv = getaddrinfo(bindaddr,_port,&hints,&servinfo)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); return ANET_ERR; } for (p = servinfo; p != NULL; p = p->ai_next) { if ((s = socket(p->ai_family,p->ai_socktype,p->ai_protocol)) == -1) continue; if (af == AF_INET6 && anetV6Only(err,s) == ANET_ERR) goto error; if (anetSetReuseAddr(err,s) == ANET_ERR) goto error; if (anetListen(err,s,p->ai_addr,p->ai_addrlen,backlog) == ANET_ERR) goto error; goto end; } if (p == NULL) { anetSetError(err, "unable to bind socket, errno: %d", errno); goto error; } error: s = ANET_ERR; end: freeaddrinfo(servinfo); return s; } int anetTcpServer(char *err, int port, char *bindaddr, int backlog) { return _anetTcpServer(err, port, bindaddr, AF_INET, backlog); } int anetTcp6Server(char *err, int port, char *bindaddr, int backlog) { return _anetTcpServer(err, port, bindaddr, AF_INET6, backlog); } int anetUnixServer(char *err, char *path, mode_t perm, int backlog) { int s; struct sockaddr_un sa; if ((s = anetCreateSocket(err,AF_LOCAL)) == ANET_ERR) return ANET_ERR; memset(&sa,0,sizeof(sa)); sa.sun_family = AF_LOCAL; strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1); if (anetListen(err,s,(struct sockaddr*)&sa,sizeof(sa),backlog) == ANET_ERR) return ANET_ERR; if (perm) chmod(sa.sun_path, perm); return s; } static int anetGenericAccept(char *err, int s, struct sockaddr *sa, socklen_t *len) { int fd; while(1) { fd = accept(s,sa,len); if (fd == -1) { if (errno == EINTR) continue; else { anetSetError(err, "accept: %s", strerror(errno)); return ANET_ERR; } } break; } return fd; } int anetTcpAccept(char *err, int s, char *ip, size_t ip_len, int *port) { int fd; struct sockaddr_storage sa; socklen_t salen = sizeof(sa); if ((fd = anetGenericAccept(err,s,(struct sockaddr*)&sa,&salen)) == -1) return ANET_ERR; if (sa.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&sa; if (ip) inet_ntop(AF_INET,(void*)&(s->sin_addr),ip,ip_len); if (port) *port = ntohs(s->sin_port); } else { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&sa; if (ip) inet_ntop(AF_INET6,(void*)&(s->sin6_addr),ip,ip_len); if (port) *port = ntohs(s->sin6_port); } return fd; } int anetUnixAccept(char *err, int s) { int fd; struct sockaddr_un sa; socklen_t salen = sizeof(sa); if ((fd = anetGenericAccept(err,s,(struct sockaddr*)&sa,&salen)) == -1) return ANET_ERR; return fd; } int anetPeerToString(int fd, char *ip, size_t ip_len, int *port) { struct sockaddr_storage sa; socklen_t salen = sizeof(sa); if (getpeername(fd,(struct sockaddr*)&sa,&salen) == -1) goto error; if (ip_len == 0) goto error; if (sa.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&sa; if (ip) inet_ntop(AF_INET,(void*)&(s->sin_addr),ip,ip_len); if (port) *port = ntohs(s->sin_port); } else if (sa.ss_family == AF_INET6) { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&sa; if (ip) inet_ntop(AF_INET6,(void*)&(s->sin6_addr),ip,ip_len); if (port) *port = ntohs(s->sin6_port); } else if (sa.ss_family == AF_UNIX) { if (ip) strncpy(ip,"/unixsocket",ip_len); if (port) *port = 0; } else { goto error; } return 0; error: if (ip) { if (ip_len >= 2) { ip[0] = '?'; ip[1] = '\0'; } else if (ip_len == 1) { ip[0] = '\0'; } } if (port) *port = 0; return -1; } /* Format an IP,port pair into something easy to parse. If IP is IPv6 * (matches for ":"), the ip is surrounded by []. 
IP and port are just * separated by colons. This is the standard to display addresses within Redis. */ int anetFormatAddr(char *buf, size_t buf_len, char *ip, int port) { return snprintf(buf,buf_len, strchr(ip,':') ? "[%s]:%d" : "%s:%d", ip, port); } /* Like anetFormatAddr() but extracts ip and port from the socket's peer. */ int anetFormatPeer(int fd, char *buf, size_t buf_len) { char ip[INET6_ADDRSTRLEN]; int port; anetPeerToString(fd,ip,sizeof(ip),&port); return anetFormatAddr(buf, buf_len, ip, port); } int anetSockName(int fd, char *ip, size_t ip_len, int *port) { struct sockaddr_storage sa; socklen_t salen = sizeof(sa); if (getsockname(fd,(struct sockaddr*)&sa,&salen) == -1) { if (port) *port = 0; ip[0] = '?'; ip[1] = '\0'; return -1; } if (sa.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&sa; if (ip) inet_ntop(AF_INET,(void*)&(s->sin_addr),ip,ip_len); if (port) *port = ntohs(s->sin_port); } else { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&sa; if (ip) inet_ntop(AF_INET6,(void*)&(s->sin6_addr),ip,ip_len); if (port) *port = ntohs(s->sin6_port); } return 0; } int anetFormatSock(int fd, char *fmt, size_t fmt_len) { char ip[INET6_ADDRSTRLEN]; int port; anetSockName(fd,ip,sizeof(ip),&port); return anetFormatAddr(fmt, fmt_len, ip, port); }
20,633
30.454268
90
c
null
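The keepalive tuning in anet.c above (idle time, probe interval set to interval/3, three probes before declaring the peer dead) can be exercised outside Redis with a minimal Linux-only sketch; the socket creation and the concrete values below are illustrative assumptions, not part of anet.c.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Minimal sketch: apply the same keepalive tuning described above to a
 * freshly created TCP socket. The interval of 300 seconds is arbitrary. */
int main(void) {
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd == -1) { perror("socket"); return 1; }

    int yes = 1, idle = 300, intvl = 300/3, cnt = 3;
    if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes)) == -1 ||
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) == -1 ||
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) == -1 ||
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) == -1) {
        fprintf(stderr, "setsockopt: %s\n", strerror(errno));
        return 1;
    }
    printf("keepalive: idle=%ds interval=%ds probes=%d\n", idle, intvl, cnt);
    return 0;
}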
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/blocked.c
/* blocked.c - generic support for blocking operations like BLPOP & WAIT. * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * --------------------------------------------------------------------------- * * API: * * getTimeoutFromObjectOrReply() is just a utility function to parse a * timeout argument since blocking operations usually require a timeout. * * blockClient() sets the CLIENT_BLOCKED flag in the client, and sets the * specified block type 'btype' field to one of the BLOCKED_* macros. * * unblockClient() unblocks the client doing the following: * 1) It calls the btype-specific function to cleanup the state. * 2) It unblocks the client by unsetting the CLIENT_BLOCKED flag. * 3) It puts the client into a list of just unblocked clients that are * processed ASAP in the beforeSleep() event loop callback, so that * if there is some query buffer to process, we do it. This is also * required because otherwise there is no 'readable' event fired, since we * already read the pending commands. We also set the CLIENT_UNBLOCKED * flag to remember the client is in the unblocked_clients list. * * processUnblockedClients() is called inside the beforeSleep() function * to process the query buffer from unblocked clients and remove the clients * from the blocked_clients queue. * * replyToBlockedClientTimedOut() is called by the cron function when * a blocked client reaches the specified timeout (if the timeout is set * to 0, no timeout is processed). * It usually just needs to send a reply to the client. * * When implementing a new type of blocking operation, the implementation * should modify unblockClient() and replyToBlockedClientTimedOut() in order * to handle the btype-specific behavior of these two functions. * If the blocking operation waits for certain keys to change state, the * clusterRedirectBlockedClientIfNeeded() function should also be updated. */ #include "server.h" /* Get a timeout value from an object and store it into 'timeout'. 
* The final timeout is always stored as milliseconds as a time where the * timeout will expire, however the parsing is performed according to * the 'unit' that can be seconds or milliseconds. * * Note that if the timeout is zero (usually from the point of view of * commands API this means no timeout) the value stored into 'timeout' * is zero. */ int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit) { long long tval; if (getLongLongFromObjectOrReply(c,object,&tval, "timeout is not an integer or out of range") != C_OK) return C_ERR; if (tval < 0) { addReplyError(c,"timeout is negative"); return C_ERR; } if (tval > 0) { if (unit == UNIT_SECONDS) tval *= 1000; tval += mstime(); } *timeout = tval; return C_OK; } /* Block a client for the specific operation type. Once the CLIENT_BLOCKED * flag is set, the client query buffer is no longer processed, but accumulated, * and will be processed when the client is unblocked. */ void blockClient(client *c, int btype) { c->flags |= CLIENT_BLOCKED; c->btype = btype; server.bpop_blocked_clients++; } /* This function is called in the beforeSleep() function of the event loop * in order to process the pending input buffer of clients that were * unblocked after a blocking operation. */ void processUnblockedClients(void) { listNode *ln; client *c; while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); serverAssert(ln != NULL); c = ln->value; listDelNode(server.unblocked_clients,ln); c->flags &= ~CLIENT_UNBLOCKED; /* Process remaining data in the input buffer, unless the client * is blocked again. Actually processInputBuffer() checks that the * client is not blocked before proceeding, but things may change and * the code is conceptually more correct this way. */ if (!(c->flags & CLIENT_BLOCKED)) { if (c->querybuf && sdslen(c->querybuf) > 0) { processInputBuffer(c); } } } } /* Unblock a client calling the right function depending on the kind * of operation the client is blocking for. */ void unblockClient(client *c) { if (c->btype == BLOCKED_LIST) { unblockClientWaitingData(c); } else if (c->btype == BLOCKED_WAIT) { unblockClientWaitingReplicas(c); } else { serverPanic("Unknown btype in unblockClient()."); } /* Clear the flags, and put the client in the unblocked list so that * we'll process new commands in its query buffer ASAP. */ c->flags &= ~CLIENT_BLOCKED; c->btype = BLOCKED_NONE; server.bpop_blocked_clients--; /* The client may already be in the unblocked list because of a previous * blocking operation, don't add it back into the list multiple times. */ if (!(c->flags & CLIENT_UNBLOCKED)) { c->flags |= CLIENT_UNBLOCKED; listAddNodeTail(server.unblocked_clients,c); } } /* This function gets called when a blocked client timed out in order to * send it a reply of some kind. */ void replyToBlockedClientTimedOut(client *c) { if (c->btype == BLOCKED_LIST) { addReply(c,shared.nullmultibulk); } else if (c->btype == BLOCKED_WAIT) { addReplyLongLong(c,replicationCountAcksByOffset(c->bpop.reploffset)); } else { serverPanic("Unknown btype in replyToBlockedClientTimedOut()."); } } /* Mass-unblock clients because something changed in the instance that makes * blocking no longer safe. For example clients blocked in list operations * in an instance which turns from master to slave are unsafe, so this function * is called when a master turns into a slave. * * The semantics is to send an -UNBLOCKED error to the client, disconnecting * it at the same time. 
*/ void disconnectAllBlockedClients(void) { listNode *ln; listIter li; listRewind(server.clients,&li); while((ln = listNext(&li))) { client *c = listNodeValue(ln); if (c->flags & CLIENT_BLOCKED) { addReplySds(c,sdsnew( "-UNBLOCKED force unblock from blocking operation, " "instance state changed (master -> slave?)\r\n")); unblockClient(c); c->flags |= CLIENT_CLOSE_AFTER_REPLY; } } }
7,966
40.712042
87
c
null
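getTimeoutFromObjectOrReply() above converts a user-supplied timeout into an absolute expiry in milliseconds, with 0 meaning "no timeout". A standalone sketch of just that conversion rule follows; the now_ms() and timeout_to_deadline() names are hypothetical stand-ins for Redis' mstime() and the real function, which also depend on the client and object machinery.

#include <stdio.h>
#include <sys/time.h>

/* Hypothetical stand-in for Redis' mstime(): current UNIX time in ms. */
static long long now_ms(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return ((long long)tv.tv_sec) * 1000 + tv.tv_usec / 1000;
}

/* Sketch of the rule: negative is rejected, 0 stays 0 (no timeout),
 * positive values become an absolute deadline in milliseconds. */
static int timeout_to_deadline(long long tval, int unit_is_seconds, long long *deadline) {
    if (tval < 0) return -1;              /* "timeout is negative" */
    if (tval > 0) {
        if (unit_is_seconds) tval *= 1000;
        tval += now_ms();
    }
    *deadline = tval;
    return 0;
}

int main(void) {
    long long deadline;
    if (timeout_to_deadline(5, 1, &deadline) == 0)
        printf("a 5 second timeout expires at %lld ms\n", deadline);
    return 0;
}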
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/help.h
/* Automatically generated by utils/generate-command-help.rb, do not edit. */ #ifndef __REDIS_HELP_H #define __REDIS_HELP_H static char *commandGroups[] = { "generic", "string", "list", "set", "sorted_set", "hash", "pubsub", "transactions", "connection", "server", "scripting", "hyperloglog", "cluster", "geo" }; struct commandHelp { char *name; char *params; char *summary; int group; char *since; } commandHelp[] = { { "APPEND", "key value", "Append a value to a key", 1, "2.0.0" }, { "AUTH", "password", "Authenticate to the server", 8, "1.0.0" }, { "BGREWRITEAOF", "-", "Asynchronously rewrite the append-only file", 9, "1.0.0" }, { "BGSAVE", "-", "Asynchronously save the dataset to disk", 9, "1.0.0" }, { "BITCOUNT", "key [start end]", "Count set bits in a string", 1, "2.6.0" }, { "BITFIELD", "key [GET type offset] [SET type offset value] [INCRBY type offset increment] [OVERFLOW WRAP|SAT|FAIL]", "Perform arbitrary bitfield integer operations on strings", 1, "3.2.0" }, { "BITOP", "operation destkey key [key ...]", "Perform bitwise operations between strings", 1, "2.6.0" }, { "BITPOS", "key bit [start] [end]", "Find first bit set or clear in a string", 1, "2.8.7" }, { "BLPOP", "key [key ...] timeout", "Remove and get the first element in a list, or block until one is available", 2, "2.0.0" }, { "BRPOP", "key [key ...] timeout", "Remove and get the last element in a list, or block until one is available", 2, "2.0.0" }, { "BRPOPLPUSH", "source destination timeout", "Pop a value from a list, push it to another list and return it; or block until one is available", 2, "2.2.0" }, { "CLIENT GETNAME", "-", "Get the current connection name", 9, "2.6.9" }, { "CLIENT KILL", "[ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [ADDR ip:port] [SKIPME yes/no]", "Kill the connection of a client", 9, "2.4.0" }, { "CLIENT LIST", "-", "Get the list of client connections", 9, "2.4.0" }, { "CLIENT PAUSE", "timeout", "Stop processing commands from clients for some time", 9, "2.9.50" }, { "CLIENT REPLY", "ON|OFF|SKIP", "Instruct the server whether to reply to commands", 9, "3.2" }, { "CLIENT SETNAME", "connection-name", "Set the current connection name", 9, "2.6.9" }, { "CLUSTER ADDSLOTS", "slot [slot ...]", "Assign new hash slots to receiving node", 12, "3.0.0" }, { "CLUSTER COUNT-FAILURE-REPORTS", "node-id", "Return the number of failure reports active for a given node", 12, "3.0.0" }, { "CLUSTER COUNTKEYSINSLOT", "slot", "Return the number of local keys in the specified hash slot", 12, "3.0.0" }, { "CLUSTER DELSLOTS", "slot [slot ...]", "Set hash slots as unbound in receiving node", 12, "3.0.0" }, { "CLUSTER FAILOVER", "[FORCE|TAKEOVER]", "Forces a slave to perform a manual failover of its master.", 12, "3.0.0" }, { "CLUSTER FORGET", "node-id", "Remove a node from the nodes table", 12, "3.0.0" }, { "CLUSTER GETKEYSINSLOT", "slot count", "Return local key names in the specified hash slot", 12, "3.0.0" }, { "CLUSTER INFO", "-", "Provides info about Redis Cluster node state", 12, "3.0.0" }, { "CLUSTER KEYSLOT", "key", "Returns the hash slot of the specified key", 12, "3.0.0" }, { "CLUSTER MEET", "ip port", "Force a node cluster to handshake with another node", 12, "3.0.0" }, { "CLUSTER NODES", "-", "Get Cluster config for the node", 12, "3.0.0" }, { "CLUSTER REPLICATE", "node-id", "Reconfigure a node as a slave of the specified master node", 12, "3.0.0" }, { "CLUSTER RESET", "[HARD|SOFT]", "Reset a Redis Cluster node", 12, "3.0.0" }, { "CLUSTER SAVECONFIG", "-", "Forces the node to save cluster state on disk", 
12, "3.0.0" }, { "CLUSTER SET-CONFIG-EPOCH", "config-epoch", "Set the configuration epoch in a new node", 12, "3.0.0" }, { "CLUSTER SETSLOT", "slot IMPORTING|MIGRATING|STABLE|NODE [node-id]", "Bind a hash slot to a specific node", 12, "3.0.0" }, { "CLUSTER SLAVES", "node-id", "List slave nodes of the specified master node", 12, "3.0.0" }, { "CLUSTER SLOTS", "-", "Get array of Cluster slot to node mappings", 12, "3.0.0" }, { "COMMAND", "-", "Get array of Redis command details", 9, "2.8.13" }, { "COMMAND COUNT", "-", "Get total number of Redis commands", 9, "2.8.13" }, { "COMMAND GETKEYS", "-", "Extract keys given a full Redis command", 9, "2.8.13" }, { "COMMAND INFO", "command-name [command-name ...]", "Get array of specific Redis command details", 9, "2.8.13" }, { "CONFIG GET", "parameter", "Get the value of a configuration parameter", 9, "2.0.0" }, { "CONFIG RESETSTAT", "-", "Reset the stats returned by INFO", 9, "2.0.0" }, { "CONFIG REWRITE", "-", "Rewrite the configuration file with the in memory configuration", 9, "2.8.0" }, { "CONFIG SET", "parameter value", "Set a configuration parameter to the given value", 9, "2.0.0" }, { "DBSIZE", "-", "Return the number of keys in the selected database", 9, "1.0.0" }, { "DEBUG OBJECT", "key", "Get debugging information about a key", 9, "1.0.0" }, { "DEBUG SEGFAULT", "-", "Make the server crash", 9, "1.0.0" }, { "DECR", "key", "Decrement the integer value of a key by one", 1, "1.0.0" }, { "DECRBY", "key decrement", "Decrement the integer value of a key by the given number", 1, "1.0.0" }, { "DEL", "key [key ...]", "Delete a key", 0, "1.0.0" }, { "DISCARD", "-", "Discard all commands issued after MULTI", 7, "2.0.0" }, { "DUMP", "key", "Return a serialized version of the value stored at the specified key.", 0, "2.6.0" }, { "ECHO", "message", "Echo the given string", 8, "1.0.0" }, { "EVAL", "script numkeys key [key ...] arg [arg ...]", "Execute a Lua script server side", 10, "2.6.0" }, { "EVALSHA", "sha1 numkeys key [key ...] 
arg [arg ...]", "Execute a Lua script server side", 10, "2.6.0" }, { "EXEC", "-", "Execute all commands issued after MULTI", 7, "1.2.0" }, { "EXISTS", "key [key ...]", "Determine if a key exists", 0, "1.0.0" }, { "EXPIRE", "key seconds", "Set a key's time to live in seconds", 0, "1.0.0" }, { "EXPIREAT", "key timestamp", "Set the expiration for a key as a UNIX timestamp", 0, "1.2.0" }, { "FLUSHALL", "-", "Remove all keys from all databases", 9, "1.0.0" }, { "FLUSHDB", "-", "Remove all keys from the current database", 9, "1.0.0" }, { "GEOADD", "key longitude latitude member [longitude latitude member ...]", "Add one or more geospatial items in the geospatial index represented using a sorted set", 13, "3.2.0" }, { "GEODIST", "key member1 member2 [unit]", "Returns the distance between two members of a geospatial index", 13, "3.2.0" }, { "GEOHASH", "key member [member ...]", "Returns members of a geospatial index as standard geohash strings", 13, "3.2.0" }, { "GEOPOS", "key member [member ...]", "Returns longitude and latitude of members of a geospatial index", 13, "3.2.0" }, { "GEORADIUS", "key longitude latitude radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count] [ASC|DESC] [STORE key] [STOREDIST key]", "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", 13, "3.2.0" }, { "GEORADIUSBYMEMBER", "key member radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count] [ASC|DESC] [STORE key] [STOREDIST key]", "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member", 13, "3.2.0" }, { "GET", "key", "Get the value of a key", 1, "1.0.0" }, { "GETBIT", "key offset", "Returns the bit value at offset in the string value stored at key", 1, "2.2.0" }, { "GETRANGE", "key start end", "Get a substring of the string stored at a key", 1, "2.4.0" }, { "GETSET", "key value", "Set the string value of a key and return its old value", 1, "1.0.0" }, { "HDEL", "key field [field ...]", "Delete one or more hash fields", 5, "2.0.0" }, { "HEXISTS", "key field", "Determine if a hash field exists", 5, "2.0.0" }, { "HGET", "key field", "Get the value of a hash field", 5, "2.0.0" }, { "HGETALL", "key", "Get all the fields and values in a hash", 5, "2.0.0" }, { "HINCRBY", "key field increment", "Increment the integer value of a hash field by the given number", 5, "2.0.0" }, { "HINCRBYFLOAT", "key field increment", "Increment the float value of a hash field by the given amount", 5, "2.6.0" }, { "HKEYS", "key", "Get all the fields in a hash", 5, "2.0.0" }, { "HLEN", "key", "Get the number of fields in a hash", 5, "2.0.0" }, { "HMGET", "key field [field ...]", "Get the values of all the given hash fields", 5, "2.0.0" }, { "HMSET", "key field value [field value ...]", "Set multiple hash fields to multiple values", 5, "2.0.0" }, { "HSCAN", "key cursor [MATCH pattern] [COUNT count]", "Incrementally iterate hash fields and associated values", 5, "2.8.0" }, { "HSET", "key field value", "Set the string value of a hash field", 5, "2.0.0" }, { "HSETNX", "key field value", "Set the value of a hash field, only if the field does not exist", 5, "2.0.0" }, { "HSTRLEN", "key field", "Get the length of the value of a hash field", 5, "3.2.0" }, { "HVALS", "key", "Get all the values in a hash", 5, "2.0.0" }, { "INCR", "key", "Increment the integer value of a key by one", 1, "1.0.0" }, { "INCRBY", "key increment", "Increment the integer value of a key by the given amount", 1, "1.0.0" }, { "INCRBYFLOAT", 
"key increment", "Increment the float value of a key by the given amount", 1, "2.6.0" }, { "INFO", "[section]", "Get information and statistics about the server", 9, "1.0.0" }, { "KEYS", "pattern", "Find all keys matching the given pattern", 0, "1.0.0" }, { "LASTSAVE", "-", "Get the UNIX time stamp of the last successful save to disk", 9, "1.0.0" }, { "LINDEX", "key index", "Get an element from a list by its index", 2, "1.0.0" }, { "LINSERT", "key BEFORE|AFTER pivot value", "Insert an element before or after another element in a list", 2, "2.2.0" }, { "LLEN", "key", "Get the length of a list", 2, "1.0.0" }, { "LPOP", "key", "Remove and get the first element in a list", 2, "1.0.0" }, { "LPUSH", "key value [value ...]", "Prepend one or multiple values to a list", 2, "1.0.0" }, { "LPUSHX", "key value", "Prepend a value to a list, only if the list exists", 2, "2.2.0" }, { "LRANGE", "key start stop", "Get a range of elements from a list", 2, "1.0.0" }, { "LREM", "key count value", "Remove elements from a list", 2, "1.0.0" }, { "LSET", "key index value", "Set the value of an element in a list by its index", 2, "1.0.0" }, { "LTRIM", "key start stop", "Trim a list to the specified range", 2, "1.0.0" }, { "MGET", "key [key ...]", "Get the values of all the given keys", 1, "1.0.0" }, { "MIGRATE", "host port key|"" destination-db timeout [COPY] [REPLACE] [KEYS key]", "Atomically transfer a key from a Redis instance to another one.", 0, "2.6.0" }, { "MONITOR", "-", "Listen for all requests received by the server in real time", 9, "1.0.0" }, { "MOVE", "key db", "Move a key to another database", 0, "1.0.0" }, { "MSET", "key value [key value ...]", "Set multiple keys to multiple values", 1, "1.0.1" }, { "MSETNX", "key value [key value ...]", "Set multiple keys to multiple values, only if none of the keys exist", 1, "1.0.1" }, { "MULTI", "-", "Mark the start of a transaction block", 7, "1.2.0" }, { "OBJECT", "subcommand [arguments [arguments ...]]", "Inspect the internals of Redis objects", 0, "2.2.3" }, { "PERSIST", "key", "Remove the expiration from a key", 0, "2.2.0" }, { "PEXPIRE", "key milliseconds", "Set a key's time to live in milliseconds", 0, "2.6.0" }, { "PEXPIREAT", "key milliseconds-timestamp", "Set the expiration for a key as a UNIX timestamp specified in milliseconds", 0, "2.6.0" }, { "PFADD", "key element [element ...]", "Adds the specified elements to the specified HyperLogLog.", 11, "2.8.9" }, { "PFCOUNT", "key [key ...]", "Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s).", 11, "2.8.9" }, { "PFMERGE", "destkey sourcekey [sourcekey ...]", "Merge N different HyperLogLogs into a single one.", 11, "2.8.9" }, { "PING", "[message]", "Ping the server", 8, "1.0.0" }, { "PSETEX", "key milliseconds value", "Set the value and expiration in milliseconds of a key", 1, "2.6.0" }, { "PSUBSCRIBE", "pattern [pattern ...]", "Listen for messages published to channels matching the given patterns", 6, "2.0.0" }, { "PTTL", "key", "Get the time to live for a key in milliseconds", 0, "2.6.0" }, { "PUBLISH", "channel message", "Post a message to a channel", 6, "2.0.0" }, { "PUBSUB", "subcommand [argument [argument ...]]", "Inspect the state of the Pub/Sub subsystem", 6, "2.8.0" }, { "PUNSUBSCRIBE", "[pattern [pattern ...]]", "Stop listening for messages posted to channels matching the given patterns", 6, "2.0.0" }, { "QUIT", "-", "Close the connection", 8, "1.0.0" }, { "RANDOMKEY", "-", "Return a random key from the keyspace", 0, "1.0.0" }, { "READONLY", "-", "Enables read 
queries for a connection to a cluster slave node", 12, "3.0.0" }, { "READWRITE", "-", "Disables read queries for a connection to a cluster slave node", 12, "3.0.0" }, { "RENAME", "key newkey", "Rename a key", 0, "1.0.0" }, { "RENAMENX", "key newkey", "Rename a key, only if the new key does not exist", 0, "1.0.0" }, { "RESTORE", "key ttl serialized-value [REPLACE]", "Create a key using the provided serialized value, previously obtained using DUMP.", 0, "2.6.0" }, { "ROLE", "-", "Return the role of the instance in the context of replication", 9, "2.8.12" }, { "RPOP", "key", "Remove and get the last element in a list", 2, "1.0.0" }, { "RPOPLPUSH", "source destination", "Remove the last element in a list, prepend it to another list and return it", 2, "1.2.0" }, { "RPUSH", "key value [value ...]", "Append one or multiple values to a list", 2, "1.0.0" }, { "RPUSHX", "key value", "Append a value to a list, only if the list exists", 2, "2.2.0" }, { "SADD", "key member [member ...]", "Add one or more members to a set", 3, "1.0.0" }, { "SAVE", "-", "Synchronously save the dataset to disk", 9, "1.0.0" }, { "SCAN", "cursor [MATCH pattern] [COUNT count]", "Incrementally iterate the keys space", 0, "2.8.0" }, { "SCARD", "key", "Get the number of members in a set", 3, "1.0.0" }, { "SCRIPT DEBUG", "YES|SYNC|NO", "Set the debug mode for executed scripts.", 10, "3.2.0" }, { "SCRIPT EXISTS", "script [script ...]", "Check existence of scripts in the script cache.", 10, "2.6.0" }, { "SCRIPT FLUSH", "-", "Remove all the scripts from the script cache.", 10, "2.6.0" }, { "SCRIPT KILL", "-", "Kill the script currently in execution.", 10, "2.6.0" }, { "SCRIPT LOAD", "script", "Load the specified Lua script into the script cache.", 10, "2.6.0" }, { "SDIFF", "key [key ...]", "Subtract multiple sets", 3, "1.0.0" }, { "SDIFFSTORE", "destination key [key ...]", "Subtract multiple sets and store the resulting set in a key", 3, "1.0.0" }, { "SELECT", "index", "Change the selected database for the current connection", 8, "1.0.0" }, { "SET", "key value [EX seconds] [PX milliseconds] [NX|XX]", "Set the string value of a key", 1, "1.0.0" }, { "SETBIT", "key offset value", "Sets or clears the bit at offset in the string value stored at key", 1, "2.2.0" }, { "SETEX", "key seconds value", "Set the value and expiration of a key", 1, "2.0.0" }, { "SETNX", "key value", "Set the value of a key, only if the key does not exist", 1, "1.0.0" }, { "SETRANGE", "key offset value", "Overwrite part of a string at key starting at the specified offset", 1, "2.2.0" }, { "SHUTDOWN", "[NOSAVE|SAVE]", "Synchronously save the dataset to disk and then shut down the server", 9, "1.0.0" }, { "SINTER", "key [key ...]", "Intersect multiple sets", 3, "1.0.0" }, { "SINTERSTORE", "destination key [key ...]", "Intersect multiple sets and store the resulting set in a key", 3, "1.0.0" }, { "SISMEMBER", "key member", "Determine if a given value is a member of a set", 3, "1.0.0" }, { "SLAVEOF", "host port", "Make the server a slave of another instance, or promote it as master", 9, "1.0.0" }, { "SLOWLOG", "subcommand [argument]", "Manages the Redis slow queries log", 9, "2.2.12" }, { "SMEMBERS", "key", "Get all the members in a set", 3, "1.0.0" }, { "SMOVE", "source destination member", "Move a member from one set to another", 3, "1.0.0" }, { "SORT", "key [BY pattern] [LIMIT offset count] [GET pattern [GET pattern ...]] [ASC|DESC] [ALPHA] [STORE destination]", "Sort the elements in a list, set or sorted set", 0, "1.0.0" }, { "SPOP", "key [count]", "Remove and 
return one or multiple random members from a set", 3, "1.0.0" }, { "SRANDMEMBER", "key [count]", "Get one or multiple random members from a set", 3, "1.0.0" }, { "SREM", "key member [member ...]", "Remove one or more members from a set", 3, "1.0.0" }, { "SSCAN", "key cursor [MATCH pattern] [COUNT count]", "Incrementally iterate Set elements", 3, "2.8.0" }, { "STRLEN", "key", "Get the length of the value stored in a key", 1, "2.2.0" }, { "SUBSCRIBE", "channel [channel ...]", "Listen for messages published to the given channels", 6, "2.0.0" }, { "SUNION", "key [key ...]", "Add multiple sets", 3, "1.0.0" }, { "SUNIONSTORE", "destination key [key ...]", "Add multiple sets and store the resulting set in a key", 3, "1.0.0" }, { "SYNC", "-", "Internal command used for replication", 9, "1.0.0" }, { "TIME", "-", "Return the current server time", 9, "2.6.0" }, { "TTL", "key", "Get the time to live for a key", 0, "1.0.0" }, { "TYPE", "key", "Determine the type stored at key", 0, "1.0.0" }, { "UNSUBSCRIBE", "[channel [channel ...]]", "Stop listening for messages posted to the given channels", 6, "2.0.0" }, { "UNWATCH", "-", "Forget about all watched keys", 7, "2.2.0" }, { "WAIT", "numslaves timeout", "Wait for the synchronous replication of all the write commands sent in the context of the current connection", 0, "3.0.0" }, { "WATCH", "key [key ...]", "Watch the given keys to determine execution of the MULTI/EXEC block", 7, "2.2.0" }, { "ZADD", "key [NX|XX] [CH] [INCR] score member [score member ...]", "Add one or more members to a sorted set, or update its score if it already exists", 4, "1.2.0" }, { "ZCARD", "key", "Get the number of members in a sorted set", 4, "1.2.0" }, { "ZCOUNT", "key min max", "Count the members in a sorted set with scores within the given values", 4, "2.0.0" }, { "ZINCRBY", "key increment member", "Increment the score of a member in a sorted set", 4, "1.2.0" }, { "ZINTERSTORE", "destination numkeys key [key ...] 
[WEIGHTS weight] [AGGREGATE SUM|MIN|MAX]", "Intersect multiple sorted sets and store the resulting sorted set in a new key", 4, "2.0.0" }, { "ZLEXCOUNT", "key min max", "Count the number of members in a sorted set between a given lexicographical range", 4, "2.8.9" }, { "ZRANGE", "key start stop [WITHSCORES]", "Return a range of members in a sorted set, by index", 4, "1.2.0" }, { "ZRANGEBYLEX", "key min max [LIMIT offset count]", "Return a range of members in a sorted set, by lexicographical range", 4, "2.8.9" }, { "ZRANGEBYSCORE", "key min max [WITHSCORES] [LIMIT offset count]", "Return a range of members in a sorted set, by score", 4, "1.0.5" }, { "ZRANK", "key member", "Determine the index of a member in a sorted set", 4, "2.0.0" }, { "ZREM", "key member [member ...]", "Remove one or more members from a sorted set", 4, "1.2.0" }, { "ZREMRANGEBYLEX", "key min max", "Remove all members in a sorted set between the given lexicographical range", 4, "2.8.9" }, { "ZREMRANGEBYRANK", "key start stop", "Remove all members in a sorted set within the given indexes", 4, "2.0.0" }, { "ZREMRANGEBYSCORE", "key min max", "Remove all members in a sorted set within the given scores", 4, "1.2.0" }, { "ZREVRANGE", "key start stop [WITHSCORES]", "Return a range of members in a sorted set, by index, with scores ordered from high to low", 4, "1.2.0" }, { "ZREVRANGEBYLEX", "key max min [LIMIT offset count]", "Return a range of members in a sorted set, by lexicographical range, ordered from higher to lower strings.", 4, "2.8.9" }, { "ZREVRANGEBYSCORE", "key max min [WITHSCORES] [LIMIT offset count]", "Return a range of members in a sorted set, by score, with scores ordered from high to low", 4, "2.2.0" }, { "ZREVRANK", "key member", "Determine the index of a member in a sorted set, with scores ordered from high to low", 4, "2.0.0" }, { "ZSCAN", "key cursor [MATCH pattern] [COUNT count]", "Incrementally iterate sorted sets elements and associated scores", 4, "2.8.0" }, { "ZSCORE", "key member", "Get the score associated with the given member in a sorted set", 4, "1.2.0" }, { "ZUNIONSTORE", "destination numkeys key [key ...] [WEIGHTS weight] [AGGREGATE SUM|MIN|MAX]", "Add multiple sorted sets and store the resulting sorted set in a new key", 4, "2.0.0" } }; #endif
24,462
23.030452
134
h
null
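The generated commandHelp[] table in help.h above is a flat array keyed by command name; a client can look entries up with a case-insensitive scan. A self-contained sketch of that lookup follows, using a tiny stand-in table (the real one is the generated array above), so the names and entries here are illustrative only.

#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strcasecmp */

struct commandHelp { const char *name, *params, *summary; int group; const char *since; };

/* Tiny stand-in table; the real table is the generated one in help.h. */
static struct commandHelp table[] = {
    { "GET", "key", "Get the value of a key", 1, "1.0.0" },
    { "SET", "key value [EX seconds] [PX milliseconds] [NX|XX]", "Set the string value of a key", 1, "1.0.0" },
};

/* Linear, case-insensitive search by command name. */
static const struct commandHelp *lookup(const char *name) {
    for (size_t i = 0; i < sizeof(table)/sizeof(table[0]); i++)
        if (strcasecmp(table[i].name, name) == 0) return &table[i];
    return NULL;
}

int main(void) {
    const struct commandHelp *h = lookup("set");
    if (h) printf("%s %s -- %s (since %s)\n", h->name, h->params, h->summary, h->since);
    return 0;
}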
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/sha1.h
#ifndef SHA1_H #define SHA1_H /* ================ sha1.h ================ */ /* SHA-1 in C By Steve Reid <steve@edmweb.com> 100% Public Domain */ typedef struct { uint32_t state[5]; uint32_t count[2]; unsigned char buffer[64]; } SHA1_CTX; void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]); void SHA1Init(SHA1_CTX* context); void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len); void SHA1Final(unsigned char digest[20], SHA1_CTX* context); #ifdef REDIS_TEST int sha1Test(int argc, char **argv); #endif #endif
566
21.68
76
h
null
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/config.h
/* * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __CONFIG_H #define __CONFIG_H #ifdef __APPLE__ #include <AvailabilityMacros.h> #endif #ifdef __linux__ #include <linux/version.h> #include <features.h> #endif /* Define redis_fstat to fstat or fstat64() */ #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6) #define redis_fstat fstat64 #define redis_stat stat64 #else #define redis_fstat fstat #define redis_stat stat #endif /* Test for proc filesystem */ #ifdef __linux__ #define HAVE_PROC_STAT 1 #define HAVE_PROC_MAPS 1 #define HAVE_PROC_SMAPS 1 #define HAVE_PROC_SOMAXCONN 1 #endif /* Test for task_info() */ #if defined(__APPLE__) #define HAVE_TASKINFO 1 #endif /* Test for backtrace() */ #if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) #define HAVE_BACKTRACE 1 #endif /* MSG_NOSIGNAL. */ #ifdef __linux__ #define HAVE_MSG_NOSIGNAL 1 #endif /* Test for polling API */ #ifdef __linux__ #define HAVE_EPOLL 1 #endif #if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__) #define HAVE_KQUEUE 1 #endif #ifdef __sun #include <sys/feature_tests.h> #ifdef _DTRACE_VERSION #define HAVE_EVPORT 1 #endif #endif /* Define aof_fsync to fdatasync() in Linux and fsync() for all the rest */ #ifdef __linux__ #define aof_fsync fdatasync #else #define aof_fsync fsync #endif /* Define rdb_fsync_range to sync_file_range() on Linux, otherwise we use * the plain fsync() call. */ #ifdef __linux__ #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) #if (LINUX_VERSION_CODE >= 0x020611 && __GLIBC_PREREQ(2, 6)) #define HAVE_SYNC_FILE_RANGE 1 #endif #else #if (LINUX_VERSION_CODE >= 0x020611) #define HAVE_SYNC_FILE_RANGE 1 #endif #endif #endif #ifdef HAVE_SYNC_FILE_RANGE #define rdb_fsync_range(fd,off,size) sync_file_range(fd,off,size,SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE) #else #define rdb_fsync_range(fd,off,size) fsync(fd) #endif /* Check if we can use setproctitle(). 
* BSD systems have support for it, we provide an implementation for * Linux and osx. */ #if (defined __NetBSD__ || defined __FreeBSD__ || defined __OpenBSD__) #define USE_SETPROCTITLE #endif #if ((defined __linux && defined(__GLIBC__)) || defined __APPLE__) #define USE_SETPROCTITLE #define INIT_SETPROCTITLE_REPLACEMENT void spt_init(int argc, char *argv[]); void setproctitle(const char *fmt, ...); #endif /* Byte ordering detection */ #include <sys/types.h> /* This will likely define BYTE_ORDER */ #ifndef BYTE_ORDER #if (BSD >= 199103) # include <machine/endian.h> #else #if defined(linux) || defined(__linux__) # include <endian.h> #else #define LITTLE_ENDIAN 1234 /* least-significant byte first (vax, pc) */ #define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ #define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp)*/ #if defined(__i386__) || defined(__x86_64__) || defined(__amd64__) || \ defined(vax) || defined(ns32000) || defined(sun386) || \ defined(MIPSEL) || defined(_MIPSEL) || defined(BIT_ZERO_ON_RIGHT) || \ defined(__alpha__) || defined(__alpha) #define BYTE_ORDER LITTLE_ENDIAN #endif #if defined(sel) || defined(pyr) || defined(mc68000) || defined(sparc) || \ defined(is68k) || defined(tahoe) || defined(ibm032) || defined(ibm370) || \ defined(MIPSEB) || defined(_MIPSEB) || defined(_IBMR2) || defined(DGUX) ||\ defined(apollo) || defined(__convex__) || defined(_CRAY) || \ defined(__hppa) || defined(__hp9000) || \ defined(__hp9000s300) || defined(__hp9000s700) || \ defined (BIT_ZERO_ON_LEFT) || defined(m68k) || defined(__sparc) #define BYTE_ORDER BIG_ENDIAN #endif #endif /* linux */ #endif /* BSD */ #endif /* BYTE_ORDER */ /* Sometimes after including an OS-specific header that defines the * endianess we end with __BYTE_ORDER but not with BYTE_ORDER that is what * the Redis code uses. In this case let's define everything without the * underscores. */ #ifndef BYTE_ORDER #ifdef __BYTE_ORDER #if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) #ifndef LITTLE_ENDIAN #define LITTLE_ENDIAN __LITTLE_ENDIAN #endif #ifndef BIG_ENDIAN #define BIG_ENDIAN __BIG_ENDIAN #endif #if (__BYTE_ORDER == __LITTLE_ENDIAN) #define BYTE_ORDER LITTLE_ENDIAN #else #define BYTE_ORDER BIG_ENDIAN #endif #endif #endif #endif #if !defined(BYTE_ORDER) || \ (BYTE_ORDER != BIG_ENDIAN && BYTE_ORDER != LITTLE_ENDIAN) /* you must determine what the correct bit order is for * your compiler - the next line is an intentional error * which will force your compiles to bomb until you fix * the above macros. */ #error "Undefined or invalid BYTE_ORDER" #endif #if (__i386 || __amd64 || __powerpc__) && __GNUC__ #define GNUC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) #if defined(__clang__) #define HAVE_ATOMIC #endif #if (defined(__GLIBC__) && defined(__GLIBC_PREREQ)) #if (GNUC_VERSION >= 40100 && __GLIBC_PREREQ(2, 6)) #define HAVE_ATOMIC #endif #endif #endif #endif
6,550
30.195238
130
h
null
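config.h above resolves BYTE_ORDER at compile time from platform headers and CPU macros. A small sketch of the equivalent runtime probe can be used to sanity-check what those macros resolved to; the union trick below is an illustration, not something config.h itself does.

#include <stdio.h>
#include <stdint.h>

/* Runtime endianness probe: inspect the first byte of a known 32-bit value. */
int main(void) {
    union { uint32_t u; uint8_t b[4]; } probe = { 0x01020304u };
    if (probe.b[0] == 0x04)
        printf("little endian (matches BYTE_ORDER == LITTLE_ENDIAN)\n");
    else if (probe.b[0] == 0x01)
        printf("big endian (matches BYTE_ORDER == BIG_ENDIAN)\n");
    else
        printf("unexpected byte order\n");
    return 0;
}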
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/ae_epoll.c
/* Linux epoll(2) based ae.c module * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <sys/epoll.h> typedef struct aeApiState { int epfd; struct epoll_event *events; } aeApiState; static int aeApiCreate(aeEventLoop *eventLoop) { aeApiState *state = zmalloc(sizeof(aeApiState)); if (!state) return -1; state->events = zmalloc(sizeof(struct epoll_event)*eventLoop->setsize); if (!state->events) { zfree(state); return -1; } state->epfd = epoll_create(1024); /* 1024 is just a hint for the kernel */ if (state->epfd == -1) { zfree(state->events); zfree(state); return -1; } eventLoop->apidata = state; return 0; } static int aeApiResize(aeEventLoop *eventLoop, int setsize) { aeApiState *state = eventLoop->apidata; state->events = zrealloc(state->events, sizeof(struct epoll_event)*setsize); return 0; } static void aeApiFree(aeEventLoop *eventLoop) { aeApiState *state = eventLoop->apidata; close(state->epfd); zfree(state->events); zfree(state); } static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; struct epoll_event ee = {0}; /* avoid valgrind warning */ /* If the fd was already monitored for some event, we need a MOD * operation. Otherwise we need an ADD operation. */ int op = eventLoop->events[fd].mask == AE_NONE ? 
EPOLL_CTL_ADD : EPOLL_CTL_MOD; ee.events = 0; mask |= eventLoop->events[fd].mask; /* Merge old events */ if (mask & AE_READABLE) ee.events |= EPOLLIN; if (mask & AE_WRITABLE) ee.events |= EPOLLOUT; ee.data.fd = fd; if (epoll_ctl(state->epfd,op,fd,&ee) == -1) return -1; return 0; } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int delmask) { aeApiState *state = eventLoop->apidata; struct epoll_event ee = {0}; /* avoid valgrind warning */ int mask = eventLoop->events[fd].mask & (~delmask); ee.events = 0; if (mask & AE_READABLE) ee.events |= EPOLLIN; if (mask & AE_WRITABLE) ee.events |= EPOLLOUT; ee.data.fd = fd; if (mask != AE_NONE) { epoll_ctl(state->epfd,EPOLL_CTL_MOD,fd,&ee); } else { /* Note, Kernel < 2.6.9 requires a non null event pointer even for * EPOLL_CTL_DEL. */ epoll_ctl(state->epfd,EPOLL_CTL_DEL,fd,&ee); } } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { aeApiState *state = eventLoop->apidata; int retval, numevents = 0; retval = epoll_wait(state->epfd,state->events,eventLoop->setsize, tvp ? (tvp->tv_sec*1000 + tvp->tv_usec/1000) : -1); if (retval > 0) { int j; numevents = retval; for (j = 0; j < numevents; j++) { int mask = 0; struct epoll_event *e = state->events+j; if (e->events & EPOLLIN) mask |= AE_READABLE; if (e->events & EPOLLOUT) mask |= AE_WRITABLE; if (e->events & EPOLLERR) mask |= AE_WRITABLE; if (e->events & EPOLLHUP) mask |= AE_WRITABLE; eventLoop->fired[j].fd = e->data.fd; eventLoop->fired[j].mask = mask; } } return numevents; } static char *aeApiName(void) { return "epoll"; }
4,846
34.639706
80
c
null
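aeApiAddEvent() and aeApiPoll() above wrap the standard Linux epoll calls; stripped of the event-loop bookkeeping, the same pattern looks like the minimal sketch below (watch stdin for readability with a 1-second timeout, Linux only). The fd choice and timeout are illustrative assumptions.

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void) {
    int epfd = epoll_create(1024);            /* size is only a hint, as noted above */
    if (epfd == -1) { perror("epoll_create"); return 1; }

    struct epoll_event ee = {0};
    ee.events = EPOLLIN;                      /* AE_READABLE maps to EPOLLIN */
    ee.data.fd = STDIN_FILENO;
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ee) == -1) {
        perror("epoll_ctl");
        return 1;
    }

    struct epoll_event fired[8];
    int n = epoll_wait(epfd, fired, 8, 1000); /* 1000 ms, like tvp converted to ms */
    printf("epoll_wait returned %d event(s)\n", n);
    close(epfd);
    return 0;
}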
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/ae_select.c
/* Select()-based ae.c module. * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <sys/select.h> #include <string.h> typedef struct aeApiState { fd_set rfds, wfds; /* We need to have a copy of the fd sets as it's not safe to reuse * FD sets after select(). */ fd_set _rfds, _wfds; } aeApiState; static int aeApiCreate(aeEventLoop *eventLoop) { aeApiState *state = zmalloc(sizeof(aeApiState)); if (!state) return -1; FD_ZERO(&state->rfds); FD_ZERO(&state->wfds); eventLoop->apidata = state; return 0; } static int aeApiResize(aeEventLoop *eventLoop, int setsize) { /* Just ensure we have enough room in the fd_set type. */ if (setsize >= FD_SETSIZE) return -1; return 0; } static void aeApiFree(aeEventLoop *eventLoop) { zfree(eventLoop->apidata); } static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; if (mask & AE_READABLE) FD_SET(fd,&state->rfds); if (mask & AE_WRITABLE) FD_SET(fd,&state->wfds); return 0; } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; if (mask & AE_READABLE) FD_CLR(fd,&state->rfds); if (mask & AE_WRITABLE) FD_CLR(fd,&state->wfds); } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { aeApiState *state = eventLoop->apidata; int retval, j, numevents = 0; memcpy(&state->_rfds,&state->rfds,sizeof(fd_set)); memcpy(&state->_wfds,&state->wfds,sizeof(fd_set)); retval = select(eventLoop->maxfd+1, &state->_rfds,&state->_wfds,NULL,tvp); if (retval > 0) { for (j = 0; j <= eventLoop->maxfd; j++) { int mask = 0; aeFileEvent *fe = &eventLoop->events[j]; if (fe->mask == AE_NONE) continue; if (fe->mask & AE_READABLE && FD_ISSET(j,&state->_rfds)) mask |= AE_READABLE; if (fe->mask & AE_WRITABLE && FD_ISSET(j,&state->_wfds)) mask |= AE_WRITABLE; eventLoop->fired[numevents].fd = j; eventLoop->fired[numevents].mask = mask; numevents++; } } return numevents; } static char *aeApiName(void) { return "select"; }
3,828
34.785047
78
c
null
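The select() backend above keeps a master fd_set and copies it before every call because select() overwrites its arguments. A self-contained sketch of that copy-then-poll pattern follows; watching stdin with a 1-second timeout is an illustrative choice.

#include <stdio.h>
#include <string.h>
#include <sys/select.h>
#include <unistd.h>

int main(void) {
    fd_set rfds, _rfds;                      /* master set and per-call copy */
    FD_ZERO(&rfds);
    FD_SET(STDIN_FILENO, &rfds);

    struct timeval tv = { 1, 0 };            /* 1 second timeout */
    memcpy(&_rfds, &rfds, sizeof(fd_set));   /* select() clobbers the set it is given */
    int n = select(STDIN_FILENO + 1, &_rfds, NULL, NULL, &tv);
    if (n > 0 && FD_ISSET(STDIN_FILENO, &_rfds))
        printf("stdin is readable\n");
    else
        printf("timeout or error (n=%d)\n", n);
    return 0;
}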
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/intset.c
/* * Copyright (c) 2009-2012, Pieter Noordhuis <pcnoordhuis at gmail dot com> * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "intset.h" #include "zmalloc.h" #include "endianconv.h" /* Note that these encodings are ordered, so: * INTSET_ENC_INT16 < INTSET_ENC_INT32 < INTSET_ENC_INT64. */ #define INTSET_ENC_INT16 (sizeof(int16_t)) #define INTSET_ENC_INT32 (sizeof(int32_t)) #define INTSET_ENC_INT64 (sizeof(int64_t)) /* Return the required encoding for the provided value. */ static uint8_t _intsetValueEncoding(int64_t v) { if (v < INT32_MIN || v > INT32_MAX) return INTSET_ENC_INT64; else if (v < INT16_MIN || v > INT16_MAX) return INTSET_ENC_INT32; else return INTSET_ENC_INT16; } /* Return the value at pos, given an encoding. */ static int64_t _intsetGetEncoded(intset *is, int pos, uint8_t enc) { int64_t v64; int32_t v32; int16_t v16; if (enc == INTSET_ENC_INT64) { memcpy(&v64,((int64_t*)is->contents)+pos,sizeof(v64)); memrev64ifbe(&v64); return v64; } else if (enc == INTSET_ENC_INT32) { memcpy(&v32,((int32_t*)is->contents)+pos,sizeof(v32)); memrev32ifbe(&v32); return v32; } else { memcpy(&v16,((int16_t*)is->contents)+pos,sizeof(v16)); memrev16ifbe(&v16); return v16; } } /* Return the value at pos, using the configured encoding. */ static int64_t _intsetGet(intset *is, int pos) { return _intsetGetEncoded(is,pos,intrev32ifbe(is->encoding)); } /* Set the value at pos, using the configured encoding. */ static void _intsetSet(intset *is, int pos, int64_t value) { uint32_t encoding = intrev32ifbe(is->encoding); if (encoding == INTSET_ENC_INT64) { ((int64_t*)is->contents)[pos] = value; memrev64ifbe(((int64_t*)is->contents)+pos); } else if (encoding == INTSET_ENC_INT32) { ((int32_t*)is->contents)[pos] = value; memrev32ifbe(((int32_t*)is->contents)+pos); } else { ((int16_t*)is->contents)[pos] = value; memrev16ifbe(((int16_t*)is->contents)+pos); } } /* Create an empty intset. 
*/ intset *intsetNew(void) { intset *is = zmalloc(sizeof(intset)); is->encoding = intrev32ifbe(INTSET_ENC_INT16); is->length = 0; return is; } /* Resize the intset */ static intset *intsetResize(intset *is, uint32_t len) { uint32_t size = len*intrev32ifbe(is->encoding); is = zrealloc(is,sizeof(intset)+size); return is; } /* Search for the position of "value". Return 1 when the value was found and * sets "pos" to the position of the value within the intset. Return 0 when * the value is not present in the intset and sets "pos" to the position * where "value" can be inserted. */ static uint8_t intsetSearch(intset *is, int64_t value, uint32_t *pos) { int min = 0, max = intrev32ifbe(is->length)-1, mid = -1; int64_t cur = -1; /* The value can never be found when the set is empty */ if (intrev32ifbe(is->length) == 0) { if (pos) *pos = 0; return 0; } else { /* Check for the case where we know we cannot find the value, * but do know the insert position. */ if (value > _intsetGet(is,intrev32ifbe(is->length)-1)) { if (pos) *pos = intrev32ifbe(is->length); return 0; } else if (value < _intsetGet(is,0)) { if (pos) *pos = 0; return 0; } } while(max >= min) { mid = ((unsigned int)min + (unsigned int)max) >> 1; cur = _intsetGet(is,mid); if (value > cur) { min = mid+1; } else if (value < cur) { max = mid-1; } else { break; } } if (value == cur) { if (pos) *pos = mid; return 1; } else { if (pos) *pos = min; return 0; } } /* Upgrades the intset to a larger encoding and inserts the given integer. */ static intset *intsetUpgradeAndAdd(intset *is, int64_t value) { uint8_t curenc = intrev32ifbe(is->encoding); uint8_t newenc = _intsetValueEncoding(value); int length = intrev32ifbe(is->length); int prepend = value < 0 ? 1 : 0; /* First set new encoding and resize */ is->encoding = intrev32ifbe(newenc); is = intsetResize(is,intrev32ifbe(is->length)+1); /* Upgrade back-to-front so we don't overwrite values. * Note that the "prepend" variable is used to make sure we have an empty * space at either the beginning or the end of the intset. */ while(length--) _intsetSet(is,length+prepend,_intsetGetEncoded(is,length,curenc)); /* Set the value at the beginning or the end. */ if (prepend) _intsetSet(is,0,value); else _intsetSet(is,intrev32ifbe(is->length),value); is->length = intrev32ifbe(intrev32ifbe(is->length)+1); return is; } static void intsetMoveTail(intset *is, uint32_t from, uint32_t to) { void *src, *dst; uint32_t bytes = intrev32ifbe(is->length)-from; uint32_t encoding = intrev32ifbe(is->encoding); if (encoding == INTSET_ENC_INT64) { src = (int64_t*)is->contents+from; dst = (int64_t*)is->contents+to; bytes *= sizeof(int64_t); } else if (encoding == INTSET_ENC_INT32) { src = (int32_t*)is->contents+from; dst = (int32_t*)is->contents+to; bytes *= sizeof(int32_t); } else { src = (int16_t*)is->contents+from; dst = (int16_t*)is->contents+to; bytes *= sizeof(int16_t); } memmove(dst,src,bytes); } /* Insert an integer in the intset */ intset *intsetAdd(intset *is, int64_t value, uint8_t *success) { uint8_t valenc = _intsetValueEncoding(value); uint32_t pos; if (success) *success = 1; /* Upgrade encoding if necessary. If we need to upgrade, we know that * this value should be either appended (if > 0) or prepended (if < 0), * because it lies outside the range of existing values. */ if (valenc > intrev32ifbe(is->encoding)) { /* This always succeeds, so we don't need to curry *success. */ return intsetUpgradeAndAdd(is,value); } else { /* Abort if the value is already present in the set. 
* This call will populate "pos" with the right position to insert * the value when it cannot be found. */ if (intsetSearch(is,value,&pos)) { if (success) *success = 0; return is; } is = intsetResize(is,intrev32ifbe(is->length)+1); if (pos < intrev32ifbe(is->length)) intsetMoveTail(is,pos,pos+1); } _intsetSet(is,pos,value); is->length = intrev32ifbe(intrev32ifbe(is->length)+1); return is; } /* Delete integer from intset */ intset *intsetRemove(intset *is, int64_t value, int *success) { uint8_t valenc = _intsetValueEncoding(value); uint32_t pos; if (success) *success = 0; if (valenc <= intrev32ifbe(is->encoding) && intsetSearch(is,value,&pos)) { uint32_t len = intrev32ifbe(is->length); /* We know we can delete */ if (success) *success = 1; /* Overwrite value with tail and update length */ if (pos < (len-1)) intsetMoveTail(is,pos+1,pos); is = intsetResize(is,len-1); is->length = intrev32ifbe(len-1); } return is; } /* Determine whether a value belongs to this set */ uint8_t intsetFind(intset *is, int64_t value) { uint8_t valenc = _intsetValueEncoding(value); return valenc <= intrev32ifbe(is->encoding) && intsetSearch(is,value,NULL); } /* Return random member */ int64_t intsetRandom(intset *is) { return _intsetGet(is,rand()%intrev32ifbe(is->length)); } /* Sets the value to the value at the given position. When this position is * out of range the function returns 0, when in range it returns 1. */ uint8_t intsetGet(intset *is, uint32_t pos, int64_t *value) { if (pos < intrev32ifbe(is->length)) { *value = _intsetGet(is,pos); return 1; } return 0; } /* Return intset length */ uint32_t intsetLen(intset *is) { return intrev32ifbe(is->length); } /* Return intset blob size in bytes. */ size_t intsetBlobLen(intset *is) { return sizeof(intset)+intrev32ifbe(is->length)*intrev32ifbe(is->encoding); } #ifdef REDIS_TEST #include <sys/time.h> #include <time.h> #if 0 static void intsetRepr(intset *is) { for (uint32_t i = 0; i < intrev32ifbe(is->length); i++) { printf("%lld\n", (uint64_t)_intsetGet(is,i)); } printf("\n"); } static void error(char *err) { printf("%s\n", err); exit(1); } #endif static void ok(void) { printf("OK\n"); } static long long usec(void) { struct timeval tv; gettimeofday(&tv,NULL); return (((long long)tv.tv_sec)*1000000)+tv.tv_usec; } #define assert(_e) ((_e)?(void)0:(_assert(#_e,__FILE__,__LINE__),exit(1))) static void _assert(char *estr, char *file, int line) { printf("\n\n=== ASSERTION FAILED ===\n"); printf("==> %s:%d '%s' is not true\n",file,line,estr); } static intset *createSet(int bits, int size) { uint64_t mask = (1<<bits)-1; uint64_t value; intset *is = intsetNew(); for (int i = 0; i < size; i++) { if (bits > 32) { value = (rand()*rand()) & mask; } else { value = rand() & mask; } is = intsetAdd(is,value,NULL); } return is; } static void checkConsistency(intset *is) { for (uint32_t i = 0; i < (intrev32ifbe(is->length)-1); i++) { uint32_t encoding = intrev32ifbe(is->encoding); if (encoding == INTSET_ENC_INT16) { int16_t *i16 = (int16_t*)is->contents; assert(i16[i] < i16[i+1]); } else if (encoding == INTSET_ENC_INT32) { int32_t *i32 = (int32_t*)is->contents; assert(i32[i] < i32[i+1]); } else { int64_t *i64 = (int64_t*)is->contents; assert(i64[i] < i64[i+1]); } } } #define UNUSED(x) (void)(x) int intsetTest(int argc, char **argv) { uint8_t success; int i; intset *is; srand(time(NULL)); UNUSED(argc); UNUSED(argv); printf("Value encodings: "); { assert(_intsetValueEncoding(-32768) == INTSET_ENC_INT16); assert(_intsetValueEncoding(+32767) == INTSET_ENC_INT16); 
assert(_intsetValueEncoding(-32769) == INTSET_ENC_INT32); assert(_intsetValueEncoding(+32768) == INTSET_ENC_INT32); assert(_intsetValueEncoding(-2147483648) == INTSET_ENC_INT32); assert(_intsetValueEncoding(+2147483647) == INTSET_ENC_INT32); assert(_intsetValueEncoding(-2147483649) == INTSET_ENC_INT64); assert(_intsetValueEncoding(+2147483648) == INTSET_ENC_INT64); assert(_intsetValueEncoding(-9223372036854775808ull) == INTSET_ENC_INT64); assert(_intsetValueEncoding(+9223372036854775807ull) == INTSET_ENC_INT64); ok(); } printf("Basic adding: "); { is = intsetNew(); is = intsetAdd(is,5,&success); assert(success); is = intsetAdd(is,6,&success); assert(success); is = intsetAdd(is,4,&success); assert(success); is = intsetAdd(is,4,&success); assert(!success); ok(); } printf("Large number of random adds: "); { uint32_t inserts = 0; is = intsetNew(); for (i = 0; i < 1024; i++) { is = intsetAdd(is,rand()%0x800,&success); if (success) inserts++; } assert(intrev32ifbe(is->length) == inserts); checkConsistency(is); ok(); } printf("Upgrade from int16 to int32: "); { is = intsetNew(); is = intsetAdd(is,32,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); is = intsetAdd(is,65535,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); assert(intsetFind(is,32)); assert(intsetFind(is,65535)); checkConsistency(is); is = intsetNew(); is = intsetAdd(is,32,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); is = intsetAdd(is,-65535,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); assert(intsetFind(is,32)); assert(intsetFind(is,-65535)); checkConsistency(is); ok(); } printf("Upgrade from int16 to int64: "); { is = intsetNew(); is = intsetAdd(is,32,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); is = intsetAdd(is,4294967295,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); assert(intsetFind(is,32)); assert(intsetFind(is,4294967295)); checkConsistency(is); is = intsetNew(); is = intsetAdd(is,32,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); is = intsetAdd(is,-4294967295,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); assert(intsetFind(is,32)); assert(intsetFind(is,-4294967295)); checkConsistency(is); ok(); } printf("Upgrade from int32 to int64: "); { is = intsetNew(); is = intsetAdd(is,65535,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); is = intsetAdd(is,4294967295,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); assert(intsetFind(is,65535)); assert(intsetFind(is,4294967295)); checkConsistency(is); is = intsetNew(); is = intsetAdd(is,65535,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); is = intsetAdd(is,-4294967295,NULL); assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); assert(intsetFind(is,65535)); assert(intsetFind(is,-4294967295)); checkConsistency(is); ok(); } printf("Stress lookups: "); { long num = 100000, size = 10000; int i, bits = 20; long long start; is = createSet(bits,size); checkConsistency(is); start = usec(); for (i = 0; i < num; i++) intsetSearch(is,rand() % ((1<<bits)-1),NULL); printf("%ld lookups, %ld element set, %lldusec\n", num,size,usec()-start); } printf("Stress add+delete: "); { int i, v1, v2; is = intsetNew(); for (i = 0; i < 0xffff; i++) { v1 = rand() % 0xfff; is = intsetAdd(is,v1,NULL); assert(intsetFind(is,v1)); v2 = rand() % 0xfff; is = intsetRemove(is,v2,NULL); assert(!intsetFind(is,v2)); } checkConsistency(is); ok(); } return 0; } #endif
16,271
32.006085
79
c
null
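The intset.c entry above implements a sorted, duplicate-free array of integers with on-demand encoding upgrades (int16 -> int32 -> int64). Below is a minimal, hedged driver exercising the public API; the file name and main() are hypothetical, and it assumes compiling and linking against intset.c, zmalloc.c and endianconv.c from this tree.

#include <stdio.h>
#include <inttypes.h>
#include "intset.h"
#include "zmalloc.h"

int main(void) {
    uint8_t added;
    intset *is = intsetNew();

    /* Insert a few values; the 'success' flag reports duplicates. */
    is = intsetAdd(is, 42, &added);      /* added == 1 */
    is = intsetAdd(is, 42, &added);      /* added == 0: already present */
    is = intsetAdd(is, 100000, &added);  /* forces an upgrade to the int32 encoding */

    printf("length: %u\n", intsetLen(is));               /* 2 */
    printf("contains 42: %d\n", intsetFind(is, 42));     /* 1 */

    int64_t v;
    for (uint32_t i = 0; i < intsetLen(is); i++)
        if (intsetGet(is, i, &v))
            printf("pos %u -> %" PRId64 "\n", i, v);     /* values come out sorted */

    zfree(is);
    return 0;
}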
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/bio.h
/*
 * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* Exported API */
void bioInit(void);
void bioCreateBackgroundJob(int type, void *arg1, void *arg2, void *arg3);
unsigned long long bioPendingJobsOfType(int type);
void bioWaitPendingJobsLE(int type, unsigned long long num);
time_t bioOlderJobOfType(int type);
void bioKillThreads(void);

/* Background job opcodes */
#define BIO_CLOSE_FILE    0 /* Deferred close(2) syscall. */
#define BIO_AOF_FSYNC     1 /* Deferred AOF fsync. */
#define BIO_NUM_OPS       2
2,073
48.380952
78
h
null
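bio.h above exposes the background-I/O job queue used to push slow close(2) and fsync(2) calls off the main thread. The fragment below is a hedged sketch of the intended calling pattern inside the Redis server, after bioInit() has started the worker threads; the wrapper function names are hypothetical and the snippet only links as part of the server (it needs bio.c and its dependencies).

#include "bio.h"

/* Hypothetical wrappers illustrating the calling pattern. */
void close_file_in_background(int fd) {
    /* arg1 carries the descriptor; the other opaque args are unused by this job type. */
    bioCreateBackgroundJob(BIO_CLOSE_FILE, (void *)(long)fd, NULL, NULL);
}

void wait_for_pending_closes(void) {
    /* Block until the worker thread has drained every BIO_CLOSE_FILE job. */
    bioWaitPendingJobsLE(BIO_CLOSE_FILE, 0);
}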
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/setproctitle.c
/* ========================================================================== * setproctitle.c - Linux/Darwin setproctitle. * -------------------------------------------------------------------------- * Copyright (C) 2010 William Ahern * Copyright (C) 2013 Salvatore Sanfilippo * Copyright (C) 2013 Stam He * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * ========================================================================== */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include <stddef.h> /* NULL size_t */ #include <stdarg.h> /* va_list va_start va_end */ #include <stdlib.h> /* malloc(3) setenv(3) clearenv(3) setproctitle(3) getprogname(3) */ #include <stdio.h> /* vsnprintf(3) snprintf(3) */ #include <string.h> /* strlen(3) strchr(3) strdup(3) memset(3) memcpy(3) */ #include <errno.h> /* errno program_invocation_name program_invocation_short_name */ #if !defined(HAVE_SETPROCTITLE) #define HAVE_SETPROCTITLE (defined __NetBSD__ || defined __FreeBSD__ || defined __OpenBSD__) #endif #if !HAVE_SETPROCTITLE #if (defined __linux || defined __APPLE__) extern char **environ; static struct { /* original value */ const char *arg0; /* title space available */ char *base, *end; /* pointer to original nul character within base */ char *nul; _Bool reset; int error; } SPT; #ifndef SPT_MIN #define SPT_MIN(a, b) (((a) < (b))? (a) : (b)) #endif static inline size_t spt_min(size_t a, size_t b) { return SPT_MIN(a, b); } /* spt_min() */ /* * For discussion on the portability of the various methods, see * http://lists.freebsd.org/pipermail/freebsd-stable/2008-June/043136.html */ static int spt_clearenv(void) { #if __GLIBC__ clearenv(); return 0; #else extern char **environ; static char **tmp; if (!(tmp = malloc(sizeof *tmp))) return errno; tmp[0] = NULL; environ = tmp; return 0; #endif } /* spt_clearenv() */ static int spt_copyenv(char *oldenv[]) { extern char **environ; char *eq; int i, error; if (environ != oldenv) return 0; if ((error = spt_clearenv())) goto error; for (i = 0; oldenv[i]; i++) { if (!(eq = strchr(oldenv[i], '='))) continue; *eq = '\0'; error = (0 != setenv(oldenv[i], eq + 1, 1))? 
errno : 0; *eq = '='; if (error) goto error; } return 0; error: environ = oldenv; return error; } /* spt_copyenv() */ static int spt_copyargs(int argc, char *argv[]) { char *tmp; int i; for (i = 1; i < argc || (i >= argc && argv[i]); i++) { if (!argv[i]) continue; if (!(tmp = strdup(argv[i]))) return errno; argv[i] = tmp; } return 0; } /* spt_copyargs() */ void spt_init(int argc, char *argv[]) { char **envp = environ; char *base, *end, *nul, *tmp; int i, error; if (!(base = argv[0])) return; nul = &base[strlen(base)]; end = nul + 1; for (i = 0; i < argc || (i >= argc && argv[i]); i++) { if (!argv[i] || argv[i] < end) continue; end = argv[i] + strlen(argv[i]) + 1; } for (i = 0; envp[i]; i++) { if (envp[i] < end) continue; end = envp[i] + strlen(envp[i]) + 1; } if (!(SPT.arg0 = strdup(argv[0]))) goto syerr; #if __GLIBC__ if (!(tmp = strdup(program_invocation_name))) goto syerr; program_invocation_name = tmp; if (!(tmp = strdup(program_invocation_short_name))) goto syerr; program_invocation_short_name = tmp; #elif __APPLE__ if (!(tmp = strdup(getprogname()))) goto syerr; setprogname(tmp); #endif if ((error = spt_copyenv(envp))) goto error; if ((error = spt_copyargs(argc, argv))) goto error; SPT.nul = nul; SPT.base = base; SPT.end = end; return; syerr: error = errno; error: SPT.error = error; } /* spt_init() */ #ifndef SPT_MAXTITLE #define SPT_MAXTITLE 255 #endif void setproctitle(const char *fmt, ...) { char buf[SPT_MAXTITLE + 1]; /* use buffer in case argv[0] is passed */ va_list ap; char *nul; int len, error; if (!SPT.base) return; if (fmt) { va_start(ap, fmt); len = vsnprintf(buf, sizeof buf, fmt, ap); va_end(ap); } else { len = snprintf(buf, sizeof buf, "%s", SPT.arg0); } if (len <= 0) { error = errno; goto error; } if (!SPT.reset) { memset(SPT.base, 0, SPT.end - SPT.base); SPT.reset = 1; } else { memset(SPT.base, 0, spt_min(sizeof buf, SPT.end - SPT.base)); } len = spt_min(len, spt_min(sizeof buf, SPT.end - SPT.base) - 1); memcpy(SPT.base, buf, len); nul = &SPT.base[len]; if (nul < SPT.nul) { *SPT.nul = '.'; } else if (nul == SPT.nul && &nul[1] < SPT.end) { *SPT.nul = ' '; *++nul = '\0'; } return; error: SPT.error = error; } /* setproctitle() */ #endif /* __linux || __APPLE__ */ #endif /* !HAVE_SETPROCTITLE */
5,656
20.591603
92
c
null
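setproctitle.c above rewrites argv[0] in place so that ps(1) shows a descriptive title on Linux and macOS. A minimal sketch of its intended use follows; the prototypes are repeated inline to keep the sketch self-contained (Redis normally gets them from config.h), and it assumes linking with setproctitle.c on a platform without a native setproctitle().

#include <unistd.h>

/* Prototypes repeated here for the sketch; provided by setproctitle.c on Linux/macOS. */
void spt_init(int argc, char *argv[]);
void setproctitle(const char *fmt, ...);

int main(int argc, char *argv[]) {
    spt_init(argc, argv);                /* must run before argv/environ are reused */
    setproctitle("my-daemon: waiting");  /* what ps(1) will now display as the command */
    sleep(60);                           /* inspect from another shell: ps -o pid,command */
    return 0;
}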
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/util.c
/* * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <ctype.h> #include <limits.h> #include <math.h> #include <unistd.h> #include <sys/time.h> #include <float.h> #include <stdint.h> #include <errno.h> #include "util.h" #include "sha1.h" /* Glob-style pattern matching. 
*/ int stringmatchlen(const char *pattern, int patternLen, const char *string, int stringLen, int nocase) { while(patternLen) { switch(pattern[0]) { case '*': while (pattern[1] == '*') { pattern++; patternLen--; } if (patternLen == 1) return 1; /* match */ while(stringLen) { if (stringmatchlen(pattern+1, patternLen-1, string, stringLen, nocase)) return 1; /* match */ string++; stringLen--; } return 0; /* no match */ break; case '?': if (stringLen == 0) return 0; /* no match */ string++; stringLen--; break; case '[': { int not, match; pattern++; patternLen--; not = pattern[0] == '^'; if (not) { pattern++; patternLen--; } match = 0; while(1) { if (pattern[0] == '\\') { pattern++; patternLen--; if (pattern[0] == string[0]) match = 1; } else if (pattern[0] == ']') { break; } else if (patternLen == 0) { pattern--; patternLen++; break; } else if (pattern[1] == '-' && patternLen >= 3) { int start = pattern[0]; int end = pattern[2]; int c = string[0]; if (start > end) { int t = start; start = end; end = t; } if (nocase) { start = tolower(start); end = tolower(end); c = tolower(c); } pattern += 2; patternLen -= 2; if (c >= start && c <= end) match = 1; } else { if (!nocase) { if (pattern[0] == string[0]) match = 1; } else { if (tolower((int)pattern[0]) == tolower((int)string[0])) match = 1; } } pattern++; patternLen--; } if (not) match = !match; if (!match) return 0; /* no match */ string++; stringLen--; break; } case '\\': if (patternLen >= 2) { pattern++; patternLen--; } /* fall through */ default: if (!nocase) { if (pattern[0] != string[0]) return 0; /* no match */ } else { if (tolower((int)pattern[0]) != tolower((int)string[0])) return 0; /* no match */ } string++; stringLen--; break; } pattern++; patternLen--; if (stringLen == 0) { while(*pattern == '*') { pattern++; patternLen--; } break; } } if (patternLen == 0 && stringLen == 0) return 1; return 0; } int stringmatch(const char *pattern, const char *string, int nocase) { return stringmatchlen(pattern,strlen(pattern),string,strlen(string),nocase); } /* Convert a string representing an amount of memory into the number of * bytes, so for instance memtoll("1Gb") will return 1073741824 that is * (1024*1024*1024). * * On parsing error, if *err is not NULL, it's set to 1, otherwise it's * set to 0. On error the function return value is 0, regardless of the * fact 'err' is NULL or not. */ long long memtoll(const char *p, int *err) { const char *u; char buf[128]; long mul; /* unit multiplier */ long long val; unsigned int digits; if (err) *err = 0; /* Search the first non digit character. */ u = p; if (*u == '-') u++; while(*u && isdigit(*u)) u++; if (*u == '\0' || !strcasecmp(u,"b")) { mul = 1; } else if (!strcasecmp(u,"k")) { mul = 1000; } else if (!strcasecmp(u,"kb")) { mul = 1024; } else if (!strcasecmp(u,"m")) { mul = 1000*1000; } else if (!strcasecmp(u,"mb")) { mul = 1024*1024; } else if (!strcasecmp(u,"g")) { mul = 1000L*1000*1000; } else if (!strcasecmp(u,"gb")) { mul = 1024L*1024*1024; } else { if (err) *err = 1; return 0; } /* Copy the digits into a buffer, we'll use strtoll() to convert * the digit (without the unit) into a number. */ digits = u-p; if (digits >= sizeof(buf)) { if (err) *err = 1; return 0; } memcpy(buf,p,digits); buf[digits] = '\0'; char *endptr; errno = 0; val = strtoll(buf,&endptr,10); if ((val == 0 && errno == EINVAL) || *endptr != '\0') { if (err) *err = 1; return 0; } return val*mul; } /* Return the number of digits of 'v' when converted to string in radix 10. * See ll2string() for more information. 
*/ uint32_t digits10(uint64_t v) { if (v < 10) return 1; if (v < 100) return 2; if (v < 1000) return 3; if (v < 1000000000000UL) { if (v < 100000000UL) { if (v < 1000000) { if (v < 10000) return 4; return 5 + (v >= 100000); } return 7 + (v >= 10000000UL); } if (v < 10000000000UL) { return 9 + (v >= 1000000000UL); } return 11 + (v >= 100000000000UL); } return 12 + digits10(v / 1000000000000UL); } /* Like digits10() but for signed values. */ uint32_t sdigits10(int64_t v) { if (v < 0) { /* Abs value of LLONG_MIN requires special handling. */ uint64_t uv = (v != LLONG_MIN) ? (uint64_t)-v : ((uint64_t) LLONG_MAX)+1; return digits10(uv)+1; /* +1 for the minus. */ } else { return digits10(v); } } /* Convert a long long into a string. Returns the number of * characters needed to represent the number. * If the buffer is not big enough to store the string, 0 is returned. * * Based on the following article (that apparently does not provide a * novel approach but only publicizes an already used technique): * * https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920 * * Modified in order to handle signed integers since the original code was * designed for unsigned integers. */ int ll2string(char* dst, size_t dstlen, long long svalue) { static const char digits[201] = "0001020304050607080910111213141516171819" "2021222324252627282930313233343536373839" "4041424344454647484950515253545556575859" "6061626364656667686970717273747576777879" "8081828384858687888990919293949596979899"; int negative; unsigned long long value; /* The main loop works with 64bit unsigned integers for simplicity, so * we convert the number here and remember if it is negative. */ if (svalue < 0) { if (svalue != LLONG_MIN) { value = -svalue; } else { value = ((unsigned long long) LLONG_MAX)+1; } negative = 1; } else { value = svalue; negative = 0; } /* Check length. */ uint32_t const length = digits10(value)+negative; if (length >= dstlen) return 0; /* Null term. */ uint32_t next = length; dst[next] = '\0'; next--; while (value >= 100) { int const i = (value % 100) * 2; value /= 100; dst[next] = digits[i + 1]; dst[next - 1] = digits[i]; next -= 2; } /* Handle last 1-2 digits. */ if (value < 10) { dst[next] = '0' + (uint32_t) value; } else { int i = (uint32_t) value * 2; dst[next] = digits[i + 1]; dst[next - 1] = digits[i]; } /* Add sign. */ if (negative) dst[0] = '-'; return length; } /* Convert a string into a long long. Returns 1 if the string could be parsed * into a (non-overflowing) long long, 0 otherwise. The value will be set to * the parsed value when appropriate. */ int string2ll(const char *s, size_t slen, long long *value) { const char *p = s; size_t plen = 0; int negative = 0; unsigned long long v; if (plen == slen) return 0; /* Special case: first and only digit is 0. */ if (slen == 1 && p[0] == '0') { if (value != NULL) *value = 0; return 1; } if (p[0] == '-') { negative = 1; p++; plen++; /* Abort on only a negative sign. */ if (plen == slen) return 0; } /* First digit should be 1-9, otherwise the string should just be 0. */ if (p[0] >= '1' && p[0] <= '9') { v = p[0]-'0'; p++; plen++; } else if (p[0] == '0' && slen == 1) { *value = 0; return 1; } else { return 0; } while (plen < slen && p[0] >= '0' && p[0] <= '9') { if (v > (ULLONG_MAX / 10)) /* Overflow. */ return 0; v *= 10; if (v > (ULLONG_MAX - (p[0]-'0'))) /* Overflow. */ return 0; v += p[0]-'0'; p++; plen++; } /* Return if not all bytes were used. 
*/ if (plen < slen) return 0; if (negative) { if (v > ((unsigned long long)(-(LLONG_MIN+1))+1)) /* Overflow. */ return 0; if (value != NULL) *value = -v; } else { if (v > LLONG_MAX) /* Overflow. */ return 0; if (value != NULL) *value = v; } return 1; } /* Convert a string into a long. Returns 1 if the string could be parsed into a * (non-overflowing) long, 0 otherwise. The value will be set to the parsed * value when appropriate. */ int string2l(const char *s, size_t slen, long *lval) { long long llval; if (!string2ll(s,slen,&llval)) return 0; if (llval < LONG_MIN || llval > LONG_MAX) return 0; *lval = (long)llval; return 1; } /* Convert a double to a string representation. Returns the number of bytes * required. The representation should always be parsable by strtod(3). */ int d2string(char *buf, size_t len, double value) { if (isnan(value)) { len = snprintf(buf,len,"nan"); } else if (isinf(value)) { if (value < 0) len = snprintf(buf,len,"-inf"); else len = snprintf(buf,len,"inf"); } else if (value == 0) { /* See: http://en.wikipedia.org/wiki/Signed_zero, "Comparisons". */ if (1.0/value < 0) len = snprintf(buf,len,"-0"); else len = snprintf(buf,len,"0"); } else { #if (DBL_MANT_DIG >= 52) && (LLONG_MAX == 0x7fffffffffffffffLL) /* Check if the float is in a safe range to be casted into a * long long. We are assuming that long long is 64 bit here. * Also we are assuming that there are no implementations around where * double has precision < 52 bit. * * Under this assumptions we test if a double is inside an interval * where casting to long long is safe. Then using two castings we * make sure the decimal part is zero. If all this is true we use * integer printing function that is much faster. */ double min = -4503599627370495; /* (2^52)-1 */ double max = 4503599627370496; /* -(2^52) */ if (value > min && value < max && value == ((double)((long long)value))) len = ll2string(buf,len,(long long)value); else #endif len = snprintf(buf,len,"%.17g",value); } return len; } /* Generate the Redis "Run ID", a SHA1-sized random number that identifies a * given execution of Redis, so that if you are talking with an instance * having run_id == A, and you reconnect and it has run_id == B, you can be * sure that it is either a different instance or it was restarted. */ void getRandomHexChars(char *p, unsigned int len) { char *charset = "0123456789abcdef"; unsigned int j; /* Global state. */ static int seed_initialized = 0; static unsigned char seed[20]; /* The SHA1 seed, from /dev/urandom. */ static uint64_t counter = 0; /* The counter we hash with the seed. */ if (!seed_initialized) { /* Initialize a seed and use SHA1 in counter mode, where we hash * the same seed with a progressive counter. For the goals of this * function we just need non-colliding strings, there are no * cryptographic security needs. */ FILE *fp = fopen("/dev/urandom","r"); if (fp && fread(seed,sizeof(seed),1,fp) == 1) seed_initialized = 1; if (fp) fclose(fp); } if (seed_initialized) { while(len) { unsigned char digest[20]; SHA1_CTX ctx; unsigned int copylen = len > 20 ? 20 : len; SHA1Init(&ctx); SHA1Update(&ctx, seed, sizeof(seed)); SHA1Update(&ctx, (unsigned char*)&counter,sizeof(counter)); SHA1Final(digest, &ctx); counter++; memcpy(p,digest,copylen); /* Convert to hex digits. 
*/ for (j = 0; j < copylen; j++) p[j] = charset[p[j] & 0x0F]; len -= copylen; p += copylen; } } else { /* If we can't read from /dev/urandom, do some reasonable effort * in order to create some entropy, since this function is used to * generate run_id and cluster instance IDs */ char *x = p; unsigned int l = len; struct timeval tv; pid_t pid = getpid(); /* Use time and PID to fill the initial array. */ gettimeofday(&tv,NULL); if (l >= sizeof(tv.tv_usec)) { memcpy(x,&tv.tv_usec,sizeof(tv.tv_usec)); l -= sizeof(tv.tv_usec); x += sizeof(tv.tv_usec); } if (l >= sizeof(tv.tv_sec)) { memcpy(x,&tv.tv_sec,sizeof(tv.tv_sec)); l -= sizeof(tv.tv_sec); x += sizeof(tv.tv_sec); } if (l >= sizeof(pid)) { memcpy(x,&pid,sizeof(pid)); l -= sizeof(pid); x += sizeof(pid); } /* Finally xor it with rand() output, that was already seeded with * time() at startup, and convert to hex digits. */ for (j = 0; j < len; j++) { p[j] ^= rand(); p[j] = charset[p[j] & 0x0F]; } } } /* Given the filename, return the absolute path as an SDS string, or NULL * if it fails for some reason. Note that "filename" may be an absolute path * already, this will be detected and handled correctly. * * The function does not try to normalize everything, but only the obvious * case of one or more "../" appearning at the start of "filename" * relative path. */ sds getAbsolutePath(char *filename) { char cwd[1024]; sds abspath; sds relpath = sdsnew(filename); relpath = sdstrim(relpath," \r\n\t"); if (relpath[0] == '/') return relpath; /* Path is already absolute. */ /* If path is relative, join cwd and relative path. */ if (getcwd(cwd,sizeof(cwd)) == NULL) { sdsfree(relpath); return NULL; } abspath = sdsnew(cwd); if (sdslen(abspath) && abspath[sdslen(abspath)-1] != '/') abspath = sdscat(abspath,"/"); /* At this point we have the current path always ending with "/", and * the trimmed relative path. Try to normalize the obvious case of * trailing ../ elements at the start of the path. * * For every "../" we find in the filename, we remove it and also remove * the last element of the cwd, unless the current cwd is "/". */ while (sdslen(relpath) >= 3 && relpath[0] == '.' && relpath[1] == '.' && relpath[2] == '/') { sdsrange(relpath,3,-1); if (sdslen(abspath) > 1) { char *p = abspath + sdslen(abspath)-2; int trimlen = 1; while(*p != '/') { p--; trimlen++; } sdsrange(abspath,0,-(trimlen+1)); } } /* Finally glue the two parts together. */ abspath = sdscatsds(abspath,relpath); sdsfree(relpath); return abspath; } /* Return true if the specified path is just a file basename without any * relative or absolute path. This function just checks that no / or \ * character exists inside the specified path, that's enough in the * environments where Redis runs. */ int pathIsBaseName(char *path) { return strchr(path,'/') == NULL && strchr(path,'\\') == NULL; } #ifdef REDIS_TEST #include <assert.h> static void test_string2ll(void) { char buf[32]; long long v; /* May not start with +. */ strcpy(buf,"+1"); assert(string2ll(buf,strlen(buf),&v) == 0); /* Leading space. */ strcpy(buf," 1"); assert(string2ll(buf,strlen(buf),&v) == 0); /* Trailing space. */ strcpy(buf,"1 "); assert(string2ll(buf,strlen(buf),&v) == 0); /* May not start with 0. 
*/ strcpy(buf,"01"); assert(string2ll(buf,strlen(buf),&v) == 0); strcpy(buf,"-1"); assert(string2ll(buf,strlen(buf),&v) == 1); assert(v == -1); strcpy(buf,"0"); assert(string2ll(buf,strlen(buf),&v) == 1); assert(v == 0); strcpy(buf,"1"); assert(string2ll(buf,strlen(buf),&v) == 1); assert(v == 1); strcpy(buf,"99"); assert(string2ll(buf,strlen(buf),&v) == 1); assert(v == 99); strcpy(buf,"-99"); assert(string2ll(buf,strlen(buf),&v) == 1); assert(v == -99); strcpy(buf,"-9223372036854775808"); assert(string2ll(buf,strlen(buf),&v) == 1); assert(v == LLONG_MIN); strcpy(buf,"-9223372036854775809"); /* overflow */ assert(string2ll(buf,strlen(buf),&v) == 0); strcpy(buf,"9223372036854775807"); assert(string2ll(buf,strlen(buf),&v) == 1); assert(v == LLONG_MAX); strcpy(buf,"9223372036854775808"); /* overflow */ assert(string2ll(buf,strlen(buf),&v) == 0); } static void test_string2l(void) { char buf[32]; long v; /* May not start with +. */ strcpy(buf,"+1"); assert(string2l(buf,strlen(buf),&v) == 0); /* May not start with 0. */ strcpy(buf,"01"); assert(string2l(buf,strlen(buf),&v) == 0); strcpy(buf,"-1"); assert(string2l(buf,strlen(buf),&v) == 1); assert(v == -1); strcpy(buf,"0"); assert(string2l(buf,strlen(buf),&v) == 1); assert(v == 0); strcpy(buf,"1"); assert(string2l(buf,strlen(buf),&v) == 1); assert(v == 1); strcpy(buf,"99"); assert(string2l(buf,strlen(buf),&v) == 1); assert(v == 99); strcpy(buf,"-99"); assert(string2l(buf,strlen(buf),&v) == 1); assert(v == -99); #if LONG_MAX != LLONG_MAX strcpy(buf,"-2147483648"); assert(string2l(buf,strlen(buf),&v) == 1); assert(v == LONG_MIN); strcpy(buf,"-2147483649"); /* overflow */ assert(string2l(buf,strlen(buf),&v) == 0); strcpy(buf,"2147483647"); assert(string2l(buf,strlen(buf),&v) == 1); assert(v == LONG_MAX); strcpy(buf,"2147483648"); /* overflow */ assert(string2l(buf,strlen(buf),&v) == 0); #endif } static void test_ll2string(void) { char buf[32]; long long v; int sz; v = 0; sz = ll2string(buf, sizeof buf, v); assert(sz == 1); assert(!strcmp(buf, "0")); v = -1; sz = ll2string(buf, sizeof buf, v); assert(sz == 2); assert(!strcmp(buf, "-1")); v = 99; sz = ll2string(buf, sizeof buf, v); assert(sz == 2); assert(!strcmp(buf, "99")); v = -99; sz = ll2string(buf, sizeof buf, v); assert(sz == 3); assert(!strcmp(buf, "-99")); v = -2147483648; sz = ll2string(buf, sizeof buf, v); assert(sz == 11); assert(!strcmp(buf, "-2147483648")); v = LLONG_MIN; sz = ll2string(buf, sizeof buf, v); assert(sz == 20); assert(!strcmp(buf, "-9223372036854775808")); v = LLONG_MAX; sz = ll2string(buf, sizeof buf, v); assert(sz == 19); assert(!strcmp(buf, "9223372036854775807")); } #define UNUSED(x) (void)(x) int utilTest(int argc, char **argv) { UNUSED(argc); UNUSED(argv); test_string2ll(); test_string2l(); test_ll2string(); return 0; } #endif
22,734
29.394385
102
c
null
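util.c above collects Redis' string/number helpers: strict integer parsing, fast integer-to-string conversion, memory-size parsing and glob matching. The hypothetical driver below shows the expected results for a few representative inputs; it assumes linking with util.c and its small in-tree dependencies (sha1.c, sds.c, zmalloc.c).

#include <stdio.h>
#include <string.h>
#include "util.h"

int main(void) {
    long long v;
    char buf[32];
    int err;

    /* Strict parsing: a leading '+', spaces or leading zeroes are rejected. */
    printf("%d\n", string2ll("9223372036854775807", 19, &v)); /* 1, v == LLONG_MAX */
    printf("%d\n", string2ll("+1", 2, &v));                   /* 0 */

    /* ll2string() returns the number of characters written. */
    int len = ll2string(buf, sizeof(buf), -12345);
    printf("%d %s\n", len, buf);                              /* 6 -12345 */

    /* memtoll() understands unit suffixes: "1gb" -> 1024*1024*1024. */
    printf("%lld\n", memtoll("1gb", &err));                   /* 1073741824 */

    /* Glob matching, as used by KEYS and SCAN MATCH. */
    printf("%d\n", stringmatch("h?llo*", "hello world", 0));  /* 1 */
    return 0;
}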
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/ae.h
/* A simple event-driven programming library. Originally I wrote this code * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated * it in form of a library for easy reuse. * * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __AE_H__ #define __AE_H__ #include <time.h> #define AE_OK 0 #define AE_ERR -1 #define AE_NONE 0 #define AE_READABLE 1 #define AE_WRITABLE 2 #define AE_FILE_EVENTS 1 #define AE_TIME_EVENTS 2 #define AE_ALL_EVENTS (AE_FILE_EVENTS|AE_TIME_EVENTS) #define AE_DONT_WAIT 4 #define AE_NOMORE -1 #define AE_DELETED_EVENT_ID -1 /* Macros */ #define AE_NOTUSED(V) ((void) V) struct aeEventLoop; /* Types and data structures */ typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask); typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData); typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData); typedef void aeBeforeSleepProc(struct aeEventLoop *eventLoop); /* File event structure */ typedef struct aeFileEvent { int mask; /* one of AE_(READABLE|WRITABLE) */ aeFileProc *rfileProc; aeFileProc *wfileProc; void *clientData; } aeFileEvent; /* Time event structure */ typedef struct aeTimeEvent { long long id; /* time event identifier. 
*/ long when_sec; /* seconds */ long when_ms; /* milliseconds */ aeTimeProc *timeProc; aeEventFinalizerProc *finalizerProc; void *clientData; struct aeTimeEvent *next; } aeTimeEvent; /* A fired event */ typedef struct aeFiredEvent { int fd; int mask; } aeFiredEvent; /* State of an event based program */ typedef struct aeEventLoop { int maxfd; /* highest file descriptor currently registered */ int setsize; /* max number of file descriptors tracked */ long long timeEventNextId; time_t lastTime; /* Used to detect system clock skew */ aeFileEvent *events; /* Registered events */ aeFiredEvent *fired; /* Fired events */ aeTimeEvent *timeEventHead; int stop; void *apidata; /* This is used for polling API specific data */ aeBeforeSleepProc *beforesleep; } aeEventLoop; /* Prototypes */ aeEventLoop *aeCreateEventLoop(int setsize); void aeDeleteEventLoop(aeEventLoop *eventLoop); void aeStop(aeEventLoop *eventLoop); int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, aeFileProc *proc, void *clientData); void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask); int aeGetFileEvents(aeEventLoop *eventLoop, int fd); long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, aeTimeProc *proc, void *clientData, aeEventFinalizerProc *finalizerProc); int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id); int aeProcessEvents(aeEventLoop *eventLoop, int flags); int aeWait(int fd, int mask, long long milliseconds); void aeMain(aeEventLoop *eventLoop); char *aeGetApiName(void); void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep); int aeGetSetSize(aeEventLoop *eventLoop); int aeResizeSetSize(aeEventLoop *eventLoop, int setsize); #endif
4,681
36.758065
91
h
null
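ae.h above is the event-loop API: file events fire when a descriptor becomes readable or writable, and time events re-arm themselves by returning the next delay in milliseconds (or AE_NOMORE to be deleted). A minimal sketch using both kinds of events is given below; it is not part of the original tree and assumes linking with ae.c and zmalloc.c.

#include <stdio.h>
#include <unistd.h>
#include "ae.h"

static void on_stdin(aeEventLoop *el, int fd, void *privdata, int mask) {
    char buf[128];
    ssize_t n = read(fd, buf, sizeof(buf));
    if (n <= 0) aeStop(el);                 /* EOF or error: leave aeMain() */
    else printf("read %zd bytes\n", n);
    (void)privdata; (void)mask;
}

static int every_second(aeEventLoop *el, long long id, void *privdata) {
    printf("tick\n");
    (void)el; (void)id; (void)privdata;
    return 1000;                            /* re-arm the timer in 1000 ms */
}

int main(void) {
    aeEventLoop *el = aeCreateEventLoop(64);
    aeCreateFileEvent(el, STDIN_FILENO, AE_READABLE, on_stdin, NULL);
    aeCreateTimeEvent(el, 1000, every_second, NULL, NULL);
    aeMain(el);                             /* blocks until aeStop() is called */
    aeDeleteEventLoop(el);
    return 0;
}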
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/multi.c
/* * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" /* ================================ MULTI/EXEC ============================== */ /* Client state initialization for MULTI/EXEC */ void initClientMultiState(client *c) { c->mstate.commands = NULL; c->mstate.count = 0; } /* Release all the resources associated with MULTI/EXEC state */ void freeClientMultiState(client *c) { int j; for (j = 0; j < c->mstate.count; j++) { int i; multiCmd *mc = c->mstate.commands+j; for (i = 0; i < mc->argc; i++) decrRefCount(mc->argv[i]); zfree(mc->argv); } zfree(c->mstate.commands); } /* Add a new command into the MULTI commands queue */ void queueMultiCommand(client *c) { multiCmd *mc; int j; c->mstate.commands = zrealloc(c->mstate.commands, sizeof(multiCmd)*(c->mstate.count+1)); mc = c->mstate.commands+c->mstate.count; mc->cmd = c->cmd; mc->argc = c->argc; mc->argv = zmalloc(sizeof(robj*)*c->argc); memcpy(mc->argv,c->argv,sizeof(robj*)*c->argc); for (j = 0; j < c->argc; j++) incrRefCount(mc->argv[j]); c->mstate.count++; } void discardTransaction(client *c) { freeClientMultiState(c); initClientMultiState(c); c->flags &= ~(CLIENT_MULTI|CLIENT_DIRTY_CAS|CLIENT_DIRTY_EXEC); unwatchAllKeys(c); } /* Flag the transacation as DIRTY_EXEC so that EXEC will fail. * Should be called every time there is an error while queueing a command. */ void flagTransaction(client *c) { if (c->flags & CLIENT_MULTI) c->flags |= CLIENT_DIRTY_EXEC; } void multiCommand(client *c) { if (c->flags & CLIENT_MULTI) { addReplyError(c,"MULTI calls can not be nested"); return; } c->flags |= CLIENT_MULTI; addReply(c,shared.ok); } void discardCommand(client *c) { if (!(c->flags & CLIENT_MULTI)) { addReplyError(c,"DISCARD without MULTI"); return; } discardTransaction(c); addReply(c,shared.ok); } /* Send a MULTI command to all the slaves and AOF file. Check the execCommand * implementation for more information. 
*/ void execCommandPropagateMulti(client *c) { robj *multistring = createStringObject("MULTI",5); propagate(server.multiCommand,c->db->id,&multistring,1, PROPAGATE_AOF|PROPAGATE_REPL); decrRefCount(multistring); } void execCommand(client *c) { int j; robj **orig_argv; int orig_argc; struct redisCommand *orig_cmd; int must_propagate = 0; /* Need to propagate MULTI/EXEC to AOF / slaves? */ if (!(c->flags & CLIENT_MULTI)) { addReplyError(c,"EXEC without MULTI"); return; } /* Check if we need to abort the EXEC because: * 1) Some WATCHed key was touched. * 2) There was a previous error while queueing commands. * A failed EXEC in the first case returns a multi bulk nil object * (technically it is not an error but a special behavior), while * in the second an EXECABORT error is returned. */ if (c->flags & (CLIENT_DIRTY_CAS|CLIENT_DIRTY_EXEC)) { addReply(c, c->flags & CLIENT_DIRTY_EXEC ? shared.execaborterr : shared.nullmultibulk); discardTransaction(c); goto handle_monitor; } /* Exec all the queued commands */ unwatchAllKeys(c); /* Unwatch ASAP otherwise we'll waste CPU cycles */ orig_argv = c->argv; orig_argc = c->argc; orig_cmd = c->cmd; addReplyMultiBulkLen(c,c->mstate.count); for (j = 0; j < c->mstate.count; j++) { c->argc = c->mstate.commands[j].argc; c->argv = c->mstate.commands[j].argv; c->cmd = c->mstate.commands[j].cmd; /* Propagate a MULTI request once we encounter the first write op. * This way we'll deliver the MULTI/..../EXEC block as a whole and * both the AOF and the replication link will have the same consistency * and atomicity guarantees. */ if (!must_propagate && !(c->cmd->flags & CMD_READONLY)) { execCommandPropagateMulti(c); must_propagate = 1; } call(c,CMD_CALL_FULL); /* Commands may alter argc/argv, restore mstate. */ c->mstate.commands[j].argc = c->argc; c->mstate.commands[j].argv = c->argv; c->mstate.commands[j].cmd = c->cmd; } c->argv = orig_argv; c->argc = orig_argc; c->cmd = orig_cmd; discardTransaction(c); /* Make sure the EXEC command will be propagated as well if MULTI * was already propagated. */ if (must_propagate) server.dirty++; handle_monitor: /* Send EXEC to clients waiting data from MONITOR. We do it here * since the natural order of commands execution is actually: * MUTLI, EXEC, ... commands inside transaction ... * Instead EXEC is flagged as CMD_SKIP_MONITOR in the command * table, and we do it here with correct ordering. */ if (listLength(server.monitors) && !server.loading) replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc); } /* ===================== WATCH (CAS alike for MULTI/EXEC) =================== * * The implementation uses a per-DB hash table mapping keys to list of clients * WATCHing those keys, so that given a key that is going to be modified * we can mark all the associated clients as dirty. * * Also every client contains a list of WATCHed keys so that's possible to * un-watch such keys when the client is freed or when UNWATCH is called. 
*/ /* In the client->watched_keys list we need to use watchedKey structures * as in order to identify a key in Redis we need both the key name and the * DB */ typedef struct watchedKey { robj *key; redisDb *db; } watchedKey; /* Watch for the specified key */ void watchForKey(client *c, robj *key) { list *clients = NULL; listIter li; listNode *ln; watchedKey *wk; /* Check if we are already watching for this key */ listRewind(c->watched_keys,&li); while((ln = listNext(&li))) { wk = listNodeValue(ln); if (wk->db == c->db && equalStringObjects(key,wk->key)) return; /* Key already watched */ } /* This key is not already watched in this DB. Let's add it */ clients = dictFetchValue(c->db->watched_keys,key); if (!clients) { clients = listCreate(); dictAdd(c->db->watched_keys,key,clients); incrRefCount(key); } listAddNodeTail(clients,c); /* Add the new key to the list of keys watched by this client */ wk = zmalloc(sizeof(*wk)); wk->key = key; wk->db = c->db; incrRefCount(key); listAddNodeTail(c->watched_keys,wk); } /* Unwatch all the keys watched by this client. To clean the EXEC dirty * flag is up to the caller. */ void unwatchAllKeys(client *c) { listIter li; listNode *ln; if (listLength(c->watched_keys) == 0) return; listRewind(c->watched_keys,&li); while((ln = listNext(&li))) { list *clients; watchedKey *wk; /* Lookup the watched key -> clients list and remove the client * from the list */ wk = listNodeValue(ln); clients = dictFetchValue(wk->db->watched_keys, wk->key); serverAssertWithInfo(c,NULL,clients != NULL); listDelNode(clients,listSearchKey(clients,c)); /* Kill the entry at all if this was the only client */ if (listLength(clients) == 0) dictDelete(wk->db->watched_keys, wk->key); /* Remove this watched key from the client->watched list */ listDelNode(c->watched_keys,ln); decrRefCount(wk->key); zfree(wk); } } /* "Touch" a key, so that if this key is being WATCHed by some client the * next EXEC will fail. */ void touchWatchedKey(redisDb *db, robj *key) { list *clients; listIter li; listNode *ln; if (dictSize(db->watched_keys) == 0) return; clients = dictFetchValue(db->watched_keys, key); if (!clients) return; /* Mark all the clients watching this key as CLIENT_DIRTY_CAS */ /* Check if we are already watching for this key */ listRewind(clients,&li); while((ln = listNext(&li))) { client *c = listNodeValue(ln); c->flags |= CLIENT_DIRTY_CAS; } } /* On FLUSHDB or FLUSHALL all the watched keys that are present before the * flush but will be deleted as effect of the flushing operation should * be touched. "dbid" is the DB that's getting the flush. -1 if it is * a FLUSHALL operation (all the DBs flushed). */ void touchWatchedKeysOnFlush(int dbid) { listIter li1, li2; listNode *ln; /* For every client, check all the waited keys */ listRewind(server.clients,&li1); while((ln = listNext(&li1))) { client *c = listNodeValue(ln); listRewind(c->watched_keys,&li2); while((ln = listNext(&li2))) { watchedKey *wk = listNodeValue(ln); /* For every watched key matching the specified DB, if the * key exists, mark the client as dirty, as the key will be * removed. */ if (dbid == -1 || wk->db->id == dbid) { if (dictFind(wk->db->dict, wk->key->ptr) != NULL) c->flags |= CLIENT_DIRTY_CAS; } } } } void watchCommand(client *c) { int j; if (c->flags & CLIENT_MULTI) { addReplyError(c,"WATCH inside MULTI is not allowed"); return; } for (j = 1; j < c->argc; j++) watchForKey(c,c->argv[j]); addReply(c,shared.ok); } void unwatchCommand(client *c) { unwatchAllKeys(c); c->flags &= (~CLIENT_DIRTY_CAS); addReply(c,shared.ok); }
11,229
33.660494
80
c
null
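multi.c above implements the server side of MULTI/EXEC/DISCARD/WATCH: commands are queued per client and EXEC replies with a nil multi-bulk when a WATCHed key was touched. The client-side view of that flow is sketched below using the hiredis client library, which is an assumption and not part of this file; error handling is kept minimal.

#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *c = redisConnect("127.0.0.1", 6379);
    if (c == NULL || c->err) return 1;

    /* WATCH makes the later EXEC fail if "counter" is modified in between. */
    freeReplyObject(redisCommand(c, "WATCH counter"));
    freeReplyObject(redisCommand(c, "MULTI"));
    freeReplyObject(redisCommand(c, "INCR counter"));   /* queued, server replies +QUEUED */
    freeReplyObject(redisCommand(c, "GET counter"));    /* queued */

    redisReply *r = redisCommand(c, "EXEC");            /* nil if the CAS check failed */
    if (r && r->type == REDIS_REPLY_ARRAY)
        printf("transaction ran, %zu replies\n", r->elements);
    else
        printf("transaction aborted by a concurrent write\n");

    freeReplyObject(r);
    redisFree(c);
    return 0;
}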
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/lzf_d.c
/* * Copyright (c) 2000-2010 Marc Alexander Lehmann <schmorp@schmorp.de> * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. 
*/ #include "lzfP.h" #if AVOID_ERRNO # define SET_ERRNO(n) #else # include <errno.h> # define SET_ERRNO(n) errno = (n) #endif #if USE_REP_MOVSB /* small win on amd, big loss on intel */ #if (__i386 || __amd64) && __GNUC__ >= 3 # define lzf_movsb(dst, src, len) \ asm ("rep movsb" \ : "=D" (dst), "=S" (src), "=c" (len) \ : "0" (dst), "1" (src), "2" (len)); #endif #endif unsigned int lzf_decompress (const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len) { u8 const *ip = (const u8 *)in_data; u8 *op = (u8 *)out_data; u8 const *const in_end = ip + in_len; u8 *const out_end = op + out_len; do { unsigned int ctrl = *ip++; if (ctrl < (1 << 5)) /* literal run */ { ctrl++; if (op + ctrl > out_end) { SET_ERRNO (E2BIG); return 0; } #if CHECK_INPUT if (ip + ctrl > in_end) { SET_ERRNO (EINVAL); return 0; } #endif #ifdef lzf_movsb lzf_movsb (op, ip, ctrl); #else switch (ctrl) { case 32: *op++ = *ip++; case 31: *op++ = *ip++; case 30: *op++ = *ip++; case 29: *op++ = *ip++; case 28: *op++ = *ip++; case 27: *op++ = *ip++; case 26: *op++ = *ip++; case 25: *op++ = *ip++; case 24: *op++ = *ip++; case 23: *op++ = *ip++; case 22: *op++ = *ip++; case 21: *op++ = *ip++; case 20: *op++ = *ip++; case 19: *op++ = *ip++; case 18: *op++ = *ip++; case 17: *op++ = *ip++; case 16: *op++ = *ip++; case 15: *op++ = *ip++; case 14: *op++ = *ip++; case 13: *op++ = *ip++; case 12: *op++ = *ip++; case 11: *op++ = *ip++; case 10: *op++ = *ip++; case 9: *op++ = *ip++; case 8: *op++ = *ip++; case 7: *op++ = *ip++; case 6: *op++ = *ip++; case 5: *op++ = *ip++; case 4: *op++ = *ip++; case 3: *op++ = *ip++; case 2: *op++ = *ip++; case 1: *op++ = *ip++; } #endif } else /* back reference */ { unsigned int len = ctrl >> 5; u8 *ref = op - ((ctrl & 0x1f) << 8) - 1; #if CHECK_INPUT if (ip >= in_end) { SET_ERRNO (EINVAL); return 0; } #endif if (len == 7) { len += *ip++; #if CHECK_INPUT if (ip >= in_end) { SET_ERRNO (EINVAL); return 0; } #endif } ref -= *ip++; if (op + len + 2 > out_end) { SET_ERRNO (E2BIG); return 0; } if (ref < (u8 *)out_data) { SET_ERRNO (EINVAL); return 0; } #ifdef lzf_movsb len += 2; lzf_movsb (op, ref, len); #else switch (len) { default: len += 2; if (op >= ref + len) { /* disjunct areas */ memcpy (op, ref, len); op += len; } else { /* overlapping, use octte by octte copying */ do *op++ = *ref++; while (--len); } break; case 9: *op++ = *ref++; case 8: *op++ = *ref++; case 7: *op++ = *ref++; case 6: *op++ = *ref++; case 5: *op++ = *ref++; case 4: *op++ = *ref++; case 3: *op++ = *ref++; case 2: *op++ = *ref++; case 1: *op++ = *ref++; case 0: *op++ = *ref++; /* two octets more */ *op++ = *ref++; } #endif } } while (ip < in_end); return op - (u8 *)out_data; }
6,088
31.736559
109
c
null
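lzf_d.c above is the decompression half of liblzf, used by Redis for compressed payloads; its counterpart lzf_compress() lives in lzf_c.c and both are declared in lzf.h. The round-trip sketch below is hypothetical and assumes linking with lzf_c.c and lzf_d.c.

#include <stdio.h>
#include <string.h>
#include "lzf.h"

int main(void) {
    const char in[] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa hello hello hello";
    char packed[256], out[256];

    /* lzf_compress() returns 0 when the compressed form does not fit in out_len. */
    unsigned int clen = lzf_compress(in, sizeof(in), packed, sizeof(packed));
    if (clen == 0) { puts("could not compress"); return 1; }

    unsigned int dlen = lzf_decompress(packed, clen, out, sizeof(out));
    printf("compressed %zu -> %u bytes, round-trip ok: %d\n",
           sizeof(in), clen, dlen == sizeof(in) && memcmp(in, out, dlen) == 0);
    return 0;
}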
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/sha1.c
/* from valgrind tests */ /* ================ sha1.c ================ */ /* SHA-1 in C By Steve Reid <steve@edmweb.com> 100% Public Domain Test Vectors (from FIPS PUB 180-1) "abc" A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1 A million repetitions of "a" 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F */ /* #define LITTLE_ENDIAN * This should be #define'd already, if true. */ /* #define SHA1HANDSOFF * Copies data before messing with it. */ #define SHA1HANDSOFF #include <stdio.h> #include <string.h> #include <stdint.h> #include "solarisfixes.h" #include "sha1.h" #include "config.h" #define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) /* blk0() and blk() perform the initial expand. */ /* I got the idea of expanding during the round function from SSLeay */ #if BYTE_ORDER == LITTLE_ENDIAN #define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \ |(rol(block->l[i],8)&0x00FF00FF)) #elif BYTE_ORDER == BIG_ENDIAN #define blk0(i) block->l[i] #else #error "Endianness not defined!" #endif #define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \ ^block->l[(i+2)&15]^block->l[i&15],1)) /* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */ #define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30); #define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30); #define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30); #define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30); #define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30); /* Hash a single 512-bit block. This is the core of the algorithm. */ void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]) { uint32_t a, b, c, d, e; typedef union { unsigned char c[64]; uint32_t l[16]; } CHAR64LONG16; #ifdef SHA1HANDSOFF CHAR64LONG16 block[1]; /* use array to appear as a pointer */ memcpy(block, buffer, 64); #else /* The following had better never be used because it causes the * pointer-to-const buffer to be cast into a pointer to non-const. * And the result is written through. I threw a "const" in, hoping * this will cause a diagnostic. */ CHAR64LONG16* block = (const CHAR64LONG16*)buffer; #endif /* Copy context->state[] to working vars */ a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; /* 4 rounds of 20 operations each. Loop unrolled. 
*/ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3); R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7); R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11); R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15); R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19); R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23); R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27); R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31); R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35); R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39); R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43); R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47); R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51); R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55); R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59); R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63); R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67); R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71); R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75); R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79); /* Add the working vars back into context.state[] */ state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; /* Wipe variables */ a = b = c = d = e = 0; #ifdef SHA1HANDSOFF memset(block, '\0', sizeof(block)); #endif } /* SHA1Init - Initialize new context */ void SHA1Init(SHA1_CTX* context) { /* SHA1 initialization constants */ context->state[0] = 0x67452301; context->state[1] = 0xEFCDAB89; context->state[2] = 0x98BADCFE; context->state[3] = 0x10325476; context->state[4] = 0xC3D2E1F0; context->count[0] = context->count[1] = 0; } /* Run your data through this. */ void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len) { uint32_t i, j; j = context->count[0]; if ((context->count[0] += len << 3) < j) context->count[1]++; context->count[1] += (len>>29); j = (j >> 3) & 63; if ((j + len) > 63) { memcpy(&context->buffer[j], data, (i = 64-j)); SHA1Transform(context->state, context->buffer); for ( ; i + 63 < len; i += 64) { SHA1Transform(context->state, &data[i]); } j = 0; } else i = 0; memcpy(&context->buffer[j], &data[i], len - i); } /* Add padding and return the message digest. */ void SHA1Final(unsigned char digest[20], SHA1_CTX* context) { unsigned i; unsigned char finalcount[8]; unsigned char c; #if 0 /* untested "improvement" by DHR */ /* Convert context->count to a sequence of bytes * in finalcount. Second element first, but * big-endian order within element. * But we do it all backwards. */ unsigned char *fcp = &finalcount[8]; for (i = 0; i < 2; i++) { uint32_t t = context->count[i]; int j; for (j = 0; j < 4; t >>= 8, j++) *--fcp = (unsigned char) t; } #else for (i = 0; i < 8; i++) { finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 
0 : 1)] >> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */ } #endif c = 0200; SHA1Update(context, &c, 1); while ((context->count[0] & 504) != 448) { c = 0000; SHA1Update(context, &c, 1); } SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */ for (i = 0; i < 20; i++) { digest[i] = (unsigned char) ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255); } /* Wipe variables */ memset(context, '\0', sizeof(*context)); memset(&finalcount, '\0', sizeof(finalcount)); } /* ================ end of sha1.c ================ */ #ifdef REDIS_TEST #define BUFSIZE 4096 #define UNUSED(x) (void)(x) int sha1Test(int argc, char **argv) { SHA1_CTX ctx; unsigned char hash[20], buf[BUFSIZE]; int i; UNUSED(argc); UNUSED(argv); for(i=0;i<BUFSIZE;i++) buf[i] = i; SHA1Init(&ctx); for(i=0;i<1000;i++) SHA1Update(&ctx, buf, BUFSIZE); SHA1Final(hash, &ctx); printf("SHA1="); for(i=0;i<20;i++) printf("%02x", hash[i]); printf("\n"); return 0; } #endif
7,252
30.811404
84
c
null
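A minimal usage sketch for the SHA1 API implemented in the sha1.c record above (it assumes sha1.h declares SHA1_CTX, SHA1Init, SHA1Update and SHA1Final exactly as defined there); hashing "abc" should reproduce the first FIPS PUB 180-1 test vector quoted in the file's header comment.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "sha1.h"

int main(void) {
    SHA1_CTX ctx;
    unsigned char digest[20];
    const char *msg = "abc";                 /* FIPS PUB 180-1 test vector */

    SHA1Init(&ctx);
    SHA1Update(&ctx, (const unsigned char *)msg, (uint32_t)strlen(msg));
    SHA1Final(digest, &ctx);

    for (int i = 0; i < 20; i++)             /* expect a9993e36...9cd0d89d */
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}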
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/bio.c
/* Background I/O service for Redis. * * This file implements operations that we need to perform in the background. * Currently there is only a single operation, that is a background close(2) * system call. This is needed as when the process is the last owner of a * reference to a file closing it means unlinking it, and the deletion of the * file is slow, blocking the server. * * In the future we'll either continue implementing new things we need or * we'll switch to libeio. However there are probably long term uses for this * file as we may want to put here Redis specific background tasks (for instance * it is not impossible that we'll need a non blocking FLUSHDB/FLUSHALL * implementation). * * DESIGN * ------ * * The design is trivial, we have a structure representing a job to perform * and a different thread and job queue for every job type. * Every thread wait for new jobs in its queue, and process every job * sequentially. * * Jobs of the same type are guaranteed to be processed from the least * recently inserted to the most recently inserted (older jobs processed * first). * * Currently there is no way for the creator of the job to be notified about * the completion of the operation, this will only be added when/if needed. * * ---------------------------------------------------------------------------- * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "bio.h" static pthread_t bio_threads[BIO_NUM_OPS]; static pthread_mutex_t bio_mutex[BIO_NUM_OPS]; static pthread_cond_t bio_condvar[BIO_NUM_OPS]; static list *bio_jobs[BIO_NUM_OPS]; /* The following array is used to hold the number of pending jobs for every * OP type. This allows us to export the bioPendingJobsOfType() API that is * useful when the main thread wants to perform some operation that may involve * objects shared with the background thread. The main thread will just wait * that there are no longer jobs of this type to be executed before performing * the sensible operation. This data is also useful for reporting. 
*/ static unsigned long long bio_pending[BIO_NUM_OPS]; /* This structure represents a background Job. It is only used locally to this * file as the API does not expose the internals at all. */ struct bio_job { time_t time; /* Time at which the job was created. */ /* Job specific arguments pointers. If we need to pass more than three * arguments we can just pass a pointer to a structure or alike. */ void *arg1, *arg2, *arg3; }; void *bioProcessBackgroundJobs(void *arg); /* Make sure we have enough stack to perform all the things we do in the * main thread. */ #define REDIS_THREAD_STACK_SIZE (1024*1024*4) /* Initialize the background system, spawning the thread. */ void bioInit(void) { pthread_attr_t attr; pthread_t thread; size_t stacksize; int j; /* Initialization of state vars and objects */ for (j = 0; j < BIO_NUM_OPS; j++) { pthread_mutex_init(&bio_mutex[j],NULL); pthread_cond_init(&bio_condvar[j],NULL); bio_jobs[j] = listCreate(); bio_pending[j] = 0; } /* Set the stack size as by default it may be small in some system */ pthread_attr_init(&attr); pthread_attr_getstacksize(&attr,&stacksize); if (!stacksize) stacksize = 1; /* The world is full of Solaris Fixes */ while (stacksize < REDIS_THREAD_STACK_SIZE) stacksize *= 2; pthread_attr_setstacksize(&attr, stacksize); /* Ready to spawn our threads. We use the single argument the thread * function accepts in order to pass the job ID the thread is * responsible of. */ for (j = 0; j < BIO_NUM_OPS; j++) { void *arg = (void*)(unsigned long) j; if (pthread_create(&thread,&attr,bioProcessBackgroundJobs,arg) != 0) { serverLog(LL_WARNING,"Fatal: Can't initialize Background Jobs."); exit(1); } bio_threads[j] = thread; } } void bioCreateBackgroundJob(int type, void *arg1, void *arg2, void *arg3) { struct bio_job *job = zmalloc(sizeof(*job)); job->time = time(NULL); job->arg1 = arg1; job->arg2 = arg2; job->arg3 = arg3; pthread_mutex_lock(&bio_mutex[type]); listAddNodeTail(bio_jobs[type],job); bio_pending[type]++; pthread_cond_signal(&bio_condvar[type]); pthread_mutex_unlock(&bio_mutex[type]); } void *bioProcessBackgroundJobs(void *arg) { struct bio_job *job; unsigned long type = (unsigned long) arg; sigset_t sigset; /* Check that the type is within the right interval. */ if (type >= BIO_NUM_OPS) { serverLog(LL_WARNING, "Warning: bio thread started with wrong type %lu",type); return NULL; } /* Make the thread killable at any time, so that bioKillThreads() * can work reliably. */ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); pthread_mutex_lock(&bio_mutex[type]); /* Block SIGALRM so we are sure that only the main thread will * receive the watchdog signal. */ sigemptyset(&sigset); sigaddset(&sigset, SIGALRM); if (pthread_sigmask(SIG_BLOCK, &sigset, NULL)) serverLog(LL_WARNING, "Warning: can't mask SIGALRM in bio.c thread: %s", strerror(errno)); while(1) { listNode *ln; /* The loop always starts with the lock hold. */ if (listLength(bio_jobs[type]) == 0) { pthread_cond_wait(&bio_condvar[type],&bio_mutex[type]); continue; } /* Pop the job from the queue. */ ln = listFirst(bio_jobs[type]); job = ln->value; /* It is now possible to unlock the background system as we know have * a stand alone job structure to process.*/ pthread_mutex_unlock(&bio_mutex[type]); /* Process the job accordingly to its type. 
*/ if (type == BIO_CLOSE_FILE) { close((long)job->arg1); } else if (type == BIO_AOF_FSYNC) { aof_fsync((long)job->arg1); } else { serverPanic("Wrong job type in bioProcessBackgroundJobs()."); } zfree(job); /* Lock again before reiterating the loop, if there are no longer * jobs to process we'll block again in pthread_cond_wait(). */ pthread_mutex_lock(&bio_mutex[type]); listDelNode(bio_jobs[type],ln); bio_pending[type]--; } } /* Return the number of pending jobs of the specified type. */ unsigned long long bioPendingJobsOfType(int type) { unsigned long long val; pthread_mutex_lock(&bio_mutex[type]); val = bio_pending[type]; pthread_mutex_unlock(&bio_mutex[type]); return val; } /* Kill the running bio threads in an unclean way. This function should be * used only when it's critical to stop the threads for some reason. * Currently Redis does this only on crash (for instance on SIGSEGV) in order * to perform a fast memory check without other threads messing with memory. */ void bioKillThreads(void) { int err, j; for (j = 0; j < BIO_NUM_OPS; j++) { if (pthread_cancel(bio_threads[j]) == 0) { if ((err = pthread_join(bio_threads[j],NULL)) != 0) { serverLog(LL_WARNING, "Bio thread for job type #%d can be joined: %s", j, strerror(err)); } else { serverLog(LL_WARNING, "Bio thread for job type #%d terminated",j); } } } }
9,103
38.929825
80
c
null
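Illustrative only: how the rest of the server hands work to the background threads defined in bio.c above. BIO_CLOSE_FILE and bioCreateBackgroundJob() come from bio.h; the fd is whatever the caller wants closed off the main thread.

#include "bio.h"

/* Queue an fd for the BIO_CLOSE_FILE worker; arg1 carries the fd as a
 * pointer-sized integer, and bioProcessBackgroundJobs() casts it back
 * before calling close(). */
void close_file_in_background(int fd) {
    bioCreateBackgroundJob(BIO_CLOSE_FILE, (void *)(long)fd, NULL, NULL);
}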
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/notify.c
/* * Copyright (c) 2013, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" /* This file implements keyspace events notification via Pub/Sub ad * described at http://redis.io/topics/keyspace-events. */ /* Turn a string representing notification classes into an integer * representing notification classes flags xored. * * The function returns -1 if the input contains characters not mapping to * any class. */ int keyspaceEventsStringToFlags(char *classes) { char *p = classes; int c, flags = 0; while((c = *p++) != '\0') { switch(c) { case 'A': flags |= NOTIFY_ALL; break; case 'g': flags |= NOTIFY_GENERIC; break; case '$': flags |= NOTIFY_STRING; break; case 'l': flags |= NOTIFY_LIST; break; case 's': flags |= NOTIFY_SET; break; case 'h': flags |= NOTIFY_HASH; break; case 'z': flags |= NOTIFY_ZSET; break; case 'x': flags |= NOTIFY_EXPIRED; break; case 'e': flags |= NOTIFY_EVICTED; break; case 'K': flags |= NOTIFY_KEYSPACE; break; case 'E': flags |= NOTIFY_KEYEVENT; break; default: return -1; } } return flags; } /* This function does exactly the revese of the function above: it gets * as input an integer with the xored flags and returns a string representing * the selected classes. The string returned is an sds string that needs to * be released with sdsfree(). 
*/ sds keyspaceEventsFlagsToString(int flags) { sds res; res = sdsempty(); if ((flags & NOTIFY_ALL) == NOTIFY_ALL) { res = sdscatlen(res,"A",1); } else { if (flags & NOTIFY_GENERIC) res = sdscatlen(res,"g",1); if (flags & NOTIFY_STRING) res = sdscatlen(res,"$",1); if (flags & NOTIFY_LIST) res = sdscatlen(res,"l",1); if (flags & NOTIFY_SET) res = sdscatlen(res,"s",1); if (flags & NOTIFY_HASH) res = sdscatlen(res,"h",1); if (flags & NOTIFY_ZSET) res = sdscatlen(res,"z",1); if (flags & NOTIFY_EXPIRED) res = sdscatlen(res,"x",1); if (flags & NOTIFY_EVICTED) res = sdscatlen(res,"e",1); } if (flags & NOTIFY_KEYSPACE) res = sdscatlen(res,"K",1); if (flags & NOTIFY_KEYEVENT) res = sdscatlen(res,"E",1); return res; } /* The API provided to the rest of the Redis core is a simple function: * * notifyKeyspaceEvent(char *event, robj *key, int dbid); * * 'event' is a C string representing the event name. * 'key' is a Redis object representing the key name. * 'dbid' is the database ID where the key lives. */ void notifyKeyspaceEvent(int type, char *event, robj *key, int dbid) { sds chan; robj *chanobj, *eventobj; int len = -1; char buf[24]; /* If notifications for this class of events are off, return ASAP. */ if (!(server.notify_keyspace_events & type)) return; eventobj = createStringObject(event,strlen(event)); /* __keyspace@<db>__:<key> <event> notifications. */ if (server.notify_keyspace_events & NOTIFY_KEYSPACE) { chan = sdsnewlen("__keyspace@",11); len = ll2string(buf,sizeof(buf),dbid); chan = sdscatlen(chan, buf, len); chan = sdscatlen(chan, "__:", 3); chan = sdscatsds(chan, key->ptr); chanobj = createObject(OBJ_STRING, chan); pubsubPublishMessage(chanobj, eventobj); decrRefCount(chanobj); } /* __keyevent@<db>__:<event> <key> notifications. */ if (server.notify_keyspace_events & NOTIFY_KEYEVENT) { chan = sdsnewlen("__keyevent@",11); if (len == -1) len = ll2string(buf,sizeof(buf),dbid); chan = sdscatlen(chan, buf, len); chan = sdscatlen(chan, "__:", 3); chan = sdscatsds(chan, eventobj->ptr); chanobj = createObject(OBJ_STRING, chan); pubsubPublishMessage(chanobj, key); decrRefCount(chanobj); } decrRefCount(eventobj); }
5,418
40.366412
78
c
null
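A small round-trip sketch for the two helpers in notify.c above, assuming server.h supplies the NOTIFY_* constants, the sds type and serverLog() as elsewhere in this tree.

#include "server.h"

void notify_flags_example(void) {
    int flags = keyspaceEventsStringToFlags("KEA");  /* keyspace + keyevent + all classes */
    if (flags == -1) return;                         /* unknown class character */

    sds s = keyspaceEventsFlagsToString(flags);      /* yields "AKE" */
    serverLog(LL_NOTICE, "keyspace notification flags: %s", s);
    sdsfree(s);
}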
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/debugmacro.h
/* This file contains debugging macros to be used when investigating issues. * * ----------------------------------------------------------------------------- * * Copyright (c) 2016, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #define D(...) \ do { \ FILE *fp = fopen("/tmp/log.txt","a"); \ fprintf(fp,"%s:%s:%d:\t", __FILE__, __func__, __LINE__); \ fprintf(fp,__VA_ARGS__); \ fprintf(fp,"\n"); \ fclose(fp); \ } while (0);
2,356
55.119048
80
h
null
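Usage of the D() macro above is printf-style; each expansion appends one file:function:line prefixed message to /tmp/log.txt. A sketch with hypothetical arguments:

#include "debugmacro.h"

static void trace_example(int fd, long offset) {
    D("rewriting AOF: fd=%d offset=%ld", fd, offset);
}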
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/intset.h
/* * Copyright (c) 2009-2012, Pieter Noordhuis <pcnoordhuis at gmail dot com> * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __INTSET_H #define __INTSET_H #include <stdint.h> typedef struct intset { uint32_t encoding; uint32_t length; int8_t contents[]; } intset; intset *intsetNew(void); intset *intsetAdd(intset *is, int64_t value, uint8_t *success); intset *intsetRemove(intset *is, int64_t value, int *success); uint8_t intsetFind(intset *is, int64_t value); int64_t intsetRandom(intset *is); uint8_t intsetGet(intset *is, uint32_t pos, int64_t *value); uint32_t intsetLen(intset *is); size_t intsetBlobLen(intset *is); #ifdef REDIS_TEST int intsetTest(int argc, char *argv[]); #endif #endif // __INTSET_H
2,296
40.763636
78
h
null
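A short sketch of the intset API declared above. The set is a single flat allocation, so zfree() from the Redis allocator releases it, as Redis itself does for intset-encoded sets.

#include "intset.h"
#include "zmalloc.h"

void intset_example(void) {
    uint8_t added;
    intset *is = intsetNew();

    is = intsetAdd(is, 42, &added);   /* added == 1: newly inserted */
    is = intsetAdd(is, 42, &added);   /* added == 0: already present */

    if (intsetFind(is, 42) && intsetLen(is) == 1) {
        /* 42 is stored exactly once */
    }
    zfree(is);
}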
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/rio.c
/* rio.c is a simple stream-oriented I/O abstraction that provides an interface * to write code that can consume/produce data using different concrete input * and output devices. For instance the same rdb.c code using the rio * abstraction can be used to read and write the RDB format using in-memory * buffers or files. * * A rio object provides the following methods: * read: read from stream. * write: write to stream. * tell: get the current offset. * * It is also possible to set a 'checksum' method that is used by rio.c in order * to compute a checksum of the data written or read, or to query the rio object * for the current checksum. * * ---------------------------------------------------------------------------- * * Copyright (c) 2009-2012, Pieter Noordhuis <pcnoordhuis at gmail dot com> * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include <string.h> #include <stdio.h> #include <unistd.h> #include "rio.h" #include "util.h" #include "crc64.h" #include "config.h" #include "server.h" /* ------------------------- Buffer I/O implementation ----------------------- */ /* Returns 1 or 0 for success/failure. */ static size_t rioBufferWrite(rio *r, const void *buf, size_t len) { r->io.buffer.ptr = sdscatlen(r->io.buffer.ptr,(char*)buf,len); r->io.buffer.pos += len; return 1; } /* Returns 1 or 0 for success/failure. */ static size_t rioBufferRead(rio *r, void *buf, size_t len) { if (sdslen(r->io.buffer.ptr)-r->io.buffer.pos < len) return 0; /* not enough buffer to return len bytes. */ memcpy(buf,r->io.buffer.ptr+r->io.buffer.pos,len); r->io.buffer.pos += len; return 1; } /* Returns read/write position in buffer. */ static off_t rioBufferTell(rio *r) { return r->io.buffer.pos; } /* Flushes any buffer to target device if applicable. Returns 1 on success * and 0 on failures. */ static int rioBufferFlush(rio *r) { UNUSED(r); return 1; /* Nothing to do, our write just appends to the buffer. 
*/ } static const rio rioBufferIO = { rioBufferRead, rioBufferWrite, rioBufferTell, rioBufferFlush, NULL, /* update_checksum */ 0, /* current checksum */ 0, /* bytes read or written */ 0, /* read/write chunk size */ { { NULL, 0 } } /* union for io-specific vars */ }; void rioInitWithBuffer(rio *r, sds s) { *r = rioBufferIO; r->io.buffer.ptr = s; r->io.buffer.pos = 0; } /* --------------------- Stdio file pointer implementation ------------------- */ /* Returns 1 or 0 for success/failure. */ static size_t rioFileWrite(rio *r, const void *buf, size_t len) { size_t retval; retval = fwrite(buf,len,1,r->io.file.fp); r->io.file.buffered += len; if (r->io.file.autosync && r->io.file.buffered >= r->io.file.autosync) { fflush(r->io.file.fp); aof_fsync(fileno(r->io.file.fp)); r->io.file.buffered = 0; } return retval; } /* Returns 1 or 0 for success/failure. */ static size_t rioFileRead(rio *r, void *buf, size_t len) { return fread(buf,len,1,r->io.file.fp); } /* Returns read/write position in file. */ static off_t rioFileTell(rio *r) { return ftello(r->io.file.fp); } /* Flushes any buffer to target device if applicable. Returns 1 on success * and 0 on failures. */ static int rioFileFlush(rio *r) { return (fflush(r->io.file.fp) == 0) ? 1 : 0; } static const rio rioFileIO = { rioFileRead, rioFileWrite, rioFileTell, rioFileFlush, NULL, /* update_checksum */ 0, /* current checksum */ 0, /* bytes read or written */ 0, /* read/write chunk size */ { { NULL, 0 } } /* union for io-specific vars */ }; void rioInitWithFile(rio *r, FILE *fp) { *r = rioFileIO; r->io.file.fp = fp; r->io.file.buffered = 0; r->io.file.autosync = 0; } /* ------------------- File descriptors set implementation ------------------- */ /* Returns 1 or 0 for success/failure. * The function returns success as long as we are able to correctly write * to at least one file descriptor. * * When buf is NULL and len is 0, the function performs a flush operation * if there is some pending buffer, so this function is also used in order * to implement rioFdsetFlush(). */ static size_t rioFdsetWrite(rio *r, const void *buf, size_t len) { ssize_t retval; int j; unsigned char *p = (unsigned char*) buf; int doflush = (buf == NULL && len == 0); /* To start we always append to our buffer. If it gets larger than * a given size, we actually write to the sockets. */ if (len) { r->io.fdset.buf = sdscatlen(r->io.fdset.buf,buf,len); len = 0; /* Prevent entering the while below if we don't flush. */ if (sdslen(r->io.fdset.buf) > PROTO_IOBUF_LEN) doflush = 1; } if (doflush) { p = (unsigned char*) r->io.fdset.buf; len = sdslen(r->io.fdset.buf); } /* Write in little chunchs so that when there are big writes we * parallelize while the kernel is sending data in background to * the TCP socket. */ while(len) { size_t count = len < 1024 ? len : 1024; int broken = 0; for (j = 0; j < r->io.fdset.numfds; j++) { if (r->io.fdset.state[j] != 0) { /* Skip FDs alraedy in error. */ broken++; continue; } /* Make sure to write 'count' bytes to the socket regardless * of short writes. */ size_t nwritten = 0; while(nwritten != count) { retval = write(r->io.fdset.fds[j],p+nwritten,count-nwritten); if (retval <= 0) { /* With blocking sockets, which is the sole user of this * rio target, EWOULDBLOCK is returned only because of * the SO_SNDTIMEO socket option, so we translate the error * into one more recognizable by the user. */ if (retval == -1 && errno == EWOULDBLOCK) errno = ETIMEDOUT; break; } nwritten += retval; } if (nwritten != count) { /* Mark this FD as broken. 
*/ r->io.fdset.state[j] = errno; if (r->io.fdset.state[j] == 0) r->io.fdset.state[j] = EIO; } } if (broken == r->io.fdset.numfds) return 0; /* All the FDs in error. */ p += count; len -= count; r->io.fdset.pos += count; } if (doflush) sdsclear(r->io.fdset.buf); return 1; } /* Returns 1 or 0 for success/failure. */ static size_t rioFdsetRead(rio *r, void *buf, size_t len) { UNUSED(r); UNUSED(buf); UNUSED(len); return 0; /* Error, this target does not support reading. */ } /* Returns read/write position in file. */ static off_t rioFdsetTell(rio *r) { return r->io.fdset.pos; } /* Flushes any buffer to target device if applicable. Returns 1 on success * and 0 on failures. */ static int rioFdsetFlush(rio *r) { /* Our flush is implemented by the write method, that recognizes a * buffer set to NULL with a count of zero as a flush request. */ return rioFdsetWrite(r,NULL,0); } static const rio rioFdsetIO = { rioFdsetRead, rioFdsetWrite, rioFdsetTell, rioFdsetFlush, NULL, /* update_checksum */ 0, /* current checksum */ 0, /* bytes read or written */ 0, /* read/write chunk size */ { { NULL, 0 } } /* union for io-specific vars */ }; void rioInitWithFdset(rio *r, int *fds, int numfds) { int j; *r = rioFdsetIO; r->io.fdset.fds = zmalloc(sizeof(int)*numfds); r->io.fdset.state = zmalloc(sizeof(int)*numfds); memcpy(r->io.fdset.fds,fds,sizeof(int)*numfds); for (j = 0; j < numfds; j++) r->io.fdset.state[j] = 0; r->io.fdset.numfds = numfds; r->io.fdset.pos = 0; r->io.fdset.buf = sdsempty(); } /* release the rio stream. */ void rioFreeFdset(rio *r) { zfree(r->io.fdset.fds); zfree(r->io.fdset.state); sdsfree(r->io.fdset.buf); } /* ---------------------------- Generic functions ---------------------------- */ /* This function can be installed both in memory and file streams when checksum * computation is needed. */ void rioGenericUpdateChecksum(rio *r, const void *buf, size_t len) { r->cksum = crc64(r->cksum,buf,len); } /* Set the file-based rio object to auto-fsync every 'bytes' file written. * By default this is set to zero that means no automatic file sync is * performed. * * This feature is useful in a few contexts since when we rely on OS write * buffers sometimes the OS buffers way too much, resulting in too many * disk I/O concentrated in very little time. When we fsync in an explicit * way instead the I/O pressure is more distributed across time. */ void rioSetAutoSync(rio *r, off_t bytes) { serverAssert(r->read == rioFileIO.read); r->io.file.autosync = bytes; } /* --------------------------- Higher level interface -------------------------- * * The following higher level functions use lower level rio.c functions to help * generating the Redis protocol for the Append Only File. */ /* Write multi bulk count in the format: "*<count>\r\n". */ size_t rioWriteBulkCount(rio *r, char prefix, int count) { char cbuf[128]; int clen; cbuf[0] = prefix; clen = 1+ll2string(cbuf+1,sizeof(cbuf)-1,count); cbuf[clen++] = '\r'; cbuf[clen++] = '\n'; if (rioWrite(r,cbuf,clen) == 0) return 0; return clen; } /* Write binary-safe string in the format: "$<count>\r\n<payload>\r\n". */ size_t rioWriteBulkString(rio *r, const char *buf, size_t len) { size_t nwritten; if ((nwritten = rioWriteBulkCount(r,'$',len)) == 0) return 0; if (len > 0 && rioWrite(r,buf,len) == 0) return 0; if (rioWrite(r,"\r\n",2) == 0) return 0; return nwritten+len+2; } /* Write a long long value in format: "$<count>\r\n<payload>\r\n". 
*/ size_t rioWriteBulkLongLong(rio *r, long long l) { char lbuf[32]; unsigned int llen; llen = ll2string(lbuf,sizeof(lbuf),l); return rioWriteBulkString(r,lbuf,llen); } /* Write a double value in the format: "$<count>\r\n<payload>\r\n" */ size_t rioWriteBulkDouble(rio *r, double d) { char dbuf[128]; unsigned int dlen; dlen = snprintf(dbuf,sizeof(dbuf),"%.17g",d); return rioWriteBulkString(r,dbuf,dlen); }
12,104
33.389205
81
c
null
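Sketch of the in-memory rio target from rio.c above: serialize a RESP/AOF-style SET command into an sds buffer using the bulk helpers (error handling omitted for brevity).

#include <string.h>
#include "rio.h"
#include "sds.h"

sds build_set_command(const char *key, const char *val) {
    rio r;
    rioInitWithBuffer(&r, sdsempty());

    rioWriteBulkCount(&r, '*', 3);               /* array of three bulk strings */
    rioWriteBulkString(&r, "SET", 3);
    rioWriteBulkString(&r, key, strlen(key));
    rioWriteBulkString(&r, val, strlen(val));

    return r.io.buffer.ptr;                      /* caller owns the resulting sds */
}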
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/ae_evport.c
/* ae.c module for illumos event ports. * * Copyright (c) 2012, Joyent, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <assert.h> #include <errno.h> #include <port.h> #include <poll.h> #include <sys/types.h> #include <sys/time.h> #include <stdio.h> static int evport_debug = 0; /* * This file implements the ae API using event ports, present on Solaris-based * systems since Solaris 10. Using the event port interface, we associate file * descriptors with the port. Each association also includes the set of poll(2) * events that the consumer is interested in (e.g., POLLIN and POLLOUT). * * There's one tricky piece to this implementation: when we return events via * aeApiPoll, the corresponding file descriptors become dissociated from the * port. This is necessary because poll events are level-triggered, so if the * fd didn't become dissociated, it would immediately fire another event since * the underlying state hasn't changed yet. We must re-associate the file * descriptor, but only after we know that our caller has actually read from it. * The ae API does not tell us exactly when that happens, but we do know that * it must happen by the time aeApiPoll is called again. Our solution is to * keep track of the last fds returned by aeApiPoll and re-associate them next * time aeApiPoll is invoked. * * To summarize, in this module, each fd association is EITHER (a) represented * only via the in-kernel association OR (b) represented by pending_fds and * pending_masks. (b) is only true for the last fds we returned from aeApiPoll, * and only until we enter aeApiPoll again (at which point we restore the * in-kernel association). 
*/ #define MAX_EVENT_BATCHSZ 512 typedef struct aeApiState { int portfd; /* event port */ int npending; /* # of pending fds */ int pending_fds[MAX_EVENT_BATCHSZ]; /* pending fds */ int pending_masks[MAX_EVENT_BATCHSZ]; /* pending fds' masks */ } aeApiState; static int aeApiCreate(aeEventLoop *eventLoop) { int i; aeApiState *state = zmalloc(sizeof(aeApiState)); if (!state) return -1; state->portfd = port_create(); if (state->portfd == -1) { zfree(state); return -1; } state->npending = 0; for (i = 0; i < MAX_EVENT_BATCHSZ; i++) { state->pending_fds[i] = -1; state->pending_masks[i] = AE_NONE; } eventLoop->apidata = state; return 0; } static int aeApiResize(aeEventLoop *eventLoop, int setsize) { /* Nothing to resize here. */ return 0; } static void aeApiFree(aeEventLoop *eventLoop) { aeApiState *state = eventLoop->apidata; close(state->portfd); zfree(state); } static int aeApiLookupPending(aeApiState *state, int fd) { int i; for (i = 0; i < state->npending; i++) { if (state->pending_fds[i] == fd) return (i); } return (-1); } /* * Helper function to invoke port_associate for the given fd and mask. */ static int aeApiAssociate(const char *where, int portfd, int fd, int mask) { int events = 0; int rv, err; if (mask & AE_READABLE) events |= POLLIN; if (mask & AE_WRITABLE) events |= POLLOUT; if (evport_debug) fprintf(stderr, "%s: port_associate(%d, 0x%x) = ", where, fd, events); rv = port_associate(portfd, PORT_SOURCE_FD, fd, events, (void *)(uintptr_t)mask); err = errno; if (evport_debug) fprintf(stderr, "%d (%s)\n", rv, rv == 0 ? "no error" : strerror(err)); if (rv == -1) { fprintf(stderr, "%s: port_associate: %s\n", where, strerror(err)); if (err == EAGAIN) fprintf(stderr, "aeApiAssociate: event port limit exceeded."); } return rv; } static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; int fullmask, pfd; if (evport_debug) fprintf(stderr, "aeApiAddEvent: fd %d mask 0x%x\n", fd, mask); /* * Since port_associate's "events" argument replaces any existing events, we * must be sure to include whatever events are already associated when * we call port_associate() again. */ fullmask = mask | eventLoop->events[fd].mask; pfd = aeApiLookupPending(state, fd); if (pfd != -1) { /* * This fd was recently returned from aeApiPoll. It should be safe to * assume that the consumer has processed that poll event, but we play * it safer by simply updating pending_mask. The fd will be * re-associated as usual when aeApiPoll is called again. */ if (evport_debug) fprintf(stderr, "aeApiAddEvent: adding to pending fd %d\n", fd); state->pending_masks[pfd] |= fullmask; return 0; } return (aeApiAssociate("aeApiAddEvent", state->portfd, fd, fullmask)); } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; int fullmask, pfd; if (evport_debug) fprintf(stderr, "del fd %d mask 0x%x\n", fd, mask); pfd = aeApiLookupPending(state, fd); if (pfd != -1) { if (evport_debug) fprintf(stderr, "deleting event from pending fd %d\n", fd); /* * This fd was just returned from aeApiPoll, so it's not currently * associated with the port. All we need to do is update * pending_mask appropriately. */ state->pending_masks[pfd] &= ~mask; if (state->pending_masks[pfd] == AE_NONE) state->pending_fds[pfd] = -1; return; } /* * The fd is currently associated with the port. Like with the add case * above, we must look at the full mask for the file descriptor before * updating that association. 
We don't have a good way of knowing what the * events are without looking into the eventLoop state directly. We rely on * the fact that our caller has already updated the mask in the eventLoop. */ fullmask = eventLoop->events[fd].mask; if (fullmask == AE_NONE) { /* * We're removing *all* events, so use port_dissociate to remove the * association completely. Failure here indicates a bug. */ if (evport_debug) fprintf(stderr, "aeApiDelEvent: port_dissociate(%d)\n", fd); if (port_dissociate(state->portfd, PORT_SOURCE_FD, fd) != 0) { perror("aeApiDelEvent: port_dissociate"); abort(); /* will not return */ } } else if (aeApiAssociate("aeApiDelEvent", state->portfd, fd, fullmask) != 0) { /* * ENOMEM is a potentially transient condition, but the kernel won't * generally return it unless things are really bad. EAGAIN indicates * we've reached an resource limit, for which it doesn't make sense to * retry (counter-intuitively). All other errors indicate a bug. In any * of these cases, the best we can do is to abort. */ abort(); /* will not return */ } } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { aeApiState *state = eventLoop->apidata; struct timespec timeout, *tsp; int mask, i; uint_t nevents; port_event_t event[MAX_EVENT_BATCHSZ]; /* * If we've returned fd events before, we must re-associate them with the * port now, before calling port_get(). See the block comment at the top of * this file for an explanation of why. */ for (i = 0; i < state->npending; i++) { if (state->pending_fds[i] == -1) /* This fd has since been deleted. */ continue; if (aeApiAssociate("aeApiPoll", state->portfd, state->pending_fds[i], state->pending_masks[i]) != 0) { /* See aeApiDelEvent for why this case is fatal. */ abort(); } state->pending_masks[i] = AE_NONE; state->pending_fds[i] = -1; } state->npending = 0; if (tvp != NULL) { timeout.tv_sec = tvp->tv_sec; timeout.tv_nsec = tvp->tv_usec * 1000; tsp = &timeout; } else { tsp = NULL; } /* * port_getn can return with errno == ETIME having returned some events (!). * So if we get ETIME, we check nevents, too. */ nevents = 1; if (port_getn(state->portfd, event, MAX_EVENT_BATCHSZ, &nevents, tsp) == -1 && (errno != ETIME || nevents == 0)) { if (errno == ETIME || errno == EINTR) return 0; /* Any other error indicates a bug. */ perror("aeApiPoll: port_get"); abort(); } state->npending = nevents; for (i = 0; i < nevents; i++) { mask = 0; if (event[i].portev_events & POLLIN) mask |= AE_READABLE; if (event[i].portev_events & POLLOUT) mask |= AE_WRITABLE; eventLoop->fired[i].fd = event[i].portev_object; eventLoop->fired[i].mask = mask; if (evport_debug) fprintf(stderr, "aeApiPoll: fd %d mask 0x%x\n", (int)event[i].portev_object, mask); state->pending_fds[i] = event[i].portev_object; state->pending_masks[i] = (uintptr_t)event[i].portev_user; } return nevents; } static char *aeApiName(void) { return "evport"; }
10,939
33.080997
81
c
null
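The event-port backend above is never called directly; ae.c includes it when HAVE_EVPORT is defined, and callers go through the generic API in ae.h. A hedged sketch of registering a readable handler, which on illumos/Solaris ends up in aeApiAddEvent()/port_associate() shown above:

#include <unistd.h>
#include "ae.h"

static void on_readable(aeEventLoop *el, int fd, void *privdata, int mask) {
    (void)el; (void)privdata; (void)mask;
    char buf[64];
    ssize_t n = read(fd, buf, sizeof(buf));  /* drain a little data; illustrative only */
    (void)n;
}

void register_reader(aeEventLoop *el, int fd) {
    if (aeCreateFileEvent(el, fd, AE_READABLE, on_readable, NULL) == AE_ERR) {
        /* fd out of range or association failure */
    }
}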
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/sds.h
/* SDSLib 2.0 -- A C dynamic strings library * * Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com> * Copyright (c) 2015, Oran Agra * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __SDS_H #define __SDS_H #define SDS_MAX_PREALLOC (1024*1024) #include <sys/types.h> #include <stdarg.h> #include <stdint.h> #ifdef USE_PMDK #include "libpmemobj.h" #endif typedef char *sds; /* Note: sdshdr5 is never used, we just access the flags byte directly. * However is here to document the layout of type 5 SDS strings. 
*/ struct __attribute__ ((__packed__)) sdshdr5 { unsigned char flags; /* 3 lsb of type, and 5 msb of string length */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr8 { uint8_t len; /* used */ uint8_t alloc; /* excluding the header and null terminator */ unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr16 { uint16_t len; /* used */ uint16_t alloc; /* excluding the header and null terminator */ unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr32 { uint32_t len; /* used */ uint32_t alloc; /* excluding the header and null terminator */ unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr64 { uint64_t len; /* used */ uint64_t alloc; /* excluding the header and null terminator */ unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; #define SDS_TYPE_5 0 #define SDS_TYPE_8 1 #define SDS_TYPE_16 2 #define SDS_TYPE_32 3 #define SDS_TYPE_64 4 #define SDS_TYPE_MASK 7 #define SDS_TYPE_BITS 3 #define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (void*)((s)-(sizeof(struct sdshdr##T))); #define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)))) #define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS) static inline size_t sdslen(const sds s) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: return SDS_TYPE_5_LEN(flags); case SDS_TYPE_8: return SDS_HDR(8,s)->len; case SDS_TYPE_16: return SDS_HDR(16,s)->len; case SDS_TYPE_32: return SDS_HDR(32,s)->len; case SDS_TYPE_64: return SDS_HDR(64,s)->len; } return 0; } static inline size_t sdsavail(const sds s) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: { return 0; } case SDS_TYPE_8: { SDS_HDR_VAR(8,s); return sh->alloc - sh->len; } case SDS_TYPE_16: { SDS_HDR_VAR(16,s); return sh->alloc - sh->len; } case SDS_TYPE_32: { SDS_HDR_VAR(32,s); return sh->alloc - sh->len; } case SDS_TYPE_64: { SDS_HDR_VAR(64,s); return sh->alloc - sh->len; } } return 0; } static inline void sdssetlen(sds s, size_t newlen) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: { unsigned char *fp = ((unsigned char*)s)-1; *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); } break; case SDS_TYPE_8: SDS_HDR(8,s)->len = newlen; break; case SDS_TYPE_16: SDS_HDR(16,s)->len = newlen; break; case SDS_TYPE_32: SDS_HDR(32,s)->len = newlen; break; case SDS_TYPE_64: SDS_HDR(64,s)->len = newlen; break; } } static inline void sdsinclen(sds s, size_t inc) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: { unsigned char *fp = ((unsigned char*)s)-1; unsigned char newlen = SDS_TYPE_5_LEN(flags)+inc; *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); } break; case SDS_TYPE_8: SDS_HDR(8,s)->len += inc; break; case SDS_TYPE_16: SDS_HDR(16,s)->len += inc; break; case SDS_TYPE_32: SDS_HDR(32,s)->len += inc; break; case SDS_TYPE_64: SDS_HDR(64,s)->len += inc; break; } } /* sdsalloc() = sdsavail() + sdslen() */ static inline size_t sdsalloc(const sds s) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: return SDS_TYPE_5_LEN(flags); case SDS_TYPE_8: return SDS_HDR(8,s)->alloc; case SDS_TYPE_16: return SDS_HDR(16,s)->alloc; case SDS_TYPE_32: return SDS_HDR(32,s)->alloc; case SDS_TYPE_64: return SDS_HDR(64,s)->alloc; } return 0; } static inline void sdssetalloc(sds s, size_t newlen) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: /* Nothing 
to do, this type has no total allocation info. */ break; case SDS_TYPE_8: SDS_HDR(8,s)->alloc = newlen; break; case SDS_TYPE_16: SDS_HDR(16,s)->alloc = newlen; break; case SDS_TYPE_32: SDS_HDR(32,s)->alloc = newlen; break; case SDS_TYPE_64: SDS_HDR(64,s)->alloc = newlen; break; } } sds sdsnewlen(const void *init, size_t initlen); sds sdsnew(const char *init); sds sdsempty(void); sds sdsdup(const sds s); void sdsfree(sds s); sds sdsgrowzero(sds s, size_t len); sds sdscatlen(sds s, const void *t, size_t len); sds sdscat(sds s, const char *t); sds sdscatsds(sds s, const sds t); sds sdscpylen(sds s, const char *t, size_t len); sds sdscpy(sds s, const char *t); #ifdef USE_PMDK sds sdsnewlenPM(const void *init, size_t initlen); sds sdsdupPM(const sds s, void **oid_reference); void sdsfreePM(sds s); PMEMoid *sdsPMEMoidBackReference(sds s); #endif sds sdscatvprintf(sds s, const char *fmt, va_list ap); #ifdef __GNUC__ sds sdscatprintf(sds s, const char *fmt, ...) __attribute__((format(printf, 2, 3))); #else sds sdscatprintf(sds s, const char *fmt, ...); #endif sds sdscatfmt(sds s, char const *fmt, ...); sds sdstrim(sds s, const char *cset); void sdsrange(sds s, int start, int end); void sdsupdatelen(sds s); void sdsclear(sds s); int sdscmp(const sds s1, const sds s2); sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count); void sdsfreesplitres(sds *tokens, int count); void sdstolower(sds s); void sdstoupper(sds s); sds sdsfromlonglong(long long value); sds sdscatrepr(sds s, const char *p, size_t len); sds *sdssplitargs(const char *line, int *argc); sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen); sds sdsjoin(char **argv, int argc, char *sep); sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen); /* Low level functions exposed to the user API */ sds sdsMakeRoomFor(sds s, size_t addlen); void sdsIncrLen(sds s, int incr); sds sdsRemoveFreeSpace(sds s); size_t sdsAllocSize(sds s); void *sdsAllocPtr(sds s); /* Export the allocator used by SDS to the program using SDS. * Sometimes the program SDS is linked to, may use a different set of * allocators, but may want to allocate or free things that SDS will * respectively free or allocate. */ void *sds_malloc(size_t size); void *sds_realloc(void *ptr, size_t size); void sds_free(void *ptr); #ifdef REDIS_TEST int sdsTest(int argc, char *argv[]); #endif #endif
9,170
31.178947
88
h
null
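A minimal sketch of the sds API declared above: the header sits just before the returned char*, so sdslen()/sdsavail() are O(1) while the string remains usable as a plain C string.

#include <stdio.h>
#include "sds.h"

void sds_example(void) {
    sds s = sdsnew("hello");
    s = sdscat(s, " world");    /* may reallocate, so always reassign */

    printf("%s (len=%zu avail=%zu)\n", s, sdslen(s), sdsavail(s));
    sdsfree(s);
}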
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/redisassert.h
/* redisassert.h -- Drop in replacemnet assert.h that prints the stack trace * in the Redis logs. * * This file should be included instead of "assert.h" inside libraries used by * Redis that are using assertions, so instead of Redis disappearing with * SIGABORT, we get the details and stack trace inside the log file. * * ---------------------------------------------------------------------------- * * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __REDIS_ASSERT_H__ #define __REDIS_ASSERT_H__ #include <unistd.h> /* for _exit() */ #define assert(_e) ((_e)?(void)0 : (_serverAssert(#_e,__FILE__,__LINE__),_exit(1))) void _serverAssert(char *estr, char *file, int line); #endif
2,276
46.4375
83
h
null
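Including this header instead of <assert.h> is the whole mechanism: a failing condition routes through _serverAssert(), which logs the stack trace, before _exit(1). A brief sketch:

#include "redisassert.h"

static int checked_get(int *v, int n, int i) {
    assert(i >= 0 && i < n);    /* logs via _serverAssert() and exits on violation */
    return v[i];
}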
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/anet.h
/* anet.c -- Basic TCP socket stuff made a bit less boring * * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef ANET_H #define ANET_H #include <sys/types.h> #define ANET_OK 0 #define ANET_ERR -1 #define ANET_ERR_LEN 256 /* Flags used with certain functions. */ #define ANET_NONE 0 #define ANET_IP_ONLY (1<<0) #if defined(__sun) || defined(_AIX) #define AF_LOCAL AF_UNIX #endif #ifdef _AIX #undef ip_len #endif int anetTcpConnect(char *err, char *addr, int port); int anetTcpNonBlockConnect(char *err, char *addr, int port); int anetTcpNonBlockBindConnect(char *err, char *addr, int port, char *source_addr); int anetTcpNonBlockBestEffortBindConnect(char *err, char *addr, int port, char *source_addr); int anetUnixConnect(char *err, char *path); int anetUnixNonBlockConnect(char *err, char *path); int anetRead(int fd, char *buf, int count); int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len); int anetResolveIP(char *err, char *host, char *ipbuf, size_t ipbuf_len); int anetTcpServer(char *err, int port, char *bindaddr, int backlog); int anetTcp6Server(char *err, int port, char *bindaddr, int backlog); int anetUnixServer(char *err, char *path, mode_t perm, int backlog); int anetTcpAccept(char *err, int serversock, char *ip, size_t ip_len, int *port); int anetUnixAccept(char *err, int serversock); int anetWrite(int fd, char *buf, int count); int anetNonBlock(char *err, int fd); int anetBlock(char *err, int fd); int anetEnableTcpNoDelay(char *err, int fd); int anetDisableTcpNoDelay(char *err, int fd); int anetTcpKeepAlive(char *err, int fd); int anetSendTimeout(char *err, int fd, long long ms); int anetPeerToString(int fd, char *ip, size_t ip_len, int *port); int anetKeepAlive(char *err, int fd, int interval); int anetSockName(int fd, char *ip, size_t ip_len, int *port); int anetFormatAddr(char *fmt, size_t fmt_len, char *ip, int port); int anetFormatPeer(int fd, char *fmt, size_t fmt_len); int anetFormatSock(int fd, char *fmt, size_t fmt_len); #endif
3,562
42.987654
93
h
null
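A sketch of the anet helpers declared above: open a listening socket and accept one client, Redis-style. The port, backlog and buffer sizes are illustrative only.

#include <stdio.h>
#include "anet.h"

int accept_one_client(void) {
    char err[ANET_ERR_LEN], ip[46];
    int port;

    int s = anetTcpServer(err, 7777, NULL, 128);        /* NULL binds all interfaces */
    if (s == ANET_ERR) { fprintf(stderr, "listen: %s\n", err); return -1; }

    int c = anetTcpAccept(err, s, ip, sizeof(ip), &port);
    if (c == ANET_ERR) { fprintf(stderr, "accept: %s\n", err); return -1; }

    anetNonBlock(err, c);                               /* clients are handled non-blocking */
    anetEnableTcpNoDelay(err, c);
    return c;
}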
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/lzfP.h
/* * Copyright (c) 2000-2007 Marc Alexander Lehmann <schmorp@schmorp.de> * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef LZFP_h #define LZFP_h #define STANDALONE 1 /* at the moment, this is ok. */ #ifndef STANDALONE # include "lzf.h" #endif /* * Size of hashtable is (1 << HLOG) * sizeof (char *) * decompression is independent of the hash table size * the difference between 15 and 14 is very small * for small blocks (and 14 is usually a bit faster). * For a low-memory/faster configuration, use HLOG == 13; * For best compression, use 15 or 16 (or more, up to 22). */ #ifndef HLOG # define HLOG 16 #endif /* * Sacrifice very little compression quality in favour of compression speed. * This gives almost the same compression as the default code, and is * (very roughly) 15% faster. This is the preferred mode of operation. */ #ifndef VERY_FAST # define VERY_FAST 1 #endif /* * Sacrifice some more compression quality in favour of compression speed. * (roughly 1-2% worse compression for large blocks and * 9-10% for small, redundant, blocks and >>20% better speed in both cases) * In short: when in need for speed, enable this for binary data, * possibly disable this for text data. */ #ifndef ULTRA_FAST # define ULTRA_FAST 0 #endif /* * Unconditionally aligning does not cost very much, so do it if unsure */ #ifndef STRICT_ALIGN # define STRICT_ALIGN !(defined(__i386) || defined (__amd64)) #endif /* * You may choose to pre-set the hash table (might be faster on some * modern cpus and large (>>64k) blocks, and also makes compression * deterministic/repeatable when the configuration otherwise is the same). 
*/ #ifndef INIT_HTAB # define INIT_HTAB 0 #endif /* * Avoid assigning values to errno variable? for some embedding purposes * (linux kernel for example), this is necessary. NOTE: this breaks * the documentation in lzf.h. Avoiding errno has no speed impact. */ #ifndef AVOID_ERRNO # define AVOID_ERRNO 0 #endif /* * Whether to pass the LZF_STATE variable as argument, or allocate it * on the stack. For small-stack environments, define this to 1. * NOTE: this breaks the prototype in lzf.h. */ #ifndef LZF_STATE_ARG # define LZF_STATE_ARG 0 #endif /* * Whether to add extra checks for input validity in lzf_decompress * and return EINVAL if the input stream has been corrupted. This * only shields against overflowing the input buffer and will not * detect most corrupted streams. * This check is not normally noticeable on modern hardware * (<1% slowdown), but might slow down older cpus considerably. */ #ifndef CHECK_INPUT # define CHECK_INPUT 1 #endif /* * Whether to store pointers or offsets inside the hash table. On * 64 bit architetcures, pointers take up twice as much space, * and might also be slower. Default is to autodetect. */ /*#define LZF_USER_OFFSETS autodetect */ /*****************************************************************************/ /* nothing should be changed below */ #ifdef __cplusplus # include <cstring> # include <climits> using namespace std; #else # include <string.h> # include <limits.h> #endif #ifndef LZF_USE_OFFSETS # if defined (WIN32) # define LZF_USE_OFFSETS defined(_M_X64) # else # if __cplusplus > 199711L # include <cstdint> # else # include <stdint.h> # endif # define LZF_USE_OFFSETS (UINTPTR_MAX > 0xffffffffU) # endif #endif typedef unsigned char u8; #if LZF_USE_OFFSETS # define LZF_HSLOT_BIAS ((const u8 *)in_data) typedef unsigned int LZF_HSLOT; #else # define LZF_HSLOT_BIAS 0 typedef const u8 *LZF_HSLOT; #endif typedef LZF_HSLOT LZF_STATE[1 << (HLOG)]; #if !STRICT_ALIGN /* for unaligned accesses we need a 16 bit datatype. */ # if USHRT_MAX == 65535 typedef unsigned short u16; # elif UINT_MAX == 65535 typedef unsigned int u16; # else # undef STRICT_ALIGN # define STRICT_ALIGN 1 # endif #endif #if ULTRA_FAST # undef VERY_FAST #endif #endif
5,826
30.327957
79
h
null
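The macros in the header above are liblzf's compile-time tunables; a minimal usage sketch follows, assuming the usual lzf_compress()/lzf_decompress() prototypes from lzf.h (which is not part of this header).

/* Assumed liblzf prototypes (from lzf.h): both return the produced length,
 * or 0 when the output buffer is too small / the input is corrupted. */
#include <string.h>
#include "lzf.h"

/* Example low-memory build, mirroring the tunables documented above:
 *   cc -DHLOG=13 -DVERY_FAST=1 -DULTRA_FAST=0 -c lzf_c.c lzf_d.c */
int lzf_roundtrip_demo(void) {
    char in[256], packed[512], out[256];
    memset(in, 'A', sizeof(in));                       /* highly compressible */
    unsigned int clen = lzf_compress(in, sizeof(in), packed, sizeof(packed));
    if (clen == 0) return -1;                          /* did not fit */
    unsigned int dlen = lzf_decompress(packed, clen, out, sizeof(out));
    return (dlen == sizeof(in) && memcmp(in, out, dlen) == 0) ? 0 : -1;
}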
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/sdsalloc.h
/* SDSLib 2.0 -- A C dynamic strings library * * Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com> * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* SDS allocator selection. * * This file is used in order to change the SDS allocator at compile time. * Just define the following defines to what you want to use. Also add * the include of your alternate allocator if needed (not needed in order * to use the default libc allocator). */ #include "zmalloc.h" #define s_malloc zmalloc #define s_realloc zrealloc #define s_free zfree
2,083
47.465116
78
h
null
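The three macros above are the entire allocator seam for sds.c; a hedged sketch of an alternate sdsalloc.h that maps SDS onto the libc allocator instead (illustrative only, not what this tree does):

/* Illustrative alternate sdsalloc.h: point SDS at the libc allocator
 * (this tree maps it to zmalloc/zrealloc/zfree, as shown above). */
#include <stdlib.h>

#define s_malloc  malloc
#define s_realloc realloc
#define s_free    free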
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/ae.c
/* A simple event-driven programming library. Originally I wrote this code * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated * it in form of a library for easy reuse. * * Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <sys/time.h> #include <sys/types.h> #include <unistd.h> #include <stdlib.h> #include <poll.h> #include <string.h> #include <time.h> #include <errno.h> #include "ae.h" #include "zmalloc.h" #include "config.h" /* Include the best multiplexing layer supported by this system. * The following should be ordered by performances, descending. */ #ifdef HAVE_EVPORT #include "ae_evport.c" #else #ifdef HAVE_EPOLL #include "ae_epoll.c" #else #ifdef HAVE_KQUEUE #include "ae_kqueue.c" #else #include "ae_select.c" #endif #endif #endif aeEventLoop *aeCreateEventLoop(int setsize) { aeEventLoop *eventLoop; int i; if ((eventLoop = zmalloc(sizeof(*eventLoop))) == NULL) goto err; eventLoop->events = zmalloc(sizeof(aeFileEvent)*setsize); eventLoop->fired = zmalloc(sizeof(aeFiredEvent)*setsize); if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err; eventLoop->setsize = setsize; eventLoop->lastTime = time(NULL); eventLoop->timeEventHead = NULL; eventLoop->timeEventNextId = 0; eventLoop->stop = 0; eventLoop->maxfd = -1; eventLoop->beforesleep = NULL; if (aeApiCreate(eventLoop) == -1) goto err; /* Events with mask == AE_NONE are not set. So let's initialize the * vector with it. */ for (i = 0; i < setsize; i++) eventLoop->events[i].mask = AE_NONE; return eventLoop; err: if (eventLoop) { zfree(eventLoop->events); zfree(eventLoop->fired); zfree(eventLoop); } return NULL; } /* Return the current set size. */ int aeGetSetSize(aeEventLoop *eventLoop) { return eventLoop->setsize; } /* Resize the maximum set size of the event loop. * If the requested set size is smaller than the current set size, but * there is already a file descriptor in use that is >= the requested * set size minus one, AE_ERR is returned and the operation is not * performed at all. 
* * Otherwise AE_OK is returned and the operation is successful. */ int aeResizeSetSize(aeEventLoop *eventLoop, int setsize) { int i; if (setsize == eventLoop->setsize) return AE_OK; if (eventLoop->maxfd >= setsize) return AE_ERR; if (aeApiResize(eventLoop,setsize) == -1) return AE_ERR; eventLoop->events = zrealloc(eventLoop->events,sizeof(aeFileEvent)*setsize); eventLoop->fired = zrealloc(eventLoop->fired,sizeof(aeFiredEvent)*setsize); eventLoop->setsize = setsize; /* Make sure that if we created new slots, they are initialized with * an AE_NONE mask. */ for (i = eventLoop->maxfd+1; i < setsize; i++) eventLoop->events[i].mask = AE_NONE; return AE_OK; } void aeDeleteEventLoop(aeEventLoop *eventLoop) { aeApiFree(eventLoop); zfree(eventLoop->events); zfree(eventLoop->fired); zfree(eventLoop); } void aeStop(aeEventLoop *eventLoop) { eventLoop->stop = 1; } int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, aeFileProc *proc, void *clientData) { if (fd >= eventLoop->setsize) { errno = ERANGE; return AE_ERR; } aeFileEvent *fe = &eventLoop->events[fd]; if (aeApiAddEvent(eventLoop, fd, mask) == -1) return AE_ERR; fe->mask |= mask; if (mask & AE_READABLE) fe->rfileProc = proc; if (mask & AE_WRITABLE) fe->wfileProc = proc; fe->clientData = clientData; if (fd > eventLoop->maxfd) eventLoop->maxfd = fd; return AE_OK; } void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask) { if (fd >= eventLoop->setsize) return; aeFileEvent *fe = &eventLoop->events[fd]; if (fe->mask == AE_NONE) return; aeApiDelEvent(eventLoop, fd, mask); fe->mask = fe->mask & (~mask); if (fd == eventLoop->maxfd && fe->mask == AE_NONE) { /* Update the max fd */ int j; for (j = eventLoop->maxfd-1; j >= 0; j--) if (eventLoop->events[j].mask != AE_NONE) break; eventLoop->maxfd = j; } } int aeGetFileEvents(aeEventLoop *eventLoop, int fd) { if (fd >= eventLoop->setsize) return 0; aeFileEvent *fe = &eventLoop->events[fd]; return fe->mask; } static void aeGetTime(long *seconds, long *milliseconds) { struct timeval tv; gettimeofday(&tv, NULL); *seconds = tv.tv_sec; *milliseconds = tv.tv_usec/1000; } static void aeAddMillisecondsToNow(long long milliseconds, long *sec, long *ms) { long cur_sec, cur_ms, when_sec, when_ms; aeGetTime(&cur_sec, &cur_ms); when_sec = cur_sec + milliseconds/1000; when_ms = cur_ms + milliseconds%1000; if (when_ms >= 1000) { when_sec ++; when_ms -= 1000; } *sec = when_sec; *ms = when_ms; } long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, aeTimeProc *proc, void *clientData, aeEventFinalizerProc *finalizerProc) { long long id = eventLoop->timeEventNextId++; aeTimeEvent *te; te = zmalloc(sizeof(*te)); if (te == NULL) return AE_ERR; te->id = id; aeAddMillisecondsToNow(milliseconds,&te->when_sec,&te->when_ms); te->timeProc = proc; te->finalizerProc = finalizerProc; te->clientData = clientData; te->next = eventLoop->timeEventHead; eventLoop->timeEventHead = te; return id; } int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id) { aeTimeEvent *te = eventLoop->timeEventHead; while(te) { if (te->id == id) { te->id = AE_DELETED_EVENT_ID; return AE_OK; } te = te->next; } return AE_ERR; /* NO event with the specified ID found */ } /* Search the first timer to fire. * This operation is useful to know how many time the select can be * put in sleep without to delay any event. * If there are no timers NULL is returned. * * Note that's O(N) since time events are unsorted. 
* Possible optimizations (not needed by Redis so far, but...): * 1) Insert the event in order, so that the nearest is just the head. * Much better but still insertion or deletion of timers is O(N). * 2) Use a skiplist to have this operation as O(1) and insertion as O(log(N)). */ static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop) { aeTimeEvent *te = eventLoop->timeEventHead; aeTimeEvent *nearest = NULL; while(te) { if (!nearest || te->when_sec < nearest->when_sec || (te->when_sec == nearest->when_sec && te->when_ms < nearest->when_ms)) nearest = te; te = te->next; } return nearest; } /* Process time events */ static int processTimeEvents(aeEventLoop *eventLoop) { int processed = 0; aeTimeEvent *te, *prev; long long maxId; time_t now = time(NULL); /* If the system clock is moved to the future, and then set back to the * right value, time events may be delayed in a random way. Often this * means that scheduled operations will not be performed soon enough. * * Here we try to detect system clock skews, and force all the time * events to be processed ASAP when this happens: the idea is that * processing events earlier is less dangerous than delaying them * indefinitely, and practice suggests it is. */ if (now < eventLoop->lastTime) { te = eventLoop->timeEventHead; while(te) { te->when_sec = 0; te = te->next; } } eventLoop->lastTime = now; prev = NULL; te = eventLoop->timeEventHead; maxId = eventLoop->timeEventNextId-1; while(te) { long now_sec, now_ms; long long id; /* Remove events scheduled for deletion. */ if (te->id == AE_DELETED_EVENT_ID) { aeTimeEvent *next = te->next; if (prev == NULL) eventLoop->timeEventHead = te->next; else prev->next = te->next; if (te->finalizerProc) te->finalizerProc(eventLoop, te->clientData); zfree(te); te = next; continue; } /* Make sure we don't process time events created by time events in * this iteration. Note that this check is currently useless: we always * add new timers on the head, however if we change the implementation * detail, this check may be useful again: we keep it here for future * defense. */ if (te->id > maxId) { te = te->next; continue; } aeGetTime(&now_sec, &now_ms); if (now_sec > te->when_sec || (now_sec == te->when_sec && now_ms >= te->when_ms)) { int retval; id = te->id; retval = te->timeProc(eventLoop, id, te->clientData); processed++; if (retval != AE_NOMORE) { aeAddMillisecondsToNow(retval,&te->when_sec,&te->when_ms); } else { te->id = AE_DELETED_EVENT_ID; } } prev = te; te = te->next; } return processed; } /* Process every pending time event, then every pending file event * (that may be registered by time event callbacks just processed). * Without special flags the function sleeps until some file event * fires, or when the next time event occurs (if any). * * If flags is 0, the function does nothing and returns. * if flags has AE_ALL_EVENTS set, all the kind of events are processed. * if flags has AE_FILE_EVENTS set, file events are processed. * if flags has AE_TIME_EVENTS set, time events are processed. * if flags has AE_DONT_WAIT set the function returns ASAP until all * the events that's possible to process without to wait are processed. * * The function returns the number of events processed. */ int aeProcessEvents(aeEventLoop *eventLoop, int flags) { int processed = 0, numevents; /* Nothing to do? 
return ASAP */ if (!(flags & AE_TIME_EVENTS) && !(flags & AE_FILE_EVENTS)) return 0; /* Note that we want call select() even if there are no * file events to process as long as we want to process time * events, in order to sleep until the next time event is ready * to fire. */ if (eventLoop->maxfd != -1 || ((flags & AE_TIME_EVENTS) && !(flags & AE_DONT_WAIT))) { int j; aeTimeEvent *shortest = NULL; struct timeval tv, *tvp; if (flags & AE_TIME_EVENTS && !(flags & AE_DONT_WAIT)) shortest = aeSearchNearestTimer(eventLoop); if (shortest) { long now_sec, now_ms; aeGetTime(&now_sec, &now_ms); tvp = &tv; /* How many milliseconds we need to wait for the next * time event to fire? */ long long ms = (shortest->when_sec - now_sec)*1000 + shortest->when_ms - now_ms; if (ms > 0) { tvp->tv_sec = ms/1000; tvp->tv_usec = (ms % 1000)*1000; } else { tvp->tv_sec = 0; tvp->tv_usec = 0; } } else { /* If we have to check for events but need to return * ASAP because of AE_DONT_WAIT we need to set the timeout * to zero */ if (flags & AE_DONT_WAIT) { tv.tv_sec = tv.tv_usec = 0; tvp = &tv; } else { /* Otherwise we can block */ tvp = NULL; /* wait forever */ } } numevents = aeApiPoll(eventLoop, tvp); for (j = 0; j < numevents; j++) { aeFileEvent *fe = &eventLoop->events[eventLoop->fired[j].fd]; int mask = eventLoop->fired[j].mask; int fd = eventLoop->fired[j].fd; int rfired = 0; /* note the fe->mask & mask & ... code: maybe an already processed * event removed an element that fired and we still didn't * processed, so we check if the event is still valid. */ if (fe->mask & mask & AE_READABLE) { rfired = 1; fe->rfileProc(eventLoop,fd,fe->clientData,mask); } if (fe->mask & mask & AE_WRITABLE) { if (!rfired || fe->wfileProc != fe->rfileProc) fe->wfileProc(eventLoop,fd,fe->clientData,mask); } processed++; } } /* Check time events */ if (flags & AE_TIME_EVENTS) processed += processTimeEvents(eventLoop); return processed; /* return the number of processed file/time events */ } /* Wait for milliseconds until the given file descriptor becomes * writable/readable/exception */ int aeWait(int fd, int mask, long long milliseconds) { struct pollfd pfd; int retmask = 0, retval; memset(&pfd, 0, sizeof(pfd)); pfd.fd = fd; if (mask & AE_READABLE) pfd.events |= POLLIN; if (mask & AE_WRITABLE) pfd.events |= POLLOUT; if ((retval = poll(&pfd, 1, milliseconds))== 1) { if (pfd.revents & POLLIN) retmask |= AE_READABLE; if (pfd.revents & POLLOUT) retmask |= AE_WRITABLE; if (pfd.revents & POLLERR) retmask |= AE_WRITABLE; if (pfd.revents & POLLHUP) retmask |= AE_WRITABLE; return retmask; } else { return retval; } } void aeMain(aeEventLoop *eventLoop) { eventLoop->stop = 0; while (!eventLoop->stop) { if (eventLoop->beforesleep != NULL) eventLoop->beforesleep(eventLoop); aeProcessEvents(eventLoop, AE_ALL_EVENTS); } } char *aeGetApiName(void) { return aeApiName(); } void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep) { eventLoop->beforesleep = beforesleep; }
15,529
32.32618
83
c
null
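A minimal sketch of driving the event loop defined above; the aeFileProc callback signature and the AE_* constants are assumed from ae.h (not shown here), and listen_fd/handle_read are hypothetical names.

#include <unistd.h>
#include "ae.h"

/* Callback shape assumed from ae.h: void (*)(aeEventLoop*, int, void*, int). */
static void handle_read(aeEventLoop *loop, int fd, void *clientData, int mask) {
    char buf[128];
    (void)clientData; (void)mask;
    if (read(fd, buf, sizeof(buf)) <= 0)               /* EOF or error */
        aeDeleteFileEvent(loop, fd, AE_READABLE);
}

int run_loop(int listen_fd) {
    aeEventLoop *loop = aeCreateEventLoop(1024);       /* setsize = max fds */
    if (loop == NULL) return -1;
    if (aeCreateFileEvent(loop, listen_fd, AE_READABLE,
                          handle_read, NULL) == AE_ERR) {
        aeDeleteEventLoop(loop);
        return -1;
    }
    aeMain(loop);                                      /* runs until aeStop() */
    aeDeleteEventLoop(loop);
    return 0;
}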
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/testhelp.h
/* This is a really minimal testing framework for C. * * Example: * * test_cond("Check if 1 == 1", 1==1) * test_cond("Check if 5 > 10", 5 > 10) * test_report() * * ---------------------------------------------------------------------------- * * Copyright (c) 2010-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __TESTHELP_H #define __TESTHELP_H int __failed_tests = 0; int __test_num = 0; #define test_cond(descr,_c) do { \ __test_num++; printf("%d - %s: ", __test_num, descr); \ if(_c) printf("PASSED\n"); else {printf("FAILED\n"); __failed_tests++;} \ } while(0); #define test_report() do { \ printf("%d tests, %d passed, %d failed\n", __test_num, \ __test_num-__failed_tests, __failed_tests); \ if (__failed_tests) { \ printf("=== WARNING === We have failed tests here...\n"); \ exit(1); \ } \ } while(0); #endif
2,431
40.931034
79
h
null
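A sketch of a test program built on the two macros above; note that testhelp.h itself does not include stdio.h or stdlib.h, so the test file must provide them (test_report() calls printf() and exit()).

#include <stdio.h>
#include <stdlib.h>
#include "testhelp.h"

int main(void) {
    test_cond("1 equals 1", 1 == 1);
    test_cond("zero is falsy", !0);
    test_report();              /* exits with status 1 if any test failed */
    return 0;
}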
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/ziplist.h
/* * Copyright (c) 2009-2012, Pieter Noordhuis <pcnoordhuis at gmail dot com> * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _ZIPLIST_H #define _ZIPLIST_H #define ZIPLIST_HEAD 0 #define ZIPLIST_TAIL 1 unsigned char *ziplistNew(void); unsigned char *ziplistMerge(unsigned char **first, unsigned char **second); unsigned char *ziplistPush(unsigned char *zl, unsigned char *s, unsigned int slen, int where); unsigned char *ziplistIndex(unsigned char *zl, int index); unsigned char *ziplistNext(unsigned char *zl, unsigned char *p); unsigned char *ziplistPrev(unsigned char *zl, unsigned char *p); unsigned int ziplistGet(unsigned char *p, unsigned char **sval, unsigned int *slen, long long *lval); unsigned char *ziplistInsert(unsigned char *zl, unsigned char *p, unsigned char *s, unsigned int slen); unsigned char *ziplistDelete(unsigned char *zl, unsigned char **p); unsigned char *ziplistDeleteRange(unsigned char *zl, int index, unsigned int num); unsigned int ziplistCompare(unsigned char *p, unsigned char *s, unsigned int slen); unsigned char *ziplistFind(unsigned char *p, unsigned char *vstr, unsigned int vlen, unsigned int skip); unsigned int ziplistLen(unsigned char *zl); size_t ziplistBlobLen(unsigned char *zl); #ifdef REDIS_TEST int ziplistTest(int argc, char *argv[]); #endif #endif /* _ZIPLIST_H */
2,890
49.719298
104
h
null
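A hedged sketch against the prototypes declared above (ziplist.c additionally needs zmalloc.c at link time); entries are stored either as strings or as integers, which is why ziplistGet() reports both forms.

#include <stdio.h>
#include "ziplist.h"

void ziplist_demo(void) {
    unsigned char *zl = ziplistNew();
    zl = ziplistPush(zl, (unsigned char *)"foo", 3, ZIPLIST_TAIL);
    zl = ziplistPush(zl, (unsigned char *)"1024", 4, ZIPLIST_TAIL);

    unsigned char *p = ziplistIndex(zl, 0);            /* first entry */
    unsigned char *sval; unsigned int slen; long long lval;
    while (p) {
        if (ziplistGet(p, &sval, &slen, &lval)) {
            if (sval) printf("%.*s\n", (int)slen, sval);   /* string entry */
            else      printf("%lld\n", lval);              /* integer entry */
        }
        p = ziplistNext(zl, p);
    }
    printf("entries: %u\n", ziplistLen(zl));
    /* A ziplist is a single allocation; release it with zfree(zl). */
}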
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/fmacros.h
/* * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _REDIS_FMACRO_H #define _REDIS_FMACRO_H #define _BSD_SOURCE #if defined(__linux__) #define _GNU_SOURCE #define _DEFAULT_SOURCE #endif #if defined(_AIX) #define _ALL_SOURCE #endif #if defined(__linux__) || defined(__OpenBSD__) #define _XOPEN_SOURCE 700 /* * On NetBSD, _XOPEN_SOURCE undefines _NETBSD_SOURCE and * thus hides inet_aton etc. */ #elif !defined(__NetBSD__) #define _XOPEN_SOURCE #endif #if defined(__sun) #define _POSIX_C_SOURCE 199506L #endif #define _LARGEFILE_SOURCE #define _FILE_OFFSET_BITS 64 #endif
2,147
33.645161
78
h
null
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/pqsort.h
/* The following is the NetBSD libc qsort implementation modified in order to * support partial sorting of ranges for Redis. * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * See the pqsort.c file for the original copyright notice. */ #ifndef __PQSORT_H #define __PQSORT_H void pqsort(void *a, size_t n, size_t es, int (*cmp) (const void *, const void *), size_t lrange, size_t rrange); #endif
1,964
46.926829
78
h
null
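A sketch of a partial sort with the prototype above; the lrange/rrange bounds are treated here as an inclusive element range, which is an assumption since pqsort.c itself is not shown.

#include <stdio.h>
#include <stdlib.h>
#include "pqsort.h"

static int cmp_int(const void *a, const void *b) {
    int x = *(const int *)a, y = *(const int *)b;
    return (x > y) - (x < y);
}

void pqsort_demo(void) {
    int v[] = {9, 3, 7, 1, 8, 2, 6, 4, 5, 0};
    /* Only positions 0..2 need to end up ordered, e.g. to serve a LIMIT 0 3. */
    pqsort(v, 10, sizeof(int), cmp_int, 0, 2);
    for (int i = 0; i < 3; i++) printf("%d ", v[i]);   /* 0 1 2 */
    printf("\n");
}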
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/slowlog.h
/* * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #define SLOWLOG_ENTRY_MAX_ARGC 32 #define SLOWLOG_ENTRY_MAX_STRING 128 /* This structure defines an entry inside the slow log list */ typedef struct slowlogEntry { robj **argv; int argc; long long id; /* Unique entry identifier. */ long long duration; /* Time spent by the query, in nanoseconds. */ time_t time; /* Unix time at which the query was executed. */ } slowlogEntry; /* Exported API */ void slowlogInit(void); void slowlogPushEntryIfNeeded(robj **argv, int argc, long long duration); /* Exported commands */ void slowlogCommand(client *c);
2,197
44.791667
78
h
null
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/util.h
/* * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __REDIS_UTIL_H #define __REDIS_UTIL_H #include <stdint.h> #include "sds.h" int stringmatchlen(const char *p, int plen, const char *s, int slen, int nocase); int stringmatch(const char *p, const char *s, int nocase); long long memtoll(const char *p, int *err); uint32_t digits10(uint64_t v); uint32_t sdigits10(int64_t v); int ll2string(char *s, size_t len, long long value); int string2ll(const char *s, size_t slen, long long *value); int string2l(const char *s, size_t slen, long *value); int d2string(char *buf, size_t len, double value); sds getAbsolutePath(char *filename); int pathIsBaseName(char *path); #ifdef REDIS_TEST int utilTest(int argc, char **argv); #endif #endif
2,303
42.471698
81
h
null
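A short sketch of the string/number helpers declared above:

#include <stdio.h>
#include "util.h"

void util_demo(void) {
    long long v;
    if (string2ll("12345", 5, &v))                     /* returns 1 on success */
        printf("parsed %lld, %u digits\n", v, digits10((uint64_t)v));

    char buf[32];
    int len = ll2string(buf, sizeof(buf), -42);        /* returns length written */
    printf("%.*s\n", len, buf);

    printf("%d\n", stringmatch("h?llo*", "hello world", 0));   /* prints 1 */
}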
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/t_hash.c
/* * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include <math.h> /*----------------------------------------------------------------------------- * Hash type API *----------------------------------------------------------------------------*/ /* Check the length of a number of objects to see if we need to convert a * ziplist to a real hash. Note that we only check string encoded objects * as their string length can be queried in constant time. */ void hashTypeTryConversion(robj *o, robj **argv, int start, int end) { int i; if (o->encoding != OBJ_ENCODING_ZIPLIST) return; for (i = start; i <= end; i++) { if (sdsEncodedObject(argv[i]) && sdslen(argv[i]->ptr) > server.hash_max_ziplist_value) { hashTypeConvert(o, OBJ_ENCODING_HT); break; } } } /* Encode given objects in-place when the hash uses a dict. */ void hashTypeTryObjectEncoding(robj *subject, robj **o1, robj **o2) { if (subject->encoding == OBJ_ENCODING_HT) { if (o1) *o1 = tryObjectEncoding(*o1); if (o2) *o2 = tryObjectEncoding(*o2); } } /* Get the value from a ziplist encoded hash, identified by field. * Returns -1 when the field cannot be found. */ int hashTypeGetFromZiplist(robj *o, robj *field, unsigned char **vstr, unsigned int *vlen, long long *vll) { unsigned char *zl, *fptr = NULL, *vptr = NULL; int ret; serverAssert(o->encoding == OBJ_ENCODING_ZIPLIST); field = getDecodedObject(field); zl = o->ptr; fptr = ziplistIndex(zl, ZIPLIST_HEAD); if (fptr != NULL) { fptr = ziplistFind(fptr, field->ptr, sdslen(field->ptr), 1); if (fptr != NULL) { /* Grab pointer to the value (fptr points to the field) */ vptr = ziplistNext(zl, fptr); serverAssert(vptr != NULL); } } decrRefCount(field); if (vptr != NULL) { ret = ziplistGet(vptr, vstr, vlen, vll); serverAssert(ret); return 0; } return -1; } /* Get the value from a hash table encoded hash, identified by field. * Returns -1 when the field cannot be found. 
*/ int hashTypeGetFromHashTable(robj *o, robj *field, robj **value) { dictEntry *de; serverAssert(o->encoding == OBJ_ENCODING_HT); de = dictFind(o->ptr, field); if (de == NULL) return -1; *value = dictGetVal(de); return 0; } /* Higher level function of hashTypeGet*() that always returns a Redis * object (either new or with refcount incremented), so that the caller * can retain a reference or call decrRefCount after the usage. * * The lower level function can prevent copy on write so it is * the preferred way of doing read operations. */ robj *hashTypeGetObject(robj *o, robj *field) { robj *value = NULL; if (o->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *vstr = NULL; unsigned int vlen = UINT_MAX; long long vll = LLONG_MAX; if (hashTypeGetFromZiplist(o, field, &vstr, &vlen, &vll) == 0) { if (vstr) { value = createStringObject((char*)vstr, vlen); } else { value = createStringObjectFromLongLong(vll); } } } else if (o->encoding == OBJ_ENCODING_HT) { robj *aux; if (hashTypeGetFromHashTable(o, field, &aux) == 0) { incrRefCount(aux); value = aux; } } else { serverPanic("Unknown hash encoding"); } return value; } /* Higher level function using hashTypeGet*() to return the length of the * object associated with the requested field, or 0 if the field does not * exist. */ size_t hashTypeGetValueLength(robj *o, robj *field) { size_t len = 0; if (o->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *vstr = NULL; unsigned int vlen = UINT_MAX; long long vll = LLONG_MAX; if (hashTypeGetFromZiplist(o, field, &vstr, &vlen, &vll) == 0) len = vstr ? vlen : sdigits10(vll); } else if (o->encoding == OBJ_ENCODING_HT) { robj *aux; if (hashTypeGetFromHashTable(o, field, &aux) == 0) len = stringObjectLen(aux); } else { serverPanic("Unknown hash encoding"); } return len; } /* Test if the specified field exists in the given hash. Returns 1 if the field * exists, and 0 when it doesn't. */ int hashTypeExists(robj *o, robj *field) { if (o->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *vstr = NULL; unsigned int vlen = UINT_MAX; long long vll = LLONG_MAX; if (hashTypeGetFromZiplist(o, field, &vstr, &vlen, &vll) == 0) return 1; } else if (o->encoding == OBJ_ENCODING_HT) { robj *aux; if (hashTypeGetFromHashTable(o, field, &aux) == 0) return 1; } else { serverPanic("Unknown hash encoding"); } return 0; } /* Add an element, discard the old if the key already exists. * Return 0 on insert and 1 on update. * This function will take care of incrementing the reference count of the * retained fields and value objects. 
*/ int hashTypeSet(robj *o, robj *field, robj *value) { int update = 0; if (o->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *zl, *fptr, *vptr; field = getDecodedObject(field); value = getDecodedObject(value); zl = o->ptr; fptr = ziplistIndex(zl, ZIPLIST_HEAD); if (fptr != NULL) { fptr = ziplistFind(fptr, field->ptr, sdslen(field->ptr), 1); if (fptr != NULL) { /* Grab pointer to the value (fptr points to the field) */ vptr = ziplistNext(zl, fptr); serverAssert(vptr != NULL); update = 1; /* Delete value */ zl = ziplistDelete(zl, &vptr); /* Insert new value */ zl = ziplistInsert(zl, vptr, value->ptr, sdslen(value->ptr)); } } if (!update) { /* Push new field/value pair onto the tail of the ziplist */ zl = ziplistPush(zl, field->ptr, sdslen(field->ptr), ZIPLIST_TAIL); zl = ziplistPush(zl, value->ptr, sdslen(value->ptr), ZIPLIST_TAIL); } o->ptr = zl; decrRefCount(field); decrRefCount(value); /* Check if the ziplist needs to be converted to a hash table */ if (hashTypeLength(o) > server.hash_max_ziplist_entries) hashTypeConvert(o, OBJ_ENCODING_HT); } else if (o->encoding == OBJ_ENCODING_HT) { if (dictReplace(o->ptr, field, value)) { /* Insert */ incrRefCount(field); } else { /* Update */ update = 1; } incrRefCount(value); } else { serverPanic("Unknown hash encoding"); } return update; } /* Delete an element from a hash. * Return 1 on deleted and 0 on not found. */ int hashTypeDelete(robj *o, robj *field) { int deleted = 0; if (o->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *zl, *fptr; field = getDecodedObject(field); zl = o->ptr; fptr = ziplistIndex(zl, ZIPLIST_HEAD); if (fptr != NULL) { fptr = ziplistFind(fptr, field->ptr, sdslen(field->ptr), 1); if (fptr != NULL) { zl = ziplistDelete(zl,&fptr); zl = ziplistDelete(zl,&fptr); o->ptr = zl; deleted = 1; } } decrRefCount(field); } else if (o->encoding == OBJ_ENCODING_HT) { if (dictDelete((dict*)o->ptr, field) == C_OK) { deleted = 1; /* Always check if the dictionary needs a resize after a delete. */ if (htNeedsResize(o->ptr)) dictResize(o->ptr); } } else { serverPanic("Unknown hash encoding"); } return deleted; } /* Return the number of elements in a hash. */ unsigned long hashTypeLength(robj *o) { unsigned long length = ULONG_MAX; if (o->encoding == OBJ_ENCODING_ZIPLIST) { length = ziplistLen(o->ptr) / 2; } else if (o->encoding == OBJ_ENCODING_HT) { length = dictSize((dict*)o->ptr); } else { serverPanic("Unknown hash encoding"); } return length; } hashTypeIterator *hashTypeInitIterator(robj *subject) { hashTypeIterator *hi = zmalloc(sizeof(hashTypeIterator)); hi->subject = subject; hi->encoding = subject->encoding; if (hi->encoding == OBJ_ENCODING_ZIPLIST) { hi->fptr = NULL; hi->vptr = NULL; } else if (hi->encoding == OBJ_ENCODING_HT) { hi->di = dictGetIterator(subject->ptr); } else { serverPanic("Unknown hash encoding"); } return hi; } void hashTypeReleaseIterator(hashTypeIterator *hi) { if (hi->encoding == OBJ_ENCODING_HT) { dictReleaseIterator(hi->di); } zfree(hi); } /* Move to the next entry in the hash. Return C_OK when the next entry * could be found and C_ERR when the iterator reaches the end. 
*/ int hashTypeNext(hashTypeIterator *hi) { if (hi->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *zl; unsigned char *fptr, *vptr; zl = hi->subject->ptr; fptr = hi->fptr; vptr = hi->vptr; if (fptr == NULL) { /* Initialize cursor */ serverAssert(vptr == NULL); fptr = ziplistIndex(zl, 0); } else { /* Advance cursor */ serverAssert(vptr != NULL); fptr = ziplistNext(zl, vptr); } if (fptr == NULL) return C_ERR; /* Grab pointer to the value (fptr points to the field) */ vptr = ziplistNext(zl, fptr); serverAssert(vptr != NULL); /* fptr, vptr now point to the first or next pair */ hi->fptr = fptr; hi->vptr = vptr; } else if (hi->encoding == OBJ_ENCODING_HT) { if ((hi->de = dictNext(hi->di)) == NULL) return C_ERR; } else { serverPanic("Unknown hash encoding"); } return C_OK; } /* Get the field or value at iterator cursor, for an iterator on a hash value * encoded as a ziplist. Prototype is similar to `hashTypeGetFromZiplist`. */ void hashTypeCurrentFromZiplist(hashTypeIterator *hi, int what, unsigned char **vstr, unsigned int *vlen, long long *vll) { int ret; serverAssert(hi->encoding == OBJ_ENCODING_ZIPLIST); if (what & OBJ_HASH_KEY) { ret = ziplistGet(hi->fptr, vstr, vlen, vll); serverAssert(ret); } else { ret = ziplistGet(hi->vptr, vstr, vlen, vll); serverAssert(ret); } } /* Get the field or value at iterator cursor, for an iterator on a hash value * encoded as a ziplist. Prototype is similar to `hashTypeGetFromHashTable`. */ void hashTypeCurrentFromHashTable(hashTypeIterator *hi, int what, robj **dst) { serverAssert(hi->encoding == OBJ_ENCODING_HT); if (what & OBJ_HASH_KEY) { *dst = dictGetKey(hi->de); } else { *dst = dictGetVal(hi->de); } } /* A non copy-on-write friendly but higher level version of hashTypeCurrent*() * that returns an object with incremented refcount (or a new object). It is up * to the caller to decrRefCount() the object if no reference is retained. */ robj *hashTypeCurrentObject(hashTypeIterator *hi, int what) { robj *dst; if (hi->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *vstr = NULL; unsigned int vlen = UINT_MAX; long long vll = LLONG_MAX; hashTypeCurrentFromZiplist(hi, what, &vstr, &vlen, &vll); if (vstr) { dst = createStringObject((char*)vstr, vlen); } else { dst = createStringObjectFromLongLong(vll); } } else if (hi->encoding == OBJ_ENCODING_HT) { hashTypeCurrentFromHashTable(hi, what, &dst); incrRefCount(dst); } else { serverPanic("Unknown hash encoding"); } return dst; } robj *hashTypeLookupWriteOrCreate(client *c, robj *key) { robj *o = lookupKeyWrite(c->db,key); if (o == NULL) { o = createHashObject(); dbAdd(c->db,key,o); } else { if (o->type != OBJ_HASH) { addReply(c,shared.wrongtypeerr); return NULL; } } return o; } void hashTypeConvertZiplist(robj *o, int enc) { serverAssert(o->encoding == OBJ_ENCODING_ZIPLIST); if (enc == OBJ_ENCODING_ZIPLIST) { /* Nothing to do... 
*/ } else if (enc == OBJ_ENCODING_HT) { hashTypeIterator *hi; dict *dict; int ret; hi = hashTypeInitIterator(o); dict = dictCreate(&hashDictType, NULL); while (hashTypeNext(hi) != C_ERR) { robj *field, *value; field = hashTypeCurrentObject(hi, OBJ_HASH_KEY); field = tryObjectEncoding(field); value = hashTypeCurrentObject(hi, OBJ_HASH_VALUE); value = tryObjectEncoding(value); ret = dictAdd(dict, field, value); if (ret != DICT_OK) { serverLogHexDump(LL_WARNING,"ziplist with dup elements dump", o->ptr,ziplistBlobLen(o->ptr)); serverAssert(ret == DICT_OK); } } hashTypeReleaseIterator(hi); zfree(o->ptr); o->encoding = OBJ_ENCODING_HT; o->ptr = dict; } else { serverPanic("Unknown hash encoding"); } } void hashTypeConvert(robj *o, int enc) { if (o->encoding == OBJ_ENCODING_ZIPLIST) { hashTypeConvertZiplist(o, enc); } else if (o->encoding == OBJ_ENCODING_HT) { serverPanic("Not implemented"); } else { serverPanic("Unknown hash encoding"); } } /*----------------------------------------------------------------------------- * Hash type commands *----------------------------------------------------------------------------*/ void hsetCommand(client *c) { int update; robj *o; if ((o = hashTypeLookupWriteOrCreate(c,c->argv[1])) == NULL) return; hashTypeTryConversion(o,c->argv,2,3); hashTypeTryObjectEncoding(o,&c->argv[2], &c->argv[3]); update = hashTypeSet(o,c->argv[2],c->argv[3]); addReply(c, update ? shared.czero : shared.cone); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hset",c->argv[1],c->db->id); server.dirty++; } void hsetnxCommand(client *c) { robj *o; if ((o = hashTypeLookupWriteOrCreate(c,c->argv[1])) == NULL) return; hashTypeTryConversion(o,c->argv,2,3); if (hashTypeExists(o, c->argv[2])) { addReply(c, shared.czero); } else { hashTypeTryObjectEncoding(o,&c->argv[2], &c->argv[3]); hashTypeSet(o,c->argv[2],c->argv[3]); addReply(c, shared.cone); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hset",c->argv[1],c->db->id); server.dirty++; } } void hmsetCommand(client *c) { int i; robj *o; if ((c->argc % 2) == 1) { addReplyError(c,"wrong number of arguments for HMSET"); return; } if ((o = hashTypeLookupWriteOrCreate(c,c->argv[1])) == NULL) return; hashTypeTryConversion(o,c->argv,2,c->argc-1); for (i = 2; i < c->argc; i += 2) { hashTypeTryObjectEncoding(o,&c->argv[i], &c->argv[i+1]); hashTypeSet(o,c->argv[i],c->argv[i+1]); } addReply(c, shared.ok); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hset",c->argv[1],c->db->id); server.dirty++; } void hincrbyCommand(client *c) { long long value, incr, oldvalue; robj *o, *current, *new; if (getLongLongFromObjectOrReply(c,c->argv[3],&incr,NULL) != C_OK) return; if ((o = hashTypeLookupWriteOrCreate(c,c->argv[1])) == NULL) return; if ((current = hashTypeGetObject(o,c->argv[2])) != NULL) { if (getLongLongFromObjectOrReply(c,current,&value, "hash value is not an integer") != C_OK) { decrRefCount(current); return; } decrRefCount(current); } else { value = 0; } oldvalue = value; if ((incr < 0 && oldvalue < 0 && incr < (LLONG_MIN-oldvalue)) || (incr > 0 && oldvalue > 0 && incr > (LLONG_MAX-oldvalue))) { addReplyError(c,"increment or decrement would overflow"); return; } value += incr; new = createStringObjectFromLongLong(value); hashTypeTryObjectEncoding(o,&c->argv[2],NULL); hashTypeSet(o,c->argv[2],new); decrRefCount(new); addReplyLongLong(c,value); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hincrby",c->argv[1],c->db->id); server.dirty++; } void 
hincrbyfloatCommand(client *c) { double long value, incr; robj *o, *current, *new, *aux; if (getLongDoubleFromObjectOrReply(c,c->argv[3],&incr,NULL) != C_OK) return; if ((o = hashTypeLookupWriteOrCreate(c,c->argv[1])) == NULL) return; if ((current = hashTypeGetObject(o,c->argv[2])) != NULL) { if (getLongDoubleFromObjectOrReply(c,current,&value, "hash value is not a valid float") != C_OK) { decrRefCount(current); return; } decrRefCount(current); } else { value = 0; } value += incr; new = createStringObjectFromLongDouble(value,1); hashTypeTryObjectEncoding(o,&c->argv[2],NULL); hashTypeSet(o,c->argv[2],new); addReplyBulk(c,new); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hincrbyfloat",c->argv[1],c->db->id); server.dirty++; /* Always replicate HINCRBYFLOAT as an HSET command with the final value * in order to make sure that differences in float pricision or formatting * will not create differences in replicas or after an AOF restart. */ aux = createStringObject("HSET",4); rewriteClientCommandArgument(c,0,aux); decrRefCount(aux); rewriteClientCommandArgument(c,3,new); decrRefCount(new); } static void addHashFieldToReply(client *c, robj *o, robj *field) { int ret; if (o == NULL) { addReply(c, shared.nullbulk); return; } if (o->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *vstr = NULL; unsigned int vlen = UINT_MAX; long long vll = LLONG_MAX; ret = hashTypeGetFromZiplist(o, field, &vstr, &vlen, &vll); if (ret < 0) { addReply(c, shared.nullbulk); } else { if (vstr) { addReplyBulkCBuffer(c, vstr, vlen); } else { addReplyBulkLongLong(c, vll); } } } else if (o->encoding == OBJ_ENCODING_HT) { robj *value; ret = hashTypeGetFromHashTable(o, field, &value); if (ret < 0) { addReply(c, shared.nullbulk); } else { addReplyBulk(c, value); } } else { serverPanic("Unknown hash encoding"); } } void hgetCommand(client *c) { robj *o; if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.nullbulk)) == NULL || checkType(c,o,OBJ_HASH)) return; addHashFieldToReply(c, o, c->argv[2]); } void hmgetCommand(client *c) { robj *o; int i; /* Don't abort when the key cannot be found. Non-existing keys are empty * hashes, where HMGET should respond with a series of null bulks. 
*/ o = lookupKeyRead(c->db, c->argv[1]); if (o != NULL && o->type != OBJ_HASH) { addReply(c, shared.wrongtypeerr); return; } addReplyMultiBulkLen(c, c->argc-2); for (i = 2; i < c->argc; i++) { addHashFieldToReply(c, o, c->argv[i]); } } void hdelCommand(client *c) { robj *o; int j, deleted = 0, keyremoved = 0; if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL || checkType(c,o,OBJ_HASH)) return; for (j = 2; j < c->argc; j++) { if (hashTypeDelete(o,c->argv[j])) { deleted++; if (hashTypeLength(o) == 0) { dbDelete(c->db,c->argv[1]); keyremoved = 1; break; } } } if (deleted) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hdel",c->argv[1],c->db->id); if (keyremoved) notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1], c->db->id); server.dirty += deleted; } addReplyLongLong(c,deleted); } void hlenCommand(client *c) { robj *o; if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL || checkType(c,o,OBJ_HASH)) return; addReplyLongLong(c,hashTypeLength(o)); } void hstrlenCommand(client *c) { robj *o; if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL || checkType(c,o,OBJ_HASH)) return; addReplyLongLong(c,hashTypeGetValueLength(o,c->argv[2])); } static void addHashIteratorCursorToReply(client *c, hashTypeIterator *hi, int what) { if (hi->encoding == OBJ_ENCODING_ZIPLIST) { unsigned char *vstr = NULL; unsigned int vlen = UINT_MAX; long long vll = LLONG_MAX; hashTypeCurrentFromZiplist(hi, what, &vstr, &vlen, &vll); if (vstr) { addReplyBulkCBuffer(c, vstr, vlen); } else { addReplyBulkLongLong(c, vll); } } else if (hi->encoding == OBJ_ENCODING_HT) { robj *value; hashTypeCurrentFromHashTable(hi, what, &value); addReplyBulk(c, value); } else { serverPanic("Unknown hash encoding"); } } void genericHgetallCommand(client *c, int flags) { robj *o; hashTypeIterator *hi; int multiplier = 0; int length, count = 0; if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.emptymultibulk)) == NULL || checkType(c,o,OBJ_HASH)) return; if (flags & OBJ_HASH_KEY) multiplier++; if (flags & OBJ_HASH_VALUE) multiplier++; length = hashTypeLength(o) * multiplier; addReplyMultiBulkLen(c, length); hi = hashTypeInitIterator(o); while (hashTypeNext(hi) != C_ERR) { if (flags & OBJ_HASH_KEY) { addHashIteratorCursorToReply(c, hi, OBJ_HASH_KEY); count++; } if (flags & OBJ_HASH_VALUE) { addHashIteratorCursorToReply(c, hi, OBJ_HASH_VALUE); count++; } } hashTypeReleaseIterator(hi); serverAssert(count == length); } void hkeysCommand(client *c) { genericHgetallCommand(c,OBJ_HASH_KEY); } void hvalsCommand(client *c) { genericHgetallCommand(c,OBJ_HASH_VALUE); } void hgetallCommand(client *c) { genericHgetallCommand(c,OBJ_HASH_KEY|OBJ_HASH_VALUE); } void hexistsCommand(client *c) { robj *o; if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL || checkType(c,o,OBJ_HASH)) return; addReply(c, hashTypeExists(o,c->argv[2]) ? shared.cone : shared.czero); } void hscanCommand(client *c) { robj *o; unsigned long cursor; if (parseScanCursorOrReply(c,c->argv[2],&cursor) == C_ERR) return; if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.emptyscan)) == NULL || checkType(c,o,OBJ_HASH)) return; scanGenericCommand(c,o,cursor); }
24,913
30.026152
85
c
null
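A hypothetical sketch of the lower-level hash type API defined above; it leans on createHashObject(), createStringObject() and decrRefCount() from elsewhere in the tree, so it is illustrative rather than standalone.

#include "server.h"

void hash_type_demo(void) {
    robj *o = createHashObject();                      /* starts ziplist-encoded */
    robj *f = createStringObject("field", 5);
    robj *v = createStringObject("value", 5);

    int update = hashTypeSet(o, f, v);                 /* 0 = insert, 1 = update */
    robj *got = hashTypeGetObject(o, f);               /* refcounted copy or NULL */
    if (got) decrRefCount(got);

    (void)update;
    decrRefCount(f);                        /* hashTypeSet retained its own refs */
    decrRefCount(v);
    decrRefCount(o);
}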
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/sparkline.c
/* sparkline.c -- ASCII Sparklines * This code is modified from http://github.com/antirez/aspark and adapted * in order to return SDS strings instead of outputting directly to * the terminal. * * --------------------------------------------------------------------------- * * Copyright(C) 2011-2014 Salvatore Sanfilippo <antirez@gmail.com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include <math.h> /* This is the charset used to display the graphs, but multiple rows are used * to increase the resolution. */ static char charset[] = "_-`"; static char charset_fill[] = "_o#"; static int charset_len = sizeof(charset)-1; static int label_margin_top = 1; /* ---------------------------------------------------------------------------- * Sequences are arrays of samples we use to represent data to turn * into sparklines. This is the API in order to generate a sparkline: * * struct sequence *seq = createSparklineSequence(); * sparklineSequenceAddSample(seq, 10, NULL); * sparklineSequenceAddSample(seq, 20, NULL); * sparklineSequenceAddSample(seq, 30, "last sample label"); * sds output = sparklineRender(sdsempty(), seq, 80, 4, SPARKLINE_FILL); * freeSparklineSequence(seq); * ------------------------------------------------------------------------- */ /* Create a new sequence. */ struct sequence *createSparklineSequence(void) { struct sequence *seq = zmalloc(sizeof(*seq)); seq->length = 0; seq->samples = NULL; return seq; } /* Add a new sample into a sequence. */ void sparklineSequenceAddSample(struct sequence *seq, double value, char *label) { label = (label == NULL || label[0] == '\0') ? NULL : zstrdup(label); if (seq->length == 0) { seq->min = seq->max = value; } else { if (value < seq->min) seq->min = value; else if (value > seq->max) seq->max = value; } seq->samples = zrealloc(seq->samples,sizeof(struct sample)*(seq->length+1)); seq->samples[seq->length].value = value; seq->samples[seq->length].label = label; seq->length++; if (label) seq->labels++; } /* Free a sequence. 
*/ void freeSparklineSequence(struct sequence *seq) { int j; for (j = 0; j < seq->length; j++) zfree(seq->samples[j].label); zfree(seq->samples); zfree(seq); } /* ---------------------------------------------------------------------------- * ASCII rendering of sequence * ------------------------------------------------------------------------- */ /* Render part of a sequence, so that sparklineRender() can call this function * with different parts in order to create the full output without overflowing * the current terminal columns. */ sds sparklineRenderRange(sds output, struct sequence *seq, int rows, int offset, int len, int flags) { int j; double relmax = seq->max - seq->min; int steps = charset_len*rows; int row = 0; char *chars = zmalloc(len); int loop = 1; int opt_fill = flags & SPARKLINE_FILL; int opt_log = flags & SPARKLINE_LOG_SCALE; if (opt_log) { relmax = log(relmax+1); } else if (relmax == 0) { relmax = 1; } while(loop) { loop = 0; memset(chars,' ',len); for (j = 0; j < len; j++) { struct sample *s = &seq->samples[j+offset]; double relval = s->value - seq->min; int step; if (opt_log) relval = log(relval+1); step = (int) (relval*steps)/relmax; if (step < 0) step = 0; if (step >= steps) step = steps-1; if (row < rows) { /* Print the character needed to create the sparkline */ int charidx = step-((rows-row-1)*charset_len); loop = 1; if (charidx >= 0 && charidx < charset_len) { chars[j] = opt_fill ? charset_fill[charidx] : charset[charidx]; } else if(opt_fill && charidx >= charset_len) { chars[j] = '|'; } } else { /* Labels spacing */ if (seq->labels && row-rows < label_margin_top) { loop = 1; break; } /* Print the label if needed. */ if (s->label) { int label_len = strlen(s->label); int label_char = row - rows - label_margin_top; if (label_len > label_char) { loop = 1; chars[j] = s->label[label_char]; } } } } if (loop) { row++; output = sdscatlen(output,chars,len); output = sdscatlen(output,"\n",1); } } zfree(chars); return output; } /* Turn a sequence into its ASCII representation */ sds sparklineRender(sds output, struct sequence *seq, int columns, int rows, int flags) { int j; for (j = 0; j < seq->length; j += columns) { int sublen = (seq->length-j) < columns ? (seq->length-j) : columns; if (j != 0) output = sdscatlen(output,"\n",1); output = sparklineRenderRange(output, seq, rows, j, sublen, flags); } return output; }
6,588
36.225989
102
c
null
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/ae_kqueue.c
/* Kqueue(2)-based ae.c module * * Copyright (C) 2009 Harish Mallipeddi - harish.mallipeddi@gmail.com * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <sys/types.h> #include <sys/event.h> #include <sys/time.h> typedef struct aeApiState { int kqfd; struct kevent *events; } aeApiState; static int aeApiCreate(aeEventLoop *eventLoop) { aeApiState *state = zmalloc(sizeof(aeApiState)); if (!state) return -1; state->events = zmalloc(sizeof(struct kevent)*eventLoop->setsize); if (!state->events) { zfree(state); return -1; } state->kqfd = kqueue(); if (state->kqfd == -1) { zfree(state->events); zfree(state); return -1; } eventLoop->apidata = state; return 0; } static int aeApiResize(aeEventLoop *eventLoop, int setsize) { aeApiState *state = eventLoop->apidata; state->events = zrealloc(state->events, sizeof(struct kevent)*setsize); return 0; } static void aeApiFree(aeEventLoop *eventLoop) { aeApiState *state = eventLoop->apidata; close(state->kqfd); zfree(state->events); zfree(state); } static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; struct kevent ke; if (mask & AE_READABLE) { EV_SET(&ke, fd, EVFILT_READ, EV_ADD, 0, 0, NULL); if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; } if (mask & AE_WRITABLE) { EV_SET(&ke, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; } return 0; } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; struct kevent ke; if (mask & AE_READABLE) { EV_SET(&ke, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); kevent(state->kqfd, &ke, 1, NULL, 0, NULL); } if (mask & AE_WRITABLE) { EV_SET(&ke, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); kevent(state->kqfd, &ke, 1, NULL, 0, NULL); } } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { aeApiState *state = eventLoop->apidata; int retval, numevents = 0; if (tvp != NULL) { struct timespec timeout; timeout.tv_sec = tvp->tv_sec; timeout.tv_nsec = tvp->tv_usec * 1000; retval = kevent(state->kqfd, NULL, 0, state->events, 
eventLoop->setsize, &timeout); } else { retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize, NULL); } if (retval > 0) { int j; numevents = retval; for(j = 0; j < numevents; j++) { int mask = 0; struct kevent *e = state->events+j; if (e->filter == EVFILT_READ) mask |= AE_READABLE; if (e->filter == EVFILT_WRITE) mask |= AE_WRITABLE; eventLoop->fired[j].fd = e->ident; eventLoop->fired[j].mask = mask; } } return numevents; } static char *aeApiName(void) { return "kqueue"; }
4,567
31.863309
80
c
null
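The ae_kqueue.c module above is the kqueue(2) backend for Redis's ae event loop, so it is only ever driven through aeApiCreate/aeApiAddEvent/aeApiPoll. As a rough illustration of the system calls those wrappers reduce to, here is a minimal, self-contained sketch that is not part of the module, only builds on BSD/macOS systems that provide kqueue, and uses wait_readable() as a hypothetical helper name.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

/* Hypothetical helper: block until `fd` is readable or ~100 ms pass.
 * Returns 1 if readable, 0 on timeout, -1 on error. */
int wait_readable(int fd) {
    int kq = kqueue();
    if (kq == -1) return -1;

    struct kevent change;
    /* Same registration aeApiAddEvent() performs for AE_READABLE. */
    EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(kq, &change, 1, NULL, 0, NULL) == -1) { close(kq); return -1; }

    /* Same wait aeApiPoll() performs, with the timeval converted to a timespec. */
    struct timespec timeout = { 0, 100 * 1000 * 1000 };
    struct kevent fired;
    int n = kevent(kq, NULL, 0, &fired, 1, &timeout);
    close(kq);
    return n;
}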
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/rdb.h
/*
 * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __RDB_H
#define __RDB_H

#include <stdio.h>
#include "rio.h"

/* TBD: include only necessary headers. */
#include "server.h"

/* The current RDB version. When the format changes in a way that is no longer
 * backward compatible this number gets incremented. */
#define RDB_VERSION 7

/* Defines related to the dump file format. Storing 32 bit lengths for short
 * keys would waste a lot of space, so the most significant 2 bits of the
 * first byte are used to interpret the length:
 *
 * 00|000000 => the len is the low 6 bits of this byte
 * 01|000000 00000000 => the len is 14 bits: 6 bits of this byte plus the
 *                       8 bits of the next byte
 * 10|000000 [32 bit integer] => a full 32 bit len follows in the next 4 bytes
 * 11|000000 => a specially encoded object follows. The six bits number
 *              specifies the kind of object that follows.
 *              See the RDB_ENC_* defines.
 *
 * Lengths up to 63 are stored using a single byte; most DB keys, and many
 * values, will fit inside. */
#define RDB_6BITLEN 0
#define RDB_14BITLEN 1
#define RDB_32BITLEN 2
#define RDB_ENCVAL 3
#define RDB_LENERR UINT_MAX

/* When a length of a string object stored on disk has the first two bits
 * set, the remaining two bits specify a special encoding for the object
 * according to the following defines: */
#define RDB_ENC_INT8 0  /* 8 bit signed integer */
#define RDB_ENC_INT16 1 /* 16 bit signed integer */
#define RDB_ENC_INT32 2 /* 32 bit signed integer */
#define RDB_ENC_LZF 3   /* string compressed with FASTLZ */

/* Dup object types to RDB object types. The only reason is readability (are
 * we dealing with RDB types or with in-memory object types?). */
#define RDB_TYPE_STRING 0
#define RDB_TYPE_LIST   1
#define RDB_TYPE_SET    2
#define RDB_TYPE_ZSET   3
#define RDB_TYPE_HASH   4
/* NOTE: WHEN ADDING NEW RDB TYPE, UPDATE rdbIsObjectType() BELOW */

/* Object types for encoded objects. */
#define RDB_TYPE_HASH_ZIPMAP    9
#define RDB_TYPE_LIST_ZIPLIST  10
#define RDB_TYPE_SET_INTSET    11
#define RDB_TYPE_ZSET_ZIPLIST  12
#define RDB_TYPE_HASH_ZIPLIST  13
#define RDB_TYPE_LIST_QUICKLIST 14
/* NOTE: WHEN ADDING NEW RDB TYPE, UPDATE rdbIsObjectType() BELOW */

/* Test if a type is an object type. */
#define rdbIsObjectType(t) ((t >= 0 && t <= 4) || (t >= 9 && t <= 14))

/* Special RDB opcodes (saved/loaded with rdbSaveType/rdbLoadType). */
#define RDB_OPCODE_AUX           250
#define RDB_OPCODE_RESIZEDB      251
#define RDB_OPCODE_EXPIRETIME_MS 252
#define RDB_OPCODE_EXPIRETIME    253
#define RDB_OPCODE_SELECTDB      254
#define RDB_OPCODE_EOF           255

int rdbSaveType(rio *rdb, unsigned char type);
int rdbLoadType(rio *rdb);
int rdbSaveTime(rio *rdb, time_t t);
time_t rdbLoadTime(rio *rdb);
int rdbSaveLen(rio *rdb, uint32_t len);
uint32_t rdbLoadLen(rio *rdb, int *isencoded);
int rdbSaveObjectType(rio *rdb, robj *o);
int rdbLoadObjectType(rio *rdb);
int rdbLoad(char *filename);
int rdbSaveBackground(char *filename);
int rdbSaveToSlavesSockets(void);
void rdbRemoveTempFile(pid_t childpid);
int rdbSave(char *filename);
ssize_t rdbSaveObject(rio *rdb, robj *o);
size_t rdbSavedObjectLen(robj *o);
robj *rdbLoadObject(int type, rio *rdb);
void backgroundSaveDoneHandler(int exitcode, int bysignal);
int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime, long long now);
robj *rdbLoadStringObject(rio *rdb);

#endif
5,003
40.7
93
h
null
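The 2-MSB length scheme documented at the top of rdb.h is easiest to see with a worked example: a length of 300 does not fit in 6 bits, so it is written as the two bytes 0x41 0x2C (prefix 01, then 1*256 + 44). The sketch below only illustrates that format under the assumption that rdb.h is included for the RDB_*BITLEN constants; encode_length() is a hypothetical helper, and the authoritative writer in the codebase is rdbSaveLen() in rdb.c.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htonl() */

/* Hypothetical helper: write the length prefix into buf (at least 5 bytes)
 * and return the number of bytes used. */
static int encode_length(uint32_t len, unsigned char *buf) {
    if (len < (1 << 6)) {
        /* 00|XXXXXX: the length itself fits in the low 6 bits. */
        buf[0] = (RDB_6BITLEN << 6) | len;
        return 1;
    } else if (len < (1 << 14)) {
        /* 01|XXXXXX XXXXXXXX: 6 high bits here, 8 low bits in the next byte. */
        buf[0] = (RDB_14BITLEN << 6) | ((len >> 8) & 0x3F);
        buf[1] = len & 0xFF;
        return 2;
    } else {
        /* 10|000000 followed by the full 32 bit length in big endian. */
        uint32_t be = htonl(len);
        buf[0] = RDB_32BITLEN << 6;
        memcpy(buf + 1, &be, 4);
        return 5;
    }
}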
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/latency.h
/* latency.h -- latency monitor API header file * See latency.c for more information. * * ---------------------------------------------------------------------------- * * Copyright (c) 2014, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __LATENCY_H #define __LATENCY_H #define LATENCY_TS_LEN 160 /* History length for every monitored event. */ /* Representation of a latency sample: the sampling time and the latency * observed in milliseconds. */ struct latencySample { int32_t time; /* We don't use time_t to force 4 bytes usage everywhere. */ uint32_t latency; /* Latency in milliseconds. */ }; /* The latency time series for a given event. */ struct latencyTimeSeries { int idx; /* Index of the next sample to store. */ uint32_t max; /* Max latency observed for this event. */ struct latencySample samples[LATENCY_TS_LEN]; /* Latest history. */ }; /* Latency statistics structure. */ struct latencyStats { uint32_t all_time_high; /* Absolute max observed since latest reset. */ uint32_t avg; /* Average of current samples. */ uint32_t min; /* Min of current samples. */ uint32_t max; /* Max of current samples. */ uint32_t mad; /* Mean absolute deviation. */ uint32_t samples; /* Number of non-zero samples. */ time_t period; /* Number of seconds since first event and now. */ }; void latencyMonitorInit(void); void latencyAddSample(char *event, mstime_t latency); int THPIsEnabled(void); /* Latency monitoring macros. */ /* Start monitoring an event. We just set the current time. */ #define latencyStartMonitor(var) if (server.latency_monitor_threshold) { \ var = mstime(); \ } else { \ var = 0; \ } /* End monitoring an event, compute the difference with the current time * to check the amount of time elapsed. */ #define latencyEndMonitor(var) if (server.latency_monitor_threshold) { \ var = mstime() - var; \ } /* Add the sample only if the elapsed time is >= to the configured threshold. 
*/ #define latencyAddSampleIfNeeded(event,var) \ if (server.latency_monitor_threshold && \ (var) >= server.latency_monitor_threshold) \ latencyAddSample((event),(var)); /* Remove time from a nested event. */ #define latencyRemoveNestedEvent(event_var,nested_var) \ event_var += nested_var; #endif /* __LATENCY_H */
3,914
40.648936
80
h
null
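The three latency.h macros declared above are meant to be wrapped around a potentially slow operation inside the server. A minimal usage sketch, assuming server.h is included (for mstime_t and the server globals) and with do_slow_work() and the "some-event" name as placeholders invented for the example:

/* Illustrative only: do_slow_work() and "some-event" are placeholders. */
void example_monitored_call(void) {
    mstime_t latency;

    latencyStartMonitor(latency);               /* latency = mstime(), if monitoring is on */
    do_slow_work();                             /* the operation being measured            */
    latencyEndMonitor(latency);                 /* latency = elapsed milliseconds          */
    latencyAddSampleIfNeeded("some-event", latency);
}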
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/slowlog.c
/* Slowlog implements a system that is able to remember the latest N * queries that took more than M microseconds to execute. * * The execution time to reach to be logged in the slow log is set * using the 'slowlog-log-slower-than' config directive, that is also * readable and writable using the CONFIG SET/GET command. * * The slow queries log is actually not "logged" in the Redis log file * but is accessible thanks to the SLOWLOG command. * * ---------------------------------------------------------------------------- * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "slowlog.h" /* Create a new slowlog entry. * Incrementing the ref count of all the objects retained is up to * this function. */ slowlogEntry *slowlogCreateEntry(robj **argv, int argc, long long duration) { slowlogEntry *se = zmalloc(sizeof(*se)); int j, slargc = argc; if (slargc > SLOWLOG_ENTRY_MAX_ARGC) slargc = SLOWLOG_ENTRY_MAX_ARGC; se->argc = slargc; se->argv = zmalloc(sizeof(robj*)*slargc); for (j = 0; j < slargc; j++) { /* Logging too many arguments is a useless memory waste, so we stop * at SLOWLOG_ENTRY_MAX_ARGC, but use the last argument to specify * how many remaining arguments there were in the original command. */ if (slargc != argc && j == slargc-1) { se->argv[j] = createObject(OBJ_STRING, sdscatprintf(sdsempty(),"... (%d more arguments)", argc-slargc+1)); } else { /* Trim too long strings as well... */ if (argv[j]->type == OBJ_STRING && sdsEncodedObject(argv[j]) && sdslen(argv[j]->ptr) > SLOWLOG_ENTRY_MAX_STRING) { sds s = sdsnewlen(argv[j]->ptr, SLOWLOG_ENTRY_MAX_STRING); s = sdscatprintf(s,"... (%lu more bytes)", (unsigned long) sdslen(argv[j]->ptr) - SLOWLOG_ENTRY_MAX_STRING); se->argv[j] = createObject(OBJ_STRING,s); } else { se->argv[j] = argv[j]; incrRefCount(argv[j]); } } } se->time = time(NULL); se->duration = duration; se->id = server.slowlog_entry_id++; return se; } /* Free a slow log entry. 
The argument is void so that the prototype of this * function matches the one of the 'free' method of adlist.c. * * This function will take care to release all the retained object. */ void slowlogFreeEntry(void *septr) { slowlogEntry *se = septr; int j; for (j = 0; j < se->argc; j++) decrRefCount(se->argv[j]); zfree(se->argv); zfree(se); } /* Initialize the slow log. This function should be called a single time * at server startup. */ void slowlogInit(void) { server.slowlog = listCreate(); server.slowlog_entry_id = 0; listSetFreeMethod(server.slowlog,slowlogFreeEntry); } /* Push a new entry into the slow log. * This function will make sure to trim the slow log accordingly to the * configured max length. */ void slowlogPushEntryIfNeeded(robj **argv, int argc, long long duration) { if (server.slowlog_log_slower_than < 0) return; /* Slowlog disabled */ if (duration >= server.slowlog_log_slower_than) listAddNodeHead(server.slowlog,slowlogCreateEntry(argv,argc,duration)); /* Remove old entries if needed. */ while (listLength(server.slowlog) > server.slowlog_max_len) listDelNode(server.slowlog,listLast(server.slowlog)); } /* Remove all the entries from the current slow log. */ void slowlogReset(void) { while (listLength(server.slowlog) > 0) listDelNode(server.slowlog,listLast(server.slowlog)); } /* The SLOWLOG command. Implements all the subcommands needed to handle the * Redis slow log. */ void slowlogCommand(client *c) { if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"reset")) { slowlogReset(); addReply(c,shared.ok); } else if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"len")) { addReplyLongLong(c,listLength(server.slowlog)); } else if ((c->argc == 2 || c->argc == 3) && !strcasecmp(c->argv[1]->ptr,"get")) { long count = 10, sent = 0; listIter li; void *totentries; listNode *ln; slowlogEntry *se; if (c->argc == 3 && getLongFromObjectOrReply(c,c->argv[2],&count,NULL) != C_OK) return; listRewind(server.slowlog,&li); totentries = addDeferredMultiBulkLength(c); while(count-- && (ln = listNext(&li))) { int j; se = ln->value; addReplyMultiBulkLen(c,4); addReplyLongLong(c,se->id); addReplyLongLong(c,se->time); addReplyLongLong(c,se->duration); addReplyMultiBulkLen(c,se->argc); for (j = 0; j < se->argc; j++) addReplyBulk(c,se->argv[j]); sent++; } setDeferredMultiBulkLength(c,totentries,sent); } else { addReplyError(c, "Unknown SLOWLOG subcommand or wrong # of args. Try GET, RESET, LEN."); } }
6,716
38.511765
83
c
null
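slowlogPushEntryIfNeeded() expects the caller to have already measured the command duration in microseconds; in the server the production call site lives in call() in server.c. A hedged sketch of that pattern, with example_command_wrapper() as an invented name, the command execution elided, and server.h assumed for the client type and ustime():

/* Illustrative only: mirrors the call-site pattern, not the real call(). */
void example_command_wrapper(client *c) {
    long long start = ustime();                 /* microsecond timestamp */

    /* ... execute the command here ... */

    long long duration = ustime() - start;
    /* Logs an entry only when duration >= server.slowlog_log_slower_than,
     * after which the list is trimmed to server.slowlog_max_len. */
    slowlogPushEntryIfNeeded(c->argv, c->argc, duration);
}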
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/lzf.h
/* * Copyright (c) 2000-2008 Marc Alexander Lehmann <schmorp@schmorp.de> * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef LZF_H #define LZF_H /*********************************************************************** ** ** lzf -- an extremely fast/free compression/decompression-method ** http://liblzf.plan9.de/ ** ** This algorithm is believed to be patent-free. ** ***********************************************************************/ #define LZF_VERSION 0x0105 /* 1.5, API version */ /* * Compress in_len bytes stored at the memory block starting at * in_data and write the result to out_data, up to a maximum length * of out_len bytes. * * If the output buffer is not large enough or any error occurs return 0, * otherwise return the number of bytes used, which might be considerably * more than in_len (but less than 104% of the original size), so it * makes sense to always use out_len == in_len - 1), to ensure _some_ * compression, and store the data uncompressed otherwise (with a flag, of * course. * * lzf_compress might use different algorithms on different systems and * even different runs, thus might result in different compressed strings * depending on the phase of the moon or similar factors. However, all * these strings are architecture-independent and will result in the * original data when decompressed using lzf_decompress. * * The buffers must not be overlapping. * * If the option LZF_STATE_ARG is enabled, an extra argument must be * supplied which is not reflected in this header file. Refer to lzfP.h * and lzf_c.c. 
* */ unsigned int lzf_compress (const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len); /* * Decompress data compressed with some version of the lzf_compress * function and stored at location in_data and length in_len. The result * will be stored at out_data up to a maximum of out_len characters. * * If the output buffer is not large enough to hold the decompressed * data, a 0 is returned and errno is set to E2BIG. Otherwise the number * of decompressed bytes (i.e. the original length of the data) is * returned. * * If an error in the compressed data is detected, a zero is returned and * errno is set to EINVAL. * * This function is very fast, about as fast as a copying loop. */ unsigned int lzf_decompress (const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len); #endif
4,407
42.643564
79
h
null
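A usage sketch for the two functions declared in lzf.h above, following the header's advice of passing out_len == in_len - 1 so that lzf_compress() returns 0 unless compression actually saves space. This is illustrative only: the buffer sizes are arbitrary, the input is assumed to be between 1 and 4096 bytes, and a real caller (for example an RDB writer) would store the raw bytes plus an "uncompressed" flag on the fallback path.

#include <stdio.h>
#include <string.h>
#include "lzf.h"

/* Illustrative round trip, assuming 1 <= in_len <= 4096. */
void lzf_roundtrip_example(const unsigned char *in, unsigned int in_len) {
    unsigned char compressed[4096], restored[4096];

    /* out_len == in_len - 1 forces a failure unless compression saves space. */
    unsigned int clen = lzf_compress(in, in_len, compressed, in_len - 1);
    if (clen == 0) {
        /* Not compressible enough: a real caller stores `in` verbatim,
         * flagged as uncompressed. */
        return;
    }

    unsigned int dlen = lzf_decompress(compressed, clen, restored, sizeof(restored));
    if (dlen == in_len && memcmp(in, restored, in_len) == 0)
        printf("round trip ok: %u -> %u -> %u bytes\n", in_len, clen, dlen);
}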
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/rand.c
/* Pseudo random number generation functions derived from the drand48() * function obtained from pysam source code. * * This functions are used in order to replace the default math.random() * Lua implementation with something having exactly the same behavior * across different systems (by default Lua uses libc's rand() that is not * required to implement a specific PRNG generating the same sequence * in different systems if seeded with the same integer). * * The original code appears to be under the public domain. * I modified it removing the non needed functions and all the * 1960-style C coding stuff... * * ---------------------------------------------------------------------------- * * Copyright (c) 2010-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdint.h> #define N 16 #define MASK ((1 << (N - 1)) + (1 << (N - 1)) - 1) #define LOW(x) ((unsigned)(x) & MASK) #define HIGH(x) LOW((x) >> N) #define MUL(x, y, z) { int32_t l = (long)(x) * (long)(y); \ (z)[0] = LOW(l); (z)[1] = HIGH(l); } #define CARRY(x, y) ((int32_t)(x) + (long)(y) > MASK) #define ADDEQU(x, y, z) (z = CARRY(x, (y)), x = LOW(x + (y))) #define X0 0x330E #define X1 0xABCD #define X2 0x1234 #define A0 0xE66D #define A1 0xDEEC #define A2 0x5 #define C 0xB #define SET3(x, x0, x1, x2) ((x)[0] = (x0), (x)[1] = (x1), (x)[2] = (x2)) #define SETLOW(x, y, n) SET3(x, LOW((y)[n]), LOW((y)[(n)+1]), LOW((y)[(n)+2])) #define SEED(x0, x1, x2) (SET3(x, x0, x1, x2), SET3(a, A0, A1, A2), c = C) #define REST(v) for (i = 0; i < 3; i++) { xsubi[i] = x[i]; x[i] = temp[i]; } \ return (v); #define HI_BIT (1L << (2 * N - 1)) static uint32_t x[3] = { X0, X1, X2 }, a[3] = { A0, A1, A2 }, c = C; static void next(void); int32_t redisLrand48() { next(); return (((int32_t)x[2] << (N - 1)) + (x[1] >> 1)); } void redisSrand48(int32_t seedval) { SEED(X0, LOW(seedval), HIGH(seedval)); } static void next(void) { uint32_t p[2], q[2], r[2], carry0, carry1; MUL(a[0], x[0], p); ADDEQU(p[0], c, carry0); ADDEQU(p[1], carry0, carry1); MUL(a[0], x[1], q); ADDEQU(p[1], q[0], carry0); MUL(a[1], x[0], r); x[2] = LOW(carry0 + carry1 + CARRY(p[1], r[0]) + q[1] + r[1] + a[0] * x[2] + a[1] * x[1] + a[2] * x[0]); x[1] = LOW(p[1] + r[0]); x[0] = LOW(p[0]); }
3,848
39.946809
79
c
null
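The point of carrying this drand48() derivative instead of libc rand() is reproducibility across platforms, which a short sketch can demonstrate; main() here is a throwaway test program built against rand.h, not part of the Redis build.

#include <stdio.h>
#include <stdint.h>
#include "rand.h"

int main(void) {
    redisSrand48(1234);
    int32_t a = redisLrand48();
    int32_t b = redisLrand48();

    redisSrand48(1234);   /* same seed again ...                 */
    printf("%s\n", (a == redisLrand48() && b == redisLrand48())
                   ? "reproducible" : "mismatch");   /* ... same sequence */
    return 0;
}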
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/rand.h
/* * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef REDIS_RANDOM_H #define REDIS_RANDOM_H int32_t redisLrand48(); void redisSrand48(int32_t seedval); #define REDIS_LRAND48_MAX INT32_MAX #endif
1,763
44.230769
78
h
null
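To serve as a math.random() replacement, the 31-bit value declared above still has to be scaled into [0, 1). One way to do that is sketched below; the exact formula used by Redis's Lua scripting module may differ, so treat this as an assumption-laden illustration that relies only on redisLrand48() being non-negative and bounded by REDIS_LRAND48_MAX.

#include <stdint.h>
#include "rand.h"

/* Hypothetical helper: map redisLrand48() onto a double in [0, 1). */
double example_random_unit(void) {
    return (double)redisLrand48() / ((double)REDIS_LRAND48_MAX + 1.0);
}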
NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/src/pqsort.c
/* The following is the NetBSD libc qsort implementation modified in order to * support partial sorting of ranges for Redis. * * Copyright(C) 2009-2012 Salvatore Sanfilippo. All rights reserved. * * The original copyright notice follows. */ /* $NetBSD: qsort.c,v 1.19 2009/01/30 23:38:44 lukem Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <sys/types.h> #include <errno.h> #include <stdlib.h> static inline char *med3 (char *, char *, char *, int (*)(const void *, const void *)); static inline void swapfunc (char *, char *, size_t, int); #define min(a, b) (a) < (b) ? a : b /* * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function". */ #define swapcode(TYPE, parmi, parmj, n) { \ size_t i = (n) / sizeof (TYPE); \ TYPE *pi = (TYPE *)(void *)(parmi); \ TYPE *pj = (TYPE *)(void *)(parmj); \ do { \ TYPE t = *pi; \ *pi++ = *pj; \ *pj++ = t; \ } while (--i > 0); \ } #define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \ es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1; static inline void swapfunc(char *a, char *b, size_t n, int swaptype) { if (swaptype <= 1) swapcode(long, a, b, n) else swapcode(char, a, b, n) } #define swap(a, b) \ if (swaptype == 0) { \ long t = *(long *)(void *)(a); \ *(long *)(void *)(a) = *(long *)(void *)(b); \ *(long *)(void *)(b) = t; \ } else \ swapfunc(a, b, es, swaptype) #define vecswap(a, b, n) if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype) static inline char * med3(char *a, char *b, char *c, int (*cmp) (const void *, const void *)) { return cmp(a, b) < 0 ? (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a )) :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? 
a : c )); } static void _pqsort(void *a, size_t n, size_t es, int (*cmp) (const void *, const void *), void *lrange, void *rrange) { char *pa, *pb, *pc, *pd, *pl, *pm, *pn; size_t d, r; int swaptype, cmp_result; loop: SWAPINIT(a, es); if (n < 7) { for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es) for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; pl -= es) swap(pl, pl - es); return; } pm = (char *) a + (n / 2) * es; if (n > 7) { pl = (char *) a; pn = (char *) a + (n - 1) * es; if (n > 40) { d = (n / 8) * es; pl = med3(pl, pl + d, pl + 2 * d, cmp); pm = med3(pm - d, pm, pm + d, cmp); pn = med3(pn - 2 * d, pn - d, pn, cmp); } pm = med3(pl, pm, pn, cmp); } swap(a, pm); pa = pb = (char *) a + es; pc = pd = (char *) a + (n - 1) * es; for (;;) { while (pb <= pc && (cmp_result = cmp(pb, a)) <= 0) { if (cmp_result == 0) { swap(pa, pb); pa += es; } pb += es; } while (pb <= pc && (cmp_result = cmp(pc, a)) >= 0) { if (cmp_result == 0) { swap(pc, pd); pd -= es; } pc -= es; } if (pb > pc) break; swap(pb, pc); pb += es; pc -= es; } pn = (char *) a + n * es; r = min(pa - (char *) a, pb - pa); vecswap(a, pb - r, r); r = min((size_t)(pd - pc), pn - pd - es); vecswap(pb, pn - r, r); if ((r = pb - pa) > es) { void *_l = a, *_r = ((unsigned char*)a)+r-1; if (!((lrange < _l && rrange < _l) || (lrange > _r && rrange > _r))) _pqsort(a, r / es, es, cmp, lrange, rrange); } if ((r = pd - pc) > es) { void *_l, *_r; /* Iterate rather than recurse to save stack space */ a = pn - r; n = r / es; _l = a; _r = ((unsigned char*)a)+r-1; if (!((lrange < _l && rrange < _l) || (lrange > _r && rrange > _r))) goto loop; } /* qsort(pn - r, r / es, es, cmp);*/ } void pqsort(void *a, size_t n, size_t es, int (*cmp) (const void *, const void *), size_t lrange, size_t rrange) { _pqsort(a,n,es,cmp,((unsigned char*)a)+(lrange*es), ((unsigned char*)a)+((rrange+1)*es)-1); }
5,528
28.725806
79
c
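pqsort() takes the same arguments as qsort() plus an index range, and only guarantees that the elements whose final positions fall inside [lrange, rrange] end up sorted in place, which is what Redis's SORT ... LIMIT path needs. A small illustrative driver, assuming pqsort.h provides the prototype; cmp_int and the sample data are invented for the example.

#include <stdio.h>
#include "pqsort.h"   /* assumed to declare pqsort() */

static int cmp_int(const void *a, const void *b) {
    int ia = *(const int *)a, ib = *(const int *)b;
    return (ia > ib) - (ia < ib);
}

int main(void) {
    int v[] = { 9, 3, 7, 1, 8, 2, 6, 4, 5, 0 };

    /* Only final positions 0..2 are needed, e.g. SORT ... LIMIT 0 3. */
    pqsort(v, 10, sizeof(int), cmp_int, 0, 2);

    printf("%d %d %d\n", v[0], v[1], v[2]);   /* prints "0 1 2"; the tail may stay unsorted */
    return 0;
}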