Dataset columns (name, dtype, range / classes):

	repo	stringlengths	1 .. 152
	file	stringlengths	14 .. 221
	code	stringlengths	501 .. 25k
	file_length	int64	501 .. 25k
	avg_line_length	float64	20 .. 99.5
	max_line_length	int64	21 .. 134
	extension_type	stringclasses	2 values
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/config.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * config.h -- internal definitions for pmem2_config
 */
#ifndef PMEM2_CONFIG_H
#define PMEM2_CONFIG_H

#include "libpmem2.h"

#define PMEM2_GRANULARITY_INVALID ((enum pmem2_granularity) (-1))
#define PMEM2_ADDRESS_ANY 0 /* default value of the address request type */

struct pmem2_config {
	/* offset from the beginning of the file */
	size_t offset;
	size_t length; /* length of the mapping */
	void *addr; /* address of the mapping */
	int addr_request; /* address request type */
	/* persistence granularity requested by user */
	enum pmem2_granularity requested_max_granularity;
	enum pmem2_sharing_type sharing; /* the way the file will be mapped */
	unsigned protection_flag;
};

void pmem2_config_init(struct pmem2_config *cfg);

int pmem2_config_validate_length(const struct pmem2_config *cfg,
		size_t file_len, size_t alignment);
int pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
		const struct pmem2_source *src);

#endif /* PMEM2_CONFIG_H */
file_length: 1,070 | avg_line_length: 28.75 | max_line_length: 75 | extension_type: h
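The header above only declares the internal config struct; the public setters that drive it live in config.c, reproduced later in this dump. A minimal usage sketch built only from calls that appear in these files; the helper name is hypothetical and error handling is abbreviated:

#include <libpmem2.h>

/* hypothetical helper: build a config that accepts page granularity,
 * the weakest (always-satisfiable) maximum */
static int
make_cfg(struct pmem2_config **cfg)
{
	int ret = pmem2_config_new(cfg);
	if (ret)
		return ret;

	return pmem2_config_set_required_store_granularity(*cfg,
			PMEM2_GRANULARITY_PAGE);
}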
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/map.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * map.h -- internal definitions for libpmem2
 */
#ifndef PMEM2_MAP_H
#define PMEM2_MAP_H

#include <stddef.h>
#include <stdbool.h>
#include "libpmem2.h"
#include "os.h"
#include "source.h"

#ifdef _WIN32
#include <windows.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

typedef int (*pmem2_deep_flush_fn)(struct pmem2_map *map,
		void *ptr, size_t size);

struct pmem2_map {
	void *addr; /* base address */
	size_t reserved_length; /* length of the mapping reservation */
	size_t content_length; /* length of the mapped content */
	/* effective persistence granularity */
	enum pmem2_granularity effective_granularity;

	pmem2_persist_fn persist_fn;
	pmem2_flush_fn flush_fn;
	pmem2_drain_fn drain_fn;
	pmem2_deep_flush_fn deep_flush_fn;

	pmem2_memmove_fn memmove_fn;
	pmem2_memcpy_fn memcpy_fn;
	pmem2_memset_fn memset_fn;

	struct pmem2_source source;
};

enum pmem2_granularity get_min_granularity(bool eADR, bool is_pmem,
		enum pmem2_sharing_type sharing);
struct pmem2_map *pmem2_map_find(const void *addr, size_t len);
int pmem2_register_mapping(struct pmem2_map *map);
int pmem2_unregister_mapping(struct pmem2_map *map);
void pmem2_map_init(void);
void pmem2_map_fini(void);

int pmem2_validate_offset(const struct pmem2_config *cfg,
		size_t *offset, size_t alignment);

#ifdef __cplusplus
}
#endif

#endif /* map.h */
file_length: 1,426 | avg_line_length: 22.016129 | max_line_length: 67 | extension_type: h
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/deep_flush.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * deep_flush.h -- functions for deep flush functionality
 */
#ifndef PMEM2_DEEP_FLUSH_H
#define PMEM2_DEEP_FLUSH_H 1

#include "map.h"

#ifdef __cplusplus
extern "C" {
#endif

int pmem2_deep_flush_write(unsigned region_id);
int pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size);

#ifdef __cplusplus
}
#endif

#endif
file_length: 644 | avg_line_length: 22.035714 | max_line_length: 74 | extension_type: h
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/persist.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * persist.c -- pmem2_get_[persist|flush|drain]_fn
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>	/* memmove, memset */

#include "libpmem2.h"
#include "map.h"
#include "out.h"
#include "os.h"
#include "persist.h"
#include "deep_flush.h"
#include "pmem2_arch.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"

static struct pmem2_arch_info Info;

/*
 * memmove_nodrain_libc -- (internal) memmove to pmem using libc
 */
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
		unsigned flags, flush_func flush)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
			flags);

	memmove(pmemdest, src, len);

	if (!(flags & PMEM2_F_MEM_NOFLUSH))
		flush(pmemdest, len);

	return pmemdest;
}

/*
 * memset_nodrain_libc -- (internal) memset to pmem using libc
 */
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
		flush_func flush)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
			flags);

	memset(pmemdest, c, len);

	if (!(flags & PMEM2_F_MEM_NOFLUSH))
		flush(pmemdest, len);

	return pmemdest;
}

/*
 * pmem2_persist_init -- initialize persist module
 */
void
pmem2_persist_init(void)
{
	Info.memmove_nodrain = NULL;
	Info.memset_nodrain = NULL;
	Info.memmove_nodrain_eadr = NULL;
	Info.memset_nodrain_eadr = NULL;
	Info.flush = NULL;
	Info.fence = NULL;
	Info.flush_has_builtin_fence = 0;

	pmem2_arch_init(&Info);

	char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
	long long no_generic = 0;
	if (ptr)
		no_generic = atoll(ptr);

	if (Info.memmove_nodrain == NULL) {
		if (no_generic) {
			Info.memmove_nodrain = memmove_nodrain_libc;
			Info.memmove_nodrain_eadr = memmove_nodrain_libc;
			LOG(3, "using libc memmove");
		} else {
			Info.memmove_nodrain = memmove_nodrain_generic;
			Info.memmove_nodrain_eadr = memmove_nodrain_generic;
			LOG(3, "using generic memmove");
		}
	}

	if (Info.memset_nodrain == NULL) {
		if (no_generic) {
			Info.memset_nodrain = memset_nodrain_libc;
			Info.memset_nodrain_eadr = memset_nodrain_libc;
			LOG(3, "using libc memset");
		} else {
			Info.memset_nodrain = memset_nodrain_generic;
			Info.memset_nodrain_eadr = memset_nodrain_generic;
			LOG(3, "using generic memset");
		}
	}
}

/*
 * pmem2_drain -- wait for any PM stores to drain from HW buffers
 */
static void
pmem2_drain(void)
{
	LOG(15, NULL);

	Info.fence();
}

/*
 * pmem2_log_flush -- log the flush attempt for the given range
 */
static inline void
pmem2_log_flush(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);

	VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
}

/*
 * pmem2_flush_nop -- NOP version of the flush routine, used in cases where
 * memory behind the mapping is already in persistence domain
 */
static void
pmem2_flush_nop(const void *addr, size_t len)
{
	pmem2_log_flush(addr, len);

	/* nothing more to do, other than telling pmemcheck about it */
	VALGRIND_DO_FLUSH(addr, len);
}

/*
 * pmem2_flush_cpu_cache -- flush processor cache for the given range
 */
static void
pmem2_flush_cpu_cache(const void *addr, size_t len)
{
	pmem2_log_flush(addr, len);

	Info.flush(addr, len);
}

/*
 * pmem2_persist_noflush -- make all changes to a range of pmem persistent
 */
static void
pmem2_persist_noflush(const void *addr, size_t len)
{
	pmem2_flush_nop(addr, len);
	pmem2_drain();
}

/*
 * pmem2_persist_cpu_cache -- make all changes to a range of pmem persistent
 */
static void
pmem2_persist_cpu_cache(const void *addr, size_t len)
{
	pmem2_flush_cpu_cache(addr, len);
	pmem2_drain();
}

/*
 * pmem2_flush_file_buffers -- flush CPU and OS caches for the given range
 */
static int
pmem2_flush_file_buffers(const void *addr, size_t len, int autorestart)
{
	int olderrno = errno;

	pmem2_log_flush(addr, len);

	/*
	 * Flushing using OS-provided mechanisms requires that the address
	 * be a multiple of the page size.
	 * Align address down and change len so that [addr, addr + len) still
	 * contains the initial range.
	 */

	/* round address down to page boundary */
	uintptr_t new_addr = ALIGN_DOWN((uintptr_t)addr, Pagesize);

	/* increase len by the amount we gain when we round addr down */
	len += (uintptr_t)addr - new_addr;

	addr = (const void *)new_addr;

	int ret = 0;

	/*
	 * Find all the mappings overlapping with the [addr, addr + len) range
	 * and flush them, one by one.
	 */
	do {
		struct pmem2_map *map = pmem2_map_find(addr, len);
		if (!map)
			break;

		size_t flush;
		size_t remaining = map->reserved_length;
		if (map->addr < addr) {
			/*
			 * Addr is inside of the mapping, so we have to decrease
			 * the remaining length by an offset from the start
			 * of our mapping.
			 */
			remaining -= (uintptr_t)addr - (uintptr_t)map->addr;
		} else if (map->addr == addr) {
			/* perfect match, there's nothing to do in this case */
		} else {
			/*
			 * map->addr > addr, so we have to skip the hole
			 * between addr and map->addr.
			 */
			len -= (uintptr_t)map->addr - (uintptr_t)addr;
			addr = map->addr;
		}

		if (len > remaining)
			flush = remaining;
		else
			flush = len;

		int ret1 = pmem2_flush_file_buffers_os(map, addr, flush,
				autorestart);
		if (ret1 != 0)
			ret = ret1;

		addr = ((const char *)addr) + flush;
		len -= flush;
	} while (len > 0);

	errno = olderrno;

	return ret;
}

/*
 * pmem2_persist_pages -- flush processor cache for the given range
 */
static void
pmem2_persist_pages(const void *addr, size_t len)
{
	/*
	 * Restarting on EINTR in general is a bad idea, but we don't have
	 * any way to communicate the failure outside.
	 */
	const int autorestart = 1;

	int ret = pmem2_flush_file_buffers(addr, len, autorestart);
	if (ret) {
		/*
		 * 1) There's no way to propagate this error. Silently ignoring
		 *    it would lead to data corruption.
		 * 2) non-pmem code path shouldn't be used in production.
		 *
		 * The only sane thing to do is to crash the application. Sorry.
		 */
		abort();
	}
}

/*
 * pmem2_drain_nop -- variant of pmem2_drain for page granularity;
 * it is a NOP because the flush part has built-in drain
 */
static void
pmem2_drain_nop(void)
{
	LOG(15, NULL);
}

/*
 * pmem2_deep_flush_page -- do nothing - pmem2_persist_fn already did msync
 */
int
pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	return 0;
}

/*
 * pmem2_deep_flush_cache -- flush buffers for fsdax or write
 * to deep_flush for DevDax
 */
int
pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	enum pmem2_file_type type = map->source.value.ftype;

	/*
	 * XXX: this should be moved to pmem2_deep_flush_dax
	 * while refactoring abstraction
	 */
	if (type == PMEM2_FTYPE_DEVDAX)
		pmem2_persist_cpu_cache(ptr, size);

	int ret = pmem2_deep_flush_dax(map, ptr, size);
	if (ret < 0) {
		LOG(1, "cannot perform deep flush cache for map %p", map);
		return ret;
	}

	return 0;
}

/*
 * pmem2_deep_flush_byte -- flush cpu cache and perform deep flush for dax
 */
int
pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	if (map->source.type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not support deep flush");
		return PMEM2_E_NOSUPP;
	}

	ASSERT(map->source.type == PMEM2_SOURCE_FD ||
		map->source.type == PMEM2_SOURCE_HANDLE);

	enum pmem2_file_type type = map->source.value.ftype;

	/*
	 * XXX: this should be moved to pmem2_deep_flush_dax
	 * while refactoring abstraction
	 */
	if (type == PMEM2_FTYPE_DEVDAX)
		pmem2_persist_cpu_cache(ptr, size);

	int ret = pmem2_deep_flush_dax(map, ptr, size);
	if (ret < 0) {
		LOG(1, "cannot perform deep flush byte for map %p", map);
		return ret;
	}

	return 0;
}

/*
 * pmem2_set_flush_fns -- set function pointers related to flushing
 */
void
pmem2_set_flush_fns(struct pmem2_map *map)
{
	switch (map->effective_granularity) {
		case PMEM2_GRANULARITY_PAGE:
			map->persist_fn = pmem2_persist_pages;
			map->flush_fn = pmem2_persist_pages;
			map->drain_fn = pmem2_drain_nop;
			map->deep_flush_fn = pmem2_deep_flush_page;
			break;
		case PMEM2_GRANULARITY_CACHE_LINE:
			map->persist_fn = pmem2_persist_cpu_cache;
			map->flush_fn = pmem2_flush_cpu_cache;
			map->drain_fn = pmem2_drain;
			map->deep_flush_fn = pmem2_deep_flush_cache;
			break;
		case PMEM2_GRANULARITY_BYTE:
			map->persist_fn = pmem2_persist_noflush;
			map->flush_fn = pmem2_flush_nop;
			map->drain_fn = pmem2_drain;
			map->deep_flush_fn = pmem2_deep_flush_byte;
			break;
		default:
			abort();
	}
}

/*
 * pmem2_get_persist_fn - return a pointer to a function responsible for
 * persisting data in range owned by pmem2_map
 */
pmem2_persist_fn
pmem2_get_persist_fn(struct pmem2_map *map)
{
	return map->persist_fn;
}

/*
 * pmem2_get_flush_fn - return a pointer to a function responsible for
 * flushing data in range owned by pmem2_map
 */
pmem2_flush_fn
pmem2_get_flush_fn(struct pmem2_map *map)
{
	return map->flush_fn;
}

/*
 * pmem2_get_drain_fn - return a pointer to a function responsible for
 * draining flushes in range owned by pmem2_map
 */
pmem2_drain_fn
pmem2_get_drain_fn(struct pmem2_map *map)
{
	return map->drain_fn;
}

/*
 * pmem2_memmove_nonpmem -- mem[move|cpy] followed by an msync
 */
static void *
pmem2_memmove_nonpmem(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memmove");
	Info.memmove_nodrain(pmemdest, src, len, flags & ~PMEM2_F_MEM_NODRAIN,
			Info.flush);

	pmem2_persist_pages(pmemdest, len);

	PMEM2_API_END("pmem2_memmove");
	return pmemdest;
}

/*
 * pmem2_memset_nonpmem -- memset followed by an msync
 */
static void *
pmem2_memset_nonpmem(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memset");
	Info.memset_nodrain(pmemdest, c, len, flags & ~PMEM2_F_MEM_NODRAIN,
			Info.flush);

	pmem2_persist_pages(pmemdest, len);

	PMEM2_API_END("pmem2_memset");
	return pmemdest;
}

/*
 * pmem2_memmove -- mem[move|cpy] to pmem
 */
static void *
pmem2_memmove(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memmove");
	Info.memmove_nodrain(pmemdest, src, len, flags, Info.flush);
	if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
		pmem2_drain();

	PMEM2_API_END("pmem2_memmove");
	return pmemdest;
}

/*
 * pmem2_memset -- memset to pmem
 */
static void *
pmem2_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memset");
	Info.memset_nodrain(pmemdest, c, len, flags, Info.flush);
	if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
		pmem2_drain();

	PMEM2_API_END("pmem2_memset");
	return pmemdest;
}

/*
 * pmem2_memmove_eadr -- mem[move|cpy] to pmem, platform supports eADR
 */
static void *
pmem2_memmove_eadr(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memmove");
	Info.memmove_nodrain_eadr(pmemdest, src, len, flags, Info.flush);
	if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
		pmem2_drain();

	PMEM2_API_END("pmem2_memmove");
	return pmemdest;
}

/*
 * pmem2_memset_eadr -- memset to pmem, platform supports eADR
 */
static void *
pmem2_memset_eadr(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memset");
	Info.memset_nodrain_eadr(pmemdest, c, len, flags, Info.flush);
	if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
		pmem2_drain();

	PMEM2_API_END("pmem2_memset");
	return pmemdest;
}

/*
 * pmem2_set_mem_fns -- set function pointers related to mem[move|cpy|set]
 */
void
pmem2_set_mem_fns(struct pmem2_map *map)
{
	switch (map->effective_granularity) {
		case PMEM2_GRANULARITY_PAGE:
			map->memmove_fn = pmem2_memmove_nonpmem;
			map->memcpy_fn = pmem2_memmove_nonpmem;
			map->memset_fn = pmem2_memset_nonpmem;
			break;
		case PMEM2_GRANULARITY_CACHE_LINE:
			map->memmove_fn = pmem2_memmove;
			map->memcpy_fn = pmem2_memmove;
			map->memset_fn = pmem2_memset;
			break;
		case PMEM2_GRANULARITY_BYTE:
			map->memmove_fn = pmem2_memmove_eadr;
			map->memcpy_fn = pmem2_memmove_eadr;
			map->memset_fn = pmem2_memset_eadr;
			break;
		default:
			abort();
	}
}

/*
 * pmem2_get_memmove_fn - return a pointer to a function
 */
pmem2_memmove_fn
pmem2_get_memmove_fn(struct pmem2_map *map)
{
	return map->memmove_fn;
}

/*
 * pmem2_get_memcpy_fn - return a pointer to a function
 */
pmem2_memcpy_fn
pmem2_get_memcpy_fn(struct pmem2_map *map)
{
	return map->memcpy_fn;
}

/*
 * pmem2_get_memset_fn - return a pointer to a function
 */
pmem2_memset_fn
pmem2_get_memset_fn(struct pmem2_map *map)
{
	return map->memset_fn;
}

#if VG_PMEMCHECK_ENABLED
/*
 * pmem2_emit_log -- logs library and function names to pmemcheck store log
 */
void
pmem2_emit_log(const char *func, int order)
{
	util_emit_log("libpmem2", func, order);
}
#endif
file_length: 13,665 | avg_line_length: 21.58843 | max_line_length: 76 | extension_type: c
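A hedged sketch of how the getters above are meant to be used: once a mapping exists (see pmem2_map() in map_posix.c at the end of this dump), the granularity-appropriate function pointers replace hand-written flush logic. The helper name is hypothetical; `map` and `dst` are assumed to come from a successful mapping:

#include <libpmem2.h>

/* copy `len` bytes into the mapping and make them persistent;
 * with flags = 0 the returned memcpy_fn both flushes and drains
 * as required by the mapping's effective granularity */
static void
durable_copy(struct pmem2_map *map, void *dst, const void *src, size_t len)
{
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);

	memcpy_fn(dst, src, len, 0);
}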
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/persist_posix.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * persist_posix.c -- POSIX-specific part of persist implementation
 */

#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>

#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"

/*
 * pmem2_flush_file_buffers_os -- flush CPU and OS file caches for the given
 * range
 */
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr,
		size_t len, int autorestart)
{
	/*
	 * msync accepts addresses aligned to the page boundary, so we may sync
	 * more and part of it may have been marked as undefined/inaccessible.
	 * Msyncing such memory is not a bug, so as a workaround temporarily
	 * disable error reporting.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;

	int ret;
	do {
		ret = msync((void *)addr, len, MS_SYNC);

		if (ret < 0) {
			ERR("!msync");
		} else {
			/* full flush */
			VALGRIND_DO_PERSIST((uintptr_t)addr, len);
		}
	} while (autorestart && ret < 0 && errno == EINTR);

	VALGRIND_DO_ENABLE_ERROR_REPORTING;

	if (ret)
		return PMEM2_E_ERRNO;

	return 0;
}
file_length: 1,126 | avg_line_length: 21.098039 | max_line_length: 80 | extension_type: c
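Both pmem2_flush_file_buffers() above and the msync() call here depend on page-aligning the flush target: the address is rounded down and the length grown so the original range stays covered. A standalone illustration with a hypothetical 4 KiB page size standing in for the runtime Pagesize:

#include <stdint.h>
#include <stdio.h>

#define PAGESIZE 4096u	/* stand-in for the runtime Pagesize */

int
main(void)
{
	uintptr_t addr = 0x1234;	/* unaligned flush target */
	size_t len = 100;

	/* equivalent of ALIGN_DOWN(addr, Pagesize) */
	uintptr_t new_addr = addr & ~((uintptr_t)PAGESIZE - 1);

	/* grow len so [addr, addr + len) is still covered */
	len += addr - new_addr;

	/* prints: msync(0x1000, 664) */
	printf("msync(%#lx, %zu)\n", (unsigned long)new_addr, len);
	return 0;
}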
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/pmem2_utils_linux.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>	/* strrchr, strcmp */
#include <sys/stat.h>
#include <sys/sysmacros.h>

#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
#include "source.h"

/*
 * pmem2_get_type_from_stat -- determine type of file based on output of stat
 * syscall
 */
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
	if (S_ISREG(st->st_mode)) {
		*type = PMEM2_FTYPE_REG;
		return 0;
	}

	if (S_ISDIR(st->st_mode)) {
		*type = PMEM2_FTYPE_DIR;
		return 0;
	}

	if (!S_ISCHR(st->st_mode)) {
		ERR("file type 0%o not supported", st->st_mode & S_IFMT);
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	char spath[PATH_MAX];
	int ret = util_snprintf(spath, PATH_MAX,
			"/sys/dev/char/%u:%u/subsystem",
			os_major(st->st_rdev), os_minor(st->st_rdev));
	if (ret < 0) {
		/* impossible */
		ERR("!snprintf");
		ASSERTinfo(0, "snprintf failed");
		return PMEM2_E_ERRNO;
	}

	LOG(4, "device subsystem path \"%s\"", spath);

	char npath[PATH_MAX];
	char *rpath = realpath(spath, npath);
	if (rpath == NULL) {
		ERR("!realpath \"%s\"", spath);
		return PMEM2_E_ERRNO;
	}

	char *basename = strrchr(rpath, '/');
	if (!basename || strcmp("dax", basename + 1) != 0) {
		LOG(3, "%s path does not match device dax prefix path", rpath);
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	*type = PMEM2_FTYPE_DEVDAX;

	return 0;
}
file_length: 1,507 | avg_line_length: 20.239437 | max_line_length: 77 | extension_type: c
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/source_windows.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * source_windows.c -- windows specific pmem2_source implementation
 */

#include <Windows.h>
#include <errno.h>	/* errno, EBADF */

#include "config.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"

/*
 * pmem2_source_from_fd -- create a new data source instance
 */
int
pmem2_source_from_fd(struct pmem2_source **src, int fd)
{
	*src = NULL;

	if (fd < 0)
		return PMEM2_E_INVALID_FILE_HANDLE;

	HANDLE handle = (HANDLE)_get_osfhandle(fd);

	if (handle == INVALID_HANDLE_VALUE) {
		/*
		 * _get_osfhandle aborts in an error case, so technically
		 * this is dead code. But according to MSDN it is
		 * setting an errno on failure, so we can return it in case of
		 * "windows magic" happen and this function "accidentally"
		 * will not abort.
		 */
		ERR("!_get_osfhandle");
		if (errno == EBADF)
			return PMEM2_E_INVALID_FILE_HANDLE;
		return PMEM2_E_ERRNO;
	}

	return pmem2_source_from_handle(src, handle);
}

/*
 * pmem2_win_stat -- retrieve information about handle
 */
static int
pmem2_win_stat(HANDLE handle, BY_HANDLE_FILE_INFORMATION *info)
{
	if (!GetFileInformationByHandle(handle, info)) {
		ERR("!!GetFileInformationByHandle");
		if (GetLastError() == ERROR_INVALID_HANDLE)
			return PMEM2_E_INVALID_FILE_HANDLE;
		else
			return pmem2_lasterror_to_err();
	}

	if (info->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
		ERR(
			"using directory doesn't make any sense in context of pmem2");
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	return 0;
}

/*
 * pmem2_source_from_handle -- create a new data source instance
 */
int
pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle)
{
	*src = NULL;
	int ret;

	if (handle == INVALID_HANDLE_VALUE)
		return PMEM2_E_INVALID_FILE_HANDLE;

	BY_HANDLE_FILE_INFORMATION file_info;
	ret = pmem2_win_stat(handle, &file_info);
	if (ret)
		return ret;

	/* XXX: winapi doesn't provide option to get open flags from HANDLE */

	struct pmem2_source *srcp = pmem2_malloc(sizeof(**src), &ret);
	if (ret)
		return ret;

	ASSERTne(srcp, NULL);

	srcp->type = PMEM2_SOURCE_HANDLE;
	srcp->value.handle = handle;
	*src = srcp;

	return 0;
}

/*
 * pmem2_source_size -- get a size of the file handle stored in the provided
 * source
 */
int
pmem2_source_size(const struct pmem2_source *src, size_t *size)
{
	LOG(3, "type %d", src->type);
	int ret;

	if (src->type == PMEM2_SOURCE_ANON) {
		*size = src->value.size;
		return 0;
	}
	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);

	BY_HANDLE_FILE_INFORMATION info;
	ret = pmem2_win_stat(src->value.handle, &info);
	if (ret)
		return ret;

	*size = ((size_t)info.nFileSizeHigh << 32) | info.nFileSizeLow;

	LOG(4, "file length %zu", *size);

	return 0;
}

/*
 * pmem2_source_alignment -- get alignment from the system info
 */
int
pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment)
{
	LOG(3, "type %d", src->type);

	SYSTEM_INFO info;
	GetSystemInfo(&info);

	*alignment = (size_t)info.dwAllocationGranularity;

	if (!util_is_pow2(*alignment)) {
		ERR("alignment (%zu) has to be a power of two", *alignment);
		return PMEM2_E_INVALID_ALIGNMENT_VALUE;
	}

	LOG(4, "alignment %zu", *alignment);

	return 0;
}
file_length: 3,248 | avg_line_length: 20.235294 | max_line_length: 76 | extension_type: c
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/pmem2_utils_none.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

#include <errno.h>

#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"

/*
 * pmem2_device_dax_alignment -- checks the alignment of a given
 * dax device from given source
 */
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
	ERR("Cannot read Device Dax alignment - ndctl is not available");

	return PMEM2_E_NOSUPP;
}

/*
 * pmem2_device_dax_size -- checks the size of a given dax device from
 * given source
 */
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
	ERR("Cannot read Device Dax size - ndctl is not available");

	return PMEM2_E_NOSUPP;
}
file_length: 727 | avg_line_length: 20.411765 | max_line_length: 77 | extension_type: c
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/auto_flush_linux.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */

/*
 * auto_flush_linux.c -- Linux auto flush detection
 */

#define _GNU_SOURCE

#include <inttypes.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <string.h>
#include <errno.h>

#include "out.h"
#include "os.h"
#include "fs.h"
#include "auto_flush.h"

#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
#define PERSISTENCE_DOMAIN "persistence_domain"
#define DOMAIN_VALUE_LEN 32

/*
 * check_cpu_cache -- (internal) check if file contains "cpu_cache" entry
 */
static int
check_cpu_cache(const char *domain_path)
{
	LOG(3, "domain_path: %s", domain_path);

	char domain_value[DOMAIN_VALUE_LEN];
	int domain_fd;
	int cpu_cache = 0;

	if ((domain_fd = os_open(domain_path, O_RDONLY)) < 0) {
		LOG(1, "!open(\"%s\", O_RDONLY)", domain_path);
		goto end;
	}

	ssize_t len = read(domain_fd, domain_value, DOMAIN_VALUE_LEN);

	if (len < 0) {
		ERR("!read(%d, %p, %d)", domain_fd, domain_value,
				DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	} else if (len == 0) {
		errno = EIO;
		ERR("read(%d, %p, %d) empty string", domain_fd, domain_value,
				DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	} else if (domain_value[len - 1] != '\n') {
		ERR("!read(%d, %p, %d) invalid format", domain_fd,
				domain_value, DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	}

	domain_value[len - 1] = '\0';
	LOG(15, "detected persistent_domain: %s", domain_value);
	if (strcmp(domain_value, "cpu_cache") == 0) {
		LOG(15, "cpu_cache in persistent_domain: %s", domain_path);
		cpu_cache = 1;
	} else {
		LOG(15, "cpu_cache not in persistent_domain: %s",
				domain_path);
		cpu_cache = 0;
	}

end:
	if (domain_fd >= 0)
		os_close(domain_fd);
	return cpu_cache;
}

/*
 * check_domain_in_region -- (internal) check if region
 * contains persistence_domain file
 */
static int
check_domain_in_region(const char *region_path)
{
	LOG(3, "region_path: %s", region_path);

	struct fs *reg = NULL;
	struct fs_entry *reg_entry;
	char domain_path[PATH_MAX];
	int cpu_cache = 0;

	reg = fs_new(region_path);
	if (reg == NULL) {
		ERR("!fs_new: \"%s\"", region_path);
		cpu_cache = -1;
		goto end;
	}

	while ((reg_entry = fs_read(reg)) != NULL) {
		/*
		 * persistence_domain has to be a file type entry
		 * and it has to be first level child for region;
		 * there is no need to run into deeper levels
		 */
		if (reg_entry->type != FS_ENTRY_FILE ||
				strcmp(reg_entry->name,
					PERSISTENCE_DOMAIN) != 0 ||
				reg_entry->level != 1)
			continue;

		int ret = util_snprintf(domain_path, PATH_MAX,
				"%s/"PERSISTENCE_DOMAIN, region_path);
		if (ret < 0) {
			ERR("!snprintf");
			cpu_cache = -1;
			goto end;
		}
		cpu_cache = check_cpu_cache(domain_path);
	}

end:
	if (reg)
		fs_delete(reg);
	return cpu_cache;
}

/*
 * pmem2_auto_flush -- check if platform supports auto flush for all regions
 *
 * Traverse "/sys/bus/nd/devices" path to find all the nvdimm regions,
 * then for each region checks if "persistence_domain" file exists and
 * contains "cpu_cache" string.
 * If for any region the "persistence_domain" entry does not exist, or its
 * content is not as expected, assume eADR is not available on this platform.
 */
int
pmem2_auto_flush(void)
{
	LOG(15, NULL);

	char *device_path;
	int cpu_cache = 0;

	device_path = BUS_DEVICE_PATH;

	os_stat_t sdev;
	if (os_stat(device_path, &sdev) != 0 ||
			S_ISDIR(sdev.st_mode) == 0) {
		LOG(3, "eADR not supported");
		return cpu_cache;
	}

	struct fs *dev = fs_new(device_path);
	if (dev == NULL) {
		ERR("!fs_new: \"%s\"", device_path);
		return -1;
	}

	struct fs_entry *dev_entry;

	while ((dev_entry = fs_read(dev)) != NULL) {
		/*
		 * Skip if not a symlink, because we expect that
		 * region on sysfs path is a symlink.
		 * Skip if depth is different than 1, because region
		 * we are interested in should be the first level
		 * child for device.
		 */
		if ((dev_entry->type != FS_ENTRY_SYMLINK) ||
				!strstr(dev_entry->name, "region") ||
				dev_entry->level != 1)
			continue;

		LOG(15, "Start traversing region: %s", dev_entry->path);
		cpu_cache = check_domain_in_region(dev_entry->path);
		if (cpu_cache != 1)
			goto end;
	}

end:
	fs_delete(dev);
	return cpu_cache;
}
file_length: 4,214 | avg_line_length: 21.783784 | max_line_length: 77 | extension_type: c
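check_cpu_cache() above goes through the internal os_/fs_ wrappers; the same sysfs probe can be written with plain stdio, which may help when checking a platform by hand. A standalone sketch (the helper name is hypothetical; the path layout is the one the file above traverses):

#include <stdio.h>
#include <string.h>

/* returns 1 if the region's persistence_domain is "cpu_cache",
 * 0 if it is something else, -1 on error; mirrors check_cpu_cache() */
static int
region_has_cpu_cache(const char *region_path)
{
	char path[4096];
	char val[64];

	snprintf(path, sizeof(path), "%s/persistence_domain", region_path);

	FILE *f = fopen(path, "r");
	if (!f)
		return -1;

	if (!fgets(val, sizeof(val), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	val[strcspn(val, "\n")] = '\0';	/* strip trailing newline */
	return strcmp(val, "cpu_cache") == 0;
}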
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/config.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * config.c -- pmem2_config implementation
 */

#include <unistd.h>
#include "alloc.h"
#include "config.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2.h"
#include "pmem2_utils.h"

/*
 * pmem2_config_init -- initialize cfg structure.
 */
void
pmem2_config_init(struct pmem2_config *cfg)
{
	cfg->offset = 0;
	cfg->length = 0;
	cfg->addr = NULL;
	cfg->addr_request = PMEM2_ADDRESS_ANY;
	cfg->requested_max_granularity = PMEM2_GRANULARITY_INVALID;
	cfg->sharing = PMEM2_SHARED;
	cfg->protection_flag = PMEM2_PROT_READ | PMEM2_PROT_WRITE;
}

/*
 * pmem2_config_new -- allocates and initializes cfg structure.
 */
int
pmem2_config_new(struct pmem2_config **cfg)
{
	int ret;
	*cfg = pmem2_malloc(sizeof(**cfg), &ret);

	if (ret)
		return ret;

	ASSERTne(cfg, NULL);

	pmem2_config_init(*cfg);
	return 0;
}

/*
 * pmem2_config_delete -- deallocate cfg structure.
 */
int
pmem2_config_delete(struct pmem2_config **cfg)
{
	Free(*cfg);
	*cfg = NULL;
	return 0;
}

/*
 * pmem2_config_set_required_store_granularity -- set granularity
 * requested by user in the pmem2_config structure
 */
int
pmem2_config_set_required_store_granularity(struct pmem2_config *cfg,
		enum pmem2_granularity g)
{
	switch (g) {
		case PMEM2_GRANULARITY_BYTE:
		case PMEM2_GRANULARITY_CACHE_LINE:
		case PMEM2_GRANULARITY_PAGE:
			break;
		default:
			ERR("unknown granularity value %d", g);
			return PMEM2_E_GRANULARITY_NOT_SUPPORTED;
	}

	cfg->requested_max_granularity = g;

	return 0;
}

/*
 * pmem2_config_set_offset -- set offset in the pmem2_config structure
 */
int
pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset)
{
	/* mmap func takes offset as a type of off_t */
	if (offset > (size_t)INT64_MAX) {
		ERR("offset is greater than INT64_MAX");
		return PMEM2_E_OFFSET_OUT_OF_RANGE;
	}

	cfg->offset = offset;

	return 0;
}

/*
 * pmem2_config_set_length -- set length in the pmem2_config structure
 */
int
pmem2_config_set_length(struct pmem2_config *cfg, size_t length)
{
	cfg->length = length;

	return 0;
}

/*
 * pmem2_config_validate_length -- validate that length in the pmem2_config
 * structure is consistent with the file length
 */
int
pmem2_config_validate_length(const struct pmem2_config *cfg,
		size_t file_len, size_t alignment)
{
	ASSERTne(alignment, 0);

	if (file_len == 0) {
		ERR("file length is equal 0");
		return PMEM2_E_SOURCE_EMPTY;
	}

	if (cfg->length % alignment) {
		ERR("length is not a multiple of %lu", alignment);
		return PMEM2_E_LENGTH_UNALIGNED;
	}

	/* overflow check */
	const size_t end = cfg->offset + cfg->length;
	if (end < cfg->offset) {
		ERR("overflow of offset and length");
		return PMEM2_E_MAP_RANGE;
	}

	/* let's align the file size */
	size_t aligned_file_len = file_len;
	if (file_len % alignment)
		aligned_file_len = ALIGN_UP(file_len, alignment);

	/* validate mapping fit into the file */
	if (end > aligned_file_len) {
		ERR("mapping larger than file size");
		return PMEM2_E_MAP_RANGE;
	}

	return 0;
}

/*
 * pmem2_config_set_sharing -- set the way pmem2_map will map the file
 */
int
pmem2_config_set_sharing(struct pmem2_config *cfg,
		enum pmem2_sharing_type type)
{
	switch (type) {
		case PMEM2_SHARED:
		case PMEM2_PRIVATE:
			cfg->sharing = type;
			break;
		default:
			ERR("unknown sharing value %d", type);
			return PMEM2_E_INVALID_SHARING_VALUE;
	}

	return 0;
}

/*
 * pmem2_config_validate_addr_alignment -- validate that addr in the
 * pmem2_config structure is a multiple of the alignment required for
 * specific cfg
 */
int
pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
		const struct pmem2_source *src)
{
	/* cannot NULL % alignment, NULL is valid */
	if (!cfg->addr)
		return 0;

	size_t alignment;
	int ret = pmem2_source_alignment(src, &alignment);
	if (ret)
		return ret;

	ASSERTne(alignment, 0);

	if ((size_t)cfg->addr % alignment) {
		ERR("address %p is not a multiple of %lu", cfg->addr,
				alignment);
		return PMEM2_E_ADDRESS_UNALIGNED;
	}

	return 0;
}

/*
 * pmem2_config_set_address -- set addr and addr_request in the config
 * struct
 */
int
pmem2_config_set_address(struct pmem2_config *cfg, void *addr,
		enum pmem2_address_request_type request_type)
{
	if (request_type != PMEM2_ADDRESS_FIXED_NOREPLACE) {
		ERR("invalid address request_type 0x%x", request_type);
		return PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE;
	}

	if (request_type == PMEM2_ADDRESS_FIXED_NOREPLACE && !addr) {
		ERR(
			"cannot use address request type PMEM2_ADDRESS_FIXED_NOREPLACE with addr being NULL");
		return PMEM2_E_ADDRESS_NULL;
	}

	cfg->addr = addr;
	cfg->addr_request = (int)request_type;

	return 0;
}

/*
 * pmem2_config_set_vm_reservation -- set vm_reservation in the
 * pmem2_config structure
 */
int
pmem2_config_set_vm_reservation(struct pmem2_config *cfg,
		struct pmem2_vm_reservation *rsv, size_t offset)
{
	return PMEM2_E_NOSUPP;
}

/*
 * pmem2_config_clear_address -- reset addr and addr_request in the config
 * to the default values
 */
void
pmem2_config_clear_address(struct pmem2_config *cfg)
{
	cfg->addr = NULL;
	cfg->addr_request = PMEM2_ADDRESS_ANY;
}

/*
 * pmem2_config_set_protection -- set protection flags
 * in the config struct
 */
int
pmem2_config_set_protection(struct pmem2_config *cfg,
		unsigned prot)
{
	unsigned unknown_prot = prot & ~(PMEM2_PROT_READ | PMEM2_PROT_WRITE |
			PMEM2_PROT_EXEC | PMEM2_PROT_NONE);
	if (unknown_prot) {
		ERR("invalid flag %u", prot);
		return PMEM2_E_INVALID_PROT_FLAG;
	}

	cfg->protection_flag = prot;

	return 0;
}
file_length: 5,603 | avg_line_length: 20.227273 | max_line_length: 89 | extension_type: c
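The `end < cfg->offset` test in pmem2_config_validate_length() above is the standard unsigned-wraparound check: if offset + length overflows size_t, the sum is necessarily smaller than either operand. A tiny self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	size_t offset = SIZE_MAX - 10;
	size_t length = 100;

	size_t end = offset + length;	/* wraps around to 89 */

	if (end < offset)
		puts("overflow of offset and length");	/* taken */
	return 0;
}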
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/ravl_interval.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ravl_interval.h -- internal definitions for ravl_interval
 */

#ifndef RAVL_INTERVAL_H
#define RAVL_INTERVAL_H

#include "libpmem2.h"
#include "os_thread.h"
#include "ravl.h"

struct ravl_interval;
struct ravl_interval_node;

typedef size_t ravl_interval_min(void *addr);
typedef size_t ravl_interval_max(void *addr);

struct ravl_interval *ravl_interval_new(ravl_interval_min *min,
		ravl_interval_max *max);
void ravl_interval_delete(struct ravl_interval *ri);
int ravl_interval_insert(struct ravl_interval *ri, void *addr);
int ravl_interval_remove(struct ravl_interval *ri,
		struct ravl_interval_node *rin);
struct ravl_interval_node *ravl_interval_find_equal(struct ravl_interval *ri,
		void *addr);
struct ravl_interval_node *ravl_interval_find(struct ravl_interval *ri,
		void *addr);
void *ravl_interval_data(struct ravl_interval_node *rin);
#endif
file_length: 947 | avg_line_length: 27.727273 | max_line_length: 77 | extension_type: h
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/memops_generic.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */

/*
 * memops_generic.c -- architecture-independent memmove & memset fallback
 *
 * This fallback is needed to fulfill guarantee that pmem_mem[cpy|set|move]
 * will use at least 8-byte stores (for 8-byte aligned buffers and sizes),
 * even when accelerated implementation is missing or disabled.
 * This guarantee is needed to maintain correctness eg in pmemobj.
 * Libc may do the same, but this behavior is not documented, so we can't rely
 * on that.
 */

#include <stddef.h>

#include "out.h"
#include "pmem2_arch.h"
#include "util.h"

/*
 * pmem2_flush_flags -- internal wrapper around pmem_flush
 */
static inline void
pmem2_flush_flags(const void *addr, size_t len, unsigned flags,
		flush_func flush)
{
	if (!(flags & PMEM2_F_MEM_NOFLUSH))
		flush(addr, len);
}

/*
 * cpy128 -- (internal) copy 128 bytes from src to dst
 */
static force_inline void
cpy128(uint64_t *dst, const uint64_t *src)
{
	/*
	 * We use atomics here just to be sure compiler will not split stores.
	 * Order of stores doesn't matter.
	 */
	uint64_t tmp[16];
	util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
	util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
	util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
	util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
	util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
	util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
	util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
	util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);
	util_atomic_load_explicit64(&src[8], &tmp[8], memory_order_relaxed);
	util_atomic_load_explicit64(&src[9], &tmp[9], memory_order_relaxed);
	util_atomic_load_explicit64(&src[10], &tmp[10], memory_order_relaxed);
	util_atomic_load_explicit64(&src[11], &tmp[11], memory_order_relaxed);
	util_atomic_load_explicit64(&src[12], &tmp[12], memory_order_relaxed);
	util_atomic_load_explicit64(&src[13], &tmp[13], memory_order_relaxed);
	util_atomic_load_explicit64(&src[14], &tmp[14], memory_order_relaxed);
	util_atomic_load_explicit64(&src[15], &tmp[15], memory_order_relaxed);

	util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[8], tmp[8], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[9], tmp[9], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[10], tmp[10], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[11], tmp[11], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[12], tmp[12], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[13], tmp[13], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[14], tmp[14], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[15], tmp[15], memory_order_relaxed);
}

/*
 * cpy64 -- (internal) copy 64 bytes from src to dst
 */
static force_inline void
cpy64(uint64_t *dst, const uint64_t *src)
{
	/*
	 * We use atomics here just to be sure compiler will not split stores.
	 * Order of stores doesn't matter.
	 */
	uint64_t tmp[8];
	util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
	util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
	util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
	util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
	util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
	util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
	util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
	util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);

	util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
}

/*
 * cpy8 -- (internal) copy 8 bytes from src to dst
 */
static force_inline void
cpy8(uint64_t *dst, const uint64_t *src)
{
	uint64_t tmp;
	util_atomic_load_explicit64(src, &tmp, memory_order_relaxed);
	util_atomic_store_explicit64(dst, tmp, memory_order_relaxed);
}

/*
 * store8 -- (internal) store 8 bytes
 */
static force_inline void
store8(uint64_t *dst, uint64_t c)
{
	util_atomic_store_explicit64(dst, c, memory_order_relaxed);
}

/*
 * memmove_nodrain_generic -- generic memmove to pmem without hw drain
 */
void *
memmove_nodrain_generic(void *dst, const void *src, size_t len,
		unsigned flags, flush_func flush)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x", dst, src, len,
			flags);

	char *cdst = dst;
	const char *csrc = src;
	size_t remaining;
	(void) flags;

	if ((uintptr_t)cdst - (uintptr_t)csrc >= len) {
		size_t cnt = (uint64_t)cdst & 7;
		if (cnt > 0) {
			cnt = 8 - cnt;

			if (cnt > len)
				cnt = len;

			for (size_t i = 0; i < cnt; ++i)
				cdst[i] = csrc[i];

			pmem2_flush_flags(cdst, cnt, flags, flush);

			cdst += cnt;
			csrc += cnt;
			len -= cnt;
		}

		uint64_t *dst8 = (uint64_t *)cdst;
		const uint64_t *src8 = (const uint64_t *)csrc;

		while (len >= 128 && CACHELINE_SIZE == 128) {
			cpy128(dst8, src8);
			pmem2_flush_flags(dst8, 128, flags, flush);
			len -= 128;
			dst8 += 16;
			src8 += 16;
		}

		while (len >= 64) {
			cpy64(dst8, src8);
			pmem2_flush_flags(dst8, 64, flags, flush);
			len -= 64;
			dst8 += 8;
			src8 += 8;
		}

		remaining = len;
		while (len >= 8) {
			cpy8(dst8, src8);
			len -= 8;
			dst8++;
			src8++;
		}

		cdst = (char *)dst8;
		csrc = (const char *)src8;

		for (size_t i = 0; i < len; ++i)
			*cdst++ = *csrc++;

		if (remaining)
			pmem2_flush_flags(cdst - remaining, remaining, flags,
					flush);
	} else {
		cdst += len;
		csrc += len;

		size_t cnt = (uint64_t)cdst & 7;
		if (cnt > 0) {
			if (cnt > len)
				cnt = len;

			cdst -= cnt;
			csrc -= cnt;
			len -= cnt;

			for (size_t i = cnt; i > 0; --i)
				cdst[i - 1] = csrc[i - 1];
			pmem2_flush_flags(cdst, cnt, flags, flush);
		}

		uint64_t *dst8 = (uint64_t *)cdst;
		const uint64_t *src8 = (const uint64_t *)csrc;

		while (len >= 128 && CACHELINE_SIZE == 128) {
			dst8 -= 16;
			src8 -= 16;
			cpy128(dst8, src8);
			pmem2_flush_flags(dst8, 128, flags, flush);
			len -= 128;
		}

		while (len >= 64) {
			dst8 -= 8;
			src8 -= 8;
			cpy64(dst8, src8);
			pmem2_flush_flags(dst8, 64, flags, flush);
			len -= 64;
		}

		remaining = len;
		while (len >= 8) {
			--dst8;
			--src8;
			cpy8(dst8, src8);
			len -= 8;
		}

		cdst = (char *)dst8;
		csrc = (const char *)src8;

		for (size_t i = len; i > 0; --i)
			*--cdst = *--csrc;

		if (remaining)
			pmem2_flush_flags(cdst, remaining, flags, flush);
	}

	return dst;
}

/*
 * memset_nodrain_generic -- generic memset to pmem without hw drain
 */
void *
memset_nodrain_generic(void *dst, int c, size_t len, unsigned flags,
		flush_func flush)
{
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", dst, c, len,
			flags);
	(void) flags;

	char *cdst = dst;
	size_t cnt = (uint64_t)cdst & 7;
	if (cnt > 0) {
		cnt = 8 - cnt;

		if (cnt > len)
			cnt = len;

		for (size_t i = 0; i < cnt; ++i)
			cdst[i] = (char)c;
		pmem2_flush_flags(cdst, cnt, flags, flush);

		cdst += cnt;
		len -= cnt;
	}

	uint64_t *dst8 = (uint64_t *)cdst;

	uint64_t u = (unsigned char)c;
	uint64_t tmp = (u << 56) | (u << 48) | (u << 40) | (u << 32) |
			(u << 24) | (u << 16) | (u << 8) | u;

	while (len >= 128 && CACHELINE_SIZE == 128) {
		store8(&dst8[0], tmp);
		store8(&dst8[1], tmp);
		store8(&dst8[2], tmp);
		store8(&dst8[3], tmp);
		store8(&dst8[4], tmp);
		store8(&dst8[5], tmp);
		store8(&dst8[6], tmp);
		store8(&dst8[7], tmp);
		store8(&dst8[8], tmp);
		store8(&dst8[9], tmp);
		store8(&dst8[10], tmp);
		store8(&dst8[11], tmp);
		store8(&dst8[12], tmp);
		store8(&dst8[13], tmp);
		store8(&dst8[14], tmp);
		store8(&dst8[15], tmp);
		pmem2_flush_flags(dst8, 128, flags, flush);
		len -= 128;
		dst8 += 16;
	}

	while (len >= 64) {
		store8(&dst8[0], tmp);
		store8(&dst8[1], tmp);
		store8(&dst8[2], tmp);
		store8(&dst8[3], tmp);
		store8(&dst8[4], tmp);
		store8(&dst8[5], tmp);
		store8(&dst8[6], tmp);
		store8(&dst8[7], tmp);
		pmem2_flush_flags(dst8, 64, flags, flush);
		len -= 64;
		dst8 += 8;
	}

	size_t remaining = len;
	while (len >= 8) {
		store8(dst8, tmp);
		len -= 8;
		dst8++;
	}

	cdst = (char *)dst8;

	for (size_t i = 0; i < len; ++i)
		*cdst++ = (char)c;

	if (remaining)
		pmem2_flush_flags(cdst - remaining, remaining, flags, flush);

	return dst;
}
file_length: 9,345 | avg_line_length: 26.488235 | max_line_length: 78 | extension_type: c
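memset_nodrain_generic() above replicates the fill byte across a 64-bit word with a shift-or chain; multiplying the byte by 0x0101010101010101 yields the same pattern, a common equivalent worth knowing when reading this code (illustration only, not code from this tree):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	int c = 0xAB;
	uint64_t u = (unsigned char)c;

	/* the shift-or chain used in memset_nodrain_generic() */
	uint64_t tmp = (u << 56) | (u << 48) | (u << 40) | (u << 32) |
			(u << 24) | (u << 16) | (u << 8) | u;

	/* equivalent multiplicative form */
	assert(tmp == u * 0x0101010101010101ULL);
	return 0;
}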
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/pmem2_arch.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */

/*
 * pmem2_arch.h -- core-arch interface
 */
#ifndef PMEM2_ARCH_H
#define PMEM2_ARCH_H

#include <stddef.h>
#include "libpmem2.h"
#include "util.h"
#include "valgrind_internal.h"

#ifdef __cplusplus
extern "C" {
#endif

struct pmem2_arch_info;

typedef void (*fence_func)(void);
typedef void (*flush_func)(const void *, size_t);
typedef void *(*memmove_nodrain_func)(void *pmemdest, const void *src,
		size_t len, unsigned flags, flush_func flush);
typedef void *(*memset_nodrain_func)(void *pmemdest, int c, size_t len,
		unsigned flags, flush_func flush);

struct pmem2_arch_info {
	memmove_nodrain_func memmove_nodrain;
	memmove_nodrain_func memmove_nodrain_eadr;
	memset_nodrain_func memset_nodrain;
	memset_nodrain_func memset_nodrain_eadr;
	flush_func flush;
	fence_func fence;
	int flush_has_builtin_fence;
};

void pmem2_arch_init(struct pmem2_arch_info *info);

/*
 * flush_empty_nolog -- (internal) do not flush the CPU cache
 */
static force_inline void
flush_empty_nolog(const void *addr, size_t len)
{
	/* NOP, but tell pmemcheck about it */
	VALGRIND_DO_FLUSH(addr, len);
}

void *memmove_nodrain_generic(void *pmemdest, const void *src, size_t len,
		unsigned flags, flush_func flush);
void *memset_nodrain_generic(void *pmemdest, int c, size_t len,
		unsigned flags, flush_func flush);

#ifdef __cplusplus
}
#endif

#endif
file_length: 1,427 | avg_line_length: 22.8 | max_line_length: 79 | extension_type: h
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/region_namespace_ndctl.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * region_namespace_ndctl.c -- common ndctl functions
 */

#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include <sys/sysmacros.h>
#include <fcntl.h>

#include "libpmem2.h"
#include "pmem2_utils.h"

#include "region_namespace_ndctl.h"
#include "region_namespace.h"
#include "out.h"

/*
 * ndctl_match_devdax -- (internal) returns 0 if the devdax matches
 *                       with the given file, 1 if it doesn't match,
 *                       and a negative value in case of an error.
 */
static int
ndctl_match_devdax(dev_t st_rdev, const char *devname)
{
	LOG(3, "st_rdev %lu devname %s", st_rdev, devname);

	if (*devname == '\0')
		return 1;

	char path[PATH_MAX];
	os_stat_t stat;

	if (util_snprintf(path, PATH_MAX, "/dev/%s", devname) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	if (os_stat(path, &stat)) {
		ERR("!stat %s", path);
		return PMEM2_E_ERRNO;
	}

	if (st_rdev != stat.st_rdev) {
		LOG(10, "skipping not matching device: %s", path);
		return 1;
	}

	LOG(4, "found matching device: %s", path);

	return 0;
}

#define BUFF_LENGTH 64

/*
 * ndctl_match_fsdax -- (internal) returns 0 if the device matches
 *                      with the given file, 1 if it doesn't match,
 *                      and a negative value in case of an error.
 */
static int
ndctl_match_fsdax(dev_t st_dev, const char *devname)
{
	LOG(3, "st_dev %lu devname %s", st_dev, devname);

	if (*devname == '\0')
		return 1;

	char path[PATH_MAX];
	char dev_id[BUFF_LENGTH];

	if (util_snprintf(path, PATH_MAX, "/sys/block/%s/dev",
			devname) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	if (util_snprintf(dev_id, BUFF_LENGTH, "%d:%d",
			major(st_dev), minor(st_dev)) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	int fd = os_open(path, O_RDONLY);
	if (fd < 0) {
		ERR("!open \"%s\"", path);
		return PMEM2_E_ERRNO;
	}

	char buff[BUFF_LENGTH];
	ssize_t nread = read(fd, buff, BUFF_LENGTH);
	if (nread < 0) {
		ERR("!read");
		int oerrno = errno; /* save the errno */
		os_close(fd);
		errno = oerrno;
		return PMEM2_E_ERRNO;
	}
	os_close(fd);

	if (nread == 0) {
		ERR("%s is empty", path);
		return PMEM2_E_INVALID_DEV_FORMAT;
	}

	if (buff[nread - 1] != '\n') {
		ERR("%s doesn't end with new line", path);
		return PMEM2_E_INVALID_DEV_FORMAT;
	}

	buff[nread - 1] = '\0';

	if (strcmp(buff, dev_id) != 0) {
		LOG(10, "skipping not matching device: %s", path);
		return 1;
	}

	LOG(4, "found matching device: %s", path);

	return 0;
}

/*
 * pmem2_region_namespace -- returns the region
 *                           (and optionally the namespace)
 *                           where the given file is located
 */
int
pmem2_region_namespace(struct ndctl_ctx *ctx,
		const struct pmem2_source *src,
		struct ndctl_region **pregion,
		struct ndctl_namespace **pndns)
{
	LOG(3, "ctx %p src %p pregion %p pnamespace %p", ctx, src, pregion,
			pndns);

	struct ndctl_bus *bus;
	struct ndctl_region *region;
	struct ndctl_namespace *ndns;

	if (pregion)
		*pregion = NULL;

	if (pndns)
		*pndns = NULL;

	if (src->value.ftype == PMEM2_FTYPE_DIR) {
		ERR("cannot check region or namespace of a directory");
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) {
		struct ndctl_btt *btt;
		struct ndctl_dax *dax = NULL;
		struct ndctl_pfn *pfn;
		const char *devname;

		if ((dax = ndctl_namespace_get_dax(ndns))) {
			if (src->value.ftype == PMEM2_FTYPE_REG)
				continue;
			ASSERTeq(src->value.ftype, PMEM2_FTYPE_DEVDAX);

			struct daxctl_region *dax_region;
			dax_region = ndctl_dax_get_daxctl_region(dax);
			if (!dax_region) {
				ERR("!cannot find dax region");
				return PMEM2_E_DAX_REGION_NOT_FOUND;
			}
			struct daxctl_dev *dev;
			daxctl_dev_foreach(dax_region, dev) {
				devname = daxctl_dev_get_devname(dev);
				int ret = ndctl_match_devdax(
						src->value.st_rdev, devname);
				if (ret < 0)
					return ret;

				if (ret == 0) {
					if (pregion)
						*pregion = region;
					if (pndns)
						*pndns = ndns;

					return 0;
				}
			}
		} else {
			if (src->value.ftype == PMEM2_FTYPE_DEVDAX)
				continue;
			ASSERTeq(src->value.ftype, PMEM2_FTYPE_REG);

			if ((btt = ndctl_namespace_get_btt(ndns))) {
				devname = ndctl_btt_get_block_device(btt);
			} else if ((pfn = ndctl_namespace_get_pfn(ndns))) {
				devname = ndctl_pfn_get_block_device(pfn);
			} else {
				devname =
					ndctl_namespace_get_block_device(ndns);
			}

			int ret = ndctl_match_fsdax(src->value.st_dev,
					devname);
			if (ret < 0)
				return ret;

			if (ret == 0) {
				if (pregion)
					*pregion = region;
				if (pndns)
					*pndns = ndns;

				return 0;
			}
		}
	}

	LOG(10, "did not find any matching device");

	return 0;
}

/*
 * pmem2_get_region_id -- returns the region id
 */
int
pmem2_get_region_id(const struct pmem2_source *src, unsigned *region_id)
{
	LOG(3, "src %p region_id %p", src, region_id);

	struct ndctl_region *region;
	struct ndctl_namespace *ndns;
	struct ndctl_ctx *ctx;

	errno = ndctl_new(&ctx) * (-1);
	if (errno) {
		ERR("!ndctl_new");
		return PMEM2_E_ERRNO;
	}

	int rv = pmem2_region_namespace(ctx, src, &region, &ndns);
	if (rv) {
		LOG(1, "getting region and namespace failed");
		goto end;
	}

	if (!region) {
		ERR("unknown region");
		rv = PMEM2_E_DAX_REGION_NOT_FOUND;
		goto end;
	}

	*region_id = ndctl_region_get_id(region);

end:
	ndctl_unref(ctx);
	return rv;
}
file_length: 5,467 | avg_line_length: 20.111969 | max_line_length: 72 | extension_type: c
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/pmem2_utils_other.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */

#include <errno.h>
#include <sys/stat.h>

#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"

#ifdef _WIN32
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif

int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
	if (S_ISREG(st->st_mode)) {
		*type = PMEM2_FTYPE_REG;
		return 0;
	}

	if (S_ISDIR(st->st_mode)) {
		*type = PMEM2_FTYPE_DIR;
		return 0;
	}

	ERR("file type 0%o not supported", st->st_mode & S_IFMT);
	return PMEM2_E_INVALID_FILE_TYPE;
}

/*
 * pmem2_device_dax_size -- checks the size of a given
 * dax device from given source structure
 */
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
	const char *err =
		"BUG: pmem2_device_dax_size should never be called on this OS";
	ERR("%s", err);
	ASSERTinfo(0, err);
	return PMEM2_E_NOSUPP;
}

/*
 * pmem2_device_dax_alignment -- checks the alignment of a given
 * dax device from given source
 */
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
	const char *err =
		"BUG: pmem2_device_dax_alignment should never be called on this OS";
	ERR("%s", err);
	ASSERTinfo(0, err);
	return PMEM2_E_NOSUPP;
}
file_length: 1,301 | avg_line_length: 20.7 | max_line_length: 77 | extension_type: c
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/deep_flush.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * deep_flush.c -- pmem2_deep_flush implementation
 */

#include <stdint.h>	/* uintptr_t */
#include <stdlib.h>

#include "libpmem2.h"
#include "deep_flush.h"
#include "out.h"

/*
 * pmem2_deep_flush -- performs deep flush operation
 */
int
pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	uintptr_t map_addr = (uintptr_t)map->addr;
	uintptr_t map_end = map_addr + map->content_length;
	uintptr_t flush_addr = (uintptr_t)ptr;
	uintptr_t flush_end = flush_addr + size;

	if (flush_addr < map_addr || flush_end > map_end) {
		ERR("requested deep flush range ptr %p size %zu "
			"exceeds map range %p", ptr, size, map);
		return PMEM2_E_DEEP_FLUSH_RANGE;
	}

	int ret = map->deep_flush_fn(map, ptr, size);
	if (ret) {
		LOG(1, "cannot perform deep flush operation for map %p", map);
		return ret;
	}

	return 0;
}
file_length: 929 | avg_line_length: 21.682927 | max_line_length: 64 | extension_type: c
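A hedged usage sketch for the entry point above: the requested range must lie entirely inside the mapping or PMEM2_E_DEEP_FLUSH_RANGE comes back. This assumes `map` came from a successful pmem2_map() and that the public pmem2_map_get_address() getter is available in this tree; the helper name is hypothetical:

#include <libpmem2.h>

/* flush the first `len` bytes of a mapping to the deepest
 * persistence domain available; len must not exceed the
 * mapping's content length */
static int
deep_flush_prefix(struct pmem2_map *map, size_t len)
{
	void *addr = pmem2_map_get_address(map);

	return pmem2_deep_flush(map, addr, len);
}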
repo: null | file: NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/map_posix.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * map_posix.c -- pmem2_map (POSIX)
 */

#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>

#include "libpmem2.h"

#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "file.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "valgrind_internal.h"

#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif

#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif

#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)

/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"
#ifdef __linux__
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
	"requested granularity not available because fd doesn't point to DAX-enabled file " \
	"or kernel doesn't support MAP_SYNC flag (Linux >= 4.15)"

/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG

/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
	"requested granularity not available because the platform doesn't support eADR"

static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/*		BYTE			CACHE_LINE		PAGE	*/
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE,	REQ_BY_AVAIL_CL,	REQ_BY_AVAIL_PG},
/* CL	*/ {GRAN_IMPOSSIBLE,	GRAN_IMPOSSIBLE,	REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE,	GRAN_IMPOSSIBLE,	GRAN_IMPOSSIBLE}};
#else
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
	"the operating system doesn't provide a method of detecting granularity"

/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG \
	"the operating system doesn't provide a method of detecting whether the platform supports eADR"

static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/*		BYTE			CACHE_LINE		PAGE	*/
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE,	GRAN_IMPOSSIBLE,	REQ_BY_AVAIL_PG},
/* CL	*/ {GRAN_IMPOSSIBLE,	GRAN_IMPOSSIBLE,	REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE,	GRAN_IMPOSSIBLE,	GRAN_IMPOSSIBLE}};
#endif

/*
 * get_map_alignment -- (internal) choose the desired mapping alignment
 *
 * The smallest supported alignment is 2 megabytes because of the object
 * alignment requirements. Changing this value to 4 kilobytes constitutes a
 * layout change.
 *
 * Use 1GB page alignment only if the mapping length is at least
 * twice as big as the page size.
 */
static inline size_t
get_map_alignment(size_t len, size_t req_align)
{
	size_t align = 2 * MEGABYTE;
	if (req_align)
		align = req_align;
	else if (len >= 2 * GIGABYTE)
		align = GIGABYTE;

	return align;
}

/*
 * map_reserve -- (internal) reserve an address for mmap()
 *
 * ASLR in 64-bit Linux kernel uses 28-bit of randomness for mmap
 * (bit positions 12-39), which means the base mapping address is randomized
 * within [0..1024GB] range, with 4KB granularity. Assuming additional
 * 1GB alignment, it results in 1024 possible locations.
 */
static int
map_reserve(size_t len, size_t alignment, void **reserv, size_t *reslen,
		const struct pmem2_config *cfg)
{
	ASSERTne(reserv, NULL);

	/* let's get addr from the cfg */
	void *mmap_addr = cfg->addr;
	int mmap_addr_flag = 0;
	size_t dlength; /* dummy length */

	/* if addr is initialized, dlength == len */
	if (mmap_addr)
		dlength = len;
	else
		dlength = len + alignment; /* dummy length */

	/* "translate" pmem2 addr request type into linux flag */
	if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
		/*
		 * glibc started exposing this flag in version 4.17 but we
		 * can still imitate it even if it is not supported by libc
		 * or kernel
		 */
#ifdef MAP_FIXED_NOREPLACE
		mmap_addr_flag = MAP_FIXED_NOREPLACE;
#else
		mmap_addr_flag = 0;
#endif
	}

	/*
	 * Create dummy mapping to find an unused region of given size.
	 * Request for increased size for later address alignment.
	 * Use MAP_PRIVATE with read-only access to simulate
	 * zero cost for overcommit accounting. Note: MAP_NORESERVE
	 * flag is ignored if overcommit is disabled (mode 2).
	 */
	char *daddr = mmap(mmap_addr, dlength, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS | mmap_addr_flag, -1, 0);
	if (daddr == MAP_FAILED) {
		if (errno == EEXIST) {
			ERR("!mmap MAP_FIXED_NOREPLACE");
			return PMEM2_E_MAPPING_EXISTS;
		}
		ERR("!mmap MAP_ANONYMOUS");
		return PMEM2_E_ERRNO;
	}

	/*
	 * When kernel does not support MAP_FIXED_NOREPLACE flag we imitate it.
	 * If kernel does not support flag and given addr is occupied, kernel
	 * chooses new addr randomly and returns it. We do not want that
	 * behavior, so we validate it and fail when addresses do not match.
	 */
	if (mmap_addr && cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
		/* mapping passed and gave different addr, while it shouldn't */
		if (daddr != mmap_addr) {
			munmap(daddr, dlength);
			ERR("mapping exists in the given address");
			return PMEM2_E_MAPPING_EXISTS;
		}
	}

	LOG(4, "system choice %p", daddr);
	*reserv = (void *)roundup((uintptr_t)daddr, alignment);

	/*
	 * since the last part of the reservation from (reserv + reslen == end)
	 * will be unmapped, the 'end' address has to be page-aligned.
	 * 'reserv' is already page-aligned (or even aligned to multiple of
	 * page size) so it is enough to page-align the 'reslen' value.
	 */
	*reslen = roundup(len, Pagesize);
	LOG(4, "hint %p", *reserv);

	/*
	 * The placeholder mapping is divided into few parts:
	 *
	 * 1      2         3   4                 5
	 * |......|uuuuuuuuu|rrr|.................|
	 *
	 * Addresses:
	 * 1 == daddr
	 * 2 == reserv
	 * 3 == reserv + len
	 * 4 == reserv + reslen == end (has to be page-aligned)
	 * 5 == daddr + dlength
	 *
	 * Key:
	 * - '.' is an unused part of the placeholder
	 * - 'u' is where the actual mapping lies
	 * - 'r' is what reserved as padding
	 */

	/* unmap the placeholder before the actual mapping */
	const size_t before = (uintptr_t)(*reserv) - (uintptr_t)daddr;
	if (before) {
		if (munmap(daddr, before)) {
			ERR("!munmap");
			return PMEM2_E_ERRNO;
		}
	}

	/* unmap the placeholder after the actual mapping */
	const size_t after = dlength - *reslen - before;
	void *end = (void *)((uintptr_t)(*reserv) + (uintptr_t)*reslen);
	if (after)
		if (munmap(end, after)) {
			ERR("!munmap");
			return PMEM2_E_ERRNO;
		}

	return 0;
}

/*
 * file_map -- (internal) memory map given file into memory
 * If (flags & MAP_PRIVATE) it uses just mmap. Otherwise, it tries to mmap with
 * (flags | MAP_SHARED_VALIDATE | MAP_SYNC) which allows flushing from the
 * user-space. If MAP_SYNC fails and the user did not specify it by himself it
 * falls back to the mmap with user-provided flags.
 */
static int
file_map(void *reserv, size_t len, int proto, int flags,
		int fd, off_t offset, bool *map_sync, void **base)
{
	LOG(15, "reserve %p len %zu proto %x flags %x fd %d offset %ld "
			"map_sync %p", reserv, len, proto, flags, fd, offset,
			map_sync);

	ASSERTne(map_sync, NULL);
	ASSERTne(base, NULL);

	/*
	 * MAP_PRIVATE and MAP_SHARED are mutually exclusive, therefore mmap
	 * with MAP_PRIVATE is executed separately.
	 */
	if (flags & MAP_PRIVATE) {
		*base = mmap(reserv, len, proto, flags, fd, offset);
		if (*base == MAP_FAILED) {
			ERR("!mmap");
			return PMEM2_E_ERRNO;
		}
		LOG(4, "mmap with MAP_PRIVATE succeeded");
		*map_sync = false;
		return 0;
	}

	/* try to mmap with MAP_SYNC flag */
	const int sync_flags = MAP_SHARED_VALIDATE | MAP_SYNC;
	*base = mmap(reserv, len, proto, flags | sync_flags, fd, offset);
	if (*base != MAP_FAILED) {
		LOG(4, "mmap with MAP_SYNC succeeded");
		*map_sync = true;
		return 0;
	}

	/* try to mmap with MAP_SHARED flag (without MAP_SYNC) */
	if (errno == EINVAL || errno == ENOTSUP) {
		LOG(4, "mmap with MAP_SYNC not supported");
		*base = mmap(reserv, len, proto, flags | MAP_SHARED, fd,
				offset);
		if (*base != MAP_FAILED) {
			*map_sync = false;
			return 0;
		}
	}

	ERR("!mmap");
	return PMEM2_E_ERRNO;
}

/*
 * unmap -- (internal) unmap a memory range
 */
static int
unmap(void *addr, size_t len)
{
	int retval = munmap(addr, len);
	if (retval < 0) {
		ERR("!munmap");
		return PMEM2_E_ERRNO;
	}

	return 0;
}

/*
 * pmem2_map -- map memory according to provided config
 */
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
		struct pmem2_map **map_ptr)
{
	LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);

	int ret = 0;
	struct pmem2_map *map;
	size_t file_len;
	*map_ptr = NULL;

	if (cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
		ERR(
			"please define the max granularity requested for the mapping");
		return PMEM2_E_GRANULARITY_NOT_SET;
	}

	size_t src_alignment;
	ret = pmem2_source_alignment(src, &src_alignment);
	if (ret)
		return ret;

	/* get file size */
	ret = pmem2_source_size(src, &file_len);
	if (ret)
		return ret;

	/* get offset */
	size_t effective_offset;
	ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
	if (ret)
		return ret;
	ASSERTeq(effective_offset, cfg->offset);

	if (src->type == PMEM2_SOURCE_ANON)
		effective_offset = 0;

	os_off_t off = (os_off_t)effective_offset;

	/* map input and output variables */
	bool map_sync = false;
	/*
	 * MAP_SHARED - is required to mmap directly the underlying hardware
	 * MAP_FIXED - is required to mmap at exact address pointed by hint
	 */
	int flags = MAP_FIXED;
	void *addr;

	/* "translate" pmem2 protection flags into linux flags */
	int proto = 0;
	if (cfg->protection_flag == PMEM2_PROT_NONE)
		proto = PROT_NONE;
	if (cfg->protection_flag & PMEM2_PROT_EXEC)
		proto |= PROT_EXEC;
	if (cfg->protection_flag & PMEM2_PROT_READ)
		proto |= PROT_READ;
	if (cfg->protection_flag & PMEM2_PROT_WRITE)
		proto |= PROT_WRITE;

	if (src->type == PMEM2_SOURCE_FD) {
		if (src->value.ftype == PMEM2_FTYPE_DIR) {
			ERR("the directory is not a supported file type");
			return PMEM2_E_INVALID_FILE_TYPE;
		}

		ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
			src->value.ftype == PMEM2_FTYPE_DEVDAX);

		if (cfg->sharing == PMEM2_PRIVATE &&
				src->value.ftype == PMEM2_FTYPE_DEVDAX) {
			ERR(
				"device DAX does not support mapping with MAP_PRIVATE");
			return PMEM2_E_SRC_DEVDAX_PRIVATE;
		}
	}

	size_t content_length, reserved_length = 0;
	ret = pmem2_config_validate_length(cfg, file_len, src_alignment);
	if (ret)
		return ret;

	/* without user-provided length, map to the end of the file */
	if (cfg->length)
		content_length = cfg->length;
else content_length = file_len - effective_offset; size_t alignment = get_map_alignment(content_length, src_alignment); ret = pmem2_config_validate_addr_alignment(cfg, src); if (ret) return ret; /* find a hint for the mapping */ void *reserv = NULL; ret = map_reserve(content_length, alignment, &reserv, &reserved_length, cfg); if (ret != 0) { if (ret == PMEM2_E_MAPPING_EXISTS) LOG(1, "given mapping region is already occupied"); else LOG(1, "cannot find a contiguous region of given size"); return ret; } ASSERTne(reserv, NULL); if (cfg->sharing == PMEM2_PRIVATE) { flags |= MAP_PRIVATE; } int map_fd = INVALID_FD; if (src->type == PMEM2_SOURCE_FD) { map_fd = src->value.fd; } else if (src->type == PMEM2_SOURCE_ANON) { flags |= MAP_ANONYMOUS; } else { ASSERT(0); } ret = file_map(reserv, content_length, proto, flags, map_fd, off, &map_sync, &addr); if (ret) { /* unmap the reservation mapping */ munmap(reserv, reserved_length); if (ret == -EACCES) return PMEM2_E_NO_ACCESS; else if (ret == -ENOTSUP) return PMEM2_E_NOSUPP; else return ret; } LOG(3, "mapped at %p", addr); bool eADR = (pmem2_auto_flush() == 1); enum pmem2_granularity available_min_granularity = src->type == PMEM2_SOURCE_ANON ? PMEM2_GRANULARITY_BYTE : get_min_granularity(eADR, map_sync, cfg->sharing); if (available_min_granularity > cfg->requested_max_granularity) { const char *err = granularity_err_msg [cfg->requested_max_granularity] [available_min_granularity]; if (strcmp(err, GRAN_IMPOSSIBLE) == 0) FATAL( "unhandled granularity error: available_min_granularity: %d" \ "requested_max_granularity: %d", available_min_granularity, cfg->requested_max_granularity); ERR("%s", err); ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED; goto err; } /* prepare pmem2_map structure */ map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret); if (!map) goto err; map->addr = addr; map->reserved_length = reserved_length; map->content_length = content_length; map->effective_granularity = available_min_granularity; pmem2_set_flush_fns(map); pmem2_set_mem_fns(map); map->source = *src; map->source.value.fd = INVALID_FD; /* fd should not be used after map */ ret = pmem2_register_mapping(map); if (ret) goto err_register; *map_ptr = map; if (src->type == PMEM2_SOURCE_FD) { VALGRIND_REGISTER_PMEM_MAPPING(map->addr, map->content_length); VALGRIND_REGISTER_PMEM_FILE(src->value.fd, map->addr, map->content_length, 0); } return 0; err_register: free(map); err: unmap(addr, reserved_length); return ret; } /* * pmem2_unmap -- unmap the specified mapping */ int pmem2_unmap(struct pmem2_map **map_ptr) { LOG(3, "map_ptr %p", map_ptr); int ret = 0; struct pmem2_map *map = *map_ptr; ret = pmem2_unregister_mapping(map); if (ret) return ret; ret = unmap(map->addr, map->reserved_length); if (ret) return ret; VALGRIND_REMOVE_PMEM_MAPPING(map->addr, map->content_length); Free(map); *map_ptr = NULL; return ret; }
13,869
25.879845
96
c
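The file above provides the POSIX implementation of pmem2_map()/pmem2_unmap(). Below is a minimal, hypothetical usage sketch (not part of the tree); it assumes the usual libpmem2 public helpers (pmem2_config_new(), pmem2_source_from_fd(), pmem2_map_get_address(), pmem2_get_persist_fn()) are present in this version of the library, and a DAX-capable file supplied by the caller.

/* sketch: map a file with libpmem2 and persist one store (assumes libpmem2.h) */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include "libpmem2.h"

static int
write_hello(const char *path)
{
	int ret = 1;
	int fd = open(path, O_RDWR);
	if (fd < 0)
		return 1;

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;

	if (pmem2_config_new(&cfg))
		goto close_fd;
	/* accept anything down to page granularity -- see granularity_err_msg above */
	if (pmem2_config_set_required_store_granularity(cfg,
			PMEM2_GRANULARITY_PAGE))
		goto del_cfg;
	if (pmem2_source_from_fd(&src, fd))
		goto del_cfg;
	if (pmem2_map(cfg, src, &map))	/* the function defined above */
		goto del_src;

	char *addr = pmem2_map_get_address(map);
	strcpy(addr, "hello");
	pmem2_get_persist_fn(map)(addr, sizeof("hello"));

	pmem2_unmap(&map);
	ret = 0;
del_src:
	pmem2_source_delete(&src);
del_cfg:
	pmem2_config_delete(&cfg);
close_fd:
	close(fd);
	return ret;
}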
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/auto_flush_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * auto_flush_windows.c -- Windows auto flush detection */ #include <windows.h> #include <inttypes.h> #include "alloc.h" #include "out.h" #include "os.h" #include "endian.h" #include "auto_flush_windows.h" /* * is_nfit_available -- (internal) check if platform supports NFIT table. */ static int is_nfit_available() { LOG(3, "is_nfit_available()"); DWORD signatures_size; char *signatures = NULL; int is_nfit = 0; DWORD offset = 0; signatures_size = EnumSystemFirmwareTables(ACPI_SIGNATURE, NULL, 0); if (signatures_size == 0) { ERR("!EnumSystemFirmwareTables"); return -1; } signatures = (char *)Malloc(signatures_size + 1); if (signatures == NULL) { ERR("!malloc"); return -1; } int ret = EnumSystemFirmwareTables(ACPI_SIGNATURE, signatures, signatures_size); signatures[signatures_size] = '\0'; if (ret != signatures_size) { ERR("!EnumSystemFirmwareTables"); goto err; } while (offset <= signatures_size) { int nfit_sig = strncmp(signatures + offset, NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN); if (nfit_sig == 0) { is_nfit = 1; break; } offset += NFIT_SIGNATURE_LEN; } Free(signatures); return is_nfit; err: Free(signatures); return -1; } /* * is_auto_flush_cap_set -- (internal) check if specific * capabilities bits are set. * * ACPI 6.2A Specification: * Bit[0] - CPU Cache Flush to NVDIMM Durability on * Power Loss Capable. If set to 1, indicates that platform * ensures the entire CPU store data path is flushed to * persistent memory on system power loss. * Bit[1] - Memory Controller Flush to NVDIMM Durability on Power Loss Capable. * If set to 1, indicates that platform provides mechanisms to automatically * flush outstanding write data from the memory controller to persistent memory * in the event of platform power loss. Note: If bit 0 is set to 1 then this bit * shall be set to 1 as well. */ static int is_auto_flush_cap_set(uint32_t capabilities) { LOG(3, "is_auto_flush_cap_set capabilities 0x%" PRIx32, capabilities); int CPU_cache_flush = CHECK_BIT(capabilities, 0); int memory_controller_flush = CHECK_BIT(capabilities, 1); LOG(15, "CPU_cache_flush %d, memory_controller_flush %d", CPU_cache_flush, memory_controller_flush); if (memory_controller_flush == 1 && CPU_cache_flush == 1) return 1; return 0; } /* * parse_nfit_buffer -- (internal) parse nfit buffer * if platform_capabilities struct is available return pcs structure. */ static struct platform_capabilities parse_nfit_buffer(const unsigned char *nfit_buffer, unsigned long buffer_size) { LOG(3, "parse_nfit_buffer nfit_buffer %s, buffer_size %lu", nfit_buffer, buffer_size); uint16_t type; uint16_t length; size_t offset = sizeof(struct nfit_header); struct platform_capabilities pcs = {0}; while (offset < buffer_size) { type = *(nfit_buffer + offset); length = *(nfit_buffer + offset + 2); if (type == PCS_TYPE_NUMBER) { if (length == sizeof(struct platform_capabilities)) { memmove(&pcs, nfit_buffer + offset, length); return pcs; } } offset += length; } return pcs; } /* * pmem2_auto_flush -- check if platform supports auto flush. 
*/ int pmem2_auto_flush(void) { LOG(3, NULL); DWORD nfit_buffer_size = 0; DWORD nfit_written = 0; PVOID nfit_buffer = NULL; struct nfit_header *nfit_data; struct platform_capabilities *pc = NULL; int eADR = 0; int is_nfit = is_nfit_available(); if (is_nfit == 0) { LOG(15, "ACPI NFIT table not available"); return 0; } if (is_nfit < 0 || is_nfit != 1) { LOG(1, "!is_nfit_available"); return -1; } /* get the entire nfit size */ nfit_buffer_size = GetSystemFirmwareTable( (DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE, NULL, 0); if (nfit_buffer_size == 0) { ERR("!GetSystemFirmwareTable"); return -1; } /* reserve buffer */ nfit_buffer = (unsigned char *)Malloc(nfit_buffer_size); if (nfit_buffer == NULL) { ERR("!malloc"); goto err; } /* write actual nfit to buffer */ nfit_written = GetSystemFirmwareTable( (DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE, nfit_buffer, nfit_buffer_size); if (nfit_written == 0) { ERR("!GetSystemFirmwareTable"); goto err; } if (nfit_buffer_size != nfit_written) { errno = ERROR_INVALID_DATA; ERR("!GetSystemFirmwareTable invalid data"); goto err; } nfit_data = (struct nfit_header *)nfit_buffer; int nfit_sig = strncmp(nfit_data->signature, NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN); if (nfit_sig != 0) { ERR("!NFIT buffer has invalid data"); goto err; } struct platform_capabilities pcs = parse_nfit_buffer( nfit_buffer, nfit_buffer_size); eADR = is_auto_flush_cap_set(pcs.capabilities); Free(nfit_buffer); return eADR; err: Free(nfit_buffer); return -1; }
4,857
23.535354
80
c
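The eADR detection above boils down to testing bits 0 and 1 of the NFIT platform-capabilities word described in the comments. The following standalone sketch (illustrative only, not part of the tree) reproduces just that decision logic:

/* sketch: the NFIT capability test in isolation */
#include <stdint.h>
#include <stdio.h>

#define CHECK_BIT(val, bit) (((val) >> (bit)) & 1)

static int
platform_has_eadr(uint32_t capabilities)
{
	/* bit 0: CPU cache flush on power loss; bit 1: memory controller flush */
	return CHECK_BIT(capabilities, 0) && CHECK_BIT(capabilities, 1);
}

int
main(void)
{
	printf("%d\n", platform_has_eadr(0x3));	/* both bits set -> 1 */
	printf("%d\n", platform_has_eadr(0x2));	/* controller only -> 0 */
	return 0;
}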
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/badblocks_ndctl.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */

/*
 * badblocks_ndctl.c -- implementation of DIMMs API based on the ndctl library
 */
#define _GNU_SOURCE

#include <sys/types.h>
#include <libgen.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>

#include "libpmem2.h"
#include "pmem2_utils.h"
#include "source.h"
#include "region_namespace_ndctl.h"

#include "file.h"
#include "out.h"
#include "badblocks.h"
#include "set_badblocks.h"
#include "extent.h"

typedef int pmem2_badblock_next_type(
		struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb);

typedef void *pmem2_badblock_get_next_type(
		struct pmem2_badblock_context *bbctx);

struct pmem2_badblock_context {
	/* file descriptor */
	int fd;

	/* pmem2 file type */
	enum pmem2_file_type file_type;

	/* ndctl context */
	struct ndctl_ctx *ctx;

	/*
	 * Function pointer to:
	 * - pmem2_badblock_next_namespace() or
	 * - pmem2_badblock_next_region()
	 */
	pmem2_badblock_next_type *pmem2_badblock_next_func;

	/*
	 * Function pointer to:
	 * - pmem2_namespace_get_first_badblock() or
	 * - pmem2_namespace_get_next_badblock() or
	 * - pmem2_region_get_first_badblock() or
	 * - pmem2_region_get_next_badblock()
	 */
	pmem2_badblock_get_next_type *pmem2_badblock_get_next_func;

	/* needed only by the ndctl namespace badblock iterator */
	struct ndctl_namespace *ndns;

	/* needed only by the ndctl region badblock iterator */
	struct {
		struct ndctl_bus *bus;
		struct ndctl_region *region;
		unsigned long long ns_res; /* address of the namespace */
		unsigned long long ns_beg; /* the beginning of the namespace */
		unsigned long long ns_end; /* the end of the namespace */
	} rgn;

	/* file's extents */
	struct extents *exts;
	unsigned first_extent;
	struct pmem2_badblock last_bb;
};

/* forward declarations */
static int pmem2_badblock_next_namespace(
		struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb);
static int pmem2_badblock_next_region(
		struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb);
static void *pmem2_namespace_get_first_badblock(
		struct pmem2_badblock_context *bbctx);
static void *pmem2_region_get_first_badblock(
		struct pmem2_badblock_context *bbctx);

/*
 * badblocks_get_namespace_bounds -- (internal) returns the bounds
 *                                   (offset and size) of the given namespace
 *                                   relative to the beginning of its region
 */
static int
badblocks_get_namespace_bounds(struct ndctl_region *region,
		struct ndctl_namespace *ndns,
		unsigned long long *ns_offset,
		unsigned long long *ns_size)
{
	LOG(3, "region %p namespace %p ns_offset %p ns_size %p",
		region, ndns, ns_offset, ns_size);

	struct ndctl_pfn *pfn = ndctl_namespace_get_pfn(ndns);
	struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns);

	ASSERTne(ns_offset, NULL);
	ASSERTne(ns_size, NULL);

	if (pfn) {
		*ns_offset = ndctl_pfn_get_resource(pfn);
		if (*ns_offset == ULLONG_MAX) {
			ERR("(pfn) cannot read offset of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		*ns_size = ndctl_pfn_get_size(pfn);
		if (*ns_size == ULLONG_MAX) {
			ERR("(pfn) cannot read size of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		LOG(10, "(pfn) ns_offset 0x%llx ns_size %llu",
			*ns_offset, *ns_size);
	} else if (dax) {
		*ns_offset = ndctl_dax_get_resource(dax);
		if (*ns_offset == ULLONG_MAX) {
			ERR("(dax) cannot read offset of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		*ns_size = ndctl_dax_get_size(dax);
		if (*ns_size == ULLONG_MAX) {
			ERR("(dax) cannot read size of the namespace");
			return
PMEM2_E_CANNOT_READ_BOUNDS; } LOG(10, "(dax) ns_offset 0x%llx ns_size %llu", *ns_offset, *ns_size); } else { /* raw or btt */ *ns_offset = ndctl_namespace_get_resource(ndns); if (*ns_offset == ULLONG_MAX) { ERR("(raw/btt) cannot read offset of the namespace"); return PMEM2_E_CANNOT_READ_BOUNDS; } *ns_size = ndctl_namespace_get_size(ndns); if (*ns_size == ULLONG_MAX) { ERR("(raw/btt) cannot read size of the namespace"); return PMEM2_E_CANNOT_READ_BOUNDS; } LOG(10, "(raw/btt) ns_offset 0x%llx ns_size %llu", *ns_offset, *ns_size); } unsigned long long region_offset = ndctl_region_get_resource(region); if (region_offset == ULLONG_MAX) { ERR("!cannot read offset of the region"); return PMEM2_E_ERRNO; } LOG(10, "region_offset 0x%llx", region_offset); *ns_offset -= region_offset; return 0; } /* * badblocks_devdax_clear_one_badblock -- (internal) clear one bad block * in the dax device */ static int badblocks_devdax_clear_one_badblock(struct ndctl_bus *bus, unsigned long long address, unsigned long long length) { LOG(3, "bus %p address 0x%llx length %llu (bytes)", bus, address, length); int ret; struct ndctl_cmd *cmd_ars_cap = ndctl_bus_cmd_new_ars_cap(bus, address, length); if (cmd_ars_cap == NULL) { ERR("ndctl_bus_cmd_new_ars_cap() failed (bus '%s')", ndctl_bus_get_provider(bus)); return PMEM2_E_ERRNO; } ret = ndctl_cmd_submit(cmd_ars_cap); if (ret) { ERR("ndctl_cmd_submit() failed (bus '%s')", ndctl_bus_get_provider(bus)); /* ndctl_cmd_submit() returns -errno */ goto out_ars_cap; } struct ndctl_range range; ret = ndctl_cmd_ars_cap_get_range(cmd_ars_cap, &range); if (ret) { ERR("ndctl_cmd_ars_cap_get_range() failed"); /* ndctl_cmd_ars_cap_get_range() returns -errno */ goto out_ars_cap; } struct ndctl_cmd *cmd_clear_error = ndctl_bus_cmd_new_clear_error( range.address, range.length, cmd_ars_cap); ret = ndctl_cmd_submit(cmd_clear_error); if (ret) { ERR("ndctl_cmd_submit() failed (bus '%s')", ndctl_bus_get_provider(bus)); /* ndctl_cmd_submit() returns -errno */ goto out_clear_error; } size_t cleared = ndctl_cmd_clear_error_get_cleared(cmd_clear_error); LOG(4, "cleared %zu out of %llu bad blocks", cleared, length); ASSERT(cleared <= length); if (cleared < length) { ERR("failed to clear %llu out of %llu bad blocks", length - cleared, length); errno = ENXIO; /* ndctl handles such error in this way */ ret = PMEM2_E_ERRNO; } else { ret = 0; } out_clear_error: ndctl_cmd_unref(cmd_clear_error); out_ars_cap: ndctl_cmd_unref(cmd_ars_cap); return ret; } /* * pmem2_badblock_context_new -- allocate and create a new bad block context */ int pmem2_badblock_context_new(const struct pmem2_source *src, struct pmem2_badblock_context **bbctx) { LOG(3, "src %p bbctx %p", src, bbctx); ASSERTne(bbctx, NULL); if (src->type == PMEM2_SOURCE_ANON) { ERR("Anonymous source does not support bad blocks"); return PMEM2_E_NOSUPP; } ASSERTeq(src->type, PMEM2_SOURCE_FD); struct ndctl_ctx *ctx; struct ndctl_region *region; struct ndctl_namespace *ndns; struct pmem2_badblock_context *tbbctx = NULL; enum pmem2_file_type pmem2_type; int ret = PMEM2_E_UNKNOWN; *bbctx = NULL; errno = ndctl_new(&ctx) * (-1); if (errno) { ERR("!ndctl_new"); return PMEM2_E_ERRNO; } pmem2_type = src->value.ftype; ret = pmem2_region_namespace(ctx, src, &region, &ndns); if (ret) { LOG(1, "getting region and namespace failed"); goto exit_ndctl_unref; } tbbctx = pmem2_zalloc(sizeof(struct pmem2_badblock_context), &ret); if (ret) goto exit_ndctl_unref; tbbctx->fd = src->value.fd; tbbctx->file_type = pmem2_type; tbbctx->ctx = ctx; if (region == NULL || ndns == 
NULL) {
		/* did not find any matching device */
		*bbctx = tbbctx;
		return 0;
	}

	if (ndctl_namespace_get_mode(ndns) == NDCTL_NS_MODE_FSDAX) {
		tbbctx->ndns = ndns;
		tbbctx->pmem2_badblock_next_func =
			pmem2_badblock_next_namespace;
		tbbctx->pmem2_badblock_get_next_func =
			pmem2_namespace_get_first_badblock;
	} else {
		unsigned long long ns_beg, ns_size, ns_end;

		ret = badblocks_get_namespace_bounds(
				region, ndns,
				&ns_beg, &ns_size);
		if (ret) {
			LOG(1, "cannot read namespace's bounds");
			goto error_free_all;
		}

		ns_end = ns_beg + ns_size - 1;

		LOG(10,
			"namespace: begin %llu, end %llu size %llu (in 512B sectors)",
			B2SEC(ns_beg), B2SEC(ns_end + 1) - 1, B2SEC(ns_size));

		tbbctx->rgn.bus = ndctl_region_get_bus(region);
		tbbctx->rgn.region = region;
		tbbctx->rgn.ns_beg = ns_beg;
		tbbctx->rgn.ns_end = ns_end;
		tbbctx->rgn.ns_res = ns_beg + ndctl_region_get_resource(region);
		tbbctx->pmem2_badblock_next_func =
			pmem2_badblock_next_region;
		tbbctx->pmem2_badblock_get_next_func =
			pmem2_region_get_first_badblock;
	}

	if (pmem2_type == PMEM2_FTYPE_REG) {
		/* only regular files have extents */
		ret = pmem2_extents_create_get(src->value.fd, &tbbctx->exts);
		if (ret) {
			LOG(1, "getting extents of fd %i failed",
				src->value.fd);
			goto error_free_all;
		}
	}

	/* set the context */
	*bbctx = tbbctx;

	return 0;

error_free_all:
	pmem2_extents_destroy(&tbbctx->exts);
	Free(tbbctx);

exit_ndctl_unref:
	ndctl_unref(ctx);

	return ret;
}

/*
 * pmem2_badblock_context_delete -- delete and free the bad block context
 */
void
pmem2_badblock_context_delete(struct pmem2_badblock_context **bbctx)
{
	LOG(3, "bbctx %p", bbctx);
	ASSERTne(bbctx, NULL);

	if (*bbctx == NULL)
		return;

	struct pmem2_badblock_context *tbbctx = *bbctx;

	pmem2_extents_destroy(&tbbctx->exts);
	ndctl_unref(tbbctx->ctx);
	Free(tbbctx);

	*bbctx = NULL;
}

/*
 * pmem2_namespace_get_next_badblock -- (internal) wrapper for
 *                                      ndctl_namespace_get_next_badblock
 */
static void *
pmem2_namespace_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	return ndctl_namespace_get_next_badblock(bbctx->ndns);
}

/*
 * pmem2_namespace_get_first_badblock -- (internal) wrapper for
 *                                       ndctl_namespace_get_first_badblock
 */
static void *
pmem2_namespace_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	bbctx->pmem2_badblock_get_next_func = pmem2_namespace_get_next_badblock;
	return ndctl_namespace_get_first_badblock(bbctx->ndns);
}

/*
 * pmem2_region_get_next_badblock -- (internal) wrapper for
 *                                   ndctl_region_get_next_badblock
 */
static void *
pmem2_region_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	return ndctl_region_get_next_badblock(bbctx->rgn.region);
}

/*
 * pmem2_region_get_first_badblock -- (internal) wrapper for
 *                                    ndctl_region_get_first_badblock
 */
static void *
pmem2_region_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	bbctx->pmem2_badblock_get_next_func = pmem2_region_get_next_badblock;
	return ndctl_region_get_first_badblock(bbctx->rgn.region);
}

/*
 * pmem2_badblock_next_namespace -- (internal) version of pmem2_badblock_next()
 *                                  called for ndctl with namespace badblock
 *                                  iterator
 *
 * This function works only for fsdax, but does not require any special
 * permissions. 
 */
static int
pmem2_badblock_next_namespace(struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);
	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	struct badblock *bbn;

	bbn = bbctx->pmem2_badblock_get_next_func(bbctx);
	if (bbn == NULL)
		return PMEM2_E_NO_BAD_BLOCK_FOUND;

	/*
	 * libndctl returns offset and length of a bad block
	 * both expressed in 512B sectors. Offset is relative
	 * to the beginning of the namespace.
	 */
	bb->offset = SEC2B(bbn->offset);
	bb->length = SEC2B(bbn->len);

	return 0;
}

/*
 * pmem2_badblock_next_region -- (internal) version of pmem2_badblock_next()
 *                               called for ndctl with region badblock iterator
 *
 * This function works for all types of namespaces, but requires read access to
 * privileged device information.
 */
static int
pmem2_badblock_next_region(struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);
	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	unsigned long long bb_beg, bb_end;
	unsigned long long beg, end;
	struct badblock *bbn;

	unsigned long long ns_beg = bbctx->rgn.ns_beg;
	unsigned long long ns_end = bbctx->rgn.ns_end;

	do {
		bbn = bbctx->pmem2_badblock_get_next_func(bbctx);
		if (bbn == NULL)
			return PMEM2_E_NO_BAD_BLOCK_FOUND;

		LOG(10,
			"region bad block: begin %llu end %llu length %u (in 512B sectors)",
			bbn->offset, bbn->offset + bbn->len - 1, bbn->len);

		/*
		 * libndctl returns offset and length of a bad block
		 * both expressed in 512B sectors. Offset is relative
		 * to the beginning of the region.
		 */
		bb_beg = SEC2B(bbn->offset);
		bb_end = bb_beg + SEC2B(bbn->len) - 1;

	} while (bb_beg > ns_end || ns_beg > bb_end);

	beg = (bb_beg > ns_beg) ? bb_beg : ns_beg;
	end = (bb_end < ns_end) ? bb_end : ns_end;

	/*
	 * Form a new bad block structure with offset and length
	 * expressed in bytes and offset relative to the beginning
	 * of the namespace.
	 */
	bb->offset = beg - ns_beg;
	bb->length = end - beg + 1;

	LOG(4,
		"namespace bad block: begin %llu end %llu length %llu (in 512B sectors)",
		B2SEC(beg - ns_beg), B2SEC(end - ns_beg), B2SEC(end - beg) + 1);

	return 0;
}

/*
 * pmem2_badblock_next -- get the next bad block
 */
int
pmem2_badblock_next(struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);
	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	struct pmem2_badblock bbn;
	unsigned long long bb_beg;
	unsigned long long bb_end;
	unsigned long long bb_len;
	unsigned long long bb_off;
	unsigned long long ext_beg;
	unsigned long long ext_end;
	unsigned e;
	int ret;

	if (bbctx->rgn.region == NULL && bbctx->ndns == NULL) {
		/* did not find any matching device */
		return PMEM2_E_NO_BAD_BLOCK_FOUND;
	}

	struct extents *exts = bbctx->exts;

	/* DAX devices have no extents */
	if (!exts) {
		ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
		*bb = bbn;
		return ret;
	}

	/*
	 * There is at least one extent.
	 * Loop until:
	 * 1) a bad block overlaps with an extent or
	 * 2) there are no more bad blocks.
	 */
	int bb_overlaps_with_extent = 0;
	do {
		if (bbctx->last_bb.length) {
			/*
			 * We have saved the last bad block to check it
			 * with the next extent saved
			 * in bbctx->first_extent. 
*/ ASSERTne(bbctx->first_extent, 0); bbn = bbctx->last_bb; bbctx->last_bb.offset = 0; bbctx->last_bb.length = 0; } else { ASSERTeq(bbctx->first_extent, 0); /* look for the next bad block */ ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn); if (ret) return ret; } bb_beg = bbn.offset; bb_end = bb_beg + bbn.length - 1; for (e = bbctx->first_extent; e < exts->extents_count; e++) { ext_beg = exts->extents[e].offset_physical; ext_end = ext_beg + exts->extents[e].length - 1; /* check if the bad block overlaps with the extent */ if (bb_beg <= ext_end && ext_beg <= bb_end) { /* bad block overlaps with the extent */ bb_overlaps_with_extent = 1; if (bb_end > ext_end && e + 1 < exts->extents_count) { /* * The bad block is longer than * the extent and there are * more extents. * Save the current bad block * to check it with the next extent. */ bbctx->first_extent = e + 1; bbctx->last_bb = bbn; } else { /* * All extents were checked * with the current bad block. */ bbctx->first_extent = 0; bbctx->last_bb.length = 0; bbctx->last_bb.offset = 0; } break; } } /* check all extents with the next bad block */ if (bb_overlaps_with_extent == 0) { bbctx->first_extent = 0; bbctx->last_bb.length = 0; bbctx->last_bb.offset = 0; } } while (bb_overlaps_with_extent == 0); /* bad block overlaps with an extent */ bb_beg = (bb_beg > ext_beg) ? bb_beg : ext_beg; bb_end = (bb_end < ext_end) ? bb_end : ext_end; bb_len = bb_end - bb_beg + 1; bb_off = bb_beg + exts->extents[e].offset_logical - exts->extents[e].offset_physical; LOG(10, "bad block found: physical offset: %llu, length: %llu", bb_beg, bb_len); /* make sure the offset is block-aligned */ unsigned long long not_block_aligned = bb_off & (exts->blksize - 1); if (not_block_aligned) { bb_off -= not_block_aligned; bb_len += not_block_aligned; } /* make sure the length is block-aligned */ bb_len = ALIGN_UP(bb_len, exts->blksize); LOG(4, "bad block found: logical offset: %llu, length: %llu", bb_off, bb_len); /* * Return the bad block with offset and length * expressed in bytes and offset relative * to the beginning of the file. 
*/ bb->offset = bb_off; bb->length = bb_len; return 0; } /* * pmem2_badblock_clear_fsdax -- (internal) clear one bad block * in a FSDAX device */ static int pmem2_badblock_clear_fsdax(int fd, const struct pmem2_badblock *bb) { LOG(3, "fd %i badblock %p", fd, bb); ASSERTne(bb, NULL); LOG(10, "clearing a bad block: fd %i logical offset %zu length %zu (in 512B sectors)", fd, B2SEC(bb->offset), B2SEC(bb->length)); /* fallocate() takes offset as the off_t type */ if (bb->offset > (size_t)INT64_MAX) { ERR("bad block's offset is greater than INT64_MAX"); return PMEM2_E_OFFSET_OUT_OF_RANGE; } /* fallocate() takes length as the off_t type */ if (bb->length > (size_t)INT64_MAX) { ERR("bad block's length is greater than INT64_MAX"); return PMEM2_E_LENGTH_OUT_OF_RANGE; } off_t offset = (off_t)bb->offset; off_t length = (off_t)bb->length; /* deallocate bad blocks */ if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, length)) { ERR("!fallocate"); return PMEM2_E_ERRNO; } /* allocate new blocks */ if (fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, length)) { ERR("!fallocate"); return PMEM2_E_ERRNO; } return 0; } /* * pmem2_badblock_clear_devdax -- (internal) clear one bad block * in a DAX device */ static int pmem2_badblock_clear_devdax(const struct pmem2_badblock_context *bbctx, const struct pmem2_badblock *bb) { LOG(3, "bbctx %p bb %p", bbctx, bb); ASSERTne(bb, NULL); ASSERTne(bbctx, NULL); ASSERTne(bbctx->rgn.bus, NULL); ASSERTne(bbctx->rgn.ns_res, 0); LOG(4, "clearing a bad block: offset %zu length %zu (in 512B sectors)", B2SEC(bb->offset), B2SEC(bb->length)); int ret = badblocks_devdax_clear_one_badblock(bbctx->rgn.bus, bb->offset + bbctx->rgn.ns_res, bb->length); if (ret) { LOG(1, "failed to clear a bad block: offset %zu length %zu (in 512B sectors)", B2SEC(bb->offset), B2SEC(bb->length)); return ret; } return 0; } /* * pmem2_badblock_clear -- clear one bad block */ int pmem2_badblock_clear(struct pmem2_badblock_context *bbctx, const struct pmem2_badblock *bb) { LOG(3, "bbctx %p badblock %p", bbctx, bb); ASSERTne(bbctx, NULL); ASSERTne(bb, NULL); if (bbctx->file_type == PMEM2_FTYPE_DEVDAX) return pmem2_badblock_clear_devdax(bbctx, bb); ASSERTeq(bbctx->file_type, PMEM2_FTYPE_REG); return pmem2_badblock_clear_fsdax(bbctx->fd, bb); }
19,316
24.218016
80
c
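A typical caller drives the iterator above until pmem2_badblock_next() reports PMEM2_E_NO_BAD_BLOCK_FOUND, clearing blocks as it goes. This hypothetical sketch (not part of the tree) follows the signatures defined in the file:

/* sketch: enumerate and clear all bad blocks of a source */
#include "libpmem2.h"

static int
clear_all_badblocks(const struct pmem2_source *src)
{
	struct pmem2_badblock_context *bbctx;
	struct pmem2_badblock bb;

	int ret = pmem2_badblock_context_new(src, &bbctx);
	if (ret)
		return ret;

	/* pmem2_badblock_next() returns 0 for each bad block found */
	while ((ret = pmem2_badblock_next(bbctx, &bb)) == 0) {
		ret = pmem2_badblock_clear(bbctx, &bb);
		if (ret)
			break;
	}
	if (ret == PMEM2_E_NO_BAD_BLOCK_FOUND)
		ret = 0;	/* iteration finished cleanly */

	pmem2_badblock_context_delete(&bbctx);
	return ret;
}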
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/region_namespace_ndctl.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * region_namespace_ndctl.h -- internal definitions for libpmem2 * common ndctl functions */ #ifndef PMDK_REGION_NAMESPACE_NDCTL_H #define PMDK_REGION_NAMESPACE_NDCTL_H 1 #include "os.h" #ifdef __cplusplus extern "C" { #endif #define FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) \ ndctl_bus_foreach(ctx, bus) \ ndctl_region_foreach(bus, region) \ ndctl_namespace_foreach(region, ndns) int pmem2_region_namespace(struct ndctl_ctx *ctx, const struct pmem2_source *src, struct ndctl_region **pregion, struct ndctl_namespace **pndns); #ifdef __cplusplus } #endif #endif /* PMDK_REGION_NAMESPACE_NDCTL_H */
754
21.878788
64
h
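FOREACH_BUS_REGION_NAMESPACE above expands into three nested ndctl foreach loops. A minimal sketch of walking every namespace with it (hypothetical helper, not part of the tree, assuming libndctl is initialized as shown):

/* sketch: count all namespaces visible through ndctl */
#include <ndctl/libndctl.h>
#include "region_namespace_ndctl.h"

static int
count_namespaces(void)
{
	struct ndctl_ctx *ctx;
	struct ndctl_bus *bus;
	struct ndctl_region *region;
	struct ndctl_namespace *ndns;
	int count = 0;

	if (ndctl_new(&ctx) != 0)
		return -1;

	/* expands to ndctl_bus_foreach / ndctl_region_foreach / ndctl_namespace_foreach */
	FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns)
		count++;

	ndctl_unref(ctx);
	return count;
}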
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/vm_reservation.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * vm_reservation.c -- implementation of virtual memory allocation API */ #include "libpmem2.h" /* * pmem2_vm_reservation_new -- creates new virtual memory reservation */ int pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv, size_t size, void *address) { return PMEM2_E_NOSUPP; } /* * pmem2_vm_reservation_delete -- deletes reservation bound to * structure pmem2_vm_reservation */ int pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv) { return PMEM2_E_NOSUPP; }
614
20.206897
70
c
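Since both entry points above are stubs that return PMEM2_E_NOSUPP, a portable caller has to treat reservations as an optional feature. A small hypothetical sketch (not part of the tree):

/* sketch: gracefully degrade when reservations are unsupported */
#include "libpmem2.h"

static int
try_reserve(size_t size)
{
	struct pmem2_vm_reservation *rsv;

	int ret = pmem2_vm_reservation_new(&rsv, size, NULL);
	if (ret == PMEM2_E_NOSUPP) {
		/* not implemented on this platform yet -- fall back */
		return 0;
	}
	if (ret)
		return ret;

	/* ... map into the reservation here ... */

	return pmem2_vm_reservation_delete(&rsv);
}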
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/usc_windows.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * usc_windows.c -- pmem2 usc function for windows
 */

#include "alloc.h"
#include "source.h"
#include "out.h"
#include "libpmem2.h"
#include "pmem2_utils.h"
#define GUID_SIZE sizeof("XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX")
#define VOLUME_PATH_SIZE sizeof("\\\\?\\Volume{}") + (GUID_SIZE - 2 /* \0 */)

/*
 * get_volume_handle -- returns volume handle
 */
static int
get_volume_handle(HANDLE handle, HANDLE *volume_handle)
{
	wchar_t *volume;
	wchar_t tmp[10];
	DWORD len =
		GetFinalPathNameByHandleW(handle, tmp, 10, VOLUME_NAME_GUID);

	if (len == 0) {
		ERR("!!GetFinalPathNameByHandleW");
		return pmem2_lasterror_to_err();
	}

	len *= sizeof(wchar_t);

	int err;
	volume = pmem2_malloc(len, &err);
	if (volume == NULL)
		return err;

	if (!GetFinalPathNameByHandleW(handle, volume, len,
		VOLUME_NAME_GUID)) {
		Free(volume);
		ERR("!!GetFinalPathNameByHandleW");
		return pmem2_lasterror_to_err();
	}

	ASSERTeq(volume[VOLUME_PATH_SIZE], '\\');
	volume[VOLUME_PATH_SIZE] = '\0';

	*volume_handle = CreateFileW(volume, /* path to the file */
		/* request access to send ioctl to the file */
		FILE_READ_ATTRIBUTES,
		/* do not block access to the file */
		FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
		NULL, /* security attributes */
		OPEN_EXISTING, /* open only if it exists */
		FILE_ATTRIBUTE_NORMAL, /* no attributes */
		NULL); /* used only for new files */

	Free(volume);
	if (*volume_handle == INVALID_HANDLE_VALUE) {
		ERR("!!CreateFileW");
		return pmem2_lasterror_to_err();
	}

	return 0;
}

static int
get_device_guid(HANDLE handle, GUID *guid)
{
	HANDLE vHandle;
	int ret = get_volume_handle(handle, &vHandle);
	if (vHandle == INVALID_HANDLE_VALUE)
		return ret;

	STORAGE_DEVICE_NUMBER_EX sdn;
	sdn.DeviceNumber = -1;
	DWORD dwBytesReturned = 0;
	if (!DeviceIoControl(vHandle,
		IOCTL_STORAGE_GET_DEVICE_NUMBER_EX,
		NULL, 0,
		&sdn, sizeof(sdn),
		&dwBytesReturned, NULL)) {
		/*
		 * IOCTL_STORAGE_GET_DEVICE_NUMBER_EX is not supported
		 * on this server
		 */
		ERR(
			"Getting device id (IOCTL_STORAGE_GET_DEVICE_NUMBER_EX) is not supported on this system");
		CloseHandle(vHandle);
		return PMEM2_E_NOSUPP;
	}
	*guid = sdn.DeviceGuid;

	CloseHandle(vHandle);
	return 0;
}

int
pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id,
	size_t *len)
{
	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not have device id");
		return PMEM2_E_NOSUPP;
	}

	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);

	if (id == NULL) {
		*len = GUID_SIZE * sizeof(*id);
		return 0;
	}

	if (*len < GUID_SIZE * sizeof(*id)) {
		ERR("id buffer is too small");
		return PMEM2_E_BUFFER_TOO_SMALL;
	}

	GUID guid;
	int ret = get_device_guid(src->value.handle, &guid);
	if (ret)
		return ret;

	_snwprintf(id, GUID_SIZE,
		L"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
		guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
		guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4],
		guid.Data4[5], guid.Data4[6], guid.Data4[7]);

	return 0;
}

int
pmem2_source_device_idU(const struct pmem2_source *src, char *id, size_t *len)
{
	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not have device id");
		return PMEM2_E_NOSUPP;
	}

	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);

	if (id == NULL) {
		*len = GUID_SIZE * sizeof(*id);
		return 0;
	}
	if (*len < GUID_SIZE * sizeof(*id)) {
		ERR("id buffer is too small");
		return PMEM2_E_BUFFER_TOO_SMALL;
	}

	GUID guid;
	int ret = get_device_guid(src->value.handle, &guid);
	if (ret)
		return ret;

	if (util_snprintf(id, GUID_SIZE,
		"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
		guid.Data1,
guid.Data2, guid.Data3, guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]) < 0) { ERR("!snprintf"); return PMEM2_E_ERRNO; } return 0; } int pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc) { LOG(3, "cfg %p, usc %p", src, usc); if (src->type == PMEM2_SOURCE_ANON) { ERR("Anonymous source does not support unsafe shutdown count"); return PMEM2_E_NOSUPP; } ASSERTeq(src->type, PMEM2_SOURCE_HANDLE); *usc = 0; HANDLE vHandle; int err = get_volume_handle(src->value.handle, &vHandle); if (vHandle == INVALID_HANDLE_VALUE) return err; STORAGE_PROPERTY_QUERY prop; DWORD dwSize; prop.PropertyId = StorageDeviceUnsafeShutdownCount; prop.QueryType = PropertyExistsQuery; prop.AdditionalParameters[0] = 0; STORAGE_DEVICE_UNSAFE_SHUTDOWN_COUNT ret; BOOL bResult = DeviceIoControl(vHandle, IOCTL_STORAGE_QUERY_PROPERTY, &prop, sizeof(prop), &ret, sizeof(ret), (LPDWORD)&dwSize, (LPOVERLAPPED)NULL); if (!bResult) { ERR( "Getting unsafe shutdown count is not supported on this system"); CloseHandle(vHandle); return PMEM2_E_NOSUPP; } prop.QueryType = PropertyStandardQuery; bResult = DeviceIoControl(vHandle, IOCTL_STORAGE_QUERY_PROPERTY, &prop, sizeof(prop), &ret, sizeof(ret), (LPDWORD)&dwSize, (LPOVERLAPPED)NULL); CloseHandle(vHandle); if (!bResult) { ERR("!!DeviceIoControl"); return pmem2_lasterror_to_err(); } *usc = ret.UnsafeShutdownCount; return 0; }
5,261
22.283186
93
c
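pmem2_source_device_idU() above follows the common two-call size-query protocol: a NULL buffer makes it report only the required length. A hypothetical caller (sketch, not part of the tree):

/* sketch: query device id and unsafe shutdown count on Windows */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "libpmem2.h"

static void
print_device_info(const struct pmem2_source *src)
{
	size_t len;
	uint64_t usc;

	/* first call with NULL sets len to the required buffer size */
	if (pmem2_source_device_idU(src, NULL, &len) == 0) {
		char *id = malloc(len);
		if (id && pmem2_source_device_idU(src, id, &len) == 0)
			printf("device id: %s\n", id);
		free(id);
	}

	if (pmem2_source_device_usc(src, &usc) == 0)
		printf("unsafe shutdown count: %llu\n",
				(unsigned long long)usc);
}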
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/ravl_interval.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ravl_interval.c -- ravl_interval implementation */ #include "alloc.h" #include "map.h" #include "ravl_interval.h" #include "pmem2_utils.h" #include "sys_util.h" #include "os_thread.h" #include "ravl.h" /* * ravl_interval - structure representing two points * on the number line */ struct ravl_interval { struct ravl *tree; ravl_interval_min *get_min; ravl_interval_max *get_max; }; /* * ravl_interval_node - structure holding min, max functions and address */ struct ravl_interval_node { void *addr; ravl_interval_min *get_min; ravl_interval_max *get_max; }; /* * ravl_interval_compare -- compare intervals by its boundaries, * no overlapping allowed */ static int ravl_interval_compare(const void *lhs, const void *rhs) { const struct ravl_interval_node *left = lhs; const struct ravl_interval_node *right = rhs; if (left->get_min(left->addr) < right->get_min(right->addr) && left->get_max(left->addr) <= right->get_min(right->addr)) return -1; if (left->get_min(left->addr) > right->get_min(right->addr) && left->get_max(left->addr) >= right->get_min(right->addr)) return 1; return 0; } /* * ravl_interval_delete - finalize the ravl interval module */ void ravl_interval_delete(struct ravl_interval *ri) { ravl_delete(ri->tree); ri->tree = NULL; Free(ri); } /* * ravl_interval_new -- initialize the ravl interval module */ struct ravl_interval * ravl_interval_new(ravl_interval_min *get_min, ravl_interval_max *get_max) { int ret; struct ravl_interval *interval = pmem2_malloc(sizeof(*interval), &ret); if (ret) goto ret_null; interval->tree = ravl_new_sized(ravl_interval_compare, sizeof(struct ravl_interval_node)); if (!(interval->tree)) goto free_alloc; interval->get_min = get_min; interval->get_max = get_max; return interval; free_alloc: Free(interval); ret_null: return NULL; } /* * ravl_interval_insert -- insert interval entry into the tree */ int ravl_interval_insert(struct ravl_interval *ri, void *addr) { struct ravl_interval_node rin; rin.addr = addr; rin.get_min = ri->get_min; rin.get_max = ri->get_max; if (ravl_emplace_copy(ri->tree, &rin)) return PMEM2_E_ERRNO; return 0; } /* * ravl_interval_remove -- remove interval entry from the tree */ int ravl_interval_remove(struct ravl_interval *ri, struct ravl_interval_node *rin) { struct ravl_node *node = ravl_find(ri->tree, rin, RAVL_PREDICATE_EQUAL); if (!node) return PMEM2_E_MAPPING_NOT_FOUND; ravl_remove(ri->tree, node); return 0; } /* * ravl_interval_find_prior_or_eq -- find overlapping interval starting prior to * the current one or at the same place */ static struct ravl_interval_node * ravl_interval_find_prior_or_eq(struct ravl *tree, struct ravl_interval_node *rin) { struct ravl_node *node; struct ravl_interval_node *cur; node = ravl_find(tree, rin, RAVL_PREDICATE_LESS_EQUAL); if (!node) return NULL; cur = ravl_data(node); /* * If the end of the found interval is below the searched boundary, then * this is not our interval. 
*/ if (cur->get_max(cur->addr) <= rin->get_min(rin->addr)) return NULL; return cur; } /* * ravl_interval_find_later -- find overlapping interval starting later than * the current one */ static struct ravl_interval_node * ravl_interval_find_later(struct ravl *tree, struct ravl_interval_node *rin) { struct ravl_node *node; struct ravl_interval_node *cur; node = ravl_find(tree, rin, RAVL_PREDICATE_GREATER); if (!node) return NULL; cur = ravl_data(node); /* * If the beginning of the found interval is above the end of * the searched range, then this is not our interval. */ if (cur->get_min(cur->addr) >= rin->get_max(rin->addr)) return NULL; return cur; } /* * ravl_interval_find_equal -- find the interval with exact (min, max) range */ struct ravl_interval_node * ravl_interval_find_equal(struct ravl_interval *ri, void *addr) { struct ravl_interval_node range; range.addr = addr; range.get_min = ri->get_min; range.get_max = ri->get_max; struct ravl_node *node; node = ravl_find(ri->tree, &range, RAVL_PREDICATE_EQUAL); if (!node) return NULL; return ravl_data(node); } /* * ravl_interval_find -- find the earliest interval within (min, max) range */ struct ravl_interval_node * ravl_interval_find(struct ravl_interval *ri, void *addr) { struct ravl_interval_node range; range.addr = addr; range.get_min = ri->get_min; range.get_max = ri->get_max; struct ravl_interval_node *cur; cur = ravl_interval_find_prior_or_eq(ri->tree, &range); if (!cur) cur = ravl_interval_find_later(ri->tree, &range); return cur; } /* * ravl_interval_data -- returns the data contained within interval node */ void * ravl_interval_data(struct ravl_interval_node *rin) { return (void *)rin->addr; }
4,963
21.26009
80
c
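The tree above never inspects the stored objects itself; it calls back through get_min/get_max. The sketch below assumes the ravl_interval_min/ravl_interval_max typedefs in ravl_interval.h (not shown here) are functions of the form size_t (void *addr), which is how the comparator above invokes them:

/* sketch: index [begin, end) ranges and look up an overlapping one */
#include <stddef.h>
#include "ravl_interval.h"

struct range {
	size_t beg;
	size_t end;
};

static size_t
range_min(void *addr)
{
	return ((struct range *)addr)->beg;
}

static size_t
range_max(void *addr)
{
	return ((struct range *)addr)->end;
}

static struct range *
find_overlap(struct ravl_interval *ri, struct range *probe)
{
	struct ravl_interval_node *node = ravl_interval_find(ri, probe);
	return node ? ravl_interval_data(node) : NULL;
}

/*
 * usage:
 *	struct ravl_interval *ri = ravl_interval_new(range_min, range_max);
 *	ravl_interval_insert(ri, &(struct range){4096, 8192});
 *	struct range *hit = find_overlap(ri, &(struct range){5000, 5001});
 *	ravl_interval_delete(ri);
 */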
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/map_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * map_windows.c -- pmem2_map (Windows) */ #include <stdbool.h> #include "libpmem2.h" #include "alloc.h" #include "auto_flush.h" #include "config.h" #include "map.h" #include "out.h" #include "persist.h" #include "pmem2_utils.h" #include "source.h" #include "util.h" #define HIDWORD(x) ((DWORD)((x) >> 32)) #define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF)) /* requested CACHE_LINE, available PAGE */ #define REQ_CL_AVAIL_PG \ "requested granularity not available because specified volume is not a direct access (DAX) volume" /* requested BYTE, available PAGE */ #define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG /* requested BYTE, available CACHE_LINE */ #define REQ_BY_AVAIL_CL \ "requested granularity not available because the platform doesn't support eADR" /* indicates the cases in which the error cannot occur */ #define GRAN_IMPOSSIBLE "impossible" static const char *granularity_err_msg[3][3] = { /* requested granularity / available granularity */ /* -------------------------------------------------------------------- */ /* BYTE CACHE_LINE PAGE */ /* -------------------------------------------------------------------- */ /* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG}, /* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG}, /* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}}; /* * create_mapping -- creates file mapping object for a file */ static HANDLE create_mapping(HANDLE hfile, size_t offset, size_t length, DWORD protect, unsigned long *err) { size_t max_size = length + offset; SetLastError(0); HANDLE mh = CreateFileMapping(hfile, NULL, /* security attributes */ protect, HIDWORD(max_size), LODWORD(max_size), NULL); *err = GetLastError(); if (!mh) { ERR("!!CreateFileMapping"); return NULL; } if (*err == ERROR_ALREADY_EXISTS) { ERR("!!CreateFileMapping"); CloseHandle(mh); return NULL; } /* if the handle is valid the last error is undefined */ *err = 0; return mh; } /* * is_direct_access -- check if the specified volume is a * direct access (DAX) volume */ static int is_direct_access(HANDLE fh) { DWORD filesystemFlags; if (!GetVolumeInformationByHandleW(fh, NULL, 0, NULL, NULL, &filesystemFlags, NULL, 0)) { ERR("!!GetVolumeInformationByHandleW"); /* always return a negative value */ return pmem2_lasterror_to_err(); } if (filesystemFlags & FILE_DAX_VOLUME) return 1; return 0; } /* * pmem2_map -- map memory according to provided config */ int pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src, struct pmem2_map **map_ptr) { LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr); int ret = 0; unsigned long err = 0; size_t file_size; *map_ptr = NULL; if ((int)cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) { ERR( "please define the max granularity requested for the mapping"); return PMEM2_E_GRANULARITY_NOT_SET; } ret = pmem2_source_size(src, &file_size); if (ret) return ret; size_t src_alignment; ret = pmem2_source_alignment(src, &src_alignment); if (ret) return ret; size_t length; ret = pmem2_config_validate_length(cfg, file_size, src_alignment); if (ret) return ret; size_t effective_offset; ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment); if (ret) return ret; if (src->type == PMEM2_SOURCE_ANON) effective_offset = 0; /* without user-provided length, map to the end of the file */ if (cfg->length) length = cfg->length; else length = file_size - effective_offset; HANDLE map_handle = INVALID_HANDLE_VALUE; if (src->type == PMEM2_SOURCE_HANDLE) 
{ map_handle = src->value.handle; } else if (src->type == PMEM2_SOURCE_ANON) { /* no extra settings */ } else { ASSERT(0); } DWORD proto = PAGE_READWRITE; DWORD access = FILE_MAP_ALL_ACCESS; /* Unsupported flag combinations */ if ((cfg->protection_flag == PMEM2_PROT_NONE) || (cfg->protection_flag == PMEM2_PROT_WRITE) || (cfg->protection_flag == PMEM2_PROT_EXEC) || (cfg->protection_flag == (PMEM2_PROT_WRITE | PMEM2_PROT_EXEC))) { ERR("Windows does not support " "this protection flag combination."); return PMEM2_E_NOSUPP; } /* Translate protection flags into Windows flags */ if (cfg->protection_flag & PMEM2_PROT_WRITE) { if (cfg->protection_flag & PMEM2_PROT_EXEC) { proto = PAGE_EXECUTE_READWRITE; access = FILE_MAP_READ | FILE_MAP_WRITE | FILE_MAP_EXECUTE; } else { /* * Due to the already done exclusion * of incorrect combinations, PROT_WRITE * implies PROT_READ */ proto = PAGE_READWRITE; access = FILE_MAP_READ | FILE_MAP_WRITE; } } else if (cfg->protection_flag & PMEM2_PROT_READ) { if (cfg->protection_flag & PMEM2_PROT_EXEC) { proto = PAGE_EXECUTE_READ; access = FILE_MAP_READ | FILE_MAP_EXECUTE; } else { proto = PAGE_READONLY; access = FILE_MAP_READ; } } if (cfg->sharing == PMEM2_PRIVATE) { if (cfg->protection_flag & PMEM2_PROT_EXEC) { proto = PAGE_EXECUTE_WRITECOPY; access = FILE_MAP_EXECUTE | FILE_MAP_COPY; } else { /* * If FILE_MAP_COPY is set, * protection is changed to read/write */ proto = PAGE_READONLY; access = FILE_MAP_COPY; } } /* create a file mapping handle */ HANDLE mh = create_mapping(map_handle, effective_offset, length, proto, &err); if (!mh) { if (err == ERROR_ALREADY_EXISTS) { ERR("mapping already exists"); return PMEM2_E_MAPPING_EXISTS; } else if (err == ERROR_ACCESS_DENIED) { return PMEM2_E_NO_ACCESS; } return pmem2_lasterror_to_err(); } ret = pmem2_config_validate_addr_alignment(cfg, src); if (ret) return ret; /* let's get addr from cfg struct */ LPVOID addr_hint = cfg->addr; /* obtain a pointer to the mapping view */ void *base = MapViewOfFileEx(mh, access, HIDWORD(effective_offset), LODWORD(effective_offset), length, addr_hint); /* hint address */ if (base == NULL) { ERR("!!MapViewOfFileEx"); if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) { DWORD ret_windows = GetLastError(); if (ret_windows == ERROR_INVALID_ADDRESS) ret = PMEM2_E_MAPPING_EXISTS; else ret = pmem2_lasterror_to_err(); } else ret = pmem2_lasterror_to_err(); goto err_close_mapping_handle; } if (!CloseHandle(mh)) { ERR("!!CloseHandle"); ret = pmem2_lasterror_to_err(); goto err_unmap_base; } enum pmem2_granularity available_min_granularity = PMEM2_GRANULARITY_PAGE; if (src->type == PMEM2_SOURCE_HANDLE) { int direct_access = is_direct_access(src->value.handle); if (direct_access < 0) { ret = direct_access; goto err_unmap_base; } bool eADR = (pmem2_auto_flush() == 1); available_min_granularity = get_min_granularity(eADR, direct_access, cfg->sharing); } else if (src->type == PMEM2_SOURCE_ANON) { available_min_granularity = PMEM2_GRANULARITY_BYTE; } else { ASSERT(0); } if (available_min_granularity > cfg->requested_max_granularity) { const char *err = granularity_err_msg [cfg->requested_max_granularity] [available_min_granularity]; if (strcmp(err, GRAN_IMPOSSIBLE) == 0) FATAL( "unhandled granularity error: available_min_granularity: %d" \ "requested_max_granularity: %d", available_min_granularity, cfg->requested_max_granularity); ERR("%s", err); ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED; goto err_unmap_base; } /* prepare pmem2_map structure */ struct pmem2_map *map; map = (struct pmem2_map 
*)pmem2_malloc(sizeof(*map), &ret);
	if (!map)
		goto err_unmap_base;

	map->addr = base;
	/*
	 * XXX probably in some cases the reserved length > the content length.
	 * It may be worth researching.
	 */
	map->reserved_length = length;
	map->content_length = length;
	map->effective_granularity = available_min_granularity;
	map->source = *src;
	pmem2_set_flush_fns(map);
	pmem2_set_mem_fns(map);

	ret = pmem2_register_mapping(map);
	if (ret)
		goto err_register;

	/* return a pointer to the pmem2_map structure */
	*map_ptr = map;

	return ret;

err_register:
	free(map);
err_unmap_base:
	UnmapViewOfFile(base);
	return ret;

err_close_mapping_handle:
	CloseHandle(mh);
	return ret;
}

/*
 * pmem2_unmap -- unmap the specified region
 */
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
	LOG(3, "map_ptr %p", map_ptr);

	struct pmem2_map *map = *map_ptr;

	int ret = pmem2_unregister_mapping(map);
	if (ret)
		return ret;

	if (!UnmapViewOfFile(map->addr)) {
		ERR("!!UnmapViewOfFile");
		return pmem2_lasterror_to_err();
	}

	Free(map);
	*map_ptr = NULL;

	return 0;
}
8,611
23.123249
99
c
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/extent_linux.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * extent_linux.c - implementation of the linux fs extent query API */ #include <string.h> #include <fcntl.h> #include <sys/ioctl.h> #include <linux/fs.h> #include <linux/fiemap.h> #include "libpmem2.h" #include "pmem2_utils.h" #include "file.h" #include "out.h" #include "extent.h" #include "alloc.h" /* * pmem2_extents_create_get -- allocate extents structure and get extents * of the given file */ int pmem2_extents_create_get(int fd, struct extents **exts) { LOG(3, "fd %i extents %p", fd, exts); ASSERT(fd > 2); ASSERTne(exts, NULL); enum pmem2_file_type pmem2_type; struct extents *pexts = NULL; struct fiemap *fmap = NULL; os_stat_t st; if (os_fstat(fd, &st) < 0) { ERR("!fstat %d", fd); return PMEM2_E_ERRNO; } int ret = pmem2_get_type_from_stat(&st, &pmem2_type); if (ret) return ret; /* directories do not have any extents */ if (pmem2_type == PMEM2_FTYPE_DIR) { ERR( "checking extents does not make sense in case of directories"); return PMEM2_E_INVALID_FILE_TYPE; } /* allocate extents structure */ pexts = pmem2_zalloc(sizeof(struct extents), &ret); if (ret) return ret; /* save block size */ LOG(10, "fd %i: block size: %li", fd, (long int)st.st_blksize); pexts->blksize = (uint64_t)st.st_blksize; /* DAX device does not have any extents */ if (pmem2_type == PMEM2_FTYPE_DEVDAX) { *exts = pexts; return 0; } ASSERTeq(pmem2_type, PMEM2_FTYPE_REG); fmap = pmem2_zalloc(sizeof(struct fiemap), &ret); if (ret) goto error_free; fmap->fm_start = 0; fmap->fm_length = (size_t)st.st_size; fmap->fm_flags = 0; fmap->fm_extent_count = 0; fmap->fm_mapped_extents = 0; if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) { ERR("!fiemap ioctl() for fd=%d failed", fd); ret = PMEM2_E_ERRNO; goto error_free; } size_t newsize = sizeof(struct fiemap) + fmap->fm_mapped_extents * sizeof(struct fiemap_extent); struct fiemap *newfmap = pmem2_realloc(fmap, newsize, &ret); if (ret) goto error_free; fmap = newfmap; memset(fmap->fm_extents, 0, fmap->fm_mapped_extents * sizeof(struct fiemap_extent)); fmap->fm_extent_count = fmap->fm_mapped_extents; fmap->fm_mapped_extents = 0; if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) { ERR("!fiemap ioctl() for fd=%d failed", fd); ret = PMEM2_E_ERRNO; goto error_free; } LOG(4, "file with fd=%i has %u extents:", fd, fmap->fm_mapped_extents); /* save number of extents */ pexts->extents_count = fmap->fm_mapped_extents; pexts->extents = pmem2_malloc( pexts->extents_count * sizeof(struct extent), &ret); if (ret) goto error_free; /* save extents */ unsigned e; for (e = 0; e < fmap->fm_mapped_extents; e++) { pexts->extents[e].offset_physical = fmap->fm_extents[e].fe_physical; pexts->extents[e].offset_logical = fmap->fm_extents[e].fe_logical; pexts->extents[e].length = fmap->fm_extents[e].fe_length; LOG(10, " #%u: off_phy: %lu off_log: %lu len: %lu", e, pexts->extents[e].offset_physical, pexts->extents[e].offset_logical, pexts->extents[e].length); } *exts = pexts; Free(fmap); return 0; error_free: Free(pexts->extents); Free(pexts); Free(fmap); return ret; } /* * pmem2_extents_destroy -- free extents structure */ void pmem2_extents_destroy(struct extents **exts) { LOG(3, "extents %p", exts); ASSERTne(exts, NULL); if (*exts) { Free((*exts)->extents); Free(*exts); *exts = NULL; } }
3,519
20.333333
73
c
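A caller of the extents API above simply walks the returned array; the field names in this sketch (hypothetical helper, not part of the tree) come straight from the struct extents/struct extent usage in the file:

/* sketch: print the physical layout of a regular file's extents */
#include <stdio.h>
#include "extent.h"

static int
dump_extents(int fd)
{
	struct extents *exts;

	if (pmem2_extents_create_get(fd, &exts))
		return 1;

	printf("block size: %lu\n", (unsigned long)exts->blksize);
	for (unsigned e = 0; e < exts->extents_count; e++)
		printf("#%u phy %lu log %lu len %lu\n", e,
			(unsigned long)exts->extents[e].offset_physical,
			(unsigned long)exts->extents[e].offset_logical,
			(unsigned long)exts->extents[e].length);

	pmem2_extents_destroy(&exts);
	return 0;
}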
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/flush.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ #ifndef X86_64_FLUSH_H #define X86_64_FLUSH_H #include <emmintrin.h> #include <stddef.h> #include <stdint.h> #include "util.h" #include "valgrind_internal.h" #define FLUSH_ALIGN ((uintptr_t)64) static force_inline void pmem_clflush(const void *addr) { _mm_clflush(addr); } #ifdef _MSC_VER static force_inline void pmem_clflushopt(const void *addr) { _mm_clflushopt(addr); } static force_inline void pmem_clwb(const void *addr) { _mm_clwb(addr); } #else /* * The x86 memory instructions are new enough that the compiler * intrinsic functions are not always available. The intrinsic * functions are defined here in terms of asm statements for now. */ static force_inline void pmem_clflushopt(const void *addr) { asm volatile(".byte 0x66; clflush %0" : "+m" \ (*(volatile char *)(addr))); } static force_inline void pmem_clwb(const void *addr) { asm volatile(".byte 0x66; xsaveopt %0" : "+m" \ (*(volatile char *)(addr))); } #endif /* _MSC_VER */ typedef void flush_fn(const void *, size_t); /* * flush_clflush_nolog -- flush the CPU cache, using clflush */ static force_inline void flush_clflush_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) _mm_clflush((char *)uptr); } /* * flush_clflushopt_nolog -- flush the CPU cache, using clflushopt */ static force_inline void flush_clflushopt_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { pmem_clflushopt((char *)uptr); } } /* * flush_clwb_nolog -- flush the CPU cache, using clwb */ static force_inline void flush_clwb_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { pmem_clwb((char *)uptr); } } /* * flush64b_empty -- (internal) do not flush the CPU cache */ static force_inline void flush64b_empty(const void *addr) { /* NOP, but tell pmemcheck about it */ VALGRIND_DO_FLUSH(addr, 64); } #endif
2,521
20.193277
66
h
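All three flush helpers above share one pattern: round the start address down to FLUSH_ALIGN, then step through the range one 64-byte cache line at a time. A standalone sketch of just that arithmetic (illustrative, not part of the tree):

/* sketch: which cache lines does a flush of [addr, addr+len) touch? */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FLUSH_ALIGN ((uintptr_t)64)

int
main(void)
{
	uintptr_t addr = 0x1038;	/* deliberately unaligned */
	size_t len = 100;

	for (uintptr_t uptr = addr & ~(FLUSH_ALIGN - 1);
			uptr < addr + len; uptr += FLUSH_ALIGN)
		printf("flush line 0x%lx\n", (unsigned long)uptr);
	/* prints 0x1000, 0x1040, 0x1080 -- three lines cover the range */
	return 0;
}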
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/init.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ #include <string.h> #include <xmmintrin.h> #include "auto_flush.h" #include "cpu.h" #include "flush.h" #include "memcpy_memset.h" #include "os.h" #include "out.h" #include "pmem2_arch.h" #include "valgrind_internal.h" #define MOVNT_THRESHOLD 256 size_t Movnt_threshold = MOVNT_THRESHOLD; /* * memory_barrier -- (internal) issue the fence instruction */ static void memory_barrier(void) { LOG(15, NULL); _mm_sfence(); /* ensure CLWB or CLFLUSHOPT completes */ } /* * flush_clflush -- (internal) flush the CPU cache, using clflush */ static void flush_clflush(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clflush_nolog(addr, len); } /* * flush_clflushopt -- (internal) flush the CPU cache, using clflushopt */ static void flush_clflushopt(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clflushopt_nolog(addr, len); } /* * flush_clwb -- (internal) flush the CPU cache, using clwb */ static void flush_clwb(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clwb_nolog(addr, len); } #if SSE2_AVAILABLE || AVX_AVAILABLE || AVX512F_AVAILABLE #define PMEM2_F_MEM_MOVNT (PMEM2_F_MEM_WC | PMEM2_F_MEM_NONTEMPORAL) #define PMEM2_F_MEM_MOV (PMEM2_F_MEM_WB | PMEM2_F_MEM_TEMPORAL) #define MEMCPY_TEMPLATE(isa, flush, perfbarrier) \ static void *\ memmove_nodrain_##isa##_##flush##perfbarrier(void *dest, const void *src, \ size_t len, unsigned flags, flush_func flushf)\ {\ if (len == 0 || src == dest)\ return dest;\ \ if (flags & PMEM2_F_MEM_NOFLUSH) \ memmove_mov_##isa##_noflush(dest, src, len); \ else if (flags & PMEM2_F_MEM_MOVNT)\ memmove_movnt_##isa ##_##flush##perfbarrier(dest, src, len);\ else if (flags & PMEM2_F_MEM_MOV)\ memmove_mov_##isa##_##flush(dest, src, len);\ else if (len < Movnt_threshold)\ memmove_mov_##isa##_##flush(dest, src, len);\ else\ memmove_movnt_##isa##_##flush##perfbarrier(dest, src, len);\ \ return dest;\ } #define MEMCPY_TEMPLATE_EADR(isa, perfbarrier) \ static void *\ memmove_nodrain_##isa##_eadr##perfbarrier(void *dest, const void *src, \ size_t len, unsigned flags, flush_func flushf)\ {\ if (len == 0 || src == dest)\ return dest;\ \ if (flags & PMEM2_F_MEM_NOFLUSH)\ memmove_mov_##isa##_noflush(dest, src, len);\ else if (flags & PMEM2_F_MEM_NONTEMPORAL)\ memmove_movnt_##isa##_empty##perfbarrier(dest, src, len);\ else\ memmove_mov_##isa##_empty(dest, src, len);\ \ return dest;\ } #define MEMSET_TEMPLATE(isa, flush, perfbarrier)\ static void *\ memset_nodrain_##isa##_##flush##perfbarrier(void *dest, int c, size_t len, \ unsigned flags, flush_func flushf)\ {\ if (len == 0)\ return dest;\ \ if (flags & PMEM2_F_MEM_NOFLUSH) \ memset_mov_##isa##_noflush(dest, c, len); \ else if (flags & PMEM2_F_MEM_MOVNT)\ memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\ else if (flags & PMEM2_F_MEM_MOV)\ memset_mov_##isa##_##flush(dest, c, len);\ else if (len < Movnt_threshold)\ memset_mov_##isa##_##flush(dest, c, len);\ else\ memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\ \ return dest;\ } #define MEMSET_TEMPLATE_EADR(isa, perfbarrier) \ static void *\ memset_nodrain_##isa##_eadr##perfbarrier(void *dest, int c, size_t len, \ unsigned flags, flush_func flushf)\ {\ if (len == 0)\ return dest;\ \ if (flags & PMEM2_F_MEM_NOFLUSH)\ memset_mov_##isa##_noflush(dest, c, len);\ else if (flags & PMEM2_F_MEM_NONTEMPORAL)\ memset_movnt_##isa##_empty##perfbarrier(dest, c, len);\ else\ memset_mov_##isa##_empty(dest, c, len);\ \ return 
dest;\ } #endif #if SSE2_AVAILABLE MEMCPY_TEMPLATE(sse2, clflush, _nobarrier) MEMCPY_TEMPLATE(sse2, clflushopt, _nobarrier) MEMCPY_TEMPLATE(sse2, clwb, _nobarrier) MEMCPY_TEMPLATE_EADR(sse2, _nobarrier) MEMSET_TEMPLATE(sse2, clflush, _nobarrier) MEMSET_TEMPLATE(sse2, clflushopt, _nobarrier) MEMSET_TEMPLATE(sse2, clwb, _nobarrier) MEMSET_TEMPLATE_EADR(sse2, _nobarrier) MEMCPY_TEMPLATE(sse2, clflush, _wcbarrier) MEMCPY_TEMPLATE(sse2, clflushopt, _wcbarrier) MEMCPY_TEMPLATE(sse2, clwb, _wcbarrier) MEMCPY_TEMPLATE_EADR(sse2, _wcbarrier) MEMSET_TEMPLATE(sse2, clflush, _wcbarrier) MEMSET_TEMPLATE(sse2, clflushopt, _wcbarrier) MEMSET_TEMPLATE(sse2, clwb, _wcbarrier) MEMSET_TEMPLATE_EADR(sse2, _wcbarrier) #endif #if AVX_AVAILABLE MEMCPY_TEMPLATE(avx, clflush, _nobarrier) MEMCPY_TEMPLATE(avx, clflushopt, _nobarrier) MEMCPY_TEMPLATE(avx, clwb, _nobarrier) MEMCPY_TEMPLATE_EADR(avx, _nobarrier) MEMSET_TEMPLATE(avx, clflush, _nobarrier) MEMSET_TEMPLATE(avx, clflushopt, _nobarrier) MEMSET_TEMPLATE(avx, clwb, _nobarrier) MEMSET_TEMPLATE_EADR(avx, _nobarrier) MEMCPY_TEMPLATE(avx, clflush, _wcbarrier) MEMCPY_TEMPLATE(avx, clflushopt, _wcbarrier) MEMCPY_TEMPLATE(avx, clwb, _wcbarrier) MEMCPY_TEMPLATE_EADR(avx, _wcbarrier) MEMSET_TEMPLATE(avx, clflush, _wcbarrier) MEMSET_TEMPLATE(avx, clflushopt, _wcbarrier) MEMSET_TEMPLATE(avx, clwb, _wcbarrier) MEMSET_TEMPLATE_EADR(avx, _wcbarrier) #endif #if AVX512F_AVAILABLE MEMCPY_TEMPLATE(avx512f, clflush, /* cstyle wa */) MEMCPY_TEMPLATE(avx512f, clflushopt, /* */) MEMCPY_TEMPLATE(avx512f, clwb, /* */) MEMCPY_TEMPLATE_EADR(avx512f, /* */) MEMSET_TEMPLATE(avx512f, clflush, /* */) MEMSET_TEMPLATE(avx512f, clflushopt, /* */) MEMSET_TEMPLATE(avx512f, clwb, /* */) MEMSET_TEMPLATE_EADR(avx512f, /* */) #endif enum memcpy_impl { MEMCPY_INVALID, MEMCPY_SSE2, MEMCPY_AVX, MEMCPY_AVX512F }; /* * use_sse2_memcpy_memset -- (internal) SSE2 detected, use it if possible */ static void use_sse2_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl, int wc_workaround) { #if SSE2_AVAILABLE *impl = MEMCPY_SSE2; if (wc_workaround) { info->memmove_nodrain_eadr = memmove_nodrain_sse2_eadr_wcbarrier; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_sse2_clflush_wcbarrier; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_sse2_clflushopt_wcbarrier; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_sse2_clwb_wcbarrier; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_sse2_eadr_wcbarrier; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_sse2_clflush_wcbarrier; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_sse2_clflushopt_wcbarrier; else if (info->flush == flush_clwb) info->memset_nodrain = memset_nodrain_sse2_clwb_wcbarrier; else ASSERT(0); } else { info->memmove_nodrain_eadr = memmove_nodrain_sse2_eadr_nobarrier; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_sse2_clflush_nobarrier; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_sse2_clflushopt_nobarrier; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_sse2_clwb_nobarrier; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_sse2_eadr_nobarrier; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_sse2_clflush_nobarrier; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_sse2_clflushopt_nobarrier; else if (info->flush == flush_clwb) 
info->memset_nodrain = memset_nodrain_sse2_clwb_nobarrier; else ASSERT(0); } #else LOG(3, "sse2 disabled at build time"); #endif } /* * use_avx_memcpy_memset -- (internal) AVX detected, use it if possible */ static void use_avx_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl, int wc_workaround) { #if AVX_AVAILABLE LOG(3, "avx supported"); char *e = os_getenv("PMEM_AVX"); if (e != NULL && strcmp(e, "0") == 0) { LOG(3, "PMEM_AVX set to 0"); return; } LOG(3, "PMEM_AVX enabled"); *impl = MEMCPY_AVX; if (wc_workaround) { info->memmove_nodrain_eadr = memmove_nodrain_avx_eadr_wcbarrier; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_avx_clflush_wcbarrier; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_avx_clflushopt_wcbarrier; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_avx_clwb_wcbarrier; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_avx_eadr_wcbarrier; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_avx_clflush_wcbarrier; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_avx_clflushopt_wcbarrier; else if (info->flush == flush_clwb) info->memset_nodrain = memset_nodrain_avx_clwb_wcbarrier; else ASSERT(0); } else { info->memmove_nodrain_eadr = memmove_nodrain_avx_eadr_nobarrier; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_avx_clflush_nobarrier; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_avx_clflushopt_nobarrier; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_avx_clwb_nobarrier; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_avx_eadr_nobarrier; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_avx_clflush_nobarrier; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_avx_clflushopt_nobarrier; else if (info->flush == flush_clwb) info->memset_nodrain = memset_nodrain_avx_clwb_nobarrier; else ASSERT(0); } #else LOG(3, "avx supported, but disabled at build time"); #endif } /* * use_avx512f_memcpy_memset -- (internal) AVX512F detected, use it if possible */ static void use_avx512f_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl) { #if AVX512F_AVAILABLE LOG(3, "avx512f supported"); char *e = os_getenv("PMEM_AVX512F"); if (e != NULL && strcmp(e, "0") == 0) { LOG(3, "PMEM_AVX512F set to 0"); return; } LOG(3, "PMEM_AVX512F enabled"); *impl = MEMCPY_AVX512F; info->memmove_nodrain_eadr = memmove_nodrain_avx512f_eadr; if (info->flush == flush_clflush) info->memmove_nodrain = memmove_nodrain_avx512f_clflush; else if (info->flush == flush_clflushopt) info->memmove_nodrain = memmove_nodrain_avx512f_clflushopt; else if (info->flush == flush_clwb) info->memmove_nodrain = memmove_nodrain_avx512f_clwb; else ASSERT(0); info->memset_nodrain_eadr = memset_nodrain_avx512f_eadr; if (info->flush == flush_clflush) info->memset_nodrain = memset_nodrain_avx512f_clflush; else if (info->flush == flush_clflushopt) info->memset_nodrain = memset_nodrain_avx512f_clflushopt; else if (info->flush == flush_clwb) info->memset_nodrain = memset_nodrain_avx512f_clwb; else ASSERT(0); #else LOG(3, "avx512f supported, but disabled at build time"); #endif } /* * pmem_cpuinfo_to_funcs -- (internal) configure libpmem based on CPUID */ static void pmem_cpuinfo_to_funcs(struct pmem2_arch_info *info, enum memcpy_impl *impl) { LOG(3, NULL); if (is_cpu_clflush_present()) { LOG(3, "clflush supported");
info->flush = flush_clflush; info->flush_has_builtin_fence = 1; info->fence = memory_barrier; } if (is_cpu_clflushopt_present()) { LOG(3, "clflushopt supported"); char *e = os_getenv("PMEM_NO_CLFLUSHOPT"); if (e && strcmp(e, "1") == 0) { LOG(3, "PMEM_NO_CLFLUSHOPT forced no clflushopt"); } else { info->flush = flush_clflushopt; info->flush_has_builtin_fence = 0; info->fence = memory_barrier; } } if (is_cpu_clwb_present()) { LOG(3, "clwb supported"); char *e = os_getenv("PMEM_NO_CLWB"); if (e && strcmp(e, "1") == 0) { LOG(3, "PMEM_NO_CLWB forced no clwb"); } else { info->flush = flush_clwb; info->flush_has_builtin_fence = 0; info->fence = memory_barrier; } } /* * XXX Disable this work around for Intel CPUs with optimized * WC eviction. */ int wc_workaround = is_cpu_genuine_intel(); char *ptr = os_getenv("PMEM_WC_WORKAROUND"); if (ptr) { if (strcmp(ptr, "1") == 0) { LOG(3, "WC workaround forced to 1"); wc_workaround = 1; } else if (strcmp(ptr, "0") == 0) { LOG(3, "WC workaround forced to 0"); wc_workaround = 0; } else { LOG(3, "incorrect value of PMEM_WC_WORKAROUND (%s)", ptr); } } LOG(3, "WC workaround = %d", wc_workaround); ptr = os_getenv("PMEM_NO_MOVNT"); if (ptr && strcmp(ptr, "1") == 0) { LOG(3, "PMEM_NO_MOVNT forced no movnt"); } else { use_sse2_memcpy_memset(info, impl, wc_workaround); if (is_cpu_avx_present()) use_avx_memcpy_memset(info, impl, wc_workaround); if (is_cpu_avx512f_present()) use_avx512f_memcpy_memset(info, impl); } } /* * pmem2_arch_init -- initialize architecture-specific list of pmem operations */ void pmem2_arch_init(struct pmem2_arch_info *info) { LOG(3, NULL); enum memcpy_impl impl = MEMCPY_INVALID; pmem_cpuinfo_to_funcs(info, &impl); /* * For testing, allow overriding the default threshold * for using non-temporal stores in pmem_memcpy_*(), pmem_memmove_*() * and pmem_memset_*(). * It has no effect if movnt is not supported or disabled. */ const char *ptr = os_getenv("PMEM_MOVNT_THRESHOLD"); if (ptr) { long long val = atoll(ptr); if (val < 0) { LOG(3, "Invalid PMEM_MOVNT_THRESHOLD"); } else { LOG(3, "PMEM_MOVNT_THRESHOLD set to %zu", (size_t)val); Movnt_threshold = (size_t)val; } } if (info->flush == flush_clwb) LOG(3, "using clwb"); else if (info->flush == flush_clflushopt) LOG(3, "using clflushopt"); else if (info->flush == flush_clflush) LOG(3, "using clflush"); else FATAL("invalid deep flush function address"); if (impl == MEMCPY_AVX512F) LOG(3, "using movnt AVX512F"); else if (impl == MEMCPY_AVX) LOG(3, "using movnt AVX"); else if (impl == MEMCPY_SSE2) LOG(3, "using movnt SSE2"); }
13,899
25.275992
79
c
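Editor's note: the MEMCPY_TEMPLATE/MEMSET_TEMPLATE macros in persist.c above all expand to the same dispatch rule: an explicit flag wins, otherwise Movnt_threshold (256 B by default, overridable via PMEM_MOVNT_THRESHOLD) decides between temporal and non-temporal stores. Below is a minimal standalone sketch of that rule; the F_MEM_* bit values and pick_impl() are illustrative stand-ins, not the library's API.

#include <stddef.h>
#include <stdio.h>

/* hypothetical flag bits; the real PMEM2_F_MEM_* values live in libpmem2.h */
#define F_MEM_NOFLUSH     (1u << 0)
#define F_MEM_NONTEMPORAL (1u << 1)
#define F_MEM_TEMPORAL    (1u << 2)

static size_t movnt_threshold = 256; /* mirrors MOVNT_THRESHOLD above */

/* the decision tree the templates expand to, returning strings for clarity */
static const char *
pick_impl(size_t len, unsigned flags)
{
	if (flags & F_MEM_NOFLUSH)
		return "mov, no flush";
	if (flags & F_MEM_NONTEMPORAL)
		return "movnt (forced by flag)";
	if (flags & F_MEM_TEMPORAL)
		return "mov + flush (forced by flag)";
	return len < movnt_threshold ? "mov + flush" : "movnt";
}

int
main(void)
{
	printf("64 B, no flags  -> %s\n", pick_impl(64, 0));
	printf("4 KiB, no flags -> %s\n", pick_impl(4096, 0));
	printf("4 KiB, TEMPORAL -> %s\n", pick_impl(4096, F_MEM_TEMPORAL));
	return 0;
}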
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/avx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ #ifndef PMEM_AVX_H #define PMEM_AVX_H #include <immintrin.h> #include "util.h" /* * avx_zeroupper -- _mm256_zeroupper wrapper * * _mm256_zeroupper clears upper parts of avx registers. * * It's needed for 2 reasons: * - it improves performance of non-avx code after avx * - it works around problem discovered by Valgrind * * In optimized builds gcc inserts VZEROUPPER automatically before * calling non-avx code (or at the end of the function). But in release * builds it doesn't, so if we don't do this by ourselves, then when * someone memcpy'ies uninitialized data, Valgrind complains whenever * someone reads those registers. * * One notable example is loader, which tries to detect whether it * needs to save whole ymm registers by looking at their current * (possibly uninitialized) value. * * Valgrind complains like that: * Conditional jump or move depends on uninitialised value(s) * at 0x4015CC9: _dl_runtime_resolve_avx_slow * (in /lib/x86_64-linux-gnu/ld-2.24.so) * by 0x10B531: test_realloc_api (obj_basic_integration.c:185) * by 0x10F1EE: main (obj_basic_integration.c:594) * * Note: We have to be careful to not read AVX registers after this * intrinsic, because of this stupid gcc bug: * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735 */ static force_inline void avx_zeroupper(void) { _mm256_zeroupper(); } static force_inline __m128i m256_get16b(__m256i ymm) { return _mm256_extractf128_si256(ymm, 0); } #ifdef _MSC_VER static force_inline uint64_t m256_get8b(__m256i ymm) { return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0); } static force_inline uint32_t m256_get4b(__m256i ymm) { return (uint32_t)m256_get8b(ymm); } static force_inline uint16_t m256_get2b(__m256i ymm) { return (uint16_t)m256_get8b(ymm); } #else static force_inline uint64_t m256_get8b(__m256i ymm) { return (uint64_t)_mm256_extract_epi64(ymm, 0); } static force_inline uint32_t m256_get4b(__m256i ymm) { return (uint32_t)_mm256_extract_epi32(ymm, 0); } static force_inline uint16_t m256_get2b(__m256i ymm) { return (uint16_t)_mm256_extract_epi16(ymm, 0); } #endif #endif
2,238
24.735632
72
h
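A quick illustration of the avx.h helpers above: m256_get8b() is simply the low 8 bytes of a ymm register, and avx_zeroupper() wraps _mm256_zeroupper(). This toy is not part of the library; it assumes a 64-bit build with AVX (compile with -mavx) and an AVX-capable CPU.

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	__m256i ymm = _mm256_set1_epi8(0x5a);
	/* what m256_get8b() does on non-MSVC compilers */
	uint64_t low8 = (uint64_t)_mm256_extract_epi64(ymm, 0);
	/* what avx_zeroupper() wraps: clear upper halves before SSE code runs */
	_mm256_zeroupper();
	printf("low 8 bytes: %016llx\n", (unsigned long long)low8);
	return 0;
}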
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy_memset.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ #ifndef MEMCPY_MEMSET_H #define MEMCPY_MEMSET_H #include <stddef.h> #include <xmmintrin.h> #include "pmem2_arch.h" typedef void barrier_fn(void); typedef void flush64b_fn(const void *); static inline void barrier_after_ntstores(void) { /* * In this configuration pmem_drain does not contain sfence, so we have * to serialize non-temporal store instructions. */ _mm_sfence(); } static inline void no_barrier_after_ntstores(void) { /* * In this configuration pmem_drain contains sfence, so we don't have * to serialize non-temporal store instructions */ } static inline void noflush(const void *addr, size_t len) { /* NOP, not even pmemcheck annotation */ } static inline void noflush64b(const void *addr) { /* NOP, not even pmemcheck annotation */ } typedef void perf_barrier_fn(void); static force_inline void wc_barrier(void) { /* * Currently, for SSE2 and AVX code paths, use of non-temporal stores * on all generations of CPUs must be limited to the number of * write-combining buffers (12) because otherwise, suboptimal eviction * policy might impact performance when writing more data than WC * buffers can simultaneously hold. * * The AVX512 code path is not affected, probably because we are * overwriting whole cache lines. */ _mm_sfence(); } static force_inline void no_barrier(void) { } #ifndef AVX512F_AVAILABLE /* * XXX not supported in MSVC version we currently use. * Enable Windows tests pmem2_mem_ext when MSVC we * use will support AVX512F. */ #ifdef _MSC_VER #define AVX512F_AVAILABLE 0 #else #define AVX512F_AVAILABLE 1 #endif #endif #ifndef AVX_AVAILABLE #define AVX_AVAILABLE 1 #endif #ifndef SSE2_AVAILABLE #define SSE2_AVAILABLE 1 #endif #if SSE2_AVAILABLE void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len); void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len); void memmove_mov_sse2_empty(char *dest, const char *src, size_t len); void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src, size_t len); void memset_mov_sse2_clflush(char *dest, int c, size_t len); void memset_mov_sse2_clflushopt(char *dest, int c, size_t len); void memset_mov_sse2_clwb(char *dest, int c, size_t len); void memset_mov_sse2_empty(char *dest, int c, size_t len); void memset_mov_sse2_noflush(char *dest, int c, size_t len); void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len); 
void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len); #endif #if AVX_AVAILABLE void memmove_mov_avx_clflush(char *dest, const char *src, size_t len); void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_avx_clwb(char *dest, const char *src, size_t len); void memmove_mov_avx_empty(char *dest, const char *src, size_t len); void memmove_mov_avx_noflush(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src, size_t len); void memset_mov_avx_clflush(char *dest, int c, size_t len); void memset_mov_avx_clflushopt(char *dest, int c, size_t len); void memset_mov_avx_clwb(char *dest, int c, size_t len); void memset_mov_avx_empty(char *dest, int c, size_t len); void memset_mov_avx_noflush(char *dest, int c, size_t len); void memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len); #endif #if AVX512F_AVAILABLE void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len); void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len); void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len); void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len); void memset_mov_avx512f_clflush(char *dest, int c, size_t len); void memset_mov_avx512f_clflushopt(char *dest, int 
c, size_t len); void memset_mov_avx512f_clwb(char *dest, int c, size_t len); void memset_mov_avx512f_empty(char *dest, int c, size_t len); void memset_mov_avx512f_noflush(char *dest, int c, size_t len); void memset_movnt_avx512f_clflush(char *dest, int c, size_t len); void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len); void memset_movnt_avx512f_clwb(char *dest, int c, size_t len); void memset_movnt_avx512f_empty(char *dest, int c, size_t len); void memset_movnt_avx512f_noflush(char *dest, int c, size_t len); #endif extern size_t Movnt_threshold; /* * SSE2/AVX1 only: * * How much data WC buffers can hold at the same time, after which sfence * is needed to flush them. * * For some reason sfence affects performance of reading from DRAM, so we have * to prefetch the source data earlier. */ #define PERF_BARRIER_SIZE (12 * CACHELINE_SIZE /* 768 */) /* * How much to prefetch initially. * Cannot be bigger than the size of L1 (32kB) - PERF_BARRIER_SIZE. */ #define INI_PREFETCH_SIZE (64 * CACHELINE_SIZE /* 4096 */) static force_inline void prefetch(const char *addr) { _mm_prefetch(addr, _MM_HINT_T0); } static force_inline void prefetch_ini_fw(const char *src, size_t len) { size_t pref = MIN(len, INI_PREFETCH_SIZE); for (size_t i = 0; i < pref; i += CACHELINE_SIZE) prefetch(src + i); } static force_inline void prefetch_ini_bw(const char *src, size_t len) { size_t pref = MIN(len, INI_PREFETCH_SIZE); for (size_t i = 0; i < pref; i += CACHELINE_SIZE) prefetch(src - i); } static force_inline void prefetch_next_fw(const char *src, const char *srcend) { const char *begin = src + INI_PREFETCH_SIZE; const char *end = begin + PERF_BARRIER_SIZE; if (end > srcend) end = srcend; for (const char *addr = begin; addr < end; addr += CACHELINE_SIZE) prefetch(addr); } static force_inline void prefetch_next_bw(const char *src, const char *srcbegin) { const char *begin = src - INI_PREFETCH_SIZE; const char *end = begin - PERF_BARRIER_SIZE; if (end < srcbegin) end = srcbegin; for (const char *addr = begin; addr >= end; addr -= CACHELINE_SIZE) prefetch(addr); } #endif
9,351
33.131387
79
h
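The PERF_BARRIER_SIZE/INI_PREFETCH_SIZE scheme above amounts to: prefetch 4 KiB of source up front, then after every 768-byte burst of non-temporal stores prefetch the next 768 bytes and sfence to drain the 12 write-combining buffers. A hedged standalone sketch of that schedule follows, with plain memcpy standing in for the real movnt store bodies; copy_with_prefetch_window() is my own illustrative name.

#include <stdlib.h>
#include <string.h>
#include <xmmintrin.h> /* _mm_prefetch, _mm_sfence */

#define CL 64
#define PERF_BARRIER (12 * CL) /* 768: what the WC buffers can hold */
#define INI_PREFETCH (64 * CL) /* 4096: initial prefetch window */

static void
copy_with_prefetch_window(char *dst, const char *src, size_t len)
{
	size_t pref = len < INI_PREFETCH ? len : INI_PREFETCH;
	for (size_t i = 0; i < pref; i += CL) /* prime the window */
		_mm_prefetch(src + i, _MM_HINT_T0);

	while (len >= PERF_BARRIER) {
		memcpy(dst, src, PERF_BARRIER); /* stand-in for nt stores */
		/* pull the next burst into L1 before the fence stalls reads */
		for (size_t i = INI_PREFETCH;
				i < INI_PREFETCH + PERF_BARRIER && i < len;
				i += CL)
			_mm_prefetch(src + i, _MM_HINT_T0);
		dst += PERF_BARRIER;
		src += PERF_BARRIER;
		len -= PERF_BARRIER;
		if (len)
			_mm_sfence(); /* the wc_barrier() step */
	}
	if (len)
		memcpy(dst, src, len);
}

int
main(void)
{
	char *a = calloc(1, 1 << 16), *b = calloc(1, 1 << 16);
	if (!a || !b)
		return 1;
	copy_with_prefetch_window(b, a, 1 << 16);
	free(a);
	free(b);
	return 0;
}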
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memset/memset_nt_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_sse2.h" #include "out.h" #include "valgrind_internal.h" static force_inline void mm_stream_si128(char *dest, unsigned idx, __m128i src) { _mm_stream_si128((__m128i *)dest + idx, src); barrier(); } static force_inline void memset_movnt4x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); mm_stream_si128(dest, 4, xmm); mm_stream_si128(dest, 5, xmm); mm_stream_si128(dest, 6, xmm); mm_stream_si128(dest, 7, xmm); mm_stream_si128(dest, 8, xmm); mm_stream_si128(dest, 9, xmm); mm_stream_si128(dest, 10, xmm); mm_stream_si128(dest, 11, xmm); mm_stream_si128(dest, 12, xmm); mm_stream_si128(dest, 13, xmm); mm_stream_si128(dest, 14, xmm); mm_stream_si128(dest, 15, xmm); } static force_inline void memset_movnt2x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); mm_stream_si128(dest, 4, xmm); mm_stream_si128(dest, 5, xmm); mm_stream_si128(dest, 6, xmm); mm_stream_si128(dest, 7, xmm); } static force_inline void memset_movnt1x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); } static force_inline void memset_movnt1x32b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); } static force_inline void memset_movnt1x16b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest, xmm); } static force_inline void memset_movnt1x8b(char *dest, __m128i xmm) { uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char *dest, __m128i xmm) { uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_sse2(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { char *orig_dest = dest; size_t orig_len = len; __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt, flush); dest += cnt; len -= cnt; } while (len >= PERF_BARRIER_SIZE) { memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, xmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, xmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, xmm); else if (len == 16) memset_movnt1x16b(dest, xmm); else if (len == 8) memset_movnt1x8b(dest, xmm); else if (len == 4) memset_movnt1x4b(dest, xmm); else goto nonnt; goto end; } nonnt: memset_small_sse2(dest, xmm, len, flush); end: barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } /* variants without perf_barrier */ void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
5,912
20.580292
71
c
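The head of memset_movnt_sse2 above aligns dest to a cache line before entering the unrolled non-temporal loop, via cnt = 64 - (dest & 63), capped by len. A tiny self-check of that arithmetic (standalone sketch, not library code):

#include <assert.h>
#include <stdint.h>

static uint64_t
head_bytes(uint64_t dest, uint64_t len)
{
	/* same computation as the head of memset_movnt_sse2 */
	uint64_t cnt = dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
	}
	return cnt;
}

int
main(void)
{
	assert(head_bytes(0x1000, 500) == 0);  /* already 64-byte aligned */
	assert(head_bytes(0x1001, 500) == 63); /* one byte past a line start */
	assert(head_bytes(0x103f, 500) == 1);  /* last byte of a line */
	assert(head_bytes(0x1001, 10) == 10);  /* capped by a short length */
	return 0;
}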
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memset/memset_nt_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx.h" #include "out.h" #include "valgrind_internal.h" static force_inline void mm256_stream_si256(char *dest, unsigned idx, __m256i src) { _mm256_stream_si256((__m256i *)dest + idx, src); barrier(); } static force_inline void memset_movnt8x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); mm256_stream_si256(dest, 4, ymm); mm256_stream_si256(dest, 5, ymm); mm256_stream_si256(dest, 6, ymm); mm256_stream_si256(dest, 7, ymm); mm256_stream_si256(dest, 8, ymm); mm256_stream_si256(dest, 9, ymm); mm256_stream_si256(dest, 10, ymm); mm256_stream_si256(dest, 11, ymm); mm256_stream_si256(dest, 12, ymm); mm256_stream_si256(dest, 13, ymm); mm256_stream_si256(dest, 14, ymm); mm256_stream_si256(dest, 15, ymm); } static force_inline void memset_movnt4x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); mm256_stream_si256(dest, 4, ymm); mm256_stream_si256(dest, 5, ymm); mm256_stream_si256(dest, 6, ymm); mm256_stream_si256(dest, 7, ymm); } static force_inline void memset_movnt2x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); } static force_inline void memset_movnt1x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm0 = m256_get16b(ymm); _mm_stream_si128((__m128i *)dest, xmm0); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char *dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_avx(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { char *orig_dest = dest; size_t orig_len = len; __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= PERF_BARRIER_SIZE) { memset_movnt8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; memset_movnt4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { memset_movnt8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, ymm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, ymm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx(dest, ymm, len, flush); end: avx_zeroupper(); barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } /* variants without perf_barrier */ void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
6,151
20.43554
71
c
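COMPILE_ERROR_ON in the AVX loop above pins the burst size (8 + 4 blocks of 64 B) to PERF_BARRIER_SIZE, so changing either one breaks the build instead of silently overflowing the write-combining buffers. A standalone equivalent using C11 _Static_assert; the real COMPILE_ERROR_ON macro lives in PMDK's util.h, so this is only a sketch of the idea.

#define CACHELINE_SIZE 64
#define PERF_BARRIER_SIZE (12 * CACHELINE_SIZE)

/* bytes written per iteration of the AVX perf-barrier loop above */
#define AVX_BURST ((8 + 4) * 64)

_Static_assert(AVX_BURST == PERF_BARRIER_SIZE,
	"perf-barrier burst must cover exactly 12 cache lines");

int
main(void)
{
	return 0;
}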
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memset/memset_t_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx512f.h" static force_inline void mm512_store_si512(char *dest, unsigned idx, __m512i src) { _mm512_store_si512((__m512i *)dest + idx, src); } static force_inline void memset_mov32x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); mm512_store_si512(dest, 8, zmm); mm512_store_si512(dest, 9, zmm); mm512_store_si512(dest, 10, zmm); mm512_store_si512(dest, 11, zmm); mm512_store_si512(dest, 12, zmm); mm512_store_si512(dest, 13, zmm); mm512_store_si512(dest, 14, zmm); mm512_store_si512(dest, 15, zmm); mm512_store_si512(dest, 16, zmm); mm512_store_si512(dest, 17, zmm); mm512_store_si512(dest, 18, zmm); mm512_store_si512(dest, 19, zmm); mm512_store_si512(dest, 20, zmm); mm512_store_si512(dest, 21, zmm); mm512_store_si512(dest, 22, zmm); mm512_store_si512(dest, 23, zmm); mm512_store_si512(dest, 24, zmm); mm512_store_si512(dest, 25, zmm); mm512_store_si512(dest, 26, zmm); mm512_store_si512(dest, 27, zmm); mm512_store_si512(dest, 28, zmm); mm512_store_si512(dest, 29, zmm); mm512_store_si512(dest, 30, zmm); mm512_store_si512(dest, 31, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memset_mov16x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); mm512_store_si512(dest, 8, zmm); mm512_store_si512(dest, 9, zmm); mm512_store_si512(dest, 10, zmm); mm512_store_si512(dest, 11, zmm); mm512_store_si512(dest, 12, zmm); mm512_store_si512(dest, 13, zmm); mm512_store_si512(dest, 14, zmm); mm512_store_si512(dest, 15, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memset_mov8x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); 
mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_avx512f(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m512i zmm = _mm512_set1_epi8((char)c); /* See comment in memset_movnt_avx512f */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_mov32x64b(dest, zmm, flush64b); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_mov16x64b(dest, zmm, flush64b); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_mov8x64b(dest, zmm, flush64b); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, zmm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, zmm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, zmm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_avx512f(dest, ymm, len, flush); avx_zeroupper(); } void memset_mov_avx512f_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, noflush, noflush64b); } void memset_mov_avx512f_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_avx512f_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_avx512f_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clwb_nolog, pmem_clwb); }
6,851
22.958042
69
c
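The temporal (mov) variants above pair each 64-byte store with one flush64b() call on the same cache line. A minimal sketch of that store-then-clwb pattern for a single line; it assumes hardware with AVX512F and CLWB and a build with -mavx512f -mclwb, and set_one_line() is an illustrative name, not library code.

#include <immintrin.h>
#include <stdlib.h>

static void
set_one_line(char *dest, int c) /* dest must be 64-byte aligned */
{
	__m512i zmm = _mm512_set1_epi8((char)c);
	_mm512_store_si512((__m512i *)dest, zmm); /* temporal 64 B store */
	_mm_clwb(dest); /* write the line back, keep it cached */
}

int
main(void)
{
	char *buf = aligned_alloc(64, 64);
	if (!buf)
		return 1;
	set_one_line(buf, 0xab);
	free(buf);
	return 0;
}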
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memset/memset_nt_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx512f.h" #include "out.h" #include "util.h" #include "valgrind_internal.h" static force_inline void mm512_stream_si512(char *dest, unsigned idx, __m512i src) { _mm512_stream_si512((__m512i *)dest + idx, src); barrier(); } static force_inline void memset_movnt32x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, zmm); mm512_stream_si512(dest, 8, zmm); mm512_stream_si512(dest, 9, zmm); mm512_stream_si512(dest, 10, zmm); mm512_stream_si512(dest, 11, zmm); mm512_stream_si512(dest, 12, zmm); mm512_stream_si512(dest, 13, zmm); mm512_stream_si512(dest, 14, zmm); mm512_stream_si512(dest, 15, zmm); mm512_stream_si512(dest, 16, zmm); mm512_stream_si512(dest, 17, zmm); mm512_stream_si512(dest, 18, zmm); mm512_stream_si512(dest, 19, zmm); mm512_stream_si512(dest, 20, zmm); mm512_stream_si512(dest, 21, zmm); mm512_stream_si512(dest, 22, zmm); mm512_stream_si512(dest, 23, zmm); mm512_stream_si512(dest, 24, zmm); mm512_stream_si512(dest, 25, zmm); mm512_stream_si512(dest, 26, zmm); mm512_stream_si512(dest, 27, zmm); mm512_stream_si512(dest, 28, zmm); mm512_stream_si512(dest, 29, zmm); mm512_stream_si512(dest, 30, zmm); mm512_stream_si512(dest, 31, zmm); } static force_inline void memset_movnt16x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, zmm); mm512_stream_si512(dest, 8, zmm); mm512_stream_si512(dest, 9, zmm); mm512_stream_si512(dest, 10, zmm); mm512_stream_si512(dest, 11, zmm); mm512_stream_si512(dest, 12, zmm); mm512_stream_si512(dest, 13, zmm); mm512_stream_si512(dest, 14, zmm); mm512_stream_si512(dest, 15, zmm); } static force_inline void memset_movnt8x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, zmm); } static force_inline void memset_movnt4x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); } static force_inline void memset_movnt2x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); } static force_inline void memset_movnt1x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest, ymm); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm = _mm256_extracti128_si256(ymm, 0); _mm_stream_si128((__m128i *)dest, xmm); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char 
*dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_avx512f(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier) { char *orig_dest = dest; size_t orig_len = len; __m512i zmm = _mm512_set1_epi8((char)c); /* * Can't use _mm512_extracti64x4_epi64, because some versions of gcc * crash. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82887 */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_movnt32x64b(dest, zmm); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_movnt16x64b(dest, zmm); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_movnt8x64b(dest, zmm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, zmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, zmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, zmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx512f(dest, ymm, len, flush); end: avx_zeroupper(); barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } void memset_movnt_avx512f_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, noflush, barrier_after_ntstores); } void memset_movnt_avx512f_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_empty_nolog, barrier_after_ntstores); } void memset_movnt_avx512f_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clflush_nolog, barrier_after_ntstores); } void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores); } void memset_movnt_avx512f_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores); }
6,397
21.607774
71
c
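The tail of memset_movnt_avx512f above issues a single nt store only when the remainder is a power of two (32/16/8/4); anything else falls through to memset_small_avx512f. A standalone check of the power-of-two test that gates this dispatch; is_pow2() here is my own minimal stand-in for PMDK's util_is_pow2.

#include <assert.h>
#include <stddef.h>

/* minimal stand-in for util_is_pow2 */
static int
is_pow2(size_t v)
{
	return v && (v & (v - 1)) == 0;
}

int
main(void)
{
	assert(is_pow2(32) && is_pow2(16) && is_pow2(8) && is_pow2(4));
	assert(!is_pow2(0));  /* len == 0 is handled before the dispatch */
	assert(!is_pow2(24)); /* a 24 B tail goes to memset_small_avx512f */
	return 0;
}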
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memset/memset_t_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_sse2.h" static force_inline void mm_store_si128(char *dest, unsigned idx, __m128i src) { _mm_store_si128((__m128i *)dest + idx, src); } static force_inline void memset_mov4x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); mm_store_si128(dest, 4, xmm); mm_store_si128(dest, 5, xmm); mm_store_si128(dest, 6, xmm); mm_store_si128(dest, 7, xmm); mm_store_si128(dest, 8, xmm); mm_store_si128(dest, 9, xmm); mm_store_si128(dest, 10, xmm); mm_store_si128(dest, 11, xmm); mm_store_si128(dest, 12, xmm); mm_store_si128(dest, 13, xmm); mm_store_si128(dest, 14, xmm); mm_store_si128(dest, 15, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); mm_store_si128(dest, 4, xmm); mm_store_si128(dest, 5, xmm); mm_store_si128(dest, 6, xmm); mm_store_si128(dest, 7, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_sse2(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 4 * 64) { memset_mov4x64b(dest, xmm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, xmm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, xmm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_sse2(dest, xmm, len, flush); } void memset_mov_sse2_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, noflush, noflush64b); } void memset_mov_sse2_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_sse2_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_sse2_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_sse2_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clwb_nolog, pmem_clwb); }
3,304
20.461039
66
c
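All the *_clflush/_clflushopt/_clwb entry points above are generated by handing flush function pointers into one force_inline body; because each call site passes a compile-time constant pointer, the compiler clones and devirtualizes every variant. A toy demonstration of that technique; fill_lines() and the callback names are made up for illustration.

#include <stddef.h>
#include <stdio.h>

typedef void flush64b_fn(const void *);

static void noflush64b_demo(const void *addr) { (void)addr; }
static void trace_flush64b(const void *addr) { printf("flush %p\n", addr); }

/* one body, specialized per flush callback when inlined */
static inline void
fill_lines(char *dest, int c, size_t nlines, flush64b_fn *flush64b)
{
	for (size_t i = 0; i < nlines; i++) {
		for (size_t j = 0; j < 64; j++)
			dest[i * 64 + j] = (char)c;
		flush64b(dest + i * 64); /* per-cacheline flush hook */
	}
}

int
main(void)
{
	char buf[128];
	fill_lines(buf, 0, 2, noflush64b_demo);
	fill_lines(buf, 1, 2, trace_flush64b);
	return 0;
}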
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memset/memset_sse2.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #ifndef PMEM2_MEMSET_SSE2_H #define PMEM2_MEMSET_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include <string.h> #include "out.h" static force_inline void memset_small_sse2_noflush(char *dest, __m128i xmm, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + 32), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 33..48 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; le32: if (len > 16) { /* 17..32 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 9..16 */ uint64_t d8 = (uint64_t)_mm_cvtsi128_si64(xmm); *(ua_uint64_t *)dest = d8; *(ua_uint64_t *)(dest + len - 8) = d8; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d4 = (uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint32_t *)dest = d4; *(ua_uint32_t *)(dest + len - 4) = d4; return; } /* 3..4 */ uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint16_t *)dest = d2; *(ua_uint16_t *)(dest + len - 2) = d2; return; le2: if (len == 2) { uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint16_t *)dest = d2; return; } *(uint8_t *)dest = (uint8_t)_mm_cvtsi128_si32(xmm); } static force_inline void memset_small_sse2(char *dest, __m128i xmm, size_t len, flush_fn flush) { /* * pmemcheck complains about "overwritten stores before they were made * persistent" for overlapping stores (last instruction in each code * path) in the optimized version. * libc's memset also does that, so we can't use it here. */ if (On_pmemcheck) { memset_nodrain_generic(dest, (uint8_t)_mm_cvtsi128_si32(xmm), len, PMEM2_F_MEM_NOFLUSH, NULL); } else { memset_small_sse2_noflush(dest, xmm, len); } flush(dest, len); } #endif
2,213
20.085714
71
h
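memset_small_sse2_noflush above covers every length from 1 to 64 with a handful of size classes by letting the final store overlap an earlier one; that overlap is also exactly why pmemcheck needs the generic fallback. A scalar sketch of the trick for the 9..16-byte class (standalone; memcpy is used for unaligned-safe 8-byte stores):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void
set_9_to_16(char *dest, uint8_t c, size_t len)
{
	assert(len >= 9 && len <= 16);
	uint64_t d8 = 0x0101010101010101ULL * c; /* byte replicated 8x */
	memcpy(dest, &d8, 8);           /* first 8 bytes */
	memcpy(dest + len - 8, &d8, 8); /* last 8; overlaps when len < 16 */
}

int
main(void)
{
	char buf[16] = { 0 };
	set_9_to_16(buf, 0xff, 11);
	for (int i = 0; i < 11; i++)
		assert((uint8_t)buf[i] == 0xff);
	return 0;
}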
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memset/memset_t_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx.h" static force_inline void mm256_store_si256(char *dest, unsigned idx, __m256i src) { _mm256_store_si256((__m256i *)dest + idx, src); } static force_inline void memset_mov8x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); mm256_store_si256(dest, 4, ymm); mm256_store_si256(dest, 5, ymm); mm256_store_si256(dest, 6, ymm); mm256_store_si256(dest, 7, ymm); mm256_store_si256(dest, 8, ymm); mm256_store_si256(dest, 9, ymm); mm256_store_si256(dest, 10, ymm); mm256_store_si256(dest, 11, ymm); mm256_store_si256(dest, 12, ymm); mm256_store_si256(dest, 13, ymm); mm256_store_si256(dest, 14, ymm); mm256_store_si256(dest, 15, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); mm256_store_si256(dest, 4, ymm); mm256_store_si256(dest, 5, ymm); mm256_store_si256(dest, 6, ymm); mm256_store_si256(dest, 7, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_avx(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 8 * 64) { memset_mov8x64b(dest, ymm, flush64b); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, ymm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, ymm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, ymm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_avx(dest, ymm, len, flush); avx_zeroupper(); } void memset_mov_avx_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, noflush, noflush64b); } void memset_mov_avx_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_avx_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_avx_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, 
flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_avx_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_clwb_nolog, pmem_clwb); }
3,890
20.73743
65
c
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy/memcpy_t_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" #include "out.h" static force_inline __m128i mm_loadu_si128(const char *src, unsigned idx) { return _mm_loadu_si128((const __m128i *)src + idx); } static force_inline void mm_store_si128(char *dest, unsigned idx, __m128i src) { _mm_store_si128((__m128i *)dest + idx, src); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); __m128i xmm8 = mm_loadu_si128(src, 8); __m128i xmm9 = mm_loadu_si128(src, 9); __m128i xmm10 = mm_loadu_si128(src, 10); __m128i xmm11 = mm_loadu_si128(src, 11); __m128i xmm12 = mm_loadu_si128(src, 12); __m128i xmm13 = mm_loadu_si128(src, 13); __m128i xmm14 = mm_loadu_si128(src, 14); __m128i xmm15 = mm_loadu_si128(src, 15); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); mm_store_si128(dest, 4, xmm4); mm_store_si128(dest, 5, xmm5); mm_store_si128(dest, 6, xmm6); mm_store_si128(dest, 7, xmm7); mm_store_si128(dest, 8, xmm8); mm_store_si128(dest, 9, xmm9); mm_store_si128(dest, 10, xmm10); mm_store_si128(dest, 11, xmm11); mm_store_si128(dest, 12, xmm12); mm_store_si128(dest, 13, xmm13); mm_store_si128(dest, 14, xmm14); mm_store_si128(dest, 15, xmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); mm_store_si128(dest, 4, xmm4); mm_store_si128(dest, 5, xmm5); mm_store_si128(dest, 6, xmm6); mm_store_si128(dest, 7, xmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_sse_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 
1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_mov_sse_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt, flush); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_sse2(dest - len, src - len, len, flush); } static force_inline void memmove_mov_sse2(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_sse_fw(dest, src, len, flush, flush64b); else memmove_mov_sse_bw(dest, src, len, flush, flush64b); } void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, noflush, noflush64b); } void memmove_mov_sse2_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clwb_nolog, pmem_clwb); }
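/*
 * Illustrative note, not part of the original file: the dispatch in
 * memmove_mov_sse2() above exploits unsigned wrap-around. When dest is
 * below src, (uintptr_t)dest - (uintptr_t)src wraps to a huge value, so
 * ">= len" holds and the forward copy runs; the backward path fires only
 * when dest lands inside [src, src + len). A minimal standalone sketch
 * of the same predicate (helper name is hypothetical):
 */
#include <stdint.h>
#include <stddef.h>

static inline int
overlap_requires_backward_copy(const void *dest, const void *src, size_t len)
{
	/* true exactly when dest is in [src, src + len) */
	return (uintptr_t)dest - (uintptr_t)src < len;
}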
5,820
22.566802
69
c
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy/memcpy_avx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #ifndef PMEM2_MEMCPY_AVX_H #define PMEM2_MEMCPY_AVX_H #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "out.h" static force_inline void memmove_small_avx_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; /* 33..64 */ __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); __m256i ymm1 = _mm256_loadu_si256((__m256i *)(src + len - 32)); _mm256_storeu_si256((__m256i *)dest, ymm0); _mm256_storeu_si256((__m256i *)(dest + len - 32), ymm1); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ ua_uint64_t d80 = *(ua_uint64_t *)src; ua_uint64_t d81 = *(ua_uint64_t *)(src + len - 8); *(ua_uint64_t *)dest = d80; *(ua_uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ ua_uint32_t d40 = *(ua_uint32_t *)src; ua_uint32_t d41 = *(ua_uint32_t *)(src + len - 4); *(ua_uint32_t *)dest = d40; *(ua_uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ ua_uint16_t d20 = *(ua_uint16_t *)src; ua_uint16_t d21 = *(ua_uint16_t *)(src + len - 2); *(ua_uint16_t *)dest = d20; *(ua_uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { *(ua_uint16_t *)dest = *(ua_uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline void memmove_small_avx(char *dest, const char *src, size_t len, flush_fn flush) { /* * pmemcheck complains about "overwritten stores before they were made * persistent" for overlapping stores (last instruction in each code * path) in the optimized version. * libc's memcpy also does that, so we can't use it here. */ if (On_pmemcheck) { memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH, NULL); } else { memmove_small_avx_noflush(dest, src, len); } flush(dest, len); } #endif
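/*
 * Illustrative sketch, not part of the original header: the small-copy
 * paths above avoid per-byte branching by issuing two possibly
 * overlapping accesses, one anchored at the start and one at the end of
 * the range. A portable rendition of the 9..16-byte case, using memcpy
 * instead of the unaligned-type casts above (helper name is
 * hypothetical):
 */
#include <string.h>
#include <stdint.h>
#include <stddef.h>

static inline void
example_copy_9_to_16(char *dest, const char *src, size_t len)
{
	uint64_t head, tail;

	memcpy(&head, src, 8);			/* bytes [0, 8) */
	memcpy(&tail, src + len - 8, 8);	/* bytes [len - 8, len) */
	memcpy(dest, &head, 8);
	memcpy(dest + len - 8, &tail, 8);	/* overlaps head when len < 16 */
}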
2,173
20.524752
74
h
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy/memcpy_t_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx.h" static force_inline __m256i mm256_loadu_si256(const char *src, unsigned idx) { return _mm256_loadu_si256((const __m256i *)src + idx); } static force_inline void mm256_store_si256(char *dest, unsigned idx, __m256i src) { _mm256_store_si256((__m256i *)dest + idx, src); } static force_inline void memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); __m256i ymm8 = mm256_loadu_si256(src, 8); __m256i ymm9 = mm256_loadu_si256(src, 9); __m256i ymm10 = mm256_loadu_si256(src, 10); __m256i ymm11 = mm256_loadu_si256(src, 11); __m256i ymm12 = mm256_loadu_si256(src, 12); __m256i ymm13 = mm256_loadu_si256(src, 13); __m256i ymm14 = mm256_loadu_si256(src, 14); __m256i ymm15 = mm256_loadu_si256(src, 15); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); mm256_store_si256(dest, 4, ymm4); mm256_store_si256(dest, 5, ymm5); mm256_store_si256(dest, 6, ymm6); mm256_store_si256(dest, 7, ymm7); mm256_store_si256(dest, 8, ymm8); mm256_store_si256(dest, 9, ymm9); mm256_store_si256(dest, 10, ymm10); mm256_store_si256(dest, 11, ymm11); mm256_store_si256(dest, 12, ymm12); mm256_store_si256(dest, 13, ymm13); mm256_store_si256(dest, 14, ymm14); mm256_store_si256(dest, 15, ymm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); mm256_store_si256(dest, 4, ymm4); mm256_store_si256(dest, 5, ymm5); mm256_store_si256(dest, 6, ymm6); mm256_store_si256(dest, 7, ymm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); mm256_store_si256(dest, 0, ymm0); 
mm256_store_si256(dest, 1, ymm1); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 8 * 64) { memmove_mov8x64b(dest, src, flush64b); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx(dest, src, len, flush); } static force_inline void memmove_mov_avx_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt, flush); } while (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src, flush64b); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_avx(dest - len, src - len, len, flush); } static force_inline void memmove_mov_avx(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx_fw(dest, src, len, flush, flush64b); else memmove_mov_avx_bw(dest, src, len, flush, flush64b); avx_zeroupper(); } void memmove_mov_avx_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, noflush, noflush64b); } void memmove_mov_avx_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_avx_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_avx_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_clwb_nolog, pmem_clwb); }
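/*
 * Illustrative note, not part of the original file: every *_fw variant
 * above first copies just enough bytes to bring dest up to a 64-byte
 * boundary, so each subsequent 64-byte block is cache-line aligned and
 * every flush64b() covers exactly one line. The prologue length is
 * computed as below (helper name is hypothetical):
 */
#include <stdint.h>
#include <stddef.h>

static inline size_t
example_align_prologue(const void *dest, size_t len)
{
	size_t cnt = (uintptr_t)dest & 63;	/* offset within the line */

	if (cnt == 0)
		return 0;

	cnt = 64 - cnt;			/* bytes up to the next 64B boundary */
	return cnt < len ? cnt : len;	/* never copy past the buffer */
}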
6,705
22.780142
68
c
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy/memcpy_t_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx512f.h" static force_inline __m512i mm512_loadu_si512(const char *src, unsigned idx) { return _mm512_loadu_si512((const __m512i *)src + idx); } static force_inline void mm512_store_si512(char *dest, unsigned idx, __m512i src) { _mm512_store_si512((__m512i *)dest + idx, src); } static force_inline void memmove_mov32x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); __m512i zmm16 = mm512_loadu_si512(src, 16); __m512i zmm17 = mm512_loadu_si512(src, 17); __m512i zmm18 = mm512_loadu_si512(src, 18); __m512i zmm19 = mm512_loadu_si512(src, 19); __m512i zmm20 = mm512_loadu_si512(src, 20); __m512i zmm21 = mm512_loadu_si512(src, 21); __m512i zmm22 = mm512_loadu_si512(src, 22); __m512i zmm23 = mm512_loadu_si512(src, 23); __m512i zmm24 = mm512_loadu_si512(src, 24); __m512i zmm25 = mm512_loadu_si512(src, 25); __m512i zmm26 = mm512_loadu_si512(src, 26); __m512i zmm27 = mm512_loadu_si512(src, 27); __m512i zmm28 = mm512_loadu_si512(src, 28); __m512i zmm29 = mm512_loadu_si512(src, 29); __m512i zmm30 = mm512_loadu_si512(src, 30); __m512i zmm31 = mm512_loadu_si512(src, 31); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); mm512_store_si512(dest, 8, zmm8); mm512_store_si512(dest, 9, zmm9); mm512_store_si512(dest, 10, zmm10); mm512_store_si512(dest, 11, zmm11); mm512_store_si512(dest, 12, zmm12); mm512_store_si512(dest, 13, zmm13); mm512_store_si512(dest, 14, zmm14); mm512_store_si512(dest, 15, zmm15); mm512_store_si512(dest, 16, zmm16); mm512_store_si512(dest, 17, zmm17); mm512_store_si512(dest, 18, zmm18); mm512_store_si512(dest, 19, zmm19); mm512_store_si512(dest, 20, zmm20); mm512_store_si512(dest, 21, zmm21); mm512_store_si512(dest, 22, zmm22); mm512_store_si512(dest, 23, zmm23); mm512_store_si512(dest, 24, zmm24); mm512_store_si512(dest, 25, zmm25); mm512_store_si512(dest, 26, zmm26); mm512_store_si512(dest, 27, zmm27); mm512_store_si512(dest, 28, zmm28); mm512_store_si512(dest, 29, zmm29); mm512_store_si512(dest, 30, zmm30); mm512_store_si512(dest, 31, zmm31); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 
64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memmove_mov16x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); mm512_store_si512(dest, 8, zmm8); mm512_store_si512(dest, 9, zmm9); mm512_store_si512(dest, 10, zmm10); mm512_store_si512(dest, 11, zmm11); mm512_store_si512(dest, 12, zmm12); mm512_store_si512(dest, 13, zmm13); mm512_store_si512(dest, 14, zmm14); mm512_store_si512(dest, 15, zmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); 
flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); mm512_store_si512(dest, 0, zmm0); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx512f_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_mov32x64b(dest, src, flush64b); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_mov16x64b(dest, src, flush64b); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memmove_mov8x64b(dest, src, flush64b); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx512f(dest, src, len, flush); } static force_inline void memmove_mov_avx512f_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt, flush); } while (len >= 32 * 64) { dest -= 32 * 64; src -= 32 * 64; len -= 32 * 64; memmove_mov32x64b(dest, src, flush64b); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_mov16x64b(dest, src, flush64b); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src, flush64b); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_avx512f(dest - len, src - len, len, flush); } static force_inline void memmove_mov_avx512f(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx512f_fw(dest, src, len, flush, flush64b); else memmove_mov_avx512f_bw(dest, src, len, flush, flush64b); avx_zeroupper(); } void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, noflush, noflush64b); } void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_avx512f_clflushopt(char *dest, const char *src, 
size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clwb_nolog, pmem_clwb); }
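/*
 * Illustrative note, not part of the original file: the body of
 * memmove_mov_avx512f_fw() is a descending power-of-two ladder -- it
 * peels off the largest remaining multiple of each block size, so only
 * the 32 x 64B step can repeat and every smaller step runs at most once.
 * A compact generic rendering of that control flow, with copy_block() as
 * a hypothetical stand-in for the movNx64b helpers:
 */
#include <stddef.h>

extern void copy_block(char *dest, const char *src, size_t bytes);

static void
example_ladder_copy(char *dest, const char *src, size_t len)
{
	for (size_t blk = 32 * 64; blk >= 64; blk /= 2) {
		while (len >= blk) {
			copy_block(dest, src, blk);
			dest += blk;
			src += blk;
			len -= blk;
		}
	}
	/* remaining 0..63 bytes go through the small-copy path */
}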
11,422
25.020501
72
c
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy/memcpy_sse2.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #ifndef PMEM2_MEMCPY_SSE2_H #define PMEM2_MEMCPY_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include "out.h" static force_inline void memmove_small_sse2_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16)); __m128i xmm2 = _mm_loadu_si128((__m128i *)(src + 32)); __m128i xmm3 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + 16), xmm1); _mm_storeu_si128((__m128i *)(dest + 32), xmm2); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm3); return; } /* 33..48 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16)); __m128i xmm2 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + 16), xmm1); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm2); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ uint64_t d80 = *(ua_uint64_t *)src; uint64_t d81 = *(ua_uint64_t *)(src + len - 8); *(ua_uint64_t *)dest = d80; *(ua_uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d40 = *(ua_uint32_t *)src; uint32_t d41 = *(ua_uint32_t *)(src + len - 4); *(ua_uint32_t *)dest = d40; *(ua_uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ uint16_t d20 = *(ua_uint16_t *)src; uint16_t d21 = *(ua_uint16_t *)(src + len - 2); *(ua_uint16_t *)dest = d20; *(ua_uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { *(ua_uint16_t *)dest = *(ua_uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline void memmove_small_sse2(char *dest, const char *src, size_t len, flush_fn flush) { /* * pmemcheck complains about "overwritten stores before they were made * persistent" for overlapping stores (last instruction in each code * path) in the optimized version. * libc's memcpy also does that, so we can't use it here. */ if (On_pmemcheck) { memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH, NULL); } else { memmove_small_sse2_noflush(dest, src, len); } flush(dest, len); } #endif
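/*
 * Illustrative note, not part of the original header: the head/tail
 * technique above writes some bytes twice whenever len is not an exact
 * multiple of the access size. For len == 12 in the 9..16 branch, the
 * two 8-byte stores cover dest[0..8) and dest[4..12), overlapping on
 * [4..8) -- harmless for correctness, but exactly the double store that
 * pmemcheck reports, hence the On_pmemcheck fallback above.
 */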
2,726
22.307692
75
h
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy/memcpy_nt_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx.h" #include "valgrind_internal.h" static force_inline __m256i mm256_loadu_si256(const char *src, unsigned idx) { return _mm256_loadu_si256((const __m256i *)src + idx); } static force_inline void mm256_stream_si256(char *dest, unsigned idx, __m256i src) { _mm256_stream_si256((__m256i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); __m256i ymm8 = mm256_loadu_si256(src, 8); __m256i ymm9 = mm256_loadu_si256(src, 9); __m256i ymm10 = mm256_loadu_si256(src, 10); __m256i ymm11 = mm256_loadu_si256(src, 11); __m256i ymm12 = mm256_loadu_si256(src, 12); __m256i ymm13 = mm256_loadu_si256(src, 13); __m256i ymm14 = mm256_loadu_si256(src, 14); __m256i ymm15 = mm256_loadu_si256(src, 15); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); mm256_stream_si256(dest, 4, ymm4); mm256_stream_si256(dest, 5, ymm5); mm256_stream_si256(dest, 6, ymm6); mm256_stream_si256(dest, 7, ymm7); mm256_stream_si256(dest, 8, ymm8); mm256_stream_si256(dest, 9, ymm9); mm256_stream_si256(dest, 10, ymm10); mm256_stream_si256(dest, 11, ymm11); mm256_stream_si256(dest, 12, ymm12); mm256_stream_si256(dest, 13, ymm13); mm256_stream_si256(dest, 14, ymm14); mm256_stream_si256(dest, 15, ymm15); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); mm256_stream_si256(dest, 4, ymm4); mm256_stream_si256(dest, 5, ymm5); mm256_stream_si256(dest, 6, ymm6); mm256_stream_si256(dest, 7, ymm7); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); mm256_stream_si256(dest, 0, ymm0); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, xmm0); } static 
force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_avx_fw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } const char *srcend = src + len; prefetch_ini_fw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_fw(src, srcend); memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx_bw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt, flush); } const char *srcbegin = src - len; prefetch_ini_bw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_bw(src, srcbegin); dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx_fw(dest, src, len, flush, perf_barrier); else memmove_movnt_avx_bw(dest, src, len, flush, perf_barrier); barrier(); VALGRIND_DO_FLUSH(dest, len); } /* variants without perf_barrier */ void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
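/*
 * Illustrative note, not part of the original file:
 * COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64) above pins the
 * barrier interval to the unrolled loop body at compile time -- each
 * iteration streams exactly 12 cache lines (768 bytes) before the
 * optional write-combining barrier. A minimal C11 stand-in for that
 * kind of check (COMPILE_ERROR_ON itself is defined elsewhere in this
 * tree; the macro name below is hypothetical):
 */
#include <assert.h>

#define EXAMPLE_PERF_BARRIER_SIZE ((8 + 4) * 64)
static_assert(EXAMPLE_PERF_BARRIER_SIZE == 768,
	"barrier interval must match the unrolled loop body");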
10,092
21.731982
79
c
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy/memcpy_nt_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" #include "valgrind_internal.h" static force_inline __m128i mm_loadu_si128(const char *src, unsigned idx) { return _mm_loadu_si128((const __m128i *)src + idx); } static force_inline void mm_stream_si128(char *dest, unsigned idx, __m128i src) { _mm_stream_si128((__m128i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); __m128i xmm8 = mm_loadu_si128(src, 8); __m128i xmm9 = mm_loadu_si128(src, 9); __m128i xmm10 = mm_loadu_si128(src, 10); __m128i xmm11 = mm_loadu_si128(src, 11); __m128i xmm12 = mm_loadu_si128(src, 12); __m128i xmm13 = mm_loadu_si128(src, 13); __m128i xmm14 = mm_loadu_si128(src, 14); __m128i xmm15 = mm_loadu_si128(src, 15); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); mm_stream_si128(dest, 4, xmm4); mm_stream_si128(dest, 5, xmm5); mm_stream_si128(dest, 6, xmm6); mm_stream_si128(dest, 7, xmm7); mm_stream_si128(dest, 8, xmm8); mm_stream_si128(dest, 9, xmm9); mm_stream_si128(dest, 10, xmm10); mm_stream_si128(dest, 11, xmm11); mm_stream_si128(dest, 12, xmm12); mm_stream_si128(dest, 13, xmm13); mm_stream_si128(dest, 14, xmm14); mm_stream_si128(dest, 15, xmm15); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); mm_stream_si128(dest, 4, xmm4); mm_stream_si128(dest, 5, xmm5); mm_stream_si128(dest, 6, xmm6); mm_stream_si128(dest, 7, xmm7); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); mm_stream_si128(dest, 0, xmm0); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_sse_fw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { size_t cnt = 
(uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } const char *srcend = src + len; prefetch_ini_fw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_fw(src, srcend); memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; return; } nonnt: memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_movnt_sse_bw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt, flush); } const char *srcbegin = src - len; prefetch_ini_bw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_bw(src, srcbegin); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } return; } nonnt: dest -= len; src -= len; memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_movnt_sse2(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_sse_fw(dest, src, len, flush, perf_barrier); else memmove_movnt_sse_bw(dest, src, len, flush, perf_barrier); barrier(); VALGRIND_DO_FLUSH(dest, len); } /* variants without perf_barrier */ void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
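/*
 * Illustrative note, not part of the original file: the trailing-bytes
 * dispatch above issues a single small non-temporal store only when the
 * remainder is exactly 32, 16, 8 or 4 bytes; everything else falls back
 * to the flushed small-copy path. The power-of-two test it relies on
 * (util_is_pow2 elsewhere in this tree) reduces to the classic bit
 * trick (standalone name below is hypothetical):
 */
#include <stddef.h>

static inline int
example_is_pow2(size_t v)
{
	return v != 0 && (v & (v - 1)) == 0;	/* exactly one bit set */
}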
9,636
21.463869
80
c
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/x86_64/memcpy/memcpy_nt_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx512f.h" #include "valgrind_internal.h" static force_inline __m512i mm512_loadu_si512(const char *src, unsigned idx) { return _mm512_loadu_si512((const __m512i *)src + idx); } static force_inline void mm512_stream_si512(char *dest, unsigned idx, __m512i src) { _mm512_stream_si512((__m512i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt32x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); __m512i zmm16 = mm512_loadu_si512(src, 16); __m512i zmm17 = mm512_loadu_si512(src, 17); __m512i zmm18 = mm512_loadu_si512(src, 18); __m512i zmm19 = mm512_loadu_si512(src, 19); __m512i zmm20 = mm512_loadu_si512(src, 20); __m512i zmm21 = mm512_loadu_si512(src, 21); __m512i zmm22 = mm512_loadu_si512(src, 22); __m512i zmm23 = mm512_loadu_si512(src, 23); __m512i zmm24 = mm512_loadu_si512(src, 24); __m512i zmm25 = mm512_loadu_si512(src, 25); __m512i zmm26 = mm512_loadu_si512(src, 26); __m512i zmm27 = mm512_loadu_si512(src, 27); __m512i zmm28 = mm512_loadu_si512(src, 28); __m512i zmm29 = mm512_loadu_si512(src, 29); __m512i zmm30 = mm512_loadu_si512(src, 30); __m512i zmm31 = mm512_loadu_si512(src, 31); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); mm512_stream_si512(dest, 8, zmm8); mm512_stream_si512(dest, 9, zmm9); mm512_stream_si512(dest, 10, zmm10); mm512_stream_si512(dest, 11, zmm11); mm512_stream_si512(dest, 12, zmm12); mm512_stream_si512(dest, 13, zmm13); mm512_stream_si512(dest, 14, zmm14); mm512_stream_si512(dest, 15, zmm15); mm512_stream_si512(dest, 16, zmm16); mm512_stream_si512(dest, 17, zmm17); mm512_stream_si512(dest, 18, zmm18); mm512_stream_si512(dest, 19, zmm19); mm512_stream_si512(dest, 20, zmm20); mm512_stream_si512(dest, 21, zmm21); mm512_stream_si512(dest, 22, zmm22); mm512_stream_si512(dest, 23, zmm23); mm512_stream_si512(dest, 24, zmm24); mm512_stream_si512(dest, 25, zmm25); mm512_stream_si512(dest, 26, zmm26); mm512_stream_si512(dest, 27, zmm27); mm512_stream_si512(dest, 28, zmm28); mm512_stream_si512(dest, 29, zmm29); mm512_stream_si512(dest, 30, zmm30); mm512_stream_si512(dest, 31, zmm31); } static force_inline void memmove_movnt16x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = 
mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); mm512_stream_si512(dest, 8, zmm8); mm512_stream_si512(dest, 9, zmm9); mm512_stream_si512(dest, 10, zmm10); mm512_stream_si512(dest, 11, zmm11); mm512_stream_si512(dest, 12, zmm12); mm512_stream_si512(dest, 13, zmm13); mm512_stream_si512(dest, 14, zmm14); mm512_stream_si512(dest, 15, zmm15); } static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); mm512_stream_si512(dest, 0, zmm0); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i zmm0 = _mm256_loadu_si256((__m256i *)src); _mm256_stream_si256((__m256i *)dest, zmm0); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i ymm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, ymm0); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len, flush_fn flush) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_movnt32x64b(dest, src); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_movnt16x64b(dest, src); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 
64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx512f(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len, flush_fn flush) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt, flush); } while (len >= 32 * 64) { dest -= 32 * 64; src -= 32 * 64; len -= 32 * 64; memmove_movnt32x64b(dest, src); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_movnt16x64b(dest, src); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx512f(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx512f(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx512f_fw(dest, src, len, flush); else memmove_movnt_avx512f_bw(dest, src, len, flush); barrier(); VALGRIND_DO_FLUSH(dest, len); } void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, noflush, barrier_after_ntstores); } void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_empty_nolog, barrier_after_ntstores); } void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clflush_nolog, barrier_after_ntstores); } void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores); } void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores); }
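/*
 * Illustrative sketch, not part of the original file: the movnt family
 * relies on non-temporal stores bypassing the cache hierarchy, so after
 * a store fence drains the write-combining buffers no per-line flush is
 * needed (compare barrier_after_ntstores above). A minimal
 * self-contained example of that pattern for one 8-byte value
 * (hypothetical helper name; x86-64 only):
 */
#include <immintrin.h>

static inline void
example_nt_store_and_drain(long long *dest, long long value)
{
	_mm_stream_si64(dest, value);	/* NT store, bypasses the cache */
	_mm_sfence();			/* drain/order write-combining buffers */
}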
11,246
23.45
78
c
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/aarch64/arm_cacheops.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */

/*
 * ARM inline assembly to flush and invalidate caches
 * clwb => dc cvac
 * clflushopt => dc civac
 * fence => dmb ish
 * sfence => dmb ishst
 */

/*
 * Cache instructions on ARM:
 * ARMv8.0-a DC CVAC  - cache clean to Point of Coherency
 *                      Meant for thread synchronization, usually implies
 *                      real memory flush but may mean less.
 * ARMv8.2-a DC CVAP  - cache clean to Point of Persistency
 *                      Meant exactly for our use.
 * ARMv8.5-a DC CVADP - cache clean to Point of Deep Persistency
 *                      As of mid-2019 not on any commercially available CPU.
 * Any of the above may be disabled for EL0, but it's probably safe to consider
 * that a system configuration error.
 * Other flags include I (like "DC CIVAC") that invalidates the cache line, but
 * we don't want that.
 *
 * Memory fences:
 * * DMB [ISH]	MFENCE
 * * DMB [ISH]ST	SFENCE
 * * DMB [ISH]LD	LFENCE
 *
 * Memory domains (cache coherency):
 * * non-shareable - local to a single core
 * * inner shareable (ISH) - a group of CPU clusters/sockets/other hardware
 *   Linux requires that anything within one operating system/hypervisor
 *   is within the same Inner Shareable domain.
 * * outer shareable (OSH) - one or more separate ISH domains
 * * full system (SY) - anything that can possibly access memory
 * Docs: ARM DDI 0487E.a page B2-144.
 *
 * Exception (privilege) levels:
 * * EL0 - userspace (ring 3)
 * * EL1 - kernel (ring 0)
 * * EL2 - hypervisor (ring -1)
 * * EL3 - "secure world" (ring -3)
 */

#ifndef AARCH64_CACHEOPS_H
#define AARCH64_CACHEOPS_H

#include <stdlib.h>

static inline void
arm_clean_va_to_poc(void const *p __attribute__((unused)))
{
	asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}

static inline void
arm_store_memory_barrier(void)
{
	asm volatile("dmb ishst" : : : "memory");
}
#endif
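/*
 * Illustrative sketch, not part of the original header: a caller-side
 * persist loop over an arbitrary byte range built from the two
 * primitives above. The 64-byte line size is a hypothetical constant
 * here; real code would derive it from CTR_EL0.
 */
#include <stdint.h>
#include <stddef.h>

#define EXAMPLE_CACHELINE 64

static inline void
example_persist_range(const void *addr, size_t len)
{
	uintptr_t p = (uintptr_t)addr & ~(uintptr_t)(EXAMPLE_CACHELINE - 1);
	uintptr_t end = (uintptr_t)addr + len;

	for (; p < end; p += EXAMPLE_CACHELINE)
		arm_clean_va_to_poc((const void *)p);

	arm_store_memory_barrier();	/* dmb ishst: order the cleans */
}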
1,988
30.571429
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/libpmem2/ppc64/init.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, IBM Corporation */
/* Copyright 2019-2020, Intel Corporation */

#include <errno.h>
#include <sys/mman.h>

#include "out.h"
#include "pmem2_arch.h"
#include "util.h"

/*
 * Older assembler versions do not support the latest variants of the L
 * field, e.g. Binutils 2.34.
 * Work around this by encoding the instructions as raw longs.
 */
#define __SYNC(l) ".long (0x7c0004AC | ((" #l ") << 21))"
#define __DCBF(ra, rb, l) ".long (0x7c0000AC | ((" #l ") << 21)" \
	" | ((" #ra ") << 16) | ((" #rb ") << 11))"

static void
ppc_fence(void)
{
	LOG(15, NULL);

	/*
	 * Force a memory barrier to flush out all cache lines.
	 * Uses a heavyweight sync in order to guarantee the memory ordering
	 * even with a data cache flush.
	 * According to the POWER ISA 3.1, phwsync (aka. sync (L=4)) is treated
	 * as a hwsync by processors compatible with previous versions of the
	 * POWER ISA.
	 */
	asm volatile(__SYNC(4) : : : "memory");
}

static void
ppc_flush(const void *addr, size_t size)
{
	LOG(15, "addr %p size %zu", addr, size);

	uintptr_t uptr = (uintptr_t)addr;
	uintptr_t end = uptr + size;

	/* round down the address */
	uptr &= ~(CACHELINE_SIZE - 1);
	while (uptr < end) {
		/*
		 * Flush the data cache block.
		 * According to the POWER ISA 3.1, dcbstps (aka. dcbf (L=6))
		 * behaves as dcbf (L=0) on previous processors.
		 */
		asm volatile(__DCBF(0, %0, 6) : :"r"(uptr) : "memory");

		uptr += CACHELINE_SIZE;
	}
}

void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	LOG(3, "libpmem*: PPC64 support");

	info->fence = ppc_fence;
	info->flush = ppc_flush;
}
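/*
 * Worked example, not part of the original file: with L = 4, __SYNC
 * expands to the 32-bit word 0x7c0004AC | (4 << 21) = 0x7C8004AC, the
 * phwsync encoding, which older assemblers accept as a raw .long even
 * when they reject the mnemonic. The arithmetic can be checked in
 * plain C:
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t phwsync = UINT32_C(0x7c0004AC) | (UINT32_C(4) << 21);

	printf("phwsync opcode: 0x%08X\n", phwsync);	/* prints 0x7C8004AC */
	return 0;
}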
1,594
22.80597
74
c
null
NearPMSW-main/nearpm/logging/pmdk/src/windows/getopt/getopt.c
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "getopt.h" #include <stddef.h> #include <string.h> #include <stdio.h> char* optarg; int optopt; /* The variable optind [...] shall be initialized to 1 by the system. */ int optind = 1; int opterr; static char* optcursor = NULL; static char *first = NULL; /* rotates argv array */ static void rotate(char **argv, int argc) { if (argc <= 1) return; char *tmp = argv[0]; memmove(argv, argv + 1, (argc - 1) * sizeof(char *)); argv[argc - 1] = tmp; } /* Implemented based on [1] and [2] for optional arguments. optopt is handled FreeBSD-style, per [3]. Other GNU and FreeBSD extensions are purely accidental. [1] https://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html [2] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html [3] https://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE */ int getopt(int argc, char* const argv[], const char* optstring) { int optchar = -1; const char* optdecl = NULL; optarg = NULL; opterr = 0; optopt = 0; /* Unspecified, but we need it to avoid overrunning the argv bounds. */ if (optind >= argc) goto no_more_optchars; /* If, when getopt() is called argv[optind] is a null pointer, getopt() shall return -1 without changing optind. */ if (argv[optind] == NULL) goto no_more_optchars; /* If, when getopt() is called *argv[optind] is not the character '-', permute argv to move non options to the end */ if (*argv[optind] != '-') { if (argc - optind <= 1) goto no_more_optchars; if (!first) first = argv[optind]; do { rotate((char **)(argv + optind), argc - optind); } while (*argv[optind] != '-' && argv[optind] != first); if (argv[optind] == first) goto no_more_optchars; } /* If, when getopt() is called argv[optind] points to the string "-", getopt() shall return -1 without changing optind. */ if (strcmp(argv[optind], "-") == 0) goto no_more_optchars; /* If, when getopt() is called argv[optind] points to the string "--", getopt() shall return -1 after incrementing optind. 
*/ if (strcmp(argv[optind], "--") == 0) { ++optind; if (first) { do { rotate((char **)(argv + optind), argc - optind); } while (argv[optind] != first); } goto no_more_optchars; } if (optcursor == NULL || *optcursor == '\0') optcursor = argv[optind] + 1; optchar = *optcursor; /* FreeBSD: The variable optopt saves the last known option character returned by getopt(). */ optopt = optchar; /* The getopt() function shall return the next option character (if one is found) from argv that matches a character in optstring, if there is one that matches. */ optdecl = strchr(optstring, optchar); if (optdecl) { /* [I]f a character is followed by a colon, the option takes an argument. */ if (optdecl[1] == ':') { optarg = ++optcursor; if (*optarg == '\0') { /* GNU extension: Two colons mean an option takes an optional arg; if there is text in the current argv-element (i.e., in the same word as the option name itself, for example, "-oarg"), then it is returned in optarg, otherwise optarg is set to zero. */ if (optdecl[2] != ':') { /* If the option was the last character in the string pointed to by an element of argv, then optarg shall contain the next element of argv, and optind shall be incremented by 2. If the resulting value of optind is greater than argc, this indicates a missing option-argument, and getopt() shall return an error indication. Otherwise, optarg shall point to the string following the option character in that element of argv, and optind shall be incremented by 1. */ if (++optind < argc) { optarg = argv[optind]; } else { /* If it detects a missing option-argument, it shall return the colon character ( ':' ) if the first character of optstring was a colon, or a question-mark character ( '?' ) otherwise. */ optarg = NULL; fprintf(stderr, "%s: option requires an argument -- '%c'\n", argv[0], optchar); optchar = (optstring[0] == ':') ? ':' : '?'; } } else { optarg = NULL; } } optcursor = NULL; } } else { fprintf(stderr,"%s: invalid option -- '%c'\n", argv[0], optchar); /* If getopt() encounters an option character that is not contained in optstring, it shall return the question-mark ( '?' ) character. */ optchar = '?'; } if (optcursor == NULL || *++optcursor == '\0') ++optind; return optchar; no_more_optchars: optcursor = NULL; first = NULL; return -1; } /* Implementation based on [1]. [1] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html */ int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex) { const struct option* o = longopts; const struct option* match = NULL; int num_matches = 0; size_t argument_name_length = 0; const char* current_argument = NULL; int retval = -1; optarg = NULL; optopt = 0; if (optind >= argc) return -1; /* If, when getopt() is called argv[optind] is a null pointer, getopt_long() shall return -1 without changing optind. */ if (argv[optind] == NULL) goto no_more_optchars; /* If, when getopt_long() is called *argv[optind] is not the character '-', permute argv to move non options to the end */ if (*argv[optind] != '-') { if (argc - optind <= 1) goto no_more_optchars; if (!first) first = argv[optind]; do { rotate((char **)(argv + optind), argc - optind); } while (*argv[optind] != '-' && argv[optind] != first); if (argv[optind] == first) goto no_more_optchars; } if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0) return getopt(argc, argv, optstring); /* It's an option; starts with -- and is longer than two chars. 
*/ current_argument = argv[optind] + 2; argument_name_length = strcspn(current_argument, "="); for (; o->name; ++o) { if (strncmp(o->name, current_argument, argument_name_length) == 0) { match = o; ++num_matches; if (strlen(o->name) == argument_name_length) { /* found match is exactly the one which we are looking for */ num_matches = 1; break; } } } if (num_matches == 1) { /* If longindex is not NULL, it points to a variable which is set to the index of the long option relative to longopts. */ if (longindex) *longindex = (int)(match - longopts); /* If flag is NULL, then getopt_long() shall return val. Otherwise, getopt_long() returns 0, and flag shall point to a variable which shall be set to val if the option is found, but left unchanged if the option is not found. */ if (match->flag) *(match->flag) = match->val; retval = match->flag ? 0 : match->val; if (match->has_arg != no_argument) { optarg = strchr(argv[optind], '='); if (optarg != NULL) ++optarg; if (match->has_arg == required_argument) { /* Only scan the next argv for required arguments. Behavior is not specified, but has been observed with Ubuntu and Mac OSX. */ if (optarg == NULL && ++optind < argc) { optarg = argv[optind]; } if (optarg == NULL) retval = ':'; } } else if (strchr(argv[optind], '=')) { /* An argument was provided to a non-argument option. I haven't seen this specified explicitly, but both GNU and BSD-based implementations show this behavior. */ retval = '?'; } } else { /* Unknown option or ambiguous match. */ retval = '?'; if (num_matches == 0) { fprintf(stderr, "%s: unrecognized option -- '%s'\n", argv[0], argv[optind]); } else { fprintf(stderr, "%s: option '%s' is ambiguous\n", argv[0], argv[optind]); } } ++optind; return retval; no_more_optchars: first = NULL; return -1; }
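A minimal sketch of driving this port (the option set and messages are illustrative); note that, unlike strict POSIX, this implementation permutes argv so non-option arguments end up after the options:

#include <stdio.h>
#include "getopt.h"

int main(int argc, char *argv[])
{
	int opt;
	int verbose = 0;
	const char *output = NULL;
	static const struct option longopts[] = {
		{"verbose", no_argument, NULL, 'v'},
		{"output", required_argument, NULL, 'o'},
		{NULL, 0, NULL, 0},
	};

	while ((opt = getopt_long(argc, argv, "vo:", longopts, NULL)) != -1) {
		switch (opt) {
		case 'v':
			verbose = 1;
			break;
		case 'o':
			output = optarg;
			break;
		default:	/* '?' or ':' */
			fprintf(stderr, "usage: %s [-v] [-o file] args...\n",
			    argv[0]);
			return 1;
		}
	}

	/* this port permutes argv, so non-options now sit at the end */
	for (; optind < argc; ++optind)
		printf("arg: %s (verbose=%d, output=%s)\n",
		    argv[optind], verbose, output ? output : "-");
	return 0;
}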
9,866
32.561224
91
c
null
NearPMSW-main/nearpm/logging/pmdk/src/windows/getopt/getopt.h
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef INCLUDED_GETOPT_PORT_H #define INCLUDED_GETOPT_PORT_H #if defined(__cplusplus) extern "C" { #endif #define no_argument 0 #define required_argument 1 #define optional_argument 2 extern char* optarg; extern int optind, opterr, optopt; struct option { const char* name; int has_arg; int* flag; int val; }; int getopt(int argc, char* const argv[], const char* optstring); int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex); #if defined(__cplusplus) } #endif #endif // INCLUDED_GETOPT_PORT_H
2,137
35.237288
79
h
null
NearPMSW-main/nearpm/logging/pmdk/src/windows/include/win_mmap.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * win_mmap.h -- (internal) tracks the regions mapped by mmap */ #ifndef WIN_MMAP_H #define WIN_MMAP_H 1 #include "queue.h" #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define rounddown(x, y) (((x) / (y)) * (y)) void win_mmap_init(void); void win_mmap_fini(void); /* allocation/mmap granularity */ extern unsigned long long Mmap_align; typedef enum FILE_MAPPING_TRACKER_FLAGS { FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001, /* * This should hold the value of all flags ORed for debug purpose. */ FILE_MAPPING_TRACKER_FLAGS_MASK = FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED } FILE_MAPPING_TRACKER_FLAGS; /* * this structure tracks the file mappings outstanding per file handle */ typedef struct FILE_MAPPING_TRACKER { PMDK_SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry; HANDLE FileHandle; HANDLE FileMappingHandle; void *BaseAddress; void *EndAddress; DWORD Access; os_off_t Offset; size_t FileLen; FILE_MAPPING_TRACKER_FLAGS Flags; } FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER; extern SRWLOCK FileMappingQLock; extern PMDK_SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead; #endif /* WIN_MMAP_H */
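A quick worked example of the two rounding macros above, assuming the typical 64 KiB Windows allocation granularity (the actual value of Mmap_align is presumably established at runtime by win_mmap_init(); the macros are repeated here only to keep the sketch standalone):

#include <stdio.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) (((x) / (y)) * (y))

int main(void)
{
	unsigned long long align = 65536;	/* assumption: 64 KiB granularity */

	/* a 100000-byte request is padded up to the next granularity boundary */
	printf("%llu\n", roundup(100000ULL, align));	/* prints 131072 */
	/* while offsets are snapped down to the previous boundary */
	printf("%llu\n", rounddown(100000ULL, align));	/* prints 65536 */
	return 0;
}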
2,871
34.02439
74
h
null
NearPMSW-main/nearpm/logging/pmdk/src/windows/include/platform.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * platform.h -- dirty hacks to compile Linux code on Windows using VC++ * * This is included to each source file using "/FI" (forced include) option. * * XXX - it is a subject for refactoring */ #ifndef PLATFORM_H #define PLATFORM_H 1 #pragma warning(disable : 4996) #pragma warning(disable : 4200) /* allow flexible array member */ #pragma warning(disable : 4819) /* non unicode characters */ #ifdef __cplusplus extern "C" { #endif /* Prevent PMDK compilation for 32-bit platforms */ #if defined(_WIN32) && !defined(_WIN64) #error "32-bit builds of PMDK are not supported!" #endif #define _CRT_RAND_S /* rand_s() */ #include <windows.h> #include <stdint.h> #include <time.h> #include <io.h> #include <process.h> #include <fcntl.h> #include <sys/types.h> #include <malloc.h> #include <signal.h> #include <intrin.h> #include <direct.h> /* use uuid_t definition from util.h */ #ifdef uuid_t #undef uuid_t #endif /* a few trivial substitutions */ #define PATH_MAX MAX_PATH #define __thread __declspec(thread) #define __func__ __FUNCTION__ #ifdef _DEBUG #define DEBUG #endif /* * The inline keyword is available only in VC++. 
* https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx */ #ifndef __cplusplus #define inline __inline #endif /* XXX - no equivalents in VC++ */ #define __attribute__(a) #define __builtin_constant_p(cnd) 0 /* * missing definitions */ /* errno.h */ #define ELIBACC 79 /* cannot access a needed shared library */ /* sys/stat.h */ #define S_IRUSR S_IREAD #define S_IWUSR S_IWRITE #define S_IRGRP S_IRUSR #define S_IWGRP S_IWUSR #define O_SYNC 0 typedef int mode_t; #define fchmod(fd, mode) 0 /* XXX - dummy */ #define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ); /* unistd.h */ typedef long long os_off_t; typedef long long ssize_t; int setenv(const char *name, const char *value, int overwrite); int unsetenv(const char *name); /* fcntl.h */ int posix_fallocate(int fd, os_off_t offset, os_off_t len); /* string.h */ #define strtok_r strtok_s /* time.h */ #define CLOCK_MONOTONIC 1 #define CLOCK_REALTIME 2 int clock_gettime(int id, struct timespec *ts); /* signal.h */ typedef unsigned long long sigset_t; /* one bit for each signal */ C_ASSERT(NSIG <= sizeof(sigset_t) * 8); struct sigaction { void (*sa_handler) (int signum); /* void (*sa_sigaction)(int, siginfo_t *, void *); */ sigset_t sa_mask; int sa_flags; void (*sa_restorer) (void); }; __inline int sigemptyset(sigset_t *set) { *set = 0; return 0; } __inline int sigfillset(sigset_t *set) { *set = ~0; return 0; } __inline int sigaddset(sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } *set |= (1ULL << (signum - 1)); return 0; } __inline int sigdelset(sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } *set &= ~(1ULL << (signum - 1)); return 0; } __inline int sigismember(const sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } return ((*set & (1ULL << (signum - 1))) ? 1 : 0); } /* sched.h */ /* * sched_yield -- yield the processor */ __inline int sched_yield(void) { SwitchToThread(); return 0; /* always succeeds */ } /* * helper macros for library ctor/dtor function declarations */ #define MSVC_CONSTR(func) \ void func(void); \ __pragma(comment(linker, "/include:_" #func)) \ __pragma(section(".CRT$XCU", read)) \ __declspec(allocate(".CRT$XCU")) \ const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func; #define MSVC_DESTR(func) \ void func(void); \ static void _##func##_reg(void) { atexit(func); }; \ MSVC_CONSTR(_##func##_reg) #ifdef __cplusplus } #endif #endif /* PLATFORM_H */
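A minimal sketch of the sigset_t emulation above in use, assuming this platform.h is force-included via /FI as its header comment describes:

#include <stdio.h>
#include <signal.h>

int main(void)
{
	sigset_t set;

	/* each signal maps to one bit of the 64-bit set */
	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	printf("SIGINT member: %d\n", sigismember(&set, SIGINT));	/* 1 */

	sigdelset(&set, SIGINT);
	printf("SIGINT member: %d\n", sigismember(&set, SIGINT));	/* 0 */
	return 0;
}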
5,431
22.929515
76
h
null
NearPMSW-main/nearpm/logging/pmdk/src/windows/include/endian.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * endian.h -- convert values between host and big-/little-endian byte order */ #ifndef ENDIAN_H #define ENDIAN_H 1 /* * XXX: On Windows we can assume little-endian architecture */ #include <intrin.h> #define htole16(a) (a) #define htole32(a) (a) #define htole64(a) (a) #define le16toh(a) (a) #define le32toh(a) (a) #define le64toh(a) (a) #define htobe16(x) _byteswap_ushort(x) #define htobe32(x) _byteswap_ulong(x) #define htobe64(x) _byteswap_uint64(x) #define be16toh(x) _byteswap_ushort(x) #define be32toh(x) _byteswap_ulong(x) #define be64toh(x) _byteswap_uint64(x) #endif /* ENDIAN_H */
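A quick worked example (MSVC-only, since the big-endian macros expand to _byteswap_* intrinsics): on little-endian Windows the htole*/le*toh macros are identity, while the big-endian conversions byte-swap:

#include <stdio.h>
#include <stdint.h>
#include "endian.h"

int main(void)
{
	uint32_t v = 0x12345678u;

	/* a no-op on little-endian Windows */
	printf("htole32: 0x%08X\n", (unsigned)htole32(v));	/* 0x12345678 */
	/* byte-swapped for big-endian wire formats */
	printf("htobe32: 0x%08X\n", (unsigned)htobe32(v));	/* 0x78563412 */
	return 0;
}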
696
20.121212
76
h
null
NearPMSW-main/nearpm/logging/pmdk/src/windows/include/sys/file.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * sys/file.h -- file locking */
1,750
45.078947
74
h
null
NearPMSW-main/nearpm/logging/pmdk/src/windows/include/sys/param.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * sys/param.h -- a few useful macros */ #ifndef SYS_PARAM_H #define SYS_PARAM_H 1 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define howmany(x, y) (((x) + ((y) - 1)) / (y)) #define BPB 8 /* bits per byte */ #define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB)) #define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB))) #define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0) #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif /* SYS_PARAM_H */
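A worked example of the bitmap macros above (the macros are repeated here only to keep the sketch standalone):

#include <stdio.h>

#define howmany(x, y) (((x) + ((y) - 1)) / (y))
#define BPB 8	/* bits per byte */
#define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB))
#define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB)))

int main(void)
{
	/* howmany(20, 8) == 3: three bytes are enough for 20 bits */
	unsigned char map[howmany(20, BPB)] = {0};

	setbit(map, 10);	/* sets bit 2 of map[1] */
	printf("%d\n", isset(map, 10) != 0);	/* 1 */
	printf("%d\n", isset(map, 11) != 0);	/* 0 */
	return 0;
}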
612
24.541667
64
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemblk.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */

/*
 * libpmemblk.h -- definitions of libpmemblk entry points
 *
 * This library provides support for programming with persistent memory (pmem).
 *
 * libpmemblk provides support for arrays of atomically-writable blocks.
 *
 * See libpmemblk(7) for details.
 */

#ifndef LIBPMEMBLK_H
#define LIBPMEMBLK_H 1

#include <sys/types.h>

#ifdef _WIN32
#include <pmemcompat.h>

#ifndef PMDK_UTF8_API
#define pmemblk_open pmemblk_openW
#define pmemblk_create pmemblk_createW
#define pmemblk_check pmemblk_checkW
#define pmemblk_check_version pmemblk_check_versionW
#define pmemblk_errormsg pmemblk_errormsgW
#define pmemblk_ctl_get pmemblk_ctl_getW
#define pmemblk_ctl_set pmemblk_ctl_setW
#define pmemblk_ctl_exec pmemblk_ctl_execW
#else
#define pmemblk_open pmemblk_openU
#define pmemblk_create pmemblk_createU
#define pmemblk_check pmemblk_checkU
#define pmemblk_check_version pmemblk_check_versionU
#define pmemblk_errormsg pmemblk_errormsgU
#define pmemblk_ctl_get pmemblk_ctl_getU
#define pmemblk_ctl_set pmemblk_ctl_setU
#define pmemblk_ctl_exec pmemblk_ctl_execU
#endif

#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * opaque type, internal to libpmemblk
 */
typedef struct pmemblk PMEMblkpool;

/*
 * PMEMBLK_MAJOR_VERSION and PMEMBLK_MINOR_VERSION provide the current version
 * of the libpmemblk API as provided by this header file. Applications can
 * verify that the version available at run-time is compatible with the version
 * used at compile-time by passing these defines to pmemblk_check_version().
 */
#define PMEMBLK_MAJOR_VERSION 1
#define PMEMBLK_MINOR_VERSION 1

#ifndef _WIN32
const char *pmemblk_check_version(unsigned major_required,
	unsigned minor_required);
#else
const char *pmemblk_check_versionU(unsigned major_required,
	unsigned minor_required);
const wchar_t *pmemblk_check_versionW(unsigned major_required,
	unsigned minor_required);
#endif

/* XXX - unify minimum pool size for both OS-es */

#ifndef _WIN32
#if defined(__x86_64__) || defined(__M_X64__) || defined(__aarch64__)
/* minimum pool size: 16MiB + 8KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 8))
#elif defined(__PPC64__)
/* minimum pool size: 16MiB + 128KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 128))
#else
#error unable to recognize ISA at compile time
#endif
#else
/* minimum pool size: 16MiB + 64KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 64))
#endif

/*
 * This limit is set arbitrarily; it leaves room for a pool header and the
 * required alignment, with some space to spare.
*/ #define PMEMBLK_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #define PMEMBLK_MIN_BLK ((size_t)512) #ifndef _WIN32 PMEMblkpool *pmemblk_open(const char *path, size_t bsize); #else PMEMblkpool *pmemblk_openU(const char *path, size_t bsize); PMEMblkpool *pmemblk_openW(const wchar_t *path, size_t bsize); #endif #ifndef _WIN32 PMEMblkpool *pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode); #else PMEMblkpool *pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode); PMEMblkpool *pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemblk_check(const char *path, size_t bsize); #else int pmemblk_checkU(const char *path, size_t bsize); int pmemblk_checkW(const wchar_t *path, size_t bsize); #endif void pmemblk_close(PMEMblkpool *pbp); size_t pmemblk_bsize(PMEMblkpool *pbp); size_t pmemblk_nblock(PMEMblkpool *pbp); int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno); int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno); int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno); int pmemblk_set_error(PMEMblkpool *pbp, long long blockno); /* * Passing NULL to pmemblk_set_funcs() tells libpmemblk to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemblk. */ void pmemblk_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); #ifndef _WIN32 const char *pmemblk_errormsg(void); #else const char *pmemblk_errormsgU(void); const wchar_t *pmemblk_errormsgW(void); #endif #ifndef _WIN32 /* EXPERIMENTAL */ int pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg); #else int pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg); int pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg); int pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg); #endif #ifdef __cplusplus } #endif #endif /* libpmemblk.h */
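A minimal usage sketch of the block-pool API declared above (the pool path, block size, and permissions are illustrative; error handling is abbreviated):

#include <stdio.h>
#include <string.h>
#include <libpmemblk.h>

int main(void)
{
	/* 1024-byte blocks; path and mode are illustrative */
	PMEMblkpool *pbp = pmemblk_create("/pmem/blkpool", 1024,
	    PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL) {
		perror("pmemblk_create");
		return 1;
	}

	char buf[1024];
	memset(buf, 0, sizeof(buf));
	strcpy(buf, "hello, block 5");

	/* each full-block write is atomic with respect to power failure */
	if (pmemblk_write(pbp, buf, 5) < 0)
		perror("pmemblk_write");

	char out[1024];
	if (pmemblk_read(pbp, out, 5) == 0)
		printf("block 5 of %zu: %s\n", pmemblk_nblock(pbp), out);

	pmemblk_close(pbp);
	return 0;
}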
5,183
30.418182
79
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmempool.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * libpmempool.h -- definitions of libpmempool entry points * * See libpmempool(7) for details. */ #ifndef LIBPMEMPOOL_H #define LIBPMEMPOOL_H 1 #include <stdint.h> #include <stddef.h> #include <limits.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmempool_check_status pmempool_check_statusW #define pmempool_check_args pmempool_check_argsW #define pmempool_check_init pmempool_check_initW #define pmempool_check pmempool_checkW #define pmempool_sync pmempool_syncW #define pmempool_transform pmempool_transformW #define pmempool_rm pmempool_rmW #define pmempool_check_version pmempool_check_versionW #define pmempool_errormsg pmempool_errormsgW #define pmempool_feature_enable pmempool_feature_enableW #define pmempool_feature_disable pmempool_feature_disableW #define pmempool_feature_query pmempool_feature_queryW #else #define pmempool_check_status pmempool_check_statusU #define pmempool_check_args pmempool_check_argsU #define pmempool_check_init pmempool_check_initU #define pmempool_check pmempool_checkU #define pmempool_sync pmempool_syncU #define pmempool_transform pmempool_transformU #define pmempool_rm pmempool_rmU #define pmempool_check_version pmempool_check_versionU #define pmempool_errormsg pmempool_errormsgU #define pmempool_feature_enable pmempool_feature_enableU #define pmempool_feature_disable pmempool_feature_disableU #define pmempool_feature_query pmempool_feature_queryU #endif #endif #ifdef __cplusplus extern "C" { #endif /* PMEMPOOL CHECK */ /* * pool types */ enum pmempool_pool_type { PMEMPOOL_POOL_TYPE_DETECT, PMEMPOOL_POOL_TYPE_LOG, PMEMPOOL_POOL_TYPE_BLK, PMEMPOOL_POOL_TYPE_OBJ, PMEMPOOL_POOL_TYPE_BTT, PMEMPOOL_POOL_TYPE_RESERVED1, /* used to be cto */ }; /* * perform repairs */ #define PMEMPOOL_CHECK_REPAIR (1U << 0) /* * emulate repairs */ #define PMEMPOOL_CHECK_DRY_RUN (1U << 1) /* * perform hazardous repairs */ #define PMEMPOOL_CHECK_ADVANCED (1U << 2) /* * do not ask before repairs */ #define PMEMPOOL_CHECK_ALWAYS_YES (1U << 3) /* * generate info statuses */ #define PMEMPOOL_CHECK_VERBOSE (1U << 4) /* * generate string format statuses */ #define PMEMPOOL_CHECK_FORMAT_STR (1U << 5) /* * types of check statuses */ enum pmempool_check_msg_type { PMEMPOOL_CHECK_MSG_TYPE_INFO, PMEMPOOL_CHECK_MSG_TYPE_ERROR, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, }; /* * check result types */ enum pmempool_check_result { PMEMPOOL_CHECK_RESULT_CONSISTENT, PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT, PMEMPOOL_CHECK_RESULT_REPAIRED, PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR, PMEMPOOL_CHECK_RESULT_ERROR, PMEMPOOL_CHECK_RESULT_SYNC_REQ, }; /* * check context */ typedef struct pmempool_check_ctx PMEMpoolcheck; /* * finalize the check and get the result */ enum pmempool_check_result pmempool_check_end(PMEMpoolcheck *ppc); /* PMEMPOOL RM */ #define PMEMPOOL_RM_FORCE (1U << 0) /* ignore any errors */ #define PMEMPOOL_RM_POOLSET_LOCAL (1U << 1) /* remove local poolsets */ #define PMEMPOOL_RM_POOLSET_REMOTE (1U << 2) /* remove remote poolsets */ /* * LIBPMEMPOOL SYNC */ /* * fix bad blocks - it requires creating or reading special recovery files */ #define PMEMPOOL_SYNC_FIX_BAD_BLOCKS (1U << 0) /* * do not apply changes, only check if operation is viable */ #define PMEMPOOL_SYNC_DRY_RUN (1U << 1) /* * LIBPMEMPOOL TRANSFORM */ /* * do not apply changes, only check if operation is viable */ #define PMEMPOOL_TRANSFORM_DRY_RUN (1U << 1) /* * PMEMPOOL_MAJOR_VERSION and PMEMPOOL_MINOR_VERSION provide the current 
version * of the libpmempool API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmempool_check_version(). */ #define PMEMPOOL_MAJOR_VERSION 1 #define PMEMPOOL_MINOR_VERSION 3 /* * check status */ struct pmempool_check_statusU { enum pmempool_check_msg_type type; struct { const char *msg; const char *answer; } str; }; #ifndef _WIN32 #define pmempool_check_status pmempool_check_statusU #else struct pmempool_check_statusW { enum pmempool_check_msg_type type; struct { const wchar_t *msg; const wchar_t *answer; } str; }; #endif /* * check context arguments */ struct pmempool_check_argsU { const char *path; const char *backup_path; enum pmempool_pool_type pool_type; unsigned flags; }; #ifndef _WIN32 #define pmempool_check_args pmempool_check_argsU #else struct pmempool_check_argsW { const wchar_t *path; const wchar_t *backup_path; enum pmempool_pool_type pool_type; unsigned flags; }; #endif /* * initialize a check context */ #ifndef _WIN32 PMEMpoolcheck * pmempool_check_init(struct pmempool_check_args *args, size_t args_size); #else PMEMpoolcheck * pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size); PMEMpoolcheck * pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size); #endif /* * start / resume the check */ #ifndef _WIN32 struct pmempool_check_status *pmempool_check(PMEMpoolcheck *ppc); #else struct pmempool_check_statusU *pmempool_checkU(PMEMpoolcheck *ppc); struct pmempool_check_statusW *pmempool_checkW(PMEMpoolcheck *ppc); #endif /* * LIBPMEMPOOL SYNC & TRANSFORM */ /* * Synchronize data between replicas within a poolset. * * EXPERIMENTAL */ #ifndef _WIN32 int pmempool_sync(const char *poolset_file, unsigned flags); #else int pmempool_syncU(const char *poolset_file, unsigned flags); int pmempool_syncW(const wchar_t *poolset_file, unsigned flags); #endif /* * Modify internal structure of a poolset. 
* * EXPERIMENTAL */ #ifndef _WIN32 int pmempool_transform(const char *poolset_file_src, const char *poolset_file_dst, unsigned flags); #else int pmempool_transformU(const char *poolset_file_src, const char *poolset_file_dst, unsigned flags); int pmempool_transformW(const wchar_t *poolset_file_src, const wchar_t *poolset_file_dst, unsigned flags); #endif /* PMEMPOOL feature enable, disable, query */ /* * feature types */ enum pmempool_feature { PMEMPOOL_FEAT_SINGLEHDR, PMEMPOOL_FEAT_CKSUM_2K, PMEMPOOL_FEAT_SHUTDOWN_STATE, PMEMPOOL_FEAT_CHECK_BAD_BLOCKS, }; /* PMEMPOOL FEATURE ENABLE */ #ifndef _WIN32 int pmempool_feature_enable(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_enableU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_enableW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL FEATURE DISABLE */ #ifndef _WIN32 int pmempool_feature_disable(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_disableU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_disableW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL FEATURE QUERY */ #ifndef _WIN32 int pmempool_feature_query(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_queryU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_queryW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL RM */ #ifndef _WIN32 int pmempool_rm(const char *path, unsigned flags); #else int pmempool_rmU(const char *path, unsigned flags); int pmempool_rmW(const wchar_t *path, unsigned flags); #endif #ifndef _WIN32 const char *pmempool_check_version(unsigned major_required, unsigned minor_required); #else const char *pmempool_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmempool_check_versionW(unsigned major_required, unsigned minor_required); #endif #ifndef _WIN32 const char *pmempool_errormsg(void); #else const char *pmempool_errormsgU(void); const wchar_t *pmempool_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmempool.h */
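A minimal sketch of the check flow declared above (the pool path is illustrative); PMEMPOOL_CHECK_ALWAYS_YES sidesteps interactive questions and PMEMPOOL_CHECK_FORMAT_STR requests string-format statuses:

#include <stdio.h>
#include <string.h>
#include <libpmempool.h>

int main(void)
{
	struct pmempool_check_args args;
	memset(&args, 0, sizeof(args));
	args.path = "/pmem/blkpool";	/* illustrative path */
	args.backup_path = NULL;
	args.pool_type = PMEMPOOL_POOL_TYPE_DETECT;
	args.flags = PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_ALWAYS_YES |
	    PMEMPOOL_CHECK_FORMAT_STR;

	PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args));
	if (ppc == NULL) {
		perror("pmempool_check_init");
		return 1;
	}

	/* drain statuses until the check completes */
	struct pmempool_check_status *status;
	while ((status = pmempool_check(ppc)) != NULL)
		fprintf(stderr, "%s\n", status->str.msg);

	enum pmempool_check_result res = pmempool_check_end(ppc);
	return (res == PMEMPOOL_CHECK_RESULT_CONSISTENT ||
	    res == PMEMPOOL_CHECK_RESULT_REPAIRED) ? 0 : 1;
}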
8,009
22.910448
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/librpmem.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */

/*
 * librpmem.h -- definitions of librpmem entry points (EXPERIMENTAL)
 *
 * This library provides low-level support for remote access to persistent
 * memory utilizing RDMA-capable RNICs.
 *
 * See librpmem(7) for details.
 */

#ifndef LIBRPMEM_H
#define LIBRPMEM_H 1

#include <sys/types.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

typedef struct rpmem_pool RPMEMpool;

#define RPMEM_POOL_HDR_SIG_LEN	8
#define RPMEM_POOL_HDR_UUID_LEN	16 /* uuid byte length */
#define RPMEM_POOL_USER_FLAGS_LEN	16

struct rpmem_pool_attr {
	char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */
	uint32_t major; /* format major version number */
	uint32_t compat_features; /* mask: compatible "may" features */
	uint32_t incompat_features; /* mask: "must support" features */
	uint32_t ro_compat_features; /* mask: force RO if unsupported */
	unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */
	unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */
	unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */
	unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */
	unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */
};

RPMEMpool *rpmem_create(const char *target, const char *pool_set_name,
	void *pool_addr, size_t pool_size, unsigned *nlanes,
	const struct rpmem_pool_attr *create_attr);
RPMEMpool *rpmem_open(const char *target, const char *pool_set_name,
	void *pool_addr, size_t pool_size, unsigned *nlanes,
	struct rpmem_pool_attr *open_attr);
int rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr);
int rpmem_close(RPMEMpool *rpp);

#define RPMEM_PERSIST_RELAXED	(1U << 0)
#define RPMEM_FLUSH_RELAXED	(1U << 0)

int rpmem_flush(RPMEMpool *rpp, size_t offset, size_t length,
	unsigned lane, unsigned flags);
int rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags);
int rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length,
	unsigned lane, unsigned flags);
int rpmem_read(RPMEMpool *rpp, void *buff, size_t offset, size_t length,
	unsigned lane);
int rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length,
	unsigned lane);

#define RPMEM_REMOVE_FORCE	0x1
#define RPMEM_REMOVE_POOL_SET	0x2

int rpmem_remove(const char *target, const char *pool_set, int flags);

/*
 * RPMEM_MAJOR_VERSION and RPMEM_MINOR_VERSION provide the current version of
 * the librpmem API as provided by this header file. Applications can verify
 * that the version available at run-time is compatible with the version used
 * at compile-time by passing these defines to rpmem_check_version().
 */
#define RPMEM_MAJOR_VERSION 1
#define RPMEM_MINOR_VERSION 3
const char *rpmem_check_version(unsigned major_required,
	unsigned minor_required);

const char *rpmem_errormsg(void);

/* minimum size of a pool */
#define RPMEM_MIN_POOL ((size_t)(1024 * 8)) /* 8 KiB */

/*
 * This limit is set arbitrarily; it leaves room for a pool header and the
 * required alignment, with some space to spare.
 */
#define RPMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */

#ifdef __cplusplus
}
#endif
#endif	/* librpmem.h */
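A minimal usage sketch, assuming a reachable rpmemd target and a configured poolset file (the target name, poolset name, sizes, the lane count, and the 4 KiB header skip are all illustrative assumptions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <librpmem.h>

int main(void)
{
	/* local memory backing the remote pool; page alignment assumed */
	void *pool = aligned_alloc(4096, RPMEM_MIN_POOL);
	if (pool == NULL)
		return 1;
	memset(pool, 0, RPMEM_MIN_POOL);

	unsigned nlanes = 4;	/* illustrative; adjusted by the library */
	struct rpmem_pool_attr attr;
	memset(&attr, 0, sizeof(attr));
	memcpy(attr.signature, "DEMO", 4);	/* illustrative signature */

	RPMEMpool *rpp = rpmem_create("remote-host", "pool.set",
	    pool, RPMEM_MIN_POOL, &nlanes, &attr);
	if (rpp == NULL) {
		fprintf(stderr, "rpmem_create: %s\n", rpmem_errormsg());
		free(pool);
		return 1;
	}

	/* modify locally, then make the range durable on the remote node */
	memset((char *)pool + 4096, 0xab, 128);
	if (rpmem_persist(rpp, 4096, 128, 0 /* lane */, 0 /* flags */))
		fprintf(stderr, "rpmem_persist: %s\n", rpmem_errormsg());

	rpmem_close(rpp);
	free(pool);
	return 0;
}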
3,197
31.30303
77
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj.h -- definitions of libpmemobj entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemobj provides a pmem-resident transactional object store. * * See libpmemobj(7) for details. */ #ifndef LIBPMEMOBJ_H #define LIBPMEMOBJ_H 1 #include <libpmemobj/action.h> #include <libpmemobj/atomic.h> #include <libpmemobj/ctl.h> #include <libpmemobj/iterator.h> #include <libpmemobj/lists_atomic.h> #include <libpmemobj/pool.h> #include <libpmemobj/thread.h> #include <libpmemobj/tx.h> #endif /* libpmemobj.h */
662
23.555556
79
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemlog.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */

/*
 * libpmemlog.h -- definitions of libpmemlog entry points
 *
 * This library provides support for programming with persistent memory (pmem).
 *
 * libpmemlog provides support for pmem-resident log files.
 *
 * See libpmemlog(7) for details.
 */

#ifndef LIBPMEMLOG_H
#define LIBPMEMLOG_H 1

#include <sys/types.h>

#ifdef _WIN32
#include <pmemcompat.h>

#ifndef PMDK_UTF8_API
#define pmemlog_open pmemlog_openW
#define pmemlog_create pmemlog_createW
#define pmemlog_check pmemlog_checkW
#define pmemlog_check_version pmemlog_check_versionW
#define pmemlog_errormsg pmemlog_errormsgW
#define pmemlog_ctl_get pmemlog_ctl_getW
#define pmemlog_ctl_set pmemlog_ctl_setW
#define pmemlog_ctl_exec pmemlog_ctl_execW
#else
#define pmemlog_open pmemlog_openU
#define pmemlog_create pmemlog_createU
#define pmemlog_check pmemlog_checkU
#define pmemlog_check_version pmemlog_check_versionU
#define pmemlog_errormsg pmemlog_errormsgU
#define pmemlog_ctl_get pmemlog_ctl_getU
#define pmemlog_ctl_set pmemlog_ctl_setU
#define pmemlog_ctl_exec pmemlog_ctl_execU
#endif

#else
#include <sys/uio.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * opaque type, internal to libpmemlog
 */
typedef struct pmemlog PMEMlogpool;

/*
 * PMEMLOG_MAJOR_VERSION and PMEMLOG_MINOR_VERSION provide the current
 * version of the libpmemlog API as provided by this header file.
 * Applications can verify that the version available at run-time
 * is compatible with the version used at compile-time by passing
 * these defines to pmemlog_check_version().
 */
#define PMEMLOG_MAJOR_VERSION 1
#define PMEMLOG_MINOR_VERSION 1

#ifndef _WIN32
const char *pmemlog_check_version(unsigned major_required,
	unsigned minor_required);
#else
const char *pmemlog_check_versionU(unsigned major_required,
	unsigned minor_required);
const wchar_t *pmemlog_check_versionW(unsigned major_required,
	unsigned minor_required);
#endif

/*
 * support for PMEM-resident log files...
 */
#define PMEMLOG_MIN_POOL ((size_t)(1024 * 1024 * 2)) /* min pool size: 2MiB */

/*
 * This limit is set arbitrarily; it leaves room for a pool header and the
 * required alignment, with some space to spare.
 */
#define PMEMLOG_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */

#ifndef _WIN32
PMEMlogpool *pmemlog_open(const char *path);
#else
PMEMlogpool *pmemlog_openU(const char *path);
PMEMlogpool *pmemlog_openW(const wchar_t *path);
#endif

#ifndef _WIN32
PMEMlogpool *pmemlog_create(const char *path, size_t poolsize, mode_t mode);
#else
PMEMlogpool *pmemlog_createU(const char *path, size_t poolsize, mode_t mode);
PMEMlogpool *pmemlog_createW(const wchar_t *path, size_t poolsize, mode_t mode);
#endif

#ifndef _WIN32
int pmemlog_check(const char *path);
#else
int pmemlog_checkU(const char *path);
int pmemlog_checkW(const wchar_t *path);
#endif

void pmemlog_close(PMEMlogpool *plp);
size_t pmemlog_nbyte(PMEMlogpool *plp);
int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count);
int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt);
long long pmemlog_tell(PMEMlogpool *plp);
void pmemlog_rewind(PMEMlogpool *plp);
void pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
	int (*process_chunk)(const void *buf, size_t len, void *arg),
	void *arg);

/*
 * Passing NULL to pmemlog_set_funcs() tells libpmemlog to continue to use the
 * default for that function. The replacement functions must not make calls
 * back into libpmemlog.
*/ void pmemlog_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); #ifndef _WIN32 const char *pmemlog_errormsg(void); #else const char *pmemlog_errormsgU(void); const wchar_t *pmemlog_errormsgW(void); #endif #ifndef _WIN32 /* EXPERIMENTAL */ int pmemlog_ctl_get(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_set(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_exec(PMEMlogpool *plp, const char *name, void *arg); #else int pmemlog_ctl_getU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_getW(PMEMlogpool *plp, const wchar_t *name, void *arg); int pmemlog_ctl_setU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_setW(PMEMlogpool *plp, const wchar_t *name, void *arg); int pmemlog_ctl_execU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_execW(PMEMlogpool *plp, const wchar_t *name, void *arg); #endif #ifdef __cplusplus } #endif #endif /* libpmemlog.h */
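A minimal usage sketch of the log API above (the pool path is illustrative); per libpmemlog(7), a chunksize of 0 passes the whole valid log to the callback in one call, and a non-zero return from the callback continues the walk:

#include <stdio.h>
#include <string.h>
#include <libpmemlog.h>

/* callback for pmemlog_walk(); returning non-zero continues the walk */
static int
print_chunk(const void *buf, size_t len, void *arg)
{
	(void) arg;
	printf("%.*s", (int)len, (const char *)buf);
	return 1;
}

int main(void)
{
	PMEMlogpool *plp = pmemlog_create("/pmem/logpool",	/* illustrative */
	    PMEMLOG_MIN_POOL, 0666);
	if (plp == NULL) {
		perror("pmemlog_create");
		return 1;
	}

	const char msg[] = "appended atomically\n";
	if (pmemlog_append(plp, msg, sizeof(msg) - 1) < 0)
		perror("pmemlog_append");

	printf("write point: %lld of %zu bytes\n",
	    pmemlog_tell(plp), pmemlog_nbyte(plp));

	/* chunksize 0: deliver all appended data in a single callback */
	pmemlog_walk(plp, 0, print_chunk, NULL);

	pmemlog_close(plp);
	return 0;
}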
4,540
28.679739
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmem.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */

/*
 * libpmem.h -- definitions of libpmem entry points
 *
 * This library provides support for programming with persistent memory (pmem).
 *
 * libpmem provides support for using raw pmem directly.
 *
 * See libpmem(7) for details.
 */

#ifndef LIBPMEM_H
#define LIBPMEM_H 1

#include <sys/types.h>

#ifdef _WIN32
#include <pmemcompat.h>

#ifndef PMDK_UTF8_API
#define pmem_map_file pmem_map_fileW
#define pmem_check_version pmem_check_versionW
#define pmem_errormsg pmem_errormsgW
#else
#define pmem_map_file pmem_map_fileU
#define pmem_check_version pmem_check_versionU
#define pmem_errormsg pmem_errormsgU
#endif

#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This limit is set arbitrarily; it leaves room for a pool header and the
 * required alignment, with some space to spare.
 */
#define PMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */

/*
 * flags supported by pmem_map_file()
 */
#define PMEM_FILE_CREATE	(1 << 0)
#define PMEM_FILE_EXCL		(1 << 1)
#define PMEM_FILE_SPARSE	(1 << 2)
#define PMEM_FILE_TMPFILE	(1 << 3)

#ifndef _WIN32
void *pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
	size_t *mapped_lenp, int *is_pmemp);
#else
void *pmem_map_fileU(const char *path, size_t len, int flags, mode_t mode,
	size_t *mapped_lenp, int *is_pmemp);
void *pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode,
	size_t *mapped_lenp, int *is_pmemp);
#endif

int pmem_unmap(void *addr, size_t len);
int pmem_is_pmem(const void *addr, size_t len);
void pmem_persist(const void *addr, size_t len);
int pmem_msync(const void *addr, size_t len);
int pmem_has_auto_flush(void);
void pmem_flush(const void *addr, size_t len);
void pmem_deep_flush(const void *addr, size_t len);
int pmem_deep_drain(const void *addr, size_t len);
int pmem_deep_persist(const void *addr, size_t len);
void pmem_drain(void);
int pmem_has_hw_drain(void);

void *pmem_memmove_persist(void *pmemdest, const void *src, size_t len);
void *pmem_memcpy_persist(void *pmemdest, const void *src, size_t len);
void *pmem_memset_persist(void *pmemdest, int c, size_t len);
void *pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len);
void *pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len);
void *pmem_memset_nodrain(void *pmemdest, int c, size_t len);

#define PMEM_F_MEM_NODRAIN	(1U << 0)

#define PMEM_F_MEM_NONTEMPORAL	(1U << 1)
#define PMEM_F_MEM_TEMPORAL	(1U << 2)

#define PMEM_F_MEM_WC		(1U << 3)
#define PMEM_F_MEM_WB		(1U << 4)

#define PMEM_F_MEM_NOFLUSH	(1U << 5)

#define PMEM_F_MEM_VALID_FLAGS (PMEM_F_MEM_NODRAIN | \
		PMEM_F_MEM_NONTEMPORAL | \
		PMEM_F_MEM_TEMPORAL | \
		PMEM_F_MEM_WC | \
		PMEM_F_MEM_WB | \
		PMEM_F_MEM_NOFLUSH)

void *pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags);
void *pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags);
void *pmem_memset(void *pmemdest, int c, size_t len, unsigned flags);

/*
 * PMEM_MAJOR_VERSION and PMEM_MINOR_VERSION provide the current version of the
 * libpmem API as provided by this header file. Applications can verify that
 * the version available at run-time is compatible with the version used at
 * compile-time by passing these defines to pmem_check_version().
*/ #define PMEM_MAJOR_VERSION 1 #define PMEM_MINOR_VERSION 1 #ifndef _WIN32 const char *pmem_check_version(unsigned major_required, unsigned minor_required); #else const char *pmem_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmem_check_versionW(unsigned major_required, unsigned minor_required); #endif #ifndef _WIN32 const char *pmem_errormsg(void); #else const char *pmem_errormsgU(void); const wchar_t *pmem_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmem.h */
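A minimal usage sketch following the pattern from libpmem(7) (the path is illustrative): map a file, store into it, then make the stores durable with pmem_persist() on real pmem or fall back to pmem_msync() otherwise:

#include <stdio.h>
#include <string.h>
#include <libpmem.h>

int main(void)
{
	size_t mapped_len;
	int is_pmem;

	/* create (if needed) and map a 4 KiB file; path is illustrative */
	char *addr = pmem_map_file("/pmem/file", 4096,
	    PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem);
	if (addr == NULL) {
		perror("pmem_map_file");
		return 1;
	}

	strcpy(addr, "hello, persistent memory");

	/* flush with the cheapest mechanism that is still correct */
	if (is_pmem)
		pmem_persist(addr, mapped_len);
	else
		pmem_msync(addr, mapped_len);

	pmem_unmap(addr, mapped_len);
	return 0;
}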
3,829
28.015152
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmem2.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * libpmem2.h -- definitions of libpmem2 entry points (EXPERIMENTAL) * * This library provides support for programming with persistent memory (pmem). * * libpmem2 provides support for using raw pmem directly. * * See libpmem2(7) for details. */ #ifndef LIBPMEM2_H #define LIBPMEM2_H 1 #include <stddef.h> #include <stdint.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmem2_source_device_id pmem2_source_device_idW #define pmem2_errormsg pmem2_errormsgW #define pmem2_perror pmem2_perrorW #else #define pmem2_source_device_id pmem2_source_device_idU #define pmem2_errormsg pmem2_errormsgU #define pmem2_perror pmem2_perrorU #endif #endif #ifdef __cplusplus extern "C" { #endif #define PMEM2_E_UNKNOWN (-100000) #define PMEM2_E_NOSUPP (-100001) #define PMEM2_E_FILE_HANDLE_NOT_SET (-100003) #define PMEM2_E_INVALID_FILE_HANDLE (-100004) #define PMEM2_E_INVALID_FILE_TYPE (-100005) #define PMEM2_E_MAP_RANGE (-100006) #define PMEM2_E_MAPPING_EXISTS (-100007) #define PMEM2_E_GRANULARITY_NOT_SET (-100008) #define PMEM2_E_GRANULARITY_NOT_SUPPORTED (-100009) #define PMEM2_E_OFFSET_OUT_OF_RANGE (-100010) #define PMEM2_E_OFFSET_UNALIGNED (-100011) #define PMEM2_E_INVALID_ALIGNMENT_FORMAT (-100012) #define PMEM2_E_INVALID_ALIGNMENT_VALUE (-100013) #define PMEM2_E_INVALID_SIZE_FORMAT (-100014) #define PMEM2_E_LENGTH_UNALIGNED (-100015) #define PMEM2_E_MAPPING_NOT_FOUND (-100016) #define PMEM2_E_BUFFER_TOO_SMALL (-100017) #define PMEM2_E_SOURCE_EMPTY (-100018) #define PMEM2_E_INVALID_SHARING_VALUE (-100019) #define PMEM2_E_SRC_DEVDAX_PRIVATE (-100020) #define PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE (-100021) #define PMEM2_E_ADDRESS_UNALIGNED (-100022) #define PMEM2_E_ADDRESS_NULL (-100023) #define PMEM2_E_DEEP_FLUSH_RANGE (-100024) #define PMEM2_E_INVALID_REGION_FORMAT (-100025) #define PMEM2_E_DAX_REGION_NOT_FOUND (-100026) #define PMEM2_E_INVALID_DEV_FORMAT (-100027) #define PMEM2_E_CANNOT_READ_BOUNDS (-100028) #define PMEM2_E_NO_BAD_BLOCK_FOUND (-100029) #define PMEM2_E_LENGTH_OUT_OF_RANGE (-100030) #define PMEM2_E_INVALID_PROT_FLAG (-100031) #define PMEM2_E_NO_ACCESS (-100032) /* source setup */ struct pmem2_source; int pmem2_source_from_fd(struct pmem2_source **src, int fd); int pmem2_source_from_anon(struct pmem2_source **src, size_t size); #ifdef _WIN32 int pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle); #endif int pmem2_source_size(const struct pmem2_source *src, size_t *size); int pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment); int pmem2_source_delete(struct pmem2_source **src); /* vm reservation setup */ struct pmem2_vm_reservation; int pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv, size_t size, void *address); int pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv); /* config setup */ struct pmem2_config; int pmem2_config_new(struct pmem2_config **cfg); int pmem2_config_delete(struct pmem2_config **cfg); enum pmem2_granularity { PMEM2_GRANULARITY_BYTE, PMEM2_GRANULARITY_CACHE_LINE, PMEM2_GRANULARITY_PAGE, }; int pmem2_config_set_required_store_granularity(struct pmem2_config *cfg, enum pmem2_granularity g); int pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset); int pmem2_config_set_length(struct pmem2_config *cfg, size_t length); enum pmem2_sharing_type { PMEM2_SHARED, PMEM2_PRIVATE, }; int pmem2_config_set_sharing(struct pmem2_config *cfg, enum pmem2_sharing_type type); #define PMEM2_PROT_EXEC (1U << 29) 
#define PMEM2_PROT_READ (1U << 30) #define PMEM2_PROT_WRITE (1U << 31) #define PMEM2_PROT_NONE 0 int pmem2_config_set_protection(struct pmem2_config *cfg, unsigned prot); enum pmem2_address_request_type { PMEM2_ADDRESS_FIXED_REPLACE = 1, PMEM2_ADDRESS_FIXED_NOREPLACE = 2, }; int pmem2_config_set_address(struct pmem2_config *cfg, void *addr, enum pmem2_address_request_type request_type); int pmem2_config_set_vm_reservation(struct pmem2_config *cfg, struct pmem2_vm_reservation *rsv, size_t offset); void pmem2_config_clear_address(struct pmem2_config *cfg); /* mapping */ struct pmem2_map; int pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src, struct pmem2_map **map_ptr); int pmem2_unmap(struct pmem2_map **map_ptr); void *pmem2_map_get_address(struct pmem2_map *map); size_t pmem2_map_get_size(struct pmem2_map *map); enum pmem2_granularity pmem2_map_get_store_granularity(struct pmem2_map *map); /* flushing */ typedef void (*pmem2_persist_fn)(const void *ptr, size_t size); typedef void (*pmem2_flush_fn)(const void *ptr, size_t size); typedef void (*pmem2_drain_fn)(void); pmem2_persist_fn pmem2_get_persist_fn(struct pmem2_map *map); pmem2_flush_fn pmem2_get_flush_fn(struct pmem2_map *map); pmem2_drain_fn pmem2_get_drain_fn(struct pmem2_map *map); #define PMEM2_F_MEM_NODRAIN (1U << 0) #define PMEM2_F_MEM_NONTEMPORAL (1U << 1) #define PMEM2_F_MEM_TEMPORAL (1U << 2) #define PMEM2_F_MEM_WC (1U << 3) #define PMEM2_F_MEM_WB (1U << 4) #define PMEM2_F_MEM_NOFLUSH (1U << 5) #define PMEM2_F_MEM_VALID_FLAGS (PMEM2_F_MEM_NODRAIN | \ PMEM2_F_MEM_NONTEMPORAL | \ PMEM2_F_MEM_TEMPORAL | \ PMEM2_F_MEM_WC | \ PMEM2_F_MEM_WB | \ PMEM2_F_MEM_NOFLUSH) typedef void *(*pmem2_memmove_fn)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*pmem2_memcpy_fn)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*pmem2_memset_fn)(void *pmemdest, int c, size_t len, unsigned flags); pmem2_memmove_fn pmem2_get_memmove_fn(struct pmem2_map *map); pmem2_memcpy_fn pmem2_get_memcpy_fn(struct pmem2_map *map); pmem2_memset_fn pmem2_get_memset_fn(struct pmem2_map *map); /* RAS */ int pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size); #ifndef _WIN32 int pmem2_source_device_id(const struct pmem2_source *src, char *id, size_t *len); #else int pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id, size_t *len); int pmem2_source_device_idU(const struct pmem2_source *src, char *id, size_t *len); #endif int pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc); struct pmem2_badblock_context; struct pmem2_badblock { size_t offset; size_t length; }; int pmem2_badblock_context_new(const struct pmem2_source *src, struct pmem2_badblock_context **bbctx); int pmem2_badblock_next(struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb); void pmem2_badblock_context_delete( struct pmem2_badblock_context **bbctx); int pmem2_badblock_clear(struct pmem2_badblock_context *bbctx, const struct pmem2_badblock *bb); /* error handling */ #ifndef _WIN32 const char *pmem2_errormsg(void); #else const char *pmem2_errormsgU(void); const wchar_t *pmem2_errormsgW(void); #endif int pmem2_err_to_errno(int); #ifndef _WIN32 void pmem2_perror(const char *format, ...) __attribute__((__format__(__printf__, 1, 2))); #else void pmem2_perrorU(const char *format, ...); void pmem2_perrorW(const wchar_t *format, ...); #endif #ifdef __cplusplus } #endif #endif /* libpmem2.h */
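A minimal sketch of the source/config/map flow declared above (the file path is illustrative): requesting PMEM2_GRANULARITY_PAGE accepts any mapping, and the persist function returned for the mapping uses the cheapest correct flush for its effective granularity:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <libpmem2.h>

int main(void)
{
	int fd = open("/pmem/file", O_RDWR);	/* illustrative path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct pmem2_source *src;
	struct pmem2_config *cfg;
	struct pmem2_map *map;

	if (pmem2_source_from_fd(&src, fd) ||
	    pmem2_config_new(&cfg) ||
	    pmem2_config_set_required_store_granularity(cfg,
	    PMEM2_GRANULARITY_PAGE)) {
		pmem2_perror("setup");
		return 1;
	}

	if (pmem2_map(cfg, src, &map)) {
		pmem2_perror("pmem2_map");
		return 1;
	}

	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);

	strcpy(addr, "hello, libpmem2");
	persist(addr, 16);

	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	close(fd);
	return 0;
}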
7,202
25.677778
79
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/ctl.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * libpmemobj/ctl.h -- definitions of pmemobj_ctl related entry points */ #ifndef LIBPMEMOBJ_CTL_H #define LIBPMEMOBJ_CTL_H 1 #include <stddef.h> #include <sys/types.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Allocation class interface * * When requesting an object from the allocator, the first step is to determine * which allocation class best approximates the size of the object. * Once found, the appropriate free list, called bucket, for that * class is selected in a fashion that minimizes contention between threads. * Depending on the requested size and the allocation class, it might happen * that the object size (including required metadata) would be bigger than the * allocation class size - called unit size. In those situations, the object is * constructed from two or more units (up to 64). * * If the requested number of units cannot be retrieved from the selected * bucket, the thread reaches out to the global, shared, heap which manages * memory in 256 kilobyte chunks and gives it out in a best-fit fashion. This * operation must be performed under an exclusive lock. * Once the thread is in the possession of a chunk, the lock is dropped, and the * memory is split into units that repopulate the bucket. * * These are the CTL entry points that control allocation classes: * - heap.alloc_class.[class_id].desc * Creates/retrieves allocation class information * * It's VERY important to remember that the allocation classes are a RUNTIME * property of the allocator - they are NOT stored persistently in the pool. * It's recommended to always create custom allocation classes immediately after * creating or opening the pool, before any use. * If there are existing objects created using a class that is no longer stored * in the runtime state of the allocator, they can be normally freed, but * allocating equivalent objects will be done using the allocation class that * is currently defined for that size. * * Please see the libpmemobj man page for more information about entry points. */ /* * Persistent allocation header */ enum pobj_header_type { /* * 64-byte header used up until the version 1.3 of the library, * functionally equivalent to the compact header. * It's not recommended to create any new classes with this header. */ POBJ_HEADER_LEGACY, /* * 16-byte header used by the default allocation classes. All library * metadata is by default allocated using this header. * Supports type numbers and variably sized allocations. */ POBJ_HEADER_COMPACT, /* * 0-byte header with metadata stored exclusively in a bitmap. This * ensures that objects are allocated in memory contiguously and * without attached headers. * This can be used to create very small allocation classes, but it * does not support type numbers. * Additionally, allocations with this header can only span a single * unit. * Objects allocated with this header do show up when iterating through * the heap using pmemobj_first/pmemobj_next functions, but have a * type_num equal 0. */ POBJ_HEADER_NONE, MAX_POBJ_HEADER_TYPES }; /* * Description of allocation classes */ struct pobj_alloc_class_desc { /* * The number of bytes in a single unit of allocation. A single * allocation can span up to 64 units (or 1 in the case of no header). * If one creates an allocation class with a certain unit size and * forces it to handle bigger sizes, more than one unit * will be used. 
* For example, with an allocation class that has a compact header and a * 128-byte unit size, a request of 200 bytes will create a memory block containing 256 bytes that spans two units. The usable size of that allocation will be 240 bytes: 2 * 128 - 16 (header). */ size_t unit_size; /* * Desired alignment of objects from the allocation class. * If non-zero, must be a power of two and an even divisor of unit size. * * All allocation classes have default alignment * of 64. User data alignment is affected by the size of a header. For the * compact header this means that the user data alignment is 48 bytes. * */ size_t alignment; /* * The minimum number of units that must be present in a * single, contiguous memory block. * Those blocks (internally called runs) are fetched on demand from the * heap. Accessing that global state is a serialization point for the * allocator and thus it is imperative for performance and scalability * that a reasonable amount of memory is fetched in a single call. * Threads generally do not share memory blocks from which they * allocate, but blocks do go back to the global heap if they are no * longer actively used for allocation. */ unsigned units_per_block; /* * The header of allocations that originate from this allocation class. */ enum pobj_header_type header_type; /* * The identifier of this allocation class. */ unsigned class_id; }; enum pobj_stats_enabled { POBJ_STATS_ENABLED_TRANSIENT, POBJ_STATS_ENABLED_BOTH, POBJ_STATS_ENABLED_PERSISTENT, POBJ_STATS_DISABLED, }; #ifndef _WIN32 /* EXPERIMENTAL */ int pmemobj_ctl_get(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_set(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_exec(PMEMobjpool *pop, const char *name, void *arg); #else int pmemobj_ctl_getU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_getW(PMEMobjpool *pop, const wchar_t *name, void *arg); int pmemobj_ctl_setU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_setW(PMEMobjpool *pop, const wchar_t *name, void *arg); int pmemobj_ctl_execU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_execW(PMEMobjpool *pop, const wchar_t *name, void *arg); #ifndef PMDK_UTF8_API #define pmemobj_ctl_get pmemobj_ctl_getW #define pmemobj_ctl_set pmemobj_ctl_setW #define pmemobj_ctl_exec pmemobj_ctl_execW #else #define pmemobj_ctl_get pmemobj_ctl_getU #define pmemobj_ctl_set pmemobj_ctl_setU #define pmemobj_ctl_exec pmemobj_ctl_execU #endif #endif #ifdef __cplusplus } #endif #endif /* libpmemobj/ctl.h */
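/*
 * Illustrative usage sketch -- not part of the original header. It registers
 * a custom allocation class through the "heap.alloc_class.new.desc" ctl
 * entry point described above and then allocates from it. The pool handle,
 * sizes and type number are assumptions made for this example.
 */
#include <libpmemobj.h>

static int
register_example_class(PMEMobjpool *pop)
{
    struct pobj_alloc_class_desc desc;
    desc.unit_size = 128;            /* one unit = 128 bytes */
    desc.alignment = 0;              /* keep the default alignment */
    desc.units_per_block = 1000;     /* units fetched per run */
    desc.header_type = POBJ_HEADER_COMPACT;
    desc.class_id = 0;               /* "new" fills this in */

    if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc) != 0)
        return -1;

    /* a 200-byte request spans two 128-byte units, as explained above */
    PMEMoid oid;
    return pmemobj_xalloc(pop, &oid, 200, 0,
        POBJ_CLASS_ID(desc.class_id), NULL, NULL);
}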
6,198
34.221591
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/lists_atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/lists_atomic.h -- definitions of libpmemobj atomic lists macros */ #ifndef LIBPMEMOBJ_LISTS_ATOMIC_H #define LIBPMEMOBJ_LISTS_ATOMIC_H 1 #include <libpmemobj/lists_atomic_base.h> #include <libpmemobj/thread.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional persistent atomic circular doubly-linked list */ #define POBJ_LIST_ENTRY(type)\ struct {\ TOID(type) pe_next;\ TOID(type) pe_prev;\ } #define POBJ_LIST_HEAD(name, type)\ struct name {\ TOID(type) pe_first;\ PMEMmutex lock;\ } #define POBJ_LIST_FIRST(head) ((head)->pe_first) #define POBJ_LIST_LAST(head, field) (\ TOID_IS_NULL((head)->pe_first) ?\ (head)->pe_first :\ D_RO((head)->pe_first)->field.pe_prev) #define POBJ_LIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first)) #define POBJ_LIST_NEXT(elm, field) (D_RO(elm)->field.pe_next) #define POBJ_LIST_PREV(elm, field) (D_RO(elm)->field.pe_prev) #define POBJ_LIST_DEST_HEAD 1 #define POBJ_LIST_DEST_TAIL 0 #define POBJ_LIST_DEST_BEFORE 1 #define POBJ_LIST_DEST_AFTER 0 #define POBJ_LIST_FOREACH(var, head, field)\ for (_pobj_debug_notice("POBJ_LIST_FOREACH", __FILE__, __LINE__),\ (var) = POBJ_LIST_FIRST((head));\ TOID_IS_NULL((var)) == 0;\ TOID_EQUALS(POBJ_LIST_NEXT((var), field),\ POBJ_LIST_FIRST((head))) ?\ TOID_ASSIGN((var), OID_NULL) :\ ((var) = POBJ_LIST_NEXT((var), field))) #define POBJ_LIST_FOREACH_REVERSE(var, head, field)\ for (_pobj_debug_notice("POBJ_LIST_FOREACH_REVERSE", __FILE__, __LINE__),\ (var) = POBJ_LIST_LAST((head), field);\ TOID_IS_NULL((var)) == 0;\ TOID_EQUALS(POBJ_LIST_PREV((var), field),\ POBJ_LIST_LAST((head), field)) ?\ TOID_ASSIGN((var), OID_NULL) :\ ((var) = POBJ_LIST_PREV((var), field))) #define POBJ_LIST_INSERT_HEAD(pop, head, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), OID_NULL,\ POBJ_LIST_DEST_HEAD, (elm).oid) #define POBJ_LIST_INSERT_TAIL(pop, head, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), OID_NULL,\ POBJ_LIST_DEST_TAIL, (elm).oid) #define POBJ_LIST_INSERT_AFTER(pop, head, listelm, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid,\ 0 /* after */, (elm).oid) #define POBJ_LIST_INSERT_BEFORE(pop, head, listelm, elm, field)\ pmemobj_list_insert((pop), \ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid,\ 1 /* before */, (elm).oid) #define POBJ_LIST_INSERT_NEW_HEAD(pop, head, field, size, constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), OID_NULL, POBJ_LIST_DEST_HEAD, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_TAIL(pop, head, field, size, constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), OID_NULL, POBJ_LIST_DEST_TAIL, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_AFTER(pop, head, listelm, field, size,\ constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), (listelm).oid, 0 /* after */, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_BEFORE(pop, head, listelm, field, size,\ constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid, 1 /* before */, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define 
POBJ_LIST_REMOVE(pop, head, elm, field)\ pmemobj_list_remove((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (elm).oid, 0 /* no free */) #define POBJ_LIST_REMOVE_FREE(pop, head, elm, field)\ pmemobj_list_remove((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (elm).oid, 1 /* free */) #define POBJ_LIST_MOVE_ELEMENT_HEAD(pop, head, head_new, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new), OID_NULL, POBJ_LIST_DEST_HEAD, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_TAIL(pop, head, head_new, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new), OID_NULL, POBJ_LIST_DEST_TAIL, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_AFTER(pop,\ head, head_new, listelm, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new),\ (listelm).oid,\ 0 /* after */, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,\ head, head_new, listelm, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new),\ (listelm).oid,\ 1 /* before */, (elm).oid) #ifdef __cplusplus } #endif #endif /* libpmemobj/lists_atomic.h */
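/*
 * Illustrative usage sketch -- not part of the original header. A node type
 * wired through the atomic list macros above; "struct node", its type
 * number and the pool handle are assumptions made for this example.
 */
TOID_DECLARE(struct node, 100);

struct node {
    POBJ_LIST_ENTRY(struct node) entries;
    int value;
};

POBJ_LIST_HEAD(node_head, struct node);

static void
list_example(PMEMobjpool *pop, struct node_head *head, TOID(struct node) n)
{
    /* link an already-allocated node at the tail, failure-atomically */
    POBJ_LIST_INSERT_TAIL(pop, head, n, entries);

    /* walk the list front to back */
    TOID(struct node) iter;
    POBJ_LIST_FOREACH(iter, head, entries)
        (void)D_RO(iter)->value;

    /* unlink and free in a single atomic step */
    POBJ_LIST_REMOVE_FREE(pop, head, n, entries);
}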
5,121
30.042424
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/iterator.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/iterator.h -- definitions of libpmemobj iterator macros */ #ifndef LIBPMEMOBJ_ITERATOR_H #define LIBPMEMOBJ_ITERATOR_H 1 #include <libpmemobj/iterator_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif static inline PMEMoid POBJ_FIRST_TYPE_NUM(PMEMobjpool *pop, uint64_t type_num) { PMEMoid _pobj_ret = pmemobj_first(pop); while (!OID_IS_NULL(_pobj_ret) && pmemobj_type_num(_pobj_ret) != type_num) { _pobj_ret = pmemobj_next(_pobj_ret); } return _pobj_ret; } static inline PMEMoid POBJ_NEXT_TYPE_NUM(PMEMoid o) { PMEMoid _pobj_ret = o; do { _pobj_ret = pmemobj_next(_pobj_ret); } while (!OID_IS_NULL(_pobj_ret) && pmemobj_type_num(_pobj_ret) != pmemobj_type_num(o)); return _pobj_ret; } #define POBJ_FIRST(pop, t) ((TOID(t))POBJ_FIRST_TYPE_NUM(pop, TOID_TYPE_NUM(t))) #define POBJ_NEXT(o) ((__typeof__(o))POBJ_NEXT_TYPE_NUM((o).oid)) /* * Iterates through every existing allocated object. */ #define POBJ_FOREACH(pop, varoid)\ for (_pobj_debug_notice("POBJ_FOREACH", __FILE__, __LINE__),\ varoid = pmemobj_first(pop);\ (varoid).off != 0; varoid = pmemobj_next(varoid)) /* * Safe variant of POBJ_FOREACH in which pmemobj_free on varoid is allowed */ #define POBJ_FOREACH_SAFE(pop, varoid, nvaroid)\ for (_pobj_debug_notice("POBJ_FOREACH_SAFE", __FILE__, __LINE__),\ varoid = pmemobj_first(pop);\ (varoid).off != 0 && (nvaroid = pmemobj_next(varoid), 1);\ varoid = nvaroid) /* * Iterates through every object of the specified type. */ #define POBJ_FOREACH_TYPE(pop, var)\ POBJ_FOREACH(pop, (var).oid)\ if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var)) /* * Safe variant of POBJ_FOREACH_TYPE in which pmemobj_free on var * is allowed. */ #define POBJ_FOREACH_SAFE_TYPE(pop, var, nvar)\ POBJ_FOREACH_SAFE(pop, (var).oid, (nvar).oid)\ if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var)) #ifdef __cplusplus } #endif #endif /* libpmemobj/iterator.h */
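/*
 * Illustrative usage sketch -- not part of the original header. It scans
 * the whole pool with the untyped macros above; the pool handle is an
 * assumption made for this example.
 */
static void
iterate_example(PMEMobjpool *pop)
{
    PMEMoid oid;
    size_t count = 0;

    /* visit every allocated object, regardless of type number */
    POBJ_FOREACH(pop, oid)
        count++;
    (void)count;

    /* safe variant: the current object may be freed while iterating */
    PMEMoid next;
    POBJ_FOREACH_SAFE(pop, oid, next)
        pmemobj_free(&oid);
}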
2,041
23.60241
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/lists_atomic_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/lists_atomic_base.h -- definitions of libpmemobj atomic lists */ #ifndef LIBPMEMOBJ_LISTS_ATOMIC_BASE_H #define LIBPMEMOBJ_LISTS_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional persistent atomic circular doubly-linked list */ int pmemobj_list_insert(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid dest, int before, PMEMoid oid); PMEMoid pmemobj_list_insert_new(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid dest, int before, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); int pmemobj_list_remove(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid oid, int free); int pmemobj_list_move(PMEMobjpool *pop, size_t pe_old_offset, void *head_old, size_t pe_new_offset, void *head_new, PMEMoid dest, int before, PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/lists_atomic_base.h */
1,022
24.575
79
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/tx_bup.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/tx.h -- definitions of libpmemobj transactional macros */ #ifndef LIBPMEMOBJ_TX_H #define LIBPMEMOBJ_TX_H 1 #include <errno.h> #include <string.h> #include <libpmemobj/tx_base.h> #include <libpmemobj/types.h> extern uint64_t waitCycles; extern uint64_t resetCycles; #ifdef __cplusplus extern "C" { #endif #ifdef POBJ_TX_CRASH_ON_NO_ONABORT #define TX_ONABORT_CHECK do {\ if (_stage == TX_STAGE_ONABORT)\ abort();\ } while (0) #else #define TX_ONABORT_CHECK do {} while (0) #endif #define _POBJ_TX_BEGIN(pop, ...)\ {\ jmp_buf _tx_env;\ enum pobj_tx_stage _stage;\ int _pobj_errno;\ if (setjmp(_tx_env)) {\ errno = pmemobj_tx_errno();\ } else {\ _pobj_errno = pmemobj_tx_begin(pop, _tx_env, __VA_ARGS__,\ TX_PARAM_NONE);\ if (_pobj_errno)\ errno = _pobj_errno;\ }\ while ((_stage = pmemobj_tx_stage()) != TX_STAGE_NONE) {\ switch (_stage) {\ case TX_STAGE_WORK: #define TX_BEGIN_PARAM(pop, ...)\ _POBJ_TX_BEGIN(pop, ##__VA_ARGS__) #define TX_BEGIN_LOCK TX_BEGIN_PARAM /* Just to let compiler warn when incompatible function pointer is used */ static inline pmemobj_tx_callback _pobj_validate_cb_sig(pmemobj_tx_callback cb) { return cb; } #define TX_BEGIN_CB(pop, cb, arg, ...) _POBJ_TX_BEGIN(pop, TX_PARAM_CB,\ _pobj_validate_cb_sig(cb), arg, ##__VA_ARGS__) #define TX_BEGIN(pop) _POBJ_TX_BEGIN(pop, TX_PARAM_NONE) #define TX_ONABORT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONABORT: #define TX_ONCOMMIT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONCOMMIT: #define TX_FINALLY\ pmemobj_tx_process();\ break;\ case TX_STAGE_FINALLY: #define TX_END\ pmemobj_tx_process();\ break;\ default:\ TX_ONABORT_CHECK;\ pmemobj_tx_process();\ break;\ }\ }\ _pobj_errno = pmemobj_tx_end();\ if (_pobj_errno)\ errno = _pobj_errno;\ } #define TX_ADD(o)\ pmemobj_tx_add_range((o).oid, 0, sizeof(*(o)._type)) #define TX_ADD_FIELD(o, field)\ TX_ADD_DIRECT(&(D_RO(o)->field)) #define TX_ADD_DIRECT(p)\ pmemobj_tx_add_range_direct(p, sizeof(*(p))) #define TX_ADD_FIELD_DIRECT(p, field)\ pmemobj_tx_add_range_direct(&(p)->field, sizeof((p)->field)) #define TX_XADD(o, flags)\ pmemobj_tx_xadd_range((o).oid, 0, sizeof(*(o)._type), flags) #define TX_XADD_FIELD(o, field, flags)\ TX_XADD_DIRECT(&(D_RO(o)->field), flags) #define TX_XADD_DIRECT(p, flags)\ pmemobj_tx_xadd_range_direct(p, sizeof(*(p)), flags) #define TX_XADD_FIELD_DIRECT(p, field, flags)\ pmemobj_tx_xadd_range_direct(&(p)->field, sizeof((p)->field), flags) #define TX_NEW(t)\ ((TOID(t))pmemobj_tx_alloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ALLOC(t, size)\ ((TOID(t))pmemobj_tx_alloc(size, TOID_TYPE_NUM(t))) #define TX_ZNEW(t)\ ((TOID(t))pmemobj_tx_zalloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ZALLOC(t, size)\ ((TOID(t))pmemobj_tx_zalloc(size, TOID_TYPE_NUM(t))) #define TX_XALLOC(t, size, flags)\ ((TOID(t))pmemobj_tx_xalloc(size, TOID_TYPE_NUM(t), flags)) /* XXX - not available when compiled with VC++ as C code (/TC) */ #if !defined(_MSC_VER) || defined(__cplusplus) #define TX_REALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_realloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #define TX_ZREALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_zrealloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #endif /* !defined(_MSC_VER) || defined(__cplusplus) */ #define TX_STRDUP(s, type_num)\ pmemobj_tx_strdup(s, type_num) #define TX_XSTRDUP(s, type_num, flags)\ pmemobj_tx_xstrdup(s, type_num, flags) #define TX_WCSDUP(s, type_num)\ pmemobj_tx_wcsdup(s, type_num) #define TX_XWCSDUP(s, type_num, flags)\ 
pmemobj_tx_xwcsdup(s, type_num, flags) #define TX_FREE(o)\ pmemobj_tx_free((o).oid) #define TX_XFREE(o, flags)\ pmemobj_tx_xfree((o).oid, flags) #define TX_SET(o, field, value) (\ TX_ADD_FIELD(o, field),\ D_RW(o)->field = (value)) #define TX_SET_DIRECT(p, field, value) (\ TX_ADD_FIELD_DIRECT(p, field),\ (p)->field = (value)) static inline void * TX_MEMCPY(void *dest, const void *src, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memcpy(dest, src, num); } static inline void * TX_MEMSET(void *dest, int c, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memset(dest, c, num); } #ifdef __cplusplus } #endif #endif /* libpmemobj/tx.h */
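/*
 * Illustrative usage sketch -- not part of the original header. It shows
 * the control flow the macros above expand to: work, optional abort and
 * commit blocks, then TX_END. "counter" is assumed to point into the pool.
 */
static int
tx_macro_example(PMEMobjpool *pop, int *counter)
{
    int ret = 0;

    TX_BEGIN(pop) {
        /* snapshot the range first, then modify it in place */
        TX_ADD_DIRECT(counter);
        *counter += 1;
    } TX_ONABORT {
        /* reached after pmemobj_tx_abort() or an aborting failure */
        ret = -1;
    } TX_ONCOMMIT {
        ret = 0;
    } TX_END

    return ret;
}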
4,353
22.037037
74
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/tx_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * libpmemobj/tx_base.h -- definitions of libpmemobj transactional entry points */ #ifndef LIBPMEMOBJ_TX_BASE_H #define LIBPMEMOBJ_TX_BASE_H 1 #include <setjmp.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Transactions * * Stages are changed only by the pmemobj_tx_* functions, each transition * to the TX_STAGE_ONABORT is followed by a longjmp to the jmp_buf provided in * the pmemobj_tx_begin function. */ enum pobj_tx_stage { TX_STAGE_NONE, /* no transaction in this thread */ TX_STAGE_WORK, /* transaction in progress */ TX_STAGE_ONCOMMIT, /* successfully committed */ TX_STAGE_ONABORT, /* tx_begin failed or transaction aborted */ TX_STAGE_FINALLY, /* always called */ MAX_TX_STAGE }; /* * Always returns the current transaction stage for a thread. */ enum pobj_tx_stage pmemobj_tx_stage(void); enum pobj_tx_param { TX_PARAM_NONE, TX_PARAM_MUTEX, /* PMEMmutex */ TX_PARAM_RWLOCK, /* PMEMrwlock */ TX_PARAM_CB, /* pmemobj_tx_callback cb, void *arg */ }; enum pobj_log_type { TX_LOG_TYPE_SNAPSHOT, TX_LOG_TYPE_INTENT, }; enum pobj_tx_failure_behavior { POBJ_TX_FAILURE_ABORT, POBJ_TX_FAILURE_RETURN, }; #if !defined(pmdk_use_attr_deprec_with_msg) && defined(__COVERITY__) #define pmdk_use_attr_deprec_with_msg 0 #endif #if !defined(pmdk_use_attr_deprec_with_msg) && defined(__clang__) #if __has_extension(attribute_deprecated_with_message) #define pmdk_use_attr_deprec_with_msg 1 #else #define pmdk_use_attr_deprec_with_msg 0 #endif #endif #if !defined(pmdk_use_attr_deprec_with_msg) && \ defined(__GNUC__) && !defined(__INTEL_COMPILER) #if __GNUC__ * 100 + __GNUC_MINOR__ >= 601 /* 6.1 */ #define pmdk_use_attr_deprec_with_msg 1 #else #define pmdk_use_attr_deprec_with_msg 0 #endif #endif #if !defined(pmdk_use_attr_deprec_with_msg) #define pmdk_use_attr_deprec_with_msg 0 #endif #if pmdk_use_attr_deprec_with_msg #define tx_lock_deprecated __attribute__((deprecated(\ "enum pobj_tx_lock is deprecated, use enum pobj_tx_param"))) #else #define tx_lock_deprecated #endif /* deprecated, do not use */ enum tx_lock_deprecated pobj_tx_lock { TX_LOCK_NONE tx_lock_deprecated = TX_PARAM_NONE, TX_LOCK_MUTEX tx_lock_deprecated = TX_PARAM_MUTEX, TX_LOCK_RWLOCK tx_lock_deprecated = TX_PARAM_RWLOCK, }; typedef void (*pmemobj_tx_callback)(PMEMobjpool *pop, enum pobj_tx_stage stage, void *); #define POBJ_TX_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\ POBJ_XALLOC_NO_FLUSH |\ POBJ_XALLOC_ARENA_MASK |\ POBJ_XALLOC_CLASS_MASK |\ POBJ_XALLOC_NO_ABORT) #define POBJ_XADD_NO_FLUSH POBJ_FLAG_NO_FLUSH #define POBJ_XADD_NO_SNAPSHOT POBJ_FLAG_NO_SNAPSHOT #define POBJ_XADD_ASSUME_INITIALIZED POBJ_FLAG_ASSUME_INITIALIZED #define POBJ_XADD_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XADD_VALID_FLAGS (POBJ_XADD_NO_FLUSH |\ POBJ_XADD_NO_SNAPSHOT |\ POBJ_XADD_ASSUME_INITIALIZED |\ POBJ_XADD_NO_ABORT) #define POBJ_XLOCK_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XLOCK_VALID_FLAGS (POBJ_XLOCK_NO_ABORT) #define POBJ_XFREE_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XFREE_VALID_FLAGS (POBJ_XFREE_NO_ABORT) #define POBJ_XPUBLISH_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XPUBLISH_VALID_FLAGS (POBJ_XPUBLISH_NO_ABORT) #define POBJ_XLOG_APPEND_BUFFER_NO_ABORT POBJ_FLAG_TX_NO_ABORT #define POBJ_XLOG_APPEND_BUFFER_VALID_FLAGS (POBJ_XLOG_APPEND_BUFFER_NO_ABORT) /* * Starts a new transaction in the current thread. * If called within an open transaction, starts a nested transaction. 
* * If successful, transaction stage changes to TX_STAGE_WORK and function * returns zero. Otherwise, stage changes to TX_STAGE_ONABORT and an error * number is returned. */ int pmemobj_tx_begin(PMEMobjpool *pop, jmp_buf env, ...); /* * Adds a lock of the given type to the current transaction. * 'Flags' is a bitmask of the following values: * - POBJ_XLOCK_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. */ int pmemobj_tx_xlock(enum pobj_tx_param type, void *lockp, uint64_t flags); /* * Adds a lock of the given type to the current transaction. */ int pmemobj_tx_lock(enum pobj_tx_param type, void *lockp); /* * Aborts the current transaction * * Causes transition to TX_STAGE_ONABORT. * * This function must be called during TX_STAGE_WORK. */ void pmemobj_tx_abort(int errnum); /* * Commits the current transaction * * This function must be called during TX_STAGE_WORK. */ void pmemobj_tx_commit(void); /* * Cleans up the current transaction. Must always be called after * pmemobj_tx_begin, even if starting the transaction failed. * * If called during TX_STAGE_NONE, has no effect. * * Always causes transition to TX_STAGE_NONE. * * If the transaction was successful, returns 0. Otherwise returns the error * code set by pmemobj_tx_abort. * * This function must *not* be called during TX_STAGE_WORK. */ int pmemobj_tx_end(void); /* * Performs the actions associated with the current stage of the transaction, * and makes the transition to the next stage. The current stage must always * be obtained by calling pmemobj_tx_stage. * * This function must be called in a transaction. */ void pmemobj_tx_process(void); /* * Returns the last transaction error code. */ int pmemobj_tx_errno(void); /* * Takes a "snapshot" of the memory block of given size and located at given * offset 'off' in the object 'oid' and saves it in the undo log. * The application is then free to directly modify the object in that memory * range. In case of failure or abort, all the changes within this range will * be rolled back automatically. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_add_range(PMEMoid oid, uint64_t off, size_t size); /* * Takes a "snapshot" of the given memory region and saves it in the undo log. * The application is then free to directly modify the object in that memory * range. In case of failure or abort, all the changes within this range will * be rolled back automatically. The supplied block of memory has to be within * the given pool. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_add_range_direct(const void *ptr, size_t size); /* * Behaves exactly the same as pmemobj_tx_add_range when 'flags' equals 0. * 'Flags' is a bitmask of the following values: * - POBJ_XADD_NO_FLUSH - skips flush on commit * - POBJ_XADD_NO_SNAPSHOT - added range will not be snapshotted * - POBJ_XADD_ASSUME_INITIALIZED - added range is assumed to be initialized * - POBJ_XADD_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. */ int pmemobj_tx_xadd_range(PMEMoid oid, uint64_t off, size_t size, uint64_t flags); /* * Behaves exactly the same as pmemobj_tx_add_range_direct when 'flags' equals * 0. 
'Flags' is a bitmask of the following values: * - POBJ_XADD_NO_FLUSH - skips flush on commit * - POBJ_XADD_NO_SNAPSHOT - added range will not be snapshotted * - POBJ_XADD_ASSUME_INITIALIZED - added range is assumed to be initialized * - POBJ_XADD_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. */ int pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags); /* * Transactionally allocates a new object. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_alloc(size_t size, uint64_t type_num); /* * Transactionally allocates a new object. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XALLOC_ZERO - zero the allocated object * - POBJ_XALLOC_NO_FLUSH - skip flush on commit * - POBJ_XALLOC_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_xalloc(size_t size, uint64_t type_num, uint64_t flags); /* * Transactionally allocates new zeroed object. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_zalloc(size_t size, uint64_t type_num); /* * Transactionally resizes an existing object. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_realloc(PMEMoid oid, size_t size, uint64_t type_num); /* * Transactionally resizes an existing object, if extended new space is zeroed. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_zrealloc(PMEMoid oid, size_t size, uint64_t type_num); /* * Transactionally allocates a new object with duplicate of the string s. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_strdup(const char *s, uint64_t type_num); /* * Transactionally allocates a new object with duplicate of the string s. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XALLOC_ZERO - zero the allocated object * - POBJ_XALLOC_NO_FLUSH - skip flush on commit * - POBJ_XALLOC_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_xstrdup(const char *s, uint64_t type_num, uint64_t flags); /* * Transactionally allocates a new object with duplicate of the wide character * string s. * * If successful, returns PMEMoid. * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_wcsdup(const wchar_t *s, uint64_t type_num); /* * Transactionally allocates a new object with duplicate of the wide character * string s. * * If successful, returns PMEMoid. 
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XALLOC_ZERO - zero the allocated object * - POBJ_XALLOC_NO_FLUSH - skip flush on commit * - POBJ_XALLOC_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_xwcsdup(const wchar_t *s, uint64_t type_num, uint64_t flags); /* * Transactionally frees an existing object. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_free(PMEMoid oid); /* * Transactionally frees an existing object. * * If successful, returns zero. * Otherwise, the stage changes to TX_STAGE_ONABORT and the error number is * returned. * 'Flags' is a bitmask of the following values: * - POBJ_XFREE_NO_ABORT - if the function does not end successfully, * do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_xfree(PMEMoid oid, uint64_t flags); /* * Append user allocated buffer to the ulog. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_log_append_buffer(enum pobj_log_type type, void *addr, size_t size); /* * Append user allocated buffer to the ulog. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XLOG_APPEND_BUFFER_NO_ABORT - if the function does not end * successfully, do not abort the transaction and return the error number. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_xlog_append_buffer(enum pobj_log_type type, void *addr, size_t size, uint64_t flags); /* * Enables or disables automatic ulog allocations. * * If successful, returns zero. * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_log_auto_alloc(enum pobj_log_type type, int on_off); /* * Calculates and returns size for user buffers for snapshots. */ size_t pmemobj_tx_log_snapshots_max_size(size_t *sizes, size_t nsizes); /* * Calculates and returns size for user buffers for intents. */ size_t pmemobj_tx_log_intents_max_size(size_t nintents); /* * Sets volatile pointer to the user data for the current transaction. */ void pmemobj_tx_set_user_data(void *data); /* * Gets volatile pointer to the user data associated with the current * transaction. */ void *pmemobj_tx_get_user_data(void); /* * Sets the failure behavior of transactional functions. * * This function must be called during TX_STAGE_WORK. */ void pmemobj_tx_set_failure_behavior(enum pobj_tx_failure_behavior behavior); /* * Returns failure behavior for the current transaction. * * This function must be called during TX_STAGE_WORK. */ enum pobj_tx_failure_behavior pmemobj_tx_get_failure_behavior(void); #ifdef __cplusplus } #endif #endif /* libpmemobj/tx_base.h */
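/*
 * Illustrative usage sketch -- not part of the original header. It drives
 * one transaction through the base API directly, without the TX_* macros;
 * "val" is assumed to point into the pool.
 */
static int
tx_base_example(PMEMobjpool *pop, int *val)
{
    jmp_buf env;

    if (setjmp(env) == 0 &&
        pmemobj_tx_begin(pop, env, TX_PARAM_NONE) == 0) {
        /* TX_STAGE_WORK: snapshot, modify, commit */
        pmemobj_tx_add_range_direct(val, sizeof(*val));
        *val += 1;
        pmemobj_tx_commit();
    }

    /* always pairs with pmemobj_tx_begin, even after failure or abort */
    return pmemobj_tx_end();
}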
14,087
30.237251
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/pool_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * libpmemobj/pool_base.h -- definitions of libpmemobj pool entry points */ #ifndef LIBPMEMOBJ_POOL_BASE_H #define LIBPMEMOBJ_POOL_BASE_H 1 #include <stddef.h> #include <sys/types.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif //NEW //#define _GNU_SOURCE //#include <sys/types.h> //#include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> //int __real_open(const char *__path, int __oflag); //int __wrap_open(const char *__path, int __oflag); void* open_device(const char* pathname); //END NEW #define PMEMOBJ_MIN_POOL ((size_t)(1024 * 1024 * 256)) /* 256 MiB */ /* * This limit is set arbitrarily to incorporate a pool header and required * alignment plus supply. */ #define PMEMOBJ_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ /* * Pool management. */ #ifdef _WIN32 #ifndef PMDK_UTF8_API #define pmemobj_open pmemobj_openW #define pmemobj_create pmemobj_createW #define pmemobj_check pmemobj_checkW #else #define pmemobj_open pmemobj_openU #define pmemobj_create pmemobj_createU #define pmemobj_check pmemobj_checkU #endif #endif #ifndef _WIN32 PMEMobjpool *pmemobj_open(const char *path, const char *layout); #else PMEMobjpool *pmemobj_openU(const char *path, const char *layout); PMEMobjpool *pmemobj_openW(const wchar_t *path, const wchar_t *layout); #endif #ifndef _WIN32 PMEMobjpool *pmemobj_create(const char *path, const char *layout, size_t poolsize, mode_t mode); #else PMEMobjpool *pmemobj_createU(const char *path, const char *layout, size_t poolsize, mode_t mode); PMEMobjpool *pmemobj_createW(const wchar_t *path, const wchar_t *layout, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemobj_check(const char *path, const char *layout); #else int pmemobj_checkU(const char *path, const char *layout); int pmemobj_checkW(const wchar_t *path, const wchar_t *layout); #endif void pmemobj_close(PMEMobjpool *pop); /* * If called for the first time on a newly created pool, the root object * of given size is allocated. Otherwise, it returns the existing root object. * In such case, the size must not be less than the actual root object size * stored in the pool. If it's larger, the root object is automatically * resized. * * This function is thread-safe. */ PMEMoid pmemobj_root(PMEMobjpool *pop, size_t size); /* * Same as above, but calls the constructor function when the object is first * created and on all subsequent reallocations. */ PMEMoid pmemobj_root_construct(PMEMobjpool *pop, size_t size, pmemobj_constr constructor, void *arg); /* * Returns the size in bytes of the root object. Always equal to the requested * size. */ size_t pmemobj_root_size(PMEMobjpool *pop); /* * Sets volatile pointer to the user data for specified pool. */ void pmemobj_set_user_data(PMEMobjpool *pop, void *data); /* * Gets volatile pointer to the user data associated with the specified pool. */ void *pmemobj_get_user_data(PMEMobjpool *pop); #ifdef __cplusplus } #endif #endif /* libpmemobj/pool_base.h */
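/*
 * Illustrative usage sketch -- not part of the original header. It creates
 * a pool (falling back to open when the file already exists) and fetches
 * the root object. The path argument, layout name and root size are
 * assumptions made for this example.
 */
static PMEMobjpool *
open_or_create(const char *path)
{
    PMEMobjpool *pop = pmemobj_create(path, "example_layout",
        PMEMOBJ_MIN_POOL, 0666);
    if (pop == NULL)
        pop = pmemobj_open(path, "example_layout");
    if (pop == NULL)
        return NULL;

    /* allocated on the first call, returned unchanged afterwards */
    PMEMoid root = pmemobj_root(pop, 1024);
    if (OID_IS_NULL(root)) {
        pmemobj_close(pop);
        return NULL;
    }
    return pop;
}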
3,095
24.377049
79
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/action_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * libpmemobj/action_base.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_BASE_H #define LIBPMEMOBJ_ACTION_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif enum pobj_action_type { /* a heap action (e.g., alloc) */ POBJ_ACTION_TYPE_HEAP, /* a single memory operation (e.g., value set) */ POBJ_ACTION_TYPE_MEM, POBJ_MAX_ACTION_TYPE }; struct pobj_action_heap { /* offset to the element being freed/allocated */ uint64_t offset; /* usable size of the element being allocated */ uint64_t usable_size; }; struct pobj_action { /* * These fields are internal for the implementation and are not * guaranteed to be stable across different versions of the API. * Use with caution. * * This structure should NEVER be stored on persistent memory! */ enum pobj_action_type type; uint32_t data[3]; union { struct pobj_action_heap heap; uint64_t data2[14]; }; }; #define POBJ_ACTION_XRESERVE_VALID_FLAGS\ (POBJ_XALLOC_CLASS_MASK |\ POBJ_XALLOC_ARENA_MASK |\ POBJ_XALLOC_ZERO) PMEMoid pmemobj_reserve(PMEMobjpool *pop, struct pobj_action *act, size_t size, uint64_t type_num); PMEMoid pmemobj_xreserve(PMEMobjpool *pop, struct pobj_action *act, size_t size, uint64_t type_num, uint64_t flags); void pmemobj_set_value(PMEMobjpool *pop, struct pobj_action *act, uint64_t *ptr, uint64_t value); void pmemobj_defer_free(PMEMobjpool *pop, PMEMoid oid, struct pobj_action *act); int pmemobj_publish(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt); int pmemobj_tx_publish(struct pobj_action *actv, size_t actvcnt); int pmemobj_tx_xpublish(struct pobj_action *actv, size_t actvcnt, uint64_t flags); void pmemobj_cancel(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt); #ifdef __cplusplus } #endif #endif /* libpmemobj/action_base.h */
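/*
 * Illustrative usage sketch -- not part of the original header. A reserve/
 * publish pair: the allocation and a 64-bit store become visible in one
 * atomic publish. The sizes, the type number and "counter" (assumed to
 * point into the pool) are assumptions made for this example.
 */
static int
reserve_publish_example(PMEMobjpool *pop, uint64_t *counter)
{
    struct pobj_action act[2];

    PMEMoid oid = pmemobj_reserve(pop, &act[0], 128, 1);
    if (OID_IS_NULL(oid))
        return -1;

    /* stage a value store that lands together with the allocation */
    pmemobj_set_value(pop, &act[1], counter, *counter + 1);

    /* either both actions take effect or neither does */
    return pmemobj_publish(pop, act, 2);
}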
1,935
24.813333
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/types.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * libpmemobj/types.h -- definitions of libpmemobj type-safe macros */ #ifndef LIBPMEMOBJ_TYPES_H #define LIBPMEMOBJ_TYPES_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif #define TOID_NULL(t) ((TOID(t))OID_NULL) #define PMEMOBJ_MAX_LAYOUT ((size_t)1024) /* * Type safety macros */ #if !(defined _MSC_VER || defined __clang__) #define TOID_ASSIGN(o, value)(\ {\ (o).oid = value;\ (o); /* to avoid "error: statement with no effect" */\ }) #else /* _MSC_VER or __clang__ */ #define TOID_ASSIGN(o, value) ((o).oid = value, (o)) #endif #if (defined _MSC_VER && _MSC_VER < 1912) /* * XXX - workaround for offsetof issue in VS 15.3, * it has been fixed since Visual Studio 2017 Version 15.5 * (_MSC_VER == 1912) */ #ifdef PMEMOBJ_OFFSETOF_WA #ifdef _CRT_USE_BUILTIN_OFFSETOF #undef offsetof #define offsetof(s, m) ((size_t)&reinterpret_cast < char const volatile& > \ ((((s *)0)->m))) #endif #else #ifdef _CRT_USE_BUILTIN_OFFSETOF #error "Invalid definition of offsetof() macro - see: \ https://developercommunity.visualstudio.com/content/problem/96174/\ offsetof-macro-is-broken-for-nested-objects.html \ Please upgrade your VS, fix offsetof as described under the link or define \ PMEMOBJ_OFFSETOF_WA to enable workaround in libpmemobj.h" #endif #endif #endif /* _MSC_VER */ #define TOID_EQUALS(lhs, rhs)\ ((lhs).oid.off == (rhs).oid.off &&\ (lhs).oid.pool_uuid_lo == (rhs).oid.pool_uuid_lo) /* type number of root object */ #define POBJ_ROOT_TYPE_NUM 0 #define _toid_struct #define _toid_union #define _toid_enum #define _POBJ_LAYOUT_REF(name) (sizeof(_pobj_layout_##name##_ref)) /* * Typed OID */ #define TOID(t)\ union _toid_##t##_toid #ifdef __cplusplus #define _TOID_CONSTR(t)\ _toid_##t##_toid()\ { }\ _toid_##t##_toid(PMEMoid _oid) : oid(_oid)\ { } #else #define _TOID_CONSTR(t) #endif /* * Declaration of typed OID */ #define _TOID_DECLARE(t, i)\ typedef uint8_t _toid_##t##_toid_type_num[(i) + 1];\ TOID(t)\ {\ _TOID_CONSTR(t)\ PMEMoid oid;\ t *_type;\ _toid_##t##_toid_type_num *_type_num;\ } /* * Declaration of typed OID of an object */ #define TOID_DECLARE(t, i) _TOID_DECLARE(t, i) /* * Declaration of typed OID of a root object */ #define TOID_DECLARE_ROOT(t) _TOID_DECLARE(t, POBJ_ROOT_TYPE_NUM) /* * Type number of specified type */ #define TOID_TYPE_NUM(t) (sizeof(_toid_##t##_toid_type_num) - 1) /* * Type number of object read from typed OID */ #define TOID_TYPE_NUM_OF(o) (sizeof(*(o)._type_num) - 1) /* * NULL check */ #define TOID_IS_NULL(o) ((o).oid.off == 0) /* * Validates whether type number stored in typed OID is the same * as type number stored in object's metadata */ #define TOID_VALID(o) (TOID_TYPE_NUM_OF(o) == pmemobj_type_num((o).oid)) /* * Checks whether the object is of a given type */ #define OID_INSTANCEOF(o, t) (TOID_TYPE_NUM(t) == pmemobj_type_num(o)) /* * Begin of layout declaration */ #define POBJ_LAYOUT_BEGIN(name)\ typedef uint8_t _pobj_layout_##name##_ref[__COUNTER__ + 1] /* * End of layout declaration */ #define POBJ_LAYOUT_END(name)\ typedef char _pobj_layout_##name##_cnt[__COUNTER__ + 1 -\ _POBJ_LAYOUT_REF(name)]; /* * Number of types declared inside layout without the root object */ #define POBJ_LAYOUT_TYPES_NUM(name) (sizeof(_pobj_layout_##name##_cnt) - 1) /* * Declaration of typed OID inside layout declaration */ #define POBJ_LAYOUT_TOID(name, t)\ TOID_DECLARE(t, (__COUNTER__ + 1 - _POBJ_LAYOUT_REF(name))); /* * Declaration of typed OID of root inside layout declaration */ 
#define POBJ_LAYOUT_ROOT(name, t)\ TOID_DECLARE_ROOT(t); /* * Name of declared layout */ #define POBJ_LAYOUT_NAME(name) #name #define TOID_TYPEOF(o) __typeof__(*(o)._type) #define TOID_OFFSETOF(o, field) offsetof(TOID_TYPEOF(o), field) /* * XXX - DIRECT_RW and DIRECT_RO are not available when compiled using VC++ * as C code (/TC). Use /TP option. */ #ifndef _MSC_VER #define DIRECT_RW(o) (\ {__typeof__(o) _o; _o._type = NULL; (void)_o;\ (__typeof__(*(o)._type) *)pmemobj_direct((o).oid); }) #define DIRECT_RO(o) ((const __typeof__(*(o)._type) *)pmemobj_direct((o).oid)) #elif defined(__cplusplus) /* * XXX - On Windows, these macros do not behave exactly the same as on Linux. */ #define DIRECT_RW(o) \ (reinterpret_cast < __typeof__((o)._type) > (pmemobj_direct((o).oid))) #define DIRECT_RO(o) \ (reinterpret_cast < const __typeof__((o)._type) > \ (pmemobj_direct((o).oid))) #endif /* (defined(_MSC_VER) || defined(__cplusplus)) */ #define D_RW DIRECT_RW #define D_RO DIRECT_RO #ifdef __cplusplus } #endif #endif /* libpmemobj/types.h */
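/*
 * Illustrative usage sketch -- not part of the original header. A minimal
 * layout declaration plus typed access through the macros above; every
 * name here is an assumption made for this example.
 */
POBJ_LAYOUT_BEGIN(example);
POBJ_LAYOUT_ROOT(example, struct my_root);
POBJ_LAYOUT_TOID(example, struct my_item);
POBJ_LAYOUT_END(example);

struct my_item {
    int value;
};

struct my_root {
    TOID(struct my_item) item;
};

/* POBJ_LAYOUT_NAME(example) is the string to pass to pmemobj_create() */
static void
typed_access(TOID(struct my_item) it)
{
    if (!TOID_IS_NULL(it) && TOID_VALID(it))
        D_RW(it)->value = 42;   /* typed write through a direct pointer */
}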
4,701
21.825243
78
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/base.h -- definitions of base libpmemobj entry points */ #ifndef LIBPMEMOBJ_BASE_H #define LIBPMEMOBJ_BASE_H 1 #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <stddef.h> #include <stdint.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemobj_check_version pmemobj_check_versionW #define pmemobj_errormsg pmemobj_errormsgW #else #define pmemobj_check_version pmemobj_check_versionU #define pmemobj_errormsg pmemobj_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type internal to libpmemobj */ typedef struct pmemobjpool PMEMobjpool; #define PMEMOBJ_MAX_ALLOC_SIZE ((size_t)0x3FFDFFFC0) /* * allocation functions flags */ #define POBJ_FLAG_ZERO (((uint64_t)1) << 0) #define POBJ_FLAG_NO_FLUSH (((uint64_t)1) << 1) #define POBJ_FLAG_NO_SNAPSHOT (((uint64_t)1) << 2) #define POBJ_FLAG_ASSUME_INITIALIZED (((uint64_t)1) << 3) #define POBJ_FLAG_TX_NO_ABORT (((uint64_t)1) << 4) #define POBJ_CLASS_ID(id) (((uint64_t)(id)) << 48) #define POBJ_ARENA_ID(id) (((uint64_t)(id)) << 32) #define POBJ_XALLOC_CLASS_MASK ((((uint64_t)1 << 16) - 1) << 48) #define POBJ_XALLOC_ARENA_MASK ((((uint64_t)1 << 16) - 1) << 32) #define POBJ_XALLOC_ZERO POBJ_FLAG_ZERO #define POBJ_XALLOC_NO_FLUSH POBJ_FLAG_NO_FLUSH #define POBJ_XALLOC_NO_ABORT POBJ_FLAG_TX_NO_ABORT /* * pmemobj_mem* flags */ #define PMEMOBJ_F_MEM_NODRAIN (1U << 0) #define PMEMOBJ_F_MEM_NONTEMPORAL (1U << 1) #define PMEMOBJ_F_MEM_TEMPORAL (1U << 2) #define PMEMOBJ_F_MEM_WC (1U << 3) #define PMEMOBJ_F_MEM_WB (1U << 4) #define PMEMOBJ_F_MEM_NOFLUSH (1U << 5) /* * pmemobj_mem*, pmemobj_xflush & pmemobj_xpersist flags */ #define PMEMOBJ_F_RELAXED (1U << 31) /* * Persistent memory object */ /* * Object handle */ typedef struct pmemoid { uint64_t pool_uuid_lo; uint64_t off; } PMEMoid; static const PMEMoid OID_NULL = { 0, 0 }; #define OID_IS_NULL(o) ((o).off == 0) #define OID_EQUALS(lhs, rhs)\ ((lhs).off == (rhs).off &&\ (lhs).pool_uuid_lo == (rhs).pool_uuid_lo) PMEMobjpool *pmemobj_pool_by_ptr(const void *addr); PMEMobjpool *pmemobj_pool_by_oid(PMEMoid oid); #ifndef _WIN32 extern int _pobj_cache_invalidate; extern __thread struct _pobj_pcache { PMEMobjpool *pop; uint64_t uuid_lo; int invalidate; } _pobj_cached_pool; /* * Returns the direct pointer of an object. */ static inline void * pmemobj_direct_inline(PMEMoid oid) { if (oid.off == 0 || oid.pool_uuid_lo == 0) return NULL; struct _pobj_pcache *cache = &_pobj_cached_pool; if (_pobj_cache_invalidate != cache->invalidate || cache->uuid_lo != oid.pool_uuid_lo) { cache->invalidate = _pobj_cache_invalidate; if (!(cache->pop = pmemobj_pool_by_oid(oid))) { cache->uuid_lo = 0; return NULL; } cache->uuid_lo = oid.pool_uuid_lo; } return (void *)((uintptr_t)cache->pop + oid.off); } #endif /* _WIN32 */ /* * Returns the direct pointer of an object. */ #if defined(_WIN32) || defined(_PMEMOBJ_INTRNL) ||\ defined(PMEMOBJ_DIRECT_NON_INLINE) void *pmemobj_direct(PMEMoid oid); #else #define pmemobj_direct pmemobj_direct_inline #endif struct pmemvlt { uint64_t runid; }; #define PMEMvlt(T)\ struct {\ struct pmemvlt vlt;\ T value;\ } /* * Returns lazily initialized volatile variable. (EXPERIMENTAL) */ void *pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt, void *ptr, size_t size, int (*constr)(void *ptr, void *arg), void *arg); /* * Returns the OID of the object pointed to by addr. 
*/ PMEMoid pmemobj_oid(const void *addr); /* * Returns the number of usable bytes in the object. May be greater than * the requested size of the object because of internal alignment. * * Can be used with objects allocated by any of the available methods. */ size_t pmemobj_alloc_usable_size(PMEMoid oid); /* * Returns the type number of the object. */ uint64_t pmemobj_type_num(PMEMoid oid); /* * Pmemobj specific low-level memory manipulation functions. * * These functions are meant to be used with pmemobj pools, because they provide * additional functionality specific to this type of pool. These may include * for example replication support. They also take advantage of the knowledge * of the type of memory in the pool (pmem/non-pmem) to assure persistence. */ /* * Pmemobj version of memcpy. Data copied is made persistent. */ void *pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest, const void *src, size_t len); /* * Pmemobj version of memset. Data range set is made persistent. */ void *pmemobj_memset_persist(PMEMobjpool *pop, void *dest, int c, size_t len); /* * Pmemobj version of memcpy. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memcpy(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memmove. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memmove(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memset. Data range set is made persistent (unless * opted-out using flags). */ void *pmemobj_memset(PMEMobjpool *pop, void *dest, int c, size_t len, unsigned flags); /* * Pmemobj version of pmem_persist. */ void pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_persist with additional flags argument. */ int pmemobj_xpersist(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_flush. */ void pmemobj_flush(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_flush with additional flags argument. */ int pmemobj_xflush(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_drain. */ void pmemobj_drain(PMEMobjpool *pop); /* * Version checking. */ /* * PMEMOBJ_MAJOR_VERSION and PMEMOBJ_MINOR_VERSION provide the current version * of the libpmemobj API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemobj_check_version(). */ #define PMEMOBJ_MAJOR_VERSION 2 #define PMEMOBJ_MINOR_VERSION 4 #ifndef _WIN32 const char *pmemobj_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemobj_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemobj_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * Passing NULL to pmemobj_set_funcs() tells libpmemobj to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemobj. 
*/ void pmemobj_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); typedef int (*pmemobj_constr)(PMEMobjpool *pop, void *ptr, void *arg); /* * (debug helper function) logs notice message if used inside a transaction */ void _pobj_debug_notice(const char *func_name, const char *file, int line); #ifndef _WIN32 const char *pmemobj_errormsg(void); #else const char *pmemobj_errormsgU(void); const wchar_t *pmemobj_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmemobj/base.h */
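/*
 * Illustrative usage sketch -- not part of the original header. A run-time
 * version check followed by a persistent memcpy; "dst" is assumed to point
 * into the pool.
 */
static int
persist_blob(PMEMobjpool *pop, void *dst, const void *src, size_t len)
{
    /* returns an error string (non-NULL) on a version mismatch */
    if (pmemobj_check_version(PMEMOBJ_MAJOR_VERSION,
        PMEMOBJ_MINOR_VERSION) != NULL)
        return -1;

    /* copy and make the destination range persistent in one call */
    pmemobj_memcpy_persist(pop, dst, src, len);
    return 0;
}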
7,415
23.72
80
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/tx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/tx.h -- definitions of libpmemobj transactional macros */ #ifndef LIBPMEMOBJ_TX_H #define LIBPMEMOBJ_TX_H 1 #include <errno.h> #include <string.h> #include <libpmemobj/tx_base.h> #include <libpmemobj/types.h> extern uint64_t waitCycles; extern uint64_t resetCycles; #ifdef __cplusplus extern "C" { #endif #ifdef POBJ_TX_CRASH_ON_NO_ONABORT #define TX_ONABORT_CHECK do {\ if (_stage == TX_STAGE_ONABORT)\ abort();\ } while (0) #else #define TX_ONABORT_CHECK do {} while (0) #endif #define _POBJ_TX_BEGIN(pop, ...)\ {\ jmp_buf _tx_env;\ enum pobj_tx_stage _stage;\ int _pobj_errno;\ if (setjmp(_tx_env)) {\ errno = pmemobj_tx_errno();\ } else {\ _pobj_errno = pmemobj_tx_begin(pop, _tx_env, __VA_ARGS__,\ TX_PARAM_NONE);\ if (_pobj_errno)\ errno = _pobj_errno;\ }\ while ((_stage = pmemobj_tx_stage()) != TX_STAGE_NONE) {\ switch (_stage) {\ case TX_STAGE_WORK: #define TX_BEGIN_PARAM(pop, ...)\ _POBJ_TX_BEGIN(pop, ##__VA_ARGS__) #define TX_BEGIN_LOCK TX_BEGIN_PARAM /* Just to let compiler warn when incompatible function pointer is used */ static inline pmemobj_tx_callback _pobj_validate_cb_sig(pmemobj_tx_callback cb) { return cb; } #define TX_BEGIN_CB(pop, cb, arg, ...) _POBJ_TX_BEGIN(pop, TX_PARAM_CB,\ _pobj_validate_cb_sig(cb), arg, ##__VA_ARGS__) #define TX_BEGIN(pop) _POBJ_TX_BEGIN(pop, TX_PARAM_NONE) #define TX_ONABORT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONABORT: #define TX_ONCOMMIT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONCOMMIT: #define TX_FINALLY\ pmemobj_tx_process();\ break;\ case TX_STAGE_FINALLY: #define TX_END\ pmemobj_tx_process();\ break;\ default:\ TX_ONABORT_CHECK;\ pmemobj_tx_process();\ break;\ }\ }\ _pobj_errno = pmemobj_tx_end();\ if (_pobj_errno)\ errno = _pobj_errno;\ } #define TX_ADD(o)\ pmemobj_tx_add_range((o).oid, 0, sizeof(*(o)._type)) #define TX_ADD_FIELD(o, field)\ TX_ADD_DIRECT(&(D_RO(o)->field)) #define TX_ADD_DIRECT(p)\ pmemobj_tx_add_range_direct(p, sizeof(*(p))) #define TX_ADD_FIELD_DIRECT(p, field)\ pmemobj_tx_add_range_direct(&(p)->field, sizeof((p)->field)) #define TX_XADD(o, flags)\ pmemobj_tx_xadd_range((o).oid, 0, sizeof(*(o)._type), flags) #define TX_XADD_FIELD(o, field, flags)\ TX_XADD_DIRECT(&(D_RO(o)->field), flags) #define TX_XADD_DIRECT(p, flags)\ pmemobj_tx_xadd_range_direct(p, sizeof(*(p)), flags) #define TX_XADD_FIELD_DIRECT(p, field, flags)\ pmemobj_tx_xadd_range_direct(&(p)->field, sizeof((p)->field), flags) #define TX_NEW(t)\ ((TOID(t))pmemobj_tx_alloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ALLOC(t, size)\ ((TOID(t))pmemobj_tx_alloc(size, TOID_TYPE_NUM(t))) #define TX_ZNEW(t)\ ((TOID(t))pmemobj_tx_zalloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ZALLOC(t, size)\ ((TOID(t))pmemobj_tx_zalloc(size, TOID_TYPE_NUM(t))) #define TX_XALLOC(t, size, flags)\ ((TOID(t))pmemobj_tx_xalloc(size, TOID_TYPE_NUM(t), flags)) /* XXX - not available when compiled with VC++ as C code (/TC) */ #if !defined(_MSC_VER) || defined(__cplusplus) #define TX_REALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_realloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #define TX_ZREALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_zrealloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #endif /* !defined(_MSC_VER) || defined(__cplusplus) */ #define TX_STRDUP(s, type_num)\ pmemobj_tx_strdup(s, type_num) #define TX_XSTRDUP(s, type_num, flags)\ pmemobj_tx_xstrdup(s, type_num, flags) #define TX_WCSDUP(s, type_num)\ pmemobj_tx_wcsdup(s, type_num) #define TX_XWCSDUP(s, type_num, flags)\ 
pmemobj_tx_xwcsdup(s, type_num, flags) #define TX_FREE(o)\ pmemobj_tx_free((o).oid) #define TX_XFREE(o, flags)\ pmemobj_tx_xfree((o).oid, flags) #define TX_SET(o, field, value) (\ TX_ADD_FIELD(o, field),\ D_RW(o)->field = (value)) #define TX_SET_DIRECT(p, field, value) (\ TX_ADD_FIELD_DIRECT(p, field),\ (p)->field = (value)) static inline void * TX_MEMCPY(void *dest, const void *src, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memcpy(dest, src, num); } static inline void * TX_MEMSET(void *dest, int c, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memset(dest, c, num); } #ifdef __cplusplus } #endif #endif /* libpmemobj/tx.h */
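/*
 * Illustrative usage sketch -- not part of the original header (the basic
 * control-flow pattern is shown under tx_bup.h above). This one uses the
 * typed allocation macros; "struct my_item" with a declared TOID and a
 * "value" field is an assumption made for this example.
 */
static void
tx_typed_example(PMEMobjpool *pop, TOID(struct my_item) *slot)
{
    TX_BEGIN(pop) {
        /* snapshot the slot, then swap in a fresh zeroed object */
        TX_ADD_DIRECT(slot);
        TX_FREE(*slot);              /* freeing a NULL TOID is a no-op */
        *slot = TX_ZNEW(struct my_item);
        TX_SET(*slot, value, 1);     /* snapshots the field, then writes */
    } TX_END
}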
4,353
22.037037
74
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/atomic_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/atomic_base.h -- definitions of libpmemobj atomic entry points */ #ifndef LIBPMEMOBJ_ATOMIC_BASE_H #define LIBPMEMOBJ_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional atomic allocations * * Those functions can be used outside transactions. The allocations are always * aligned to the cache-line boundary. */ #define POBJ_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\ POBJ_XALLOC_CLASS_MASK) /* * Allocates a new object from the pool and calls a constructor function before * returning. It is guaranteed that allocated object is either properly * initialized, or if it's interrupted before the constructor completes, the * memory reserved for the object is automatically reclaimed. */ int pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); /* * Allocates with flags a new object from the pool. */ int pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, uint64_t flags, pmemobj_constr constructor, void *arg); /* * Allocates a new zeroed object from the pool. */ int pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object. */ int pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object, if extended new space is zeroed. */ int pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Allocates a new object with duplicate of the string s. */ int pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s, uint64_t type_num); /* * Allocates a new object with duplicate of the wide character string s. */ int pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s, uint64_t type_num); /* * Frees an existing object. */ void pmemobj_free(PMEMoid *oidp); struct pobj_defrag_result { size_t total; /* number of processed objects */ size_t relocated; /* number of relocated objects */ }; /* * Performs defragmentation on the provided array of objects. */ int pmemobj_defrag(PMEMobjpool *pop, PMEMoid **oidv, size_t oidcnt, struct pobj_defrag_result *result); #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic_base.h */
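/*
 * Illustrative usage sketch -- not part of the original header. An atomic
 * allocation with a constructor, so the object is never visible in a
 * half-initialized state. The record layout and type number are
 * assumptions made for this example.
 */
struct record {
    uint64_t id;
};

static int
record_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
    struct record *r = (struct record *)ptr;
    r->id = *(uint64_t *)arg;
    pmemobj_persist(pop, r, sizeof(*r));    /* constructor must persist */
    return 0;
}

static int
alloc_record(PMEMobjpool *pop, PMEMoid *oidp, uint64_t id)
{
    return pmemobj_alloc(pop, oidp, sizeof(struct record), 2,
        record_construct, &id);
}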
2,386
24.393617
79
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/thread.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/thread.h -- definitions of libpmemobj thread/locking entry points */ #ifndef LIBPMEMOBJ_THREAD_H #define LIBPMEMOBJ_THREAD_H 1 #include <time.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Locking. */ #define _POBJ_CL_SIZE 64 /* cache line size */ typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMmutex; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMrwlock; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMcond; void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp); void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp, PMEMmutex *__restrict mutexp); #ifdef __cplusplus } #endif #endif /* libpmemobj/thread.h */
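/*
 * Illustrative usage sketch -- not part of the original header. A pool-
 * resident PMEMmutex guarding a shared field; both pointers are assumed
 * to reference memory inside the pool.
 */
static int
locked_update(PMEMobjpool *pop, PMEMmutex *lock, int *shared)
{
    if (pmemobj_mutex_lock(pop, lock) != 0)
        return -1;
    *shared += 1;
    return pmemobj_mutex_unlock(pop, lock);
}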
2,150
28.875
79
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/action.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * libpmemobj/action.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_H #define LIBPMEMOBJ_ACTION_H 1 #include <libpmemobj/action_base.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_RESERVE_NEW(pop, t, act)\ ((TOID(t))pmemobj_reserve(pop, act, sizeof(t), TOID_TYPE_NUM(t))) #define POBJ_RESERVE_ALLOC(pop, t, size, act)\ ((TOID(t))pmemobj_reserve(pop, act, size, TOID_TYPE_NUM(t))) #define POBJ_XRESERVE_NEW(pop, t, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, sizeof(t), TOID_TYPE_NUM(t), flags)) #define POBJ_XRESERVE_ALLOC(pop, t, size, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, size, TOID_TYPE_NUM(t), flags)) #ifdef __cplusplus } #endif #endif /* libpmemobj/action.h */
829
23.411765
73
h
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/atomic.h -- definitions of libpmemobj atomic macros */ #ifndef LIBPMEMOBJ_ATOMIC_H #define LIBPMEMOBJ_ATOMIC_H 1 #include <libpmemobj/atomic_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_NEW(pop, o, t, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ALLOC(pop, o, t, size, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ZNEW(pop, o, t)\ pmemobj_zalloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t)) #define POBJ_ZALLOC(pop, o, t, size)\ pmemobj_zalloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_REALLOC(pop, o, t, size)\ pmemobj_realloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_ZREALLOC(pop, o, t, size)\ pmemobj_zrealloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_FREE(o)\ pmemobj_free((PMEMoid *)(o)) #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic.h */
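/*
 * Illustrative usage sketch -- not part of the original header. The typed
 * wrappers above resolve the type number from the TOID; "struct my_item"
 * with a declared TOID is an assumption made for this example.
 */
static void
typed_atomic_alloc(PMEMobjpool *pop)
{
    TOID(struct my_item) it;

    /* zeroed, typed allocation outside any transaction */
    if (POBJ_ZNEW(pop, &it, struct my_item) != 0)
        return;

    POBJ_FREE(&it);
}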
1,115
23.26087
66
h
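The macros above are typed shorthands over pmemobj_alloc and friends; a constructor-based sketch using the base call, with struct node and NODE_TYPE_NUM invented.

#include <libpmemobj.h>
#include <stdint.h>

#define NODE_TYPE_NUM 2	/* arbitrary type number for this sketch */

struct node { uint64_t key; };

static int node_init(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct node *n = (struct node *)ptr;
	n->key = *(uint64_t *)arg;
	pmemobj_persist(pop, n, sizeof(*n));
	return 0;	/* nonzero would fail the allocation */
}

static int make_node(PMEMobjpool *pop, PMEMoid *oidp, uint64_t key)
{
	/* the constructor runs before the object becomes reachable, so a
	 * crash mid-initialization never exposes a half-built node */
	return pmemobj_alloc(pop, oidp, sizeof(struct node), NODE_TYPE_NUM,
			node_init, &key);
}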
null
NearPMSW-main/nearpm/logging/pmdk/src/include/libpmemobj/iterator_base.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */

/*
 * libpmemobj/iterator_base.h -- definitions of libpmemobj iterator entry points
 */

#ifndef LIBPMEMOBJ_ITERATOR_BASE_H
#define LIBPMEMOBJ_ITERATOR_BASE_H 1

#include <libpmemobj/base.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * The following functions allow access to the entire collection of objects.
 *
 * Use in conjunction with non-transactional allocations. The pmemobj pool acts
 * as a generic container (list) of objects that are not assigned to any
 * user-defined data structures.
 */

/*
 * Returns the first object of the specified type number.
 */
PMEMoid pmemobj_first(PMEMobjpool *pop);

/*
 * Returns the next object of the same type.
 */
PMEMoid pmemobj_next(PMEMoid oid);

#ifdef __cplusplus
}
#endif

#endif /* libpmemobj/iterator_base.h */
855
20.4
80
h
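A short whole-pool scan built on the two entry points above; dump_pool and the printed fields are illustrative.

#include <libpmemobj.h>
#include <stdio.h>

static void dump_pool(PMEMobjpool *pop)
{
	for (PMEMoid oid = pmemobj_first(pop); !OID_IS_NULL(oid);
			oid = pmemobj_next(oid)) {
		printf("type %llu, usable size %zu\n",
				(unsigned long long)pmemobj_type_num(oid),
				pmemobj_alloc_usable_size(oid));
	}
}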
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/util/csv.h
// SPDX-License-Identifier: Apache-2.0 /* Copyright 2020-2021, Intel Corporation */ #pragma once #include <iostream> #include <map> #include <ostream> #include <set> #include <string> template <typename IdType> class CSV { private: /* Hold data in two-dimensional map of strings: data_matrix[row][column] */ std::map<IdType, std::map<std::string, std::string>> data_matrix; /* List of all columns, which is filled during inserts. Needed for * printing header and data in the same order. * */ std::set<std::string> columns; std::string id_name; public: CSV(std::string id_column_name) : id_name(id_column_name){}; void insert(IdType row, std::string column, std::string data) { columns.insert(column); data_matrix[row][column] = data; } void insert(IdType row, std::string column, const char *data) { insert(row, column, std::string(data)); } template <typename T> void insert(IdType row, std::string column, T data) { insert(row, column, std::to_string(data)); } void print() { // Print first column name std::cout << id_name; for (auto &column : columns) { std::cout << "," << column; } std::cout << "\r\n" << std::flush; for (auto &row : data_matrix) { std::cout << row.first; for (auto &column : columns) { std::cout << "," << data_matrix[row.first][column]; } std::cout << "\r\n" << std::flush; } } };
1,381
21.290323
73
h
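A usage sketch for the CSV helper above; the benchmark names and metric columns are invented.

#include <string>
// assumes the CSV template above is visible (e.g. via util/csv.h)

static void emit_report()
{
	CSV<std::string> csv("benchmark");
	csv.insert("fillseq", "ops/sec", 125000); /* numeric -> to_string overload */
	csv.insert("fillseq", "threads", "8"); /* const char* overload */
	csv.insert("readrandom", "ops/sec", 480000);
	csv.print(); /* header row first, then one row per benchmark id */
}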
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/util/logging.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // Must not be included from any .h files to avoid polluting the namespace // with macros. #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_ #define STORAGE_LEVELDB_UTIL_LOGGING_H_ #include "port/port_posix.h" #include <stdint.h> #include <stdio.h> #include <string> namespace leveldb { class Slice; class WritableFile; // Append a human-readable printout of "num" to *str extern void AppendNumberTo(std::string *str, uint64_t num); // Append a human-readable printout of "value" to *str. // Escapes any non-printable characters found in "value". extern void AppendEscapedStringTo(std::string *str, const Slice &value); // Return a human-readable printout of "num" extern std::string NumberToString(uint64_t num); // Return a human-readable version of "value". // Escapes any non-printable characters found in "value". extern std::string EscapeString(const Slice &value); // Parse a human-readable number from "*in" into *value. On success, // advances "*in" past the consumed number and sets "*val" to the // numeric value. Otherwise, returns false and leaves *in in an // unspecified state. extern bool ConsumeDecimalNumber(Slice *in, uint64_t *val); } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_LOGGING_H_
1,519
30.666667
81
h
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/util/testutil.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #include "leveldb/env.h" #include "leveldb/slice.h" #include "util/random.h" namespace leveldb { namespace test { // Store in *dst a random string of length "len" and return a Slice that // references the generated data. Slice RandomString(Random *rnd, int len, std::string *dst); // Return a random key with the specified length that may contain interesting // characters (e.g. \x00, \xff, etc.). std::string RandomKey(Random *rnd, int len); // Store in *dst a string of length "len" that will compress to // "N*compressed_fraction" bytes and return a Slice that references // the generated data. Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst); // A wrapper that allows injection of errors. class ErrorEnv : public EnvWrapper { public: bool writable_file_error_; int num_writable_file_errors_; ErrorEnv() : EnvWrapper(Env::Default()), writable_file_error_(false), num_writable_file_errors_(0) { } virtual Status NewWritableFile(const std::string &fname, WritableFile **result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewWritableFile(fname, result); } virtual Status NewAppendableFile(const std::string &fname, WritableFile **result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewAppendableFile(fname, result); } }; } // namespace test } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_TESTUTIL_H_
1,984
28.191176
99
h
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/util/mutexlock.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #define STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #include "port/port_posix.h" #include "port/thread_annotations.h" namespace leveldb { // Helper class that locks a mutex on construction and unlocks the mutex when // the destructor of the MutexLock object is invoked. // // Typical usage: // // void MyClass::MyMethod() { // MutexLock l(&mu_); // mu_ is an instance variable // ... some complex code, possibly with multiple return paths ... // } class SCOPED_LOCKABLE MutexLock { public: explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { this->mu_->Lock(); } ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); } private: port::Mutex *const mu_; // No copying allowed MutexLock(const MutexLock &); void operator=(const MutexLock &); }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
1,202
24.0625
81
h
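A minimal sketch of the scoped-lock idiom this class enables; Counter is an invented example type.

#include "port/port_posix.h"
#include "util/mutexlock.h"

namespace leveldb {
class Counter {
public:
	void Add(int n)
	{
		MutexLock l(&mu_); // released on every return path
		total_ += n;
	}

private:
	port::Mutex mu_;
	int total_ = 0;
};
} // namespace leveldb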
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/util/random.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_ #define STORAGE_LEVELDB_UTIL_RANDOM_H_ #include <stdint.h> namespace leveldb { // A very simple random number generator. Not especially good at // generating truly random bits, but good enough for our needs in this // package. class Random { private: uint32_t seed_; public: explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { // Avoid bad seeds. if (seed_ == 0 || seed_ == 2147483647L) { seed_ = 1; } } uint32_t Next() { static const uint32_t M = 2147483647L; // 2^31-1 static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0 // We are computing // seed_ = (seed_ * A) % M, where M = 2^31-1 // // seed_ must not be zero or M, or else all subsequent computed values // will be zero or M respectively. For all other values, seed_ will end // up cycling through every number in [1,M-1] uint64_t product = seed_ * A; // Compute (product % M) using the fact that ((x << 31) % M) == x. seed_ = static_cast<uint32_t>((product >> 31) + (product & M)); // The first reduction may overflow by 1 bit, so we may need to // repeat. mod == M is not possible; using > allows the faster // sign-bit-based test. if (seed_ > M) { seed_ -= M; } return seed_; } // Returns a uniformly distributed value in the range [0..n-1] // REQUIRES: n > 0 uint32_t Uniform(int n) { return Next() % n; } // Randomly returns true ~"1/n" of the time, and false otherwise. // REQUIRES: n > 0 bool OneIn(int n) { return (Next() % n) == 0; } // Skewed: pick "base" uniformly from range [0,max_log] and then // return "base" random bits. The effect is to pick a number in the // range [0,2^max_log-1] with exponential bias towards smaller numbers. uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); } }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_RANDOM_H_
2,202
26.886076
81
h
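A few representative draws from the generator above; the seed and variable names are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include "util/random.h"

static void random_demo()
{
	leveldb::Random rnd(301); // any seed other than 0 and 2^31-1 works
	uint32_t die = rnd.Uniform(6) + 1; // uniform in [1,6]
	bool rare = rnd.OneIn(1000); // true roughly 0.1% of the time
	uint32_t skewed = rnd.Skewed(10); // in [0,1023], biased toward 0
	printf("%u %d %u\n", die, (int)rare, skewed);
}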
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/util/posix_logger.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // // Logger implementation that can be shared by all environments // where enough posix functionality is available. #ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #include "leveldb/env.h" #include <algorithm> #include <stdio.h> #include <sys/time.h> #include <time.h> namespace leveldb { class PosixLogger : public Logger { private: FILE *file_; uint64_t (*gettid_)(); // Return the thread id for the current thread public: PosixLogger(FILE *f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { } virtual ~PosixLogger() { fclose(file_); } virtual void Logv(const char *format, va_list ap) { const uint64_t thread_id = (*gettid_)(); // We try twice: the first time with a fixed-size stack allocated buffer, // and the second time with a much larger dynamically allocated buffer. char buffer[500]; for (int iter = 0; iter < 2; iter++) { char *base; int bufsize; if (iter == 0) { bufsize = sizeof(buffer); base = buffer; } else { bufsize = 30000; base = new char[bufsize]; } char *p = base; char *limit = base + bufsize; struct timeval now_tv; gettimeofday(&now_tv, NULL); const time_t seconds = now_tv.tv_sec; struct tm t; localtime_r(&seconds, &t); p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec), static_cast<long long unsigned int>(thread_id)); // Print the message if (p < limit) { va_list backup_ap; va_copy(backup_ap, ap); p += vsnprintf(p, limit - p, format, backup_ap); va_end(backup_ap); } // Truncate to available space if necessary if (p >= limit) { if (iter == 0) { continue; // Try again with larger buffer } else { p = limit - 1; } } // Add newline if necessary if (p == base || p[-1] != '\n') { *p++ = '\n'; } assert(p <= limit); fwrite(base, 1, p - base, file_); fflush(file_); if (base != buffer) { delete[] base; } break; } } }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
2,503
23.54902
81
h
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/util/env_posix_test_helper.h
// Copyright 2017 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_ #define STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_ namespace leveldb { class EnvPosixTest; // A helper for the POSIX Env to facilitate testing. class EnvPosixTestHelper { private: friend class EnvPosixTest; // Set the maximum number of read-only files that will be opened. // Must be called before creating an Env. static void SetReadOnlyFDLimit(int limit); // Set the maximum number of read-only files that will be mapped via mmap. // Must be called before creating an Env. static void SetReadOnlyMMapLimit(int limit); }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
967
28.333333
81
h
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/port/port_posix.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation

// See port_example.h for documentation for the following types/functions.

#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_

#undef PLATFORM_IS_LITTLE_ENDIAN
#if defined(__APPLE__)
#include <machine/endian.h>
#if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER)
#define PLATFORM_IS_LITTLE_ENDIAN (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN)
#endif
#elif defined(OS_SOLARIS)
#include <sys/isa_defs.h>
#ifdef _LITTLE_ENDIAN
#define PLATFORM_IS_LITTLE_ENDIAN true
#else
#define PLATFORM_IS_LITTLE_ENDIAN false
#endif
#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
#include <sys/endian.h>
#include <sys/types.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
#elif defined(OS_HPUX)
#define PLATFORM_IS_LITTLE_ENDIAN false
#elif defined(OS_ANDROID)
// Due to a bug in the NDK x86 <sys/endian.h> definition,
// _BYTE_ORDER must be used instead of __BYTE_ORDER on Android.
// See http://code.google.com/p/android/issues/detail?id=39824
#include <endian.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
#else
#include <endian.h>
#endif

#include <pthread.h>
#if defined(HAVE_CRC32C)
#include <crc32c/crc32c.h>
#endif // defined(HAVE_CRC32C)
#ifdef HAVE_SNAPPY
#include <snappy.h>
#endif // defined(HAVE_SNAPPY)

#include "port/atomic_pointer.h"
#include <stdint.h>
#include <string>

#ifndef PLATFORM_IS_LITTLE_ENDIAN
#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
#endif

#if defined(__APPLE__) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD)
// Use fsync() on platforms without fdatasync()
#define fdatasync fsync
#endif

#if defined(OS_ANDROID) && __ANDROID_API__ < 9
// fdatasync() was only introduced in API level 9 on Android. Use fsync()
// when targeting older platforms.
#define fdatasync fsync #endif namespace leveldb { namespace port { static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN; #undef PLATFORM_IS_LITTLE_ENDIAN class CondVar; class Mutex { public: Mutex(); ~Mutex(); void Lock(); void Unlock(); void AssertHeld() { } private: friend class CondVar; pthread_mutex_t mu_; // No copying Mutex(const Mutex &); void operator=(const Mutex &); }; class CondVar { public: explicit CondVar(Mutex *mu); ~CondVar(); void Wait(); void Signal(); void SignalAll(); private: pthread_cond_t cv_; Mutex *mu_; }; typedef pthread_once_t OnceType; #define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT extern void InitOnce(OnceType *once, void (*initializer)()); inline bool Snappy_Compress(const char *input, size_t length, ::std::string *output) { #ifdef HAVE_SNAPPY output->resize(snappy::MaxCompressedLength(length)); size_t outlen; snappy::RawCompress(input, length, &(*output)[0], &outlen); output->resize(outlen); return true; #endif // defined(HAVE_SNAPPY) return false; } inline bool Snappy_GetUncompressedLength(const char *input, size_t length, size_t *result) { #ifdef HAVE_SNAPPY return snappy::GetUncompressedLength(input, length, result); #else return false; #endif // defined(HAVE_SNAPPY) } inline bool Snappy_Uncompress(const char *input, size_t length, char *output) { #ifdef HAVE_SNAPPY return snappy::RawUncompress(input, length, output); #else return false; #endif // defined(HAVE_SNAPPY) } inline bool GetHeapProfile(void (*func)(void *, const char *, int), void *arg) { return false; } inline uint32_t AcceleratedCRC32C(uint32_t crc, const char *buf, size_t size) { #if defined(HAVE_CRC32C) return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t *>(buf), size); #else return 0; #endif // defined(HAVE_CRC32C) } } // namespace port } // namespace leveldb #endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
4,061
23.768293
98
h
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/port/thread_annotations.h
// Copyright (c) 2012 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ #define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ // Some environments provide custom macros to aid in static thread-safety // analysis. Provide empty definitions of such macros unless they are already // defined. #ifndef EXCLUSIVE_LOCKS_REQUIRED #define EXCLUSIVE_LOCKS_REQUIRED(...) #endif #ifndef SHARED_LOCKS_REQUIRED #define SHARED_LOCKS_REQUIRED(...) #endif #ifndef LOCKS_EXCLUDED #define LOCKS_EXCLUDED(...) #endif #ifndef LOCK_RETURNED #define LOCK_RETURNED(x) #endif #ifndef LOCKABLE #define LOCKABLE #endif #ifndef SCOPED_LOCKABLE #define SCOPED_LOCKABLE #endif #ifndef EXCLUSIVE_LOCK_FUNCTION #define EXCLUSIVE_LOCK_FUNCTION(...) #endif #ifndef SHARED_LOCK_FUNCTION #define SHARED_LOCK_FUNCTION(...) #endif #ifndef EXCLUSIVE_TRYLOCK_FUNCTION #define EXCLUSIVE_TRYLOCK_FUNCTION(...) #endif #ifndef SHARED_TRYLOCK_FUNCTION #define SHARED_TRYLOCK_FUNCTION(...) #endif #ifndef UNLOCK_FUNCTION #define UNLOCK_FUNCTION(...) #endif #ifndef NO_THREAD_SAFETY_ANALYSIS #define NO_THREAD_SAFETY_ANALYSIS #endif #endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
1,429
21.34375
81
h
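A sketch of how these macros decorate code: under clang's -Wthread-safety they drive static lock analysis, and they expand to nothing elsewhere. SharedState is invented.

#include "port/port_posix.h"
#include "port/thread_annotations.h"
#include "util/mutexlock.h"

namespace leveldb {
class SharedState {
public:
	void Set(int v) LOCKS_EXCLUDED(mu_)
	{
		MutexLock l(&mu_);
		value_ = v;
	}
	// caller must already hold mu_; the analyzer enforces this
	int GetLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { return value_; }

private:
	port::Mutex mu_;
	int value_;
};
} // namespace leveldb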
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/port/atomic_pointer.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // AtomicPointer provides storage for a lock-free pointer. // Platform-dependent implementation of AtomicPointer: // - If the platform provides a cheap barrier, we use it with raw pointers // - If <atomic> is present (on newer versions of gcc, it is), we use // a <atomic>-based AtomicPointer. However we prefer the memory // barrier based version, because at least on a gcc 4.4 32-bit build // on linux, we have encountered a buggy <atomic> implementation. // Also, some <atomic> implementations are much slower than a memory-barrier // based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for // a barrier based acquire-load). // This code is based on atomicops-internals-* in Google's perftools: // http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase #ifndef PORT_ATOMIC_POINTER_H_ #define PORT_ATOMIC_POINTER_H_ #include <stdint.h> #ifdef LEVELDB_ATOMIC_PRESENT #include <atomic> #endif #ifdef OS_WIN #include <windows.h> #endif #ifdef __APPLE__ #include <libkern/OSAtomic.h> #endif #if defined(_M_X64) || defined(__x86_64__) #define ARCH_CPU_X86_FAMILY 1 #elif defined(_M_IX86) || defined(__i386__) || defined(__i386) #define ARCH_CPU_X86_FAMILY 1 #elif defined(__ARMEL__) #define ARCH_CPU_ARM_FAMILY 1 #elif defined(__aarch64__) #define ARCH_CPU_ARM64_FAMILY 1 #elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__) #define ARCH_CPU_PPC_FAMILY 1 #elif defined(__mips__) #define ARCH_CPU_MIPS_FAMILY 1 #endif namespace leveldb { namespace port { // Define MemoryBarrier() if available // Windows on x86 #if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY) // windows.h already provides a MemoryBarrier(void) macro // http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx #define LEVELDB_HAVE_MEMORY_BARRIER // Mac OS #elif defined(__APPLE__) inline void MemoryBarrier() { OSMemoryBarrier(); } #define LEVELDB_HAVE_MEMORY_BARRIER // Gcc on x86 #elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. __asm__ __volatile__("" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // Sun Studio #elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC) inline void MemoryBarrier() { // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. asm volatile("" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // ARM Linux #elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__) typedef void (*LinuxKernelMemoryBarrierFunc)(void); // The Linux ARM kernel provides a highly optimized device-specific memory // barrier function at a fixed memory address that is mapped in every // user-level process. // // This beats using CPU-specific instructions which are, on single-core // devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more // than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking // shows that the extra function call cost is completely negligible on // multi-core devices. 
// inline void MemoryBarrier() { (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)(); } #define LEVELDB_HAVE_MEMORY_BARRIER // ARM64 #elif defined(ARCH_CPU_ARM64_FAMILY) inline void MemoryBarrier() { asm volatile("dmb sy" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // PPC #elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { // TODO for some powerpc expert: is there a cheaper suitable variant? // Perhaps by having separate barriers for acquire and release ops. asm volatile("sync" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // MIPS #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { __asm__ __volatile__("sync" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER #endif // AtomicPointer built using platform-specific MemoryBarrier() #if defined(LEVELDB_HAVE_MEMORY_BARRIER) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *p) : rep_(p) { } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } inline void *Acquire_Load() const { void *result = rep_; MemoryBarrier(); return result; } inline void Release_Store(void *v) { MemoryBarrier(); rep_ = v; } }; // AtomicPointer based on <cstdatomic> #elif defined(LEVELDB_ATOMIC_PRESENT) class AtomicPointer { private: std::atomic<void *> rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { return rep_.load(std::memory_order_acquire); } inline void Release_Store(void *v) { rep_.store(v, std::memory_order_release); } inline void *NoBarrier_Load() const { return rep_.load(std::memory_order_relaxed); } inline void NoBarrier_Store(void *v) { rep_.store(v, std::memory_order_relaxed); } }; // Atomic pointer based on sparc memory barriers #elif defined(__sparcv9) && defined(__GNUC__) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { void *val; __asm__ __volatile__("ldx [%[rep_]], %[val] \n\t" "membar #LoadLoad|#LoadStore \n\t" : [val] "=r"(val) : [rep_] "r"(&rep_) : "memory"); return val; } inline void Release_Store(void *v) { __asm__ __volatile__("membar #LoadStore|#StoreStore \n\t" "stx %[v], [%[rep_]] \n\t" : : [rep_] "r"(&rep_), [v] "r"(v) : "memory"); } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } }; // Atomic pointer based on ia64 acq/rel #elif defined(__ia64) && defined(__GNUC__) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { void *val; __asm__ __volatile__("ld8.acq %[val] = [%[rep_]] \n\t" : [val] "=r"(val) : [rep_] "r"(&rep_) : "memory"); return val; } inline void Release_Store(void *v) { __asm__ __volatile__("st8.rel [%[rep_]] = %[v] \n\t" : : [rep_] "r"(&rep_), [v] "r"(v) : "memory"); } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } }; // We have neither MemoryBarrier(), nor <atomic> #else #error Please implement AtomicPointer for this platform. #endif #undef LEVELDB_HAVE_MEMORY_BARRIER #undef ARCH_CPU_X86_FAMILY #undef ARCH_CPU_ARM_FAMILY #undef ARCH_CPU_ARM64_FAMILY #undef ARCH_CPU_PPC_FAMILY } // namespace port } // namespace leveldb #endif // PORT_ATOMIC_POINTER_H_
7,207
23.26936
84
h
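A publish/consume sketch of the class above; Config and the function names are invented.

#include "port/atomic_pointer.h"

struct Config { int verbosity; };

static leveldb::port::AtomicPointer g_config(nullptr);

// Release_Store fences before the store, so a reader that Acquire_Loads
// the pointer observes a fully constructed Config.
void PublishConfig(Config *c) { g_config.Release_Store(c); }

Config *CurrentConfig()
{
	return static_cast<Config *>(g_config.Acquire_Load());
}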
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/include/leveldb/status.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // A Status encapsulates the result of an operation. It may indicate success, // or it may indicate an error with an associated error message. // // Multiple threads can invoke const methods on a Status without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Status must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_ #define STORAGE_LEVELDB_INCLUDE_STATUS_H_ #include "leveldb/slice.h" #include <string> namespace leveldb { class Status { public: // Create a success status. Status() : state_(NULL) { } ~Status() { delete[] state_; } // Copy the specified status. Status(const Status &s); void operator=(const Status &s); // Return a success status. static Status OK() { return Status(); } // Return error status of an appropriate type. static Status NotFound(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kNotFound, msg, msg2); } static Status Corruption(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kCorruption, msg, msg2); } static Status NotSupported(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kNotSupported, msg, msg2); } static Status InvalidArgument(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kInvalidArgument, msg, msg2); } static Status IOError(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kIOError, msg, msg2); } // Returns true iff the status indicates success. bool ok() const { return (state_ == NULL); } // Returns true iff the status indicates a NotFound error. bool IsNotFound() const { return code() == kNotFound; } // Returns true iff the status indicates a Corruption error. bool IsCorruption() const { return code() == kCorruption; } // Returns true iff the status indicates an IOError. bool IsIOError() const { return code() == kIOError; } // Returns true iff the status indicates a NotSupportedError. bool IsNotSupportedError() const { return code() == kNotSupported; } // Returns true iff the status indicates an InvalidArgument. bool IsInvalidArgument() const { return code() == kInvalidArgument; } // Return a string representation of this status suitable for printing. // Returns the string "OK" for success. std::string ToString() const; private: // OK status has a NULL state_. Otherwise, state_ is a new[] array // of the following form: // state_[0..3] == length of message // state_[4] == code // state_[5..] == message const char *state_; enum Code { kOk = 0, kNotFound = 1, kCorruption = 2, kNotSupported = 3, kInvalidArgument = 4, kIOError = 5 }; Code code() const { return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]); } Status(Code code, const Slice &msg, const Slice &msg2); static const char *CopyState(const char *s); }; inline Status::Status(const Status &s) { state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } inline void Status::operator=(const Status &s) { // The following condition catches both aliasing (when this == &s), // and the common case where both s and *this are ok. if (state_ != s.state_) { delete[] state_; state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } } } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_
3,658
23.231788
81
h
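A usage sketch for Status; DoLookup stands in for any fallible operation.

#include <stdio.h>
#include <string>
#include "leveldb/status.h"

static leveldb::Status DoLookup(const std::string &key, std::string *value)
{
	if (key.empty())
		return leveldb::Status::InvalidArgument("empty key");
	*value = "stub";
	return leveldb::Status::OK();
}

static void caller()
{
	std::string v;
	leveldb::Status s = DoLookup("answer", &v);
	if (!s.ok())
		fprintf(stderr, "lookup failed: %s\n", s.ToString().c_str());
}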
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/include/leveldb/slice.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // Slice is a simple structure containing a pointer into some external // storage and a size. The user of a Slice must ensure that the slice // is not used after the corresponding external storage has been // deallocated. // // Multiple threads can invoke const methods on a Slice without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Slice must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_ #define STORAGE_LEVELDB_INCLUDE_SLICE_H_ #include <assert.h> #include <stddef.h> #include <string.h> #include <string> namespace leveldb { class Slice { public: // Create an empty slice. Slice() : data_(""), size_(0) { } // Create a slice that refers to d[0,n-1]. Slice(const char *d, size_t n) : data_(d), size_(n) { } // Create a slice that refers to the contents of "s" Slice(const std::string &s) : data_(s.data()), size_(s.size()) { } // Create a slice that refers to s[0,strlen(s)-1] Slice(const char *s) : data_(s), size_(strlen(s)) { } // Return a pointer to the beginning of the referenced data const char *data() const { return data_; } // Return the length (in bytes) of the referenced data size_t size() const { return size_; } // Return true iff the length of the referenced data is zero bool empty() const { return size_ == 0; } // Return the ith byte in the referenced data. // REQUIRES: n < size() char operator[](size_t n) const { assert(n < size()); return data_[n]; } // Change this slice to refer to an empty array void clear() { data_ = ""; size_ = 0; } // Drop the first "n" bytes from this slice. void remove_prefix(size_t n) { assert(n <= size()); data_ += n; size_ -= n; } // Return a string that contains the copy of the referenced data. std::string ToString() const { return std::string(data_, size_); } // Three-way comparison. Returns value: // < 0 iff "*this" < "b", // == 0 iff "*this" == "b", // > 0 iff "*this" > "b" int compare(const Slice &b) const; // Return true iff "x" is a prefix of "*this" bool starts_with(const Slice &x) const { return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0)); } private: const char *data_; size_t size_; // Intentionally copyable }; inline bool operator==(const Slice &x, const Slice &y) { return ((x.size() == y.size()) && (memcmp(x.data(), y.data(), x.size()) == 0)); } inline bool operator!=(const Slice &x, const Slice &y) { return !(x == y); } inline int Slice::compare(const Slice &b) const { const size_t min_len = (size_ < b.size_) ? size_ : b.size_; int r = memcmp(data_, b.data_, min_len); if (r == 0) { if (size_ < b.size_) r = -1; else if (size_ > b.size_) r = +1; } return r; } } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
3,163
21.125874
81
h
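A small demonstration of Slice semantics: it aliases external storage and never owns it. The strings are arbitrary.

#include <assert.h>
#include <string>
#include "leveldb/slice.h"

static void slice_demo()
{
	std::string backing = "level/db/key";
	leveldb::Slice s(backing); // points into `backing`, copies nothing
	assert(s.starts_with("level/"));
	s.remove_prefix(6); // now "db/key"; `backing` itself is untouched
	assert(s == leveldb::Slice("db/key"));
	// s must not outlive `backing`: it holds no copy of the bytes
}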
null
NearPMSW-main/nearpm/logging/pmemkv-bench/bench/include/leveldb/env.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation

// An Env is an interface used by the leveldb implementation to access
// operating system functionality like the filesystem etc. Callers
// may wish to provide a custom Env object when opening a database to
// get fine-grain control; e.g., to rate limit file system operations.
//
// All Env implementations are safe for concurrent access from
// multiple threads without any external synchronization.

#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
#define STORAGE_LEVELDB_INCLUDE_ENV_H_

#include "leveldb/status.h"
#include <stdarg.h>
#include <stdint.h>
#include <string>
#include <vector>

namespace leveldb {

class FileLock;
class Logger;
class RandomAccessFile;
class SequentialFile;
class Slice;
class WritableFile;

class Env {
public:
	Env()
	{
	}
	virtual ~Env();

	// Return a default environment suitable for the current operating
	// system. Sophisticated users may wish to provide their own Env
	// implementation instead of relying on this default environment.
	//
	// The result of Default() belongs to leveldb and must never be deleted.
	static Env *Default();

	// Create a brand new sequentially-readable file with the specified name.
	// On success, stores a pointer to the new file in *result and returns OK.
	// On failure stores NULL in *result and returns non-OK. If the file does
	// not exist, returns a non-OK status. Implementations should return a
	// NotFound status when the file does not exist.
	//
	// The returned file will only be accessed by one thread at a time.
	virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result) = 0;

	// Create a brand new random access read-only file with the
	// specified name. On success, stores a pointer to the new file in
	// *result and returns OK. On failure stores NULL in *result and
	// returns non-OK. If the file does not exist, returns a non-OK
	// status. Implementations should return a NotFound status when the file does
	// not exist.
	//
	// The returned file may be concurrently accessed by multiple threads.
	virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result) = 0;

	// Create an object that writes to a new file with the specified
	// name. Deletes any existing file with the same name and creates a
	// new file. On success, stores a pointer to the new file in
	// *result and returns OK. On failure stores NULL in *result and
	// returns non-OK.
	//
	// The returned file will only be accessed by one thread at a time.
	virtual Status NewWritableFile(const std::string &fname, WritableFile **result) = 0;

	// Create an object that either appends to an existing file, or
	// writes to a new file (if the file does not exist to begin with).
	// On success, stores a pointer to the new file in *result and
	// returns OK. On failure stores NULL in *result and returns
	// non-OK.
	//
	// The returned file will only be accessed by one thread at a time.
	//
	// May return an IsNotSupportedError error if this Env does
	// not allow appending to an existing file. Users of Env (including
	// the leveldb implementation) must be prepared to deal with
	// an Env that does not support appending.
	virtual Status NewAppendableFile(const std::string &fname, WritableFile **result);

	// Returns true iff the named file exists.
	virtual bool FileExists(const std::string &fname) = 0;

	// Store in *result the names of the children of the specified directory.
	// The names are relative to "dir".
	// Original contents of *result are dropped.
	virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result) = 0;

	// Delete the named file.
	virtual Status DeleteFile(const std::string &fname) = 0;

	// Create the specified directory.
	virtual Status CreateDir(const std::string &dirname) = 0;

	// Delete the specified directory.
	virtual Status DeleteDir(const std::string &dirname) = 0;

	// Store the size of fname in *file_size.
	virtual Status GetFileSize(const std::string &fname, uint64_t *file_size) = 0;

	// Rename file src to target.
	virtual Status RenameFile(const std::string &src, const std::string &target) = 0;

	// Lock the specified file. Used to prevent concurrent access to
	// the same db by multiple processes. On failure, stores NULL in
	// *lock and returns non-OK.
	//
	// On success, stores a pointer to the object that represents the
	// acquired lock in *lock and returns OK. The caller should call
	// UnlockFile(*lock) to release the lock. If the process exits,
	// the lock will be automatically released.
	//
	// If somebody else already holds the lock, finishes immediately
	// with a failure. I.e., this call does not wait for existing locks
	// to go away.
	//
	// May create the named file if it does not already exist.
	virtual Status LockFile(const std::string &fname, FileLock **lock) = 0;

	// Release the lock acquired by a previous successful call to LockFile.
	// REQUIRES: lock was returned by a successful LockFile() call
	// REQUIRES: lock has not already been unlocked.
	virtual Status UnlockFile(FileLock *lock) = 0;

	// Arrange to run "(*function)(arg)" once in a background thread.
	//
	// "function" may run in an unspecified thread. Multiple functions
	// added to the same Env may run concurrently in different threads.
	// I.e., the caller may not assume that background work items are
	// serialized.
	virtual void Schedule(void (*function)(void *arg), void *arg) = 0;

	// Start a new thread, invoking "function(arg)" within the new thread.
	// When "function(arg)" returns, the thread will be destroyed.
	virtual void StartThread(void (*function)(void *arg), void *arg) = 0;

	// *path is set to a temporary directory that can be used for testing. It may
	// or may not have just been created. The directory may or may not differ
	// between runs of the same process, but subsequent calls will return the
	// same directory.
	virtual Status GetTestDirectory(std::string *path) = 0;

	// Create and return a log file for storing informational messages.
	virtual Status NewLogger(const std::string &fname, Logger **result) = 0;

	// Returns the number of micro-seconds since some fixed point in time. Only
	// useful for computing deltas of time.
	virtual uint64_t NowMicros() = 0;

	// Sleep/delay the thread for the prescribed number of micro-seconds.
	virtual void SleepForMicroseconds(int micros) = 0;

private:
	// No copying allowed
	Env(const Env &);
	void operator=(const Env &);
};

// A file abstraction for reading sequentially through a file
class SequentialFile {
public:
	SequentialFile()
	{
	}
	virtual ~SequentialFile();

	// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
	// written by this routine. Sets "*result" to the data that was
	// read (including if fewer than "n" bytes were successfully read).
	// May set "*result" to point at data in "scratch[0..n-1]", so
	// "scratch[0..n-1]" must be live when "*result" is used.
	// If an error was encountered, returns a non-OK status.
	//
	// REQUIRES: External synchronization
	virtual Status Read(size_t n, Slice *result, char *scratch) = 0;

	// Skip "n" bytes from the file. This is guaranteed to be no
	// slower than reading the same data, but may be faster.
	//
	// If end of file is reached, skipping will stop at the end of the
	// file, and Skip will return OK.
	//
	// REQUIRES: External synchronization
	virtual Status Skip(uint64_t n) = 0;

private:
	// No copying allowed
	SequentialFile(const SequentialFile &);
	void operator=(const SequentialFile &);
};

// A file abstraction for randomly reading the contents of a file.
class RandomAccessFile {
public:
	RandomAccessFile()
	{
	}
	virtual ~RandomAccessFile();

	// Read up to "n" bytes from the file starting at "offset".
	// "scratch[0..n-1]" may be written by this routine. Sets "*result"
	// to the data that was read (including if fewer than "n" bytes were
	// successfully read). May set "*result" to point at data in
	// "scratch[0..n-1]", so "scratch[0..n-1]" must be live when
	// "*result" is used. If an error was encountered, returns a non-OK
	// status.
	//
	// Safe for concurrent use by multiple threads.
	virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const = 0;

private:
	// No copying allowed
	RandomAccessFile(const RandomAccessFile &);
	void operator=(const RandomAccessFile &);
};

// A file abstraction for sequential writing. The implementation
// must provide buffering since callers may append small fragments
// at a time to the file.
class WritableFile {
public:
	WritableFile()
	{
	}
	virtual ~WritableFile();

	virtual Status Append(const Slice &data) = 0;
	virtual Status Close() = 0;
	virtual Status Flush() = 0;
	virtual Status Sync() = 0;

private:
	// No copying allowed
	WritableFile(const WritableFile &);
	void operator=(const WritableFile &);
};

// An interface for writing log messages.
class Logger {
public:
	Logger()
	{
	}
	virtual ~Logger();

	// Write an entry to the log file with the specified format.
	virtual void Logv(const char *format, va_list ap) = 0;

private:
	// No copying allowed
	Logger(const Logger &);
	void operator=(const Logger &);
};

// Identifies a locked file.
class FileLock {
public:
	FileLock()
	{
	}
	virtual ~FileLock();

private:
	// No copying allowed
	FileLock(const FileLock &);
	void operator=(const FileLock &);
};

// Log the specified data to *info_log if info_log is non-NULL.
extern void Log(Logger *info_log, const char *format, ...)
#if defined(__GNUC__) || defined(__clang__)
	__attribute__((__format__(__printf__, 2, 3)))
#endif
	;

// A utility routine: write "data" to the named file.
Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname);

// A utility routine: read contents of named file into *data
Status ReadFileToString(Env *env, const std::string &fname, std::string *data);

// An implementation of Env that forwards all calls to another Env.
// May be useful to clients who wish to override just part of the
// functionality of another Env.
class EnvWrapper : public Env { public: // Initialize an EnvWrapper that delegates all calls to *t explicit EnvWrapper(Env *t) : target_(t) { } virtual ~EnvWrapper(); // Return the target to which this Env forwards all calls Env *target() const { return target_; } // The following text is boilerplate that forwards all methods to target() Status NewSequentialFile(const std::string &f, SequentialFile **r) { return target_->NewSequentialFile(f, r); } Status NewRandomAccessFile(const std::string &f, RandomAccessFile **r) { return target_->NewRandomAccessFile(f, r); } Status NewWritableFile(const std::string &f, WritableFile **r) { return target_->NewWritableFile(f, r); } Status NewAppendableFile(const std::string &f, WritableFile **r) { return target_->NewAppendableFile(f, r); } bool FileExists(const std::string &f) { return target_->FileExists(f); } Status GetChildren(const std::string &dir, std::vector<std::string> *r) { return target_->GetChildren(dir, r); } Status DeleteFile(const std::string &f) { return target_->DeleteFile(f); } Status CreateDir(const std::string &d) { return target_->CreateDir(d); } Status DeleteDir(const std::string &d) { return target_->DeleteDir(d); } Status GetFileSize(const std::string &f, uint64_t *s) { return target_->GetFileSize(f, s); } Status RenameFile(const std::string &s, const std::string &t) { return target_->RenameFile(s, t); } Status LockFile(const std::string &f, FileLock **l) { return target_->LockFile(f, l); } Status UnlockFile(FileLock *l) { return target_->UnlockFile(l); } void Schedule(void (*f)(void *), void *a) { return target_->Schedule(f, a); } void StartThread(void (*f)(void *), void *a) { return target_->StartThread(f, a); } virtual Status GetTestDirectory(std::string *path) { return target_->GetTestDirectory(path); } virtual Status NewLogger(const std::string &fname, Logger **result) { return target_->NewLogger(fname, result); } uint64_t NowMicros() { return target_->NowMicros(); } void SleepForMicroseconds(int micros) { target_->SleepForMicroseconds(micros); } private: Env *target_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
12,539
30.827411
93
h
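A sketch of the convenience helpers layered on Env; the path is illustrative.

#include <string>
#include "leveldb/env.h"

static leveldb::Status round_trip(std::string *out)
{
	leveldb::Env *env = leveldb::Env::Default();
	leveldb::Status s = leveldb::WriteStringToFile(env, leveldb::Slice("hello"), "/tmp/note");
	if (s.ok())
		s = leveldb::ReadFileToString(env, "/tmp/note", out);
	return s;
}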
null
NearPMSW-main/nearpm/logging/include/txopt.h
// The starting address of the selected counter_atomic writes #ifndef TXOPT_H #define TXOPT_H #define COUNTER_ATOMIC_VADDR (4096UL*1024*1024) #define NUM_COUNTER_ATOMIC_PAGE 262144 // The starting address of the flush cache instruction #define CACHE_FLUSH_VADDR (4096UL*1024*1024+4*NUM_COUNTER_ATOMIC_PAGE*1024) // The starting address of the flush metadata cache instruction #define METADATA_CACHE_FLUSH_VADDR (4096UL*1024*1024+(4*NUM_COUNTER_ATOMIC_PAGE+4)*1024) #define STATUS_OUTPUT_VADDR (METADATA_CACHE_FLUSH_VADDR + 1024UL) #define INIT_METADATA_CACHE_VADDR (STATUS_OUTPUT_VADDR + 1024UL) #define TXOPT_VADDR (INIT_METADATA_CACHE_VADDR+1024UL) #define CACHE_LINE_SIZE 64UL #include <vector> #include <deque> #include <cstdlib> #include <cstdint> #include <atomic> #include <stdio.h> #include <cassert> enum opt_flag { FLAG_OPT, FLAG_OPT_VAL, FLAG_OPT_ADDR, FLAG_OPT_DATA, FLAG_OPT_DATA_VAL, /* register no execute */ FLAG_OPT_REG, FLAG_OPT_VAL_REG, FLAG_OPT_ADDR_REG, FLAG_OPT_DATA_REG, FLAG_OPT_DATA_VAL_REG, /* execute registered OPT */ FLAG_OPT_START }; struct opt_t { //int pid; int obj_id; }; // Fields in the OPT packet // Used by both SW and HW struct opt_packet_t { void* opt_obj; void* pmemaddr; //void* data_ptr; //int seg_id; //int data_val; unsigned size; opt_flag type; }; // OPT with both data and addr ready volatile void OPT(void* opt_obj, bool reg, void* pmemaddr, void* data, unsigned size); //#define OPT(opt_obj, pmemaddr, data, size) \ // *((opt_packet_t*)TXOPT_VADDR) = (opt_packet_t){opt_obj, pmemaddr, size, FLAG_OPT_DATA}; // OPT with both data (int) and addr ready volatile void OPT_VAL(void* opt_obj, bool reg, void* pmemaddr, int data_val); // OPT with only data ready volatile void OPT_DATA(void* opt_obj, bool reg, void* data, unsigned size); // OPT with only addr ready volatile void OPT_ADDR(void* opt_obj, bool reg, void* pmemaddr, unsigned size); // OPT with only data (int) ready volatile void OPT_DATA_VAL(void* opt_obj, bool reg, int data_val); // Begin OPT operation volatile void OPT_START(void* opt_obj); // store barrier volatile void s_fence(); // flush both metadata cache and data cache volatile void flush_caches(void* addr, unsigned size); // flush data cache only volatile void cache_flush(void* addr, unsigned size); // flush metadata cache only volatile void metadata_cache_flush(void* addr, unsigned size); // malloc that is cache-line aligned void *aligned_malloc(int size); class CounterAtomic { public: static void* counter_atomic_malloc(unsigned _size); // size is num of bytes static volatile void statOutput(); static volatile void initCounterCache(); uint64_t getValue(); uint64_t getPtr(); CounterAtomic(); CounterAtomic(uint64_t _val); CounterAtomic(bool _val); CounterAtomic& operator=(uint64_t _val); CounterAtomic& operator+(uint64_t _val); CounterAtomic& operator++(); CounterAtomic& operator--(); CounterAtomic& operator-(uint64_t _val); bool operator==(uint64_t _val); bool operator!=(uint64_t _val); private: void init(); static uint64_t getNextAtomicAddr(unsigned _size); static uint64_t getNextCacheFlushAddr(unsigned _size); //static uint64_t getNextPersistBarrierAddr(unsigned _size); static uint64_t getNextCounterCacheFlushAddr(unsigned _size); static uint64_t currAtomicAddr; static uint64_t currCacheFlushAddr; //static uint64_t currPersistentBarrierAddr; static uint64_t currCounterCacheFlushAddr; /* static bool hasAllocateCacheFlush; static bool hasAllocateCounterCacheFlush; static bool hasAllocatePersistBarrier; */ //uint64_t val; uint64_t val_addr = 0; }; #endif
3,665
26.155556
90
h
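A heavily hedged sketch of issuing a single OPT store through the interface above. It assumes the NearPM runtime/simulator has mapped the special address regions; the destination pointer, the object id, and the reading of reg=false as "execute immediately rather than register for later" are assumptions, not documented behavior.

#include <stdint.h>
#include "txopt.h"

static void persist_word(void *pmem_dst, uint64_t *src)
{
	opt_t obj;
	obj.obj_id = 1; // arbitrary object id for this sketch
	// address and data are both known here, so use the combined form
	OPT(&obj, false, pmem_dst, (void *)src, sizeof(*src));
	s_fence(); // order the OPT ahead of later stores
	flush_caches(pmem_dst, sizeof(*src)); // data + metadata flush
}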
null
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/slab_automove.c
/* Copyright 2017 Facebook. * * Use and distribution licensed under the BSD license. See * the LICENSE file for full text. */ /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ #include "memcached.h" #include "slab_automove.h" #include <stdlib.h> #include <string.h> #define MIN_PAGES_FOR_SOURCE 2 #define MIN_PAGES_FOR_RECLAIM 2.5 struct window_data { uint64_t age; uint64_t dirty; uint64_t evicted; }; typedef struct { struct window_data *window_data; uint32_t window_size; uint32_t window_cur; double max_age_ratio; item_stats_automove iam_before[MAX_NUMBER_OF_SLAB_CLASSES]; item_stats_automove iam_after[MAX_NUMBER_OF_SLAB_CLASSES]; slab_stats_automove sam_before[MAX_NUMBER_OF_SLAB_CLASSES]; slab_stats_automove sam_after[MAX_NUMBER_OF_SLAB_CLASSES]; } slab_automove; void *slab_automove_init(struct settings *settings) { uint32_t window_size = settings->slab_automove_window; double max_age_ratio = settings->slab_automove_ratio; slab_automove *a = calloc(1, sizeof(slab_automove)); if (a == NULL) return NULL; a->window_data = calloc(window_size * MAX_NUMBER_OF_SLAB_CLASSES, sizeof(struct window_data)); a->window_size = window_size; a->max_age_ratio = max_age_ratio; if (a->window_data == NULL) { free(a); return NULL; } // do a dry run to fill the before structs fill_item_stats_automove(a->iam_before); fill_slab_stats_automove(a->sam_before); return (void *)a; } void slab_automove_free(void *arg) { slab_automove *a = (slab_automove *)arg; free(a->window_data); free(a); } static void window_sum(struct window_data *wd, struct window_data *w, uint32_t size) { int x; for (x = 0; x < size; x++) { struct window_data *d = &wd[x]; w->age += d->age; w->dirty += d->dirty; w->evicted += d->evicted; } } // TODO: if oldest is dirty, find next oldest. // still need to base ratio off of absolute age void slab_automove_run(void *arg, int *src, int *dst) { slab_automove *a = (slab_automove *)arg; int n; struct window_data w_sum; int oldest = -1; uint64_t oldest_age = 0; int youngest = -1; uint64_t youngest_age = ~0; bool youngest_evicting = false; *src = -1; *dst = -1; // fill after structs fill_item_stats_automove(a->iam_after); fill_slab_stats_automove(a->sam_after); a->window_cur++; // iterate slabs for (n = POWER_SMALLEST; n < MAX_NUMBER_OF_SLAB_CLASSES; n++) { int w_offset = n * a->window_size; struct window_data *wd = &a->window_data[w_offset + (a->window_cur % a->window_size)]; memset(wd, 0, sizeof(struct window_data)); // summarize the window-up-to-now. memset(&w_sum, 0, sizeof(struct window_data)); window_sum(&a->window_data[w_offset], &w_sum, a->window_size); // if page delta, or evicted delta, mark window dirty // (or outofmemory) if (a->iam_after[n].evicted - a->iam_before[n].evicted > 0 || a->iam_after[n].outofmemory - a->iam_before[n].outofmemory > 0) { wd->evicted = 1; wd->dirty = 1; } if (a->sam_after[n].total_pages - a->sam_before[n].total_pages > 0) { wd->dirty = 1; } // set age into window wd->age = a->iam_after[n].age; // grab age as average of window total uint64_t age = w_sum.age / a->window_size; // if > N free chunks and not dirty, make decision. 
if (a->sam_after[n].free_chunks > a->sam_after[n].chunks_per_page * MIN_PAGES_FOR_RECLAIM) { if (w_sum.dirty == 0) { *src = n; *dst = 0; break; } } // if oldest and have enough pages, is oldest if (age > oldest_age && a->sam_after[n].total_pages > MIN_PAGES_FOR_SOURCE) { oldest = n; oldest_age = age; } // grab evicted count from window // if > half the window and youngest, mark as youngest if (age < youngest_age && w_sum.evicted > a->window_size / 2) { youngest = n; youngest_age = age; youngest_evicting = wd->evicted ? true : false; } } memcpy(a->iam_before, a->iam_after, sizeof(item_stats_automove) * MAX_NUMBER_OF_SLAB_CLASSES); memcpy(a->sam_before, a->sam_after, sizeof(slab_stats_automove) * MAX_NUMBER_OF_SLAB_CLASSES); // if we have a youngest and oldest, and oldest is outside the ratio, // also, only make decisions if window has filled once. if (youngest != -1 && oldest != -1 && a->window_cur > a->window_size) { if (youngest_age < ((double)oldest_age * a->max_age_ratio) && youngest_evicting) { *src = oldest; *dst = youngest; } } return; }
4,939
31.287582
100
c
null
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/slabs.h
/* * Copyright 2018 Lenovo * * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text */ /* * Note: * Codes enclosed in `#ifdef PSLAB' and `#endif' are added by Lenovo for * persistent memory support */ /* slabs memory allocation */ #ifndef SLABS_H #define SLABS_H /** Init the subsystem. 1st argument is the limit on no. of bytes to allocate, 0 if no limit. 2nd argument is the growth factor; each slab will use a chunk size equal to the previous slab's chunk size times this factor. 3rd argument specifies if the slab allocator should allocate all memory up front (if true), or allocate memory in chunks as it is needed (if false) */ void slabs_init(const size_t limit, const double factor, const bool prealloc, const uint32_t *slab_sizes); /** Call only during init. Pre-allocates all available memory */ void slabs_prefill_global(void); #ifdef PSLAB int slabs_dump_sizes(uint32_t *slab_sizes, int max); void slabs_prefill_global_from_pmem(void); void slabs_update_policy(void); int do_slabs_renewslab(const unsigned int id, char *ptr); void do_slab_realloc(item *it, unsigned int id); void do_slabs_free(void *ptr, const size_t size, unsigned int id); #endif /** * Given object size, return id to use when allocating/freeing memory for object * 0 means error: can't store such a large object */ unsigned int slabs_clsid(const size_t size); /** Allocate object of given length. 0 on error */ /*@null@*/ #define SLABS_ALLOC_NO_NEWPAGE 1 void *slabs_alloc(const size_t size, unsigned int id, uint64_t *total_bytes, unsigned int flags); /** Free previously allocated object */ void slabs_free(void *ptr, size_t size, unsigned int id); /** Adjust the stats for memory requested */ void slabs_adjust_mem_requested(unsigned int id, size_t old, size_t ntotal); /** Adjust global memory limit up or down */ bool slabs_adjust_mem_limit(size_t new_mem_limit); /** Return a datum for stats in binary protocol */ bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c); typedef struct { unsigned int chunks_per_page; unsigned int chunk_size; long int free_chunks; long int total_pages; } slab_stats_automove; void fill_slab_stats_automove(slab_stats_automove *am); unsigned int global_page_pool_size(bool *mem_flag); /** Fill buffer with stats */ /*@null@*/ void slabs_stats(ADD_STAT add_stats, void *c); /* Hints as to freespace in slab class */ unsigned int slabs_available_chunks(unsigned int id, bool *mem_flag, uint64_t *total_bytes, unsigned int *chunks_perslab); void slabs_mlock(void); void slabs_munlock(void); int start_slab_maintenance_thread(void); void stop_slab_maintenance_thread(void); enum reassign_result_type { REASSIGN_OK=0, REASSIGN_RUNNING, REASSIGN_BADCLASS, REASSIGN_NOSPARE, REASSIGN_SRC_DST_SAME }; enum reassign_result_type slabs_reassign(int src, int dst); void slabs_rebalancer_pause(void); void slabs_rebalancer_resume(void); #ifdef EXTSTORE void slabs_set_storage(void *arg); #endif #endif
3,024
31.180851
122
h
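An illustrative allocation round-trip through the API above, assuming slabs_init has already run; grab_chunk and the zero flags value are invented for this sketch.

#include <stddef.h>
#include <stdint.h>

static void *grab_chunk(const size_t sz) {
    unsigned int id = slabs_clsid(sz);
    if (id == 0)
        return NULL; /* no slab class can hold an object this large */
    uint64_t total_bytes = 0;
    void *ptr = slabs_alloc(sz, id, &total_bytes, 0);
    /* a caller returns it later with slabs_free(ptr, sz, id) */
    return ptr;
}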
null
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/storage.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */

#include "memcached.h"
#ifdef EXTSTORE

#include "storage.h"
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#define PAGE_BUCKET_DEFAULT 0
#define PAGE_BUCKET_COMPACT 1
#define PAGE_BUCKET_CHUNKED 2
#define PAGE_BUCKET_LOWTTL 3

int lru_maintainer_store(void *storage, const int clsid) {
    //int i;
    int did_moves = 0;
    int item_age = settings.ext_item_age;
    bool mem_limit_reached = false;
    unsigned int chunks_free;
    struct lru_pull_tail_return it_info;
    // FIXME: need to directly ask the slabber how big a class is
    if (slabs_clsid(settings.ext_item_size) > clsid)
        return 0;
    chunks_free = slabs_available_chunks(clsid, &mem_limit_reached, NULL, NULL);
    // if we are low on chunks and no spare, push out early.
    if (chunks_free < settings.ext_free_memchunks[clsid] && mem_limit_reached)
        item_age = 0;

    it_info.it = NULL;
    lru_pull_tail(clsid, COLD_LRU, 0, LRU_PULL_RETURN_ITEM, 0, &it_info);
    /* Item is locked, and we have a reference to it. */
    if (it_info.it == NULL) {
        return did_moves;
    }

    obj_io io;
    item *it = it_info.it;
    /* First, storage for the header object */
    size_t orig_ntotal = ITEM_ntotal(it);
    uint32_t flags;
    if ((it->it_flags & ITEM_HDR) == 0 &&
            (item_age == 0 || current_time - it->time > item_age)) {
        // FIXME: flag conversion again
        if (settings.inline_ascii_response) {
            flags = (uint32_t) strtoul(ITEM_suffix(it), (char **) NULL, 10);
        } else if (it->nsuffix > 0) {
            flags = *((uint32_t *)ITEM_suffix(it));
        } else {
            flags = 0;
        }
        item *hdr_it = do_item_alloc(ITEM_key(it), it->nkey, flags, it->exptime, sizeof(item_hdr));
        /* Run the storage write understanding the start of the item is dirty.
         * We will fill it (time/exptime/etc) from the header item on read.
         */
        if (hdr_it != NULL) {
            int bucket = (it->it_flags & ITEM_CHUNKED) ?
                PAGE_BUCKET_CHUNKED : PAGE_BUCKET_DEFAULT;
            // Compress soon-to-expire items into similar pages.
            if (it->exptime - current_time < settings.ext_low_ttl) {
                bucket = PAGE_BUCKET_LOWTTL;
            }
            hdr_it->it_flags |= ITEM_HDR;
            io.len = orig_ntotal;
            io.mode = OBJ_IO_WRITE;
            // NOTE: when the item is read back in, the slab mover
            // may see it. Important to have refcount>=2 or ~ITEM_LINKED
            assert(it->refcount >= 2);
            if (extstore_write_request(storage, bucket, &io) == 0) {
                // cuddle the hash value into the time field so we don't have
                // to recalculate it.
                item *buf_it = (item *) io.buf;
                buf_it->time = it_info.hv;
                // copy from past the headers + time headers.
                // TODO: should be in items.c
                if (it->it_flags & ITEM_CHUNKED) {
                    // Need to loop through the item and copy
                    item_chunk *sch = (item_chunk *) ITEM_data(it);
                    int remain = orig_ntotal;
                    int copied = 0;
                    // copy original header
                    int hdrtotal = ITEM_ntotal(it) - it->nbytes;
                    memcpy((char *)io.buf+32, (char *)it+32, hdrtotal - 32);
                    copied = hdrtotal;
                    // copy data in like it were one large object.
                    while (sch && remain) {
                        assert(remain >= sch->used);
                        memcpy((char *)io.buf+copied, sch->data, sch->used);
                        // FIXME: use one variable?
                        remain -= sch->used;
                        copied += sch->used;
                        sch = sch->next;
                    }
                } else {
                    memcpy((char *)io.buf+32, (char *)it+32, io.len-32);
                }
                // crc what we copied so we can do it sequentially.
                buf_it->it_flags &= ~ITEM_LINKED;
                buf_it->exptime = crc32c(0, (char*)io.buf+32, orig_ntotal-32);
                extstore_write(storage, &io);
                item_hdr *hdr = (item_hdr *) ITEM_data(hdr_it);
                hdr->page_version = io.page_version;
                hdr->page_id = io.page_id;
                hdr->offset = io.offset;
                // overload nbytes for the header it
                hdr_it->nbytes = it->nbytes;
                /* success!
Now we need to fill relevant data into the new * header and replace. Most of this requires the item lock */ /* CAS gets set while linking. Copy post-replace */ item_replace(it, hdr_it, it_info.hv); ITEM_set_cas(hdr_it, ITEM_get_cas(it)); do_item_remove(hdr_it); did_moves = 1; LOGGER_LOG(NULL, LOG_EVICTIONS, LOGGER_EXTSTORE_WRITE, it, bucket); } else { /* Failed to write for some reason, can't continue. */ slabs_free(hdr_it, ITEM_ntotal(hdr_it), ITEM_clsid(hdr_it)); } } } do_item_remove(it); item_unlock(it_info.hv); return did_moves; } /* Fetch stats from the external storage system and decide to compact. * If we're more than half full, start skewing how aggressively to run * compaction, up to a desired target when all pages are full. */ static int storage_compact_check(void *storage, logger *l, uint32_t *page_id, uint64_t *page_version, uint64_t *page_size, bool *drop_unread) { struct extstore_stats st; int x; double rate; uint64_t frag_limit; uint64_t low_version = ULLONG_MAX; uint64_t lowest_version = ULLONG_MAX; unsigned int low_page = 0; unsigned int lowest_page = 0; extstore_get_stats(storage, &st); if (st.pages_used == 0) return 0; // let's pick a target "wasted" value and slew. if (st.pages_free > settings.ext_compact_under) return 0; *drop_unread = false; // the number of free pages reduces the configured frag limit // this allows us to defrag early if pages are very empty. rate = 1.0 - ((double)st.pages_free / st.page_count); rate *= settings.ext_max_frag; frag_limit = st.page_size * rate; LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_FRAGINFO, NULL, rate, frag_limit); st.page_data = calloc(st.page_count, sizeof(struct extstore_page_data)); extstore_get_page_data(storage, &st); // find oldest page by version that violates the constraint for (x = 0; x < st.page_count; x++) { if (st.page_data[x].version == 0 || st.page_data[x].bucket == PAGE_BUCKET_LOWTTL) continue; if (st.page_data[x].version < lowest_version) { lowest_page = x; lowest_version = st.page_data[x].version; } if (st.page_data[x].bytes_used < frag_limit) { if (st.page_data[x].version < low_version) { low_page = x; low_version = st.page_data[x].version; } } } *page_size = st.page_size; free(st.page_data); // we have a page + version to attempt to reclaim. if (low_version != ULLONG_MAX) { *page_id = low_page; *page_version = low_version; return 1; } else if (lowest_version != ULLONG_MAX && settings.ext_drop_unread && st.pages_free <= settings.ext_drop_under) { // nothing matched the frag rate barrier, so pick the absolute oldest // version if we're configured to drop items. *page_id = lowest_page; *page_version = lowest_version; *drop_unread = true; return 1; } return 0; } static pthread_t storage_compact_tid; static pthread_mutex_t storage_compact_plock; #define MIN_STORAGE_COMPACT_SLEEP 10000 #define MAX_STORAGE_COMPACT_SLEEP 2000000 struct storage_compact_wrap { obj_io io; pthread_mutex_t lock; // gates the bools.
bool done; bool submitted; bool miss; // version flipped out from under us }; static void storage_compact_readback(void *storage, logger *l, bool drop_unread, char *readback_buf, uint32_t page_id, uint64_t page_version, uint64_t read_size) { uint64_t offset = 0; unsigned int rescues = 0; unsigned int lost = 0; unsigned int skipped = 0; while (offset < read_size) { item *hdr_it = NULL; item_hdr *hdr = NULL; item *it = (item *)(readback_buf+offset); unsigned int ntotal; // probably zeroed out junk at the end of the wbuf if (it->nkey == 0) { break; } ntotal = ITEM_ntotal(it); uint32_t hv = (uint32_t)it->time; item_lock(hv); // We don't have a conn and don't need to do most of do_item_get hdr_it = assoc_find(ITEM_key(it), it->nkey, hv); if (hdr_it != NULL) { bool do_write = false; refcount_incr(hdr_it); // Check validity but don't bother removing it. if ((hdr_it->it_flags & ITEM_HDR) && !item_is_flushed(hdr_it) && (hdr_it->exptime == 0 || hdr_it->exptime > current_time)) { hdr = (item_hdr *)ITEM_data(hdr_it); if (hdr->page_id == page_id && hdr->page_version == page_version) { // Item header is still completely valid. extstore_delete(storage, page_id, page_version, 1, ntotal); // drop inactive items. if (drop_unread && GET_LRU(hdr_it->slabs_clsid) == COLD_LRU) { do_write = false; skipped++; } else { do_write = true; } } } if (do_write) { bool do_update = false; int tries; obj_io io; io.len = ntotal; io.mode = OBJ_IO_WRITE; for (tries = 10; tries > 0; tries--) { if (extstore_write_request(storage, PAGE_BUCKET_COMPACT, &io) == 0) { memcpy(io.buf, it, io.len); extstore_write(storage, &io); do_update = true; break; } else { usleep(1000); } } if (do_update) { if (it->refcount == 2) { hdr->page_version = io.page_version; hdr->page_id = io.page_id; hdr->offset = io.offset; rescues++; } else { lost++; // TODO: re-alloc and replace header. } } else { lost++; } } do_item_remove(hdr_it); } item_unlock(hv); offset += ntotal; if (read_size - offset < sizeof(struct _stritem)) break; } STATS_LOCK(); stats.extstore_compact_lost += lost; stats.extstore_compact_rescues += rescues; stats.extstore_compact_skipped += skipped; STATS_UNLOCK(); LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_READ_END, NULL, page_id, offset, rescues, lost, skipped); } static void _storage_compact_cb(void *e, obj_io *io, int ret) { struct storage_compact_wrap *wrap = (struct storage_compact_wrap *)io->data; assert(wrap->submitted == true); pthread_mutex_lock(&wrap->lock); if (ret < 1) { wrap->miss = true; } wrap->done = true; pthread_mutex_unlock(&wrap->lock); } // TODO: hoist the storage bits from lru_maintainer_thread in here. // would be nice if they could avoid hammering the same locks though? // I guess it's only COLD. that's probably fine. 
static void *storage_compact_thread(void *arg) { void *storage = arg; useconds_t to_sleep = MAX_STORAGE_COMPACT_SLEEP; bool compacting = false; uint64_t page_version = 0; uint64_t page_size = 0; uint64_t page_offset = 0; uint32_t page_id = 0; bool drop_unread = false; char *readback_buf = NULL; struct storage_compact_wrap wrap; logger *l = logger_create(); if (l == NULL) { fprintf(stderr, "Failed to allocate logger for storage compaction thread\n"); abort(); } readback_buf = malloc(settings.ext_wbuf_size); if (readback_buf == NULL) { fprintf(stderr, "Failed to allocate readback buffer for storage compaction thread\n"); abort(); } pthread_mutex_init(&wrap.lock, NULL); wrap.done = false; wrap.submitted = false; wrap.io.data = &wrap; wrap.io.buf = (void *)readback_buf; wrap.io.len = settings.ext_wbuf_size; wrap.io.mode = OBJ_IO_READ; wrap.io.cb = _storage_compact_cb; pthread_mutex_lock(&storage_compact_plock); while (1) { pthread_mutex_unlock(&storage_compact_plock); if (to_sleep) { extstore_run_maint(storage); usleep(to_sleep); } pthread_mutex_lock(&storage_compact_plock); if (!compacting && storage_compact_check(storage, l, &page_id, &page_version, &page_size, &drop_unread)) { page_offset = 0; compacting = true; LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_START, NULL, page_id, page_version); } if (compacting) { pthread_mutex_lock(&wrap.lock); if (page_offset < page_size && !wrap.done && !wrap.submitted) { wrap.io.page_version = page_version; wrap.io.page_id = page_id; wrap.io.offset = page_offset; // FIXME: should be smarter about io->next (unlink at use?) wrap.io.next = NULL; wrap.submitted = true; wrap.miss = false; extstore_submit(storage, &wrap.io); } else if (wrap.miss) { LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_ABORT, NULL, page_id); wrap.done = false; wrap.submitted = false; compacting = false; } else if (wrap.submitted && wrap.done) { LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_READ_START, NULL, page_id, page_offset); storage_compact_readback(storage, l, drop_unread, readback_buf, page_id, page_version, settings.ext_wbuf_size); page_offset += settings.ext_wbuf_size; wrap.done = false; wrap.submitted = false; } else if (page_offset >= page_size) { compacting = false; wrap.done = false; wrap.submitted = false; extstore_close_page(storage, page_id, page_version); LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_END, NULL, page_id); } pthread_mutex_unlock(&wrap.lock); if (to_sleep > MIN_STORAGE_COMPACT_SLEEP) to_sleep /= 2; } else { if (to_sleep < MAX_STORAGE_COMPACT_SLEEP) to_sleep += MIN_STORAGE_COMPACT_SLEEP; } } free(readback_buf); return NULL; } // TODO // logger needs logger_destroy() to exist/work before this is safe. /*int stop_storage_compact_thread(void) { int ret; pthread_mutex_lock(&lru_maintainer_lock); do_run_lru_maintainer_thread = 0; pthread_mutex_unlock(&lru_maintainer_lock); if ((ret = pthread_join(lru_maintainer_tid, NULL)) != 0) { fprintf(stderr, "Failed to stop LRU maintainer thread: %s\n", strerror(ret)); return -1; } settings.lru_maintainer_thread = false; return 0; }*/ void storage_compact_pause(void) { pthread_mutex_lock(&storage_compact_plock); } void storage_compact_resume(void) { pthread_mutex_unlock(&storage_compact_plock); } int start_storage_compact_thread(void *arg) { int ret; pthread_mutex_init(&storage_compact_plock, NULL); if ((ret = pthread_create(&storage_compact_tid, NULL, storage_compact_thread, arg)) != 0) { fprintf(stderr, "Can't create storage_compact thread: %s\n", strerror(ret)); return -1; } return 0; } #endif
16,719
35.347826
99
c
null
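A minimal, self-contained sketch of the write-back pattern storage.c uses above: an object is copied into a page buffer, a checksum of the copied span is stashed, and a small header record remembers where the object went so it can be re-verified on read-back. All names here (obj_hdr, fake_crc) are illustrative stand-ins, not memcached or extstore APIs; the real code uses obj_io, item_hdr, and crc32c().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct obj_hdr { uint32_t page_id, offset; }; /* stand-in for item_hdr */

static uint32_t fake_crc(const void *buf, size_t len) { /* FNV-1a, standing in for crc32c */
    const uint8_t *p = buf;
    uint32_t h = 2166136261u;
    while (len--) { h ^= *p++; h *= 16777619u; }
    return h;
}

int main(void) {
    char page[256];                 /* one pretend extstore page */
    uint32_t used = 0;
    const char payload[] = "value-bytes";

    /* "write request": reserve space, copy, checksum what was copied */
    struct obj_hdr hdr = { .page_id = 0, .offset = used };
    memcpy(page + used, payload, sizeof(payload));
    uint32_t crc = fake_crc(page + used, sizeof(payload));
    used += sizeof(payload);

    /* read-back through the header, re-verifying as compaction does */
    if (fake_crc(page + hdr.offset, sizeof(payload)) == crc)
        printf("page %u offset %u verified\n", hdr.page_id, hdr.offset);
    return 0;
}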
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/pslab.h
/* * Copyright 2018 Lenovo * * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text */ #ifndef PSLAB_H #define PSLAB_H #include <libpmem.h> #define PSLAB_POLICY_DRAM 0 #define PSLAB_POLICY_PMEM 1 #define PSLAB_POLICY_BALANCED 2 #define pmem_member_persist(p, m) \ pmem_persist(&(p)->m, sizeof ((p)->m)) #define pmem_member_flush(p, m) \ pmem_flush(&(p)->m, sizeof ((p)->m)) #define pmem_flush_from(p, t, m) \ pmem_flush(&(p)->m, sizeof (t) - offsetof(t, m)) #define pslab_item_data_persist(it) pmem_persist((it)->data, ITEM_dtotal(it)) #define pslab_item_data_flush(it) pmem_flush((it)->data, ITEM_dtotal(it)) int pslab_create(char *pool_name, uint32_t pool_size, uint32_t slab_size, uint32_t *slabclass_sizes, int slabclass_num); int pslab_pre_recover(char *name, uint32_t *slab_sizes, int slab_max, int slab_page_size); int pslab_do_recover(void); time_t pslab_process_started(time_t process_started); void pslab_update_flushtime(uint32_t time); void pslab_use_slab(void *p, int id, unsigned int size); void *pslab_get_free_slab(void *slab); int pslab_contains(char *p); uint64_t pslab_addr2off(void *addr); extern bool pslab_force; #endif
1,186
30.236842
90
h
null
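A hedged usage sketch for the member-granularity persist macros in pslab.h above. pmem_persist() is stubbed out here so the sketch builds without libpmem; with the real library the macro flushes exactly one struct member to persistent memory.

#include <stddef.h>
#include <stdio.h>

static void pmem_persist(const void *addr, size_t len) { /* libpmem stand-in */
    (void)addr;
    printf("persist %zu bytes\n", len);
}

#define pmem_member_persist(p, m) \
    pmem_persist(&(p)->m, sizeof ((p)->m))

struct record { int id; char payload[32]; };

int main(void) {
    struct record r = { .id = 7 };
    r.id = 8;                      /* mutate one field... */
    pmem_member_persist(&r, id);   /* ...then persist just that member */
    return 0;
}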
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/config.h
/* config.h. Generated from config.h.in by configure. */ /* config.h.in. Generated from configure.ac by autoheader. */ /* Set to nonzero if you want to include DTRACE */ /* #undef ENABLE_DTRACE */ /* Set to nonzero if you want to include SASL */ /* #undef ENABLE_SASL */ /* Set to nonzero if you want to enable a SASL pwdb */ /* #undef ENABLE_SASL_PWDB */ /* machine is bigendian */ /* #undef ENDIAN_BIG */ /* machine is littleendian */ #define ENDIAN_LITTLE 1 /* Set to nonzero if you want to enable extstore */ /* #undef EXTSTORE */ /* Define to 1 if support accept4 */ #define HAVE_ACCEPT4 1 /* Define to 1 if you have the `clock_gettime' function. */ #define HAVE_CLOCK_GETTIME 1 /* Define this if you have an implementation of drop_privileges() */ /* #undef HAVE_DROP_PRIVILEGES */ /* Define this if you have an implementation of drop_worker_privileges() */ /* #undef HAVE_DROP_WORKER_PRIVILEGES */ /* GCC 64bit Atomics available */ /* #undef HAVE_GCC_64ATOMICS */ /* GCC Atomics available */ #define HAVE_GCC_ATOMICS 1 /* Define to 1 if support getopt_long */ #define HAVE_GETOPT_LONG 1 /* Define to 1 if you have the `getpagesizes' function. */ /* #undef HAVE_GETPAGESIZES */ /* Have ntohll */ /* #undef HAVE_HTONLL */ /* Define to 1 if you have the <inttypes.h> header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `memcntl' function. */ /* #undef HAVE_MEMCNTL */ /* Define to 1 if you have the <memory.h> header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `mlockall' function. */ #define HAVE_MLOCKALL 1 /* Define to 1 if you have the `pledge' function. */ /* #undef HAVE_PLEDGE */ /* we have sasl_callback_ft */ /* #undef HAVE_SASL_CALLBACK_FT */ /* Set to nonzero if your SASL implementation supports SASL_CB_GETCONF */ /* #undef HAVE_SASL_CB_GETCONF */ /* Define to 1 if you have the <sasl/sasl.h> header file. */ /* #undef HAVE_SASL_SASL_H */ /* Define to 1 if you have the `setppriv' function. */ /* #undef HAVE_SETPPRIV */ /* Define to 1 if you have the `sigignore' function. */ #define HAVE_SIGIGNORE 1 /* Define to 1 if stdbool.h conforms to C99. */ #define HAVE_STDBOOL_H 1 /* Define to 1 if you have the <stdint.h> header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the <stdlib.h> header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the <strings.h> header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the <string.h> header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the <sys/stat.h> header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the <sys/types.h> header file. */ #define HAVE_SYS_TYPES_H 1 /* Define this if you have umem.h */ /* #undef HAVE_UMEM_H */ /* Define to 1 if you have the <unistd.h> header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if the system has the type `_Bool'. */ #define HAVE__BOOL 1 /* Machine need alignment */ /* #undef NEED_ALIGN */ /* Name of package */ #define PACKAGE "memcached" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "memcached@googlegroups.com" /* Define to the full name of this package. */ #define PACKAGE_NAME "memcached" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "memcached 1.5.4" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "memcached" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package.
*/ #define PACKAGE_VERSION "1.5.4" /* Set to nonzero if you want to enable pslab */ #define PSLAB 1 /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Version number of package */ #define VERSION "1.5.4" /* find sigignore on Linux */ #define _GNU_SOURCE 1 /* Define to empty if `const' does not conform to ANSI C. */ /* #undef const */ /* define to int if socklen_t not available */ /* #undef socklen_t */ #if HAVE_STDBOOL_H #include <stdbool.h> #else #define bool char #define false 0 #define true 1 #endif #ifdef HAVE_INTTYPES_H #include <inttypes.h> #endif
4,134
24.368098
78
h
null
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/sasl_defs.h
#ifndef SASL_DEFS_H #define SASL_DEFS_H 1 // Longest one I could find was ``9798-U-RSA-SHA1-ENC'' #define MAX_SASL_MECH_LEN 32 #if defined(HAVE_SASL_SASL_H) && defined(ENABLE_SASL) #include <sasl/sasl.h> void init_sasl(void); extern char my_sasl_hostname[1025]; #else /* End of SASL support */ typedef void* sasl_conn_t; #define init_sasl() {} #define sasl_dispose(x) {} #define sasl_server_new(a, b, c, d, e, f, g, h) 1 #define sasl_listmech(a, b, c, d, e, f, g, h) 1 #define sasl_server_start(a, b, c, d, e, f) 1 #define sasl_server_step(a, b, c, d, e) 1 #define sasl_getprop(a, b, c) {} #define SASL_OK 0 #define SASL_CONTINUE -1 #endif /* sasl compat */ #endif /* SASL_DEFS_H */
693
20.6875
55
h
null
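The header above illustrates a common compile-out pattern: when SASL support is absent, every entry point collapses to an empty macro or a constant so call sites build unchanged. A tiny self-contained sketch of the same pattern, with purely illustrative names:

#include <stdio.h>

/* #define HAVE_FEATURE 1 */
#ifdef HAVE_FEATURE
int feature_init(void);            /* real implementation elsewhere */
#else
#define feature_init() 0           /* compiled out: always "succeeds" */
#endif

int main(void) {
    if (feature_init() == 0)
        printf("feature ready (or stubbed out)\n");
    return 0;
}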
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/openbsd_priv.c
#include <errno.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <unistd.h> #include "memcached.h" /* * this section of code will drop all (OpenBSD) privileges including * those normally granted to all userland processes (basic privileges). The * effect of this is that after running this code, the process will not be able * to fork(), exec(), etc. See pledge(2) for more information. */ void drop_privileges() { extern char *__progname; if (settings.socketpath != NULL) { if (pledge("stdio unix", NULL) == -1) { fprintf(stderr, "%s: pledge: %s\n", __progname, strerror(errno)); exit(EXIT_FAILURE); } } else { if (pledge("stdio inet", NULL) == -1) { fprintf(stderr, "%s: pledge: %s\n", __progname, strerror(errno)); exit(EXIT_FAILURE); } } }
853
28.448276
76
c
null
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/logger.h
/* logging functions */ #ifndef LOGGER_H #define LOGGER_H #include "bipbuffer.h" /* TODO: starttime tunable */ #define LOGGER_BUF_SIZE 1024 * 64 #define LOGGER_WATCHER_BUF_SIZE 1024 * 256 #define LOGGER_ENTRY_MAX_SIZE 2048 #define GET_LOGGER() ((logger *) pthread_getspecific(logger_key)); /* Inlined from memcached.h - should go into sub header */ typedef unsigned int rel_time_t; enum log_entry_type { LOGGER_ASCII_CMD = 0, LOGGER_EVICTION, LOGGER_ITEM_GET, LOGGER_ITEM_STORE, LOGGER_CRAWLER_STATUS, LOGGER_SLAB_MOVE, #ifdef EXTSTORE LOGGER_EXTSTORE_WRITE, LOGGER_COMPACT_START, LOGGER_COMPACT_ABORT, LOGGER_COMPACT_READ_START, LOGGER_COMPACT_READ_END, LOGGER_COMPACT_END, LOGGER_COMPACT_FRAGINFO, #endif }; enum log_entry_subtype { LOGGER_TEXT_ENTRY = 0, LOGGER_EVICTION_ENTRY, LOGGER_ITEM_GET_ENTRY, LOGGER_ITEM_STORE_ENTRY, #ifdef EXTSTORE LOGGER_EXT_WRITE_ENTRY, #endif }; enum logger_ret_type { LOGGER_RET_OK = 0, LOGGER_RET_NOSPACE, LOGGER_RET_ERR }; enum logger_parse_entry_ret { LOGGER_PARSE_ENTRY_OK = 0, LOGGER_PARSE_ENTRY_FULLBUF, LOGGER_PARSE_ENTRY_FAILED }; typedef const struct { enum log_entry_subtype subtype; int reqlen; uint16_t eflags; char *format; } entry_details; /* log entry intermediary structures */ struct logentry_eviction { long long int exptime; uint32_t latime; uint16_t it_flags; uint8_t nkey; uint8_t clsid; char key[]; }; #ifdef EXTSTORE struct logentry_ext_write { long long int exptime; uint32_t latime; uint16_t it_flags; uint8_t nkey; uint8_t clsid; uint8_t bucket; char key[]; }; #endif struct logentry_item_get { uint8_t was_found; uint8_t nkey; uint8_t clsid; char key[]; }; struct logentry_item_store { int status; int cmd; rel_time_t ttl; uint8_t nkey; uint8_t clsid; char key[]; }; /* end intermediary structures */ typedef struct _logentry { enum log_entry_subtype event; uint16_t eflags; uint64_t gid; struct timeval tv; /* not monotonic! */ int size; union { void *entry; /* probably an item */ char end; } data[]; } logentry; #define LOG_SYSEVENTS (1<<1) /* threads start/stop/working */ #define LOG_FETCHERS (1<<2) /* get/gets/etc */ #define LOG_MUTATIONS (1<<3) /* set/append/incr/etc */ #define LOG_SYSERRORS (1<<4) /* malloc/etc errors */ #define LOG_CONNEVENTS (1<<5) /* new client, closed, etc */ #define LOG_EVICTIONS (1<<6) /* details of evicted items */ #define LOG_STRICT (1<<7) /* block worker instead of drop */ #define LOG_RAWCMDS (1<<9) /* raw ascii commands */ typedef struct _logger { struct _logger *prev; struct _logger *next; pthread_mutex_t mutex; /* guard for this + *buf */ uint64_t written; /* entries written to the buffer */ uint64_t dropped; /* entries dropped */ uint64_t blocked; /* times blocked instead of dropped */ uint16_t fetcher_ratio; /* log one out of every N fetches */ uint16_t mutation_ratio; /* log one out of every N mutations */ uint16_t eflags; /* flags this logger should log */ bipbuf_t *buf; const entry_details *entry_map; } logger; enum logger_watcher_type { LOGGER_WATCHER_STDERR = 0, LOGGER_WATCHER_CLIENT = 1 }; typedef struct { void *c; /* original connection structure. 
still with source thread attached */ int sfd; /* client fd */ int id; /* id number for watcher list */ uint64_t skipped; /* lines skipped since last successful print */ bool failed_flush; /* recently failed to write out (EAGAIN), wait before retry */ enum logger_watcher_type t; /* stderr, client, syslog, etc */ uint16_t eflags; /* flags we are interested in */ bipbuf_t *buf; /* per-watcher output buffer */ } logger_watcher; struct logger_stats { uint64_t worker_dropped; uint64_t worker_written; uint64_t watcher_skipped; uint64_t watcher_sent; }; extern pthread_key_t logger_key; /* public functions */ void logger_init(void); logger *logger_create(void); #define LOGGER_LOG(l, flag, type, ...) \ do { \ logger *myl = l; \ if (l == NULL) \ myl = GET_LOGGER(); \ if (myl->eflags & flag) \ logger_log(myl, type, __VA_ARGS__); \ } while (0) enum logger_ret_type logger_log(logger *l, const enum log_entry_type event, const void *entry, ...); enum logger_add_watcher_ret { LOGGER_ADD_WATCHER_TOO_MANY = 0, LOGGER_ADD_WATCHER_OK, LOGGER_ADD_WATCHER_FAILED }; enum logger_add_watcher_ret logger_add_watcher(void *c, const int sfd, uint16_t f); #endif
4,680
24.032086
100
h
null
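A hedged sketch of the LOGGER_LOG dispatch idiom defined above: a NULL logger argument falls back to a per-thread default, and the entry is emitted only when the logger's flag mask matches. Everything below is a stand-in (plain fprintf instead of the bipbuffer-backed logger); it mirrors the macro's shape, not memcached's implementation.

#include <stdio.h>
#include <stddef.h>

struct my_logger { unsigned eflags; };
static struct my_logger thread_default = { .eflags = 1u << 6 };

#define MY_LOG_EVICTIONS (1u << 6)
#define MY_LOG(l, flag, ...)                    \
    do {                                        \
        struct my_logger *myl = (l);            \
        if (myl == NULL) myl = &thread_default; \
        if (myl->eflags & (flag))               \
            fprintf(stderr, __VA_ARGS__);       \
    } while (0)

int main(void) {
    MY_LOG(NULL, MY_LOG_EVICTIONS, "evicted key=%s\n", "foo");
    return 0;
}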
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/thread.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * Thread management for memcached. */ #include "memcached.h" #ifdef EXTSTORE #include "storage.h" #endif #include <assert.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #ifdef __sun #include <atomic.h> #endif #define ITEMS_PER_ALLOC 64 /* An item in the connection queue. */ enum conn_queue_item_modes { queue_new_conn, /* brand new connection. */ queue_redispatch, /* redispatching from side thread */ }; typedef struct conn_queue_item CQ_ITEM; struct conn_queue_item { int sfd; enum conn_states init_state; int event_flags; int read_buffer_size; enum network_transport transport; enum conn_queue_item_modes mode; conn *c; CQ_ITEM *next; }; /* A connection queue. */ typedef struct conn_queue CQ; struct conn_queue { CQ_ITEM *head; CQ_ITEM *tail; pthread_mutex_t lock; }; /* Locks for cache LRU operations */ pthread_mutex_t lru_locks[POWER_LARGEST]; /* Connection lock around accepting new connections */ pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER; #if !defined(HAVE_GCC_ATOMICS) && !defined(__sun) pthread_mutex_t atomics_mutex = PTHREAD_MUTEX_INITIALIZER; #endif /* Lock for global stats */ static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER; /* Lock to cause worker threads to hang up after being woken */ static pthread_mutex_t worker_hang_lock; /* Free list of CQ_ITEM structs */ static CQ_ITEM *cqi_freelist; static pthread_mutex_t cqi_freelist_lock; static pthread_mutex_t *item_locks; /* size of the item lock hash table */ static uint32_t item_lock_count; unsigned int item_lock_hashpower; #define hashsize(n) ((unsigned long int)1<<(n)) #define hashmask(n) (hashsize(n)-1) /* * Each libevent instance has a wakeup pipe, which other threads * can use to signal that they've put a new connection on its queue. */ static LIBEVENT_THREAD *threads; /* * Number of worker threads that have finished setting themselves up. */ static int init_count = 0; static pthread_mutex_t init_lock; static pthread_cond_t init_cond; static void thread_libevent_process(int fd, short which, void *arg); /* item_lock() must be held for an item before any modifications to either its * associated hash bucket, or the structure itself. * LRU modifications must hold the item lock, and the LRU lock. * LRU's accessing items must item_trylock() before modifying an item. * Items accessible from an LRU must not be freed or modified * without first locking and removing from the LRU. 
*/ void item_lock(uint32_t hv) { mutex_lock(&item_locks[hv & hashmask(item_lock_hashpower)]); } void *item_trylock(uint32_t hv) { pthread_mutex_t *lock = &item_locks[hv & hashmask(item_lock_hashpower)]; if (pthread_mutex_trylock(lock) == 0) { return lock; } return NULL; } void item_trylock_unlock(void *lock) { mutex_unlock((pthread_mutex_t *) lock); } void item_unlock(uint32_t hv) { mutex_unlock(&item_locks[hv & hashmask(item_lock_hashpower)]); } static void wait_for_thread_registration(int nthreads) { while (init_count < nthreads) { pthread_cond_wait(&init_cond, &init_lock); } } static void register_thread_initialized(void) { pthread_mutex_lock(&init_lock); init_count++; pthread_cond_signal(&init_cond); pthread_mutex_unlock(&init_lock); /* Force worker threads to pile up if someone wants us to */ pthread_mutex_lock(&worker_hang_lock); pthread_mutex_unlock(&worker_hang_lock); } /* Must not be called with any deeper locks held */ void pause_threads(enum pause_thread_types type) { char buf[1]; int i; buf[0] = 0; switch (type) { case PAUSE_ALL_THREADS: lru_maintainer_pause(); slabs_rebalancer_pause(); lru_crawler_pause(); #ifdef EXTSTORE storage_compact_pause(); #endif case PAUSE_WORKER_THREADS: buf[0] = 'p'; pthread_mutex_lock(&worker_hang_lock); break; case RESUME_ALL_THREADS: lru_maintainer_resume(); slabs_rebalancer_resume(); lru_crawler_resume(); #ifdef EXTSTORE storage_compact_resume(); #endif case RESUME_WORKER_THREADS: pthread_mutex_unlock(&worker_hang_lock); break; default: fprintf(stderr, "Unknown lock type: %d\n", type); assert(1 == 0); break; } /* Only send a message if we have one. */ if (buf[0] == 0) { return; } pthread_mutex_lock(&init_lock); init_count = 0; for (i = 0; i < settings.num_threads; i++) { if (write(threads[i].notify_send_fd, buf, 1) != 1) { perror("Failed writing to notify pipe"); /* TODO: This is a fatal problem. Can it ever happen temporarily? */ } } wait_for_thread_registration(settings.num_threads); pthread_mutex_unlock(&init_lock); } /* * Initializes a connection queue. */ static void cq_init(CQ *cq) { pthread_mutex_init(&cq->lock, NULL); cq->head = NULL; cq->tail = NULL; } /* * Looks for an item on a connection queue, but doesn't block if there isn't * one. * Returns the item, or NULL if no item is available */ static CQ_ITEM *cq_pop(CQ *cq) { CQ_ITEM *item; pthread_mutex_lock(&cq->lock); item = cq->head; if (NULL != item) { cq->head = item->next; if (NULL == cq->head) cq->tail = NULL; } pthread_mutex_unlock(&cq->lock); return item; } /* * Adds an item to a connection queue. */ static void cq_push(CQ *cq, CQ_ITEM *item) { item->next = NULL; pthread_mutex_lock(&cq->lock); if (NULL == cq->tail) cq->head = item; else cq->tail->next = item; cq->tail = item; pthread_mutex_unlock(&cq->lock); } /* * Returns a fresh connection queue item. */ static CQ_ITEM *cqi_new(void) { CQ_ITEM *item = NULL; pthread_mutex_lock(&cqi_freelist_lock); if (cqi_freelist) { item = cqi_freelist; cqi_freelist = item->next; } pthread_mutex_unlock(&cqi_freelist_lock); if (NULL == item) { int i; /* Allocate a bunch of items at once to reduce fragmentation */ item = malloc(sizeof(CQ_ITEM) * ITEMS_PER_ALLOC); if (NULL == item) { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); return NULL; } /* * Link together all the new items except the first one * (which we'll return to the caller) for placement on * the freelist. 
*/ for (i = 2; i < ITEMS_PER_ALLOC; i++) item[i - 1].next = &item[i]; pthread_mutex_lock(&cqi_freelist_lock); item[ITEMS_PER_ALLOC - 1].next = cqi_freelist; cqi_freelist = &item[1]; pthread_mutex_unlock(&cqi_freelist_lock); } return item; } /* * Frees a connection queue item (adds it to the freelist.) */ static void cqi_free(CQ_ITEM *item) { pthread_mutex_lock(&cqi_freelist_lock); item->next = cqi_freelist; cqi_freelist = item; pthread_mutex_unlock(&cqi_freelist_lock); } /* * Creates a worker thread. */ static void create_worker(void *(*func)(void *), void *arg) { pthread_attr_t attr; int ret; pthread_attr_init(&attr); if ((ret = pthread_create(&((LIBEVENT_THREAD*)arg)->thread_id, &attr, func, arg)) != 0) { fprintf(stderr, "Can't create thread: %s\n", strerror(ret)); exit(1); } } /* * Sets whether or not we accept new connections. */ void accept_new_conns(const bool do_accept) { pthread_mutex_lock(&conn_lock); do_accept_new_conns(do_accept); pthread_mutex_unlock(&conn_lock); } /****************************** LIBEVENT THREADS *****************************/ /* * Set up a thread's information. */ static void setup_thread(LIBEVENT_THREAD *me) { me->base = event_init(); if (! me->base) { fprintf(stderr, "Can't allocate event base\n"); exit(1); } /* Listen for notifications from other threads */ event_set(&me->notify_event, me->notify_receive_fd, EV_READ | EV_PERSIST, thread_libevent_process, me); event_base_set(me->base, &me->notify_event); if (event_add(&me->notify_event, 0) == -1) { fprintf(stderr, "Can't monitor libevent notify pipe\n"); exit(1); } me->new_conn_queue = malloc(sizeof(struct conn_queue)); if (me->new_conn_queue == NULL) { perror("Failed to allocate memory for connection queue"); exit(EXIT_FAILURE); } cq_init(me->new_conn_queue); if (pthread_mutex_init(&me->stats.mutex, NULL) != 0) { perror("Failed to initialize mutex"); exit(EXIT_FAILURE); } me->suffix_cache = cache_create("suffix", SUFFIX_SIZE, sizeof(char*), NULL, NULL); if (me->suffix_cache == NULL) { fprintf(stderr, "Failed to create suffix cache\n"); exit(EXIT_FAILURE); } #ifdef EXTSTORE me->io_cache = cache_create("io", sizeof(io_wrap), sizeof(char*), NULL, NULL); if (me->io_cache == NULL) { fprintf(stderr, "Failed to create IO object cache\n"); exit(EXIT_FAILURE); } #endif } /* * Worker thread: main event loop */ static void *worker_libevent(void *arg) { LIBEVENT_THREAD *me = arg; /* Any per-thread setup can happen here; memcached_thread_init() will block until * all threads have finished initializing. */ me->l = logger_create(); me->lru_bump_buf = item_lru_bump_buf_create(); if (me->l == NULL || me->lru_bump_buf == NULL) { abort(); } if (settings.drop_privileges) { drop_worker_privileges(); } register_thread_initialized(); event_base_loop(me->base, 0); return NULL; } /* * Processes an incoming "handle a new connection" item. This is called when * input arrives on the libevent wakeup pipe. 
*/ static void thread_libevent_process(int fd, short which, void *arg) { LIBEVENT_THREAD *me = arg; CQ_ITEM *item; char buf[1]; conn *c; unsigned int timeout_fd; if (read(fd, buf, 1) != 1) { if (settings.verbose > 0) fprintf(stderr, "Can't read from libevent pipe\n"); return; } switch (buf[0]) { case 'c': item = cq_pop(me->new_conn_queue); if (NULL == item) { break; } switch (item->mode) { case queue_new_conn: c = conn_new(item->sfd, item->init_state, item->event_flags, item->read_buffer_size, item->transport, me->base); if (c == NULL) { if (IS_UDP(item->transport)) { fprintf(stderr, "Can't listen for events on UDP socket\n"); exit(1); } else { if (settings.verbose > 0) { fprintf(stderr, "Can't listen for events on fd %d\n", item->sfd); } close(item->sfd); } } else { c->thread = me; } break; case queue_redispatch: conn_worker_readd(item->c); break; } cqi_free(item); break; /* we were told to pause and report in */ case 'p': register_thread_initialized(); break; /* a client socket timed out */ case 't': if (read(fd, &timeout_fd, sizeof(timeout_fd)) != sizeof(timeout_fd)) { if (settings.verbose > 0) fprintf(stderr, "Can't read timeout fd from libevent pipe\n"); return; } conn_close_idle(conns[timeout_fd]); break; } } /* Which thread we assigned a connection to most recently. */ static int last_thread = -1; /* * Dispatches a new connection to another thread. This is only ever called * from the main thread, either during initialization (for UDP) or because * of an incoming connection. */ void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags, int read_buffer_size, enum network_transport transport) { CQ_ITEM *item = cqi_new(); char buf[1]; if (item == NULL) { close(sfd); /* given that malloc failed this may also fail, but let's try */ fprintf(stderr, "Failed to allocate memory for connection object\n"); return ; } int tid = (last_thread + 1) % settings.num_threads; LIBEVENT_THREAD *thread = threads + tid; last_thread = tid; item->sfd = sfd; item->init_state = init_state; item->event_flags = event_flags; item->read_buffer_size = read_buffer_size; item->transport = transport; item->mode = queue_new_conn; cq_push(thread->new_conn_queue, item); MEMCACHED_CONN_DISPATCH(sfd, thread->thread_id); buf[0] = 'c'; if (write(thread->notify_send_fd, buf, 1) != 1) { perror("Writing to thread notify pipe"); } } /* * Re-dispatches a connection back to the original thread. Can be called from * any side thread borrowing a connection. */ void redispatch_conn(conn *c) { CQ_ITEM *item = cqi_new(); char buf[1]; if (item == NULL) { /* Can't cleanly redispatch connection. close it forcefully. */ c->state = conn_closed; close(c->sfd); return; } LIBEVENT_THREAD *thread = c->thread; item->sfd = c->sfd; item->init_state = conn_new_cmd; item->c = c; item->mode = queue_redispatch; cq_push(thread->new_conn_queue, item); buf[0] = 'c'; if (write(thread->notify_send_fd, buf, 1) != 1) { perror("Writing to thread notify pipe"); } } /* This misses the allow_new_conns flag :( */ void sidethread_conn_close(conn *c) { c->state = conn_closed; if (settings.verbose > 1) fprintf(stderr, "<%d connection closed from side thread.\n", c->sfd); close(c->sfd); STATS_LOCK(); stats_state.curr_conns--; STATS_UNLOCK(); return; } /********************************* ITEM ACCESS *******************************/ /* * Allocates a new item. 
*/ item *item_alloc(char *key, size_t nkey, int flags, rel_time_t exptime, int nbytes) { item *it; /* do_item_alloc handles its own locks */ it = do_item_alloc(key, nkey, flags, exptime, nbytes); return it; } /* * Returns an item if it hasn't been marked as expired, * lazy-expiring as needed. */ item *item_get(const char *key, const size_t nkey, conn *c, const bool do_update) { item *it; uint32_t hv; hv = hash(key, nkey); item_lock(hv); it = do_item_get(key, nkey, hv, c, do_update); item_unlock(hv); return it; } item *item_touch(const char *key, size_t nkey, uint32_t exptime, conn *c) { item *it; uint32_t hv; hv = hash(key, nkey); item_lock(hv); it = do_item_touch(key, nkey, exptime, hv, c); item_unlock(hv); return it; } /* * Links an item into the LRU and hashtable. */ int item_link(item *item) { int ret; uint32_t hv; hv = hash(ITEM_key(item), item->nkey); item_lock(hv); ret = do_item_link(item, hv); item_unlock(hv); return ret; } /* * Decrements the reference count on an item and adds it to the freelist if * needed. */ void item_remove(item *item) { uint32_t hv; hv = hash(ITEM_key(item), item->nkey); item_lock(hv); do_item_remove(item); item_unlock(hv); } /* * Replaces one item with another in the hashtable. * Unprotected by a mutex lock since the core server does not require * it to be thread-safe. */ int item_replace(item *old_it, item *new_it, const uint32_t hv) { return do_item_replace(old_it, new_it, hv); } /* * Unlinks an item from the LRU and hashtable. */ void item_unlink(item *item) { uint32_t hv; hv = hash(ITEM_key(item), item->nkey); item_lock(hv); do_item_unlink(item, hv); item_unlock(hv); } /* * Does arithmetic on a numeric item value. */ enum delta_result_type add_delta(conn *c, const char *key, const size_t nkey, int incr, const int64_t delta, char *buf, uint64_t *cas) { enum delta_result_type ret; uint32_t hv; hv = hash(key, nkey); item_lock(hv); ret = do_add_delta(c, key, nkey, incr, delta, buf, cas, hv); item_unlock(hv); return ret; } /* * Stores an item in the cache (high level, obeys set/add/replace semantics) */ enum store_item_type store_item(item *item, int comm, conn* c) { enum store_item_type ret; uint32_t hv; hv = hash(ITEM_key(item), item->nkey); item_lock(hv); ret = do_store_item(item, comm, c, hv); item_unlock(hv); return ret; } /******************************* GLOBAL STATS ******************************/ void STATS_LOCK() { pthread_mutex_lock(&stats_lock); } void STATS_UNLOCK() { pthread_mutex_unlock(&stats_lock); } void threadlocal_stats_reset(void) { int ii; for (ii = 0; ii < settings.num_threads; ++ii) { pthread_mutex_lock(&threads[ii].stats.mutex); #define X(name) threads[ii].stats.name = 0; THREAD_STATS_FIELDS #ifdef EXTSTORE EXTSTORE_THREAD_STATS_FIELDS #endif #undef X memset(&threads[ii].stats.slab_stats, 0, sizeof(threads[ii].stats.slab_stats)); memset(&threads[ii].stats.lru_hits, 0, sizeof(uint64_t) * POWER_LARGEST); pthread_mutex_unlock(&threads[ii].stats.mutex); } } void threadlocal_stats_aggregate(struct thread_stats *stats) { int ii, sid; /* The struct has a mutex, but we can safely set the whole thing * to zero since it is unused when aggregating. 
*/ memset(stats, 0, sizeof(*stats)); for (ii = 0; ii < settings.num_threads; ++ii) { pthread_mutex_lock(&threads[ii].stats.mutex); #define X(name) stats->name += threads[ii].stats.name; THREAD_STATS_FIELDS #ifdef EXTSTORE EXTSTORE_THREAD_STATS_FIELDS #endif #undef X for (sid = 0; sid < MAX_NUMBER_OF_SLAB_CLASSES; sid++) { #define X(name) stats->slab_stats[sid].name += \ threads[ii].stats.slab_stats[sid].name; SLAB_STATS_FIELDS #undef X } for (sid = 0; sid < POWER_LARGEST; sid++) { stats->lru_hits[sid] += threads[ii].stats.lru_hits[sid]; stats->slab_stats[CLEAR_LRU(sid)].get_hits += threads[ii].stats.lru_hits[sid]; } pthread_mutex_unlock(&threads[ii].stats.mutex); } } void slab_stats_aggregate(struct thread_stats *stats, struct slab_stats *out) { int sid; memset(out, 0, sizeof(*out)); for (sid = 0; sid < MAX_NUMBER_OF_SLAB_CLASSES; sid++) { #define X(name) out->name += stats->slab_stats[sid].name; SLAB_STATS_FIELDS #undef X } } /* * Initializes the thread subsystem, creating various worker threads. * * nthreads Number of worker event handler threads to spawn */ void memcached_thread_init(int nthreads, void *arg) { int i; int power; for (i = 0; i < POWER_LARGEST; i++) { pthread_mutex_init(&lru_locks[i], NULL); } pthread_mutex_init(&worker_hang_lock, NULL); pthread_mutex_init(&init_lock, NULL); pthread_cond_init(&init_cond, NULL); pthread_mutex_init(&cqi_freelist_lock, NULL); cqi_freelist = NULL; /* Want a wide lock table, but don't waste memory */ if (nthreads < 3) { power = 10; } else if (nthreads < 4) { power = 11; } else if (nthreads < 5) { power = 12; } else if (nthreads <= 10) { power = 13; } else if (nthreads <= 20) { power = 14; } else { /* 32k buckets. just under the hashpower default. */ power = 15; } if (power >= hashpower) { fprintf(stderr, "Hash table power size (%d) cannot be equal to or less than item lock table (%d)\n", hashpower, power); fprintf(stderr, "Item lock table grows with `-t N` (worker threadcount)\n"); fprintf(stderr, "Hash table grows with `-o hashpower=N` \n"); exit(1); } item_lock_count = hashsize(power); item_lock_hashpower = power; item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t)); if (! item_locks) { perror("Can't allocate item locks"); exit(1); } for (i = 0; i < item_lock_count; i++) { pthread_mutex_init(&item_locks[i], NULL); } threads = calloc(nthreads, sizeof(LIBEVENT_THREAD)); if (! threads) { perror("Can't allocate thread descriptors"); exit(1); } for (i = 0; i < nthreads; i++) { int fds[2]; if (pipe(fds)) { perror("Can't create notify pipe"); exit(1); } threads[i].notify_receive_fd = fds[0]; threads[i].notify_send_fd = fds[1]; #ifdef EXTSTORE threads[i].storage = arg; #endif setup_thread(&threads[i]); /* Reserve three fds for the libevent base, and two for the pipe */ stats_state.reserved_fds += 5; } /* Create threads after we've done all the libevent setup. */ for (i = 0; i < nthreads; i++) { create_worker(worker_libevent, &threads[i]); } /* Wait for all the threads to set themselves up before returning. */ pthread_mutex_lock(&init_lock); wait_for_thread_registration(nthreads); pthread_mutex_unlock(&init_lock); }
21,856
26.017305
127
c
null
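A self-contained sketch of the striped item-lock scheme thread.c uses above: a key's hash value is masked down into a power-of-two pool of mutexes (the hashsize/hashmask idiom), so unrelated keys usually take different locks. Pool size and names are illustrative; link with -lpthread.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define LOCK_POWER 4
#define LOCK_COUNT (1u << LOCK_POWER)
#define LOCK_MASK  (LOCK_COUNT - 1u)

static pthread_mutex_t locks[LOCK_COUNT];

int main(void) {
    for (unsigned i = 0; i < LOCK_COUNT; i++)
        pthread_mutex_init(&locks[i], NULL);

    uint32_t hv = 0xdeadbeef;                 /* pretend hash of a key */
    pthread_mutex_lock(&locks[hv & LOCK_MASK]);
    printf("holding stripe %u of %u\n", hv & LOCK_MASK, LOCK_COUNT);
    pthread_mutex_unlock(&locks[hv & LOCK_MASK]);
    return 0;
}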
NearPMSW-main/nearpm/checkpointing/memcached-pmem-checkpointing/cache.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ #include <stdlib.h> #include <string.h> #include <inttypes.h> #ifndef NDEBUG #include <signal.h> #endif #include "cache.h" #ifndef NDEBUG const uint64_t redzone_pattern = 0xdeadbeefcafebabe; int cache_error = 0; #endif const int initial_pool_size = 64; cache_t* cache_create(const char *name, size_t bufsize, size_t align, cache_constructor_t* constructor, cache_destructor_t* destructor) { cache_t* ret = calloc(1, sizeof(cache_t)); char* nm = strdup(name); void** ptr = calloc(initial_pool_size, sizeof(void*)); /* pthread_mutex_init returns an error number on failure, not -1 */ if (ret == NULL || nm == NULL || ptr == NULL || pthread_mutex_init(&ret->mutex, NULL) != 0) { free(ret); free(nm); free(ptr); return NULL; } ret->name = nm; ret->ptr = ptr; ret->freetotal = initial_pool_size; ret->constructor = constructor; ret->destructor = destructor; #ifndef NDEBUG ret->bufsize = bufsize + 2 * sizeof(redzone_pattern); #else ret->bufsize = bufsize; #endif return ret; } static inline void* get_object(void *ptr) { #ifndef NDEBUG uint64_t *pre = ptr; return pre + 1; #else return ptr; #endif } void cache_destroy(cache_t *cache) { while (cache->freecurr > 0) { void *ptr = cache->ptr[--cache->freecurr]; if (cache->destructor) { cache->destructor(get_object(ptr), NULL); } free(ptr); } free(cache->name); free(cache->ptr); pthread_mutex_destroy(&cache->mutex); free(cache); } void* cache_alloc(cache_t *cache) { void *ret; pthread_mutex_lock(&cache->mutex); ret = do_cache_alloc(cache); pthread_mutex_unlock(&cache->mutex); return ret; } void* do_cache_alloc(cache_t *cache) { void *ret; void *object; if (cache->freecurr > 0) { ret = cache->ptr[--cache->freecurr]; object = get_object(ret); } else { object = ret = malloc(cache->bufsize); if (ret != NULL) { object = get_object(ret); if (cache->constructor != NULL && cache->constructor(object, NULL, 0) != 0) { free(ret); object = NULL; } } } #ifndef NDEBUG if (object != NULL) { /* add a simple form of buffer-check */ uint64_t *pre = ret; *pre = redzone_pattern; ret = pre+1; memcpy(((char*)ret) + cache->bufsize - (2 * sizeof(redzone_pattern)), &redzone_pattern, sizeof(redzone_pattern)); } #endif return object; } void cache_free(cache_t *cache, void *ptr) { pthread_mutex_lock(&cache->mutex); do_cache_free(cache, ptr); pthread_mutex_unlock(&cache->mutex); } void do_cache_free(cache_t *cache, void *ptr) { #ifndef NDEBUG /* validate redzone... */ if (memcmp(((char*)ptr) + cache->bufsize - (2 * sizeof(redzone_pattern)), &redzone_pattern, sizeof(redzone_pattern)) != 0) { raise(SIGABRT); cache_error = 1; return; } uint64_t *pre = ptr; --pre; if (*pre != redzone_pattern) { raise(SIGABRT); cache_error = -1; return; } ptr = pre; #endif if (cache->freecurr < cache->freetotal) { cache->ptr[cache->freecurr++] = ptr; } else { /* try to enlarge free connections array */ size_t newtotal = cache->freetotal * 2; void **new_free = realloc(cache->ptr, sizeof(char *) * newtotal); if (new_free) { cache->freetotal = newtotal; cache->ptr = new_free; cache->ptr[cache->freecurr++] = ptr; } else { if (cache->destructor) { cache->destructor(ptr, NULL); } free(ptr); } } }
3,862
23.762821
77
c
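A usage sketch for the object cache implemented above, assuming cache.h from this tree is on the include path and cache.c is linked in; cache_alloc() draws from the freelist (falling back to malloc), and cache_free() returns the buffer to the pool.

#include <string.h>
#include <stdio.h>
#include "cache.h"

int main(void) {
    /* 64-byte buffers, pointer alignment, no constructor/destructor */
    cache_t *c = cache_create("demo", 64, sizeof(void *), NULL, NULL);
    if (c == NULL) return 1;

    char *buf = cache_alloc(c);        /* pooled allocation */
    if (buf) {
        memset(buf, 0, 64);
        cache_free(c, buf);            /* back onto the freelist */
    }
    cache_destroy(c);
    return 0;
}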